]*>(.*?)
|Events related with TV rsrsr
\n ''' + html_freq + html_plots\n\n return html","repo_name":"salomaoalves/DataScience_Visualization","sub_path":"GCalendar/Visu/streaming_events.py","file_name":"streaming_events.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"40490656856","text":"import numpy as np\nfrom PIL import Image\nimport cv2\nfrom random import randrange\nfrom torchvision import transforms\nimport matplotlib.pyplot as plt\n\ndef transform(image, mask=None, size=None):\n image = image.convert('L')\n image = cut_to_spin(image)\n\n if mask is not None:\n image, mask = rotate(image, mask)\n mask = cut_to_spin(mask)\n\n image = np.array(image)\n # image = histo_equalized(image)\n image = clahe_equalized(image)\n image = adjust_gamma(image, 1.2)\n\n #image = transforms.ToTensor()(image)\n if mask is not None:\n #mask = np.array(mask)\n #verify(image, mask)\n #mask = transforms.ToTensor()(mask)\n \n return [image, mask]\n else:\n return image\n\ndef image2gray(image):\n img_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n return img_gray\n\ndef dataset_normalized(imgs):\n imgs_normalized = np.empty(imgs.shape)\n imgs_std = np.std(imgs)\n imgs_mean = np.mean(imgs)\n imgs_normalized = (imgs-imgs_mean)/imgs_std\n imgs_normalized = ((imgs_normalized - np.min(imgs_normalized)) / (np.max(imgs_normalized)-np.min(imgs_normalized)))*255\n return imgs_normalized\n\ndef histo_equalized(imgs):\n imgs_equalized = np.empty(imgs.shape)\n imgs_equalized = cv2.equalizeHist(np.array(imgs, dtype = np.uint8))\n return imgs_equalized\n\ndef clahe_equalized(imgs):\n clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))\n imgs_equalized = np.empty(imgs.shape)\n imgs_equalized = clahe.apply(np.array(imgs, dtype = np.uint8))\n return imgs_equalized\n\ndef adjust_gamma(imgs, gamma=1.0):\n invGamma = 1.0 / gamma\n table = np.array([((i / 255.0) ** invGamma) * 255 for i in np.arange(0, 256)]).astype(\"uint8\")\n 
new_imgs = np.empty(imgs.shape)\n new_imgs = cv2.LUT(np.array(imgs, dtype = np.uint8), table)\n return new_imgs\n\ndef rotate(image, mask):\n func = [\"false\", Image.FLIP_LEFT_RIGHT, Image.FLIP_TOP_BOTTOM, Image.ROTATE_90, Image.ROTATE_180, Image.ROTATE_270]\n get_idx = randrange(len(func))\n \n if func[get_idx] != \"false\":\n #print(mask.shape)\n image = image.transpose(func[get_idx])\n mask = mask.transpose(func[get_idx])\n \n return [image, mask]\n\ndef cut_to_spin(image):\n x, y = image.size\n remaining = abs(x - y) // 2\n if x < y:\n image = image.crop((0, remaining, x, y-remaining))\n else:\n image = image.crop((remaining, 0, x-remaining, y))\n\n x, y = image.size\n\n if x < y:\n image = image.crop((0, 0, x, y - 1))\n elif x > y:\n image = image.crop((0, 0, x - 1, y))\n\n return image\n\ndef verify(image, mask):\n out = cv2.subtract(image.copy(), mask.copy())\n plt.figure()\n plt.title(\"verify\")\n plt.imshow(out, cmap='gray')\n plt.show()\n\n'''\n\nif __name__ == \"__main__\":\n image = cv2.imread(\"./dataset/DRIVE/training/images/21_training.tif\")\n mask = cv2.imread(\"./dataset/DRIVE/training/1st_manual/21_manual1.gif\")\n\n image, mask = spin(image, mask)\n print(mask.shape)\n\n cv2.imshow(\"image\", image)\n cv2.imshow(\"image2\", mask)\n\n cv2.waitKey(0)\n'''","repo_name":"carlylezqy/Deep_Learning","sub_path":"Torch_Network/Segmentation/2D_Luxonus_UNet/image_transforms.py","file_name":"image_transforms.py","file_ext":"py","file_size_in_byte":3085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"36440097979","text":"import tool\nfrom numpy import *\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.linear_model import Ridge\nfrom sklearn.linear_model import Lasso\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import LinearSVC\nfrom sklearn.naive_bayes import GaussianNB\n\n\ndef testLogistic():\n logreg = LogisticRegression(C=1)\n trainingset, 
traininglabels = tool.file2floatMatrix('horseColicTraining.txt', 21)\n testset, testlabels = tool.file2floatMatrix('horseColicTest.txt', 21)\n logreg.fit(trainingset, traininglabels)\n print(\"logreg.coef_: {}\".format(logreg.coef_))\n print(\"logreg.intercept_: {}\".format(logreg.intercept_))\n print(\"Training set score: {:.2f}\".format(logreg.score(trainingset, traininglabels)))\n print(\"Test set score: {:.2f}\".format(logreg.score(testset, testlabels)))\n\n\ndef testSVM():\n svc = LinearSVC(C=50)\n trainingset, traininglabels = tool.file2floatMatrix('horseColicTraining.txt', 21)\n testset, testlabels = tool.file2floatMatrix('horseColicTest.txt', 21)\n svc.fit(trainingset, traininglabels)\n print(\"svc.coef_: {}\".format(svc.coef_))\n print(\"svc.intercept_: {}\".format(svc.intercept_))\n print(\"Training set score: {:.2f}\".format(svc.score(trainingset, traininglabels)))\n print(\"Test set score: {:.2f}\".format(svc.score(testset, testlabels)))\n\n\nif __name__ == '__main__':\n # testSVM()\n testLogistic()\n # testLasso()\n # testOLS()\n","repo_name":"xuhui1231/ml","sub_path":"linearModelWithMglearn.py","file_name":"linearModelWithMglearn.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"85"} +{"seq_id":"29258682914","text":"import sys\nimport pandas as pd\nfrom sqlalchemy import create_engine\n\ndef load_data(messages_filepath, categories_filepath):\n \"\"\"\n\n :param messages_filepath: path to the messages csv file\n :param categories_filepath: path to the categories csv file\n :return: messages and categories combined data_frame\n \"\"\"\n message_df = pd.read_csv(messages_filepath)\n categorie_df = pd.read_csv(categories_filepath)\n final_df = pd.merge(message_df, categorie_df, on='id')\n return final_df\n\n\ndef clean_data(df):\n \"\"\"\n\n :param df: combined dataframe made after merging messages and the categories.\n :return: cleaned df\n \"\"\"\n categories_df = 
df.categories.str.split(';', expand=True) # creating a dataframe of the 36 individual category columns.\n row = categories_df.loc[0, :] # Selecting first row\n category_col_header = row.apply(lambda x: x.split('-')[0]).values # getting different column headers\n categories_df.columns = category_col_header # renaming the columns of `categories`\n\n for column in categories_df:\n # set each value to be the last character of the string\n categories_df[column] = categories_df[column].str.split('-').str[1]\n\n categories_df.apply(pd.to_numeric)\n df.drop('categories', axis=1, inplace=True)\n df = pd.concat([df, categories_df], axis=1)\n return df\n\ndef save_data(df, database_filename):\n \"\"\"\n\n :param df: combined and cleaned dataframe\n :param database_filename: database filepath\n \"\"\"\n db_engine = create_engine('sqlite:///{}'.format(database_filename))\n df.to_sql(\"disaster_response\", db_engine, if_exists='replace', index=False)\n\n\n\n\ndef main():\n if len(sys.argv) == 4:\n\n messages_filepath, categories_filepath, database_filepath = sys.argv[1:]\n\n print('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}'\n .format(messages_filepath, categories_filepath))\n df = load_data(messages_filepath, categories_filepath)\n\n print('Cleaning data...')\n df = clean_data(df)\n \n print('Saving data...\\n DATABASE: {}'.format(database_filepath))\n save_data(df, database_filepath)\n \n print('Cleaned data saved to database!')\n \n else:\n print('Please provide the filepaths of the messages and categories '\\\n 'datasets as the first and second argument respectively, as '\\\n 'well as the filepath of the database to save the cleaned data '\\\n 'to as the third argument. 
\\n\\nExample: python process_data.py '\\\n 'disaster_messages.csv disaster_categories.csv '\\\n 'DisasterResponse.db')\n\n\nif __name__ == '__main__':\n main()","repo_name":"prast567/Disaster-response-pipeline","sub_path":"data/process_data.py","file_name":"process_data.py","file_ext":"py","file_size_in_byte":2684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"7605396272","text":"import numpy as np\nimport cv2\nimport torch\nfrom imageio import imread\n\n\ndef process_resize(w, h, resize):\n assert(len(resize) > 0 and len(resize) <= 2)\n if len(resize) == 1 and resize[0] > -1:\n scale = resize[0] / max(h, w)\n w_new, h_new = int(round(w*scale)), int(round(h*scale))\n elif len(resize) == 1 and resize[0] == -1:\n w_new, h_new = w, h\n else: # len(resize) == 2:\n w_new, h_new = resize[0], resize[1]\n\n # Issue warning if resolution is too small or too large.\n if max(w_new, h_new) < 160:\n print('Warning: input resolution is very small, results may vary')\n elif max(w_new, h_new) > 2000:\n print('Warning: input resolution is very large, results may vary')\n\n return w_new, h_new\n\n\ndef frame2tensor(frame, device):\n return torch.from_numpy(frame/255.).float()[None, None].to(device)\n\n\ndef get_new_resolution_with_minimum(minSize, I, strideNet):\n h, w = I.shape[:2]\n ratio = min(w / float(minSize), h / float(minSize))\n new_w, new_h = round(w / ratio), round(h / ratio)\n new_w, new_h = new_w // strideNet * strideNet, new_h // strideNet * strideNet\n return new_w, new_h\n\n\ndef read_image(path, device, rotation, resize_float, resize=None, min_size=None, strideNet=8):\n image = imread(str(path))\n if image is None:\n return None, None, None\n\n w, h = image.shape[1], image.shape[0]\n if min_size is not None:\n # it means we need to resize the image keeping aspect ratio so that smallest side is equal to min_size\n w_new, h_new = get_new_resolution_with_minimum(min_size, image, strideNet)\n else:\n 
w_new, h_new = process_resize(w, h, resize)\n scales = (float(w) / float(w_new), float(h) / float(h_new))\n\n if resize_float:\n image = cv2.resize(image.astype('float32'), (w_new, h_new))\n else:\n image = cv2.resize(image, (w_new, h_new))#.astype('float32')\n\n if rotation != 0:\n image = np.rot90(image, k=rotation).copy()\n # needs the copy for later to be read in torch !\n if rotation % 2:\n scales = scales[::-1]\n\n inp = frame2tensor(image, device)\n return image, inp, scales\n\n\n# --- GEOMETRY ---\ndef estimate_pose(kpts0, kpts1, K0, K1, ransac, thresh, conf=0.99999):\n if len(kpts0) < 5:\n return None\n\n f_mean = np.mean([K0[0, 0], K1[1, 1], K0[0, 0], K1[1, 1]])\n norm_thresh = thresh / f_mean\n\n kpts0 = (kpts0 - K0[[0, 1], [2, 2]][None]) / K0[[0, 1], [0, 1]][None]\n kpts1 = (kpts1 - K1[[0, 1], [2, 2]][None]) / K1[[0, 1], [0, 1]][None]\n\n if ransac:\n E, mask = cv2.findEssentialMat(\n kpts0, kpts1, np.eye(3), threshold=norm_thresh,\n prob=conf,\n method=cv2.RANSAC)\n else:\n E, mask = cv2.findFundamentalMat(\n kpts0, kpts1, method=cv2.FM_8POINT\n )\n\n ret = None\n if E is not None:\n best_num_inliers = 0\n\n for _E in np.split(E, len(E) / 3):\n n, R, t, _ = cv2.recoverPose(\n _E, kpts0, kpts1, np.eye(3), 1e9, mask=mask)\n if n > best_num_inliers:\n best_num_inliers = n\n ret = (R, t[:, 0], mask.ravel() > 0)\n return ret\n\n\ndef rotate_intrinsics(K, image_shape, rot):\n \"\"\"image_shape is the shape of the image after rotation\"\"\"\n assert rot <= 3\n h, w = image_shape[:2][::-1 if (rot % 2) else 1]\n fx, fy, cx, cy = K[0, 0], K[1, 1], K[0, 2], K[1, 2]\n rot = rot % 4\n if rot == 1:\n return np.array([[fy, 0., cy],\n [0., fx, w-1-cx],\n [0., 0., 1.]], dtype=K.dtype)\n elif rot == 2:\n return np.array([[fx, 0., w-1-cx],\n [0., fy, h-1-cy],\n [0., 0., 1.]], dtype=K.dtype)\n else: # if rot == 3:\n return np.array([[fy, 0., h-1-cy],\n [0., fx, cx],\n [0., 0., 1.]], dtype=K.dtype)\n\n\ndef rotate_pose_inplane(i_T_w, rot):\n rotation_matrices = [\n 
np.array([[np.cos(r), -np.sin(r), 0., 0.],\n [np.sin(r), np.cos(r), 0., 0.],\n [0., 0., 1., 0.],\n [0., 0., 0., 1.]], dtype=np.float32)\n for r in [np.deg2rad(d) for d in (0, 270, 180, 90)]\n ]\n return np.dot(rotation_matrices[rot], i_T_w)\n\n\ndef scale_intrinsics(K, scales):\n scales = np.diag([1./scales[0], 1./scales[1], 1.])\n return np.dot(scales, K)\n\n\ndef to_homogeneous(points):\n return np.concatenate([points, np.ones_like(points[:, :1])], axis=-1)\n\n\ndef angle_error_mat(R1, R2):\n cos = (np.trace(np.dot(R1.T, R2)) - 1) / 2\n cos = np.clip(cos, -1., 1.) # numercial errors can make it out of bounds\n return np.rad2deg(np.abs(np.arccos(cos)))\n\n\ndef angle_error_vec(v1, v2):\n n = np.linalg.norm(v1) * np.linalg.norm(v2)\n return np.rad2deg(np.arccos(np.clip(np.dot(v1, v2) / n, -1.0, 1.0)))\n\n\ndef compute_pose_error(T_0to1, R, t):\n R_gt = T_0to1[:3, :3]\n t_gt = T_0to1[:3, 3]\n error_t = angle_error_vec(t, t_gt)\n error_t = np.minimum(error_t, 180 - error_t) # ambiguity of E estimation\n error_R = angle_error_mat(R, R_gt)\n return error_t, error_R\n\n\ndef pose_auc(errors, thresholds):\n sort_idx = np.argsort(errors)\n errors = np.array(errors.copy())[sort_idx]\n recall = (np.arange(len(errors)) + 1) / len(errors)\n errors = np.r_[0., errors]\n recall = np.r_[0., recall]\n aucs = []\n for t in thresholds:\n last_index = np.searchsorted(errors, t)\n r = np.r_[recall[:last_index], recall[last_index-1]]\n e = np.r_[errors[:last_index], t]\n aucs.append(np.trapz(r, x=e)/t)\n return aucs\n\n\ndef matches_from_flow(flow, matchBinary, scaling=1.0):\n \"\"\"\n Retrieves the pixel coordinates of 'good' matches in source and target images, based on provided flow field\n (relating the target to the source image) and a binary mask indicating where the flow is 'good'.\n Args:\n flow: tensor of shape B, 2, H, W (will be reshaped if it is not the case). 
Flow field relating the target\n to the source image, defined in the target image coordinate system.\n binary_mask: bool mask corresponding to valid flow vectors, shape B, H, W\n scaling: scalar or list of scalar (horizontal and then vertical direction):\n scaling factor to apply to the retrieved pixel coordinates in both images.\n\n Returns:\n pixel coordinates of 'good' matches in the source image, Nx2 (numpy array)\n pixel coordinates of 'good' matches in the target image, Nx2 (numpy array)\n \"\"\"\n\n B, _, hB, wB = flow.shape\n xx = torch.arange(0, wB).view(1, -1).repeat(hB, 1)\n yy = torch.arange(0, hB).view(-1, 1).repeat(1, wB)\n xx = xx.view(1, 1, hB, wB).repeat(B, 1, 1, 1)\n yy = yy.view(1, 1, hB, wB).repeat(B, 1, 1, 1)\n grid = torch.cat((xx, yy), 1).float()\n\n if flow.is_cuda:\n grid = grid.cuda()\n matchBinary = matchBinary.cuda()\n\n mapping = flow + grid\n mapping_x = mapping.permute(0, 2, 3, 1)[:, :, :, 0]\n mapping_y = mapping.permute(0, 2, 3, 1)[:, :, :, 1]\n grid_x = grid.permute(0, 2, 3, 1)[:, :, :, 0]\n grid_y = grid.permute(0, 2, 3, 1)[:, :, :, 1]\n\n pts2 = torch.cat((grid_x[matchBinary].unsqueeze(1),\n grid_y[matchBinary].unsqueeze(1)), dim=1)\n pts1 = torch.cat((mapping_x[matchBinary].unsqueeze(1),\n mapping_y[matchBinary].unsqueeze(1)),\n dim=1) # convert to mapping and then take the correspondences\n\n return pts1.cpu().numpy()*scaling, pts2.cpu().numpy()*scaling\n","repo_name":"mattcoldwater/DM_modified","sub_path":"validation/utils_pose_estimation.py","file_name":"utils_pose_estimation.py","file_ext":"py","file_size_in_byte":7552,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"3832875616","text":"import csv,datetime \nfrom datetime import datetime,timedelta\n\n\ndef check_gas_testing_time_compliance() -> bool:\n \n EntFilePath=r'C:\\Users\\prash\\Desktop\\DE\\entrant_gas_reading.csv' #Provide path for entrant_gas_reading.csv\n 
PerFilePath=r'C:\\Users\\prash\\Desktop\\DE\\Periodic_Reading.csv' #Provide path for periodical_gas_reading.csv\n\n Entrant = {} #Creating a dict to store Entrant gas reading \n Periodic = [] #Creating a list to store PEriodical gas reading \n\n##Read the entrant file and store in variable Dict Entrant\n EntFile = open(EntFilePath, 'r')\n EntDict = csv.DictReader(EntFile)\n for row in EntDict:\n Entrant = row\n \n ##Read the Periodic file and store in variable Periodic \n PerFile = open(PerFilePath, 'r') \n PerList = csv.reader(PerFile)\n P = list(PerList)\n for x in P:\n for y in x:\n Periodic.append(y)\n Periodic.pop(0) ##Removing Column name gas reading time from list\n\n\n complaint = False\n\n #Converting string to datetime format \n exittime = datetime.strptime(Entrant['exit time'], \"%Y-%m-%dT%H:%M:%S.%fZ\")\n entrtime = datetime.strptime(Entrant['entry time'], \"%Y-%m-%dT%H:%M:%S.%fZ\")\n entrplus30mins = datetime.strptime(Entrant['entry time'], \"%Y-%m-%dT%H:%M:%S.%fZ\") + timedelta(minutes=30)\n diff = ((exittime - entrtime).total_seconds() / 60)\n\n \n if diff >= 30: # Check if the CW is staying more than 30 mins\n if len(Periodic) > 0: # checking if the periodic entry has values\n for x in Periodic: # checking if periodic entry is in compliance\n if datetime.strptime(x,\"%Y-%m-%dT%H:%M:%S.%fZ\")>=entrtime and datetime.strptime(x,\"%Y-%m-%dT%H:%M:%S.%fZ\")<=entrplus30mins:\n # checking if the periodic read time lies between Entry time and time less than 30 mins plus entry time\n complaint = True\n \n if complaint is True:\n return True\n else:\n return False\n\nif __name__ == \"__main__\":\n if check_gas_testing_time_compliance():\n print(\"Compliant\")\n else:\n print(\"Not 
Compliant\")\n","repo_name":"prashbala27/Codingtestsolution","sub_path":"MagellanX_Enclosed_Space_Entry_Compliant_Test.py","file_name":"MagellanX_Enclosed_Space_Entry_Compliant_Test.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"20447859773","text":"# coding: utf-8\n\nimport os\nimport logging\nfrom . import utils\n\n\ndef install(cwd=False):\n \"\"\"\n Appends the directory of the geckodriver binary file to PATH.\n\n :param cwd: Flag indicating whether to download to current working directory\n :return: The file path of geckodriver\n \"\"\"\n geckodriver_filepath = utils.download_geckodriver(cwd)\n if not geckodriver_filepath:\n logging.debug('Can not download geckodriver.')\n return\n geckodriver_dir = os.path.dirname(geckodriver_filepath)\n if 'PATH' not in os.environ:\n os.environ['PATH'] = geckodriver_dir\n elif geckodriver_dir not in os.environ['PATH']:\n os.environ['PATH'] = geckodriver_dir + utils.get_variable_separator() + os.environ['PATH']\n return geckodriver_filepath\n\n\ndef get_firefox_version():\n \"\"\"\n Get installed version of chrome on client\n\n :return: The version of chrome\n \"\"\"\n return utils.get_firefox_version()\n","repo_name":"yeongbin-jo/python-geckodriver-autoinstaller","sub_path":"geckodriver_autoinstaller/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"85"} +{"seq_id":"19900571196","text":"from sentence_transformers import SentenceTransformer\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport numpy as np\nimport itertools\nfrom transformers import AutoTokenizer, AutoModel\n\n\ndef words_distance(string, word, words, n, nc):\n\twords_d = cosine_similarity(word, word)\n\tidx = list(cosine_similarity(string, 
word).argsort()[0][-nc:])\n \n\twords_of_interest = words_d[np.ix_(idx, idx)]\n\tsum_dist_min=1*10**100\n\ttmp = None\n\tfor c in itertools.combinations(range(len(idx)), n):\n\t\tsum_dist_act = sum([words_of_interest[m][n] for m, n in zip(c, c) if not m == n])\n\t\tif sum_dist_act < sum_dist_min: tmp = c; sum_dist_min = sum_dist_act\n\n\treturn [[words[y] for y in idx][i] for i in tmp]\n\n\ndef word_extraction(string, size=(1, 1), n=5, c=5):\n\twords_vectorize = CountVectorizer(ngram_range=size).fit([string]).get_feature_names()\n\tmodel = SentenceTransformer('distilbert-base-nli-mean-tokens')\n\treturn words_distance(model.encode([string]), model.encode(words_vectorize), words_vectorize, n, c)\n\n","repo_name":"AIDRI/ENCY-AI","sub_path":"src/AI/not_util/word_extraction.py","file_name":"word_extraction.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"85"} +{"seq_id":"20191771604","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nfrom googlecloudsdk.api_lib.compute import base_classes\nfrom googlecloudsdk.api_lib.compute.interconnects import client\nfrom googlecloudsdk.calliope import base\nfrom googlecloudsdk.command_lib.compute.interconnects import flags\n\nDETAILED_HELP = {\n 'DESCRIPTION':\n \"\"\"\\\n *{command}* is used to remove pre-shared key from MACsec configuration of\n interconnect.\n\n For an example, refer to the *EXAMPLES* section below.\n \"\"\",\n # pylint: disable=line-too-long\n 'EXAMPLES':\n \"\"\"\\\n To remove a pre-shared key from MACsec configuration, run:\n\n $ {command} example-interconnect --key-name=default-key\n \"\"\",\n # pylint: enable=line-too-long\n}\n\n\n@base.ReleaseTracks(base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA)\nclass RemoveKey(base.UpdateCommand):\n \"\"\"Remove pre-shared key from a Compute Engine interconnect MACsec configuration.\n\n *{command}* is used to 
remove pre-shared key from MACsec configuration of\n interconnect.\n \"\"\"\n\n INTERCONNECT_ARG = None\n\n @classmethod\n def Args(cls, parser):\n cls.INTERCONNECT_ARG = flags.InterconnectArgument()\n cls.INTERCONNECT_ARG.AddArgument(parser, operation_type='update')\n\n flags.AddMacsecPreSharedKeyNameForRomoveKey(parser)\n\n def Collection(self):\n return 'compute.interconnects'\n\n def Run(self, args):\n holder = base_classes.ComputeApiHolder(self.ReleaseTrack())\n ref = self.INTERCONNECT_ARG.ResolveAsResource(args, holder.resources)\n interconnect = client.Interconnect(ref, compute_client=holder.client)\n\n macsec = interconnect.Describe().macsec\n keys = macsec.preSharedKeys\n macsec.preSharedKeys = [key for key in keys if key.name != args.key_name]\n\n return interconnect.PatchAlphaBeta(\n description=None,\n interconnect_type=None,\n requested_link_count=None,\n link_type=None,\n admin_enabled=None,\n noc_contact_email=None,\n location=None,\n labels=None,\n label_fingerprint=None,\n macsec_enabled=None,\n macsec=macsec)\n\n\nRemoveKey.detailed_help = DETAILED_HELP\n","repo_name":"google-cloud-sdk-unofficial/google-cloud-sdk","sub_path":"lib/surface/compute/interconnects/macsec/remove_key.py","file_name":"remove_key.py","file_ext":"py","file_size_in_byte":2202,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"85"} +{"seq_id":"33491389919","text":"import unittest\nimport os\nimport copy\n\nfrom dotenv import load_dotenv\nfrom pathlib import Path\nfrom premembers.reports.batch import reports\nfrom premembers.repository import pm_reports\nfrom premembers.repository import pm_organizationTasks\nfrom premembers.common import common_utils\nfrom premembers.repository.const import Status\n\ntrace_id = \"eb3b5f76-8945-11e7-b15a-8f7e5433dada\"\nuser_id = common_utils.get_uuid4()\nmail_address = \"test-user{}@example.com\"\norganization_id = \"reports-78ee-11e7-89e6-OrganizationID\"\nproject_id = 
\"reports-77f1-11e7-adfe-ProjectID\"\nreport_id = \"reports-77f1-11e7-adfe-ReportID{}\"\nlog_id = \"reports-77f1-11e7-adfe-LogID{}\"\ntask_id = \"reports-77f1-11e7-adfe-TaskID{}\"\n\nreports_template = {\n \"ReportID\": report_id,\n \"ReportName\": \"reportName\",\n \"GenerateUser\": mail_address.format(str(0)),\n \"AWSAccounts\": [\"awsAccounts1\", \"awsAccounts2\", \"awsAccounts3\"],\n \"Status\": 0,\n \"ResourceInfoPath\": \"resourceInfoPathid\",\n \"JsonOutputPath\": \"jsonOutputPath\",\n \"JsonOutputTime\": \"jsonOutputTimeid\",\n \"HTMLOutputStatus\": 0,\n \"HTMLPath\": \"htmlPath\",\n \"HTMLOutputTime\": \"htmlOutputTime\",\n \"ExcelOutputStatus\": 0,\n \"ExcelPath\": \"7659CD67-03C1-423D-BDDA-6B7C5AF8B247/report/ja-JP/TESTREPORT.xlsx\",\n \"ExcelOutputTime\": \"excelOutputTime\",\n \"SchemaVersion\": 1,\n \"OrganizationID\": organization_id,\n \"ProjectID\": project_id\n}\n\ntask_code = \"DELETE_REPORT\"\norganization_task_template = {\n \"TaskID\": task_id,\n \"Code\": task_code,\n \"Target\": report_id,\n \"UserID\": user_id,\n \"MailAddress\": mail_address.format(str(0)),\n \"TaskStatus\": 0,\n \"RetryCount\": 1,\n \"MaxRetry\": 10,\n}\n\nreport_log_template = {\n 'ReportID': report_id,\n 'LogID': log_id,\n 'Code': \"Code\",\n 'UserID': user_id,\n 'MailAddress': mail_address.format(str(0)),\n 'JobID': common_utils.get_uuid4(),\n 'Parameter': None,\n 'LogStreamName': None\n}\n\n\nclass TestReports(unittest.TestCase):\n def setUp(self):\n dotenv_path = Path(os.getcwd()).joinpath('.env')\n if os.path.exists(dotenv_path):\n load_dotenv(dotenv_path)\n\n num = 1\n while num < 7:\n # Create Report\n tmp_report = copy.copy(reports_template)\n pm_reports.create_report(\n trace_id, tmp_report['ReportID'].format(str(num)),\n tmp_report['ReportName'], tmp_report['GenerateUser'],\n tmp_report['AWSAccounts'], 4, tmp_report['ResourceInfoPath'],\n tmp_report['JsonOutputPath'], tmp_report['JsonOutputTime'],\n tmp_report['HTMLOutputStatus'], tmp_report['HTMLPath'],\n 
tmp_report['HTMLOutputTime'], 2, tmp_report['ExcelPath'],\n tmp_report['ExcelOutputTime'], tmp_report['SchemaVersion'],\n tmp_report['OrganizationID'], tmp_report['ProjectID'])\n\n # Create organization task\n tmp_organization_task = copy.copy(organization_task_template)\n tmp_organization_task['TaskStatus'] = num\n if (num == 3 and num == 6):\n tmp_organization_task['TaskStatus'] = 0\n elif (num == 4):\n tmp_organization_task['TaskStatus'] = -1\n elif (num == 5):\n tmp_organization_task['TaskStatus'] = -1\n tmp_organization_task['RetryCount'] = tmp_organization_task['MaxRetry'] + 1\n\n pm_organizationTasks.create_organizationTask(\n trace_id, tmp_organization_task['TaskID'].format(str(num)),\n tmp_organization_task['Code'],\n tmp_organization_task['Target'].format(str(num)),\n tmp_organization_task['UserID'],\n tmp_organization_task['MailAddress'],\n tmp_organization_task['TaskStatus'],\n tmp_organization_task['RetryCount'],\n tmp_organization_task['MaxRetry'])\n num += 1\n\n def tearDown(self):\n num = 1\n while num < 7:\n pm_organizationTasks.delete(trace_id, task_id.format(str(num)))\n pm_reports.delete_reports(trace_id, report_id.format(str(num)))\n num += 1\n\n def test_batch_delete_reports_susscess(self):\n # Status = 0 Waiting\n event_mock = {\n 'TaskId': task_id.format(str(3)),\n 'Message': {\n 'MessageId': 'MessageId',\n 'ReceiptHandle': 'ReceiptHandle'\n }\n }\n reports.execute_delete_report_handler(event_mock, {})\n # Get data in database\n organization_task = pm_organizationTasks.query_key(task_id.format(str(3)))\n # Check data\n self.assertEqual(int(organization_task['TaskStatus']), Status.Done.value)\n\n # Status = 4 ERROR and RetryCount < MaxRetry\n event_mock['TaskId'] = task_id.format(str(4))\n reports.execute_delete_report_handler(event_mock, {})\n # Get data in database\n organization_task = pm_organizationTasks.query_key(task_id.format(str(4)))\n # Check data\n self.assertEqual(organization_task['TaskStatus'], Status.Done.value)\n\n def 
test_batch_delete_reports_error_status_task(self):\n # Status = 1 Running\n event_mock = {\n 'TaskId': task_id.format(str(1)),\n 'Message': {\n 'MessageId': 'MessageId',\n 'ReceiptHandle': 'ReceiptHandle'\n }\n }\n reports.execute_delete_report_handler(event_mock, {})\n # Get data in database\n organization_task = pm_organizationTasks.query_key(task_id.format(str(1)))\n # Check data\n self.assertEqual(organization_task['TaskStatus'], Status.Error.value)\n\n # Status = 2 Done\n event_mock['TaskId'] = task_id.format(str(2))\n reports.execute_delete_report_handler(event_mock, {})\n # Get data in database\n organization_task = pm_organizationTasks.query_key(task_id.format(str(2)))\n # Check data\n self.assertEqual(organization_task['TaskStatus'], Status.Error.value)\n\n # Status = -1 ERROR and RetryCount > MaxRetry\n event_mock['TaskId'] = task_id.format(str(5))\n reports.execute_delete_report_handler(event_mock, {})\n # Get data in database\n organization_task = pm_organizationTasks.query_key(task_id.format(str(5)))\n # Check data\n self.assertEqual(organization_task['TaskStatus'], Status.Error.value)\n","repo_name":"nisheeth84/prjs_sample","sub_path":"reference-code/lambda/cm-premembers-backend/tests/reports/batch/test_reports.py","file_name":"test_reports.py","file_ext":"py","file_size_in_byte":6400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"36142457467","text":"from .db import db\nfrom sqlalchemy import func\n\n\nclass Song(db.Model):\n __tablename__ = 'songs'\n\n id = db.Column(db.Integer, primary_key=True, nullable=False)\n title = db.Column(db.String(40), nullable=False)\n file_url = db.Column(db.String(500))\n song_img = db.Column(db.String(255))\n musician_id = db.Column(db.Integer, db.ForeignKey('musicians.id'))\n created_at = db.Column(db.DateTime(timezone=True),\n nullable=False, server_default=func.now())\n updated_at = db.Column(db.DateTime(timezone=True),\n nullable=False, 
server_default=func.now())\n\n musicians = db.relationship('Musician', back_populates='songs')\n comments = db.relationship('Comment', back_populates='songs')\n\n def to_dict(self):\n return {\n 'id': self.id,\n 'title': self.title,\n 'file_url': self.file_url,\n 'song_img': self.song_img,\n 'musician_id': self.musician_id,\n 'created_at': self.created_at,\n 'updated_at': self.updated_at,\n }\n","repo_name":"brancifortnick/medley-sesh","sub_path":"app/models/song.py","file_name":"song.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"85"} +{"seq_id":"28705398404","text":"import random\nimport unittest\n\nimport numpy as np\n\nfrom transformers.testing_utils import require_torch, require_vision\nfrom transformers.utils import is_torch_available, is_vision_available\n\nfrom ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs\n\n\nif is_torch_available():\n import torch\n\nif is_vision_available():\n import PIL\n\n from transformers import FlavaImageProcessor\n from transformers.image_utils import PILImageResampling\n from transformers.models.flava.image_processing_flava import (\n FLAVA_CODEBOOK_MEAN,\n FLAVA_CODEBOOK_STD,\n FLAVA_IMAGE_MEAN,\n FLAVA_IMAGE_STD,\n )\nelse:\n FLAVA_IMAGE_MEAN = FLAVA_IMAGE_STD = FLAVA_CODEBOOK_MEAN = FLAVA_CODEBOOK_STD = None\n\n\nclass FlavaImageProcessingTester(unittest.TestCase):\n def __init__(\n self,\n parent,\n batch_size=7,\n num_channels=3,\n min_resolution=30,\n max_resolution=400,\n do_resize=True,\n size=None,\n do_center_crop=True,\n crop_size=None,\n resample=None,\n do_rescale=True,\n rescale_factor=1 / 255,\n do_normalize=True,\n image_mean=FLAVA_IMAGE_MEAN,\n image_std=FLAVA_IMAGE_STD,\n input_size_patches=14,\n total_mask_patches=75,\n mask_group_max_patches=None,\n mask_group_min_patches=16,\n mask_group_min_aspect_ratio=0.3,\n mask_group_max_aspect_ratio=None,\n codebook_do_resize=True,\n 
codebook_size=None,\n codebook_resample=None,\n codebook_do_center_crop=True,\n codebook_crop_size=None,\n codebook_do_map_pixels=True,\n codebook_do_normalize=True,\n codebook_image_mean=FLAVA_CODEBOOK_MEAN,\n codebook_image_std=FLAVA_CODEBOOK_STD,\n ):\n size = size if size is not None else {\"height\": 224, \"width\": 224}\n crop_size = crop_size if crop_size is not None else {\"height\": 224, \"width\": 224}\n codebook_size = codebook_size if codebook_size is not None else {\"height\": 112, \"width\": 112}\n codebook_crop_size = codebook_crop_size if codebook_crop_size is not None else {\"height\": 112, \"width\": 112}\n\n self.parent = parent\n self.batch_size = batch_size\n self.num_channels = num_channels\n self.do_resize = do_resize\n self.do_rescale = do_rescale\n self.rescale_factor = rescale_factor\n self.min_resolution = min_resolution\n self.max_resolution = max_resolution\n self.size = size\n self.resample = resample if resample is not None else PILImageResampling.BICUBIC\n self.do_normalize = do_normalize\n self.image_mean = image_mean\n self.image_std = image_std\n self.do_center_crop = do_center_crop\n self.crop_size = crop_size\n\n self.input_size_patches = input_size_patches\n self.total_mask_patches = total_mask_patches\n self.mask_group_max_patches = mask_group_max_patches\n self.mask_group_min_patches = mask_group_min_patches\n self.mask_group_min_aspect_ratio = mask_group_min_aspect_ratio\n self.mask_group_max_aspect_ratio = mask_group_max_aspect_ratio\n\n self.codebook_do_resize = codebook_do_resize\n self.codebook_size = codebook_size\n self.codebook_resample = codebook_resample if codebook_resample is not None else PILImageResampling.LANCZOS\n self.codebook_do_center_crop = codebook_do_center_crop\n self.codebook_crop_size = codebook_crop_size\n self.codebook_do_map_pixels = codebook_do_map_pixels\n self.codebook_do_normalize = codebook_do_normalize\n self.codebook_image_mean = codebook_image_mean\n self.codebook_image_std = 
codebook_image_std\n\n def prepare_image_processor_dict(self):\n return {\n \"image_mean\": self.image_mean,\n \"image_std\": self.image_std,\n \"do_normalize\": self.do_normalize,\n \"do_resize\": self.do_resize,\n \"size\": self.size,\n \"resample\": self.resample,\n \"do_rescale\": self.do_rescale,\n \"rescale_factor\": self.rescale_factor,\n \"do_center_crop\": self.do_center_crop,\n \"crop_size\": self.crop_size,\n \"input_size_patches\": self.input_size_patches,\n \"total_mask_patches\": self.total_mask_patches,\n \"mask_group_max_patches\": self.mask_group_max_patches,\n \"mask_group_min_patches\": self.mask_group_min_patches,\n \"mask_group_min_aspect_ratio\": self.mask_group_min_aspect_ratio,\n \"mask_group_max_aspect_ratio\": self.mask_group_min_aspect_ratio,\n \"codebook_do_resize\": self.codebook_do_resize,\n \"codebook_size\": self.codebook_size,\n \"codebook_resample\": self.codebook_resample,\n \"codebook_do_center_crop\": self.codebook_do_center_crop,\n \"codebook_crop_size\": self.codebook_crop_size,\n \"codebook_do_map_pixels\": self.codebook_do_map_pixels,\n \"codebook_do_normalize\": self.codebook_do_normalize,\n \"codebook_image_mean\": self.codebook_image_mean,\n \"codebook_image_std\": self.codebook_image_std,\n }\n\n def get_expected_image_size(self):\n return (self.size[\"height\"], self.size[\"width\"])\n\n def get_expected_mask_size(self):\n return (\n (self.input_size_patches, self.input_size_patches)\n if not isinstance(self.input_size_patches, tuple)\n else self.input_size_patches\n )\n\n def get_expected_codebook_image_size(self):\n return (self.codebook_size[\"height\"], self.codebook_size[\"width\"])\n\n def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):\n return prepare_image_inputs(\n batch_size=self.batch_size,\n num_channels=self.num_channels,\n min_resolution=self.min_resolution,\n max_resolution=self.max_resolution,\n equal_resolution=equal_resolution,\n numpify=numpify,\n 
torchify=torchify,\n )\n\n\n@require_torch\n@require_vision\nclass FlavaImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):\n image_processing_class = FlavaImageProcessor if is_vision_available() else None\n maxDiff = None\n\n def setUp(self):\n self.image_processor_tester = FlavaImageProcessingTester(self)\n\n @property\n def image_processor_dict(self):\n return self.image_processor_tester.prepare_image_processor_dict()\n\n def test_image_processor_properties(self):\n image_processing = self.image_processing_class(**self.image_processor_dict)\n self.assertTrue(hasattr(image_processing, \"image_mean\"))\n self.assertTrue(hasattr(image_processing, \"image_std\"))\n self.assertTrue(hasattr(image_processing, \"do_normalize\"))\n self.assertTrue(hasattr(image_processing, \"do_resize\"))\n self.assertTrue(hasattr(image_processing, \"resample\"))\n self.assertTrue(hasattr(image_processing, \"crop_size\"))\n self.assertTrue(hasattr(image_processing, \"do_center_crop\"))\n self.assertTrue(hasattr(image_processing, \"do_rescale\"))\n self.assertTrue(hasattr(image_processing, \"rescale_factor\"))\n self.assertTrue(hasattr(image_processing, \"masking_generator\"))\n self.assertTrue(hasattr(image_processing, \"codebook_do_resize\"))\n self.assertTrue(hasattr(image_processing, \"codebook_size\"))\n self.assertTrue(hasattr(image_processing, \"codebook_resample\"))\n self.assertTrue(hasattr(image_processing, \"codebook_do_center_crop\"))\n self.assertTrue(hasattr(image_processing, \"codebook_crop_size\"))\n self.assertTrue(hasattr(image_processing, \"codebook_do_map_pixels\"))\n self.assertTrue(hasattr(image_processing, \"codebook_do_normalize\"))\n self.assertTrue(hasattr(image_processing, \"codebook_image_mean\"))\n self.assertTrue(hasattr(image_processing, \"codebook_image_std\"))\n\n def test_image_processor_from_dict_with_kwargs(self):\n image_processor = self.image_processing_class.from_dict(self.image_processor_dict)\n self.assertEqual(image_processor.size, 
{\"height\": 224, \"width\": 224})\n self.assertEqual(image_processor.crop_size, {\"height\": 224, \"width\": 224})\n self.assertEqual(image_processor.codebook_size, {\"height\": 112, \"width\": 112})\n self.assertEqual(image_processor.codebook_crop_size, {\"height\": 112, \"width\": 112})\n\n image_processor = self.image_processing_class.from_dict(\n self.image_processor_dict, size=42, crop_size=84, codebook_size=33, codebook_crop_size=66\n )\n self.assertEqual(image_processor.size, {\"height\": 42, \"width\": 42})\n self.assertEqual(image_processor.crop_size, {\"height\": 84, \"width\": 84})\n self.assertEqual(image_processor.codebook_size, {\"height\": 33, \"width\": 33})\n self.assertEqual(image_processor.codebook_crop_size, {\"height\": 66, \"width\": 66})\n\n def test_call_pil(self):\n # Initialize image_processing\n image_processing = self.image_processing_class(**self.image_processor_dict)\n # create random PIL images\n image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False)\n for image in image_inputs:\n self.assertIsInstance(image, PIL.Image.Image)\n\n # Test not batched input\n encoded_images = image_processing(image_inputs[0], return_tensors=\"pt\")\n\n # Test no bool masked pos\n self.assertFalse(\"bool_masked_pos\" in encoded_images)\n\n expected_height, expected_width = self.image_processor_tester.get_expected_image_size()\n\n self.assertEqual(\n encoded_images.pixel_values.shape,\n (1, self.image_processor_tester.num_channels, expected_height, expected_width),\n )\n\n # Test batched\n encoded_images = image_processing(image_inputs, return_tensors=\"pt\")\n expected_height, expected_width = self.image_processor_tester.get_expected_image_size()\n\n # Test no bool masked pos\n self.assertFalse(\"bool_masked_pos\" in encoded_images)\n\n self.assertEqual(\n encoded_images.pixel_values.shape,\n (\n self.image_processor_tester.batch_size,\n self.image_processor_tester.num_channels,\n expected_height,\n expected_width,\n ),\n 
)\n\n def _test_call_framework(self, instance_class, prepare_kwargs):\n # Initialize image_processing\n image_processing = self.image_processing_class(**self.image_processor_dict)\n # create random tensors\n image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, **prepare_kwargs)\n for image in image_inputs:\n self.assertIsInstance(image, instance_class)\n\n # Test not batched input\n encoded_images = image_processing(image_inputs[0], return_tensors=\"pt\")\n\n expected_height, expected_width = self.image_processor_tester.get_expected_image_size()\n self.assertEqual(\n encoded_images.pixel_values.shape,\n (1, self.image_processor_tester.num_channels, expected_height, expected_width),\n )\n\n encoded_images = image_processing(image_inputs, return_image_mask=True, return_tensors=\"pt\")\n\n expected_height, expected_width = self.image_processor_tester.get_expected_image_size()\n self.assertEqual(\n encoded_images.pixel_values.shape,\n (\n self.image_processor_tester.batch_size,\n self.image_processor_tester.num_channels,\n expected_height,\n expected_width,\n ),\n )\n\n expected_height, expected_width = self.image_processor_tester.get_expected_mask_size()\n self.assertEqual(\n encoded_images.bool_masked_pos.shape,\n (\n self.image_processor_tester.batch_size,\n expected_height,\n expected_width,\n ),\n )\n\n # Test batched\n encoded_images = image_processing(image_inputs, return_tensors=\"pt\").pixel_values\n\n expected_height, expected_width = self.image_processor_tester.get_expected_image_size()\n self.assertEqual(\n encoded_images.shape,\n (\n self.image_processor_tester.batch_size,\n self.image_processor_tester.num_channels,\n expected_height,\n expected_width,\n ),\n )\n\n # Test masking\n encoded_images = image_processing(image_inputs, return_image_mask=True, return_tensors=\"pt\")\n\n expected_height, expected_width = self.image_processor_tester.get_expected_image_size()\n self.assertEqual(\n 
encoded_images.pixel_values.shape,\n (\n self.image_processor_tester.batch_size,\n self.image_processor_tester.num_channels,\n expected_height,\n expected_width,\n ),\n )\n\n expected_height, expected_width = self.image_processor_tester.get_expected_mask_size()\n self.assertEqual(\n encoded_images.bool_masked_pos.shape,\n (\n self.image_processor_tester.batch_size,\n expected_height,\n expected_width,\n ),\n )\n\n def test_call_numpy(self):\n self._test_call_framework(np.ndarray, prepare_kwargs={\"numpify\": True})\n\n def test_call_numpy_4_channels(self):\n self.image_processing_class.num_channels = 4\n self._test_call_framework(np.ndarray, prepare_kwargs={\"numpify\": True})\n self.image_processing_class.num_channels = 3\n\n def test_call_pytorch(self):\n self._test_call_framework(torch.Tensor, prepare_kwargs={\"torchify\": True})\n\n def test_masking(self):\n # Initialize image_processing\n random.seed(1234)\n image_processing = self.image_processing_class(**self.image_processor_dict)\n image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)\n\n # Test not batched input\n encoded_images = image_processing(image_inputs[0], return_image_mask=True, return_tensors=\"pt\")\n self.assertEqual(encoded_images.bool_masked_pos.sum().item(), 75)\n\n def test_codebook_pixels(self):\n # Initialize image_processing\n image_processing = self.image_processing_class(**self.image_processor_dict)\n # create random PIL images\n image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False)\n for image in image_inputs:\n self.assertIsInstance(image, PIL.Image.Image)\n\n # Test not batched input\n encoded_images = image_processing(image_inputs[0], return_codebook_pixels=True, return_tensors=\"pt\")\n expected_height, expected_width = self.image_processor_tester.get_expected_codebook_image_size()\n self.assertEqual(\n encoded_images.codebook_pixel_values.shape,\n (1, self.image_processor_tester.num_channels, 
expected_height, expected_width),\n )\n\n # Test batched\n encoded_images = image_processing(image_inputs, return_codebook_pixels=True, return_tensors=\"pt\")\n expected_height, expected_width = self.image_processor_tester.get_expected_codebook_image_size()\n self.assertEqual(\n encoded_images.codebook_pixel_values.shape,\n (\n self.image_processor_tester.batch_size,\n self.image_processor_tester.num_channels,\n expected_height,\n expected_width,\n ),\n )\n","repo_name":"huggingface/transformers","sub_path":"tests/models/flava/test_image_processing_flava.py","file_name":"test_image_processing_flava.py","file_ext":"py","file_size_in_byte":15669,"program_lang":"python","lang":"en","doc_type":"code","stars":115573,"dataset":"github-code","pt":"85"} +{"seq_id":"13738953870","text":"import warnings\nfrom typing import Dict, Optional, Tuple, Union\n\nimport torch\nfrom torch import nn\n\nfrom sige.utils import reduce_mask\nfrom .base import SIGEModule\nfrom .utils import activation\n\n\nclass Gather(SIGEModule):\n def __init__(\n self,\n conv: nn.Conv2d,\n block_size: Union[int, Tuple[int, int]],\n offset: Optional[Union[int, Tuple[int, int]]] = None,\n activation_name: str = \"identity\",\n activation_first: bool = False,\n verbose: bool = False,\n ):\n super(Gather, self).__init__()\n if isinstance(block_size, int):\n block_size = (block_size, block_size)\n\n n0 = max(block_size[0] - conv.kernel_size[0], 0) // conv.stride[0]\n n1 = max(block_size[1] - conv.kernel_size[1], 0) // conv.stride[1]\n b0 = n0 * conv.stride[0] + conv.kernel_size[0]\n b1 = n1 * conv.stride[1] + conv.kernel_size[1]\n if (b0, b1) != block_size:\n warnings.warn(\"Change the block size from (%d, %d) to (%d, %d)\" % (*block_size, b0, b1))\n\n self.model_stride = conv.stride\n self.kernel_size = conv.kernel_size\n\n self.block_size = (b0, b1)\n self.block_stride = ((n0 + 1) * conv.stride[0], (n1 + 1) * conv.stride[1])\n if offset is None:\n self.offset = conv.padding\n else:\n if isinstance(offset, 
int):\n offset = (offset, offset)\n self.offset = offset\n self.activation_name = activation_name\n self.activation_first = activation_first\n self.verbose = verbose\n\n self.load_runtime(\"gather\")\n\n self.input_res: Optional[Tuple[int, int]] = None\n self.active_indices: Optional[torch.Tensor] = None\n\n def forward(\n self, x: torch.Tensor, scale: Optional[torch.Tensor] = None, shift: Optional[torch.Tensor] = None\n ) -> torch.Tensor:\n self.check_dtype(x, scale, shift)\n self.check_dim(x, scale, shift)\n b, c, h, w = x.shape\n if self.mode == \"profile\":\n output = torch.full(\n (b * self.active_indices.size(0), c, *self.block_size),\n fill_value=x[0, 0, 0, 0],\n dtype=x.dtype,\n device=x.device,\n ) # create a dummy gather output depending on the input for profiling\n if scale is not None:\n output = output * scale[0, 0, 0, 0]\n if shift is not None:\n output = output + shift[0, 0, 0, 0]\n output = activation(output, self.activation_name)\n elif self.mode == \"full\":\n self.input_res = x.shape[2:]\n assert scale is None\n assert shift is None\n output = x\n elif self.mode == \"sparse\":\n device = x.device.type\n runtime = self.runtime[device]\n assert runtime is not None\n output = runtime(\n x.contiguous(),\n self.block_size[0],\n self.block_size[1],\n self.active_indices.contiguous(),\n None if scale is None else scale.contiguous(),\n None if shift is None else shift.contiguous(),\n self.activation_name,\n self.activation_first,\n )\n else:\n raise NotImplementedError(\"Unknown mode: [%s]!!!\" % self.mode)\n return output\n\n def set_mask(self, masks: Dict, cache: Dict, timestamp: int):\n if self.timestamp != timestamp:\n super(Gather, self).set_mask(masks, cache, timestamp)\n assert self.input_res is not None\n res = tuple(self.input_res)\n mask = masks[res]\n self.mask = mask\n key = (\"active_indices\", *res, *self.block_size, *self.block_stride, *self.offset)\n active_indices = cache.get(key, None)\n if active_indices is None:\n active_indices = 
reduce_mask(\n mask, self.block_size, self.block_stride, self.offset, verbose=self.verbose\n )\n cache[key] = active_indices\n self.active_indices = active_indices\n","repo_name":"lmxyy/sige","sub_path":"sige/nn/gather.py","file_name":"gather.py","file_ext":"py","file_size_in_byte":4122,"program_lang":"python","lang":"en","doc_type":"code","stars":235,"dataset":"github-code","pt":"85"} +{"seq_id":"38710990685","text":"\"\"\"\nBasic usage\n===========\n\nBasic usage of building a model and fitting it to measurement data of SiO2 on Si.\n\n\"\"\"\n# %%\nimport elli\nfrom elli.fitting import ParamsHist, fit\n\n# sphinx_gallery_thumbnail_path = '_static/basic_usage.png'\n\n\n# %%\n# Reading data\n# ------------------------\n#\n# We load the data from the generated\n# `NeXus file' + html + '')\n return response\n#This code will convert the HTML to PDF\n\n\n\n\n","repo_name":"lagrandecode/oraclepayslip","sub_path":"payslip/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"41103188765","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\"\"\"\nTraining script.\n\"\"\"\n\nfrom __future__ import print_function\n\nimport os\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"]='2'\nimport time\nimport sys\nimport math\nimport argparse\nfrom random import randint\nimport cv2\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.python import debug as tfdbg\nfrom PIL import Image\n#from scipy.misc import imread\nfrom imageio import imread\nimport matplotlib.pyplot as plt\n\nfrom preprocess import *\nfrom model import *\n\n# paths\ntf.app.flags.DEFINE_string('data_root', 'X:/liujin_densematching/MVS_traindata/meitan_RS/train', \"\"\"Path to whu train dataset.\"\"\")\n\ntf.app.flags.DEFINE_string('log_dir', 'MVS_TRANING/tf_log',\n \"\"\"Path to store the log.\"\"\")\ntf.app.flags.DEFINE_string('model_dir', 'MVS_TRANING/tf_model',\n \"\"\"Path to save 
the model.\"\"\")\ntf.app.flags.DEFINE_boolean('use_pretrain', False,\n \"\"\"Whether to train.\"\"\")\ntf.app.flags.DEFINE_integer('ckpt_step', 110000,\n \"\"\"ckpt step.\"\"\")\n\n# input parameters\ntf.app.flags.DEFINE_integer('view_num', 3, \n \"\"\"Number of images (1 ref image and view_num - 1 view images).\"\"\")\ntf.app.flags.DEFINE_integer('max_d', 128,\n \"\"\"Maximum depth step when training.\"\"\")\ntf.app.flags.DEFINE_integer('max_w', 768, \n \"\"\"Maximum image width when training.\"\"\")\ntf.app.flags.DEFINE_integer('max_h', 384, \n \"\"\"Maximum image height when training.\"\"\")\ntf.app.flags.DEFINE_float('sample_scale', 0.5,\n \"\"\"Downsample scale for building cost volume.\"\"\")\ntf.app.flags.DEFINE_float('interval_scale', 2,\n \"\"\"Downsample scale for building cost volume.\"\"\")\ntf.app.flags.DEFINE_float('interval', 0.1, \n \"\"\"Depth interval for building cost volume.\"\"\")\n\n# training parameters\ntf.app.flags.DEFINE_integer('num_gpus', 1, \n \"\"\"Number of GPUs.\"\"\")\ntf.app.flags.DEFINE_integer('batch_size', 1, \n \"\"\"Training batch size.\"\"\")\ntf.app.flags.DEFINE_integer('epoch', 21, \n \"\"\"Training epoch number.\"\"\")\ntf.app.flags.DEFINE_float('val_ratio', 0, \n \"\"\"Ratio of validation set when splitting dataset.\"\"\")\ntf.app.flags.DEFINE_float('base_lr', 0.001,\n \"\"\"Base learning rate.\"\"\")\ntf.app.flags.DEFINE_integer('display', 1,\n \"\"\"Interval of loginfo display.\"\"\")\ntf.app.flags.DEFINE_integer('stepvalue', 5000,\n \"\"\"Step interval to decay learning rate.\"\"\")\ntf.app.flags.DEFINE_integer('snapshot', 5000,\n \"\"\"Step interval to save the model.\"\"\")\ntf.app.flags.DEFINE_float('gamma', 0.9,\n \"\"\"Learning rate decay rate.\"\"\")\n\nFLAGS = tf.app.flags.FLAGS\n\nclass MVSGenerator:\n \"\"\" data generator class, tf only accept generator without param \"\"\"\n def __init__(self, sample_list, view_num):\n self.sample_list = sample_list\n self.view_num = view_num\n self.sample_num = 
len(sample_list)\n self.counter = 0\n \n def __iter__(self):\n while True:\n for data in self.sample_list: \n start_time = time.time()\n\n ###### read input data ######\n images = []\n cams = []\n for view in range(self.view_num):\n image = image_augment(Image.open(data[2 * view]))\n image = center_image(image)\n cam = tr_load_cam(open(data[2 * view + 1]), FLAGS.interval_scale)\n images.append(image)\n cams.append(cam)\n\n depimg = imread(os.path.join(data[2 * self.view_num]))\n depth_image = (np.float32(depimg) / 64.0) # WHU MVS dataset\n\n scaled_cams = scale_mvs_camera(cams, scale=FLAGS.sample_scale)\n\n # mask out-of-range depth pixels (in a relaxed range)\n depth_start = cams[0][1, 3, 0] + cams[0][1, 3, 1]\n depth_end = cams[0][1, 3, 0] + (FLAGS.max_d - 2) * cams[0][1, 3, 1]#\n depth_image = mask_depth_image(depth_image, depth_start, depth_end)\n\n # return mvs input\n self.counter += 1\n duration = time.time() - start_time\n images = np.stack(images, axis=0)\n scaled_cams = np.stack(scaled_cams, axis=0)\n\n yield (images, scaled_cams, cams, depth_image)\n\ndef average_gradients(tower_grads):\n \"\"\"Calculate the average gradient for each shared variable across all towers.\n Note that this function provides a synchronization point across all towers.\n Args:\n tower_grads: List of lists of (gradient, variable) tuples. The outer list\n is over individual gradients. The inner list is over the gradient\n calculation for each tower.\n Returns:\n List of pairs of (gradient, variable) where the gradient has been averaged\n across all towers.\n \"\"\"\n average_grads = []\n for grad_and_vars in zip(*tower_grads):\n # Note that each grad_and_vars looks like the following:\n # ((grad0_gpu0, var0_gpu0), ... 
, (grad0_gpuN, var0_gpuN))\n grads = []\n for g, _ in grad_and_vars:\n # Add 0 dimension to the gradients to represent the tower.\n expanded_g = tf.expand_dims(g, 0)\n\n # Append on a 'tower' dimension which we will average over below.\n grads.append(expanded_g)\n\n # Average over the 'tower' dimension.\n grad = tf.concat(axis=0, values=grads)\n grad = tf.reduce_mean(grad, 0)\n\n # Keep in mind that the Variables are redundant because they are shared\n # across towers. So .. we will just return the first tower's pointer to\n # the Variable.\n v = grad_and_vars[0][1]\n grad_and_var = (grad, v)\n average_grads.append(grad_and_var)\n return average_grads\n\ndef train(traning_list):\n \"\"\" training rednet \"\"\"\n training_sample_size = len(traning_list)\n print ('sample number: ', training_sample_size)\n\n with tf.Graph().as_default(), tf.device('/cpu:0'): \n\n ########## data iterator #########\n # training generators\n training_generator = iter(MVSGenerator(traning_list, FLAGS.view_num))\n generator_data_type = (tf.float32, tf.float32, tf.float32, tf.float32)\n # dataset from generator\n training_set = tf.data.Dataset.from_generator(lambda: training_generator, generator_data_type)\n training_set = training_set.batch(FLAGS.batch_size)\n training_set = training_set.prefetch(buffer_size=1)\n # iterators\n training_iterator = training_set.make_initializable_iterator()\n\n ########## optimization options ##########\n global_step = tf.Variable(0, trainable=False, name='global_step')\n lr_op = tf.train.exponential_decay(FLAGS.base_lr, global_step=global_step, \n decay_steps=FLAGS.stepvalue, decay_rate=FLAGS.gamma, name='lr')\n opt = tf.train.RMSPropOptimizer(learning_rate=lr_op)\n\n tower_grads = []\n for i in range(FLAGS.num_gpus):\n with tf.device('/gpu:%d' % i):\n with tf.name_scope('Model_tower%d' % i) as scope:\n # generate data\n images, scale_cams, cams, depth_image = training_iterator.get_next()\n images.set_shape(tf.TensorShape([None, FLAGS.view_num, None, None, 
3]))\n scale_cams.set_shape(tf.TensorShape([None, FLAGS.view_num, 2, 4, 4]))\n cams.set_shape(tf.TensorShape([None, FLAGS.view_num, 2, 4, 4]))\n depth_image.set_shape(tf.TensorShape([None, None, None, 1]))\n depth_start = tf.reshape(\n tf.slice(scale_cams, [0, 0, 1, 3, 0], [FLAGS.batch_size, 1, 1, 1, 1]), [FLAGS.batch_size])\n depth_interval = tf.reshape(\n tf.slice(scale_cams, [0, 0, 1, 3, 1], [FLAGS.batch_size, 1, 1, 1, 1]), [FLAGS.batch_size])\n\n is_master_gpu = False\n if i == 0:\n is_master_gpu = True\n\n ## inference\n # probability volume\n prob_volume = inference_prob_recurrent(\n images, scale_cams, FLAGS.max_d, depth_start, depth_interval, is_master_gpu)\n\n # classification loss\n loss, mae, less_one_accuracy, less_three_accuracy, depth_map = \\\n tr_classification_loss(\n prob_volume, depth_image, FLAGS.max_d, depth_start, depth_interval)\n\n # retain the summaries from the final tower.\n summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)\n\n # calculate the gradients for the batch of data on this CIFAR tower.\n grads = opt.compute_gradients(loss)\n\n # keep track of the gradients across all towers.\n tower_grads.append(grads)\n \n # average gradient\n grads = average_gradients(tower_grads)\n \n # training opt\n train_opt = opt.apply_gradients(grads, global_step=global_step)\n\n # summary \n summaries.append(tf.summary.scalar('loss', loss))\n summaries.append(tf.summary.scalar('less_one_meter_accuracy', less_one_accuracy))\n summaries.append(tf.summary.scalar('less_three_interval_accuracy', less_three_accuracy))\n summaries.append(tf.summary.scalar('lr', lr_op))\n weights_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n for var in weights_list:\n summaries.append(tf.summary.histogram(var.op.name, var))\n for grad, var in grads:\n if grad is not None:\n summaries.append(tf.summary.histogram(var.op.name + '/gradients', grad))\n \n # saver\n saver = tf.train.Saver(tf.global_variables(), max_to_keep=None) \n summary_op = 
tf.summary.merge(summaries)\n\n # initialization option\n init_op = tf.global_variables_initializer()\n config = tf.ConfigProto(allow_soft_placement = True)\n config.gpu_options.allow_growth = True\n\n with tf.Session(config=config) as sess: \n \n # initialization\n total_step = 0\n sess.run(init_op)\n summary_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)\n\n # load pre-trained model\n if FLAGS.use_pretrain:\n pretrained_model_path = os.path.join(FLAGS.model_dir, 'model.ckpt')\n restorer = tf.train.Saver(tf.global_variables())\n restorer.restore(sess, '-'.join([pretrained_model_path, str(FLAGS.ckpt_step)]))\n print('Pre-trained model restored from %s' %\n ('-'.join([pretrained_model_path, str(FLAGS.ckpt_step)])))\n total_step = FLAGS.ckpt_step\n\n # training several epochs\n for epoch in range(FLAGS.epoch):\n\n # training of one epoch\n step = 0\n sess.run(training_iterator.initializer)\n for _ in range(int(training_sample_size / FLAGS.num_gpus)):\n\n # run one batch\n start_time = time.time()\n try:\n out_summary_op, out_opt, out_loss, out_less_one, out_less_three = sess.run(\n [summary_op, train_opt, loss, less_one_accuracy, less_three_accuracy])\n except tf.errors.OutOfRangeError:\n print(\"End of dataset\") # ==> \"End of dataset\"\n break\n duration = time.time() - start_time\n\n # print info\n if step % FLAGS.display == 0:\n print('epoch, %d, step %d, total_step %d, loss = %.4f, (< 1m) = %.4f, (< 3px) = %.4f (%.3f sec/step)' %\n (epoch, step, total_step, out_loss, out_less_one, out_less_three, duration))\n \n # write summary\n if step % (FLAGS.display * 10) == 0:\n summary_writer.add_summary(out_summary_op, total_step)\n \n # save the model checkpoint periodically\n if (total_step % FLAGS.snapshot == 0 or step == (training_sample_size - 1)):\n model_folder = os.path.join(FLAGS.model_dir)\n if not os.path.exists(model_folder):\n os.mkdir(model_folder)\n ckpt_path = os.path.join(model_folder, 'model.ckpt')\n print('Saving model to %s' % ckpt_path)\n 
saver.save(sess, ckpt_path, global_step=total_step)\n step += FLAGS.batch_size * FLAGS.num_gpus\n total_step += FLAGS.batch_size * FLAGS.num_gpus\n\ndef main(argv=None):\n \"\"\" program entrance \"\"\"\n\n # Prepare all training samples\n sample_list = gen_train_mvs_list(FLAGS.data_root)\n\n # Shuffle\n random.shuffle(sample_list)\n\n # Training entrance.\n train(sample_list)\n\n\nif __name__ == '__main__':\n\n print ('Training RED-Net with %d views' % FLAGS.view_num)\n\n tf.app.run()","repo_name":"gpcv-liujin/REDNet","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":13546,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"85"} +{"seq_id":"5658893269","text":"import argparse\nimport logging\n\n\ndef arg_required(arg):\n if arg is None : \n logging.warning(\"can't find \" + arg)\n exit\n\ndef get_arguments(): \n parser = argparse.ArgumentParser(\"jinja_swagger\")\n parser.add_argument(\"-a\",\"--api_name\", help=\"Enter API Name (i.e: Navy USM Current Occupation)\")\n parser.add_argument(\"-c\",\"--csv\", help=\"api.csv is the default to overwrite us\")\n parser.add_argument(\"-mo\",\"--main_object\", help=\"The main object name not returned in the JSON \")\n parser.add_argument(\"-t\",\"--type\", help=\"type array or object\")\n parser.add_argument(\"-s\",\"--select_by\", help=\"Select by ID \")\n parser.add_argument(\"-ns\",\"--nested_object\", help=\"Object Name in the JSON\")\n parser.add_argument(\"-d\",\"--debug\", help= \"show debug information\")\n args = parser.parse_args()\n\n\n if args.debug is not None:\n logging.Logger.setLevel(logging.DEBUG)\n\n arg_required(args.csv) \n arg_required(args.api_name)\n arg_required(args.select_by) \n arg_required(args.nested_object)\n\n return 
args","repo_name":"hanymorcos/jinja_csv","sub_path":"helpers/arg_procesor.py","file_name":"arg_procesor.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"14805321150","text":"# Search for glyphs with \"ss\" in their suffix, create stylistic\n# sets feature code for them, and copy it to clipboard.\n\nimport mojo\nfrom AppKit import NSPasteboard, NSArray\n\nf = CurrentFont()\n\nsets = []\nfeatures = \"\"\n\nfor g in f.keys():\n if \".\" in g:\n if \"ss\" in g.split(\".\")[1] and g.split(\".\")[1] not in sets:\n sets.append(g.split(\".\")[1])\n\nsets.sort()\n\nfor set in sets:\n features += \"feature %s {\\n\" % set\n for g in f:\n if set in g.name:\n base = g.name.split(\".\")[0]\n suffix = g.name.split(\".\")[1]\n features += \"sub %s by %s;\\n\" % (base, g.name)\n features += \"} %s;\\n\\n\" % set\n\nif features != \"\":\n p = NSPasteboard.generalPasteboard()\n p.clearContents()\n a = NSArray.arrayWithObject_(features)\n p.writeObjects_(a)\n mojo.UI.Message('Stylistic sets features copied to your clipboard')\n\nif features == \"\":\n mojo.UI.Message(\"Could not find properly named glyphs.\")","repo_name":"ryanbugden/Misc-Robofont-Scripts","sub_path":"write_stylistic_sets.py","file_name":"write_stylistic_sets.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"40459439413","text":"from twe import *\n\nopts = Options()\nopts.save_path = \"../output/model/sou\"\nopts.train_data = \"../data/sou/train.dat\"\nopts.test_data = \"../data/sou/test.dat\"\n\nopts.start_time = -5679590961\nopts._start_time = -5679590961\nopts.end_time = 1138662000\nopts._end_time = 1138662000\nopts.time_transform = 681825296\n\nopts.nclst = 200\nopts.clst_window = 30\n\nopts.nepochs = 200\n\nopts.batch_size = 100\nopts.epoch_size = 100000\n\nopts.window_size = 
20\nopts.max_pairs_from_sample = 100\nopts.max_same_target = 10\n\nmain(opts)\n","repo_name":"w-is-h/stwe","sub_path":"tests/test_sou.py","file_name":"test_sou.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"19364516069","text":"from response.generic_response import GenericResponse\n\n\nclass ErrorResponse(GenericResponse):\n ERROR_TYPE = 'error'\n\n def __init__(self, req_id: int, code: int, error: str):\n GenericResponse.__init__(self, req_id=req_id, properties={})\n self.response_type = self.ERROR_TYPE\n self.error_code = code\n self.error_message = error\n","repo_name":"migdea11/ibapi_handler","sub_path":"response/error_response.py","file_name":"error_response.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"19581036687","text":"# 8:50 ~ 9:50\nfrom collections import deque \n\nn, m = map(int, input().split())\n\narr_map = []\nfor _ in range(n):\n arr_map.append(list(map(int,input())))\n\ndef dfs(arr_map):\n x, y = 0, 0\n \n dx = [-1,0,1,0]\n dy = [0,-1,0,1]\n queue = deque()\n queue.append((x,y))\n\n while queue:\n x, y = queue.popleft()\n \n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n\n if 0 <= nx < n and 0 <= ny < m:\n if nx == n - 1 and ny == m - 1:\n return arr_map[x][y] + 1\n\n if arr_map[nx][ny] == 1:\n arr_map[nx][ny] = arr_map[x][y] + 1\n queue.append((nx,ny))\n return False\n\nres = dfs(arr_map)\nprint(res)\n\n \n\n\n","repo_name":"daunjeong824/Practice_Algorithm","sub_path":"BFS_DFS/2178.py","file_name":"2178.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"40861954871","text":"import math\nimport sys\n\nN = int(sys.stdin.readline().rstrip())\nnumbers = list(map(int, sys.stdin.readline().rstrip().split()))\n\nprime_numbers = 
[]\n\nfor i in range(2, 1001):\n prime = True\n for j in range(2, int(math.sqrt(i) + 1)):\n if i % j == 0:\n prime = False\n\n if prime:\n prime_numbers.append(i)\n\ncount = 0\n\nfor num in numbers:\n if num in prime_numbers:\n count += 1\n\nprint(count)\n","repo_name":"KakaoFarm/unan-python-algorithm-study","sub_path":"BOJClass2/1978.py","file_name":"1978.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"8567068409","text":"#!/usr/bin/python3\n'''\nqueries the Reddit API and\nreturns the number of subscribers\n'''\nimport requests\n\n\ndef top_ten(subreddit):\n headers = {\n 'User-Agent': 'My User Agent 1.0'\n }\n URL = \"https://www.reddit.com/r/{}/top.json?limit=10\".format(subreddit)\n try:\n reddit_data = requests.get(url=URL, headers=headers)\n all_data = reddit_data.json().get('data').get('children')\n print('a')\n titles = []\n print('b')\n for i in all_data:\n if i.get('data').get('title'):\n titles.append(i.get('data').get('title'))\n\n print(i)\n print('c')\n for t in titles:\n print(t)\n except:\n print('None')\n","repo_name":"SoniaChevli/holberton-system_engineering-devops","sub_path":"0x16-api_advanced/1-top_ten.py","file_name":"1-top_ten.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"21464485999","text":"from typing import List\n\nfrom transformers import T5Tokenizer, T5Model\nimport torch\nimport numpy as np\n\n\nclass SentenceT5:\n \"\"\"Class to use the [sentence-t5-base-ja-mean-token](https://huggingface.co/sonoisa/sentence-t5-base-ja-mean-tokens)\n \"\"\"\n def __init__(self, model_name_or_path: str, device=None):\n self.tokenizer = T5Tokenizer.from_pretrained(model_name_or_path, is_fast=False)\n self.model = T5Model.from_pretrained(model_name_or_path).encoder\n self.model.eval()\n\n if device is None:\n device = \"cuda\" if 
torch.cuda.is_available() else \"cpu\"\n self.device = torch.device(device)\n self.model.to(device)\n\n def _mean_pooling(self, model_output, attention_mask):\n token_embeddings = model_output[0] #First element of model_output contains all token embeddings\n input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()\n return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)\n\n @torch.no_grad()\n def encode(self, docs: List[str], batch_size=8):\n all_embeddings = []\n iterator = range(0, len(docs), batch_size)\n for batch_idx in iterator:\n batch = docs[batch_idx:batch_idx + batch_size]\n\n encoded_input = self.tokenizer.batch_encode_plus(\n batch,\n max_length=4096,\n padding=\"longest\",\n truncation=True,\n return_tensors=\"pt\").to(self.device)\n model_output = self.model(**encoded_input)\n sentence_embeddings = self._mean_pooling(model_output, encoded_input[\"attention_mask\"]).to('cpu')\n\n all_embeddings.extend(sentence_embeddings)\n\n return torch.stack(all_embeddings)\n","repo_name":"nptdat/llm_retrieval_jawiki","sub_path":"llm_retrieval/src/qa/vectorizer/internal_model/sentence_t5.py","file_name":"sentence_t5.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"85"} +{"seq_id":"42897908346","text":"import typing\nfrom dataclasses import dataclass, field\n\n\n@dataclass\nclass Token:\n \"\"\"\n In an ideal world, this Token class would not be exposed via the user\n visible API. Unfortunately, getting to that point would take a significant\n amount of effort.\n\n It is not expected that these will change, but they might.\n\n At the moment, the only supported use of Token objects are in conjunction\n with the ``tokfmt`` function. As this library matures, we'll try to clarify\n the expectations around these. 
@dataclass
class NamespaceDecl:
    """
    Namespace declarations

    .. code-block:: c++

        namespace foo::bar {}
        ~~~~~~~~
    """

    #: These are the names (split by ::) for this namespace declaration,
    #: but does not include any parent namespace names
    #:
    #: An anonymous namespace is an empty list
    names: typing.List[str]

    #: Set for an inline namespace (``inline namespace foo {}``)
    inline: bool = False
@dataclass
class EnumDecl:
    """
    An enumeration type
    """

    #: Possibly-qualified name of the enumeration
    typename: PQName

    #: The enumerators declared for this enumeration
    values: typing.List[Enumerator]

    #: Explicit underlying type if specified (``enum X : int``),
    #: otherwise None
    base: typing.Optional[PQName] = None

    #: Documentation if present
    doxygen: typing.Optional[str] = None

    #: If within a class, the access level for this decl
    access: typing.Optional[str] = None