diff --git "a/1174.jsonl" "b/1174.jsonl"
new file mode 100644
--- /dev/null
+++ "b/1174.jsonl"
@@ -0,0 +1,506 @@
+{"seq_id":"37363519764","text":"from JNetV3.utils.preprocessing import *\nfrom JNetV3.utils.train import train\nfrom JNetV3.utils.logs import trainlog\nfrom torch.nn import BCEWithLogitsLoss\nfrom torch.optim import lr_scheduler,Adam,RMSprop\nfrom JNetV3.utils.preprocessing import gen_dataloader\nfrom JNetV3.models.losses import CrossEntropyLoss2d,MSELoss2d\nfrom JNetV3.models.imJNetV3 import Mobile_Unet\nfrom JNetV3.Sdata.Saug import *\nimport logging\n\nclass trainAug(object):\n def __init__(self, size=(768, 768)):\n self.augment = Compose([\n RandomSelect([\n RandomRotate(angles=(-30, 30), bound=None),\n RandomResizedCrop(size=size),\n RandomSmall(ratio=0.15),\n ]),\n RandomBrightness(delta=30),\n ResizeImg(size=size),\n RandomHflip(),\n RandomVflip(),\n Normalize(mean=None, std=None)\n ])\n\n def __call__(self, *args):\n return self.augment(*args)\n\n\nclass valAug(object):\n def __init__(self, size=(768, 768)):\n self.augment = Compose([\n # RandomSelect([\n # RandomRotate(angles=(-40, 40), bound=None),\n # RandomResizedCrop(size=size),\n # RandomSmall(ratio=0.15),\n # ]),\n # RandomBrightness(delta=30),\n ResizeImg(size=size),\n # RandomHflip(),\n Normalize(mean=None, std=None)\n ])\n\n def __call__(self, *args):\n return self.augment(*args)\n\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = '0'\n# img_root = '/media/hszc/data1/seg_data/'\n\n# resume = \"/home/handsome/Documents/DefectInspection/RCF-pytorch/JNetV3/model_tmp_mse/weights-553-9-[0.716].pth\"\n\nresume = None\n#\nstart_epoch = 0\n\nbs = 32\nsave_dir = '/home/handsome/Documents/code/orc-table-ssd/JNetV3/model_tmp_mse'\nmodel = Mobile_Unet(num_classes=1,alpha=0.15,alpha_up=0.25)\ntrain_root = '/media/handsome/backupdata/hanson/orc_cropped/segmentation/train'\nval_root = '/media/handsome/backupdata/hanson/orc_cropped/segmentation/test'\n\n# saving dir\nsave_dir = save_dir\nif not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\nlogfile = '%s/trainlog.log' % save_dir\ntrainlog(logfile)\n\ntrain_pd, _ = get_train_val(train_root, test_size=0.0)\n_, val_pd = get_train_val(val_root, test_size=1.0)\n\ndata_set, data_loader = gen_dataloader(train_pd, val_pd, trainAug(), valAug(), train_bs =bs, val_bs=bs,\n train_shuffle=True, val_shuffle=False, dis=None)\nprint(len(data_set['train']), len(data_set['val']))\n\n\nlogging.info(model)\n\ncriterion = torch.nn.MSELoss()\n\noptimizer = RMSprop(model.parameters(), lr=1e-3, alpha=0.9)\n# exp_lr_scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)\nmodel.cuda()\nif resume:\n model.eval()\n logging.info('resuming finetune from %s' % resume)\n try:\n model.load_state_dict(torch.load(resume, map_location=lambda storage, loc: storage))\n except KeyError:\n model = torch.nn.DataParallel(model)\n model.load_state_dict(torch.load(resume))\n # optimizer.load_state_dict(torch.load(os.path.join(save_dir, 'optimizer-state.pth')))\n\n\ntrain(model,\n epoch_num=10000,\n start_epoch=start_epoch,\n optimizer=optimizer,\n criterion=criterion,\n exp_lr_scheduler=None,\n data_set=data_set,\n data_loader=data_loader,\n save_dir=save_dir,\n print_inter=50,\n val_inter=500,\n )","repo_name":"hanson-young/ocr-table-ssd","sub_path":"JNetV3/train_main.py","file_name":"train_main.py","file_ext":"py","file_size_in_byte":3444,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"35"} 
+{"seq_id":"24797203476","text":"'''A module containing the Donuts class, used for measuring\nshifts between images in CCD data.\n'''\nfrom __future__ import print_function, with_statement, division\nfrom astropy.io import fits\nimport numpy as np\nfrom .image import Image\n\n\nclass Donuts(object):\n '''This class provides methods for measuring shifts between\n a series of images of the same star field. First we initialise\n the object and generate a reference image. Subsequent images are\n aligned to this frame of this reference image.\n\n Attributes\n ----------\n None\n '''\n\n def __init__(self, refimage, image_ext=0, exposure='EXPTIME',\n normalise=True, subtract_bkg=True, downweight_edges=True,\n prescan_width=0, overscan_width=0, scan_direction='x',\n border=64, ntiles=32, calculation_area_override=None,\n image_pixel_mask=None, image_class=Image):\n '''Initialise and generate a reference image.\n This reference image is used for measuring frame to frame offsets.\n\n Parameters\n ----------\n refimage : str\n The image representing the reference frame.\n image_ext: int, optional\n The fits image extension to extract. The default is 0.\n exposure : str, optional\n Fits header keyword for exposure time. The default is `EXPTIME`.\n normalise : bool, optional\n Convert image counts to counts/s. The default is True.\n subtract_bkg : bool, optional\n Subtract the sky background. The default is True.\n downweight_edges : bool, optional\n Downweight contribution from pixels near the image edge. The default is True.\n prescan_width : int, optional\n Width of prescan region (left) in pixels. The default is 0.\n overscan_width : int, optional\n Width of overscan region (right) in pixels. The default is 0.\n scan_direction : str, optional\n Direction along which the pre/overscan regions are found ('x' | 'y')\n border : int, optional\n Width of exclusion area to avoid errors from CCD edge effects.\n The default is 64.\n ntiles : int, optional\n Number of tiles used to sample the sky background.\n The default is 32.\n calculation_area_override : list|tuple, optional\n Manually supplied coordinates for shift calculation image area\n Image region is defined as (lower_y, upper_y, lower_x, upper_x)\n e.g. to calculate shifts using the lower left corner with a 500 pix\n square we'd supply:\n (0, 500, 0, 500)\n image_pixel_mask : array | str, optional\n Array of booleans (0|1 or False|True) where the affirmative corresponds\n to the locations of pixels to be masked out from shift calculations\n (e.g. the location of hot-pixels). This boolean array must have the same\n shape as the imager sensor array, including any pre/overscan areas. 
The mask\n is applied to the untrimmed image immediately after loading the data.\n\n If a str is supplied this is assumed to be the path to a fits image on\n disc that contains the image mask in the first (0th) image extension.\n As above, the fits image must be a boolean array (0|1 or False|True) of\n the same shape at the imager, including any pre/overscan regions.\n\n Returns\n -------\n None\n\n Raises\n ------\n None\n '''\n self.image_class = image_class\n self.image_ext = image_ext\n self.ntiles = ntiles\n self.exposure_keyname = exposure\n self.normalise = normalise\n self.subtract_bkg = subtract_bkg\n self.downweight_edges = downweight_edges\n self.prescan_width = prescan_width\n self.overscan_width = overscan_width\n self.scan_direction = scan_direction\n self.border = border\n self.refimage_filename = refimage\n\n # store the image geometry region corners\n # NOTE: ugh, adding manual image area selection adds a crazy number of checks\n # to be made, e.g. that upper bounds are greater than lower bounds, that they\n # are positive, that they fall within the image etc etc.\n # This is an advanced feature and people using this should know enough to\n # supply the values correctly. I'll add a bunch of error checking here if\n # it becomes an issue....\n if calculation_area_override:\n self.image_cly = calculation_area_override[0]\n self.image_cuy = calculation_area_override[1]\n self.image_clx = calculation_area_override[2]\n self.image_cux = calculation_area_override[3]\n self.image_geometry_set = True\n else:\n self.image_cly = None\n self.image_cuy = None\n self.image_clx = None\n self.image_cux = None\n self.image_geometry_set = False\n\n # determine if we're using a mask, if so is it a str (load a fits file) or\n # an array which we can directly apply\n if image_pixel_mask is not None and isinstance(image_pixel_mask, str):\n # load the fits image\n with fits.open(image_pixel_mask) as fitsfile:\n self.image_pixel_mask = fitsfile[0].data\n elif image_pixel_mask is not None and isinstance(image_pixel_mask, np.ndarray):\n # load the array as the mask\n self.image_pixel_mask = image_pixel_mask\n elif image_pixel_mask is None:\n # no mask, leave it as None\n self.image_pixel_mask = image_pixel_mask\n else:\n type_err_str = \"\"\"Unhandled mask type, please supply one of the following:\n 1: A numpy array of booleans\n 2: A str containing the path to a fits image with the mask\n 3: None, for no masking\"\"\"\n raise TypeError(type_err_str)\n\n # make a reference image object\n self.reference_image = self.construct_object(self.refimage_filename)\n\n def construct_object(self, filename):\n '''Builds an ``image_class`` instance which performs most of the work.\n See the :class:`~donuts.image.Image` class for more information.\n\n Parameters\n ----------\n filename : str\n FITS file to open and build an ``image_class`` instance from.\n\n Returns\n -------\n ``image_class`` instance\n\n Raises\n ------\n None\n '''\n # load the data from disc\n with fits.open(filename) as hdulist:\n hdu = hdulist[self.image_ext]\n image = hdu.data\n header = hdu.header\n\n # use masked arrays regardless of masking or not\n # by this point we should have an array or None\n # None will work, but an array of the wrong shape will throw an error\n # print some info if the shapes mismatch\n try:\n masked_image = np.ma.array(image, mask=self.image_pixel_mask, fill_value=0)\n except np.ma.core.MaskError:\n mask_err_str = \"\"\"Wrong mask shape for image\n Image shape: {}\n Mask shape: {}\"\"\".format(image.shape, 
self.image_pixel_mask.shape)\n raise Exception(mask_err_str)\n\n # create the image object\n image = self.image_class(masked_image, header)\n\n # run the preconstruct hook\n image.preconstruct_hook()\n\n # get the image geometry\n if not self.image_geometry_set:\n cly, cuy, clx, cux = image.calculate_image_geometry(\n prescan_width=self.prescan_width,\n overscan_width=self.overscan_width,\n scan_direction=self.scan_direction,\n border=self.border,\n ntiles=self.ntiles\n )\n self.image_cly = cly\n self.image_cuy = cuy\n self.image_clx = clx\n self.image_cux = cux\n self.image_geometry_set = True\n\n # trim the image to match the image geometry\n image.trim(self.image_cly,\n self.image_cuy,\n self.image_clx,\n self.image_cux)\n\n # apply exposure time normalisation\n if self.normalise:\n image.normalise(\n exposure_keyword=self.exposure_keyname\n )\n\n # remove the sky background\n if self.subtract_bkg:\n image.remove_background(\n ntiles=self.ntiles\n )\n if self.downweight_edges:\n image.downweight_edges()\n\n # run the postconstruct hook\n image.postconstruct_hook()\n\n # compute the x and y image projections\n image.compute_projections()\n\n # return the processed image object, ready for shift calculations\n return image\n\n def print_summary(self):\n '''Print a summary of the current settings\n\n Parameters\n ----------\n None\n\n Returns\n -------\n None\n\n Raises\n ------\n None\n '''\n # TODO: print more useful things!\n print('Data Summary:')\n print('\\tExcluding a border of {0:d} pixels'.format(self.border))\n\n if self.subtract_bkg:\n print('Background Subtraction Summary:')\n print('\\tUsing {0:d} x {1:d} grid of tiles'.format(self.ntiles, self.ntiles))\n\n def measure_shift(self, checkimage_filename):\n '''Generate a check image and measure its offset from the reference\n This is done using the same settings as the reference image.\n\n Parameters\n ----------\n checkimage_filename : str\n Image filename to compare to reference image\n\n Returns\n -------\n image: :class:`~donuts.image.Image`\n Instance of an :class:`~donuts.image.Image` object, which has the ``x`` and ``y``\n atributes storing the value of the shift between the chosen image\n and reference image (passed to the :class:`~donuts.Donuts` constructor)\n\n Raises\n ------\n None\n '''\n checkimage = self.construct_object(checkimage_filename)\n checkimage.compute_offset(self.reference_image)\n return checkimage\n","repo_name":"jmccormac01/Donuts","sub_path":"donuts/donuts.py","file_name":"donuts.py","file_ext":"py","file_size_in_byte":10309,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"35"} +{"seq_id":"16532603488","text":"import pygame\n\npygame.init()\n\nwidth = 800\nheight = 500\nwin = pygame.display.set_mode((width,height))\nbutton_font = pygame.font.Font('game_over.ttf',75)\nhead_font = pygame.font.Font('game_over.ttf',200)\n\nwhite = (255,255,255)\nblack = (0,0,0)\nred = (255,0,0)\ngreen = (0,255,0)\nblue = (0,0,255)\n\ndef DisplayText(win, box1, box2, message1, message2):\n text1 = button_font.render(str(message1), True, black)\n text2 = button_font.render(str(message2), True, black)\n rect1 = text1.get_rect()\n rect2 = text2.get_rect()\n rect1.center = box1.center\n rect2.center = box2.center\n win.blit(text1, (rect1[0],rect1[1]))\n win.blit(text2, (rect2[0],rect1[1]))\n\ndef WriteHeading(win,message):\n heading = head_font.render(str(message), True, black)\n heading_rect = heading.get_rect()\n heading_rect.center = (int(width / 2), 100)\n win.blit(heading, 
(heading_rect[0],heading_rect[1]))\n\ndef DrawGameWindow(win,box1,box2,color1,color2):\n win.fill(white)\n WriteHeading(win, 'Ping Pong Game')\n pygame.draw.rect(win, color1, box1)\n pygame.draw.rect(win, color2, box2)\n DisplayText(win,box1,box2,'Single Player', 'Multi Player')\n\ndef Intro():\n\n single_player_box = pygame.Rect(150, int(height / 2), 200, 100)\n multi_player_box = pygame.Rect(450, int(height / 2), 200, 100)\n\n game_over = False\n\n while not game_over:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n game_over = True\n\n mouse_pos = pygame.mouse.get_pos()\n\n if single_player_box.collidepoint(mouse_pos):\n DrawGameWindow(win,single_player_box,multi_player_box,red,green)\n if pygame.mouse.get_pressed()[0] == 1:\n print(1)\n elif multi_player_box.collidepoint(mouse_pos):\n DrawGameWindow(win,single_player_box,multi_player_box,green,red)\n if 1 in pygame.mouse.get_pressed():\n print('Entered Game')\n else:\n DrawGameWindow(win,single_player_box,multi_player_box,green,green)\n\n pygame.display.update()\n\n pygame.quit()\n\nIntro()\n","repo_name":"aditya-sahai/Ping-Pong-Game","sub_path":"Game_Intro.py","file_name":"Game_Intro.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"35413821095","text":"grid = []\nwith open(\"input.txt\") as file:\n for line in file:\n line = line.strip(\"\\n\")\n\n row = [int(x) for x in list(line)]\n grid.append(row)\n\n\nedge_visibility_count = (len(grid) * 2) + ((len(grid[0]) - 2) * 2)\nrow_count = len(grid)\ncol_count = len(grid[0])\n\ninside_visibility_count = set()\nfor i in range(1, row_count - 1):\n for j in range(1, col_count - 1):\n char = grid[i][j]\n try:\n next(e for e in grid[i][:j] if e >= char)\n next(e for e in grid[i][j + 1 :] if e >= char)\n next(grid[i2][j] for i2 in list(range(0, i)) if grid[i2][j] >= char)\n next(\n grid[i2][j]\n for i2 in list(range(i + 1, row_count))\n if grid[i2][j] >= char\n )\n except StopIteration:\n edge_visibility_count += 1\n\n\nprint(edge_visibility_count)\n","repo_name":"Shaharafat/Advent-of-Code","sub_path":"year2022/08/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"24417049984","text":"from argparse import ArgumentParser\nimport pathlib\nfrom datetime import datetime\nfrom datetime import date\n\nfrom prod.Cache import FileDailyCache\nfrom prod.Server import ProdServer\n\n\nDEF_SERVER_NAME = \"prod_server\"\nDEF_CFG_FILE = \"./configs/main.yml\"\nDEF_CACHE_FILE = \"./cache\"\nDEF_CACHE_DATE = date.today()\n\n\ndef prepare_argument_parser() -> ArgumentParser:\n\n parser = ArgumentParser(\n description=\"Caches out of stock products from remote server and returns the path to \" \\\n \"the file with cached data.\")\n \n parser.add_argument(\"--config\", dest=\"cfg_path\", type=str,\n default=DEF_CFG_FILE,\n help=\"Path to the file with configuration in YAML format.\")\n\n parser.add_argument(\"--name\", dest=\"srv_name\", type=str,\n default=DEF_SERVER_NAME,\n help=\"Name of the data server. 
\" \\\n \"Will be used to parse correct section from configuration \" \\\n \"YAML file (see --config option).\")\n\n parser.add_argument(\"--cache\", dest=\"cch_path\", type=str,\n default=DEF_CACHE_FILE,\n help=\"Path to the directory where the cache will be stored.\")\n\n parser.add_argument(\"--date\", dest=\"cch_date\", type=str,\n default=DEF_CACHE_DATE.isoformat(),\n help=\"Date in 'YYYY-MM-DD' format for which the data will be cached\")\n\n return parser\n\n\nif __name__ == \"__main__\":\n # Parse command line arguments:\n parser = prepare_argument_parser()\n args = parser.parse_args()\n configuration_file = pathlib.Path(args.cfg_path)\n cache_directory = pathlib.Path(args.cch_path)\n server_name = args.srv_name\n cache_date = datetime.strptime(args.cch_date, \"%Y-%m-%d\").date()\n\n # Create ProdServer object to request remote HTTP server for out of stock products:\n server = ProdServer(configuration_file, server_name)\n # Create FileDailyCache object to store (cache) retrieved product data into directory: \n cache = FileDailyCache(cache_directory, server, cache_date)\n\n data = cache.get_data() # <- Gets data and cache it at disk if not cached yet\n print(cache.get_cache_filepath())\n","repo_name":"IevgenV/edu-robot-dreams","sub_path":"data-engineering/l02/homework/homework.py","file_name":"homework.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"15411772037","text":"from internal.db.postgres.workflows.tokenization import MintWorkflow\nfrom internal.workflows.tokenization.contract_consts import MINT_STATUS_WORKFLOW_CREATED\n\n\ndef add_mint_workflow(data):\n\n MintWorkflow(\n workflow_id=data.get(\"workflow_id\"),\n sender=data.get(\"sender\"),\n receiver=data.get(\"receiver\"),\n networks=[data.get(\"network\")],\n amount=data.get(\"amount\"),\n asset_name=data.get(\"asset\"),\n event_cate=data.get(\"event_cate\"),\n ).save()\n\n\ndef get_mint_workflow(symbol):\n return MintWorkflow.select().where(\n MintWorkflow.asset_name == symbol,\n MintWorkflow.is_now == True,\n MintWorkflow.workflow_status == MINT_STATUS_WORKFLOW_CREATED\n ).first()\n\n\ndef update_mint_workflow(data):\n data.workflow_status = data.get(\"workflow_status\")\n data.id = None\n data.save()\n","repo_name":"yutu-75/eranthis_api","sub_path":"internal/db/postgres/api/tokenization.py","file_name":"tokenization.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"20986786310","text":"from topological_invariants.hamiltonians.base import (\nChiralHamiltonian,\nSIGMA_X,\nSIGMA_Y\n)\nimport numpy as np\n\n\nclass SuSchriefferHeeger(ChiralHamiltonian):\n\n def __init__(self,alpha:float,beta:float):\n self.alpha = alpha\n self.beta = beta\n\n def __call__(self,k:float):\n return (self.alpha + self.beta*np.cos(k)) * SIGMA_X \\\n + self.beta*np.sin(k) * SIGMA_Y\n","repo_name":"benoitdescamps/Artio","sub_path":"src/main/python/topological_invariants/hamiltonians/su_shrieffer_heeger.py","file_name":"su_shrieffer_heeger.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"33328301756","text":"#\n# The machine, invokes a piston that turns the raw materials into the products\n#\n\nimport time\n\nfrom pyglet.image import load, ImageGrid, Animation\nimport cocos\n\nfrom actor import Actor\nfrom tool import Tool\n\n\n# test\n\nclass Machine(Actor):\n\n def load_animation(self, imgage, delay):\n seq = ImageGrid(load(imgage), 1, 6)\n return Animation.from_image_sequence(seq, delay, loop=False)\n\n def __init__(self, x, y, conveyor_direction, delay, image_file, tool_image):\n image = load(image_file)\n\n # now create actual instance\n super().__init__(image, x, y)\n self.x, self.y = self.nearest_spot(x, y)\n self.delay = delay\n self.toolImage = tool_image\n self.conveyor_direction = conveyor_direction\n\n if conveyor_direction in ('up', 'down'):\n self.orientation = 'horizontal'\n self.rotation = 90\n else:\n self.orientation = 'vertical'\n\n # define collision box\n # increase size of collision box to hit sooner\n # when conveyor belt is faster\n self.cshape.r = 0.25 / self.delay\n self.target = None\n\n # define timer for cool down period\n self.last_stamp = time.perf_counter()\n self.cooldown = 2.0\n self.upgrade_cost = 10\n self.upgrade_level = 0\n\n # start building up pressure\n self.reload_animation = None\n self.begin_reload()\n\n # so that machines are created in actual spots\n\n def nearest_spot(self, x, y):\n cell_size = 64\n cell_size2 = cell_size / 2\n new_x = round((x - cell_size2) / cell_size) * cell_size + cell_size2\n new_y = round((y - cell_size2) / cell_size) * cell_size + cell_size2\n return new_x, new_y\n\n def get_bounding_box(self):\n # original width height\n w, h = self.width, self.height\n if self.orientation == 'horizontal':\n w, h = h, w\n return cocos.rect.Rect(self.x - w / 2, self.y - h / 2, w, h)\n\n # make sure material is in a valid position\n # (in front and not behind the machine)\n def can_hit(self, material):\n if self.conveyor_direction == 'up':\n if material and material.y < self.y:\n return True\n if self.conveyor_direction == 'down':\n if material and material.y > self.y:\n return True\n if self.conveyor_direction == 'right':\n if material and material.x < self.x:\n return True\n if self.conveyor_direction == 'left':\n if material and material.x > self.x:\n return True\n return False\n\n def stamp(self):\n # if collision then:\n # create piston\n if self.target is not None:\n if not self.target.processed:\n if time.perf_counter() > self.last_stamp + self.cooldown:\n self.parent.add(Tool(self.x, self.y, self.orientation,\n self.target, self, self.delay, self.toolImage))\n # reset last stamp so that it does not strike while a piston already exists\n self.last_stamp = time.perf_counter()\n self.target = None\n\n def collide(self, material):\n if self.can_hit(material):\n self.target = material\n self.stamp()\n\n def begin_reload(self):\n if self.reload_animation is not None:\n self.remove(self.reload_animation)\n self.last_stamp = time.perf_counter()\n # restart the animation - it ends on its own\n animation = self.load_animation('img/machineReload.png', self.cooldown / 5)\n # place the dots on top\n self.reload_animation = cocos.sprite.Sprite(animation)\n self.add(self.reload_animation)\n\n def set_cooldown(self, val):\n self.cooldown = val\n self.begin_reload()\n\n def upgrade(self):\n if self.upgrade_level == 0:\n # no upgrade so far\n self.set_cooldown(1)\n self.upgrade_level = 1\n upgrade_image = cocos.sprite.Sprite('img/upgrade.png', (0, 0), opacity=200)\n self.add(upgrade_image)\n return True\n else:\n # at max level - do nothing\n return 
False\n","repo_name":"dbingema/FactorySpiel","sub_path":"machine.py","file_name":"machine.py","file_ext":"py","file_size_in_byte":4262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"15029767829","text":"#! /usr/bin/python3\n# Pcf8591.py file\n\nimport PCF8591 as ADC\n\ndef setup():\n\tADC.setup(0x48)\n\nimport smbus\nimport time\nbus = smbus.SMBus (1)\naddress = 0x48\n\ndef read (control):\n write = bus.write_byte_data (address, control, 0)\n read = bus.read_byte (address)\n return read\n\ndef loop():\n while True:\n poti = read (0x40)\n light = read (0x41)\n temp = read (0x42)\n ain2 = read (0x43)\n print (\"temperature:\", temp, \"light:\", light,\"Voltage - Poti:\", poti)\n time. sleep (0.5)\n\n\ndef destroy():\n\t# nothing to clean up; defined so Ctrl-C exits quietly instead of raising NameError\n\tpass\n\n\nif __name__ == \"__main__\":\n\ttry:\n\t\tsetup()\n\t\tloop()\n\texcept KeyboardInterrupt:\n\t\tdestroy()\n\n\n","repo_name":"rcaspergit/pi_projects","sub_path":"A2D/a2d2_chip.py","file_name":"a2d2_chip.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"15343666653","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nimport zarr\n## toggle comment to switch between directory store and zip store\nstore = zarr.DirectoryStore('../zarr_test_data.zarr')\n# store = zarr.ZipStore('../zarr_test_data.zip', mode='w')\nroot_grp = zarr.group(store, overwrite=True)\n\n\n# In[ ]:\n\n\n# make array without group and uninitialized data, set fill_value\na = zarr.create(shape=(20, 20), chunks=(10, 10), dtype='f4', fill_value=999.0, store=store, overwrite=True)\na[:] = 0\n\n\n# In[ ]:\n\n\n# make group with '/' separator and 'F' order\nattrs_grp = root_grp.create_group('group_with_attrs', overwrite=True)\n\n\n# In[ ]:\n\n\n# add attributes to group\nattrs_grp.attrs['group_attr'] = 'foo'\n\n\n# In[ ]:\n\n\n# add array to group with 'F' order\na = attrs_grp.create_dataset('F_order_array', shape=(20, 20), chunks=(4, 5), dtype='i4', order='F', overwrite=True)\n\n\n# In[ ]:\n\n\n# add data to array\nimport numpy as np\ndata = np.tile(np.arange(20), (20,1))\na[:] = data\n\n\n# In[ ]:\n\n\n# add attributes to array\na.attrs['foo'] = 42\na.attrs['bar'] = 'apples'\na.attrs['baz'] = [1, 2, 3, 4]\n\n\n# In[ ]:\n\n\n# make group for multidimensional data\ndims_grp = root_grp.create_group('group_with_dims', overwrite=True)\n\n\n# In[ ]:\n\n\n# add 1D array\na1 = dims_grp.create_dataset('var1D', shape=(20,), chunks=(4,), dtype='i4', overwrite=True, compressor=None)\ndata = np.arange(20)\na1[:] = data\n\n\n# In[ ]:\n\n\n# add 2D array\na2 = dims_grp.create_dataset('var2D', shape=(20,20), chunks=(4,4), dtype='i4', overwrite=True, compressor=None)\na2[:] = np.tile(data, (20,1))\n\n\n# In[ ]:\n\n\n# add 3D array\na3 = dims_grp.create_dataset('var3D', shape=(20,20,20), chunks=(4,4,4), dtype='i4', overwrite=True, compressor=None)\na3[:] = np.tile(data, (20,20,1))\n\n\n# In[ ]:\n\n\n# add 4D array\na4 = dims_grp.create_dataset('var4D', shape=(20,20,20,20), chunks=(4,4,4,4), dtype='i4', overwrite=True, compressor=None)\na4[:] = np.tile(data, (20,20,20,1))\n\n\n# In[ ]:\n\n\nstore.close()\n\n","repo_name":"Unidata/netcdf-java","sub_path":"cdm/zarr/src/test/data/scripts/make_zarr_test_data.py","file_name":"make_zarr_test_data.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","stars":120,"dataset":"github-code","pt":"35"} +{"seq_id":"37151929738","text":"from twilio.rest import Client \n \naccount_sid = 
'AC72afce24613e859e132e13957f167501' \nauth_token = input(\"Enter auth_token : \")\nclient = Client(account_sid, auth_token) \n \ndef love_message():\n message = client.messages.create( \n from_='whatsapp:+14155238886', \n body='message', \n to='whatsapp:+919876543210' \n )\n print(message.sid)\n","repo_name":"PrasannaDhamo/Automate","sub_path":"auto-texter/Auto_message.py","file_name":"Auto_message.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"3217033634","text":"import os\n\ndef find_string_in_file(filename, search_string):\n \"\"\"Check if the search string is present in the file.\"\"\"\n with open(filename, 'r', encoding='utf-8', errors='ignore') as file:\n content = file.read()\n return search_string in content\n\ndef search_files_in_directory(directory, search_string):\n \"\"\"Search for the string in all files in the directory and its subdirectories.\"\"\"\n matching_files = []\n\n # Walk through root, directories and files in the directory\n for root, dirs, files in os.walk(directory):\n for filename in files:\n full_path = os.path.join(root, filename)\n if find_string_in_file(full_path, search_string):\n matching_files.append(full_path)\n\n return matching_files\n\ndef main():\n # Define the folder from which the Python file was run as the working directory (WD)\n working_directory = os.getcwd()\n\n # Prompt the user for a string\n search_string = input(\"Please enter the string you're searching for: \")\n\n # Search for the string inside all files in the WD and its subdirectories\n matching_files = search_files_in_directory(working_directory, search_string)\n\n # Print out a list of all file paths to matching files\n if matching_files:\n print(\"\\nMatching files are:\")\n for file in matching_files:\n print(file)\n else:\n print(\"\\nNo matching files found.\")\n\n # Prompt for \"press any key...\" at the end\n input(\"\\nPress any key to continue...\")\n\nif __name__ == '__main__':\n main()\n","repo_name":"treehann/th-Python-Windows-Tools","sub_path":"file-tree-internal-searcher.py","file_name":"file-tree-internal-searcher.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"40107413794","text":"import sys\r\nimport worker\r\nimport hashing\r\nimport multiprocessing\r\nfrom ui_form import Ui_PhotoHash\r\nfrom PySide2 import QtCore, QtWidgets\r\nfrom PySide2.QtWidgets import QApplication, QWidget, QMessageBox\r\nfrom PySide2.QtCore import QFile, QThreadPool, QUrl\r\nfrom PySide2.QtGui import QDesktopServices\r\nfrom PySide2 import QtXml\r\n\r\n\r\nclass window(QWidget, Ui_PhotoHash):\r\n def __init__(self):\r\n super(window, self).__init__()\r\n self.setupUi(self)\r\n self.pushButton_1.clicked.connect(self.button_1_clicked)\r\n self.pushButton_2.clicked.connect(self.button_2_clicked)\r\n self.pushButton_3.clicked.connect(self.button_3_clicked)\r\n self.pushButton_4.clicked.connect(self.button_4_clicked)\r\n self.pushButton_5.clicked.connect(self.button_5_clicked)\r\n self.pushButton_6.clicked.connect(self.button_6_clicked)\r\n self.threadpool = QThreadPool()\r\n self.path = \"\"\r\n self.copies = {}\r\n\r\n def progress_fn(self, k, t):\r\n if k == 0:\r\n self.label_2.setText(\"processing started\")\r\n elif k < 100:\r\n self.progressBar.setValue(int(k))\r\n ETA = int(t / k * (100 - k))\r\n PRO = int(t)\r\n self.label_2.setText(\r\n \"ETA: \"\r\n + str(PRO // 60)\r\n + 
\":\"\r\n + str(PRO % 60)\r\n + \" \\ \"\r\n + str(ETA // 60)\r\n + \":\"\r\n + str(ETA % 60)\r\n + \" seconds\"\r\n )\r\n self.label_2.adjustSize()\r\n elif k == 100:\r\n self.progressBar.setValue(100)\r\n self.label_2.setText(\"finishing up...\")\r\n self.label_2.adjustSize()\r\n\r\n def print_output(self, out):\r\n self.progressBar.setValue(100)\r\n self.label_2.setText(\"\")\r\n self.label_2.adjustSize()\r\n self.copies = out[4]\r\n QMessageBox.information(\r\n self,\r\n \"Information\",\r\n \"Done for \"\r\n + \"%s seconds \" % out[0]\r\n + \". Found \"\r\n + str(out[1])\r\n + \" copies.\",\r\n )\r\n\r\n if out[3] == self.path:\r\n self.label_1.setText(\r\n self.path + \" Photos found: \" + str(out[2])\r\n )\r\n self.label_1.adjustSize()\r\n self.pushButton_2.setEnabled(True)\r\n self.pushButton_3.setEnabled(True)\r\n self.pushButton_4.setEnabled(False)\r\n self.pushButton_6.setEnabled(True)\r\n\r\n def thread_complete(self, Len):\r\n self.pushButton_3.setEnabled(True)\r\n self.pushButton_6.setEnabled(True)\r\n QMessageBox.information(self, \"Information\", \"Done!\")\r\n self.label_1.setText(self.path + \" Photos found: \" + str(Len))\r\n self.label_1.adjustSize()\r\n\r\n def num_files(self, Len):\r\n self.label_1.setText(self.path + \" Photos found: \" + str(Len))\r\n self.label_1.adjustSize()\r\n\r\n def button_1_clicked(self):\r\n fileName = QtWidgets.QFileDialog.getExistingDirectory(self, \"OpenFile\")\r\n if fileName != \"\" and fileName != self.path:\r\n self.path = fileName\r\n self.label_1.setText(self.path + \" Looking for photos...\")\r\n self.label_1.adjustSize()\r\n self.copies = {}\r\n\r\n self.pushButton_2.setEnabled(True)\r\n self.pushButton_3.setEnabled(True)\r\n self.pushButton_5.setEnabled(True)\r\n self.pushButton_6.setEnabled(True)\r\n\r\n Num = worker.Worker(hashing.dir_open, self.path)\r\n Num.signals.result.connect(self.num_files)\r\n self.threadpool.start(Num)\r\n\r\n def button_2_clicked(self):\r\n self.progressBar.setValue(0)\r\n self.label_2.setText(\"processing started\")\r\n self.label_2.adjustSize()\r\n self.copies = {}\r\n\r\n self.work = worker.Worker(\r\n hashing.find_simular_images,\r\n [\r\n self.path,\r\n self.comboBox.currentIndex(),\r\n self.comboBox_2.currentIndex(),\r\n ],\r\n )\r\n self.work.signals.result.connect(self.print_output)\r\n self.work.signals.progress.connect(self.progress_fn)\r\n\r\n self.pushButton_4.setEnabled(True)\r\n self.pushButton_2.setEnabled(False)\r\n self.pushButton_3.setEnabled(False)\r\n self.pushButton_6.setEnabled(False)\r\n\r\n self.threadpool.start(self.work)\r\n\r\n def button_3_clicked(self):\r\n self.pushButton_3.setEnabled(False)\r\n self.pushButton_6.setEnabled(False)\r\n self.label_1.setText(\"processing started\")\r\n self.label_1.adjustSize()\r\n copy = worker.Worker(hashing.uncopy, self.path)\r\n copy.signals.result.connect(self.thread_complete)\r\n self.copies = {}\r\n\r\n self.threadpool.start(copy)\r\n\r\n def button_4_clicked(self):\r\n self.work.stop()\r\n\r\n def button_5_clicked(self):\r\n QDesktopServices.openUrl(QUrl.fromLocalFile(self.path))\r\n\r\n def button_6_clicked(self):\r\n qm = QMessageBox\r\n ret = qm.question(\r\n self,\r\n \"Delete copies\",\r\n \"Are you sure you want to delete the copies automatically? 
It is\"\r\n \" recommended to delete manually.\",\r\n qm.Yes | qm.No,\r\n )\r\n\r\n if ret == qm.Yes:\r\n self.pushButton_3.setEnabled(False)\r\n self.pushButton_6.setEnabled(False)\r\n self.label_1.setText(\"processing started\")\r\n self.label_1.adjustSize()\r\n delete = worker.Worker(hashing.delete, [self.path, self.copies])\r\n delete.signals.result.connect(self.thread_complete)\r\n self.copies = {}\r\n self.threadpool.start(delete)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n multiprocessing.freeze_support()\r\n QtCore.QCoreApplication.setAttribute(QtCore.Qt.AA_ShareOpenGLContexts)\r\n app = QtWidgets.QApplication(sys.argv)\r\n widget = window()\r\n widget.show()\r\n sys.exit(app.exec_())\r\n","repo_name":"maxshilin/photo_hash","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"7520277748","text":"import sys\nfrom pathlib import Path\nfrom docopt import docopt\nfrom negar import virastar\nsys.path.append(Path(__file__).parent.as_posix()) # https://stackoverflow.com/questions/16981921\nfrom version import __version__\n\n\ndef main(args=docopt(__doc__)):\n output_file = args[\"--output-file\"]\n file_name = args[\"--input-file\"]\n\n if args[\"--version\"]:\n print (__version__)\n sys.exit()\n\n # Make an argument list to pass to the virastar module for\n # controlling features from command line arguments.\n argli = [argument[2:] for argument in args if args[argument]]\n try:\n with open(file_name, encoding=\"utf8\") as fin:\n lines = fin.read()\n run_PE = virastar.PersianEditor(lines, *argli)\n edited_text = run_PE.cleanup()\n if output_file:\n with open(output_file, \"w\", encoding=\"utf8\") as fout:\n fout.write(edited_text)\n else:\n print(edited_text)\n except IOError:\n print(\"There is a problem! 
I can't read/write to the file.\")\n except Exception as e:\n print(e)\n\nif __name__ == \"__main__\":\n if len(sys.argv) == 1:\n print (__doc__)\n exit()\n\n main(docopt(__doc__))\n","repo_name":"Alir3z4/negar-cli","sub_path":"negar_cli/negar_cli.py","file_name":"negar_cli.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"} +{"seq_id":"26764295647","text":"import os\nimport random\nfrom glob import glob\nfrom shutil import copy, move, copytree, rmtree\nimport cv2\n\nfrom config import paths, DATASET_DIR\n\n\"\"\"\nModule to rename, rearrange the files from CHAOS dataset and change MR ground label to binary for easier handling\nChange the file tree from\nInit---> CT ---> PatientID ---> DICOManon ---> *.dcm\n ---> Ground ---> *.png\n ---> MR ---> PatientID ---> T1DUAL ---> DICOManon ---> InPhase ---> *.dcm\n ---> OutPhase ---> *.dcm\n ---> Ground ---> *.png\n ---> T2SPIR ---> DICOManon ---> *.dcm\n ---> Ground ---> *.png\nto\nInit---> CT ---> PatientID ---> DICOManon ---> *.dcm\n ---> Ground ---> *.png\n ---> MR ---> PatientID ---> DICOManon ---> *.dcm files renamed to T1_In*, T1_Out*, T2_*\n ---> Ground ---> *.png\n\"\"\"\n\n\ndef remove_empty_folders(path, removeroot=True):\n \"\"\"Function to remove empty folders\"\"\"\n if not os.path.isdir(path):\n return\n # remove empty subfolders\n files = os.listdir(path)\n if len(files):\n for f in files:\n fullpath = os.path.join(path, f)\n if os.path.isdir(fullpath):\n remove_empty_folders(fullpath)\n # if folder empty, delete it\n files = os.listdir(path)\n if len(files) == 0 and removeroot:\n print(\"Removing empty folder:\", path)\n os.rmdir(path)\n\n\ndef check_save_png(image, new_path):\n if os.path.exists(new_path):\n print('File exist: {}'.format(new_path))\n else:\n print('Writing: {}'.format(new_path))\n os.makedirs(os.path.dirname(new_path), exist_ok=True)\n cv2.imwrite(new_path, image)\n\n\ndef check_save_dcm(old_path, new_path):\n if os.path.exists(new_path):\n print('File exist: {}'.format(new_path))\n else:\n print('Writing: {}'.format(new_path))\n os.makedirs(os.path.dirname(new_path), exist_ok=True)\n copy(old_path, new_path)\n\n\ndef reconstruct_file_system():\n if not os.path.isdir(paths['chaos']):\n raise ValueError(\"{} is not a directory\".format(paths['chaos']))\n dicoms_list = glob(paths['chaos'] + '/**/*.dcm', recursive=True)\n grounds_list = glob(paths['chaos'] + '/**/*.png', recursive=True)\n for dicom_file in dicoms_list:\n new_dicom_file = dicom_file.replace(paths['chaos'], paths['base'])\n if 'T1DUAL/DICOM_anon/InPhase' in new_dicom_file:\n new_dicom_file = new_dicom_file.replace('T1DUAL/DICOM_anon/InPhase/', 'DICOM_anon/T1_In_')\n check_save_dcm(dicom_file, new_dicom_file)\n elif 'T1DUAL/DICOM_anon/OutPhase' in new_dicom_file:\n new_dicom_file = new_dicom_file.replace('T1DUAL/DICOM_anon/OutPhase/', 'DICOM_anon/T1_Out_')\n check_save_dcm(dicom_file, new_dicom_file)\n elif 'T2SPIR/DICOM_anon' in new_dicom_file:\n new_dicom_file = new_dicom_file.replace('T2SPIR/DICOM_anon/', 'DICOM_anon/T2_')\n check_save_dcm(dicom_file, new_dicom_file)\n else:\n check_save_dcm(dicom_file, new_dicom_file)\n\n for ground_file in grounds_list:\n ground_image = cv2.imread(ground_file, 0) # Ensure label are binary\n ground_image[ground_image == 255] = 63\n ground_image[ground_image != 63] = 0\n ground_image[ground_image == 63] = 255\n new_ground_file = ground_file.replace(paths['chaos'], paths['base'])\n if 'T1DUAL/Ground' in new_ground_file:\n for 
i in ['Ground/T1_In_', 'Ground/T1_Out_']:\n new_ground_file_ = new_ground_file.replace('T1DUAL/Ground/', i)\n check_save_png(image=ground_image, new_path=new_ground_file_)\n elif 'T2SPIR/Ground' in new_ground_file:\n new_ground_file = new_ground_file.replace('T2SPIR/Ground/', 'Ground/T2_')\n check_save_png(image=ground_image, new_path=new_ground_file)\n else:\n check_save_png(image=ground_image, new_path=new_ground_file)\n remove_empty_folders(DATASET_DIR)\n\n\ndef split_evaluation():\n \"\"\"Split evaluation set from data dir. Gets also the processed files of the evaluation set.\"\"\"\n print('Split evaluation set from data dir.')\n if not os.path.exists(paths['eval']): # check if Eval_set dont exist, else make folder\n os.mkdir(paths['eval'])\n evaluation_data = glob(paths['eval'] + '/**/*', recursive=False) # Check if eval data exists\n if evaluation_data:\n print(\"Directory is not empty\")\n print(evaluation_data)\n for eval_patients in evaluation_data: # Remove existing patient files from Eval_Set\n if 'CT' in eval_patients or 'MR' in eval_patients:\n print('Removing {} from {}'.format(os.path.basename(eval_patients), os.path.dirname(eval_patients)))\n rmtree(eval_patients)\n print('Setting up evaluation data')\n data = glob(paths['base'] + '/**/') # Get modal paths from Base folder\n for modalities in data:\n print(modalities)\n if 'CT' in modalities or 'MR' in modalities: # For each modality gets randomly 2 patients to place in Eval_sets\n patients = sorted(glob(modalities + '/**/'))\n print('{0}\\nEvaluation patients\\n{0}'.format('*' * 20))\n eval_patients = random.sample(patients, k=2)\n for patient in eval_patients:\n patients.pop(patients.index(patient)) # pop patients from list\n patient_train = patient.replace(paths['base'], paths['train'])\n patient_eval = patient.replace(paths['base'], paths['eval'])\n if os.path.exists(patient_train): # If patient folder exists in Train_sets folder move it to Eval_Sets\n print('** Move from :{} **\\n** to : {} **'.format(patient_train, patient_eval))\n move(patient_train, patient_eval)\n else:\n print('Copy from: {} \\n to : {}'.format(patient, patient_eval)) # Copy from Base folder.\n copytree(patient, patient_eval)\n print('{0}\\nTraining patients\\n{0}'.format('*' * 20))\n for patient in patients:\n patient_train = patient.replace(paths['base'], paths['train'])\n if os.path.exists(patient_train):\n print('** Already exist : {} **\\nPASS'.format(patient_train))\n else:\n print('Copy from: {} \\n to : {}'.format(patient, patient_train)) # If folder does not exist,\n copytree(patient, patient.replace(paths['base'], paths['train'])) # copy it from base folder\n remove_empty_folders(DATASET_DIR)\n\n\nif __name__ == '__main__':\n reconstruct_file_system()\n split_evaluation()\n exit()\n","repo_name":"livanosg/pytorch_unet","sub_path":"setup_dataset.py","file_name":"setup_dataset.py","file_ext":"py","file_size_in_byte":6834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"6323884142","text":"# coding: utf-8\nfrom __future__ import division\nimport webiopi\nimport os\nimport numpy as np\nimport cv2\nimport math\nimport sys\nfrom PIL import Image\n\n#webiopi\n#save directory\nSAVEDIR = '/home/pi/ex7'\n#a function to take a picture when it is called from html\n@webiopi.macro\ndef camera():\n path = SAVEDIR + '/camera.jpg'\n #taking a photo\n command = 'fswebcam -r 640x480 -d /dev/video0 ' + path\n os.system(command)\n #writing to disk cache\n os.system('sync')\n return 
path\n\nface_cascade = cv2.CascadeClassifier('/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml')\neye_cascade = cv2.CascadeClassifier('/usr/local/share/OpenCV/haarcascades/haarcascade_eye.xml')\n\nimg = cv2.imread(camera(), cv2.IMREAD_COLOR)\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\nfaces = face_cascade.detectMultiScale(gray)\n \nfor (x,y,w,h) in faces:\n img = cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)\n roi_gray = gray[y:y+h, x:x+w]\n #limit eye detection to the upper half of the face\n eye_gray = gray[y : y + (int)(h/2), x : x+w]\n roi_color = img[y:y+h, x:x+w]\n totalx = x\n totaly = y\n for i in range(1,20):\n minValue = i * 5\n eyes = eye_cascade.detectMultiScale(eye_gray, scaleFactor=1.11, minNeighbors=5, minSize=(minValue,minValue)) \n ce = []\n el = list(eyes)\n for (ex,ey,ew,eh) in el:\n ce.append([ex,ey,ew,eh])\n if len(ce) == 2:\n break\n for (ex,ey,ew,eh) in ce:\n cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)\n\ncv2.imwrite('test.jpg',img)\n\n#composite the sunglasses\nlayer1 = Image.open('camera.jpg')\nlayer2 = Image.open('glasses.png')\nw,h = layer2.size\n\n# convert to RGBA mode for compositing\nlayer1 = layer1.convert('RGBA')\n# prepare a transparent canvas of the same size\nc = Image.new('RGBA', layer1.size, (255, 255,255, 0))\n\naaax=0\naaay=0\nbbbx=0\nbbby=0\ntmp = 0\n\n# rect[0]:x, rect[1]:y, rect[2]:width, rect[3]:height\nfor rect in eyes:\n # resize the overlay image\n if rect[2] > rect[3]:\n rate = rect[2] / w\n else:\n rate = rect[3] / h\n\n resize_img = layer2.resize((int(w*rate*0.83), int(h*rate*0.83)))\n \n if aaax == 0:\n aaax = totalx+rect[0]\n aaay = totaly+rect[1]\n else:\n bbbx = totalx+rect[0]+rect[2]\n bbby = totaly+rect[1]\n\nif aaax==0 and aaay==0 and bbbx==0 and bbby==0:\n print(\"Eye detection failed. Please take the photo again.\")\n sys.exit()\n\nif bbbx > 0:\n #derive the angle from the positions of the left and right eyes\n if aaax < bbbx:\n rad= math.atan2(bbby-aaay,bbbx-aaax)\n else:\n rad= math.atan2(aaay-bbby,aaax-bbbx)\n deg = math.degrees(rad)\n print(\"rad:\",rad,\" deg:\",deg)\n if deg > 0 and deg < 90:\n deg = 0-deg\n elif deg >= 90:\n deg = 180-deg\n elif deg < -90:\n deg = 0-deg\n deg = 180-deg\n else:\n deg = 0-deg\n \n#print(\"deg:\",deg)\nsg_rotate = resize_img.rotate(deg,expand=True)\n \n# paste onto the prepared canvas\nif deg > 10:\n aaay = aaay-30\n bbby = bbby-30\nif aaax < bbbx:\n c.paste(sg_rotate, (aaax-25,aaay+12), sg_rotate)\nelse:\n c.paste(sg_rotate, (bbbx-25,bbby+12), sg_rotate)\n \n# composite the original with the canvas and save\nresult = Image.alpha_composite(layer1, c)\nresult.save('result.jpg')\nres = cv2.imread('result.jpg', cv2.IMREAD_COLOR)\ncv2.imshow('result.jpg',res)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","repo_name":"kmdr21g/plus_sunglasses","sub_path":"photo_sunglasses.py","file_name":"photo_sunglasses.py","file_ext":"py","file_size_in_byte":3460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"9567287045","text":"import numpy as np\nimport logistic_regression_functions as f\n\nclass GradientDescent(object):\n \"\"\"Perform the gradient descent optimization algorithm for an arbitrary\n cost function.\n \"\"\"\n\n def __init__(self, cost, gradient, predict_func, \n alpha=0.01,\n num_iterations=10000, fit_intercept = True, standardize = True):\n \"\"\"Initialize the instance attributes of a GradientDescent object.\n\n Parameters\n ----------\n cost: The cost function to be minimized.\n gradient: The gradient of the cost function.\n predict_func: A function to make predictions after the optimization has\n converged.\n alpha: The learning rate.\n num_iterations: Number of iterations to use in the descent.\n\n Returns\n -------\n 
self: The initialized GradientDescent object.\n \"\"\"\n # Initialize coefficients in run method once you know how many features\n # you have.\n self.coeffs = None\n self.cost = cost\n self.gradient = gradient\n self.predict_func = predict_func\n self.alpha = alpha\n self.num_iterations = num_iterations\n self.fit_intercept = fit_intercept\n self.standardize = standardize\n\n def fit(self, X, y, step_size = None):\n \"\"\"Run the gradient descent algorithm for num_iterations repetitions.\n\n Parameters\n ----------\n X: A two dimensional numpy array. The training data for the\n optimization.\n y: A one dimensional numpy array. The training response for the\n optimization.\n\n Returns\n -------\n self: The fit GradientDescent object.\n \"\"\"\n\n cost_step = []\n \n if self.standardize:\n \n X = self.scale_X(X)\n \n if self.fit_intercept:\n \n X = f.add_intercept(X)\n \n self.coeffs = np.zeros(X.shape[1])\n \n if step_size == None:\n \n for _ in range(self.num_iterations):\n \n grad = self.gradient(X, y, self.coeffs)\n self.coeffs = self.coeffs - self.alpha * grad\n \n \n else:\n while True:\n \n self.coeffs = self.coeffs - self.alpha * self.gradient(X, y, self.coeffs)\n cost_step.append(self.cost(X, y, self.coeffs))\n \n # stop once the cost improvement between iterations falls below step_size;\n # guard the first pass, when only one cost value has been recorded\n if len(cost_step) > 1 and (cost_step[-2] - cost_step[-1]) <= step_size:\n print(cost_step[-1], cost_step[-2], len(cost_step))\n break\n \n \n \n\n def predict(self, X):\n \"\"\"Call self.predict_func to return predictions.\n\n Parameters\n ----------\n X: Data to make predictions on.\n\n Returns\n -------\n preds: A one dimensional numpy array of predictions.\n \"\"\"\n \n if self.standardize:\n \n X = self.scale_X(X)\n \n if self.fit_intercept:\n \n X = f.add_intercept(X)\n \n preds = self.predict_func(X, self.coeffs)\n \n return preds\n \n def scale_y(self, y):\n \n y = (y - np.mean(y)) / np.std(y)\n \n return y\n \n \n def scale_X(self, X):\n \n X = (X - np.mean(X, axis = 0)) / np.std(X, axis = 0)\n \n return X\n","repo_name":"ArenasJ7/DSI_FollowUp","sub_path":"Week 5/src_GD/GradientDescent.py","file_name":"GradientDescent.py","file_ext":"py","file_size_in_byte":3398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"42073776892","text":"# Color Balls ( https://www.acmicpc.net/problem/10800 )\n\n# Each player's goal is to capture balls that are smaller than their own ball and of a different color, earning points equal to the captured ball's size.\n# Even after capturing another ball, the color and size of the player's own ball do not change.\n\n# A program that prints, for each player, the sum of the sizes of all the balls they can capture\n'''\nAt first I planned to sort the input array and return it, but since the answers must be returned by ball number, I figured a dict() would be needed!\n'''\nfrom collections import defaultdict\nn = int(input()) #number of balls (1 ≤ N ≤ 200,000)\nstorage = []\nans = []\n\nfor i in range(n):\n c,s = map(int,input().split())\n storage.append([i+1,c,s]) # append as (ball number, ball color, ball size)\nstorage.sort(key = lambda x:x[2])\n\nfor i in range(n):\n sum_ = 0\n for j in range(0,i):\n if storage[j][1] == storage[i][1]:\n continue\n sum_ += storage[j][2]\n ans.append((storage[i][0],sum_))\n\nans.sort(key = lambda x: x[0])\n\nfor i in ans:\n print(i[1])\n","repo_name":"apple2062/algorithm","sub_path":"study/week18/컬러볼(10800).py","file_name":"컬러볼(10800).py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"34646581977","text":"import subprocess, datetime, ConfigParser\nfrom experiment_library import *\n\nwith open('machines_running_beefs', 'r') as machine_data:\n\tmachines = read_machines(machine_data)\n\nconf = {}\nconfig = ConfigParser.ConfigParser()\nconfig.readfp(open('enviroment'))\nfor troco, valor in 
config.items(\"General\"):\n\tconf[troco] = valor\n\nnow = datetime.datetime.now() \ncopy_start_time = now + datetime.timedelta(minutes=int(conf[\"minutes_until_start_copying\"])) + datetime.timedelta(hours=3) # +3 because UTC+3\ncopy_start_time_formatted = str(copy_start_time.day)+\"/\"+str(copy_start_time.month)+\"/\"+ str(copy_start_time.year)+\" \"+str(copy_start_time.hour)+\":\"+str(copy_start_time.minute)\n\n\nfor component, ip_list in machines.items():\n\tfor i in range(len(ip_list)):\n\t\t# For each machine, it sets the same time to start copying\n\t\tactual_ip, local_actual_ip = ip_list[i]\n\t\tcommands = \"\"\"\nimport time, subprocess, sys, datetime\ncommand = \"cp -r /mnt/workload/* /mnt/beefs/test_place/place\"\"\"+str(i)+\"\"\"\" \nwhile True: \n\tif datetime.datetime.now() >= datetime.datetime.strptime('\"\"\"+copy_start_time_formatted+\"\"\"', \"%d/%m/%Y %H:%M\"): break\nstartepoch = int(time.time())\nprocess = subprocess.Popen(command,\n\t\t\tshell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\nout, err = process.communicate()\nendepoch = int(time.time())\nprint datetime.datetime.now().strftime(\"%A, %d. %B %Y %I:%M%p\")\nprint endepoch - startepoch\n\n\"\"\"\t\t\n\t\tscript = open(\"copy.py\", \"w\")\n\t\tscript.write(commands)\n\t\tscript.close()\n\t\tcopy_file(\"copy.py\", actual_ip)\n\t\texecute_with_sudo(\"nohup python /mnt/copy.py > /tmp/result.txt &\", actual_ip)\n\n\n","repo_name":"joopeed/beefs-tester","sub_path":"escalability_experiments/set_copy_start.py","file_name":"set_copy_start.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"41156134589","text":"#downsizing data\nimport importlib\nimport numpy as np\nimport mysklearn.mypytable\nimport os\nimport copy\n\nfrom mysklearn import myutils\nfrom mysklearn.mypytable import MyPyTable\n\n\n\ndef clean_data(random_seed_val, original_filename):\n np.random.seed(random_seed_val)\n\n filename = original_filename\n table = MyPyTable().load_from_file(filename)\n print(\"length before 'NA' values removed:\", len(table.data))\n\n table.remove_rows_with_missing_values()\n print(\"length after 'NA' values removed:\", len(table.data))\n\n # creating downsample data\n table_deep_copy = copy.deepcopy(table)\n\n\n non_stroke = []\n stroke_data = []\n for row in table.data:\n if row[-1] == 0.0:\n non_stroke.append(row)\n if row[-1] == 1.0:\n stroke_data.append(row)\n print(\"amount of non-strokes with no 'NA' rows:\", len(non_stroke))\n print(\"amount of strokes with no 'NA' rows:\", len(stroke_data))\n\n unknown_count = 0\n for row in stroke_data:\n if row[10] == \"Unknown\" or row[10] == \"unknown\":\n unknown_count += 1\n #print(\"num of strokes with unknown smoking status:\", unknown_count)\n\n downsized_non_stroke_data = []\n for i in range(0, 1000 - len(stroke_data)):\n index = np.random.randint(0, len(non_stroke))\n row = non_stroke[index]\n downsized_non_stroke_data.append(row)\n non_stroke.remove(row)\n\n print(\"length of downsized non-stroke data:\", len(downsized_non_stroke_data))\n\n data_downsized = stroke_data + downsized_non_stroke_data # adding 1.0 class label and downsized 0.0 sample\n\n print(\"length of all downsized data:\", len(data_downsized)) # 1000 0 class labels, 209 1 class labels (after removal of 'NA' rows)\n\n final_data = table_deep_copy\n final_data.data = data_downsized\n #print(\"length of final_data\", len(final_data.data))\n\n 
final_data.save_to_file(\"input_data/stroke-data-downsized.csv\")\n\n print(\"-----SAVED DOWNSIZED DATA-----\")\n stroke_count = 0\n non_stroke_count = 0\n for row in final_data.data:\n if row[-1] == 0.0:\n non_stroke_count += 1\n elif row[-1] == 1.0:\n stroke_count += 1\n\n #print(\"non-stroke amount:\", non_stroke_count)\n #print(\"stroke amount:\", stroke_count)\n\n # checking if theres any duplicate rows # can delete loop\n for i in range(len(final_data.data)): \n row = final_data.data[i]\n for j in range(len(final_data.data)):\n if row == final_data.data[j] and j != i:\n print(\"same\")\n\n #FINISH CLEANING IN MYPYTABLE FORM\n stroke_data = MyPyTable()\n stroke_data.load_from_file(\"input_data/stroke-data-downsized.csv\")\n\n #clean stroke data for classification- discretize, convert nominal to numeric\n stroke_data_discretized = myutils.discretize_attributes_for_stroke_classification(stroke_data)\n stroke_data_discretized.save_to_file(\"input_data/stroke-data-discretized.csv\")\n print(\"-----saved discretized columns-----\")\n #strings to numeric\n stroke_data_cleaned_numeric = myutils.numerize_all_strings(stroke_data_discretized)\n stroke_data_cleaned_numeric.save_to_file(\"input_data/stroke-data-all-attributes-cleaned.csv\")\n print(\"-----saved numerical final data-----\")\n\n #print(\"len before removing missing vals\", len(stroke_data.data))\n #code to remove rows w/missing values (use only once relevant attributes are decided)\n #stroke_data.remove_rows_with_missing_values()\n #stroke_data.remove_rows_with_missing_values_by_col()\n #print(\"len after removing missing vals\", len(stroke_data.data))\n\n #ATTRIBUTE SELECTION\n data_for_attribute_selection = copy.deepcopy(stroke_data_cleaned_numeric)\n #0gender,1age,2hypertension,3heart_disease,4ever_married,5work_type,6Residence_type,7avg_glucose_level,8bmi,9smoking_status,10stroke\n #KEEP: 1, 3, 7, 8, 9, 10 REMOVE: 0, 2, 4, 5, 6, \n data_for_attribute_selection.remove_columns([0, 2, 4, 5, 6])\n data_for_attribute_selection.save_to_file(\"input_data/stroke_data_atts_selected.csv\")\n print(\"-----attribute selection complete & saved-----\")","repo_name":"nalger2/cpsc-322-final-project","sub_path":"data_cleaning_work.py","file_name":"data_cleaning_work.py","file_ext":"py","file_size_in_byte":4087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"70822399141","text":"from board import *\nimport uct\n\n\nif __name__ == '__main__':\n b = Board()\n while b.get_result(b.playerJustMoved) is None:\n if b.playerJustMoved == PLAYER_O:\n available_moves = b.get_moves()\n move = int(input(f'Enter move (available: {available_moves})>'))\n assert move in available_moves, 'Move not in available moves'\n b.make_move(move)\n print(b)\n else:\n print('Engine thinking')\n move = uct.uct_multi(b, itermax=10000)\n b.make_move(move)\n print(f'Engine makes move {move}')\n print(b)\n\n result = b.get_result(PLAYER_O)\n if result == DRAW:\n print('Draw')\n elif result == WIN:\n print('O wins')\n elif result == LOSS:\n print('X wins')\n\n","repo_name":"AngelVI13/pytoe","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"32260506368","text":"#!/usr/bin/python\n\n\"\"\"\nExtract reads froma fastq file that appear to be multimappers\nin a BWA sam file. 
Such reads have one more base added to the 5' end\n(sam file being produced with reads trimmed n bases from the right)\n\"\"\"\n\nimport re\nimport sys\n# -----------------------------[ Input/Output ]--------------------------------\n\n# This is the file without adapters but with complete tags (27 bp)\nfastq= open('/exports/work/vet_roslin_nextgen/dario/bwa/output/20110106_fantom5_macro_monocyte_vs_ss9/Macrophage%20-%20monocyte%20derived%20donor1.CNhs10861.11232-116C8.hg19.ctss.fq', 'r')\n\n# Latest output of bwa\nsam= open('/exports/work/vet_roslin_nextgen/dario/bwa/output/20110106_fantom5_macro_monocyte_vs_ss9/bwa_output.sam')\n\n# Latest FASTQ input that was used for bwa:\nmulti_fastq= open('/exports/work/vet_roslin_nextgen/dario/bwa/output/20110106_fantom5_macro_monocyte_vs_ss9/bwa_input.fq', 'w')\n\n# Make sure the read names in SAM (bwa_output.sam) match those in the complete FASTQ.\n# The FASTQ qname starts '@', but not the SAM (usually). Also, the raw reads names have /1 or /2 at their end. These suffixes \n# are not present in the SAM.\n\n# ------------------------------[ Dictionary of multimappers ]-----------------\n\nmmdict= {}\nn= 0\nfor line in sam:\n if line.startswith('@'):\n continue\n \"\"\" Extract the digits in the X0:i tag \"\"\"\n mm= re.findall('.?\\tX0:i:(\\d+)\\t.?', line)\n n += 1\n# print(line)\n# print(mm)\n# if n > 10:\n# break\n# sys.exit()\n if mm == []:\n \"\"\" If X0 tag is not found, the read is unmapped \"\"\"\n continue\n mm= int(mm[0])\n if mm > 1:\n line= line.split('\\t')\n qname= line[0]\n mmdict[qname]= mm\n seq_len= len(line[9])\nprint('Number of multimapping reads: ' + str(len(mmdict)))\nprint('Sequence lenght in sam: ' + str(seq_len))\nsam.close()\n\nn= 0\nwhile True:\n qname= fastq.readline().rstrip().lstrip('@') ## Editing of FASTQ qname format to match SAM's qname (note the presence/absence of /1)\n# qname= fastq.readline().rstrip('/1\\n').lstrip('@')\n seq= fastq.readline()\n comment= fastq.readline()\n qual= fastq.readline()\n if qname == '':\n break\n if mmdict.has_key(qname):\n \"\"\" If a qname is found in the dictionary of multimappers, send the 4-line blockto output fastq \"\"\"\n multi_fastq.write('@' + qname + '\\n') ## Editing of FASTQ qname format to match SAM's qname (note the presence/absence of /1)\n# multi_fastq.write('@' + qname + '/1\\n')\n multi_fastq.write(seq[0:(seq_len+1)] + '\\n')\n multi_fastq.write(comment)\n multi_fastq.write(qual[0:(seq_len+1)] + '\\n')\n n += 1\nmulti_fastq.close()\nfastq.close()\nprint('Reads sent to output: ' + str(n))\n","repo_name":"dariober/bioinformatics-cafe","sub_path":"roslin/20110106_rescue_multimappers.py","file_name":"20110106_rescue_multimappers.py","file_ext":"py","file_size_in_byte":2750,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"35"} +{"seq_id":"72998082021","text":"\"\"\"\n converts KITTI object detection dataset to tfrecord for use\n with TF object detection API for vehicle detection\n can be used for singleclass or multiclass data\n\"\"\"\n\nimport tensorflow as tf\nimport dataset_util # this is found in /research/object_detection/utils/ (Tensorflow Object Detection API)\nimport os.path\nfrom PIL import Image\n\n__author__ = \"Moritz Kampelmuehler\"\n\n# constants\nTESTSPLIT = 10 # samples (leave TESTSPLIT samples for test split)\nNUM_SAMPLES = 7481\nIMAGE_FORMAT = b'png'\nBASEDIR = 'training/' # specify the basedir\nIMGDIR = 'image_2/'\nLABDIR = 'label_2/'\nMODE = 'car_only' # for single class detector\n# MODE = 'multi_class' # 
for multi class detector (Car (1), Van (2), Truck(3))\nVEHICLE_LABELS = ['Car', 'Van', 'Truck']\nVEHICLE_LABEL_IDS = {'Car': 1, 'Van': 2, 'Truck': 3}\nSHUFFLE = True\n\ndef create_tf_example(height, width, filename, encoded_image_data, image_format, xmins, xmaxs, ymins, ymaxs, classes_text, classes):\n tf_example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': dataset_util.int64_feature(height), # Image height\n 'image/width': dataset_util.int64_feature(width), # Image width\n 'image/filename': dataset_util.bytes_feature(filename), # Filename of the image\n 'image/source_id': dataset_util.bytes_feature(filename), # Filename of the image\n 'image/encoded': dataset_util.bytes_feature(encoded_image_data), # Encoded image bytes\n 'image/format': dataset_util.bytes_feature(image_format), # b'jpeg' or b'png'\n 'image/object/bbox/xmin': dataset_util.float_list_feature(xmins), # normalized left x coordinate in bounding box\n 'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs), # normalized right x coordinate in bounding box\n 'image/object/bbox/ymin': dataset_util.float_list_feature(ymins), # normalized top y coordinate in bounding box\n 'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs), # normalized bottom y coordinate in bounding box\n 'image/object/class/text': dataset_util.bytes_list_feature(classes_text), # string class name of bounding box\n 'image/object/class/label': dataset_util.int64_list_feature(classes), # integer class id of bounding box\n }))\n return tf_example\n \ndef loadAnnotations():\n annotations = []\n for i in range(NUM_SAMPLES):\n annotation = {'file_name': '{0:06d}.png'.format(i)}\n with open(os.path.join(BASEDIR, LABDIR, '{0:06d}.txt'.format(i)), 'r') as f:\n bboxes = []\n for line in f:\n bbox = {}\n line_split = line.split(' ')\n \n if line_split[0] not in VEHICLE_LABELS:\n # sort out non-vehicle entries\n continue\n \n # create bounding box\n bbox['left'] = float(line_split[4])\n bbox['right'] = float(line_split[6])\n bbox['top'] = float(line_split[5])\n bbox['bottom'] = float(line_split[7])\n \n if MODE == 'car_only':\n bbox['label'] = 'Car'\n bbox['label_id'] = VEHICLE_LABEL_IDS['Car']\n elif MODE == 'multi_class':\n bbox['label'] = line_split[0]\n bbox['label_id'] = VEHICLE_LABEL_IDS[line_split[0]] \n else:\n raise ValueError('unknown MODE')\n bboxes.append(bbox) \n \n if not bboxes:\n # sort out non-vehicle frames\n continue\n annotation['bbox'] = bboxes\n annotations.append(annotation)\n print('{} {} annotations of {} total annotations loaded succesfully'.format(len(annotations), MODE, NUM_SAMPLES))\n return annotations\n\ndef createTFRecord(mode, annotations):\n writer = tf.python_io.TFRecordWriter('KITTI_vehicle_{}.tfrecord'.format(mode)) \n if mode == 'train':\n sample_range = range(len(annotations)-TESTSPLIT)\n elif mode == 'test':\n sample_range = range(-TESTSPLIT,0)\n else:\n raise ValueError('unknown mode')\n \n for n in sample_range:\n print('Processing file {0:06d} of {1:06d}'.format(n+1 if mode == 'train' else n+1+TESTSPLIT, len(sample_range)))\n filename = annotations[n]['file_name']\n xmins, xmaxs, ymins, ymaxs, classes_text, classes = ([] for i in range(6))\n # read image\n image_location = os.path.join(BASEDIR, IMGDIR, filename)\n with tf.gfile.GFile(image_location, 'rb') as fid:\n encoded_image_data = fid.read()\n # get image size\n im = Image.open(image_location)\n width, height = im.size\n for annotation in annotations[n]['bbox']:\n xmins += [annotation['left']/width]\n xmaxs += 
[annotation['right']/width]\n ymins += [annotation['top']/height]\n ymaxs += [annotation['bottom']/height]\n classes_text += [annotation['label'].encode('utf8')]\n classes += [annotation['label_id']]\n tf_example = create_tf_example(height, width, filename.encode('utf8'), encoded_image_data, IMAGE_FORMAT, xmins, xmaxs, ymins, ymaxs, classes_text, classes)\n writer.write(tf_example.SerializeToString())\n writer.close()\n \ndef main(_):\n annotations = loadAnnotations()\n if SHUFFLE:\n from random import shuffle\n shuffle(annotations)\n \n createTFRecord('train', annotations)\n createTFRecord('test', annotations)\n\nif __name__ == '__main__':\n tf.app.run()\n","repo_name":"kampelmuehler/kittitools","sub_path":"create_tf_record.py","file_name":"create_tf_record.py","file_ext":"py","file_size_in_byte":5156,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"} +{"seq_id":"31992149162","text":"#!/usr/bin/env python\n#\n# flightsearch.py\n# @author Mike\n#\n# scrapes hipmunk for flight information\n#\n# the storage container, 'itineraries', is a dictionary\n# containing arrays of outgoing and return flights that \n# contain arrays of routes that contain arrays of legs \n# that contain dictionaries of leg flight information\n# \n# when developing a heuristic for chooosing the flight,\n# note that the outgoing and return itineraries are\n# sorted by 'agony' score, with the low-agony, direct flights\n# near the beginning and the high-agony, high-connection \n# flights are at the end\n\nimport datetime\nimport dateutil.parser\nimport pprint\nimport requests\nimport random\n\nOUTGOING = 0\nRETURN = 1\n\ndef select_flight(from_, to, start_date, end_date):\n start_date = start_date.strftime('%b%d')\n end_date = end_date.strftime('%b%d')\n csrf = requests.get('http://www.hipmunk.com')\n search_strings = { 'i': '%s.%s,%s.%s' % \\\n (from_, to, start_date, end_date)}\n headers = { 'X-Csrf-Token': csrf.headers['X-Csrf-Token'] }\n raw_data = requests.post('http://www.hipmunk.com/api/results', \\\n data=search_strings, headers=headers)\n\n #splits flights into outgoing and returns\n outgoingFlights = raw_data.json()['routings'][OUTGOING]\n returnFlights = raw_data.json()['routings'][RETURN]\n\n return {'outgoing': getFlights(outgoingFlights[0]), \\\n 'return': getFlights(returnFlights[0])}\n \n #take the first flight?\n #-----------------------------------------------------#\n #T0D0: write code here with a heuristic that selects\n #an itinerary\n #-----------------------------------------------------#\n\ndef getFlights(route):\n #routings = []\n flights = route['legs']\n trip = []\n for flight in flights:\n num = flight['marketing_num'][0] + ' ' + str(flight['marketing_num'][1])\n #print num\n origin = flight['from_code']\n #print origin\n dest = flight['to_code']\n #print dest\n depart = dateutil.parser.parse(flight['depart']).strftime('%m/%d/%y')\n #print depart\n arrive = dateutil.parser.parse(flight['arrive']).strftime('%m/%d/%y')\n #print arrive\n leg = {'flight': num, 'origin': origin, 'dest': dest, \\\n 'takeoff': depart, 'arrive': arrive}\n trip.append(leg)\n #routings.append(trip)\n #print '\\n'\n return trip\n\ndef main():\n itin = select_flight('chicago', 'prague', datetime.datetime.now(), \\\n datetime.datetime.now() + datetime.timedelta(days=random.randrange(3,15)))\n pprint.pprint(itin)\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"brcooley/penn_apps","sub_path":"backend/flightsearch.py","file_name":"flightsearch.py","file_ext":"py","file_size_in_byte":2599,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"44510500026","text":"import django.utils.simplejson as json\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponse\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\n\nfrom apps.courses.models import Course\nfrom apps.reviews.models import CourseReview\n\ndef response(data):\n return HttpResponse(json.dumps(data), mimetype='text/plain')\n\ndef course(request, dep=None, num=None):\n try:\n c = Course.objects.get(dep=dep.lower(), num=num)\n r = CourseReview.objects.filter(course=c)\n except Course.DoesNotExist:\n return response(dict(error=u'invalid course'))\n\n first, second = None, None\n if len(r) >= 1:\n first = r[0].comment\n if len(r) >= 2:\n second = r[1].comment\n\n data = {\n 'first': first,\n 'second': second,\n 'link': reverse('apps.courses.views.course_detail', args=[c.cid]),\n 'gpa': unicode(c.gpa),\n }\n return response(data)\n","repo_name":"rclmenezes/USG-srv-dev","sub_path":"scg/apps/courses/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"41154143906","text":"from matplotlib import pyplot as plt\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg \nimport numpy as np\nimport cv2\n\ndef char2image(c, image_size=(28,28)):\n '''\n Returns a greyscale image showing the input character c\n '''\n \n if not isinstance(c, str) or len(c)>1:\n raise ValueError(\"c is not a char\")\n\n fig = plt.figure(figsize=(1,1))\n canvas = FigureCanvasAgg(fig)\n ax = fig.gca()\n\n ax.text(0.5,0.5,c, size=60, horizontalalignment='center', verticalalignment='center')\n ax.axis('off')\n plt.close(fig)\n\n canvas.draw()\n s, (width, height) = canvas.print_to_buffer()\n\n X = np.fromstring(s, np.uint8).reshape((height, width, 4))\n gray_image = cv2.bitwise_not(cv2.cvtColor(X, cv2.COLOR_BGR2GRAY))\n resized = cv2.resize(gray_image, image_size)/255.\n\n return resized\n","repo_name":"ale100584/GAN","sub_path":"image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"36544756361","text":"\nimport os\nfrom torchvision.datasets import VisionDataset\nfrom torchvision import transforms\nF = transforms.functional\n\n\nclass StanfordDogsDataset(VisionDataset):\n \"\"\"docstring for StanfordDogsDataset\"\"\"\n\n def __init__(self,\n root,\n data_list_file=None,\n train=True,\n transform=None,\n target_transform=None,\n parse_class_name=None):\n \"\"\"\n\n\n\n Arguments:\n root {[path]} -- dataset root directory\n root is a path-like object,the directory tree example:\n root/\n Annotation/\n class_name1/\n file1,file2,...\n class_name2/\n file1,file2,...\n ...\n Images/\n class_name1/\n file1,file2,...\n class_name2/\n file1,file2,...\n ...\n\n data_list_file {[path]} -- data list file.\n data_list_file is a file-like object, the file context example:\n image_file,class\n image_name1,label1,\n image_name2,label2\n \"\"\"\n super(StanfordDogsDataset, self).__init__(\n root=root, transform=transform, target_transform=target_transform)\n\n self.parse_class_name = parse_class_name\n self.image_loader = None\n self.image_folder 
= os.path.join(self.root, \"Images\")\n\n try:\n # read image by PIL\n from PIL import Image\n self.image_loader = Image.open\n\n except ImportError:\n pass\n\n if self.image_loader is None:\n try:\n # read image by opencv\n from image_utils import read_rgb\n self.image_loader = read_rgb\n except ImportError:\n from PIL import Image\n self.image_loader = Image.open\n\n if self.image_loader is None:\n try:\n # read image by skimage\n from skimage import io\n self.image_loader = io.imread\n\n except ImportError:\n pass\n\n if self.image_loader is None:\n raise ImportError(\"Can't import OpenCV 3.x, PIL or scikit-image,\"\n \" please install one of them.\")\n\n if data_list_file:\n data_list_file = os.path.join(root, data_list_file)\n self.file_files, self.targets = self.__load_from_file(data_list_file)\n else:\n self.file_files, self.targets = self.__load_from_dir(self.image_folder)\n\n self.classes = self.__make_classes(self.file_files, self.targets)\n\n def __make_classes(self, file_files, targets):\n classes = {}\n for i in range(len(self)):\n img_file, target = file_files[i], int(targets[i])\n class_name, target = self.__make_label_name_pair(img_file, target)\n classes[target] = class_name\n return classes\n\n def __make_label_name_pair(self, class_name, target):\n\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n if self.parse_class_name is not None:\n class_name = self.parse_class_name(class_name)\n\n return class_name, target\n\n def __load_from_file(self, file):\n with open(file, 'r') as f:\n lines = f.readlines()\n\n file_files = []\n targets = []\n for line in lines[1:]: # skip first line.\n file, label = line.split(',')\n file_files.append(os.path.join(self.image_folder, file.strip()))\n targets.append(label.strip())\n return file_files, targets\n\n def __load_from_dir(self, path):\n file_files = []\n targets = []\n for i, _dir in enumerate(os.listdir(path)):\n for file in os.listdir(path):\n file_files.append(os.path.join(path, _dir, file))\n targets.append(i)\n\n return file_files, targets\n\n def __len__(self):\n return len(self.file_files)\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n tuple: (image, target) where target is index of the target class.\n \"\"\"\n img_file, target = self.file_files[index], self.targets[index]\n\n # doing this so that it is consistent with all other datasets\n # to return a PIL Image\n\n img = self.image_loader(img_file)\n if self.transform is not None:\n img = self.transform(img)\n\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return img, target\n\n\nclass SquarePad(object):\n def __init__(self, fill=\"zero\", padding_mode='constant'):\n if fill == \"zero\":\n self.fill = 0\n elif fill == \"mean\":\n self.fill = (124, 116, 104)\n\n self.padding_mode = padding_mode\n\n def __call__(self, img):\n \"\"\"\n Args:\n img (PIL Image): Image to be padded.\n\n Returns:\n PIL Image: Padded image.\n \"\"\"\n w, h = img.size\n max_edge = max(w, h)\n padding = ((max_edge - w) // 2, (max_edge - h) // 2)\n return F.pad(img, padding, self.fill, self.padding_mode)\n\n def __repr__(self):\n return self.__class__.__name__ + \\\n '( fill={1}, padding_mode={2})'.format(self.fill, self.padding_mode)\n pass\n","repo_name":"wangvation/torch-mobilenet","sub_path":"dataset/stanford_dogs.py","file_name":"stanford_dogs.py","file_ext":"py","file_size_in_byte":4848,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"35"} 
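# A minimal usage sketch for the StanfordDogsDataset record above (illustrative only,
# not from the original repository): the dataset root "data/stanford_dogs" and the list
# file "train_list.csv" are assumptions, and the list file must carry the
# "image_file,class" header that __load_from_file expects.
from torchvision import transforms

# SquarePad (defined above) pads the image to a square before resizing, so the
# aspect ratio is preserved instead of stretching the dog.
tf = transforms.Compose([
    SquarePad(fill="mean"),
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
])
ds = StanfordDogsDataset(root="data/stanford_dogs",
                         data_list_file="train_list.csv",
                         transform=tf)
img, target = ds[0]  # img: (3, 224, 224) float tensor; target: the label string read from the list file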
+{"seq_id":"22782952993","text":"import numpy as np\nfrom pygeons.mjd import mjd\nfrom pygeons.io.io import text_from_dict\nfrom pygeons.basemap import make_basemap\nimport matplotlib.pyplot as plt\nnp.random.seed(1)\n\n## observation points\n#####################################################################\npos_geo = np.array([[-83.74,42.28,0.0],\n [-83.08,42.33,0.0],\n [-83.33,41.94,0.0]]) \nNx = len(pos_geo)\nbm = make_basemap(pos_geo[:,0],pos_geo[:,1])\npos_cart = np.array(bm(pos_geo[:,0],pos_geo[:,1])).T\ndx = pos_cart[:,0] - pos_cart[0,0]\ndy = pos_cart[:,1] - pos_cart[0,1]\n\ndispdx = np.array([[0.0,1e-6,0.0]]).repeat(Nx,axis=0)\ndispdy = np.array([[0.0,0.0,0.0]]).repeat(Nx,axis=0)\ndisp = dispdx*dx[:,None] + dispdy*dy[:,None]\nu,v,z = disp.T\ndudx,dvdx,dzdx = dispdx.T\ndudy,dvdy,dzdy = dispdy.T\n\n# make disp. time dependent\nstart_time = mjd('2015-07-01','%Y-%m-%d')\nstop_time = mjd('2017-07-01','%Y-%m-%d')\npeak_time = float(mjd('2016-07-01','%Y-%m-%d'))\ntimes = np.arange(start_time,stop_time+1).astype(float)\nNt = len(times)\n# slip rate (m/day) through time\nb = 0.005/(((times-peak_time)/10.0)**2 + 1.0) \n# slip (m) through time\nintb = np.cumsum(b)\n\n# create deformation rate gradients\n#ddudx\n# create displacements\nu = u[None,:]*intb[:,None]\nv = v[None,:]*intb[:,None]\nz = z[None,:]*intb[:,None]\n\ndudxdt = dudx[None,:]*b[:,None]\ndvdxdt = dvdx[None,:]*b[:,None]\ndzdxdt = dzdx[None,:]*b[:,None]\n\ndudydt = dudy[None,:]*b[:,None]\ndvdydt = dvdy[None,:]*b[:,None]\ndzdydt = dzdy[None,:]*b[:,None]\n\n# add noise\nsu = 0.0005*np.ones((Nt,Nx))\nsv = 0.0005*np.ones((Nt,Nx))\nsz = 0.0005*np.ones((Nt,Nx))\nu += np.random.normal(0.0,su)\nv += np.random.normal(0.0,sv)\nz += np.random.normal(0.0,sz)\n\n# time evolution\n### write synthetic data\n#####################################################################\ndata = {}\ndata['id'] = np.array(['A%03d' % i for i in range(Nx)])\ndata['longitude'] = pos_geo[:,0]\ndata['latitude'] = pos_geo[:,1]\ndata['time'] = times\ndata['east'] = u\ndata['north'] = v\ndata['vertical'] = z\ndata['east_std_dev'] = su\ndata['north_std_dev'] = sv\ndata['vertical_std_dev'] = sz\ndata['time_exponent'] = 0\ndata['space_exponent'] = 1\ntext_from_dict('data.csv',data)\n\n# xder\ndata = {}\ndata['id'] = np.array(['A%03d' % i for i in range(Nx)])\ndata['longitude'] = pos_geo[:,0]\ndata['latitude'] = pos_geo[:,1]\ndata['time'] = times\ndata['east'] = dudxdt\ndata['north'] = dvdxdt\ndata['vertical'] = dzdxdt\ndata['east_std_dev'] = np.zeros((Nt,Nx))\ndata['north_std_dev'] = np.zeros((Nt,Nx))\ndata['vertical_std_dev'] = np.zeros((Nt,Nx))\ndata['time_exponent'] = -1\ndata['space_exponent'] = 0\ntext_from_dict('soln.dudx.csv',data)\n\n# yder\ndata = {}\ndata['id'] = np.array(['A%03d' % i for i in range(Nx)])\ndata['longitude'] = pos_geo[:,0]\ndata['latitude'] = pos_geo[:,1]\ndata['time'] = times\ndata['east'] = dudydt\ndata['north'] = dvdydt\ndata['vertical'] = dzdydt\ndata['east_std_dev'] = np.zeros((Nt,Nx))\ndata['north_std_dev'] = np.zeros((Nt,Nx))\ndata['vertical_std_dev'] = np.zeros((Nt,Nx))\ndata['time_exponent'] = -1\ndata['space_exponent'] = 0\ntext_from_dict('soln.dudy.csv',data)\n","repo_name":"treverhines/PyGeoNS","sub_path":"demo/demo1/.write_synthetic.py","file_name":".write_synthetic.py","file_ext":"py","file_size_in_byte":3079,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"35"} +{"seq_id":"10215967009","text":"import time\n\nclass ParkManage(object):\n \"\"\"创建一个关于停车的类\"\"\"\n def 
__init__(self,max_car=100,): # maximum number of parking spaces\n        self.max_car=max_car\n        self.car_list = []\n        self.cur_car=len(self.car_list)\n\n\n    def info(self):\n        \"\"\"Display the system menu\"\"\"\n        print(\"\"\"\n     —————————————————————————\n     |*** Welcome to the Parking Manager ***|\n     ————————————————————————— \n{1} \n{2}         1) Register car entry{3}{2}\n{0} \n{2}         2) Look up car info{3}{2}\n{0}\n{2}         3) Register car exit{3}{2}\n{0}\n{2}         4) Exit the system{3}{2}\n{1}\n        \"\"\".format(\"-\"*40,\"=\"*40,\"|\",\" \"*16))\n\n    def add_car(self,car):\n        \"\"\"Register a car entering the park\"\"\"\n        entrance_time = time.ctime()\n        car.entrance_time = entrance_time  # store as an attribute, matching the attribute access used below\n        for Car in self.car_list:\n            if Car.car_number == car.car_number:\n                print(\"That plate number is already registered, please re-enter\")\n                break\n        else:\n            self.car_list.append(car)\n            print(\"Car with plate number %s parked successfully\" % car.car_number)\n\n    def search_By_Number(self):\n        \"\"\"Search by plate number\"\"\"\n        car_number=input(\"Enter the plate number to search for: \")\n        for car in self.car_list:\n            if car.car_number==car_number:\n                print(car)\n                break\n        else:\n            print(\"No car found with plate number %s\" % car_number)\n\n    def search_By_Port(self,car_port):\n        \"\"\"Search by parking-spot number\"\"\"\n        for car in self.car_list:\n            if car.car_port==car_port:\n                return True\n        else:\n            return False \n\n    def searchCar(self):\n        self.search_By_Number()\n\n    def delete_car(self,car):\n        \"\"\"Register a car leaving the park\"\"\"\n        exit_time=time.ctime()\n        car.exit_time = exit_time  # attribute access, consistent with add_car\n        car.slot_card()\n        self.car_list.remove(car)\n        print(\"Car with plate number %s left the park successfully\" % car.car_number)\n","repo_name":"li6676286/ParkManager","sub_path":"manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":2149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"35495987228","text":"\ndef caesar(text, shift, direction):\n\n    text_lst = list(text)\n\n    if direction == \"decode\":\n        shift = -shift\n\n    for index in range(len(text_lst)):\n        if text_lst[index] in alphabet:\n            new_char_index = (alphabet.index(text_lst[index]) + shift) % alphabet_len\n            text_lst[index] = alphabet[new_char_index]\n\n    text = \"\".join(text_lst)\n    \n    print (f\"The {direction}d code is: {text}\")\n\n\nfrom art import logo\nprint(logo)\n\nalphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n\nalphabet_len = len(alphabet)\nprogram_state = \"yes\"\n\nwhile program_state == \"yes\":\n\n    direction = input(\"Type 'encode' to encrypt, type 'decode' to decrypt:\\n\") \n    text = input(\"Type your message:\\n\").lower()\n    shift = int(input(\"Type the shift number:\\n\"))\n    caesar(text,shift,direction)\n    program_state = input(\"Type 'yes' if you want to go again. 
Type 'no' if you want to exit the program.\\n\")\n if program_state == \"no\":\n print(\"Goodbye\")\n\n\n\n","repo_name":"kom1323/100-days-of-code","sub_path":"day8-caesar_cipher/day8-caesar_cipher.py","file_name":"day8-caesar_cipher.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"4673114496","text":"from asynctest import mock, patch, sentinel\n\nimport pytest\n\nfrom prpc.protocol import base\nfrom prpc.server import Server\n\n\n@pytest.fixture\n@patch.multiple(base.BaseProtocol, __abstractmethods__=set())\ndef protocol():\n protocol = base.BaseProtocol()\n\n return protocol\n\n\n@pytest.fixture\ndef server(transport, protocol):\n application = mock.CoroutineMock()\n application.handle_method.return_value = sentinel.return_value\n\n server = Server(application, lambda: protocol)\n\n return server\n\n\nasync def test_handle_message_request(buf, server, transport, protocol):\n\n transport.messages = [\n base.Request(method=sentinel.method),\n ]\n\n await server.handle_recv(transport, protocol)\n\n server.application.handle_method.assert_called_once_with(\n sentinel.method\n )\n\n assert len(buf) == 1\n assert buf[0].result == sentinel.return_value\n assert buf[0].error is None\n assert buf[0].id == transport.messages[0].id\n assert isinstance(buf[0], base.Response)\n\n\nasync def test_handle_message_notification(buf, server, transport, protocol):\n\n transport.messages = [\n base.Notification(method=sentinel.method),\n ]\n\n await server.handle_recv(transport, protocol)\n\n server.application.handle_method.assert_called_once_with(\n sentinel.method\n )\n\n assert len(buf) == 0\n\n\nasync def test_handle_message_request_exception(buf, server, transport, protocol):\n\n transport.messages = [\n base.Request(method=sentinel.method)\n ]\n\n server.application.handle_method.side_effect = ValueError('fake_error')\n\n await server.handle_recv(transport, protocol)\n\n server.application.handle_method.assert_called_once_with(\n sentinel.method\n )\n\n assert len(buf) == 1\n assert buf[0].result is None\n assert isinstance(buf[0].error, ValueError)\n assert buf[0].error.args == ('fake_error',)\n assert isinstance(buf[0], base.Response)\n","repo_name":"ptpb/prpc","sub_path":"tests/test_server.py","file_name":"test_server.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"19305557798","text":"#!python3\n\n\"\"\"\nDemonstration of the last-diminisher protocol.\n\nProgrammer: Erel Segal-Halevi\nSince: 2019-12\n\"\"\"\n\nfrom agents import *\n\nimport last_diminisher, logging, sys\nif __name__ == \"__main__\":\n\n last_diminisher.logger.addHandler(logging.StreamHandler(sys.stdout))\n last_diminisher.logger.setLevel(logging.INFO)\n\n Alice = PiecewiseConstantAgent([3, 6, 3], name=\"Alice\")\n George = PiecewiseConstantAgent([0, 2, 4, 6], name=\"George\")\n Abraham = PiecewiseConstantAgent([6, 4, 2, 0], name=\"Abraham\")\n Hanna = PiecewiseConstantAgent([3, 3, 3, 3], name=\"Hanna\")\n\n all_agents = [Alice, George, Abraham, Hanna]\n for a in all_agents:\n print(a)\n\n print(\"\\n### Order: Alice, George, Abraham, Hanna\")\n print(last_diminisher.last_diminisher(all_agents))\n\n print(\"\\n### Order: Hanna, Abraham, George, Alice\")\n all_agents.reverse()\n 
print(last_diminisher.last_diminisher(all_agents))\n","repo_name":"orizitzer365/economic_algorithms_project","sub_path":"last_diminisher_demo.py","file_name":"last_diminisher_demo.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"25547417687","text":"\nimport smtplib, ssl\nfrom email.mime.base import MIMEBase\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nfrom email import encoders\n\nsmtp_server = \"smtp.gmail.com\"\nport = 587 # For starttls\n\n\nsender_email = input(\"Enter the email of the sender: \")\npassword = input(\"Enter the password: \")\n\n\n\ncontext = ssl.create_default_context()\nwith smtplib.SMTP(smtp_server, port) as server:\n server.ehlo() # Can be omitted\n server.starttls(context=context)\n server.ehlo() # Can be omitted\n server.login(sender_email, password)\n file = \"emails.csv\"\n with open(file, \"r\") as f:\n for line in f:\n # Get the email address from the line\n receiver_name, receiver_email, attachment = line.split(',')\n attachment = attachment.strip()\n receiver_email = receiver_email.strip()\n print(receiver_email)\n print(attachment)\n # Add the attachment to the message\n with open(attachment, \"rb\") as f:\n part = MIMEBase(\"application\", \"octet-stream\")\n part.set_payload(f.read())\n encoders.encode_base64(part)\n part.add_header(\n \"Content-Disposition\",\n f\"attachment; filename= {attachment}\",\n )\n html_msg = f\"\"\"\\\n \n \n

<html>\n            <body>\n            <h2>Hi {receiver_name}</h2>\n            <p>This is your certificate for GitHub Workshop.</p>\n            </body>\n            </html>
\n \"\"\"\n message = MIMEMultipart()\n message[\"Subject\"] = \"Certificate\"\n message[\"From\"] = sender_email\n message[\"To\"] = receiver_email\n message.attach(MIMEText(html_msg, \"html\"))\n message.attach(part)\n text = message.as_string()\n server.sendmail(sender_email, receiver_email, text)\n print(\"Email sent to: \", receiver_email)\n print(\"Attachment: \", attachment)\n print(\"\\n\")\n continue\n print(\"Done\")","repo_name":"AvanishCodes/mass-mailer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"28781569603","text":"from requests.api import request\nfrom core.constant import NOTICIASURL\nfrom core.constant import CALENDARIORSS\nfrom xml.dom import minidom\nfrom dateutil import tz\nfrom datetime import datetime\n\nimport requests\nimport re\nimport html\nimport html.parser\n\nclass NoticiaService:\n\n\n def consultarCalendario(self):\n out = requests.get(CALENDARIORSS)\n r = ''\n\n xmldoc = minidom.parseString(out.text)\n items = xmldoc.getElementsByTagName('item')\n\n for item in items:\n i = 0\n noticia = ''\n datahora = ''\n\n for a in item.childNodes:\n\n if(len(a.childNodes) > 0):\n #titulo\n if i == 1:\n noticia = noticia + str(a.childNodes[0].data) + \"\\n\"\n\n #data\n elif i % 5 == 0:\n datahora = a.childNodes[0].data\n #print(a.childNodes[0].data)\n \n #conteudo\n elif i % 7 == 0:\n d = a.childNodes[0].data\n tmp = d.split('')\n\n dados = tmp[2].split('')\n #print(self.cleanhtml(dados[0]))\n noticia = noticia + 'At: ' + self.cleanhtml(dados[1]) + ' / ' + self.dateConvert(datahora) + '\\n'\n #print(self.cleanhtml(dados[2]))\n noticia = noticia + 'Prev: ' + self.cleanhtml(dados[3])\n #print(self.cleanhtml(dados[3]))\n noticia = noticia + ' / Cons: ' + self.cleanhtml(dados[4])\n #print(self.cleanhtml(dados[4]))\n noticia = noticia + ' / Act: ' + self.cleanhtml(dados[5]) + '\\n\\n'\n\n if str(dados[2]).find('sprite-medium-impact') > 0 or str(dados[2]).find('sprite-high-impact') > 0:\n r = r + noticia \n\n i = i+1\n\n return r\n\n def stripChars(self, data):\n p = self.striphtml(data)\n p = p.replace('\\n', '')\n p = p.strip(' ')\n p = p.strip('/')\n\n return p\n\n def striphtml(self, data):\n pdata = '')\n st = p.sub(' ', pdata)\n st = st.replace(' ', '')\n st = st.replace(' ', ' ')\n st = st.replace('\\n', ' ')\n st = st.replace(' ', ' ')\n st = st.strip('/')\n st = st.replace('|&([a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});')\n cleanr2 = re.compile('https?\\S+')\n cleantext = re.sub(cleanr, '', raw_html)\n cleantext = re.sub(cleanr2, '', cleantext)\n\n cleantext = cleantext.strip()\n cleantext = cleantext.strip(' ')\n cleantext = cleantext.replace(' ', '')\n cleantext = cleantext.replace(\"\\t\", \" \")\n cleantext = cleantext.replace(\"\\n\\n\", \"\")\n #cleantext = cleantext.replace(\"\\n\\n\", \"\")\n cleantext = cleantext.replace(\"\\r\\n\\r\\n\", \"\")\n \n # cleantext = self.stripChars(cleantext)\n # cleantext = self.striphtml(cleantext)\n\n return cleantext\n\n def dateConvert(self, txt):\n\n #print \"Date in GMT: {0}\".format(txt)\n # Hardcode from and to time zones\n #datetime.tzinfo\n from_zone = tz.gettz('GMT')\n to_zone = tz.gettz('America/Sao_Paulo')\n # gmt = datetime.gmtnow()\n gmt = datetime.strptime(txt, '%a, %d %b %Y %H:%M GMT')\n # Tell the datetime object that it's in GMT time zone\n gmt = gmt.replace(tzinfo=from_zone)\n \n # Convert time zone\n eastern_time = str(gmt.astimezone(to_zone))\n \n # Check if 
its BRT (UTC-3) or BRST (UTC-2); the converted zone is America/Sao_Paulo, not US/Eastern \n        if eastern_time[-6:] == \"-03:00\":\n            print(\"Date in America/Sao_Paulo: \" + eastern_time.replace(\"-03:00\",\" UTC(-3)\"))\n            eastern_time = eastern_time.replace(\"-03:00\",\" UTC(-3)\")\n        elif eastern_time[-6:] == \"-02:00\":\n            print(\"Date in America/Sao_Paulo: \" + eastern_time.replace(\"-02:00\",\" UTC(-2)\"))\n            eastern_time = eastern_time.replace(\"-02:00\",\" UTC(-2)\")\n        \n        return eastern_time\n    \n    #return\n","repo_name":"psmarques/insidemarket","sub_path":"core/services/noticiaService.py","file_name":"noticiaService.py","file_ext":"py","file_size_in_byte":4465,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"}
+{"seq_id":"12307073483","text":"from OpenGL.GL import *\r\nfrom OpenGL.GLU import *\r\nfrom OpenGL.GLUT import *\r\nimport sys\r\nfrom math import *\r\n \r\n\r\n# FLAGS\r\n\r\nIsMoving = False\r\nIsZoomIn = False\r\nIsZoomOut = False\r\nzoom_normal = [3,1,3]\r\n##############################################################################\r\n# vertices\r\n##############################################################################\r\n\r\nvertices=[# vertex coordinates\r\n    -0.25,-0.25,0.25,\r\n    -0.25,0.25,0.25,\r\n    0.25,0.25,0.25,\r\n    0.25,-0.25,0.25,\r\n    -0.25,-0.25,-0.25,\r\n    -0.25,0.25,-0.25,\r\n    0.25,0.25,-0.25,\r\n    0.25,-0.25,-0.25,\r\n    ]\r\ncolors=[# color of each vertex \r\n    0.2,0.2,0.2,\r\n    1.0,0.0,0.0,\r\n    1.0, 1.0, 0.0,\r\n    0.0,1.0,0.0,\r\n    0.0,0.0,1.0,\r\n    1.0,0.0,1.0,\r\n    1.0,1.0,1.0,\r\n    0.0,1.0,1.0,\r\n    ]\r\nindices=[ # index list: each of the 6 faces is defined by 4 vertices\r\n    0,3,2,1,\r\n    2,3,7,6,\r\n    0,4,7,3,\r\n    1,2,6,5,\r\n    4,5,6,7,\r\n    0,1,5,4,\r\n    ]\r\n\r\nAngle=0.0\r\n##############################################################################\r\ndef init():\r\n    glClearColor (1.0, 1.0, 1.0, 1.0)\r\n\r\ndef drawAxis():\r\n    glBegin(GL_LINES)\r\n    glColor3f(1,0,0)\r\n    glVertex3f(0.0, -1.0, 0.0)\r\n    glVertex3f(0.0, 1, 0.0)\r\n    glEnd()\r\n    glBegin(GL_LINES)\r\n    glColor3f(0,1,0)\r\n    glVertex3f(-1.0, 0.0, 0.0)\r\n    glVertex3f(1, 0.0, 0.0)\r\n    glEnd()\r\n    glBegin(GL_LINES)\r\n    glColor3f(0,0,1)\r\n    glVertex3f(0.0, 0.0, -1)\r\n    glVertex3f(0.0, 0.0, 1)\r\n    glEnd()\r\n    \r\n    \r\ndef MyTimer(Value):\r\n\tglobal Angle\r\n\tif IsMoving:\r\n\t\tAngle += 0.01\r\n\telse:\r\n\t\tpass\r\n\tglutPostRedisplay()\r\n\tglutTimerFunc(40,MyTimer,1)\r\n\r\ndef reshape_func(w, h):\r\n\tglViewport(0,0,w, h)\r\n\tglMatrixMode(GL_PROJECTION)\r\n\tglLoadIdentity()\r\n\tgluPerspective(60,1.0,0.01,20.0)\r\n\r\ndef disp_func():\r\n\tglobal IsZoomIn, IsZoomOut, zoom_normal\r\n    # clear\r\n\tglClear(GL_COLOR_BUFFER_BIT)\r\n\tglFrontFace(GL_CCW);\r\n\tglEnable(GL_CULL_FACE);\r\n\tglEnableClientState(GL_VERTEX_ARRAY)\r\n\tglEnableClientState(GL_COLOR_ARRAY)\r\n\tglVertexPointer(3, GL_FLOAT, 0, vertices) # set the vertex array\r\n\tglColorPointer(3, GL_FLOAT, 0, colors) # set the vertex color array\r\n\r\n    # view\r\n\tglMatrixMode(GL_MODELVIEW)\r\n\tglLoadIdentity()\r\n\t\t\r\n\tif IsZoomIn:\r\n\t\tfor i in range(3):\r\n\t\t\tzoom_normal[i] = zoom_normal[i] / 1.5\r\n\telif IsZoomOut:\r\n\t\tfor i in range(3):\r\n\t\t\tzoom_normal[i] = zoom_normal[i] * 1.5\r\n\r\n\tIsZoomIn = False\r\n\tIsZoomOut = False\r\n\tgluLookAt(zoom_normal[0] * cos(Angle), zoom_normal[1], zoom_normal[2] * sin(Angle), # camera (eye) position; free placement is fine with a perspective projection\r\n             0.0, 0.0, 0.0,# look-at point\r\n             0.0,1.0, 0.0) # camera up direction\r\n\t\r\n\tdrawAxis()\r\n\tglPushMatrix()\r\n\tglRotatef(30.0, 1.0, 1.0, 1.0)\r\n \r\n    #for i in range(6):\r\n    #    glDrawElements(GL_POLYGON,4,GL_UNSIGNED_BYTE, indices[4*i])\r\n\tglDrawElements(GL_QUADS, 24, GL_UNSIGNED_BYTE,indices)\r\n\tglPopMatrix()\r\n 
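\t# turn the client-side arrays back off and flush; the window is single-buffered (GLUT_SINGLE), so glFlush displays the frame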
\r\n\tglDisableClientState(GL_COLOR_ARRAY)\r\n\tglDisableClientState(GL_VERTEX_ARRAY)\r\n\tglFlush()\r\n\r\n\r\ndef MyMainMenu(entryID):\r\n\tif entryID == 3:\r\n\t\texit(0)\r\n\r\n\tglutPostRedisplay()\r\n\treturn 0\r\n\r\ndef AniSubMenu(entryID):\r\n\tglobal IsMoving\r\n\tif entryID == 1:\r\n\t\tIsMoving = True\r\n\telif entryID == 2:\r\n\t\tIsMoving = False\r\n\r\n\tglutPostRedisplay()\r\n\treturn 0\r\n\r\ndef CamSubMenu(entryID):\r\n\tglobal IsZoomIn, IsZoomOut\r\n\tif entryID == 1:\r\n\t\tIsZoomIn = True\r\n\telif entryID == 2:\r\n\t\tIsZoomOut = True\r\n\r\n\tglutPostRedisplay()\r\n\treturn 0\r\n\r\n\r\ndef main():\r\n\tglutInit()\r\n\tglutInitDisplayMode(GLUT_SINGLE|GLUT_RGBA)\r\n\tglutInitWindowSize(600, 600)\r\n\tglutCreateWindow(b\"Vertex Handling\")\r\n\tinit() \r\n\r\n\tAniSubMenuID = glutCreateMenu(AniSubMenu)\r\n\tglutAddMenuEntry('Moving cube', 1)\r\n\tglutAddMenuEntry('Stop motion', 2)\r\n\r\n\tCamSubMenuID = glutCreateMenu(CamSubMenu)\r\n\tglutAddMenuEntry('Zoom in', 1)\r\n\tglutAddMenuEntry('Zoom out',2)\r\n\r\n\tMyMainMenuID = glutCreateMenu(MyMainMenu)\r\n\tglutAddSubMenu('Animation Mode', AniSubMenuID)\r\n\tglutAddSubMenu('Camera Position', CamSubMenuID)\r\n\tglutAddMenuEntry('Exit', 3)\r\n\r\n\tglutAttachMenu(GLUT_RIGHT_BUTTON)\r\n\tglutDisplayFunc(disp_func)\r\n\tglutTimerFunc(40,MyTimer,1)\r\n\tglutReshapeFunc(reshape_func)\r\n\t\r\n\tglutMainLoop()\r\n\r\nif __name__==\"__main__\":\r\n main()\r\n\r\n","repo_name":"01-2/Graphics_2019","sub_path":"lab02/mission/lab02-mission.py","file_name":"lab02-mission.py","file_ext":"py","file_size_in_byte":4237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"38491509355","text":"from __future__ import annotations\n\nimport os\n\n\ndef solve(input_file: str)-> tuple[int, int]:\n\n assert os.path.exists(input_file), f'File {input_file} does not exist'\n assignments: list[tuple[list[int], list[int]]] = []\n with open(input_file, 'r') as f:\n for line in [line.strip() for line in f.read().splitlines() if line.strip()]:\n p1, p2 = line.split(',')\n s1, e1 = [int(p) for p in p1.split('-')]\n s2, e2 = [int(p) for p in p2.split('-')]\n\n assignments.append((list(range(s1, e1 + 1)), list(range(s2, e2 + 1))))\n\n print(p1, p2)\n print(assignments)\n\n res1 = 0\n res2 = 0\n\n # Searching for overlap\n for (p1, p2) in assignments:\n\n # Task1: Full overlap\n if all([s in p2 for s in p1]) or all([s in p1 for s in p2]):\n print(f'{p1} {p2} overlap')\n res1 += 1\n\n # Task2: Partial overlap\n if any([s in p2 for s in p1]) or any([s in p1 for s in p2]):\n print(f'{p1} {p2} partial overlap')\n res2 += 1\n\n\n return res1, res2\n\n\nif __name__ == '__main__':\n res_sample = solve('day04/sample_input.txt')\n assert res_sample[0] == 2, f'Expected 2, got {res_sample[0]}'\n assert res_sample[1] == 4, f'Expected 4, got {res_sample[1]}'\n\n res = solve('day04/input.txt')\n print(f'Part 1: {res[0]} - Part 2: {res[1]}')\n","repo_name":"BUCKFAE/AoC2022","sub_path":"day04/day04.py","file_name":"day04.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"25256125539","text":"from unittest.mock import MagicMock\n\nimport pytest\nfrom discord import Permissions\nfrom discord.ext.commands import MissingPermissions\n\nfrom framework.permissions import permcheck\n\n\n@pytest.fixture()\ndef ctx():\n c = MagicMock()\n c.bot.is_owner.return_value = False\n return c\n\n\ndef 
test_permcheck_returns_true_if_owner(ctx):\n ctx.bot.is_owner.return_value = True\n assert permcheck(ctx) is True\n\n\ndef test_permcheck_returns_true_if_author_has_permissions(ctx):\n p = {\n 'kick_members': True,\n 'ban_members': True\n }\n perms = Permissions.none()\n perms.update(**p)\n ctx.channel.permissions_for.return_value = perms\n\n assert permcheck(ctx, **p) is True\n\n\ndef test_permcheck_returns_true_if_admin_regardless_permissions(ctx):\n p = {\n 'kick_members': True,\n 'ban_members': True\n }\n perms = Permissions.none()\n perms.update(administrator=True)\n ctx.channel.permissions_for.return_value = perms\n\n assert permcheck(ctx, **p) is True\n\n\ndef test_permcheck_raises_error_if_not_enough_permissions(ctx):\n p = {\n 'kick_members': True,\n 'ban_members': True\n }\n ctx.channel.permissions_for.return_value = Permissions.none()\n\n with pytest.raises(MissingPermissions):\n permcheck(ctx, **p)\n","repo_name":"3akev/Mudhakkir","sub_path":"tests/framework/test_permissions.py","file_name":"test_permissions.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"37987532161","text":"import xml.etree.ElementTree as ET\nfrom twisted.trial import unittest\n\n# Import PyMh files and modules.\nfrom Modules.Housing.house import \\\n API as houseAPI, \\\n Xml as houseXml, \\\n Utility as houseUtil\nfrom Modules.Housing.test.xml_location import TESTING_LOCATION_STREET, TESTING_LOCATION_LATITUDE\nfrom Modules.Housing.test.xml_rooms import TESTING_ROOM_NAME_0\nfrom Modules.Housing.test.xml_housing import \\\n TESTING_HOUSE_NAME, \\\n TESTING_HOUSE_KEY, \\\n TESTING_HOUSE_ACTIVE, \\\n TESTING_HOUSE_UUID\nfrom Modules.Utilities import json_tools\nfrom test import xml_data\nfrom test.testing_mixin import SetupPyHouseObj\nfrom Modules.Utilities.debug_tools import PrettyFormatAny\n\n\nclass SetupMixin(object):\n\n def setUp(self, p_root):\n self.m_pyhouse_obj = SetupPyHouseObj().BuildPyHouseObj(p_root)\n self.m_xml = SetupPyHouseObj().BuildXml(p_root)\n self.m_api = houseAPI(self.m_pyhouse_obj)\n\n\nclass A1_Setup(SetupMixin, unittest.TestCase):\n \"\"\"\n This section will verify the XML in the 'Modules.text.xml_data' file is correct and what the node_local\n module can read/write.\n \"\"\"\n\n def setUp(self):\n SetupMixin.setUp(self, ET.fromstring(xml_data.XML_LONG))\n\n def test_01_read_xml(self):\n self.assertEqual(self.m_xml.root.tag, 'PyHouse')\n self.assertEqual(self.m_xml.house_div.tag, 'HouseDivision')\n\n\nclass A2_Xml(SetupMixin, unittest.TestCase):\n \"\"\"\n This section will verify the XML in the 'Modules.text.xml_data' file is correct and what the node_local\n module can read/write.\n \"\"\"\n\n def setUp(self):\n SetupMixin.setUp(self, ET.fromstring(xml_data.XML_LONG))\n\n def test_1_read_xml(self):\n l_pyhouse = self.m_xml.root\n self.assertEqual(l_pyhouse.tag, 'PyHouse')\n\n def test_2_buildObjects(self):\n \"\"\" Test to be sure the compound object was built correctly - Rooms is an empty dict.\n \"\"\"\n print(PrettyFormatAny.form(self.m_pyhouse_obj.House, 'A2-2-A - House'))\n self.assertEqual(self.m_pyhouse_obj.House.Rooms, None)\n\n\nclass B1_Read(SetupMixin, unittest.TestCase):\n \"\"\"\n This section tests the reading and writing of XML used by house.\n \"\"\"\n\n def setUp(self):\n SetupMixin.setUp(self, ET.fromstring(xml_data.XML_LONG))\n\n def test_1_API(self):\n houseUtil._init_component_apis(self.m_pyhouse_obj, self)\n # 
print(PrettyFormatAny.form(self.m_pyhouse_obj, 'B1-1-A - XML'))\n self.assertEqual(self.m_pyhouse_obj.Uuids, {})\n\n def test_2_Base(self):\n l_obj = houseXml._read_house_base(self.m_pyhouse_obj)\n # print(PrettyFormatAny.form(l_obj, 'B1-2-A - XML'))\n self.assertEqual(l_obj.Name, TESTING_HOUSE_NAME)\n self.assertEqual(str(l_obj.Key), TESTING_HOUSE_KEY)\n self.assertEqual(str(l_obj.Active), TESTING_HOUSE_ACTIVE)\n self.assertEqual(l_obj.UUID, TESTING_HOUSE_UUID)\n\n def test_3_House(self):\n l_obj = houseXml.read_house_xml(self.m_pyhouse_obj)\n print(PrettyFormatAny.form(l_obj, 'B1-3-A - XML'))\n self.assertEqual(l_obj.Name, TESTING_HOUSE_NAME)\n self.assertEqual(str(l_obj.Key), TESTING_HOUSE_KEY)\n self.assertEqual(str(l_obj.Active), TESTING_HOUSE_ACTIVE)\n self.assertEqual(l_obj.UUID, TESTING_HOUSE_UUID)\n self.assertEqual(str(l_obj.Location.Latitude), TESTING_LOCATION_LATITUDE)\n self.assertEqual(l_obj.Rooms[0].Name, TESTING_ROOM_NAME_0)\n\n\n def test_4_House(self):\n l_obj = houseXml.read_house_xml(self.m_pyhouse_obj)\n print(PrettyFormatAny.form(l_obj, 'B1-4-A - XML'))\n self.assertEqual(l_obj.Name, TESTING_HOUSE_NAME)\n self.assertEqual(str(l_obj.Key), TESTING_HOUSE_KEY)\n self.assertEqual(str(l_obj.Active), TESTING_HOUSE_ACTIVE)\n self.assertEqual(l_obj.UUID, TESTING_HOUSE_UUID)\n\nclass C03_Write(SetupMixin, unittest.TestCase):\n \"\"\"\n This section tests the reading and writing of XML used by house.\n \"\"\"\n\n def setUp(self):\n SetupMixin.setUp(self, ET.fromstring(xml_data.XML_LONG))\n\n def test_01_House(self):\n l_house_obj = houseXml.read_house_xml(self.m_pyhouse_obj)\n self.m_pyhouse_obj.House = l_house_obj\n l_xml = houseXml.write_house_xml(self.m_pyhouse_obj)\n print(PrettyFormatAny.form(l_xml, 'XML'))\n self.assertEqual(l_xml.tag, 'HouseDivision')\n self.assertEqual(l_xml.attrib['Name'], TESTING_HOUSE_NAME)\n\n\nclass Z1_JSON(SetupMixin, unittest.TestCase):\n \"\"\"\n This section tests the reading and writing of XML used by house.\n \"\"\"\n\n def setUp(self):\n SetupMixin.setUp(self, ET.fromstring(xml_data.XML_LONG))\n\n def test_01_Create(self):\n \"\"\" Create a JSON object for Location.5\n \"\"\"\n l_house = houseXml.read_house_xml(self.m_pyhouse_obj)\n print('House: {0:}'.format(l_house))\n l_json = json_tools.encode_json(l_house)\n print('JSON: {0:}'.format(l_json))\n\n# ## END DBK\n","repo_name":"LyleH/PyHouse_1","sub_path":"src/Modules/Housing/test/test_house.py","file_name":"test_house.py","file_ext":"py","file_size_in_byte":4946,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"34838655652","text":"import discord\nfrom discord.ext import commands\nfrom discord_slash import SlashCommand, SlashContext\nfrom discord_slash.utils.manage_commands import create_option\n\nimport random\nimport asyncio\nfrom datetime import date\nimport praw\nimport prayertimes\nfrom discord.utils import get\nfrom discord import FFmpegPCMAudio\nimport os\nfrom os import system\nimport youtube_dl\nimport requests\nimport ffmpeg\nimport urllib.request\nimport re\n\nbot = commands.Bot(command_prefix=\"/\")\nslash = SlashCommand(bot, sync_commands=True)\nbot.remove_command(\"help\")\nTOKEN = \"\"\n\n# VARIABLE SETUP\n\ndiceList = [1, 2, 3, 4, 5, 6]\n\nflipCoin = [\"Heads\", \"Tails\"]\n\nresponses = [\"🎱 Yes\", \"🎱 No\", \"🎱 Maybe\", \"🎱 Try Again Later\", \"🎱 Perhaps\", \"🎱 My sources say no\"]\n\nprayerTimes = prayertimes.PrayTimes()\n\nreddit = praw.Reddit(\n client_id=\"\",\n client_secret=\"\",\n user_agent=\"\",\n 
username=\"\",\n password=\"\",\n)\nmemes = reddit.subreddit(\"dankmemes\")\ncatSubreddit = reddit.subreddit(\"catpictures\")\ndogSubreddit = reddit.subreddit(\"dogpictures\")\nmonkeSubreddit = reddit.subreddit(\"Monke\")\n\ngiveawayMembers = []\n\nstatus = random.choice([discord.Status.online, discord.Status.do_not_disturb, discord.Status.idle])\n\n# BOT EVENTS\n\n@bot.event\nasync def on_ready():\n print('Logged in as:')\n print(bot.user.name)\n print(\"Online\")\n await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.playing, name=\"discord.py\"), status=status, afk=False)\n\n@bot.event\nasync def on_message(message: discord.Message):\n dmChannel = bot.get_channel(724786109154066463)\n if message.author.id != 184408626306351104 and not message.author.bot and message.guild is None:\n embed = discord.Embed(title=\"Message received\", description=\"**From** \" + str(message.author.mention) + \"\\n \\n\" + str(message.content), color=0x0000ff)\n await dmChannel.send(embed=embed)\n await bot.process_commands(message)\n\n# BOT COMMANDS\n\n# Help command\n@slash.slash(name=\"help\", description=\"Provides a list of commands.\")\nasync def help(ctx: SlashContext):\n helpText = \"\"\"\n /help - Shows this message\n\n\n **Fun commands**\n\n /flipcoin - Flips a coin\n /rolldice - Rolls a dice\n /eightball [question] - Gives a random response to the given question\n /tp - Teleports mentioned person\n /russianroulette - Shoots (pings) a random user\n /meme - Sends a random meme from r/dankmemes\n /dog - Sends a random dog picture from r/dogpictures\n /cat - Sends a random cat picture from r/catpictures\n /monke - Sends a random ape picture from r/ape\n /math [operation] [number] [number] - Performs an operation on two numbers\n /prayertimes - Gives the prayer times\n /giveaway [time in minutes] [item] - Starts a giveaway\n /pp - Gives a random PP size\n /animate [mode] [message] - Animates a message\n /poll [reaction one] [reaction two] [question] - Creates a poll\n\n\n **Voice chat commands (requires \"DJ\" role)**\n\n /join - Bot join voice channel \n /play [query] - Plays a song from YouTube with the given query \n /leave - Bot leaves voice channel \n /pause - Bot pauses current audio \n /resume - Bot resumes current audio \n\n\n **Moderation commands (requires \"kool kid\" role)**\n\n /mute [user] [reason] - Mutes a user indefinitely\n /unmute [user] - Unmutes user\n /cancel [user] [reason] - Kicks user from server\n /kill [user] [reason] - Bans user from server\n /cleanse [number] - Deletes a certain number of messages\n\n\n **Misc commands**\n\n /hello - Says hello and mentions author\n /invite [user] [game] - Bot invites a user to play a game via DMs\n /message [user id] [message] - Bot DMs given user\n /ping - Returns bot latency\n /who [user id] - Returns a user's information with the given ID\n \"\"\"\n\n embed = discord.Embed(title=\"Commands\", description=helpText, color=0x0000ff)\n await ctx.send(embed=embed)\n\n# Says hello\n@slash.slash(name=\"hello\", description=\"Says hello!\")\nasync def hello(ctx: SlashContext):\n await ctx.send(\"Hello \" + str(ctx.author.mention) + \"!\")\n\n# Flips a coin\n@slash.slash(name=\"flipcoin\", description=\"Flip a coin.\")\nasync def flipcoin(ctx: SlashContext):\n embed = discord.Embed(title=\":coin:\", description=\"You got \" + str(random.choice(flipCoin)), color=0x0000ff)\n await ctx.send(embed=embed)\n\n# Rolls a dice\n@slash.slash(name=\"rolldice\", description=\"Roll a dice.\")\nasync def rolldice(ctx: SlashContext):\n 
embed = discord.Embed(title=\"🎲\", description=\"You got a \" + str(random.choice(diceList)), color=0x0000ff)\n await ctx.send(embed=embed)\n\n# Magic 8 Ball\n@slash.slash(name=\"eightball\", description=\"Answers from the beyond!\", options=[\n create_option(name=\"question\", description=\"Ask your question.\", required=True, option_type=3)\n])\nasync def eightball(ctx: SlashContext, question):\n embed = discord.Embed(title=question, description=str(random.choice(responses)), color=0x0000ff)\n await ctx.send(embed=embed)\n\n# Teleport command\n@slash.slash(name=\"tp\", description=\"Teleport someone to a random location!\", options=[\n create_option(name=\"person\", description=\"Who do you want to teleport?\", required=True, option_type=3)\n])\nasync def tp(ctx: SlashContext, person):\n embed = discord.Embed(title=\"Teleported!\", description=str(person) + \" teleported to \" + str(random.randrange(-1000, 1000)) + \", \" + str(random.randrange(-1000, 1000)) + \", \" + str(random.randrange(-1000, 1000)), color=0x0000ff)\n await ctx.send(embed=embed)\n\n# Picks out a random server member\n@slash.slash(name=\"russianroulette\", description=\"Ping a random server member!\")\nasync def russianroulette(ctx: SlashContext):\n user = random.choice(ctx.channel.guild.members)\n await ctx.send(f\"{ctx.author.mention} *shot* {user.mention}\")\n\n# Sends a meme from reddit\n@slash.slash(name=\"meme\", description=\"Sends a random meme from r/dankmemes\")\nasync def meme(ctx: SlashContext):\n posts = memes.hot(limit=100)\n random_post_number = random.randint(0,100)\n for i, post in enumerate(posts):\n if i == random_post_number:\n embed = discord.Embed(title=\"r/dankmemes\", color=0x0000ff)\n embed.set_image(url=str(post.url))\n await ctx.send(embed=embed)\n\n# Sends a cat image from reddit\n@slash.slash(name=\"cat\", description=\"Sends a random picture from r/catpictures\")\nasync def cat(ctx: SlashContext):\n posts = catSubreddit.top(limit=100)\n random_post_number = random.randint(0,100)\n for i, post in enumerate(posts):\n if i == random_post_number:\n embed = discord.Embed(title=\"r/catpictures\", color=0x0000ff)\n embed.set_image(url=str(post.url))\n await ctx.send(embed=embed)\n\n# Sends a dog image from reddit\n@slash.slash(name=\"dog\", description=\"Sends a random picture from r/dogpictures\")\nasync def dog(ctx: SlashContext):\n posts = dogSubreddit.top(limit=100)\n random_post_number = random.randint(0,100)\n for i, post in enumerate(posts):\n if i == random_post_number:\n embed = discord.Embed(title=\"r/dogpictures\", color=0x0000ff)\n embed.set_image(url=str(post.url))\n await ctx.send(embed=embed)\n\n# Sends a monke image from reddit\n@slash.slash(name=\"monke\", description=\"Sends a random meme from r/monke\")\nasync def monke(ctx: SlashContext):\n posts = monkeSubreddit.top(limit=100)\n random_post_number = random.randint(0,100)\n for i, post in enumerate(posts):\n if i == random_post_number:\n embed = discord.Embed(title=\"r/Monke\", color=0x0000ff)\n embed.set_image(url=str(post.url))\n await ctx.send(embed=embed)\n\n# This command solves math problems\n@slash.slash(name=\"math\", description=\"Solve a math problem\", options=[\n create_option(name=\"operation\", description=\"add, subtract, multiply, or divide?\", required=True, option_type=3),\n create_option(name=\"number1\", description=\"What's the first number?\", required=True, option_type=3),\n create_option(name=\"number2\", description=\"What's the second number??\", required=True, option_type=3)\n])\nasync def math(ctx: 
SlashContext, operation, number1, number2):\n if operation == \"add\":\n await ctx.send(str(number1) + \" + \" + str(number2) + \" = \" + str(int(number1) + int(number2)))\n elif operation == \"subtract\":\n await ctx.send(str(number1) + \" - \" + str(number2) + \" = \" + str(int(number1) - int(number2)))\n elif operation == \"multiply\":\n await ctx.send(str(number1) + \" * \" + str(number2) + \" = \" + str(int(number1) * int(number2)))\n elif operation == \"divide\":\n await ctx.send(str(number1) + \" / \" + str(number2) + \" = \" + str(int(number1) / int(number2)))\n\n# Bot joins the voice channel\n@slash.slash(name=\"join\", description=\"Bot joins the voice channel.\")\n@commands.has_role(\"DJ\")\nasync def join(ctx: SlashContext):\n author = ctx.author\n channel = author.voice.channel\n vc = await channel.connect()\n embed = discord.Embed(title=\"Connected\", description=\"Hello\" + \"\\n \\n\" + \"Action requested by \" + str(ctx.author.mention), color=0x0000ff)\n await ctx.send(embed=embed)\n\n# Plays audio with the given query\n@slash.slash(name=\"play\", description=\"Bot plays a YouTube video with the given query.\", options=[\n create_option(name=\"query\", description=\"What do you want to listen to?\", required=True, option_type=3),\n])\n@commands.has_role(\"DJ\")\nasync def play(ctx: SlashContext, *, query):\n ydl_opts = {\n 'format': 'bestaudio/best',\n 'postprocessors': [{\n 'key': 'FFmpegExtractAudio',\n 'preferredcodec': 'mp3',\n 'preferredquality': '192',\n }],\n }\n videoTitle = \"\"\n voice = get(bot.voice_clients, guild=ctx.guild)\n songThere = os.path.isfile(\"song.mp3\")\n\n def repeat():\n voice.play(discord.FFmpegPCMAudio(\"song.mp3\"), after=lambda e: repeat())\n voice.volume = 100\n voice.is_playing()\n\n try:\n if songThere:\n os.remove(\"song.mp3\")\n except PermissionError:\n await ctx.send(\"Something went wrong :/\")\n return\n\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n searchQuery = query.replace(\" \", \"+\")\n html = urllib.request.urlopen(\"https://www.youtube.com/results?search_query=\" + searchQuery)\n video_ids = re.findall(r\"watch\\?v=(\\S{11})\", html.read().decode())\n result = \"https://www.youtube.com/watch?v=\" + video_ids[0]\n \n info_dict = ydl.extract_info(result, download=False)\n videoTitle = info_dict.get('title', None)\n videoThumbnail = info_dict.get('thumbnail', None)\n \n embedShortly = discord.Embed(title=\"Downloading\", description=videoTitle + \"\\n \\n\" + \"Audio requested by \" + str(ctx.author.mention), color=0x00ff00)\n await ctx.send(embed=embedShortly)\n\n ydl.download([result])\n \n for file in os.listdir(\"./\"):\n if file.endswith(\".mp3\"):\n os.rename(file, 'song.mp3')\n \n voice.play(discord.FFmpegPCMAudio(\"song.mp3\"), after=lambda e: repeat())\n voice.volume = 100\n voice.is_playing()\n embedPlaying = discord.Embed(title=\"Now playing\", description=videoTitle + \"\\n \\n\" + \"Audio requested by \" + str(ctx.author.mention), color=0x0000ff)\n embedPlaying.set_image(url=videoThumbnail)\n await ctx.send(embed=embedPlaying)\n await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.listening, name=videoTitle), status=status, afk=False)\n\n# Bot pauses current audio\n@slash.slash(name=\"pause\", description=\"Bot pauses the playing audio.\")\n@commands.has_role(\"DJ\")\nasync def pause(ctx: SlashContext):\n voice = get(bot.voice_clients, guild=ctx.guild)\n voice.pause()\n embed = discord.Embed(title=\"Paused\", description=\"Audio has been paused\" + \"\\n \\n\" + \"Action requested by \" + 
str(ctx.author.mention), color=0x0000ff)\n await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.playing, name=\"discord.py\"), status=status, afk=False)\n await ctx.send(embed=embed)\n\n# Bot resumes playing audio\n@slash.slash(name=\"resume\", description=\"Bot resumes the playing audio.\")\n@commands.has_role(\"DJ\")\nasync def resume(ctx: SlashContext):\n voice = get(bot.voice_clients, guild=ctx.guild)\n voice.resume()\n embed = discord.Embed(title=\"Resumed\", description=\"Audio has been resumed\" + \"\\n \\n\" + \"Action requested by \" + str(ctx.author.mention), color=0x0000ff)\n await ctx.send(embed=embed)\n\n# Bot disconnects from voice channel\n@slash.slash(name=\"leave\", description=\"Bot disconnects from the voice channel.\")\n@commands.has_role(\"DJ\")\nasync def leave(ctx: SlashContext):\n voice = ctx.guild.voice_client\n await voice.disconnect(force = True)\n embed = discord.Embed(title=\"Disconnected\", description=\"Goodbye\" + \"\\n \\n\" + \"Action requested by \" + str(ctx.author.mention), color=0x0000ff)\n await ctx.send(embed=embed)\n await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.playing, name=\"discord.py\"), status=status, afk=False)\n\n# Returns the prayer times for that day\n@slash.slash(name=\"prayertimes\", description=\"Get the Islamic prayer times for the day.\")\nasync def prayertimes(ctx: SlashContext):\n prayerTimesList = []\n times = prayerTimes.getTimes(date.today(), (42, 73), 5.7)\n for i in ['Fajr', 'Dhuhr', 'Asr', 'Maghrib', 'Isha']:\n prayerTimesList.append(str(i + ': ' + times[i.lower()]))\n prayerTimesStr = prayerTimesList[0] + \"\\n\" + prayerTimesList[1] + \"\\n\" + prayerTimesList[2] + \"\\n\" + prayerTimesList[3] + \"\\n\" + prayerTimesList[4] + \"\\n\"\n embed = discord.Embed(title=\"Prayer times\", description=prayerTimesStr, color=0x0000ff)\n await ctx.send(embed=embed)\n\n# Starts a giveaway\n@slash.slash(name=\"giveaway\", description=\"Start a giveaway.\", options=[\n create_option(name=\"duration\", description=\"How long is the giveaway (in minutes)?\", required=True, option_type=3),\n create_option(name=\"item\", description=\"What are you giving away?\", required=True, option_type=3),\n])\nasync def giveaway(ctx: SlashContext, duration, *, item):\n embed = discord.Embed(title=item, description=str(ctx.author.mention) + \" is giving away \" + str(item) + \"\\n\" + \"\\n\" + \"React below to enter!\", color=0x0000ff)\n message = await ctx.send(embed=embed)\n await message.add_reaction(\"💩\")\n reaction, user = await bot.wait_for('reaction_add')\n giveawayMembers.append(user)\n\n await asyncio.sleep(int(duration) * 60)\n\n winner = random.choice(giveawayMembers)\n winEmbed = discord.Embed(title=\"Winner\", description=str(winner.mention + \" has won \" + item), color=0x0000ff)\n await ctx.send(embed=winEmbed)\n\n# PP size\n@slash.slash(name=\"pp\", description=\"How big is your PP?\")\nasync def pp(ctx: SlashContext):\n lengthOfPP = []\n lengthOfPPString = \"\"\n\n for i in range(random.randrange(1, 20)):\n lengthOfPP.append(\"=\")\n\n embed = discord.Embed(title=\"Penis size\", description=\"8\" + lengthOfPPString.join(lengthOfPP) + \"D\", color=0x0000ff)\n await ctx.send(embed=embed)\n\n# Override command\n@bot.command(pass_context = True)\n@commands.is_owner()\nasync def override(ctx, *, message):\n await ctx.send(message)\n await ctx.message.delete()\n\n user = bot.get_user(184408626306351104)\n message = discord.Embed(title=\"Override\", description=\"**Name** \" + 
str(ctx.message.author.name) + \"\\n\" + \"**User ID** \" + str(ctx.message.author.id) + \"\\n\" + \"**Message** \" + message, color=0x0000ff)\n await user.send(embed=message)\n\n# Invites a user to play a game\n@slash.slash(name=\"invite\", description=\"Invite a friend to a game!\", options=[\n create_option(name=\"user\", description=\"Ping your friend\", required=True, option_type=discord.Member),\n create_option(name=\"game\", description=\"What game do you wanna play?\", required=True, option_type=3),\n])\nasync def invite(ctx: SlashContext, member: discord.Member, game):\n user = await member.create_dm()\n await user.send(\"Hello, \" + ctx.author.mention + \" invited you to play \" + str(game))\n embed = discord.Embed(title=\"Invite sent\", description=\"**From** \" + str(ctx.author.mention) + \"\\n\" + \"**To** \" + str(member.mention), color=0x0000ff)\n await ctx.send(embed=embed)\n\n# Message command\n@slash.slash(name=\"message\", description=\"Message someone!\", options=[\n create_option(name=\"user\", description=\"Ping your friend\", required=True, option_type=discord.Member),\n create_option(name=\"message\", description=\"What do you wanna say?\", required=True, option_type=3),\n])\nasync def message(ctx: SlashContext, member: discord.Member, message):\n try:\n user = await member.create_dm()\n await user.send(message)\n embed = discord.Embed(title=\"Message sent\", description=message + \"\\n \\n**From** \" + str(ctx.author.mention) + \"\\n\" + \"**To** \" + member.mention, color=0x0000ff)\n await ctx.send(embed=embed)\n except Exception:\n embed = discord.Embed(title=\"Message failed\", description=\"Error 404: User not found.\", color=0xff0000)\n await ctx.send(embed=embed)\n\n# Mutes user\n@slash.slash(name=\"mute\", description=\"Mute a server member.\", options=[\n create_option(name=\"member\", description=\"Ping the member\", required=True, option_type=discord.Member),\n create_option(name=\"reason\", description=\"Why will they be muted?\", required=True, option_type=3),\n])\n@commands.has_role(\"kool kid\")\nasync def mute(ctx: SlashContext, member: discord.Member, reason):\n role = discord.utils.get(member.guild.roles, name=\"Muted\")\n await member.add_roles(role)\n embed = discord.Embed(title=\"User muted\", description=\"**User** \" + str(member.mention) + \"\\n\" + \"**Reason** \" + str(reason), color=0x0000ff)\n embed.set_image(url=\"https://media.tenor.com/images/ac7f9ffd8f172477e28ab284b1134b76/tenor.gif\")\n await ctx.send(embed=embed)\n user = await member.create_dm()\n await user.send(\"You were muted for \" + str(reason))\n\n# Unmutes a user\n@slash.slash(name=\"unmute\", description=\"Unmute a server member.\", options=[\n create_option(name=\"member\", description=\"Ping the member\", required=True, option_type=discord.Member),\n])\n@commands.has_role(\"kool kid\")\nasync def unmute(ctx: SlashContext, member: discord.Member):\n roleMuted = discord.utils.get(member.guild.roles, name=\"Muted\")\n await member.remove_roles(roleMuted)\n await ctx.send(str(member.mention) + \" has been unmuted!\")\n\n# Kicks user\n@slash.slash(name=\"cancel\", description=\"Kick a server member.\", options=[\n create_option(name=\"member\", description=\"Ping the member\", required=True, option_type=discord.Member),\n create_option(name=\"reason\", description=\"Why will they be kicked?\", required=True, option_type=3)\n])\n@commands.has_role(\"kool kid\")\nasync def cancel(ctx: SlashContext, member: discord.Member, reason):\n await member.kick(reason=reason)\n embed = 
discord.Embed(title=\"User kicked\", description=\"**User** \" + str(member.mention) + \"\\n\" + \"**Reason** \" + str(reason), color=0x0000ff)\n embed.set_image(url=\"https://media1.giphy.com/media/edP47TgaxmTy4OV2cW/giphy.gif\")\n await ctx.send(embed=embed)\n user = await member.create_dm()\n await user.send(\"You were kicked for \" + str(reason))\n\n# Bans user\n@slash.slash(name=\"kill\", description=\"Ban a server member.\", options=[\n create_option(name=\"member\", description=\"Ping the member\", required=True, option_type=discord.Member),\n create_option(name=\"reason\", description=\"Why will they be banned?\", required=True, option_type=3)\n])\n@commands.has_role(\"kool kid\")\nasync def kill(ctx: SlashContext, member: discord.Member, reason):\n await member.ban(reason=reason)\n embed = discord.Embed(title=\"User banned\", description=\"**User** \" + str(member.mention) + \"\\n\" + \"**Reason** \" + str(reason), color=0x0000ff)\n embed.set_image(url=\"https://media.giphy.com/media/jxqOV4sZ8eM5o4W16H/giphy.gif\")\n await ctx.send(embed=embed)\n user = await member.create_dm()\n await user.send(\"You were banned for \" + str(reason))\n\n# Deletes messages\n@slash.slash(name=\"cleanse\", description=\"Delete a set number of messages.\", options=[\n create_option(name=\"number\", description=\"How many messages do you want to delete?\", required=True, option_type=3)\n])\n@commands.has_role(\"kool kid\")\nasync def cleanse(ctx: SlashContext, number):\n await ctx.channel.purge(limit=int(number))\n embed = discord.Embed(title=\"Cleansed\", description=\"**This channel has been cleansed** \\n\" + str(number) + \" messages have been deleted.\", color=0x0000ff)\n embed.set_image(url=\"https://media.tenor.com/images/00fb44a75f05b234087ed5c1c93763e9/tenor.gif\")\n await ctx.send(embed=embed)\n \n# Animates message\n@slash.slash(name=\"animate\", description=\"Animate a message.\", options=[\n create_option(name=\"mode\", description=\"Horizontal or vertical?\", required=True, option_type=3),\n create_option(name=\"message\", description=\"What do you want to animate?\", required=True, option_type=3),\n])\nasync def animate(ctx: SlashContext, mode, message):\n if str(mode).lower() == \"horizontal\":\n messageSpaces = \"\"\n botMessage = await ctx.send(message)\n for i in range(20):\n messageSpaces += \"â €\" * i\n await botMessage.edit(content = str(messageSpaces) + str(message))\n await asyncio.sleep(0.2)\n elif str(mode).lower() == \"vertical\":\n messageEnters = \"\"\n botMessage = await ctx.send(message)\n for i in range(10):\n messageEnters += \"\\n â €\" * i\n await botMessage.edit(content = str(messageEnters) + str(message))\n await asyncio.sleep(0.2)\n\n# Returns bot latency\n@slash.slash(name=\"ping\", description=\"Returns bot latency.\")\nasync def ping(ctx: SlashContext):\n embed = discord.Embed(title = \"Pong!\", description = \"**Latency** \" + str(round(bot.latency * 1000)) + \"ms\", color=0x0000ff)\n await ctx.send(embed=embed)\n\n# Returns user info\n@slash.slash(name=\"who\", description=\"Get user info.\", options=[\n create_option(name=\"userid\", description=\"What's the User ID? 
(you need developer mode enabled)\", required=True, option_type=3),\n])\nasync def who(ctx: SlashContext, userID):\n print(userID)\n user = await bot.fetch_user(int(userID))\n userInfo = \"**User ID** \" + str(user.id) + \"\\n\" + \"**Is Bot** \" + str(user.bot) + \"\\n\" + \"**Created** \" + str(user.created_at) + \"\\n\"\n embed = discord.Embed(title = user.name + \"#\" + user.discriminator, description = userInfo, color=0x0000ff)\n embed.set_image(url=user.avatar_url)\n await ctx.send(embed=embed)\n\n# Creates a poll\n@slash.slash(name=\"poll\", description=\"Create a poll!\", options=[\n create_option(name=\"reaction1\", description=\"What's the first response? (emoji)\", required=True, option_type=3),\n create_option(name=\"reaction2\", description=\"What's the second response? (emoji)\", required=True, option_type=3),\n create_option(name=\"question\", description=\"What's the question?\", required=True, option_type=3),\n])\nasync def poll(ctx: SlashContext, reactionOne, reactionTwo, question):\n embed = discord.Embed(title = \"Poll\", description = question, color=0x0000ff)\n message = await ctx.send(embed=embed)\n await message.add_reaction(reactionOne)\n await message.add_reaction(reactionTwo)\n\n# Gives DJ and kool kid roles\n@bot.command(pass_context = True)\n@commands.is_owner()\nasync def roles(ctx):\n guild = ctx.guild\n koolKid = await guild.create_role(name=\"kool kid\")\n dj = await guild.create_role(name=\"DJ\")\n await ctx.author.add_roles(koolKid, dj)\n\n\nbot.run(TOKEN)\n","repo_name":"rtstudios/discord-py-bot","sub_path":"python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":22675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"28168738781","text":"\"\"\"add fishes\n\nRevision ID: c7e7dddd4936\nRevises: 04f485073d17\nCreate Date: 2018-03-11 15:11:10.899885\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'c7e7dddd4936'\ndown_revision = '04f485073d17'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('fishes', sa.Column('commercial', sa.Text(), nullable=True))\n op.add_column('fishes', sa.Column('danger_reason', sa.Text(), nullable=True))\n op.add_column('fishes', sa.Column('domestication', sa.Text(), nullable=True))\n op.add_column('fishes', sa.Column('foods', sa.Text(), nullable=True))\n op.add_column('fishes', sa.Column('growth', sa.Text(), nullable=True))\n op.add_column('fishes', sa.Column('protection', sa.Text(), nullable=True))\n op.add_column('fishes', sa.Column('protection_sugession', sa.Text(), nullable=True))\n op.add_column('fishes', sa.Column('quantity', sa.Text(), nullable=True))\n op.add_column('fishes', sa.Column('reproduce', sa.Text(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('fishes', 'reproduce')\n op.drop_column('fishes', 'quantity')\n op.drop_column('fishes', 'protection_sugession')\n op.drop_column('fishes', 'protection')\n op.drop_column('fishes', 'growth')\n op.drop_column('fishes', 'foods')\n op.drop_column('fishes', 'domestication')\n op.drop_column('fishes', 'danger_reason')\n op.drop_column('fishes', 'commercial')\n # ### end Alembic commands ###\n","repo_name":"WooWoods/fishery","sub_path":"migrations/versions/c7e7dddd4936_add_fishes.py","file_name":"c7e7dddd4936_add_fishes.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"28976825619","text":"import sys\n\nimport time\nimport numpy as np\nfrom math import *\nimport copy\nimport random\nfrom math import sqrt, exp, log\nimport operator as op\nimport bisect\n\n\ndef cdf(weights):\n total = sum(weights)\n result = []\n cumsum = 0\n for w in weights:\n cumsum += w\n result.append(cumsum / total)\n return result\n\n\ndef choice(population, cumm):\n # assert len(population) == len(weights)\n # cdf_vals = cumm\n x = random.random()\n idx = bisect.bisect(cumm, x)\n return population[idx]\n\n\ndef likelihood(KKH, aarea, seta, ppm, userw, JJ): # seta: spin directions\n like = 0.\n l1 = 0.\n l2 = 0.\n for i in seta.keys(): # Hamiltonian function\n cor = userw[i]\n for j in ppm[i]:\n l1 = l1 + seta[i] * seta[j] * 0.5\n for k in range(len(KKH) - 1):\n for l in range(len(KKH[k])):\n l2 = l2 + KKH[k][l] * aarea[cor][1][k][l] * seta[i]\n l2 = l2 + KKH[3][0] * aarea[cor][1][4][0] * seta[i]\n l1 = l1 * JJ\n like = l1 + l2\n return like, l1, l2\n\n\ndef updatelike(KKH, aarea, seta, ppm, L, x, ppair, userw, JJ):\n old1 = 0.\n old2 = 0.\n new = 0.\n os = seta[x]\n cor = userw[x]\n\n for e in ppm[x]:\n old1 = old1 + os * seta[e]\n for k in range(len(KKH) - 1):\n for l in range(len(KKH[k])):\n old2 = old2 + KKH[k][l] * aarea[cor][1][k][l] * os\n old2 = old2 + KKH[3][0] * aarea[cor][1][4][0] * os\n old1 = old1 * JJ\n L2 = L - 2. * (old1 + old2)\n return L2\n\n\ndef generateAgg(K, Kparams, aarea, ppair):\n Agg = {}\n llen = aarea.keys() # area.keys:wardCode\n A = 0. # estimated number of links\n tnodes = 0\n for ii in range(len(llen)):\n i = llen[ii]\n s1 = size[i]\n tnodes = tnodes + s1\n for jj in range(len(llen) - (ii)):\n kk = ii + jj\n k = llen[kk]\n s2 = size[k]\n dgg = Kparams[0]\n for zz in range(K):\n dgg = dgg + Kparams[zz + 1] * ppair[(i, k)][zz]\n if i != k:\n A = A + s1 * s2 * exp(-1. * (dgg))\n else:\n A = A + s1 * (s1 - 1.) * 0.5 * exp(-1. * (dgg))\n Agg[str(i) + str(k)] = 1. / (1 + exp(dgg))\n Agg[str(k) + str(i)] = 1. 
/ (1 + exp(dgg))\n return Agg, A / float(tnodes)\n\n\ndef generatenet(Agg, aarea, userw, size):\n llen = aarea.keys() # area.keys:wardCode\n llenu = {}\n llindex = []\n ssize = 0\n for w in llen:\n llenu[w] = []\n llindex.append(ssize)\n ssize = ssize + size[w]\n users = len(userw.keys())\n net = {}\n for i in range(users):\n net[i] = []\n totl = 0\n for ii in range(len(llen)):\n cor = llen[ii]\n indi = llindex[llen.index(cor)]\n pp = Agg[str(cor) + str(cor)]\n Ni = size[cor]\n # without repetition\n for i in range(Ni):\n for j in range(Ni - (i + 1)):\n k = i + j + 1\n if random.random() < pp:\n net[indi + i].append(indi + k)\n net[indi + k].append(indi + i)\n totl = totl + 1\n\n for jj in range(len(llen) - (ii + 1)):\n kk = ii + jj + 1\n cor2 = llen[kk]\n indk = llindex[llen.index(cor2)]\n pp = Agg[str(cor) + str(cor2)]\n # with repetition\n Nk = size[cor2]\n for i in range(Ni):\n for k in range(Nk):\n if random.random() < pp:\n net[indi + i].append(indk + k)\n net[indk + k].append(indi + i)\n totl = totl + 1\n return net, totl / float(users)\n\n\ndef GgenerateSpins(KKH, userw, Kparams, ppm, aarea, ppair, JJ): # ppm: net (the network structure)\n # random network:\n spins = [-1, 1]\n nseta = {} # nseta: spin directions\n users = len(userw.keys())\n for e in range(users):\n nseta[e] = random.choice(spins)\n count = 0\n L, gg1, gg2 = likelihood(KKH, aarea, nseta, ppm, userw, JJ) # compute the Hamiltonian function\n i = 0\n ext_time = 0\n cacc = 0\n T = 1.\n llen = aarea.keys()\n while count < 55 * users: # iteratively refine the Hamiltonian function\n x = random.choice(range(users))\n # evaluate change of likelihood:\n L2 = updatelike(KKH, aarea, nseta, ppm, L, x, ppair, userw, JJ)\n\n AL = L - L2\n try:\n th = 1. / (1. + exp(AL / T))\n except OverflowError:\n th = exp(-AL / T)\n pass\n a = random.random()\n if a <= th:\n nseta[x] = nseta[x] * (-1)\n L = L2\n\n cacc = cacc + 1\n else:\n count = count + 1\n ext_time += 1\n L, ll1, ll2 = likelihood(KKH, aarea, nseta, ppm, userw, JJ)\n return nseta, ll1, ll2\n\n\ndef latlongdistance(lat1, lon1, lat2, lon2):\n R = 6373.0\n\n lat1 = radians(lat1)\n lon1 = radians(lon1)\n lat2 = radians(lat2)\n lon2 = radians(lon2)\n\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * atan2(sqrt(a), sqrt(1 - a))\n\n distance = R * c\n return distance\n\n\ndef ncr(n, r):\n r = min(r, n - r)\n if r == 0: return 1\n numer = reduce(op.mul, xrange(n, n - r, -1))\n denom = reduce(op.mul, xrange(1, r + 1))\n return numer // denom\n\n\ndef funcmean(vec):\n mean = sum(vec) / float(len(vec))\n s2 = 0.\n for e in vec:\n s2 = s2 + (e - mean) * (e - mean)\n s2 = sqrt(s2 / (len(vec) - 1.))\n return mean, s2\n\n\ndef areadistance(K, KH, KH12, BKH, Kparams, aarea, Baarea, inBor, ppair, size, userw, J, J12, BJ, ggg):\n # generate Agg: build the adjacency (link-probability) matrix\n Agg, AA = generateAgg(K, Kparams, aarea, ppair)\n if AA < 3: # We only generate spin configurations for networks with average degree (<k>) smaller than 6 (<k>=2L/N; AA=L/N)\n # same network for the three elections: generate the network structure\n net, avel = generatenet(Agg, aarea, userw, size)\n\n # ME16:\n fout2 = open('ME16config' + str(ggg) + '.dat', 'w')\n fout2.write('wards KBIfraction originalfration\\n')\n nseta, ml1, ml2 = GgenerateSpins(KH, userw, Kparams, net, aarea, ppair, J) # generate spins\n llen = aarea.keys() # area.keys:wardCode\n llindex = []\n ssize = 0\n for w in llen:\n llindex.append(ssize)\n ssize = ssize + size[w]\n dall = 0.\n tall = 0\n di = 0.\n tutto = 0\n for cor in aarea.keys(): # area.keys:wardCode\n if cor in Baarea.keys():\n indi = 
llindex[llen.index(cor)]\n rneg = aarea[cor][0][0] # area: { wardCode:[ [ME16,ME12],[education,age,gender,[longitude,latitude],income],BoroughID ] }\n neg = 0\n for e in range(size[cor]):\n if nseta[indi + e] == -1:\n neg = neg + 1\n neg = neg / float(size[cor])\n dall = dall + abs(neg - rneg)\n tall = tall + 1\n di = di + abs(neg - rneg)\n tutto = tutto + 1\n fout2.write('%s %s %s\\n' % (cor, neg, rneg))\n else:\n indi = llindex[llen.index(cor)]\n rneg = aarea[cor][0][0]\n neg = 0\n for e in range(size[cor]):\n if nseta[indi + e] == -1:\n neg = neg + 1\n neg = neg / float(size[cor])\n dall = dall + abs(neg - rneg)\n tall = tall + 1\n fout2.write('%s %s %s\\n' % (cor, neg, rneg))\n d2 = 0\n t2 = 0\n for a in inBor.keys():\n neg = 0\n asize = 0\n rneg = inBor[a][0][0]\n for cor in inBor[a][1]:\n indi = llindex[llen.index(cor)]\n ch = 0\n for e in range(size[cor]):\n if nseta[indi + e] == -1:\n neg = neg + 1\n ch = ch + 1\n asize = asize + size[cor]\n neg = neg / float(asize)\n d2 = d2 + abs(neg - rneg) * len(inBor[a][1])\n t2 = t2 + len(inBor[a][1])\n res16 = [dall / float(tall), di / float(tutto), d2 / float(t2), (di + d2) / float(tutto + t2)]\n fout2.close()\n\n # ME12:\n fout2 = open('ME12config' + str(ggg) + '.dat', 'w')\n fout2.write('wards KBIfraction originalfration\\n')\n nseta, ml1, ml2 = GgenerateSpins(KH12, userw, Kparams, net, aarea, ppair, J12)\n llen = aarea.keys()\n llindex = []\n ssize = 0\n for w in llen:\n llindex.append(ssize)\n ssize = ssize + size[w]\n dall12 = 0.\n tall12 = 0\n di12 = 0.\n tutto12 = 0\n for cor in aarea.keys():\n if cor in Baarea.keys():\n indi = llindex[llen.index(cor)]\n rneg = aarea[cor][0][1]\n neg = 0\n for e in range(size[cor]):\n if nseta[indi + e] == -1:\n neg = neg + 1\n neg = neg / float(size[cor])\n dall12 = dall12 + abs(neg - rneg)\n tall12 = tall12 + 1\n di12 = di12 + abs(neg - rneg)\n tutto12 = tutto12 + 1\n fout2.write('%s %s %s\\n' % (cor, neg, rneg))\n else:\n indi = llindex[llen.index(cor)]\n rneg = aarea[cor][0][1]\n neg = 0\n for e in range(size[cor]):\n if nseta[indi + e] == -1:\n neg = neg + 1\n neg = neg / float(size[cor])\n dall12 = dall12 + abs(neg - rneg)\n tall12 = tall12 + 1\n fout2.write('%s %s %s\\n' % (cor, neg, rneg))\n d122 = 0\n t122 = 0\n for a in inBor.keys():\n neg = 0\n asize = 0\n rneg = inBor[a][0][1]\n for cor in inBor[a][1]:\n indi = llindex[llen.index(cor)]\n ch = 0\n for e in range(size[cor]):\n if nseta[indi + e] == -1:\n neg = neg + 1\n ch = ch + 1\n asize = asize + size[cor]\n neg = neg / float(asize)\n d122 = d122 + abs(neg - rneg) * len(inBor[a][1])\n t122 = t122 + len(inBor[a][1])\n res12 = [dall12 / float(tall12), di12 / float(tutto12), d122 / float(t122),\n (di12 + d122) / float(tutto12 + t122)]\n fout2.close()\n\n # EUref:\n fout2 = open('EUrconfig' + str(ggg) + '.dat', 'w')\n fout2.write('area(wards,Boroughs) KBIfraction originalfration\\n')\n nseta, bl1, bl2 = GgenerateSpins(BKH, userw, Kparams, net, aarea, ppair, BJ)\n Btutto = 0\n Bdi = 0\n for cor in Baarea.keys():\n indi = llindex[llen.index(cor)]\n rneg = Baarea[cor][0]\n neg = 0\n for e in range(size[cor]):\n if nseta[indi + e] == -1:\n neg = neg + 1\n neg = neg / float(size[cor])\n Bdi = Bdi + abs(neg - rneg)\n Btutto = Btutto + 1\n fout2.write('%s %s %s\\n' % (cor, neg, rneg))\n Bd2 = 0\n Bt2 = 0\n for a in inBor.keys():\n neg = 0\n asize = 0\n rneg = inBor[a][0][2]\n for cor in inBor[a][1]:\n indi = llindex[llen.index(cor)]\n ch = 0\n for e in range(size[cor]):\n if nseta[indi + e] == -1:\n neg = neg + 1\n ch = ch + 1\n asize = asize + 
size[cor]\n neg = neg / float(asize)\n Bd2 = Bd2 + abs(neg - rneg) * len(inBor[a][1])\n Bt2 = Bt2 + len(inBor[a][1])\n fout2.write('Borough %s %s %s\\n' % (a, neg, rneg))\n resB = [Bdi / float(Btutto), Bd2 / float(Bt2), (Bdi + Bd2) / float(Btutto + Bt2)]\n fout2.close()\n return res16, res12, resB, avel\n else:\n return [], [], [], 100\n\n\n##########################################################################\nNnumber = int(sys.argv[1]) # read the command-line argument\n\nfh2 = open('EUwards.dat', 'r') # ward-level EU referendum data\nigot2 = fh2.readlines()\nfh2.close()\n\nBarea = {}\n# Brexit data\nfor line in igot2:\n about = line.strip().split(' ')\n w = about[0]\n Barea[w] = []\n Barea[w].append(1. - float(about[1]))\n\nfh = open('ME16-12_sociodemographics.dat', 'r') # ward-level mayoral election and sociodemographic data\nigot = fh.readlines()\ndel igot[0]\nfh.close()\n\nglobal K\nK = 5 # Blau-space dimension\n\narea = {}\nolda = 0\nind = 0\n\nsize = {}\ninBor = {}\ninBre = []\nfor line in igot: # igot: ME16-12_sociodemographics, ward-level mayoral election and sociodemographic data\n about = line.strip().split(' ')\n w = about[0] # wardCODE\n area[w] = [] # area: { wardCode:[ [ME16,ME12],[education,age,gender,[longitude,latitude],income],BoroughID ] }\n area[w].append([1. - float(about[2]), 1. - float(about[3])]) # 2:ME16 3:ME12\n size[w] = float(about[10]) # 10:size\n vec = []\n vec.append([float(about[4])]) # 4education\n vec.append([float(about[5])]) # 5age\n vec.append([float(about[6])]) # 6gender\n vec.append([float(about[7]), float(about[8])]) # 7 8 distance\n vec.append([float(about[9])]) # 9income\n area[w].append(vec)\n area[w].append(int(about[1])) # 1BoroughID\n # list of Boroughs missing in EUreferendum data:\n if int(about[1]) < 33 and int(about[1]) != 7 and w not in Barea.keys():\n try:\n inBor[int(about[1])][1].append(w)\n except KeyError:\n inBor[int(about[1])] = [[], [w]]\n pass\n\nfh.close()\n\n# Rescale:\nminsize = 40 # min population size in a ward\nminw = min(size.values())\ntotn = 0\nfor w in size.keys():\n totn = totn + int((size[w] / minw) * minsize)\n size[w] = int((size[w] / minw) * minsize)\n\nfh4 = open('ME16-12EUBoroughs.dat', 'r') # borough-level mayoral election and EU referendum results\nigot4 = fh4.readlines()\ndel igot4[0]\nfh4.close()\n\n# EUref data:\nfor line in igot4:\n about = line.strip().split(' ')\n a = int(about[0])\n if a in inBor.keys():\n inBor[a][0] = [1. - float(about[3]), 1. - float(about[4]), 1. - float(about[2])] # ME2016,ME2012,EUreferendum\n\nuserw = {}\nind = 0\nfor w in area.keys(): # area.keys:wardCode\n for i in range(size[w]):\n userw[ind] = w\n ind = ind + 1\n\npairw = {}\n\nssum = [0.] * K\nss2 = [0.] * K\ntotsum = 0.\n\nllen = area.keys() # area.keys:wardCode\n# area: { wardCode:\n# [ [ME16,ME12],\n# [education,\n# age,\n# gender,\n# [longitude,latitude],\n# income\n# ],\n# BoroughID\n# ]\n# }\nfor ii in range(len(llen)):\n i = llen[ii]\n v1 = area[i][1]\n s1 = size[i]\n for jj in range(len(llen) - (ii)):\n kk = ii + jj\n k = llen[kk]\n v2 = area[k][1]\n s2 = size[k]\n if i == k:\n fff = int(s1 * (s1 - 1.) * 0.5)\n else:\n fff = s1 * s2\n pairw[(i, k)] = [0.] * (K)\n pairw[(k, i)] = [0.] 
* (K)\n\n # edu:\n absdis = abs(v1[0][0] - v2[0][0])\n pairw[(i, k)][0] = float(absdis)\n ssum[0] = ssum[0] + absdis * fff\n ss2[0] = ss2[0] + absdis * absdis * fff\n # age\n absdis = abs(v1[1][0] - v2[1][0])\n pairw[(i, k)][1] = float(absdis)\n ssum[1] = ssum[1] + absdis * fff\n ss2[1] = ss2[1] + absdis * absdis * fff\n # gender:\n absdis = abs(v1[2][0] - v2[2][0])\n pairw[(i, k)][2] = float(absdis)\n ssum[2] = ssum[2] + absdis * fff\n ss2[2] = ss2[2] + absdis * absdis * fff\n # distance:\n absdis = latlongdistance(v1[3][0], v1[3][1], v2[3][0], v2[3][1])\n pairw[(i, k)][3] = float(absdis)\n ssum[3] = ssum[3] + absdis * fff\n ss2[3] = ss2[3] + absdis * absdis * fff\n # income:\n absdis = abs(v1[4][0] - v2[4][0])\n pairw[(i, k)][4] = float(absdis)\n ssum[4] = ssum[4] + absdis * fff\n ss2[4] = ss2[4] + absdis * absdis * fff\n totsum = totsum + fff\n\n# Blau space distance normalization:\nfdisc = [0.] * K\nstdv = [0.] * K\nfor x in range(K):\n fdisc[x] = ssum[x] / float(totsum)\n stdv[x] = sqrt((ss2[x] - 2. * ssum[x] * fdisc[x] + totsum * fdisc[x] * fdisc[x]) / float(totsum - 1))\n\nllen = area.keys()\nfor ii in range(len(llen)):\n for jj in range(len(llen) - (ii)):\n kk = ii + jj\n i = llen[ii]\n k = llen[kk]\n for x in range(K):\n a = (pairw[(i, k)][x] - fdisc[x]) / (2 * stdv[x])\n pairw[(i, k)][x] = a\n pairw[(k, i)][x] = a\n\ndel fdisc\ndel stdv\n\nfout = open('parameters' + str(Nnumber) + '.dat', 'w')\nfout.write(\n 'ID ME16distance_608w ME16distance_280w ME16distance_18Bor ME16distance_280w+18Bor ME12distance_608w ME12distance_280w ME12distance_18Bor ME12distance_280w+18Bor EUrdistance_280w EUrdistance_18Bor EUrdistance_280w+18Bor links/N theta_0 theta_edu theta_age theta_gender theta_dist theta_income ME16h_edu ME16h_age ME16h_gender ME16h_income ME12h_edu ME12h_age ME12h_gender ME12h_income EUrh_edu EUrh_age EUrh_gender EUrh_income ME16beta ME12beta EUrbeta ME16J ME12J EUrJ\\n')\n# random.seed(1111)\nggg = 0\nstart_time = time.time()\nwhile time.time() - start_time < 84600: # it generates spins configurations according to random model parameters for 24h\n tetas = (14., random.uniform(-7., 12.), random.uniform(-5., 12.), random.uniform(-6., 12), random.uniform(-7., 11.),\n random.uniform(-5., 12.), 0.44, random.uniform(-2.35, -0.1), random.uniform(0.3, 2.7),\n random.uniform(-1.5, -0.2), 0.44, random.uniform(-1.65, 0.15), random.uniform(0.1, 2),\n random.uniform(-1.6, -0.35), 0.44, random.uniform(-1.5, 0.15), random.uniform(-0.3, 1.2),\n random.uniform(-0.4, 0.7), random.uniform(0., 4.), random.uniform(0., 4.), random.uniform(0., 4.),\n random.uniform(0., 40.), random.uniform(0., 40.), random.uniform(0., 40.))\n params = [tetas[0], tetas[1], tetas[2], tetas[3], tetas[4], tetas[5]]\n ff = tetas[18]\n ff12 = tetas[19]\n Bff = tetas[20]\n J = tetas[18] * tetas[21]\n J12 = tetas[19] * tetas[22]\n BJ = tetas[20] * tetas[23]\n ef = [[tetas[6] * ff], [tetas[7] * ff * 0.1], [tetas[8] * ff * 10], [tetas[9] * ff * 0.1]]\n ef12 = [[tetas[10] * ff12], [tetas[11] * ff12 * 0.1], [tetas[12] * ff12 * 10], [tetas[13] * ff12 * 0.1]]\n Bef = [[tetas[14] * Bff], [tetas[15] * Bff * 0.1], [tetas[16] * Bff * 10], [tetas[17] * Bff * 0.1]]\n\n res16, res12, resB, avel = areadistance(K, ef, ef12, Bef, params, area, Barea, inBor, pairw, size, userw, J, J12,\n BJ, ggg)\n if avel != 100:\n fout.write('%s ' % (ggg))\n for a in res16:\n fout.write('%s ' % (a))\n for a in res12:\n fout.write('%s ' % (a))\n for a in resB:\n fout.write('%s ' % (a))\n\n fout.write('%s ' % (avel))\n for zz in range(len(tetas)):\n 
fout.write('%s ' % (tetas[zz]))\n fout.write('\\n')\n fout.flush()\n ggg = ggg + 1\nfout.close()\n","repo_name":"szu-advtech/AdvTech","sub_path":"2022/7-吕思豪 指导老师-廖好/kernel-Blau-Isingmodel-master/KBICode.py","file_name":"KBICode.py","file_ext":"py","file_size_in_byte":19409,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"19"} +{"seq_id":"41257022719","text":"import os\nimport pandas as pd\nfrom datetime import datetime, timedelta\n\ntoday = datetime.today()\nyesterday = today - timedelta(days=1)\n\n# Format the date as a string\nyesterday_str = yesterday.strftime(\"%Y-%m-%d\")\n\n# # Read from a CSV file\n# csv_filename = F'/contratos-inteligentes/data/raw/extracted_date={yesterday_str}/ethereum_tokens_data.csv'\n# df_csv = pd.read_csv(csv_filename)\n\n# Read from a Parquet file\nparquet_filename = F'/contratos-inteligentes/data/raw/extracted_date={yesterday_str}/ethereum_tokens_data.parquet.gzip'\ndf_parquet = pd.read_parquet(parquet_filename)\n\n# Add a new 'block' column to the DataFrame\n# df_csv['block'] = df_csv['block_timestamp'].astype(str) + '_' + df_csv['block_hash'].astype(str) + '_' + df_csv['block_number'].astype(str)\ndf_parquet['block'] = df_parquet['block_timestamp'].astype(str) + '_' + df_parquet['block_hash'].astype(str) + '_' + df_parquet['block_number'].astype(str)\n\n# # Define the local file path where you want to save the transformed data\n# output_file_csv = F\"/contratos-inteligentes/data/stage/extracted_date={yesterday_str}/ethereum_tokens_data.csv\"\n\n# # Extract the directory path from the file path\n# directory_path = os.path.dirname(output_file_csv)\n\n# # Create the directory if it doesn't exist\n# if not os.path.exists(directory_path):\n# os.makedirs(directory_path)\n\n# # Save the transformed data to a local CSV file\n# df_csv.to_csv(output_file_csv, index=False)\n\n# print(f\"Data has been transformed and saved to {output_file_csv}\")\n\noutput_file_parquet = F\"/contratos-inteligentes/data/stage/extracted_date={yesterday_str}/ethereum_tokens_data.parquet.gzip\"\n\n# Extract the directory path from the file path\ndirectory_path = os.path.dirname(output_file_parquet)\n\n# Create the directory if it doesn't exist\nif not os.path.exists(directory_path):\n os.makedirs(directory_path)\n\ndf_parquet.to_parquet(output_file_parquet, compression='gzip')\n\nprint(f\"Data has been transformed and saved to {output_file_parquet}\")\n","repo_name":"LuizGustavus/challenge-pld","sub_path":"k8s/contratos-inteligentes/scripts/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":1984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"10941319903","text":"from matplotlib.figure import Figure\nfrom matplotlib.backends.backend_tkagg import (FigureCanvasTkAgg,NavigationToolbar2Tk)\nimport tkinter as tk\nfrom tkinter import messagebox\nimport time\nimport numpy as np\nimport cupy as cp\nfrom numpy import arange,exp\nimport FixedPoint as fp\nfrom pylab import meshgrid,imshow,contour,clabel,colorbar,axis,title,show\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nfrom scipy.special import lambertw\nfrom matplotlib.backend_bases import MouseButton\nfrom matplotlib.widgets import TextBox\nimport math\nfrom pynput import keyboard\nimport keyboard as kb\nfrom pylab import show\nimport mouse\nimport matplotlib\nimport matplotlib.style as mplstyle\nimport ipywidgets as widgets\n#import scipy.optimize as opt\n\n\n\n\nclass Fractal:\n '''\n Julia 
set/dynamical plane fractal object\n Attributes:\n is_julia : bool : True if dynamical plane, False if parameter space\n f : compiled expression : Julia set/parameter space function\n exitCond : compiled expression : iteration exit condition\n N : int : iteration cap\n c : complex number : Julia set parameter\n Methods:\n f_nits : n,z0 -> first n iterates starting at z0\n escape_iter : z -> min(# of iterations to escape from,N)\n get_fract : xrng,yrng,res -> grid of iterations to escape at ~res points in (xrng)x(yrng)\n find_per_orbit : z0,period -> periodic point near z0, or None if search fails\n find_per_orbits : xrng,yrng,res,period -> list of periodic points in (xrng)x(yrng), searching from res starting points\n '''\n def __init__(self,is_julia,f,exitCond,N=40,c=.4+.6j):\n self.is_julia = is_julia\n self.f = f\n self.exitCond = exitCond\n self.N = N\n self.c = c # Only relevant if is_julia\n\n # Vectorize some methods to improve performance\n self.escape_iter=np.vectorize(self.escape_iter)\n self.find_per_orbit=np.vectorize(self.find_per_orbit)\n \n def eval_f(self,z,c):\n return eval(self.f,None,{\"z\":z,\"c\":c})\n\n def eval_exitCond(self,z,c):\n return eval(self.exitCond,None,{\"z\":z,\"c\":c})\n\n def f_nits(self,n,z0):\n '''\n Returns first n iterates starting at z0\n Terminates early if exit condition is met\n '''\n pts = [z0]\n c = self.c if self.is_julia else z0\n for i in range(n):\n if self.eval_exitCond(pts[-1],c):\n return pts\n try:\n pts=np.append(pts,[self.eval_f(pts[-1],c)],axis=0)\n except (ValueError,ArithmeticError,RuntimeError,MemoryError):\n return pts\n return pts\n\n def escape_iter(self,z):\n '''\n Returns number of iterations needed to escape\n starting at z. Returns maximum # of iterations,\n self.N, if error is thrown, or self.N is reached\n '''\n c = self.c if self.is_julia else z\n for i in range(self.N):\n if self.eval_exitCond(z,c):\n return i\n try:\n z=self.eval_f(z,c)\n except (ValueError,ArithmeticError,RuntimeError,MemoryError):\n return self.N\n return self.N\n\n def get_fract(self,xrng,yrng,res):\n return self.escape_iter(self.complex_grid(xrng,yrng,res))\n\n def find_per_orbit(self,z0,period):\n # Find periodic orbit, with initial guess z0\n try:\n if self.is_julia:\n return fp.per_point(lambda z:self.eval_f(z,self.c),z0,period)\n else:\n return fp.param_per_point(lambda z,c:self.eval_f(z,c),z0,period)\n except (RuntimeError,ZeroDivisionError):\n pass\n return None\n \n\n def find_per_orbits(self,xrng,yrng,res,period):\n per_orbs = self.find_per_orbit(self.complex_grid(xrng,yrng,res),period) # search for periodic orbits\n pts = []\n for xrow in per_orbs: # filter out \"None\"s and duplicate points\n for z in xrow:\n if z != None and (not (True in (abs(ele-z)<1e-5 for ele in pts))):\n pts.append(z)\n return pts\n\n # Getters and setters\n def set_f(self,f):\n self.f = f\n\n def set_exitCond(self,exitCond):\n self.exitCond = exitCond\n\n def set_c(self,c):\n self.c = c\n\n def set_N(self,N):\n self.N = N\n\n def get_f(self):\n return self.f\n\n def get_exitCond(self):\n return self.exitCond\n\n def get_c(self):\n return self.c\n\n def get_N(self):\n return self.N\n\n # Utility functions\n def complex_grid(self,xrng,yrng,res):\n step = np.sqrt((xrng[1]-xrng[0])*(yrng[1]-yrng[0])/res) # compute step size\n X,Y=meshgrid(arange(xrng[0], xrng[1], step),arange(yrng[0], yrng[1], step)) # generate grid of sample points\n return X+Y*1j\n\n \n \n\n\n\nclass FractalPlot:\n def 
__init__(self,main_window,parent,is_julia,f,exitCond,xrng=[-2.25,.75],yrng=[-1.5,1.5],res=50000,N=40,c=.4+.6j):\n self.main_window = main_window\n self.parent = parent\n\n # Create fractal object\n self.fract = Fractal(is_julia,f,exitCond,N,c)\n self.xrng = xrng\n self.yrng = yrng\n self.res = res\n\n \n # Default values\n self.nits = 6\n self.per = 1\n self.search_res = 200\n self.iplt_ms = 2 # iteration plot marker size\n self.iplt_lw = 1.0 # iteration plot line width\n self.iplt_alpha = .7 # iteration plot opacity\n self.perplt_ms = 8 # period plot marker size\n \n # Instantiate misc. environment variables\n self.ipts = []\n self.drawn_iplts = [] # list of drawn plots of iterates\n self.drawn_perplts = [] # list of drawn periodic points\n self.drawn_perplt_pers = [] # list storing the period of the each plot in drawn_perplts\n self.perpts = []\n self.per_plot = None\n self.zoompt1 = None\n self.zoompt2 = None\n self.srect_plot = None # Plot of selection rectangle for zooming\n\n \n\n # Instantiate plot and tk widget\n self.plot_fig = Figure()\n self.plot_fig.subplots_adjust(left=.05,right=.99,top=.95,bottom=.1)\n self.plot_canvas = FigureCanvasTkAgg(self.plot_fig,master=self.main_window)\n\n # Set up plot\n self.plot_ax = self.plot_fig.gca() \n if self.fract.is_julia:\n self.plot_ax.set_title('Julia Set')\n else:\n self.plot_ax.set_title('Parameter Space')\n self.plot_ax.set_xlabel('Re')\n self.plot_ax.set_ylabel('Im')\n\n # Make fractal plot\n cmap = plt.cm.get_cmap(\"twilight_shifted\")#, self.fract.get_N()+1)\n self.fractal_grid = self.fract.get_fract(self.xrng,self.yrng,self.res)\n self.fplot = self.plot_ax.imshow(self.fractal_grid, extent=self.get_extent(),\n interpolation=\"gaussian\",cmap=cmap)\n self.fplot.set_clim(0,self.fract.get_N()+1)\n self.update_axes()\n\n # Set up drawing of first several iterates, starting at mouse position and blit\n self.iplt,=self.plot_ax.plot([],[],'-ok',animated=True,ms=self.iplt_ms,\n lw=self.iplt_lw,alpha=self.iplt_alpha)\n self.plot_canvas.draw()\n plt.pause(.1)\n self.bg = self.plot_canvas.copy_from_bbox(self.plot_ax.bbox)\n self.plot_fig.draw_artist(self.iplt)\n self.plot_canvas.blit(self.plot_ax.bbox)\n self.fig_size = self.plot_fig.get_size_inches()\n\n \n # Set up event listeners\n self.click_listener = self.plot_fig.canvas.mpl_connect('button_press_event', self.on_click)\n self.move_listener = self.plot_fig.canvas.mpl_connect('motion_notify_event', self.on_move)\n self.enter_plot_listener = self.plot_fig.canvas.mpl_connect('axes_enter_event', self.on_enter_plot)\n\n\n def draw_plot(self):\n self.plot_canvas.draw()\n plt.show(block=False)\n plt.pause(.1)\n self.bg = self.plot_fig.canvas.copy_from_bbox(self.plot_ax.bbox)\n self.plot_fig.canvas.blit(self.plot_ax.bbox)\n\n def update_plot(self):\n self.update_axes()\n self.update_extent()\n self.fract_grid = self.fract.get_fract(self.xrng,self.yrng,self.res)\n self.fplot.set_data(self.fract_grid)\n self.fplot.set_clim(0,self.fract.get_N()+1)\n self.clear_plot()\n self.draw_plot()\n\n def clear_plot(self):\n self.clear_iterate_plot()\n self.clear_period_plot()\n\n def clear_iterate_plot(self):\n if len(self.drawn_iplts)>0:\n for drawn_iplt in self.drawn_iplts:\n drawn_iplt[0].remove()\n self.drawn_iplts = []\n self.ipts = []\n self.update_axes()\n self.update_extent()\n self.draw_plot()\n\n def clear_period_plot(self):\n self.perpts = []\n if len(self.drawn_perplts)>0:\n for drawn_perplt in self.drawn_perplts:\n drawn_perplt.remove()\n self.drawn_perplts = []\n self.drawn_perplt_pers = []\n 
self.perpts = []\n self.per_plt_legend.remove()\n self.update_axes()\n self.update_extent()\n self.draw_plot()\n\n def update_iterates(self,event):\n # Detect window resize, so blitting can be reset\n if (self.fig_size != self.plot_fig.get_size_inches()).any():\n self.bg = self.plot_fig.canvas.copy_from_bbox(self.plot_ax.bbox)\n self.fig_size = self.plot_fig.get_size_inches()\n self.nits = int(self.parent.nits_input_var.get())\n self.ipts=self.fract.f_nits(self.nits,event.xdata+event.ydata*1j)\n self.plot_fig.canvas.restore_region(self.bg)\n self.iplt.set_xdata(np.real(self.ipts))\n self.iplt.set_ydata(np.imag(self.ipts))\n self.plot_fig.draw_artist(self.iplt)\n self.plot_fig.canvas.blit(self.plot_ax.bbox)\n self.plot_fig.canvas.flush_events()\n\n def update_extent(self):\n self.fplot.set_extent(self.get_extent())\n\n def update_axes(self):\n self.plot_ax.set_xlim(self.xrng[0],self.xrng[1])\n self.plot_ax.set_ylim(self.yrng[0],self.yrng[1])\n\n\n def on_click(self,event):\n if event.inaxes:\n if event.button == 1: # left click\n # Zoom region selection\n if kb.is_pressed(\"ctrl\"):\n if self.zoompt1 is None:\n self.zoompt1=[event.xdata,event.ydata]\n self.srect_plot, = self.plot_ax.plot(event.xdata,event.ydata,\n '-ok',animated=True,ms=1,lw=.5)\n self.plot_fig.draw_artist(self.srect_plot)\n self.plot_canvas.blit(self.plot_ax.bbox)\n else:\n self.zoompt2 = [event.xdata,event.ydata]\n self.xrng = sorted([self.zoompt1[0],self.zoompt2[0]])\n self.yrng = sorted([self.zoompt2[1],self.zoompt1[1]])\n self.zoompt1 = None\n self.zoompt2 = None\n self.parent.update_range_text()\n self.update_plot()\n # Paint current plot of iterates to plot\n if self.zoompt1 is None and kb.is_pressed(\"shift\"):\n self.drawn_iplts.append(self.plot_ax.plot(np.real(self.ipts),np.imag(self.ipts),\n '-ok',ms=self.iplt_ms,lw=self.iplt_lw,\n alpha=self.iplt_alpha))\n self.bg = self.plot_fig.canvas.copy_from_bbox(self.plot_ax.bbox)\n elif event.button == 3 and (not self.fract.is_julia): # right click\n # ask if user wants to generate julia set plot at this value of c\n if event.ydata<0:\n msg_txt=\"Generate a Julia set at c=\"+\"{:.3f}\".format(event.xdata)+\"-\"+\"{:.3f}\".format(abs(event.ydata))+\"i?\"\n else:\n msg_txt=\"Generate a Julia set at c=\"+\"{:.3f}\".format(event.xdata)+\"+\"+\"{:.3f}\".format(event.ydata)+\"i?\"\n draw_julia = messagebox.askyesno(\"Generate Julia Set?\", msg_txt)\n if draw_julia:\n self.parent.add_child_plot(event.xdata+event.ydata*1j,xrng=[-1.5,1.5],yrng=[-1.5,1.5])\n\n\n # Attempt to find points of period self.per, and plot them\n def draw_per_plot(self):\n if (not (self.per in self.drawn_perplt_pers)):\n self.perpts = self.fract.find_per_orbits(self.xrng,self.yrng,self.search_res,self.per)\n if len(self.drawn_perplt_pers) == 0:\n insert_index = 0\n elif self.per>self.drawn_perplt_pers[-1]:\n insert_index = len(self.drawn_perplt_pers)\n else:\n insert_index = next(i for i,v in enumerate(self.drawn_perplt_pers) if self.per 0.99:\n return True\n\n\ndef make_pairs(filename,val_subject,data):\n f = open(filename,'r')\n names = [name.strip() for name in f.readlines()]\n background = np.zeros((len(names)))\n pairs = []\n # Find background images\n for idx,name in enumerate(names):\n if is_background(data[1][name]):\n background[idx] = 1\n print(\"Background check done\")\n # Only read those files where there is something other than background\n for i in range(len(names)-1):\n # Ignore validation images\n if names[i].split(\"_\")[0] == str(val_subject):\n continue\n # Ignore images with only 
background class\n if background[i] == 1:\n continue\n for j in range(i+1,len(names)):\n if background[j] == 1:\n continue\n slice_idx_i = names[i].split(\"_\")[-1]\n slice_idx_j = names[j].split(\"_\")[-1]\n # pairs.append((names[i],names[i]))\n if slice_idx_i == slice_idx_j:\n pairs.append((names[i],names[j]))\n pairs.append((names[j],names[i]))\n print(\"Done making pairs\")\n return pairs\n\ndef load_data(filename,datadir,co_transform,img_transform,label_transform):\n f = open(filename,'r')\n names = [name.strip() for name in f.readlines()]\n img_dict = {}\n label_dict = {}\n oh_label_dict = {}\n idx = np.arange(4).reshape(4,1)[:,:,None]\n idx = torch.from_numpy(idx).long()\n for name in names:\n cls_name = name.split(\"_\")[0] + '_' + name.split(\"_\")[-1]\n assert(os.path.exists(os.path.join(datadir,\"img\",name+\".tif\")))\n assert(os.path.exists(os.path.join(datadir,\"cls\",cls_name+\".png\")))\n\n img = Image.open(os.path.join(datadir,\"img\",name+\".tif\"))\n label = Image.open(os.path.join(datadir,\"cls\",cls_name+\".png\")).convert('P')\n img,label = co_transform((img,label))\n img = img_transform(img)\n label = label_transform(label)\n ohlabel = (label[None,:,:] == idx).long()\n img_dict[name] = img\n label_dict[name] = label\n oh_label_dict[name] = ohlabel\n return img_dict,label_dict,oh_label_dict\n\nclass MRBrainS(Dataset):\n data_list = 'lists/train_list_MRBrainS.txt'\n\n def __init__(self,root,datadir,\n co_transform=Compose([]),img_transform=Compose([]),\n label_transform=Compose([]),val_subject=4):\n self.root = root\n self.datadir = datadir\n self.val_subject = val_subject\n self.co_transform = co_transform\n self.img_transform = img_transform\n self.label_transform = label_transform\n self.data = load_data(os.path.join(self.root,\"datasets\",self.data_list),datadir,co_transform,img_transform,label_transform)\n self.list = make_pairs(os.path.join(self.root,\"datasets\",self.data_list),self.val_subject,self.data)\n\n def __getitem__(self,index):\n fname1,fname2 = self.list[index]\n\n img1 = self.data[0][fname1]\n img2 = self.data[0][fname2]\n label1 = self.data[1][fname1]\n label2 = self.data[1][fname2]\n ohlabel1 = self.data[2][fname1]\n ohlabel2 = self.data[2][fname2]\n\n return ((img1,label1,ohlabel1,fname1),(img2,label2,ohlabel2,fname2))\n\n def __len__(self):\n return len(self.list)\n","repo_name":"mohitzsh/mri-seg","sub_path":"datasets/mrbrains.py","file_name":"mrbrains.py","file_ext":"py","file_size_in_byte":3671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"22226251784","text":"#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n\"\"\"\n Author: Jacek 'Szumak' Kotlarski --\n Created: 05.12.2023\n\n Purpose:\n\"\"\"\n\nimport time, sys, gc\nfrom inspect import currentframe\nfrom typing import Dict, List, Optional, Tuple, Any\nfrom threading import Thread, Event\nfrom queue import Queue\n\nfrom sqlalchemy.cyextension import immutabledict\nfrom sqlalchemy import create_engine, or_, and_, text, func\nfrom sqlalchemy.orm import (\n Session,\n sessionmaker,\n DeclarativeBase,\n Mapped,\n mapped_column,\n relationship,\n)\nfrom sqlalchemy.pool import QueuePool\nfrom sqlalchemy.engine.base import Engine\nfrom sqlalchemy.engine import URL, engine_from_config\n\nfrom jsktoolbox.libs.base_th import ThBaseObject\nfrom jsktoolbox.logstool.logs import LoggerClient, LoggerQueue\nfrom jsktoolbox.configtool.main import Config as ConfigTool\nfrom jsktoolbox.stringtool.crypto import SimpleCrypto\nfrom 
jsktoolbox.netaddresstool.ipv4 import Address\nfrom jsktoolbox.attribtool import ReadOnlyClass\nfrom jsktoolbox.raisetool import Raise\nfrom jsktoolbox.datetool import Timestamp\n\nfrom libs.base.classes import BModule, BLogs, BDebug\nfrom libs.interfaces.modules import IRunModule\nfrom libs.base.classes import BModuleConfig\nfrom libs.interfaces.conf import IModuleConfig\nfrom libs.templates.modules import TemplateConfigItem\nfrom libs.com.message import Message, Multipart, AtChannel\nfrom libs.tools.datetool import MDateTime\n\nimport libs.db_models.mlms as mlms\n\n# gc.set_debug(\n# gc.DEBUG_COLLECTABLE\n# gc.DEBUG_LEAK\n# | gc.DEBUG_SAVEALL\n# | gc.DEBUG_STATS\n# | gc.DEBUG_UNCOLLECTABLE\n# )\n\n\ndef heap_results():\n from guppy import hpy\n\n hp = hpy()\n h = hp.heap()\n print(h)\n\n\ndef get_session(dblist: List) -> Optional[Session]:\n \"\"\"Return a session for the first engine that accepts connections.\"\"\"\n for dbh in dblist:\n try:\n session = Session(dbh)\n\n if session:\n return session\n except Exception:\n continue\n return None\n\n\nprint(f\"Garbage: {gc.collect()}\")\nsalt = 387741\n\nips = [\"10.5.0.37\", \"10.5.0.36\", \"10.5.0.39\"]\nengines = []\nconfig = {\n \"db.url\": None,\n \"db.echo\": False,\n \"db.poolclass\": QueuePool,\n \"db.pool_pre_ping\": True,\n \"db.pool_size\": 5,\n \"db.pool_recycle\": 120,\n \"db.pool_use_lifo\": True,\n \"db.echo_pool\": True,\n \"db.query_cache_size\": 10,\n # \"db.connect_timeout\": 5,\n}\n\n# heap_results()\n\nfor dialect in (\"pymysql\", \"mysqlconnector\"):\n for ip in ips:\n url = URL(\n f\"mysql+{dialect}\",\n username=\"lms3\",\n password=f\"{SimpleCrypto.multiple_decrypt(salt, '//4AAHAAAABMAAAAagAAAEkAAAA1AAAAZAAAADcAAAB6AAAAbgAAAGcAAABtAAAANQAAAE0AAABlAAAASgAAAHUAAAA=')}\",\n host=ip,\n database=\"lmsv3\",\n port=3306,\n query={\n \"charset\": \"utf8mb4\",\n },\n )\n # print(url)\n config[\"db.url\"] = url\n dbh = engine_from_config(config, prefix=\"db.\")\n try:\n ctest = False\n with dbh.connect() as connection:\n connection.execute(text(\"SELECT 1\"))\n print(f\"Connected to: {dbh.url}\")\n ctest = True\n except Exception as ex:\n print(f\"exception: {ex}\")\n if ctest:\n engines.append(dbh)\n\nsession = get_session(engines)\n\nmaxid = session.query(func.max(mlms.MCustomer.id)).first()[0]\ncfrom = 0\ncto = 100\ncount = 0\nwhile cfrom < maxid:\n customers: List[mlms.MCustomer] = (\n session.query(mlms.MCustomer)\n .filter(\n mlms.MCustomer.deleted == 0,\n and_(mlms.MCustomer.id >= cfrom, mlms.MCustomer.id < cto),\n )\n .all()\n )\n for item in customers:\n customer: mlms.MCustomer = item\n if customer.balance < 0:\n count += 1\n print(\n f\"[{count}] CID: {customer.id}, Balance: {customer.balance}, Pay Time: {customer.pay_time}\"\n )\n for item2 in customer.cash_operations:\n cash: mlms.MCash = item2\n if cash.docid:\n print(cash.doc)\n cfrom = cto\n cto += 100\n # heap_results()\n print(f\"Garbage: {gc.collect()}\")\n\n# heap_results()\nprint(f\"Garbage: {gc.collect()}\")\nsession.close()\nprint(f\"Garbage: {gc.collect()}\")\n# heap_results()\nsys.exit(0)\n\n##########################################################\nsql_uri_template = \"mysql+{}://{}:{}@{}:{}/{}{}\".format(\n \"pymysql\",\n \"lms3\",\n f\"{SimpleCrypto.multiple_decrypt(salt, '//4AAHAAAABMAAAAagAAAEkAAAA1AAAAZAAAADcAAAB6AAAAbgAAAGcAAABtAAAANQAAAE0AAABlAAAASgAAAHUAAAA=')}\",\n ip,\n \"3306\",\n \"lmsv3\",\n \"?charset=utf8mb4\",\n)\n\n\nsql_engine_options = {\n \"pool_recycle\": 60,\n}\n\nengine = create_engine(\n cstr,\n poolclass=QueuePool,\n pool_size=10,\n max_overflow=10,\n pool_pre_ping=True,\n connect_args={\n \"connect_timeout\": 2,\n 
},\n)\nwith engine.connect() as connection:\n connection.execute(text(\"SELECT 1\"))\n\ntry:\n with engine.connect() as connection:\n connection.execute(text(\"SELECT 1\"))\n print(f\"create session for {engine}\")\n session = Session(engine)\n if session is None:\n print(\"session is none\")\nexcept Exception as ex:\n print(f\"Exception: '{ex}'\")\n\n#\nif session:\n customers: mlms.MCustomer = (\n session.query(mlms.MCustomer)\n .filter(\n mlms.MCustomer.deleted == 0,\n mlms.MCustomer.id == 1679,\n mlms.MCustomer.paytime != \"-1\",\n )\n .all()\n )\n count = 0\n for item1 in customers:\n customer: mlms.MCustomer = item1\n # print(customer.has_active_node)\n count += 1\n print(\n f\"{count}: CID: {customer.id} Payment deadline: {customer.paytime}\"\n )\n if True:\n continue\n if customer.balance < 0:\n if customer.tariffs and customer.has_active_node is not None:\n print(customer)\n continue\n # print(customer.cash_operations)\n balance = 0\n for item2 in customer.cash_operations:\n cash: mlms.Cash = item2\n # print(cash)\n balance += cash.value\n print(\n \"{time} {value}/{balance} {doc} {comment}\".format(\n time=MDateTime.datetime_from_timestamp(cash.time),\n value=cash.value,\n balance=balance,\n doc=True if cash.docid else False,\n comment=cash.comment,\n )\n )\n\n print(customer.balance)\n print(customer.debt_timestamp)\n print(MDateTime.datetime_from_timestamp(customer.debt_timestamp))\n print(\n MDateTime.elapsed_time_from_timestamp(\n customer.debt_timestamp\n )\n )\n print(\n f\"consent date: {MDateTime.datetime_from_timestamp(customer.consentdate)}\"\n )\n print(\n f\"cutoff stop: {MDateTime.datetime_from_timestamp(customer.cutoffstop)}\"\n )\n for item2 in customer.tariffs:\n tariff: mlms.Tariff = item2\n print(tariff)\n\n# #[EOF]#######################################################################\n","repo_name":"Szumak75/AASd","sub_path":"tests/create_engine.py","file_name":"create_engine.py","file_ext":"py","file_size_in_byte":7298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"35255245356","text":"import grpc\nimport protos.satellite_pb2\nimport protos.satellite_pb2_grpc\n\nimport asyncio\n\nclass SimpleProxy:\n \n\n def __init__(self, server):\n\n self.server = server\n self.ca_cert, self.client_cert, self.client_key = self._readCertificates()\n self.grpc_credentials = grpc.ssl_channel_credentials(\n root_certificates=self.ca_cert,\n private_key=self.client_key,\n certificate_chain=self.client_cert)\n\n \n def _readCertificates(self):\n\n with open(\"./certs/ca-cert.pem\", 'rb') as f1:\n caCert = f1.read()\n\n with open(\"./certs/client-cert.pem\", 'rb') as f2:\n clientCert = f2.read()\n\n with open(\"./certs/client-key.pem\", 'rb') as f3:\n clientKey = f3.read()\n \n return caCert, clientCert, clientKey\n \n\n def GetRequests(self, locations):\n\n # Get single locations\n print(\"[*] gRPC single requests:\")\n\n return asyncio.get_event_loop().run_until_complete(self._getRequests(locations))\n \n\n async def _getRequests(self, locations):\n\n result = []\n\n async with grpc.aio.secure_channel(self.server, self.grpc_credentials) as channel:\n\n # Init the client stub\n stub = protos.satellite_pb2_grpc.SatelliteStub(channel)\n\n for loc in locations:\n try:\n response = await self._get_img(stub, loc)\n result.append((loc[0], loc[1], response.img))\n except grpc.RpcError as e:\n status_code = e.code()\n if grpc.StatusCode.OUT_OF_RANGE == status_code:\n print(\"Bad request, out of bound location\", loc)\n elif 
grpc.StatusCode.PERMISSION_DENIED == status_code:\n print(\"\\n[Error] Permission denied\", e.details())\n elif grpc.StatusCode.DEADLINE_EXCEEDED == status_code:\n print(\"\\n[Error] Deadline exceeded, please reduce the server sleep_time\")\n else:\n print(e)\n print(\"Undefined error\")\n return result\n \n\n def _get_img(self, stub: protos.satellite_pb2_grpc.SatelliteStub, location):\n\n loc = protos.satellite_pb2.Location()\n loc.x = location[0]\n loc.y = location[1]\n \n return stub.GetImage(loc, timeout=0.04, metadata=((\"token\", ('03357-1')),)) # 40 ms timeout\n \n\n def GetStream(self, queue, area):\n\n self.queue = queue\n\n # Get location (server) stream\n print(\"[*] gRPC server-stream request:\")\n\n asyncio.get_event_loop().run_until_complete(self._getStream(area))\n \n\n async def _getStream(self, area):\n\n async with grpc.aio.secure_channel(self.server, self.grpc_credentials) as channel:\n\n # Init the client stub\n stub = protos.satellite_pb2_grpc.SatelliteStub(channel)\n\n try:\n await self._get_imgs(self.queue, stub, area[0], area[1])\n except grpc.RpcError as e:\n status_code = e.code()\n if grpc.StatusCode.OUT_OF_RANGE == status_code:\n print(\"Bad request, out of bound location\", area)\n elif grpc.StatusCode.DEADLINE_EXCEEDED == status_code:\n print(\"\\n[Error] Deadline exceeded, please reduce the server sleep_time\")\n else:\n print(e)\n print(\"undefined error\")\n \n\n async def _get_imgs(self, queue, stub: protos.satellite_pb2_grpc.SatelliteStub, xy1, xy2):\n \n ll = protos.satellite_pb2.Location()\n ll.x = xy1[0]\n ll.y = xy1[1]\n \n ur = protos.satellite_pb2.Location()\n ur.x = xy2[0]\n ur.y = xy2[1]\n\n area = protos.satellite_pb2.Area()\n area.ll.CopyFrom(ll)\n area.ur.CopyFrom(ur)\n\n responses = stub.GetImages(area, timeout=10) # 10 s timeout\n async for response in responses:\n queue.put([response.x, response.y, response.img])\n\n queue.put(None)\n","repo_name":"dariofad/grpc_py_go_example","sub_path":"client/Proxies.py","file_name":"Proxies.py","file_ext":"py","file_size_in_byte":4120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"37285310256","text":"import cv2\nimport numpy as np\nfrom numpy.lib.function_base import angle\nfrom scipy.ndimage import interpolation as inter\n\ndef get_angle(image, delta = 1, center = 0, range = 45):\n def determine_score(arr, angle):\n data = inter.rotate(arr, angle, reshape = False, order = 0)\n histogram = np.sum(data, axis = 1, dtype = object)\n score = np.sum((histogram[1:] - histogram[:-1]) ** 2, dtype = object)\n \n return histogram, score\n\n scores = []\n angles = np.arange(center - range, center + range + delta, delta)\n for angle in angles:\n histogram, score = determine_score(image, angle)\n scores.append(score)\n\n best_angle = angles[scores.index(max(scores))]\n return best_angle\n\ndef rotate_image(image, angle):\n (h, w) = image.shape[:2]\n center = (w // 2, h // 2)\n\n M = cv2.getRotationMatrix2D(center, angle, 1.0)\n rotated = cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)\n\n return rotated\n\ndef resize_image(image, max_width = 800, max_height = 1000):\n height, width = image.shape[:2]\n\n scale_with = float(max_width) / width\n scale_height = float(max_height) / height\n \n scale = max(scale_with, scale_height)\n\n if scale < 1.0:\n return cv2.resize(image, None, fx = scale, fy = scale, interpolation = cv2.INTER_AREA)\n else:\n return image\n\ndef correct_skew(image, delta = 0.01, range = 45):\n 
small = resize_image(image)\n \n _delta = 1\n angle = 0\n while _delta >= delta:\n angle = get_angle(small, _delta, angle, range)\n print(angle)\n\n range = _delta\n _delta /= 10\n\n return rotate_image(image, angle)\n\n\nif __name__ == '__main__':\n image = cv2.imread('demo/demo1.png', 0)\n\n cv2.imwrite(\"res11.png\", correct_skew(image))\n","repo_name":"anhhuytran/OCR-Preprocessing-Method","sub_path":"quayVanBan.py","file_name":"quayVanBan.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"37931703806","text":"# -*- coding: utf-8 -*-\n#!/usr/bin/python\n\nfrom mygengo import MyGengo\n\n# Get an instance of MyGengo to work with...\ngengo = MyGengo(\n public_key = 'your_public_key',\n private_key = 'your_private_key',\n sandbox = True, # possibly false, depending on your dev needs\n)\n\n# Post a comment on a specific job; perhaps you have an update for the translator\n# or something of the sort.\ngengo.postTranslationJobComment(id = 42, comment = {\n\t'body': 'I love lamp!',\n})\n","repo_name":"crschmidt/mygengo-python","sub_path":"examples/postTranslationJobComment.py","file_name":"postTranslationJobComment.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"12804760818","text":"import json\nimport glob\nfrom rasterio import open as r_open\nimport tqdm\nfrom typing import Tuple\nimport numpy as np\nimport os\nfrom sklearn.model_selection import train_test_split\n\nfrom sar_to_eo_utils.preprocess_utils import filtering, preprocess\n\n__channel_info__ = {'r': 'Sentinel2_B4', 'g': 'Sentinel2_B3', 'b': 'Sentinel2_B2', 'cloud': 'Sentinel2_QA60'}\n\n__prefix__ = ['Sentinel1_VV', 'Sentinel1_VH']+list(__channel_info__.values())\n\n__postfix__ = ['tiff']\n\ndef image_open(path: str, name: str) -> np.ndarray:\n _path = os.path.join(path, name)\n try:\n with r_open(_path, 'r') as f:\n _arr = f.read(1)\n return _arr\n except Exception as e:\n print(\"Cannot open the path\", e)\n \ndef get_image_dict_from_path(path: str, prefix: str, filename_extension: str='tiff') -> list:\n assert prefix in __prefix__\n assert filename_extension in __postfix__\n \n _lst = glob.glob(os.path.join(path, f\"{prefix}*.{filename_extension}\"))\n \n _lst = [os.path.basename(_path) for _path in _lst]\n \n return {'path': path, 'channel': prefix.split('_')[1], 'filename_extension': filename_extension, 'list': _lst}\n\ndef save_json(path: str, name: str, data: list or dict):\n _path = os.path.splitext(os.path.join(path, name))[0]+'.json'\n with open(_path, 'w') as f:\n json.dump(data, f, indent=2)\n \n print(f\"save {name} to the {_path}\")\n \ndef load_json(path: str, name: str) -> list or dict:\n _path = f\"{os.path.join(path, name)}\"\n with open(_path, 'r') as f:\n _data = json.load(f)\n return _data\n\ndef sort_QA60_cloud_value(image_dict: dict, path: str, name: str, filter_method: str=None) -> None:\n \n assert image_dict['channel'] == 'QA60'\n \n \n _list = image_dict['list']\n _path = image_dict['path'] \n \n a = []\n b = []\n c = []\n d = []\n e = []\n f = []\n g = []\n \n _a = set([0])\n _b = set([1024])\n _c = set([2048])\n _d = set([0, 1024])\n _e = set([0, 2048])\n _f = set([1024, 2048])\n _g = set([0, 1024, 2048])\n \n for _qa_path in tqdm.tqdm(_list):\n _arr = image_open(_path, _qa_path)\n \n # filtering\n if filter_method is not None:\n _eo_arr = from_QA60_to_RGB(_path, _qa_path)\n if filtering(_eo_arr, 
method=filter_method, image_type='EO'):\n continue\n \n _arrlst = _arr.flatten().tolist()\n _tempset = set()\n for el in _arrlst:\n _tempset.add(el)\n \n if _tempset == _a:\n a.append(_qa_path)\n if _tempset == _b:\n b.append(_qa_path)\n if _tempset == _c:\n c.append(_qa_path)\n if _tempset == _d:\n d.append(_qa_path)\n if _tempset == _e:\n e.append(_qa_path)\n if _tempset == _f:\n f.append(_qa_path)\n if _tempset == _g:\n g.append(_qa_path)\n\n _dict = {}\n _dict['a'] = a\n _dict['b'] = b\n _dict['c'] = c\n _dict['d'] = d\n _dict['e'] = e\n _dict['f'] = f\n _dict['g'] = g\n \n save_json(path, name, _dict)\n\ndef _get_loc_time(path: str) -> Tuple[str, str]:\n \n _temp = os.path.splitext(os.path.basename(path)) \n \n return _temp[1], _temp[0].split('_')[2:]\n\ndef eo_path_to_qa(path: str) -> str:\n _postfix, _loc_time = _get_loc_time(path)\n _qa_path = f\"{'_'.join([__channel_info__['cloud']]+_loc_time)}{_postfix}\"\n return _qa_path\n \n \ndef from_QA60_to_RGB(path: str, name: str) -> np.ndarray:\n \n _postfix, _loc_time = _get_loc_time(name)\n _r_path = f\"{'_'.join([__channel_info__['r']]+_loc_time)}{_postfix}\"\n _g_path = f\"{'_'.join([__channel_info__['g']]+_loc_time)}{_postfix}\"\n _b_path = f\"{'_'.join([__channel_info__['b']]+_loc_time)}{_postfix}\"\n _r = image_open(path, _r_path)\n _g = image_open(path, _g_path)\n _b = image_open(path, _b_path)\n \n _rgb = np.dstack([_r, _g, _b])\n \n return _rgb\n\ndef get_train_data_json(json_root: str, data_json: str, data_root: str, method: str or tuple(str) or list(str), clip_min: int=0, clip_max: int=2500):\n \n image_paths = load_json(json_root, data_json)\n \n _len = len(image_paths)\n \n channels_mean, channels_sqaured_mean = 0, 0\n \n for qa_path in tqdm.tqdm(image_paths):\n _eo = from_QA60_to_RGB(data_root, qa_path)\n _eo = preprocess(_eo, method=method, clip_min=clip_min, clip_max=clip_max, minmax_min=clip_min, minmax_max=clip_max)\n channels_mean += np.mean(_eo, axis=(0,1))\n channels_sqaured_mean += np.mean(np.square(_eo), axis=(0, 1))\n \n mean = (channels_mean / _len).tolist()\n std = np.sqrt(channels_sqaured_mean / _len - np.square(mean)).tolist()\n \n _dict = {}\n _dict['data_paths'] = image_paths\n _dict['data_root'] = data_root\n _dict['clip_min'] = clip_min\n _dict['clip_max'] = clip_max\n _dict['mean'] = mean\n _dict['std'] = std\n save_json(json_root, 'data_'+data_json, _dict)\n \ndef split_train_valid_data_json(json_root: str, data_json: str, seed: int=42):\n _dict = load_json(json_root, data_json)\n _paths = _dict['data_paths']\n \n _train_paths, _valid_paths = train_test_split(_paths, test_size=0.01, random_state=seed)\n \n _train_dict = _dict.copy()\n _train_dict['data_paths'] = _train_paths\n _valid_dict = _dict.copy()\n _valid_dict['data_paths'] = _valid_paths\n \n save_json(json_root, 'train_'+data_json, _train_dict)\n save_json(json_root, 'valid_'+data_json, _valid_dict)\n \n print(len(_train_dict['data_paths']))\n print(len(_valid_dict['data_paths']))\n \n","repo_name":"hjinnkim/sar-to-eo-utils","sub_path":"sar_to_eo_utils/data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":5596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"13563442962","text":"from typing import Dict\n\nfrom entity.abstract_storage import AbstractStorage\nfrom entity.exceptions import NotEnoughSpaceError, TooManyDifferentItemsError\nfrom entity.request import Request\nfrom entity.shop import Shop\nfrom entity.store import Store\n\n# shop_1 = Shop(\n# items 
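# --- Hedged sketch (not from the original data_utils.py): the {0, 1024, 2048}
# value sets collected by sort_QA60_cloud_value() (above) come from
# Sentinel-2's QA60 band, which packs opaque clouds in bit 10 (2**10 = 1024)
# and cirrus in bit 11 (2**11 = 2048). Bitwise tests express the same
# information directly, without enumerating set combinations:
import numpy as np

qa = np.array([0, 1024, 2048, 3072])   # sample QA60 pixel values
opaque_cloud = (qa & (1 << 10)) != 0
cirrus = (qa & (1 << 11)) != 0
print(opaque_cloud)  # [False  True False  True]
print(cirrus)        # [False False  True  True]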
= {'cookie' : 3,\n# 'laptop' : 15,\n#\n#\n# }\n# )\n#\n# store_1 = Store(\n# items = {'cookie' : 10,\n# 'laptop' : 20\n# }\n# )\n# storages_1 = {\n# 'shop': shop_1,\n# 'store': store_1,\n# }\n\nclass Courier:\n def __init__(self, request: Request, storages: Dict[str, AbstractStorage]):\n self.request = request\n\n self.from_where: AbstractStorage = storages[self.request.departure]\n self.to_where: AbstractStorage = storages[self.request.destination]\n\n\n def move(self):\n if self.to_where.get_free_space() < self.request.amount:\n raise NotEnoughSpaceError\n elif self.to_where.get_unique_items_count() == 4 \\\n and self.request.product not in self.to_where.get_items().keys():\n\n raise TooManyDifferentItemsError\n\n self.from_where.remove(name = self.request.product, amount = self.request.amount)\n print(f'The courier picked up {self.request.amount} {self.request.product} from {self.request.departure} ')\n\n print(f'The courier is carrying {self.request.amount} {self.request.product}')\n\n self.to_where.add(name = self.request.product, amount = self.request.amount)\n print(f"The courier delivered {self.request.amount} {self.request.product} to {self.request.destination}")\n\n# test_str = "Deliver 3 cookie from store to shop"\n# test_request = Request(test_str, storages_1)\n# test_courier = Courier(test_request, storages_1 )\n# print(test_request.product)\n# # print(test_courier.to_where())\n# # test_courier = Courier(test_request, storages_1 )\n# # print (test_courier.from_where.get_free_space())\n# print (test_courier.from_where.get_items().keys())\n# # print (test_courier.from_where.get_unique_items_count())\n# print(test_request.product in test_courier.from_where.get_items().keys() )","repo_name":"Nadiabona/HW_21_Golubeva","sub_path":"entity/courier.py","file_name":"courier.py","file_ext":"py","file_size_in_byte":2215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"23733230058","text":"import cv2\nimport math\nimport numpy as np\n\ndef Growth_Estimate(img):\n data = list()\n plants = [\n {\n \"id\":1,\n \"seed_num\":1,\n \"center_pt\": tuple([366, 425]),\n \"edge_pt\": tuple([362, 363])\n },\n {\n \"id\":2,\n \"seed_num\":2,\n \"center_pt\": tuple([360, 235]),\n \"edge_pt\": tuple([355, 296])\n },\n {\n \"id\":3,\n \"seed_num\":1,\n \"center_pt\": tuple([353, 72]),\n \"edge_pt\": tuple([343, 118])\n }\n\n ]\n img = cv2.GaussianBlur(img, (3,3), 0)\n b, g, r = cv2.split(img)\n ret, thresh = cv2.threshold(g, 127, 255, cv2.THRESH_BINARY)\n\n for plant in plants:\n print(plant)\n\n # Create mask image\n radius = int(math.sqrt((plant['center_pt'][0]-plant['edge_pt'][0])**2+(plant['center_pt'][1]-plant['edge_pt'][1])**2))\n center = plant['center_pt']\n\n size = g.shape\n mask = np.zeros(size, np.uint8)\n cv2.circle(mask, center, radius, (255, 255, 255), -1)\n\n # Bitwise_and\n result = cv2.bitwise_and(thresh, thresh, mask=mask)\n\n # Calculate Percentage\n print(\"result shape\", result.shape)\n nonzero_count = cv2.countNonZero(result)\n print(nonzero_count)\n\n circle_area = cv2.countNonZero(mask)\n print(nonzero_count/float(circle_area))\n\n data.append({\n \"id\": plant['id'],\n \"growth_rate\": nonzero_count/float(circle_area)\n })\n\n return data\n\n\ncap = cv2.VideoCapture(0)\nret, frame = cap.read()\ngrowth_data = Growth_Estimate(frame)\n\n\nprint(\"growth_data\", growth_data)\n\nheight, width, channels = frame.shape\nprint(height, width, channels)\nprint(frame)\n\nimport base64\nbase64_str = cv2.imencode('.jpg', 
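# --- Hedged sketch of the measurement Growth_Estimate() (above) performs per
# plant: the fraction of thresholded pixels inside a circular region of
# interest whose radius is the center-to-edge distance. Standalone toy
# example using only cv2 and numpy:
import cv2
import numpy as np

def coverage_in_circle(binary, center, radius):
    mask = np.zeros(binary.shape[:2], np.uint8)
    cv2.circle(mask, center, radius, 255, -1)            # filled-circle ROI
    inside = cv2.bitwise_and(binary, binary, mask=mask)
    return cv2.countNonZero(inside) / float(cv2.countNonZero(mask))

demo = np.zeros((100, 100), np.uint8)
demo[40:60, 40:60] = 255                                 # a 20x20 bright patch
print(coverage_in_circle(demo, (50, 50), 30))            # ~0.14 of the disk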
frame)[1].tostring()\nbase64_str = base64.b64encode(base64_str)\n\nbase64_str_decod_utf_8 = base64_str.decode(\"utf-8\")\n\n\nimport json\nimport requests\n\nurl = 'http://10.1.1.16:8000/receiveImage'\ndata = {\n 'image': base64_str_decod_utf_8,\n 'data': tuple(growth_data)\n }\n\nprint(data)\n\nheaders = {'content-type': 'application/json'}\n\nprint(headers)\n\nr = requests.post(url, data=json.dumps(data), headers=headers)\n\nprint(\"response text\", r.text)\n","repo_name":"ArthurWuTW/crawler-script","sub_path":"test_functionality/test_post_json.py","file_name":"test_post_json.py","file_ext":"py","file_size_in_byte":2251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"14719207012","text":"import os\nimport tempfile\nimport ntpath\n\nimport nanome\nfrom nanome.api.ui import Menu, LayoutNode\nfrom nanome.util import async_callback, Logs\n\nNAME = \"File Explorer\"\nDESCRIPTION = \"Allows you to browse your files\"\nCATEGORY = \"\"\nHAS_ADVANCED_OPTIONS = False\n\ntest_assets = os.getcwd() + (\"/testing/test_assets\")\n\n\nclass FileExplorer(nanome.AsyncPluginInstance):\n\n def start(self):\n self.running = False\n self.item_prefab = LayoutNode.io.from_json(test_assets + \"/File.json\")\n self.menu = Menu.io.from_json(test_assets + \"/FileExplorer.json\")\n self.grid = self.menu.root.find_node(\"Grid\", True).get_content()\n self.path_text = self.menu.root.find_node(\"path\", True).get_content()\n self.load_button = self.menu.root.find_node(\"LoadButton\", True).get_content()\n self.load_button.register_pressed_callback(self.load_pressed)\n self.save_button = self.menu.root.find_node(\"SaveButton\", True).get_content()\n self.save_button.register_pressed_callback(self.save_pressed)\n self.back_button = self.menu.root.find_node(\"back\", True).get_content()\n self.back_button.register_pressed_callback(self.back_pressed)\n self.selected_button = None\n self.fetch_path()\n self.temp_dir = tempfile.mkdtemp()\n # self.test_path = \"C:\\\\Users\\\\ETHANV~1\\\\AppData\\\\Local\\\\Temp\\\\tmpuzepx_cf\\\\file.jpg\"\n # self.test_path1 = \"C:\\\\Users\\\\ETHANV~1\\\\AppData\\\\Local\\\\Temp\\\\tmpuzepx_cf\\\\1.jpg\"\n # self.test_path2 = \"C:\\\\Users\\\\ETHANV~1\\\\AppData\\\\Local\\\\Temp\\\\tmpuzepx_cf\\\\2.jpg\"\n # self.test_path3 = \"C:\\\\Users\\\\ETHANV~1\\\\AppData\\\\Local\\\\Temp\\\\tmpuzepx_cf\\\\3.jpg\"\n # self.files.cp(self.test_path, self.test_path1, self.cp_done)\n # self.files.put(self.test_path, self.test_path2, self.put_done)\n # self.files.mv(self.test_path, self.test_path3, self.mv_done)\n\n # def cp_done(self, *args):\n # Logs.debug(\"cp done\")\n\n # def put_done(self, *args):\n # Logs.debug(\"put done\")\n\n # def mv_done(self, *args):\n # Logs.debug(\"mv done\")\n\n def on_run(self):\n self.running = True\n self.update_menu(self.menu)\n\n @async_callback\n async def fetch_path(self, *_):\n error, wd = await self.files.pwd()\n self.path_text.text_value = wd\n if self.running:\n self.update_content(self.path_text)\n\n error, files = await self.files.ls(\".\")\n if error != nanome.util.FileError.no_error: # If API couldn't access directory, display error\n nanome.util.Logs.error(\"Directory request error:\", str(error))\n return\n self.grid.items = []\n for file in files:\n item = self.create_file_rep(file)\n self.grid.items.append(item)\n if self.running:\n self.update_content(self.grid)\n\n def create_file_rep(self, entry):\n item = self.item_prefab.clone()\n button = item.find_node(\"Button\", 
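# --- Hedged sketch of the transport used above: the JPEG byte string from
# cv2.imencode() is base64-encoded into a JSON payload, and the receiver
# inverts the steps. The bytes below stand in for real imencode() output.
# (Note: ndarray.tostring(), used above, is a deprecated alias of .tobytes().)
import base64
import json

raw = b"\xff\xd8\xff\xe0 fake-jpeg-bytes"
wire = json.dumps({"image": base64.b64encode(raw).decode("utf-8")})

decoded = base64.b64decode(json.loads(wire)["image"])
assert decoded == raw
# A real receiver would then restore the image, e.g.:
# img = cv2.imdecode(np.frombuffer(decoded, np.uint8), cv2.IMREAD_COLOR)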
True).get_content()\n button.text.value.set_all(entry.name)\n button.register_pressed_callback(self.entry_pressed)\n button.entry = entry\n button.text.value.set_all(self.path_leaf(entry.name))\n button.text.size = .3\n button.text.ellipsis = True\n return item\n\n def entry_pressed(self, button):\n if button.entry.is_directory:\n self.files.cd(button.entry.name, self.fetch_path)\n return\n to_update = []\n button.selected = True\n if self.selected_button is not None:\n self.selected_button.selected = False\n to_update.append(self.selected_button)\n if self.selected_button == button:\n self.selected_button = None\n else:\n self.selected_button = button\n to_update.append(self.selected_button)\n\n self.save_button.unusable = self.selected_button == None\n to_update.append(self.save_button)\n self.load_button.unusable = self.selected_button == None\n to_update.append(self.load_button)\n self.update_content(to_update)\n\n @async_callback\n async def load_pressed(self, button):\n entry = self.selected_button.entry\n if entry.is_directory:\n return\n\n dest = os.path.join(self.temp_dir, str(self.path_leaf(entry.name)))\n error, path = await self.files.get(entry.name, dest)\n if error == nanome.util.FileError.no_error:\n Logs.debug(path)\n self.send_files_to_load(path)\n else:\n Logs.debug(error)\n\n def save_pressed(self, button):\n pass\n\n def back_pressed(self, button):\n self.files.cd(\"..\", self.fetch_path)\n\n def path_leaf(self, path):\n head, tail = ntpath.split(path)\n return tail or ntpath.basename(head)\n\n\nnanome.Plugin.setup(NAME, DESCRIPTION, CATEGORY, HAS_ADVANCED_OPTIONS, FileExplorer, permissions=[nanome.util.enums.Permissions.local_files_access])\n","repo_name":"nanome-ai/nanome-lib","sub_path":"testing/test_plugins/FileExplorer.py","file_name":"FileExplorer.py","file_ext":"py","file_size_in_byte":4911,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"19"} +{"seq_id":"31859913812","text":"from bib2json import bib2json\nfrom gen_references import gen_refs\nimport re\n\ndef inline_citation(bib_file, input_file, output_file=None):\n \"\"\"\n Replace all the citation tags from the input\n file with the inline citations in an apa style, \n write the result in the output_file.\n Also add the references as a separate section\n at the end of the output file.\n\n Args\n ----\n bib_file: name of the bibtex file\n input_file: input_file containing the tex document\n output_file: file to be written, if not provided then\n generate using out_file_name function\n \"\"\"\n def apa(match):\n \"\"\"helper function used as an argument of the ptrn.sub function \n (used below)\"\"\"\n try: # if any error then leave the tag as it is.\n tags = match.groupdict()['tags'].split(',')\n suf = match.groupdict()['suf']\n ang = match.groupdict()['ang'] ## txt inside <..> if present\n rslt = reduce(lambda s1, s2: s1 + \"; \" + s2, \n [apa_helper(t, suf) for t in tags])\n if ang:\n rslt = \"{0} {1}\".format(ang, rslt)\n if suf == \"\":\n rslt = '(' + rslt + ')'\n return rslt\n except:\n return match.string[match.start(): match.end()]\n \n def apa_helper(tag, suf):\n \"\"\" \n suf is in the set {\\citeA, \\citedate, \\cite}.\n if suf is 'A'(\\citeA) then return 'a_1, a_2,..and a_n (date)'\n if suf is 'date'(\\citedate) then return '(year)'\n if suf is None(\\cite) then return 'a_1, a_2,..\\& a_n, date'\n \"\"\"\n try: \n bib = json_db[tag.strip()]\n except KeyError:\n try:\n bib = json_db[tag.lower().strip()]\n except KeyError:\n pass\n\n names = map(lambda 
author: author['last'], \n bib['author']) # list of last names\n if suf == 'year':\n tags.append(tag)\n return \"(\" + bib['year'] + \")\"\n\n if suf == 'A':\n and_style = ' and '\n yr_style = \" ({0})\".format(bib['year'])\n else:\n and_style = ' \\& '\n yr_style = \", {0}\".format(bib['year'])\n\n if len(names) == 1:\n rslt = names[0]\n elif (len(names) > 2) and (tag in tags):\n rslt = \"%s et al.\"%names[0]\n else:\n rslt = reduce(lambda s1, s2: s1 +', '+ s2, names[:-1]) +\\\n and_style + names[-1] \n rslt += yr_style\n tags.append(tag)\n return rslt\n\n if not output_file:\n output_file = out_file_name(input_file)\n \n ptrn = re.compile(r\"\"\"\n \\\\cite(?P<suf> [A]? | year?) # either \\cite or \\citeA or \\citeyear\n (< (?P<ang>[^>]*) >)? # txt in <>, if <> present\n { (?P<tags>[^}]*) } # {tag1,tag2,..}\n \"\"\", re.VERBOSE)\n \n tags = [] # keep track of tags which have appeared\\\n # if a tag appears again then the citation style might\\\n # be different.\n json_db = bib2json(bib_file)\n txt = open(input_file).read()\n rslt = ptrn.sub(apa, txt)\n open(output_file, \"w\").write(rslt)\n\n # adding the reference list at the end of the output file.\n refs = gen_refs(input_file, json_db)\n add_refs(refs, output_file)\n\n\n return None\n\ndef add_refs(refs, fl):\n \"\"\"\n Insert references in the file(fl) just before\n the \\end{document}\n \"\"\"\n lines = open(fl).readlines()\n\n # find the '\\end{document}' in the file_txt and\n # insert references before it\n ptrn = r\"\\end{document}\"\n \n index = index_ptrn(ptrn, lines)\n if not index:\n raise ValueError(\"\\end{document} missing\")\n\n lines.insert(index, r\"\\section{References}\")\n index += 1\n for ref in refs:\n lines.insert(index, ref + r\"\\\\\" + \"\\n\")\n index += 1\n # modify the file\n open(fl, \"w\").writelines(lines)\n return None\n\ndef index_ptrn(ptrn, lst):\n for i in range(len(lst)):\n if re.search(ptrn, lst[i]):\n return i\n\ndef out_file_name(in_file_name):\n \"\"\"construct name of the output file \n from the name of the input file.\n >>> out_file_name(\"foo.tex\")\n \"foo_output.tex\"\n >>> out_file_name(\"../foo.tex\")\n \"../foo_output.tex\"\n \"\"\"\n try:\n base, extension = re.search(\"(.*\\w+)[.](\\w+)\", in_file_name).groups()\n return base + \"_output.\" + extension\n except AttributeError: # no extension\n return in_file_name + \"_output\"\n\n","repo_name":"tima04/inline-citation","sub_path":"code/inline_citation.py","file_name":"inline_citation.py","file_ext":"py","file_size_in_byte":4466,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"28791239829","text":"import glob\nimport os\n\nfrom Class.User import User\n\n\n# 'D:\\CSTI\\Social Data Analysis\\Project\\dataset_twitter\\dataset'\n\ndef run(path):\n files = {}\n counter = 0\n print('Indexing Files: START')\n for file in glob.glob(os.path.join(path, '*')): # do your stuff\n iid = screen_name = description = location = url = lang = ''\n tweets_index = 0\n counter += 1\n if counter % 1000 == 0:\n print(str(counter))\n with open(file, 'r', encoding=\"utf8\") as myfile:\n data = myfile.readlines()\n for index, line in enumerate(data):\n key_value = line.split(':', 1)\n if key_value[0] == 'TweetId':\n tweets_index = index\n break\n if key_value[0] == 'ID':\n iid = key_value[1]\n continue\n if key_value[0] == 'Screen-name':\n screen_name = key_value[1]\n continue\n if key_value[0] == 'Description':\n description = key_value[1]\n # key_value = data[index + 1].split(':', 1)\n # while 
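# --- Hedged sketch exercising the \cite pattern compiled above (group names
# suf/ang/tags, as referenced via match.groupdict() in apa()); the sample
# string is illustrative:
import re

ptrn = re.compile(r"""
    \\cite(?P<suf> [A]? | year?)   # either \cite or \citeA or \citeyear
    (< (?P<ang>[^>]*) >)?          # txt in <>, if <> present
    { (?P<tags>[^}]*) }            # {tag1,tag2,..}
    """, re.VERBOSE)

m = ptrn.search(r"as shown by \citeA{smith2001,jones99}")
print(m.groupdict())  # {'suf': 'A', 'ang': None, 'tags': 'smith2001,jones99'}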
key_value[0] != 'Location':\n # description += ' ' + key_value[0]\n # key_value = data[index + 1].split(':', 1)\n continue\n if key_value[0] == 'Location':\n location = key_value[1]\n continue\n if key_value[0] == 'URL':\n url = key_value[1]\n continue\n if key_value[0] == 'Lang':\n lang = key_value[1]\n continue\n user = User(iid, screen_name, description, location, url, lang)\n for tweet in data[tweets_index + 1:]:\n if tweet.startswith('TweetId'):\n continue\n user.add_tweet(tweet)\n files[iid] = user\n # words = ''\n # for user in users:\n # words = users[user].words\n # print(words)\n print('Indexing Files: END (' + str(counter) + ' indexed)')\n return files\n","repo_name":"aliabs/nltkDemo","sub_path":"PreProcessing/IndexingFiles.py","file_name":"IndexingFiles.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"29388355356","text":"\"\"\"\nThis is code for XOX Game using tkinter.\n\"\"\"\n# TODO: For Single Player, we need to ask if they want the computer to take the first move may be through a dialog box.\n# TODO: May be add sounds for each button click.\n# TODO: Add a better way for the computer to play rather than computerRandomMove\n# Note: Considered minimax for the computer move, but that would make the computer already know the moves to win or tie.\n\n\n# The Import Section\nfrom tkinter import *\nfrom functools import partial\nfrom tkinter import messagebox\nimport random\n\n# The Constants\nBACKGROUND = \"bg\"\nTEXT = \"text\"\nSTATE = \"state\"\nDISABLED_FOREGROUND = \"disabledforeground\"\nFOREGROUND = \"fg\"\nX, O = \"X\", \"O\"\n\n\ndef messageBoxInfo(title, text):\n root = Tk()\n root.withdraw()\n messagebox.showinfo(title, text)\n root.destroy()\n\n\nclass GameBoard(Tk):\n\n def __init__(self, player1=\"\", player2=\"\"):\n super().__init__()\n\n self.s = set()\n self.player1 = player1\n self.player2 = player2 if len(player2) != 0 else \"Computer\"\n\n self.OnePlayerGame = len(player2) == 0\n\n # This frame holds the cells of the board. 
(Buttons)\n self.f = Frame(self, width=50, height=50)\n # The Place where we display the verdict or tell its X's or O's Turn.\n self.AnnouncementLabel = Label(self, padx=20)\n self.title(\"XOX GAME!\")\n self.curr_player = X\n self.pressed = 0\n # The Board:\n self.board = []\n # We are using a frame to get the two buttons, reLabel, Quit side-by-side.\n self.top = Frame(self)\n self.reLabel = Button(self.top, text=\"Reset\", padx=40, pady=20, command=self.resetAllCells, bg=\"#90EE90\", bd=10)\n self.Quit = Button(self.top, text=\"Quit\", padx=40, pady=20, command=self.destroy, bg=\"#dc143c\", bd=10)\n\n # The properties of the cell change on a press according to the player who pressed it.\n self.players = {\n X: {DISABLED_FOREGROUND: \"blue\", BACKGROUND: \"#87CEEB\", STATE: DISABLED, TEXT: X},\n O: {DISABLED_FOREGROUND: \"red\", BACKGROUND: \"#FFCCCB\", STATE: DISABLED, TEXT: O}\n }\n\n # When we want to reset the board.\n self.resetting = {\n FOREGROUND: \"grey\", BACKGROUND: \"#C0C0C0\", STATE: NORMAL, TEXT: \"\"\n }\n self.setWidgets()\n self.resizable(False, False)\n\n def getPlayerName(self):\n if self.curr_player == X:\n return self.player1\n return self.player2\n\n def setWidgets(self):\n self.AnnouncementLabel.pack()\n self.AnnouncementLabel.config(font=('Helvetica bold', 20))\n\n self.buildBoard()\n\n # Initialization for a new game.\n self.initialState()\n\n self.f.pack()\n\n self.top.pack(side=TOP)\n\n self.reLabel.config(font=('Helvetica bold', 15))\n self.Quit.config(font=('Helvetica bold', 15))\n\n self.reLabel.pack(in_=self.top, side=LEFT)\n self.Quit.pack(in_=self.top, side=LEFT)\n\n # Before we start a new Game.\n def initialState(self):\n self.curr_player = X\n self.pressed = 0\n self.AnnouncementLabel[\"text\"] = \"{}'s Turn\".format(self.getPlayerName())\n\n # Creating widgets: buttons! 
as a board.\n def buildBoard(self):\n for i in range(3):\n rows = []\n for j in range(3):\n b = Button(self.f, text=\"\", padx=40, pady=20, command=partial(self.move, i, j), bg=\"#C0C0C0\")\n b.grid(row=i + 1, column=j + 1)\n b.config(width=10, height=2)\n rows.append(b)\n self.board.append(rows)\n\n # Since this is a two-player game, and there would alternate chances.\n def changePlayer(self):\n self.curr_player = X if self.curr_player == O else O\n\n # After a game ends (either a win or draw), we disable all the cells.\n def disableAllCells(self):\n for i in range(3):\n for j in range(3):\n self.board[i][j][STATE] = DISABLED\n\n # When we reset the board, or we are getting ready for a rematch.\n def resetAllCells(self):\n self.initialState()\n self.reLabel[\"text\"] = \"Reset\"\n for i in range(3):\n for j in range(3):\n for reset_options in self.resetting:\n self.board[i][j][reset_options] = self.resetting[reset_options]\n\n # We keep checking the board after every turn, so, we can find if a player won.\n def checkIfAPlayerWon(self, i, j):\n player = self.curr_player\n # check the row\n for r in range(3):\n if self.board[r][j][TEXT] != player:\n break\n else:\n return True\n\n # check the column if current player won.\n for c in range(3):\n if self.board[i][c][TEXT] != player:\n break\n else:\n return True\n\n for r, c in zip(range(3), range(3)):\n if self.board[r][c][TEXT] != player:\n break\n else:\n return True\n\n for r, c in zip(range(3), range(2, -1, -1)):\n if self.board[r][c][TEXT] != player:\n break\n else:\n return True\n return False\n\n # Move here is to imply each turn played.\n def move(self, i, j):\n player = self.curr_player\n self.pressed += 1\n for values in self.players[player]:\n self.board[i][j][values] = self.players[player][values]\n if self.checkIfAPlayerWon(i, j):\n winLabel = \"{} WINS!\".format(self.getPlayerName())\n self.AnnouncementLabel[\"text\"] = winLabel\n self.disableAllCells()\n rematch = \"Rematch?\"\n self.reLabel[\"text\"] = rematch\n messageBoxInfo(\"Winner!\", winLabel)\n elif self.pressed == 9:\n TieLabel = \"That's a Draw!\"\n self.AnnouncementLabel[\"text\"] = TieLabel\n self.disableAllCells()\n rematch = \"Rematch?\"\n self.reLabel[\"text\"] = rematch\n messageBoxInfo(\"Well Played!\", TieLabel)\n else:\n self.changePlayer()\n self.AnnouncementLabel[\"text\"] = \"{}'s Turn\".format(self.getPlayerName())\n if self.OnePlayerGame and self.getPlayerName() == 'Computer':\n self.computerRandomMove()\n\n def computerRandomMove(self):\n s = set()\n for i in range(3):\n for j in range(3):\n if self.board[i][j][STATE] != DISABLED:\n s.add((i, j))\n if s:\n p = random.sample(s, 1)[0]\n self.s.discard(p)\n self.move(p[0], p[1])\n\n\nif __name__ == \"__main__\":\n g = GameBoard(\"jack\")\n g.mainloop()\n","repo_name":"Jahnavi-Mantripragada/XOX_GAME","sub_path":"GameBoard.py","file_name":"GameBoard.py","file_ext":"py","file_size_in_byte":6659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"8558474768","text":"\"\"\"\nloading timezone data into DB\n\"\"\"\n\n# Standard Library\nimport datetime\n\n# Third Party\nimport pytz\n\n# Django\nfrom django.core.management.base import BaseCommand\n\n# AA Time Zones\nfrom timezones.models import TimezoneData\n\n\ndef get_input(text):\n \"\"\"\n Wrapped input to enable tz import\n \"\"\"\n\n return input(text)\n\n\nclass Command(BaseCommand):\n \"\"\"\n Import timezones\n \"\"\"\n\n help = \"Imports timezone data\"\n\n def _import_timezone_data(self) -> 
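# --- Hedged sketch of the win test used by checkIfAPlayerWon() (above): scan
# the clicked cell's row, its column, and both diagonals (the anti-diagonal
# via zip(range(3), range(2, -1, -1))). Same logic on a plain 3x3 list:
def wins(board, player, i, j):
    return (all(board[i][c] == player for c in range(3)) or      # row i
            all(board[r][j] == player for r in range(3)) or      # column j
            all(board[d][d] == player for d in range(3)) or      # main diagonal
            all(board[d][2 - d] == player for d in range(3)))    # anti-diagonal

board = [["X", "O", "O"],
         ["O", "X", ""],
         ["O", "", "X"]]
print(wins(board, "X", 2, 2))  # True (main diagonal)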
None:\n \"\"\"\n Import time zone data\n :return:\n \"\"\"\n\n timezones_imported = 0\n timezones_updated = 0\n\n for timezone_name in pytz.common_timezones:\n if timezone_name == \"UTC\":\n break\n\n timezone_panel_id = timezone_name.replace(\"/\", \"-\").lower()\n timezone_utc_offset = datetime.datetime.now(\n pytz.timezone(timezone_name)\n ).strftime(\"%z\")\n\n timezone, created = TimezoneData.objects.update_or_create(\n timezone_name=timezone_name,\n panel_id=timezone_panel_id,\n defaults={\"utc_offset\": timezone_utc_offset},\n )\n\n if created:\n action = \"Importing\"\n timezones_imported += 1\n else:\n action = \"Updating\"\n timezones_updated += 1\n\n self.stdout.write(\n f\"{action} timezone '{timezone.timezone_name}' \"\n f\"with UTC offset of '{timezone.utc_offset}' \"\n f\"and panel ID of '{timezone.panel_id}'\"\n )\n\n self.stdout.write(\n f\"Import/Update done with {timezones_imported} new timezones imported \"\n f\"and {timezones_updated} timezones updated because they were already \"\n \"in the DB.\"\n )\n\n def handle(self, *args, **options): # pylint: disable=unused-argument\n \"\"\"\n Ask before running ...\n :param args:\n :param options:\n \"\"\"\n\n self.stdout.write(\n \"Timezones will be imported. \"\n \"Previously imported timezones will not be replaced or overwritten.\"\n )\n\n user_input = get_input(\"Are you sure you want to proceed? (yes/no)?\")\n\n if user_input == \"yes\":\n self.stdout.write(\"Starting import of time zones. Please stand by.\")\n self._import_timezone_data()\n self.stdout.write(self.style.SUCCESS(\"Timezones Import complete!\"))\n else:\n self.stdout.write(self.style.WARNING(\"Aborted.\"))\n","repo_name":"ppfeufer/aa-timezones","sub_path":"timezones/management/commands/timezones_load_tz_data.py","file_name":"timezones_load_tz_data.py","file_ext":"py","file_size_in_byte":2545,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"19"} +{"seq_id":"5119175270","text":"#!/usr/bin/env python\n\n\"\"\"\nCornerhost Searcher Indexer\nThis should index and search and do all kinds of fun stuff\n\"\"\"\n\nimport sys\n\nfrom numpy import *\nimport re\nimport string\nfrom string import lower\nfrom stemmer import PorterStemmer\nfrom vecmath import vcos\nimport ransacker\n\n\nclass VectorSearchEngine(object):\n def __init__(self, index):\n self.index = index\n word_index = self.mapWordsToPosition()\n self.vecs = []\n self.doc_vecs = []\n self.threshold = 0.04\n self.word_index = word_index\n self.build_vectors()\n\n def build_vectors(self):\n # @TODO: assert 0, \"get rid of reliance on self.index.docs\"\n # Basically, this class is making its own index.\n # The goal now is to get this to run off of\n # a ransacker.Index instead.\n for doc in self.index.docs:\n vec = self.make_vector(doc)\n # the perl used at this point is\n # push @vecs, norm $vec;\n # must impliment using Numeric\n self.vecs.append(vec)\n\n self.doc_vecs.extend(self.vecs)\n\n\n def make_vector(self,in_words):\n # words should be a dictionary\n words = get_words(in_words)\n word_count = len(self.word_index.keys())\n vector = zeros(word_count)\n\n for w in words.keys():\n if self.word_index.has_key(w):\n value = words[w]\n offset = self.word_index.get(w,0) \n vector[offset] = value\n return vector \n\n def search(self,search_for):\n qvec = self.make_vector(search_for)\n result_list = self.get_cosines(qvec)\n documents = {}\n for index in result_list.keys():\n doc = self.index.docs[index]\n relevance = result_list[index]\n documents[doc] = relevance\n 
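# --- Hedged sketch of the offset derivation in _import_timezone_data()
# (above): localise "now" into each zone and format it with %z. (Skipping
# "UTC" with `continue` would state the intent more robustly than `break`,
# which relies on pytz.common_timezones happening to list "UTC" last.)
import datetime
import pytz

for name in ("Europe/Berlin", "America/New_York"):
    offset = datetime.datetime.now(pytz.timezone(name)).strftime("%z")
    panel_id = name.replace("/", "-").lower()
    print(name, offset, panel_id)  # e.g. Europe/Berlin +0100 europe-berlin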
return documents\n\n def get_cosines(self,in_qvec):\n cosines = {}\n index = 0\n\n for this_vec in self.doc_vecs:\n assert isinstance(this_vec, ndarray)\n cosine = vcos(this_vec, in_qvec)\n if cosine > self.threshold:\n cosines[index] = cosine\n index = index + 1\n return cosines\n\n def mapWordsToPosition(self):\n # create a lookup hash of word to position\n # originally looked like this\n # my %lookup;\n # my @sorted_words = sort keys %all_words;\n # @lookup{@sorted_words} = (1..$#sorted_words ); \n lookup = {}\n x = 0\n for one in self.index.getWordList():\n lookup[one] = x\n x = x + 1\n #### TODO: the above section could(and should) be optimized\n\n return lookup\n\nclass VectorSpace(ransacker.Index):\n\n def __init__(self, docs):\n\n self.docs = docs\n self.all_words = {}\n\n\n def build_index(self):\n for doc in self.docs:\n self.addDocumentWords(doc)\n\n\n def getEngine(self): \n return VectorSearchEngine(self)\n\n\n\n def addDocumentWords(self, doc):\n words = get_words(doc) \n for k in words.keys():\n self.all_words.setdefault(k,0)\n self.all_words[k] += words[k] \n\n def getWordList(self):\n words = self.all_words.keys()\n words.sort()\n return words\n\n\ndef get_words(text):\n stop_list = load_stop_list() # love those ()\n # Splits on whitespace and strips some punctuation \n words = [stem(word) for word in text.lower().split()\n if re.match(\"[a-z\\-']+\", word)\n and word not in stop_list]\n \n # do { $_++ } for @doc_words{@words};\n doc_words = {}\n for one in words:\n doc_words.setdefault(one,0)\n doc_words[one] = doc_words[one] + 1\n \n # return %doc_words;\n return doc_words\n\ndef stem(word):\n # word needs to be all lowercase before being passed to stem\n string.lower(word) \n\n # fancy stuff to remove .,?!\"\n mymatch = re.compile('(\\,|\\.|\\!|\\?|\\\")')\n word = mymatch.sub(r'',word)\n\n p = PorterStemmer()\n word = p.stem(word, 0,len(word)-1)\n \n return word\n\n\ndef load_stop_list():\n all_from_file = []\n # @TODO: unhardcode this path. 
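# --- Hedged sketch of the ranking step in VectorSearchEngine (above): score
# each document vector against the query by cosine similarity and keep
# everything above the 0.04 threshold. The repo's vcos() helper is assumed to
# compute the same quantity as the plain-numpy cosine below:
import numpy as np

def cosine(a, b):
    denom = np.linalg.norm(a) * np.linalg.norm(b)
    return float(np.dot(a, b) / denom) if denom else 0.0

docs = np.array([[2.0, 0.0, 1.0],   # rows are per-document term counts
                 [0.0, 3.0, 0.0]])
query = np.array([1.0, 0.0, 1.0])
hits = {i: cosine(d, query) for i, d in enumerate(docs)}
print({i: s for i, s in hits.items() if s > 0.04})  # {0: 0.9486...}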
\n for line in open('./vectorspace/stop_list.txt').readlines():\n mymatch = re.compile('\\n') \n all_from_file.append(mymatch.sub(r'',line))\n \n return all_from_file\n\n \nif __name__==\"__main__\":\n pass\n\n","repo_name":"sabren/sixthdev","sub_path":"ransacker/vectorspace/search_mod.py","file_name":"search_mod.py","file_ext":"py","file_size_in_byte":4398,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"31686605278","text":"#!/usr/bin/env python3\r\n\r\n\r\nimport platform\r\nfrom pathlib import Path\r\nimport os.path\r\n\r\n\r\n\r\n\r\n# Get session exists run id and return log file\r\n\r\ndef logpath(runid):\r\n filename = \"processlog1\" + str(runid) + \".log\"\r\n if platform.system() == 'Windows':\r\n logdir = Path('C:/Users/Davidy/Dropbox/Projects/CryptoAPI/logs/')\r\n path = logdir / filename\r\n return path\r\n else:\r\n logdir = Path('/home/ubuntu/cryptoapi/Tracker/logs')\r\n path = logdir / filename\r\n return path\r\n\r\n\r\n# Get logfile and message\r\n\r\ndef writelog(logfile, msg,dubugmode =0 ):\r\n if dubugmode == 0:\r\n return None\r\n if os.path.exists(str(logfile)):\r\n with open(logfile, 'a') as f:\r\n f.write(msg)\r\n else:\r\n print(\"Writing logfile to \" + str(logfile))\r\n f = open(logfile, 'w')\r\n f.write(msg)\r\n f.close()\r\n if platform.system() != 'Windows':\r\n os.chmod(str(logfile), 777)\r\n","repo_name":"mortrick/Tracker","sub_path":"logs/dynamic_log.py","file_name":"dynamic_log.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"6047967802","text":"import matplotlib as mpl\nimport math\n# mpl.use('Agg')\nfrom Data.VisualGenome.models import ObjectMapping\nfrom FeaturesExtraction.Lib.VisualGenomeDataGenerator import visual_genome_data_cnn_generator_with_batch\nfrom FeaturesExtraction.Lib.Zoo import ModelZoo\nimport os\nimport cPickle\nimport numpy as np\nfrom FeaturesExtraction.Lib.Config import Config\nfrom keras.optimizers import Adam\nfrom keras.layers import Input, AveragePooling2D, Flatten, Dense, GlobalAveragePooling2D, Activation, regularizers\nfrom keras.callbacks import ModelCheckpoint, TensorBoard, CSVLogger, ReduceLROnPlateau, LearningRateScheduler\nfrom keras import backend as K\nfrom keras.models import Model\nimport sys\nimport matplotlib.pyplot as plt\nfrom FeaturesExtraction.Utils.Utils import get_time_and_date, TRAINING_OBJECTS_CNN_PATH, CLASSES_COUNT_FILE, \\\n CLASSES_MAPPING_FILE, replace_top_layer, get_bad_urls, get_dev_entities_img_ids\nfrom Utils.Utils import create_folder\nfrom FeaturesExtraction.Utils.data import splitting_to_datasets, get_filtered_data, get_name_from_file, pickle_dataset\nfrom FeaturesExtraction.Utils.Utils import DATA, VISUAL_GENOME\nfrom FilesManager.FilesManager import FilesManager\nfrom Utils.Logger import Logger\n\nNOF_LABELS = 150\nTRAINING_PERCENT = 0.75\nVALIDATION_PERCENT = 0.05\nTESTING_PERCENT = 0.2\nNUM_EPOCHS = 90\nEPOCHS_DROP = 10.0\nNUM_BATCHES = 64\nMAX_NOF_SAMPLES_THR = 1000000\nMAX_NOF_SAMPLES = 900000\nLR = 1e-6\nDECAY_DROP = 0.9\nUSE_DEV = True\n\n# If the allocation of training, validation and testing does not adds up to one\nused_percent = TRAINING_PERCENT + VALIDATION_PERCENT + TESTING_PERCENT\nif not used_percent == 1:\n error_msg = 'Data used percent (train + test + validation) is {0} and should be 1'.format(used_percent)\n print(error_msg)\n raise Exception(error_msg)\n\n__author__ = 'roeih'\n\n\ndef 
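# --- Hedged sketch of the token counting in get_words() (above): lowercase,
# drop stop words, strip ,.!?" and count occurrences. Stemming is omitted
# here because PorterStemmer is a repo-local module; collections.Counter
# replaces the setdefault/+= idiom:
import re
from collections import Counter

STOP = {"the", "a", "of"}
tokens = "The cats, the dogs! A tale of cats.".lower().split()
cleaned = [re.sub(r'[,.!?"]', "", t) for t in tokens if t not in STOP]
print(Counter(cleaned))  # Counter({'cats': 2, 'dogs': 1, 'tale': 1})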
preprocessing_objects(img_data, hierarchy_mapping, object_file_name='objects.p'):\n \"\"\"\n This function takes the img_data and create a full object list that contains ObjectMapping class\n :param object_file_name: object pickle file name\n :param img_data: list of entities files\n :param hierarchy_mapping: dict of hierarchy_mapping\n :return: list of ObjectMapping\n \"\"\"\n\n object_path_token = \"{0}.{1}.{2}\".format(DATA, VISUAL_GENOME, get_name_from_file(object_file_name))\n\n # Check if pickles are already created\n objects_path = FilesManager().get_file_path(object_path_token)\n\n if os.path.isfile(objects_path):\n Logger().log('File is already exist {0}'.format(objects_path))\n objects = FilesManager().load_file(object_path_token)\n return objects\n\n # Bad urls which should be sorted out\n bad_urls = get_bad_urls()\n\n # Get the whole objects from entities\n objects_lst = []\n correct_labels = hierarchy_mapping.keys()\n idx = 0\n for img in img_data:\n\n # Get the url image\n url = img.image.url\n\n # Sorting bad urls\n if url in bad_urls:\n continue\n\n # Get the objects per image\n objects = img.objects\n for object in objects:\n\n # Get the lable of object\n label = object.names[0]\n\n # Check if it is a correct label\n if label not in correct_labels:\n continue\n\n new_object_mapping = ObjectMapping(object.id, object.x, object.y, object.width, object.height, object.names,\n object.synsets, url)\n # Append the new objectMapping to objects_lst\n objects_lst.append(new_object_mapping)\n\n idx += 1\n Logger().log(\"Finished img: {}\".format(idx))\n\n # Pickle objects_lst\n objects_array = np.array(objects_lst)\n # Save the objects files to the disk\n FilesManager().save_file(object_path_token, objects_array)\n return objects_array\n\n\ndef get_classes_mapping_and_hierarchy_mapping_by_objects(objects, path, config=None):\n \"\"\"\n This function creates classes_mapping and hierarchy_mapping by objects and updates the hierarchy_mapping accordingly\n :param config: config\n :param objects: list of objects\n :param path: saving or loading the classes_count_per_objects and hierarchy_mapping_per_objects from path folder\n :return: dict of classes_mapping and hierarchy_mapping\n \"\"\"\n\n # Load hierarchy mapping and class counting from cache\n if config is not None and config.use_cache_dir:\n classes_count_path = os.path.join(config.loading_model_folder, CLASSES_COUNT_FILE)\n hierarchy_mapping_path = os.path.join(config.loading_model_folder, CLASSES_MAPPING_FILE)\n logger.log(\n \"Loading from cached hierarchy mapping from {0} and class counting {1}\".format(hierarchy_mapping_path,\n classes_count_path))\n classes_count_per_objects = cPickle.load(open(classes_count_path, 'rb'))\n hierarchy_mapping_per_objects = cPickle.load(open(hierarchy_mapping_path, 'rb'))\n return classes_count_per_objects, hierarchy_mapping_per_objects\n\n classes_count_per_objects = {}\n hierarchy_mapping_per_objects = {}\n new_obj_id = 0\n for object in objects:\n # Get the label of object\n label = object.names[0]\n\n # Update the classes_count dict\n if label in classes_count_per_objects:\n # Check if label is already in dict\n classes_count_per_objects[label] += 1\n else:\n # Init label in dict\n classes_count_per_objects[label] = 1\n\n # Update hierarchy_mapping dict\n if label not in hierarchy_mapping_per_objects:\n hierarchy_mapping_per_objects[label] = new_obj_id\n new_obj_id += 1\n\n # Save classes_count_per_objects file\n classes_count_file = file(os.path.join(path, CLASSES_COUNT_FILE), 'wb')\n # 
Pickle classes_count_per_objects\n cPickle.dump(classes_count_per_objects, classes_count_file, protocol=cPickle.HIGHEST_PROTOCOL)\n # Close the file\n classes_count_file.close()\n # Save hierarchy_mapping_per_objects file\n hierarchy_mapping_file = file(os.path.join(path, CLASSES_MAPPING_FILE), 'wb')\n # Pickle hierarchy_mapping_per_objects\n cPickle.dump(hierarchy_mapping_per_objects, hierarchy_mapping_file, protocol=cPickle.HIGHEST_PROTOCOL)\n # Close the file\n hierarchy_mapping_file.close()\n return classes_count_per_objects, hierarchy_mapping_per_objects\n\n\ndef sorting_urls(train_imgs, test_imgs, val_imgs):\n \"\"\"\n This function sorting bad urls from the objects data-sets\n :param train_imgs: train data\n :param test_imgs: test data\n :param val_imgs: validation data\n :return: train, test and validation object list after sorting\n \"\"\"\n\n # Get the bad urls\n bad_urls = get_bad_urls()\n # Get Dev data-set\n dev_imgs = get_dev_entities_img_ids()\n\n real_train_imgs = []\n real_test_imgs = []\n real_val_imgs = []\n\n # Remove bad urls\n for img in train_imgs:\n if img.url in bad_urls:\n continue\n\n img_id = img.url.split(\"/\")[-1]\n img_id = int(img_id.split('.')[0])\n if img_id in dev_imgs and USE_DEV:\n continue\n real_train_imgs.append(img)\n\n for img in test_imgs:\n if img.url in bad_urls:\n continue\n\n img_id = img.url.split(\"/\")[-1]\n img_id = int(img_id.split('.')[0])\n if img_id in dev_imgs and USE_DEV:\n continue\n real_test_imgs.append(img)\n\n for img in val_imgs:\n if img.url in bad_urls:\n continue\n\n img_id = img.url.split(\"/\")[-1]\n img_id = int(img_id.split('.')[0])\n if img_id in dev_imgs and USE_DEV:\n continue\n real_val_imgs.append(img)\n\n logger.log(\"Debug printing after sorting- the number of train samples: {0}, the number of test samples: {1}, \"\n \"the number of validation samples: {2}\".format(len(real_train_imgs),\n len(real_test_imgs),\n len(real_val_imgs)))\n return real_train_imgs, real_test_imgs, real_val_imgs\n\n\ndef get_testset_by_size(detections_test, size_of_test):\n \"\"\"\n This function returns detections test set according to a specific size\n :param detections_test: the detections test\n :param size_of_test: the wanted test-set size\n :return: \n \"\"\"\n np.random.shuffle(detections_test)\n detections_test = detections_test[:size_of_test]\n return detections_test\n\n\nif __name__ == '__main__':\n\n # Define FileManager\n file_manager = FilesManager()\n # Define Logger\n logger = Logger()\n\n # Get argument\n if len(sys.argv) < 2:\n # Default GPU number\n gpu_num = 0\n else:\n # Get the GPU number from the user\n gpu_num = sys.argv[1]\n\n # Printing which GPU you have selected\n logger.log(\"Selected GPU number: {0}\".format(gpu_num))\n\n # Load class config\n config = Config(gpu_num)\n # Print to the logger the config params\n config.config_logger()\n\n # Define GPU training\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(config.gpu_num)\n\n # Define tensorflow use only the amount of memory required for the process\n # config_tf = tf.ConfigProto()\n # config_tf.gpu_options.allow_growth = True\n # set_session(tf.Session(config=config_tf))\n\n # Get time and date\n time_and_date = get_time_and_date()\n # Path for the training folder\n path = os.path.join(TRAINING_OBJECTS_CNN_PATH, time_and_date)\n # Create a new folder for training\n create_folder(path)\n # loading model weights\n if config.loading_model:\n net_weights = file_manager.get_file_path(config.loading_model_token)\n logger.log(\"Loading Weights from: 
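# --- Hedged sketch of the one-pass bookkeeping in
# get_classes_mapping_and_hierarchy_mapping_by_objects() (above): a per-label
# count plus contiguous ids assigned in first-seen order:
labels = ["dog", "cat", "dog", "bird", "cat", "dog"]
classes_count, hierarchy_mapping = {}, {}
for label in labels:
    classes_count[label] = classes_count.get(label, 0) + 1
    hierarchy_mapping.setdefault(label, len(hierarchy_mapping))
print(classes_count)      # {'dog': 3, 'cat': 2, 'bird': 1}
print(hierarchy_mapping)  # {'dog': 0, 'cat': 1, 'bird': 2}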
{}\".format(net_weights))\n else:\n # The Weights for training\n net_weights = file_manager.get_file_path(config.base_net_weights)\n logger.log(\"Taking Base Weights from: {}\".format(net_weights))\n net_weights_path = os.path.join(path, config.model_weights_name)\n logger.log(\"The new Model Weights will be Saved: {}\".format(net_weights_path))\n\n # entities, hierarchy_mapping_objects, _ = get_filtered_data(filtered_data_file_name=\"mini_filtered_data\",\n # category='entities',\n # load_entities=False)\n\n hierarchy_mapping_objects = file_manager.load_file(\"data.visual_genome.hierarchy_mapping_objects\")\n hierarchy_mapping_predicates = file_manager.load_file(\"data.visual_genome.hierarchy_mapping_predicates\")\n # entities_train = file_manager.load_file(\"data.visual_genome.full_filtered_preprocessed_data_train\")\n # entities_test = file_manager.load_file(\"data.visual_genome.full_filtered_preprocessed_data_test\")\n\n # Get Visual Genome Data objects\n # Get Train\n objects_train = preprocessing_objects(None, hierarchy_mapping_objects,\n object_file_name=\"full_objects_train\")\n # objects_train = objects_train[:100000]\n # Shuffle Objects for test-set\n np.random.shuffle(objects_train)\n # Get Test\n objects_test = preprocessing_objects(None, hierarchy_mapping_objects, object_file_name=\"full_objects_test\")\n # Shuffle Objects for test-set\n np.random.shuffle(objects_test)\n # objects_test = objects_test[:len(objects_train) / 5]\n # Get Validation\n objects_val = []\n\n logger.log(\"Debug printing before sorting- the number of train objects: {0}, the number of test objects: {1}, \"\n \"the number of validation objects: {2}\".format(len(objects_train), len(objects_test), len(objects_val)))\n\n # # If there is too much data take only part pf the data\n # if len(objects) > MAX_NOF_SAMPLES_THR and not config.use_all_objects_data:\n # objects = objects[:MAX_NOF_SAMPLES]\n #\n # train_imgs, test_imgs, val_imgs = splitting_to_datasets(objects, training_percent=TRAINING_PERCENT,\n # testing_percent=TESTING_PERCENT, num_epochs=NUM_EPOCHS,\n # path=path, config=config)\n\n logger.log(\"Using Dev test: {}\".format(USE_DEV))\n # Sorting bad urls - should be delete sometime\n train_imgs, test_imgs, val_imgs = sorting_urls(objects_train, objects_test, objects_val)\n\n logger.log(\"Debug printing after sorting- the number of train objects: {0}, the number of test objects: {1}, \"\n \"the number of validation objects: {2}\".format(len(train_imgs), len(test_imgs), len(val_imgs)))\n\n # Save train-set and test-set and validation-set\n pickle_dataset(train_imgs, test_imgs, val_imgs, path)\n\n # Set the number of classes\n if config.replace_top:\n number_of_classes = config.nof_classes\n else:\n number_of_classes = len(hierarchy_mapping_objects)\n\n data_gen_train_vg = visual_genome_data_cnn_generator_with_batch(data=train_imgs,\n hierarchy_mapping=hierarchy_mapping_objects,\n config=config, mode='train', batch_size=NUM_BATCHES)\n data_gen_test_vg = visual_genome_data_cnn_generator_with_batch(data=test_imgs,\n hierarchy_mapping=hierarchy_mapping_objects,\n config=config, mode='test', batch_size=NUM_BATCHES)\n data_gen_validation_vg = visual_genome_data_cnn_generator_with_batch(data=test_imgs,\n hierarchy_mapping=hierarchy_mapping_objects,\n config=config, mode='validation',\n batch_size=NUM_BATCHES)\n\n if K.image_dim_ordering() == 'th':\n input_shape_img = (3, None, None)\n else:\n input_shape_img = (config.crop_height, config.crop_width, 3)\n\n img_input = Input(shape=input_shape_img, 
name=\"image_input\")\n\n # Define ResNet50 model With Top\n net = ModelZoo()\n model_resnet50 = net.resnet50_base(img_input, trainable=True)\n # model_resnet50 = net.resnet50_base_reg_and_init(img_input, trainable=config.resnet_body_trainable,\n # kernel_regularizer=regularizers.l2(0.005),\n # kernel_initializer=\"he_normal\")\n model_resnet50 = GlobalAveragePooling2D(name='global_avg_pool')(model_resnet50)\n output_resnet50 = Dense(number_of_classes, kernel_initializer=\"he_normal\", activation='softmax', name='fc')(\n model_resnet50)\n\n # Define the model\n model = Model(inputs=img_input, outputs=output_resnet50, name='resnet50')\n # In the summary, weights and layers from ResNet50 part will be hidden, but they will be fit during the training\n model.summary()\n\n # Save the last layer initialized weights\n if config.replace_top:\n last_layer_weights = model.layers[-1].get_weights()\n\n # Load pre-trained weights for ResNet50\n try:\n if config.load_weights:\n logger.log('loading weights from {}'.format(net_weights))\n model.load_weights(net_weights, by_name=True)\n else:\n logger.log('No weights have been loaded')\n except Exception as e:\n logger.log('Could not load pretrained model weights. Weights can be found at {} and {}'.format(\n 'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_th_dim_ordering_th_kernels_notop.h5',\n 'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'\n ))\n raise Exception(e)\n\n # Replace the last layer\n if config.replace_top:\n # Set the new initialized weights\n model.layers[-1].set_weights(last_layer_weights)\n\n # Replace the last top layer with a new Dense layer\n # model = replace_top_layer(model, len(hierarchy_mapping_predicates))\n # In the summary, weights and layers from ResNet50 part will be hidden, but they will be fit during the training\n # model.summary()\n\n optimizer = Adam(LR)\n model.compile(optimizer=optimizer,\n loss='categorical_crossentropy', metrics=['accuracy'])\n\n callbacks = [ModelCheckpoint(net_weights_path, monitor='val_loss', save_best_only=True, verbose=0),\n TensorBoard(log_dir=\"logs\", write_graph=True, write_images=True),\n CSVLogger(os.path.join(path, 'training.log'), separator=',', append=False),\n ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=2, min_lr=1e-12)]\n # LearningRateScheduler(schedule=lambda epoch: LR * (DECAY_DROP ** math.floor((1 + epoch) / EPOCHS_DROP)))]\n\n logger.log('Starting training with learning rate: {0}'.format(LR))\n history = model.fit_generator(data_gen_train_vg, steps_per_epoch=len(train_imgs) / NUM_BATCHES, epochs=NUM_EPOCHS,\n validation_data=data_gen_test_vg, validation_steps=len(test_imgs) / NUM_BATCHES,\n callbacks=callbacks, max_q_size=100, workers=3, pickle_safe=True)\n\n # Validating the model\n test_score = model.evaluate_generator(data_gen_validation_vg, steps=len(test_imgs) / NUM_BATCHES, max_q_size=100,\n workers=4, pickle_safe=True)\n # Plot the Score\n logger.log(\"The Validation loss is: {0} and the Validation Accuracy is: {1}\".format(test_score[0], test_score[1]))\n\n # Summarize history for accuracy\n plt.figure()\n plt.plot(history.history['acc'])\n plt.plot(history.history['val_acc'])\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n plt.savefig(os.path.join(path, \"model_accuracy.jpg\"))\n plt.close()\n # summarize history for loss\n plt.figure()\n 
plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n plt.savefig(os.path.join(path, \"model_loss.jpg\"))\n plt.close()\n","repo_name":"roeiherz/SceneGrapher","sub_path":"TrainCNN.py","file_name":"TrainCNN.py","file_ext":"py","file_size_in_byte":18467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"37692691931","text":"#Training script for omnidirectional collaborative filtering models\nfrom __future__ import division\nfrom __future__ import print_function\nfrom data_reader import data_reader\nimport keras\nfrom model import omni_model\nimport pandas as pd\nimport numpy as np\nfrom keras import metrics\nimport tensorflow as tf\nimport datetime\nfrom keras.optimizers import Adagrad, RMSprop\nimport tensorflow as tf\nfrom keras.backend.tensorflow_backend import set_session\nimport h5py\nimport copy\nimport json\n\n#Parameters:\n\n#Dataset parameters \ndataset = \"ml1m\" # movielens20m, amazon_books, amazon_moviesAndTv, amazon_videoGames, amazon_clothing, beeradvocate, yelp, netflix, ml1m\nuseTimestamps = False\nreverse_user_item_data = True\n\n#Training parameters\nmax_epochs = 500\ntrain_sparsity = [1.0, 1.0] #Probability of a data point being treated as an input (lower numbers mean a sparser recommendation problem)\ntest_sparsities = [0.0, 0.1, 0.4, 0.5, 0.6, 0.9] #0.0 Corresponds to the cold start problem. This is not used when eval_mode = \"fixed_split\"\nbatch_size = 128 #Bigger batches appear to be very important in getting this to work well. I hypothesize that this is because the optimizer is not fighting itself when optimizing for different things across trials\npatience = 0\nshuffle_data_every_epoch = True\nval_split = [0.8, 0.1, 0.1]\nuseJSON = True\nearly_stopping_metric = \"val_accurate_MSE\" # \"val_loss\" #\"val_accurate_RMSE\"\neval_mode = \"fixed_split\" # \"ablation\" or \"fixed_split\" #Ablation is for splitting the datasets by user and predicting ablated ratings within a user. This is a natural metric because we want to be able to predict unobserved user ratings from observed user ratings\n#Fixed split is for splitting the datasets by rating. This is the standard evaluation procedure in the literature. \nl2_weight_regulatization = None #0 #0.01 #The parameter value for the l2 weight regularization. Use None for no regularization.\npass_through_input_training = True #Turns the model into a denosing autoencoder...\ndropout_probability = 0.2\n\n#Model parameters\nnumlayers = 1\nnum_hidden_units = 512\nuse_causal_info = False #Toggles whether or not the model incorporates the auxilliary info. Setting this to off and setting the auxilliary_mask_type to \"zeros\" have the same computational effect, however this one runs faster but causes some errors with model saving. It is recommended to keep this set to True\nauxilliary_mask_type = None#\"zeros\" #Default is \"dropout\". Other options are \"causal\", \"zeros\", and \"both\" which uses both the causal and the dropout masks.\naux_var_value = -1 #-1 is Zhouwen's suggestion. 
Seems to work better than the default of 1.\nmodel_save_path = \"models/\"\nmodel_loss = 'mean_squared_error' # \"mean_absolute_error\" 'mean_squared_error'\nlearning_rate = 0.005\noptimizer = Adagrad(lr=learning_rate, epsilon=1e-08, decay=0.0) #'rmsprop' 'adam' 'adagrad'\nactivation_type = 'sigmoid' #Try 'selu' or 'elu' or 'softplus' or 'sigmoid'\nuse_sparse_representation = False #Works, but requires a change in keras backend (at least if using Keras (2.0.4) )\nuse_experimental_sparse_masking_layer = False\n\nload_weights_from = None#\"stackedDenoising_NOfinetuning_[0.5, 0.5]trainSparsity_128bs_2lay_512hu_0.005lr_Noneregul_None_sigmoid_itemUserReverse_movielens20m_11_12AM_October_28_2017_bestValidScore\" #\"0p5trainSparsity_256bs_3lay_512hu_1.0regul_netflix_10_11AM_October_03_2017_bestValidScore\" # Model to load weights from for transfer learning experiments\nperform_finetuning = False #Set to False if you want to fix the weights\n\nmodel_save_name = \"stackedDenoising_WITHfinetuning_\" + str(train_sparsity) +\"trainSparsity_\"+str(batch_size)+\"bs_\"+str(numlayers)+\"lay_\"+str(num_hidden_units)+\"hu_\" + str(learning_rate) + \"lr_\" + str(l2_weight_regulatization) + \"regul_\" + str(auxilliary_mask_type) + \"_\" + str(activation_type)#\"noCausalInfo_0p5trainSparsity_128bs_3lay_256hu\"\n\n#Set dataset params\nwith open(\"./datasets_metadata.json\", \"r\") as f:\n\tmetadata = json.load(f)\ndataset_dict = metadata[dataset]\ndata_path = dataset_dict[\"data_path\"]\nnum_items = dataset_dict[\"num_items\"]\nnum_users = dataset_dict[\"num_users\"]\nrating_range = dataset_dict[\"rating_range\"]\nnonsequentialusers = dataset_dict[\"nonsequentialusers\"]\n\nif reverse_user_item_data:\n\t#data_path = data_path+\"reverse_item-user/\"\n\tnum_items_temp = num_items\n\tnum_items = num_users\n\tnum_users = num_items_temp\n\tmodel_save_name += \"_itemUserReverse\"\n\nmodel_save_name += \"_\" + dataset + \"_\"\nmodelRunIdentifier = datetime.datetime.now().strftime(\"%I_%M%p_%B_%d_%Y\")\nmodel_save_name += modelRunIdentifier #Append a unique identifier to the filename\n\nprint(\"Loading data for \" + dataset)\ndata_reader = data_reader(num_items, num_users, data_path, nonsequentialusers = nonsequentialusers, use_json=useJSON, eval_mode=eval_mode, useTimestamps=useTimestamps, reverse_user_item_data = reverse_user_item_data)\n\nif eval_mode == \"ablation\":\n\tdata_reader.split_for_validation(val_split) #Create a train-valid-test split\n\t#If the eval mode is \"fixed_split\" then the data is aldready split\n\n#NEED TO IMPLEMENT TRAIN-TEST SPLIT\n\n#Build model\nif auxilliary_mask_type=='both':\n\tuse_both_masks=True\nelse:\n\tuse_both_masks=False\nomni_m = omni_model(numlayers, num_hidden_units, num_items, batch_size, dense_activation = activation_type, use_causal_info = use_causal_info, \n\tuse_timestamps = useTimestamps, use_both_masks = use_both_masks, l2_weight_regulatization=l2_weight_regulatization, sparse_representation=use_sparse_representation, \n\tdropout_probability = dropout_probability, use_sparse_masking_layer = use_experimental_sparse_masking_layer)\nm = omni_m.model\n\n\ndef accurate_MAE(y_true, y_pred):\n\tnum_predictions = tf.count_nonzero(y_true+y_pred, dtype=tf.float32)#Count ratings that are non-zero in both the prediction and the targets (the predictions are zeroed explicitly for missing ratings.)\n\tMAE = metrics.mae(y_true, y_pred)\n\treturn (MAE*num_items*batch_size)/num_predictions #Normalize to count only the ratings that are present.\n\t#return 
(MAE/num_predictions)*num_items*batch_size #Normalize to count only the ratings that are present.\n\ndef accurate_RMSE(y_true, y_pred):\n\tnum_predictions = tf.count_nonzero(y_true+y_pred, dtype=tf.float32)#Count ratings that are non-zero in both the prediction and the targets (the predictions are zeroed explicitly for missing ratings.)\n\tMSE = metrics.mse(y_true, y_pred)\n\treturn tf.sqrt((MSE*num_items*batch_size)/num_predictions) #Normalize to count only the ratings that are present. Then take the square root for RMSE.\n\ndef accurate_MSE(y_true, y_pred):\n\tnum_predictions = tf.count_nonzero(y_true+y_pred, dtype=tf.float32)#Count ratings that are non-zero in both the prediction and the targets (the predictions are zeroed explicitly for missing ratings.)\n\tMSE = metrics.mse(y_true, y_pred)\n\treturn (MSE*num_items*batch_size)/num_predictions #Normalize to count only the ratings that are present.\n\ndef nMAE(y_true, y_pred):\n\tnum_predictions = tf.count_nonzero(y_true+y_pred, dtype=tf.float32)#Count ratings that are non-zero in both the prediction and the targets (the predictions are zeroed explicitly for missing ratings.)\n\tMAE = metrics.mae(y_true, y_pred)\n\treturn ((MAE*num_items*batch_size)/num_predictions)/rating_range #Normalize to count only the ratings that are present. Then normalize by the rating range.\n\n\n\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\n#config.gpu_options.per_process_gpu_memory_fraction = 0.3\n#config.gpu_options.visible_device_list = \"0\"\nset_session(tf.Session(config=config))\n\nm.compile(optimizer=optimizer,\n loss=model_loss,\n metrics=['mae', accurate_MAE, nMAE, accurate_RMSE, accurate_MSE])\n\n\nif load_weights_from is not None:\n\tprint(\"Loading weights from \", load_weights_from)\n\t#Set the weights of the dense layers of the model to weights from a pretrained model (for domain adaptation experiments)\n\tdonor_model = keras.models.load_model(model_save_path+load_weights_from, \n\t\tcustom_objects={'accurate_MAE': accurate_MAE, 'accurate_RMSE': accurate_RMSE, 'nMAE': nMAE, 'accurate_MSE': accurate_MSE})\n\tif perform_finetuning:\n\t\tprint(\"Fine tuning\")\n\t\tomni_m.manually_load_all_weights(donor_model)\n\telse:\n\t\tomni_m.load_and_fix_for_denoising_autoencoders(donor_model)\n\nmin_loss = None\nbest_epoch = 0\nval_history = []\nfor i in range(max_epochs):\n\tprint(\"Starting epoch \", i+1)\n\n\t#Rebuild the generators for each epoch (the train-valid set assignments stay the same)\n\ttrain_gen = data_reader.data_gen(batch_size, train_sparsity, train_val_test = \"train\", shuffle=shuffle_data_every_epoch, auxilliary_mask_type = auxilliary_mask_type, aux_var_value = aux_var_value, sparse_representation = use_sparse_representation, pass_through_input_training = pass_through_input_training)\n\tvalid_gen = data_reader.data_gen(batch_size, train_sparsity, train_val_test = \"valid\", shuffle=shuffle_data_every_epoch, auxilliary_mask_type = auxilliary_mask_type, aux_var_value = aux_var_value, sparse_representation = use_sparse_representation)\n\n\thistory = m.fit_generator(train_gen, np.floor(data_reader.train_set_size/batch_size)-1, \n\t\tvalidation_data=valid_gen, validation_steps=np.floor(data_reader.val_set_size/batch_size)-1) #callbacks=callbax\n\t\n\t#Early stopping code\n\tval_loss_list = history.history[early_stopping_metric]\n\tval_loss = val_loss_list[len(val_loss_list)-1]\n\tval_history.extend(val_loss_list)\n\tif min_loss == None:\n\t\tmin_loss = val_loss\n\telif min_loss>val_loss:\n\t\tmin_loss = 
val_loss\n\t\tbest_epoch = i\n\t\tm.save(model_save_path+model_save_name+\"_epoch_\"+str(i+1)+\"_bestValidScore\") #Only save if it is the best model (will save a lot of time and disk space...)\n\n\telif i-best_epoch>patience:\n\n\t\tprint(\"Stopping early at epoch \", i+1)\n\t\tprint(\"Best epoch was \", best_epoch+1)\n\t\tprint(\"Val history: \", val_history)\n\n\t\tbreak\n\t\n\n\n#Testing\nbest_model_fn = model_save_path+model_save_name+\"_epoch_\"+str(best_epoch+1)+\"_bestValidScore\"\ntry: #Delete optimizer if it exists\n\tprint(\"Deleting optimizer weights for model at \", best_model_fn)\n\tf = h5py.File(best_model_fn, 'r+')\n\tdel f['optimizer_weights']\n\tf.close()\nexcept Exception:\n\tprint(\"Could not delete optimizer weights. They probably weren't saved with the model.\")\ntry:\n\tbest_m = keras.models.load_model(best_model_fn, \n\t\tcustom_objects={'accurate_MAE': accurate_MAE, 'accurate_RMSE': accurate_RMSE, 'nMAE': nMAE, 'accurate_MSE': accurate_MSE})\n\tbest_m.save(model_save_path+model_save_name+\"_bestValidScore\") #resave the best one so it can be found later\n\ttest_epoch = best_epoch+1\nexcept Exception:\n\tprint(\"FAILED TO LOAD BEST MODEL. TESTING WITH MOST RECENT MODEL.\")\n\tbest_m = m\n\ttest_epoch = i+1\nprint(\"Testing model from epoch: \", test_epoch)\n\nif eval_mode == \"ablation\":\n\tprint(\"\\nEvaluating model with ablations\")\n\tfor i, test_sparsity in enumerate(test_sparsities):\n\n\t\ttest_gen = data_reader.data_gen(batch_size, test_sparsity, train_val_test = \"test\", shuffle=shuffle_data_every_epoch, auxilliary_mask_type = auxilliary_mask_type, aux_var_value = aux_var_value, sparse_representation = use_sparse_representation)\n\n\t\ttest_results = best_m.evaluate_generator(test_gen, np.floor(data_reader.test_set_size/batch_size)-1)\n\n\t\tprint(\"\\nTest results with sparsity: \", test_sparsity)\n\t\tprint(test_results)\n\t\tfor i in range(len(test_results)):\n\t\t\tprint(m.metrics_names[i], \" : \", test_results[i])\n\nelif eval_mode == \"fixed_split\":\n\tprint(\"\\nEvaluating model with fixed split\")\n\ttest_gen = data_reader.data_gen(batch_size, None, train_val_test = \"test\", shuffle=shuffle_data_every_epoch, auxilliary_mask_type = auxilliary_mask_type, aux_var_value = aux_var_value, sparse_representation = use_sparse_representation)\n\ttest_results = best_m.evaluate_generator(test_gen, np.floor(data_reader.test_set_size/batch_size)-1)\n\tprint(\"Test results with fixed split\")\n\t#print(test_results)\n\tfor i in range(len(test_results)):\n\t\tprint(m.metrics_names[i], \" : \", test_results[i])\n\n\n\tprint(\"Testing manually\")\n\ttest_gen_manual = data_reader.data_gen(batch_size, None, train_val_test = \"test\", shuffle=shuffle_data_every_epoch, auxilliary_mask_type = auxilliary_mask_type, aux_var_value = aux_var_value, return_target_count=True, sparse_representation = use_sparse_representation)\n\t\n\tpredictions = []\n\ttargets = []\n\tratings_count = 0\n\tprint(\"Predicting\")\n\tfor i in range(int(np.floor(data_reader.test_set_size/batch_size))):\n\t\tcurrent_data = test_gen_manual.next()\n\t\tinput_list = current_data[0]\n\t\tcurrent_targets = current_data[1]\n\t\tcur_ratings_count = current_data[2]\n\t\ttargets.append(current_targets)\n\t\tratings_count += cur_ratings_count\n\t\tcurrent_preds = best_m.predict(input_list, batch_size=batch_size, verbose=0)\n\t\tpredictions.append(current_preds)\n\n\tprint(\"Computing error\")\n\tdef compute_full_RMSE(predictions, targets, ratings_count):\n\t\tsum_squared_error = 0\n\t\tfor i in 
range(len(predictions)):\n\t\t\tcur_preds = predictions[i]\n\t\t\tcur_tars = targets[i]\n\t\t\terror_contribution = np.sum(np.square(np.subtract(cur_preds, cur_tars)))\n\t\t\tsum_squared_error += error_contribution\n\t\tMSE = sum_squared_error/ratings_count\n\t\tRMSE = np.sqrt(MSE)\n\t\treturn RMSE\n\n\tRMSE = compute_full_RMSE(predictions, targets, ratings_count)\n\tprint(\"Manual test RMSE is \", RMSE)\n\tprint(\"Load this model at: \", model_save_path+model_save_name+\"_bestValidScore\")","repo_name":"Epist/omnidirectional_collaborative_filtering","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":13227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"73480320042","text":"from typing import Any\n\nfrom django.db.models import Q, QuerySet\nfrom django.utils.decorators import method_decorator\nfrom django.views.decorators.cache import cache_page\nfrom drf_yasg import openapi\nfrom drf_yasg.utils import swagger_auto_schema\nfrom rest_framework.generics import ListAPIView\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\n\nfrom .models import SafeApp\nfrom .serializers import SafeAppsResponseSerializer\n\n\nclass SafeAppsListView(ListAPIView): # type: ignore[type-arg]\n serializer_class = SafeAppsResponseSerializer\n pagination_class = None\n\n _swagger_chain_id_param = openapi.Parameter(\n \"chainId\",\n openapi.IN_QUERY,\n description=\"Used to filter Safe Apps that are available on `chainId`\",\n type=openapi.TYPE_INTEGER,\n )\n _swagger_client_url_param = openapi.Parameter(\n \"clientUrl\",\n openapi.IN_QUERY,\n description=\"Used to filter Safe Apps that are available on `clientUrl`\",\n type=openapi.TYPE_STRING,\n )\n _swagger_url_param = openapi.Parameter(\n \"url\",\n openapi.IN_QUERY,\n description=\"Filter Safe Apps available from `url`. 
`url` needs to be an exact match\",\n type=openapi.TYPE_STRING,\n )\n\n @method_decorator(cache_page(60 * 10, cache=\"safe-apps\")) # Cache 10 minutes\n @swagger_auto_schema(\n manual_parameters=[\n _swagger_chain_id_param,\n _swagger_client_url_param,\n _swagger_url_param,\n ]\n ) # type: ignore[misc]\n def get(self, request: Request, *args: Any, **kwargs: Any) -> Response:\n \"\"\"\n Returns a collection of Safe Apps (across different chains).\n Each Safe App can optionally include the information about the `Provider`\n \"\"\"\n return super().get(request, *args, **kwargs)\n\n def get_queryset(self) -> QuerySet[SafeApp]:\n queryset = SafeApp.objects.filter(visible=True)\n\n chain_id = self.request.query_params.get(\"chainId\")\n if chain_id is not None and chain_id.isdigit():\n queryset = queryset.filter(chain_ids__contains=[chain_id])\n\n client_url = self.request.query_params.get(\"clientUrl\")\n if client_url and \"\\0\" not in client_url:\n queryset = queryset.filter(\n Q(exclusive_clients__url=client_url) | Q(exclusive_clients__isnull=True)\n )\n\n url = self.request.query_params.get(\"url\")\n if url and \"\\0\" not in url:\n queryset = queryset.filter(url=url)\n\n return queryset\n","repo_name":"safe-global/safe-config-service","sub_path":"src/safe_apps/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2563,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"19"} +{"seq_id":"74726142124","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# @Author: José Sánchez-Gallego (gallegoj@uw.edu)\n# @Date: 2021-04-06\n# @Filename: test_exceptions.py\n# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)\n\nimport warnings\n\nimport pytest\n\nfrom archon.exceptions import ArchonControllerError, ArchonControllerWarning\n\n\nasync def test_archon_controller_error_unnamed(controller):\n controller.name = None\n\n with pytest.raises(ArchonControllerError) as err:\n await controller.send_command(\"TEST\", command_id=100000000)\n\n assert \"unnamed - \" in str(err.value)\n\n\nasync def test_archon_controller_error_no_controller():\n class Test:\n def __init__(self):\n raise ArchonControllerError(\"test error\")\n\n with pytest.raises(ArchonControllerError) as err:\n Test()\n\n assert \"unnamed - test error\" in str(err.value)\n\n\nasync def test_archon_controller_error_no_class():\n with pytest.raises(ArchonControllerError) as err:\n raise ArchonControllerError(\"test error\")\n\n assert str(err.value) == \"test error\"\n\n\nasync def test_archon_controller_warning_unnamed(controller):\n controller.name = None\n\n with pytest.warns(ArchonControllerWarning) as warn:\n await controller.process_message(b\"TEST\")\n\n assert \"unnamed - \" in str(warn[-1].message)\n\n\nasync def test_archon_controller_warning_no_controller():\n class Test:\n def __init__(self):\n warnings.warn(\"test warning\", ArchonControllerWarning)\n\n with pytest.warns(ArchonControllerWarning) as warn:\n Test()\n\n assert \"unnamed - test warning\" in str(warn[-1].message)\n\n\nasync def test_archon_controller_warning_no_class():\n with pytest.warns(ArchonControllerWarning) as warn:\n warnings.warn(\"test warning\", ArchonControllerWarning)\n\n assert str(warn[-1].message) == \"test warning\"\n","repo_name":"sdss/archon","sub_path":"tests/test_exceptions.py","file_name":"test_exceptions.py","file_ext":"py","file_size_in_byte":1850,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} 
+{"seq_id":"41980374476","text":"'''\nОПИСАНИЕ.\nСкрипт для обмена актива(BTC,LTC,ETH) в USDT.\n\nНАСТРОКА осуществляется изменением файлов:\n1) private.py\n api_key - api_key биржи binance.com\n secret_key - secret_key биржи www.binance.com\n token - token telegram бота для отправки сервисных сообщений.\n chat_id - chat_id telegram пользователя\\группы для отправки сервисных сообщений.\n2) settings.txt\n формат записи: \"btc\": {\"time\": \"12:00\", \"rnd\": 4}\n \"btc\" - актив подлежащий обмену в USDT\n \"time\" - время обмена.\n \"rnd\" - округление в низ до знака после запятой.\n\nЛОГИКА РАБОТЫ\n./doc/rent.drawio\n\nИСТОРИЯ ИЗМЕНЕНИЙ.\n26.03.2020 v0.1\n Стартовая версия. (схема в ./doc/rent.drawio)\n27.03.2020 v0.2\n Переделанная версия. (схема в ./doc/rent.drawio)\n\nGIT\n не используется.\n'''\n\n\nfrom time import time as t, sleep\nfrom datetime import datetime as dt, timedelta as td\n\nfrom mod_telegram import telegram\nfrom mod_ticker import cbr\nfrom mod_binance import cls_binance\nfrom mod_txt import json_read,json_write,json_write_item,text_write,file_exists\n\nglobal debug\n\n#def get(settings=None,element=None):\n# if settings is None or element is None:\n# print('ERROR: get(None)')\n# return None\n# arr = {}\n# for el in settings['tickers']:\n# if debug: print(\"[DEBUG]\",el,settings[el][element])\n# arr.update({el:settings[el][element]})\n# return arr\n\ndef now_time():\n '''\n Возвращает текущее время в строчном формате HH:MM.\n :return:\n '''\n hour, minute, second_micro = dt.now().time().isoformat().split(':')\n return hour+\":\"+minute\n\ndef compare(time):\n '''\n возвращает разницу в секундах до времени

Say hello to a future of wine that you are guaranteed to love.\n\nJoin the exclusive Vinely club to learn your Wine Personality,\n gain access to member-only perks and receive delicious personalized,\n hand-picked wine delivered to your door every month\n\nYour friends will think you're the host with the most when you introduce them to their Wine Personality.\n \"\"\"\n\n content_anticipition = \"\"\"\nAre you Whimsical, Exuberant, Sensational, Moxie, Easygoing or Serendipitous? If you don't know, get drinking!\nJust sip and rate our 6 carefully selected First Taste Wines to uncover your Vinely Wine Personality.\n \"\"\"\n\n content_surprise = \"\"\"\nWho doesn't love a surprise, especially when you are guaranteed to love it?\n As a Vinely Club member you will eagerly await 6 different wines perfectly matched to your taste buds each month.\n Enhance your enjoyment every month with wine you love. One more surprise from us...we pay for shipping!\nThis is a club where the deliveries are as unique as you!\n \"\"\"\n\n content_indulgence = \"\"\"\nYou will be the envy of all your friends when every glass you pour is one you love.\n Give yourself the gift of easy wine enjoyment. Go ahead, you deserve it.\n \"\"\"\n\n content_excitement = \"\"\"\n\n Enjoy perks like member-only experiences, preview events, trips, gifts and items that express your personality.\n\n \"\"\"\n\n content_product = \"\"\"\n\n\n
\n \"\"\"\n template, created = orm.ContentTemplate.objects.get_or_create(key=\"join_club\", category=1)\n section, created = orm.Section.objects.get_or_create(key='header', template=template)\n section.content = host_header\n section.save()\n section, created = orm.Section.objects.get_or_create(key='sub_header', template=template)\n section.content = host_sub_header\n section.save()\n section, created = orm.Section.objects.get_or_create(key=\"overview\", template=template)\n section.content = content_overview\n section.save()\n section, created = orm.Section.objects.get_or_create(key=\"anticipation\", template=template)\n section.content = content_anticipition\n section.save()\n section, created = orm.Section.objects.get_or_create(key=\"surprise\", template=template)\n section.content = content_surprise\n section.save()\n section, created = orm.Section.objects.get_or_create(key=\"indulgence\", template=template)\n section.content = content_indulgence\n section.save()\n section, created = orm.Section.objects.get_or_create(key=\"excitement\", template=template)\n section.content = content_excitement\n section.save()\n section, created = orm.Section.objects.get_or_create(key=\"product\", template=template)\n section.content = content_product\n section.save()\n\n def backwards(self, orm):\n \"Write your backwards methods here.\"\n template = orm.ContentTemplate.objects.get(key=\"join_club\", category=1)\n orm.Section.objects.filter(template=template).delete()\n template.delete()\n\n models = {\n 'cms.contenttemplate': {\n 'Meta': {'ordering': \"['category', '-key']\", 'object_name': 'ContentTemplate'},\n 'category': ('django.db.models.fields.IntegerField', [], {}),\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),\n 'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),\n 'variables_legend': ('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['cms.Variable']\", 'symmetrical': 'False'})\n },\n 'cms.section': {\n 'Meta': {'object_name': 'Section'},\n 'content': ('django.db.models.fields.TextField', [], {}),\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'key': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),\n 'template': ('django.db.models.fields.related.ForeignKey', [], {'related_name': \"'sections'\", 'to': \"orm['cms.ContentTemplate']\"})\n },\n 'cms.variable': {\n 'Meta': {'object_name': 'Variable'},\n 'description': ('django.db.models.fields.CharField', [], {'max_length': '128'}),\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'var': ('django.db.models.fields.CharField', [], {'max_length': '128'})\n }\n }\n\n complete_apps = ['cms']\n symmetrical = True\n","repo_name":"RSV3/nuvine","sub_path":"cms/migrations/0028_add_join_club_content.py","file_name":"0028_add_join_club_content.py","file_ext":"py","file_size_in_byte":5609,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"33430745019","text":"\n# Lists repos of the given github user\n\nimport requests\nimport sys\n\nuser = input(\"Enter github username : \")\nURL = f\"https://api.github.com/users/{user}/repos\"\n\nresp = requests.get(URL)\nif resp.status_code != 200:\n print(\"Sorry! Username is invalid!\")\n sys.exit(1)\n\nrepos = resp.json()\n\nif len(repos) == 0:\n print(\"Sorry! 
No repos found!\")\n sys.exit(2)\n \nfor repo in repos:\n print(repo['name'])\n\n\n","repo_name":"srikanthpragada/demo_python_10_OCT_2019","sub_path":"libdemo/list_git_repos.py","file_name":"list_git_repos.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"11303647108","text":"#!/Users/varamesh/anaconda/bin/python3\n\nimport sys\n\nlines = sys.stdin.read().splitlines()\n\nsequence = lines[0]\nsize = int(lines[1])\n\ndef PatternCount (seq, pat):\n\n\tcount = 0\n\n\tfor i in range(0, len(seq) - len(pat) + 1):\n\t\tif (seq[i:i+len(pat)] == pat):\n\t\t\tcount = count + 1\n\n\treturn count\n\n\ndef FrequentWords (seq, size):\n\n\tFrequentPat = set()\n\tcount = list()\n\tmaxcount = 0\n\n\tfor i in range(0, len(seq) - size + 1):\n\t\t\n\t\tcount.append(PatternCount(seq, seq[i:i+size]))\n\t\tif (count[i] > maxcount):\n\t\t\tmaxcount = count[i]\n\n\tfor i in range(0, len(seq) - size + 1):\n\n\t\tif(count[i] == maxcount):\n\t\t\tFrequentPat |= { seq[i:i+size] }\n\n\treturn FrequentPat\n\n\n# print(PatternCount(sequence,pattern))\n\nprint(\" \".join(FrequentWords(sequence, size)))","repo_name":"VAR121/Code_Challanges","sub_path":"Coursera/Bio_Info_1/Find_Pattern.py","file_name":"Find_Pattern.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"21256074143","text":"from pyspark.mllib.regression import LabeledPoint\nfrom pyspark.mllib.tree import DecisionTree\nfrom pyspark import SparkConf, SparkContext\nimport numpy as np\nimport pandas as pd\nimport os\n\nfrom pyspark.mllib.classification import NaiveBayes, NaiveBayesModel\nfrom pyspark.mllib.util import MLUtils\n\n# setting the spark context and app name \nconf = SparkConf().setMaster(\"local\").setAppName(\"HousingJobSpark\")\nsc = SparkContext(conf=conf)\n\ndef load_data(file_path, file_name):\n full_path = os.path.join(file_path, file_name)\n return pd.read_csv(full_path)\n\ndef mapOceanProximity(proximity):\n\n if(proximity == \"NEAR BAY\"):\n return 1\n \n elif(proximity == \"INLAND\"):\n return 1\n \n elif(proximity ==\"<1H OCEAN\"):\n return 0\n \n elif(proximity==\"NEAR OCEAN\"):\n return 1\n \n elif(proximity ==\"ISLAND\"):\n return 1\n \n # else:\n # return 0\n\n\ndef createLabeledPoint(fields):\n\n longitude = float(fields[0])\n latitude = float(fields[1])\n housing_median_age = float(fields[2])\n total_rooms = float(fields[3])\n total_bedrooms = int(fields[4])\n population = float(fields[4])\n households = float(fields[5])\n median_income = float(fields[6])\n median_house_value = float(fields[7])\n ocean_proximity = mapOceanProximity(fields[8])\n\n return LabeledPoint(ocean_proximity, np.array([longitude, latitude, housing_median_age, total_rooms,\n total_bedrooms, population, households, median_income, median_house_value]))\n\n\nDATA_PATH = os.path.join(os.getcwd(),'datasets', 'housing')\nFILE_NAME = \"housing.csv\"\nhousing_data = load_data(DATA_PATH, FILE_NAME)\n\n# print(housing_data)\n\nraw_data_rdd = sc.textFile(os.path.join(DATA_PATH, FILE_NAME))\nheader = raw_data_rdd.first()\nraw_data_rdd = raw_data_rdd.filter(lambda x: x != header)\n\nraw_data_rdd = raw_data_rdd.map(lambda x: x.split(\",\"))\n\n# training_data_rdd, test_data_rdd = raw_data_rdd.randomSplit(weights=[8.0,2.0], seed=1) # raw_data_rdd.map(createLabeledPoint)\n\n# training_data_rdd, test_data_rdd = training_data_rdd.map(createLabeledPoint), 
test_data_rdd.map(createLabeledPoint)\n\n# model = NaiveBayes.train(training_data_rdd, 1.0)\n\n# prediction_and_label = test_data_rdd.map(lambda p: (model.predict(p.features), p.label))\n\n# accuracy = 1.0 * prediction_and_label.filter(lambda pl: pl[0] == pl[1]).count()/test_data_rdd.count()\n\n# print(\"model accuracy {}\".format(accuracy))\n\n\n\n\n\n\n\n","repo_name":"tlmolane/pyspark","sub_path":"housing_spark_job.py","file_name":"housing_spark_job.py","file_ext":"py","file_size_in_byte":2442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"71069917221","text":"from newgen.models import *\nfrom django.contrib.auth.models import User\nimport datetime\n\ndef AddToolReportNote(tool_name):\n\tnote_id = 0\n\tif str(ToolsReport.objects.all().order_by('-note_id')[:1]) ==\"\":\n\t\tnote_id = 1\n\telse:\n\t\tfor x in ToolsReport.objects.all().order_by('-note_id')[:1]:\n\t\t\tnote_id = x.note_id\n\tnew_note = ToolsReport()\n\tuser = User.objects.filter(username = \"tools\").first()\n\tnew_note.note_id = note_id + 1\n\tnew_note.date = datetime.date.today()\n\tnew_note.tool_name = Tools.objects.filter(name = tool_name).first()\n\tnew_note.worker_id = Worker.objects.filter(user = user.id).first()\n\tnew_note.save()\n\ndef AddTempToolTransfer(tool,worker, comments):\n\tnote_id = 0\n\tif str(TempToolTransfers.objects.all().order_by('-note_id')[:1]) ==\"\":\n\t\tnote_id = 1\n\telse:\n\t\tfor x in TempToolTransfers.objects.all().order_by('-note_id')[:1]:\n\t\t\tnote_id = x.note_id\n\tnote = TempToolTransfers()\n\n\tif str(TempToolTransfers.objects.filter(worker_id = worker.worker_id, tool_id = tool.tool_id)) ==\"\":\n\t\tnote.note_id = note_id + 1\n\t\tnote.worker_id = worker\n\t\tnote.tool_id = tool\n\t\tnote.note = comments\n\t\ttry:\n\t\t\tnote.save()\n\t\t\treturn True\n\t\texcept:\n\t\t\tpass\n\treturn False\n\ndef AddTool(tool_name):\n\ttool_id = 0\n\tif str(Tools.objects.all().order_by('-tool_id')[:1]) ==\"\":\n\t\ttool_id = 1\n\telse:\n\t\tfor x in Tools.objects.all().order_by('-tool_id')[:1]:\n\t\t\ttool_id = x.tool_id\n\n\tif str(Tools.objects.filter(name = tool_name)) ==\"\":\n\t\ttool = Tools()\n\t\ttool.name = tool_name\n\t\ttool.tool_id = tool_id + 1\n\t\ttry:\n\t\t\ttool.save()\n\t\t\treturn True\n\t\texcept Exception as e:\n\t\t\tpass \n\treturn False \n\ndef GetToolTransfers(worker):\n\ttransfers = []\n\tfor transfer in TempToolTransfers.objects.filter(worker_id = worker.worker_id):\n\t\ttransfers.append(transfer)\n\treturn transfers\n\ndef AddToolForward(tool,worker,comment):\n\tnote_id = 0\n\tif str(ToolsForwarding.objects.all().order_by('-note_id')[:1]) ==\"\":\n\t\tnote_id = 1\n\telse:\n\t\tfor x in ToolsForwarding.objects.all().order_by('-note_id')[:1]:\n\t\t\tnote_id = x.note_id\n\n\tnote = ToolsForwarding()\n\tnote.note_id = note_id + 1\n\tnote.date = datetime.date.today().strftime(\"%Y-%m-%d\")\n\tnote.tool_name = tool\n\tnote.worker_id = worker\n\tif comment != \"\":\n\t\tnote.note = comment\n\telse:\n\t\tnote.note = \"Замечаний нет\"\n\tnote.save()\n\ndef GetTool(tool_name):\n\ttry:\n\t\treturn Tools.objects.filter(name = tool_name).first()\n\texcept Exception as e:\n\t\tprint(\"Undefined tool\")\n\t\treturn None\n\ndef DeleteTool(tool_name):\n\ttry:\n\t\tTools.objects.filter(name = tool_name).first().delete()\n\texcept Exception as e:\n\t\treturn False\n\treturn True\n\ndef GetWorkerTools(worker):\n\ttools = []\n\tfor note in ToolsReport.objects.filter(worker_id = 
worker.worker_id):\n\t\ttools.append(note.tool_name)\n\treturn tools\n\ndef RefreshToolReportNote(tool,worker):\n\ttool_obj = tool\n\treport = ToolsReport.objects.filter(tool_name = tool_obj.tool_id).first()\n\treport.worker_id = worker\n\treport.date = datetime.date.today()\n\n\tdel tool_obj\n\treport.save()\n\n\n\n\n\n\n\n\n\n","repo_name":"Ollavu/IT","sub_path":"newgen/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":3033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"26517748993","text":"# buyfield = 'close_50_ema_xu_close_100_ema'\t\n# sellfield = 'close_50_ema_xd_close_100_ema'\nfrom sqlalchemy import types, create_engine, Table,MetaData, column, select, update, insert\nfrom sqlalchemy.orm import sessionmaker\nimport pandas as pd\nimport numpy as np\nfrom datetime import date, timedelta,datetime\nfrom sqlalchemy import types, create_engine\nfrom sqlalchemy.orm import sessionmaker\nimport pandas as pd\nimport numpy as np\nfrom datetime import date, timedelta,datetime\nfrom stockstats import StockDataFrame as Sdf\nimport statsmodels.formula.api as sm\nimport talib\n\n\nengine = create_engine('mysql+pymysql://root:mysql@localhost/stocks', echo=False)\nallsig = pd.DataFrame(columns=['symbol','timestamp','close'])\n\nnifty500 = pd.read_csv(\"ind_nifty500list.csv\")\nsymbols500 = list(nifty500['Symbol'].unique())\n\ndf = pd.read_sql('SELECT * FROM eq_eod_data e order by STR_TO_DATE(e.TIMESTAMP, \"%%d-%%M-%%Y\")', con=engine)\n# df = pd.read_sql('SELECT * FROM eq_eod_data e where e.SYMBOL =\"SWANENERGY\" order by STR_TO_DATE(e.TIMESTAMP, \"%%d-%%M-%%Y\")', con=engine)\n\ndf['TIMESTAMP'] = pd.to_datetime(df['TIMESTAMP'])\n\nlookbackperiod = 3\n\n\ntrades = pd.DataFrame(columns=['SYMBOL','momscore','buydate', 'buyprice', 'todayprice'])\nfor i in range(20 , 1,-1) :\n\tmomentumscores = pd.DataFrame(columns=['SYMBOL','momscore','buyprice', 'todayprice'])\n\tarr =[]\n\tfor j in range(0, len(symbols500)):\n\t\t# if symbols500[j] == \"SWANENERGY\" :\n\t\t\tsymdata = df[df['SYMBOL'] == symbols500[j]]\n\t\t\t# symdata['ema'] = talib.EMA(np.array(symdata['CLOSE']),timeperiod=150)\n\t\t\tsymdata['date'] = symdata['TIMESTAMP']\n\t\t\tsymdata = symdata.set_index('TIMESTAMP')\n\t\t\ttest = symdata.groupby(pd.TimeGrouper('M')).date.max()\n\t\t\ttest = test.reset_index(level=0)\n\t\t\t\n\t\t\ttesting = symdata[symdata['date'].isin(list(test['date']))]\n\t\t\ttestdata = testing.CLOSE.pct_change() + 1\n\t\t\tlength = len(testdata) - 1 -i\n\t\t\ttemp = testdata[length-lookbackperiod : length]\n\t\t\t# bought = trades[trades['SYMBOL'] == symbols500[j]]\n\n\n\t\t\t# if len(bought) > 1 : \n\t\t\t# \tif testing['CLOSE'][length] < testing['ema'][length] :\n\n\t\t\t# \t\tprint( symbols500[j] , \"bought\",bought['buyprice'][0],bought['buydate'][0], \"sold \" , \"price\", testing['CLOSE'][length] , \"date\" , testing['date'][length] )\n\t\t\t\t\n\t\t\tmul=0\n\t\t\tif (len(temp) > lookbackperiod -1) :\n\t\t\t\t# mul = ((temp[0] *temp[1]*temp[2] *temp[3]*temp[4] *temp[5]) -1)*100\n\t\t\t\tmul = 1\n\t\t\t\tfor k in range(0, lookbackperiod) :\n\t\t\t\t\t\n\t\t\t\t\tmul = mul * temp[k]\n\t\t\t\t# mul = ((temp[0] *temp[1]*temp[2] *temp[3]*temp[4] *temp[5]*temp[6] *temp[7]*temp[8] *temp[9]*temp[10] *temp[11]) -1)*100\n\t\t\t\tif np.isnan(mul) :\n\t\t\t\t\tmul = 0\n\t\t\t\telse : \n\t\t\t\t\tmul = (mul -1) * 100\n\t\t\t\tarr.append((symbols500[j], mul,testing['date'][length], testing['CLOSE'][length], testing['CLOSE'][-1]))\n\t\t# 
momentumscores.append({'SYMBOL': symbols500[j],'momscore' : mul,'buyprice' : testing['CLOSE'][length], 'todayprice' : testing['CLOSE'][-1]})\n\n\tmomentumscores = pd.DataFrame(arr)\n\tmomentumscores.columns = ['SYMBOL','momscore','buydate', 'buyprice', 'todayprice']\n\tmomentumscores = momentumscores.sort_values('momscore',ascending =False)\n\ttrades = trades.append(momentumscores.head(15))\n\tprint(momentumscores.head(15))\n\t\n# print(trades)\n# print(symbols500[j], mul, testing['CLOSE'][length], testing['CLOSE'][-1])","repo_name":"cvrkishore86/stockapps","sub_path":"pyeoddownloader/MomentumScore.py","file_name":"MomentumScore.py","file_ext":"py","file_size_in_byte":3277,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"74830034019","text":"import plotly.express as px\nimport plotly.graph_objects as go\n\ndf = px.data.tips()\n\nfig = go.Figure()\nfig.add_trace(go.Violin(\n x=df['day'][df['smoker'] == 'Yes'],\n y=df['total_bill'][df['smoker'] == 'Yes'],\n legendgroup='Yes',\n scalegroup='Yes',\n name='Yes',\n side='negative',\n line_color='blue'\n))\nfig.add_trace(go.Violin(\n x=df['day'][df['smoker'] == 'No'],\n y=df['total_bill'][df['smoker'] == 'No'],\n legendgroup='No',\n scalegroup='No',\n name='No',\n side='positive',\n line_color='orange'\n))\nfig.update_traces(meanline_visible=True)\nfig.update_layout(violingap=0.5, violinmode='overlay')\nfig.show()\n","repo_name":"jiaweim/plot-note","sub_path":"src/plotly_test/violin_overlay_go.py","file_name":"violin_overlay_go.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"10053076874","text":"N, K = map(int, input().split())\n\ncoins = [int(input()) for _ in range(N)]\n\n\ncnt = 0\n\nfor i in range(N-1, -1, -1):\n coin = coins[i]\n cnt += (K // coin)\n K %= coin\n if coin == 0:\n break\n\nprint(cnt)","repo_name":"ohwoo-kwon/algorithm","sub_path":"baekjun/11047.py","file_name":"11047.py","file_ext":"py","file_size_in_byte":215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"11039868399","text":"import glob\nimport random\nimport os\nimport cv2\n\nfrom torch.utils.data import Dataset\nfrom PIL import Image\nimport torchvision.transforms as transforms\n\n\ndef to_rgb(image):\n rgb_image = Image.new(\"RGB\", image.size)\n rgb_image.paste(image)\n return rgb_image\n\n\nclass ImageDataset(Dataset):\n def __init__(self, root, transforms_= None, img_w=720, img_h=480, patch_size=10,unaligned=False, mode=\"train\"):\n self.transform = transforms.Compose(transforms_)\n self.unaligned = unaligned\n\n #self.files_A = sorted(glob.glob(os.path.join(root, \"%s/data\" % mode) + \"/*.*\"))\n #self.files_B = sorted(glob.glob(os.path.join(root, \"%s/gt\" % mode) + \"/*.*\"))\n self.files_A = sorted(glob.glob(os.path.join(root, \"raindrop\") + \"/*.*\"))\n self.files_B = sorted(glob.glob(os.path.join(root, \"gt\") + \"/*.*\"))\n self.mode = mode\n\n def __getitem__(self, index):\n image_A = Image.open(self.files_A[index % len(self.files_A)])\n\n if self.unaligned:\n image_B = Image.open(self.files_B[random.randint(0, len(self.files_B) - 1)])\n else:\n image_B = Image.open(self.files_B[index % len(self.files_B)])\n\n # Convert grayscale images to rgb\n if image_A.mode != \"RGB\":\n image_A = to_rgb(image_A)\n if image_B.mode != \"RGB\":\n image_B = to_rgb(image_B)\n\n \n if self.mode == \"test\":\n A_patch_list = []\n B_patch_list = []\n 
w_ps_range = img_w // patch_size\n h_ps_range = img_h // patch_size\n for i in range(0, w_ps_range):\n for j in range(0, h_ps_range):\n A_patch_list.append(image_A[i*patch_size:(i+1)*patch_size, j*patch_size:(j+1)*patch_size, :])\n B_patch_list.append(image_B[i*patch_size:(i+1)*patch_size, j*patch_size:(j+1)*patch_size, :])\n\n item_A = [self.transform(patch_a) for patch_a in A_patch_list]\n item_B = [self.transform(patch_b) for patch_b in B_patch_list]\n return {\"A\": item_A, \"B\": item_B}\n \n\n item_A = self.transform(image_A)\n item_B = self.transform(image_B)\n return {\"A\": item_A, \"B\": item_B}\n\n def __len__(self):\n return max(len(self.files_A), len(self.files_B))\n","repo_name":"HJ0Wang/Weakly-Supervised-Raindrop-Removal","sub_path":"datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":2283,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"531135889","text":"import random\n\nimport numpy as np\nfrom scipy.spatial import KDTree\n\n\nclass Retriever:\n \"\"\"\n General cosine-similarity based dense retriever, from:\n\n @article{hu2022context,\n title={In-Context Learning for Few-Shot Dialogue State Tracking},\n author={Hu, Yushi and Lee, Chia-Hsuan and Xie, Tianbao and Yu, Tao and Smith, Noah A and Ostendorf, Mari},\n journal={arXiv preprint arXiv:2203.08568},\n year={2022}\n }\n \"\"\"\n\n def normalize(self, emb):\n return emb / np.linalg.norm(emb, axis=-1, keepdims=True)\n\n def __init__(self, emb_dict):\n\n # to query faster, stack all embeddings and record keys\n self.emb_keys = list(emb_dict.keys())\n emb_dim = emb_dict[self.emb_keys[0]].shape[-1]\n\n self.emb_values = np.zeros((len(self.emb_keys), emb_dim))\n for i, k in enumerate(self.emb_keys):\n self.emb_values[i] = emb_dict[k]\n\n # normalize for cosine distance (kdtree only support euclidean when p=2)\n self.emb_values = self.normalize(self.emb_values)\n self.kdtree = KDTree(self.emb_values)\n\n def topk_nearest_dialogs(self, query_emb, k=5):\n query_emb = self.normalize(query_emb)\n if k == 1:\n return [self.emb_keys[i] for i in self.kdtree.query(query_emb, k=k, p=2)[1]]\n return [self.emb_keys[i] for i in self.kdtree.query(query_emb, k=k, p=2)[1][0]]\n\n def topk_nearest_distinct_dialogs(self, query_emb, k=5):\n return self.topk_nearest_dialogs(query_emb, k=k)\n\n def random_retrieve(self, k=5):\n return random.sample(self.emb_keys, k)\n\n\nclass IndexRetriever:\n \"\"\"\n General cosine-similarity based dense retriever, which filters search index to the appriate searchable dialogues,\n from:\n\n @article{hu2022context,\n title={In-Context Learning for Few-Shot Dialogue State Tracking},\n author={Hu, Yushi and Lee, Chia-Hsuan and Xie, Tianbao and Yu, Tao and Smith, Noah A and Ostendorf, Mari},\n journal={arXiv preprint arXiv:2203.08568},\n year={2022}\n }\n \"\"\"\n\n # sample selection\n @staticmethod\n def random_sample_selection_by_turn(embs, ratio=0.1):\n n_selected = int(ratio * len(embs))\n print(f\"randomly select {ratio} of turns, i.e. {n_selected} turns\")\n selected_keys = random.sample(list(embs), n_selected)\n return {k: v for k, v in embs.items() if k in selected_keys}\n\n @staticmethod\n def random_sample_selection_by_dialog(embs, ratio=0.1):\n dial_ids = set([turn_label.split('_')[0] for turn_label in embs.keys()])\n n_selected = int(len(dial_ids) * ratio)\n print(f\"randomly select {ratio} of dialogs, i.e. 
{n_selected} dialogs\")\n selected_dial_ids = random.sample(dial_ids, n_selected)\n return {k: v for k, v in embs.items() if k.split('_')[0] in selected_dial_ids}\n\n @staticmethod\n def pre_assigned_sample_selection(embs, examples):\n selected_dial_ids = set([dial['ID'] for dial in examples])\n return {k: v for k, v in embs.items() if k.split('_')[0] in selected_dial_ids}\n\n def __init__(self, datasets, embedding_filenames, search_index_filename, sampling_method=\"none\", ratio=1.0):\n\n # data_items: list of datasets in this notebook. Please include datasets for both search and query\n # embedding_filenames: list of strings. embedding dictionary npy files. Should contain embeddings of the datasets. No need to be same\n # search_index: string. a single npy filename, the embeddings of search candidates\n # sampling method: \"random_by_turn\", \"random_by_dialog\", \"kmeans_cosine\", \"pre_assigned\"\n # ratio: how much portion is selected\n\n self.data_items = []\n for dataset in datasets:\n self.data_items += dataset\n\n # save all embeddings and dial_id_turn_id in a dictionary\n self.all_embeddings = {}\n for fn in embedding_filenames:\n this_embs = np.load(fn, allow_pickle=True).item()\n for k, v in this_embs.items():\n self.all_embeddings[k] = v\n\n # load the search index embeddings\n self.search_embs = np.load(search_index_filename, allow_pickle=True).item()\n\n # sample selection of search index\n if sampling_method == \"none\":\n self.retriever = Retriever(self.search_embs)\n elif sampling_method == 'random_by_dialog':\n self.retriever = Retriever(self.random_sample_selection_by_dialog(self.search_embs, ratio=ratio))\n elif sampling_method == 'random_by_turn':\n self.retriever = Retriever(self.random_sample_selection_by_turn(self.search_embs, ratio=ratio))\n elif sampling_method == 'pre_assigned':\n self.retriever = Retriever(self.pre_assigned_sample_selection(self.search_embs, self.data_items))\n else:\n raise ValueError(\"selection method not supported\")\n\n def data_item_to_embedding(self, data_item):\n ID = data_item['ID']\n turn = data_item['turn_id']\n label = f\"{ID}_turn_{turn}\"\n\n return self.all_embeddings[label]\n\n def label_to_data_item(self, label):\n ID, _, turn_id = label.split('_')\n turn_id = int(turn_id)\n\n for d in self.data_items:\n if d['ID'] == ID and d['turn_id'] == turn_id:\n return d\n raise ValueError(f\"label {label} not found. 
check data items input\")\n\n def item_to_nearest_examples(self, data_item, k=5):\n # the nearest neighbor is at the end\n return [self.label_to_data_item(l)\n for l in self.retriever.topk_nearest_distinct_dialogs(\n self.data_item_to_embedding(data_item), k=k)\n ][::-1]\n\n def label_to_nearest_labels(self, label, k=5):\n data_item = self.label_to_data_item(label)\n return [l for l in self.retriever.topk_nearest_distinct_dialogs(\n self.data_item_to_embedding(data_item), k=k)\n ][::-1]\n\n def random_examples(self, data_item, k=5):\n return [self.label_to_data_item(l)\n for l in self.retriever.random_retrieve(k=k)\n ]\n","repo_name":"jlab-nlp/RefPyDST","sub_path":"src/refpydst/retriever/code/index_based_retriever.py","file_name":"index_based_retriever.py","file_ext":"py","file_size_in_byte":6102,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"35"} +{"seq_id":"13769470462","text":"# 4\n# link- https://pentester.land/list-of-bug-bounty-writeups.html\n\n'''\nArticles to be fetched won't be crossing 1 page in what I am implementing\n'''\n\n# imports\nimport os\nimport datetime\n\n# external imports\nfrom pymongo import MongoClient\nfrom bs4 import BeautifulSoup as soup\nimport requests, json\n\n# DB setup\nURI = os.getenv('MONGODB_URL')\nclient = MongoClient(URI)\ndb = client['hackArticles']\npentester_land_articles = db['pentester_land']\n\n\n# Bot send message function\ndef send_message(WEB_HOOK, message):\n send = requests.post(WEB_HOOK, data=json.dumps({ \"content\": message }), headers={ 'Content-Type': 'application/json',})\n \n return send.status_code\n\n##################################################################SCRAPER CODE####################################\n'''\nurl\nscraper\nresult\n'''\narticles = [] # nested list of [title, url]\n\nURL = 'https://pentester.land/list-of-bug-bounty-writeups.html'\n\ndef scraper():\n html = requests.get('https://pentester.land/list-of-bug-bounty-writeups.html').text\n page_soup = soup(html, 'html.parser')\n\n site_name = page_soup.title.text\n \n raw_articles = page_soup.findAll('tr')[1:]\n\n for raw_article in raw_articles:\n article = raw_article.findAll('td')\n try:\n articles.append([article[0].text, article[2].text, article[3].text, article[4].text, article[0].a['href']]) # Title, BBP. Vulnerability, Reward, Link\n except:\n pass\n message = {\n 'site_name': site_name,\n 'articles': articles\n }\n\n return message\n\ndef result(WEB_HOOK, CHAT_ID):\n try:\n articles = scraper().get('articles')\n for article in articles[:5]: # I am sure they are not adding more than 5 at a time ^_^\n if pentester_land_articles.find_one({'title': article[0], 'CHAT_ID':CHAT_ID}) is None: # add in the database and send to telegram\n pentester_land_articles.insert_one({\n 'title': article[0],\n 'BBP': article[1],\n 'Vulnerability': article[2],\n 'Reward': article[3],\n 'url': article[4],\n 'CHAT_ID': CHAT_ID,\n \"date\": datetime.datetime.utcnow()\n })\n\n message = article[4] + '''\\n\\nTitle: {}\\n\\nReward: {}'''.format(article[0], article[3])\n print(send_message(WEB_HOOK, message))\n\n except Exception as e:\n print('[!] 
Failure for pentester land articles')\n print(str(e))\n\n##################################################################SCRAPER CODE####################################","repo_name":"1UC1F3R616/CSICTF-BugBountyArticles","sub_path":"scrapes/pentester_land.py","file_name":"pentester_land.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"35"} +{"seq_id":"15976298160","text":"import sys\nimport os\nimport requests\nimport json\nimport sqlite3\n\n\n# Configuration\nGITHUB_REPO = 'airflow'\nDATASET_PATH = './path/to/apache.db'\nGITHUB_API_HEADERS = {\"Authorization\": \"token GITHUB_API_TOKEN\"}\n\n\nGITHUB_API_URL = 'https://api.github.com/repositories/'\n\n\ndef getRepoIDByRepoName(repo_name):\n \"\"\"Returns an GitHub repo ID from a repo name (Apache only)\"\"\"\n url = 'https://api.github.com/repos/apache/'+ repo_name\n r = requests.get(url, headers=GITHUB_API_HEADERS)\n\n return json.loads(r.text)['id']\n\n\ndef getChangedFilesByCommit(repo_id, commit_id):\n \"\"\"Returns the changed file information object from a specific git commit using the GitHub API.\"\"\"\n url = GITHUB_API_URL + str(repo_id) + '/commits/' + commit_id\n r = requests.get(url, headers=GITHUB_API_HEADERS)\n\n return json.loads(r.text)['files']\n\n\ndef getChangedFileContentsMultiple(files):\n \"\"\"Returns the raw contents of multiple files using the changed files' information object.\n (currently unused)\"\"\"\n raw_urls = [f['raw_url'] for f in files]\n raw_files = [requests.get(url, headers=GITHUB_API_HEADERS).text for url in raw_urls if url != None]\n\n return raw_files\n\n\ndef getChangedFileContents(file):\n \"\"\"Returns the raw contents of a single file using the changed file's information object.\"\"\"\n url = file['raw_url']\n try:\n raw_file = requests.get(url, headers=GITHUB_API_HEADERS).text\n return raw_file\n except Exception as e:\n print(\"ERROR: Unable to fetch file contents of file:\", file['filename'])\n print(e)\n return ''\n\n\n\ndef getLineNumberOfStringInFile(target, file):\n \"\"\"Searches raw file content (string) by line for the occurrence of a target SATD comment's text.\n Returns the line numbers of the lines for full-length target matches.\"\"\"\n matches = []\n\n file_split = file.splitlines()\n target_split = target.splitlines()\n\n for f_line_number, f_line in enumerate(file_split): # Process each line of the input file\n if target_split[0] in f_line: # First line of target matched in the file line -> potential match\n matched_lines = []\n for t_line_number, t_line in enumerate(target_split): # Check subsequent lines against the target string\n offset_line_number = f_line_number + t_line_number # File line number offset with the current line number of the checked potential match\n if offset_line_number < len(file_split) and t_line in file_split[offset_line_number]: # Target line matched in file line\n matched_lines.append(offset_line_number + 1) # + 1 for 1-indexed absolute line numbers\n else:\n break\n if(len(matched_lines) == len(target_split)): # Full-length match found\n match = (matched_lines, f_line)\n matches.append(match)\n\n return matches\n\n\n\nif __name__ == '__main__':\n result = []\n\n con = sqlite3.connect(DATASET_PATH)\n cur = con.cursor()\n\n github_repo_id = getRepoIDByRepoName(GITHUB_REPO)\n print(\"Repo ID:\", github_repo_id)\n sys.stdout.flush()\n\n # Get and print number of SATD items to process\n q = 'SELECT COUNT(a.id) \\\n FROM git_comment a \\\n JOIN git_comment_satd b ON 
a.id = b.id \\\n WHERE b.label_id != 0 \\\n AND a.repo_id = ' + str(github_repo_id) + ';'\n\n rows = cur.execute(q)\n satd_item_count = rows.fetchone()[0]\n\n print('\\n\\n\\t\\tPROCESSING', satd_item_count, 'SATD ITEMS...\\n\\n')\n sys.stdout.flush()\n\n\n # Query that collects the git comment SATD items that are to be processed from the dataset\n q = 'SELECT repo_id, sha, comment, b.id, c.label, c.short_label \\\n FROM git_comment a \\\n JOIN git_comment_satd b ON a.id = b.id \\\n JOIN satd_label c ON b.label_id = c.id \\\n WHERE b.label_id != 0 \\\n AND a.repo_id = ' + str(github_repo_id) + ';'\n\n rows = cur.execute(q)\n\n\n idx = 1\n for row in rows:\n found = False # Flag to indicate if the comment text was found in any of the commit's changed files\n changed_files = getChangedFilesByCommit(row[0], row[1]) # Get list of changed files\n \n # Some printing to show where we're at\n try:\n print(\"(\"+str(idx)+\"/\"+str(satd_item_count)+\")\", \"MATCHES FOR: '\", row[2].splitlines()[0][:100], \"'\")\n except Exception as e:\n print(\"ERROR\")\n print(e)\n print(\"\\n\")\n sys.stdout.flush()\n idx += 1\n continue\n url = GITHUB_API_URL + str(row[0]) + '/commits/' + row[1]\n print(\"\\tREFERENCE URL:\\t\" + url)\n sys.stdout.flush()\n\n # Process changed files of the SATD comment's commit\n for file in changed_files:\n raw_content = getChangedFileContents(file) # Get raw file content\n match = getLineNumberOfStringInFile(row[2], raw_content) # Find target comment text in file\n if match != []:\n print(\"\\n\\t\\tIN FILE: '\", file['filename'], \"'\")\n found = True\n for m in match: \n print(\"\\t\\t\\t\", m)\n result.append({'satd_id': row[3], 'satd_repo': row[0], 'satd_sha': row[1], 'satd_text': row[2], 'satd_label': row[4],'satd_label_short': row[5], 'file': file['filename'], 'lines': m[0], 'matched_text': m[1]})\n sys.stdout.flush()\n if not found:\n print(\"\\t!!! 
NO MATCHES FOUND FOR '\", row[2].splitlines()[0][:100], \"' !!!\")\n result.append({'satd_id': row[3], 'satd_repo': row[0], 'satd_sha': row[1],'satd_text': row[2], 'satd_label': row[4],'satd_label_short': row[5], 'file': False, 'lines': False, 'matched_text': None})\n sys.stdout.flush()\n\n idx += 1\n\n print(\"\\n\")\n sys.stdout.flush()\n\n\n print(\"\\n\\tDUMPING OUTPUT TO FILE...\\n\")\n sys.stdout.flush()\n # Output result to JSON file for future use\n with open('./results/' + GITHUB_REPO + '_line_numbers.json', 'w') as outfile:\n json.dump(result, outfile)\n\n\n print(\"\\n\\t\\tDONE!\\n\")\n sys.stdout.flush()","repo_name":"wmeijer221/ebse","sub_path":"scripts/satd_tracing/GitSATDLineNumbers.py","file_name":"GitSATDLineNumbers.py","file_ext":"py","file_size_in_byte":5667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"71987536742","text":"import unittest\n\nfrom hpcbench.benchmark.shoc import SHOC\nfrom .benchmark import AbstractBenchmarkTest\n\n\nclass TestShoc(AbstractBenchmarkTest, unittest.TestCase):\n EXPECTED_METRICS = dict(\n h2d_bw=6.0504,\n d2h_bw=6.7152,\n flops_sp=3098.2900,\n flops_dp=1164.7500,\n gmem_readbw=147.9450,\n gmem_writebw=139.5960,\n lmem_readbw=1042.4400,\n lmem_writebw=925.3470,\n sgemm_n=579.3240,\n dgemm_n=75.6221,\n )\n\n def get_benchmark_clazz(self):\n return SHOC\n\n def get_expected_metrics(self, category):\n return TestShoc.EXPECTED_METRICS\n\n def get_benchmark_categories(self):\n return [self.get_benchmark_clazz().CATEGORY]\n\n @property\n def attributes(self):\n return dict(executable='/path/to/fake')\n\n def test_extra_attributes(self):\n self.assertExecutionMatrix(\n dict(size=42, executable='/fake'),\n [dict(category='gpu', command=['/fake', '-cuda', '-d', '0', '-s', '42'])],\n )\n self.assertExecutionMatrix(\n dict(device=42, executable='/fake'),\n [dict(category='gpu', command=['/fake', '-cuda', '-d', '42', '-s', '1'])],\n )\n self.assertExecutionMatrix(\n dict(executable='/fake', options='uber option'),\n [\n dict(\n category='gpu',\n command=['/fake', '-cuda', '-d', '0', '-s', '1', 'uber', 'option'],\n )\n ],\n )\n self.assertExecutionMatrix(\n dict(executable='/fake', options=['uber', 'option']),\n [\n dict(\n category='gpu',\n command=['/fake', '-cuda', '-d', '0', '-s', '1', 'uber', 'option'],\n )\n ],\n )\n","repo_name":"BlueBrain/hpcbench","sub_path":"tests/benchmark/test_shoc.py","file_name":"test_shoc.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"16871070140","text":"from cli.cli import SimpleCLI\n\nfrom models.manufacturer import Manufacturer\nfrom models.part import Part\nfrom utils import print_record_fields\n\nclass ManufacturerCLI(SimpleCLI):\n def __init__(self, manufacturer_model: Manufacturer, parts_model: Part):\n super().__init__()\n self.manufacturer_model = manufacturer_model\n self.parts_model = parts_model\n\n self.add_command((\"create_manufacturer\", self.create_manufacturer))\n self.add_command((\"get_all_manufacturers\", self.get_all_manufacturers))\n self.add_command((\"get_all_manufacturer_by_city\", self.get_all_manufacturer_by_city))\n self.add_command((\"get_manufacturer_by_name\", self.get_manufacturer_by_name))\n self.add_command((\"update_manufacturer_city\", self.update_manufacturer_city))\n self.add_command((\"update_manufacturer_year_of_foundation\", self.update_manufacturer_year_of_foundation))\n 
self.add_command((\"update_manufacturer_telephone\", self.update_manufacturer_telephone))\n self.add_command((\"delete_manufacturer\", self.delete_manufacturer))\n self.add_command((\"create_produces_part_rel\", self.create_produces_part_rel))\n self.add_command((\"get_all_parts_made\", self.get_all_parts_made))\n\n def create_manufacturer(self):\n name = input(\"Enter Manufacturer name: \")\n city = input(\"Enter city: \")\n year_of_foundation = int(input(\"Enter year of foundation: \"))\n telephone = input(\"Enter telephone: \")\n self.manufacturer_model.create_manufacturer(name, city, year_of_foundation, telephone)\n print(\"Manufacturer created successfully!\")\n\n def get_all_manufacturers(self):\n manufacturers = self.manufacturer_model.get_all_manufacturers()\n print(\"All Manufacturers:\")\n for manufacturer in manufacturers:\n print_record_fields(manufacturer)\n\n def get_all_manufacturer_by_city(self):\n city = input(\"Enter city: \")\n manufacturers = self.manufacturer_model.get_all_manufacturer_by_city(city)\n print(f\"Manufacturers in {city}:\")\n for manufacturer in manufacturers:\n print_record_fields(manufacturer)\n\n def get_manufacturer_by_name(self):\n name = input(\"Enter Manufacturer name: \")\n manufacturer = self.manufacturer_model.get_manufacturer_by_name(name)\n print(\"Manufacturer details:\")\n if len(manufacturer) > 0:\n print_record_fields(manufacturer[0])\n\n def update_manufacturer_city(self):\n name = input(\"Enter Manufacturer name: \")\n new_city = input(\"Enter new city: \")\n updated_manufacturer = self.manufacturer_model.update_manufacturer_city(name, new_city)\n print(\"Updated Manufacturer:\")\n if len(updated_manufacturer) > 0:\n print_record_fields(updated_manufacturer[0])\n\n def update_manufacturer_year_of_foundation(self):\n name = input(\"Enter Manufacturer name: \")\n new_year = int(input(\"Enter new year of foundation: \"))\n updated_manufacturer = self.manufacturer_model.update_manufacturer_year_of_foundation(name, new_year)\n print(\"Updated Manufacturer:\")\n if len(updated_manufacturer) > 0:\n print_record_fields(updated_manufacturer[0])\n\n def update_manufacturer_telephone(self):\n name = input(\"Enter Manufacturer name: \")\n new_telephone = input(\"Enter new telephone: \")\n updated_manufacturer = self.manufacturer_model.update_manufacturer_telephone(name, new_telephone)\n print(\"Updated Manufacturer:\")\n if len(updated_manufacturer) > 0:\n print_record_fields(updated_manufacturer[0])\n\n def delete_manufacturer(self):\n name = input(\"Enter Manufacturer name: \")\n self.manufacturer_model.delete_manufacturer(name)\n print(\"Manufacturer deleted successfully!\")\n\n def create_produces_part_rel(self):\n manufacturer_name = input(\"Enter Manufacturer name: \")\n part_name = input(\"Enter Part name: \")\n self.manufacturer_model.create_manufacturer_produces_part_rel(manufacturer_name, part_name)\n print(\"Relationship created: Manufacturer produces Part\")\n\n def get_all_parts_made(self):\n manufacturer_name = input(\"Enter Manufacturer name: \")\n parts = self.manufacturer_model.get_all_parts_made(manufacturer_name)\n if len(parts) > 0: \n print(\"All parts made by manufacturer\")\n for part in parts:\n print_record_fields(part)\n\n def run(self):\n print(\"Welcome to the Manufacturer CLI!\")\n 
super().run()\n","repo_name":"gabieltiso-inatel/projeto-banco-de-dados-2-lab","sub_path":"cli/manufacturer_cli.py","file_name":"manufacturer_cli.py","file_ext":"py","file_size_in_byte":4569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"39906879756","text":"import sys\n\nimport pyspark.sql.functions\nfrom pyspark.sql.functions import *\nfrom pyspark.sql.types import StructType, StringType, StructField\n\nfrom dynamodb_utils import get_rule\nfrom lib.logger import Log4j\nfrom lib.utils import *\nimport pydeequ\nfrom pydeequ.suggestions import *\n\n\ndef gt(n):\n return col(\"pass_percentage\") > n\n\n\ndef lt(n):\n return col(\"pass_percentage\") < n\n\n\ndef eq(n):\n return col(\"pass_percentage\") == n\n\n\ndef ge(n):\n return col(\"pass_percentage\") >= n\n\n\ndef le(n):\n return col(\"pass_percentage\") <= n\n\n\ndq_rule = {\n \"source\": \"nhs\",\n \"entity\": \"patient\",\n \"cde\": [\"patient_id\", \"age\"],\n \"kde\": [\"country\", \"state\"],\n \"rules_run_on\": [\"cde\", \"kde\"],\n \"primary_key\": \"patient_id\",\n \"single_column_validation_rules\": {\n \"patient_id\": [\n {\n \"rule_specification\": \"Null Check\",\n \"rule_name\": \"patient_id_not_null\",\n \"rule\": \"patient_id is not null\",\n \"rule_type\": \"non_aggregate\",\n \"dimension\": \"completeness\",\n \"is_active\": 1,\n \"slo\": \"gt(90)\"\n },\n {\n \"rule_specification\": \"Uniqueness Check\",\n \"rule_name\": \"patient_id_uniqueness\",\n \"rule\": \"uniqueness\",\n \"rule_type\": \"aggregate\",\n \"dimension\": \"uniqueness\",\n \"is_active\": 1,\n \"slo\": \"gt(90)\"\n }\n ],\n \"country\": [\n {\n \"rule_specification\": \"Conditional Check\",\n \"rule_name\": \"country_has\",\n \"rule\": \"country in ('United States','United Kingdom')\",\n \"dimension\": \"conformity\",\n \"is_active\": 1,\n \"slo\": \"gt(90)\"\n },\n {\n \"rule_specification\": \"Null Check\",\n \"rule_name\": \"country_not_null\",\n \"rule\": \"country is not null\",\n \"dimension\": \"completeness\",\n \"is_active\": 1,\n \"slo\": \"gt(90)\"\n }\n ]\n }\n}\n\n# dq_rule = get_rule(\"nhs\", \"patient\")\nrule_elements = []\nif \"cde\" in dq_rule['rules_run_on'] and \"kde\" in dq_rule['rules_run_on']:\n rule_elements = dq_rule['cde'] + dq_rule['kde']\nelif \"cde\" in dq_rule['rules_run_on'] and \"kde\" not in dq_rule['rules_run_on']:\n rule_elements = dq_rule['cde']\nelif \"cde\" not in dq_rule['rules_run_on'] and \"kde\" in dq_rule['rules_run_on']:\n rule_elements = dq_rule['kde']\nrules = dq_rule['single_column_validation_rules']\n\nif __name__ == \"__main__\":\n conf = get_spark_app_config()\n print(rules)\n spark = SparkSession \\\n .builder \\\n .appName(\"HelloSpark\") \\\n .master(\"local[2]\") \\\n .config(\"spark.jars.packages\", pydeequ.deequ_maven_coord) \\\n .config(\"spark.jars.excludes\", pydeequ.f2j_maven_coord) \\\n .getOrCreate()\n\n logger = Log4j(spark)\n\n if len(sys.argv) != 2:\n logger.error(\"Usage: HelloSpark \")\n sys.exit(-1)\n\n logger.info(\"Starting HelloSpark\")\n\n survey_raw_df = load_survey_df(spark, sys.argv[1])\n partitioned_survey_df = survey_raw_df.repartition(2)\n partitioned_survey_df.show()\n rule = \"isnull(patient_id)\"\n test_list = [\"patient_id\", \"country\"]\n rule_df = partitioned_survey_df.select(test_list).withColumn(\"length_of_country_id_gt_0\",\n expr(\"Country in('United States','United Kingdom')\"))\n\n results_df = spark.createDataFrame([], StructType([]))\n metrics_df = spark.createDataFrame([], StructType([]))\n count 
= 0\n for element, rules_list in rules.items():\n if element in rule_elements:\n for rule in rules_list:\n if rule[\"is_active\"] == 1 and rule[\"rule_type\"] == \"non_aggregate\":\n current_df = partitioned_survey_df.select(test_list).withColumn(\"rule_applied_on\", lit(element)) \\\n .withColumn(\"source\", lit(dq_rule['source'])) \\\n .withColumn(\"entity\", lit(dq_rule['entity'])) \\\n .withColumn(\"rule\", lit(rule['rule'])) \\\n .withColumn(\"rule_name\", lit(rule['rule_name'])) \\\n .withColumn(\"dimension\", lit(rule['dimension'])) \\\n .withColumn(\"value\", partitioned_survey_df[element]) \\\n .withColumn(\"result\", expr(rule['rule'])) \\\n .withColumn(\"run_date\", current_timestamp())\n current_metrics_df = current_df.select('source', 'entity', 'rule_applied_on', 'rule', 'rule_name',\n 'dimension', 'result') \\\n .withColumn('success_record', when(col('result') == 'true', 1).otherwise(0)) \\\n .withColumn('failure_record', when(col('result') == 'false', 1).otherwise(0)) \\\n .groupBy('source', 'entity', 'rule_applied_on', 'rule', 'rule_name', 'dimension') \\\n .agg(sum(col('success_record')).alias(\"success_count\"),\n sum(col('failure_record')).alias(\"failure_count\"),\n pyspark.sql.functions.count('*').alias('count')) \\\n .withColumn(\"pass_percentage\", round(col(\"success_count\") / col(\"count\") * 100, 2)) \\\n .withColumn(\"constraint_status\", eval(rule['slo']))\n if count == 0:\n results_df = current_df\n metrics_df = current_metrics_df\n count += 1\n else:\n results_df = results_df.union(current_df)\n metrics_df = metrics_df.union(current_metrics_df)\n elif rule[\"is_active\"] == 1 and rule[\"rule_type\"] == \"aggregate\":\n agg_metrics_df = partitioned_survey_df.agg(\n sumDistinct(element).alias('distinct'),\n pyspark.sql.functions.count('*').alias('count')\n ).withColumn('percentage', round(col(\"distinct\")/col(\"count\") * 100, 2))\n if count == 0:\n metrics_df = agg_metrics_df\n count += 1\n else:\n metrics_df = metrics_df.union(agg_metrics_df)\n\n results_df.show(100, truncate=False)\n metrics_df.show(truncate=False)\n\n logger.info(\"Finished HelloSpark\")\n spark.stop()\n","repo_name":"AvinashKumarSP/aws-dqm","sub_path":"RuleValidation.py","file_name":"RuleValidation.py","file_ext":"py","file_size_in_byte":6566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"733434562","text":"#!/usr/bin/env python3\n\nimport ucto\n\ntext = \"\"\"To be or not to be, that's the question. This is a test to tokenise. We can span\nmultiple lines!!! The number 6 is Mr Li's favourite. We can't stop yet.\n\nThis is the next paragraph. And so it ends\"\"\"\n\n\n#Set a file to use as tokeniser rules, this one is for English, other languages are available too:\nsettingsfile = \"tokconfig-eng\"\n\n\n#Initialise the tokeniser, options are passed as keyword arguments, defaults:\n# lowercase=False,uppercase=False,sentenceperlineinput=False,\n# sentenceperlineoutput=False,\n# sentencedetection=True, paragraphdetection=True, quotedetectin=False,\n# debug=False\ntokenizer = ucto.Tokenizer(settingsfile)\n\n#pass the text (may be called multiple times),\ntokenizer.process(text)\n\n#read the tokenised data\nfor token in tokenizer:\n #token is an instance of ucto.Token, serialise to string using str()\n print( \"[\" + str(token) + \"]\", end=\"\" )\n\n #tokens remember whether they are followed by a space\n if token.isendofsentence():\n print()\n elif not token.nospace():\n print(\" \",end=\"\")\n\n #the type of the token (i.e. 
the rule that built it) is available as token.type\n\n#we can continue with more text:\ntokenizer.process(\"This was not enough. We want more text. More sentences are better!!!\")\n\n#there is a high-level interface to iterate over sentences as strings, with all tokens space-separated:\nfor sentence in tokenizer.sentences():\n    print(sentence)\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"proycon/python-ucto","sub_path":"example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"35"} +{"seq_id":"15521257641","text":"from sympy import *\r\nfrom sympy.plotting import plot\r\nx = symbols('x')\r\n\r\n\r\n \r\ny=diff(-x,x,x)\r\np1 = plot(y, (x,-10000,10000),show=False,line_color='red',points=1000)\r\n\r\np1.append(p1[0])\r\n\r\n\r\np1.show()","repo_name":"manvitha-1212/Graphing-Python-Course","sub_path":"derivative graphs/derivative using diff.py","file_name":"derivative using diff.py","file_ext":"py","file_size_in_byte":203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"9212446372","text":"from numpy.random import poisson\nimport math\n\n# Target active staker size\nTARGET_AMOUNT_STAKING = 312500\n# Average time staking before withdrawal\nAVG_STAKING_TIME = 500\n# How many withdrawals are permitted in\n# one day given a certain validator count?\ndef withdrawals_per_day(validators, total_eth_exiting):\n    # return (validators + total_eth_exiting) / 1.07 // 100\n    return validators // 100\n    # return validators * max(1, int(math.log2(total_eth_exiting))) / 13.5 // 100\n    # return int(1 + (total_eth_exiting * validators)**0.5) * 4.9 // 100\n\n# Get the size of the largest staker. This assumes a\n# Zipf's law distribution (ie. power law with power=1)\n# where the nth largest staker is n times smaller than the\n# largest staker. 
Calculates a value for the largest staker\n# such that the total size of nonzero stakers equals the\n# target amount staking.\ndef get_max_staker_size():\n def get_sum(sz):\n tot = 0\n inc = 1\n while sz // inc:\n tot += (sz // inc) * inc\n inc *= 2\n return tot\n size = 0\n offset = TARGET_AMOUNT_STAKING\n while offset:\n if get_sum(size + offset) < TARGET_AMOUNT_STAKING:\n size += offset\n else:\n offset //= 2\n return size\n\n# As a simplification, we make all stakers have validator sizes\n# be close to the max size divided by a power of two\nSTAKER_SIZES = [get_max_staker_size()]\n\nwhile STAKER_SIZES[-1] > 1:\n STAKER_SIZES.append(STAKER_SIZES[-1] // 2)\n\n# Active and not yet exiting stakers\nstakers = {}\n# Exiting stakers\nexiting = {}\n\n# The exit queue\nexit_queue = []\n# Total eth exiting\ntotal_eth_exiting = 0\n# How much of the first exiter's deposit we have processed\nprocessing_current = 0\n\n# Fill the staker set initially\nfor i, sz in enumerate(STAKER_SIZES):\n stakers[sz] = poisson(2**i)\n sz //= 2\n\n# Count withdrawn stakers of each size, and total delays\n# incurred by them, so we can eventually compute the average\nwithdrawn = {}\ntot_delays = {}\n\nprint(\"Total staking ETH:\", sum(k * v for k,v in stakers.items()))\n\nfor day in range(10000):\n # Deposit new stakers at the rate needed to maintain the equilibrium size\n for i, sz in enumerate(STAKER_SIZES):\n stakers[sz] = stakers.get(sz, 0) + poisson(2**i / AVG_STAKING_TIME)\n sz //= 2\n \n # Each staker has a 1/AVG_STAKING_TIME probability of deciding to leave each day\n for k in stakers.keys():\n exit_count = min(poisson(stakers[k] / AVG_STAKING_TIME), stakers[k])\n if exit_count > 0:\n exit_queue.append((k, exit_count, day))\n stakers[k] -= exit_count\n exiting[k] = exiting.get(k, 0) + exit_count\n total_eth_exiting += exit_count * k\n total_validators = sum(k * v for k,v in stakers.items()) + sum(k * v for k,v in exiting.items())\n \n # Process the queue\n queue_to_empty_today = withdrawals_per_day(total_validators, total_eth_exiting)\n while queue_to_empty_today > 0 and len(exit_queue) > 0:\n key, exit_count, exit_day = exit_queue[0]\n # Partially process the first exiter (exit next loop)\n if key * exit_count > queue_to_empty_today + processing_current:\n processing_current += queue_to_empty_today\n queue_to_empty_today = 0\n # Finish processing the first exiter (continue next loop)\n else:\n processing_current = 0\n queue_to_empty_today -= key * exit_count - processing_current\n exit_queue.pop(0)\n exiting[key] -= exit_count\n total_eth_exiting -= exit_count * key\n withdrawn[key] = withdrawn.get(key, 0) + exit_count\n tot_delays[key] = tot_delays.get(key, 0) + (day - exit_day) * exit_count\n if day % 1000 == 999:\n print(\"Report for day %d: %d total validators %d ETH in exit queue\" % ((day+1), total_validators, total_eth_exiting))\n\nprint(\"Total delays in days\")\nfor key in STAKER_SIZES:\n print(\"%d: % .3f (min %.3f)\" % (key, (tot_delays.get(key, 0) / withdrawn.get(key, 0.0001)), key / withdrawals_per_day(TARGET_AMOUNT_STAKING, key)))\n","repo_name":"ethereum/research","sub_path":"exit_queue_tests/exit_queue_tester.py","file_name":"exit_queue_tester.py","file_ext":"py","file_size_in_byte":4081,"program_lang":"python","lang":"en","doc_type":"code","stars":1683,"dataset":"github-code","pt":"35"} +{"seq_id":"32402877274","text":"from typing import List\nfrom collections import deque\n\nclass Solution:\n\tdef solve(self, board: List[List[str]]) -> None:\n\t\tdef nbrs(i, j):\n\t\t\treturn [(r, c) for r, 
c in [(i+1, j), (i-1, j), (i, j+1), (i, j-1)] if 0 <= r < R and \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t0 <= c < C and \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tboard[r][c] == 'O']\n\n\t\tdef bfs(s):\n\t\t\tvisited.add(s)\n\t\t\tq = deque([s])\n\t\t\twhile q:\n\t\t\t\tcur = q.pop()\n\t\t\t\tfor nbr in nbrs(*cur):\n\t\t\t\t\tif nbr not in visited:\n\t\t\t\t\t\tvisited.add(nbr)\n\t\t\t\t\t\tq.appendleft(nbr)\n\n\t\tvisited = set()\n\t\tR, C = len(board), len(board[0])\n\t\tfor i in range(R):\n\t\t\tfor j in range(C):\n\t\t\t\tif (i == 0 or i == R-1 or j == 0 or j == C-1) and board[i][j] == 'O':\n\t\t\t\t\t#boundary O\n\t\t\t\t\tbfs((i, j))\n\n\t\tfor i in range(R):\n\t\t\tfor j in range(C):\n\t\t\t\tif board[i][j] == 'O' and (i, j) not in visited:\n\t\t\t\t\tboard[i][j] = 'X'\n\n\t\tfor row in board:\n\t\t\tprint(row)\n\n\t\treturn board\n\n\nSolution().solve([[\"X\",\"X\",\"X\",\"X\"],\n\t\t\t\t [\"X\",\"O\",\"O\",\"X\"],\n\t\t\t\t [\"X\",\"X\",\"O\",\"X\"],\n\t\t\t\t [\"X\",\"O\",\"X\",\"X\"]])\n\n# [[\"X\",\"X\",\"X\",\"X\"],\n# [\"X\",\"X\",\"X\",\"X\"],\n# [\"X\",\"X\",\"X\",\"X\"],\n# [\"X\",\"O\",\"X\",\"X\"]]\t\t","repo_name":"aditya-283/leetcode","sub_path":"problems/Medium/surrounded-regions/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"27686672353","text":"\"\"\"\nResponsible for the Pong Game Engine\n\"\"\"\n\nimport pyxel\nimport random\nimport math\n\nfrom match import match_type\nfrom match.constants import *\n\n\nclass Engine():\n def __init__(self):\n self.ball = Ball()\n self.ball.alive = False\n self.player_one = Player(player_num=1)\n self.player_two = Player(player_num=2)\n self.post_score_delay = PRE_START_FRAME_DELAY\n self.last_scorer = None\n\n def update(self, p1_input, p2_input):\n self.update_match(p1_input, p2_input)\n return {\n 'phase': 'playing',\n 'p1_y': self.player_one.frame.y,\n 'p1_score': self.player_one.score,\n 'p2_y': self.player_two.frame.y,\n 'p2_score': self.player_two.score,\n 'ball_xy': (self.ball.frame.x, self.ball.frame.y),\n 'ball_trail': self.ball.trail,\n 'ball_alive': self.ball.alive,\n }\n\n def update_match(self, p1_input, p2_input):\n if self.check_and_process_score():\n self.ball.alive = False\n self.ball.trail = []\n\n self.player_one.update(p1_input)\n self.player_two.update(p2_input)\n\n if self.post_score_delay > 0:\n self.post_score_delay -= 1\n if self.post_score_delay == 0:\n self.ball.reset(self.last_scorer)\n else:\n if self.player_one.frame.intersects(self.ball.frame):\n (new_dx, new_dy) = self.ball.handle_player_collision(self.player_one)\n self.ball.update_trajectory(new_dx, new_dy)\n elif self.player_two.frame.intersects(self.ball.frame):\n (new_dx, new_dy) = self.ball.handle_player_collision(self.player_two)\n self.ball.update_trajectory(new_dx, new_dy)\n elif self.ball.hit_top_wall() or self.ball.hit_bottom_wall():\n self.ball.update_trajectory(self.ball.dx, -self.ball.dy)\n\n self.ball.update()\n\n def check_for_winner(self):\n if self.player_one.score == WIN_SCORE:\n return self.player_one.player_num\n elif self.player_two.score == WIN_SCORE:\n return self.player_two.player_num\n else:\n return None\n\n def check_and_process_score(self):\n if self.ball.alive == False:\n return False\n\n if self.ball.hit_left_wall():\n self.player_two.score += 1\n self.last_scorer = 2\n elif self.ball.hit_right_wall():\n self.player_one.score += 1\n self.last_scorer = 1\n else:\n return 
False\n\n self.post_score_delay = PRE_START_FRAME_DELAY\n return True\n\n\nclass Rect:\n def __init__(self, x=0, y=0, width=0, height=0):\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n\n @property\n def left(self):\n return self.x\n\n @property\n def right(self):\n return self.x + self.width\n\n @property\n def top(self):\n return self.y\n\n @property\n def bottom(self):\n return self.y + self.height\n\n def intersects(self, rect):\n if (self.right >= rect.left and self.left <= rect.right) and (self.top <= rect.bottom and self.bottom >= rect.top):\n return True\n else:\n return False\n\n\nclass Player:\n def __init__(self, player_num):\n self.player_num = player_num\n self.score = 0\n initial_x_pos = PLAYER_1_X if player_num == 1 else PLAYER_2_X\n initial_y_pos = 10 if player_num == 1 else GAME_COURT_HEIGHT - 10 - PLAYER_HEIGHT\n self.frame = Rect(x=initial_x_pos, y=initial_y_pos, width=PLAYER_WIDTH, height=PLAYER_HEIGHT)\n\n @property\n def moving_up(self):\n key_up = pyxel.KEY_Q if self.player_num == 1 else pyxel.KEY_I\n return pyxel.btn(key_up)\n\n @property\n def moving_down(self):\n key_down = pyxel.KEY_A if self.player_num == 1 else pyxel.KEY_K\n return pyxel.btn(key_down)\n\n def update(self, key_input):\n if self.player_num == 1:\n if key_input == pyxel.KEY_Q:\n self.update_player_pos(-PLAYER_SPEED)\n elif key_input == pyxel.KEY_A:\n self.update_player_pos(PLAYER_SPEED)\n elif self.player_num == 2:\n if key_input == pyxel.KEY_O:\n self.update_player_pos(-PLAYER_SPEED)\n elif key_input == pyxel.KEY_L:\n self.update_player_pos(PLAYER_SPEED)\n\n def update_player_pos(self, dy):\n if dy > 0:\n if self.frame.bottom + dy <= GAME_COURT_HEIGHT:\n self.frame.y += dy\n elif self.frame.bottom + dy >= GAME_COURT_HEIGHT:\n self.frame.y = GAME_COURT_HEIGHT - self.frame.height\n\n if dy < 0:\n if self.frame.top + dy >= 0:\n self.frame.y += dy\n elif self.frame.top + dy <= 0:\n self.frame.y = 0\n\n\nclass Ball:\n def __init__(self):\n self.frame = Rect(width=BALL_WIDTH, height=BALL_WIDTH)\n self.trail = []\n self.reset()\n\n def update(self):\n self.trail.insert(0, (self.frame.x, self.frame.y))\n while len(self.trail) > 20:\n self.trail.pop()\n\n self.frame.x += self.dx\n self.frame.y += self.dy\n\n def reset(self, last_scorer=None):\n rand_dx = 3 if random.randint(0, 1) else -3\n self.alive = True\n self.frame.x = COURT_WIDTH / 2\n self.frame.y = GAME_COURT_HEIGHT - 1 - self.frame.width if random.randint(0, 1) else 1\n self.dx = 3 if last_scorer == 1 else (-3 if last_scorer == 2 else rand_dx)\n self.dy = 2 if self.frame.y == 1 else -2\n\n def handle_player_collision(self, player):\n # Determine where paddle hit ball, top, middle, or bottom\n new_dx = -self.dx\n new_dy = self.dy\n\n distance_to_mid_paddle = round(self.frame.bottom - (player.frame.y + player.frame.height / 2))\n\n if -2 <= distance_to_mid_paddle <= 2:\n new_dy = 0\n # ball hit upper region of paddle\n elif distance_to_mid_paddle < 0 and distance_to_mid_paddle < -1:\n new_dy = -abs(self.dy) if self.dy != 0 else -2\n # ball hit lower region of paddle\n elif distance_to_mid_paddle > 0 and distance_to_mid_paddle > 1:\n new_dy = abs(self.dy) if self.dy != 0 else 2\n\n new_dy = self.handle_potential_spin(new_dy, player)\n\n return (new_dx, new_dy)\n\n def handle_potential_spin(self, dy, player):\n if 1 < dy < 3 or -3 < dy < -1:\n if player.moving_up:\n dy = -1 if dy < 0 else 3\n if player.moving_down:\n dy = 1 if dy > 0 else -3\n\n return dy\n\n def update_trajectory(self, dx, dy):\n self.dx = dx\n self.dy = 
dy\n\n def hit_left_wall(self):\n return self.frame.x <= 0\n\n def hit_right_wall(self):\n return self.frame.x + self.frame.width >= COURT_WIDTH\n\n def hit_player_paddle(self, p1x, p1y, p2x, p2y):\n hit_p1 = self.frame.x <= p1x + PLAYER_WIDTH and p1y <= self.frame.y <= p1y + PLAYER_HEIGHT + self.frame.width\n hit_p2 = self.frame.x >= p2x - PLAYER_WIDTH and p2y <= self.frame.y <= p2y + PLAYER_HEIGHT + self.frame.width\n return hit_p1 or hit_p2\n\n def hit_top_wall(self):\n return self.frame.y <= 0\n\n def hit_bottom_wall(self):\n return self.frame.y + self.frame.height >= GAME_COURT_HEIGHT\n","repo_name":"nwilliams770/pyng","sub_path":"match/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":6622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"41058509138","text":"import numpy as np\nimport numpy.matlib\nimport pandas as pd\n\nfrom argparse import ArgumentParser\nfrom os.path import abspath, dirname, exists, join\nfrom simulations import kom\nfrom sklearn.neighbors import NearestNeighbors\nfrom sklearn.preprocessing import StandardScaler\nfrom svmpath import SVMPath\n\n\n# directories\nWD = dirname(abspath(__file__))\nDATA_DIR = join(WD, *['..', '..', 'data', 'empirical'])\nOUT_DIR = join(WD, *['..', '..', 'results'])\n\n# RHC data\nRHC_L_PATH = join(DATA_DIR, 'rhc_clean.csv')\nRHC_P_PATH = join(DATA_DIR, 'rhc_poly_clean.csv')\n# KCB results\nKCB_PATH = join(OUT_DIR, 'rhc_kcb_rbf.csv')\n# cardmatch results\nCARD_L_PATH = join(OUT_DIR, 'rhc_card_linear')\nCARD_P_PATH = join(OUT_DIR, 'rhc_card_poly')\n\n# function for computing elbow point\ndef elbow(x, y):\n # sort x to be in ascending order\n xs = np.sort(x.to_numpy())\n ys = y.to_numpy()[np.argsort(x)]\n \n # form tuples of x,y\n coords = np.vstack((xs,ys)).T\n # initial/final points\n p0 = coords[0]\n p1 = coords[-1]\n \n # normalized line joining points\n vec = p1 - p0\n vec = vec / np.sqrt(np.sum(vec ** 2))\n \n # distances from vec\n vec_p0 = coords - p0\n ips = np.sum(vec_p0 * np.matlib.repmat(vec, len(xs), 1), axis=1)\n points = np.outer(ips, vec)\n # vectors from points to line, perpendicular to vec\n vecs = vec_p0 - points\n dists_vec = np.sum(vecs ** 2, axis=1)\n \n return np.argsort(x).to_numpy()[dists_vec.argmax()]\n\n# function for gathering statistics about SVM path\ndef path_stats(path, kernel, eps=1e-6):\n # get useful quantities\n X = path.X\n X_trans = path.X_trans # covariate matrix in transformed space\n y = path.y\n outcomes = path.outcomes\n Q = path.Q\n \n # adjust y to 0-1\n if isinstance(y, np.ndarray):\n y = (y == 1).astype(int)\n else:\n y = (y.to_numpy() == 1).astype(int)\n\n # outcomes for treated and control\n out1 = outcomes[y == 1].to_numpy()\n out0 = outcomes[y == 0].to_numpy()\n \n # number of treated and control\n n1 = sum(y)\n n0 = len(y) - n1\n \n # prespecified grid of C values\n if kernel == 'linear':\n Cs = np.geomspace(3e-4, 500, 150)\n elif kernel in {'poly', 'polynomial'}:\n Cs = np.geomspace(1.5e-5, 1e3, 150)\n else:\n Cs = np.geomspace(4e-2, 500, 150)\n \n # conditional variance estimate, estimated from 2 neighbors\n if kernel == 'rbf':\n X_sc= StandardScaler().fit_transform(X)\n else:\n X_sc = StandardScaler().fit_transform(X_trans)\n X1 = X_sc[y == 1]\n X0 = X_sc[y == 0]\n nn1 = NearestNeighbors(n_neighbors=2).fit(X1)\n nn0 = NearestNeighbors(n_neighbors=2).fit(X0)\n\n # conditional variances\n neighb1 = nn1.kneighbors()[1]\n neighb0 = nn0.kneighbors()[1]\n # see Ch. 
19 of Imbens & Rubin for conditional variance estimate\n cv1 = ((np.tile(out1, (2,1)).T - out1[neighb1]) ** 2).sum(axis=1) / 4\n cv0 = ((np.tile(out0, (2,1)).T - out0[neighb0]) ** 2).sum(axis=1) / 4\n\n # save conditional variance\n if not exists(join(OUT_DIR, \"cv1.csv\")):\n np.savetxt(join(OUT_DIR, \"cv1.csv\"), cv1 , delimiter=\",\")\n if not exists(join(OUT_DIR, \"cv0.csv\")):\n np.savetxt(join(OUT_DIR, \"cv0.csv\"), cv0 , delimiter=\",\") \n \n # containers\n ate = np.zeros(len(Cs))\n se = ate.copy()\n ss = ate.copy()\n ess = ate.copy()\n bal = ate.copy()\n asum = ate.copy()\n \n # pooled variance, can't do dimensional balance stats with rbf\n if kernel != 'rbf':\n sdim = np.zeros((len(Cs), X.shape[-1]))\n bal_sd = ate.copy()\n # pooled variance\n pooled = np.sqrt(X_trans[y == 1].var(axis=0, ddof=1) / 2 + \n X_trans[y == 0].var(axis=0, ddof=1) / 2)\n \n # compute statistics for each value of C\n for i, C in enumerate(Cs):\n # sum of weights\n alpha = path.get_alpha(C)\n asum[i] = alpha.sum()\n \n # normalize to sum to 1\n alpha = alpha / (alpha.sum() / 2)\n \n # rescaled weights, sum to number of treated / control units\n w1 = alpha[y == 1] * n1\n w0 = alpha[y == 0] * n0 \n \n # ate estimate\n ate[i] = alpha[y == 1] @ out1 - alpha[y == 0] @ out0\n \n # standard error estimate, see Ch. 19 of Imbens & Rubin\n se[i] = np.sqrt((w1 ** 2 * cv1).sum() / n1 ** 2 + \n (w0 ** 2 * cv0).sum() / n0 ** 2)\n \n # subset size\n ss[i] = len(np.where(alpha > eps)[0])\n \n # effective subset size\n ess[i] = alpha[y == 1].sum() ** 2 / (alpha[y == 1] ** 2).sum() + \\\n alpha[y == 0].sum() ** 2 / (alpha[y == 0] ** 2).sum()\n \n # normed difference in means (wrt standardized X)\n bal[i] = np.sqrt(alpha @ Q @ alpha)\n \n # standardized difference in means (not compatible with rbf)\n if kernel != 'rbf': \n sdim[i] = (alpha * (2*y-1)) @ X_trans / pooled\n # average sdim (per dimension)\n bal_sd[i] = np.abs(sdim[i]).mean()\n \n # build results dataframe\n results = pd.DataFrame({'ate' : ate, 'se' : se, 'bal' : bal, 'ss' : ss, \n 'ess' : ess, 'asum' : asum},\n index=Cs)\n if kernel == 'rbf':\n return results\n else:\n # add in average sdim\n results.insert(3, 'bal_sd', bal_sd)\n \n # add in sdim\n temp = pd.DataFrame(sdim, columns=X.columns, index=Cs)\n \n return results.join(temp)\n \n\ndef parse_arguments():\n parser = ArgumentParser()\n parser.add_argument('-k','--kernel',choices=['linear','poly','rbf'],\n default='linear',help='Kernel for specifying function space')\n parser.add_argument('-v','--verbose',action='store_true',default=False,\n help='Verbosity for output')\n \n return parser.parse_args()\n\nif __name__ == '__main__':\n # command line arguments\n args = parse_arguments()\n kernel = args.kernel \n verbose = args.verbose\n \n # compute sdim per dimenstion if kernel is not RBF\n compute_sdim = True if kernel != 'rbf' else False\n\n # read data, split into X and y\n if kernel != 'poly':\n rhc = pd.read_csv(RHC_L_PATH)\n path_kernel = kernel\n else:\n rhc = pd.read_csv(RHC_P_PATH)\n path_kernel = 'linear'\n \n X = rhc.drop(['death','swang1'], axis=1) # unstandardized\n y = rhc.swang1 \n outcomes = rhc.death\n \n # compute path\n print(\"Computing SVM path... \", end=\"\", flush=True)\n path = SVMPath(X, y, outcomes=outcomes, kernel=path_kernel, verbose=verbose)\n print(\"Done!\\n\")\n \n # compute path statistics\n print(\"Computing path statistics... 
\", end=\"\", flush=True)\n res = path_stats(path, kernel)\n print(\"Done!\\n\")\n \n # compute KOM estimate\n print(\"Computing kernel optimal weights... \", end=\"\", flush=True)\n res_kom = kom(X, y, outcomes, kernel=path_kernel, compute_sdim=compute_sdim,\n verbose=verbose) \n print(\"Done!\\n\")\n \n # save results\n res.to_csv(join(OUT_DIR, 'rhc_svm_%s.csv' % kernel))\n res_kom.to_csv(join(OUT_DIR, 'rhc_kom_%s.csv' % kernel))\n \n # compare balance with KOM and cardmatch\n if kernel != 'rbf': \n # read in cardmatch results\n if kernel == \"linear\": \n res_card = pd.read_csv(CARD_L_PATH)\n elif kernel =='poly':\n res_card = pd.read_csv(CARD_P_PATH)\n \n \n # read in kcb results for rbf kernel\n if kernel == 'rbf':\n res_kcb = pd.read_csv(KCB_PATH)\n \n # get SVM point estimates\n bal_max = res.bal.max()\n bal_min = res.bal.min()\n \n # initial solution\n ind1 = 0\n \n if kernel == 'linear': # no elbow point\n ind2 = -1\n else:\n ind2 = elbow(res.asum, res.bal)\n \n # most balanced solution\n ind3 = -1\n \n # comparable to KOM balance\n ind_kom = np.where(res.bal <= res_kom.iloc[0].bal)[0][0]\n \n # comparable to cardmatch balance\n if kernel != 'rbf':\n ind_card = np.where(res.bal <= res_card.iloc[0].bal)[0][0]\n else:\n ind_kcb = np.where(res.bal <= res_kcb.iloc[0].bal)[0][0]\n \n print(\"Displaying Results for %s kernel...\\n\" % kernel)\n print(\"SVM_Unbalanced\")\n print(\" ATE: %.4f\" % res.ate.iloc[ind1])\n print(\" SE: %.4f\" % res.se.iloc[ind1])\n print(\"Balance: %.4f\" % res.bal.iloc[ind1])\n print(\" ESS: %d\" % res.ess.iloc[ind1])\n print()\n print(\"SVM_Elbow\")\n print(\" ATE: %.4f\" % res.ate.iloc[ind2])\n print(\" SE: %.4f\" % res.se.iloc[ind2])\n print(\"Balance: %.4f\" % res.bal.iloc[ind2])\n print(\" ESS: %d\" % res.ess.iloc[ind2])\n print()\n print(\"SVM_Balanced\")\n print(\" ATE: %.4f\" % res.ate.iloc[ind3])\n print(\" SE: %.4f\" % res.se.iloc[ind3])\n print(\"Balance: %.4f\" % res.bal.iloc[ind3])\n print(\" ESS: %d\" % res.ess.iloc[ind3])\n print()\n print(\"SVM_KOM\")\n print(\" ATE: %.4f\" % res.ate.iloc[ind_kom])\n print(\" SE: %.4f\" % res.se.iloc[ind_kom])\n print(\"Balance: %.4f\" % res.bal.iloc[ind_kom])\n print(\" ESS: %d\" % res.ess.iloc[ind_kom])\n print()\n if kernel != 'rbf':\n print(\"CARD\")\n print(\" ATE: %.4f\" % res_card.ate.iloc[0])\n print(\" SE: %.4f\" % res_card.se.iloc[0])\n print(\"Balance: %.4f\" % res_card.bal.iloc[0])\n print(\" ESS: %d\" % res_card.ess.iloc[0])\n print()\n else:\n print(\"KCB\")\n print(\" ATE: %.4f\" % res_kcb.ate.iloc[0])\n print(\" SE: %.4f\" % res_kcb.se.iloc[0])\n print(\"Balance: %.4f\" % res_kcb.bal.iloc[0])\n print(\" ESS: %d\" % res_kcb.ess.iloc[0])\n print()\n print(\"KOM\")\n print(\" ATE: %.4f\" % res_kom.ate.iloc[0])\n print(\" SE: %.4f\" % res_kom.se.iloc[0])\n print(\"Balance: %.4f\" % res_kom.bal.iloc[0])\n print(\" ESS: %d\" % res_kom.ess.iloc[0])\n ","repo_name":"atarr3/svmcausal-data","sub_path":"code/python/empirical.py","file_name":"empirical.py","file_ext":"py","file_size_in_byte":9942,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"35"} +{"seq_id":"75329195300","text":"from cost_functions import f, g, rastrigin, rosenbrock, michael, schwefel, dropwave, schaffer_2, equation\nfrom pyswarms.single import GlobalBestPSO\nimport numpy as np\nimport time\nimport os\n#Testing the activity commit\nstart = time.time()\noptions = {'c1': 0.5, 'c2': 0.3, 'w':0.9}\nn_dimensions = 20\nx_max = 3 * np.ones(n_dimensions)\nx_min = -3* np.ones(n_dimensions)\nbounds = 
(x_min, x_max)\niterations = 5000\nn_particles = 800\noptimizer = GlobalBestPSO(n_particles= n_particles, dimensions = n_dimensions,options = options, bounds= bounds)\noptimizer.optimize(f,iters = iterations)\n\ncost_history = optimizer.cost_history\nposition_history = optimizer.pos_history[0]\n\nend = time.time()\nfunction_evaluations = iterations*n_particles\n\n\noptimization_time = np.linspace(0, function_evaluations, num= len(cost_history))\nprint(f'The PSO algorithm took {end-start} seconds')\n\n\nresults_path = 'Results'\nnp.savez(os.path.join(results_path,\"testing_pso_500.npz\"), cost_history,position_history,optimization_time ,cost_h = cost_history,\n time = optimization_time, pos_history = position_history)\n","repo_name":"ferwanguer/Quantum_Inspired_Optimizer","sub_path":"pso_optimization.py","file_name":"pso_optimization.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"6611988492","text":"import torch\nimport torch.nn.functional as F\nfrom torch.distributions import Normal\nimport numpy as np\n\nfrom .reinforce import REINFORCE\nfrom core.optimizer import Optimizer\n\n\nclass VMPO(REINFORCE):\n \"\"\"Maximum A Posteriori Policy Optimization (MPO) agent.\n\n Args:\n optim_config (dict): dictionary of the optimizer info.\n (key: 'name', value: name of optimizer)\n batch_size (int): the number of samples in the one batch.\n n_step (int): The number of steps to run for each environment per update.\n n_epoch (int): Number of epoch when optimizing the surrogate.\n _lambda (float): Factor for trade-off of bias vs variance for Generalized Advantage Estimator.\n clip_grad_norm (float): gradient clipping threshold.\n min_eta (float): minimum value of eta.\n min_alpha_mu (float): minimum value of alpha_mu.\n min_alpha_sigma (float): minimum value of alpha_sigma.\n eps_eta (float): threshold of temperature loss term.\n eps_alpha_mu (float): threshold of mean part of Gaussian-KL constraint term.\n eps_alpha_sigma (float): threshold of variance part of Gaussian-KL constraint term.\n eta (float): Lagrange multipliers of temperature loss term.\n alpha_mu (float): Lagrange multipliers of mean part of Gaussian-KL constraint term (trust-region loss).\n alpha_sigma (float): Lagrange multipliers of variance part of Gaussian-KL constraint term.\n \"\"\"\n\n def __init__(\n self,\n network=\"discrete_policy_value\",\n optim_config={\"name\": \"adam\"},\n batch_size=32,\n n_step=128,\n n_epoch=1,\n _lambda=0.9,\n clip_grad_norm=1.0,\n # parameters unique to V-MPO\n min_eta=1e-8,\n min_alpha_mu=1e-8,\n min_alpha_sigma=1e-8,\n eps_eta=0.02,\n eps_alpha_mu=0.1,\n eps_alpha_sigma=0.1,\n eta=1.0,\n alpha_mu=1.0,\n alpha_sigma=1.0,\n **kwargs,\n ):\n super(VMPO, self).__init__(\n network=network,\n optim_config=optim_config,\n **kwargs,\n )\n\n self.batch_size = batch_size\n self.n_step = n_step\n self.n_epoch = n_epoch\n self._lambda = _lambda\n self.time_t = 0\n self.learn_stamp = 0\n self.clip_grad_norm = clip_grad_norm\n\n self.min_eta = torch.tensor(min_eta, device=self.device)\n self.min_alpha_mu = torch.tensor(min_alpha_mu, device=self.device)\n self.min_alpha_sigma = torch.tensor(min_alpha_sigma, device=self.device)\n\n self.eps_eta = eps_eta\n self.eps_alpha_mu = eps_alpha_mu\n self.eps_alpha_sigma = eps_alpha_sigma\n\n self.eta = torch.nn.Parameter(\n torch.tensor(eta, requires_grad=True).to(self.device)\n )\n self.alpha_mu = torch.nn.Parameter(\n torch.tensor(alpha_mu, 
requires_grad=True).to(self.device)\n )\n self.alpha_sigma = torch.nn.Parameter(\n torch.tensor(alpha_sigma, requires_grad=True).to(self.device)\n )\n\n self.reset_lgr_muls()\n\n self.optimizer = Optimizer(\n **optim_config,\n params=list(self.network.parameters())\n + [self.eta, self.alpha_mu, self.alpha_sigma],\n )\n\n @torch.no_grad()\n def act(self, state, training=True):\n self.network.train(training)\n\n if self.action_type == \"continuous\":\n mu, std, _ = self.network(self.as_tensor(state))\n z = torch.normal(mu, std) if training else mu\n action = torch.tanh(z)\n else:\n pi, _ = self.network(self.as_tensor(state))\n action = (\n torch.multinomial(pi, 1)\n if training\n else torch.argmax(pi, dim=-1, keepdim=True)\n )\n return {\"action\": action.cpu().numpy()}\n\n def learn(self):\n transitions = self.memory.sample()\n for key in transitions.keys():\n transitions[key] = self.as_tensor(transitions[key])\n\n state = transitions[\"state\"]\n action = transitions[\"action\"]\n reward = transitions[\"reward\"]\n next_state = transitions[\"next_state\"]\n done = transitions[\"done\"]\n\n # set advantage and log_pi_old\n with torch.no_grad():\n if self.action_type == \"continuous\":\n mu, std, value = self.network(state)\n m = Normal(mu, std)\n z = torch.atanh(torch.clamp(action, -1 + 1e-7, 1 - 1e-7))\n log_pi = m.log_prob(z)\n log_prob = log_pi.sum(axis=-1, keepdims=True)\n mu_old = mu\n std_old = std\n else:\n pi, value = self.network(state)\n pi_old = pi\n log_prob = torch.log(pi.gather(1, action.long()))\n log_pi_old = torch.log(pi)\n\n log_prob_old = log_prob\n\n next_value = self.network(next_state)[-1]\n delta = reward + (1 - done) * self.gamma * next_value - value\n adv = delta.clone()\n adv, done = adv.view(-1, self.n_step), done.view(-1, self.n_step)\n for t in reversed(range(self.n_step - 1)):\n adv[:, t] += (\n (1 - done[:, t]) * self.gamma * self._lambda * adv[:, t + 1]\n )\n if self.use_standardization:\n adv = (adv - adv.mean(dim=1, keepdim=True)) / (\n adv.std(dim=1, keepdim=True) + 1e-7\n )\n adv = adv.view(-1, 1)\n done = done.view(-1, 1)\n ret = adv + value\n\n # start train iteration\n actor_losses, critic_losses, eta_losses, alpha_losses = [], [], [], []\n idxs = np.arange(len(reward))\n for _ in range(self.n_epoch):\n np.random.shuffle(idxs)\n for offset in range(0, len(reward), self.batch_size):\n idx = idxs[offset : offset + self.batch_size]\n\n _state, _action, _ret, _next_state, _adv, _log_prob_old = map(\n lambda x: [_x[idx] for _x in x] if isinstance(x, list) else x[idx],\n [state, action, ret, next_state, adv, log_prob_old],\n )\n\n if self.action_type == \"continuous\":\n _mu_old, _std_old = map(lambda x: x[idx], [mu_old, std_old])\n else:\n _log_pi_old, _pi_old = map(lambda x: x[idx], [log_pi_old, pi_old])\n\n # select top 50% of advantages\n idx_tophalf = _adv > _adv.median()\n tophalf_adv = _adv[idx_tophalf]\n # calculate psi\n exp_adv_eta = torch.exp(tophalf_adv / self.eta)\n psi = exp_adv_eta / torch.sum(exp_adv_eta.detach())\n\n if self.action_type == \"continuous\":\n mu, std, value = self.network(_state)\n m = Normal(mu, std)\n z = torch.atanh(torch.clamp(_action, -1 + 1e-7, 1 - 1e-7))\n log_pi = m.log_prob(z)\n log_prob = log_pi.sum(axis=-1, keepdims=True)\n else:\n pi, value = self.network(_state)\n log_prob = torch.log(pi.gather(1, _action.long()))\n log_pi = torch.log(pi)\n\n critic_loss = F.mse_loss(value, _ret).mean()\n\n # calculate loss for eta\n eta_loss = self.eta * self.eps_eta + self.eta * torch.log(\n torch.mean(exp_adv_eta)\n )\n\n # 
calculate policy loss (actor_loss)\n tophalf_log_prob = log_prob[idx_tophalf.squeeze(), :]\n actor_loss = -torch.sum(psi.detach().unsqueeze(1) * tophalf_log_prob)\n\n # calculate loss for alpha\n # NOTE: assumes that std are in the same shape as mu (hence vectors)\n # hence each dimension of Gaussian distribution is independent\n if self.action_type == \"continuous\":\n ss = 1.0 / (std**2) # (batch_size * action_dim)\n ss_old = 1.0 / (_std_old**2) # (batch_size * action_dim)\n\n # mu\n d_mu = mu - _mu_old.detach() # (batch_size * action_dim)\n KLD_mu = 0.5 * torch.sum(\n d_mu * 1.0 / ss_old.detach() * d_mu, axis=1\n )\n mu_loss = torch.mean(\n self.alpha_mu * (self.eps_alpha_mu - KLD_mu.detach())\n + self.alpha_mu.detach() * KLD_mu\n )\n\n # sigma\n KLD_sigma = 0.5 * (\n (\n torch.sum(1.0 / ss * ss_old.detach(), axis=1)\n - ss.shape[-1]\n + torch.log(\n torch.prod(ss, axis=1)\n / torch.prod(ss_old.detach(), axis=1)\n )\n )\n )\n sigma_loss = torch.mean(\n self.alpha_sigma * (self.eps_alpha_sigma - KLD_sigma.detach())\n + self.alpha_sigma.detach() * KLD_sigma\n )\n\n alpha_loss = mu_loss + sigma_loss\n else:\n KLD_pi = _pi_old.detach() * (_log_pi_old.detach() - log_pi)\n KLD_pi = torch.sum(KLD_pi, axis=len(_pi_old.shape) - 1)\n alpha_loss = torch.mean(\n self.alpha_mu * (self.eps_alpha_mu - KLD_pi.detach())\n + self.alpha_mu.detach() * KLD_pi\n )\n\n loss = critic_loss + actor_loss + eta_loss + alpha_loss\n\n self.optimizer.zero_grad()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(\n self.network.parameters(), self.clip_grad_norm\n )\n self.optimizer.step()\n self.reset_lgr_muls()\n\n actor_losses.append(actor_loss.item())\n critic_losses.append(critic_loss.item())\n eta_losses.append(eta_loss.item())\n alpha_losses.append(alpha_loss.item())\n\n result = {\n \"actor_loss\": np.mean(actor_losses),\n \"critic_loss\": np.mean(critic_losses),\n \"eta_loss\": np.mean(eta_losses),\n \"alpha_loss\": np.mean(alpha_losses),\n \"eta\": self.eta.item(),\n \"alpha_mu\": self.alpha_mu.item(),\n \"alpha_sigma\": self.alpha_sigma.item(),\n }\n return result\n\n # reset Lagrange multipliers: eta, alpha_{mu, sigma}\n def reset_lgr_muls(self):\n self.eta.data = torch.max(self.eta, self.min_eta)\n self.alpha_mu.data = torch.max(self.alpha_mu, self.min_alpha_mu)\n self.alpha_sigma.data = torch.max(self.alpha_sigma, self.min_alpha_sigma)\n\n def process(self, transitions, step):\n result = {}\n # Process per step\n self.memory.store(transitions)\n delta_t = step - self.time_t\n self.time_t = step\n self.learn_stamp += delta_t\n\n # Process per n_step\n if self.learn_stamp >= self.n_step:\n result = self.learn()\n if self.lr_decay:\n self.learning_rate_decay(step)\n self.learn_stamp = 0\n\n return result\n","repo_name":"kakaoenterprise/JORLDY","sub_path":"jorldy/core/agent/vmpo.py","file_name":"vmpo.py","file_ext":"py","file_size_in_byte":11431,"program_lang":"python","lang":"en","doc_type":"code","stars":352,"dataset":"github-code","pt":"35"} +{"seq_id":"10916039595","text":"import re\nimport time\nimport datetime\nfrom . import utils\nfrom . 
import log\n\n\nlogger = log.setup_logger(__name__)\n\n\nYEAR = None\n\n\ndef set_year(ts=None):\n '''\n ts: optional time in seconds\n '''\n global YEAR\n year = time.strftime(\"%Y\", time.localtime(ts))\n if YEAR is not None:\n t = (\" (ts: %s)\" % (ts)) if ts is not None else \"\"\n logger.debug(\"history: setting year to %s%s\", year, t)\n YEAR = year\n\n\ndef human_date(dt=None):\n '''\n Convert datetime argument into a presentational string.\n\n dt: Datetime (default: now)\n '''\n if dt is None:\n dt = utils.make_datetime_naive(datetime.datetime.now())\n # here, dt is in UTC. Convert to localtime:\n localdt = datetime.datetime.fromtimestamp(utils.datetime_to_timestamp(dt))\n # drop microseconds\n return re.sub(\"[.].*\", \"\", \"%s %s\" % (localdt.date(), localdt.time()))\n\n\ndef make_time(t):\n '''\n t: time in seconds / datetime / other\n returns: time in floating point\n '''\n if t is None:\n return None\n elif isinstance(t, datetime.datetime):\n return utils.datetime_to_timestamp(t)\n return t\n\n\n# fmt1: group 11 is node\n# fmt2: group 2 is node\n# fmt3: group 2 is node\n# fmt4: node not available?\n_syslog2node_formats = (re.compile(r'^(\\d{4})-(\\d{2})-(\\d{2})T(\\d{2}):(\\d{2}):(\\d{2})(?:.(\\d+))?([+-])(\\d{2}):?(\\d{2})\\s+(?:\\[\\d+\\])?\\s*([\\S]+)'),\n re.compile(r'^(\\d{4}-\\d{2}-\\d{2}T\\S+)\\s+(?:\\[\\d+\\])?\\s*([\\S]+)'),\n re.compile(r'^([a-zA-Z]{2,4}\\s+\\d{1,2}\\s+\\d{2}:\\d{2}:\\d{2})\\s+(?:\\[\\d+\\])?\\s*([\\S]+)'),\n re.compile(r'^(\\d{4}\\/\\d{2}\\/\\d{2}_\\d{2}:\\d{2}:\\d{2})'),\n re.compile(r'^([A-Z][a-z]+ \\d{1,2} \\d{2}:\\d{2}:\\d{2}\\.\\d+) ([\\S]+)'))\n\n_syslog_ts_prev = None\n\n\ndef syslog_ts(s):\n \"\"\"\n Finds the timestamp in the given line\n Returns as floating point, seconds\n \"\"\"\n global _syslog_ts_prev\n fmt1, fmt2, fmt3, fmt4, fm5 = _syslog2node_formats\n\n # RFC3339\n m = fmt1.match(s)\n if m:\n year, month, day, hour, minute, second, ms, tzsgn, tzh, tzm, _ = m.groups()\n ts = time.mktime((int(year), int(month), int(day), int(hour), int(minute), int(second), 0, 0, -1))\n if tzsgn == '+':\n ts += (3600.0 * float(tzh) + 60.0 * float(tzm))\n else:\n ts -= (3600.0 * float(tzh) + 60.0 * float(tzm))\n if ms:\n ts += float(\"0.%s\" % ms)\n _syslog_ts_prev = ts\n return _syslog_ts_prev\n\n m = fmt2.match(s)\n if m:\n _syslog_ts_prev = utils.parse_to_timestamp(m.group(1))\n return _syslog_ts_prev\n\n m = fmt3.match(s)\n if m:\n if YEAR is None:\n set_year()\n tstr = YEAR + ' ' + m.group(1)\n\n dt = datetime.datetime.strptime(tstr, '%Y %b %d %H:%M:%S')\n from dateutil import tz\n ts = utils.total_seconds(dt - tz.tzlocal().utcoffset(dt) - datetime.datetime(1970, 1, 1))\n _syslog_ts_prev = ts\n return _syslog_ts_prev\n\n m = fmt4.match(s)\n if m:\n tstr = m.group(1).replace('_', ' ')\n _syslog_ts_prev = utils.parse_to_timestamp(tstr)\n return _syslog_ts_prev\n\n m = fm5.match(s)\n if m:\n _syslog_ts_prev = utils.parse_to_timestamp(m.group(1))\n return _syslog_ts_prev\n\n logger.debug(\"malformed line: %s\", s)\n return _syslog_ts_prev\n\n\n_syslog_node_prev = None\n\n\ndef syslog2node(s):\n '''\n Get the node from a syslog line.\n\n old format:\n Aug 14 11:07:04 ...\n new format:\n Aug 14 11:07:04 [] ...\n RFC5424:\n ...\n RFC5424 (2):\n [] ...\n '''\n global _syslog_node_prev\n\n fmt1, fmt2, fmt3, _, _ = _syslog2node_formats\n m = fmt1.match(s)\n if m:\n _syslog_node_prev = m.group(11)\n return _syslog_node_prev\n\n m = fmt2.match(s)\n if m:\n _syslog_node_prev = m.group(2)\n return _syslog_node_prev\n\n m = fmt3.match(s)\n if m:\n 
_syslog_node_prev = m.group(2)\n return _syslog_node_prev\n\n try:\n # strptime defaults year to 1900 (sigh)\n time.strptime(' '.join(s.split()[0:3]),\n \"%b %d %H:%M:%S\")\n _syslog_node_prev = s.split()[3]\n return _syslog_node_prev\n except ValueError: # try the rfc5424\n ls = s.split()\n if not ls:\n return _syslog_node_prev\n rfc5424 = s.split()[0]\n if 'T' in rfc5424:\n try:\n utils.parse_to_timestamp(rfc5424)\n _syslog_node_prev = s.split()[1]\n return _syslog_node_prev\n except Exception:\n return _syslog_node_prev\n else:\n return _syslog_node_prev\n\n\ndef syslog_ts_node(s):\n \"\"\"\n Returns (timestamp, node) from a syslog log line\n \"\"\"\n global _syslog_ts_prev\n global _syslog_node_prev\n fmt1, fmt2, fmt3, fmt4, fmt5 = _syslog2node_formats\n\n # RFC3339\n m = fmt1.match(s)\n if m:\n year, month, day, hour, minute, second, ms, tzsgn, tzh, tzm, node = m.groups()\n ts = time.mktime((int(year), int(month), int(day), int(hour), int(minute), int(second), 0, 0, -1))\n if tzsgn == '+':\n ts += (3600.0 * float(tzh) + 60.0 * float(tzm))\n else:\n ts -= (3600.0 * float(tzh) + 60.0 * float(tzm))\n _syslog_ts_prev = ts\n _syslog_node_prev = node\n return _syslog_ts_prev, node\n\n m = fmt2.match(s)\n if m:\n _syslog_ts_prev, _syslog_node_prev = utils.parse_to_timestamp(m.group(1)), m.group(2)\n return _syslog_ts_prev, _syslog_node_prev\n\n m = fmt3.match(s)\n if m:\n if YEAR is None:\n set_year()\n tstr = YEAR + ' ' + m.group(1)\n\n dt = datetime.datetime.strptime(tstr, '%Y %b %d %H:%M:%S')\n from dateutil import tz\n ts = utils.total_seconds(dt - tz.tzlocal().utcoffset(dt) - datetime.datetime(1970, 1, 1))\n _syslog_ts_prev, _syslog_node_prev = ts, m.group(2)\n return _syslog_ts_prev, _syslog_node_prev\n\n m = fmt4.match(s)\n if m:\n tstr = m.group(1).replace('_', ' ')\n _syslog_ts_prev = utils.parse_to_timestamp(tstr)\n return _syslog_ts_prev, _syslog_node_prev\n\n m = fmt5.match(s)\n if m:\n _syslog_ts_prev, _syslog_node_prev = utils.parse_to_timestamp(m.group(1)), m.group(2)\n return _syslog_ts_prev, _syslog_node_prev\n\n logger.debug(\"malformed line: %s\", s)\n return _syslog_ts_prev, _syslog_node_prev\n","repo_name":"ClusterLabs/crmsh","sub_path":"crmsh/logtime.py","file_name":"logtime.py","file_ext":"py","file_size_in_byte":6472,"program_lang":"python","lang":"en","doc_type":"code","stars":120,"dataset":"github-code","pt":"35"} +{"seq_id":"42109284980","text":"# https://www.acmicpc.net/problem/16934\n# https://www.acmicpc.net/source/47527277\n\nfrom collections import defaultdict\nimport sys\n\ninput = sys.stdin.readline\nprint = sys.stdout.write\n\n\nclass Trie:\n def __init__(self, s) -> None:\n self.used = False\n self.count = 0\n self.c = s\n self.children = defaultdict(lambda: Trie(\"\"))\n\n def append(self, child, do_not_propagate_used=False):\n s = child.c\n\n if len(s) == 0:\n self.count += 1\n\n if self.count >= 2:\n return str(self.count)\n\n return \"\"\n\n c = s[0]\n\n is_new = False\n\n if self.children[c].c != c:\n self.children[c].c = c\n\n is_new = True\n\n next_ = \"\"\n\n if len(s) >= 1:\n next_ = self.children[c].append(\n Trie(s[1:]), do_not_propagate_used=do_not_propagate_used or not self.used)\n\n if is_new:\n if not do_not_propagate_used:\n self.used = True\n return c\n\n return c + next_\n\n\nroot = Trie(\"\")\n\nn = int(input().rstrip())\n\nfor i in range(n):\n name = input().rstrip()\n\n log = root.append(Trie(name))\n\n if len(log) > 0:\n print(log + 
\"\\n\")\n","repo_name":"C-B-U/algorithm_challenge","sub_path":"시즌2/상/dps0340/0811_2_dps0340.py","file_name":"0811_2_dps0340.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"35"} +{"seq_id":"11820183478","text":"from collections import deque\n\n\ndef solution(n, computers):\n answer = 0\n visited = [False] * n\n\n def bfs(start):\n q = deque([start])\n while q:\n x = q.popleft()\n visited[x] = True\n for y in range(n):\n if y != x and computers[x][y] and not visited[y]:\n q.append(y)\n for i in range(n):\n if not visited[i]:\n bfs(i)\n answer += 1\n return answer","repo_name":"parkgr95/Algorithm-Programmers","sub_path":"코딩테스트 고득점 Kit/깊이너비 우선 탐색(DFSBFS)/네트워크.py","file_name":"네트워크.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"1678749065","text":"#!/usr/bin/env python\ntry:\n from setuptools import setup, find_packages\nexcept ImportError:\n from distutils.core import setup, find_packages\n\nwith open('README.md', 'r') as fh:\n long_description = fh.read()\n\nsetup(\n name='y2j',\n version='0.0.3',\n author='Neal Ormsbee',\n author_email='neal.ormsbee@gmail.com',\n description='A CLI tool for converting YAML to JSON.',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/SheepGotoHeaven/y2j',\n install_requires=['pyyaml'],\n packages=find_packages(),\n license='MIT',\n zip_safe=True,\n classifiers=[\n 'Programming Language :: Python :: 3',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n ],\n entry_points={\n 'console_scripts': [\n 'y2j = y2j.__main__:main'\n ]\n }\n)\n","repo_name":"nealormsbee/y2j","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"7786660411","text":"def calculate_area(base, height, shape = \"Triangle\"):\r\n\r\n if shape == \"Triangle\":\r\n area = (1/2)*base * height\r\n elif shape == \"Rectangle\":\r\n area = base * height\r\n else:\r\n print(\"Not an area we can calculate\")\r\n\r\n return area\r\n\r\nnum = calculate_area(3,3,\"Triangle\")\r\nnum = calculate_area(3,3,\"Rectangle\")\r\nnum = calculate_area(3,3,)\r\n\r\nprint(\"The area of our shape is: \",num)\r\n\r\n\r\ndef print_pattern(n=5):\r\n '''\r\n :param n: Integer number representing number of lines\r\n to be printed in a pattern. If n=3 it will print,\r\n *\r\n **\r\n ***\r\n If n=4, it will print,\r\n *\r\n **\r\n ***\r\n ****\r\n Default value for n is 5. So if function caller doesn't\r\n supply the input number then it will assume it to be 5\r\n :return: None\r\n '''\r\n # we need to run two for loops. 
Outer loop prints patterns line by line\r\n # where as inner loop print the content of that specific lines\r\n for i in range(n):\r\n s = ''\r\n for j in range(i+1):\r\n s = s + '*'\r\n print(s)\r\n\r\n\r\n\r\n\r\n","repo_name":"GSidi/Python-Self-Education","sub_path":"Python Excercises Basics I/Shapes.py","file_name":"Shapes.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"15260279310","text":"import matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport JDataObject as Jdata\r\n\r\n# define the file required for import\r\nfileName = \"C:/Users/johnk/Projects/nothernStar/18060NSTA_8780/18060NSTA_8800.ip\"\r\n\r\nsm24_start_times = []\r\nsm24_stop_times = []\r\ndatalines = []\r\nlines = 0\r\ntext_file = open(fileName, \"r\")\r\n\r\n# determin how many lines in the file\r\nwhile text_file.readline():\r\n lines += 1\r\ntext_file.close()\r\n\r\n# initiate a patch\r\npatch = Jdata.Jpatch()\r\n# read header information\r\ntext_file = open(fileName, \"r\")\r\n# initiate reading control variables\r\ncurrRdg = 0\r\nfor i, line in enumerate(text_file):\r\n if i == 2:\r\n Varinfo = line.split()\r\n header4 = line\r\n # print(Varinfo)\r\n elif i == 3:\r\n extract = line.split('=')\r\n extract2 = extract[1].split(',')\r\n sm24_start_times = extract2\r\n elif i == 4:\r\n extract = line.split('=')\r\n extract2 = extract[1].split(',')\r\n sm24_stop_times = extract2\r\n elif i > 11:\r\n try:\r\n datatxt = line.split()\r\n # # do some Jdatamanagment stuff\r\n # # print(datatxt)\r\n varFields = Jdata.Jreadtxtline(Varinfo, datatxt)\r\n datalines.append(varFields)\r\n # # verify if line is a new reading\r\n # if varFields.RDG == currRdg:\r\n # # add the dipoles\r\n # # Idp = Jdata.JinDipole(varFields)\r\n # Vpdp = JvoltDipole(varFields)\r\n # Rdg.addVoltageDipole(Vpdp)\r\n # else:\r\n # # create a reading\r\n # Rdg = Jreading(varFields.RDG)\r\n # Idp = JinDipole(varFields)\r\n # Vpdp = JvoltDipole(varFields)\r\n # Rdg.addVoltageDipole(Vpdp)\r\n # Rdg.addInDipole(Idp)\r\n # # add reading to the patch\r\n # patch.addreading(Rdg)\r\n # currRdg = varFields.RDG\r\n except:\r\n pass\r\n# print(len(datalines))\r\n# print(datalines[0].RDNG)\r\nsm24_start_times = np.asarray(sm24_start_times).astype(float)\r\nsm24_stop_times = np.asarray(sm24_stop_times).astype(float)\r\n# print(sm24_start_times)\r\n# print(sm24_stop_times)\r\nmid_points = (sm24_start_times + sm24_stop_times) / 2.\r\n# rdg = 11\r\n# for index in range(len(datalines)):\r\n# if rdg == int(datalines[index].RDNG):\r\n# decay = [datalines[index].CH1,\r\n# datalines[index].CH2,\r\n# datalines[index].CH3,\r\n# datalines[index].CH4,\r\n# datalines[index].CH5,\r\n# datalines[index].CH6,\r\n# datalines[index].CH7,\r\n# datalines[index].CH8,\r\n# datalines[index].CH9,\r\n# datalines[index].CH10,\r\n# datalines[index].CH11]\r\n# plt.plot(mid_points, decay, 'r-o')\r\n# plt.show()\r\nindex = 17\r\ndecay = [datalines[index].CH1,\r\n datalines[index].CH2,\r\n datalines[index].CH3,\r\n datalines[index].CH4,\r\n datalines[index].CH5,\r\n datalines[index].CH6,\r\n datalines[index].CH7,\r\n datalines[index].CH8,\r\n datalines[index].CH9,\r\n datalines[index].CH10,\r\n datalines[index].CH11]\r\nplt.plot(mid_points, decay, 'r-o')\r\ndecay = [datalines[index + 4].CH1,\r\n datalines[index + 4].CH2,\r\n datalines[index + 4].CH3,\r\n datalines[index + 4].CH4,\r\n datalines[index + 4].CH5,\r\n datalines[index + 4].CH6,\r\n datalines[index + 4].CH7,\r\n datalines[index + 
4].CH8,\r\n datalines[index + 4].CH9,\r\n datalines[index + 4].CH10,\r\n datalines[index + 4].CH11]\r\nplt.plot(mid_points, decay, 'r-o')\r\ndecay = [datalines[index + 8].CH1,\r\n datalines[index + 8].CH2,\r\n datalines[index + 8].CH3,\r\n datalines[index + 8].CH4,\r\n datalines[index + 8].CH5,\r\n datalines[index + 8].CH6,\r\n datalines[index + 8].CH7,\r\n datalines[index + 8].CH8,\r\n datalines[index + 8].CH9,\r\n datalines[index + 8].CH10,\r\n datalines[index + 8].CH11]\r\nplt.plot(mid_points, decay, 'r-o')\r\nprint(datalines[index].RDNG)\r\nprint(datalines[index].C1E)\r\nprint(datalines[index].P1E)\r\nprint(datalines[index].P2E)\r\nprint(datalines[index + 4].RDNG)\r\nprint(datalines[index + 4].C1E)\r\nprint(datalines[index + 4].P1E)\r\nprint(datalines[index + 4].P2E)\r\nprint(datalines[index + 8].RDNG)\r\nprint(datalines[index + 8].C1E)\r\nprint(datalines[index + 8].P1E)\r\nprint(datalines[index + 8].P2E)\r\n\r\n# define the file required for import\r\nfileName = \"C:/Users/johnk/Projects/nothernStar/18060NSTA_8780/L8900-test.DAT\"\r\n\r\npatch = Jdata.loadDias(fileName) # Create the patch from data file\r\nrdg = 19 # Source to plot\r\ndp = 28\r\n# i = 4\r\ndpnum = ['SM24','SM24','SM24', 'DIAS32 - 2', 'DIAS32 - 1','DIAS32 - 3', 'DIAS32 - 4', 'DIAS32 - Ens']\r\nprint(len(patch.readings))\r\nprint(patch.readings[rdg].Idp.Tx1)\r\nprint(patch.readings[rdg].Vdp[dp].Rx1)\r\nprint(patch.readings[rdg].Vdp[dp].Rx2)\r\nplt.plot(patch.window_center,\r\n patch.readings[rdg].Vdp[dp].Vs / (patch.readings[rdg].Vdp[dp].Vp / 1000.), 'k-o')\r\n\r\n# define the file required for import\r\nfileName2 = \"C:/Users/johnk/Projects/nothernStar/18060NSTA_8780/L8900-test-1.DAT\"\r\n\r\npatch2 = Jdata.loadDias(fileName2) # Create the patch from data file\r\nrdg = 19 # Source to plot\r\ndp = 28\r\n# i = 4\r\nprint(len(patch2.readings))\r\nprint(patch2.readings[rdg].Idp.Tx1)\r\nprint(patch2.readings[rdg].Vdp[dp].Rx1)\r\nprint(patch2.readings[rdg].Vdp[dp].Rx2)\r\nplt.plot(patch2.window_center,\r\n patch2.readings[rdg].Vdp[dp].Vs / (patch2.readings[rdg].Vdp[dp].Vp / 1000.), 'm-o')\r\n\r\n# define the file required for import\r\nfileName3 = \"C:/Users/johnk/Projects/nothernStar/18060NSTA_8780/L8900-test-3.DAT\"\r\n\r\npatch3 = Jdata.loadDias(fileName3) # Create the patch from data file\r\nrdg = 19 # Source to plot\r\ndp = 28\r\n# i = 4\r\nprint(len(patch3.readings))\r\nprint(patch3.readings[rdg].Idp.Tx1)\r\nprint(patch3.readings[rdg].Vdp[dp].Rx1)\r\nprint(patch3.readings[rdg].Vdp[dp].Rx2)\r\nplt.plot(patch3.window_center,\r\n patch3.readings[rdg].Vdp[dp].Vs / (patch3.readings[rdg].Vdp[dp].Vp / 1000.), 'c-o')\r\n\r\n# define the file required for import\r\nfileName4 = \"C:/Users/johnk/Projects/nothernStar/18060NSTA_8780/L8900-test-4.DAT\"\r\n\r\npatch4 = Jdata.loadDias(fileName4) # Create the patch from data file\r\nrdg = 19 # Source to plot\r\ndp = 28\r\n# i = 4\r\nprint(len(patch4.readings))\r\nprint(patch4.readings[rdg].Idp.Tx1)\r\nprint(patch4.readings[rdg].Vdp[dp].Rx1)\r\nprint(patch4.readings[rdg].Vdp[dp].Rx2)\r\nplt.plot(patch4.window_center,\r\n patch4.readings[rdg].Vdp[dp].Vs / (patch4.readings[rdg].Vdp[dp].Vp / 1000.), 'g-o')\r\n\r\n# define the file required for import\r\nfileName5 = \"C:/Users/johnk/Projects/nothernStar/18060NSTA_8780/L8900-test-4ens.DAT\"\r\n\r\npatch5 = Jdata.loadDias(fileName5) # Create the patch from data file\r\nrdg = 19 # Source to plot\r\ndp = 28\r\n# i = 
4\r\nprint(len(patch5.readings))\r\nprint(patch5.readings[rdg].Idp.Tx1)\r\nprint(patch5.readings[rdg].Vdp[dp].Rx1)\r\nprint(patch5.readings[rdg].Vdp[dp].Rx2)\r\nplt.plot(patch5.window_center,\r\n patch5.readings[rdg].Vdp[dp].Vs / (patch5.readings[rdg].Vdp[dp].Vp / 1000.), 'b-o')\r\n\r\nplt.legend(dpnum)\r\nplt.title(\"Tx 3600 - 3800 Rx 5700 - 5800\")\r\nplt.xlabel(\"time (ms)\")\r\nplt.ylabel(\"Voltage (mV/V)\")\r\nplt.show()\r\n","repo_name":"JKutt/PyDev","sub_path":"plotDecay-0.1/sm24look.py","file_name":"sm24look.py","file_ext":"py","file_size_in_byte":7462,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"24715395191","text":"import discord\r\nfrom discord.ext import commands\r\nimport helper as h\r\nfrom discord.ext.commands.cooldowns import BucketType\r\nimport random\r\nimport math\r\nimport os\r\nimport aiohttp\r\nimport aiosqlite\r\nimport time\r\nfrom better_profanity import profanity\r\nfrom discord import Webhook, AsyncWebhookAdapter\r\nimport aiohttp\r\n\r\nclass janitor(commands.Cog):\r\n def __init__(self, bot):\r\n self.bot = bot\r\n self.hooks = [\r\n \"usr1’s horse stomps usr2 to death with its hooves.\",\r\n \"usr1 removes usr2’s bdypart with a single swing. “Just a flesh wound!” they proclaim.\\n\\nWell, it is definitely more than a flesh wound.\",\r\n \"usr1 throws a coconut at usr2. It bonks off their bdypart with an extremely undignified sound.\",\r\n \"usr1 charges usr2 on horseback, impaling their lance through their bdypart.\",\r\n \"usr1 grips their sword by the blade and beats usr2 to death with the hilt.\",\r\n \"usr1 cuts down usr2 with their sword, then stops to offer a prayer for their soul.\",\r\n \"usr1 charges across an open field at usr2. For nearly a minute, tense drums echo from the ether, the brave knight appearing to get no closer, so great is the distance between them. Then, with frankly improbable speed, the final gap is closed! usr2 is stabbed in the bdypart! Which sucks, but at least now they don’t have to go to that stupid wedding.\",\r\n \"usr2 blows rain down upon usr1, but their armor is impenetrable. Their sword lashes out once in retaliation, taking usr2’s bdypart and ending the duel.\",\r\n \"usr1 throws down their gauntlet, demanding an honorable duel! Then, when usr2 bends down to pick it up, they stab them in the bdypart from behind. 
Honorably.\",\r\n \"The light reflecting off of usr1’s shining armor blinds usr2, leaving them open to a strike that cuts off their bdypart.\",\r\n \"usr1 unscrews the pommel of their sword, and tosses it into the skull of usr2, ending them rightly.\"\r\n ]\r\n self.crusade = None\r\n profanity.load_censor_words()\r\n pass\r\n\r\n @commands.command()\r\n @commands.guild_only()\r\n async def clean(self, ctx): # Shoots an arrow at someone.\r\n if self.bot.users_classes[str(ctx.author.id)] == \"assistant janitor\":\r\n ap_works = await h.alter_ap(ctx.message, 5, self.bot)\r\n \r\n if ap_works:\r\n messages = await ctx.channel.history(limit=2).flatten()\r\n message_checking = messages[1]\r\n author = message_checking.author\r\n if profanity.contains_profanity(message_checking.content):\r\n if message_checking.author != ctx.author:\r\n async with aiohttp.ClientSession() as session:\r\n url = await h.webhook_safe_check(ctx.channel)\r\n hook = Webhook.from_url(url, adapter=AsyncWebhookAdapter(session))\r\n await hook.send(content=profanity.censor(message_checking.content, \"○\"), username=author.display_name, avatar_url=author.avatar_url)\r\n await message_checking.delete()\r\n await messages[0].delete()\r\n await h.add_coolness(ctx.author.id, 1000)\r\n await h.add_gold(ctx.author.id, 500, self.bot)\r\n else:\r\n await ctx.send(\"You can't clean your own message! Also, for even trying this, I'm taking away 100 coolness from you for swearing!\")\r\n await h.add_coolness(ctx.author.id, -100)\r\n else:\r\n await ctx.send(f\"{ctx.author.mention} | Previous message contains no obvious profanity!\")\r\n \r\n \r\n# A setup function the every cog has\r\ndef setup(bot):\r\n bot.add_cog(janitor(bot))\r\n","repo_name":"Caldraeus/chat-classes","sub_path":"cogs/clss_commands/swordsman/warrior/assistant_janitor/janitor.py","file_name":"janitor.py","file_ext":"py","file_size_in_byte":3892,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"8132863295","text":"import sys, os, shutil\nsys.path.insert(1, \"../../\")\nimport h2o\nfrom h2o.model.binomial import H2OBinomialModel\n\ndef save_load_model(ip,port):\n \n \n\n prostate = h2o.import_file(h2o.locate(\"smalldata/prostate/prostate.csv\"))\n prostate[\"CAPSULE\"] = prostate[\"CAPSULE\"].asfactor()\n prostate_glm = h2o.glm(y=prostate[\"CAPSULE\"], x=prostate[[\"AGE\",\"RACE\",\"PSA\",\"DCAPS\"]], family = \"binomial\",\n alpha = [0.5])\n model_path = h2o.save_model(prostate_glm,force=True)\n the_model = h2o.load_model(model_path)\n shutil.rmtree(model_path)\n\n assert isinstance(the_model, H2OBinomialModel), \"Expected and H2OBinomialModel, but got {0}\".format(the_model)\n\nif __name__ == \"__main__\":\n h2o.run_test(sys.argv, save_load_model)\n","repo_name":"PawarPawan/h2o-v3","sub_path":"h2o-py/tests/testdir_misc/pyunit_save_load_model.py","file_name":"pyunit_save_load_model.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"32109078167","text":"a = \"Hello, World!\"\nprint(a.upper())\n\na = \"My name is Mohammad Mansour Gulzad\"\nprint(a.lower())\n\na = \"Hello, World!\"\nprint(a.replace(\"r\", \"s\"))\n\n\na = \" University of central Asia \"\nprint(a.split(\" \")) \n\nfirst = \"Hello\"\nsecond = \"World\"\nfinall = first + second\nprint(finall)\n\na = \"Hello\"\nb = \"World\"\n\nprint( a + \" \" + b) # for adding space between words we can use \" \" and add space inside 
","repo_name":"Mansour-10/python-mansour","sub_path":"modifyString.py","file_name":"modifyString.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"38427135001","text":"'''\nhttps://leetcode-cn.com/problems/reordered-power-of-2/\n\n给定正整数 N ,我们按任何顺序(包括原始顺序)将数字重新排序,注意其前导数字不能为零。\n\n如果我们可以通过上述方式得到 2 的幂,返回 true;否则,返回 false。\n\n示例 1:\n 输入:1\n 输出:true\n\n示例 2:\n 输入:10\n 输出:false\n\n示例 3:\n 输入:16\n 输出:true\n\n示例 4:\n 输入:24\n 输出:false\n\n示例 5:\n 输入:46\n 输出:true\n\n提示:\n 1 <= N <= 10 ^ 9\n\n'''\n\nimport collections\n\nclass Solution:\n def reorderedPowerOf2(self, n):\n count = collections.Counter(str(n)) # 原本数字n的计数,Counter({'4': 1, '6': 1})\n for i in range(31): # 因为 10 ^ 9 < 2 ^ 30, 所以2的次方数,也就2^0,2^1,2^2.......2^30,共31个\n if count == collections.Counter(str(1 << i)):\n return True\n return False\n \nif __name__ == \"__main__\":\n n = 46\n sol = Solution()\n result = sol.reorderedPowerOf2(n)\n print(result)","repo_name":"jasonmayday/LeetCode","sub_path":"leetcode_algorithm/2_medium/0869_重新排序得到2的幂.py","file_name":"0869_重新排序得到2的幂.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"36472917268","text":"#https://tk-tutorial.readthedocs.io/en/latest/listbox/listbox.html\nimport tkinter as tk\n\ndef listbox(parent, items, item_clicked, item_selected, n_row=40):\n\n def myclick(event=None):\n idx = lb.curselection()\n if idx:\n out = lb.get(idx)\n search.delete(0, tk.END)\n search.insert(0, out)\n item_clicked(out)\n def myselect(event):\n myclick(event)\n idx = lb.curselection()\n out = lb.get(idx)\n item_selected(out)\n def search_changed(*args):\n search_str = search_var.get()\n i = 0\n lb.delete(0, tk.END)\n for item in items:\n if search_str.lower() in item.lower():\n lb.insert(i, item)\n i += 1\n \n frame = tk.Frame(parent)\n search_var = tk.StringVar()\n #search_var.trace('w', search_changed)\n search = tk.Entry(frame, width=40, textvariable=search_var)\n search.grid(row=1, column=0)\n \n var = tk.StringVar(value=items)\n lb = tk.Listbox(frame, listvariable=var, selectmode='single', height=n_row, width=40)\n lb.grid(row=2, column=0)\n lb.bind('<>', myclick)\n lb.bind('', myselect)\n\n frame.get = lb.get\n frame.insert = lb.insert\n frame.delete = lb.delete\n frame.index = lb.index\n return frame\n\n\ndef click(*args):\n print('click', args)\ndef select(*args):\n print('select', args)\nif __name__ == '__main__':\n root = tk.Tk()\n frame = listbox(root, dir(tk), click, select)\n frame.grid()\n\n root.mainloop()\n","repo_name":"wyolum/Alex","sub_path":"scripts/packages/mylistbox.py","file_name":"mylistbox.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"} +{"seq_id":"16805028063","text":"import sys\nsys.path.append('../')\nfrom utils.tools import *\nfrom utils.log import *\nimport os\nimport time\n\nlogger = get_logger()\n\ndef get_spider_list():\n path='./server_config/spider_list.json'\n lst = read_json(path)\n items = lst['list']\n return [item for item in items]\n\ndef do_scrape_task():\n lst = get_spider_list()\n for spider_name in lst:\n logger.info(\" Starting crawl %s\",spider_name)\n os.system(\"scrapy crawl %s\"%spider_name)\n\n\n\ndef run():\n while True:\n config_path = './server_config/run.yaml'\n config = read_yaml(config_path)\n minutes = config['minutes']\n do_scrape_task()\n 
logger.warn('scrape all, now sleeping %d minutes' % minutes)\n time.sleep(minutes)\n\n\nif __name__=='__main__':\n spider_lst= do_scrape_task()\n\n","repo_name":"z362215712/NBANews","sub_path":"scrape_news/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31037659359","text":"import socket\n\nimport logging\nimport socketserver\nimport socket\nfrom http import server\n\nPAGE = \"\"\"\\\n\n\nCamera\n\n\n
<body>\n<h1>Camera</h1>\n<img src=\"stream.mjpg\" width=\"640\" height=\"480\" />\n</body>\n</html>
\n\n\n\"\"\"\n\ndata = bytearray()\n\nsocketServerPort = 3600\nhttpServerPort = 3700\nbufferSize = 4096*2\n\nudpServerSocket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)\nudpServerSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\nclass StreamingHandler(server.BaseHTTPRequestHandler):\n def do_GET(self):\n if self.path == '/index.html':\n content = PAGE.encode('utf-8')\n self.send_response(200)\n self.send_header('Content-Type', 'text/html')\n self.send_header('Content-Length', len(content))\n self.end_headers()\n self.wfile.write(content)\n elif self.path == '/stream.mjpg':\n self.send_response(200)\n self.send_header('Age', 0)\n self.send_header('Cache-Control', 'no-cache, private')\n self.send_header('Pragma', 'no-cache')\n self.send_header('Content-Type', 'multipart/x-mixed-replace; boundary=FRAME')\n self.end_headers()\n try:\n while True:\n data = udpServerSocket.recvfrom(bufferSize)[0]\n self.wfile.write(b'--FRAME\\r\\n')\n self.send_header('Content-Type', 'image/jpeg')\n self.send_header('Content-Length', len(data))\n self.end_headers()\n self.wfile.write(data)\n self.wfile.write(b'\\r\\n')\n except Exception as e:\n logging.warning('Client is gone %s: %s', self.client_address, str(e))\n else:\n self.send_error(404)\n self.end_headers()\n\nclass StreamingServer(socketserver.ThreadingMixIn, server.HTTPServer):\n udpServerSocket.bind((\"0.0.0.0\", socketServerPort))\n print('Listening on port %s ...' % socketServerPort)\n allow_reuse_address = True\n daemon_threads = True\n\ntry:\n address = ('', httpServerPort)\n server = StreamingServer(address, StreamingHandler)\n server.serve_forever()\nfinally:\n print(\"stopped\")\n","repo_name":"emre-h/cam-server","sub_path":"socket-server.py","file_name":"socket-server.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17878472494","text":"import requests\nfrom pprint import pprint as print\n\nAPI_KEY = '94888e9f3172cc3cd00a3f10'\ncurrency='USD'\nurl=f\"https://v6.exchangerate-api.com/v6/{API_KEY}/pair/{currency}/UZS\"\n\nresponse = requests.get(url)\nprint(response.status_code)\nprint(response.json())\n\njsondata = response.json()\nkurs = jsondata['conversion_rate']\nprint(f\"Bugungi kurs: 1$ = {kurs} so'm\")","repo_name":"sherkhamidov/Translate_Bot","sub_path":"exchange.py","file_name":"exchange.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"17101912697","text":"import numpy as np\nimport pywt # библиотеку нужно качать (pip install PyWavelets)\nfrom scipy.signal import find_peaks\n\n# Вейвлет анализ\ndef wavelet_analysis(ppg_signal, fps, wavelet=None):\n \n length_data = len(ppg_signal)\n if(length_data == 0):\n return None\n \n if (wavelet==None):\n wavelet = 'mexh'\n else: wavelet = wavelet\n \n min_freq = 36 # Минимальный bpm\n max_freq = 240 # Максимальный bpm\n min_freq_hz = min_freq/60 # Минимальный bpm в герцах\n min_scale = min_freq_hz/2 # Нижняя граница масштаба (расчет по статье Unakafov2018 раздел 2.2.5)\n max_scale = fps/2 # Верхняя граница масштаба (расчет по статье Unakafov2018 раздел 2.2.5)\n sampling_period = 1/fps # Период дискретизации\n scales = np.arange(min_scale,max_scale, 2**0.03125) # Массив масштабов (шаг взят со статьи Unakafov2018 примечание 7)\n \n # Расчет вейвлета\n # Доступные вейвлет-функции: print(pywt.wavelist())\n coef = pywt.cwt(data = ppg_signal, scales = scales, 
wavelet = wavelet, sampling_period = sampling_period)[0]\n \n # Поиск вейвлета с максимальной суммой коэффициентов (по алгоритму со статьи Huang2016 формула 13)\n max_sum = 0\n index_max = 0\n for i in range(len(coef)):\n sum_coef = 0\n for j in range(len(coef[i])):\n sum_coef = sum_coef+coef[i][j]\n if(sum_coef>max_sum):\n max_sum = sum_coef\n index_max = i\n \n # Применение к выбранному вейвлету межпиковог�� анализа (межпиковый анализ используется в алгоритме со статьи Huang2016)\n length_data = len(coef[index_max])\n max_num_peaks = ((length_data/fps)/60)*max_freq # Максимальное количество пиков\n min_distance = length_data/max_num_peaks # Минимальное расстояние между пиками\n peaks = find_peaks(coef[index_max], distance=min_distance-1, prominence=10)[0] # Индексы пиков\n distances = [] # Расстояние между пиками\n for i in range(len(peaks)-1):\n distances.append(peaks[i+1] - peaks[i])\n \n distances = sorted(distances) # Сортировка по возрастанию\n M = max(1,int(len(distances)*0.5)) # Усреднение М медиан\n # Уменьшенный массив расстояний (учет выбросов)\n distances_small = distances[int(len(distances)//2 - M//2) : int(len(distances)//2 - M//2 + M)]\n one_beat_time = (sum(distances_small)/len(distances_small))/fps # Время одного сердцебиения\n hr_estimated = 1/one_beat_time # Предполагаемая ЧСС\n \n return hr_estimated ","repo_name":"ArtemAvanesov/RPPG-BPM","sub_path":"second_stage/wavelet_analysis.py","file_name":"wavelet_analysis.py","file_ext":"py","file_size_in_byte":3073,"program_lang":"python","lang":"ru","doc_type":"code","stars":24,"dataset":"github-code","pt":"37"} +{"seq_id":"30996236140","text":"\"\"\"Check your internet speed powered by speedtest.net\nSyntax: .speedtest\nAvailable Options: image, file, text\"\"\"\nimport logging\nfrom datetime import datetime\n\nimport speedtest\n\nfrom userbot import bot\nfrom userbot.util import admin_cmd\n\nlogging.basicConfig(format='[%(levelname) 5s/%(asctime)s] %(name)s: %(message)s',\n level=logging.WARNING)\nlogger = logging.getLogger(__name__)\n\n\n@bot.on(admin_cmd(pattern=\"speedtest ?(.*)\"))\nasync def _(event):\n if event.fwd_from:\n return\n input_str = event.pattern_match.group(1)\n as_text = False\n as_document = True\n if input_str == \"image\":\n as_document = False\n elif input_str == \"file\":\n as_document = True\n elif input_str == \"text\":\n as_text = True\n await event.edit(\"Calculating my internet speed. 
Please wait!\")\n start = datetime.now()\n s = speedtest.Speedtest()\n s.get_best_server()\n s.download()\n s.upload()\n end = datetime.now()\n ms = (end - start).microseconds / 1000\n response = s.results.dict()\n download_speed = response.get(\"download\")\n upload_speed = response.get(\"upload\")\n ping_time = response.get(\"ping\")\n client_infos = response.get(\"client\")\n i_s_p = client_infos.get(\"isp\")\n i_s_p_rating = client_infos.get(\"isprating\")\n reply_msg_id = event.message.id\n if event.reply_to_msg_id:\n reply_msg_id = event.reply_to_msg_id\n try:\n response = s.results.share()\n speedtest_image = response\n if as_text:\n await event.edit(\n \"**SpeedTest** completed in {} seconds\\n\"\n \"Download: {}\\n\"\n \"Upload: {}\\n\"\n \"Ping: {}\\n\"\n \"Internet Service Provider: {}\\n\"\n \"ISP Rating: {}\".format(\n ms,\n humanbytes(download_speed),\n humanbytes(upload_speed),\n ping_time,\n i_s_p,\n i_s_p_rating\n )\n )\n else:\n await bot.send_file(\n event.chat_id,\n speedtest_image,\n caption=\"**SpeedTest** completed in {} seconds\".format(ms),\n force_document=as_document,\n reply_to=reply_msg_id,\n allow_cache=False\n )\n await event.delete()\n except Exception as exc:\n await event.edit(\n \"**SpeedTest** completed in {} seconds\\n\"\n \"Download: {}\\n\"\n \"Upload: {}\\n\"\n \"Ping: {}\\n\\n\"\n \"__With the Following ERRORs__\\n\"\n \"{}\".format(\n ms,\n humanbytes(download_speed),\n humanbytes(upload_speed),\n ping_time,\n str(exc)\n )\n )\n","repo_name":"muhammedfurkan/TelethonUserBot","sub_path":"userbot/modules/speedtest.py","file_name":"speedtest.py","file_ext":"py","file_size_in_byte":2814,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"37"} +{"seq_id":"39443131943","text":"\"\"\"Unit test suite for Richie's default configuration mixin.\"\"\"\nfrom django.test import TestCase\n\nfrom configurations import Configuration\n\nfrom richie.apps.courses.settings.mixins import RichieCoursesConfigurationMixin\n\n\nclass SettingsMixinsTestCase(TestCase):\n \"\"\"Validate that RichieCoursesConfigurationMixin works as expected.\"\"\"\n\n def test_settings_mixins_value(self):\n \"\"\"\n The configuration mixin should set default values for all settings listed\n in apps/courses/settings/__init__.py.\n \"\"\"\n\n class TestConfiguration(RichieCoursesConfigurationMixin, Configuration):\n \"\"\"A configuration class inheriting from Richie's configuration mixin.\"\"\"\n\n # pylint: disable=no-member\n cms_templates = TestConfiguration().CMS_TEMPLATES\n self.assertEqual(len(cms_templates), 19)\n self.assertEqual(cms_templates[0][0], \"courses/cms/course_detail.html\")\n\n def test_settings_mixins_override(self):\n \"\"\"\n A configuration class inheriting from Richie's default configuration mixin\n should be able to override the value of any setting.\n \"\"\"\n\n class TestConfiguration(RichieCoursesConfigurationMixin, Configuration):\n \"\"\"A configuration class inheriting from Richie's configuration mixin.\"\"\"\n\n CMS_TEMPLATES = \"new value\"\n\n self.assertEqual(TestConfiguration().CMS_TEMPLATES, \"new value\")\n","repo_name":"openfun/richie","sub_path":"tests/apps/courses/test_settings_mixins.py","file_name":"test_settings_mixins.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","stars":240,"dataset":"github-code","pt":"37"} +{"seq_id":"19516986532","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom rdflib import Graph, URIRef, Namespace, Literal\nfrom 
rdflib.namespace import RDF, XSD\nimport uuid\nimport re # Import regular expressions module\nimport csv\nfrom difflib import SequenceMatcher\n\ndef remove_consecutive_characters(text, char):\n return re.sub(char + '{2,}', char, text)\n\ndef scrape_with_beautifulsoup(url):\n response = requests.get(url)\n soup = BeautifulSoup(response.text, 'html.parser')\n return soup\n\ndef remove_consecutive_quotes(text):\n return re.sub(r'\"{2,}', '\"', text)\n\ndef remove_consecutive_characters(text, char):\n if not text:\n return text\n\n return re.sub(char + '{2,}', char, text)\n\ndef remove_consecutive_characters(text, char, replace_with=' '):\n if not text:\n return text\n\n return re.sub(char + '{2,}', replace_with, text)\n\ndef read_publishers_from_csv(file_path):\n with open(file_path, newline='', encoding='utf-8') as csvfile:\n reader = csv.reader(csvfile)\n return [row[0] for row in reader]\n\ndef best_matching_publisher(publisher, publisher_list):\n best_match = publisher\n best_ratio = 0\n\n for candidate in publisher_list:\n ratio = SequenceMatcher(None, publisher, candidate).ratio()\n if ratio > best_ratio:\n best_match = candidate\n best_ratio = ratio\n\n return best_match\n\ndef extract_data(soup, link_url):\n h2_tag = soup.find('h2')\n if not h2_tag:\n return []\n\n award_name = h2_tag.text.strip()\n data = []\n\n for h5_tag in soup.find_all('h5'):\n date = h5_tag.text.strip()\n p_tag = h5_tag.find_next_sibling('p')\n\n if p_tag and p_tag.contents:\n author = p_tag.contents[0].rstrip(',').strip() if p_tag.contents[0] and isinstance(p_tag.contents[0], str) else None\n if author and (author.endswith('.') or author.endswith(',')):\n author = author[:-1]\n \n title_tag = p_tag.find('em') or p_tag.find('i') or p_tag.find('br')\n title = title_tag.text.strip() if title_tag else None\n \n publisher = p_tag.contents[-1].lstrip(',').strip() if p_tag.contents[-1] and isinstance(p_tag.contents[-1], str) else None\n\n if author:\n author = remove_consecutive_characters(author, ' ')\n\n if author or title or publisher:\n data.append([award_name, date, author, title, publisher, link_url])\n\n return data\n\ndef visit_links(soup):\n excluded_links = [\n #\"https://www.lurelu.net/prixlitt_cecilegagnon.html\",\n \"https://www.lurelu.net/prixlitt_espiegle.html\",\n \"https://www.lurelu.net/prixlitt_illojeunesse.html\",\n \"https://www.lurelu.net/prixlitt_aqpf-anel.html\",\n \"https://www.lurelu.net/prixlitt_gouverneur.html\",\n \"https://www.lurelu.net/prixlitt_culinar.html\",\n \"https://www.lurelu.net/prixlitt_acelf.html\",\n \"https://www.lurelu.net/prixlitt_conseildesarts.html\",\n \"https://www.lurelu.net/prixlitt_christie.html\",\n #\"https://www.lurelu.net/prixlitt_desjardins.html\",\n \"https://www.lurelu.net/prixlitt_palmaresCJ.html\",\n #\"https://www.lurelu.net/prixlitt_raymondplante.html\",\n ]\n all_data = []\n\n for link in soup.find_all('a', href=True):\n link_url = 'https://www.lurelu.net/' + link['href']\n \n if link_url not in excluded_links:\n link_soup = scrape_with_beautifulsoup(link_url)\n awards_data = extract_data(link_soup, link_url)\n all_data.extend(awards_data)\n\n return all_data\n\nsoup = scrape_with_beautifulsoup('https://www.lurelu.net/prixlitt.html')\nall_awards_data = visit_links(soup)\n\ng = Graph()\nschema = Namespace(\"http://schema.org/\")\nbooks = Namespace(\"https://schema.org/Book\")\n\nfor award_data in all_awards_data:\n award, date, author, title, publisher, link_url = award_data\n award = remove_consecutive_characters(award, ' ')\n award = 
award.replace('\\n', ' ')\n award = remove_consecutive_characters(award, r' ', ' ')\n award = remove_consecutive_characters(award, '\"')\n award = award.title()\n\n award = remove_consecutive_characters(award, r' ', ' ')\n award = remove_consecutive_characters(award, '\"')\n date = remove_consecutive_characters(date, ' ')\n author = remove_consecutive_characters(author, ' ')\n if title:\n title = remove_consecutive_characters(title, ' ')\n if publisher:\n publisher = remove_consecutive_characters(publisher, ' ')\n publisher = remove_consecutive_characters(publisher, '\"')\n publishers_list = read_publishers_from_csv('./query-result.csv')\n publisher = best_matching_publisher(publisher, publishers_list)\n\n genre = 'Jeunesse'\n langue = 'Français'\n\n book_id = f\"{uuid.uuid4()}\"\n book_node = URIRef(books[book_id])\n g.add((book_node, RDF.type, schema.Book))\n g.add((book_node, schema.name, Literal(title, datatype=XSD.string)))\n if (author):\n authors = author.split(' et ')\n for author in authors:\n g.add((book_node, schema.author, Literal(author.strip(), datatype=XSD.string))) \n g.add((book_node, schema.publisher, Literal(publisher, datatype=XSD.string)))\n g.add((book_node, schema.datePublished, Literal(date, datatype=XSD.gYear)))\n g.add((book_node, schema.award, Literal(award, datatype=XSD.string)))\n g.add((book_node, schema.genre, Literal(genre, datatype=XSD.string))) # Add the new genre attribute to the RDF graph\n g.add((book_node, schema.url, Literal(link_url, datatype=XSD.string)))\n g.add((book_node, schema.inLanguage, Literal(langue, datatype=XSD.string)))\n\n\nturtle_output = g.serialize(format=\"turtle\")\n\nwith open(\"output-lurelu.ttl\", \"w\", encoding=\"utf-8\") as f:\n f.write(turtle_output)\n","repo_name":"KarimLak/Books-Python","sub_path":"lurelu/query_books_lurelu.py","file_name":"query_books_lurelu.py","file_ext":"py","file_size_in_byte":5791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16458480836","text":"import hashlib\n\nfrom django.test import TestCase\nfrom analysis.analysis_models.static_analysis import Strings\n\nfrom django.core.exceptions import ObjectDoesNotExist\n\n\nclass StringsTestCase(TestCase):\n databases = {\"elfen\"}\n\n @classmethod\n def setUpTestData(cls):\n cls.test_string = \"This is a test\"\n cls.sha256 = hashlib.sha256(b\"This is a test\").hexdigest()\n Strings.objects.create(\n string=cls.test_string,\n sha256s=[cls.sha256]\n )\n\n def test_strings_get(self):\n \"\"\"\n This test checks if the created Strings object can be\n successfully retrieved from the DB.\n \"\"\"\n strings = Strings.objects.get(string=self.test_string)\n self.assertEqual(strings.string, self.test_string)\n\n def test_strings_update(self):\n \"\"\"\n This test updates Strings object's sha256s property.\n \"\"\"\n strings = Strings.objects.get(string=self.test_string)\n new_sha256s = [self.sha256,\n hashlib.sha256(b\"Another one\").hexdigest()]\n\n strings.sha256s = new_sha256s\n strings.save()\n\n strings = Strings.objects.get(string=self.test_string)\n self.assertEqual(strings.sha256s, new_sha256s)\n\n def test_strings_delete(self):\n \"\"\"\n This test checks if an existing Strings object can be deleted.\n It should be, since *currently* there are no other db objects referencing\n this Strings object.\n \"\"\"\n strings = Strings.objects.get(string=self.test_string)\n strings.delete()\n\n try:\n Strings.objects.get(string=self.test_string)\n self.fail(\"Strings object not deleted in 
database\")\n except ObjectDoesNotExist:\n pass\n","repo_name":"nikhilh-20/ELFEN","sub_path":"tests/database/test_strings.py","file_name":"test_strings.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","stars":85,"dataset":"github-code","pt":"37"} +{"seq_id":"31598909027","text":"import json\nimport os\nimport requests\nimport sys\n\n\nuser_id_map = {\n \"user1\": \"U051GF3CFAP\"\n}\n\n\ndef get_title_emoji(number_of_envs):\n if number_of_envs > 5:\n return \":triggered_parrot:\"\n elif number_of_envs >= 3:\n return \":alert:\"\n else:\n return \":rotating_light:\"\n\n\ndef slack(payload, webhook_url):\n\n source_provider = payload.get(\"source_provider\")\n source_type = payload.get(\"source_type\")\n env = payload.get(\"env\")\n\n title = (\n f\":rotating_light: {source_provider} {source_type} alarm - {env}\")\n slack_data = {\n \"text\": title,\n \"username\": f\"Ingestion Officer\",\n \"icon_emoji\": \":female-police-officer:\",\n \"blocks\": [\n {\n \"type\": \"header\",\n \"text\": {\n \"type\": \"plain_text\",\n \"text\": title\n }\n }]\n }\n byte_length = str(sys.getsizeof(slack_data))\n requests.post(webhook_url, data=json.dumps(slack_data),\n headers={'Content-Type': \"application/json\", 'Content-Length': byte_length})\n\n\ndef lambda_handler(event, context):\n # Parse the payload as JSON\n print(event)\n webhook_url = os.environ['SLACK_WEBHOOK_URL']\n slack(event, webhook_url)\n\n return {\n \"statusCode\": 200,\n \"body\": f\"Everything was fine\"\n }\n\n","repo_name":"UKSpaceAgency/sst-beta-infra","sub_path":"aws/alarm-lambda-notif/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34029197684","text":"import data\nfrom config import engine, sessionmaker\nsession = sessionmaker(engine)() # 创建游标\nfor str_name in data.station_name:\n for item in str_name[\"elements\"]:\n session.execute(\"insert into station_name(title, name, en, s) values(\\\"{title}\\\", \\\"{name}\\\", \\\"{en}\\\", \\\"{s}\\\")\".format(title=str_name[\"title\"], name=item[\"name\"], en=item[\"en\"], s=item[\"s\"]))\nsession.commit()\nsession.close()\n\n\n\n","repo_name":"zj15175848230/train-angular-python","sub_path":"py-train/src/mysql_conf/insert_data.py","file_name":"insert_data.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21048516605","text":"# This script runs training for smoothed and non-smoothed models. 
The script loads all\n# parameters/configuration from a YAML file, which must be passed to the `conf` argument.\n\nimport argparse\nimport os\nimport time\nfrom collections import Counter\nfrom copy import deepcopy\nfrom typing import Dict, List\n\nimport yaml\n\nfrom torchmalware.transforms.transforms import DropMetadata, ShiftByConstant\n\ntry:\n from yaml import CLoader as Loader\nexcept ImportError:\n from yaml import Loader\n\nimport csv\nimport os\n\n# filter pytorch UserWarning (the one with non-writable buffer)\nimport warnings\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch import optim\nfrom torch.utils.data import DataLoader, random_split\nfrom torch.types import Device\n\nfrom torchmalware.certification import CertifiedMalConv, perturbations\nfrom torchmalware.transforms import (\n Compose,\n MaskNonInstruction,\n RemovePEHeader,\n ToTensor,\n Trim,\n ZeroPEHeader,\n)\nfrom torchmalware.metadata import Metadata\nfrom torchmalware.types import IntBinarySample\nfrom torchmalware.utils import collate_pad, get_gpu_memory, seed_worker, set_seed\nfrom utils import make_dataset\n\n\ndef write_pred(test_pred, test_idx, file_path):\n test_pred = [item for sublist in test_pred for item in sublist]\n with open(file_path, \"w\", newline=\"\") as csvfile:\n pred_writer = csv.writer(\n csvfile, delimiter=\",\", quotechar=\"'\", quoting=csv.QUOTE_MINIMAL\n )\n pred_writer.writerows(zip(test_idx, test_pred))\n\n\ndef metadata_to(metadata: Metadata, device: Device = None) -> Metadata:\n for key in metadata.keys():\n if isinstance(metadata[key], torch.Tensor):\n metadata[key] = metadata[key].to(device)\n return metadata\n\n\ndef train_step(\n batch: IntBinarySample,\n model: CertifiedMalConv,\n loss: nn.Module,\n optimizer: optim.Optimizer,\n history: Dict[str, List],\n device: torch.DeviceObjType,\n num_samples: int = 1,\n):\n model.reduce = \"none\"\n (binaries, metadata), targets = batch\n binaries = binaries.to(device)\n metadata = metadata.to(device)\n targets = targets.to(device)\n\n optimizer.zero_grad()\n logits = model.forward(\n binaries,\n num_samples=num_samples,\n return_logits=True,\n return_radii=False,\n batch_size=targets.size(0),\n forward_kwargs=dict(metadata=metadata),\n ).reshape(\n num_samples * targets.size(0), model.out_size\n ) # Reshape from 3d to 2d\n # We want to also duplicate the target tensor\n targets = torch.cat([targets] * num_samples, dim=0)\n loss = loss(logits, targets)\n loss.backward()\n preds = logits.argmax(dim=1)\n optimizer.step()\n history[\"tr_loss\"].append(loss.item())\n history[\"tr_acc\"].extend((targets == preds).tolist())\n mem = 0\n if device == \"cuda:0\":\n try:\n mem, _ = get_gpu_memory(device)\n except:\n pass\n history[\"mem\"].append(mem)\n\n\ndef valid_step(\n batch: IntBinarySample,\n model: CertifiedMalConv,\n loss: nn.Module,\n history: Dict[str, List],\n device: torch.DeviceObjType,\n num_samples: int = 1,\n):\n model.reduce = \"soft\"\n with torch.no_grad():\n (binaries, metadata), targets = batch\n binaries = binaries.to(device)\n metadata = metadata_to(metadata, device=device)\n targets = targets.to(device)\n\n logits = model.forward(\n binaries,\n num_samples=num_samples,\n return_logits=True,\n return_radii=False,\n batch_size=targets.size(0),\n forward_kwargs=dict(metadata=metadata),\n )\n loss = loss(logits, targets)\n preds = logits.argmax(dim=1)\n history[\"val_loss\"].append(loss.item())\n history[\"val_acc\"].extend((targets == preds).tolist())\n history[\"val_pred\"].append(preds.tolist())\n 
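    # (added comment) switch the model back to unreduced per-sample outputs\n    # after the \"soft\"-reduced validation pass, matching what training expects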
model.reduce = \"none\"\n\n\ndef debug_step(\n batch: IntBinarySample,\n model: CertifiedMalConv,\n loss: nn.Module,\n history: Dict[str, List],\n device: torch.DeviceObjType,\n states: Dict,\n states_path: str,\n):\n model.reduce = \"none\"\n with torch.no_grad():\n (binaries, metadata), targets = batch\n binaries = binaries.to(device)\n metadata = metadata_to(metadata, device=device)\n targets = targets.to(device)\n\n # Recompute loss to see if performance significantly degraded\n new_logits = model.forward(\n binaries,\n num_samples=1,\n return_logits=True,\n return_radii=False,\n batch_size=targets.size(0),\n metadata=metadata,\n ).reshape(targets.size(0), model.out_size)\n\n # Use last 10 histories to consider loss divergence\n loss = np.mean(history[\"tr_loss\"][:10])\n new_loss = loss(new_logits, targets).item()\n if new_loss > (2 * loss):\n print(\n f\"[step: {total_step}] The loss diverged ({loss} -> {new_loss})\"\n )\n # Store the states\n states[\"loss\"] = loss\n states[\"new_loss\"] = new_loss\n os.makedirs(os.path.dirname(states_path), exist_ok=True)\n torch.save(states, states_path)\n\n\ndef scale_mask_grad(mask_ratio, embed_idx=-1):\n scale = (1 - mask_ratio) / mask_ratio\n\n def f(grad):\n grad[embed_idx] *= scale\n return grad\n\n return f\n\n\ndef clip_mask_grad(max_norm=\"max\", embed_idx=-1):\n def f(grad):\n with torch.no_grad():\n if embed_idx == -1:\n _embed_idx = grad.size(0) - 1\n if max_norm == \"max\":\n _max_norm = torch.max(\n torch.norm(\n torch.cat([grad[:_embed_idx], grad[_embed_idx + 1 :]]),\n p=2,\n dim=1,\n )\n )\n else:\n _max_norm = max_norm\n norm = torch.norm(grad[_embed_idx])\n scale = 1 if norm <= _max_norm else _max_norm / norm\n grad[_embed_idx] *= scale\n return grad\n\n return f\n\n\nif __name__ == \"__main__\":\n warnings.filterwarnings(\"ignore\")\n\n # Modified from https://github.com/Alexander-H-Liu/MalConv-Pytorch/blob/master/train.py\n # Load config file for experiment\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--config\", type=str, required=True, help=\"The path to configuration file.\"\n )\n parser.add_argument(\n \"--debug\", required=False, action=\"store_true\", help=\"If debug is enabled.\"\n )\n args = parser.parse_args()\n\n debug = args.debug\n conf = yaml.load(open(args.config, \"r\"), Loader=Loader)\n\n # Set seed\n seed = conf[\"seed\"]\n if seed is None:\n seed = np.random.randint(0, 10000)\n\n if seed is not None:\n set_seed(seed)\n\n exp_name = conf[\"exp_name\"] + \"_sd_\" + str(seed)\n print(\"Experiment:\\t\", exp_name)\n\n log_dir = conf[\"log_dir\"]\n pred_dir = conf[\"pred_dir\"]\n checkpoint_dir = conf[\"checkpoint_dir\"]\n os.makedirs(log_dir, exist_ok=True)\n os.makedirs(pred_dir, exist_ok=True)\n os.makedirs(checkpoint_dir, exist_ok=True)\n\n log_file_path = os.path.join(log_dir, exp_name + \".log\")\n ckpt_acc_base_path = os.path.join(checkpoint_dir, exp_name)\n pred_path = os.path.join(pred_dir, exp_name + \".pred\")\n\n # Parameters\n if torch.cuda.is_available() and conf[\"use_gpu\"]:\n device = \"cuda:0\"\n else:\n device = \"cpu\"\n num_threads = conf[\"cpu_threads\"]\n if num_threads is not None:\n torch.set_num_threads(num_threads)\n\n num_workers = conf[\"num_workers\"]\n if num_workers is None:\n num_workers = torch.get_num_threads()\n learning_rate = conf[\"learning_rate\"]\n momentum = conf[\"momentum\"]\n weight_decay = conf[\"weight_decay\"]\n max_epoch = conf[\"max_epoch\"]\n num_samples = conf[\"num_samples\"]\n valid_num_samples = conf[\"valid_num_samples\"]\n test_epoch = 
conf[\"test_epoch\"]\n batch_size = conf[\"batch_size\"]\n data_size = conf[\"data_size\"]\n out_size = conf[\"out_size\"]\n window_size = conf[\"window_size\"]\n scale_grad_by_freq = conf[\"scale_grad_by_freq\"]\n channels = conf[\"channels\"]\n embed_size = conf[\"embed_size\"]\n display_step = conf[\"display_step\"]\n\n train_sample_size = conf[\"train_sample_size\"]\n max_early_stop = conf[\"max_early_stop\"]\n\n embed_num = 256\n\n # Perturbation parameters\n perturbation = perturbations[conf[\"perturbation\"]](\n *conf[\"perturbation_args\"], **conf[\"perturbation_kwargs\"]\n )\n embed_num += perturbation.extra_dim()\n if conf[\"non_instruction_mask\"] is not None:\n embed_num = max(embed_num, conf[\"non_instruction_mask\"] + 1)\n # Padding\n embed_num += 1\n\n transform = [\n DropMetadata([\"binary_path\", \"exe_section\", \"header_size\"]),\n Trim(length=data_size),\n ]\n if conf[\"header\"] == \"remove\":\n transform.append(RemovePEHeader())\n elif conf[\"header\"] == \"zero\":\n transform.append(ZeroPEHeader())\n\n if conf[\"non_instruction_mask\"] is not None:\n transform.append(MaskNonInstruction(conf[\"non_instruction_mask\"]))\n transform += [\n ToTensor(dtype=torch.int32),\n ShiftByConstant(1),\n ]\n transform = Compose(transform)\n print(\"Transforms are:\", transform)\n\n model = CertifiedMalConv(\n perturbation=perturbation,\n out_size=out_size,\n channels=channels,\n window_size=window_size,\n embed_num=embed_num,\n embed_size=embed_size,\n scale_grad_by_freq=scale_grad_by_freq,\n threshold=None,\n certify_threshold=None,\n reduce=\"none\",\n ).train()\n\n # Scale/clip gradient of masked byte\n if \"masking\" in conf[\"perturbation\"].lower():\n h = model.embed_1.weight.register_hook(clip_mask_grad(0.5))\n train_data = conf[\"train_data\"]\n train_dataset = make_dataset(train_data, transform)\n valid_data = conf[\"valid_data\"]\n valid_dataset = make_dataset(valid_data, transform)\n valid_idx = [path for path, cls in valid_dataset.samples]\n\n print(\"Training Set:\")\n print(\"\\tTotal\", len(train_dataset), \"files\")\n counter = Counter(train_dataset.targets)\n print(\"\\tMalware Count :\", counter[1])\n print(\"\\tGoodware Count:\", counter[0])\n if train_sample_size:\n print(\"\\t\\t Train sample size:\", train_sample_size)\n\n print(\"Validation Set:\")\n print(\"\\tTotal\", len(valid_dataset), \"files\")\n counter = Counter(valid_dataset.targets)\n print(\"\\tMalware Count :\", counter[1])\n print(\"\\tGoodware Count:\", counter[0])\n\n if train_sample_size:\n new_size = min(train_sample_size, len(train_dataset))\n train_dataset, _ = random_split(\n train_dataset, [new_size, len(train_dataset) - new_size]\n )\n\n pin_memory = False\n non_blocking = pin_memory\n\n train_loader = DataLoader(\n train_dataset,\n batch_size=batch_size,\n shuffle=True,\n num_workers=num_workers,\n worker_init_fn=seed_worker,\n collate_fn=collate_pad,\n pin_memory=pin_memory,\n )\n\n valid_loader = DataLoader(\n valid_dataset,\n batch_size=batch_size,\n shuffle=False,\n num_workers=num_workers,\n worker_init_fn=seed_worker,\n collate_fn=collate_pad,\n pin_memory=pin_memory,\n )\n\n if torch.cuda.device_count() > 1:\n print(\"Let's use\", torch.cuda.device_count(), \"GPUs!\")\n model = nn.DataParallel(model)\n\n ce_loss = nn.CrossEntropyLoss()\n optimizer = optim.SGD(\n [{\"params\": model.parameters()}],\n lr=learning_rate,\n momentum=momentum,\n nesterov=True,\n weight_decay=weight_decay,\n )\n\n model = model.to(device)\n ce_loss = ce_loss.to(device)\n\n step_msg = (\n 
\"epoch-{:02d}-step-{:03d}-loss-{:.6f}-acc-{:.4f}-mem-{:5d}({:.1f}%)-time-{:.2f}\"\n )\n valid_msg = \"epoch-{:02d}-step-{:03d}-tr_loss-{:.6f}-tr_acc-{:.4f}-val_loss-{:.6f}-val_acc-{:.4f}-time-{:.4f}\"\n log_msg = \"{:02d}, {:03d}, {:.6f}, {:.4f}, {:.6f}, {:.4f}, {:.2f}, {:.2f}\"\n history = {\n \"tr_loss\": [],\n \"tr_acc\": [],\n \"mem\": [],\n }\n log = open(log_file_path, \"w\")\n log.write(\"epoch,step,tr_loss,tr_acc,val_loss,val_acc,time,max_mem_cuda\\n\")\n\n total_mem = 1\n if device == \"cuda:0\":\n try:\n _, total_mem = get_gpu_memory(device)\n except:\n pass\n\n valid_best_acc = 0.0\n total_step = 0\n\n step_t0 = eval_t0 = time.time()\n training_dt = 0\n early_stop = 0\n for epoch in range(max_epoch):\n step = 0\n if \"cuda\" in device:\n torch.cuda.reset_peak_memory_stats()\n for batch in train_loader:\n # Store variables before step\n if debug:\n states = {\n \"batch\": batch,\n \"model\": deepcopy(model.state_dict()),\n \"optimizer\": deepcopy(optimizer.state_dict()),\n }\n train_step(\n batch=batch,\n model=model,\n loss=ce_loss,\n optimizer=optimizer,\n history=history,\n device=device,\n num_samples=num_samples,\n )\n if debug:\n states_path = os.path.join(\n ckpt_acc_base_path + \"_debug\",\n f\"states-step_{total_step}.ckpt\",\n )\n debug_step(\n batch=batch,\n model=model,\n loss=ce_loss,\n history=history,\n device=device,\n states=states,\n states_path=states_path\n )\n\n step_t1 = time.time()\n step_dt = step_t1 - step_t0\n step_t0 = step_t1\n training_dt += step_dt\n\n if (step + 1) % display_step == 0:\n print(\n step_msg.format(\n epoch,\n total_step,\n np.mean(history[\"tr_loss\"]),\n np.mean(history[\"tr_acc\"]),\n int(history[\"mem\"][-1]),\n history[\"mem\"][-1] / total_mem * 100,\n step_dt,\n ),\n end=\"\\r\",\n flush=True,\n )\n total_step += 1\n step += 1\n max_memory = max(history[\"mem\"])\n\n # Interupt for validation\n if (epoch + 1) % test_epoch == 0:\n # Testing\n history[\"val_loss\"] = []\n history[\"val_acc\"] = []\n history[\"val_pred\"] = []\n early_stop += 1\n\n with torch.no_grad():\n for batch in valid_loader:\n valid_step(\n batch=batch,\n model=model,\n loss=ce_loss,\n history=history,\n device=device,\n num_samples=valid_num_samples,\n )\n\n eval_t1 = time.time()\n eval_dt = eval_t1 - eval_t0\n eval_t0 = eval_t1\n print(\n log_msg.format(\n epoch,\n total_step,\n np.mean(history[\"tr_loss\"]),\n np.mean(history[\"tr_acc\"]),\n np.mean(history[\"val_loss\"]),\n np.mean(history[\"val_acc\"]),\n eval_dt,\n training_dt,\n max_memory,\n ),\n file=log,\n flush=True,\n )\n\n print(\n valid_msg.format(\n epoch,\n total_step,\n np.mean(history[\"tr_loss\"]),\n np.mean(history[\"tr_acc\"]),\n np.mean(history[\"val_loss\"]),\n np.mean(history[\"val_acc\"]),\n eval_dt,\n )\n )\n model_ckpt = {\n \"epoch\": epoch,\n \"conf\": conf,\n \"fp_curve\": None,\n \"state_dict\": model.state_dict(),\n }\n ckpt_acc_path = ckpt_acc_base_path + f\"-step_{total_step}.ckpt\"\n torch.save(model_ckpt, ckpt_acc_path)\n torch.save(model_ckpt, ckpt_acc_base_path+\".ckpt\")\n print(\"\\tCheckpoint saved at\", ckpt_acc_path)\n write_pred(history[\"val_pred\"], valid_idx, pred_path)\n print(\"\\tPrediction saved at\", pred_path)\n early_stop = 0\n\n if early_stop > max_early_stop:\n break\n\n history[\"tr_loss\"] = []\n history[\"tr_acc\"] = []\n history[\"mem\"] = []\n\n 
log.close()\n","repo_name":"Dovermore/randomized-deletion","sub_path":"src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":16806,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"12299358699","text":"import sklearn\nimport pandas as pd\nimport numpy as np\nimport sys\nsys.path.append('../')\nfrom model.utils import indices_to_vec, array_to_one_hot\n\nclass DataT:\n '''\n Load datasets.\n '''\n def load_iris():\n # load iris data\n iris = sklearn.datasets.load_iris()\n data = iris[\"data\"]\n labels = iris[\"target\"]\n\n # get label 0 and 1, and corresponding features\n true_labels = labels[labels < 2]\n features = data[np.where(labels < 2)]\n\n return true_labels, features, []\n\n def load_cora(multiclass=False,rel_path=''):\n # load cora data\n if multiclass:\n nodes = pd.read_csv(rel_path+'cora/selected_contents_multiclass.csv',index_col=0,)\n else:\n nodes = pd.read_csv(rel_path+'cora/selected_contents.csv',index_col=0,)\n graph = np.loadtxt(rel_path+'cora/graph.csv',delimiter=',')\n id_ = np.array(nodes.index)\n\n # get label 0 and 1, and corresponding features\n true_labels = np.array(nodes['label'])\n features = nodes.loc[:,'feature_0':].as_matrix()\n true_labels = array_to_one_hot(true_labels)\n\n return true_labels, features, graph\n\n def load_mnist(rel_path=''):\n from tensorflow.examples.tutorials.mnist import input_data\n mnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n train_images = mnist.train.images\n train_labels = mnist.train.labels\n test_images = mnist.test.images\n test_labels = mnist.test.labels\n true_labels = np.vstack([train_labels, test_labels])\n features = np.vstack([train_images,test_images])\n labeled_indices = np.arange(train_images.shape[0])\n unlabeled_indices = np.arange(test_images.shape[0]) + train_images.shape[0]\n labels = true_labels.copy()\n k = labels.shape[1]\n labels[unlabeled_indices] = 1/k\n n = len(labels)\n is_labeled = np.zeros(n)\n is_labeled.fill(True)\n is_labeled.ravel()[unlabeled_indices] = False\n return true_labels, features, labels, is_labeled.reshape(-1,1), labeled_indices, unlabeled_indices\n\n def prepare(labels,labeled_indices,true_labels,k,num_classes,num_samples,num_nodes):\n num_nodes = len(labels)\n X_ = np.tile(labels.T,num_samples).reshape(num_classes,num_samples,num_nodes)\n y_ = np.tile(true_labels.T,1).reshape(num_classes,1,num_nodes)\n true_labeled_ = np.repeat(indices_to_vec(labeled_indices,num_nodes).reshape(1,num_nodes),num_classes,axis=0).reshape((num_classes,1,len(true_labels)))\n labeled_ = np.repeat(true_labeled_,num_samples,axis=1)\n masked_ = np.zeros((num_classes,num_samples,num_nodes))\n\n validation_data = {\n 'X': labels.T.reshape(num_classes,1,num_nodes),\n 'y': y_,\n 'labeled': true_labeled_,\n 'true_labeled': true_labeled_, # this will not be used\n 'masked': masked_ # this will not be used\n }\n\n for i in range(num_samples):\n indices_to_mask = np.random.choice(labeled_indices, k)\n X_[:,i,indices_to_mask] = 1/num_classes\n labeled_[:,i,indices_to_mask] = 0\n masked_[:,i,indices_to_mask] = 1\n\n data = {\n 'X': X_,\n 'y': y_,\n 'labeled': labeled_,\n 'true_labeled': true_labeled_,\n 'masked': masked_\n }\n\n return data, validation_data\n","repo_name":"kojino/dlp_for_cluster","sub_path":"model/DataT.py","file_name":"DataT.py","file_ext":"py","file_size_in_byte":3448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24171781361","text":"from 
django.shortcuts import render, get_object_or_404, redirect\nfrom django.views.generic import ListView, DetailView, CreateView\nfrom django.urls import reverse_lazy\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth import login, logout\nfrom django.contrib import messages\nfrom django.core.paginator import Paginator\nfrom django.core.mail import send_mail\nfrom django.conf import settings\n\nfrom .models import News, Category\nfrom .forms import NewsForm, UserRegisterForm, UserLoginForm, ContactForm\nfrom .utils import MyMixin\n\ndef register(request):\n if request.method == 'POST':\n form = UserRegisterForm(request.POST)\n if form.is_valid():\n user = form.save()\n login(request, user)\n messages.success(request, 'Вы успешно зарегистрировались')\n return redirect('home')\n else:\n messages.error(request, 'Ошибка регистрации')\n else:\n form = UserRegisterForm()\n return render(request, 'news/register.html', {'form': form})\n\n\ndef user_login(request):\n if request.method == 'POST':\n form = UserLoginForm(data=request.POST)\n if form.is_valid():\n user = form.get_user()\n login(request, user)\n return redirect('home')\n else:\n form = UserLoginForm()\n return render(request, 'news/login.html', {'form': form})\n\n\ndef user_logout(request):\n logout(request)\n return redirect('login')\n\n\ndef contact(request):\n if request.method == 'POST':\n form = ContactForm(request.POST)\n if form.is_valid():\n mail = send_mail(form.cleaned_data['subject'], form.cleaned_data['content'],\n settings.EMAIL_HOST_USER, ['fanatkakensina@gmail.com'], fail_silently=True)\n if mail:\n messages.success(request, 'Письмо отправлено')\n return redirect('contact')\n else:\n messages.error(request, 'Ошибка отправки')\n else:\n messages.error(request, 'Ошибка отправки письма')\n else:\n form = ContactForm()\n return render(request, 'news/test.html', {'form': form})\n\n\nclass HomeNews(MyMixin, ListView):\n model = News\n template_name = 'news/home_news_list.html'\n context_object_name = 'news'\n # extra_context = {'title': 'Главная'} # используется только для статичных данных\n # queryset = News.objects.select_related('category') # избавляет от лишних sql запросов\n paginate_by = 3\n\n mixin_prop = 'hello world'\n\n def get_context_data(self, *, object_list=None, **kwargs):\n context = super().get_context_data(**kwargs)\n context['title'] = self.get_upper('Главная страница')\n context['mixin_prop'] = self.get_prop()\n return context\n\n def get_queryset(self):\n return News.objects.filter(is_published=True).select_related('category')\n\n\nclass NewsByCategory(MyMixin, ListView):\n model = News\n template_name = 'news/home_news_list.html'\n context_object_name = 'news'\n allow_empty = False\n paginate_by = 2\n\n def get_context_data(self, *, object_list=None, **kwargs):\n context = super().get_context_data(**kwargs)\n context['title'] = self.get_upper(Category.objects.get(pk=self.kwargs['category_id']))\n return context\n\n def get_queryset(self):\n return News.objects.filter(category_id=self.kwargs['category_id'], is_published=True).select_related('category')\n\n\nclass ViewNews(DetailView):\n model = News\n context_object_name = 'news_item' # по-умолчанию object\n # template_name = 'news/view_news.html' # по-умолчанию news_detail.html\n # pk_url_kwarg = 'news_id' # по-умолчанию pk\n\n\nclass CreateNews(LoginRequiredMixin, CreateView):\n form_class = NewsForm\n template_name = 'news/add_news.html'\n # success_url = reverse_lazy('home') # по умолчанию делает redirect на новость, 
которая была создана\n # login_url = '/admin/'\n # login_url = reverse_lazy('home')\n raise_exception = True # выдаст страницу с текстом '403 Forbidden'\n\n\n","repo_name":"ZhanatUturova/testsite","sub_path":"mysite/news/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18126923747","text":"#Joe Muana\r\n#OXO\r\nboard = [' ',' ',' ',' ',' ',' ',' ',' ',' ',' ']\r\nturn_count=[0]\r\n\r\n\r\ndef printBoard():\r\n print(\" \" + board[1] + \" | \" + board[2] + \" | \" + board[3] + \" \")\r\n print(\"___|___|___\")\r\n print(\" \" + board[4] + \" | \" + board[5] + \" | \" + board[6] + \" \")\r\n print(\"___|___|___\")\r\n print(\" \" + board[7] + \" | \" + board[8] + \" | \" + board[9] + \" \")\r\n print(\" | | \")\r\n\r\n \r\ndef CheckWin():\r\n \r\n win = False\r\n #Horizontal winning condition \r\n if(board[1] == board[2] and board[2] == board[3] and board[1] != ' '): \r\n win = True \r\n elif(board[4] == board[5] and board[5] == board[6] and board[4] != ' '): \r\n won = True \r\n elif(board[7] == board[8] and board[8] == board[9] and board[7] != ' '): \r\n win = True \r\n #Vertical Winning Condition \r\n elif(board[1] == board[4] and board[4] == board[7] and board[1] != ' '): \r\n win = True \r\n elif(board[2] == board[5] and board[5] == board[8] and board[2] != ' '): \r\n win = True \r\n elif(board[3] == board[6] and board[6] == board[9] and board[3] != ' '): \r\n win = True \r\n #Diagonal Winning Condition \r\n elif(board[1] == board[5] and board[5] == board[9] and board[5] != ' '): \r\n win = True \r\n elif(board[3] == board[5] and board[5] == board[7] and board[5] != ' '): \r\n win = True \r\n #Match Draw Condition \r\n## elif(board[1]!=' ' and board[2]!=' ' and board[3]!=' ' and board[4]!=' ' and board[5]!=' ' and board[6]!=' ' and board[7]!=' ' and board[8]!=' ' and board[9]!=' '): \r\n## win = True\r\n\r\n return win\r\n\r\n#Start Game\r\n\r\n\r\ndef main ():\r\n turn = True\r\n game = True\r\n printBoard()\r\n while(game):\r\n if(turn):\r\n move = int(input(\"Player One, please enter the position between [1-9] where you want to mark \"))\r\n\r\n while(move>9 or move<1) or board[move]!=' ':\r\n move = int(input(\"That is an invalid move. Please enter a new move \"))\r\n \r\n board[move] = 'X'\r\n printBoard()\r\n turn = False\r\n winner = CheckWin()\r\n turn_count.append(1)\r\n\r\n if(winner):\r\n print(\"X Wins!\")\r\n \r\n game = False\r\n elif sum(turn_count) == 9:\r\n print('Game Drawn')\r\n game = False\r\n\r\n elif not(turn):\r\n move = int(input(\"Player two, please enter the position between [1-9] where you want to mark \"))\r\n\r\n\r\n while(move>9 or move<1) or board[move]!=' ':\r\n move = int(input(\"That is an invalid move. 
Please enter a new move \"))\r\n\r\n \r\n board[move] = 'O'\r\n printBoard()\r\n turn = True\r\n winner = CheckWin()\r\n turn_count.append(1)\r\n if(winner):\r\n print(\"O Wins!\")\r\n game = False\r\n elif sum(turn_count) == 9:\r\n print('Game Drawn')\r\n game = False\r\n if not game:\r\n for x in range (0, len(board)-1):\r\n board[x] = ' '\r\n\r\n\r\nmain()","repo_name":"GabrielM97/LibraryOfGames","sub_path":"ADP-Group Project/OXO.py","file_name":"OXO.py","file_ext":"py","file_size_in_byte":3178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38457220049","text":"#!/usr/bin/env python3\n\nimport sys\nimport os\nimport glob\nimport re\nfrom os.path import expanduser\n\n\n#######################################################################################################################\nif sys.version_info < (3, 6, 0):\n sys.stderr.write(\"must be python 3.6+ for %s\\n\" % __file__)\n sys.exit()\n\n\n#######################################################################################################################\ndef get_out_filename():\n return '%s/assembled.sh' % expanduser('~')\n\n\n# def rc_path():\n# if '/' in __file__:\n# pos = __file__.rindex('/')\n# return __file__[:pos]\n# else:\n# return '.'\n\n\ndef uniq_list(l):\n uniq = []\n for x in l:\n if x not in uniq:\n uniq.append(x)\n return uniq\n\n\ndef add(*args):\n print('ADD', *args)\n\n\ndef skip(*args):\n print('SKIP', *args)\n\n\ndef filter_dirs(fn, dirs):\n print(repr(fn))\n for p in dirs:\n if fn(p):\n add(p)\n yield p\n else:\n skip(p)\n\n\nclass Glob:\n def __init__(self):\n self.path_msk = None\n self.filter_fn = None\n self.set_skip_os(None)\n\n def set_msk(self, *path_msk_pieces):\n self.path_msk = os.path.join(*path_msk_pieces)\n self.path_msk = expanduser(self.path_msk)\n\n def set_skip_os(self, name):\n self.filter_fn = filter_by_os_name(name)\n\n def path_seq(self):\n msk = self.path_msk\n if self.filter_fn(msk):\n add('DIR', msk)\n res = glob.glob(msk)\n # res = filter(self.filter_fn, res)\n res = filter_dirs(self.filter_fn, res)\n res = sorted(res)\n return res\n else:\n skip('DIR', msk)\n return []\n\n\ndef rc_msk_seq():\n rt = '~/Documents'\n roots = [\n # 'dotfiles/rc/**/*.sh',\n 'dot/rc*/*.sh',\n 'dot-private/rc*/*.sh',\n ]\n return [os.path.join(rt, p) for p in roots]\n\n\ndef rc_files_all():\n g = Glob()\n for p in rc_msk_seq():\n g.set_msk(p)\n yield from g.path_seq()\n\n\ndef rc_files_auto():\n g = Glob()\n g.set_skip_os('mac')\n for p in rc_msk_seq():\n yield from g.path_seq()\n\n\ndef main():\n print(sys.version)\n print(sys.platform)\n # print(rc_path())\n out_filename = get_out_filename()\n print(rc_msk_seq())\n print(out_filename)\n # res = rc_files_all()\n res = rc_files_auto()\n res = list(res)\n # for p in res:\n # print(p)\n # print(res)\n # out = open(out_filename, mode=\"w\", encoding=\"utf8\")\n # lines = list(lines_generator()) # write only is read ok\n # for ln in lines:\n # out.write(ln)\n # out.write('\\n')\n # summary = finish_summary(out_filename, len(lines))\n # summary = '\\n'.join(summary)\n # print(summary)\n\n#######################################################################################################################\n\n\n# def fltr(path, skip_os):\n# filename = os.path.basename(path)\n# if '.%s.' % skip_os in filename:\n# return False\n# if filename.startswith('%s.' 
% skip_os):\n# return False\n# return True\n\n\n# def rc_sub_files(sub_dir):\n# msk = rc_path()+f'/{sub_dir}/*.sh'\n# print('\\n', msk)\n# names = glob.glob(msk)\n# return names\n\n\n# def rc_files_by_hand():\n# home = rc_path()+'/'\n# names = [\n# 'env.sh',\n# 'alias.sh',\n# 'locate.sh',\n# 'wine.sh',\n\n# 'akb.sh',\n\n# 'apt.linux.sh',\n# 'brew.mac.sh',\n\n# 'cd.sh',\n\n# 'ytdl.sh',\n# 'mm.sh',\n\n# 'behlog.sh',\n# 'org.sh',\n# 'sublime.sh',\n# 'du.sh',\n# ]\n# names = (home+x for x in names)\n# return names\n\n\n# def rc_files_iter():\n# yield from rc_sub_files('rc0')\n# yield from rc_sub_files('rc.server')\n# yield from rc_sub_files('rc.cd')\n# yield from rc_sub_files('rc1')\n# yield from rc_sub_files('rc.private')\n# # yield from rc_files_by_hand()\n\n\n# def rc_files_all():\n# names = uniq_list(rc_files_iter())\n# return names\n\n\n# print(rc_files_all())\n# sys.exit()\n\n\n# def rc_files_linux():\n# files = rc_files_all()\n# skip_os = 'mac'\n# filtered = [f for f in files if fltr(f, skip_os)]\n# return filtered\n\n\n# def rc_files_mac():\n# files = rc_files_all()\n# skip_os = 'linux'\n# filtered = [f for f in files if fltr(f, skip_os)]\n# return filtered\n\n\n# def rc_files_auto_detect():\n# res = []\n# if 'linux' in sys.platform:\n# print('LINUX')\n# res = rc_files_linux()\n# if 'darwin' in sys.platform:\n# print('MAC')\n# res = rc_files_mac()\n# return list(res)\n\n\ndef read_file(filename):\n lines = []\n\n try:\n lines = open(filename).readlines()\n lines = list(lines)\n status = '%s; lines: %d; skipped: 0' % (filename, len(lines))\n except Exception as e:\n status = 'ERR %s: %s' % (e, filename)\n\n print(status)\n lines = [ln.rstrip() for ln in lines]\n yield '# '+status\n yield from lines\n\n\ndef lines_generator():\n for f in rc_files_auto_detect():\n yield from read_file(f)\n\n\ndef finish_summary(assembled_filename, lines_writed):\n yield 'writed %d lines to %s' % (lines_writed, assembled_filename)\n yield 'install: source \"%s\" >> ~/.bash_profile'% assembled_filename\n yield 'install: source \"%s\" >> ~/.profile'% assembled_filename\n yield 'edit: vim ~/.profile ~/.bash_profile'\n\n\n# def main():\n# print(sys.version)\n# print(sys.platform)\n# print(rc_path())\n# out_filename = get_out_filename()\n# print(out_filename)\n# out = open(out_filename, mode=\"w\", encoding=\"utf8\")\n# lines = list(lines_generator()) # write only is read ok\n# for ln in lines:\n# out.write(ln)\n# out.write('\\n')\n# summary = finish_summary(out_filename, len(lines))\n# summary = '\\n'.join(summary)\n# print(summary)\n\n\ndef main_all():\n list(rc_files_all())\n # for p in rc_files_all():\n # print(p)\n\n\n############ ng #######################################################################################################\nif __name__ == '__main__':\n main()\n # main_all()\n","repo_name":"rvasilevsk/dot","sub_path":"rc_compiler.old.py","file_name":"rc_compiler.old.py","file_ext":"py","file_size_in_byte":6195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14481266181","text":"class Queue:\n def __init__(self,list = None):\n if list == None:\n self.item =[]\n else :\n self.item = list\n def enQueue(self,i):\n self.item.append(i)\n def size(self): \n return len(self.item)\n\n def isEmpty(self):\n return len(self.item)==0\n def deQueue(self):\n return self.item.pop(0)\ndef TENET(r,b,freeze,heat,mistake):\n lst = []\n buffer=0\n tmp = \"\"\n for i in range(len(r)):\n if r[i] == tmp:\n buffer+=1\n if buffer ==2:\n for j in 
range(len(b)-2):\n bbomb = \"\"\n if b[j] == b[j+1] == b[j+2] :\n freeze+=1\n bbomb = b[j]\n del b[j:j+3]\n if bbomb == r[i]:\n mistake+=1\n lst.append(bbomb)\n break\n buffer = 1\n else:\n buffer = 0\n tmp = r[i]\n # print(\"append\",r[i])\n lst.append(r[i])\n # newr = []\n # i = 0\n # while i < len(r)-2:\n # print(\"token\",r[i])\n # if r[i] == r[i+1] == r[i+2]:\n # for j in range(len(b)-2):\n # bomb = \"\"\n # if b[j] == b[j+1] == b[j+2] :\n # freeze+=1\n # bbomb = b[j]\n # del b[j:j+3]\n # if bbomb == r[i]:\n # mistake+=1\n # r.insert(i+2,bbomb)\n # del r[i:i+3]\n # i-=1\n # else:\n # r.insert(i+2,bbomb)\n # break\n # i+=1\n # print(i,len(r))\n # if i>len(r):\n # print(\"not in range\")\n # break\n # print(lst)\n # print(b)\n lst,heat = checkbomb(lst,heat)\n b,freeze = checkbomb(b,freeze)\n display(lst,b,heat,freeze,mistake)\ndef checkbomb(lst,t):\n result = Queue()\n buffer=0\n tmp = \"\"\n for i in range(len(lst)):\n if lst[i] == tmp:\n print(lst)\n buffer+=1\n if buffer ==2:\n t+=1\n\n print(result.item.pop())\n print(result.item.pop())\n buffer = 1\n elif buffer<2:\n result.enQueue(lst[i]) \n else:\n buffer = 0\n result.enQueue(lst[i])\n tmp = lst[i]\n return result.item,t\ndef display(r,b,heat,freeze,mistake):\n print(\"Red Team : \\n\",len(r),'\\n',\"\".join(reversed(r)) if len(r)!=0 else \"Empty\",'\\n',heat-mistake,\" Explosive(s) ! ! ! (HEAT)\",sep=\"\")\n if mistake>0:\n print(\"Blue Team Made (a) Mistake(s)\",mistake,\"Bomb(s)\")\n print(\"----------TENETTENET----------\")\n print(\": maeT eulB\\n\",len(b),'\\n',\"\".join(reversed(b)) if len(b) != 0 else \"ytpmE\",\"\\n\",\"(EZEERF) ! ! ! (s)evisolpxE \",freeze,sep=\"\")\n \n\n\nfreeze,heat,mistake = 0,0,0\nr,b = list(map(list,input(\"Enter Input (Red, Blue) : \").split()))\nb = b[::-1]\nTENET(r,b,freeze,heat,mistake)","repo_name":"ppbasleng/CE-Classroom","sub_path":"Datastruc/lab4/lab4-5-2.py","file_name":"lab4-5-2.py","file_ext":"py","file_size_in_byte":3064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9599494629","text":"from config.common_defaults import deffccdicts\n #python FCChhAnalyses/FCChh/ttHH/dataframe/preSel.py \nimport os\n\nbasedir=os.path.join(os.getenv('FCCDICTSDIR', deffccdicts), '') + \"yaml/FCC/fcc_v04/\"\noutdir=\"FCChh/ttHH/\"\nNUM_CPUS = 20\nprocess_list=['mgp8_pp_tthh_lambda100_5f',\n 'mgp8_pp_ttz_5f',\n 'mgp8_pp_ttzz_5f',\n 'mgp8_pp_tth01j_5f'\n ]\nfraction=1\n\nimport bin.runDataFrame as rdf\nmyana=rdf.runDataFrame(basedir,process_list)\nmyana.run(ncpu=NUM_CPUS,fraction=fraction,outDir=outdir)\n","repo_name":"hshaddix/FCCAnalyses","sub_path":"examples/FCChh/ttHH/preSel.py","file_name":"preSel.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32751081752","text":"import json \r\nfrom call_center import call_center\r\ncenter=call_center(3)\r\nfrom calls import calls\r\nimport pprint\r\nimport time\r\n \r\nnum_agents=3\r\n\r\nactive_calls=[]\r\n\r\ndef incoming_call(u):\r\n print(\"\\n\\n\\n\\n\")\r\n print(u)\r\n print(u[\"score\"])\r\n center.waiting_queue=center.updateQueue(u,center.waiting_queue)\r\n center.print_queues()\r\n center.agent_suitability()\r\n for key, value in center.agent_waiting_queue.items():\r\n center.agent_waiting_queue[key] = center.updateQueue({},value,rearrange=True)\r\n center.allocator()\r\n return str(center.waiting_queue),str(center.agent_waiting_queue)\r\n\r\ndef disconnect(id):\r\n for key,value in 
center.serves.items():\r\n if value==id:\r\n center.serves[key]=0\r\n center.agent_status[key]=\"free\"\r\n center.leaveQueue(id)\r\n center.agent_suitability()\r\n for key, value in center.agent_waiting_queue.items():\r\n center.agent_waiting_queue[key] = center.updateQueue({},value,rearrange=True)\r\n center.allocator()\r\n return str(center.waiting_queue),str(center.agent_waiting_queue)\r\n\r\n\r\ndef get_agent_status(id):\r\n return center.agent_status[id],center.serves[id]\r\n\r\ndef print_agents():\r\n print(\"\\n\\n\\n\")\r\n for i in range(num_agents):\r\n a_status, a_client = get_agent_status(i)\r\n print(\"Agent \"+str(i)+\" is \"+a_status+\" serving Client \"+str(a_client))\r\n\r\ndef end_call():\r\n for i in range(num_agents):\r\n a_status, a_client = get_agent_status(i)\r\n if a_status==\"busy\":\r\n for ac in active_calls:\r\n if ac[\"user\"][\"user_id\"]==a_client:\r\n if ac[\"call_time\"]<(time.time()-ac[\"user\"][\"arrival_time\"]):\r\n ac[\"end_time\"]=time.time()-ac[\"user\"][\"arrival_time\"]\r\n wq,aq=disconnect(a_client)\r\n pprint.pprint(wq)\r\n pprint.pprint(aq)\r\n\r\n \r\ndef print_active_call():\r\n print(\"\\n\\n\\nActive Calls\")\r\n for ac in active_calls:\r\n pprint.pprint(ac)\r\n\r\nfor call in calls.call_list:\r\n call[\"user\"][\"arrival_time\"]=time.time()\r\n pprint.pprint(call)\r\n active_calls.append(call)\r\n wq,aq=incoming_call(call[\"user\"])\r\n print(\"\\n\\n\\nWaiting queue\")\r\n pprint.pprint(list(eval(wq)))\r\n print(\"\\n\\n\\nAgent queue\")\r\n pprint.pprint(eval(aq))\r\n print_agents()\r\n print_active_call()\r\n\r\n","repo_name":"deep0307/Intelligent-Call-Prioritization-using-SER","sub_path":"Anvil Code/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":2334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8246938260","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport torch.utils.data as data\nimport numpy as np\nimport torch\nimport json\nimport cv2\nimport os\nfrom utils.image import flip, color_aug\nfrom utils.image import get_affine_transform, affine_transform\nfrom utils.image import gaussian_radius, draw_umich_gaussian, draw_msra_gaussian\nfrom utils.image import draw_dense_reg\nimport math\n\nclass CTDetDataset(data.Dataset):\n def _coco_box_to_bbox(self, box):\n bbox = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]],\n dtype=np.float32)\n return bbox\n\n def _get_border(self, border, size):\n i = 1\n while size - border // i <= border // i:\n i *= 2\n return border // i\n\n def __getitem__(self, index):\n img_id = self.images[index]\n file_name = self.coco.loadImgs(ids=[img_id])[0]['file_name']\n img_path = os.path.join(self.img_dir, file_name)\n ann_ids = self.coco.getAnnIds(imgIds=[img_id])\n anns = self.coco.loadAnns(ids=ann_ids)\n num_objs = min(len(anns), self.max_objs)\n\n\n img = cv2.imread(img_path)\n org_img = img\n\n height, width = img.shape[0], img.shape[1]\n c = np.array([img.shape[1] / 2., img.shape[0] / 2.], dtype=np.float32)\n if self.opt.keep_res:\n input_h = (height | self.opt.pad) + 1\n input_w = (width | self.opt.pad) + 1\n s = np.array([input_w, input_h], dtype=np.float32)\n else:\n\n s = max(img.shape[0], img.shape[1]) * 1.0\n input_h, input_w = self.opt.input_h, self.opt.input_w #height, width\n\n \n flipped = False\n if self.split == 'train': #train\n if not self.opt.not_rand_crop:\n s = s * np.random.choice(np.arange(0.6, 1.4, 0.1))\n 
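                # (added comment) jitter the crop scale within [0.6, 1.3], then\n                # sample a crop centre far enough from the image borders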
w_border = self._get_border(128, img.shape[1])\n h_border = self._get_border(128, img.shape[0])\n c[0] = np.random.randint(low=w_border, high=img.shape[1] - w_border)\n c[1] = np.random.randint(low=h_border, high=img.shape[0] - h_border)\n else:\n sf = self.opt.scale\n cf = self.opt.shift\n c[0] += s * np.clip(np.random.randn()*cf, -2*cf, 2*cf) # cmnt this to run in jupyter lab\n c[1] += s * np.clip(np.random.randn()*cf, -2*cf, 2*cf) # cmnt this to run in jupyter lab\n s = s * np.clip(np.random.randn()*sf + 1, 1 - sf, 1 + sf)\n\n if np.random.random() < self.opt.flip:\n flipped = True\n img = img[:, ::-1, :]\n c[0] = width - c[0] - 1\n trans_input = get_affine_transform(\n c, s, 0, [input_w, input_h])\n inp = cv2.warpAffine(img, trans_input, \n (input_w, input_h),\n flags=cv2.INTER_LINEAR)\n #inp = (inp.astype(np.float32) / 255.)\n inp = inp.astype(np.float32)\n if self.split == 'train' and not self.opt.no_color_aug:\n color_aug(self._data_rng, inp, self._eig_val, self._eig_vec)\n\n inp = inp.transpose(2, 0, 1)\n\n output_h = input_h // self.opt.down_ratio\n output_w = input_w // self.opt.down_ratio\n num_classes = self.num_classes\n trans_output = get_affine_transform(c, s, 0, [output_w, output_h])\n \n segMapsize = (num_classes,input_h, input_w) #curImg['height'], curImg['width'] #height, width input_h,input_w\n segMapTotal = np.zeros(segMapsize, dtype = np.uint8)\n #segMapaffine = np.zeros(segMapsize, dtype = np.uint8)\n ctMapsize = (num_classes,input_h, input_w)\n ctMapTotal = np.zeros (ctMapsize, dtype = bool)\n widthMapTotal = np.zeros(ctMapsize, dtype = np.uint8)\n heightMapTotal= np.zeros(ctMapsize, dtype = np.uint8)\n\n gt_det = []\n for k in range(num_objs):\n ann = anns[k]\n bbox = self._coco_box_to_bbox(ann['bbox'])\n if (ann['category_id']==15 or ann['category_id']==20):\n if ann['category_id']==15:\n ann['category_id']=1\n if ann['category_id']==20:\n ann['category_id']=2\n cls_id = int(self.cat_ids[ann['category_id']])\n if flipped:\n bbox[[0, 2]] = width - bbox[[2, 0]] - 1\n #Mehdi\n bbox[:2] = affine_transform(bbox[:2], trans_output)\n bbox[2:] = affine_transform(bbox[2:], trans_output)\n bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, output_w - 1)\n bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0, output_h - 1)\n h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]\n if h > 0 and w > 0:\n ct = np.array(\n [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)\n ct_int = ct.astype(np.int32)\n labelMask = self.coco.annToMask(ann) == 1\n if flipped:\n labelMask = labelMask[:, ::-1]\n labelMask = labelMask.astype(np.uint8)\n labelMask = cv2.warpAffine(labelMask, trans_input,\n (input_w, input_h),\n flags=cv2.INTER_LINEAR)\n\n\n #segMapTotal[cls_id,:,:] = segMapTotal[cls_id,:,:] | labelMask\n segMapTotal[cls_id,:,:] = np.logical_or (segMapTotal[cls_id,:,:], labelMask)\n\n\n \n\n sq= self.opt.bb_size\n sq_w = int(max(min(sq, w/2),sq/4))\n sq_h = int(max(min(sq, h/2),sq/4))\n \n for i in range(-sq_w, sq_w):\n for j in range(-sq_h, sq_h):\n # print( ct_int[0] , ct_int[1] )\n border_width = max(min(ct_int[1]+j, input_w-1),0)\n border_height = max(min(ct_int[0]+i, input_h-1),0)\n ctMapTotal[cls_id, border_width, border_height] = True\n widthMapTotal[cls_id, border_width, border_height] = w\n heightMapTotal[cls_id , border_width, border_height] = h\n\n ret = {'input':inp , 'gt_segmap':segMapTotal, 'gt_ctmap': ctMapTotal.astype(np.uint8), 'gt_widmap':widthMapTotal.astype(np.uint8), 'gt_heimap':heightMapTotal.astype(np.uint8)}\n return img_id,ret 
#org_img","repo_name":"miranmanesh/tmp_obj","sub_path":"lib/datasets/sample/ctdet_two_classes.py","file_name":"ctdet_two_classes.py","file_ext":"py","file_size_in_byte":5882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17793724208","text":"from flask import request\n\nfrom .. import bp_api\nfrom ..apimodels.complex import (\n AddProjectToUserAPIModel,\n AddExistingProjectToUserAPIModel,\n RemoveProjectFromUserAPIModel\n)\n\n\n@bp_api.post(\"/complex/add/\")\ndef add_project_to_user(user_id: int):\n am = AddProjectToUserAPIModel(request.form, user_id)\n am.exec()\n return am.to_response()\n\n\n@bp_api.get(\"/complex/add//\")\ndef add_existing_project_to_user(project_id: int, user_id: int):\n am = AddExistingProjectToUserAPIModel(project_id, user_id)\n am.exec()\n return am.to_response()\n\n\n@bp_api.delete(\"/complex/remove//\")\ndef remove_project_from_user(project_id: int, user_id: int):\n am = RemoveProjectFromUserAPIModel(project_id, user_id)\n am.exec()\n return am.to_response()\n","repo_name":"neurothrone/project-dot","sub_path":"app/blueprints/api/endpoints/complex.py","file_name":"complex.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"11773641402","text":"import os\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\nWTF_CSRF_ENABLED = True\nSECRET_KEY = 'klfsalkfafasdfwrtpuier'\n\n# SQLAlchemy database config\nSQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'app.db')\nSQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')\n\n# Whoosh Alchemy config\nWHOOSH_BASE = os.path.join(basedir, 'search.db')\nMAXIMUM_SEARCH_RESULTS = 5\n\n# mail server settings\nMAIL_SERVER = 'localhost'\nMAIL_PORT = 25\nMAIL_USERNAME = None\nMAIL_PASSWORD = None\n\n# administrator list\nADMINS = ['tidymilla@gmail.com']\n\n# pagination\nEVENTS_PER_PAGE = 3\n\n# Open Outhentication\nOAUTH_CREDENTIALS = {\n 'facebook': {\n 'id': '414921905507297',\n 'secret': '56b26cc69dfc31264b978be2a7cd7c4a'\n },\n 'twitter': {\n 'id': '16NHDpcGdThjE9xamlW8ADGHj',\n 'secret': 'gf5YRGfDyHxTo81fky4X2ltNhXugnYpHRmqNfYVlaIugx3WpWo'\n },\n 'google': {\n 'id': '223978344432-uo73c742n4skbchtbjp797871f0rveij.apps.googleusercontent.com',\n 'secret': '6SCMQCVHcIMGkpYX_lYWuv7A'\n }\n}\n","repo_name":"millatidy/linkup","sub_path":"LinkUp/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11623313277","text":"#!/usr/bin/python3\n\"\"\"\nModule contains function\n\npurpose: Divide a matrix\nExamples:\n matrix_divided(matrix, div)\n\"\"\"\n\n\ndef matrix_divided(matrix, div):\n \"\"\"Divide matrix elements\n\n Args:\n matrix(list of list): matrix of integer or float\n div(int or float): number use to divide matrix's elements\n\n Raises:\n TypeError: if div is not integer or float\n ZeroDivisionError: if div equal 0\n TypeError: if matrix is not a list or matrix's elements are not list or\n elements in matrix are not int or float\n TypeError: if matrix's elements don't have the same size\n\n Returns:\n The new matrix\n \"\"\"\n if not (type(div) in (int, float)):\n raise TypeError(\"div must be a number\")\n if div == 0:\n raise ZeroDivisionError(\"division by zero\")\n err_msg = \"matrix must be a matrix (list of lists) of integers/floats\"\n if not isinstance(matrix, list):\n raise TypeError(err_msg)\n 
new_matrix = []\n sizes = []\n for row in matrix:\n if not isinstance(row, list):\n raise TypeError(err_msg)\n if False in [type(x) in (int, float) for x in row]:\n raise TypeError(err_msg)\n sizes.append(len(row))\n new_matrix.append(list(map(lambda x: round(x/div, 2), row)))\n if len(set(sizes)) != 1:\n raise TypeError(\"Each row of the matrix must have the same size\")\n return new_matrix\n","repo_name":"patrice012/alx-higher_level_programming","sub_path":"0x07-python-test_driven_development/2-matrix_divided.py","file_name":"2-matrix_divided.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18675524876","text":"\"\"\"\n BAM-ResNet for ImageNet-1K, implemented in Chainer.\n Original paper: 'BAM: Bottleneck Attention Module,' https://arxiv.org/abs/1807.06514.\n\"\"\"\n\n__all__ = ['BamResNet', 'bam_resnet18', 'bam_resnet34', 'bam_resnet50', 'bam_resnet101', 'bam_resnet152']\n\nimport os\nimport chainer.functions as F\nimport chainer.links as L\nfrom chainer import Chain\nfrom functools import partial\nfrom chainer.serializers import load_npz\nfrom .common import SimpleSequential, conv1x1, conv1x1_block, conv3x3_block\nfrom .resnet import ResInitBlock, ResUnit\n\n\nclass DenseBlock(Chain):\n \"\"\"\n Standard dense block with Batch normalization and ReLU activation.\n\n Parameters:\n ----------\n in_channels : int\n Number of input features.\n out_channels : int\n Number of output features.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels):\n super(DenseBlock, self).__init__()\n with self.init_scope():\n self.fc = L.Linear(\n in_size=in_channels,\n out_size=out_channels)\n self.bn = L.BatchNormalization(\n size=out_channels,\n eps=1e-5)\n self.activ = F.relu\n\n def __call__(self, x):\n x = self.fc(x)\n x = self.bn(x)\n x = self.activ(x)\n return x\n\n\nclass ChannelGate(Chain):\n \"\"\"\n BAM channel gate block.\n\n Parameters:\n ----------\n channels : int\n Number of input/output channels.\n reduction_ratio : int, default 16\n Channel reduction ratio.\n num_layers : int, default 1\n Number of dense blocks.\n \"\"\"\n def __init__(self,\n channels,\n reduction_ratio=16,\n num_layers=1):\n super(ChannelGate, self).__init__()\n mid_channels = channels // reduction_ratio\n\n with self.init_scope():\n self.init_fc = DenseBlock(\n in_channels=channels,\n out_channels=mid_channels)\n self.main_fcs = SimpleSequential()\n with self.main_fcs.init_scope():\n for i in range(num_layers - 1):\n setattr(self.main_fcs, \"fc{}\".format(i + 1), DenseBlock(\n in_channels=mid_channels,\n out_channels=mid_channels))\n self.final_fc = L.Linear(\n in_size=mid_channels,\n out_size=channels)\n\n def __call__(self, x):\n input_shape = x.shape\n x = F.average_pooling_2d(x, ksize=x.shape[2:])\n x = F.reshape(x, shape=(x.shape[0], -1))\n x = self.init_fc(x)\n x = self.main_fcs(x)\n x = self.final_fc(x)\n x = F.broadcast_to(F.expand_dims(F.expand_dims(x, axis=2), axis=3), input_shape)\n return x\n\n\nclass SpatialGate(Chain):\n \"\"\"\n BAM spatial gate block.\n\n Parameters:\n ----------\n channels : int\n Number of input/output channels.\n reduction_ratio : int, default 16\n Channel reduction ratio.\n num_dil_convs : int, default 2\n Number of dilated convolutions.\n dilate : int, default 4\n Dilation/padding value for corresponding convolutions.\n \"\"\"\n def __init__(self,\n channels,\n reduction_ratio=16,\n num_dil_convs=2,\n dilate=4):\n super(SpatialGate, self).__init__()\n mid_channels = 
channels // reduction_ratio\n\n with self.init_scope():\n self.init_conv = conv1x1_block(\n in_channels=channels,\n out_channels=mid_channels,\n stride=1,\n use_bias=True)\n self.dil_convs = SimpleSequential()\n with self.dil_convs.init_scope():\n for i in range(num_dil_convs):\n setattr(self.dil_convs, \"conv{}\".format(i + 1), conv3x3_block(\n in_channels=mid_channels,\n out_channels=mid_channels,\n stride=1,\n pad=dilate,\n dilate=dilate,\n use_bias=True))\n self.final_conv = conv1x1(\n in_channels=mid_channels,\n out_channels=1,\n stride=1,\n use_bias=True)\n\n def __call__(self, x):\n input_shape = x.shape\n x = self.init_conv(x)\n x = self.dil_convs(x)\n x = self.final_conv(x)\n x = F.broadcast_to(x, input_shape)\n return x\n\n\nclass BamBlock(Chain):\n \"\"\"\n BAM attention block for BAM-ResNet.\n\n Parameters:\n ----------\n channels : int\n Number of input/output channels.\n \"\"\"\n def __init__(self,\n channels):\n super(BamBlock, self).__init__()\n with self.init_scope():\n self.ch_att = ChannelGate(channels=channels)\n self.sp_att = SpatialGate(channels=channels)\n\n def __call__(self, x):\n att = 1 + F.sigmoid(self.ch_att(x) * self.sp_att(x))\n x = x * att\n return x\n\n\nclass BamResUnit(Chain):\n \"\"\"\n BAM-ResNet unit.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n stride : int or tuple/list of 2 int\n Strides of the convolution.\n bottleneck : bool\n Whether to use a bottleneck or simple block in units.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n stride,\n bottleneck):\n super(BamResUnit, self).__init__()\n self.use_bam = (stride != 1)\n\n with self.init_scope():\n if self.use_bam:\n self.bam = BamBlock(channels=in_channels)\n self.res_unit = ResUnit(\n in_channels=in_channels,\n out_channels=out_channels,\n stride=stride,\n bottleneck=bottleneck,\n conv1_stride=False)\n\n def __call__(self, x):\n if self.use_bam:\n x = self.bam(x)\n x = self.res_unit(x)\n return x\n\n\nclass BamResNet(Chain):\n \"\"\"\n BAM-ResNet model from 'BAM: Bottleneck Attention Module,' https://arxiv.org/abs/1807.06514.\n\n Parameters:\n ----------\n channels : list of list of int\n Number of output channels for each unit.\n init_block_channels : int\n Number of output channels for the initial unit.\n bottleneck : bool\n Whether to use a bottleneck or simple block in units.\n in_channels : int, default 3\n Number of input channels.\n in_size : tuple of two ints, default (224, 224)\n Spatial size of the expected input image.\n classes : int, default 1000\n Number of classification classes.\n \"\"\"\n def __init__(self,\n channels,\n init_block_channels,\n bottleneck,\n in_channels=3,\n in_size=(224, 224),\n classes=1000):\n super(BamResNet, self).__init__()\n self.in_size = in_size\n self.classes = classes\n\n with self.init_scope():\n self.features = SimpleSequential()\n with self.features.init_scope():\n setattr(self.features, \"init_block\", ResInitBlock(\n in_channels=in_channels,\n out_channels=init_block_channels))\n in_channels = init_block_channels\n for i, channels_per_stage in enumerate(channels):\n stage = SimpleSequential()\n with stage.init_scope():\n for j, out_channels in enumerate(channels_per_stage):\n stride = 2 if (j == 0) and (i != 0) else 1\n setattr(stage, \"unit{}\".format(j + 1), BamResUnit(\n in_channels=in_channels,\n out_channels=out_channels,\n stride=stride,\n bottleneck=bottleneck))\n in_channels = out_channels\n setattr(self.features, \"stage{}\".format(i + 1), 
stage)\n setattr(self.features, \"final_pool\", partial(\n F.average_pooling_2d,\n ksize=7,\n stride=1))\n\n self.output = SimpleSequential()\n with self.output.init_scope():\n setattr(self.output, \"flatten\", partial(\n F.reshape,\n shape=(-1, in_channels)))\n setattr(self.output, \"fc\", L.Linear(\n in_size=in_channels,\n out_size=classes))\n\n def __call__(self, x):\n x = self.features(x)\n x = self.output(x)\n return x\n\n\ndef get_resnet(blocks,\n model_name=None,\n pretrained=False,\n root=os.path.join(\"~\", \".chainer\", \"models\"),\n **kwargs):\n \"\"\"\n Create BAM-ResNet model with specific parameters.\n\n Parameters:\n ----------\n blocks : int\n Number of blocks.\n conv1_stride : bool\n Whether to use stride in the first or the second convolution layer in units.\n use_se : bool\n Whether to use SE block.\n width_scale : float\n Scale factor for width of layers.\n model_name : str or None, default None\n Model name for loading pretrained model.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.chainer/models'\n Location for keeping the model parameters.\n \"\"\"\n\n if blocks == 18:\n layers = [2, 2, 2, 2]\n elif blocks == 34:\n layers = [3, 4, 6, 3]\n elif blocks == 50:\n layers = [3, 4, 6, 3]\n elif blocks == 101:\n layers = [3, 4, 23, 3]\n elif blocks == 152:\n layers = [3, 8, 36, 3]\n else:\n raise ValueError(\"Unsupported BAM-ResNet with number of blocks: {}\".format(blocks))\n\n init_block_channels = 64\n\n if blocks < 50:\n channels_per_layers = [64, 128, 256, 512]\n bottleneck = False\n else:\n channels_per_layers = [256, 512, 1024, 2048]\n bottleneck = True\n\n channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]\n\n net = BamResNet(\n channels=channels,\n init_block_channels=init_block_channels,\n bottleneck=bottleneck,\n **kwargs)\n\n if pretrained:\n if (model_name is None) or (not model_name):\n raise ValueError(\"Parameter `model_name` should be properly initialized for loading pretrained model.\")\n from .model_store import get_model_file\n load_npz(\n file=get_model_file(\n model_name=model_name,\n local_model_store_dir_path=root),\n obj=net)\n\n return net\n\n\ndef bam_resnet18(**kwargs):\n \"\"\"\n BAM-ResNet-18 model from 'BAM: Bottleneck Attention Module,' https://arxiv.org/abs/1807.06514.\n\n Parameters:\n ----------\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.chainer/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_resnet(blocks=18, model_name=\"bam_resnet18\", **kwargs)\n\n\ndef bam_resnet34(**kwargs):\n \"\"\"\n BAM-ResNet-34 model from 'BAM: Bottleneck Attention Module,' https://arxiv.org/abs/1807.06514.\n\n Parameters:\n ----------\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.chainer/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_resnet(blocks=34, model_name=\"bam_resnet34\", **kwargs)\n\n\ndef bam_resnet50(**kwargs):\n \"\"\"\n BAM-ResNet-50 model from 'BAM: Bottleneck Attention Module,' https://arxiv.org/abs/1807.06514.\n\n Parameters:\n ----------\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.chainer/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_resnet(blocks=50, model_name=\"bam_resnet50\", **kwargs)\n\n\ndef bam_resnet101(**kwargs):\n \"\"\"\n BAM-ResNet-101 model from 'BAM: 
Bottleneck Attention Module,' https://arxiv.org/abs/1807.06514.\n\n Parameters:\n ----------\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.chainer/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_resnet(blocks=101, model_name=\"bam_resnet101\", **kwargs)\n\n\ndef bam_resnet152(**kwargs):\n \"\"\"\n BAM-ResNet-152 model from 'BAM: Bottleneck Attention Module,' https://arxiv.org/abs/1807.06514.\n\n Parameters:\n ----------\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.chainer/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_resnet(blocks=152, model_name=\"bam_resnet152\", **kwargs)\n\n\ndef _test():\n import numpy as np\n import chainer\n\n chainer.global_config.train = False\n\n pretrained = False\n\n models = [\n bam_resnet18,\n bam_resnet34,\n bam_resnet50,\n bam_resnet101,\n bam_resnet152,\n ]\n\n for model in models:\n\n net = model(pretrained=pretrained)\n weight_count = net.count_params()\n print(\"m={}, {}\".format(model.__name__, weight_count))\n assert (model != bam_resnet18 or weight_count == 11712503)\n assert (model != bam_resnet34 or weight_count == 21820663)\n assert (model != bam_resnet50 or weight_count == 25915099)\n assert (model != bam_resnet101 or weight_count == 44907227)\n assert (model != bam_resnet152 or weight_count == 60550875)\n\n x = np.zeros((1, 3, 224, 224), np.float32)\n y = net(x)\n assert (y.shape == (1, 1000))\n\n\nif __name__ == \"__main__\":\n _test()\n","repo_name":"osmr/imgclsmob","sub_path":"chainer_/chainercv2/models/bamresnet.py","file_name":"bamresnet.py","file_ext":"py","file_size_in_byte":13941,"program_lang":"python","lang":"en","doc_type":"code","stars":2864,"dataset":"github-code","pt":"37"} +{"seq_id":"6168800283","text":"# Run and save the model\n\n# import dependencies\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import preprocessing\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import classification_report\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn import metrics\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom db_connection import postgreSQLConnection\nimport pandas as pd\n\n# conda install -c conda-forge pickle5\nimport pickle5 as pickle\nfrom etl import hrds_train_df,result_metrics\nfrom db_connection import postgreSQLConnection\n\n# drop the categorical values\nhrds_train_df = hrds_train_df.drop(['training_buckets','city_index_buckets','experience_buckets'],axis=1)\n\nX =hrds_train_df.drop(\"target\" , axis =1)\ny = hrds_train_df[\"target\"]\n\n\nX_train, X_test, y_train, y_test = train_test_split(X, \n y, \n random_state=1, \n stratify=y)\n\n# Create a random forest classifier.\nrf_model = RandomForestClassifier(n_estimators=128, random_state=78)\n# Fitting the model\nrf_model = rf_model.fit(X_train, y_train)\n# Save the model\nfilename = 'rf.sav'\npickle.dump(rf_model, open(filename, 'wb'))\n\n# Calculate the balanced accuracy score\ny_pred = rf_model.predict(X_test)\nrf_cm, rf_acc = result_metrics(y_test, y_pred, 'RandomForest')\n\n# We need to scale the data for logistic regression\nscaler = preprocessing.StandardScaler().fit(X_train)\nX_scaled = scaler.transform(X_train)\n\n#Run and save logical regression\n# Train the Logistic Regression model using the resampled data\n\nmodel = LogisticRegression(solver='lbfgs', random_state=1)\nmodel.fit(X_scaled, 
y_train)\nfilename = 'lr.sav'\npickle.dump(model, open(filename, 'wb'))\n\n# Calculate the balanced accuracy score\n# the model was fit on scaled features, so scale X_test the same way\ny_pred = model.predict(scaler.transform(X_test))\nlr_cm, lr_acc = result_metrics(y_test, y_pred, 'LogisticRegression')\n\n\n# instantiate learning model (k = 3)\nknn_model = KNeighborsClassifier(n_neighbors=3)\n# fit the model\nknn_model.fit(X_train, y_train)\nfilename = 'knn.sav'\npickle.dump(knn_model, open(filename, 'wb'))\n\n# Calculate the balanced accuracy score\ny_pred = pd.DataFrame(knn_model.predict(X_test))\nknn_cm, knn_acc = result_metrics(y_test, y_pred, 'KNN')\n\n# Save scores and confusion metrics for all the models\naccuracy_df = pd.concat([rf_acc,lr_acc,knn_acc],axis=0)\naccuracy_df = accuracy_df.sort_values(by='accuracy',ascending=False)\nconfusion_df = pd.concat([rf_cm,lr_cm,knn_cm])\n\n# Persist the results in postgres\naccuracy_df.to_sql('all_models_accuracy', postgreSQLConnection, if_exists='fail')\nconfusion_df.to_sql('all_models_cm', postgreSQLConnection, if_exists='fail')","repo_name":"mermaidzhang/Job-Change-of-Data-Scientists","sub_path":"save_model.py","file_name":"save_model.py","file_ext":"py","file_size_in_byte":2788,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"30448275070","text":"\n\ndef getEndBytes(inStr):\n\tnum = ''\n\tle = False\n\tif 'le' in inStr:\n\t\tnum = hex(int(int(inStr.split('le')[1])/8))\n\t\tle = True\n\telif 'u' in inStr:\n\t\tnum = hex(int(int(inStr.split('u')[1])/8))\n\telif 'char' in inStr:\n\t\tnum = hex(0x1)\n\n\treturn [num,le]\n\nx = open('fileToOpen', 'r')\nf = x.readlines()\nx.close()\n\nprint(f)\nnewFile = open('outfile','w')\n\ncurrentHex = 0\nfor x in f:\n    parts = x.split()\n    hexPart = getEndBytes(parts[0])[0].split('x')[1]\n    # hexPart holds hex digits without the 0x prefix, so parse base 16\n    endHex = int(currentHex) + int(hexPart, 16)\n    newFile.write(\"self.\"+parts[1][:-1]+\"= getHex(self.superblock, \"+hex(currentHex)+\", \"+hex(endHex)+\", \"+str(getEndBytes(parts[0])[1])+\")\")\n    newFile.write('#'+str(' '.join(parts[2:]))+'\\\\n')\n    currentHex = int(currentHex) + int(hexPart, 16)\n\nnewFile.close()","repo_name":"JMPercival/ext2Project","sub_path":"scripts/cParser.py","file_name":"cParser.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"43506834183","text":"from time import sleep, time\nfrom math import floor\nfrom threading import Lock, Thread\nfrom typing import Callable, Dict, List\n\nfrom mido import Message\n\nfrom flask_entry import (\n    _BASE_KEYWORD,\n    _DEBUG,\n    _KEYWORD_LIST,\n    _MAX_VALUE,\n    _RATE_KEYWORD,\n    _SKEW_KEYWORD,\n    StockMidiCvException,\n)\nfrom helpers import _MILLISECONDS_PER_SECOND, get_time_ms, Value\n\n\nclass NoTimeException(StockMidiCvException):\n    \"\"\"time is not set yet\"\"\"\n\n\nclass InvalidSequenceProgress(StockMidiCvException):\n    \"\"\"must be a float between 0 and 1 for the %\"\"\"\n\n\nclass Clock:\n    \"\"\"A thread that controls a specific value over MIDI CC at a given rate.\n    This rate is computed in real time based on the tics that come in as input.\n    \"\"\"\n\n    def __init__(self, sequence_length, tics_per_step, channel, cc, step_fn) -> None:\n        print(\"abc123 init\")\n        self.sequence_length = sequence_length\n        self.lock = Lock()\n        self.step_index = 0\n        self.tic_index = 0\n        self.channel = channel\n        self.cc = cc\n        self.tics_ms = []\n        self.step_fn: Callable[\n            [None],\n        ] = step_fn\n        self.tics_per_step = 
Value(min=0.01, max=1000, initialized_value=tics_per_step)\n        self.thread = Thread(target=self.time, args=())\n        self.thread.start()\n\n    def tic(self):\n        time_ms = get_time_ms()\n        with self.lock:\n            self.tics_ms.append(time_ms)\n            self.tic_index = self.tic_index + 1\n\n    @property\n    def ms_per_tick(self):\n        with self.lock:\n            recent_tics = self.tics_ms[-4:]\n\n        def _yield_pairs(arr):\n            for i in range(0, len(arr) - 1):\n                yield arr[i], arr[i + 1]\n\n        if len(recent_tics) >= 1:\n            time_delta_list = [\n                (time_b - time_a) for time_a, time_b in _yield_pairs(recent_tics)\n            ]\n            return sum(time_delta_list) / len(time_delta_list)\n        else:\n            raise NoTimeException()\n\n    @property\n    def time_since_last_tic(self):\n        with self.lock:\n            # tics_ms holds millisecond timestamps, so compare in ms to match ms_per_tick\n            return get_time_ms() - self.tics_ms[-1]\n\n    @property\n    def sequence_progress(self):\n        \"\"\"\n        Returns:\n            Where we are in the sequence 0 - 1.0. 0 being at the very beginning and 1.0 being at the very end\n        \"\"\"\n        num_tics = self.tic_index + self.time_since_last_tic / self.ms_per_tick\n        num_steps = num_tics / self.tics_per_step\n        percent = num_steps / self.sequence_length\n        print(f\"sequence progress: tics: {num_tics} steps:{num_steps} perct: {percent}\")\n        return percent\n\n    @property\n    def tic_index(self):\n        return self._tic_index\n\n    @tic_index.setter\n    def tic_index(self, value):\n        self._tic_index = value\n        if self._tic_index > self.sequence_length:\n            self._tic_index = 0\n\n    def time(self):\n        print(\"abc123 time init\")\n        # get past the beginning when no time\n        while True:\n            try:\n                print(self.ms_per_tick)\n                break\n            except NoTimeException:\n                pass\n\n        while True:\n            # call the function to update the value\n            self.step_fn(self.sequence_progress)\n            # sleep until the next update is due (convert ms to s)\n            sleep_duration_msec: float = self.tics_per_step * self.ms_per_tick\n            sleep_duration_sec: float = sleep_duration_msec / _MILLISECONDS_PER_SECOND\n            sleep(sleep_duration_sec)\n\n\nclass SequenceState:\n    \"\"\"abstract class to be implemented with _step\"\"\"\n\n    def __init__(self, sequence: List[float], port, tics_per_step, channel, cc) -> None:\n        def step_fn(sequence_percent_progress: float):\n            self._step(sequence_percent_progress)\n\n        self.port = port\n        self.sequence = sequence\n        self.clock = Clock(\n            sequence_length=len(sequence),\n            tics_per_step=tics_per_step,\n            channel=channel,\n            cc=cc,\n            step_fn=step_fn,\n        )\n        self.alter_table: Dict[str, Value] = {\n            word: Value(\n                initialized_value=(_MAX_VALUE / 2), max_value=_MAX_VALUE, min_value=0\n            )\n            for word in _KEYWORD_LIST\n        }\n        # set a listener here and on each action if there is a listener it will be called\n        self.alter_action_table = {}\n\n    def set_sequence(self, sequence: List[float]):\n        self.sequence = sequence\n        self.clock.sequence_length = self.sequence_length\n\n    @property\n    def sequence_length(self):\n        return len(self.sequence)\n\n    def alter(self, key, value):\n        print(f\"altering: key {key} {value}\")\n        self.alter_table[key] = value\n        if key in self.alter_action_table:\n            print(f\"calling action {key} with {value}\")\n            # call action listener\n            self.alter_action_table[key](value)\n        else:\n            print(f\"not calling any action for {key}\")\n\n    def _step(self, sequence_percent_progress: float):\n        \"\"\"Abstract\"\"\"\n        raise NotImplementedError\n\n\nclass AveragingSequenceState(SequenceState):\n    def __init__(self, sequence: List[float], port, tics_per_step, channel, 
cc) -> None:\n        super().__init__(sequence, port, tics_per_step, channel, cc)\n\n        def _alter_tics_per_step(value):\n            self.clock.tics_per_step = value\n\n        self.alter_action_table[_RATE_KEYWORD] = _alter_tics_per_step\n\n    def _step(self, sequence_percent_progress: float):\n        if sequence_percent_progress > 1 or sequence_percent_progress < 0:\n            raise InvalidSequenceProgress()\n\n        # rounding down to the nearest step\n        step_index = floor(sequence_percent_progress * self.sequence_length)\n        raw_step_value = self.sequence[step_index]\n\n        # getting relevant data out of the alter table\n        rate_value = self.alter_table[_RATE_KEYWORD]\n        base_value = self.alter_table[_BASE_KEYWORD]\n        skew_value = self.alter_table[_SKEW_KEYWORD]\n        skewed_skew_value = Value(\n            min_value=0.5, max_value=1.5, initialized_percent=skew_value.value_percent\n        )\n        # computing step by skewing raw value\n        computed_step = raw_step_value * skewed_skew_value\n        # normalize step to be between 0 and max value\n        normalized_value: Value = Value(\n            max_value=_MAX_VALUE,\n            min_value=base_value.value,\n            initialized_percent=computed_step,\n        )\n        # channel and cc live on the clock; mido expects a plain int CC value\n        message = Message(\n            type=\"control_change\",\n            channel=self.clock.channel,\n            control=self.clock.cc,\n            value=int(normalized_value.value),\n        )\n        self.clock.tics_per_step = rate_value\n        if not _DEBUG:\n            self.port.send(message)\n        else:\n            print(\"not sending bc debug -\", message)\n","repo_name":"mknutsen/stock_midi_cv","sub_path":"stockcv/sequence.py","file_name":"sequence.py","file_ext":"py","file_size_in_byte":7032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22629225166","text":"import os.path as osp\nimport os\nimport torch\nfrom torch_geometric.data import Data\nfrom torch_geometric.data import InMemoryDataset\nfrom torch_geometric.utils import remove_self_loops\nimport torch_geometric.transforms as T\nfrom rdkit import Chem\nfrom rdkit.Chem import AllChem\nfrom rdkit import RDLogger\nfrom rdkit.Chem import ChemicalFeatures\nfrom rdkit import RDConfig\nimport networkx as nx\nimport pathlib\nimport ase\nfrom ase.io import read\nfrom ase import Atoms\nimport numpy as np\nimport pandas as pd\nimport copy\nfrom molml.features import BagOfBonds\n\nfrom ..bin.GaussianOutputFileReader import Gaussian_Output\nfrom ..bin.featurization import get_acsf_features, get_CoulombMatrix, get_SOAP, PhyChem\nfrom ..bin.featurization import get_RDKit, atomic_para\nfrom ..bin.Label2Idx import Ar_dict_generator, R_dict_generator\n\n\ndef label2num(label, mode='Ar', src_dir=None):\n    assert mode in ['Ar', 'R'], \"mode should be Ar/R\"\n    \n    if mode=='Ar':\n        Ar_dict = Ar_dict_generator(src_dir=src_dir)\n        return Ar_dict[label]\n    else:\n        R_dict = R_dict_generator(src_dir=src_dir)\n        return R_dict[label]\n\nclass BaseDataset(InMemoryDataset):\n    fdef_name = osp.join(RDConfig.RDDataDir, 'BaseFeatures.fdef')\n    chem_feature_factory = ChemicalFeatures.BuildFeatureFactory(fdef_name)\n\n    def __init__(self, root, mode='Ar', suffix=None, transform=None, pre_transform=None, pre_filter=None):\n        self.mode = mode\n        self.root = root\n        self.suffix = suffix \n        assert mode in ['Ar', 'R'], \"mode should be Ar/R\"\n        super(BaseDataset, self).__init__(root, transform, pre_transform, pre_filter)\n        self.data, self.slices = torch.load(self.processed_paths[0])\n\n    @property\n    def raw_file_names(self):\n        return ['%s/%s'%(self.suffix, self.mode), ] if self.suffix else [self.mode, ]\n\n    @property\n    def processed_file_names(self):\n        # use '' (not None) so the no-suffix filename stays clean\n        suffix = '_%s'%self.suffix if self.suffix else ''\n        if self.suffix:\n            return 
'%s/MolGraph_%s%s.pt' % (self.suffix, self.mode, suffix)\n        else:\n            return 'MolGraph_%s%s.pt' % (self.mode, suffix)\n\n    def download(self):\n        # downloading is intentionally disabled; prepare the raw data locally\n        return 0\n        raise NotImplementedError('please download and unzip dataset from %s, and put it at %s' % (_urls[self.mode], self.raw_dir))\n    \n    def mol_nodes(self, g):\n        feat, feat_SF = [], []\n        for n, d in g.nodes(data=True):\n            h_t, SF_t = [], []\n            # Atom type (one-hot H, C, N, O, F, S)\n            h_t += [int(d['a_type'] == x) for x in ['H', 'C', 'N', 'O', 'F', 'S']]\n            # Atomic number\n            h_t.append(d['a_num'])\n            SF_t = copy.copy(h_t)\n            # Acceptor\n            h_t.append(d['acceptor'])\n            # Donor\n            h_t.append(d['donor'])\n            # Aromatic\n            h_t.append(int(d['aromatic']))\n            # Hybridization\n            h_t += [int(d['hybridization'] == x) \\\n                    for x in (Chem.rdchem.HybridizationType.SP, \\\n                              Chem.rdchem.HybridizationType.SP2,\n                              Chem.rdchem.HybridizationType.SP3)]\n            h_t.append(d['num_h'])\n            ## two added lines: NPA charge and merged RDKit descriptors\n            h_t.append(d['NPA_chg'])\n            h_t += list(d['rdkit_merge'])\n            # add other atom parameter\n            h_t += atomic_para(d['a_type'])\n            # add SymmetryFunction\n            SF_t += d['SymmetryFunction']\n            feat.append((n, h_t))\n            feat_SF.append((n,SF_t))\n        \n        feat.sort(key=lambda item: item[0])\n        node_attr = torch.FloatTensor([item[1] for item in feat])\n        feat_SF.sort(key=lambda item: item[0])\n        node_SF = torch.FloatTensor([item[1] for item in feat_SF])\n        return node_attr, node_SF\n\n    def mol_edges(self, g):\n        e={}\n        h=g.to_undirected()\n        flag=1\n        try:\n            p=list(nx.simple_cycles(h))\n        except:\n            flag=0\n        for N1 in g.nodes(data=True):\n            n1=N1[0]\n            ch1=N1[1]['NPA_chg']\n            for N2 in g.nodes(data=True):\n                n2=N2[0]\n                ch2=N2[1]['NPA_chg']\n                ch=ch1*ch2\n                flag2=0\n                try: \n                    y=g.edges[n1,n2]['b_type']\n                except:\n                    e_t =[0,0,0,0]\n                else:\n                    e_t = [int(y == x)\n                           for x in (Chem.rdchem.BondType.SINGLE, \\\n                                     Chem.rdchem.BondType.DOUBLE, \\\n                                     Chem.rdchem.BondType.TRIPLE, \\\n                                     Chem.rdchem.BondType.AROMATIC) ]\n                    \n\n                if flag==1:\n                    for i in range (len(p)):\n                        if n1 in p[i] and n2 in p[i]:\n                            flag2=1\n                            break\n                e_t.append(flag2)\n                try: \n                    di=int(nx.algorithms.shortest_paths.generic.shortest_path_length(h,n1,n2))\n                except:\n                    di=0\n                e_t.append(di)\n                e_t.append(ch)\n                e[(n1, n2)] = e_t\n        edge_index = torch.LongTensor(list(e.keys())).transpose(0, 1)\n        edge_attr = torch.FloatTensor(list(e.values()))\n        return edge_index, edge_attr\n\n    # gaussian file reader for Mol dataset \n    def gaussian_graph_reader(self, mol_file):\n        GO = Gaussian_Output(self.root, mol_file, 'PRE')\n        mol = GO.rdkitmol\n        keyAtoms = np.array(GO.Atom_labels_list) - 1\n        \n        tmp_atoms = read(mol_file,format='gaussian-out')\n        ase_atoms = Atoms(tmp_atoms.symbols, GO.AtomsCoordinates)\n        ase_atoms.set_initial_charges(GO.NPACharge)\n        ase_atoms.set_atomic_numbers(GO.AtomsNum)\n        tmp_mol = ([ase_atoms.numbers, ase_atoms.positions])\n        if 'NPACharges' not in ase_atoms.arrays.keys():\n            ase_atoms.new_array('NPACharges',GO.AtomsNum + GO.NPACharge)\n        ascf_features = get_acsf_features(ase_atoms)\n        ascf_features_local = get_acsf_features(ase_atoms,list(keyAtoms))\n        CM_features = get_CoulombMatrix(ase_atoms)\n        SOAP_features = get_SOAP(ase_atoms,list(keyAtoms))\n\n        if mol is None:\n            print(\"rdkit can not parse\", mol_file)\n            return None, None  # caller unpacks two values\n        feats = self.chem_feature_factory.GetFeaturesForMol(mol)\n        MACCSfp = AllChem.GetMACCSKeysFingerprint(mol)\n        MACCSfp = torch.ByteTensor([int(x) for x in MACCSfp.ToBitString()])\n        Morganfp = AllChem.GetMorganFingerprintAsBitVect(mol,2,nBits=512)\n        Morganfp = torch.ByteTensor([int(x) for x in Morganfp.ToBitString()])\n        \n        g = nx.DiGraph()\n        
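# bond_ring_info holds per-atom ring/bond descriptors from RDKit, merged into each node feature below\n        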
bond_ring_info = get_RDKit(mol) \n        \n        # for training set, we store its target\n        # otherwise, we store its molecule id\n        mol_fn_ls = mol_file.stem.split('-')\n        if self.mode == \"Ar\":\n            mol_name = mol_fn_ls[0] if mol_fn_ls[2] == 'c1' else mol_fn_ls[0] + \"-\" + mol_fn_ls[2]\n        elif self.mode == \"R\":\n            mol_name = mol_fn_ls[0]\n        \n        try:\n            mol_num = label2num(mol_name,self.mode,self.raw_paths[0])\n        except KeyError:\n            mol_name = mol_fn_ls[0]\n            mol_num = label2num(mol_name,self.mode,self.raw_paths[0])\n        \n        l = torch.FloatTensor(self.target.loc[int(mol_num)].tolist()).unsqueeze(0) \\\n            if self.mode == 'dev' else torch.LongTensor([int(mol_num)])\n        alias = torch.LongTensor([int(mol_num)])\n        \n        # add PhyChem descriptors\n        _idx = int(mol_num)*10 + keyAtoms if self.mode == 'Ar' else np.array([int(mol_num)])\n        phy_chem_total = self.PhyChem.get_total_PhyChem(_idx[0])\n        phy_chem_local = self.PhyChem.get_local_PhyChem(_idx)\n        \n        charge_list = GO.AtomsNum + GO.NPACharge\n\n        # Create nodes\n        assert len(mol.GetConformers()) == 1\n        geom = mol.GetConformers()[0].GetPositions()\n\n        for i in range(mol.GetNumAtoms()):\n            atom_i = mol.GetAtomWithIdx(i)\n            g.add_node(i, a_type=atom_i.GetSymbol(), a_num=atom_i.GetAtomicNum(), acceptor=0, donor=0,\n                       aromatic=atom_i.GetIsAromatic(), hybridization=atom_i.GetHybridization(),\n                       num_h=atom_i.GetTotalNumHs(),NPA_chg=charge_list[i],rdkit_merge=bond_ring_info[i],\n                       SymmetryFunction=list(ascf_features[i]))\n\n        # networkx >= 2.4 exposes g.nodes (g.node was removed); use j so the feature index i is not shadowed\n        for i in range(len(feats)):\n            if feats[i].GetFamily() == 'Donor':\n                node_list = feats[i].GetAtomIds()\n                for j in node_list:\n                    g.nodes[j]['donor'] = 1\n            elif feats[i].GetFamily() == 'Acceptor':\n                node_list = feats[i].GetAtomIds()\n                for j in node_list:\n                    g.nodes[j]['acceptor'] = 1\n        \n        # Read Edges\n        for i in range(mol.GetNumAtoms()):\n            for j in range(mol.GetNumAtoms()):\n                e_ij = mol.GetBondBetweenAtoms(i, j)\n                if e_ij is not None:\n                    g.add_edge(i, j, b_type=e_ij.GetBondType())\n\n        \n        node_attr, node_SF = self.mol_nodes(g)\n        edge_index, edge_attr = self.mol_edges(g)\n        data = Data(\n            x=node_attr,\n            pos=torch.FloatTensor(geom),\n            edge_index=edge_index,\n            edge_attr=edge_attr,\n            y=l\n        )\n        data.ACSF = node_SF\n        data.ACSF_local = torch.FloatTensor(ascf_features_local)\n        data.alias = alias\n        data.PhyChem_total = torch.FloatTensor(phy_chem_total.values).unsqueeze(0)\n        data.PhyChem_local = torch.FloatTensor(phy_chem_local.values)\n        data.SOAP = torch.FloatTensor(SOAP_features)\n        data.CM = torch.FloatTensor(CM_features)\n        data.MACCSfp = MACCSfp.unsqueeze(0)\n        data.Morganfp = Morganfp.unsqueeze(0)\n        data.mergefp = torch.cat((MACCSfp,Morganfp)).unsqueeze(0)\n        data.keyAtom_list = torch.LongTensor(list([([int(mol_num)]*len(keyAtoms)),list(keyAtoms)])).transpose(1,0)\n        # data.BoB in self.process\n        return data, tmp_mol\n\n    def process(self):\n        '''\n        if self.mode == 'dev':\n            self.target = pd.read_csv(self.raw_paths[1], index_col=0,\n                                      usecols=['gdb_idx',] + ['property_%d' % x for x in range(12)])\n            self.target = self.target[['property_%d' % x for x in range(12)]]\n        '''\n        gaussian_dir = pathlib.Path(self.raw_paths[0])\n        self.PhyChem = PhyChem(mode=self.mode, suffix=self.suffix)\n        self.PhyChem_labels_total = self.PhyChem.total_PhyChem_labels\n        self.PhyChem_labels_local = self.PhyChem.local_PhyChem_labels\n        data_list, mol_list = [], []\n        for mol_file in gaussian_dir.glob(\"**/*sp.log\"):\n            mol_data, tmp_mol = self.gaussian_graph_reader(mol_file)\n            if mol_data is not None:\n                data_list.append(mol_data)\n                mol_list.append(tmp_mol)\n        \n        # add BoB descriptor\n        BoB = BagOfBonds()\n        BoB.fit(mol_list)\n        
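# Bag of Bonds sorts pairwise Coulomb terms into per-element-pair bags, giving one fixed-length vector per molecule\n        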
self.BOB_labels = BoB.get_bob_labels(BoB._bag_sizes)\n for i in range(len(mol_list)):\n tmp_BoB = BoB.transform([mol_list[i]])\n data_list[i].BoB = torch.LongTensor(tmp_BoB)\n \n if self.pre_filter is not None:\n data_list = [data for data in data_list if self.pre_filter(data)]\n\n if self.pre_transform is not None:\n data_list = [self.pre_transform(data) for data in data_list]\n\n data, slices = self.collate(data_list)\n processed_dir = os.path.dirname(self.processed_paths[0])\n if not os.path.isdir(processed_dir):\n os.makedirs(processed_dir)\n torch.save((data, slices), self.processed_paths[0])\n\n \nclass ElectroNegativityDiff(object):\n\n def __init__(self, norm=True, max_value=None, cat=True):\n self.norm = norm\n self.max = max_value\n self.cat = cat\n def elec_nega(self,atom_pair):\n atom_1,atom_2 = atom_pair\n elec_nega_table = { 0: 2.20, #H\n 1: 2.55, #C\n 2: 3.04, #N\n 3: 3.44, #O\n 4: 3.98, #F\n 5: 2.58, #S\n 6: 3.16, #Cl\n }\n elec_nega_1,elec_nega_2 = elec_nega_table[atom_1],elec_nega_table[atom_2]\n Elec_diff = elec_nega_1 - elec_nega_2 #电负性差值\n return Elec_diff \n def elec_dire(self,elec_diff):\n if elec_diff < 0:\n return np.array([0,1])\n elif elec_diff > 0:\n return np.array([1,0])\n else:\n return np.array([0,0])\n def __call__(self, data):\n (row, col), pos, pseudo = data.edge_index, data.pos, data.edge_attr\n atom_type = data.x[:,:7]\n atom_type_x,atom_type_y = atom_type[row],atom_type[col]\n \n dist = torch.norm(pos[col] - pos[row], p=2, dim=-1).view(-1, 1)\n \n atom_pair_stack = np.stack((atom_type_x.argmax(dim=1),atom_type_y.argmax(dim=1)),axis=1)\n temp_elec_diff = np.array([self.elec_nega(atom_pair) for atom_pair in atom_pair_stack],dtype=np.float32)\n \n elec_dire_one_hot = torch.tensor([self.elec_dire(elec_diff) for elec_diff in temp_elec_diff],dtype=torch.float32).view(-1,2)\n elec_diff = torch.tensor(np.abs(temp_elec_diff),dtype=torch.float32).view(-1,1)\n if self.norm and dist.numel() > 0:\n dist = dist / dist.max() if self.max is None else self.max\n\n \n if pseudo is not None and self.cat:\n pseudo = pseudo.view(-1, 1) if pseudo.dim() == 1 else pseudo\n data.edge_attr = torch.cat([pseudo, dist.type_as(pseudo),elec_diff,elec_dire_one_hot], dim=-1)\n else:\n data.edge_attr = torch.cat([dist,elec_diff,elec_dire_one_hot],dim=-1)\n\n \n \n return data\n\n def __repr__(self):\n return '{}(norm={}, max_value={},writer: Licheng Xu)'.format(self.__class__.__name__,\n self.norm, self.max)\n\nclass Complete(object):\n def __call__(self, data):\n device = data.edge_index.device\n\n row = torch.arange(data.num_nodes, dtype=torch.long, device=device)\n col = torch.arange(data.num_nodes, dtype=torch.long, device=device)\n\n row = row.view(-1, 1).repeat(1, data.num_nodes).view(-1)\n col = col.repeat(data.num_nodes)\n edge_index = torch.stack([row, col], dim=0)\n\n edge_attr = None\n if data.edge_attr is not None:\n idx = data.edge_index[0] * data.num_nodes + data.edge_index[1]\n size = list(data.edge_attr.size())\n size[0] = data.num_nodes * data.num_nodes\n edge_attr = data.edge_attr.new_zeros(size)\n edge_attr[idx] = data.edge_attr\n\n edge_index, edge_attr = remove_self_loops(edge_index, edge_attr)\n data.edge_attr = edge_attr\n data.edge_index = edge_index\n\n return data\n","repo_name":"Masker-Li/ChemSelML","sub_path":"ChemSelML/bin/BaseDataset.py","file_name":"BaseDataset.py","file_ext":"py","file_size_in_byte":15103,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"} +{"seq_id":"70603378987","text":"#! 
/usr/bin/env python3\nimport numpy as np\nimport argparse\nfrom collections import Counter\nfrom scipy.sparse import coo_matrix\nfrom scipy.sparse.linalg import svds\nfrom sklearn.metrics import adjusted_rand_score as ARI\nimport dcsbm\n\nimport matplotlib.pyplot as plt\nfrom matplotlib import rc\nrc('font',**{'family':'serif','serif':['Times']})\nrc('text', usetex=True)\n\n## Bipartite graph\ncollege_map = {}\ncollege_inverse_map = {}\nserver_map = {}\nserver_inverse_map = {}\nrows = []\ncols = []\n\n## Obtain the graph\nk_college = 0\nk_server = 0\nG = Counter()\nfilter_ips = np.loadtxt('Data/hu_ch_sp_cx_ips.csv',delimiter=',',dtype=str)[:,0]\n\n## Obtain the full graph\nwith open('Data/college_edges_filter.csv') as f:\n for line in f:\n line = line.rstrip('\\r\\n').split(',')\n link = tuple(line[:2])\n src = link[0]\n dst = link[1]\n if src in filter_ips:\n G[src,dst] += 1\n if src not in college_map:\n college_map[src] = k_college\n college_inverse_map[k_college] = src\n k_college += 1\n rows += [college_map[src]]\n if dst not in server_map:\n server_map[dst] = k_server\n server_inverse_map[k_server] = dst\n k_server += 1\n cols += [server_map[dst]]\n\n## Obtain covariates\ncovs = np.loadtxt('Data/college_nodes_map_covs.csv',delimiter=',',dtype=str)\ncovs = covs[np.array([np.where(covs[:,0] == college_inverse_map[k])[0][0] for k in range(k_college)])][:,2]\nd = dict(zip(set(covs), range(len(covs))))\nlabs = np.array([d[x] for x in covs])\n\n## Obtain the adjacency matrix and the embeddings\nA = coo_matrix((np.repeat(1.0,len(rows)),(rows,cols)),shape=(k_college,k_server))\ndd = np.array(A.sum(axis=1))[:,0]\n\n## Plot degree distribution for ICL2\nfig, ax = plt.subplots(figsize=(4.25,3.25))\ncdict = ['#D81B60','#004D40','#1E88E5','#FFC107']\nmms = ['d', 's', 'o', 'v']\nlss = ['-','--', ':', '-.']\ngroup = ['Chemistry','Civil Engineering','Mathematics','Medicine',]\nfor g in range(4):\n ix = np.where(labs == g)\n ax.hist(dd[labs == g], bins=10, color = cdict[g], alpha=0.25)\n\nfor g in range(4):\n ix = np.where(labs == g) \n ax.hist(dd[labs == g], bins=10, histtype=u'step', linestyle = lss[g], edgecolor=cdict[g], linewidth=2, label = group[g])\n\nax.legend()\nplt.xlabel('Out-degree')\nplt.ylabel('Frequency')\nplt.savefig(\"out_icl.pdf\",bbox_inches='tight')\nplt.show()\n\n## Simulate stochastic blockmodel\nnp.random.seed(117)\nn = 1000\nK = 4\nz = np.array(list(range(4))*250)\nB = np.ones((4,4))*.25 + np.array([.5,.25,.1,0])*np.diag(np.ones(4))\nA = np.zeros((n,n))\nfor i in range(n-1):\n for j in range(i+1,n):\n A[i,j] = np.random.choice(2,size=1,p=[1-B[z[i],z[j]],B[z[i],z[j]]])\n A[j,i] = A[i,j]\n\n## Plot degree distribution of SBM\ndd = np.array(A.sum(axis=1))\nfig, ax = plt.subplots(figsize=(4.25,3.25))\ncdict = ['#1E88E5','#FFC107','#D81B60','#004D40']\nmms = ['o', 'v', 'd', 's']\nlss = [':','-.','-','--']\nfor g in [2,3,0,1]:\n ix = np.where(z == g)\n ax.hist(dd[z == g], bins=10, color = cdict[g], alpha=0.25)\n\nfor g in [2,3,0,1]:\n ix = np.where(z == g) \n ax.hist(dd[z == g], bins=10, histtype=u'step', linestyle = lss[g], edgecolor=cdict[g], linewidth=2)\n\nplt.xlabel('Degree')\nplt.ylabel('Frequency')\nplt.savefig(\"out_sbm.pdf\",bbox_inches='tight')\nplt.show()\n\n## Simulate degree corrected stochastic blockmodel\nnp.random.seed(117)\nrho = np.random.beta(size=n,a=2,b=4)\nfor i in range(n-1):\n for j in range(i+1,n):\n A[i,j] = np.random.choice(2,size=1,p=[1-rho[i]*rho[j]*B[z[i],z[j]],rho[i]*rho[j]*B[z[i],z[j]]])\n A[j,i] = A[i,j]\n\n## Plot degree distribution of DCSBM\ndd = 
np.array(A.sum(axis=1))\nfig, ax = plt.subplots(figsize=(4.25,3.25))\ncdict = ['#1E88E5','#FFC107','#D81B60','#004D40']\nmms = ['o', 'v', 'd', 's']\nlss = [':','-.','-','--']\nfor g in [2,3,0,1]:\n ix = np.where(z == g)\n ax.hist(dd[z == g], bins=10, color = cdict[g], alpha=0.25)\n\nfor g in [2,3,0,1]:\n ix = np.where(z == g) \n ax.hist(dd[z == g], bins=10, histtype=u'step', linestyle = lss[g], edgecolor=cdict[g], linewidth=2)\n\nplt.xlabel('Degree')\nplt.ylabel('Frequency')\nplt.savefig(\"out_dcsbm.pdf\",bbox_inches='tight')\nplt.show()","repo_name":"fraspass/dcsbm","sub_path":"degree_distributions.py","file_name":"degree_distributions.py","file_ext":"py","file_size_in_byte":4214,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"42026798930","text":"import scipy.signal as sg \nimport numpy as np \nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import LogNorm \n\ndata = np.load('data.npy')\nx = data[300000:-300000,1]\nx = np.concatenate((x[:int(x.size/2)],x[int(x.size/2):]))\nt = data[300000:-300000,0]/60/60\nt = t - min(t)\nFs = 20\n(f,ts,Z) = sg.stft(x,Fs,nperseg=4096*8,detrend='linear',boundary='even')\nZ = np.abs(Z[:,0:-1])\nts = ts[0:-1]/60/60\nZ = Z[:,:]\nf = f[:]\n#plt.subplot(211)\n#plt.pcolor(ts, f, np.abs(Z),norm=LogNorm(vmin=Z.min(), vmax=Z.max()))\n#plt.colorbar()\nplt.xlabel('time (hr)')\nplt.ylabel('frequency (Hz)')\nplt.subplot(212)\n#plt.plot(t,x)\nplt.xlim(np.min(ts),np.max(ts))\nplt.xlabel('time (hr)')\n#plt.show()\nplt.clf()\n\n\n\n'''\nplt.subplot(211)\nplt.plot(ts,np.mean(Z[1:100,:],axis=0))\nplt.xlim(np.min(ts),np.max(ts))\nplt.xlabel('Time (hrs)')\nplt.ylabel('Amplitude')\n\nplt.subplot(212)\nplt.plot(t,x)\nplt.xlim(np.min(ts),np.max(ts))\n#plt.yscale('log')\n#plt.xscale('log')\nplt.xlabel('Time (hrs)')\nplt.ylabel('Altitude (m)')\nplt.show()\n#plt.clf()\n'''\n'''\namps = np.mean(Z[100:500,:],axis=0)\nalts = [x[np.argmin(np.abs(t - i))] for i in ts]\nplt.xlabel('Altitude (m)')\nplt.ylabel('Amplitude')\n\nplt.title('Amplitude of high freqency oscilations vs Altitude')\nplt.plot(alts,amps,'r*')\nplt.plot(alts,0.00*np.exp(np.array(alts)/8000))\nplt.show()\n'''","repo_name":"johnldean/valbal-controller","sub_path":"spetrogram.py","file_name":"spetrogram.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70037955628","text":"#!/usr/bin/python\n\n\"\"\"\nVersion 3/19/2020\n\nImport and visualize data from the 3D cell microscopy experiments. 
\n\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport os\nimport sys\nimport re\n\nimport matplotlib.pyplot as plt\nfrom scipy import stats\nfrom cell_data_loader import uniq, cell_dataframe\n\ndef locate_trackID(df, xmatch, ymatch, tol = 25):\n\n\tsubDF = df.groupby('trackID').mean()\n\t# print(subDF)\n\n\ttracklist = subDF.loc[(np.abs(subDF['x'].values - xmatch) <= tol) & (np.abs(subDF['y'].values - ymatch) <= tol)]\n\n\treturn tracklist.index.tolist()\n\nprint(pd.__version__)\npd.set_option('display.expand_frame_repr', False, 'display.max_columns', None)\ncwd = os.getcwd()\n\nif 'Chris Price' in cwd:\n\tdatadir = 'C:\\\\Users\\\\Chris Price\\\\Box Sync\\\\Plasticity_Protrusions_ML\\\\'\nelse:\n\tprint('add your path to Plasticity_Protrusions_ML here')\n\tsys.exit()\n\n\n##### in this section, load a dataframe which contains the x-y coordinates of the images to be labeled with a track ID.\n\n# expdata = pd.read_excel(datadir+\"\\\\research\\\\cell ML\\\\modified_cell_protrusion_v1.xlsx\", skiprows=12)\nmatchdata = pd.read_excel(boxdir+\"\\\\research\\\\cell ML\\\\modified_cell_protrusion_v1.xlsx\", skiprows=0, nrows=10)\n\n######\n\n\n#### clean up the data to be matched\n\ncols=[i for i in matchdata.columns if i not in [\"index\",\"blank\"]]\n# print(cols)\nfor col in cols:\n matchdata[col] = pd.to_numeric(matchdata[col], errors='coerce')\n\nprint(expdata)\nprint(matchdata)\n# print(np.nanmax(expdata.iloc[:,1:]))\n# print(np.nanmin(expdata.iloc[:,1:]))\n# sys.exit()\n\n############\n###########\n\n##### get ready to load the tracking data\n\nkey = pd.read_excel(datadir +'\\\\images\\\\track-data\\\\computational_key.xlsx')\ndir1 = datadir + '\\\\combined_data'\n\ntol = 30\n\nfig, ax = plt.subplots(1,8,figsize=(32, 5))\npcount = 0\n\ndfcol = 'area'\n\n# loop over each cell to be matched \n\nfor ci, cn in enumerate(cols): \n\n\t# get filters from the image for which part of track data to look at\n\tif ~np.isnan(matchdata[cn].iloc[0]):\n\n\t\tcomponents = cn.split('_')\n\t\tassay = int(components[0].split('Q')[1])\n\t\tsample = components[1]\n\t\tplastic = components[2]\n\t\tcell = components[3]\n\n\t\t## interpolate if assay > 30 #### !!! ### can probably delete\n\t\tif assay > 30:\n\n\t\t\tcomponents = cn.split('_')\n\t\t\tassay = int(components[0].split('Q')[1])\n\t\t\tsample = '_'.join(components[1:-2])\n\t\t\tplastic = components[-2]\n\t\t\tcell = components[-1]\n\n\t\t\tfirst = expdata[cn].first_valid_index()\n\t\t\tlast = expdata[cn].last_valid_index()+1\n\t\t\texpdata[cn].iloc[first:last] = expdata[cn].iloc[first:last].interpolate(method='linear')\n\n\n\t\t### load the relevant track data.\n\n\t\tsubkey = key.loc[key['assay'] == assay]\n\t\tcellDF = cell_dataframe(subkey, [dir1])\n\t\tcellDF = cellDF.loc[cellDF['plastic'] == plastic].loc[cellDF['sample'] == sample]\n\n\t\t# x and y coordinates of cell center in image (in units of micrometers)\n\t\t## important: need another script to correlate pixel location with distance.\n\t\t## very important: need to vertically reflect the images.\n\n\t\txmatch = matchdata[cn].iloc[5]\n\t\tymatch = matchdata[cn].iloc[6]\n\n\n\t\t## this section used for aligning in time. can probably be cut if using average x-y. the second to last frame of the image aligns with the last time of the longest cell in the track data. 
Can rework this to run backwards.\n\n\t\tmaxlength = np.amax(cellDF.groupby('trackID').count().values)\n\t\talltimes = pd.DataFrame(pd.unique(cellDF['time']), columns=['imaris_time'])\n\t\t\n\t\tsub_match = matchdata.filter(regex=('_'.join(cn.split('_')[0:3]) + '_' +'.*'))\n\t\tmaxexplength = int(sub_match.iloc[7,1])\n\n\t\tcandidates = []\n\t\twhile len(candidates) < 1:\n\t\t\t\tcandidates = locate_trackID(cellDF, xmatch, ymatch, tol = tol)\n\t\t\t\ttol += 5\n\n\t\t# candidates = locate_trackID(cellDF, xmatch, ymatch, tol = tol)\n\n\t\toffset_dex = maxexplength - maxlength\n\n\t\tcorrscore = []\n\t\tlengths = []\n\t\toutDF = pd.DataFrame()\n\n\t\tfor ci, cc in enumerate(candidates):\n\n\t\t\tsubDF = cellDF.loc[cellDF['trackID'] == cc] #.reset_index(drop=True)\n\t\t\t# maxlength = np.amax(cellDF.groupby('trackID').count().values)\n\n\t\t\t# print(subDF)\n\n\t\t\texp = expdata[['Original Time',cn]].iloc[offset_dex:].reset_index(drop=True)\n\t\t\texp = pd.concat((alltimes,exp), axis=1)\n\t\t\texp.columns = ['imaris_time','exp_time','exp_length_um']\n\n\t\t\tsubDF = subDF.merge(exp, left_on = 'time', right_on='imaris_time',how='inner')\n\t\t\n\t\t\tif len(subDF) > 1: \n\n\t\t\t\t\n\t\t\t\t# plotting\n\t\t\t\t# ax[pcount].scatter(subDF['exp_length_um'], subDF[dfcol])\n\t\t\t\t# ax[pcount].set_ylim(arealimits)\n\n\t\t\t\tslope, intercept, r_value, p_value, std_err = stats.linregress(subDF.dropna(subset=['exp_length_um'])['exp_length_um'], subDF.dropna(subset=['exp_length_um'])[dfcol])\n\n\t\t\telse:\n\t\t\t\tr_value = 0.\n\n\t\t\tcorrscore.append(r_value**2)\n\t\t\tlengths.append(len(subDF))\n\n\t\t\tif corrscore[-1] == np.amax(corrscore):\n\t\t\t\tsaveDF = subDF.copy()\n\n\t\toutDF = pd.concat((outDF, saveDF), axis=0)\n\n\t\tpcount += 1\n\t\tprint(corrscore)\n\t\tprint(lengths)\n\n\t\tmatchdata.at[8, cn] = candidates[np.argmax(corrscore)]\n\t\tmatchdata.at[9, cn] = offset_dex\n\n\n# plt.show()\n\nprint(matchdata)","repo_name":"chris-price19/cell-ML","sub_path":"2D_Image_Scripts/assign-trackID.py","file_name":"assign-trackID.py","file_ext":"py","file_size_in_byte":4986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9148260575","text":"import bisect\r\n\r\n\r\ndef list_prime(n):\r\n if n <= 1:\r\n return []\r\n A = [2] + list(range(3, n + 1, 2))\r\n B = list()\r\n while A[0] ** 2 <= n:\r\n tmp = A[0]\r\n B.append(tmp)\r\n A = [a for a in A if a % tmp != 0]\r\n return B + A\r\n\r\n\r\nX = int(input())\r\n\r\nli = list_prime(100003)\r\nidx = bisect.bisect_left(li, X)\r\n\r\nprint(li[idx])\r\n","repo_name":"mikiya1130/AtCoder","sub_path":"field/contests/abc149/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"236016037","text":"import json\nfrom pathlib import Path\nfrom collections import defaultdict\nfrom allennlp.predictors.predictor import Predictor\nfrom datasets import load_dataset\n\nfrom constants import GENDER_PAIRS, RACE_SETS, RELIGION_SETS, NATIONALITY_SETS\n\nCROWS_DATASET_PATH = Path(\"data\") / \"crows_pairs_anonymized.csv\"\nOUTPUT_PATH = Path(\"data\") / \"debias_target_words.csv\"\n\n\ndef main():\n dataset = load_dataset(\"csv\", data_files=str(CROWS_DATASET_PATH), split=\"train\")\n\n predictor = Predictor.from_path(\"https://storage.googleapis.com/allennlp-public-models/ner-model-2020.02.10.tar.gz\")\n\n tokens_dict = defaultdict(list)\n\n for ex in dataset:\n if ex[\"bias_type\"] in 
[\"socioeconomic\", \"disability\", \"physical-appearance\", \"sexual-orientation\", \"age\"]:\n continue\n for sent in [ex[\"sent_more\"], ex[\"sent_less\"]]:\n result = predictor.predict(sent)\n for i, tag in enumerate(result[\"tags\"]):\n if any([tag.endswith(tag_name) for tag_name in [\"ORG\", \"PER\", \"LOC\"]]):\n tokens_dict[ex[\"bias_type\"]].append(result[\"words\"][i].lower())\n\n for name, tokens in tokens_dict.items():\n tokens_dict[name] = list(set(tokens))\n\n tokens_dict[\"gender\"].extend(set(sum(GENDER_PAIRS, [])))\n tokens_dict[\"race-color\"].extend(set(sum(RACE_SETS, [])))\n tokens_dict[\"religion\"].extend(set(sum(RELIGION_SETS, [])))\n tokens_dict[\"nationality\"].extend(set(sum(NATIONALITY_SETS, [])))\n\n json.dump({\"values\": [\n [tokens_dict[\"gender\"]],\n [tokens_dict[\"race-color\"]],\n [tokens_dict[\"religion\"]],\n [tokens_dict[\"nationality\"]]\n ]}, open(OUTPUT_PATH, \"w\"))\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"otakumesi/debias-BERT","sub_path":"experiments/extract_entity_from_crows_pairs.py","file_name":"extract_entity_from_crows_pairs.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3030202589","text":"# Author: Longsen Gao\n# Contact: longsengao@gmail.com\n# Created Time: 11-27-2022\n\nimport os\nimport inspect\nimport time\nimport mujoco_py\nimport numpy as np\nimport pandas as pd\n\nfrom threading import Thread\n# from mujoco_robots import transform\nfrom mujoco_robots.utils import MjViewerExtended\n\n\ndef test_sensors():\n model = mujoco_py.load_model_from_path(\"/home/wam/mujoco_robots/robot_description/wam7/wam_7dof.xml\")\n sim = mujoco_py.MjSim(model)\n sensor = np.zeros(7)\n sensor[0] = sim.data.get_sensor(\"fsensor3_6_1\")\n sensor[1] = sim.data.get_sensor(\"fsensor3_6_2\")\n sensor[3] = sim.data.get_sensor(\"fsensor3_6_3\")\n sensor[4] = sim.data.get_sensor(\"fsensor3_7_1\")\n sensor[5] = sim.data.get_sensor(\"fsensor3_7_2\")\n sensor[6] = sim.data.get_sensor(\"fsensor3_7_3\")\n return sensor\n\n\nclass MjRobot():\n \"\"\" An abstract base class to wrap mujoco_py simulations \"\"\"\n\n def __init__(self, xml_path, object_names=[], render=True,\n g_comp=False, tool_mass=0, tool_mass_site=None):\n \"\"\" An abstract base class to wrap mujoco_py simulations \"\"\"\n assert xml_path is None or os.path.isfile(xml_path)\n assert hasattr(self, 'n_dof') # Int\n assert hasattr(self, 'home_pos') # numpy.ndarray\n assert hasattr(self, 'p_gains') # numpy.ndarray\n assert hasattr(self, 'd_gains') # numpy.ndarray\n assert hasattr(self, 'max_ctrl') # numpy.ndarray\n assert hasattr(self, 'min_ctrl') # numpy.ndarray\n assert hasattr(self, 'dt') # Float\n assert not ((tool_mass == 0) ^ (tool_mass_site is None)) # must set both\n\n self.model = mujoco_py.load_model_from_path(xml_path)\n self.sim = mujoco_py.MjSim(self.model, nsubsteps=1)\n self.viewer = None\n\n if g_comp:\n self.g_comp_ctl = GravityCompensationController(self)\n else:\n self.g_comp_ctl = None\n if tool_mass_site is not None:\n self.mass_names.append(tool_mass_site)\n self.masses = np.append(self.masses, tool_mass)\n\n # data recording\n self.object_names = object_names\n self.rendering = render\n self.recording = False\n self.video_recording = False\n self._recorded_trajectory = []\n\n self.reset()\n self.wait()\n\n def close(self):\n \"\"\" closes all open threads \"\"\"\n self.stop_spinning()\n self.stop_rendering()\n\n # ========== basic simulation control 
def step(self, des_pos=None, des_vel=None, tau=None, n_steps=1):\n        \"\"\" advances the simulation by one time step \"\"\"\n        assert des_pos is None or len(des_pos) == self.n_dof and isinstance(des_pos, np.ndarray)\n        assert des_vel is None or len(des_vel) == self.n_dof and isinstance(des_vel, np.ndarray)\n        assert tau is None or len(tau) == self.n_dof and isinstance(tau, np.ndarray)\n\n        for _ in range(n_steps):\n            motor_torques = np.zeros(self.n_dof)\n            # track pos if available, and damp to 0 vel if no vel is available\n            if not des_pos is None:\n                motor_torques += self.p_gains * (des_pos.ravel() - self.pos)\n                if des_vel is None:  # damping to prevent P control only\n                    motor_torques -= self.d_gains * self.vel\n            if not des_vel is None:\n                motor_torques += self.d_gains * (des_vel.ravel() - self.vel)\n            if not tau is None:\n                motor_torques += tau.ravel()\n            if self.g_comp_ctl is not None:\n                tau_g_comp = self.g_comp_ctl()\n                motor_torques += tau_g_comp\n            motor_torques = np.maximum(np.minimum(motor_torques, self.max_ctrl), self.min_ctrl)\n\n            # apply actions and advance the simulation\n            self.sim.data.qfrc_applied[:self.n_dof] = motor_torques\n            self.sim.step()\n\n        # get sim state / observations\n        self.pos = self.sim.data.qpos[:self.n_dof].copy()\n        self.vel = self.sim.data.qvel[:self.n_dof].copy()\n\n        # update time\n        self.time = self.sim.data.time\n        self.timestep += 1\n        self.time_task += self.dt\n        self.timestep_task += 1\n\n        # render...\n        if self.rendering:\n            if self.viewer is None:  # ...and setup a viewer if not available\n                self._make_viewer()\n            self.viewer.render()\n\n        if self.recording:\n            self.record_current_time_step()\n\n        return self.pos, self.vel, self.time\n\n    def reset(self, pos=None, vel=None, task_name=''):\n        \"\"\" reset the simulation state\n        task_name is an optional label for data recording\n        \"\"\"\n        assert pos is None or len(pos) == self.n_dof\n        assert vel is None or len(vel) == self.n_dof\n        self.sim.reset()\n\n        self.time = self.sim.data.time\n        self.timestep = 0.\n        self.timestep_task = 0.\n        self.time_task = 0.\n        self.task_name = task_name\n        self.task_done = False\n\n        if pos is None:\n            self.sim.data.qpos[:self.n_dof] = self.home_pos\n        else:\n            self.sim.data.qpos[:self.n_dof] = np.array(pos)\n        if vel is None:\n            self.sim.data.qvel[:self.n_dof] = np.zeros(self.n_dof)\n        else:\n            self.sim.data.qvel[:self.n_dof] = np.array(vel)\n        self.pos = self.sim.data.qpos[:self.n_dof].copy()\n        self.vel = self.sim.data.qvel[:self.n_dof].copy()\n        self.pos_des = self.sim.data.qpos[:self.n_dof].copy()\n        self.vel_des = self.sim.data.qvel[:self.n_dof].copy()\n        self.tau_des = np.zeros(self.n_dof)  # feed forward\n\n        self.sim.forward()\n\n        return self.pos, self.vel, self.time\n\n    def get_joint_state(self, pos=True, vel=False):\n        \"\"\" returns joint positions and velocities if requested \"\"\"\n        ret = []\n        if pos is True:\n            ret.append(self.sim.data.qpos[:self.n_dof])\n        if vel is True:\n            ret.append(self.sim.data.qvel[:self.n_dof])\n        if len(ret) == 1:\n            return ret[0]\n        return tuple(ret)\n\n    def set_joint_state(self, pos=None, vel=None):\n        \"\"\" sets the positions and velocities of all robot joints\n        think about using reset instead\n        \"\"\"\n        if pos is not None:\n            self.sim.data.qpos[:self.n_dof] = pos\n        if vel is not None:\n            self.sim.data.qvel[:self.n_dof] = vel\n\n    def set_object_state(self, name, pos=None, vel=None, quat=None, vel_euler=None):\n        \"\"\" overwrites all provided state variables of the free joint\n        named name - this sets the state of the corresponding object wrt\n        the frame the free joint is defined in (usually 
world)\n        \"\"\"\n        x = self.sim.data.get_joint_qpos(name)\n        dx = self.sim.data.get_joint_qvel(name)\n        if pos is not None:\n            x[0:3] = pos\n        if vel is not None:\n            dx[0:3] = vel\n        if quat is not None:\n            x[3:7] = quat\n        if vel_euler is not None:\n            dx[3:6] = vel_euler\n        self.sim.data.set_joint_qpos(name, x)\n        self.sim.data.set_joint_qvel(name, dx)\n\n    def get_object_state(self, name, pos=True, vel=False, quat=False, vel_euler=False):\n        \"\"\" returns all requested state variables of the free joint\n        named name, which are identical to the state of the corresponding\n        object wrt the frame the free joint is defined in (usually world)\n        \"\"\"\n        x = self.sim.data.get_joint_qpos(name)\n        dx = self.sim.data.get_joint_qvel(name)\n        ret = []\n        if pos is True:\n            ret.append(x[0:3])\n        if vel is True:\n            ret.append(dx[0:3])\n        if quat is True:\n            ret.append(x[3:7])\n        if vel_euler is True:\n            ret.append(dx[3:6])\n        if len(ret) == 1:\n            return ret[0]\n        return tuple(ret)\n\n    def get_site_state(self, name, pos=True, vel=False, xmat=False, vel_euler=False):\n        \"\"\" returns all requested state variables of the site named name \"\"\"\n        ret = []\n        if pos is True:\n            ret.append(self.sim.data.get_site_xpos(name))\n        if vel is True:\n            ret.append(self.sim.data.get_site_xvelp(name))\n        if xmat is True:\n            ret.append(self.sim.data.get_site_xmat(name))\n        if vel_euler is True:\n            ret.append(self.sim.data.get_site_xvelr(name))\n        if len(ret) == 1:\n            return ret[0]\n        return tuple(ret)\n\n    def get_transform(self, from_body, to_body):\n        \"\"\" returns the homogeneous transformation from one body defined in the\n        xml description to another one as 4x4 matrix \"\"\"\n        from_world = self.get_body_full_mat(from_body)\n        to_world = self.get_body_full_mat(to_body)\n        world_to = np.linalg.inv(to_world)\n        trafo = from_world @ world_to\n        return trafo\n\n    def get_body_full_mat(self, body):\n        \"\"\" returns the homogeneous transformation from a body defined in the\n        xml description to the world frame as 4x4 matrix\n        \"\"\"\n        if body == 'world':\n            rot = np.eye(3)\n            pos = np.zeros((3, 1))\n        else:\n            rot = self.sim.data.get_body_xmat(body)\n            pos = self.sim.data.get_body_xpos(body).reshape((3, 1))\n        upper_mat = np.hstack([rot, pos])\n        full_mat = np.vstack([upper_mat, np.array([[0, 0, 0, 1]])])\n        return full_mat\n\n    # ========== visuals and recording ==========\n\n    def start_rendering(self):\n        self.rendering = True\n\n    def stop_rendering(self):\n        \"\"\" closes the current viewer \"\"\"\n        if self.rendering:\n            self.rendering = False\n            self.viewer.close()\n            self.viewer = None\n\n    def _make_viewer(self):\n        \"\"\" inherit to configure your own defaults\n        more options:\n        self.viewer.cam.distance = self.model.stat.extent * 1.0  # zoom\n        self.viewer.cam.trackbodyid = 0  # id of the body to track ()\n        self.viewer.cam.lookat[0] += 0.5  # works if trackbodyid=-1\n        self.viewer.cam.lookat[1] += 0.5\n        self.viewer.cam.lookat[2] += 0.5\n        self.viewer.cam.elevation = -90  # camera tilting\n        self.viewer.cam.azimuth = 0  # camera panning\n        \"\"\"\n        self.viewer = MjViewerExtended(self.sim)\n        self.viewer._hide_overlay = True\n        self.viewer._run_speed = 1.0\n        self.rendering = True\n\n    def set_mocap_pos(self, mocap_name, pos):\n        \"\"\" sets the position of a specified mocap object_names in world coords\n        this is useful to show things like desired cartesian positions\n        \"\"\"\n        self.sim.data.set_mocap_pos(mocap_name, pos)\n\n    def start_data_recording(self):\n        \"\"\" start recording the simulation state each time step \"\"\"\n        self.recording = True\n\n    def 
stop_data_recording(self):\n        \"\"\" stop recording the simulation state \"\"\"\n        self.recording = False\n\n    def get_data_recording(self):\n        \"\"\" return a DataFrame of the recorded robot and object states \"\"\"\n        q_des = [f\"{joint_name}_des\" for joint_name in self.joint_names]\n        dq_des = [f\"d{joint_name}_des\" for joint_name in self.joint_names]\n        q = [f\"{joint_name}\" for joint_name in self.joint_names]\n        dq = [f\"d{joint_name}\" for joint_name in self.joint_names]\n\n        value_types = ['pos_x', 'pos_y', 'pos_z', 'quat_x', 'quat_y', 'quat_z', 'quat_w']\n        objs = [f\"{val_type}_{name}\" for name in self.object_names for val_type in value_types]\n\n        columns = ['time', 'time_task', 'task'] + q_des + dq_des + q + dq + objs\n\n        df = pd.DataFrame(self._recorded_trajectory[:], columns=columns)\n        return df\n\n    def clear_data_recording(self):\n        \"\"\" clears all recorded data from cache \"\"\"\n        self._recorded_trajectory = []\n\n    def record_current_time_step(self):\n        \"\"\" adds the current simulation state to a buffer\n        is called each time step if self.recording is set\n        \"\"\"\n        obj_states = []\n        for name in self.object_names:\n            # fetch position and orientation of every tracked object\n            obj_pos, obj_quat = self.get_object_state(name, pos=True, quat=True)\n            obj_states += obj_pos.tolist()\n            obj_states += obj_quat.tolist()\n        self._recorded_trajectory.append([self.time, self.time_task, self.task_name]\n                                         + self.pos_des.tolist()\n                                         + self.vel_des.tolist()\n                                         + self.pos.tolist()\n                                         + self.vel.tolist()\n                                         + obj_states)\n\n    def start_video_recording(self, cameras, video_subsampling=10):\n        \"\"\" start recording a video of the simulation \"\"\"\n        raise NotImplementedError(\"Video recording does not work yet\")\n        self.cameras = cameras\n        self.video_subsampling = video_subsampling\n        self._recorded_video = [[] for _ in range(len(cameras))]\n        self.video_recording = True\n\n    def stop_video_recording(self):\n        \"\"\" stop recording a video of the simulation \"\"\"\n        self.video_recording = False\n\n    def get_image(self, camera_name, width=128, height=128, depth=False):\n        \"\"\" returns the view of a given camera specified in the xml file \"\"\"\n        img = self.sim.render(camera_name=camera_name, width=width, height=height, depth=depth)\n        return img\n\n    # ========== callback based control ==========\n    # can be used similar to Robcom 2\n\n    def set_control_cb(self, cb, duration=-1, task_name=''):\n        \"\"\" replaces the current control callback\n\n        cb: use the callback defined in goto_joint_cubic as template\n        duration: in time steps or -1 for infinite\n        task_name: an optional label for data recording \"\"\"\n        self.time_task = 0\n        self.timestep_task = 0\n        self.task_duration = duration\n        self.task_name = task_name\n        self.control_callback = cb\n\n    def start_spinning(self, sync_mode=False):\n        \"\"\" starts a thread alternating between calling the control callback\n        and step\n        will be approximately real-time when rendering is on\n\n        sync_mode: only here for compatibility with robocom_robots\"\"\"\n\n        def spin():\n            while self._spinning:\n                if not self.control_callback is None:\n                    self.control_callback(self)\n                self.step(self.pos_des, self.vel_des, self.tau_des)\n\n        self._spinning = True\n        self.control_thread = Thread(target=spin)\n        self.control_thread.start()\n\n    def stop_spinning(self):\n        \"\"\" stops the thread started in start_spinning \"\"\"\n        if hasattr(self, \"_spinning\") and self._spinning:\n            self._spinning = False\n            self.control_thread.join()\n\n    # ========== task based control ==========\n    # very simple to use - similar to the goto actions from ias_ros\n\n    def wait_for_task(self, dt=0.1):\n        \"\"\" sleeps in dt time 
increments until the current task is done \"\"\"\n        while not self.task_done:\n            time.sleep(dt)\n\n    def wait(self, task_name='WAIT'):\n        \"\"\" holds current position \"\"\"\n        self.task_name = task_name\n        self.vel_des = np.zeros(self.n_dof)\n\n        cb = lambda robot: None  # do nothing\n        self.set_control_cb(cb)\n\n    def goto_joint_cubic(self, q, dq=None, T=5, task_name='GOTO_CUBIC'):\n        \"\"\" moves the robot to the desired position, then waits\n        q: target joint position in rad\n        dq: target joint angular velocity in rad/s\n        T: target duration in sec \"\"\"\n        self.task_done = False\n        self.task_name = task_name\n\n        q_0 = self.pos_des\n        dq_0 = self.vel_des\n        q_T = np.array(q)\n        if dq is None:\n            dq_T = np.zeros(self.n_dof)\n        else:\n            dq_T = np.array(dq)\n        N = float(T) / self.dt\n\n        a = - 2 * (q_T - q_0) / N ** 3 + (dq_T + dq_0) / N ** 2\n        b = 3 * (q_T - q_0) / N ** 2 - (dq_T + 2 * dq_0) / N\n        c = dq_0\n        d = q_0\n\n        def goto_joint_cubic_cb(robot, a, b, c, d):\n            t = robot.timestep_task\n            robot.pos_des = a * t ** 3 + b * t ** 2 + c * t + d\n            robot.vel_des = (3 * a * t ** 2 + 2 * b * t + c) / robot.dt\n            robot.tau_des = None\n            if robot.timestep_task >= robot.task_duration:\n                robot.task_done = True\n                robot.wait(task_name='WAIT_AFTER_' + robot.task_name)\n\n        cb = lambda robot: goto_joint_cubic_cb(robot, a, b, c, d)\n        self.set_control_cb(cb, N)\n\n    def goto_home(self, duration=4.):\n        \"\"\" moves the robot to its home position, then waits\"\"\"\n        self.goto_joint_cubic(self.home_pos, np.zeros(self.n_dof), duration, task_name='GO_HOME')\n\n\nclass HFinger1(MjRobot):\n    default_xml_file = \"wam7/wam_7dof.xml\"\n    n_dof = 3\n    home_pos = np.array([0., 0., 0.0])\n    p_gains = np.array([300, 200.0, 300.0])\n    d_gains = np.array([20.0, 7.0, 15.0])\n    max_ctrl = np.array([3.14, 2.44, 0.83])\n    min_ctrl = np.array([0, 0, 0])\n    dt = 0.001\n\n    joint_names = [f'q{i}' for i in range(1, n_dof + 1)]\n    masses = np.array([\n        0.14109,\n        0.062139,\n        0.041377])  # same as in xml file, for gravity compensation\n    mass_names = [\n        \"wam/mass_sites/finger_1/prox_link\",\n        \"wam/mass_sites/finger_1/med_link\",\n        \"wam/mass_sites/finger_1/dist_link\"\n    ]\n\n    def __init__(self, xml_path=None, object_names=[], render=True,\n                 g_comp=False, tool_mass=0, tool_mass_site=None):\n        \"\"\" A single 3 DoF finger of the Barret WAM's hand\n        xml_path: to change the robots environment or end effector, provide a\n            modified version of the default xml description file\n        object_names: states of the listed objects are included in recordings\n        render: whether or not to render the simulation\n        g_comp: whether or not to use gravity compensation \"\"\"\n\n        if xml_path == None:\n            script_path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\n            xml_path = script_path + '/../robot_description/' + self.default_xml_file\n\n        MjRobot.__init__(self, xml_path, object_names=object_names, render=render,\n                         g_comp=g_comp, tool_mass=tool_mass, tool_mass_site=tool_mass_site)\n\n        self.ROBOT2WORLD = self.get_body_full_mat('wam/bhand/finger_1/prox_link')\n        self.WORLD2ROBOT = self.get_transform('wam/bhand/finger_1/prox_link', 'wam/bhand/finger_1/dist_link')\n\n\nclass UR5_pure(MjRobot):\n    \"\"\" The 6 DoF UR5 robot \"\"\"\n    # default_xml_file = \"wam7_Pro/wam_7dof.xml\"\n    default_xml_file = \"wam7/wam_7_Pure_UR5.xml\"\n\n    # robot properties\n    # n_dof = 11\n    n_dof = 6\n\n    home_pos = np.array([0.8164, -1.5386, -1.6014, 0, 0, 0])\n    p_gains = np.array([300.0, 150.0, 50.0, 50.0, 50.0, 10.0])\n    d_gains = np.array([15.0, 15.0, 10.0, 7.0, 5.3, 5.2])\n    max_ctrl = 
np.array([360.0, 360.0, 360.0, 360.0, 360.0, 360.0])\n min_ctrl = np.array(\n [-360.0, -360.0, -360.0, -360.0, -360.0, -360.0])\n\n # home_pos = np.array([0., -1.986, 0.0, 3.146, 0.0, 0.0, 0.0])\n # p_gains = np.array([200.0, 300.0, 200.0, 100.0, 100.0, 10.0, 2.5])\n # max_ctrl = np.array([150.0, 113.0, 157.0, 180.0, 75.0, 90.0, 128.0])\n # min_ctrl = np.array([-150.0, -113.0, -157.0, -50.0, -275.0, -90.0, -128.0])\n\n dt = 0.001\n\n joint_names = [f'q{i}' for i in range(1, n_dof + 1)]\n masses = np.array([\n 3.7,\n 8.393,\n 2.275,\n 1.219,\n 1.219,\n 0.1879\n ]) # same as in xml file, for gravity compensation\n mass_names = [\n \"ur5/mass_sites/shoulder_link\",\n \"ur5/mass_sites/upper_arm_link\",\n \"ur5/mass_sites/forearm_link\",\n \"ur5/mass_sites/wrist1_link\",\n \"ur5/mass_sites/wrist2_link\",\n \"ur5/mass_sites/wrist3_link\"\n ]\n\n def __init__(self, xml_path=None, object_names=[], render=True,\n g_comp=False, tool_mass=0, tool_mass_site=None):\n \"\"\" The 6 DoF, 80V UR5e robot\n xml_path: to change the robots environment or end effector, provide a\n modified version of the default xml description file\n object_names: states of the listed objects are included in recordings\n render: whether or not to render the simulation\n g_comp: whether or not to use gravity compensation \"\"\"\n\n if xml_path == None:\n script_path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\n xml_path = script_path + '/../robot_description/' + self.default_xml_file\n\n MjRobot.__init__(self, xml_path, object_names=object_names, render=render,\n g_comp=g_comp, tool_mass=tool_mass, tool_mass_site=tool_mass_site)\n\n # transformations:\n # self.ROBOT2WORLD = self.get_body_full_mat('wam/links/Track')\n # self.WORLD2ROBOT = self.get_transform('world', 'wam/links/Track')\n\n self.ROBOT2WORLD = self.get_body_full_mat('base_link')\n self.WORLD2ROBOT = self.get_transform('base_link', 'wrist3_link')\n\n def _make_viewer(self):\n \"\"\" visuals of the simulation, see MjRobot.viewer_setup() \"\"\"\n MjRobot._make_viewer(self)\n self.viewer.cam.distance = 2.5\n self.viewer.cam.lookat[0] += 0.15\n self.viewer.cam.elevation = -25\n self.viewer.cam.azimuth = -90\n\n\nclass MjWam7_pure(MjRobot):\n \"\"\" The 7 DoF, 80V Barret WAM robot \"\"\"\n # default_xml_file = \"wam7_Pro/wam_7dof.xml\"\n default_xml_file = \"wam7/wam_7_Pure_UR5.xml\"\n\n # robot properties\n # n_dof = 11\n n_dof = 7\n\n home_pos = np.array([0., -1.986, 0.0, 3.146, 0.0, -1.57, 0.0])\n p_gains = np.array([300.0, 150.0, 50.0, 50.0, 50.0, 10.0, 2.5])\n d_gains = np.array([15.0, 15.0, 10.0, 7.0, 5.3, 5.2, 2.05])\n max_ctrl = np.array([150.0, 113.0, 157.0, 180.0, 75.0, 90.0, 128.0])\n min_ctrl = np.array(\n [-150.0, -113.0, -157.0, -50.0, -275.0, -90.0, -128.0])\n\n # home_pos = np.array([0., -1.986, 0.0, 3.146, 0.0, 0.0, 0.0])\n # p_gains = np.array([200.0, 300.0, 200.0, 100.0, 100.0, 10.0, 2.5])\n # max_ctrl = np.array([150.0, 113.0, 157.0, 180.0, 75.0, 90.0, 128.0])\n # min_ctrl = np.array([-150.0, -113.0, -157.0, -50.0, -275.0, -90.0, -128.0])\n\n dt = 0.001\n\n joint_names = [f'q{i}' for i in range(1, n_dof + 1)]\n masses = np.array([\n 10.76768767,\n 3.87493756,\n 1.80228141,\n 2.40016804,\n 0.12376019,\n 0.41797364,\n 0.06864753\n ]) # same as in xml file, for gravity compensation\n mass_names = [\n \"wam/mass_sites/shoulder_yaw\",\n \"wam/mass_sites/shoulder_pitch\",\n \"wam/mass_sites/upper_arm\",\n \"wam/mass_sites/forearm\",\n \"wam/mass_sites/wrist_yaw\",\n \"wam/mass_sites/wrist_pitch\",\n 
\"wam/mass_sites/wrist_palm\"\n ]\n\n def __init__(self, xml_path=None, object_names=[], render=True,\n g_comp=False, tool_mass=0, tool_mass_site=None):\n \"\"\" The 7 DoF, 80V Barret WAM robot\n xml_path: to change the robots environment or end effector, provide a\n modified version of the default xml description file\n object_names: states of the listed objects are included in recordings\n render: whether or not to render the simulation\n g_comp: whether or not to use gravity compensation \"\"\"\n\n if xml_path == None:\n script_path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\n xml_path = script_path + '/../robot_description/' + self.default_xml_file\n\n MjRobot.__init__(self, xml_path, object_names=object_names, render=render,\n g_comp=g_comp, tool_mass=tool_mass, tool_mass_site=tool_mass_site)\n\n # transformations:\n # self.ROBOT2WORLD = self.get_body_full_mat('wam/links/Track')\n # self.WORLD2ROBOT = self.get_transform('world', 'wam/links/Track')\n\n self.ROBOT2WORLD = self.get_body_full_mat('wam/links/base')\n self.WORLD2ROBOT = self.get_transform('wam/links/base', 'wam/links/wrist_palm')\n\n def _make_viewer(self):\n \"\"\" visuals of the simulation, see MjRobot.viewer_setup() \"\"\"\n MjRobot._make_viewer(self)\n self.viewer.cam.distance = 2.5\n self.viewer.cam.lookat[0] += 0.15\n self.viewer.cam.elevation = -25\n self.viewer.cam.azimuth = -90\n\n\nclass MjWam7(MjRobot):\n \"\"\" The 7 DoF, 80V Barret WAM robot \"\"\"\n # default_xml_file = \"wam7_Pro/wam_7dof.xml\"\n default_xml_file = \"wam7/wam_7_UR5.xml\"\n\n # robot properties\n # n_dof = 11\n n_dof = 15\n\n home_pos = np.array([0., -1.986, 0.0, 3.146, 0.0, -1.57, 0.0, 2.33, 0.84, 0.0, 2.33, 0.84, 0, 2.33, 0.84])\n p_gains = np.array([300.0, 150.0, 50.0, 50.0, 50.0, 10.0, 2.5, 1, 1, 1, 1, 1, 1, 1, 1])\n d_gains = np.array([15.0, 15.0, 10.0, 7.0, 5.3, 5.2, 2.05, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02])\n max_ctrl = np.array([150.0, 113.0, 157.0, 180.0, 75.0, 90.0, 128.0, 139, 48, 180, 139, 48, 180, 139, 48])\n min_ctrl = np.array(\n [-150.0, -113.0, -157.0, -50.0, -275.0, -90.0, -128.0, -139, -48, 0, -139, -48, 0, -139, -48])\n\n # home_pos = np.array([0., -1.986, 0.0, 3.146, 0.0, 0.0, 0.0])\n # p_gains = np.array([200.0, 300.0, 200.0, 100.0, 100.0, 10.0, 2.5])\n # max_ctrl = np.array([150.0, 113.0, 157.0, 180.0, 75.0, 90.0, 128.0])\n # min_ctrl = np.array([-150.0, -113.0, -157.0, -50.0, -275.0, -90.0, -128.0])\n\n dt = 0.001\n\n joint_names = [f'q{i}' for i in range(1, n_dof + 1)]\n masses = np.array([\n 10.76768767,\n 3.87493756,\n 1.80228141,\n 2.40016804,\n 0.12376019,\n 0.41797364,\n 0.06864753,\n 0.50573,\n 0.062139,\n 0.041377,\n 0.14109,\n 0.062139,\n 0.041377,\n 0.14109,\n 0.062139,\n 0.041377\n ]) # same as in xml file, for gravity compensation\n mass_names = [\n \"wam/mass_sites/shoulder_yaw\",\n \"wam/mass_sites/shoulder_pitch\",\n \"wam/mass_sites/upper_arm\",\n \"wam/mass_sites/forearm\",\n \"wam/mass_sites/wrist_yaw\",\n \"wam/mass_sites/wrist_pitch\",\n \"wam/mass_sites/wrist_palm\",\n \"wam/mass_sites/bhand_palm_link\",\n \"wam/mass_sites/finger_3/med_link\",\n \"wam/mass_sites/finger_3/dist_link\",\n \"wam/mass_sites/finger_1/prox_link\",\n \"wam/mass_sites/finger_1/med_link\",\n \"wam/mass_sites/finger_1/dist_link\",\n \"wam/mass_sites/finger_2/prox_link\",\n \"wam/mass_sites/finger_2/med_link\",\n \"wam/mass_sites/finger_2/dist_link\"\n ]\n\n def __init__(self, xml_path=None, object_names=[], render=True,\n g_comp=False, tool_mass=0, tool_mass_site=None):\n \"\"\" 
The 7 DoF, 80V Barret WAM robot\n xml_path: to change the robots environment or end effector, provide a\n modified version of the default xml description file\n object_names: states of the listed objects are included in recordings\n render: whether or not to render the simulation\n g_comp: whether or not to use gravity compensation \"\"\"\n\n if xml_path == None:\n script_path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\n xml_path = script_path + '/../robot_description/' + self.default_xml_file\n\n MjRobot.__init__(self, xml_path, object_names=object_names, render=render,\n g_comp=g_comp, tool_mass=tool_mass, tool_mass_site=tool_mass_site)\n\n # transformations:\n # self.ROBOT2WORLD = self.get_body_full_mat('wam/links/Track')\n # self.WORLD2ROBOT = self.get_transform('world', 'wam/links/Track')\n\n self.ROBOT2WORLD = self.get_body_full_mat('wam/links/base')\n self.WORLD2ROBOT = self.get_transform('wam/links/base', 'wam/bhand/bhand_palm_link')\n\n def _make_viewer(self):\n \"\"\" visuals of the simulation, see MjRobot.viewer_setup() \"\"\"\n MjRobot._make_viewer(self)\n self.viewer.cam.distance = 2.5\n self.viewer.cam.lookat[0] += 0.15\n self.viewer.cam.elevation = -25\n self.viewer.cam.azimuth = -90\n\n\nclass MjWam4(MjRobot):\n \"\"\" The 4 DoF, 80V Barret WAM robot \"\"\"\n default_xml_file = \"wam4/wam_4dof.xml\"\n\n # robot properties\n n_dof = 4\n home_pos = np.array([0., -1.986, 0., 3.146])\n p_gains = np.array([200.0, 300.0, 100.0, 100.0])\n d_gains = np.array([7.0, 15.0, 5.0, 2.5])\n max_ctrl = np.array([150.0, 125.0, 40.0, 60.0])\n min_ctrl = -max_ctrl\n dt = 0.002\n\n joint_names = [f'q{i}' for i in range(1, n_dof + 1)]\n masses = np.array([10.76768767,\n 3.87493756,\n 1.80228141,\n 1.06513649]) # same as in xml file, for gravity compensation\n mass_names = [\"wam/mass_sites/shoulder_yaw\",\n \"wam/mass_sites/shoulder_pitch\",\n \"wam/mass_sites/upper_arm\",\n \"wam/mass_sites/forearm\"]\n\n def __init__(self, xml_path=None, object_names=[], render=True,\n g_comp=False, tool_mass=0, tool_mass_site=None):\n \"\"\" The 4 DoF, 80V Barret WAM robot\n xml_path: to change the robots environment or end effector, provide a\n modified version of the default xml description file\n object_names: states of the listed objects are included in recordings\n render: whether or not to render the simulation\n g_comp: whether or not to use gravity compensation \"\"\"\n\n if xml_path == None:\n script_path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\n xml_path = script_path + '/../robot_description/' + self.default_xml_file\n\n MjRobot.__init__(self, xml_path, object_names=object_names, render=render,\n g_comp=g_comp, tool_mass=tool_mass, tool_mass_site=tool_mass_site)\n\n # transformations:\n self.ROBOT2WORLD = self.get_body_full_mat('wam/links/base')\n self.WORLD2ROBOT = self.get_transform('world', 'wam/links/base')\n\n def _make_viewer(self):\n \"\"\" visuals of the simulation, see MjRobot.viewer_setup() \"\"\"\n MjRobot._make_viewer(self)\n self.viewer.cam.azimuth = -45\n self.viewer.cam.distance = 2.5\n self.viewer.cam.lookat[0] += 0.15\n self.viewer.cam.lookat[2] += 0.3\n self.viewer.cam.elevation = -25\n self.viewer.cam.azimuth = -90\n\n\nclass GravityCompensationController():\n \"\"\" A simple gravity compensation controller for any MjRobot\n\n Requires the MjRobot to have a 'site' object defined at every mass\n location in the xml description and a list of their names\n (MjRobot.mass_names) to compute the Jacobians \"\"\"\n\n 
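# Usage sketch (illustrative): a robot constructed with g_comp=True, e.g.\n    # MjWam4(g_comp=True), creates this controller and calls it inside step()\n    # so the compensation torques are added to the commanded motor torques.\n    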
def __init__(self, robot, gravity=np.array([0., 0., -9.81])):\n        \"\"\" A simple gravity compensation controller for any MjRobot \"\"\"\n        self.robot = robot\n        self.g = gravity\n\n    def __call__(self):\n        \"\"\" returns the torques that currently compensate gravity \"\"\"\n        tau = np.zeros(self.robot.n_dof)\n        for i in range(len(self.robot.mass_names)):\n            # get jacobian\n            target_jacp = np.zeros(3 * self.robot.sim.model.nv)\n            self.robot.sim.data.get_site_jacp(self.robot.mass_names[i], jacp=target_jacp)\n\n            # get rid of additional objects' free joints in target_jacp\n            jac = np.array(target_jacp).reshape((3, self.robot.sim.model.nv))[:, :self.robot.n_dof]\n\n            # compute torques with jacobian transpose method\n            tau += jac.T @ (- self.robot.masses[i] * self.g)\n\n        return tau\n","repo_name":"gaolongsen/WAM7_UR5e_Control","sub_path":"WAM_UR5_Control/mujoco_robots/robots.py","file_name":"robots.py","file_ext":"py","file_size_in_byte":31692,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"32883378582","text":"import sys\n\nreg_x = 1\n\ndisplay = [[]]\n\ncycles = 0\n\ndef draw():\n    cur_line = display[-1]\n    pos = len(cur_line)\n    # the sprite is 3 pixels wide and centered on reg_x\n    if abs(pos - reg_x) <= 1:\n        cur_line.append('#')\n    else:\n        cur_line.append('.')\n    if cycles % 40 == 0:\n        display.append([])\n\nfor line in sys.stdin.readlines():\n    cmd, *args = line.strip().split(' ')\n    if cmd == 'noop':\n        cycles += 1\n        draw()\n    elif cmd == 'addx':\n        v = int(args[0])\n        cycles += 1\n        draw()\n        cycles += 1\n        draw()\n        reg_x += v\n\nfor line in display:\n    print(''.join(line))\n","repo_name":"simon816/Advent-of-Code-2022","sub_path":"10/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13670430175","text":"\"\"\"\nThe basic skeleton of a simple test file manager.\n\"\"\"\nimport sys\nimport os\nfrom colorama import Fore, Style\nfrom core import create_new_folder, create_new_file, copy_file_or_folder, \\\n    delete_file_or_folder, get_list\n\nFAILURE_MESSAGE = 'Parameter not specified'\n\ntry:\n    command = sys.argv[1]\nexcept IndexError:\n    print('You need to choose a command.')\nelse:\n    if command == 'list':\n        try:\n            FLAG = sys.argv[2]\n            get_list(FLAG)\n        except IndexError:\n            get_list()\n    elif command == 'create_file':\n        try:\n            name = sys.argv[2]\n        except IndexError:\n            print(FAILURE_MESSAGE)\n        else:\n            create_new_file(name)\n    elif command == 'create_folder':\n        try:\n            name = sys.argv[2]\n        except IndexError:\n            print(FAILURE_MESSAGE)\n        else:\n            create_new_folder(name)\n    elif command == 'delete':\n        try:\n            name = sys.argv[2]\n        except IndexError:\n            print(FAILURE_MESSAGE)\n        else:\n            delete_file_or_folder(name)\n    elif command == 'copy_file_or_folder':\n        try:\n            name = sys.argv[2]\n            direction = sys.argv[3]\n        except IndexError:\n            print(FAILURE_MESSAGE)\n        else:\n            copy_file_or_folder(name, direction)\n    elif command == 'work_dir':\n        print(Fore.RED + 'Current Work Directory: ' + Fore.BLUE\n              + os.getcwd() + Style.RESET_ALL)\n    elif command == 'help':\n        print(Fore.RED + '\"list\"' + Fore.GREEN +\n              ' Request the list of files and folders')\n        print(Fore.RED + '\"create_file\"' + Fore.GREEN +\n              ' Create a file; pass the file name after a space')\n        print(Fore.RED + '\"create_folder\"' + Fore.GREEN +\n              ' Create a folder; pass the folder name after a space')\n        print(Fore.RED + '\"delete\"' + Fore.GREEN +\n              ' Delete a file or folder')\n        print(Fore.RED + '\"copy_file_or_folder\"' + Fore.GREEN +\n              ' Copy files and folders' + Style.RESET_ALL)\n        print(Fore.RED + '\"work_dir\"' + Fore.GREEN +\n              ' Working directory')\n","repo_name":"FoxyWRTH/XR-27","sub_path":"EDUCATION/GeekBrains/introduction_course/Primitive_File_Manager/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2410,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34462844520","text":"import math\n\n# C-4.9\n\"\"\"Write a short recursive Python function that finds the minimum and maximum values in a sequence without \nusing any loops.\"\"\"\n\ndef find_min_max(S, max=-math.inf, min=math.inf):\n    '''\n    >>> find_min_max([9,4,56,43,22,55,34])\n    (56, 4)\n    '''\n    if S[0] > max:\n        max = S[0]\n    if S[0] < min:\n        min = S[0]\n    if len(S) == 1:\n        return (max, min)\n    return find_min_max(S[1:], max, min)\n\n# C-4.10\n\"\"\"Describe a recursive algorithm to compute the integer part of the base-two logarithm of n using only addition\nand integer division\"\"\"\n\n# Start the number of steps at 0.\n# Integer divide the number by 2 (n // 2) and add 1 to the number of steps.\n# If the result of the division is 1, return the number of steps.\n# Else recurse, using the result of the n // 2 division and the number of steps as input.\n# The number of steps will be the floor of the answer.\n\ndef floor_log2(number):\n    '''\n    >>> floor_log2(30)\n    4\n    >>> floor_log2(242)\n    7\n    '''\n    if number <= 1:\n        return 0\n    new_number = number // 2\n    return 1 + floor_log2(new_number)\n\n# C-4.11\n\"\"\"Describe an efficient recursive function for solving the element uniqueness problem, which runs in time that is at\nmost O(n^2) in the worst case without sorting.\"\"\"\n\ndef all_elements_unique(S, index=0):\n    \"\"\"\n    >>> all_elements_unique([5, 6, 7, 8])\n    True\n    >>> all_elements_unique([9, 4, 3, 4, 2, 6])\n    False\n    \"\"\"\n    if index == len(S) - 1:\n        return True\n    else:\n        for i in range(index+1, len(S)):\n            if S[i] == S[index]:\n                return False\n        return all_elements_unique(S, index+1)\n\n# C-4.12\n\"\"\"Give a recursive algorithm to compute the product of two positive integers, m and n, using only addition and \nsubtraction\"\"\"\n\ndef multiply(n, m):\n    \"\"\"\n    >>> multiply(5, 3)\n    15\n    >>> multiply(12, 7)\n    84\n    \"\"\"\n    if m == 0:\n        return 0\n    return n + multiply(n, m-1)\n\n# C-4.13\n\"\"\"In Section 4.2 we prove by induction that the number of lines printed by a call to draw_interval(c) is 2^c - 1.\nAnother interesting question is how many dashes are printed during that process. Prove by induction that the number of \ndashes printed by draw_interval(c) is 2^(c+1) - c - 2\"\"\"\n\n# TODO: Later\n\n# C-4.14\n\"\"\"In the Towers of Hanoi puzzle, we are given a platform with three pegs, a, b, and c, sticking out of it. On peg a\nis a stack of n disks, each larger than the next, so that the smallest is on top and the largest is on the bottom. The\npuzzle is to move all the disks from peg a to peg c, moving one disk at a time, so that we never place a larger disk\non top of a smaller one. Describe a recursive algorithm for solving the Towers of Hanoi puzzle for arbitrary n.\"\"\"\n\n# To move n disks from peg a to peg c using peg b as a spare:\n# 1. Recursively move the top n-1 disks from a to b, using c as the spare.\n# 2. Move the largest disk from a to c.\n# 3. Recursively move the n-1 disks from b to c, using a as the spare.\n# The base case is a single disk, which moves directly from a to c.\n\n
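# A direct transcription of the algorithm above into code (an illustrative\n# sketch; it prints the moves rather than returning them):\ndef hanoi(n, source='a', spare='b', target='c'):\n    if n == 1:\n        print('move disk 1 from ' + source + ' to ' + target)\n        return\n    hanoi(n - 1, source, target, spare)\n    print('move disk ' + str(n) + ' from ' + source + ' to ' + target)\n    hanoi(n - 1, spare, source, target)\n\n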
# C-4.15\n\"\"\"Write a recursive function that will output all the subsets of a set of n elements (without repeating any subsets)\"\"\"\n\ndef get_all_subsets(k, S, universe):\n    \"\"\" prints every subset of the k-element set 'universe' exactly once,\n    extending the already-chosen prefix S \"\"\"\n    print(S)\n    for element in sorted(universe):\n        # extend only with larger elements so each subset is generated once\n        get_all_subsets(k - 1, S + [element], {e for e in universe if e > element})\n\n\n# C-4.16\n\"\"\"Write a short recursive Python function that takes a character string s and outputs its reverse. For example, the \nreverse of pots&pans would be snap&stop\"\"\"\n\ndef reverse_string(s):\n    \"\"\"\n    >>> reverse_string('pots&pans')\n    'snap&stop'\n    >>> reverse_string('gotmilk')\n    'klimtog'\n    \"\"\"\n    stop = len(s) - 1\n    if stop <= 0:\n        return s\n    return s[stop] + reverse_string(s[1:stop]) + s[0]\n\n\n# C-4.17\n\"\"\"Write a short recursive Python function that determines if a string s is a palindrome\"\"\"\ndef is_palindrome(s):\n    \"\"\"\n    >>> is_palindrome('racecar')\n    True\n    >>> is_palindrome('lalal')\n    True\n    >>> is_palindrome('lalala')\n    False\n    >>> is_palindrome('ss')\n    True\n    \"\"\"\n    if len(s) <= 1:\n        return True\n    if s[0] != s[-1]:\n        return False\n    return is_palindrome(s[1:-1])\n\n\n# C-4.18\n\"\"\"Use recursion to write a Python function for determining if a string s has more vowels than consonants\"\"\"\ndef more_vowels_than_consonants(s, vowel_count=0, consonant_count=0):\n    \"\"\"\n    >>> more_vowels_than_consonants('')\n    False\n    >>> more_vowels_than_consonants('h')\n    False\n    >>> more_vowels_than_consonants('ha')\n    False\n    >>> more_vowels_than_consonants('a')\n    True\n    >>> more_vowels_than_consonants('huehuehue')\n    True\n    >>> more_vowels_than_consonants('hahahaha')\n    False\n    \"\"\"\n    if len(s) == 0:\n        return vowel_count > consonant_count\n    if s[0] in ('a','e','i','o','u'):\n        vowel_count += 1\n    else:\n        consonant_count += 1\n    return more_vowels_than_consonants(s[1:], vowel_count, consonant_count)\n\n\n# C-4.19\n\"\"\"Write a short recursive Python function that rearranges a sequence of integer values so that all even values appear\nbefore all the odd values.\"\"\"\n\ndef all_even_before_odd(S, n=0):\n    \"\"\"\n    >>> all_even_before_odd([2, 8, 4, 9, 2])\n    [2, 8, 4, 2, 9]\n    >>> all_even_before_odd([3, 7, 0, 2, 4, 3])\n    [0, 2, 4, 3, 7, 3]\n    \"\"\"\n    if n >= len(S):\n        return []\n    if S[n] % 2 == 1:\n        return all_even_before_odd(S, n+1) + [S[n]]\n    else:\n        return [S[n]] + all_even_before_odd(S, n+1)\n\n\n# C-4.20\n\"\"\"Given an unsorted sequence, S, of integers and an integer k, describe a recursive algorithm for rearranging the \nelements in S so that all elements less than or equal to k come before any elements larger than k. 
What is the running \ntime of your algorithm on a sequence of n values?\"\"\"\n\ndef sort_smaller_than_k(S, k, n=0):\n \"\"\"\n >>> sort_smaller_than_k([2, 8, 4, 9, 2], 5)\n [2, 4, 2, 9, 8]\n >>> sort_smaller_than_k([3, 7, 0, 2, 4, 3], 6)\n [3, 0, 2, 4, 3, 7]\n \"\"\"\n if n >= len(S):\n return []\n if S[n] > k:\n return sort_smaller_than_k(S, k, n+1) + [S[n]]\n else:\n return [S[n]] + sort_smaller_than_k(S, k, n+1)\n\n\n# The running time is O(n).\n\n# C-4.21\n\"\"\"Suppose you are given an n-element sequence, S, given integers that are listed in increasing order. Given a number k,\ndescribe a recursive algorithm to find two integers in S that sum to k, if such a pair exists. What is the running time of\nyour algorithm?\"\"\"\n\n# Good idea to implement binary search\n\ndef sum_to_k(S, k, start, stop):\n \"\"\"\n >>> sum_to_k([1, 4, 6, 7, 11, 13, 19], 17, 0, 7)\n [4, 13]\n >>> sum_to_k([6, 9, 10, 12, 29, 35, 83, 134], 15, 0, 8)\n [6, 9]\n >>> sum_to_k([3, 6, 10, 15, 29, 34, 44], 6, 0, 7)\n >>> sum_to_k([], 10, 0, 0)\n \"\"\"\n if len(S) == 0:\n return None\n mid = (start + stop) // 2\n if start >= (stop - 1):\n return sum_to_k(S[1:], k, 0, len(S[1:]))\n if S[0] + S[mid] == k:\n return [S[0], S[mid]]\n elif S[0] + S[mid] > k:\n return sum_to_k(S, k, start, mid)\n elif S[0] + S[mid] < k:\n return sum_to_k(S, k, mid, stop)\n\n\n# C-4.22\n\"\"\"Develop a nonrecursive implementation of the version of power from Code Fragment 4.12 that uses repeated squaring.\"\"\"\n\n# Code Fragment 4.12\n\ndef power(x, n):\n \"\"\"Computer the value x**n for integer n.\"\"\"\n if n == 0:\n return 1\n else:\n partial = power(x, n // 2)\n result = partial * partial\n if n % 2 == 1:\n result *= x\n return result\n\ndef nonrecursive_power(x, n):\n \"\"\"\n >>> nonrecursive_power(2, 2)\n 4\n >>> nonrecursive_power(2, 3)\n 8\n >>> nonrecursive_power(2, 4)\n 16\n >>> nonrecursive_power(2, 18)\n 262144\n >>> nonrecursive_power(3, 3)\n 27\n \"\"\"\n result = 1\n k = 0\n while k < n:\n if k == 0 or (k * 2 > n):\n result *= x\n k += 1\n elif k * 2 <= n:\n result = result * result\n k *= 2\n return result\n\n\nif __name__ == \"__main__\":\n get_all_subsets(5, [], {1, 2, 3, 4, 5})\n","repo_name":"aamirza/learning-data-structures-algorithms","sub_path":"ch4/creativity/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":8258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4517958202","text":"import mysql.connector\nimport sys\n\n\n#change the username and password with respect to your system and then execute \ndatabaseName = sys.argv[1]\n\ncnx = mysql.connector.connect(user='root', database=databaseName,password='Password')\ncursor = cnx.cursor(buffered=True)\n\n\nquery = (\"show tables\")\ncursor.execute(query)\nli = []\nfor table in cursor:\n li.append(str(table)[2:len(str(table))-3])\n\n\ncursor.close()\n\nli2 =[]\nfor i in range(0,len(li)):\n cursor = cnx.cursor(buffered=True)\n query = (\"select count(*) from \" + li[i])\n cursor.execute(query)\n for table_length in cursor:\n li2.append(str(table_length))\n cursor.close()\n\nfor i in range(0,len(li)):\n print(li[i],li2[i])\ncnx.close()\n","repo_name":"phanitejakesha/Data-Science-","sub_path":"MySqlPythonConnector/pythonConnector.py","file_name":"pythonConnector.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36153950515","text":"from numpy import genfromtxt, fromstring\nfrom collections import 
Counter\n\ndef average_song_length(data):\n    s = 0\n    for line in data:\n        s += float(line[7]) #sum\n    return s / len(data) #divide to average\n\ndef average_song_length_formatted(data):\n    m, s = divmod(average_song_length(data), 60) #format\n    return str(int(m)).zfill(2) + \":\" + str(int(s)).zfill(2)\n\ndef average_song_progress(data):\n    s = 0\n    for line in data:\n        s += float(line[6])\n    return s / len(data) #divide to average\n\ndef most_played_artists(data):\n    artists = get_artists(data, 50)\n\n    highest_occuring_artists = Counter(artists).most_common()\n\n    if len(highest_occuring_artists) == 1:\n        return [highest_occuring_artists[0][0]]\n\n    highest_occurence = highest_occuring_artists[0][1]\n    artists = []\n\n    for artist in highest_occuring_artists:\n        if artist[1] != highest_occurence:\n            break\n        if artist[0] not in artists:\n            artists.append(artist[0])\n\n    return artists\n\ndef average_song_completion_for_artist(data, artist):\n    sum_percent_listened = 0\n    song_list = get_songs(data, artist)\n\n    if len(song_list) == 0 or song_list == [ ]:\n        print(\"NO SONGS BY GIVEN ARTIST '\" + artist + \"'!\")\n        return None\n\n    for song in song_list:\n        sum_percent_listened += percent_listened(get_song_line(data, song))\n\n    return sum_percent_listened / len(song_list)\n\ndef percent_listened(line):\n    return ( float(line[6]) / float(line[7]) ) * 100\n\ndef get_song_line(data, song):\n    for line in data:\n        if line[0] == song:\n            return line\n    return None\n\ndef get_songs(data, artist=None):\n    songs = [ ]\n    for line in data:\n        # keep songs played past 25% and, when given, only by the requested artist\n        if percent_listened(line) > 25 and (artist is None or artist == line[1]):\n            songs.append(line[0])\n\n    return songs\n\ndef get_albums(data, artist=None):\n    albums = [ ]\n    for line in data:\n        # when an artist is given, only keep that artist's albums\n        if artist is None or artist == line[1]:\n            albums.append(line[2])\n\n    return albums\n\ndef get_artists(data, minimum_percent=None):\n    artists = [ ]\n    # default to the 25 percent threshold used elsewhere when none is given\n    if minimum_percent is None:\n        minimum_percent = 25\n    for line in data:\n        if percent_listened(line) > minimum_percent:\n            artists.append(line[1])\n\n    return artists\n\ndef main():\n    d = genfromtxt('data.csv', delimiter=',', dtype=None, encoding=\"UTF-8\")\n    print(average_song_completion_for_artist(d, \"Childish Gambino\"))\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"poiley/tastebud-python","sub_path":"spotifyCalculations.py","file_name":"spotifyCalculations.py","file_ext":"py","file_size_in_byte":2601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11303838186","text":"#configuration\n\n#Screen size\nSCREEN_WIDTH = 551*2\nSCREEN_HEIGHT = 574\n\nSCREEN_SIZE = SCREEN_WIDTH, SCREEN_HEIGHT\n\nscreen = None\ncurrent_screen = None\n\n#sound options\nsound_song_on = True\nsound_effect_on = True\n\nevents = None\n\nquit_game = False\n\n#different screens\nmenu_settingsScreen = None\ngameScreen = None\nmenuScreen = None\nchooseMapScreen = None\n\n#different maps\nmap1 = False\nmap2 = True\nmap3 = False","repo_name":"emir3100/Supernatural-2d-game","sub_path":"Supernatural/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"17641155323","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nfrom scipy.stats import norm\nfrom sklearn.preprocessing import StandardScaler\nfrom scipy import stats, special\nimport warnings\nwarnings.filterwarnings('ignore')\n\n\n
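# These helpers keep the training and test frames compatible: every feature\n# dropped, created or rescaled on the training side is mirrored on the test side.\ndef 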
transform_train_data(df_train):\n    \"\"\"\n    Pre-process training data using multi-variate statistical analysis.\n    Retrieved from: https://www.kaggle.com/pmarcelino/comprehensive-data-exploration-with-python#Out-liars!\n    :param df_train: DataFrame containing the training data.\n    :return: tuple containing the new data frame and transformations used (data scaler).\n    \"\"\"\n    returned_objects = []\n    # dealing with missing data\n    total = df_train.isnull().sum().sort_values(ascending=False)\n    percent = (df_train.isnull().sum()/df_train.isnull().count()).sort_values(ascending=False)\n    missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])\n    dropped_indexes = (missing_data[missing_data['Total'] > 1]).index\n    returned_objects.append(dropped_indexes)\n    df_train = df_train.drop(dropped_indexes, 1)\n    df_train = df_train.drop(df_train.loc[df_train['Electrical'].isnull()].index)\n    df_train.isnull().sum().max()  # just checking that there's no missing data missing...\n    # standardizing data\n    scaler = StandardScaler()\n    # saleprice_scaled = scaler.fit_transform(df_train['SalePrice'][:, np.newaxis])\n    returned_objects.append(scaler)\n    # low_range = saleprice_scaled[saleprice_scaled[:, 0].argsort()][:10]\n    # high_range = saleprice_scaled[saleprice_scaled[:, 0].argsort()][-10:]\n    # deleting points\n    df_train = df_train.drop(df_train[df_train['Id'] == 1299].index)\n    df_train = df_train.drop(df_train[df_train['Id'] == 524].index)\n    # applying log transformation to achieve normal distribution\n    df_train['SalePrice'] = np.log(df_train['SalePrice'])\n    # data transformation\n    df_train['GrLivArea'] = np.log(df_train['GrLivArea'])\n    # create column for new variable (one is enough because it's a binary categorical feature)\n    # if area>0 it gets 1, for area==0 it gets 0\n    df_train['HasBsmt'] = pd.Series(len(df_train['TotalBsmtSF']), index=df_train.index)\n    df_train['HasBsmt'] = 0\n    # Every house that has basement SF bigger than 0, set hasBsmt to 1\n    df_train.loc[df_train['TotalBsmtSF']>0,'HasBsmt'] = 1\n    # transform data\n    # Every house with basement, transform the squared feet to log\n    df_train.loc[df_train['HasBsmt']==1,'TotalBsmtSF'] = np.log(df_train['TotalBsmtSF'])\n    # convert categorical variable into dummy\n    new_df = pd.get_dummies(df_train)\n    return new_df, returned_objects\n\n\n
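# Note: transform_train_data log-scales SalePrice, so model predictions live in\n# log space. An illustrative helper (not used elsewhere in this module) to map\n# them back to prices:\ndef predictions_to_prices(log_predictions):\n    # inverse of the np.log applied to SalePrice above\n    return np.exp(log_predictions)\n\n\n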
def transform_test_data(df_test, dropped_features, train_features):\n    \"\"\"\n    Pre-process test data using the inverse operations performed on previously analysed training data.\n    - Drops features that were dropped from the training dataset\n    - Adds features that were added in the training dataset.\n    - Applies inverse log (exp) to features that were scaled using log operation in the training dataset (GrLivArea, TotalBsmtSF)\n    :param df_test: DataFrame containing the test data.\n    :param dropped_features: list of features that were dropped during training\n    :param train_features: features that currently exist in the training dataset. \n    :return: DataFrame containing the test data, compatible with the training data, ready to be used.\n    \"\"\"\n    df_test = df_test.drop(dropped_features, 1)\n    df_test = df_test.drop(df_test.loc[df_test['Electrical'].isnull()].index)\n    df_test.isnull().sum().max()  # just checking that there's no missing data missing...\n    df_test['HasBsmt'] = pd.Series(len(df_test['TotalBsmtSF']), index=df_test.index)\n    df_test['HasBsmt'] = 0\n    df_test.loc[df_test['TotalBsmtSF']>0,'HasBsmt'] = 1\n    df_test.loc[df_test['HasBsmt']==1,'TotalBsmtSF'] = np.log(df_test['TotalBsmtSF'])\n    df_test['GrLivArea'] = np.log(df_test['GrLivArea'])\n    df_test = pd.get_dummies(df_test)\n    test_features = df_test.columns\n    features_to_add = np.setdiff1d(train_features.values, test_features.values)\n    for feature in features_to_add:\n        df_test.insert(len(df_test.columns), feature, 0, True)\n    df_test = df_test.fillna(0)\n    df_test = df_test.drop(\"SalePrice\", axis=1)\n    return df_test\n\n\ndef get_plots_and_analysis(df_train):\n    \"\"\"\n    Shows plots related to data analysis. Does not perform any writing operations on training data. \n    Used for analysis only.\n    :param df_train: DataFrame containing the training data.\n    \"\"\"\n    var = 'OverallQual'\n    data = pd.concat([df_train['SalePrice'], df_train[var]], axis=1)\n    f, ax = plt.subplots(figsize=(8, 6))\n    fig = sns.boxplot(x=var, y=\"SalePrice\", data=data)\n    fig.axis(ymin=0, ymax=800000)\n    # Year built\n    var = 'YearBuilt'\n    data = pd.concat([df_train['SalePrice'], df_train[var]], axis=1)\n    f, ax = plt.subplots(figsize=(16, 8))\n    fig = sns.boxplot(x=var, y=\"SalePrice\", data=data)\n    fig.axis(ymin=0, ymax=800000)\n    plt.xticks(rotation=90)\n    # correlation matrix\n    corrmat = df_train.corr()\n    f, ax = plt.subplots(figsize=(12, 9))\n    sns.heatmap(corrmat, vmax=.8, square=True)\n    # saleprice correlation matrix\n    k = 10  # number of variables for heatmap\n    cols = corrmat.nlargest(k, 'SalePrice')['SalePrice'].index\n    cm = np.corrcoef(df_train[cols].values.T)\n    sns.set(font_scale=1.25)\n    hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values)\n    plt.show()\n    # scatterplot\n    sns.set()\n    # Strongly correlated variables\n    cols = ['SalePrice', 'OverallQual', 'GrLivArea', 'GarageCars', 'TotalBsmtSF', 'FullBath', 'YearBuilt']\n    sns.pairplot(df_train[cols], size=2.5)\n    plt.show()\n    # check missing data\n    total = df_train.isnull().sum().sort_values(ascending=False)\n    percent = (df_train.isnull().sum()/df_train.isnull().count()).sort_values(ascending=False)\n    missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])\n    missing_data.head(20)\n    # dealing with missing data\n    df_train = df_train.drop((missing_data[missing_data['Total'] > 1]).index, 1)\n    df_train = df_train.drop(df_train.loc[df_train['Electrical'].isnull()].index)\n    df_train.isnull().sum().max()  # just checking that there's no missing data missing...\n    # standardizing data\n    saleprice_scaled = StandardScaler().fit_transform(df_train['SalePrice'][:,np.newaxis])\n    low_range = saleprice_scaled[saleprice_scaled[:,0].argsort()][:10]\n    high_range= saleprice_scaled[saleprice_scaled[:,0].argsort()][-10:]\n    print('outer range (low) of the distribution:')\n    print(low_range)\n    print('\\nouter range (high) of the distribution:')\n    print(high_range)\n    # bivariate analysis saleprice/grlivarea\n    var = 'GrLivArea'\n    data = pd.concat([df_train['SalePrice'], df_train[var]], axis=1)\n    data.plot.scatter(x=var, y='SalePrice', ylim=(0,800000))\n    # deleting 
points\n df_train = df_train.drop(df_train[df_train['Id'] == 1299].index)\n df_train = df_train.drop(df_train[df_train['Id'] == 524].index)\n #bivariate analysis saleprice/grlivarea\n var = 'TotalBsmtSF'\n data = pd.concat([df_train['SalePrice'], df_train[var]], axis=1)\n data.plot.scatter(x=var, y='SalePrice', ylim=(0,800000))\n #histogram and normal probability plot\n sns.distplot(df_train['SalePrice'], fit=norm)\n fig = plt.figure()\n res = stats.probplot(df_train['SalePrice'], plot=plt)\n # applying log transformation to achieve normal distribution\n df_train['SalePrice'] = np.log(df_train['SalePrice'])\n # transformed histogram and normal probability plot\n sns.distplot(df_train['SalePrice'], fit=norm)\n fig = plt.figure()\n res = stats.probplot(df_train['SalePrice'], plot=plt)\n # data transformation\n df_train['GrLivArea'] = np.log(df_train['GrLivArea'])\n # transformed histogram and normal probability plot\n sns.distplot(df_train['GrLivArea'], fit=norm)\n fig = plt.figure()\n res = stats.probplot(df_train['GrLivArea'], plot=plt)\n # histogram and normal probability plot\n sns.distplot(df_train['TotalBsmtSF'], fit=norm)\n fig = plt.figure()\n res = stats.probplot(df_train['TotalBsmtSF'], plot=plt)\n # create column for new variable (one is enough because it's a binary categorical feature)\n # if area>0 it gets 1, for area==0 it gets 0\n df_train['HasBsmt'] = pd.Series(len(df_train['TotalBsmtSF']), index=df_train.index)\n df_train['HasBsmt'] = 0\n df_train.loc[df_train['TotalBsmtSF']>0,'HasBsmt'] = 1\n # transform data\n df_train.loc[df_train['HasBsmt']==1,'TotalBsmtSF'] = np.log(df_train['TotalBsmtSF'])\n # histogram and normal probability plot\n sns.distplot(df_train[df_train['TotalBsmtSF']>0]['TotalBsmtSF'], fit=norm)\n fig = plt.figure()\n res = stats.probplot(df_train[df_train['TotalBsmtSF']>0]['TotalBsmtSF'], plot=plt)\n # scatter plot\n plt.scatter(df_train['GrLivArea'], df_train['SalePrice'])\n # scatter plot\n plt.scatter(df_train[df_train['TotalBsmtSF']>0]['TotalBsmtSF'], df_train[df_train['TotalBsmtSF']>0]['SalePrice'])\n\n\ndef my_analysis(df_train, df_test):\n \"\"\"\n TODO\n :param df_train:\n :return:\n \"\"\"\n sns.set()\n fig = plt.figure()\n # sns.distplot(df_train[\"SalePrice\"], fit=stats.johnsonsb, kde=False, fit_kws={'color': 'green'})\n # sns.distplot(df_train[\"SalePrice\"], fit=stats.lognorm, kde=False, fit_kws={'color': 'blue'},\n # axlabel=\"Sale Price Distribution (log fit)\")\n # plt.show(block=False)\n # print(df_train['SalePrice'].describe())\n # fig = plt.figure()\n # df_train[\"SalePrice\"] = np.log(df_train[\"SalePrice\"])\n # sns.distplot(df_train[\"SalePrice\"], fit=stats.lognorm, kde=False, fit_kws={'color': 'blue'},\n # axlabel=\"Log Sale Price Distribution\")\n # plt.show()\n # input()\n # cols = ['OverallQual', 'GrLivArea', 'GarageCars', 'TotalBsmtSF', 'FullBath', 'YearBuilt', 'Fireplaces']\n # Distributions of selected features (fit=log)\n # for c in cols:\n # fig = plt.figure()\n # sns.distplot(df_train[c], fit=stats.lognorm, kde=False, fit_kws={'color': 'blue'},\n # axlabel=\"{} Distribution (log fit)\".format(c))\n # plt.show(block=False)\n # plt.show()\n # input()\n # # Distribution of selected features (fit=norm)\n # for c in cols:\n # fig = plt.figure()\n # sns.distplot(df_train[c], fit=stats.norm, kde=False, fit_kws={'color': 'blue'},\n # axlabel=\"{} Distribution (norm fit)\".format(c))\n # plt.show(block=False)\n # # Scatter plots of selected features\n # plt.show()\n # input()\n # cols_scatter = ['GrLivArea', 'TotalBsmtSF', 
'YearBuilt']\n # # Boxplot of selected features\n # cols_box_plot = ['OverallQual', 'GarageCars', 'FullBath', 'Fireplaces', 'Utilities', 'Street']\n # for c in cols_box_plot:\n # fig = plt.figure()\n # sns.boxplot(x=c, y='SalePrice', data=pd.concat([df_train[c], df_train['SalePrice']], axis=1))\n # plt.show(block=False)\n features = classify_features(df_train)\n numerical_features = features[0]\n categorical_features = features[1]\n remove_outliers(df_train)\n df_train.drop(axis=1, labels=['Id'], inplace=True)\n deal_missing_data(df_train, df_test)\n categorical_to_ordinal_features(df_train, df_test)\n transform_numerical_features(df_train, df_test)\n combine_features(df_train, df_test)\n new_df_train, new_df_test = pd.get_dummies(df_train), pd.get_dummies(df_test)\n # Compensate both data frames for creating dummies. Creates columns in train and test that are missing.\n if new_df_train.shape[1] != new_df_test.shape[1]:\n train_features = new_df_train.columns\n test_features = new_df_test.columns\n features_to_add = np.setdiff1d(train_features.values, test_features.values)\n features_to_add = np.delete(features_to_add, np.where(features_to_add == 'SalePrice'))\n for feature in features_to_add:\n new_df_test[feature] = 0\n features_to_add = np.setdiff1d(test_features.values, train_features.values)\n for feature in features_to_add:\n new_df_train[feature] = 0\n assert (new_df_train.isna().any().sum() == 0)\n assert (new_df_test.isna().any().sum() == 0)\n assert (new_df_train.shape[1] == new_df_test.shape[1]+1)\n return new_df_train, new_df_test\n\n\ndef remove_outliers(df_train):\n \"\"\"\n Removes outliers, in place, from a given data frame.\n After analysing the scatter plot of the features with the SalePrice, the following cases were detected:\n - GrLivArea : i=523 and i=1298 are obvious outliers\n - TotalBsmtSF: i=1298 is an outlier\n i=440, 523, 496, 332 might be outliers\n :param df_train: Train data frame\n \"\"\"\n indexes = [523, 1298]\n df_train.drop(axis=0, labels=indexes, inplace=True)\n\n\ndef deal_missing_data(df_train, df_test):\n \"\"\"\n Deals with missing data (NaN). Changes are made in place, does not return a new data frame.\n For categorical features:\n - Some features have NaN as a possible value. 
Thus, they are not missing values.\n In such cases, values are changed to 'None'\n - Features that have in fact missing values are filled with the mode.\n For numerical features:\n - Missing values are filled the feature's median value.\n :param df_train: Train data frame\n \"\"\"\n total = df_train.isnull().sum().sort_values(ascending=False)\n percent = (df_train.isnull().sum()/df_train.isnull().count()).sort_values(ascending=False)\n missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])\n missing_data = missing_data[missing_data['Total'] > 0]\n # Fill rows that have missing data with the median value of the feature\n numerical_features = df_train.select_dtypes(exclude=object).columns\n for c in numerical_features:\n df_train[c].fillna(np.nanmedian(df_train[c]), inplace=True)\n if c != 'SalePrice':\n df_test[c].fillna(np.nanmedian(df_test[c]), inplace=True)\n # Fill rows that have missing data with specific values\n df_train['MasVnrType'].fillna('None', inplace=True)\n df_train['Electrical'].fillna('SBrkr', inplace=True)\n # Fill rows that have missing data with None or the median (numerical features)\n categorical_features = df_train.select_dtypes(include=object).columns\n for c in categorical_features:\n df_train[c].fillna('None', inplace=True)\n df_test[c].fillna('None', inplace=True)\n # Drop PoolQC because almost 100% has no information regarding it.\n # The feature PoolArea represents most of the feature PoolQC.\n features_to_drop = ['PoolQC', 'Street', 'Utilities']\n df_train.drop(features_to_drop, axis=1, inplace=True)\n df_test.drop(features_to_drop, axis=1, inplace=True)\n assert(df_train.isna().any().sum() == 0)\n assert (df_test.isna().any().sum() == 0)\n\n\ndef classify_features(df_train):\n numerical_features = []\n categorical_features = []\n for c in df_train:\n if df_train.dtypes[c] != object:\n numerical_features.append(c)\n else:\n categorical_features.append(c)\n return numerical_features, categorical_features\n\n\ndef combine_features(df_train, df_test):\n ds = [df_train, df_test]\n for d in ds:\n d['TotalSF'] = d['TotalBsmtSF'] + d['1stFlrSF'] + d['2ndFlrSF']\n d['Total_porch_sf'] = (d['OpenPorchSF'] + d['3SsnPorch'] +\n d['EnclosedPorch'] + d['ScreenPorch'] +\n d['WoodDeckSF'])\n d['haspool'] = d['PoolArea'].apply(lambda x: 1 if x > 0 else 0)\n\n\ndef categorical_to_ordinal_features(df_train, df_test):\n \"\"\"\n Transforms categorical features into ordinal. Selected features are based on manual analysis of data.\n The transformation is made in place (i.e. 
does not return a new data frame).\n    :param df_train: Train data frame\n    \"\"\"\n    mapper = {'LotShape': {'Reg': 3, 'IR1': 2, 'IR2': 1, 'IR3': 0},\n              'LandContour': {'Lvl': 1, 'Bnk': 0, 'HLS': 3, 'Low': 2},\n              'LandSlope': {'Mod': 2, 'Gtl': 1, 'Sev': 0},\n              'ExterQual': {'Ex': 4, 'Gd': 3, 'TA': 2, 'Fa': 1, 'Po': 0},\n              'ExterCond': {'Ex': 4, 'Gd': 3, 'TA': 2, 'Fa': 1, 'Po': 0},\n              'BsmtQual': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'None': 0},\n              'BsmtCond': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'None': 0},\n              'BsmtExposure': {'Gd': 4, 'Av': 3, 'Mn': 2, 'No': 1, 'None': 0},\n              'BsmtFinType1': {'GLQ': 6, 'ALQ': 5, 'BLQ': 4, 'Rec': 3, 'LwQ': 2, 'Unf': 1, 'None': 0},\n              'BsmtFinType2': {'GLQ': 6, 'ALQ': 5, 'BLQ': 4, 'Rec': 3, 'LwQ': 2, 'Unf': 1, 'None': 0},\n              'HeatingQC': {'Ex': 4, 'Gd': 3, 'TA': 2, 'Fa': 1, 'Po': 0},\n              'CentralAir': {'Y': 1, 'N': 0},\n              'KitchenQual': {'Ex': 4, 'Gd': 3, 'TA': 2, 'Fa': 1, 'Po': 0},\n              'FireplaceQu': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'None': 0},\n              'GarageFinish': {'Fin': 3, 'RFn': 2, 'Unf': 1, 'None': 0},\n              'GarageQual': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'None': 0},\n              'GarageCond': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'None': 0},\n              'Fence': {'GdPrv': 4, 'MnPrv': 3, 'GdWo': 2, 'MnWw': 1, 'None': 0}\n              }\n    for k in list(mapper.keys()):\n        df_train[k].replace(mapper.get(k), inplace=True)\n        df_test[k].replace(mapper.get(k), inplace=True)\n    assert(df_train.isna().any().all() == False)\n    assert(df_test.isna().any().all() == False)\n\n\ndef transform_numerical_features(df_train, df_test):\n    \"\"\"\n    TODO currently deals with positively skewed features, not negative. Analyse this.\n    :param df_train:\n    :param df_test:\n    :return:\n    \"\"\"\n    # apply log, scaling features\n    # SalePrice. Check log against log(1+x), log1p\n    df_train['SalePrice'] = np.log1p(df_train['SalePrice'])\n    # Check for skewed features\n    features_skewness = []\n    for k in df_train.columns:\n        if k == 'SalePrice':\n            pass\n        elif df_train.dtypes[k] != object:\n            features_skewness.append([k, float(stats.skew(df_train[k]))])\n    features_skewness_df = pd.DataFrame(features_skewness, columns=['F', 'S']).sort_values(by='S')\n    pos_skewed_features = features_skewness_df[features_skewness_df['S'] > 0.5]['F']\n    neg_skewed_features = features_skewness_df[features_skewness_df['S'] < -0.5]['F']\n    # Apply log for negatively (left) skewed features (skewness<-0.5)\n    # Apply boxcox1p for positively (right) skewed features (skewness>0.5)\n    # boxcox1p(x,lmbda):\n    # y = log(1+x) if lmbda==0\n    # y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0\n    # the Box-Cox Power transformation only works if all the data is positive and greater than 0\n    for f in pos_skewed_features:\n        box_cox_coef = stats.boxcox_normmax(df_train[f] + 1)\n        df_train[f] = special.boxcox1p(df_train[f], box_cox_coef)\n        df_test[f] = special.boxcox1p(df_test[f], box_cox_coef)\n\n
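# A minimal end-to-end sketch of how these helpers compose (illustrative; the\n# csv file names are assumptions, and a model would be fit on X, y afterwards):\ndef example_preprocessing_pipeline(train_csv='train.csv', test_csv='test.csv'):\n    df_train = pd.read_csv(train_csv)\n    df_test = pd.read_csv(test_csv)\n    # my_analysis cleans both frames, encodes categoricals and aligns columns\n    new_train, new_test = my_analysis(df_train, df_test)\n    X = new_train.drop('SalePrice', axis=1)\n    y = new_train['SalePrice']  # already log1p-transformed\n    return X, y, new_test\n\n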
\n\n'''\nNotes about data:\n---\nBased on distributions analysis:\n- SalePrice follows a lognorm or Johnson SB distribution.\n- SalePrice and GrLivArea have a high positive correlation.\n  Two points may be outliers (GrLivArea > 4000, SalePrice < 12.5)\n- SalePrice and TotalBsmtSF have a high positive correlation.\n  One data point seems like an outlier (TotalBsmtSF > 6000, might be informative).\n  4 points may be outliers (TotalBsmtSF > 3000)\n- OverallQual, GarageCars maybe log\n- OverallQual, GarageCars maybe norm\n- GrLivArea follows a norm distribution\n- TotalBsmtSF follows a norm distribution, slightly left skewed\n- YearBuilt follows a norm distribution, slightly right skewed\n\n---\nDealing with missing data:\nThe following features have missing data:\n    'PoolQC' - NA means no pool\n    'MiscFeature' - NA means no misc feature\n    'Alley' - NA means no alley access\n    'Fence' - NA means no fence\n    'FireplaceQu' - NA means no fireplace (same as Fireplaces=0)\n    'GarageCond' - NA means no garage\n    'GarageType' - NA means no garage\n    'GarageYrBlt' - NA means no garage\n    'GarageFinish' - NA means no garage\n    'GarageQual' - NA means no garage\n    'BsmtExposure' - NA means no basement (there is one extra basement NA in Exposure and Fin Type 2)\n    'BsmtFinType2' - NA means no basement\n    'BsmtFinType1' - NA means no basement\n    'BsmtCond' - NA means no basement\n    'BsmtQual' - NA means no basement\n    'MasVnrArea' - NA is missing data (X)\n    'MasVnrType' - NA is missing data (X)\n    'Electrical' - NA is missing data (X)\n    'LotFrontage' - NA is missing data (X)\n---\nDealing with outliers:\n- GrLivArea : i=523 and i=1298 are outliers\n- TotalBsmtSF: i=1298 is an outlier\n               i=440, 523, 496, 332 might be outliers\n'''\n\n\n","repo_name":"alejandromumo/MLSKG","sub_path":"house-prices-advanced-regression-techniques/multivariate_analysis.py","file_name":"multivariate_analysis.py","file_ext":"py","file_size_in_byte":21031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"29049246320","text":"import sys\nimport re\nfrom pathlib import Path\nSERVER_FOLDER = Path(__file__).parent.parent.resolve()\ndata_folder = SERVER_FOLDER / \"Data/redditData/Posts/processed_df.csv\"\n\n\n# sys.path.insert(0, '/Users/aadeesh/redditSentiment/server/model')\n# from modelClasses import textTransformer, customModel\nimport pandas as pd\nimport numpy as np\n\ndef dataframeProcessor(df, classifier):\n\n    keywords = {\"Tesla\" : [\"$tsla\", \"tsla\", \"tesla\", \"elon musk\", \"musk\"],\n                \"Apple\" : [\"$aapl\", \"aapl\", \"apple\", \"mac\"],\n                \"Nvidia\" : [\"$nvda\", \"nvda\", \"nvidia\"],\n                \"Google\" : [\"$googl\", \"googl\", \"google\", \"alphabet\", \"bard\"],\n                \"Amazon\" : [\"$amzn\", \"amzn\", \"amazon\", \"aws\"],\n                \"Microsoft\" : [\"$msft\", \"msft\", \"microsoft\", \"windows\", \"azure\"],\n                \"Meta\" : [\"$meta\", \"meta\", \"instagram\", \"facebook\"]\n                }\n    keywords2 = [\"$tsla\", \"tsla\", \"tesla\", \"elon musk\", \"musk\",\n                 \"$aapl\", \"aapl\", \"apple\", \"mac\",\n                 \"$nvda\", \"nvda\", \"nvidia\",\n                 \"$googl\", \"googl\", \"google\", \"alphabet\", \"bard\",\n                 \"$amzn\", \"amzn\", \"amazon\", \"aws\",\n                 \"$msft\", \"msft\", \"microsoft\", \"windows\", \"azure\",\n                 \"$meta\", \"meta\", \"instagram\", \"facebook\"\n                 ]\n\n    # re.escape keeps the '$' in tickers like \"$tsla\" from acting as a regex anchor\n    filtered_df = df[df['Comment'].str.contains('|'.join(map(re.escape, keywords2)), case = False)]\n\n    # Add an extra column to the filtered dataframe that indicates which keyword was present in that comment\n    def keyWordBuilder(comment):\n        returnString = \"\"\n        for keyword in keywords2:\n            if keyword in comment.lower():\n                for key in keywords:\n                    if keyword in keywords[key]:\n                        if key not in returnString:\n                            returnString += key + ' '\n        if returnString == \"\":\n            return \"None\"\n        return returnString\n\n    keyWordList = filtered_df['Comment'].apply(keyWordBuilder)\n\n    filtered_df = filtered_df.assign(Keyword = keyWordList)\n\n    newDates = pd.to_datetime(filtered_df['Date'])
\n    newDates = newDates.dt.date\n    filtered_df = filtered_df.assign(Date = newDates)\n    filtered_df = filtered_df.sort_values(by='Date', ascending=True)\n\n    comments = filtered_df.Comment\n    preds = classifier.predict(comments)\n\n    sentiments = np.argmax(preds, axis = 1)\n    # preds\n\n    filtered_df = filtered_df.assign(Sentiment = sentiments)\n\n    return filtered_df\n\ndef jsonBuilder(filtered_df):\n    # filtered_rows = filtered_df[filtered_df['Keyword'].str.contains('tesla', case=False)]\n    # filtered_rows['Date'] = pd.to_datetime(filtered_rows['Date'])\n\n    # # Extract only the date part from the 'Date' column\n    # filtered_rows['Date'] = filtered_rows['Date'].dt.date\n    # print(filtered_rows.head())\n    tesla_df, apple_df, nvda_df, google_df, amzn_df, msft_df, meta_df = {}, {}, {}, {}, {}, {}, {}\n\n    done = []\n    for i in (filtered_df.Date):\n        # date_string = i.strftime('%m-%d')\n        date_string = i\n        if date_string not in done:\n            tesla_df[date_string] = 0\n            apple_df[date_string] = 0\n            nvda_df[date_string] = 0\n            google_df[date_string] = 0\n            amzn_df[date_string] = 0\n            msft_df[date_string] = 0\n            meta_df[date_string] = 0\n            done.append(date_string)\n\n    for i, j, k in zip(filtered_df.Date, filtered_df.Keyword, filtered_df.Sentiment):\n        # date_string = i.strftime('%m-%d')\n        date_string = i\n        val = 1\n        if k == 0:\n            val = 0\n        for keyword in j.split():\n            if keyword == \"Tesla\":\n                tesla_df[date_string] += val\n            if keyword == \"Apple\":\n                apple_df[date_string] += val\n            if keyword == \"Nvidia\":\n                nvda_df[date_string] += val\n            if keyword == \"Google\":\n                google_df[date_string] += val\n            if keyword == \"Amazon\":\n                amzn_df[date_string] += val\n            if keyword == \"Microsoft\":\n                msft_df[date_string] += val\n            if keyword == \"Meta\":\n                meta_df[date_string] += val\n    return [tesla_df, apple_df, nvda_df, google_df, amzn_df, msft_df, meta_df]\n\ndef get_data():\n    # df = pd.read_csv(\"/Users/aadeesh/redditSentiment/server/Data/redditData/Posts/post.csv\")\n\n    # filtered_df = dataframeProcessor(df, classifier)\n    filtered_df = pd.read_csv(data_folder)\n    return_list = jsonBuilder(filtered_df)\n\n    # tesla_df, apple_df, nvda_df, google_df, amzn_df, msft_df, meta_df\n    labels = list(return_list[0].keys())\n    values = [list(return_list[i].values()) for i in range(len(return_list))]\n    # values = list(return_list[0].values())\n    return labels, values","repo_name":"Aaddy-1/SentiStocks","sub_path":"server/api/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":4684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"21640580133","text":"my_string = \"hello python world , i'm a beginner \"\nprint(my_string.split(\"world , \",1)[1])\n\n# Find first part and return slice before it.\ntext = 'feature/CKBICHDSKK-8595\nYour branch'\nch = \"\n\"\npos_ch = text.find(ch)\nif pos_ch == -1:\n    print(\"\")\nelse:\n    print(text[0:pos_ch])","repo_name":"sergeiboikov/Labs.Python","sub_path":"Strings/Get string after substring.py","file_name":"Get string after substring.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"70789678187","text":"from skimage.color import rgb2gray\nimport numpy as np\n\nclass HOGDescriptor(object):\n\n\n    def __init__(self, pxl_per_cell):\n\n        self.pxl_per_cell = pxl_per_cell\n\n\n    # computes, for every pixel, the gradient from neighbouring intensities, plus the gradient magnitude and angle\n    # returns three 2D matrices:
\n    #   gradients - each element is the pixel's gradient (two values, along the x and y axes)\n    #   intensities - each element is the magnitude of that vector\n    #   angles - each element is the angle of that vector\n    def img_gradients(self, img):\n\n        # structures returned at the end\n        gradients = []\n        intensities = []\n        angles = []\n\n        # iterate over all rows (edge rows have no upper/lower neighbour; pxl_gradients handles that)\n        for i in range(self.n_rows):\n\n            gradients.append([])\n            intensities.append([])\n            angles.append([])\n\n            # iterate over all columns (edge columns have no left/right neighbour; pxl_gradients handles that)\n            for j in range(self.n_cols):\n                # compute the gradient along the x and y axes for this pixel\n                grad = self.pxl_gradients(img, i, j)\n                # compute the magnitude of the vector\n                gradients[i].append(grad)\n                intensity = np.sqrt(grad[0] ** 2 + grad[1] ** 2)\n                intensities[i].append(intensity)\n                # if the x gradient is 0 we would divide by zero when computing the angle,\n                # so we add a very small non-zero number that barely affects the result\n                # compute the angle of the vector\n                angle = np.degrees(np.arctan2(grad[1], grad[0]+1e-15)) % 180\n                angles[i].append(angle)\n\n        return gradients, intensities, angles\n\n    # computes the x and y gradient for one pixel; returns a list with two elements\n    def pxl_gradients(self, img, row, col):\n\n        y = 0\n        x = 0\n\n        # if the pixel is in the first column\n        if col == 0:\n            # the x gradient is: next pixel - current pixel\n            x = img[row][col + 1] - img[row][col]\n        # if the pixel is in the last column\n        elif col == self.n_cols - 1:\n            # the x gradient is: current pixel - previous pixel\n            x = img[row][col] - img[row][col - 1]\n        # in all other cases\n        else:\n            # the x gradient is: (next - previous) / 2\n            x = (img[row][col + 1] - img[row][col - 1]) / 2\n\n        # if the pixel is in the first row\n        if row == 0:\n            # the y gradient is: next pixel - current pixel\n            y = img[row + 1][col] - img[row][col]\n        # if the pixel is in the last row\n        elif row == self.n_rows - 1:\n            # the y gradient is: current pixel - previous pixel\n            y = img[row][col] - img[row - 1][col]\n        # in all other cases\n        else:\n            # the y gradient is: (next - previous) / 2\n            y = (img[row + 1][col] - img[row - 1][col]) / 2\n\n\n        return [x, y]\n\n    # builds the histogram for every cell\n    def cell_histo(self, angles, intensities):\n\n        histo = []\n        # row iterator over the angle and magnitude matrices\n        row = 0\n        # helper variable for indexing into the histogram\n        i = 0\n        # until we reach the last row of the angle matrix\n        while row != len(angles):\n            # append an empty row to the histogram matrix\n            histo.append([])\n            # column iterator over the angle and magnitude matrices\n            col = 0\n            # until we reach the last column of the angle matrix\n            while col != len(angles[0]):\n\n                # initialise an empty histogram for this cell\n                cell_histo = [0]*9\n\n                # visit the pixels of this 8x8 cell\n                for r in range(row, row + self.pxl_per_cell[0]):\n                    for c in range(col, col + self.pxl_per_cell[1]):\n\n                        # the angle we want to bin\n                        angle = angles[r][c]\n\n                        if angle < 180 and angle >= 0:\n                            # index of the bin we add to\n                            floor = int(angle // 20)\n                            cell_histo[floor] += intensities[r][c]\n\n                for r in range(len(cell_histo)):\n                    cell_histo[r] = cell_histo[r] / 64\n\n                # append this cell's histogram to the resulting histogram\n                histo[i].append(cell_histo)\n                col += self.pxl_per_cell[1]\n            row += self.pxl_per_cell[0]\n            i += 1\n\n        return histo
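\n\n    # Added hedged note (not in the original comments): block_create below slides a\n    # 2x2 window over the cell histograms (4 x 9 = 36 values per block) and\n    # L1-normalises each block; the 1e-5 term only guards against division by zero.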
\n\n    def block_create(self, histogram):\n\n        blocks = []\n\n        row = 0\n        col = 0\n\n        while row + 1 != len(histogram):\n            col = 0\n            while col + 1 != len(histogram[0]):\n\n                block = histogram[row][col] + histogram[row][col + 1] + histogram[row + 1][col] + histogram[row + 1][col + 1]\n                k = np.sum(np.abs(block)) + 1e-5\n                for i in range(len(block)):\n                    block[i] = block[i] / k\n                blocks += block\n                col += 1\n            row += 1\n        return blocks\n\n\n    def describe(self, img):\n\n        img_gray = rgb2gray(img)\n\n        self.n_rows = len(img_gray)\n        self.n_cols = len(img_gray[0])\n\n        gradients, intensities, angles = self.img_gradients(img_gray)\n\n        histo = self.cell_histo(angles, intensities)\n\n        blocks = self.block_create(histo)\n\n        return blocks","repo_name":"C0mpy/Soft-Computing","sub_path":"HogDescriptor.py","file_name":"HogDescriptor.py","file_ext":"py","file_size_in_byte":5621,"program_lang":"python","lang":"sh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"25308892723","text":"from typing import Optional\n\nimport pandas as pd\nimport numpy as np\nimport neurokit2 as nk\n\nfrom tpcp import Algorithm, Parameter, make_action_safe\n\n\nclass HeartBeatExtraction(Algorithm):\n    \"\"\"finds R-peaks and segments the ECG signal into heartbeats\"\"\"\n\n    _action_methods = \"extract\"\n\n    # input parameters\n    variable_length: Parameter[bool]\n    start_factor: Parameter[float]\n\n    # result\n    heartbeat_list_: pd.DataFrame\n\n    def __init__(\n            self,\n            variable_length: Optional[bool] = True,\n            start_factor: Optional[float] = 0.35\n    ):\n        \"\"\"initialize a new HeartBeatExtraction algorithm instance\n\n        Parameters\n        ----------\n        variable_length : bool\n            defines whether extracted heartbeats should have variable length (depending on the current RR-interval) or\n            fixed length (same length for all heartbeats, depending on the mean heart rate of the complete signal, 35% of\n            the mean heartbeat duration in seconds before the R-peak and 50% after it, see neurokit2 ecg_segment)\n            for variable length heartbeats, the start of the next heartbeat follows directly after the end of the last (ends exclusive)\n            for fixed length heartbeats, there might be spaces between heartbeat borders, or they might overlap\n        start_factor : float, optional\n            only needed for variable_length heartbeats, factor between 0 and 1, which defines where the start border\n            between heartbeats is set depending on the RR-interval to the previous heartbeat, for example factor 0.35 means\n            that the beat start is set at 35% of the current RR-distance before the R-peak of the beat\n        \"\"\"\n\n        self.variable_length = variable_length\n        self.start_factor = start_factor\n\n    @make_action_safe\n    def extract(self, ecg_clean: pd.Series, sampling_rate_hz: int):\n        \"\"\"segments the ecg signal into heartbeats; extracts start, end, and R-peak of each heartbeat\n\n        fills a df containing all heartbeats, one row corresponds to one heartbeat;\n        for each heartbeat, the df contains: start datetime, sample index of start/end, and sample index of the R-peak;\n        the index of the df can be used as heartbeat id\n\n        Args:\n            ecg_clean : containing the cleaned ecg signal as a pd series with datetime index\n            sampling_rate_hz : containing the sampling rate of the ecg signal in hz as int\n        Returns:\n            self: fills heartbeat_list_\n        \"\"\"\n\n        _, r_peaks = nk.ecg_peaks(ecg_clean, sampling_rate=sampling_rate_hz, method=\"neurokit\")\n        r_peaks = r_peaks[\"ECG_R_Peaks\"]\n\n        heartbeats = pd.DataFrame(index=np.arange(0, len(r_peaks)), columns=[\"start_time\",\n                                                                             \"start_sample\",\n                                                                             \"end_sample\",\n                                                                             \"r_peak_sample\",\n                                                                             \"rr_interval_samples\"])\n        heartbeats[\"r_peak_sample\"] = r_peaks
\n\n        # save the RR-interval to the successive heartbeat\n        rr_interval_to_next_beat = np.abs(heartbeats[\"r_peak_sample\"].diff(periods=-1))\n        rr_interval_to_next_beat.iloc[-1] = rr_interval_to_next_beat.iloc[-2]  # extrapolate last beat\n        heartbeats[\"rr_interval_samples\"] = rr_interval_to_next_beat\n\n        if self.variable_length:\n            # split the ecg signal into heartbeats with varying length\n\n            rr_interval_samples = heartbeats[\"r_peak_sample\"].diff()\n\n            # calculate the start of each heartbeat based on the corresponding R-peak and the current RR-interval\n            beat_starts = heartbeats[\"r_peak_sample\"] - self.start_factor * rr_interval_samples\n\n            # extrapolate the first beat's start based on the RR-interval of the next beat\n            first_beat_start = heartbeats[\"r_peak_sample\"].iloc[0] - self.start_factor * rr_interval_samples.iloc[1]\n            if first_beat_start >= 0:\n                beat_starts.iloc[0] = first_beat_start\n            else:\n                beat_starts = beat_starts.iloc[1:].reset_index(drop=True)  # drop the row when the heartbeat is incomplete\n                heartbeats = heartbeats.iloc[1:].reset_index(drop=True)\n            beat_starts = round(beat_starts).astype(int)\n            heartbeats[\"start_sample\"] = beat_starts\n\n            # calculate beat ends (the last beat ends 1 sample before the next starts, end is exclusive)\n            beat_ends = beat_starts.shift(-1)  # end is exclusive\n\n            # extrapolate the last beat's end based on the RR-interval of the previous beat\n            last_beat_end = round(\n                heartbeats[\"r_peak_sample\"].iloc[-1] + (1 - self.start_factor) * rr_interval_samples.iloc[-1])\n            if last_beat_end < len(ecg_clean):\n                beat_ends.iloc[-1] = last_beat_end\n            else:\n                beat_ends = beat_ends.iloc[:-1]  # drop the row when the heartbeat is incomplete\n                heartbeats = heartbeats.iloc[:-1]\n            beat_ends = beat_ends.astype(int)\n            heartbeats[\"end_sample\"] = beat_ends\n\n            # extract the time of each beat's start\n            beat_starts_time = ecg_clean.iloc[heartbeats[\"start_sample\"]].index\n            heartbeats[\"start_time\"] = beat_starts_time\n\n        else:\n            # split the ecg signal into heartbeats with fixed length\n\n            heartbeat_segments = nk.ecg_segment(ecg_clean, rpeaks=r_peaks, sampling_rate=sampling_rate_hz, show=False)\n            for segment_idx in heartbeat_segments.keys():\n                # extract the sample number of start, end, R-peak, and the datetime of start from the current segment\n                segment = heartbeat_segments[segment_idx].reset_index(drop=True)\n                start = segment[\"Index\"].iloc[0]\n                end = segment[\"Index\"].iloc[-1]\n                start_time = ecg_clean.index[start]\n\n                # fill the corresponding row of heartbeats for the current segment\n                # (idx-1 because segment keys start with 1, but heartbeats_list should start with 0)\n                heartbeats[\"start_sample\"].iloc[int(segment_idx) - 1] = start\n                heartbeats[\"end_sample\"].iloc[int(segment_idx) - 1] = end\n                heartbeats[\"start_time\"].iloc[int(segment_idx) - 1] = start_time\n\n        # ensures that the index is Int64Index (not RangeIndex) because some neurokit functions won't work with RangeIndex\n        heartbeats.index = list(heartbeats.index)\n        heartbeats.index.name = \"heartbeat_id\"\n\n        # check if the R-peak occurs between the corresponding start and end\n        check = heartbeats.apply(lambda x: x[\"start_sample\"] < x[\"r_peak_sample\"] < x[\"end_sample\"],\n                                 axis=1)\n        if len(check.loc[~check]) > 0:\n            raise ValueError(\n                f\"Start/end/R-peak position of heartbeat {list(check.loc[check == False].index)} could be incorrect!\")\n\n        self.heartbeat_list_ = heartbeats\n        return self
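\n\n# --- Added hedged usage sketch, not part of the original module ---\n# `ecg_series` is a placeholder for any cleaned ECG pd.Series with a DatetimeIndex:\n#   extractor = HeartBeatExtraction(variable_length=True, start_factor=0.35)\n#   extractor.extract(ecg_clean=ecg_series, sampling_rate_hz=256)\n#   extractor.heartbeat_list_.head()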
self\n","repo_name":"empkins/empkins-micro","sub_path":"empkins_micro/feature_extraction/pep/algorithms/ecg/extraction_heartbeats.py","file_name":"extraction_heartbeats.py","file_ext":"py","file_size_in_byte":6989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9576059192","text":"## Script (Python) \"getEventDictDay\"\n##bind container=container\n##bind context=context\n##bind namespace=\n##bind script=script\n##bind subpath=traverse_subpath\n##parameters=event, type='C', periodID=None, cDate=None\n##title=Get dictionary of Event info for day view\n##\n\"\"\"\nreturns a dictionary of useful objects for Events for the Day view\n\nmodified for CalendarX-0.6.4 change jsStart to reflect \"earlier\" event\n highlighting changes.\nReleased under the GPL (see LICENSE.txt)\nevent types:\n 'C' for continuing event (above main calendar view)\n 'E' for normal event (in main calendar view)\n 'L' for later event (below main calendar view)\n\"\"\"\nrequest = container.REQUEST\n\n#get from Property Sheets (true or false right now)\nampm = context.getCXAttribute('hoursDisplay') == '12ampm'\ndayviewstarthour = int(context.getCXAttribute('dayViewStartHour'))\ndayviewendhour = int(context.getCXAttribute('dayViewEndHour'))\nuseHalfHours = context.getCXAttribute('useHalfHours')\n\n#get currentDate from passed params, not from request or default (bug in pre 0.6.3)\ncurrentDate = cDate\n\n#calculate these DateTimes and related useful objects\nstartDate = context.getStartOfDay(currentDate,dayviewstarthour)\nendDate = context.getEndOfDay(currentDate,dayviewendhour)\nnextDate = (currentDate + 1).Date()\nprevDate = (currentDate - 1).Date()\neventstart = DateTime(str(event.start))\neventend = DateTime(str(event.end))\n#MODDED FOR 0.6.1\nif useHalfHours:\n periodsFactor = 2\nelse:\n periodsFactor = 1\nperiodsInDay = (dayviewendhour - dayviewstarthour)*periodsFactor\nperiodsInView = periodsInDay - 1\n\n#display parameters common to ALL event types\neventurl = str(event.getURL())+'/view'\neventState = event.review_state\neventtitle = str(event.Title)\nif not eventtitle:\n eventtitle = 'untitled'\nportaltype = str(event.Type)\nsyear = str(eventstart.year())\nsmonth = eventstart.aMonth()\nsday = str(eventstart.day())\nstime = test(ampm, eventstart.AMPMMinutes(), eventstart.TimeMinutes() + ' h')\neyear = str(eventend.year())\nemonth = eventend.aMonth()\neday = str(eventend.day())\netime = test(ampm, eventend.AMPMMinutes(), eventend.TimeMinutes() + ' h')\n\n\n#calculate jsStart and jsEnd: integer range of cells in view for hightlighting\nif type == 'C': #FOR CONTINUING EVENTS\n howManyPeriods = context.getNumOfPeriods(startDate, eventend, dayviewstarthour, dayviewendhour)\n #0.6.4 change to highlight cEvents cell if an early event (not for actual continuing events)\n# jsStart = dayviewstarthour*periodsFactor + 1 #ONLY highlights the hours below, not the cEvents cell\n jsStart = test(eventend <= startDate,dayviewstarthour*periodsFactor,dayviewstarthour*periodsFactor + 1)\n\n jsEnd = jsStart + howManyPeriods\n jsEndIfAllDay = jsStart + periodsInView\n #the following test() is for events ending ON THE PERIOD, so they don't spill over into the next period upon rollover.\n if useHalfHours:\n jsEndIfNotAllDay = test(eventend.minute() in [0,30], jsEnd-1, jsEnd)\n else:\n jsEndIfNotAllDay = test(eventend.minute() in [0], jsEnd-1, jsEnd)\n jsEnd = test(jsEndIfNotAllDay < jsEndIfAllDay, jsEndIfNotAllDay, jsEndIfAllDay)\n #test() just to make sure End is not prior to 
\n\n\n#calculate jsStart and jsEnd: integer range of cells in view for highlighting\nif type == 'E': #FOR EVENTS IN THE REGULAR DAY by hour TABLE\n    howManyPeriods = context.getNumOfPeriods(eventstart, eventend, dayviewstarthour, dayviewendhour)\n    jsStart = periodID\n    jsEnd = jsStart + howManyPeriods\n    jsEndIfAllDay = dayviewendhour*periodsFactor\n    #the following test() is for events ending ON THE PERIOD, so they don't spill over into the next period upon rollover.\n    if useHalfHours:\n        jsEndIfNotAllDay = test(eventend.minute() in [0,30], jsEnd-1, jsEnd)\n    else:\n        jsEndIfNotAllDay = test(eventend.minute() in [0], jsEnd-1, jsEnd)\n    jsEnd = test(jsEndIfNotAllDay < jsEndIfAllDay, jsEndIfNotAllDay, jsEndIfAllDay)\n    #test() to make sure End is not prior to Start\n    jsEnd = test(jsEnd < jsStart, jsStart, jsEnd)\n    #generate the eventstring\n    eventstring = ''+eventtitle+' (start: '+stime+' - '+smonth+' '+sday+', '+syear+' | end: '+etime+' - '+emonth+' '+eday+', '+eyear+')'\n\n\n\n#calculate jsStart and jsEnd: integer range of cells in view for highlighting\nif type == 'L': #FOR LATER EVENTS\n    jsStart = dayviewendhour*periodsFactor + 1\n    jsEnd = jsStart\n    #generate the eventstring\n    eventstring = ''+eventtitle+' (start: '+stime+' - '+smonth+' '+sday+', '+syear+' | end: '+etime+' - '+emonth+' '+eday+', '+eyear+')'\n\n\n\n\n#marshall all these into a dictionary\neddict = {\n    'eventurl':eventurl,\n    'eventstart':eventstart,\n    'eventend':eventend,\n    'jsStart':jsStart,\n    'jsEnd':jsEnd,\n    'eventState':eventState,\n    'eventtitle':eventtitle,\n    'portaltype':portaltype,\n    'syear':syear,\n    'smonth':smonth,\n    'sday':sday,\n    'stime':stime,\n    'eyear':eyear,\n    'emonth':emonth,\n    'eday':eday,\n    'etime':etime,\n    'eventstring':eventstring,\n    }\n\nreturn eddict\n\n","repo_name":"collective/Products.CalendarX","sub_path":"Products/CalendarX/skins/CalendarX/getEventDictDay.py","file_name":"getEventDictDay.py","file_ext":"py","file_size_in_byte":5439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"30989841915","text":"\"\"\"\nThe config for this file is specified via cmd when running the scripts.\nConfig files are stored in the config folder.\nwrite to console: python predict.py --config path_to_your_config\n\"\"\"\nimport numpy as np\nimport tensorflow as tf\n\nimport os\nfrom tqdm import tqdm\n\nfrom src.utils import parse_config\nfrom src.models.unet import Unet\n\n@parse_config\ndef main(config) -> None:\n    data_path = config['sample_path']\n    n_preds = config['n_preds']\n    seg_path = config[\"seg_path\"]\n\n    if not os.path.isdir(seg_path):\n        os.mkdir(seg_path)\n\n    unet = Unet()\n    unet.load_model(config['model_graph'], config['model_weights'])\n    validation = sorted(os.listdir(data_path))\n\n\n    count = 0\n\n\n    for sample in tqdm(validation):\n        x = np.load(os.path.join(data_path,sample))[np.newaxis, ...]\n        pred = unet.model.predict(x)\n        pred = tf.argmax(pred, axis=-1)\n        pred = np.squeeze(pred)\n        np.save(os.path.join(seg_path, sample.replace(\"sample\", \"pred\")), pred.astype(np.int8))\n\n        del pred\n        count += 1\n        if count == n_preds:\n            break\n\n\nif __name__ == '__main__':
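\n    # Note (added, hedged): the --config path from the CLI (see the module\n    # docstring) is presumably parsed by the parse_config decorator and passed\n    # in as `config`; src.utils is not shown here, so this is a reading, not fact.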
\n    main()\n","repo_name":"gitpanekj/3D_Unet","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"22769850300","text":"arquivo = open(\"arquivo.txt\", mode=\"w\")  # when opening a file for writing,\n# a new file is created even if it already exists, overwriting the old one.\n\n# To write content to a file, we use the write function\n\n# file = open(\"arquivo.txt\", mode=\"w\")\n\narquivo.write(\"nome idade\\n\")\narquivo.write(\"Maria 45\\n\")\narquivo.write(\"Miguel 33\\n\")\n\n# we can also write to a file through print.\n\n# file.write(\"Miguel 33\\n\")\n\n# The line break is not needed here, because adding one is print's default behaviour\nprint(\"Amaterasu 33\", file=arquivo)\n\n# To write multiple lines we can use the writelines function. Note\n# that the function expects each line to carry its own separator character (\n)\n\n#\n# print(\"Túlio 22\", file=file)\n\n\nLINES = [\"Alberto 35\\n\", \"Betina 22\\n\", \"João 42\\n\", \"Victor 19\\n\"]\narquivo.writelines(LINES)\n\n# We opened the file and wrote its contents. Let's now close it:\n\n# file.writelines(LINES)\n\narquivo.close()\n","repo_name":"Marcio-Gabriel-Roque-Mendes/Trybe_exercise","sub_path":"Exercicios/4 - Ciencia da Computacao/bloco-31-introducao-a-python/dia-2-entrada-e-saida-de-dados/conteudo-2-manipulacao-de-arquivos/1_open_e_close.py","file_name":"1_open_e_close.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"9572121092","text":"from hpc.autoscale.ccbindings.mock import MockClusterBinding\nfrom hpc.autoscale.job.job import Job\nfrom hpc.autoscale.job.schedulernode import SchedulerNode\nfrom hpc.autoscale.node.nodemanager import new_node_manager\n\n\ndef setup_module() -> None:\n    SchedulerNode.ignore_hostnames = True\n\n\ndef test_placement_group() -> None:\n    node = SchedulerNode(\"\", {})\n    node.exists = False\n\n    node.placement_group = \"\"\n    assert node.placement_group is None\n\n    node.placement_group = \"a\"\n    assert node.placement_group == \"a\"\n\n    node.placement_group = \"0\"\n    assert node.placement_group == \"0\"\n    try:\n        node.placement_group = \".\"\n    except Exception:\n        pass\n\n    assert node.placement_group == \"0\"\n    node.set_placement_group_escaped(\".\")\n    assert node.placement_group == \"_\"\n\n    node.exists = True\n    try:\n        node.placement_group = \"123\"\n    except Exception:\n        assert node.placement_group == \"_\"\n\n\ndef test_custom_node_attrs_and_node_config() -> None:\n    b = MockClusterBinding()\n    b.add_nodearray(\"htc\", {}, software_configuration={\"myscheduler\": {\"A\": 1}})\n    b.add_bucket(\"htc\", \"Standard_F2\", 10, 10)\n    b.add_node(\"htc-1\", \"htc\")\n    node_mgr = new_node_manager({\"_mock_bindings\": b})\n    (existing_node,) = node_mgr.get_nodes()\n\n    try:\n        existing_node.node_attribute_overrides[\"willfail\"] = 123\n        assert False\n    except TypeError:\n        pass\n\n    result = node_mgr.allocate({\"exclusive\": True}, node_count=2)\n    assert result\n    (node,) = [n for n in result.nodes if not n.exists]\n\n    assert node.software_configuration.get(\"test_thing\") is None\n    node.node_attribute_overrides[\"Configuration\"] = {\"test_thing\": \"is set\"}\n    assert node.software_configuration.get(\"test_thing\") == \"is set\"\n    try:\n        node.software_configuration[\"willfail\"] = 123\n        assert not node.software_configuration.get(\"willfail\")
\n    except TypeError:\n        pass\n\n    # we won't handle dict merges here.\n    assert node.software_configuration.get(\"myscheduler\") == {\"A\": 1}\n\n    node.node_attribute_overrides[\"Configuration\"] = {\"myscheduler\": {\"B\": 2}}\n    assert node.software_configuration.get(\"myscheduler\") == {\"B\": 2}\n\n    # if you want to add to the existing software_configuration, use\n    # the node.software_configuration\n    node.node_attribute_overrides[\"Configuration\"][\n        \"myscheduler\"\n    ] = node.software_configuration.get(\"myscheduler\", {})\n    node.node_attribute_overrides[\"Configuration\"][\"myscheduler\"][\"B\"] = 2\n\n    node.node_attribute_overrides[\"Configuration\"] = {\"myscheduler\": {\"A\": 1, \"B\": 2}}\n\n    node.software_configuration[\"willsucceed\"] = 123\n    node.exists = True\n    try:\n        node.software_configuration[\"willfail\"] = 123\n        assert False\n    except TypeError:\n        pass\n\n\ndef test_clone() -> None:\n    orig = SchedulerNode(\"lnx0\", {\"ncpus\": 4})\n    orig.metadata[\"exists_in_both\"] = True\n    new = orig.clone()\n    assert new.available[\"ncpus\"] == 4\n    assert new.resources[\"ncpus\"] == 4\n    new.available[\"ncpus\"] -= 1\n    assert new.available[\"ncpus\"] == 3\n    assert orig.available[\"ncpus\"] == 4\n\n    job = Job(\"1\", {\"ncpus\": 2})\n    new.decrement(job._constraints, assignment_id=job.name)\n    assert new.available[\"ncpus\"] == 1\n    assert orig.available[\"ncpus\"] == 4\n    assert new.assignments == set([\"1\"])\n    assert orig.assignments == set()\n\n    orig.metadata[\"exists_in_orig\"] = True\n    new.metadata[\"exists_in_new\"] = True\n\n    assert orig.metadata[\"exists_in_both\"] is True\n    assert \"exists_in_new\" not in orig.metadata\n    assert orig.metadata[\"exists_in_orig\"] is True\n\n    assert new.metadata[\"exists_in_both\"] is True\n    assert new.metadata[\"exists_in_new\"] is True\n    assert \"exists_in_orig\" not in new.metadata\n","repo_name":"Azure/cyclecloud-scalelib","sub_path":"test/hpc/node/node_test.py","file_name":"node_test.py","file_ext":"py","file_size_in_byte":3748,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"37"}
+{"seq_id":"69822608045","text":"import sys\nfrom base_django import DjangoGitBuild\nfrom pony_barn import client as pony\n\nclass PonyBuild(DjangoGitBuild):\n    def __init__(self):\n        super(PonyBuild, self).__init__()\n        self.repo_url = \"git://github.com/ericholscher/django_inspect\"\n        self.name = \"django_inspect\"\n        self.package_name = 'django_inspect'\n        self.installed_apps = ['django_inspect']\n\nif __name__ == '__main__':\n    build = PonyBuild()\n    sys.exit(build.execute(sys.argv))\n","repo_name":"ericholscher/pony_barn","sub_path":"pony_barn/build-inspect.py","file_name":"build-inspect.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"19"}
+{"seq_id":"10707986614","text":"from xmlrpc.client import ResponseError\nimport requests\nimport os\nimport pandas as pd\n\nfrom reading_dashboard.postgres import postgres_to_pandas, append_to_postgres\n\nGOOGLE_BOOKS_URL = \"https://www.googleapis.com/books/v1/\"\nGOOGLE_API_KEY = os.environ[\"GOOGLE_API_KEY\"]\n\n\nclass GoogleBooks:\n    def __init__(self):\n        self.postgres = GoogleBooksPostgres()\n        self.api = GoogleBooksApi()\n\n    def get(self, books: list[dict]) -> pd.DataFrame:\n        \"\"\"\n        Get book information from cached data if possible, and the Google Books API for\n        new books not found in the cached dataset.\n\n        If new books are queried, their info is
 appended to the data cache.\n\n        Parameters\n        ----------\n        books : list[dict]\n            List of books to query, where each book to query is a dict with keys for the\n            title and author\n\n        Returns\n        -------\n        book_info : pd.DataFrame\n            Google Books information for the input books list\n        \"\"\"\n        cached_book_info, books_not_found = self._get_cached(books)\n        if not books_not_found:\n            return cached_book_info\n        queried_book_info = self._get_new(books_not_found)\n        return pd.concat([cached_book_info, queried_book_info], ignore_index=True)\n\n    def _get_cached(self, books: list[dict]) -> (pd.DataFrame, list[dict]):\n        \"\"\"\n        Pull cached book info for any books in the list that we've seen before.\n\n        Parameters\n        ----------\n        books : list[dict]\n            List of books to query, where each book to query is a dict with keys for the\n            title and author\n\n        Returns\n        -------\n        cached_book_info : pd.DataFrame\n            Matching books from the input list that were found in the cached database\n        books_not_found : list[dict]\n            Books from the input list that were not found in the cached database\n        \"\"\"\n        # get cached books\n        df_cached = self.postgres.get_cached_books()\n\n        # limit to requested books, matching on title\n        cached_book_info = df_cached[\n            df_cached[\"title\"].isin([x[\"title\"] for x in books])\n        ]\n\n        # check the books that weren't found in the cache\n        # (compare against the column's values, not the DataFrame index)\n        books_not_found = [\n            book for book in books if book[\"title\"] not in cached_book_info[\"title\"].values\n        ]\n\n        return cached_book_info, books_not_found\n\n    def _get_new(self, books: list[dict]) -> pd.DataFrame:\n        \"\"\"\n        For new books not found in the cache, get their info from the Google Books API\n        and add the book's information to the cached database.\n\n        Parameters\n        ----------\n        books : list[dict]\n            List of books to query, where each book to query is a dict with keys for the\n            title and author\n\n        Returns\n        -------\n        queried_book_info : pd.DataFrame\n            Info for the books from the input list returned by the Google Books API\n        \"\"\"\n        # query_multiple_books expects a DataFrame with title/author columns\n        queried_book_info = self.api.query_multiple_books(pd.DataFrame(books))\n        append_to_postgres(queried_book_info, \"google_books\")\n        return queried_book_info\n\n\nclass GoogleBooksPostgres:\n    def get_cached_books(self) -> pd.DataFrame:\n        return postgres_to_pandas(\"google_books\")\n\n    def cache_book(self, row):\n        append_to_postgres(row, \"google_books\")\n\n\nclass GoogleBooksApi:\n    def query_single_book(self, title: str, author: str) -> dict:\n        query_root = f\"{GOOGLE_BOOKS_URL}volumes?q=\"\n        query = f\"{query_root}intitle:{title}+inauthor:{author}&maxResults=1&key={GOOGLE_API_KEY}\"\n        response = requests.get(query)\n        if response.status_code == 429:\n            raise RuntimeError(\"API said we exceeded hits for today :(\")\n        if response.status_code != 200:\n            # TODO print more descriptive error message\n            # TODO maybe give 'none' values for all attributes in this case, so that the\n            # whole program doesn't break?\n            raise ResponseError\n        response_dict = response.json()\n\n        if not (response_dict.get(\"totalItems\", 0) > 0):\n            # raise Warning(f\"Bad response for totalItems\")\n            print(\"totalItems too small\")\n            return {}\n\n        if len(response_dict.get(\"items\", [])) == 0:\n            # raise Warning(f\"Bad response, items empty\")\n            print(\"items is empty\")\n            return {}\n\n        # assume that google searched well, and first match is the best match\n        item = response_dict[\"items\"][0]\n        # volumeInfo contains the most interesting info that we care about\n        item_info = item[\"volumeInfo\"]\n\n        return item_info
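\n\n    # --- Added hedged example, not in the original file ---\n    # e.g. GoogleBooksApi().query_single_book(\"Dune\", \"Frank Herbert\") returns the\n    # matched volume's volumeInfo dict (title, authors, pageCount, ...) or {} when\n    # nothing matches; the book name here is illustrative only.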
\n\n    def query_multiple_books(\n        self,\n        df_to_query: pd.DataFrame,\n        title_col: str = \"title\",\n        author_col: str = \"author\",\n    ) -> pd.DataFrame:\n        \"\"\"\n        Inputs\n        ------\n        df_to_query: pd.DataFrame\n            Dataframe dictating the titles to be queried. Expected to contain a column with\n            book titles, and a column with book authors. Extra columns are ignored.\n        title_col: str\n            Name of the column in df_input containing book titles.\n        author_col: str\n            Name of the column in df_input containing book authors.\n\n        Returns\n        -------\n        df_plus_googlebooks: pd.DataFrame\n            Copy of the input Dataframe `df_to_query`, with additional columns containing\n            information returned from Google Books queries.\n        \"\"\"\n        # TODO make author optional?\n\n        assert title_col in df_to_query.columns\n        assert author_col in df_to_query.columns\n\n        # df_plus_googlebooks = df_to_query.copy()\n        # df_googlebooks = pd.DataFrame()\n        googlebooks_responses = []\n        for idx, row in df_to_query.iterrows():\n            # print(row[title_col])\n            googlebook_info = self.query_single_book(row[title_col], row[author_col])\n            googlebooks_responses += [googlebook_info]\n            # add columns to df if any new fields\n            # new_cols = [x for x in googlebook_info.keys() if x not in list(df_googlebooks.columns)]\n            # if new_cols and list(df_googlebooks.columns):\n            #     df_googlebooks = df_googlebooks.reindex(columns = list(df_googlebooks.columns) + new_cols)\n            # elif new_cols:\n            #     df_googlebooks = df_googlebooks.reindex(columns = new_cols)\n\n            # df_googlebooks = df_googlebooks.append(googlebook_info, ignore_index=True)\n            # print(df_googlebooks.head())\n        # df_googlebooks\n        df_googlebooks = pd.DataFrame.from_dict(googlebooks_responses).add_prefix(\n            \"googlebooks_\"\n        )\n        return pd.concat([df_to_query, df_googlebooks], axis=1)\n        # series_info.rename(\"googlebooks_{}\".format, inplace=True).add_\n\n    def __book_response_to_pandas(self, response_dict: dict) -> pd.Series:\n        # TODO handle the case where response_dict doesn't return any matches\n        # TODO move this validation to separate function?\n        # check that the response matches our expectations\n        # assert response_dict[\"kind\"] == \"books#volumes\"\n        if not (response_dict.get(\"totalItems\", 0) > 0):\n            # raise Warning(f\"Bad response for totalItems\")\n            return pd.Series([], dtype=object)\n\n        if len(response_dict.get(\"items\", [])) == 0:\n            # raise Warning(f\"Bad response, items empty\")\n            return pd.Series([], dtype=object)\n\n        # assume that google searched well, and first match is the best match\n        item = response_dict[\"items\"][0]\n        # volumeInfo contains the most interesting info that we care about\n        item_info = item[\"volumeInfo\"]\n\n        # TODO check that title / author are \"close enough\" match\n\n        series_info = pd.Series(item_info)\n\n        # prefix each index with `googlebooks` for uniqueness against other data sources\n        # which contain the same information\n        series_info.rename(\"googlebooks_{}\".format, inplace=True)\n        # print(series_info)\n        return series_info\n\n\nif __name__ == \"__main__\":\n    df = pd.DataFrame(\n        {\n            \"title\": [\"To Kill a Mockingbird\", \"Where the Red Fern Grows\"],\n            \"author\": [\"Harper Lee\", \"Wilson Rawls\"],\n        }\n    )\n    df_ = GoogleBooksApi().query_multiple_books(df)\n    print(df_.head())\n","repo_name":"kaylarobinson077/reading_dashboard","sub_path":"google_books.py","file_name":"google_books.py","file_ext":"py","file_size_in_byte":8213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"31863174096","text":"from rest_framework import serializers\nfrom set_feedback.models import Set_feedback\n\nclass Set_feedbackSerializer(serializers.ModelSerializer):\n\n    class Meta:\n        model = Set_feedback\n        fields = ('pk','value')\n        #write_only_fields = ('firstname', 'lastname')
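\n\n    # Note (added, hedged): DRF's ModelSerializer already generates a default\n    # create(); the override below just spells out the single-field behaviour.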
\n\n    def create(self, validated_data):\n        \"\"\"\n        Create and return a new `Set_feedback` instance, given the validated data.\n        \"\"\"\n\n        objects = Set_feedback.objects.create(value=validated_data.get('value'))\n        return objects\n\n\n    ","repo_name":"poojapauskar/feedback-api","sub_path":"set_feedback/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"70671877165","text":"#%%\nfrom PIL import Image\nimport glob\nimport numpy as np\nimport random\nimport matplotlib.pyplot as plt\nimport cv2\n\n#%%\n_img_dir_ = '../data'\n_file_type_ = 'JPG'\n\nnum_photos = 7\nref_idx = 3  # int(len(image_c1_list) / 2)\n\n#%%\n####################################\n# Load images\n####################################\n\n#%%\nimage_RGB_list = []\nimage_YCbCr_list = []\nfor i, filename in enumerate(sorted(glob.glob(_img_dir_+'/original/*.'+_file_type_))): #assuming gif\n    if i == num_photos:\n        break\n    print(f'image {i}: {filename}')\n    im = Image.open(filename)\n    image_RGB_list.append(im.convert('RGB'))\n    image_YCbCr_list.append(im.convert('YCbCr'))\n\n# np.array(image_RGB_list[ref_idx].getchannel('G'))\n\n#%%\n# Resize image\ndef resizeImage(image_list=None, set_height=512):\n    if image_list is None:\n        return image_list\n    width, height = image_list[0].size\n    if set_height < height:\n        new_height = set_height\n        new_width = int(width * (set_height / height))\n        new_image_list = []\n        for img in image_list:\n            new_image_list.append(img.resize((new_width, new_height), Image.ANTIALIAS))\n\n        return new_width, new_height, new_image_list\n    else:\n        return width, height, image_list\n\n#%%\n# height, width, _ = np.array(image_RGB_list[0]).shape  # no resize\nwidth, height, image_RGB_list = resizeImage(image_list=image_RGB_list, set_height=1024)\nwidth, height, image_YCbCr_list = resizeImage(image_list=image_YCbCr_list, set_height=1024)\n\n# image_RGB_list[ref_idx].show()\n\n#%%\n# Show images\nfig = plt.figure(figsize=(56, 18), dpi=100)\nfor i in range(num_photos):\n    sub_fig = fig.add_subplot(2, int(num_photos / 2) + 1, i + 1)\n    plt.imshow(image_RGB_list[i])\n    plt.axis('off')\n\npath_row_photos = f'{_img_dir_}/combine_photos.jpg'\nfig.savefig(path_row_photos)\nprint(f'Combined photos saved at {path_row_photos}')\n\n#%%\n####################################\n# MTB Alignment\n####################################\n\n#%%\ndef imToArr(im=None):\n    im2arr = np.array(im)  # im2arr.shape: height x width x channel\n    # print(im2arr.shape)\n    im2arr = np.transpose(im2arr, (2, 0, 1))  # im2arr.shape: channel x height x width\n    return im2arr\n\ndef getEachChannelArr(image_list=None):\n    if image_list is None:\n        return image_list\n    H, W, C = np.array(image_list[0]).shape\n    print(f'Channel: {C}, Height: {H}, Width: {W}')\n    c1, c2, c3 = [], [], []\n    for im in image_list:\n        im2arr = imToArr(im)\n        c1.append(im2arr[0])\n        c2.append(im2arr[1])\n        c3.append(im2arr[2])\n    return c1, c2, c3\n\ndef intensityToBitmaps(intensity=None, exclusion_tolerance=4):\n    arr = np.array(intensity)\n    median_value = int(np.median(arr))\n    intensity_bitmap = np.where(arr > median_value, 1, 0)\n    exclusion_bitmap = np.where((arr <= median_value + exclusion_tolerance) & (arr >= median_value - exclusion_tolerance), 0, 1)\n    return intensity_bitmap, exclusion_bitmap
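\n\n# Added hedged note (not in the original): this follows Ward's Median Threshold\n# Bitmap (MTB) alignment - pixels within exclusion_tolerance of the median are\n# masked out so noisy mid-tone pixels don't dominate the alignment difference.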
\n\ndef intensitiesToBitmaps(intensity_list=None, exclusion_tolerance=4):\n    intensity_bitmap_list = []\n    exclusion_bitmap_list = []\n    for arr in intensity_list:\n        intensity_bitmap, exclusion_bitmap = intensityToBitmaps(arr, exclusion_tolerance)\n        intensity_bitmap_list.append(intensity_bitmap)\n        exclusion_bitmap_list.append(exclusion_bitmap)\n    return intensity_bitmap_list, exclusion_bitmap_list\n\ndef bitmapToImage(bitmap=None, multiplier=255):\n    return Image.fromarray(np.uint8(bitmap * multiplier), 'L')\n\ndef shiftArr(X, dx, dy):\n    X = np.roll(X, dy, axis=0)\n    X = np.roll(X, dx, axis=1)\n    if dy>0:\n        X[:dy, :] = 0\n    elif dy<0:\n        X[dy:, :] = 0\n    if dx>0:\n        X[:, :dx] = 0\n    elif dx<0:\n        X[:, dx:] = 0\n    return X\n\n#%%\nimage_R_list, image_G_list, image_B_list = getEachChannelArr(image_list=image_RGB_list)\nimage_Y_list, image_Cb_list, image_Cr_list = getEachChannelArr(image_list=image_YCbCr_list)\n\n#%%\n# intensity_bitmaps, exclusion_bitmaps = intensitiesToBitmaps(image_Y_list)\n# intensity_bitmap_1 = intensity_bitmaps[ref_idx]\n# exclusion_bitmap_1 = exclusion_bitmaps[ref_idx]\n# bitmapToImage(intensity_bitmap_1, multiplier=255).show()\n# bitmapToImage(exclusion_bitmap_1, multiplier=255).show()\n\n#%%\n# img_1 = image_Y_list[0]\n# img_1 = Image.fromarray(img_1, 'L')\n# img_1.resize((int(img_1.size[0] / 2), int(img_1.size[1] / 2))).show()\n\n#%%\n# Calculate translation\ndef align(ref_img, img, base_dx=0, base_dy=0):\n    ref_i_bm, ref_e_bm = intensityToBitmaps(ref_img)\n    img_i_bm, img_e_bm = intensityToBitmaps(img)\n    min_diff = (ref_i_bm != img_i_bm).sum()\n    dx, dy = base_dx, base_dy\n    for i in range(-1, 2):\n        for j in range(-1, 2):\n            diff = shiftArr(img_i_bm, base_dx + i, base_dy + j) != ref_i_bm\n            exclude = shiftArr(img_e_bm, base_dx + i, base_dy + j) & ref_e_bm\n            diff = (diff & exclude).sum()\n            # print(f\"({i}, {j}), diff = {diff}\")\n            if min_diff > diff:\n                min_diff = diff\n                dx = base_dx + i\n                dy = base_dy + j\n    return dx, dy\n\ndef MTB(ref_img=None, img=None):\n    dx, dy = 0, 0\n    for i in range(5, -1, -1):\n        r_width, r_height = int(width / pow(2, i)), int(height / pow(2, i))\n        ref_img = ref_img.resize((r_width, r_height))\n        img = img.resize((r_width, r_height))\n        dx, dy = align(ref_img, img, 2 * dx, 2 * dy)\n        # print(f\"dx = {dx}, dy = {dy}\")\n    return dx, dy\n\ntranslation = []\n\nref_intensity = image_Y_list[ref_idx]\nref_img = Image.fromarray(ref_intensity, 'L')\nfor i in range(len(image_Y_list)):\n    intensity = image_Y_list[i]\n    img = Image.fromarray(intensity, 'L')\n    dx, dy = MTB(ref_img=ref_img, img=img)\n    translation.append((dx, dy))\nprint(f'Translation: {translation}')\n\n#%%\n# Shift each channel by translation\naligned_channels = []\nfor image_list in [image_R_list, image_G_list, image_B_list, image_Y_list, image_Cb_list, image_Cr_list]:\n    aligned_list = []\n    for i in range(len(translation)):\n        dx = translation[i][0]\n        dy = translation[i][1]\n        # print(f'Image {i} shift right {dx} pixels, shift down {dy} pixels')\n        aligned = shiftArr(image_list[i], dx, dy)\n        aligned_list.append(aligned)\n    aligned_channels.append(aligned_list)\n\naligned_RGB_list = aligned_channels[0:3]\naligned_YCbCr_list = aligned_channels[3:6]\naligned_R_list, aligned_G_list, aligned_B_list = aligned_RGB_list[0], aligned_RGB_list[1], aligned_RGB_list[2]\naligned_Y_list, aligned_Cb_list, aligned_Cr_list = aligned_YCbCr_list[0], aligned_YCbCr_list[1], aligned_YCbCr_list[2]\n\n#%%\n####################################\n# Recover response curves\n####################################\n\n#%%\nrandom.seed(1121326)\nsample_points = []\nnum_samples = 1000\nfor i in range(num_samples):\n    sample_h = random.randint(32, height - 32 - 1)\n    sample_w = random.randint(32, width - 32 - 1)\n    sample_points.append((sample_h, 
sample_w))\n\n#%%\nlog_shutter_time = [-i for i in range(20)]\n\n#%%\n# Recover response curve for each channel\ng_RGB = []\nfor aligned_list in aligned_RGB_list:\n A = []\n b = []\n for i in range(num_samples):\n for j in range(num_photos):\n row = [0 for _ in range(256 + num_samples)]\n z_ij = aligned_list[j][sample_points[i][0]][sample_points[i][1]]\n row[z_ij] = 1\n row[256 + i] = -1\n A.append(row)\n b.append(log_shutter_time[j])\n\n \n row = [0 for _ in range(256 + num_samples)]\n row[127] = 1 # assume g(127) = 0\n A.append(row)\n b.append(0)\n\n for i in range(254):\n row = [0 for _ in range(256 + num_samples)]\n row[i] = 1\n row[i + 1] = -2\n row[i + 2] = 1\n A.append(row)\n b.append(0)\n\n A = np.array(A)\n b = np.array(b)\n\n x, residuals, rank, s = np.linalg.lstsq(A, b, rcond=None)\n g = x[:256]\n g_RGB.append(g)\n\n# g_R, g_G, g_B = g_RGB[0], g_RGB[1], g_RGB[2]\n\n#%%\nplt.figure(figsize=(16, 9), dpi=100)\ny = [i for i in range(256)]\n\nplt.subplot(2, 2, 1)\nplt.plot(g_RGB[0], y, 'r')\nplt.title('Red')\nplt.xlabel('log exposure X')\nplt.ylabel('pixel value Z')\nplt.xticks(range(-5, 5, 1))\nplt.yticks(range(0, 256, 50))\n\nplt.subplot(2, 2, 2)\nplt.plot(g_RGB[1], y, 'g')\nplt.title('Green')\nplt.xlabel('log exposure X')\nplt.ylabel('pixel value Z')\nplt.xticks(range(-5, 5, 1))\nplt.yticks(range(0, 256, 50))\n\nplt.subplot(2, 2, 3)\nplt.plot(g_RGB[2], y, 'b')\nplt.title('Blue')\nplt.xlabel('log exposure X')\nplt.ylabel('pixel value Z')\nplt.xticks(range(-5, 5, 1))\nplt.yticks(range(0, 256, 50))\n\nplt.subplot(2, 2, 4)\nplt.plot(g_RGB[0], y, 'r', g_RGB[1], y, 'g', g_RGB[2], y, 'b')\nplt.title('Red, Green, and Blue curves')\nplt.xlabel('log exposure X')\nplt.ylabel('pixel value Z')\nplt.xticks(range(-5, 5, 1))\nplt.yticks(range(0, 256, 50))\n\npath_response_curves = f'{_img_dir_}/response_curves.jpg'\nplt.savefig(path_response_curves)\nprint(f'Response curves saved at {path_response_curves}')\n\n#%%\n####################################\n# Construct HDR radiance map\n####################################\n\n#%%\ndef weightFn(z):\n if z <= 127:\n return max(z, 1e-5)\n else:\n return max(255 - z, 1e-5)\n\nE_RGB = []\nfor channel in range(3):\n E = np.zeros((height, width))\n g = g_RGB[channel]\n for i in range(height):\n for j in range(width):\n weighted_log_E = 0\n weight = 0\n for p in range(num_photos):\n Z_pixel = aligned_RGB_list[channel][p][i][j]\n weighted_log_E += weightFn(Z_pixel) * (g[Z_pixel] - log_shutter_time[p])\n weight += weightFn(Z_pixel)\n E[i][j] = pow(2, weighted_log_E / weight)\n E_RGB.append(E)\n\nR_w, G_w, B_w = E_RGB[0], E_RGB[1], E_RGB[2]\nHDR_image = np.dstack((R_w, G_w, B_w))\nprint(f'HDR_image.shape = {HDR_image.shape}')\n\n#%%\n# Show recover result (just for visualization, not real HDR)\nplt.figure(figsize=(16, 9), dpi=100)\nim = plt.imshow(G_w, cmap='jet', vmin=np.percentile(G_w, 3), vmax=np.percentile(G_w, 97))\ncbar = plt.colorbar(im)\n\npath_radiance_visualize = f'{_img_dir_}/radiance_visualize.jpg'\nplt.savefig(path_radiance_visualize)\nprint(f'Radiance visualization saved at {path_radiance_visualize}')\n\n#%%\n# Save HDR\npath_hdr = f'{_img_dir_}/HDR_image.hdr'\ncv2.imwrite(path_hdr, HDR_image.astype(np.float32))\nprint(f'HDR image saved at {path_hdr}')\n\n#%%\n####################################\n# Tone mapping\n####################################\n\n#%%\ndef saveTonemapPhotographicGlobal(path, HDR_image, key=0.5, delta=1e-6): # low key = 0.18, high key = 0.5\n L_w = HDR_image\n L_w_bar = pow(2, np.log2(L_w + delta).mean())\n L_m = L_w * key / 
L_w_bar\n    L_white = np.max(L_m)\n    L_d = (L_m * (1 + (L_m / pow(L_white, 2)))) / (1 + L_m)\n\n    LDR_image = np.clip(np.array(L_d * 255), 0, 255).astype(np.uint8)\n    cv2.imwrite(path, LDR_image)\n    return LDR_image\n\ndef Lm2Lblur(L_m, s_max=43, phi=8, key=0.5, epsilon=0.05):\n    L_blur = np.zeros(L_m.shape)\n    s_list = [i for i in range(1, s_max + 1, 2)]\n    num_s = len(s_list)\n    for channel in range(3):\n        L_m_onechannel = L_m[:,:,channel]\n        L_s_list = np.zeros(L_m_onechannel.shape + (num_s,))\n        V_s_list = np.zeros(L_m_onechannel.shape + (num_s,))\n        for i in range(num_s):\n            s = s_list[i]\n            L_s_list[:,:,i] = cv2.GaussianBlur(L_m_onechannel,(s,s),0)\n            V_s_list[:,:,i] = np.absolute((L_s_list[:,:,i] - L_s_list[:,:,max(i-1, 0)]) / (pow(2, phi) * key / pow(s, 2) + L_s_list[:,:,max(i-1, 0)]))\n\n        L_s_max = np.argmax(V_s_list > epsilon, axis=2) - 1\n        L_s_max[L_s_max < 0] = 0\n\n        for i in range(L_m.shape[0]):\n            for j in range(L_m.shape[1]):\n                L_blur[i, j, channel] = L_s_list[i, j, L_s_max[i, j]]\n\n    return L_blur\n\ndef saveTonemapPhotographicLocal(path, HDR_image, key=0.5, delta=1e-6, s_max=43, phi=8, epsilon=0.05):\n    L_w = HDR_image\n    L_w_bar = pow(2, np.log2(L_w + delta).mean())\n    L_m = L_w * key / L_w_bar\n    L_blur = Lm2Lblur(L_m, s_max, phi, key, epsilon)\n    L_d = L_m / (1 + L_blur)\n\n    LDR_image = np.clip(np.array(L_d * 255), 0, 255).astype(np.uint8)\n    cv2.imwrite(path, LDR_image)\n    return LDR_image\n\npath_global_map = f'{_img_dir_}/tonemap_photographic_global.png'\npath_local_map = f'{_img_dir_}/tonemap_photographic_local.png'\nLDR_photographic_global = saveTonemapPhotographicGlobal(path_global_map, HDR_image)\nLDR_photographic_local = saveTonemapPhotographicLocal(path_local_map, HDR_image)\nprint(f'Tone mapped images saved at {path_global_map} and {path_local_map}')\n\n#%%\ncv2.imwrite('../result.png', LDR_photographic_local)\nprint(f'Final result saved at ../result.png')\n\n#%%\n","repo_name":"ChiaYi-LIN/2022VFX","sub_path":"Project_1_HDR/code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"34248351370","text":"import os\nimport json\nimport shutil\n\nfrom django.conf import settings\nfrom django.core import management\nfrom django.core.urlresolvers import reverse\nfrom django.test import TestCase\nfrom django.test.utils import override_settings\n\nfrom nose.tools import eq_\n\nfrom richard.videos.tests import category, speaker, video, related_url\nfrom richard.videos.models import Video\n\n\nclass TestVideos(TestCase):\n    \"\"\"Tests for the ``videos`` app.\"\"\"\n\n    # category\n\n    def test_category_list_empty(self):\n        \"\"\"Test the view of the listing of all categories.\"\"\"\n        url = reverse('videos-category-list')\n\n        resp = self.client.get(url)\n        eq_(resp.status_code, 200)\n        self.assertTemplateUsed(resp, 'videos/category_list.html')\n\n    def test_category_list_with_categories(self):\n        \"\"\"Test the view of the listing of all categories.\"\"\"\n        category(save=True)\n        category(save=True)\n        category(save=True)\n\n        url = reverse('videos-category-list')\n\n        resp = self.client.get(url)\n        eq_(resp.status_code, 200)\n        self.assertTemplateUsed(resp, 'videos/category_list.html')\n\n    def test_category_urls(self):\n        \"\"\"Test the view of a category.\"\"\"\n        cat = category(save=True)\n\n        cases = [\n            cat.get_absolute_url(),\n            u'/category/%s/%s/' % (cat.id, cat.slug),  # with slug and /\n            u'/category/%s/%s' % (cat.id, cat.slug),   # with slug and no /\n            u'/category/%s/' % cat.id,  # no slug and /
\n            u'/category/%s' % cat.id,   # no slug and no /\n        ]\n\n        for url in cases:\n            resp = self.client.get(url)\n            eq_(resp.status_code, 200)\n            self.assertTemplateUsed(resp, 'videos/category.html')\n\n    def test_category_raise_404_when_does_not_exist(self):\n        \"\"\"\n        Test that trying to view a non-existent category returns\n        a HTTP 404 error.\n        \"\"\"\n        url = reverse('videos-category',\n                      args=(1234, 'slug'))\n\n        resp = self.client.get(url)\n        eq_(resp.status_code, 404)\n\n    # speaker\n\n    def test_speaker_list_with_no_speakers_in_database(self):\n        \"\"\"Test the view of the listing of all speakers.\"\"\"\n        url = reverse('videos-speaker-list')\n\n        resp = self.client.get(url)\n        eq_(resp.status_code, 200)\n        self.assertTemplateUsed(resp, 'videos/speaker_list.html')\n\n    def test_speaker_list_empty_character(self):\n        \"\"\"\n        Test the view of the listing of all speakers given an empty\n        `character` GET parameter. It should fall back to showing the\n        speakers starting from the lowest possible character.\n        \"\"\"\n        s1 = speaker(name=u'Random Speaker', save=True)\n        s2 = speaker(name=u'Another Speaker', save=True)\n\n        url = reverse('videos-speaker-list')\n        data = {'character': ''}\n\n        resp = self.client.get(url, data)\n        eq_(resp.status_code, 200)\n        self.assertTemplateUsed(resp, 'videos/speaker_list.html')\n        assert s1.name not in resp.content\n        assert s2.name in resp.content\n\n    def test_speaker_list_character(self):\n        \"\"\"\n        Test the view of the listing of all speakers whose names start\n        with a certain character.\n        \"\"\"\n        s1 = speaker(name=u'Another Speaker', save=True)\n        s2 = speaker(name=u'Random Speaker', save=True)\n\n        url = reverse('videos-speaker-list')\n        data = {'character': 'r'}\n\n        resp = self.client.get(url, data)\n        eq_(resp.status_code, 200)\n        self.assertTemplateUsed(resp, 'videos/speaker_list.html')\n        assert s1.name not in resp.content\n        assert s2.name in resp.content\n\n    def test_speaker_list_character_with_string(self):\n        \"\"\"\n        Test the view of the listing of all speakers giving an invalid\n        character argument. The view should fall back to showing the\n        speakers starting from the lowest possible character.\n        \"\"\"\n        s1 = speaker(name=u'Random Speaker', save=True)\n        s2 = speaker(name=u'Another Speaker', save=True)\n\n        url = reverse('videos-speaker-list')\n        data = {'character': 'richard'}\n\n        resp = self.client.get(url, data)\n        eq_(resp.status_code, 200)\n        self.assertTemplateUsed(resp, 'videos/speaker_list.html')\n        assert s1.name not in resp.content\n        assert s2.name in resp.content\n\n    def test_speaker_list_not_string_character(self):\n        \"\"\"\n        Test the view of the listing of all speakers giving an invalid\n        character argument.
 The view should fall back to showing the\n        speakers starting from the lowest possible character.\n        \"\"\"\n        s1 = speaker(name=u'Random Speaker', save=True)\n        s2 = speaker(name=u'Another Speaker', save=True)\n\n        url = reverse('videos-speaker-list')\n        data = {'character': 42}\n\n        resp = self.client.get(url, data)\n        eq_(resp.status_code, 200)\n        self.assertTemplateUsed(resp, 'videos/speaker_list.html')\n        assert s1.name not in resp.content\n        assert s2.name in resp.content\n\n    def test_speaker_urls(self):\n        \"\"\"Test the view of a speaker.\"\"\"\n        spe = speaker(name=u'Random Speaker', save=True)\n\n        cases = [\n            spe.get_absolute_url(),  # returns the URL with pk and slug\n            u'/speaker/%s/%s/' % (spe.id, spe.slug),  # with slug and /\n            u'/speaker/%s/%s' % (spe.id, spe.slug),   # with slug and no /\n            u'/speaker/%s/' % spe.id,  # no slug and /\n            u'/speaker/%s' % spe.id,   # no slug and no /\n        ]\n\n        for url in cases:\n            resp = self.client.get(url)\n            eq_(resp.status_code, 200)\n            self.assertTemplateUsed(resp, 'videos/speaker.html')\n\n    # videos\n\n    def test_video_urls(self):\n        \"\"\"Test the view of a video.\"\"\"\n        vid = video(save=True)\n\n        cases = [\n            vid.get_absolute_url(),\n            u'/video/%s/%s/' % (vid.id, vid.slug),  # with slug and /\n            u'/video/%s/%s' % (vid.id, vid.slug),   # with slug and no /\n            u'/video/%s/' % vid.id,  # no slug and /\n            u'/video/%s' % vid.id,   # no slug and no /\n        ]\n\n        for url in cases:\n            resp = self.client.get(url)\n            eq_(resp.status_code, 200)\n            self.assertTemplateUsed(resp, 'videos/video.html')\n\n    def test_active_video_speaker_page(self):\n        \"\"\"An active video should show up on its speaker's page.\"\"\"\n        s = speaker(save=True)\n        vid = video(state=Video.STATE_LIVE, save=True)\n        vid.speakers.add(s)\n\n        speaker_url = s.get_absolute_url()\n\n        resp = self.client.get(speaker_url)\n        assert vid.title in resp.content\n\n    def test_active_video_category_page(self):\n        \"\"\"An active video should show up on the category page.\"\"\"\n        vid = video(state=Video.STATE_LIVE, save=True)\n\n        category_url = vid.category.get_absolute_url()\n\n        resp = self.client.get(category_url)\n        assert vid.title in resp.content\n\n    def test_inactive_video_category_page(self):\n        \"\"\"An inactive video should not show up on the category page.\"\"\"\n        vid = video(save=True)\n\n        category_url = vid.category.get_absolute_url()\n\n        resp = self.client.get(category_url)\n        assert vid.title not in resp.content\n\n    def test_inactive_video_speaker_page(self):\n        \"\"\"An inactive video should not show up on its speaker's page.\"\"\"\n        s = speaker(save=True)\n        vid = video(save=True)\n        vid.speakers.add(s)\n\n        speaker_url = s.get_absolute_url()\n\n        resp = self.client.get(speaker_url)\n        assert vid.title not in resp.content\n\n    def test_related_url(self):\n        \"\"\"Related urls should show up on the page.\"\"\"\n        v = video(save=True)\n        rurl = related_url(video_id=v.id, url=u'http://example.com/foo',\n                           description=u'Example related url',\n                           save=True)\n\n        resp = self.client.get(v.get_absolute_url())\n        assert rurl.description in resp.content\n\n    def test_download_only(self):\n        \"\"\"Video urls marked as download-only shouldn't be in the video tag.\"\"\"\n        v = video(video_ogv_url='http://example.com/OGV_VIDEO',\n                  video_ogv_download_only=False,\n                  video_mp4_url='http://example.com/MP4_VIDEO',\n                  video_mp4_download_only=True,\n                  save=True)\n\n        resp = self.client.get(v.get_absolute_url())\n        # This shows up in the video tag and in the downloads area\n        eq_(resp.content.count('OGV_VIDEO'), 2)\n        # This only shows up in the downloads area\n        eq_(resp.content.count('MP4_VIDEO'), 
1)\n\n\nclass TestVideoSearch(TestCase):\n def tearDown(self):\n \"\"\"Remove the search index after each test run.\n\n The path is set in richard/settings_test.py.\"\"\"\n path = settings.HAYSTACK_CONNECTIONS['default']['PATH']\n if os.path.exists(path):\n shutil.rmtree(path)\n\n def test_search(self):\n \"\"\"Test the search view.\"\"\"\n url = reverse('videos-search')\n\n resp = self.client.get(url)\n eq_(resp.status_code, 200)\n\n def test_opensearch_description(self):\n \"\"\"Test the opensearch description view.\"\"\"\n url = reverse('videos-opensearch')\n\n resp = self.client.get(url)\n eq_(resp.status_code, 200)\n\n @override_settings(OPENSEARCH_ENABLE_SUGGESTIONS=True)\n def test_opensearch_description_with_suggestions(self):\n \"\"\"Test the opensearch description view.\"\"\"\n url = reverse('videos-opensearch')\n\n resp = self.client.get(url)\n eq_(resp.status_code, 200)\n\n @override_settings(OPENSEARCH_ENABLE_SUGGESTIONS=True)\n def test_opensearch_suggestions(self):\n \"\"\"Test the opensearch suggestions view.\"\"\"\n video(title='introduction to pypy', save=True)\n video(title='django testing', save=True)\n video(title='pycon 2012 keynote', save=True)\n video(title='Speedily Practical Large-Scale Tests', save=True)\n management.call_command('rebuild_index', interactive=False)\n\n url = reverse('videos-opensearch-suggestions')\n\n response = self.client.get(url, {'q': 'test'})\n eq_(response.status_code, 200)\n data = json.loads(response.content)\n eq_(data[0], 'test')\n eq_(set(data[1]),\n set(['django testing', 'Speedily Practical Large-Scale Tests']))\n\n def test_opensearch_suggestions_disabled(self):\n \"\"\"Test that when suggestions are disabled, the view does nothing.\"\"\"\n url = reverse('videos-opensearch-suggestions')\n\n response = self.client.get(url, {'q': 'test'})\n eq_(response.status_code, 404)\n","repo_name":"appsembler/richard-openshift-quickstart","sub_path":"richard/videos/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":10799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"42074559278","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 21 13:21:27 2021\n\n@author: gerv1\n\"\"\"\n\n# Neural Network Model dependencies\nimport torch.nn as nn\n\n# New model - from VGG paper (VERY DEEP CONVOLUTIONAL NETWORKS FOR LARGE-SCALE IMAGE RECOGNITION)\n# Visual Geometry Group, Department of Engineering Science, University of Oxford\nVGG_types = {\n 'VGG8' : [64, 'M', 128, 'M', 256, 'M', 512, 'M', 512, 'M'],\n 'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n 'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n 'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],\n 'VGG19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M']\n}\n\nclass VGG(nn.Module):\n def __init__(self, in_channels = 3, num_classes = 1000, vgg_type='VGG11'):\n super(VGG, self).__init__() # run the init of the parent method\n self.in_channels = in_channels\n self.conv_layers = self.create_conv_layers(VGG_types[vgg_type])\n \n # From paper: followed by three Fully-Connected (FC) layers: the first two have 4096 channels each, the third performs 1000-way ILSVRC classification and thus contains 1000 channels (one for each class) \n self.fcs = nn.Sequential(\n #nn.Linear(512 * 7 * 7, 4096), # (224 / ( 2 ** num_max_pool_layers ))\n 
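# (added note) Sanity check of the 512 below: the VGG11 config above has five 'M' entries,\n # i.e. five 2x2/stride-2 max-pools, so a 32x32 crop shrinks to 32 // 2**5 = 1 and the\n # flattened feature size is 512 * 1 * 1 = 512.\n 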
nn.Linear(512, 4096), # because we are using crops of 32 x 32\n nn.ReLU(),\n nn.Dropout(p = 0.5),\n nn.Linear(4096, 4096),\n nn.ReLU(),\n nn.Dropout(p = 0.5),\n nn.Linear(4096, num_classes))\n # Dropout is a simple technique that will randomly drop nodes out of the network. \n # It has a regularizing effect as the remaining nodes must adapt to pick-up the slack of the removed nodes.\n\n def forward(self, x):\n x = self.conv_layers(x)\n x = x.reshape(x.shape[0], -1)\n x = self.fcs(x)\n return x\n \n def create_conv_layers(self, architecture):\n layers = []\n in_channels = self.in_channels \n \n for i in architecture:\n if type(i) == int:\n out_channels = i\n # From paper: The convolution stride is fixed to 1 pixel; the spatial padding of conv. layer input is such that the spatial resolution is preserved after convolution, i.e. the padding is 1 pixel for 3 × 3 conv. layers.\n layers += [nn.Conv2d(in_channels=in_channels, out_channels=out_channels,\n kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),\n nn.BatchNorm2d(i), # Not included on original vgg paper\n nn.ReLU()] # From paper: All hidden layers are equipped with the rectification (ReLU)\n # Update the in_channels for the next convolution layer\n in_channels = i\n elif i == 'M':\n # From paper: Max-pooling is performed over a 2 × 2 pixel window, with stride 2\n layers += [nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))]\n return nn.Sequential(*layers)","repo_name":"gerv94/CIFAR-Project","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"7315253702","text":"import os\nfrom flask_sqlalchemy import SQLAlchemy\nclass DB:\n SQLALCHEMY_DATABASE_URI = 'sqlite:///database/test.db'\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n def __init__(self,app):\n self.app = app\n self.__Initialize()\n def __Initialize(self):\n if self.app:\n try:\n self.app.config['SQLALCHEMY_DATABASE_URI'] = self.SQLALCHEMY_DATABASE_URI\n self.app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = self. 
SQLALCHEMY_TRACK_MODIFICATIONS\n self.db = SQLAlchemy(self.app)\n print(\"[INFO] Initialized Flask SQL Alchemy\")\n except Exception as e:\n print(\"[ERROR] Failed to initialize Flask SQL Alchemy\")\n print(e)\n else:\n print(\"[ERROR] 'App' is not defined\")\n def CreateDB(self):\n if not os.path.exists('./database/test.db'):\n try:\n self.db.create_all()\n print(\"[INFO] Database Created Successfully\")\n except Exception as e:\n print(\"[ERROR] Error in creating database\")\n print(e)\n def GetDB(self):\n if self.db:\n return self.db\n else:\n print(\"[ERROR] 'db' is not defined\")\n return None","repo_name":"Ad74751/Grocery_Management_System","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"7243828654","text":"# -*- coding: utf-8 -*-\n\"\"\"\nPHYS20161 Lecture 10 Quiz, Broadcasting error 4\n\nIllustrates potential issues when broadcasting numpy arrays for plots\n\nLloyd Cawthorne 03/12/20\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nOFFSETS = np.array([0., 3., 4.])\nSLOPES = np.array([1., 1.5, 2.3])\n\n\ndef mesh_arrays(x_array, y_array):\n \"\"\"Returns two meshed arrays of size len(x_array)\n by len(y_array)\n x_array array[floats]\n y_array array[floats]\n \"\"\"\n x_array_mesh = np.empty((0, len(x_array)))\n\n for _ in y_array: # PyLint accepts _ as an uncalled variable.\n x_array_mesh = np.vstack((x_array_mesh, x_array))\n\n y_array_mesh = np.empty((0, len(y_array)))\n\n for dummy_element in x_array: # PyLint accepts dummy_anything as well.\n y_array_mesh = np.vstack((y_array_mesh, y_array))\n\n y_array_mesh = np.transpose(y_array_mesh)\n\n return x_array_mesh, y_array_mesh\n\n\ndef function(x_variable, y_variable, coefficients, offsets):\n \"\"\"\n Returns average(coefficients * (x_variable + y_variable)**2) + average(offsets)\n Parameters\n ----------\n x_variable : float\n y_variable : float\n coefficients : np.array(float)\n offsets : np.array(float)\n Returns\n -------\n float\n \"\"\"\n return (np.average(coefficients * (x_variable + y_variable)**2)\n + np.average(offsets))\n\ndef main():\n \"\"\"\n Creates plot of function.\n\n Returns\n -------\n int : 0\n \"\"\"\n x_values = np.linspace(0, 5, 10)\n y_values = x_values.copy()\n x_values_mesh, y_values_mesh = mesh_arrays(x_values, y_values)\n\n figure = plt.figure()\n axes = figure.add_subplot(111)\n\n function_values = np.empty((0, len(x_values)))\n for y_value in y_values:\n row = np.array([])\n for x_value in x_values:\n row = np.append(row, function(x_value, y_value, SLOPES, OFFSETS))\n function_values = np.vstack((function_values, row))\n\n axes.contour(x_values_mesh, y_values_mesh,\n function_values)\n plt.show()\n\n return 0\n\nmain()\n","repo_name":"clead6/python-programming","sub_path":"Assignment 2/broadcasting_4.py","file_name":"broadcasting_4.py","file_ext":"py","file_size_in_byte":2025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"43678253964","text":"# This Code is Heavily Inspired By The YouTuber: Cheesy AI\n# Code Changed, Optimized And Commented By: NeuralNine (Florian Dedov)\n\nimport math\nimport random\nimport sys\nimport os\n\nimport neat\nimport pygame\n\n# Constants\n# WIDTH = 1600\n# HEIGHT = 880\n\nWIDTH = 1920\nHEIGHT = 1080\n\nCAR_SIZE_X = 60 \nCAR_SIZE_Y = 60\n\nBORDER_COLOR = (255, 255, 255, 255) # Color To Crash on Hit\n\ncurrent_generation = 0 # Generation counter\n\nclass Car:\n\n def __init__(self):\n # Load Car Sprite and 
Rotate\n self.sprite = pygame.image.load('car.png').convert() # Convert Speeds Up A Lot\n self.sprite = pygame.transform.scale(self.sprite, (CAR_SIZE_X, CAR_SIZE_Y))\n self.rotated_sprite = self.sprite \n\n # self.position = [690, 740] # Starting Position\n self.position = [830, 920] # Starting Position\n self.angle = 0\n self.speed = 0\n\n self.speed_set = False # Flag For Default Speed Later on\n\n self.center = [self.position[0] + CAR_SIZE_X / 2, self.position[1] + CAR_SIZE_Y / 2] # Calculate Center\n\n self.radars = [] # List For Sensors / Radars\n self.drawing_radars = [] # Radars To Be Drawn\n\n self.alive = True # Boolean To Check If Car is Crashed\n\n self.distance = 0 # Distance Driven\n self.time = 0 # Time Passed\n\n def draw(self, screen):\n screen.blit(self.rotated_sprite, self.position) # Draw Sprite\n self.draw_radar(screen) #OPTIONAL FOR SENSORS\n\n def draw_radar(self, screen):\n # Optionally Draw All Sensors / Radars\n for radar in self.radars:\n position = radar[0]\n pygame.draw.line(screen, (0, 255, 0), self.center, position, 1)\n pygame.draw.circle(screen, (0, 255, 0), position, 5)\n\n def check_collision(self, game_map):\n self.alive = True\n for point in self.corners:\n # If Any Corner Touches Border Color -> Crash\n # Assumes Rectangle\n if game_map.get_at((int(point[0]), int(point[1]))) == BORDER_COLOR:\n self.alive = False\n break\n\n def check_radar(self, degree, game_map):\n length = 0\n x = int(self.center[0] + math.cos(math.radians(360 - (self.angle + degree))) * length)\n y = int(self.center[1] + math.sin(math.radians(360 - (self.angle + degree))) * length)\n\n # While We Don't Hit BORDER_COLOR AND length < 300 (just a max) -> go further and further\n while not game_map.get_at((x, y)) == BORDER_COLOR and length < 300:\n length = length + 1\n x = int(self.center[0] + math.cos(math.radians(360 - (self.angle + degree))) * length)\n y = int(self.center[1] + math.sin(math.radians(360 - (self.angle + degree))) * length)\n\n # Calculate Distance To Border And Append To Radars List\n dist = int(math.sqrt(math.pow(x - self.center[0], 2) + math.pow(y - self.center[1], 2)))\n self.radars.append([(x, y), dist])\n \n def update(self, game_map):\n # Set The Speed To 20 For The First Time\n # Only When Having 4 Output Nodes With Speed Up and Down\n if not self.speed_set:\n self.speed = 20\n self.speed_set = True\n\n # Get Rotated Sprite And Move Into The Right X-Direction\n # Don't Let The Car Go Closer Than 20px To The Edge\n self.rotated_sprite = self.rotate_center(self.sprite, self.angle)\n self.position[0] += math.cos(math.radians(360 - self.angle)) * self.speed\n self.position[0] = max(self.position[0], 20)\n self.position[0] = min(self.position[0], WIDTH - 120)\n\n # Increase Distance and Time\n self.distance += self.speed\n self.time += 1\n \n # Same For Y-Position (clamped against HEIGHT, not WIDTH)\n self.position[1] += math.sin(math.radians(360 - self.angle)) * self.speed\n self.position[1] = max(self.position[1], 20)\n self.position[1] = min(self.position[1], HEIGHT - 120)\n\n # Calculate New Center\n self.center = [int(self.position[0]) + CAR_SIZE_X / 2, int(self.position[1]) + CAR_SIZE_Y / 2]\n\n # Calculate Four Corners\n # Length Is Half The Side\n length = 0.5 * CAR_SIZE_X\n left_top = [self.center[0] + math.cos(math.radians(360 - (self.angle + 30))) * length, self.center[1] + math.sin(math.radians(360 - (self.angle + 30))) * length]\n right_top = [self.center[0] + math.cos(math.radians(360 - (self.angle + 150))) * length, self.center[1] + math.sin(math.radians(360 - (self.angle + 150))) * 
length]\n left_bottom = [self.center[0] + math.cos(math.radians(360 - (self.angle + 210))) * length, self.center[1] + math.sin(math.radians(360 - (self.angle + 210))) * length]\n right_bottom = [self.center[0] + math.cos(math.radians(360 - (self.angle + 330))) * length, self.center[1] + math.sin(math.radians(360 - (self.angle + 330))) * length]\n self.corners = [left_top, right_top, left_bottom, right_bottom]\n\n # Check Collisions And Clear Radars\n self.check_collision(game_map)\n self.radars.clear()\n\n # From -90 To 120 With Step-Size 45 Check Radar\n for d in range(-90, 120, 45):\n self.check_radar(d, game_map)\n\n def get_data(self):\n # Get Distances To Border\n radars = self.radars\n return_values = [0, 0, 0, 0, 0]\n for i, radar in enumerate(radars):\n return_values[i] = int(radar[1] / 30)\n\n return return_values\n\n def is_alive(self):\n # Basic Alive Function\n return self.alive\n\n def get_reward(self):\n # Calculate Reward (Maybe Change?)\n # return self.distance / 50.0\n return self.distance / (CAR_SIZE_X / 2)\n\n def rotate_center(self, image, angle):\n # Rotate The Rectangle\n rectangle = image.get_rect()\n rotated_image = pygame.transform.rotate(image, angle)\n rotated_rectangle = rectangle.copy()\n rotated_rectangle.center = rotated_image.get_rect().center\n rotated_image = rotated_image.subsurface(rotated_rectangle).copy()\n return rotated_image\n\n\ndef run_simulation(genomes, config):\n \n # Empty Collections For Nets and Cars\n nets = []\n cars = []\n\n # Initialize PyGame And The Display\n pygame.init()\n screen = pygame.display.set_mode((WIDTH, HEIGHT), pygame.FULLSCREEN)\n\n # For All Genomes Passed Create A New Neural Network\n for i, g in genomes:\n net = neat.nn.FeedForwardNetwork.create(g, config)\n nets.append(net)\n g.fitness = 0\n\n cars.append(Car())\n\n # Clock Settings\n # Font Settings & Loading Map\n clock = pygame.time.Clock()\n generation_font = pygame.font.SysFont(\"Arial\", 30)\n alive_font = pygame.font.SysFont(\"Arial\", 20)\n game_map = pygame.image.load('map.png').convert() # Convert Speeds Up A Lot\n\n global current_generation\n current_generation += 1\n\n # Simple Counter To Roughly Limit Time (Not Good Practice)\n counter = 0\n\n while True:\n # Exit On Quit Event\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit(0)\n\n # For Each Car Get The Acton It Takes\n for i, car in enumerate(cars):\n output = nets[i].activate(car.get_data())\n choice = output.index(max(output))\n if choice == 0:\n car.angle += 10 # Left\n elif choice == 1:\n car.angle -= 10 # Right\n elif choice == 2:\n if(car.speed - 2 >= 12):\n car.speed -= 2 # Slow Down\n else:\n car.speed += 2 # Speed Up\n \n # Check If Car Is Still Alive\n # Increase Fitness If Yes And Break Loop If Not\n still_alive = 0\n for i, car in enumerate(cars):\n if car.is_alive():\n still_alive += 1\n car.update(game_map)\n genomes[i][1].fitness += car.get_reward()\n\n if still_alive == 0:\n break\n\n counter += 1\n if counter == 30 * 40: # Stop After About 20 Seconds\n break\n\n # Draw Map And All Cars That Are Alive\n screen.blit(game_map, (0, 0))\n for car in cars:\n if car.is_alive():\n car.draw(screen)\n \n # Display Info\n text = generation_font.render(\"Generation: \" + str(current_generation), True, (0,0,0))\n text_rect = text.get_rect()\n text_rect.center = (900, 450)\n screen.blit(text, text_rect)\n\n text = alive_font.render(\"Still Alive: \" + str(still_alive), True, (0, 0, 0))\n text_rect = text.get_rect()\n text_rect.center = (900, 490)\n screen.blit(text, 
text_rect)\n\n pygame.display.flip()\n clock.tick(60) # 60 FPS\n\nif __name__ == \"__main__\":\n \n # Load Config\n config_path = \"./config.txt\"\n config = neat.config.Config(neat.DefaultGenome,\n neat.DefaultReproduction,\n neat.DefaultSpeciesSet,\n neat.DefaultStagnation,\n config_path)\n\n # Create Population And Add Reporters\n population = neat.Population(config)\n population.add_reporter(neat.StdOutReporter(True))\n stats = neat.StatisticsReporter()\n population.add_reporter(stats)\n \n # Run Simulation For A Maximum of 1000 Generations\n population.run(run_simulation, 1000)\n","repo_name":"NeuralNine/ai-car-simulation","sub_path":"newcar.py","file_name":"newcar.py","file_ext":"py","file_size_in_byte":9387,"program_lang":"python","lang":"en","doc_type":"code","stars":181,"dataset":"github-code","pt":"19"} +{"seq_id":"71788324202","text":"from mrjob.job import MRJob, MRStep\nimport time\n\nclass Mapper(MRJob):\n\n def mapper(self, key, ligne):\n el_ligne = ligne.split(',') # extract the individual measurements from a line\n keys = ['O3', 'NO2', 'SO2', 'CO'] # key names for the columns \n\n for i, element in enumerate(el_ligne):\n try:\n temp = float(element)\n if temp > 0.5 : # check whether the measurement is valid\n yield (keys[i], (temp, 1))\n\n except ValueError: # handle unreadable lines (header/footer/write error)\n pass\n\n\n def reducer(self, key, values):\n min_val = next(values) # initial value\n max_val = min_val # initial value\n count = 0 # initial value\n avg = 0 # initial value\n for element in values:\n avg += element[0]\n min_val = min(element, min_val) # compare the current value with the iterated one \n max_val = max(element, max_val) # compare the current value with the iterated one \n count += 1\n yield key, (min_val[0], max_val[0], count, avg/count) # for each key return: min, max, number of valid measurements, mean of the valid measurements\n\n\n def steps(self):\n return [MRStep(mapper=self.mapper, reducer=self.reducer)]\n\n\nif __name__ == \"__main__\":\n Mapper.run()\n","repo_name":"DamienCM/big_data","sub_path":"python/question3/mapper_reducer.py","file_name":"mapper_reducer.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"4486519644","text":"#!/usr/bin/env python3\n# vim: set fileencoding=utf-8\n\n\"\"\"Reading a CSV file that contains a column with integer values (some are missing).\"\"\"\n\n\nimport pandas\nimport math\nfrom opulent_pandas import Schema, Required, BaseValidator, Error\n\n\ndef print_data_frame(df):\n print(\"Data frame\")\n print(\"---------------------------\")\n print(df)\n print()\n\n print(\"Column types\")\n print(\"---------------------------\")\n print(df.dtypes)\n\n\nclass PosintValidator(BaseValidator):\n def validate(self, values):\n if not (values > 0).all():\n raise Error(\"positive integer value expected\")\n\n\nclass NotNaNValidator(BaseValidator):\n def validate(self, values):\n for value in values:\n if math.isnan(value):\n raise Error(\"regular float value expected, but got: {}\".format(value))\n\n\ndef validate_data_frame(data_frame):\n\n schema = Schema(\n {\n Required(\"Block size\"): [PosintValidator()],\n Required(\"Time to read\"): [NotNaNValidator()],\n }\n )\n\n schema.validate(data_frame)\n\n\ndf = 
pandas.read_csv(\"missing_integer_values.csv\")\nprint_data_frame(df)\nvalidate_data_frame(df)\n","repo_name":"tisnik/python-programming-courses","sub_path":"Pandas/examples/27_opulent_pandas_3.py","file_name":"27_opulent_pandas_3.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"} +{"seq_id":"15643918631","text":"import requests\nimport json\nimport re\n\n# Define the request headers\nheaders = {\n 'Yifan_token': '0b5fe11ceeaa4432d94535d578466de2'\n}\n\n# Cache variable\ncache = ''\n\n\ndef useLan(path, url, lan='en'):\n global cache\n # Send a GET request with the headers\n response = cache if bool(cache) else requests.get(url, headers=headers)\n with open(path, 'r') as file:\n file_data = file.read()\n\n # Check whether the request succeeded\n if response.status_code == 200:\n cache = response\n # Get 'data.records' from the response data\n data = response.json()\n records = data.get('data', {})[0].get('data')\n print(records)\n if records is not None:\n # Build a list of dicts\n dict_list = [{record['tid']: [record.get(\n 'en', {}), record.get(\n 'ja', {})]} for record in records]\n # Read the file\n\n # # Iterate over the list of dicts\n for dictionary in dict_list:\n for key, value in dictionary.items():\n # Look up the matching key in the file\n regex = re.compile(f'({key}:.*?)(`[^`]*`)')\n matches = regex.findall(file_data)\n _val = value[0] if lan == 'en' else value[1]\n if matches:\n # Replace the value in the file with the value from the dict\n for match in matches:\n file_data = file_data.replace(\n match[1], f'`{_val} `' if lan == 'en' else f'`{_val}`')\n\n # Write back to the file\n with open(path, 'w') as file:\n file.write(file_data)\n print('done')\n else:\n print('\"data.records\" not found')\n else:\n print('Request failed, status code:', response.status_code)\n\n\nuseLan('./en_US.ts', 'https://yifan.nie.netease.com/api/yifan-task/translator_data?project=artist&classification=1&force=1&file_type=0&file_names=DreamMaker%E7%94%A8%E6%88%B7%E7%95%8C%E9%9D%A2_0622')\nuseLan('./ja_JP.ts', 'https://yifan.nie.netease.com/api/yifan-task/translator_data?project=artist&classification=1&force=1&file_type=0&file_names=DreamMaker%E7%94%A8%E6%88%B7%E7%95%8C%E9%9D%A2_0622', 'ja')\n","repo_name":"hejie3920/hejie-all","sub_path":"hejie-python/lib/useRes.py","file_name":"useRes.py","file_ext":"py","file_size_in_byte":2229,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"35592670717","text":"class Solution:\n def peakIndexInMountainArray(self, arr: List[int]) -> int:\n n = len(arr)\n first = 1\n last = (n - 1)\n\n while(first < last):\n mid = (first + last) // 2\n\n if(arr[mid] < arr[first] and arr[mid] < arr[last]):\n return mid\n\n if(arr[mid] < arr[mid + 1]):\n first = mid + 1\n\n else:\n last = mid\n\n return first","repo_name":"101rror/LeetCode","sub_path":"882-peak-index-in-a-mountain-array/peak-index-in-a-mountain-array.py","file_name":"peak-index-in-a-mountain-array.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"27355091789","text":"#!/usr/bin/env python3\n\nimport pytest\nimport pandas as pd\nfrom io import StringIO\nimport os\n\ntest_output_path = os.path.dirname(os.path.abspath(__file__)) + \\\n '/../../'\n\n\n@pytest.mark.trimData\ndef test_trimData_se():\n assert os.path.exists(os.path.join(\n test_output_path, 'Q-Y5F6_1M.se_trimmed.fq.gz'))\n\n\n@pytest.mark.trimData\ndef test_trimData_pe():\n assert os.path.exists(os.path.join(\n test_output_path, 'Q-Y5F6_1M.pe_val_1.fq.gz'))\n assert os.path.exists(os.path.join(\n test_output_path, 
'Q-Y5F6_1M.pe_val_2.fq.gz'))\n","repo_name":"utsw-bicf/gudmap_rbk.rna-seq","sub_path":"workflow/tests/test_trimData.py","file_name":"test_trimData.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"43213688273","text":"import pytest\n\n@pytest.mark.usefixtures('return_obj')\nclass TestMycode:\n def test_hello_world(self, return_obj):\n assert return_obj.hello_world() == \"hello,world\"\n \n def test_integer_division(self,return_obj):\n assert return_obj.integer_division(6,2) == 3\n #assert mycode.integer_division(3,0) == \"Division by zero detected!!!\"\n with pytest.raises(ZeroDivisionError):\n return_obj.integer_division(6,0)\n #assert mycode.integer_division('tech',6) == \"Expecting integers as parameters!\" \n with pytest.raises(TypeError):\n return_obj.integer_division('tech',5)\n assert isinstance(return_obj.integer_division(8.4,2),int)\n\n @pytest.mark.usefixtures('return_additional_obj','return_additional_obj2')\n def test_additional_obj_counter(self,return_additional_obj,return_additional_obj2):\n assert return_additional_obj.counter() == 3\n assert return_additional_obj2.counter() == 0\n ","repo_name":"ruhinshukurlu/PyTest","sub_path":"test/test_mycode.py","file_name":"test_mycode.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"2519731356","text":"#!/usr/bin/python\nimport mysql.connector\n\n\ndef fetch_table_data(table_name):\n # The connect() constructor creates a connection to the MySQL server and returns a MySQLConnection object.\n cnx = mysql.connector.connect(\n host='localhost',\n database='sams',\n user='root',\n password='root'\n )\n\n cursor = cnx.cursor()\n cursor.execute('select * from ' + table_name)\n\n header = [row[0] for row in cursor.description]\n\n rows = cursor.fetchall()\n\n # Closing connection\n cnx.close()\n\n return header, rows\n\n\ndef export(table_name):\n header, rows = fetch_table_data(table_name)\n\n # Create csv file\n f = open(table_name + '.csv', 'w+')\n\n # Write header\n f.write(','.join(header) + '\\n')\n\n for row in rows:\n f.write(','.join(str(r) for r in row) + '\\n')\n\n f.close()\n print(str(len(rows)) + ' rows written successfully to ' + f.name)\n\n\n# Tables to be exported\nexport('record')\nprint(\"Exported once and next in track\")","repo_name":"AshirA88/SAMS-ALGO","sub_path":"export.py","file_name":"export.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"32530708282","text":"import pytest\nfrom anexo2.docs import Anexo2\n\n\n@pytest.fixture\ndef anexo2_data() -> dict:\n hospital = {'nombre': 'HOSPITAL SAN ROQUE', # https://www.sssalud.gob.ar/index.php?page=bus_hosp&cat=consultas\n 'codigo_hpgd': '4321323'}\n \n beneficiario = {'apellido_y_nombres': 'Juan Perez',\n 'tipo_dni': 'DNI', # | LE | LC\n 'dni': '34100900',\n 'tipo_beneficiario': 'titular', # | no titular | adherente\n 'parentesco': 'conyuge', # hijo | otro\n 'sexo': 'M', # | F\n 'edad': 88}\n atencion = {'tipo': ['consulta', 'práctica', 'internación'],\n 'especialidad': 'Va un texto al parecer largo, quizas sea del nomenclador',\n 'profesional': {\n 'apellido_y_nombres': 'MARTÍNEZ, Adolfo',\n 'matricula_profesional': '10542',\n },\n 'codigos_N_HPGD': ['AA01', 'AA02', 'AA06', 'AA07'], # no se de donde son estos códigos\n 'fecha': {'dia': 3, 'mes': 9, 'anio': 
2019},\n 'diagnostico_ingreso_cie10': {'principal': 'W020', 'otros': ['w021', 'A189']}}\n obra_social = {'codigo_rnos': '800501',\n 'nombre': 'OBRA SOCIAL ACEROS PARANA',\n 'nro_carnet_obra_social': '9134818283929101',\n 'fecha_de_emision': {'dia': 11, 'mes': 9, 'anio': 2009},\n 'fecha_de_vencimiento': {'dia': 11, 'mes': 9, 'anio': 2029}}\n empresa = {'nombre': 'Telescopios Hubble',\n 'direccion': 'Av Astronómica s/n',\n 'ultimo_recibo_de_sueldo': {'mes': 7, 'anio': 2019},\n 'cuit': '31-91203043-8'}\n\n data = {'dia': 3,\n 'mes': 9,\n 'anio': 2019,\n 'hospital': hospital,\n 'beneficiario': beneficiario,\n 'atencion': atencion,\n 'obra_social': obra_social,\n 'empresa': empresa\n }\n \n return data\n\n\ndef test_base(anexo2_data):\n \"\"\" The complete Anexo2 is generated correctly \"\"\"\n\n anx = Anexo2(data=anexo2_data)\n res = anx.get_html()\n assert res is not None\n\n\ndef test_empresa_vacia(anexo2_data):\n \"\"\" An Anexo2 without company data is generated without errors\"\"\"\n\n empresa_vacia = {\n 'empresa': {}\n }\n\n # Replace the company data with the empty data\n anexo2_data.update(empresa_vacia)\n\n anx = Anexo2(data=anexo2_data)\n res = anx.get_html()\n assert res is not None\n\n\ndef test_empresa_incompleta(anexo2_data):\n \"\"\" The Anexo2 is generated without errors even though the last payslip data is missing \"\"\"\n\n empresa_incompleta = {\n 'empresa': {\n 'nombre': 'Telescopios Hubble',\n 'direccion': 'Av Astronómica s/n',\n 'cuit': '31-91203043-8'\n }\n }\n\n anexo2_data.update(empresa_incompleta)\n\n\n anx = Anexo2(data=anexo2_data)\n res = anx.get_html()\n assert res is not None","repo_name":"cluster311/Anexo2","sub_path":"tests/test_base.py","file_name":"test_base.py","file_ext":"py","file_size_in_byte":3016,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"9892286979","text":"sentences=[]\n# Build the global text\nfirst=0\nfor ligne in open(\"d:/tal/corpus-kab.txt\",encoding='utf-8'):\n if (first!=0):\n sentence=[]\n line=ligne.split()\n\n for i in line:\n j=i.split('/')\n couple=(j[0],j[1])\n sentence.append(couple)\n sentences.append(sentence)\n first=1\n#print(sentences[:20])\n\ntags = set([\n tag for sentence in sentences\n for _, tag in sentence\n])\nprint('nb_tags: %s\\ntags: %s' % (len(tags), tags))\n\ntrain_test_cutoff = int(.80 * len(sentences))\ntraining_sentences = sentences[:train_test_cutoff]\ntesting_sentences = sentences[train_test_cutoff:]\n\ntrain_val_cutoff = int(.25 * len(training_sentences))\nvalidation_sentences = training_sentences[:train_val_cutoff]\ntraining_sentences = training_sentences[train_val_cutoff:]\n\n\n##\ndef add_basic_features(sentence_terms, index):\n \"\"\" Compute some very basic word features.\n\n :param sentence_terms: [w1, w2, ...]\n :type sentence_terms: list\n :param index: the index of the word\n :type index: int\n :return: dict containing features\n :rtype: dict\n \"\"\"\n term = sentence_terms[index]\n return {\n 'nb_terms': len(sentence_terms),\n 'term': term,\n 'is_first': index == 0,\n 'is_last': index == len(sentence_terms) - 1,\n 'is_capitalized': term[0].upper() == term[0],\n 'is_all_caps': term.upper() == term,\n 'is_all_lower': term.lower() == term,\n 'prefix-1': term[0],\n 'prefix-2': term[:2],\n 'prefix-3': term[:3],\n 'suffix-1': term[-1],\n 'suffix-2': term[-2:],\n 'suffix-3': term[-3:],\n 'prev_word': '' if index == 0 else sentence_terms[index - 1],\n 'next_word': '' if index == len(sentence_terms) - 1 else 
sentence_terms[index + 1]\n }\n###\n\ndef untag(tagged_sentence):\n \"\"\"\n Remove the tag for each tagged term.\n\n:param tagged_sentence: a POS tagged sentence\n :type tagged_sentence: list\n :return: a list of tags\n :rtype: list of strings\n \"\"\"\n return [w for w, _ in tagged_sentence]\n\ndef transform_to_dataset(tagged_sentences):\n \"\"\"\n Split tagged sentences to X and y datasets and append some basic features.\n\n:param tagged_sentences: a list of POS tagged sentences\n :param tagged_sentences: list of list of tuples (term_i, tag_i)\n :return:\n \"\"\"\n X, y = [], []\n\n for pos_tags in tagged_sentences:\n for index, (term, class_) in enumerate(pos_tags):\n # Add basic NLP features for each sentence term\n X.append(add_basic_features(untag(pos_tags), index))\n y.append(class_)\n return X, y\n###\nX_train, y_train = transform_to_dataset(training_sentences)\nX_test, y_test = transform_to_dataset(testing_sentences)\nX_val, y_val = transform_to_dataset(validation_sentences)\n\nfrom sklearn.feature_extraction import DictVectorizer\n\n# Fit our DictVectorizer with our set of features\ndict_vectorizer = DictVectorizer(sparse=False)\ndict_vectorizer.fit(X_train + X_test + X_val)\n\n# Convert dict features to vectors\nX_train = dict_vectorizer.transform(X_train)\nX_test = dict_vectorizer.transform(X_test)\nX_val = dict_vectorizer.transform(X_val)\n\nfrom sklearn.preprocessing import LabelEncoder\n\n# Fit LabelEncoder with our list of classes\nlabel_encoder = LabelEncoder()\nlabel_encoder.fit(y_train + y_test + y_val)\n\n# Encode class values as integers\ny_train = label_encoder.transform(y_train)\ny_test = label_encoder.transform(y_test)\ny_val = label_encoder.transform(y_val)\n\n# Convert integers to dummy variables (one hot encoded)\nfrom keras.utils import np_utils\n\ny_train = np_utils.to_categorical(y_train)\ny_test = np_utils.to_categorical(y_test)\ny_val = np_utils.to_categorical(y_val)\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation\n\ndef build_model(input_dim, hidden_neurons, output_dim):\n \"\"\"\n Construct, compile and return a Keras model which will be used to fit/predict\n \"\"\"\n model = Sequential([\n Dense(hidden_neurons, input_dim=input_dim),\n Activation('relu'),\n Dropout(0.2),\n Dense(hidden_neurons),\n Activation('relu'),\n Dropout(0.2),\n Dense(output_dim, activation='softmax')\n ])\n\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n return model\n\nfrom keras.wrappers.scikit_learn import KerasClassifier\n\nmodel_params = {\n 'build_fn': build_model,\n 'input_dim': X_train.shape[1],\n 'hidden_neurons': 512,\n 'output_dim': y_train.shape[1],\n 'epochs': 10,\n 'batch_size': 256,\n 'verbose': 1,\n 'validation_data': (X_val, y_val),\n 'shuffle': True\n}\n\nclf = KerasClassifier(**model_params)\nhist = clf.fit(X_train, y_train)\n\nimport matplotlib.pyplot as plt\n\ndef plot_model_performance(train_loss, train_acc, train_val_loss, train_val_acc):\n \"\"\" Plot model loss and accuracy through epochs. 
\"\"\"\n\n blue= '#34495E'\n green = '#2ECC71'\n orange = '#E23B13'\n\n # plot model loss\n fig, (ax1, ax2) = plt.subplots(2, figsize=(10, 8))\n ax1.plot(range(1, len(train_loss) + 1), train_loss, blue, linewidth=5, label='aslughmu')\n ax1.plot(range(1, len(train_val_loss) + 1), train_val_loss, green, linewidth=5, label='asentem')\n ax1.set_xlabel('# epoch')\n ax1.set_ylabel('asruḥu')\n ax1.tick_params('y')\n ax1.legend(loc='upper right', shadow=False)\n ax1.set_title('Taneghruft n usruḥu s imal irennu #epochs', color=orange, fontweight='bold')\n\n # plot model accuracy\n ax2.plot(range(1, len(train_acc) + 1), train_acc, blue, linewidth=5, label='aslughmu')\n ax2.plot(range(1, len(train_val_acc) + 1), train_val_acc, green, linewidth=5, label='asentem')\n ax2.set_xlabel('# epoch')\n ax2.set_ylabel('tiseddi')\n ax2.tick_params('y')\n ax2.legend(loc='lower right', shadow=False)\n ax2.set_title('Taneghruft n tseddi s imal irennu #epochs', color=orange, fontweight='bold')\n plt.show()\n\nplot_model_performance(\n train_loss=hist.history.get('loss', []),\n train_acc=hist.history.get('acc', []),\n train_val_loss=hist.history.get('val_loss', []),\n train_val_acc=hist.history.get('val_acc', [])\n)\n\nscore = clf.score(X_test, y_test)\nprint(score)\n\nfrom keras.utils import plot_model\n\nplot_model(clf.model, to_file='d:/tal/model.png', show_shapes=True)\nclf.model.save('d:/tal/keras_mlp.h5')\n","repo_name":"MohammedBelkacem/KabyleNLP","sub_path":"kerras/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6361,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"19"} +{"seq_id":"42918326272","text":"import ibm_db\nfrom flask import session\n\n\ndef addCustomer(custid, custname, custmobile, custemail, custaddress, userid, conn):\n columns = '\"CUSTID\",\"CUSTNAME\",\"CUSTMOBILE\",\"CUSTEMAIL\",\"CUSTADDRESS\",\"USERID\"'\n val = (\n \"'\"\n + str(custid)\n + \"','\"\n + custname\n + \"','\"\n + custmobile\n + \"','\"\n + custemail\n + \"','\"\n + custaddress\n + \"','\"\n + userid\n + \"'\"\n )\n sql = \"INSERT INTO CUSTOMERS (\" + columns + \") values(\" + val + \")\"\n try:\n stmt = ibm_db.prepare(conn, sql)\n ibm_db.execute(stmt)\n print(\"added :-)\")\n return \"success\"\n except Exception as e:\n # print(\"error on db\", e)\n if ibm_db.stmt_error() == \"23505\":\n print(\"data is present already\")\n return \"already present\"\n else:\n # print(\"Error While Adding the User ! 
\")\n return \"database error\"\n\n\ndef getCustomer(userid, conn):\n sql = \"Select * from customers where USERID = '\" + userid + \"'\"\n arr = []\n try:\n stmt = ibm_db.exec_immediate(conn, sql)\n dictionary = ibm_db.fetch_assoc(stmt)\n if dictionary != False:\n arr.append(\"success\")\n # arr.append({\"message\": \"success\"})\n while dictionary != False:\n arr.append(dictionary)\n dictionary = ibm_db.fetch_assoc(stmt)\n return arr\n else:\n print(\"No data is found !\")\n arr.append(\"no data\")\n arr.append(dictionary)\n return arr\n except Exception as e:\n print(e)\n print(\"Error while retrying data !\")\n arr.append(\"database errors\")\n arr.append({})\n return arr\n return arr\n","repo_name":"IBM-EPBL/IBM-Project-601-1658309647","sub_path":"Final Deliveries/Backend/controllers/customer.py","file_name":"customer.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"35360937983","text":"from merc import errors\nfrom merc import feature\nfrom merc import message\n\n\nclass KickFeature(feature.Feature):\n NAME = __name__\n\n\ninstall = KickFeature.install\n\n\nMAX_TARGETS = 1\n\n\n@KickFeature.register_user_command\nclass Kick(message.Command):\n NAME = \"KICK\"\n MIN_ARITY = 2\n\n def __init__(self, channel_name, nickname, reason=None):\n self.channel_name = channel_name\n self.nickname = nickname\n\n self.reason = reason\n\n @property\n def FORCE_TRAILING(self):\n return self.reason is not None\n\n @message.Command.requires_registration\n def handle_for(self, app, user, prefix):\n try:\n channel = app.channels.get(self.channel_name)\n except errors.NoSuchNick:\n raise errors.NoSuchChannel(self.channel_name)\n\n target = app.users.get(self.nickname)\n\n channel.check_has_user(user)\n channel.check_has_user(target)\n channel.check_is_halfop(user)\n\n channel.broadcast(None, user.hostmask,\n Kick(self.channel_name, self.nickname, self.reason))\n channel.part(target)\n\n def as_command_params(self):\n params = [self.channel_name, self.nickname]\n if self.reason is not None:\n params.append(self.reason)\n return params\n\n\n@KickFeature.hook(\"server.targmax.modify\")\ndef modify_targmax(app, targmax):\n targmax[\"KICK\"] = MAX_TARGETS\n","repo_name":"merc-devel/merc","sub_path":"merc/features/rfc1459/kick.py","file_name":"kick.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"19"} +{"seq_id":"6756414173","text":"#调用栈抽象类\nfrom Stack import *\ndef Checker(string):\n\ts=Stack()\n\tflag=True #括号平衡标志位\n\tindex=0\n\twhile index list():\n time.sleep(0.5)\n context = ctx.triggered_id\n try:\n filtered_tasks = tasks.saved[tasks.saved['map name'] == current_map.name]\n options = filtered_tasks.name.unique() \n options.sort()\n except:\n options = []\n return [options]*8 ","repo_name":"EinEinfach/CaSSAndRA","sub_path":"CaSSAndRA/src/components/tasks/tasksorder.py","file_name":"tasksorder.py","file_ext":"py","file_size_in_byte":2724,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"19"} +{"seq_id":"44064139943","text":"\"\"\"webapp URL Configuration\r\n\r\nThe `urlpatterns` list routes URLs to views. For more information please see:\r\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\r\nExamples:\r\nFunction views\r\n 1. Add an import: from my_app import views\r\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\r\nClass-based views\r\n 1. 
Add an import: from other_app.views import Home\r\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\r\nIncluding another URLconf\r\n 1. Import the include() function: from django.urls import include, path\r\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\r\n\"\"\"\r\nfrom django.contrib import admin\r\nfrom django.urls import path\r\nfrom webapp import views\r\n\r\nurlpatterns = [\r\n # An empty path means the home page\r\n path('', views.start),\r\n # Back to the home page\r\n path('home/', views.start, name=\"start\"),\r\n # Search redirect\r\n path('get_ans/', views.get_ans, name=\"get_ans\"),\r\n # Invalid input\r\n path('error/', views.get_ans, name=\"error\"),\r\n # Valid input\r\n path('user_class/', views.get_ans, name=\"user_class\"),\r\n]\r\n","repo_name":"frederic89/bert_class_webapp","sub_path":"webapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"12156917450","text":"import numpy as np\nimport time\nimport cv2\n\nfrom dronekit import connect, Command, LocationGlobal, VehicleMode\n\ndef RunDetection(conn):\n\n COLORS = [(0, 0, 255), (0, 255, 255), (255, 0, 0), (100, 100, 255), (100, 255, 255), (255, 100, 100)]\n\n conf = 0.5\n thres = 0.2\n\n labelsPath = \"darknet_cfg/obj.names\"\n configPath = \"darknet_cfg/yolov3-tiny.cfg\"\n weightsPath = \"darknet_cfg/yolov3-tiny_10000.weights\"\n\n print(\"[INFO] loading YOLO from disk...\")\n LABELS = open(labelsPath).read().strip().split(\"\\n\")\n net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)\n allLayersNames = net.getLayerNames()\n OutLayersNames = [allLayersNames[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n print(\"[INFO] done\")\n\n cv2.namedWindow(\"drone's camera\")\n videoStream = cv2.VideoCapture(0)\n writer = None\n (W, H) = (None, None)\n\n if videoStream.isOpened(): \n rval, frame = videoStream.read()\n else:\n rval = False\n\n\n while rval:\n\n key = cv2.waitKey(20)\n if key == 27:\n break\n if W is None or H is None:\n (H, W) = frame.shape[:2]\n\n blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (608, 608),\n swapRB=True, crop=False)\n net.setInput(blob)\n start = time.time()\n layerOutputs = net.forward(OutLayersNames)\n end = time.time()\n\n boxes = []\n confidences = []\n classIDs = []\n\n for output in layerOutputs:\n for detection in output:\n scores = detection[5:]\n classID = np.argmax(scores)\n confidence = scores[classID]\n\n if confidence > conf:\n box = detection[0:4] * np.array([W, H, W, H])\n (centerX, centerY, width, height) = box.astype(\"int\")\n\n x = int(centerX - (width / 2))\n y = int(centerY - (height / 2))\n\n boxes.append([x, y, int(width), int(height)])\n confidences.append(float(confidence))\n classIDs.append(classID)\n\n idxs = cv2.dnn.NMSBoxes(boxes, confidences, conf,\n thres)\n\n if len(idxs) > 0:\n\n for i in idxs.flatten():\n (x, y) = (boxes[i][0], boxes[i][1])\n (w, h) = (boxes[i][2], boxes[i][3])\n\n color = [int(c) for c in COLORS[classIDs[i]]]\n cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)\n text = \"{}: {:.4f}\".format(LABELS[classIDs[i]],\n confidences[i])\n cv2.putText(frame, text, (x, y - 5),\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)\n conn.send(LABELS[classIDs[i]])\n \n\n cv2.imshow(\"drone's camera\", frame)\n rval, frame = videoStream.read()\n\n conn.send(None)\n print(\"[INFO] cleaning up...\")\n cv2.namedWindow(\"preview\")\n videoStream.release()\n\ndef connect_to_drone():\n\n print(\"[INFO] Connecting to drone\")\n connection_string = 
'127.0.0.1:14551'\n vehicle = connect(connection_string, wait_ready=True)\n\n print(\" Type: %s\" %(vehicle._vehicle_type))\n print(\" Armed: %s\" %(vehicle.armed))\n print(\" System status: %s\" %(vehicle.system_status.state))\n print(\" GPS: %s\" %(vehicle.gps_0))\n print(\" Alt: %s\" %(vehicle.location.global_relative_frame.alt))\n \n return vehicle\n\ndef arm_and_takeoff(vehicle, aTargetAltitude):\n \"\"\"\n Arms vehicle and fly to aTargetAltitude.\n \"\"\"\n print(\"Basic pre-arm checks\")\n while not vehicle.is_armable:\n print(\" Waiting for vehicle to initialise...\")\n time.sleep(1)\n\n print(\"Arming motors\")\n vehicle.mode = VehicleMode(\"GUIDED\")\n vehicle.armed = True\n\n while not vehicle.armed:\n print(\" Waiting for arming...\")\n time.sleep(1)\n\n print(\"Taking off!\")\n vehicle.simple_takeoff(aTargetAltitude)\n\n while True:\n print(\" Altitude: \", vehicle.location.global_relative_frame.alt)\n if vehicle.location.global_relative_frame.alt >= aTargetAltitude * 0.95:\n print(\"Reached target altitude\")\n break\n time.sleep(1)","repo_name":"HukamiEstia/DUAV2019-mirror","sub_path":"Dronekit/dronekit en vrac/Utils.py","file_name":"Utils.py","file_ext":"py","file_size_in_byte":4159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"74729982763","text":"import os\nfrom functools import partial\nfrom multiprocessing import Pool\n\nimport numpy as np\nfrom scipy.io import loadmat\nfrom tqdm import tqdm\n\nfrom src.config import cfg\nfrom src.utils.data_pipeline import save_data, save_dict\n\n\ndef format_dreamer_data(subject_id, data_path, save_dir=None):\n '''\n Load and retrieve NumPy arrays containing pertinent EMG classification data_pipeline + labels for given subject from the raw\n dataset.\n :param data_path: String, path to main dataset directory\n :param subject_id: String, subject number specified by dataset file naming for specific subject\n :param data_col: String, column name for data_pipeline\n :param label_col: String, column name for labels,\n :param grasp_ids: List of grasp int IDs\n :param electrode_ids: Array of ints representing IDs for electrode channels\n :param save_dir: String, path to directory to save the data in\n :return Tuple of two NumPy arrays as (emg data_pipeline, grasp labels)\n '''\n data = loadmat(os.path.join(data_path, str(subject_id)+'.mat'))\n\n arousals = np.squeeze(data['arousal'])\n valences = np.squeeze(data['valence'])\n baselines = data['baseline']\n\n full_dict = {}\n # Create trial-wise entries\n for i in range(18):\n eeg_data = data['trial'+str(i+1)]\n trial_arousal = np.full_like(eeg_data, fill_value=arousals[i], dtype=np.int32)\n trial_valence = np.full_like(eeg_data, fill_value=valences[i], dtype=np.int32)\n trial_baseline = baselines[..., i]\n full_dict[i] = {\n 'eeg': eeg_data,\n 'baseline': trial_baseline,\n 'arousal': trial_arousal,\n 'valence': trial_valence\n }\n\n if save_dir:\n save_path = os.path.join(save_dir, subject_id + '.pkl')\n save_dict(full_dict, save_path)\n\n return full_dict\n\n\nif __name__ == '__main__':\n # DREAMER\n dm_cfg = cfg['DATASETS']['DREAMER']\n raw_data_path = dm_cfg['RAW_DATA_PATH']\n subject_ids = dm_cfg['SUBJECTS']\n save_dir = dm_cfg['FORMATTED_DATA_PATH']\n\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n format_params = {'data_path': raw_data_path,\n 'save_dir': save_dir}\n\n with Pool() as pool:\n res = list(tqdm(pool.imap(partial(format_dreamer_data, **format_params), subject_ids),\n total=len(subject_ids)))\n\n 
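# (added note) res now holds one dict per subject, keyed by trial index 0..17, each entry\n # carrying the 'eeg', 'baseline', 'arousal' and 'valence' arrays built in format_dreamer_data;\n # e.g. res[0][0]['eeg'] would be the first subject's first-trial EEG block.\n 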
print('Done.')\n\n","repo_name":"farazali7/anxiety_classification","sub_path":"src/utils/data_pipeline/data_formatter.py","file_name":"data_formatter.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"7232388082","text":"import gensim\n\nLabeledSentence = gensim.models.doc2vec.LabeledSentence\n\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\n\nimport os\n\npath = os.getcwd()\nfilepath = path[:-10] + '\\\\aclImdb\\\\train\\\\pos_all.txt'\nprint(filepath)\n#f=open(path[:-9] + '\\\\aclImdb\\\\train\\\\unsup_all.txt', 'w', encoding=\"utf-8\")\nwith open(path[:-10] + '\\\\aclImdb\\\\train\\\\pos_all.txt', 'r', encoding=\"utf-8\") as infile:\n pos_reviews = infile.readlines()\n\nwith open(path[:-10] + '\\\\aclImdb\\\\train\\\\neg_all.txt', 'r', encoding=\"utf-8\") as infile:\n neg_reviews = infile.readlines()\n\nwith open(path[:-10] + '\\\\aclImdb\\\\train\\\\unsup_all.txt', 'r', encoding=\"utf-8\") as infile:\n unsup_reviews = infile.readlines()\n\n\n# 1 stands for positive sentiment, 0 for negative sentiment\ny = np.concatenate((np.ones(len(pos_reviews)), np.zeros(len(neg_reviews))))\n\nx_train, x_test, y_train, y_test = train_test_split(np.concatenate((pos_reviews, neg_reviews)), y, test_size=0.2)\n\n\n# Miscellaneous preprocessing\ndef cleanText(corpus):\n punctuation = \"\"\".,?!:;(){}[]\"\"\"\n corpus = [z.lower().replace('\\n', '') for z in corpus]\n corpus = [z.replace('<br />', ' ') for z in corpus]\n\n # Treat each punctuation mark as its own token\n for c in punctuation:\n corpus = [z.replace(c, ' %s ' % c) for z in corpus]\n corpus = [z.split() for z in corpus]\n return corpus\n\n\nx_train = cleanText(x_train)\nx_test = cleanText(x_test)\nunsup_reviews = cleanText(unsup_reviews)\n\n\n# Gensim's Doc2Vec tool requires each document/paragraph to carry an associated label. We handle this with LabeledSentence, using labels of the form “TRAIN_i” or “TEST_i”, where “i” is a dummy review index.\ndef labelizeReviews(reviews, label_type):\n labelized = []\n for i, v in enumerate(reviews):\n label = '%s_%s' % (label_type, i)\n labelized.append(LabeledSentence(v, [label]))\n return labelized\n\n\nx_train = labelizeReviews(x_train, 'TRAIN')\nx_test = labelizeReviews(x_test, 'TEST')\nunsup_reviews = labelizeReviews(unsup_reviews, 'UNSUP')\n\n\nsize = 400\n\n# Instantiate the DM and DBOW models\nmodel_dm = gensim.models.Doc2Vec(min_count=1, window=10, size=size, sample=1e-3, negative=5, workers=3)\nmodel_dbow = gensim.models.Doc2Vec(min_count=1, window=10, size=size, sample=1e-3, negative=5, dm=0, workers=3)\n\n# Build the vocabulary over all reviews\nmodel_dm.build_vocab(np.concatenate((x_train, x_test, unsup_reviews)))\nmodel_dbow.build_vocab(np.concatenate((x_train, x_test, unsup_reviews)))\n\n# Pass over the dataset multiple times, shuffling each time to improve accuracy.\nall_train_reviews = np.concatenate((x_train, unsup_reviews))\nfor epoch in range(10):\n perm = np.random.permutation(all_train_reviews.shape[0])\n model_dm.train(all_train_reviews[perm])\n model_dbow.train(all_train_reviews[perm])\n\nmodel_dbow.save(\"./dbow.model\")\nmodel_dm.save(\"./db.model\")\n\n# Get the trained vectors from our models\ndef getVecs(model, corpus, size):\n vecs = [np.array(model[z.labels[0]]).reshape((1, size)) for z in corpus]\n return np.concatenate(vecs)\n\ntrain_vecs_dm = getVecs(model_dm, x_train, size)\ntrain_vecs_dbow = getVecs(model_dbow, x_train, size)\n\ntrain_vecs = np.hstack((train_vecs_dm, train_vecs_dbow))\n\n# Train over the test dataset\nx_test = np.array(x_test)\n\nfor epoch in range(10):\n perm = np.random.permutation(x_test.shape[0])\n model_dm.train(x_test[perm])\n model_dbow.train(x_test[perm])\n\n# Build the test-set vectors\ntest_vecs_dm = getVecs(model_dm, x_test, size)\ntest_vecs_dbow = 
getVecs(model_dbow, x_test, size)\n\ntest_vecs = np.hstack((test_vecs_dm, test_vecs_dbow))\n","repo_name":"gmh570154/DeepLearning_In_Django","sub_path":"algorithms/text_train.py","file_name":"text_train.py","file_ext":"py","file_size_in_byte":3562,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"30693938791","text":"import operator\n\nimport numpy as np\nimport pandas as pd\n\nfrom django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import render\n\nfrom plyfile import PlyData, PlyElement\n\nimport os\n\n# python manage.py runserver 0.0.0.0:8000\n# Form\nimport json\n\nfrom plyRender.settings import BASE_DIR\n\n\ndef search_form(request):\n return render(request, 'search_form.html')\n\n\n# Receive the request data\ndef search(request):\n request.encoding = 'utf-8'\n\n if 'q' in request.GET and request.GET['q']:\n\n # message = 'Data: ' + readPlyToNp() + request.GET['q']\n dic = readPly()\n else:\n message = 'You submitted an empty form'\n # return HttpResponse('hello')\n return JsonResponse(dic)\n\n\ndef readPly(): # read the vertices and faces and return a dict.\n plydata = PlyData.read(os.path.join(BASE_DIR, r'plyRender\\tet.ply'))\n\n # print(plydata.elements[0].name)\n # print(plydata.elements[0].data[0])\n # print(plydata.elements[0].data['z'])\n # print(plydata['face'][0]['red'])\n # print(plydata['face'][0]['vertex_indices'])\n\n # for x in plydata.elements:\n # print(x.name)\n\n vertex_np = []\n face_np = []\n print('————↓vertex↓————')\n # pass the vertices\n for x in plydata['vertex']: # vertex\n print(x)\n vertex_np.append(x)\n\n print('————↓face↓————')\n # pass the faces\n for x in plydata['face']: # face\n print(x['vertex_indices'])\n face_np.append(x['vertex_indices'])\n\n # return plydata['vertex']\n print('————↓face np↓————')\n print(face_np)\n\n dic = {}\n for kind in (vertex_np, face_np):\n arr = []\n for x in kind:\n arr.append(list(x.tolist())) # type conversion: numpy obj -> tuple -> list\n arr = sum(arr, []) # flatten multi-dimensional to 1-D\n if kind is vertex_np:\n dic['vertex'] = arr\n else:\n dic['facet'] = arr\n\n return dic\n\n\ndef readPlyToNp():\n plydata = PlyData.read(os.path.join(BASE_DIR, r'plyRender\\tet.ply'))\n data = plydata.elements[0].data # read the vertex element\n print('----1↓-------')\n print(data)\n data_pd = pd.DataFrame(data) # convert to a DataFrame, since a DataFrame can parse structured data\n data_np = np.zeros(data_pd.shape, dtype=float) # initialize the array that stores the data\n property_names = data[0].dtype.names # read the property names\n for i, name in enumerate(property_names): # read the data property by property, which guarantees a consistent data type.\n data_np[:, i] = data_pd[name]\n print('----numpy version↓-------')\n print(data_np)\n return data_np\n","repo_name":"iceki7/plyRenderWithWebGL","sub_path":"plyRender/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":2637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"19786268546","text":"#!/usr/bin/env python\n\n\ndef to_camel(line):\n parts = line.split('_')\n return parts[0] + ''.join(list(map(lambda s: str(s).capitalize(), parts[1:])))\n\n\nif __name__ == '__main__':\n line = input('Enter line: ')\n print(f\"Result: {to_camel(line)}\")\n","repo_name":"Igor-Polatajko/python-labs","sub_path":"lab_7_7.py","file_name":"lab_7_7.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"14403261036","text":"from tests import RelengToolTestCase\nfrom tests import prepare_testenv\nimport json\nimport os\n\n\nclass TestExtensionEventWorkingDirectories(RelengToolTestCase):\n    def 
test_extension_event_workdirs(self):\n with prepare_testenv(template='extension-env-dirs') as engine:\n rv = engine.run()\n self.assertTrue(rv)\n\n expected_workdirs = {\n 'config-loaded': engine.opts.root_dir,\n 'post-build-started': engine.opts.target_dir,\n 'post-build-finished': engine.opts.target_dir,\n }\n\n for k, expected_dir in expected_workdirs.items():\n state = os.path.join(engine.opts.root_dir, k + '.json')\n self.assertTrue(os.path.exists(state))\n\n with open(state, 'r') as f:\n data = json.load(f)\n self.assertTrue('wd' in data)\n self.assertEqual(os.path.realpath(data['wd']),\n os.path.realpath(expected_dir))\n","repo_name":"releng-tool/releng-tool","sub_path":"tests/unit-tests/test_extension_event_workdirs.py","file_name":"test_extension_event_workdirs.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"19"} +{"seq_id":"21976558008","text":"from mimetypes import init\nimport SimpleITK as sitk\nimport numpy as np\nfrom skimage import exposure, morphology, util\nfrom sklearn.cluster import KMeans\nimport scipy as sp\nimport slicer\nfrom operator import xor\n\nversion = 'v0.3.1'\n\ndef geo_strep(marker, mask, se, method):\n if method == 'dilate':\n result = np.bitwise_and(morphology.binary_dilation (marker, se), mask).astype(np.int32)\n elif method == 'erode':\n result = np.bitwise_or(morphology.binary_erosion(marker, se), mask).astype(np.int32)\n else:\n pass\n return result \ndef basic_reconstruction(marker, mask, se, method):\n prev_marker = marker\n new_marker = geo_strep(prev_marker, mask, se, method)\n while np.sum(prev_marker - new_marker)!=0:\n prev_marker = new_marker\n new_marker = geo_strep(prev_marker, mask, se, method)\n result=new_marker\n return result\ndef open_reconstruction(image, se, n):\n for i in range (n) :\n result_erosion = morphology.binary_erosion (image, se).astype(np.int32)\n image = result_erosion\n marker = result_erosion\n result=basic_reconstruction(marker, image, se, 'dilate')\n return result\ndef close_reconstruction(image, se, n):\n for i in range (n) :\n result_erosion = morphology.binary_dilation (image, se).astype(np.int32)\n image = result_erosion\n marker = result_erosion\n result=basic_reconstruction(marker, image, se, 'erode')\n return result \n\ndef cluster(I_primary, ncluster, numberOfPartitions, RangeSlice, primerosSlices, ultimosSlices, I_optional = None, \n mask = None, centroids = None):\n\n '''\n Aplica una clusterización tipo K-Means, utilizando una o dos imágenes. Existe la posibilidad de realizar el proceso solo\n en las zonas de la imagen indicadas en la máscara. En caso de que se proporcionen centroides, estos se utilizan como centroides\n iniciales en la clusterización. \n La clusterización se realiza en grupos de cortes, el tamaño de los mismos se indica con la variable numberOfPartitions. 
Pero \n en caso de que se estén analizando los extremos de la imagen, estos grupos tienen siempre tamaño 1.\n\n Inputs:\n Iprimary -- imagen a clusterizar\n ncluster -- número de cluster\n numberOfPartitions -- tamaño de los grupos de cortes \n RangeSlices -- rango de cortes de la imagen que se desea segmentar\n primerosSlices -- corte en el que se acaba el extremo inferior de la imagen\n ultimosSlices-- corte en el que empeiza el extremo superior de la imagen\n I_optional -- imagen opcional a clusterizar\n mask -- máscara con la zona de la imagen/imagenes que se quiere analizar\n centroids -- centroides que se utilizan en la inicializacion del K-Means\n\n Outputs:\n classImage -- label map de salida de la clusterización \n mean_classes -- matriz con el número de clusters y la media de cada una de ellas\n\n '''\n I_primary_mat = sitk.GetArrayFromImage(I_primary)\n\n if mask==None:\n mask_mat = np.ones(I_primary_mat.shape)\n else:\n mask_mat = sitk.GetArrayFromImage(mask)\n \n I_primary_mat_mask = I_primary_mat [mask_mat == 1]\n \n if I_optional==None:\n I = I_primary_mat_mask .reshape(-1, 1)\n mask = mask_mat[mask_mat ==1].reshape(-1, 1)\n else:\n #Paso dados a forma Nx2\n #En caso de tener dos imagenes se utilizan ambas para hacer la clusterizacion\n I_optional_mat = sitk.GetArrayFromImage(I_optional)\n I_optional_mat_mask = I_optional_mat [mask_mat == 1]\n I_concatenate = np.concatenate ((I_optional_mat_mask, I_primary_mat_mask))\n I = I_concatenate.reshape(2, I_primary_mat_mask.shape[0]).T \n mask_concatenate = np.concatenate ((mask_mat[mask_mat ==1], mask_mat[mask_mat ==1]))\n mask = mask_concatenate\n\n if centroids is None:\n centroids = np.array([[0,0], [255,0], [0,255]])\n\n #K-means\n aux =0\n classes = np.zeros(I_primary_mat_mask.shape)\n classes =np.squeeze(classes.reshape((-1, 1)))\n \n for i in range (1, (I_primary_mat.shape[0]//numberOfPartitions)+1) : \n #Si nos encontramos en los extremos del volumen, el número de particiones que se analizan cada vez es igual a uno. \n if (RangeSlice[0] + ((i-1)*numberOfPartitions)) <= primerosSlices or (RangeSlice[0] + ((i)*numberOfPartitions)) >= ultimosSlices \\\n and numberOfPartitions>1:\n\n newnumberOfPartitions = 1\n diferencia = numberOfPartitions - newnumberOfPartitions\n for j in range (1, diferencia+2):\n kmeans_results = KMeans(ncluster , init= centroids , max_iter = 100) \n kmeans_results.fit(I[aux:(aux +((I.shape[0])//(I_primary_mat.shape[0]//numberOfPartitions))//(diferencia+1)), :]) \n centroids = kmeans_results.cluster_centers_\n labels = kmeans_results.predict(I[aux:(aux +((I.shape[0])//(I_primary_mat.shape[0]//numberOfPartitions))//(diferencia+1)), :])\n classes[aux : (aux +((I.shape[0])//(I_primary_mat.shape[0]//numberOfPartitions))//(diferencia+1))] = labels\n aux=(aux +((I.shape[0])//(I_primary_mat.shape[0]//numberOfPartitions))//(diferencia+1))\n else:\n #En caso de no estar en los extremos se analizan grupo de slices. 
\ndef clean_border_thigh(input_mask, image_W, image_F):\n    # Add a bottom and a top cap so that the thighs are not stuck to the volume border\n    pad_filter = sitk.ConstantPadImageFilter()\n    pad_filter.SetPadLowerBound([0,0,1])\n    pad_filter.SetPadUpperBound([0,0,1])\n    pad_input_mask = pad_filter.Execute(input_mask)\n\n    aux_mask = (image_W==0) & (image_F==0)\n\n    pad_filter.SetConstant(1)\n    aux_mask = pad_filter.Execute(aux_mask)\n    aux_mask_mat = sitk.GetArrayFromImage(aux_mask)\n    # Clean up the image\n    struct_element = np.ones([1,4,1])\n    aux_mask_mat = open_reconstruction(aux_mask_mat, struct_element, 1).astype(np.int8)\n    aux_img = sitk.GetImageFromArray(aux_mask_mat)\n    aux_img.CopyInformation(aux_mask)\n    aux_mask = aux_img\n    # aux_mask = sitk.BinaryGrindPeak(aux_mask)\n    aux_bgp = sitk.BinaryGrindPeak((pad_input_mask!=0) | sitk.Cast(aux_mask, sitk.sitkUInt8)) # Remove everything that is not attached to the border\n    non_border_seg = sitk.Mask(pad_input_mask, aux_bgp==0) # Mask with the inverse\n    non_border_seg = non_border_seg[:,:,1:-1]\n    # Check whether the mask has vanished by comparing the image maximum with 0\n    full_Q = (np.max(sitk.GetArrayFromImage(non_border_seg))!=0)\n\n    return non_border_seg, full_Q\n
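# Illustrative call (hypothetical inputs; not part of the original module):\n# seg_clean, has_content = clean_border_thigh(thigh_mask, water_img, fat_img)\n# 'has_content' is False when removing border-touching voxels empties the mask,\n# i.e. the thigh in 'input_mask' was entirely stuck to the volume border.\n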
\ndef act_cont(Img1, Img2, PropagationScaling, CurvatureScaling, AdvectionScaling, sigma, alpha_, beta_, sliceRange, a=False):\n    '''\n    Applies an active contour\n\n    Inputs:\n    Img1 -- base image\n    Img2 -- image whose contour is to be fitted to that of the base image\n    PropagationScaling -- active-contour parameter controlling the propagation term\n    CurvatureScaling -- active-contour parameter controlling the curvature term\n    AdvectionScaling -- active-contour parameter controlling the advection field\n    sigma -- parameter of the Gaussian gradient-magnitude computation\n    alpha_, beta_ -- parameters of the sigmoid filter\n    sliceRange -- range of slices of the image to be segmented\n\n    Outputs:\n    thres_GAC -- 3D image with the result of the active contour\n\n    '''\n    pad_input_mask = 0\n    pad_input_mask2 = 0\n    initImg = 0\n    imgSigmoid = 0\n\n    Img1 = sitk.Cast(Img1, sitk.sitkFloat32)\n    pad_input_mask_mat = 0\n\n    if sliceRange[1] - sliceRange[0] < 4:\n        diferencia = int(4 - (sliceRange[1] - sliceRange[0]))\n        numeroSlciesAñadir = 1 + diferencia\n        # Img2 --> add one bottom cap and as many top caps as needed to reach 4 slices\n        Img_aux = Img2[:,:, 0:1]\n        Img_aux_mat = sitk.GetArrayFromImage(Img_aux)\n        Img2_mat = sitk.GetArrayFromImage(Img2)\n        pad_input_mask2_mat = np.insert(Img2_mat, 0, Img_aux_mat, axis=0)\n        pad_input_mask2_mat_shape = pad_input_mask2_mat.shape\n        Img2_slices_shape = Img2.GetSize()\n        Img2_aux = Img2[:,:,int(Img2_slices_shape[2]-1)]\n        Img_aux_mat = sitk.GetArrayFromImage(Img2_aux)  # top caps replicate the last slice\n        posicion = pad_input_mask2_mat_shape[0]\n        for i in range(int(diferencia+1)):\n            pad_input_mask2_mat = np.insert(pad_input_mask2_mat, posicion, Img_aux_mat, axis=0)\n            posicion = pad_input_mask2_mat.shape[0]\n        pad_input_mask2 = sitk.GetImageFromArray(pad_input_mask2_mat)\n\n        # Img1 --> here the caps are the first and last slices of the image\n        Img_aux = Img1[:,:,0:1]\n        Img1_mat = sitk.GetArrayFromImage(Img1)\n        Img1_aux_mat = sitk.GetArrayFromImage(Img_aux)\n        pad_input_mask_mat = np.insert(Img1_mat, 0, Img1_aux_mat, axis=0)\n        pad_input_mask_mat_shape = pad_input_mask_mat.shape\n        Img1_slices_shape = Img1.GetSize()\n        Img1_aux = Img1[:,:,int(Img1_slices_shape[2]-1)]\n        Img1_aux_mat = sitk.GetArrayFromImage(Img1_aux)\n        posicion = pad_input_mask_mat_shape[0]\n        for i in range(int(diferencia+1)):\n            pad_input_mask_mat = np.insert(pad_input_mask_mat, posicion, Img1_aux_mat, axis=0)\n            posicion = pad_input_mask_mat.shape[0]\n        pad_input_mask = sitk.GetImageFromArray(pad_input_mask_mat)\n\n    else:\n        # Img1 --> add a bottom cap equal to the first slice and a top cap equal to the last one\n        Img_aux = Img1[:,:,0:1]\n        Img1_mat = sitk.GetArrayFromImage(Img1)\n        Img1_aux_mat = sitk.GetArrayFromImage(Img_aux)\n        pad_input_mask_mat = np.insert(Img1_mat, 0, Img1_aux_mat, axis=0)\n        pad_input_mask_mat_shape = pad_input_mask_mat.shape\n        Img1_slices_shape = Img1.GetSize()\n        Img1_aux = Img1[:,:,int(Img1_slices_shape[2]-1)]\n\n        Img1_aux_mat = sitk.GetArrayFromImage(Img1_aux)\n        pad_input_mask_mat = np.insert(pad_input_mask_mat, pad_input_mask_mat_shape[0], Img1_aux_mat, axis=0)\n        pad_input_mask = sitk.GetImageFromArray(pad_input_mask_mat)\n\n        # Img2 --> add one bottom cap and one top cap\n        Img_aux = Img2[:,:,0:1]\n        Img2_mat = sitk.GetArrayFromImage(Img2)\n        Img2_aux_mat = sitk.GetArrayFromImage(Img_aux)\n        pad_input_mask2_mat = np.insert(Img2_mat, 0, Img2_aux_mat, axis=0)\n        pad_input_mask2_mat_shape = pad_input_mask2_mat.shape\n        Img2_slices_shape = Img2.GetSize()\n        Img2_aux = Img2[:,:,int(Img2_slices_shape[2]-1)]\n\n        Img2_aux_mat = sitk.GetArrayFromImage(Img2_aux)\n        pad_input_mask2_mat = np.insert(pad_input_mask2_mat, pad_input_mask2_mat_shape[0], Img2_aux_mat, axis=0)\n        pad_input_mask2 = sitk.GetImageFromArray(pad_input_mask2_mat)\n\n\n    pad_input_mask = sitk.Cast(pad_input_mask, sitk.sitkFloat32)\n\n    # Anisotropic filtering\n    timeStep_, conduct, numIter = (0.04, 9.0, 5)\n    curvDiff = sitk.CurvatureAnisotropicDiffusionImageFilter()\n    curvDiff.SetTimeStep(timeStep_)\n    curvDiff.SetConductanceParameter(conduct)\n    curvDiff.SetNumberOfIterations(numIter)\n    imgFilter = curvDiff.Execute(pad_input_mask)\n\n    # Magnitude of the gradient\n    imgGauss = sitk.GradientMagnitudeRecursiveGaussian(image1=imgFilter, sigma=sigma)\n\n    # Edge potential function\n    sigFilt = sitk.SigmoidImageFilter()\n    sigFilt.SetAlpha(alpha_)\n    sigFilt.SetBeta(beta_)\n    sigFilt.SetOutputMaximum(1.0)\n    sigFilt.SetOutputMinimum(0.0)\n    imgSigmoid = sigFilt.Execute(imgGauss)\n\n    initImg = sitk.Cast(pad_input_mask2, sitk.sitkFloat32) # Cast to float32\n    initImg = 0.5-initImg # Condition the image to use it as first input of the filter\n\n    # ActiveContour\n    gac = sitk.GeodesicActiveContourLevelSetImageFilter() # Create the active contour filter\n    gac.SetPropagationScaling(PropagationScaling) # Set the propagation parameter\n    gac.SetCurvatureScaling(CurvatureScaling) # Set the curvature parameter\n    gac.SetAdvectionScaling(AdvectionScaling) # Set the advection parameter\n    gac.SetMaximumRMSError(0.01) # Set the maximum RMS error of the PDE solution\n    gac.SetNumberOfIterations(100) # Set the maximum number of iterations\n\n    initImg.CopyInformation(pad_input_mask)\n    imgSigmoid.CopyInformation(pad_input_mask)\n\n    imgGAC = gac.Execute(initImg, imgSigmoid) # Launch the segmentation\n\n    if sliceRange[1] - sliceRange[0] < 4:\n        imgGAC = imgGAC[:,:, 0:-int(numeroSlciesAñadir-1)] # Remove the extra slices added to reach 4\n    imgGAC = imgGAC[:,:,1:-1]\n    imgGAC.CopyInformation(Img1)\n\n    # Thresholding\n    thres_filter = sitk.BinaryThresholdImageFilter()\n    thres_filter.SetLowerThreshold(0)\n    thres_filter.SetInsideValue(0)\n    thres_filter.SetOutsideValue(1)\n    thres_GAC = thres_filter.Execute(imgGAC)\n    thres_GAC.CopyInformation(Img1)\n\n\n    return thres_GAC\n
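# Illustrative call (parameter values mirror the CalculateLabelMap call below):\n# refined = act_cont(water_img, convex_hull_img, -1.0, 1.5, 1.5, 1.5, -2.0, 7, (0, 40))\n# The sigmoid edge map is built from the base image, the zero level set is\n# initialised from the second image, and the result comes back as a binary mask.\n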
\ndef CalculateLabelMap(filled_thigh_mask_img, thigh_mask_img, muslo_fat_img_slices, muslo_water_img_slices, numberOfPartitions,\n                      RangeSlice, primerosSlices, ultimosSlices):\n    \"\"\"\n    Creates a label map with the thigh segmentation\n\n    Inputs:\n    filled_thigh_mask_img -- binary mask of the thighs\n    thigh_mask_img -- binary mask of the thighs, with the bone at zero and the rest at one\n    muslo_fat_img_slices -- portion of the fat image to be segmented\n    muslo_water_img_slices -- portion of the water image to be segmented\n    numberOfPartitions -- size of the slice groups used for the segmentation\n    RangeSlice -- range of slices of the image to be segmented\n    primerosSlices -- slice where the lower end of the image finishes\n    ultimosSlices -- slice where the upper end of the image starts\n\n    Outputs:\n    classImage_bw_img -- label map with the segmentation result\n\n    \"\"\"\n\n    filled_thigh_mask = sitk.GetArrayFromImage(filled_thigh_mask_img)\n    thigh_mask_mat = sitk.GetArrayFromImage(thigh_mask_img)\n    muslo_fat_mat = sitk.GetArrayFromImage(muslo_fat_img_slices)\n    muslo_water_mat = sitk.GetArrayFromImage(muslo_water_img_slices)\n\n    # Marrow computation\n    filled_thigh_mask_erode = morphology.erosion(filled_thigh_mask, morphology.ball(2))\n    contornomedula = np.multiply(util.invert(thigh_mask_mat), filled_thigh_mask_erode)\n    contornomedula_dilate = morphology.dilation(contornomedula, morphology.ball(1))\n    marrowAndbone = sp.ndimage.binary_fill_holes(contornomedula_dilate, structure=np.ones((1,3,2))).astype(np.int32)\n    contornomedula_img = sitk.GetImageFromArray(marrowAndbone)\n    contornomedula_img.CopyInformation(muslo_fat_img_slices)\n    marrow = morphology.opening(np.multiply(marrowAndbone, thigh_mask_mat), morphology.ball(2))\n\n    # Bone computation\n    marrowAndbone = sp.ndimage.binary_fill_holes(contornomedula_dilate, structure=np.ones((1,3,3))).astype(np.int32)\n    marrowAndbone_img = sitk.GetImageFromArray(marrowAndbone)\n    marrowAndbone_img.CopyInformation(muslo_fat_img_slices)\n\n    marrowAndbone_clean_img = sitk.BinaryOpeningByReconstruction(marrowAndbone_img, kernelRadius=[3,3,3])\n\n    marrowAndbone_clean_mat = sitk.GetArrayFromImage(marrowAndbone_clean_img)\n    marrow_dilate = morphology.dilation(marrow.astype(np.int32), morphology.ball(7))\n    marrow_dilate = morphology.dilation(marrow_dilate.astype(np.int32), morphology.ball(3))\n\n    bone = np.multiply(marrow_dilate, marrowAndbone_clean_mat)\n\n
    # Fat-fraction (FF) computation\n    muslo_fat_eps = np.where(muslo_fat_mat==0, np.finfo(float).eps, muslo_fat_mat) # This avoids 0/0\n    muslo_water_eps = np.where(muslo_water_mat==0, np.finfo(float).eps, muslo_water_mat)\n    FF_mat = np.divide(muslo_fat_eps, (muslo_fat_eps+muslo_water_eps))\n    FF_mat_clean = np.multiply(FF_mat, filled_thigh_mask)\n    FF_img_clean = sitk.GetImageFromArray(FF_mat_clean)\n    FF_img_clean.CopyInformation(muslo_fat_img_slices)\n\n    # Cluster --> K-means on FF\n    numberOfClasses = 3\n    [classImage_img, mean_classes] = cluster(FF_img_clean, numberOfClasses, numberOfPartitions, RangeSlice,\n                                             primerosSlices, ultimosSlices, mask=filled_thigh_mask_img,\n                                             centroids=np.array([[0], [1], [0.5]]))\n\n    classImage = sitk.GetArrayFromImage(classImage_img)\n\n    # Fat\n    sat_mat = np.where(classImage == mean_classes[1,1], 1, 0)\n    sat_mat = sat_mat.astype(np.float64)\n    sat_withoutBone = np.where(bone == 1, 0, sat_mat)\n    class11_img = sitk.GetImageFromArray(sat_mat)\n    class11_img.CopyInformation(muslo_fat_img_slices)\n\n    # Intermuscular fat + vessels + skin\n    class01 = np.where(classImage == mean_classes[0,1], 1, 0)\n    class01 = np.where(bone == 1, 0, class01).astype(np.float64)\n    class01_img = sitk.GetImageFromArray(class01)\n    class01_img.CopyInformation(muslo_fat_img_slices)\n\n    # Skin class01\n    suma = (sat_mat + class01)\n    filled_suma = sp.ndimage.binary_fill_holes(suma, structure=np.ones((1,3,2)))\n    invert_filled = util.invert(filled_suma).astype(np.int32)\n    dilate = morphology.dilation(invert_filled, morphology.ball(3))\n    skin_class01 = np.multiply(dilate, class01)\n\n    class01_withoutSkin = np.where(skin_class01 == 1, 0, class01)\n\n    # Muscle\n    class21 = np.where(classImage == mean_classes[2,1], 1, 0)\n    class21_clean = (class21*filled_thigh_mask).astype(np.float64)\n    class21_img = sitk.GetImageFromArray(class21_clean)\n    class21_img.CopyInformation(muslo_fat_img_slices)\n\n    # Skin class21\n    skin_mt = np.multiply(dilate, class21_clean)\n    mt_withoutSkin = np.where(skin_mt == 1, 0, class21_clean).astype(np.float64)\n    mt_withoutSkin_img = sitk.GetImageFromArray(mt_withoutSkin)\n    mt_withoutSkin_img.CopyInformation(muslo_fat_img_slices)\n\n    # Sum class01 and class21 to obtain the MT area\n    suma = class01_withoutSkin + mt_withoutSkin\n    suma_mat_close = close_reconstruction(suma, morphology.ball(1), 2)\n    suma_mat_clean = open_reconstruction(suma_mat_close, morphology.ball(3), 2)\n    suma_mat_clean = suma_mat_clean.astype(np.int32)\n\n    # Find the connected component --> separate intermuscular from subcutaneous fat\n    suma_mat_clean = morphology.dilation(suma_mat_clean, morphology.ball(6)) #4\n    suma_mat_clean_close = morphology.closing(suma_mat_clean, morphology.ball(5)).astype(np.float64)\n    suma_mat_clean_close_img = sitk.GetImageFromArray(suma_mat_clean_close)\n    suma_mat_clean_close_img.CopyInformation(muslo_fat_img_slices)\n    # Switch to 2D to compute the convex hull\n    suma_mat_clean_close = suma_mat_clean_close.reshape((-1, muslo_water_mat.shape[2]))\n    class21_convex_hull_mat = morphology.convex_hull_object(suma_mat_clean_close).astype(np.int32)\n    class21_convex_hull_mat = class21_convex_hull_mat.reshape(muslo_water_mat.shape)\n    class21_convex_hull_mat = class21_convex_hull_mat.astype(np.float64)\n    class21_convex_hull_img = sitk.GetImageFromArray(class21_convex_hull_mat)\n    class21_convex_hull_img.CopyInformation(muslo_fat_img_slices)\n\n    # Active contour\n    Act_cont_IMAT = act_cont(muslo_water_img_slices, class21_convex_hull_img, -1.0, 1.5, 1.5, 1.5, -2.0, 7, RangeSlice)\n    #-2.0, 1.5, 1.5, 1.5, -1.0, 7\n    Act_cont_IMAT_mat = sitk.GetArrayFromImage(Act_cont_IMAT)\n
\n    # Define the different components\n    imat_mat = np.multiply(sat_withoutBone, Act_cont_IMAT_mat)\n    vasos_class01_mat = np.where(Act_cont_IMAT_mat == 0, class01_withoutSkin, 0)\n    vasos_mt_mat = np.where(Act_cont_IMAT_mat == 0, mt_withoutSkin, 0)\n    vasos_mat = vasos_mt_mat+vasos_class01_mat\n    vasos_mat = vasos_mat.astype(np.float64)\n    vasos_img = sitk.GetImageFromArray(vasos_mat)\n    vasos_img.CopyInformation(muslo_fat_img_slices)\n\n    mt_withoutSkinAndBone = np.where(bone==1, 0, mt_withoutSkin)\n\n    skin = skin_class01+skin_mt\n\n    # LABELMAP\n    classImage_bw = np.zeros(muslo_water_mat.shape)\n    classImage_bw[sat_withoutBone==1] = 15 #SAT\n    classImage_bw[imat_mat == 1] = 31 #IMAT\n    classImage_bw[mt_withoutSkinAndBone== 1] = 8 #MT\n    classImage_bw[class01_withoutSkin==1] = 11 #INTRA\n    classImage_bw[vasos_mat==1] = 5 #vessels\n    classImage_bw[bone == 1] = 2 #bone\n    classImage_bw[marrow == 1] = 7 #marrow\n    classImage_bw[skin == 1] = 6 #skin\n    classImage_bw[filled_thigh_mask == 0] = 0\n    classImage_bw = classImage_bw.astype(np.float64)\n    classImage_bw_img = sitk.GetImageFromArray(classImage_bw)\n    classImage_bw_img.CopyInformation(muslo_fat_img_slices)\n\n    return classImage_bw_img\n
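# Quick numeric check of the fat-fraction formula used above (illustrative):\n# a voxel with fat = 60 and water = 20 gives FF = 60 / (60 + 20) = 0.75, landing\n# in the high-FF cluster; the eps substitution only matters for voxels where\n# fat = water = 0, which would otherwise evaluate 0/0.\n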
def ThighSegmentation(fat_img, water_img, RangeSlice, numberOfPartitions, incomplete):\n    '''\n    Computes some masks needed for the segmentation and separates the two thighs\n\n    Inputs:\n    fat_img -- fat image to be segmented\n    water_img -- water image to be segmented\n    numberOfPartitions -- size of the slice groups used for the segmentation\n    RangeSlice -- range of slices of the image to be segmented\n    incomplete -- boolean indicating whether incomplete thighs should be segmented\n\n\n    Outputs:\n    out_l, out_r -- segmentations of the left and right thighs\n    right_full_Q, left_full_Q -- booleans indicating whether the right/left thigh touches the border, i.e. is incomplete\n    sum_right, sum_left -- flags indicating whether the thighs could be split\n    '''\n\n    print('Processing with TisSegLibrary '+version)\n\n    muslo_fat_img_slices = fat_img[:,:, int(RangeSlice[0]):int(RangeSlice[1])]\n    muslo_fat_mat = sitk.GetArrayFromImage(muslo_fat_img_slices)\n\n    muslo_water_img_slices = water_img[:,:,int(RangeSlice[0]):int(RangeSlice[1])]\n    muslo_water_mat = sitk.GetArrayFromImage(muslo_water_img_slices)\n\n    numberOfPartitions = int(numberOfPartitions)\n\n    musloShape = fat_img.GetSize()\n    ultimosSlices = musloShape[2] - (musloShape[2]*10//100)\n    primerosSlices = (musloShape[2]*10//100)\n\n    sum_left = 2\n    sum_right = 2\n\n    muslo_fat_adjustgamma = exposure.adjust_gamma(muslo_fat_mat, 0.65)\n    muslo_fat_adjustgamma_img = sitk.GetImageFromArray(muslo_fat_adjustgamma)\n    muslo_fat_adjustgamma_img.CopyInformation(muslo_water_img_slices)\n    muslo_water_adjustgamma = exposure.adjust_gamma(muslo_water_mat, 0.7)\n    muslo_water_adjustgamma_img = sitk.GetImageFromArray(muslo_water_adjustgamma)\n    muslo_water_adjustgamma_img.CopyInformation(muslo_water_img_slices)\n\n    classImage_img, mean_classes = cluster(muslo_water_adjustgamma_img, 3, numberOfPartitions, RangeSlice,\n                                           primerosSlices, ultimosSlices, muslo_fat_adjustgamma_img)\n\n    classImage = sitk.GetArrayFromImage(classImage_img)\n    muslo_fat_mat = sitk.GetArrayFromImage(muslo_fat_img_slices)\n\n    # Mask definitions\n    thigh_mask_mat = np.zeros(muslo_fat_mat.shape)\n    thigh_mask_mat[classImage!=mean_classes[2,1]] = 1\n    thigh_mask_img = sitk.GetImageFromArray(thigh_mask_mat)\n    thigh_mask_img.CopyInformation(muslo_fat_img_slices)\n\n    # Fill the bone hole\n    filled_thigh_mask = sp.ndimage.binary_fill_holes(thigh_mask_mat, structure=np.ones((1,7,7))).astype(np.int32)\n    filled_thigh_mask_img = sitk.GetImageFromArray(filled_thigh_mask)\n    filled_thigh_mask_img.CopyInformation(muslo_fat_img_slices)\n\n    # Thigh separation\n    filled_thigh_mask_img = sitk.BinaryOpeningByReconstruction(filled_thigh_mask_img, kernelRadius=[7,7,7])\n    compo_filter = sitk.ConnectedComponentImageFilter()\n    compo_result = compo_filter.Execute(filled_thigh_mask_img)\n    compo_num = compo_filter.GetObjectCount()\n    if compo_num > 2:\n        slicer.util.errorDisplay('Error detecting thighs')\n\n    dist_img = sitk.SignedMaurerDistanceMap(filled_thigh_mask_img != 0, insideIsPositive=False, squaredDistance=False, useImageSpacing=False)\n\n    min_dst = np.min(np.concatenate(sitk.GetArrayFromImage(dist_img)))\n    inc_radius = -0.05\n    init_radius = 0.95\n    actual_radius = init_radius\n    while True:\n        radius = actual_radius*np.abs(min_dst)\n        # Seeds have a distance of \"radius\" or more to the object boundary, they are uniquely labelled.\n        seeds = sitk.ConnectedComponent(dist_img < -radius)\n        # Relabel the seed objects using consecutive object labels while removing all objects with less than 15 pixels.\n        seeds = sitk.RelabelComponent(seeds, minimumObjectSize=15)\n        num_comp_seeds = np.max(np.concatenate(sitk.GetArrayFromImage(seeds)))\n        if num_comp_seeds < 2:\n            actual_radius = actual_radius+inc_radius\n        else:\n            break\n\n    # Run the watershed segmentation using the distance map and seeds.\n    ws = sitk.MorphologicalWatershedFromMarkers(dist_img, seeds,\n                                                markWatershedLine=False)\n    ws = sitk.Mask(ws, sitk.Cast(filled_thigh_mask_img, ws.GetPixelID()))\n\n\n    # Definition of the left and right thighs\n\n    # Find the centroids of the watershed segmentation components\n    stats = sitk.LabelShapeStatisticsImageFilter()\n    stats.Execute(sitk.ConnectedComponent(ws))\n    numberofcompo = int(stats.GetNumberOfLabels())\n    # centroid of one of the components\n    centroids = [stats.GetCentroid(l) for l in stats.GetLabels()]\n    stats.Execute(sitk.ConnectedComponent(ws==1))\n    centroid_1 = [stats.GetCentroid(l) for l in stats.GetLabels()]\n    # centroid of the other component\n    stats.Execute(sitk.ConnectedComponent(ws==2))\n    centroid_2 = [stats.GetCentroid(l) for l in stats.GetLabels()]\n\n    # If there is only one component, the left thigh is the one whose centroid is smaller\n    # than that of the ws image, and the right thigh is the one whose centroid is larger\n    if numberofcompo == 1:\n        if centroid_1 > centroids and centroid_2 < centroids:\n            right_mask = (ws==2)\n            left_mask = (ws==1)\n\n        if centroid_1 < centroids and centroid_2 > centroids:\n            right_mask = (ws==1)\n            left_mask = (ws==2)\n\n    elif numberofcompo == 2:\n        # If there are two components, the left thigh is the one whose component is less than\n        # or equal to the smallest component of the ws image, and the right thigh is the one\n        # whose component is greater than or equal to the largest component of the ws image\n        if centroid_1[0] <= centroids[0] and centroids[1] >= centroid_2[0]:\n            # [gap: text lost during extraction -- spans between '<' and '>' were stripped from\n            # the source, taking with them the nested branch bodies here, the rest of\n            # ThighSegmentation and the beginning of the abdomen segmentation routine; the\n            # record resumes below, part-way through the abdomen label-map computation]\n            pass\n\n    VAT = np.where(... > 0, 1, 0).astype(np.float64)  # [np.where condition lost in extraction]\n    VAT_img = sitk.GetImageFromArray(VAT)\n    VAT_img.CopyInformation(abdomen_fat_img_slices)\n\n    #AIR\n    ROI_erode = morphology.erosion(ROI_acti_mat, morphology.ball(1.5)).astype(np.float32)\n    air = (VAT_area * BoneAndFat).astype(np.bool_)\n    air = air * ROI_erode\n    air = air.astype(np.float64)\n
    air_img = sitk.GetImageFromArray(air.astype(np.int32))\n    air_img.CopyInformation(abdomen_fat_img_slices)\n\n    #OTHER TISSUE\n    Other_tissue = np.multiply(class21_clean, ROI_erode).astype(np.float64)\n    Other_tissue_img = sitk.GetImageFromArray(Other_tissue)\n    Other_tissue_img.CopyInformation(abdomen_fat_img_slices)\n\n    #SKIN\n    filled_erode = morphology.erosion(filled, morphology.ball(7)).astype(np.float32)\n    Skin_class01 = util.invert(filled_erode) * (class01)\n    Skin_class21 = util.invert(filled_erode) * (class21)\n    Skin = (Skin_class01 + Skin_class21)* filled\n    Skin = Skin.astype(np.float64)\n    Skin_img = sitk.GetImageFromArray(Skin)\n    Skin_img.CopyInformation(abdomen_fat_img_slices)\n\n    #EDEMA + VESSELS\n    suma_classes = (class01_img + class21_img) - Skin_img\n    active_mat_dilate = morphology.dilation(active_mat, morphology.ball(2))\n    aux = active_mat_dilate + ROI_acti_mat\n    active_mat_invert = util.invert(aux.astype(np.float32)).astype(np.float64)\n    active_invert_img = sitk.GetImageFromArray(active_mat_invert)\n    active_invert_img.CopyInformation(abdomen_fat_img_slices)\n    edema_img = suma_classes * active_invert_img\n    edema = sitk.GetArrayFromImage(edema_img)\n\n    classImage2 = np.zeros(abdomen_fat_mat.shape)\n    classImage2[filled == 1] = 15 #SAT\n    classImage2[IMAT == 1] = 31 #IMAT\n    classImage2[VAT == 1] = 11 #VAT\n    classImage2[MT==1] = 8 #MT\n    classImage2[Other_tissue == 1] = 5 #OTHER TISSUE\n    classImage2[Bone == 1] = 2 #BONE/AIR\n    classImage2[air==True] = 7 #AIR\n    classImage2[Skin == 1] = 6 #SKIN\n    classImage2[edema == 1] = 36 #EDEMA+VESSELS\n    classImage2 = classImage2.astype(np.float64)\n    classImage2_img = sitk.GetImageFromArray(classImage2)\n    classImage2_img.CopyInformation(abdomen_fat_img_slices)\n\n    return classImage2_img\n\ndef ColorSegmentation_Abdo(outputVolume, Segmentation):\n    \"\"\"\n    Converts a label map into a segmentation. It also sets the colour of, and names, each of\n    the segments of the segmentation\n\n    Parameters:\n    outputVolume -- label map from which the segmentation is obtained\n    Segmentation -- volume that contains the segmentation\n    \"\"\"\n
    if Segmentation is not None:\n        # Remove any segments that may already exist\n        segmentation = Segmentation.GetSegmentation()\n        segmentation.RemoveAllSegments()\n        # Import the label map into a segmentation and define (name and colour) each segment\n        slicer.modules.segmentations.logic().ImportLabelmapToSegmentationNode(outputVolume, Segmentation)\n        segmentation = Segmentation.GetSegmentation()\n        if segmentation.GetNumberOfSegments() >= 8 and segmentation.GetNumberOfSegments() < 10:\n            segment = segmentation.GetNthSegment(6)\n            segment.SetName(\"SAT\")\n            color = (230,220,70)\n            color = np.array(color, float) / 255\n            segment.SetColor(color)\n            segment = segmentation.GetNthSegment(7)\n            segment.SetName(\"IMAT\")\n            color = (140,224,228)\n            color = np.array(color, float) / 255\n            segment.SetColor(color)\n            segment = segmentation.GetNthSegment(4)\n            segment.SetName(\"Muscle\")\n            color = (192,104,88)\n            color = np.array(color, float) / 255\n            segment.SetColor(color)\n            segment = segmentation.GetNthSegment(5)\n            segment.SetName(\"VAT\")\n            color = (250,250,225)\n            color = np.array(color, float) / 255\n            segment.SetColor(color)\n            segment = segmentation.GetNthSegment(3)\n            segment.SetName(\"Air\")\n            color = (144,238,144)\n            color = np.array(color, float) / 255\n            segment.SetColor(color)\n            segment = segmentation.GetNthSegment(0)\n            segment.SetName(\"Bone/Air\")\n            color = (241,214, 145)\n            color = np.array(color, float) / 255\n            segment.SetColor(color)\n            segment = segmentation.GetNthSegment(1)\n            segment.SetName(\"Other tissue\")\n            color = (216 ,101,79)\n            color = np.array(color, float) / 255\n            segment.SetColor(color)\n            segment = segmentation.GetNthSegment(2)\n            segment.SetName(\"Skin\")\n            color = (177,122,101)\n            color = np.array(color, float) / 255\n            segment.SetColor(color)\n            segment = segmentation.GetNthSegment(8)\n            segment.SetName(\"Edema + Vessels\")\n            color = (150,98,83)\n            color = np.array(color, float) / 255\n            segment.SetColor(color)\n\n            Segmentation.CreateClosedSurfaceRepresentation()\n        else:\n            slicer.util.errorDisplay(\"The segmentation couldn't be done correctly\")\n
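# A compact alternative to the repeated blocks above (illustrative refactor, not part\n# of the original module): drive the naming/colouring from (index, name, rgb) triples.\n# def _name_and_colour(segmentation, spec):\n#     for idx, name, rgb in spec:\n#         seg = segmentation.GetNthSegment(idx)\n#         seg.SetName(name)\n#         seg.SetColor(np.array(rgb, float) / 255)\n# _name_and_colour(segmentation, [(6, 'SAT', (230, 220, 70)), (7, 'IMAT', (140, 224, 228))])\n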
def ColorSegmentation(outputVolume, l_o_r, Segmentation):\n    \"\"\"\n    Converts a label map into a segmentation. It also sets the colour of, and names, each of\n    the segments of the segmentation\n\n    Parameters:\n    outputVolume -- label map from which the segmentation is obtained\n    l_o_r -- suffix appended to each segment name (e.g. to tell the left and right thighs apart)\n    Segmentation -- volume that contains the segmentation\n    \"\"\"\n\n    if Segmentation is not None:\n        # Remove any segments that may already exist\n        segmentation = Segmentation.GetSegmentation()\n        segmentation.RemoveAllSegments()\n        # Import the label map into a segmentation and define (name and colour) each segment\n        slicer.modules.segmentations.logic().ImportLabelmapToSegmentationNode(outputVolume, Segmentation)\n        segmentation = Segmentation.GetSegmentation()\n        if segmentation.GetNumberOfSegments() == 8:\n            segment = segmentation.GetNthSegment(6)\n            segment.SetName(\"SAT\"+ l_o_r)\n            color = (230,220,70)\n            color = np.array(color, float) / 255\n            segment.SetColor(color)\n            segment = segmentation.GetNthSegment(7)\n            segment.SetName(\"InterMAT\"+ l_o_r)\n            color = (140,224,228)\n            color = np.array(color, float) / 255\n            segment.SetColor(color)\n            segment = segmentation.GetNthSegment(4)\n            segment.SetName(\"Muscle\"+ l_o_r)\n            color = (192,104,88)\n            color = np.array(color, float) / 255\n            segment.SetColor(color)\n            segment = segmentation.GetNthSegment(5)\n            segment.SetName(\"IntraMAT\"+ l_o_r)\n            color = (250,250,225)\n            color = np.array(color, float) / 255\n            segment.SetColor(color)\n            segment = segmentation.GetNthSegment(1)\n            segment.SetName(\"Vessels\"+ l_o_r)\n            color = (216 ,101,79)\n            color = np.array(color, float) / 255\n            segment.SetColor(color)\n            segment = segmentation.GetNthSegment(0)\n            segment.SetName(\"Bone\"+ l_o_r)\n            color = (241,214, 145)\n            color = np.array(color, float) / 255\n            segment.SetColor(color)\n            segment = segmentation.GetNthSegment(3)\n            segment.SetName(\"Marrow\"+ l_o_r)\n            color = (144,238,144)\n            color = np.array(color, float) / 255\n            segment.SetColor(color)\n            segment = segmentation.GetNthSegment(2)\n            segment.SetName(\"Skin\"+ l_o_r)\n            color = (177,122,101)\n            color = np.array(color, float) / 255\n            segment.SetColor(color)\n            Segmentation.CreateClosedSurfaceRepresentation()\n        else:\n            slicer.util.errorDisplay(\"The segmentation couldn't be done correctly\")","repo_name":"MarinaSandonis/TisSegLibrary","sub_path":"src/tisseglibrary/tisseglibrary.py","file_name":"tisseglibrary.py","file_ext":"py","file_size_in_byte":50237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"3409680267","text":"\"\"\"Common functions for helpers\"\"\"\n\n\n# internal libs\nfrom Show_Images_Differences.config.config import ARGV\nfrom Show_Images_Differences.utils import resize_with_with_aspect_ratio\n\n\ndef resize_all(images, width):\n    \"\"\"Resize every image, keeping the aspect ratio\"\"\"\n\n    for image in images:\n        if not image == \"Source name\":  # This is the only value in the dict which is not an image\n            resize = resize_with_with_aspect_ratio(images[image], width)\n            images[image] = resize\n\n    return images\n\n\ndef check_type_width(width):\n    \"\"\"Raise a TypeError when width is not an int\"\"\"\n\n    if not isinstance(width, int):\n        raise TypeError(f\"Wrong type {type(width)}, it should be int\")\n","repo_name":"Luk-kar/Show_Images_Differences","sub_path":"Show_Images_Differences/modes/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"15316045379","text":"\"\"\"\nThis is another example of exception handling\n\"\"\"\n\nsport 
=[\"hockey\",\"basketball\",\"soccer\",\"tennis\",\"football\",\"baseball\"]\n\nppl_play = {'hokey':4 ,\"soccer\":10,\"football\":15,\"tennis\":8}\n\nfor x in sport:\n try:\n print(ppl_play[x])\n except KeyError:\n print('Key is not found')\n\n","repo_name":"gsudarshan1990/Python_Sets","sub_path":"Exceptions/example13.py","file_name":"example13.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"344839449","text":"from sets import Set\nfrom scrapy import Spider\nfrom scrapy.http import Request\nfrom scrapy.selector import Selector\nfrom ..items import AresscrapeCPU\n\n\nclass NeweggCPUSpider(Spider):\n name = \"neweggcpu\"\n allowed_domains = [\"newegg.com\"]\n start_urls = [\n \"http://www.newegg.com/Processors-Desktops/SubCategory/ID-343/Page-%s?Pagesize=90\"\n % page for page in xrange(1, 5)\n ]\n visitedURLs = Set()\n\n def parse(self, response):\n products = Selector(response).xpath('//*[@class=\"itemCell\"]')\n for product in products:\n item = AresscrapeCPU()\n item['url'] = product.xpath('div[2]/div/a/@href').extract()[0]\n item['newegg_sku'] = str(item['url']).replace(\"http://www.newegg.com/Product/Product.aspx?Item=\", '')\n validprice = product.xpath('div[3]/ul/li[3]/strong/text()')\n productname = product.xpath('div[2]/div/a/span/text()').extract()[0].encode('utf-8', 'ignore')\n if 'Configurator' in productname:\n continue\n # if price isnt found (example, 'view price in cart') skip the item entirely. Fuck you newegg.\n if not validprice:\n continue\n # If product is refurb, skip.\n elif str(productname).startswith('Refurbished'):\n continue\n else:\n price1 = product.xpath('div[3]/ul/li[3]/strong/text()').extract()[0]\n price2 = product.xpath('div[3]/ul/li[3]/sup/text()').extract()[0]\n item['price'] = price1 + price2\n urls = Set([product.xpath('div[2]/div/a/@href').extract()[0]])\n for url in urls:\n if url not in self.visitedURLs:\n request = Request(url, callback=self.cpuproductpage)\n request.meta['item'] = item\n yield request\n\n def cpuproductpage(self, response):\n specs = Selector(response).xpath('//*[@id=\"Specs\"]/fieldset')\n itemdict = {}\n for i in specs:\n test = i.xpath('dl')\n for t in test:\n name = t.xpath('dt/text()').extract()[0]\n if name == ' ':\n name = t.xpath('dt/a/text()').extract()[0]\n itemdict[name] = t.xpath('dd/text()').extract()[0]\n item = response.meta['item']\n image = Selector(response).xpath('//*[@id=\"synopsis\"]/div/div/div/a/span/img/@src').extract()\n if image:\n image = [image[0].replace(\"?$S300W$\", \"\").replace(\"?$S300$\", \"\")]\n # If the product doesnt have a model or brand, don't do anything with it.\n if 'Name' not in itemdict or 'Brand' not in itemdict:\n yield None\n else:\n # image_urls passes image data to S3 pipeline\n item['image_urls'] = image\n item['make'] = itemdict['Brand']\n item['model'] = itemdict['Name']\n item['freq'] = itemdict['Operating Frequency']\n item['turbo'] = itemdict.get('Max Turbo Frequency', None)\n item['die_size'] = itemdict.get('Manufacturing Tech', None)\n item['lanes'] = itemdict.get('Max Number of PCI Express Lanes', None)\n item['threads'] = itemdict.get('# of Threads', None)\n item['l2'] = itemdict.get('L2 Cache', None)\n item['l3'] = itemdict.get('L3 Cache', None)\n item['cores'] = str(itemdict.get('# of Cores', None)).\\\n replace(\"-Core\", \"\").\\\n replace(\"Dual\", \"2\").\\\n replace(\"Quad\", \"4\")\n item['socket'] = str(itemdict.get('CPU Socket Type', 
None)).replace(\"Socket\", \"\").replace(\"LGA\", \"\").strip()\n yield item\n","repo_name":"Comparebench/Newegg-Crawler","sub_path":"neweggscrape/spiders/neweggcpu.py","file_name":"neweggcpu.py","file_ext":"py","file_size_in_byte":3689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"16056091189","text":"import pygame\n\n# Class for the Icons shown inside the reels\nclass Icon(pygame.sprite.Sprite):\n # Function that gets the icon's name, win rate when 3 icons are aligned, win rate when 2 icons are aligned, icon's image, and winrate for special icons as the parameters.\n def __init__(self, name, win_rate_full, win_rate_two, icon_image, bonus_win_rate = 0):\n pygame.sprite.Sprite.__init__(self)\n self.name = name\n self.image = pygame.image.load(\"images/\" + icon_image)\n self.image = self.image.convert_alpha()\n self.rect = self.image.get_rect()\n self.win_rate_full = win_rate_full\n self.win_rate_two = win_rate_two\n self.bonus_win_rate = bonus_win_rate","repo_name":"FelAmore/AlgoPro-Final-Project","sub_path":"icon.py","file_name":"icon.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"8633324017","text":"import turtle\nt=turtle.Turtle()\ns=turtle.Screen()\nt.speed(0)\n\nt.up()\nt.goto(-150,-150)\nt.down()\nt.color(\"red\")\nt.begin_fill()\nt.circle(110)\nt.end_fill()\n\nt.up()\nt.goto(-150,-130)\nt.down()\nt.color(\"white\")\nt.begin_fill()\nt.circle(90)\nt.end_fill()\n\nt.up()\nt.goto(-150,-110)\nt.down()\nt.color(\"red\")\nt.begin_fill()\nt.circle(70)\nt.end_fill()\n\nt.up()\nt.goto(-150,-90)\nt.down()\nt.color(\"blue\")\nt.begin_fill()\nt.circle(50)\nt.end_fill()\n\n\nt.up()\nt.goto(-195,-55)\nt.down()\nt.color(\"white\")\nt.begin_fill()\nt.fd(92)\nt.left(144)\nt.fd(92)\nt.left(144)\nt.fd(92)\nt.left(144)\nt.fd(92)\nt.left(144)\nt.fd(92)\nt.left(144)\nt.end_fill()\nt.hideturtle()\n\ns.mainloop()\n","repo_name":"22-JWL/REQ0018-Elemental-python-programing","sub_path":"untitled folder/방패.py","file_name":"방패.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"28608317057","text":"from random import randint\n\nguessesTaken = 0\n\nprint(\"Hi, what's your name?\")\nname = input()\n\nnumber = randint(1, 20)\nprint(f\"Hi {name}! I'm thinking of a number between 1 and 20, take a guess?\")\n\nfor guessesTaken in range(6):\n print(\"Take a guess!\")\n guess = input()\n guess = int(guess)\n\n if guess < number:\n print(\"Too low!\")\n\n if guess > number:\n print(\"Too high!\")\n\n if guess == number:\n break\n\nif guess == number:\n guessesTaken = str(guessesTaken + 1)\n print(f\"Well done {name}! 
You guessed my number in {guessesTaken} guesses!\")\n\nif guess != number:\n number = str(number)\n print(f\"Nope, better luck next time, I was thinking of {number}.\")","repo_name":"EmmanuelPure0x1/python_exercises_2","sub_path":"guess_the_number_solution.py","file_name":"guess_the_number_solution.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"10359104200","text":"\"\"\"\nUsing the scikit-learn library -- model training methods: linear regression, logistic regression and softmax regression\ndate: 2018-7-18\nauthor: 王建坤\n\"\"\"\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n\n# Create the linear-regression dataset\ndef create_dataset():\n    X = 2 * np.random.rand(100, 1)\n    # Add Gaussian noise to the targets\n    y = 4 + 3*X + np.random.randn(100, 1)\n    return X, y\n\n\n# Linear regression, analytic method: solve the normal equation to get the global optimum directly\ndef linear_regression_analysis(X, y):\n    # Append a constant feature of 1 for the bias term b\n    X_b = np.c_[np.ones((100, 1)), X]\n    # Solve the normal equation for the global optimum\n    theta_best = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y)\n    print(\"Linear regression analytic solution:\", theta_best)\n    # Predict\n    sample = np.array([[0], [2]])\n    sample_b = np.c_[np.ones((2, 1)), sample]\n    predict = sample_b.dot(theta_best)\n    # print('Normal-equation prediction:', predict)\n    # Plot the linear-regression model\n    plt.plot(sample, predict, 'r-')\n    plt.plot(X, y, 'b.')\n    plt.axis([0, 2, 0, 15])\n    plt.show()\n    return X_b\n\n\n# Use sk-learn's linear-regression model, which solves analytically by default\ndef linear_regression_sk(X, y):\n    from sklearn.linear_model import LinearRegression\n    # Create a linear-regression model instance\n    lin_reg = LinearRegression()\n    lin_reg.fit(X, y)\n    print('sk-learn linear regression analytic solution:', 'b:', lin_reg.intercept_, 'w:', lin_reg.coef_)\n\n\n# Linear regression with batch gradient descent\ndef linear_regression_batch_gd(X_b, y):\n    # Fixed learning rate, number of iterations and sample count\n    learning_rate = 0.1\n    max_iterations = 1000\n    m = 100\n    # Random initial values\n    theta = np.random.randn(2, 1)\n    # Start iterating\n    for n in range(max_iterations):\n        gradients = 2/m * X_b.T.dot(X_b.dot(theta)-y)\n        theta = theta - learning_rate*gradients\n    print('Linear regression batch-gradient-descent solution:', theta)\n\n\n# Linear regression with stochastic gradient descent\ndef linear_regression_stochastic_gd(X_b, y):\n    # Number of epochs and samples\n    n_epochs = 50\n    m = 100\n    theta = np.random.randn(2, 1)\n    for epoch in range(n_epochs):\n        for i in range(m):\n            random_index = np.random.randint(m)\n            xi = X_b[random_index:random_index+1]\n            yi = y[random_index:random_index+1]\n            gradients = 2 * xi.T.dot(xi.dot(theta) - yi)\n            learning_rate = 1.0/(epoch*m + i + 10)\n            theta = theta - learning_rate*gradients\n    print('Linear regression stochastic-gradient-descent solution:', theta)\n\n\n# sk-learn linear regression with stochastic gradient descent\ndef linear_regression_stochastic_gd_sk(X, y):\n    from sklearn.linear_model import SGDRegressor\n    sgd_reg = SGDRegressor(n_iter=50, penalty=None, eta0=0.1)\n    sgd_reg.fit(X, y.ravel())\n    print('sk-learn linear regression SGD solution:', 'b:', sgd_reg.intercept_, 'w:', sgd_reg.coef_)\n\n\n# Create the polynomial-regression dataset\ndef create_dataset_poly():\n    m = 100\n    X1 = 6 * np.random.rand(m, 1) - 3\n    y1 = 0.5 * X1 ** 2 + X1 + 2 + np.random.randn(m, 1)\n    return X1, y1\n\n\n# Polynomial regression\ndef polynomial_regression(X, y):\n    # Add quadratic features\n    from sklearn.preprocessing import PolynomialFeatures\n    from sklearn.linear_model import LinearRegression\n    poly_features = PolynomialFeatures(degree=2, include_bias=False)\n    X_poly = poly_features.fit_transform(X)\n    lin_reg_poly = LinearRegression()\n    lin_reg_poly.fit(X_poly, y)\n    print('Polynomial regression solution:', 'b:', lin_reg_poly.intercept_, 'w:', lin_reg_poly.coef_)\n    return lin_reg_poly\n
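# Sanity check for the normal-equation solution above (illustrative, standalone):\n# X_chk = 2 * np.random.rand(100, 1)\n# y_chk = 4 + 3 * X_chk + np.random.randn(100, 1)\n# Xb = np.c_[np.ones((100, 1)), X_chk]\n# theta_ne = np.linalg.inv(Xb.T.dot(Xb)).dot(Xb.T).dot(y_chk)\n# theta_ls, *_ = np.linalg.lstsq(Xb, y_chk, rcond=None)\n# np.allclose(theta_ne, theta_ls)  # True: both recover roughly [4, 3]\n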
\n# Plot learning curves against the training-set size\ndef plot_learning_curves(model, X, y):\n    from sklearn.metrics import mean_squared_error\n    from sklearn.model_selection import train_test_split\n    X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2)\n    train_errors, val_errors = [], []\n    for m in range(1, len(X_train)):\n        model.fit(X_train[:m], y_train[:m])\n        y_train_predict = model.predict(X_train[:m])\n        y_val_predict = model.predict(X_val)\n        train_errors.append(mean_squared_error(y_train_predict, y_train[:m]))\n        val_errors.append(mean_squared_error(y_val_predict, y_val))\n    plt.plot(np.sqrt(train_errors), \"r-+\", linewidth=2, label=\"train\")\n    plt.plot(np.sqrt(val_errors), \"b-\", linewidth=3, label=\"val\")\n    plt.show()\n\n\n# Ridge regression, l2 regularisation, solved with the closed-form equation\ndef ridge_regression_analysis(X, y):\n    from sklearn.linear_model import Ridge\n    ridge_reg = Ridge(alpha=1, solver=\"cholesky\")\n    ridge_reg.fit(X, y)\n    print('Ridge regression solution:', 'b:', ridge_reg.intercept_, 'w:', ridge_reg.coef_)\n\n\n# Lasso regression, l1 regularisation, solved with the closed-form equation\ndef lasso_regression_analysis(X, y):\n    from sklearn.linear_model import Lasso\n    lasso_reg = Lasso(alpha=0.1)\n    lasso_reg.fit(X, y)\n    print('Lasso regression solution:', 'b:', lasso_reg.intercept_, 'w:', lasso_reg.coef_)\n\n\n# l2/l1 regularisation, solved with gradient descent\ndef regularization_regression_gd(X, y):\n    from sklearn.linear_model import SGDRegressor\n    # For l1 regularisation change penalty=\"l2\" to penalty=\"l1\"\n    sgd_reg = SGDRegressor(penalty=\"l2\")\n    sgd_reg.fit(X, y.ravel())\n    print('l2 gradient-descent solution:', 'b:', sgd_reg.intercept_, 'w:', sgd_reg.coef_)\n\n\n# Elastic-net regularisation, i.e. a mix of l1 and l2\ndef elasticnet_regression_gd(X, y):\n    from sklearn.linear_model import ElasticNet\n    # l1_ratio is the mixing rate, i.e. the share of l1 regularisation\n    elastic_net = ElasticNet(alpha=0.1, l1_ratio=0.5)\n    elastic_net.fit(X, y)\n    print('Elastic-net solution:', 'b:', elastic_net.intercept_, 'w:', elastic_net.coef_)\n\n\n# Early stopping\ndef early_stoping(X, y):\n    from sklearn.base import clone\n    from sklearn.linear_model import SGDRegressor\n    from sklearn.metrics import mean_squared_error\n    from sklearn.model_selection import train_test_split\n    # With warm_start=True, calling fit() continues training from where it stopped instead of restarting from scratch.\n    sgd_reg = SGDRegressor(max_iter=1, warm_start=True, penalty=None, learning_rate=\"constant\", eta0=0.0005)\n    X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2)\n    minimum_val_error = float(\"inf\")\n    best_epoch = None\n    best_model = None\n    for epoch in range(1000):\n        sgd_reg.fit(X_train, y_train.ravel())\n        y_val_predict = sgd_reg.predict(X_val)\n        val_error = mean_squared_error(y_val_predict, y_val)\n        if val_error < minimum_val_error:\n            minimum_val_error = val_error\n            best_epoch = epoch\n            best_model = clone(sgd_reg)\n    print('stopping in:', best_epoch)\n\n\n# Load the iris dataset\ndef load_dataset_flower():\n    from sklearn import datasets\n    iris = datasets.load_iris()\n    # X_f = iris['data']\n    # y_f = iris['target']\n    # print('Iris dataset loaded:', iris)\n    return iris\n\n\n# Logistic regression\ndef logistic_classify(iris):\n    from sklearn.linear_model import LogisticRegression\n    X = iris[\"data\"][:, 3:]  # petal width\n    y = (iris[\"target\"] == 2).astype(np.int)\n    log_reg = LogisticRegression()\n    log_reg.fit(X, y)\n    # Plot\n    X_new = np.linspace(0, 3, 1000).reshape(-1, 1)\n    y_proba = log_reg.predict_proba(X_new)\n    plt.plot(X_new, y_proba[:, 1], \"g-\", label=\"Iris-Virginica\")\n    plt.plot(X_new, y_proba[:, 0], \"b--\", label=\"Not Iris-Virginica\")\n    plt.show()\n
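# Tiny self-contained illustration of the probability-threshold idea above\n# (synthetic 1-D data, illustrative only):\n# from sklearn.linear_model import LogisticRegression\n# Xs = np.array([[0.1], [0.3], [0.5], [1.7], [1.9], [2.3]])\n# ys = np.array([0, 0, 0, 1, 1, 1])\n# clf = LogisticRegression().fit(Xs, ys)\n# clf.predict_proba([[1.0]])  # the two class probabilities straddle 0.5 near the boundary\n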
\n# Softmax regression for multi-class classification\ndef softmax_classify(iris):\n    from sklearn.linear_model import LogisticRegression\n    # Prepare the data\n    X = iris[\"data\"][:, (2, 3)]  # petal length, petal width\n    y = iris[\"target\"]\n    # Create a softmax-regression instance\n    softmax_reg = LogisticRegression(multi_class=\"multinomial\", solver=\"lbfgs\", C=10)\n    softmax_reg.fit(X, y)\n    # Predict\n    predict = softmax_reg.predict([[5, 2]])\n    predict_pro = softmax_reg.predict_proba([[5, 2]])\n    print('Softmax regression prediction:', predict, 'class probabilities:', predict_pro)\n\n\nif __name__ == '__main__':\n    # Get the linear-regression dataset\n    X, y = create_dataset()\n    # Linear regression, analytic method\n    # X_b = linear_regression_analysis(X, y)\n    # sk-learn linear-regression solution\n    # linear_regression_sk(X, y)\n    # Linear regression, batch gradient descent\n    # linear_regression_batch_gd(X_b, y)\n    # Linear regression, stochastic gradient descent\n    # linear_regression_stochastic_gd(X_b, y)\n    # sk-learn linear regression, stochastic gradient descent\n    # linear_regression_stochastic_gd_sk(X, y)\n    # Get the polynomial-regression dataset\n    # X1, y1 = create_dataset_poly()\n    # Polynomial-regression solution\n    # lin_reg_poly = polynomial_regression(X1, y1)\n    # Learning curves against the training-set size\n    # plot_learning_curves(lin_reg_poly, X1, y1)\n    # Ridge regression, l2 regularisation\n    # ridge_regression_analysis(X, y)\n    # Lasso regression, l1 regularisation\n    # lasso_regression_analysis(X, y)\n    # Regularised gradient descent\n    # regularization_regression_gd(X, y)\n    # Elastic net\n    # elasticnet_regression_gd(X, y)\n    # Early stopping\n    # early_stoping(X1, y1)\n    # Load the iris dataset\n    iris = load_dataset_flower()\n    # Logistic regression, binary classification\n    logistic_classify(iris)\n    # Softmax multi-class classification\n    softmax_classify(iris)\n","repo_name":"wang-jiankun/Machine-Learning","sub_path":"ml-sk/model_train/model_train.py","file_name":"model_train.py","file_ext":"py","file_size_in_byte":9187,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"43709224028","text":"# MODIFICATIONS:\n# - self.sprites replaced with a 2D array for gridsquares\n# - Camera Removed\n\n# DO NOT COPY LOL\n\nimport os\nos.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = \"hide\"\n\nimport sys\nimport math\nimport time\nimport random\nimport pygame\npygame.font.init()\n\n\nclass sprite():\n\n def __repr__(self):\n self.highlight = 30\n return f\"{self.layer} at position {self.x}, {self.y}\"\n\n def update_move(self, game):\n pass\n\n def update_draw(self, game):\n pass\n\n def update_destroy(self, game):\n pass\n\n def update_highlight(self, game):\n if self.highlight > 0:\n self.draw_highlight(game)\n self.highlight -= 1\n\n def draw_highlight(self, game):\n try:\n highlight_r = self.r\n except AttributeError:\n highlight_r = 10\n\n pygame.draw.circle(game.win, (35, 155, 35), (self.x, self.y), highlight_r)\n\n def add_default_attr(self, game):\n self.destroy = False\n self.destroying = False\n self.highlight = 0\n\n def kill(self):\n self.destroy = True\n\n def onscreen(self, game):\n if self.x < 0 or self.x > game.win_w:\n return False\n if self.y < 0 or self.y > game.win_h:\n return False\n\n return True\n\n def onscreen_info(self, game):\n if self.x < 0 or self.x > game.win_w:\n return \"X\"\n if self.y < 0 or self.y > game.win_h:\n return \"Y\"\n\n return \"\"\n\n def center_image_pos(self, sprite, pos):\n i = sprite.get_size()\n\n x = pos[0] - (i[0] // 2)\n y = pos[1] - (i[1] // 2)\n\n return x, y\n\n def blit_rotate(self, image, angle, game):\n img = pygame.transform.rotate(image, angle)\n\n b_x = self.x - img.get_width()//2\n b_y = self.y - img.get_height()//2\n\n game.win.blit(img, (b_x, b_y))\n\n\nclass game_info():\n def __init__(self, name, win_w, win_h, user_w, user_h, bg, sounds=None, framecap=False, show_framerate=False, quit_key=None):\n self.win_w = win_w\n self.win_h = win_h\n\n self.user_w = user_w\n self.user_h = user_h\n\n self.win_scale = pygame.display.set_mode((user_w, user_h))\n pygame.display.set_caption(name)\n self.win = pygame.Surface((win_w, win_h))\n\n if sounds:\n self.sounds = {}\n for i, s in enumerate(sounds):\n self.sounds[s] = (sounds[s], i)\n pygame.mixer.set_num_channels(len(self.sounds))\n\n else:\n self.sounds = None\n\n self.bg = bg\n self.run = True\n\n self.clock = pygame.time.Clock()\n\n self.keys = 
pygame.key.get_pressed()\n self.mouse = pygame.mouse.get_pressed()\n self.mouse_pos = pygame.mouse.get_pos()\n self.update_keys()\n\n self.frames = 0\n self.framecap = framecap\n self.show_framerate = show_framerate\n\n self.start_time = time.time()\n self.quit_key = quit_key\n\n self.shake_x = 0\n self.shake_y = 0\n self.shake = False\n\n self.particles = []\n self.sprites = []\n\n self.font = pygame.font.SysFont(\"Comic sans\", 30)\n\n # Function that converts an orientation into actual numbers\n def orientate(self, h=False, v=False):\n\n h_dict = {\n \"Left\" : 0,\n \"Left-Center\" : self.win_w // 4,\n \"Center\" : self.win_w // 2,\n \"Right-Center\" : (self.win_w // 4) * 3,\n \"Right\" : self.win_w,\n \"Rand\" : random.randint(0, self.win_w)\n }\n\n v_dict = {\n \"Top\" : 0,\n \"Top-Center\" : self.win_h // 4,\n \"Center\" : self.win_h // 2,\n \"Bottom-Center\" : (self.win_h // 4) * 3,\n \"Bottom\" : self.win_h,\n \"Rand\" : random.randint(0, self.win_h)\n }\n\n # We have to check that the orientation exists first\n if h:\n assert h in h_dict, f\"{h} is not a valid orientation\"\n if v:\n assert v in v_dict, f\"{v} is not a valid orientation\"\n\n if h and v:\n return (h_dict[h], v_dict[v])\n elif h and not v:\n return h_dict[h]\n elif v and not h:\n return v_dict[v]\n\n return False # Safety Clause\n\n def playsound(self, name):\n\n assert name in self.sounds, f\"{name} is an undefined sound\"\n s = self.sounds[name]\n\n pygame.mixer.Channel(s[1]).play(s[0])\n\n def purge_sprites(self):\n for layer in range(len(self.sprites)):\n self.sprites[layer] = []\n\n def init_screenshake(self, magnitude, len, rand=True, spread=False):\n self.shake = True\n self.shake_index = 0\n\n pos1 = 0 - magnitude\n pos2 = magnitude\n\n if rand and spread:\n pos1 = int(pos1 * random.uniform(spread[0], spread[1]))\n pos2 = int(pos2 * random.uniform(spread[0], spread[1]))\n\n bb_temp = [\n (pos1, pos1),\n (pos2, pos1),\n (pos1, pos2),\n (pos2, pos2)\n ]\n\n self.bounding_box = [bb_temp[i % 4] for i in range(len)]\n\n if rand:\n random.shuffle(self.bounding_box)\n\n self.bounding_box.append((0, 0))\n\n def check_mouse(self, button, buffer=False, timebuffer=False):\n if timebuffer:\n if self.frames % timebuffer != 0 and self.last_mouse[button]:\n return False\n\n if buffer:\n if self.last_mouse[button]:\n return False\n elif self.mouse[button]:\n return True\n elif self.mouse[button]:\n return True\n\n def check_key(self, *args, buffer=False, all_press=False, timebuffer=False):\n\n if timebuffer:\n if self.frames % timebuffer != 0:\n for givenkey in args:\n if self.last_keys[givenkey]:\n return False\n\n fullkeys = 0\n for givenkey in args:\n if buffer:\n if self.last_keys[givenkey]:\n return False\n\n if self.keys[givenkey]:\n if all_press:\n fullkeys += 1\n else:\n fullkeys = len(args)\n break\n\n if fullkeys >= len(args):\n return True\n\n return False # Safety Clause\n\n def update_screenshake(self):\n if not self.shake:\n return\n\n bb_iter = self.bounding_box[self.shake_index]\n self.shake_x = bb_iter[0]\n self.shake_y = bb_iter[1]\n\n if self.shake_index < len(self.bounding_box) - 1:\n self.shake_index += 1\n else:\n self.shake = False\n\n def update_particles(self):\n if len(self.particles):\n p_survive = []\n for p in self.particles:\n if p.destroy:\n continue\n p.update_move()\n p.update_draw(self)\n p_survive.append(p)\n\n self.particles = p_survive\n\n def update_keys(self):\n self.last_keys = self.keys\n self.last_mouse = self.mouse\n\n self.keys = pygame.key.get_pressed()\n self.mouse = 
pygame.mouse.get_pressed()\n\n m = pygame.mouse.get_pos()\n w_ratio = self.win_w / self.user_w\n h_ratio = self.win_h / self.user_h\n\n self.mouse_pos = (m[0] * w_ratio, m[1] * h_ratio)\n\n def bg_kill(self, obj):\n for iter, s in enumerate(self.sprites[\"BACKGROUND\"]):\n if s is obj:\n for d in self.sprites[\"BACKGROUND\"][0:iter]:\n d.kill()\n\n self.bg = obj.c\n obj.kill()\n\n def update_draw(self):\n\n for y, row in enumerate(self.sprites):\n valid_sprites = []\n for x in row:\n x.update_move(self)\n if not x.destroy:\n valid_sprites.append(x)\n self.sprites[y] = valid_sprites\n\n for y, row in enumerate(self.sprites):\n valid_sprites = []\n for x in row:\n x.update_draw(self)\n if not x.destroy:\n valid_sprites.append(x)\n self.sprites[y] = valid_sprites\n\n self.update_screenshake()\n\n # Function for scaling the design screen to the target screen\n def update_scaled(self):\n\n # Lock framerate\n if self.framecap:\n self.clock.tick(self.framecap)\n\n # Scale the design screen to the size of the target screen\n frame = pygame.transform.scale(self.win, (self.user_w, self.user_h))\n\n # Blit scaled design screen to target screen, plus screenshake\n self.win_scale.blit(frame, (self.shake_x, self.shake_y))\n\n # Update screen display\n pygame.display.flip()\n\n # Delete everything on the screen for next loop\n self.win.fill(self.bg)\n self.win_scale.fill(self.bg)\n\n def update_state(self):\n\n self.frames += 1\n self.elapsed_time = time.time() - self.start_time\n\n self.framerate = self.frames / self.elapsed_time\n if self.show_framerate:\n print(self.framerate, end=\"\\r\")\n\n if not self.quit_key == None:\n if self.check_key(self.quit_key):\n self.run = False\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.run = False\n\n\nif __name__ == \"__main__\":\n game = game_info(\n name=\"Launched from module py file\",\n win_w=1280,\n win_h=720,\n user_w=1920,\n user_h=1080,\n bg=(0, 0, 0),\n framecap=60,\n show_framerate=True,\n quit_key=pygame.K_ESCAPE)\n\n while game.run:\n game.update_keys()\n game.update_draw()\n\n if game.check_key(pygame.K_LEFT, pygame.K_RIGHT, all_press=True):\n print(\"BOTH DOWN\")\n\n if game.check_key(pygame.K_d, pygame.K_a):\n print(\"ALIAS\")\n\n game.update_scaled()\n game.update_state()\n\n pygame.quit()\n","repo_name":"Infinnitor/gamists-platformer-2021","sub_path":"LevelBuild/gameinfo.py","file_name":"gameinfo.py","file_ext":"py","file_size_in_byte":10076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"935055117","text":"import socket, sys, time, os, errno\n\ndef servidor():\n\tserver_address = (\"localhost\", 3001)\n\tsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\tsock.bind(server_address)\n\tsock.listen(10)\n\n\twhile True:\n\t\tconexion, client_address = sock.accept()\n\t\tprint(\"Escuchando...\")\n\t\ttry:\n\t\t\tfichero = conexion.recv(64)\n\t\t\tif os.path.exists(fichero):\n\t\t\t\ttry:\n\t\t\t\t\tficherocompleto = open(fichero, \"r\")\n\t\t\t\t\tconexioncontenido = ficherocompleto.read(1024)\n\n\t\t\t\t\twhile conexioncontenido != '':\n\t\t\t\t\t\tconexion.send(conexioncontenido.encode('utf8'))\n\t\t\t\t\t\tconexioncontenido = ficherocompleto.read(1024)\n\t\t\t\t\tficherocompleto.close()\n\n\t\t\t\texcept FileNotFoundError:\n\t\t\t\t\tconexion.sendall(\"Error al abrir el fichero\\n\".encode('utf8'))\n\t\t\n\t\t\tconexion.close()\n\n\t\texcept IOError as e:\n\t\t\tif e.errno == errno.EPIPE:\n\t\t\t\tpass\n\nif __name__ == 
\"__main__\":\n\n\ttry:\n\t\tservidor()\n\texcept KeyboardInterrupt:\n\t\tprint(\"\\nCerrando Servidor...\\n\")\n\n","repo_name":"galvarez33/Practicas-PED","sub_path":"p6/c.balsalobre/serv6app1.py","file_name":"serv6app1.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"40145229415","text":"import pandas as pd\nimport numpy as np\nimport os\nimport pytz\nimport matplotlib.pyplot as plt\n\nsource = \"CSV/\"\nsubdir = os.listdir(source)\n\nnode_mean = pd.DataFrame()\nnode_med = pd.DataFrame()\nnode_sd = pd.DataFrame()\nnode_weekday = pd.DataFrame()\nnode_weekend = pd.DataFrame()\nnode_mid = pd.DataFrame()\nnode_off = pd.DataFrame()\nnode_on = pd.DataFrame()\n\n# based off state holidays https://www.sos.ca.gov/state-holidays\n# holidays = pd.DatetimeIndex(\n# [date(2022, 1, 1), date(2022, 1, 17), date(2022, 2, 21), date(2022, 3, 31), date(2022, 5, 30),\n# date(2022, 7, 1), date(2022, 9, 5), date(2022, 11, 11), date(2022, 11, 24), date(2022, 11, 24),\n# date(2022, 12, 25), date(2022, 12, 26)])\nmonths = [\"Jan\", \"Feb\", \"Mar\", \"Apr\", \"May\", \"Jun\", \"Jul\", \"Aug\", \"Sep\", \"Oct\", \"Nov\", \"Dec\"]\npst = pytz.timezone(\"US/Pacific\")\n\n\ndef read_sub_dir(path, node_time_mean):\n \"\"\"\n :param path: The path to the Node folder within CSV/\n :param node_time_mean: df containing the time data of each months mean\n :return: processed node_time_mean df from clean_data\n \"\"\"\n df = pd.DataFrame()\n files = os.listdir(path)\n for f in files:\n print(f)\n dir_path = \"{}/{}\".format(path, f)\n # An exception occurs if a csv file is empty\n try:\n t = pd.read_csv(dir_path)\n # ignore files that have \"File does not exist.\"\n if \"NODE_ID\" in t.columns:\n if \"LMP_TYPE\" in t.columns:\n t = t[t[\"LMP_TYPE\"] == \"LMP\"]\n df = pd.concat([df, t])\n except Exception as e:\n print(e)\n # skip the empty file\n df.drop_duplicates()\n return clean_data(df, node_time_mean)\n\n\ndef add_columns():\n \"\"\"\n Adds the month and year column names to each dataframe\n \"\"\"\n for year in range(2019, 2023):\n for month in months:\n col_name = \"{}_{}\".format(month, year)\n\n node_mean[col_name] = None\n node_med[col_name] = None\n node_sd[col_name] = None\n node_weekday[col_name] = None\n node_weekend[col_name] = None\n node_mid[col_name] = None\n node_off[col_name] = None\n node_on[col_name] = None\n node_sd[col_name] = None\n print(node_mean)\n\n\ndef clean_data(df, node_time_mean):\n if \"OPR_DT\" in df:\n # Get month and year\n df[\"date\"] = pd.DatetimeIndex(df['OPR_DT'])\n df['month_year'] = df['date'].dt.to_period('M')\n df['weekday'] = pd.DatetimeIndex(df['OPR_DT'])\n df['weekday'] = pd.DatetimeIndex(df['OPR_DT']).dayofweek\n df['peak'] = pd.DatetimeIndex(df['OPR_DT']).hour\n # convert GMT to PST\n df['hour'] = pd.DatetimeIndex(df['INTERVALSTARTTIME_GMT'], tz=pst).hour\n\n week_conditions = [(df['weekday'] == 5) | (df['weekday'] == 6)]\n # 5am - 9am\n mid_peak = [(df['hour'] == 5) | (df['hour'] == 6) | (df['hour'] == 7) | (df['hour'] == 8)\n | (df['hour'] == 9)]\n # 5pm - 9pm\n on_peak = [(df['hour'] == 17) | (df['hour'] == 18) | (df['hour'] == 19) | (df['hour'] == 20)\n | (df['hour'] == 21)]\n # Otherwise\n off_peak = [(df['hour'] == 10) | (df['hour'] == 11) | (df['hour'] == 12) | (df['hour'] == 13)\n | (df['hour'] == 14) | (df['hour'] == 15) | (df['hour'] == 16) | (df['hour'] == 22)\n | (df['hour'] == 23) | (df['hour'] == 0) | (df['hour'] == 1) | (df['hour'] == 2) |\n (df['hour'] 
== 3) | (df['hour'] == 4)]\n peak_time = off_peak + mid_peak + on_peak\n\n df[\"peak\"] = np.select(peak_time, ['Off Peak', 'Mid Peak', 'On Peak'])\n df[\"weekday\"] = np.select(week_conditions, [\"Weekend\"], default=\"Weekday\")\n\n # Get different type of statistics\n mean = df.groupby([\"NODE_ID\", \"month_year\"]).mean(numeric_only=True)\n median = df.groupby([\"NODE_ID\", \"month_year\"]).median(numeric_only=True)\n sd = df.groupby([\"NODE_ID\", \"month_year\"]).std(numeric_only=True)\n mean_holi = df.groupby([\"NODE_ID\", \"month_year\", \"weekday\"]).mean(numeric_only=True)\n mean_hour = df.groupby([\"NODE_ID\", \"month_year\", \"peak\"]).mean(numeric_only=True)\n df.to_csv(\"Temp.csv\")\n\n # bplot = df.boxplot(by=\"month_year\", column=[\"MW\"], grid=False, figsize=(25, 10))\n # bplot.plot()\n # bplot.set_xlabel(\"Year & Month\")\n # bplot.set_ylabel(\"$MW/hr\")\n # plt.suptitle(\"{} $MW/hr Boxplot\".format(df[\"NODE_ID\"].iloc[0]))\n # plt.savefig(\"Boxplot/{}.png\".format(df[\"NODE_ID\"].iloc[0]))\n # plt.close()\n\n # index[0] Node_id Index[1] Period(YYYY-MM, 'M')\n for index, row in mean.iterrows():\n col_name = \"{}_{}\".format(months[int(index[1].month) - 1], int(index[1].year))\n node_mean.loc[index[0], col_name] = row.MW\n\n for index, row in median.iterrows():\n col_name = \"{}_{}\".format(months[int(index[1].month) - 1], int(index[1].year))\n node_med.loc[index[0], col_name] = row.MW\n\n for index, row in sd.iterrows():\n col_name = \"{}_{}\".format(months[int(index[1].month) - 1], int(index[1].year))\n node_sd.loc[index[0], col_name] = row.MW\n\n # index[2] week type\n for index, row in mean_holi.iterrows():\n col_name = \"{}_{}\".format(months[int(index[1].month) - 1], int(index[1].year))\n # if column name does not exists, add it with null values\n if index[2] == \"Weekday\":\n node_weekday.loc[index[0], col_name] = row.MW\n else:\n node_weekend.loc[index[0], col_name] = row.MW\n\n # index[2] peak\n for index, row in mean_hour.iterrows():\n col_name = \"{}_{}\".format(months[int(index[1].month) - 1], int(index[1].year))\n # if column name does not exists, add it with null values\n if index[2] == \"Mid Peak\":\n node_mid.loc[index[0], col_name] = row.MW\n elif index[2] == \"Off Peak\":\n node_off.loc[index[0], col_name] = row.MW\n else:\n node_on.loc[index[0], col_name] = row.MW\n\n node_time_mean = pd.concat([node_time_mean, df.groupby([\"NODE_ID\", \"month_year\"]).mean(numeric_only=True)])\n\n return node_time_mean\n\n\nif __name__ == \"__main__\":\n add_columns()\n node_time_mean = pd.DataFrame()\n for sdir in subdir:\n node_time_mean = read_sub_dir(\"{}/{}\".format(source, sdir), node_time_mean)\n\n node_mean.to_csv(\"Nodes_Mean.csv\")\n node_med.to_csv(\"Nodes_Median.csv\")\n node_sd.to_csv(\"Nodes_Standard_Deviation.csv\")\n node_weekday.to_csv(\"Nodes Weekday.csv\")\n node_weekend.to_csv(\"Nodes Weekend.csv\")\n node_mid.to_csv(\"Nodes Mid.csv\")\n node_off.to_csv(\"Nodes Off.csv\")\n node_on.to_csv(\"Nodes On.csv\")\n node_time_mean.to_csv(\"Nodes Time.csv\")\n","repo_name":"AnilMaharajh/CAISO-LMP-Generator","sub_path":"Data Cleaning.py","file_name":"Data Cleaning.py","file_ext":"py","file_size_in_byte":6868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"41361999966","text":"class Module:\n \"\"\"\n Represents a module in the Festo system\n \"\"\"\n\n def __init__(self, module_id, work_type, processing_time, transport_time, cost_rate, connections):\n \"\"\"\n :param module_id: A unique identifier 
\n        :param connections: A list of IDs, which the module connects to.\n        :param work_type: An identifier for the work type of the module (integer)\n        :param processing_time: An integer specifying the processing time of the module\n        :param transport_time: An integer specifying the transport time of the module\n        \"\"\"\n\n        if module_id == 0:\n            raise ValueError(\"Module_id may not be set to 0\")\n        else:\n            self.module_id = module_id\n        self.connections = connections\n        self.work_type = work_type\n        self.cost_rate = cost_rate\n        self.processing_time = processing_time\n        self.transport_time = transport_time\n\n    def __eq__(self, other):\n        if isinstance(other, self.__class__):\n            return self.__dict__ == other.__dict__\n        else:\n            return False\n\n    def __ne__(self, other):\n        return not self.__eq__(other)\n\n    def __hash__(self):\n        return hash(tuple(sorted(self.__dict__)))\n\n    def __str__(self):\n        s = \"\"\n        s += \"{module_id: \" + str(self.module_id) + \", \"\n        s += \"connections: [\"\n        for con in self.connections:\n            s += str(con.module_id) + \", \"\n        if len(self.connections) != 0:\n            s = s[:-2]\n        return s + \"]}\"\n\n    def __repr__(self):\n        return str(self)\n\n    def get_connections(self):\n        \"\"\"\n        :return: A list of IDs, which the module connects to\n        \"\"\"\n        res = []\n        for i in range(0, len(self.connections)):\n            res.append(self.connections[i].module_id)\n        return res\n\n\ndef printable_paths(paths):\n    \"\"\"\n    Used for debugging purposes.\n    :param paths: a list of paths, i.e. a list of lists of modules\n    :return: paths as a string in a printable format\n    \"\"\"\n    s = \"[\"\n    for path in paths:\n        s += printable_path(path) + \", \"\n    if len(s) > 1:\n        return s[:-2] + \"]\"\n    else:\n        return s + \"]\"\n\n\ndef printable_path(path):\n    \"\"\"\n    Used for debugging purposes.\n    :param path: a single path, i.e. a list of modules\n    :return: a path as a string in a printable format\n    \"\"\"\n    s = \"[\"\n    for module in path:\n        if type(module) is Module:\n            s += str(module.module_id) + \", \"\n        else:\n            s += \"(Looping index: \" + str(module) + \") \"\n    if len(s) > 1:\n        return s[:-2] + \"]\"\n    else:\n        return s + \"]\"\n","repo_name":"KaFuCh/P7","sub_path":"Code/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":2401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"5970944939","text":"from ..tools import randstr, randemail, RegisterTest, register_test_case\nfrom ..tests import BaseTestCase\n\nfrom ..page_objects.team_members import TeamMembersPage\nfrom ..page_objects.teams import TeamsDetailsPage\n\nfrom .test_register import help_register\nfrom .test_teams import help_make_team\n\n\n@register_test_case('MembersTestCase')\nclass MembersTestCase(BaseTestCase):\n\n    register_test = RegisterTest(\n        'test_invite_member',\n        'test_invite_multiple_members'\n    )\n\n    @register_test('test_invite_member')\n    def test_invite_member(self):\n        help_register(self)\n        help_make_team(self)\n        help_invite_member(self)\n\n    @register_test('test_invite_multiple_members')\n    def test_invite_multiple_members(self):\n        help_register(self)\n        help_make_team(self)\n\n        for _ in range(3):\n            help_invite_member(self)\n\n\ndef help_invite_member(self):\n    (\n        TeamsDetailsPage(self)\n        .assert_page()\n        .add_member(),\n\n        TeamMembersPage(self)\n        .invite_member(randstr(8), randstr(8), randemail(8))
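\n        # randstr/randemail (imported from ..tools above) generate fresh random names and a random email, so repeated invites don't collide -- descriptive comment added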
)\n","repo_name":"elston/onboardingapp-1","sub_path":"tests/seleniumtests/tests/test_members.py","file_name":"test_members.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"12856295836","text":"#!/usr/bin/env python3\nimport os\nimport sys\nimport socket\nfrom random import randint\nfrom threading import Thread\n\nclass Server:\n\tdef __init__(self):\n\t\twelcome_msg = \"\"\"\n __________________________________________________________\n| ___ ___ ______ _ _ ______ _______ |\n| | \\ / | __ | ____| | |_| | | __ | |__ __| |\n| | |\\ \\ / /| | |__| | | | _ | | |__| | | | |\n| | | \\ \\ / / | | | |____ | | | | | __ | | | |\n| |_| \\___/ |_| |______| |_| |_| |_| |_| |_| |\n| |\n|_________________________ SERVER _________________________|\n\"\"\"\n\t\tprint(welcome_msg)\n\n\t\t#49152–65535 are dynamic and private ports\n\t\taddr = [input(\"Server IP [blank for local]: \") or \"127.0.0.1\", input(\"Set Port Number [blank for random]: \") or randint(49152, 65536)]\n\t\tif not isinstance(addr[1], int):\n\t\t\tprint(\"Invalid port number, a port random number between 49152-65536 will be used instead.\")\n\t\t\taddr[1] = randint(49152, 65536)\n\t\taddr = tuple(addr)\n\t\tself.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n\t\ttry:\n\t\t\tself.sock.bind(addr)\n\t\texcept OSError as e:\n\t\t\tprint(e)\n\t\t\tprint(\"Unable to bind server to {}\".format(str(addr)))\n\t\t\tself.sock.close()\n\t\t\tsys.exit(1)\n\n\t\tself.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\t\tself.sock.listen(5)\n\n\t\tself.clients = []\n\t\tself.client_msgs = []\n\t\tself.buff_size = 4096\n\t\tself.client_numbers = 0\n\t\tself.client_user_names = {}\n\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tprint(\"Server Listening on {} waiting for client to connect.\".format(str(addr)))\n\t\t\t\tclient, client_addr = self.sock.accept()\n\t\t\t\tself.client_numbers += 1\n\t\t\t\tself.clients.append((client, self.client_numbers))\n\t\t\t\tprint(\"\\nClient connected {} {}\\n\".format(str(self.client_numbers), client_addr))\n\t\t\t\tclient_thread = Thread(target=self.handle_client, args=[(client, self.client_numbers)], daemon=True)\n\t\t\t\tclient_thread.start()\n\t\t\texcept (KeyboardInterrupt, OSError):\n\t\t\t\tbreak\n\n\t\tfor i, j in self.clients: i.send(\"\\0\".encode(\"utf-8\"))\n\t\tself.sock.close()\n\t\tprint(\"Server Closed.\")\n\n\tdef handle_client(self, client):\n\t\tclient_sock, client_num = client\n\t\tcur_clients = [x[1] for x in self.clients]\n\t\t\n\t\tuser_name = client_sock.recv(self.buff_size).decode(\"utf-8\")\n\t\tcur_users = [self.client_user_names[x] for x in self.client_user_names]\n\n\t\twhile user_name in cur_users:\n\t\t\tuser_name += str(randint(0, 9999))\n\n\t\tself.client_user_names[client_num] = user_name\n\t\tprint(\"\\nUSER: {}, has connected to the server.\\n\".format(user_name))\n\n\t\tcur_users = [self.client_user_names[x] for x in self.client_user_names]\n\t\tclient_sock.send(\"{}\\0CONNECTED TO SERVER. 
\n\t\tclient_sock.send(\"{}\\0CONNECTED TO SERVER. \\nUSERS CONNECTED: {}\".format(user_name, str(cur_users)).encode(\"utf-8\"))\n\n\t\tself.client_msgs.append((\"User-Name: {}, has connected\".format(user_name), -1))\n\t\trecv_thread = Thread(target=self.recv_client_msgs, args=[client_sock, client_num], daemon=True)\n\t\trecv_thread.start()\n\t\tsend_thread = Thread(target=self.send_client_msgs, daemon=True)\n\t\tsend_thread.start()\n\n\tdef recv_client_msgs(self, client_sock, client_num):\n\t\t'receives client msgs and stores them in client_msgs'\n\t\twhile True:\n\t\t\tmsg = client_sock.recv(self.buff_size).decode(\"utf-8\")\n\t\t\tif msg == \"\\0\":\n\t\t\t\t#single null char represents the client closing\n\t\t\t\tbreak\n\t\t\telif msg:\n\t\t\t\tself.client_msgs.append((msg, client_num))\n\n\t\tself.remove_client(client_sock, client_num)\n\n\tdef send_client_msgs(self):\n\t\t'send msgs to every client, except the client who sent the msg'\n\t\twhile True:\n\t\t\tif self.client_msgs:\n\t\t\t\ttmp = [] + self.client_msgs\n\t\t\t\tfor m, n in tmp:\n\t\t\t\t\tself.client_msgs.remove((m, n))\n\t\t\t\t\tfor c1, n1 in self.clients:\n\t\t\t\t\t\tif n1 != n:\n\t\t\t\t\t\t\tname = \"SERVER MESSAGE\" if n == -1 else self.client_user_names[n]\n\t\t\t\t\t\t\tm_send = name + \"\\0\" + m\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\tc1.send(m_send.encode(\"utf-8\"))\n\t\t\t\t\t\t\texcept OSError:\n\t\t\t\t\t\t\t\tself.remove_client(c1, n1)\n\n\tdef remove_client(self, client, client_num):\n\t\tuser = self.client_user_names.get(client_num, \"<no username>\")\n\t\ttry:\n\t\t\tself.clients.remove((client, client_num))\n\t\t\tself.client_user_names.pop(client_num, None)\n\t\texcept ValueError:\n\t\t\t#client was already removed from the list\n\t\t\tpass\n\t\tprint(\"Client: {}, {} has disconnected.\".format(client_num, user))\n\nif __name__ == '__main__':\n\tos.system(\"clear\")\n\tServer()","repo_name":"murster972/Chat","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"36977679257","text":"'''\n    Created by Cyx on 2022.7.21\n    Basic FTP client class\n'''\n\nimport ftplib\nimport os\n\nclass FTP():\n    ftp_server = ftplib.FTP()\n    hostname = 'ftp.dlptest.com'\n    username = 'dlpuser'\n    pwd = 'rNrKYTX9g7z3RgJRmxWuGHbeu'\n    \n    def __init__(self, hostname = 'ftp.dlptest.com',\n                 username = 'dlpuser',\n                 pwd = 'rNrKYTX9g7z3RgJRmxWuGHbeu'):\n        self.hostname = hostname\n        self.username = username\n        self.pwd = pwd \n\n    # Connect to the server\n    def connect(self):\n        # print(self.hostname)\n        # print(self.username)\n        # print(self.pwd)\n        try:\n            # Connect to the FTP server; login() is called by default\n            self.ftp_server = ftplib.FTP(self.hostname, \n                                         self.username, \n                                         self.pwd)\n        except Exception as err:\n            print(\"connect fault: \"+str(err))\n            return -1\n        else:\n            print(\"connected successfully\")\n            # force UTF-8 encoding\n            self.ftp_server.encoding = \"utf-8\"\n            return 0\n\n    # Close the connection\n    def disconnect(self):\n        self.ftp_server.quit()\n\n    # List the contents of the current directory\n    def list_dir(self):\n        self.ftp_server.dir()\n\n    # Change the current working directory.
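\n    # usage sketch (comment added; mirrors the __main__ demo at the bottom of this file):\n    #   ftp = FTP()\n    #   if ftp.connect() == 0:\n    #       ftp.list_dir()\n    #       ftp.change_path('/')\n    #       ftp.disconnect()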
\n    def change_path(self, remotePath):\n        try:\n            self.ftp_server.cwd(remotePath)\n        except Exception as err:\n            print(\"path fault: \"+str(err))\n        finally:\n            print(\"present working path: \"+str(self.ftp_server.pwd()))  # print the current path\n\n    # Delete a file\n    def delete(self, file):\n        try:\n            self.ftp_server.delete(file)\n        except Exception as err:\n            print(\"delete fault: \"+str(err))\n\n    # Create a new directory\n    def make_dir(self, dir): \n        try:\n            self.ftp_server.mkd(dir)\n        except Exception as err:\n            print(\"make new dir fault: \"+str(err))\n\n    # Upload a file (resumable transfer)\n    def upload(self, file):\n        if not os.path.exists(file):\n            print(\"Local file doesn't exist\")\n            return\n        try:\n            with open(file, \"rb\") as f:\n                self.ftp_server.storbinary(f\"STOR {file}\", f)\n        except Exception as err:\n            print(\"upload fault: \"+str(err))\n            return -1\n        print(\"upload successful\")\n        return 0\n    \n    # Download a file (resumable transfer)\n    def download(self, file):\n        try:\n            # Write file in binary mode\n            with open(file, \"wb\") as f:\n                # Command for downloading the file: \"RETR filename\"\n                self.ftp_server.retrbinary(f\"RETR {file}\", f.write)\n        except Exception as err:\n            print(\"download fault: \"+str(err))\n            return -1\n        print(\"download successful\")\n        return 0\n    \n\n\nif __name__ == \"__main__\":\n    ftp = FTP(r\"ftp.dlptest.com\",'dlpuser','rNrKYTX9g7z3RgJRmxWuGHbeu')\n    # ftp = FTP()\n    ftp.connect()\n    # ftp.list_dir()\n    # ftp.upload('./cyx.txt')\n    # ftp.download('cyx.txt')\n","repo_name":"Crilliant/FTPClient","sub_path":"backend/FTP.py","file_name":"FTP.py","file_ext":"py","file_size_in_byte":3011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"12304011865","text":"#!/usr/bin/env python3\nimport bioutils as b\nimport random, sys\n\n\ndef greedy_motif_search(dna: list, k: int, pseudo: bool):\n\n    best_motifs = [kmer[0:k] for kmer in dna]\n\n    t = len(dna)\n\n    for kmer in b.iter_substr(dna[0], k):\n        motif = [kmer]\n\n        for i in range(1, t):\n            profile = b.profile(motif[0:i], pseudo)\n            most_probable = b.profile_most_probable_kmer(profile, dna[i], k)\n            motif.append(most_probable)\n\n        if b.score(motif) < b.score(best_motifs):\n            best_motifs = motif\n    return best_motifs\n\n\ndef randomized_motif_search(dna, k):\n    best_motifs = _randomized_motif_search(dna, k)\n    for _ in range(1001):\n        motifs = _randomized_motif_search(dna, k)\n        if b.score(motifs) < b.score(best_motifs):\n            best_motifs = motifs\n    return best_motifs
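\n\n\n# note (comment added): randomized_motif_search above runs 1001 independent random restarts of the\n# Monte Carlo search below and keeps the best-scoring motif set found across all restarts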
\ndef _randomized_motif_search(dna, k):\n\n    # Randomly select k-mer motifs in each string\n    motifs = []\n    for string in dna:\n        r = random.randint(0, len(string) - k)\n        motifs.append(string[r : r + k])\n    best_motifs = motifs\n\n    while True:\n        profile = b.profile(motifs, True)\n        motifs = _motifs(profile, dna, k)\n        if b.score(motifs) < b.score(best_motifs):\n            best_motifs = motifs\n        else:\n            return best_motifs\n\n\ndef _motifs(profile, dna, k):\n    motifs = []\n    for sequence in dna:\n        motifs.append(b.profile_most_probable_kmer(profile, sequence, k))\n    return motifs\n\n\nif __name__ == \"__main__\":\n    with open(sys.argv[1]) as f:\n        k = int(f.readline().strip().split(\" \")[0])\n        dna = [entry.strip() for entry in f.readlines()]\n    res = randomized_motif_search(dna, k)\n    print(\"\\n\".join(res))\n","repo_name":"steina1989/bioinformatics","sub_path":"motifsearch.py","file_name":"motifsearch.py","file_ext":"py","file_size_in_byte":1726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"40656571677","text":"from selenium.webdriver.common.keys import Keys\nfrom lists.forms import DUPLICATE_ITEM_ERROR\nfrom .base import FunctionalTest\n\nclass ItemValidationTest(FunctionalTest):\n\tdef test_cannot_add_duplicate_item(self):\n\t\t# Edith goes to the home page and accidentally tries to submit\n\t\t# a duplicate list item.\n\t\tself.browser.get(self.live_server_url)\n\t\tself.browser.find_element_by_id('id_new_item').send_keys('Buy milk')\n\t\tself.browser.find_element_by_id('btn_submit').send_keys(Keys.ENTER)\n\t\tself.wait_for_row_in_list_table('1: Buy milk')\n\t\tself.browser.find_element_by_id('id_new_item').send_keys('Buy milk')\n\t\tself.browser.find_element_by_id('btn_submit').send_keys(Keys.ENTER)\n\t\t\n\t\t# The home page refreshes, and there is an error message saying\n\t\t# that list items cannot be duplicated\n\t\tself.wait_for(lambda: self.assertEqual( \n\t\t    self.browser.find_element_by_css_selector('.has-error').text,\n\t\t    DUPLICATE_ITEM_ERROR\n\t\t))\t","repo_name":"RiansyahTohamba/to-do-list","sub_path":"functional_tests/test_list_item_validation.py","file_name":"test_list_item_validation.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
+{"seq_id":"18190396779","text":"\"\"\"\nCS4240: Deep Learning\nReproducibility project\n\"\"\"\n\nimport time\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom pathlib import Path\n\n# Pytorch\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader, RandomSampler\nimport torchinfo\n\n### Own modules\nimport sys\nsys.path.insert(0, '../src/')\n\nfrom PDE import Poisson1D\nfrom PINN import PINN\nfrom plotFunctions import plot_NTK, plot_param_ntk_diff, plot_NTK_change, plot_convergence_rate\nfrom NTK_helper import compute_convergence_rate\n\n### Set dtype and device to be used\ndtype = torch.float32\n\nif torch.backends.mps.is_available():\n    device = torch.device(\"mps\")\nelif torch.cuda.is_available():\n    device = torch.device('cuda')\nelse:\n    device = torch.device('cpu')\n\nsave_model = False\ntrain_model = True\nmodel_name = 'Poisson1D'\n\n##### Define Poisson1D exact, source and boundary condition functions\ndef f_u_exact(a,x):\n    \"\"\" \n    Exact solution\n    \"\"\"\n    u_exact = torch.sin(a*torch.pi*x)\n\n    return u_exact\n\ndef f_x(a, x):\n    \"\"\"\n    Source/Forcing function\n    \"\"\"\n    fx = -(a**2)*(torch.pi**2)*torch.sin(a*torch.pi*x)\n    \n    return fx\n\ndef g_x(x, xb):\n    \"\"\"\n    Boundary condition\n    \"\"\"\n    \n    ub = torch.zeros(x.size(), dtype=dtype)\n\n    xb1_idx = torch.where(x == xb[0])[0]\n    xb2_idx = torch.where(x == xb[1])[0]\n\n    ub[xb1_idx] = 0\n    ub[xb2_idx] = 0\n\n    return ub\n\n#### Setup data\n\n# Define PDE domain\nX_0,X_N = 0.,1.\nX_bc = [X_0, X_N]\n\n# Number of points\nNX = 100\ndx = (X_N - X_0) / NX\n\n# Create points for interior and boundary\nXr = torch.linspace(X_0, X_N, NX, dtype=dtype, device=device, requires_grad=True).view(-1,1)\nXb = torch.randint(0, 2, (NX,1), dtype=dtype, device=device, requires_grad=True)\nX = torch.hstack((Xr, Xb))\n\n# Setup dataset\nBr = 100\nBb = 100\nrand_sampler = RandomSampler(X, replacement=True)\nXTrain = DataLoader(X, batch_size=Br, shuffle=True)\ntraining_batches = len(XTrain.dataset)\n\nprint(f\"Training batch XTrain: {training_batches}\")\n\n### Setup PINN Network\n\n# Settings\n# NTK computation settings\ncompute_NTK = True\ncompute_NTK_interval = 10\n\n# Logging parameters\nlog_parameters = True\nlog_NTK = True\n\n# Adaptation algorithm\nuse_adaptation_algorithm = False\n\n# correct for
coupled parameters\nif not compute_NTK:\n log_NTK = False\n use_adaptation_algorithm = False \n\n# net parameters\ninput_size = 1\noutput_size = 1\n\n# Training parameters\nlearning_rate = 1e-5\nepochs = int(10e3)\n\n##### Result 1\n\n### Setup PDE Equation\na = [1, 2, 4]\nPDE = [Poisson1D(a_i) for a_i in a]\n\n# network neurons\nneurons = [100]\nneural_nets = [PINN(input_size, output_size, neurons, PDE_i, 'normal', dtype, device, log_parameters, log_NTK).to(device) for PDE_i in PDE]; \n\ndata_labels = [f'a = {a_i}' for a_i in a]\nylabels1 = [r'$\\lambda_{K}$', r'$\\lambda_{uu}$', r'$\\lambda_{rr}$']\n\n# Plot 1 - initial NTK\nfig0, axs0 = plt.subplots(1,3, figsize=(23,6))\n\nx = next(iter(XTrain)).view(-1, Br, 1)\nx_prime = next(iter(XTrain)).view(-1, Br, 1)\n\nprint(\"Compute initial NTK estimation\\n\")\nfor net in neural_nets:\n ## Observe initial estimation of NTK Matrix\n net.eval()\n net.NTK(x, x_prime)\n\n if log_NTK:\n net.log_NTK(0)\n plot_NTK(net, fig0, axs0)\n\nfor i, ax in enumerate(axs0):\n ax.legend(labels=data_labels)\n # ax.ticklabel_format(axis='y', scilimits=(0,0))\n ax.set_yscale('log')\n ax.set_ylabel(ylabels1[i])\n ax.set_xlabel(r'$Index$')\n\nplt.show()\n\n##### Train network\noptimizer = optim.SGD\n# optimizer = optim.Adam\n\noptimizers = [optimizer(net_i.parameters(), learning_rate) for net_i in neural_nets]\n\n# Auto Mixed Precision settings\nuse_amp = False\nif device == torch.device('cpu'):\n print(\"Using CPU\")\n use_amp = False\n\n# use scaler\nscaler = torch.cuda.amp.GradScaler(enabled=use_amp)\n\n### Model save settings\nif use_adaptation_algorithm and compute_NTK:\n model_adaption = '_adapted'\nelse:\n model_adaption = ''\n\nif isinstance(optimizers[0], optim.SGD):\n opt = 'SGD'\nelif isinstance(optimizers[0] , optim.Adam):\n opt = 'Adam'\n\nfile_name = f'{model_name}_Epoch={epochs}_Optimizer={opt}{model_adaption}'\npath = './results/models/'\npathfile = path+file_name+\"_freqAmplitude\"\nPath(path).mkdir(parents=True, exist_ok=True)\n\n#### Train loop\ndef train_network(a, net, optimizer):\n\n train_losses = []\n for epoch in range(epochs+1):\n\n if epoch == 0 and compute_NTK:\n ## Observe initial estimation of NTK Matrix\n net.eval()\n x = next(iter(XTrain)).view(-1, Br, 1)\n x_prime = next(iter(XTrain)).view(-1, Br, 1)\n\n net.NTK(x, x_prime)\n if log_NTK:\n net.log_NTK(0)\n # reset lambda\n # net.lambda_adaptation = torch.tensor([1., 1.], dtype=dtype, device=device)\n\n # log parameters and set in training mode\n net.log_parameters(epoch)\n net.train()\n\n epoch_loss = 0.0\n\n for i, x in enumerate(XTrain):\n # reset gradients\n optimizer.zero_grad()\n\n xr = x[:,0].view(-1,1).to(device); xb = x[:,1].view(-1,1).to(device)\n\n x = torch.stack([xr, xb], dim=0)\n\n ### INTERIOR DOMAIN\n # make prediction w.r.t. 
interior points\n\n with torch.autocast(device_type='cuda', dtype=torch.float16, enabled=use_amp):\n\n ### Predict interior points\n u_hat_x = net(xr)\n \n # determine gradients w.r.t interior points\n U_x = net.compute_pde_gradient(u_hat_x, xr)\n\n ### BOUNDARY DOMAIN\n u_hat_xb = net(xb)\n\n # determine gradients w.r.t boundary points\n U_xb = net.compute_pde_gradient(u_hat_xb, xb)\n \n # Compute forcing/source function\n fx = f_x(a, xr).T.to(device)\n\n # compute boundary condition\n gx = g_x(xb, X_bc).T.to(device)\n\n # Stack\n U = torch.stack((U_x, U_xb), dim=0)\n\n ## Backward step\n net.backward(x, U, fx, gx, use_adaption=use_adaptation_algorithm)\n epoch_loss += net.loss.item()\n if i == len(XTrain) - 1:\n x_prime = x\n\n # Do optimisation step\n if use_amp:\n scaler.scale(net.loss).backward()\n scaler.step(optimizer)\n scaler.update()\n else:\n net.loss.backward()\n optimizer.step()\n\n ### END Batch loop\n\n # Compute NTK\n if epoch > 0:\n if (epoch % compute_NTK_interval == 0 or epoch == epochs - 1) and compute_NTK:\n\n net.eval()\n net.NTK(x, x_prime)\n\n if log_NTK:\n net.log_NTK(epoch)\n\n train_losses.append(epoch_loss / len(XTrain))\n \n if epoch % 100 == 0 or epoch == epochs: \n print(f\"Epoch: {epoch:4d} Loss: {train_losses[-1]:4f} Lr: {optimizer.param_groups[0]['lr']:.2E}\")\n\n if use_adaptation_algorithm:\n lambda_weights = \"\"\n for lambda_i in net.lambda_adaptation:\n lambda_weights += f\"{lambda_i.item():5f} \"\n print(f\" Lambda Adaption: \" + lambda_weights)\n ### END training loop\n\n #### save model\n if save_model:\n net.save_model(pathfile)\n net.save_log(pathfile)\n with open(f'{pathfile}.npy', 'wb') as f:\n np.save(f, np.array(train_losses))\n\n return net, train_losses\n\n# RUN NETWORK\ntrain_losses = []\nfor a_i, net, optimizer in zip(a,neural_nets, optimizers):\n print(\"TRAIN NETWORK:\", f\"a = {a_i}\")\n net, losses = train_network(a_i, net, optimizer)\n\n########### PLOT RESULTS\n\npath = './results/figures/'\npathfile = path+file_name+\"_neurons\"\nPath(path).mkdir(parents=True, exist_ok=True)\n\nNX = 100\nx = torch.linspace(X_0, X_N, NX, dtype=dtype).view(-1,1).to(device)\n\n# Plot 1 - solution and train losses\nfig1 = plt.figure(figsize=(24,8))\ngs = fig1.add_gridspec(1,2)\ncm = matplotlib.cm.Set1\n\n# ax1\nax0 = fig1.add_subplot(gs[0,0])\nax0.set_title('Exact vs. 
neural network prediction')\nax0.set_ylabel(r'$u(x)$', size=14)\nax0.set_xlabel(r'$x$', size=14)\n\n# ax2\nax1 = fig1.add_subplot(gs[0,1])\nax1.set_title('Absolute Error')\nax1.set_ylabel('Point-wise difference', size=14)\nax1.set_xlabel(r'$x$', size=14)\n\nfor i, net in enumerate(neural_nets):\n net.eval()\n\n u_exact = f_u_exact(a[i], x)\n u_pred = net(x)\n\n xplot = x.cpu().detach().numpy()\n u_exact = u_exact.cpu().detach().numpy()\n u_pred = u_pred.cpu().detach().numpy()\n\n # Plot 1 - predict \n ax0.plot(xplot, u_exact, color=cm(i), label=r'$u_{exact}$ | a = ' + str(a[i]))\n ax0.plot(xplot, u_pred, '--', color=cm(i), label=r'$u_{pred}$ | a = ' + str(a[i]))\n\n # Plot 2 - error plot\n pointWise_err = np.abs(u_exact - u_pred)\n ax1.semilogy(xplot, pointWise_err, color=cm(i), label= data_labels[i])\n\nax0.legend(loc='upper center', bbox_to_anchor=(0.5, -0.08), fancybox=True, ncol=3)\nax1.legend(loc='upper center', bbox_to_anchor=(0.5, -0.08), fancybox=True, ncol=3)\n\nif use_adaptation_algorithm:\n fig1.suptitle(f'Poisson 1D - a = {a} Width = {neurons}\\nWith adaptation')\nelse:\n fig1.suptitle(f'Poisson 1D - a = {a} Width = {neurons}')\n\nfig1.tight_layout()\nfig1.savefig(pathfile+'_plot_1D', bbox_inches='tight',)\n\n# Plot 2 - Parameter and ntk difference\nfig2, axs2 = plt.subplots(1,2, figsize=(18,6))\n\nfor net in neural_nets:\n fig2, axs2 = plot_param_ntk_diff(net, fig2, axs2)\nfor ax in axs2:\n ax.legend(labels=data_labels, loc='upper center', bbox_to_anchor=(0.5, 1.1),\n fancybox=True, ncol=3)\nfig2.tight_layout()\nfig2.savefig(pathfile+'_plot_param_ntk_diff', bbox_inches='tight',)\n\n# Plot 3 - Convergence rate for all matrices\nfig3, axs3 = plt.subplots(1,3, figsize=(24,6))\nif use_adaptation_algorithm:\n fig3.suptitle('Convergence rate: ' + r'$c = $' + r'$\\frac{Tr(K_{i})}{n}$' +'\\n With adaptation')\nelse:\n fig3.suptitle('Convergence rate: ' + r'$c = $' + r'$\\frac{Tr(K_{i})}{n}$')\n\nylabels = [r'$c_{K}$', r'$c_{K_{uu}}$', r'$c_{K_{rr}}$', r'$c_{K_{ii}}$']\n\nfor i,net in enumerate(neural_nets):\n NTK_epochs, NTK_convergenceRate = compute_convergence_rate(net)\n NTK_convergenceRate = np.real(NTK_convergenceRate.detach().cpu().numpy())\n n = NTK_convergenceRate.shape[1]\n\n for j in range(n):\n axs3[j].semilogy(NTK_epochs, NTK_convergenceRate[:,j]); \n\nfor i,ax in enumerate(axs3):\n ax.set_xlabel(r'$Epoch$', size=14)\n ax.set_ylabel(ylabels[i], size=14)\n ax.legend(labels=data_labels, loc='upper center', bbox_to_anchor=(0.5, 1.1),\n fancybox=True, ncol=3)\n\nfig3.tight_layout()\nfig3.savefig(pathfile+'_plot_convergence_rate', bbox_inches='tight',)\n\n# Plot 4 - NTK change\nfig4, axs4 = plt.subplots(1,1, figsize=(14,12))\nfor i,net in enumerate(neural_nets):\n plot_NTK_change(net, fig4, axs4, cm(i))\n\ndata_labels = []\nfor a_i in a:\n data_labels.append(f'Epoch = {0:.2E} | a = {a_i}')\n data_labels.append(f'Epoch = {epochs:.2E} | a = {a_i}')\naxs4.legend(labels=data_labels, loc='upper center', bbox_to_anchor=(0.5, 1.065),\n fancybox=True, ncol=3)\n\nfig4.tight_layout()\nfig4.savefig(pathfile+'_plot_NTK_change', bbox_inches='tight',)\n\nplt.show()\n\n#### Results 2\n\n## Setup PINN \n\nneurons = [[10], [100], [500]]\na = 4\nPDE = Poisson1D(a)\n\nneural_nets = [PINN(input_size, output_size, neurons_i, PDE, 'normal', dtype, device, log_parameters, log_NTK) for neurons_i in neurons]\n\nX = next(iter(XTrain))\nX_prime = next(iter(XTrain))\n\nxr = X[:,0].to(device).view(-1,1); xb = X[:,1].to(device).view(-1,1)\nxr_prime = X_prime[:,0].to(device).view(-1,1); xb_prime = 
 X_prime[:,1].to(device).view(-1,1)\n\nx = torch.stack([xr, xb], dim=0); x_prime = torch.stack([xr_prime, xb_prime], dim=0)\n\nfor net in neural_nets:\n    net.to(device)\n    net.NTK(x, x_prime)\n    net.log_NTK(0)\n\n### PLOT eigenvalues of the NTK matrices for different widths\nfig, axs = plt.subplots(1,3, figsize=(23,6))\ndata_labels = [f'width = {neuron_i}' for neuron_i in neurons]\nylabels = [r'$\\lambda_{K}$', r'$\\lambda_{uu}$', r'$\\lambda_{rr}$']\n\nfor net in neural_nets:\n    plot_NTK(net, fig, axs)\n\nfor i,ax in enumerate(axs):\n    ax.legend(labels=data_labels)\n    # ax.ticklabel_format(axis='y', scilimits=(0,0))\n    ax.set_yscale('log')\n    ax.set_ylabel(ylabels[i], size=14)\n    ax.set_xlabel(r'$Index$', size=14)\n\nplt.tight_layout()\nplt.show()\n\noptimizer = optim.SGD\n# optimizer = optim.Adam\n\noptimizers = [optimizer(net_i.parameters(), learning_rate) for net_i in neural_nets]\n\n\nfor neurons_i, net, optimizer in zip(neurons, neural_nets, optimizers):\n    print(\"TRAIN NETWORK:\", f\"neurons = {neurons_i}\")\n\n    net, _ = train_network(a, net, optimizer)  # train_network returns (net, losses)\n\n########### PLOT RESULTS\n\npath = './results/figures/'\npathfile = path+file_name+\"_width\"\nPath(path).mkdir(parents=True, exist_ok=True)\n\nNX = 100\nx = torch.linspace(X_0, X_N, NX, dtype=dtype).view(-1,1).to(device)\n\nfig1 = plt.figure(1, (24,8), layout='tight')\ngs = fig1.add_gridspec(1,2)\ncm = matplotlib.cm.Set1\n\n# ax1\nax0 = fig1.add_subplot(gs[0,0])\nax0.set_title('Exact vs. neural network prediction')\nax0.set_ylabel(r'$u(x)$', size=14)\nax0.set_xlabel(r'$x$', size=14)\n\n# ax2\nax1 = fig1.add_subplot(gs[0,1])\nax1.set_title('Absolute Error')\nax1.set_ylabel('Point-wise difference', size=14)\nax1.set_xlabel(r'$x$', size=14)\n\nu_exact = f_u_exact(a, x)\nxplot = x.cpu().detach().numpy()\nu_exact = u_exact.cpu().detach().numpy()\nax0.plot(xplot, u_exact, color=cm(0), label=r'$u_{exact}$')\n\nfor i, net in enumerate(neural_nets):\n    net.eval()\n    u_pred = net(x)\n\n    u_pred = u_pred.cpu().detach().numpy()\n\n    # Plot 1 - predict \n    ax0.plot(xplot, u_pred, '--', color=cm(i+1), label=r'$u_{pred}$ | width = ' + str(neurons[i]))\n    # Plot 2 - error plot\n    pointWise_err = np.abs(u_exact - u_pred)\n    ax1.semilogy(xplot, pointWise_err, color=cm(i+1), label= data_labels[i])\n\nax0.legend(loc='upper center', bbox_to_anchor=(0.5, -0.08), fancybox=True, ncol=4)\nax1.legend(loc='upper center', bbox_to_anchor=(0.5, -0.08), fancybox=True, ncol=3)\n\n\nif use_adaptation_algorithm:\n    fig1.suptitle(f'Poisson 1D - a = {a} Width = {neurons}\\nWith adaptation')\nelse:\n    fig1.suptitle(f'Poisson 1D - a = {a} Width = {neurons}')\n\nfig1.tight_layout()\nfig1.savefig(pathfile+'_plot_1D', bbox_inches='tight',)\n\n# Plot 2 - Parameter and ntk difference\nfig2, axs2 = plt.subplots(1,2, figsize=(18,6))\n\nfor net in neural_nets:\n    fig2, axs2 = plot_param_ntk_diff(net, fig2, axs2)\nfor ax in axs2:\n    ax.legend(labels=data_labels, loc='upper center', bbox_to_anchor=(0.5, 1.1),\n              fancybox=True, ncol=3)\n\nfig2.tight_layout()\nfig2.savefig(pathfile+'_plot_param_ntk_diff', bbox_inches='tight',)\n\n# Plot 3 - convergence rate for all matrices\nfig3, axs3 = plt.subplots(1,3, figsize=(18,6))\nif use_adaptation_algorithm:\n    fig3.suptitle('Convergence rate: ' + r'$c = $' + r'$\\frac{Tr(K_{i})}{n}$' +'\\n With adaptation')\nelse:\n    fig3.suptitle('Convergence rate: ' + r'$c = $' + r'$\\frac{Tr(K_{i})}{n}$')\n\nylabels = [r'$c_{K}$', r'$c_{K_{uu}}$', r'$c_{K_{rr}}$', r'$c_{K_{ii}}$']\n\nfor i,net in enumerate(neural_nets):\n    NTK_epochs, NTK_convergenceRate =
compute_convergence_rate(net)\n NTK_convergenceRate = np.real(NTK_convergenceRate.detach().cpu().numpy())\n n = NTK_convergenceRate.shape[1]\n\n for j in range(n):\n axs3[j].semilogy(NTK_epochs, NTK_convergenceRate[:,j]); \n\nfor i,ax in enumerate(axs3):\n ax.set_xlabel(r'$Epoch$', size=14)\n ax.set_ylabel(ylabels[i],size=14)\n ax.legend(labels=data_labels, loc='upper center', bbox_to_anchor=(0.5, 1.1),\n fancybox=True, ncol=3)\n\nfig3.tight_layout()\nfig3.savefig(pathfile+'_plot_convergence_rate', bbox_inches='tight',)\n\n# Plot 4 - NTK change\nfig4, axs4 = plt.subplots(1,1, figsize=(14,12))\nfor i,net in enumerate(neural_nets):\n plot_NTK_change(net, fig4, axs4, cm(i))\n\ndata_labels = []\nfor neurons_i in neurons:\n data_labels.append(f'Epoch = {0:.0E} | width = {neurons_i}')\n data_labels.append(f'Epoch = {epochs:.0E} | width = {neurons_i}')\naxs4.legend(labels=data_labels, loc='upper center', bbox_to_anchor=(0.5, 1.065),\n fancybox=True, ncol=3)\n\nfig4.tight_layout()\nfig4.savefig(pathfile+'_plot_NTK_change', bbox_inches='tight',)\n\nplt.show()\n\n","repo_name":"ikbenali/ReproducibilityProject_DL","sub_path":"Implementation_1/Poisson/ReproducibilityResults.py","file_name":"ReproducibilityResults.py","file_ext":"py","file_size_in_byte":16482,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"21166061359","text":"\nfrom threading import Lock\nfrom sqlalchemy import Column, Integer, Sequence, Index, String, Table, ForeignKey, ForeignKeyConstraint, Float\nfrom sqlalchemy.orm import relationship, remote, foreign, column_property\nfrom sqlalchemy_json import MutableJson\nfrom sqlalchemy_utils import LtreeType, Ltree\nfrom Backend.DataBase.IHandler import IHandler\nfrom Backend.DataBase.database import engine, session, mapper_registry\nfrom Backend.Domain.TradingSystem.Interfaces.IDiscount import IDiscount\nfrom Backend.Domain.TradingSystem.TypesPolicies.discounts import MaximumCompositeDiscount, AddCompositeDiscount, \\\n XorCompositeDiscount, AndConditionDiscount, OrConditionDiscount, SimpleDiscount, Discounter\nfrom Backend.response import Response\nfrom Backend.rw_lock import ReadWriteLock\nfrom sqlalchemy import func\n\ndiscounts_id_seq = Sequence('rules_id_seq')\n\n\nclass DiscountsHandler(IHandler):\n _lock = Lock()\n _instance = None\n\n @staticmethod\n def get_instance():\n with DiscountsHandler._lock:\n if DiscountsHandler._instance is None:\n DiscountsHandler._instance = DiscountsHandler()\n return DiscountsHandler._instance\n\n def __init__(self):\n\n super().__init__(ReadWriteLock(), IDiscount)\n\n self.__discounts_table = Table('discounts', mapper_registry.metadata,\n Column('id', Integer, discounts_id_seq, primary_key=True),\n Column('path', LtreeType, nullable=False),\n Column('type', String(50)),\n # Column('context', MutableJson),\n Column('context_obj', String(50)),\n Column('context_id', String(50)),\n Column('condition_id', Integer),\n Column('decision_rule', String(10)),\n Column('conditions_policy_root_id', String(10)),\n Column('discounter_data', MutableJson),\n Index('ix_discounts_path', 'path', postgresql_using='gist'))\n\n mapper_registry.map_imperatively(IDiscount, self.__discounts_table, properties={\n '_id': self.__discounts_table.c.id,\n 'path': self.__discounts_table.c.path,\n 'parent': relationship(\n 'IDiscount',\n primaryjoin=(remote(self.__discounts_table.c.path) == foreign(\n func.subpath(self.__discounts_table.c.path, 0, -1))),\n backref='_children',\n viewonly=True\n ),\n 
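# note (comment added): column_property below maps each table column onto the domain object's underscore-prefixed attribute\n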
\"_conditions_policy_root_id\": column_property(self.__discounts_table.c.conditions_policy_root_id),\n \"_context_obj\": column_property(self.__discounts_table.c.context_obj),\n \"_context_id\": column_property(self.__discounts_table.c.context_id),\n # \"_context\": column_property(self.__discounts_table.c.context),\n \"_condition_id\": column_property(self.__discounts_table.c.condition_id),\n \"_decision_rule\": column_property(self.__discounts_table.c.decision_rule),\n \"_discounter_data\": self.__discounts_table.c.discounter_data,\n }, polymorphic_on=self.__discounts_table.c.type)\n\n mapper_registry.map_imperatively(MaximumCompositeDiscount, self.__discounts_table, inherits=IDiscount,\n polymorphic_identity='MaximumCompositeDiscount')\n\n mapper_registry.map_imperatively(AddCompositeDiscount, self.__discounts_table, inherits=IDiscount,\n polymorphic_identity='AddCompositeDiscount')\n\n mapper_registry.map_imperatively(XorCompositeDiscount, self.__discounts_table, inherits=IDiscount,\n polymorphic_identity='XorCompositeDiscount')\n\n mapper_registry.map_imperatively(AndConditionDiscount, self.__discounts_table, inherits=IDiscount,\n polymorphic_identity='AndConditionDiscount')\n\n mapper_registry.map_imperatively(OrConditionDiscount, self.__discounts_table, inherits=IDiscount,\n polymorphic_identity='OrConditionDiscount')\n\n mapper_registry.map_imperatively(SimpleDiscount, self.__discounts_table, inherits=IDiscount,\n polymorphic_identity='SimpleDiscount')\n\n def remove_rule(self, discount_rule):\n self._rwlock.acquire_write()\n res = Response(True)\n try:\n whole_subtree = session.query(IDiscount).filter(\n IDiscount.path.descendant_of(discount_rule.path)).all()\n session.delete(discount_rule)\n for rule_child in whole_subtree:\n session.delete(rule_child)\n res = Response(True)\n except Exception as e:\n session.rollback()\n res = Response(False, msg=str(e))\n finally:\n self._rwlock.release_write()\n return res\n\n\n def edit_rule(self, old_rule, edited_rule):\n self._rwlock.acquire_write()\n for n in old_rule._children:\n n.parent = edited_rule\n n._clause = None\n n.path = edited_rule.path + n.path[len(old_rule.path):]\n session.flush()\n edited_rule._children.append(n)\n session.flush()\n self._rwlock.release_write()\n self.remove_rule(old_rule)\n self.save(edited_rule)\n return Response(True)\n\n def move_rule(self, discount: IDiscount, new_parent: IDiscount):\n self._rwlock.acquire_write()\n new_path = new_parent.path + Ltree(str(discount._id))\n session.flush()\n for n in discount._children:\n n.path = new_path + n.path[len(discount.path):]\n session.flush()\n discount.path = new_path\n session.flush()\n discount.parent = new_parent\n session.flush()\n new_parent._children.append(discount)\n session.flush()\n self._rwlock.release_write()","repo_name":"RavidRo/TradingSystem","sub_path":"Backend/DataBase/Handlers/discounts_handler.py","file_name":"discounts_handler.py","file_ext":"py","file_size_in_byte":6198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"71765980900","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jan 2 21:51:40 2022\n\n@author: dingxu\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndata = np.loadtxt('datamag.txt')\n\nphase = data[:,0]\nmag = data[:,1]\n\n#plt.plot(phase, mag, '.')\n\nplt.figure(0)\nax1 = plt.gca()\nax1.plot(phase, mag, 
 '.')\nplt.xlabel('phase',fontsize=18)\nplt.ylabel('mag',fontsize=18)\n#plt.text(0.5,0.3,'Period='+str(np.round(1*period,3)),fontsize=18)\nax1.yaxis.set_ticks_position('left')  # keep the y-axis ticks on the left\nax1.invert_yaxis()  # invert the y-axis (smaller magnitude = brighter)\n\nnoise = np.random.normal(0.0, 0.06, 200)\nnoisy = mag + noise\n\n\nplt.figure(1)\nax2 = plt.gca()\nax2.plot(phase, noisy, '.')\nplt.xlabel('phase',fontsize=18)\nplt.ylabel('mag',fontsize=18)\n#plt.text(0.5,0.3,'Period='+str(np.round(1*period,3)),fontsize=18)\nax2.yaxis.set_ticks_position('left')  # keep the y-axis ticks on the left\nax2.invert_yaxis()  # invert the y-axis\n\ns = np.diff(noisy,2).std()/np.sqrt(6)\nprint(s)\nprint(np.std(noisy)/s)","repo_name":"dingxu6207/fenleicode","sub_path":"ztfcodefft/TESSSN/detectvariable.py","file_name":"detectvariable.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"21905058044","text":"import FWCore.ParameterSet.Config as cms\n\nmuonOnMuon = cms.EDProducer(\n    \"TPMuonOnMuon\",\n    enable = cms.bool(True),\n    #find the leptons that fail the ID cuts\n    topCollection = cms.InputTag('susyMuon'),\n    bottomCollection = cms.InputTag('cmgMuonSel'),\n    #\n    name = cms.untracked.string('muonOnMuon'),\n    verbose = cms.untracked.bool(False)\n)\n\n","repo_name":"anantoni/CMG","sub_path":"CMGTools/Susy/python/topprojections/muonprojector_cff.py","file_name":"muonprojector_cff.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"32684574073","text":"from __future__ import annotations\n\nfrom unittest import mock\nfrom unittest.mock import Mock\n\nimport pytest\n\nimport airflow\nfrom airflow.exceptions import AirflowException\nfrom airflow.models import DAG, DagRun, TaskInstance\nfrom airflow.operators.empty import EmptyOperator\nfrom airflow.operators.subdag import SkippedStatePropagationOptions, SubDagOperator\nfrom airflow.utils.session import create_session\nfrom airflow.utils.state import State\nfrom airflow.utils.timezone import datetime\nfrom airflow.utils.types import DagRunType\nfrom tests.test_utils.db import clear_db_runs\n\nDEFAULT_DATE = datetime(2016, 1, 1)\n\ndefault_args = {\"start_date\": DEFAULT_DATE}\n\n\nclass TestSubDagOperator:\n    def setup_method(self):\n        clear_db_runs()\n        self.dag_run_running = DagRun()\n        self.dag_run_running.state = State.RUNNING\n        self.dag_run_success = DagRun()\n        self.dag_run_success.state = State.SUCCESS\n        self.dag_run_failed = DagRun()\n        self.dag_run_failed.state = State.FAILED\n\n    def teardown_class(self):\n        clear_db_runs()\n\n    def test_subdag_name(self):\n        \"\"\"\n        Subdag names must be {parent_dag}.{subdag task}\n        \"\"\"\n        dag = DAG(\"parent\", default_args=default_args)\n        subdag_good = DAG(\"parent.test\", default_args=default_args)\n        subdag_bad1 = DAG(\"parent.bad\", default_args=default_args)\n        subdag_bad2 = DAG(\"bad.test\", default_args=default_args)\n        subdag_bad3 = DAG(\"bad.bad\", default_args=default_args)\n\n        SubDagOperator(task_id=\"test\", dag=dag, subdag=subdag_good)\n        with pytest.raises(AirflowException):\n            SubDagOperator(task_id=\"test\", dag=dag, subdag=subdag_bad1)\n        with pytest.raises(AirflowException):\n            SubDagOperator(task_id=\"test\", dag=dag, subdag=subdag_bad2)\n        with pytest.raises(AirflowException):\n            SubDagOperator(task_id=\"test\", dag=dag, subdag=subdag_bad3)\n\n    def test_subdag_in_context_manager(self):\n        \"\"\"\n        Creating a sub DAG within a main DAG's context manager\n        \"\"\"\n        with DAG(\"parent\",
default_args=default_args) as dag:\n subdag = DAG(\"parent.test\", default_args=default_args)\n op = SubDagOperator(task_id=\"test\", subdag=subdag)\n\n assert op.dag == dag\n assert op.subdag == subdag\n\n def test_subdag_pools(self):\n \"\"\"\n Subdags and subdag tasks can't both have a pool with 1 slot\n \"\"\"\n dag = DAG(\"parent\", default_args=default_args)\n subdag = DAG(\"parent.child\", default_args=default_args)\n\n session = airflow.settings.Session()\n pool_1 = airflow.models.Pool(pool=\"test_pool_1\", slots=1, include_deferred=False)\n pool_10 = airflow.models.Pool(pool=\"test_pool_10\", slots=10, include_deferred=False)\n session.add(pool_1)\n session.add(pool_10)\n session.commit()\n\n EmptyOperator(task_id=\"dummy\", dag=subdag, pool=\"test_pool_1\")\n\n with pytest.raises(AirflowException):\n SubDagOperator(task_id=\"child\", dag=dag, subdag=subdag, pool=\"test_pool_1\")\n\n # recreate dag because failed subdagoperator was already added\n dag = DAG(\"parent\", default_args=default_args)\n SubDagOperator(task_id=\"child\", dag=dag, subdag=subdag, pool=\"test_pool_10\")\n\n session.delete(pool_1)\n session.delete(pool_10)\n session.commit()\n\n def test_subdag_pools_no_possible_conflict(self):\n \"\"\"\n Subdags and subdag tasks with no pool overlap, should not to query\n pools\n \"\"\"\n dag = DAG(\"parent\", default_args=default_args)\n subdag = DAG(\"parent.child\", default_args=default_args)\n\n session = airflow.settings.Session()\n pool_1 = airflow.models.Pool(pool=\"test_pool_1\", slots=1, include_deferred=False)\n pool_10 = airflow.models.Pool(pool=\"test_pool_10\", slots=10, include_deferred=False)\n session.add(pool_1)\n session.add(pool_10)\n session.commit()\n\n EmptyOperator(task_id=\"dummy\", dag=subdag, pool=\"test_pool_10\")\n\n mock_session = Mock()\n SubDagOperator(task_id=\"child\", dag=dag, subdag=subdag, pool=\"test_pool_1\", session=mock_session)\n assert not mock_session.query.called\n\n session.delete(pool_1)\n session.delete(pool_10)\n session.commit()\n\n def test_execute_create_dagrun_wait_until_success(self):\n \"\"\"\n When SubDagOperator executes, it creates a DagRun if there is no existing one\n and wait until the DagRun succeeds.\n \"\"\"\n dag = DAG(\"parent\", default_args=default_args)\n subdag = DAG(\"parent.test\", default_args=default_args)\n subdag_task = SubDagOperator(task_id=\"test\", subdag=subdag, dag=dag, poke_interval=1)\n\n subdag.create_dagrun = Mock()\n subdag.create_dagrun.return_value = self.dag_run_running\n\n subdag_task._get_dagrun = Mock()\n subdag_task._get_dagrun.side_effect = [None, self.dag_run_success, self.dag_run_success]\n\n context = {\n \"data_interval_start\": None,\n \"data_interval_end\": None,\n \"execution_date\": DEFAULT_DATE,\n }\n\n subdag_task.pre_execute(context=context)\n subdag_task.execute(context=context)\n subdag_task.post_execute(context=context)\n\n subdag.create_dagrun.assert_called_once_with(\n run_type=DagRunType.SCHEDULED,\n execution_date=DEFAULT_DATE,\n data_interval=None,\n conf=None,\n state=State.RUNNING,\n external_trigger=True,\n )\n\n assert 3 == subdag_task._get_dagrun.call_count\n\n def test_execute_create_dagrun_with_conf(self):\n \"\"\"\n When SubDagOperator executes, it creates a DagRun if there is no existing one\n and wait until the DagRun succeeds.\n \"\"\"\n conf = {\"key\": \"value\"}\n dag = DAG(\"parent\", default_args=default_args)\n subdag = DAG(\"parent.test\", default_args=default_args)\n subdag_task = SubDagOperator(task_id=\"test\", subdag=subdag, dag=dag, 
poke_interval=1, conf=conf)\n\n        subdag.create_dagrun = Mock()\n        subdag.create_dagrun.return_value = self.dag_run_running\n\n        subdag_task._get_dagrun = Mock()\n        subdag_task._get_dagrun.side_effect = [None, self.dag_run_success, self.dag_run_success]\n\n        context = {\n            \"data_interval_start\": None,\n            \"data_interval_end\": None,\n            \"execution_date\": DEFAULT_DATE,\n        }\n\n        subdag_task.pre_execute(context=context)\n        subdag_task.execute(context=context)\n        subdag_task.post_execute(context=context)\n\n        subdag.create_dagrun.assert_called_once_with(\n            run_type=DagRunType.SCHEDULED,\n            execution_date=DEFAULT_DATE,\n            data_interval=None,\n            conf=conf,\n            state=State.RUNNING,\n            external_trigger=True,\n        )\n\n        assert 3 == subdag_task._get_dagrun.call_count\n\n    def test_execute_dagrun_failed(self):\n        \"\"\"\n        When the DagRun fails during execution, an AirflowException is raised.\n        \"\"\"\n        dag = DAG(\"parent\", default_args=default_args)\n        subdag = DAG(\"parent.test\", default_args=default_args)\n        subdag_task = SubDagOperator(task_id=\"test\", subdag=subdag, dag=dag, poke_interval=1)\n\n        subdag.create_dagrun = Mock()\n        subdag.create_dagrun.return_value = self.dag_run_running\n\n        subdag_task._get_dagrun = Mock()\n        subdag_task._get_dagrun.side_effect = [None, self.dag_run_failed, self.dag_run_failed]\n\n        context = {\n            \"data_interval_start\": None,\n            \"data_interval_end\": None,\n            \"execution_date\": DEFAULT_DATE,\n        }\n\n        with pytest.raises(AirflowException):\n            subdag_task.pre_execute(context=context)\n            subdag_task.execute(context=context)\n            subdag_task.post_execute(context=context)\n\n    def test_execute_skip_if_dagrun_success(self):\n        \"\"\"\n        When there is an existing DagRun in SUCCESS state, skip the execution.\n        \"\"\"\n        dag = DAG(\"parent\", default_args=default_args)\n        subdag = DAG(\"parent.test\", default_args=default_args)\n\n        subdag.create_dagrun = Mock()\n        subdag_task = SubDagOperator(task_id=\"test\", subdag=subdag, dag=dag, poke_interval=1)\n        subdag_task._get_dagrun = Mock()\n        subdag_task._get_dagrun.return_value = self.dag_run_success\n\n        context = {\n            \"data_interval_start\": None,\n            \"data_interval_end\": None,\n            \"execution_date\": DEFAULT_DATE,\n        }\n\n        subdag_task.pre_execute(context=context)\n        subdag_task.execute(context=context)\n        subdag_task.post_execute(context=context)\n\n        subdag.create_dagrun.assert_not_called()\n        assert 3 == subdag_task._get_dagrun.call_count\n\n    def test_rerun_failed_subdag(self, dag_maker):\n        \"\"\"\n        When there is an existing DagRun with failed state, reset the DagRun and the\n        corresponding TaskInstances\n        \"\"\"\n        with create_session() as session:\n            with dag_maker(\"parent.test\", default_args=default_args, session=session) as subdag:\n                dummy_task = EmptyOperator(task_id=\"dummy\")\n            sub_dagrun = dag_maker.create_dagrun(\n                run_type=DagRunType.SCHEDULED,\n                execution_date=DEFAULT_DATE,\n                state=State.FAILED,\n                external_trigger=True,\n            )\n\n            (dummy_task_instance,) = sub_dagrun.task_instances\n            dummy_task_instance.refresh_from_task(dummy_task)\n            assert dummy_task_instance.state == State.FAILED\n\n            with dag_maker(\"parent\", default_args=default_args, session=session):\n                subdag_task = SubDagOperator(task_id=\"test\", subdag=subdag, poke_interval=1)\n            dag_maker.create_dagrun(execution_date=DEFAULT_DATE, run_type=DagRunType.SCHEDULED)\n\n            subdag_task._reset_dag_run_and_task_instances(sub_dagrun, execution_date=DEFAULT_DATE)\n\n            dummy_task_instance.refresh_from_db()\n            assert dummy_task_instance.state == State.NONE\n\n            sub_dagrun.refresh_from_db()\n            assert sub_dagrun.state ==
State.RUNNING\n\n @pytest.mark.parametrize(\n \"propagate_option, states, skip_parent\",\n [\n (SkippedStatePropagationOptions.ALL_LEAVES, [State.SKIPPED, State.SKIPPED], True),\n (SkippedStatePropagationOptions.ALL_LEAVES, [State.SKIPPED, State.SUCCESS], False),\n (SkippedStatePropagationOptions.ANY_LEAF, [State.SKIPPED, State.SUCCESS], True),\n (SkippedStatePropagationOptions.ANY_LEAF, [State.FAILED, State.SKIPPED], True),\n (None, [State.SKIPPED, State.SKIPPED], False),\n ],\n )\n @mock.patch(\"airflow.operators.subdag.SubDagOperator.skip\")\n @mock.patch(\"airflow.operators.subdag.get_task_instance\")\n def test_subdag_with_propagate_skipped_state(\n self,\n mock_get_task_instance,\n mock_skip,\n dag_maker,\n propagate_option,\n states,\n skip_parent,\n ):\n \"\"\"\n Tests that skipped state of leaf tasks propagates to the parent dag.\n Note that the skipped state propagation only takes affect when the dagrun's state is SUCCESS.\n \"\"\"\n with dag_maker(\"parent.test\", default_args=default_args) as subdag:\n dummy_subdag_tasks = [EmptyOperator(task_id=f\"dummy_subdag_{i}\") for i in range(len(states))]\n dag_maker.create_dagrun(execution_date=DEFAULT_DATE)\n\n with dag_maker(\"parent\", default_args=default_args):\n subdag_task = SubDagOperator(\n task_id=\"test\",\n subdag=subdag,\n poke_interval=1,\n propagate_skipped_state=propagate_option,\n )\n dummy_dag_task = EmptyOperator(task_id=\"dummy_dag\")\n subdag_task >> dummy_dag_task\n dag_run = dag_maker.create_dagrun(execution_date=DEFAULT_DATE)\n\n subdag_task._get_dagrun = Mock(return_value=self.dag_run_success)\n\n mock_get_task_instance.side_effect = [\n TaskInstance(task=task, run_id=dag_run.run_id, state=state)\n for task, state in zip(dummy_subdag_tasks, states)\n ]\n\n context = {\n \"execution_date\": DEFAULT_DATE,\n \"dag_run\": dag_run,\n \"task\": subdag_task,\n \"ti\": mock.MagicMock(map_index=-1),\n }\n subdag_task.post_execute(context)\n\n if skip_parent:\n mock_skip.assert_called_once_with(\n context[\"dag_run\"], context[\"execution_date\"], [dummy_dag_task], map_index=-1\n )\n else:\n mock_skip.assert_not_called()\n\n def test_deprecation_warning(self):\n dag = DAG(\"parent\", default_args=default_args)\n subdag = DAG(\"parent.test\", default_args=default_args)\n warning_message = \"\"\"This class is deprecated. 
Please use `airflow.utils.task_group.TaskGroup`.\"\"\"\n\n        with pytest.warns(DeprecationWarning) as warnings:\n            SubDagOperator(task_id=\"test\", subdag=subdag, dag=dag)\n        assert warning_message == str(warnings[0].message)\n","repo_name":"a0x8o/airflow","sub_path":"tests/operators/test_subdag_operator.py","file_name":"test_subdag_operator.py","file_ext":"py","file_size_in_byte":13158,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"}
+{"seq_id":"17155190662","text":"from hypothesis import given\nfrom hypothesis import strategies as s\n\nfrom beancount_hypothesis import account\n\n\n@given(\n    s.tuples(\n        s.integers(min_value=1, max_value=10),\n        s.integers(min_value=1, max_value=10),\n    )\n)\ndef test_generate(mins: tuple[int, int]):\n    gen = account.AccountGenerator(\n        min_leaves=mins[0],\n        max_leaves=mins[0] + 3,\n        min_nodes=mins[1],\n        max_nodes=mins[1] + 3,\n    )\n\n    roots = []\n    for acct in gen.generate():\n        roots.append(acct.split(\":\")[0])\n        assert len(acct.split(\":\")) <= mins[0] + 3\n\n    assert len(set(roots)) <= mins[1] + 3\n\n\ndef test_account_name():\n    result = account.account_name().example()\n    assert len(result.split(\":\")) == 3\n","repo_name":"jmgilman/beancount-hypothesis","sub_path":"tests/test_account.py","file_name":"test_account.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"20911076142","text":"# a = input(\"Enter something: \")\n# b = input(\"Enter something: \")\n\n# print(len(a+b))\n\n#hahaa\n\n# tuples: elements are accessed by index, numbered from 0\n\n\na = (1,2,3,4,\"haha\",\"hehe\",True,False)\n\n# print(a[4])\n# print(a.index(\"haha\"))\n\n# two-dimensional (nested) tuple\nb = (a,\"haha\",\"hehe\")\nprint(b[0][3])\n\n# slicing\n\nprint(a[0:4])\n\n# a tuple cannot be modified once it is defined, whereas a list can\n\nc = [1,2,3,4,\"haha\",\"hehe\",True,False]\n\nc.append(\"Ultraman\")\nprint(c)\n\nc.insert(0,\"Zero\")\nprint(c)\n\n# cut (remove an element by index)\nc.pop(5)\nprint(c)\n\n# clear() empties the list\n# extend()\n\n\"\"\"\nAll methods end with parentheses, e.g. print(), input(), len()\nIndexing into tuples, lists and dicts uses square brackets, e.g. a[0]\nTuples, lists and dicts are defined with (), [] and {} respectively\n\"\"\"\nd = {\"name\":\"Zhang San\",0:\"haha\",\"age\":25}\n\n# add a new key\nd[\"high\"] = \"183cm\"\nprint(d)\n# modify a value\nd[\"name\"] = \"Li Si\"\nprint(d)\n# get() retrieves a value\n\n# update() modifies entries\nd.update(name=\"Wang Wu\")\nprint(d)\n\n\n'''\nExercise\nCollect the user's personal information and store it in a dictionary.\nThe personal information includes name, age and sex.\n'''\n\n\naa = input(\"Enter name: \")\nbb = input(\"Enter age: \")\ncc = input(\"Enter sex: \")\n\np = {\"name\":\"haha\",\"age\":25,\"sex\":\"\"}\n\np.update(name=aa)\np.update(age=bb)\np.update(sex=cc)\nprint(p)\n\n\n","repo_name":"zjbtsy0122/chaiquan","sub_path":"demo01.py","file_name":"demo01.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"23592963106","text":"from threading import Thread\n\nimport requests\nfrom bs4 import BeautifulSoup\n\n\nclass Snapdeal(Thread):\n    def __init__(self, query):\n        super(Snapdeal, self).__init__()\n        self.query = query\n        self.items = []\n\n    def check(self, element):\n        return '' if element is None else element\n\n    def parse_product(self, product):\n        data = {'author': '',\n                'title': '',\n                'offer_link': '',\n                'link': '',\n                'price': None,\n                'ISBN': '',\n                'provider': 'https://logos-download.com/wp-content/uploads/2016/10/SnapDeal_logo_Snap_Deal.png'}\n\n        if self.check(product.find('p', {\"class\": \"product-title\"})):\n            data['title'] = product.find('p', {\"class\": \"product-title\"}).text\n        if self.check(product.find('a', {\"class\": \"dp-widget-link\"})):\n            data['link'] = product.find('a', {\"class\": \"dp-widget-link\"})['href']\n        if
self.check(product.find('span', {\"class\": \"lfloat product-price\"})):\n data['price'] = float(product.find('span', {\"class\": \"lfloat product-price\"})['display-price'])\n if self.check(product.find(\"p\", {\"class\": \"product-author-name\"})):\n data['author'] = product.find(\"p\", {\"class\": \"product-author-name\"}).text\n if self.check(product.find('img', {\"class\": \"product-image\"})):\n try:\n data['image'] = product.find('img', {\"class\": \"product-image\"})['src']\n except KeyError:\n data['image'] = product.find('img', {\"class\": \"product-image\"})['data-src']\n return data\n\n def run(self):\n url = f'https://www.snapdeal.com/products/books?sort=rlvncy&keyword={self.query}'\n r = requests.get(url)\n soup = BeautifulSoup(r.text, 'html.parser')\n products = soup.findAll(\n \"div\", {\"class\": \"favDp\"})\n if len(products) <= 0:\n return None\n for product in products:\n self.items.append(self.parse_product(product))","repo_name":"vshelke/bookpro","sub_path":"bookpro/find_books/integrations/snapdeal.py","file_name":"snapdeal.py","file_ext":"py","file_size_in_byte":2018,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"35"} +{"seq_id":"3402019562","text":"from testtools import TestCase\nfrom mock import patch, call, MagicMock\n\nimport charmhelpers.core as ch_core\nimport charmhelpers.contrib.openstack.ip as ip\n\nTO_PATCH = [\n 'config',\n 'unit_get',\n 'get_address_in_network',\n 'is_clustered',\n 'service_name',\n 'network_get_primary_address',\n 'resolve_network_cidr',\n 'get_iface_for_address',\n]\n\n\nclass TestConfig():\n\n def __init__(self):\n self.config = {}\n\n def set(self, key, value):\n self.config[key] = value\n\n def get(self, key):\n return self.config.get(key)\n\n\nclass IPTestCase(TestCase):\n\n def setUp(self):\n super(IPTestCase, self).setUp()\n for m in TO_PATCH:\n setattr(self, m, self._patch(m))\n self.test_config = TestConfig()\n self.config.side_effect = self.test_config.get\n self.network_get_primary_address.side_effect = [\n NotImplementedError,\n ch_core.hookenv.NoNetworkBinding,\n ]\n\n def _patch(self, method):\n _m = patch('charmhelpers.contrib.openstack.ip.' 
+ method)\n mock = _m.start()\n self.addCleanup(_m.stop)\n return mock\n\n def test_get_invalid_vips_valid_ip(self):\n self.is_clustered.return_value = True\n self.test_config.set('vip', '10.5.3.200')\n self.get_iface_for_address.return_value = 'ens3'\n self.assertEquals(ip.get_invalid_vips(), [])\n\n def test_get_invalid_vips_invalid_ip(self):\n self.is_clustered.return_value = True\n self.test_config.set('vip', '10.3.2.50')\n self.get_iface_for_address.return_value = None\n self.assertEquals(ip.get_invalid_vips(), ['10.3.2.50'])\n\n def test_get_invalid_vips_no_vip(self):\n self.is_clustered.return_value = True\n self.test_config.set('vip', '')\n self.assertEquals(ip.get_invalid_vips(), [])\n\n def test_get_invalid_vips_mixed(self):\n self.is_clustered.return_value = True\n self.test_config.set('vip', '10.3.1.100 10.5.3.200 2.3.5.6')\n self.get_iface_for_address.side_effect = [None, 'ens3', None]\n self.assertEquals(ip.get_invalid_vips(), ['10.3.1.100', '2.3.5.6'])\n\n def test_resolve_address_default(self):\n self.is_clustered.return_value = False\n self.unit_get.return_value = 'unit1'\n self.get_address_in_network.return_value = 'unit1'\n self.assertEquals(ip.resolve_address(), 'unit1')\n self.unit_get.assert_called_with('public-address')\n calls = [call('os-public-network'),\n call('prefer-ipv6')]\n self.config.assert_has_calls(calls)\n\n def test_resolve_address_default_internal(self):\n self.is_clustered.return_value = False\n self.unit_get.return_value = 'unit1'\n self.get_address_in_network.return_value = 'unit1'\n self.assertEquals(ip.resolve_address(ip.INTERNAL), 'unit1')\n self.unit_get.assert_called_with('private-address')\n calls = [call('os-internal-network'),\n call('prefer-ipv6')]\n self.config.assert_has_calls(calls)\n\n def test_resolve_address_public_not_clustered(self):\n self.is_clustered.return_value = False\n self.test_config.set('os-public-network', '192.168.20.0/24')\n self.unit_get.return_value = 'unit1'\n self.get_address_in_network.return_value = '192.168.20.1'\n self.assertEquals(ip.resolve_address(), '192.168.20.1')\n self.unit_get.assert_called_with('public-address')\n calls = [call('os-public-network'),\n call('prefer-ipv6')]\n self.config.assert_has_calls(calls)\n self.get_address_in_network.assert_called_with(\n '192.168.20.0/24',\n 'unit1')\n\n def test_resolve_address_public_clustered(self):\n self.is_clustered.return_value = True\n self.test_config.set('os-public-network', '192.168.20.0/24')\n self.test_config.set('vip', '192.168.20.100 10.5.3.1')\n self.assertEquals(ip.resolve_address(), '192.168.20.100')\n\n def test_resolve_address_default_clustered(self):\n self.is_clustered.return_value = True\n self.test_config.set('vip', '10.5.3.1')\n self.assertEquals(ip.resolve_address(), '10.5.3.1')\n self.config.assert_has_calls(\n [call('vip'),\n call('os-public-network')])\n\n def test_resolve_address_public_clustered_inresolvable(self):\n self.is_clustered.return_value = True\n self.test_config.set('os-public-network', '192.168.20.0/24')\n self.test_config.set('vip', '10.5.3.1')\n self.assertRaises(ValueError, ip.resolve_address)\n\n def test_resolve_address_override(self):\n self.test_config.set('os-public-hostname', 'public.example.com')\n addr = ip.resolve_address()\n self.assertEqual('public.example.com', addr)\n\n @patch.object(ip, '_get_address_override')\n def test_resolve_address_no_override(self, _get_address_override):\n self.test_config.set('os-public-hostname', 'public.example.com')\n self.unit_get.return_value = '10.0.0.1'\n addr = 
ip.resolve_address(override=False)\n        self.assertFalse(_get_address_override.called)\n        self.assertEqual('10.0.0.1', addr)\n\n    def test_resolve_address_override_template(self):\n        self.test_config.set('os-public-hostname',\n                             '{service_name}.example.com')\n        self.service_name.return_value = 'foo'\n        addr = ip.resolve_address()\n        self.assertEqual('foo.example.com', addr)\n\n    @patch.object(ip, 'get_ipv6_addr', lambda *args, **kwargs: ['::1'])\n    def test_resolve_address_ipv6_fallback(self):\n        self.test_config.set('prefer-ipv6', True)\n        self.is_clustered.return_value = False\n        self.assertEqual(ip.resolve_address(), '::1')\n\n    @patch.object(ip, 'resolve_address')\n    def test_canonical_url_http(self, resolve_address):\n        resolve_address.return_value = 'unit1'\n        configs = MagicMock()\n        configs.complete_contexts.return_value = []\n        self.assertEqual(ip.canonical_url(configs),\n                         'http://unit1')\n\n    @patch.object(ip, 'resolve_address')\n    def test_canonical_url_https(self, resolve_address):\n        resolve_address.return_value = 'unit1'\n        configs = MagicMock()\n        configs.complete_contexts.return_value = ['https']\n        self.assertEqual(ip.canonical_url(configs),\n                         'https://unit1')\n\n    @patch.object(ip, 'is_ipv6', lambda *args: True)\n    @patch.object(ip, 'resolve_address')\n    def test_canonical_url_ipv6(self, resolve_address):\n        resolve_address.return_value = 'unit1'\n        self.assertEqual(ip.canonical_url(None), 'http://[unit1]')\n\n    @patch.object(ip, 'local_address')\n    def test_resolve_address_network_get(self, local_address):\n        self.is_clustered.return_value = False\n        self.unit_get.return_value = 'unit1'\n        self.network_get_primary_address.side_effect = None\n        self.network_get_primary_address.return_value = '10.5.60.1'\n        self.assertEqual(ip.resolve_address(), '10.5.60.1')\n        local_address.assert_called_once_with(\n            unit_get_fallback='public-address')\n        calls = [call('os-public-network'),\n                 call('prefer-ipv6')]\n        self.config.assert_has_calls(calls)\n        self.network_get_primary_address.assert_called_with('public')\n\n    def test_resolve_address_network_get_clustered(self):\n        self.is_clustered.return_value = True\n        self.test_config.set('vip', '10.5.60.20 192.168.1.20')\n        self.network_get_primary_address.side_effect = None\n        self.network_get_primary_address.return_value = '10.5.60.1'\n        self.resolve_network_cidr.return_value = '10.5.60.1/24'\n        self.assertEqual(ip.resolve_address(), '10.5.60.20')\n        calls = [call('os-public-hostname'),\n                 call('vip'),\n                 call('os-public-network')]\n        self.config.assert_has_calls(calls)\n        self.network_get_primary_address.assert_called_with('public')\n","repo_name":"juju/charm-helpers","sub_path":"tests/contrib/openstack/test_ip.py","file_name":"test_ip.py","file_ext":"py","file_size_in_byte":7993,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"35"} +{"seq_id":"30548377861","text":"import numpy as np\nfrom astropy import units as u\nfrom astropy.coordinates import SkyCoord\nfrom astropy import coordinates as coords\nfrom astropy.wcs import WCS\nimport pandas as pd\n\n##x/y positions (requires subtile info)\n###need to create WCS info for each image - WCS(dict of series)\ndef yg_recover_xy(cdata, imdat, acol='RA', dcol='DEC', apcol='RA_max', dpcol='DEC_max',\n                  eacol='E_RA', edcol='E_DEC', eapcol='E_RA_max', edpcol='E_DEC_max'):\n    ##make colnames generic within function\n    ###recover X/Yposn from image header info\n    #fstart_time = time.time()\n    \n    #cunit, ctype, cdelt, crval, crpix, naxis\n    ###values that are constant for ALL QL images\n    nax = 3722\n    cta = 
'RA---SIN'\n    ctd = 'DEC--SIN'\n    cu = 'deg'\n    crp = 1861.0\n    cda = -0.0002777777777778\n    cdd = 0.0002777777777778\n    \n    ###create a list of WCS transforms\n    ###create a base 2D WCS for each image inside the loop\n    wcslist = []\n    for i in range(len(imdat)):\n        aref, dref = imdat.CRVAL1.iloc[i], imdat.CRVAL2.iloc[i]\n        ql_wcs = WCS({'NAXIS':2, 'NAXIS1':nax, 'NAXIS2':nax})\n        ql_wcs.wcs.ctype = [cta, ctd]\n        ql_wcs.wcs.cunit = [cu, cu]\n        ql_wcs.wcs.crpix = [crp, crp]\n        ql_wcs.wcs.crval = [aref, dref]\n        ql_wcs.wcs.cdelt = [cda, cdd]\n        wcslist.append(ql_wcs)\n    \n    ###obtain index of subtile in imdat to determine wcs to use for row in catalogue\n    stlist = list(imdat['Subtile'])\n\n    ##need to loop through cdata and append x/y coords to list\n    cpos_cat = SkyCoord(ra=np.array(cdata[acol]), dec=np.array(cdata[dcol]), unit='deg')\n    mpos_cat = SkyCoord(ra=np.array(cdata[apcol]), dec=np.array(cdata[dpcol]), unit='deg')\n    xpos, ypos, xpmax, ypmax = [], [], [], []\n    for i in range(len(cdata)):\n        skypos = cpos_cat[i]\n        maxpos = mpos_cat[i]\n        stile = cdata.iloc[i]['Subtile']\n        sti = stlist.index(stile)\n        pxcoords = wcslist[sti].world_to_pixel(skypos)\n        pxcoords_max = wcslist[sti].world_to_pixel(maxpos)\n        \n        xpos.append(float(pxcoords[0]))\n        ypos.append(float(pxcoords[1]))\n\n        xpmax.append(float(pxcoords_max[0]))\n        ypmax.append(float(pxcoords_max[1]))\n\n    ##add x/y columns to cdata\n    ###errors in px coords estimate via error in pos/|cdelt|\n    ###e.g. E_Xposn = E_RA/|CDELT1|\n    cdata = cdata.assign(Xposn = xpos)\n    cdata = cdata.assign(E_Xposn = np.array(cdata[eacol])/cdd)\n    cdata = cdata.assign(Yposn = ypos)\n    cdata = cdata.assign(E_Yposn = np.array(cdata[edcol])/cdd)\n\n    cdata = cdata.assign(Xposn_max = xpmax)\n    cdata = cdata.assign(E_Xposn_max = np.array(cdata[eapcol])/cdd)\n    cdata = cdata.assign(Yposn_max = ypmax)\n    cdata = cdata.assign(E_Yposn_max = np.array(cdata[edpcol])/cdd)\n    \n    return(cdata)\n\ndef xy_positions(vlad_table,meta,metrics,info_table):\n    df = vlad_table.to_pandas().copy()\n    datum = yg_recover_xy(df,info_table.to_pandas())\n    fields = ['Xposn','E_Xposn','Yposn','E_Yposn','Xposn_max','E_Xposn_max','Yposn_max','E_Yposn_max']\n    xy_pos = {f:list(datum[f].copy()*u.pixel) for f in fields}\n    return xy_pos\n\ndef yg_find_duplicates(df, acol='RA', dcol='DEC', pos_err=2*u.arcsec):\n    ###find duplicates and flag\n\n    ###create SN column to sort by - may replace with q_flag later\n    df['SN'] = df['Peak_flux']/df['Isl_rms']\n    \n    #2) sort by SN/qflag, subset dist<2\"\n    #df = df.sort_values(by='SN', ascending=False).reset_index(drop=True)\n    df = df.sort_values(by='SN', ascending=False).reset_index(drop=False)\n\n    #####DON'T subset duplicates!\n    ###run search_around_sky on the entire (sorted) catalogue and use the index\n    dfpos = SkyCoord(ra=np.array(df[acol]), dec=np.array(df[dcol]), unit='deg')\n    dsearch = dfpos.search_around_sky(dfpos, seplimit=pos_err)\n    \n    ###create dataframe for easy manipulation - not actually necessary, just cleaner\n    dsdf = pd.DataFrame({'ix1': dsearch[0], 'ix2': dsearch[1], 'ix3': dsearch[2].arcsec})\n    \n    ###subset to ix1 != ix2 - reduces 4M to 500k\n    dsdf = dsdf[(dsdf['ix1']!=dsdf['ix2'])].reset_index(drop=True)\n    \n    ###is index of preferred components where first instance in ix1 occurs before ix2?
- I think so\n    ix1, ix2 = list(dsdf['ix1']), list(dsdf['ix2'])\n    prefcomp = [i for i in ix1 if ix1.index(i) < ix2.index(i)] ##this takes a while\n    \n    ###use pref comp to filter dup array and reflag\n    dupflag = np.zeros(len(df)) ##all set to zero\n    dupflag[np.unique(ix1)] = 2 ##flags all duplicates\n    dupflag[prefcomp] = 1 ##reflags preferred duplicates\n    \n    df['Duplicate_flag'] = dupflag\n    \n    ###re-sort to old index and drop column 'index'\n    df = df.sort_values(by='index').drop('index', axis=1).reset_index(drop=True)\n    \n    return(df)\n\ndef find_duplicates(vlad_table,meta,metrics,info_table):\n    df = vlad_table.to_pandas().copy()\n    position_error = metrics['duplicate_flagging']['duplicate_search']['radius']\n    position_error *= eval(f\"u.{metrics['duplicate_flagging']['duplicate_search']['units']}\")\n    duplicate_flags = list(yg_find_duplicates(df,pos_err=position_error)['Duplicate_flag'].copy())\n    return duplicate_flags\n\ndef yg_q_flag(df, snmin=5, prmax=2, prdist=20):\n    ###Q_flag\n    ##3 fold:\n    ## 1) Tot < Peak (GFIT)\n    ## 2) Peak < 5*Isl_rms (SN)\n    ## 3) dNN >= 20\" && Peak_to_ring < 2 (PR)\n    \n    ###combine into a single flag value via binary bit addition\n    ## PR = 1; SN = 2; GFIT = 4\n    ## weights GFIT highest, then SN, then P2R\n    gfit, sn, pr = np.zeros(len(df)), np.zeros(len(df)), np.zeros(len(df))\n    \n    ###necessary column arrays\n    stot = np.array(df['Total_flux'])\n    speak = np.array(df['Peak_flux'])\n    rms = np.array(df['Isl_rms'])\n    dnn = np.array(df['NN_dist'])\n    ptr = np.array(df['Peak_to_ring'])\n    \n    ###flag individual criteria\n    gfit[(speak > stot)] = 4\n    sn[(speak < snmin*rms)] = 2\n    pr[(dnn>=prdist) & (ptr<prmax)] = 1\n\n        if self.conf.options.verbosity >= 2:\n            stream.write('-' * 70)\n            stream.write(\n                '\\nSaving Puppy trace to {}\\n'.format(\n                    self.conf.options.puppy_file))\n\n        if self.conf.options.verbosity >= 3:\n            stream.write(\n                'Traced functions: {}\\n'.format(\n                    ' '.join(self.tracer.fndb.keys())))\n\n        with open(self.conf.options.puppy_file, 'w') as fd:\n            self.tracer.dump(fd)\n\n        if self.conf.options.puppy_annotate:\n            if self.conf.options.verbosity >= 2:\n                stream.write(\n                    'Annotating source files. 
'\n 'Use puppy-deannotate to remove annotations\\n')\n self.puppyparachute.annotate.annotate_all(self.tracer.freeze())\n","repo_name":"naure/PuppyParachute","sub_path":"puppyparachute/puppynose.py","file_name":"puppynose.py","file_ext":"py","file_size_in_byte":3287,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"30394383307","text":"\"\"\"\nUtility functions and object abstractions for general interactions\nwith BugTrackers\n\"\"\"\nimport asyncio\nimport itertools\nimport re\nimport urllib.parse\nimport xmlrpc.client\nimport bugzilla\nimport click\nimport os\nimport requests\nfrom requests_gssapi import HTTPSPNEGOAuth\nfrom datetime import datetime, timezone\nfrom time import sleep\nfrom typing import Dict, Iterable, List, Optional\nfrom jira import JIRA, Issue\nfrom errata_tool import Erratum\nfrom errata_tool.jira_issue import JiraIssue as ErrataJira\nfrom errata_tool.bug import Bug as ErrataBug\nfrom bugzilla.bug import Bug\nfrom koji import ClientSession\n\nfrom elliottlib import constants, exceptions, exectools, logutil, errata, util\nfrom elliottlib.cli import cli_opts\nfrom elliottlib.errata_async import AsyncErrataAPI\nfrom elliottlib.metadata import Metadata\nfrom elliottlib.util import isolate_timestamp_in_release, chunk\n\nlogger = logutil.getLogger(__name__)\n\n\n# This is easier to patch in unit tests\ndef datetime_now():\n return datetime.now(timezone.utc)\n\n\ndef get_jira_bz_bug_ids(bug_ids):\n ids = cli_opts.id_convert_str(bug_ids)\n jira_ids = {b for b in ids if JIRABug.looks_like_a_jira_bug(b)}\n bz_ids = {int(b) for b in ids if not JIRABug.looks_like_a_jira_bug(b)}\n return jira_ids, bz_ids\n\n\nclass Bug:\n def __init__(self, bug_obj):\n self.bug = bug_obj\n\n @property\n def id(self):\n raise NotImplementedError\n\n def created_days_ago(self):\n created_date = self.creation_time_parsed()\n return (datetime_now() - created_date).days\n\n def creation_time_parsed(self):\n raise NotImplementedError\n\n @property\n def corresponding_flaw_bug_ids(self):\n raise NotImplementedError\n\n @property\n def whiteboard_component(self):\n raise NotImplementedError\n\n def all_advisory_ids(self):\n raise NotImplementedError\n\n def is_tracker_bug(self):\n raise NotImplementedError\n\n def is_invalid_tracker_bug(self):\n raise NotImplementedError\n\n def is_flaw_bug(self):\n return self.product == \"Security Response\" and self.component == \"vulnerability\"\n\n def is_ocp_bug(self):\n raise NotImplementedError\n\n @property\n def component(self):\n raise NotImplementedError\n\n @property\n def product(self):\n raise NotImplementedError\n\n @staticmethod\n def get_target_release(bugs: List[Bug]) -> str:\n \"\"\"\n Pass in a list of bugs and get their target release version back.\n Raises exception if they have different target release versions set.\n\n :param bugs: List[Bug] instance\n \"\"\"\n invalid_bugs = []\n target_releases = dict()\n\n if not bugs:\n raise ValueError(\"bugs should be a non empty list\")\n\n for bug in bugs:\n # make sure it's a list with a valid str value\n valid_target_rel = isinstance(bug.target_release, list) and len(bug.target_release) > 0 and \\\n re.match(r'(\\d+.\\d+.[0|z])', bug.target_release[0])\n if not valid_target_rel:\n invalid_bugs.append(bug)\n else:\n tr = bug.target_release[0]\n if tr not in target_releases:\n target_releases[tr] = set()\n target_releases[tr].add(bug.id)\n\n if invalid_bugs:\n err = 'target_release should be a list with a string matching regex 
(digit+.digit+.[0|z])'\n for b in invalid_bugs:\n err += f'\\n bug: {b.id}, target_release: {b.target_release} '\n raise ValueError(err)\n\n if len(target_releases) != 1:\n err = f'Found different target_release values for bugs: {target_releases}. ' \\\n 'There should be only 1 target release for all bugs. Fix the offending bug(s) and try again.'\n raise ValueError(err)\n\n return list(target_releases.keys())[0]\n\n\nclass BugzillaBug(Bug):\n def __getattr__(self, attr):\n if attr in self.__dict__:\n return getattr(self, attr)\n return getattr(self.bug, attr)\n\n def __init__(self, bug_obj):\n super().__init__(bug_obj)\n\n @property\n def id(self):\n return self.bug.id\n\n @property\n def product(self):\n return self.bug.product\n\n @property\n def component(self):\n return self.bug.component\n\n @property\n def target_release(self):\n return self.bug.target_release\n\n @property\n def sub_component(self):\n if hasattr(self.bug, 'sub_component'):\n return self.bug.sub_component\n else:\n return None\n\n @property\n def corresponding_flaw_bug_ids(self):\n return self.bug.blocks\n\n @property\n def whiteboard_component(self):\n \"\"\"Get whiteboard component value of a bug.\n\n An OCP cve tracker has a whiteboard value \"component:\"\n to indicate which component the bug belongs to.\n\n :returns: a string if a value is found, otherwise None\n \"\"\"\n marker = r'component:\\s*(\\S+)'\n tmp = re.search(marker, self.bug.whiteboard)\n if tmp and len(tmp.groups()) == 1:\n component_name = tmp.groups()[0]\n return component_name\n return None\n\n def is_tracker_bug(self):\n has_keywords = set(constants.TRACKER_BUG_KEYWORDS).issubset(set(self.keywords))\n has_whiteboard_component = bool(self.whiteboard_component)\n return has_keywords and has_whiteboard_component\n\n def is_invalid_tracker_bug(self):\n if self.is_tracker_bug():\n return False\n if 'WeaknessTracking' in self.keywords:\n # See e.g. https://bugzilla.redhat.com/show_bug.cgi?id=2092289. This bug is not a CVE tracker\n return False\n has_cve_in_summary = bool(re.search(r'CVE-\\d+-\\d+', self.summary))\n has_keywords = set(constants.TRACKER_BUG_KEYWORDS).issubset(set(self.keywords))\n return has_keywords or has_cve_in_summary\n\n def all_advisory_ids(self):\n return ErrataBug(self.id).all_advisory_ids\n\n def is_ocp_bug(self):\n return self.product == constants.BUGZILLA_PRODUCT_OCP\n\n def creation_time_parsed(self):\n return datetime.strptime(str(self.bug.creation_time), '%Y%m%dT%H:%M:%S').replace(tzinfo=timezone.utc)\n\n\nclass JIRABug(Bug):\n def __init__(self, bug_obj: Issue):\n super().__init__(bug_obj)\n\n @property\n def id(self):\n return self.bug.key\n\n @property\n def weburl(self):\n return self.bug.permalink()\n\n @property\n def component(self):\n component0 = self.bug.fields.components[0].name\n return component0.split('/')[0].strip()\n\n @property\n def status(self):\n return self.bug.fields.status.name\n\n @property\n def security_level(self):\n try:\n return self.bug.fields.security\n except AttributeError:\n return None\n\n def is_tracker_bug(self):\n has_keywords = set(constants.TRACKER_BUG_KEYWORDS).issubset(set(self.keywords))\n has_whiteboard_component = bool(self.whiteboard_component)\n has_linked_flaw = bool(self.corresponding_flaw_bug_ids)\n return has_keywords and has_whiteboard_component and has_linked_flaw\n\n def is_invalid_tracker_bug(self):\n if self.is_tracker_bug():\n return False\n if 'WeaknessTracking' in self.keywords:\n # See e.g. https://issues.redhat.com/browse/OCPBUGS-5804. 
This is not to be regarded as a tracking bug.\n            return False\n        if 'art:cloned-kernel-bug' in self.keywords:\n            # Bugs for advance-shipped kernel builds should not be regarded as a tracker. They might look like one,\n            # but they should not be flagged as invalid.\n            # Context in this thread: https://redhat-internal.slack.com/archives/C04SCM5AYE4/p1685524912511489?thread_ts=1685489306.568039&cid=C04SCM5AYE4\n            # This is likely not the end state, but it is the behavior for the time being.\n            return False\n        has_cve_in_summary = bool(re.search(r'CVE-\\d+-\\d+', self.summary))\n        has_keywords = set(constants.TRACKER_BUG_KEYWORDS).issubset(set(self.keywords))\n        has_linked_flaw = bool(self.corresponding_flaw_bug_ids)\n        return has_keywords or has_cve_in_summary or has_linked_flaw\n\n    @property\n    def summary(self):\n        return self.bug.fields.summary\n\n    @property\n    def blocks(self):\n        return self._get_blocks()\n\n    @property\n    def keywords(self):\n        return self.bug.fields.labels\n\n    @property\n    def corresponding_flaw_bug_ids(self):\n        flaw_bug_ids = []\n        for label in self.bug.fields.labels:\n            if str(label).startswith(\"flaw\"):\n                match = re.match(r'flaw:bz#(\\d+)', label)\n                if match:\n                    flaw_bug_ids.append(match[1])\n        return [int(f) for f in flaw_bug_ids]\n\n    @property\n    def version(self):\n        return [x.name for x in self.bug.fields.versions]\n\n    @property\n    def blocked_by_bz(self):\n        url = getattr(self.bug.fields, JIRABugTracker.FIELD_BLOCKED_BY_BZ)\n        if not url:\n            return None\n        bug_id = re.search(r\"id=(\\d+)\", url)\n        if not bug_id:\n            return None\n        return int(bug_id.groups()[0])\n\n    @property\n    def target_release(self):\n        tr_field = getattr(self.bug.fields, JIRABugTracker.FIELD_TARGET_VERSION)\n        if not tr_field:\n            raise ValueError(f'bug {self.id} does not have `Target Version` field set')\n        return [x.name for x in tr_field]\n\n    @property\n    def sub_component(self):\n        component0 = self.bug.fields.components[0].name\n        split = component0.split('/')\n        if len(split) < 2:\n            return None\n        return split[1].strip()\n\n    @property\n    def resolution(self):\n        return str(self.bug.fields.resolution)\n\n    @property\n    def depends_on(self):\n        depends_on = self._get_depends()\n        depends_on_bz = self.blocked_by_bz\n        if depends_on_bz:\n            depends_on.append(depends_on_bz)\n        return depends_on\n\n    @property\n    def release_blocker(self):\n        return self._get_release_blocker()\n\n    @property\n    def severity(self):\n        return self._get_severity()\n\n    @property\n    def product(self):\n        return self.bug.fields.project.key\n\n    @property\n    def alias(self):\n        # TODO: See usage; 
this can be correct or incorrect based on usage.\n        return self.bug.fields.labels\n\n    @property\n    def whiteboard_component(self):\n        \"\"\"Get whiteboard component value of a bug.\n\n        An OCP cve tracker has a whiteboard value \"component:\"\n        to indicate which component the bug belongs to.\n\n        :returns: a string if a value is found, otherwise None\n        \"\"\"\n        marker = r'component:\\s*(\\S+)'\n        for label in self.bug.fields.labels:\n            tmp = re.search(marker, label)\n            if tmp and len(tmp.groups()) == 1:\n                component_name = tmp.groups()[0]\n                return component_name\n        return None\n\n    def _get_release_blocker(self):\n        # release blocker can be ['None','Approved'=='+','Proposed'=='?','Rejected'=='-']\n        field = getattr(self.bug.fields, JIRABugTracker.FIELD_RELEASE_BLOCKER)\n        if field:\n            return field.value == 'Approved'\n        return False\n\n    def _get_blocked_reason(self):\n        field = getattr(self.bug.fields, JIRABugTracker.FIELD_BLOCKED_REASON)\n        if field:\n            return field.value\n        return None\n\n    def _get_severity(self):\n        field = getattr(self.bug.fields, JIRABugTracker.FIELD_SEVERITY)\n        if field:\n            if \"Urgent\" in field.value:\n                return \"Urgent\"\n            if \"High\" in field.value:\n                return \"High\"\n            if \"Medium\" in field.value:\n                return \"Medium\"\n            if \"Low\" in field.value:\n                return \"Low\"\n        return None\n\n    def all_advisory_ids(self):\n        return ErrataJira(self.id).all_advisory_ids\n\n    def creation_time_parsed(self):\n        return datetime.strptime(str(self.bug.fields.created), '%Y-%m-%dT%H:%M:%S.%f%z')\n\n    def is_ocp_bug(self):\n        return self.bug.fields.project.key == \"OCPBUGS\" and not self.is_placeholder_bug()\n\n    def is_placeholder_bug(self):\n        return ('Placeholder' in self.summary) and (self.component == 'Release') and ('Automation' in self.keywords)\n\n    def _get_blocks(self):\n        # link \"blocks\"\n        blocks = []\n        for link in self.bug.fields.issuelinks:\n            if link.type.name == \"Blocks\" and hasattr(link, \"outwardIssue\"):\n                blocks.append(link.outwardIssue.key)\n        return blocks\n\n    def _get_depends(self):\n        # link \"is blocked by\"\n        depends = []\n        for link in self.bug.fields.issuelinks:\n            if link.type.name == \"Blocks\" and hasattr(link, \"inwardIssue\"):\n                depends.append(link.inwardIssue.key)\n        return depends\n\n    @staticmethod\n    def looks_like_a_jira_bug(bug_id):\n        pattern = re.compile(r'\\w+-\\d+')\n        return pattern.match(str(bug_id))\n\n\nclass BugTracker:\n    def __init__(self, config: dict, tracker_type: str):\n        self.config = config\n        self._server = self.config.get('server', '')\n        self.type = tracker_type\n\n    def component_filter(self, filter_name='default') -> List:\n        return self.config.get('filters', {}).get(filter_name)\n\n    def target_release(self) -> List:\n        return self.config.get('target_release')\n\n    def search(self, status, search_filter, verbose=False, **kwargs):\n        raise NotImplementedError\n\n    def blocker_search(self, status, search_filter, verbose=False, **kwargs):\n        raise NotImplementedError\n\n    def cve_tracker_search(self, status, search_filter, verbose=False, **kwargs):\n        raise NotImplementedError\n\n    def get_bug(self, bugid, **kwargs):\n        raise NotImplementedError\n\n    def get_bugs(self, bugids: List, permissive=False, **kwargs):\n        raise NotImplementedError\n\n    def get_bugs_map(self, bugids: List, permissive: bool = False, **kwargs) -> Dict:\n        id_bug_map = {}\n        if not bugids:\n            return id_bug_map\n        bugs = self.get_bugs(bugids, permissive=permissive, **kwargs)\n        for bug in bugs:\n            id_bug_map[bug.id] = bug\n        return id_bug_map\n\n    def remove_bugs(self, advisory_obj, bugids: List, noop=False):\n        raise 
NotImplementedError\n\n    def attach_bugs(self, bugids: List, advisory_id: int = 0, advisory_obj: Erratum = None, noop=False,\n                    verbose=False):\n        raise NotImplementedError\n\n    def add_comment(self, bugid, comment: str, private: bool, noop=False):\n        raise NotImplementedError\n\n    def create_bug(self, bug_title, bug_description, target_status, keywords: List, noop=False):\n        raise NotImplementedError\n\n    def _update_bug_status(self, bugid, target_status):\n        raise NotImplementedError\n\n    @staticmethod\n    def advisory_bug_ids(advisory_obj):\n        raise NotImplementedError\n\n    @staticmethod\n    def id_convert(id_string):\n        raise NotImplementedError\n\n    def create_placeholder(self, kind, noop=False):\n        title = f\"Placeholder bug for OCP {self.config.get('target_release')[0]} {kind} release\"\n        return self.create_bug(title, title, \"VERIFIED\", [\"Automation\"], noop)\n\n    def create_textonly(self, bug_title, bug_description, noop=False):\n        return self.create_bug(bug_title, bug_description, \"VERIFIED\", [], noop)\n\n    def update_bug_status(self, bug: Bug, target_status: str,\n                          comment: Optional[str] = None, log_comment: bool = True, noop=False):\n        \"\"\" Update bug status and optionally leave a comment\n        :return: True if the bug status was actually updated\n        \"\"\"\n        current_status = bug.status\n        action = f'changed {bug.id} from {current_status} to {target_status}'\n        if current_status == target_status:\n            logger.info(f'{bug.id} is already on {target_status}')\n            return False\n        elif noop:\n            logger.info(f\"Would have {action}\")\n        else:\n            self._update_bug_status(bug.id, target_status)\n            logger.info(action)\n\n        comment_lines = []\n        if log_comment:\n            comment_lines.append(f'Elliott changed bug status from {current_status} to {target_status}.')\n        if comment:\n            comment_lines.append(comment)\n        if comment_lines:\n            self.add_comment(bug.id, '\\n'.join(comment_lines), private=True, noop=noop)\n        return True\n\n    @staticmethod\n    def get_corresponding_flaw_bugs(tracker_bugs: List[Bug], flaw_bug_tracker, brew_api,\n                                    strict: bool = True, verbose: bool = False) -> (Dict, Dict):\n        \"\"\"Get corresponding flaw bug objects for a given list of tracker bug objects.\n\n        :param flaw_bug_tracker: bug tracker object to fetch flaw bugs from\n        :return: (tracker_flaws, flaw_id_bugs): tracker_flaws is a dict with tracker bug id as key and list of flaw\n        bug id as value, flaw_id_bugs is a dict with flaw bug id as key and flaw bug object as value\n        \"\"\"\n        bug_tracker = flaw_bug_tracker\n        flaw_bugs = bug_tracker.get_flaw_bugs(\n            list(set(sum([t.corresponding_flaw_bug_ids for t in tracker_bugs], []))),\n            verbose=verbose\n        )\n        flaw_tracker_map = {bug.id: {'bug': bug, 'trackers': []}\n                            for bug in flaw_bugs}\n\n        # Validate that each tracker has a corresponding flaw bug\n        # and a whiteboard component\n        trackers_with_no_flaws = set()\n        trackers_with_invalid_components = set()\n        for t in tracker_bugs:\n            component = t.whiteboard_component\n            if not component:\n                trackers_with_invalid_components.add(t.id)\n                continue\n\n            # is this component a valid package name in brew?\n            if not brew_api.getPackageID(component):\n                logger.info(f'package `{component}` not found in brew')\n                trackers_with_invalid_components.add(t.id)\n                continue\n\n            flaw_bug_ids = [i for i in t.corresponding_flaw_bug_ids if i in flaw_tracker_map]\n            if not len(flaw_bug_ids):\n                trackers_with_no_flaws.add(t.id)\n                continue\n\n            for f_id in flaw_bug_ids:\n                flaw_tracker_map[f_id]['trackers'].append(t)\n\n        error_msg = ''\n        if trackers_with_no_flaws:\n            error_msg += 'Cannot find any corresponding flaw bugs for these 
trackers: ' \\\n f'{sorted(trackers_with_no_flaws)}. '\n\n if trackers_with_invalid_components:\n error_msg += \"These trackers do not have a valid whiteboard component value:\" \\\n f\" {sorted(trackers_with_invalid_components)}.\"\n\n if error_msg:\n if strict:\n raise exceptions.ElliottFatalError(error_msg)\n else:\n logger.warning(error_msg)\n\n invalid_trackers = trackers_with_no_flaws | trackers_with_invalid_components\n tracker_flaws = {\n t.id: [b for b in t.corresponding_flaw_bug_ids if b in flaw_tracker_map]\n for t in tracker_bugs if t.id not in invalid_trackers\n }\n return tracker_flaws, flaw_tracker_map\n\n def get_tracker_bugs(self, bug_ids: List, strict: bool = False, verbose: bool = False):\n raise NotImplementedError\n\n def get_flaw_bugs(self, bug_ids: List, strict: bool = True, verbose: bool = False):\n raise NotImplementedError\n\n\nclass JIRABugTracker(BugTracker):\n JIRA_BUG_BATCH_SIZE = 50\n\n # Prefer to query by user visible Field Name. Context: https://issues.redhat.com/browse/ART-7053\n FIELD_BLOCKED_BY_BZ = 'customfield_12322152' # \"Blocked by Bugzilla Bug\"\n FIELD_TARGET_VERSION = 'customfield_12323140' # \"Target Version\"\n FIELD_RELEASE_BLOCKER = 'customfield_12319743' # \"Release Blocker\"\n FIELD_BLOCKED_REASON = 'customfield_12316544' # \"Blocked Reason\"\n FIELD_SEVERITY = 'customfield_12316142' # \"Severity\"\n\n @staticmethod\n def get_config(runtime) -> Dict:\n major, minor = runtime.get_major_minor()\n if major == 4 and minor < 6:\n raise ValueError(\"ocp-build-data/bug.yml is not expected to be available for 4.X versions < 4.6\")\n bug_config = runtime.gitdata.load_data(key='bug').data\n # construct config so that all jira_config keys become toplevel keys\n jira_config = bug_config.pop('jira_config')\n for key in jira_config:\n if key in bug_config:\n raise ValueError(f\"unexpected: top level config contains same key ({key}) as jira_config\")\n bug_config[key] = jira_config[key]\n return bug_config\n\n def login(self, token_auth=None) -> JIRA:\n if not token_auth:\n token_auth = os.environ.get(\"JIRA_TOKEN\")\n if not token_auth:\n raise ValueError(f\"elliott requires login credentials for {self._server}. 
Set a JIRA_TOKEN env var \")\n client = JIRA(self._server, token_auth=token_auth)\n return client\n\n def __init__(self, config):\n super().__init__(config, 'jira')\n self._project = self.config.get('project', '')\n self._client: JIRA = self.login()\n\n @property\n def product(self):\n return self._project\n\n def looks_like_a_jira_project_bug(self, bug_id) -> bool:\n pattern = re.compile(fr'{self._project}-\\d+')\n return bool(pattern.match(str(bug_id)))\n\n def get_bug(self, bugid: str, **kwargs) -> JIRABug:\n return JIRABug(self._client.issue(bugid, **kwargs))\n\n def get_bugs(self, bugids: List[str], permissive=False, verbose=False, **kwargs) -> List[JIRABug]:\n invalid_bugs = [b for b in bugids if not self.looks_like_a_jira_project_bug(b)]\n if invalid_bugs:\n logger.warn(f\"Cannot fetch bugs from a different project (current project: {self._project}):\"\n f\" {invalid_bugs}\")\n bugids = [b for b in bugids if self.looks_like_a_jira_project_bug(b)]\n if not bugids:\n return []\n\n # Split the request in chunks, in order not to fall into\n # jira.exceptions.JIRAError for request header size too large\n bugs = []\n for chunk_of_bugs in chunk(list(bugids), self.JIRA_BUG_BATCH_SIZE):\n query = self._query(bugids=chunk_of_bugs, with_target_release=False)\n if verbose:\n logger.info(query)\n bugs.extend(self._search(query))\n\n if len(bugs) < len(bugids):\n bugids_not_found = set(bugids) - {b.id for b in bugs}\n msg = f\"Some bugs could not be fetched ({len(bugids) - len(bugs)}): {bugids_not_found}\"\n if not permissive:\n raise ValueError(msg)\n else:\n logger.warn(msg)\n return bugs\n\n def get_bug_remote_links(self, bug: JIRABug):\n remote_links = self._client.remote_links(bug)\n link_dict = {}\n for link in remote_links:\n if link.__contains__('relationship'):\n link_dict[link.relationship] = link.object.url\n return link_dict\n\n def create_bug(self, bug_title: str, bug_description: str, target_status: str, keywords: List, noop=False) -> \\\n JIRABug:\n fields = {\n 'project': {'key': self._project},\n 'issuetype': {'name': 'Bug'},\n 'components': [{'name': 'Release'}],\n 'versions': [{'name': self.config.get('version')[0]}], # Affects Version/s\n self.FIELD_TARGET_VERSION: [{'name': self.config.get('target_release')[0]}], # Target Version\n 'summary': bug_title,\n 'labels': keywords,\n 'description': bug_description\n }\n if noop:\n logger.info(f\"Would have created JIRA Issue with status={target_status} and fields={fields}\")\n return\n bug = self._client.create_issue(fields=fields)\n self._client.transition_issue(bug, target_status)\n return JIRABug(bug)\n\n def _update_bug_status(self, bugid, target_status):\n return self._client.transition_issue(bugid, target_status)\n\n def add_comment(self, bugid: str, comment: str, private: bool, noop=False):\n if noop:\n logger.info(f\"Would have added a private={private} comment to {bugid}\")\n return\n if private:\n self._client.add_comment(bugid, comment, visibility={'type': 'group', 'value': 'Red Hat Employee'})\n else:\n self._client.add_comment(bugid, comment)\n\n def _query(self, bugids: Optional[List] = None,\n status: Optional[List] = None,\n target_release: Optional[List] = None,\n include_labels: Optional[List] = None,\n exclude_labels: Optional[List] = None,\n with_target_release: bool = True,\n search_filter: str = None,\n custom_query: str = None) -> str:\n\n if target_release and with_target_release:\n raise ValueError(\"cannot use target_release and with_target_release together\")\n if not target_release and 
with_target_release:\n target_release = self.target_release()\n\n exclude_components = []\n if search_filter:\n exclude_components = self.component_filter(search_filter)\n\n query = f\"project={self._project}\"\n if bugids:\n query += f\" and issue in ({','.join(bugids)})\"\n if status:\n val = ','.join(f'\"{s}\"' for s in status)\n query += f\" and status in ({val})\"\n if target_release:\n tr = ','.join(target_release)\n query += f' and \"Target Version\" in ({tr})'\n if include_labels:\n query += f\" and labels in ({','.join(include_labels)})\"\n if exclude_labels:\n query += f\" and labels not in ({','.join(exclude_labels)})\"\n if exclude_components:\n # https://docs.adaptavist.com/sr4js/6.55.1/features/jql-functions/included-jql-functions/calculations\n val = ','.join(f'componentMatch(\"{c}*\")' for c in exclude_components)\n query += f\" and component not in ({val})\"\n if custom_query:\n query += custom_query\n return query\n\n def _search(self, query, verbose=False) -> List[JIRABug]:\n if verbose:\n logger.info(query)\n results = self._client.search_issues(query, maxResults=0)\n return [JIRABug(j) for j in results]\n\n def blocker_search(self, status, search_filter='default', verbose=False, **kwargs):\n query = self._query(\n status=status,\n with_target_release=True,\n search_filter=search_filter,\n custom_query='and \"Release Blocker\" = \"Approved\"'\n )\n return self._search(query, verbose=verbose, **kwargs)\n\n def search(self, status, search_filter='default', verbose=False):\n query = self._query(\n status=status,\n search_filter=search_filter\n )\n return self._search(query, verbose=verbose)\n\n def cve_tracker_search(self, status, search_filter='default', verbose=False):\n query = self._query(\n status=status,\n search_filter=search_filter,\n include_labels=[\"SecurityTracking\"],\n )\n return self._search(query, verbose=verbose)\n\n def remove_bugs(self, advisory_obj, bugids: List, noop=False):\n if noop:\n print(f\"Would've removed bugs: {bugids}\")\n return\n advisory_obj.removeJIRAIssues(bugids)\n advisory_obj.commit()\n\n def attach_bugs(self, bugids: List, advisory_id: int = 0, advisory_obj: Erratum = None, noop=False,\n verbose=False):\n if not advisory_obj:\n advisory_obj = Erratum(errata_id=advisory_id)\n return errata.add_jira_bugs_with_retry(advisory_obj, bugids, noop=noop)\n\n def filter_bugs_by_cutoff_event(self, bugs: Iterable, desired_statuses: Iterable[str],\n sweep_cutoff_timestamp: float, verbose=False) -> List:\n dt = datetime.utcfromtimestamp(sweep_cutoff_timestamp).strftime(\"%Y/%m/%d %H:%M\")\n val = ','.join(f'\"{s}\"' for s in desired_statuses)\n query = f\"issue in ({','.join([b.id for b in bugs])}) \" \\\n f\"and status was in ({val}) \" \\\n f'before(\"{dt}\")'\n return self._search(query, verbose=verbose)\n\n async def filter_attached_bugs(self, bugs: Iterable):\n bugs = list(bugs)\n api = AsyncErrataAPI()\n results = await asyncio.gather(*[api.get_advisories_for_jira(bug.id, ignore_not_found=True) for bug in bugs])\n attached_bugs = [bug for bug, advisories in zip(bugs, results) if advisories]\n await api.close()\n return attached_bugs\n\n @staticmethod\n def advisory_bug_ids(advisory_obj):\n return advisory_obj.jira_issues\n\n @staticmethod\n def id_convert(id_string):\n return cli_opts.id_convert_str(id_string)\n\n def get_tracker_bugs(self, bug_ids: List, strict: bool = False, verbose: bool = False):\n return [b for b in self.get_bugs(bug_ids, permissive=not strict, verbose=verbose) if b.is_tracker_bug()]\n\n def get_flaw_bugs(self, bug_ids: 
List, strict: bool = True, verbose: bool = False):\n return [b for b in self.get_bugs(bug_ids, permissive=not strict, verbose=verbose) if b.is_flaw_bug()]\n\n\nclass BugzillaBugTracker(BugTracker):\n @staticmethod\n def get_config(runtime):\n major, minor = runtime.get_major_minor()\n if major == 4 and minor < 5:\n raise ValueError(\"ocp-build-data/bug.yml is not expected to be available for 4.X versions < 4.5\")\n bug_config = runtime.gitdata.load_data(key='bug').data\n # construct config so that all bugzilla_config keys become toplevel keys\n bz_config = bug_config.pop('bugzilla_config')\n for key in bz_config:\n if key in bug_config:\n raise ValueError(f\"unexpected: top level config contains same key ({key}) as bugzilla_config\")\n bug_config[key] = bz_config[key]\n return bug_config\n\n def login(self):\n client = bugzilla.Bugzilla(self._server)\n if not client.logged_in:\n raise ValueError(f\"elliott requires cached login credentials for {self._server}. Login using 'bugzilla \"\n \"login --api-key\")\n return client\n\n def __init__(self, config):\n super().__init__(config, 'bugzilla')\n self._client = self.login()\n self.product = self.config.get('product', '')\n\n def get_bug(self, bugid, **kwargs):\n return BugzillaBug(self._client.getbug(bugid, **kwargs))\n\n def get_bugs(self, bugids, permissive=False, **kwargs):\n if not bugids:\n return []\n if 'verbose' in kwargs:\n if kwargs.pop('verbose'):\n logger.info(f'get_bugs called with bugids: {bugids}, permissive: {permissive} and kwargs: {kwargs}')\n bugs = [BugzillaBug(b) for b in self._client.getbugs(bugids, permissive=permissive, **kwargs)]\n if len(bugs) < len(bugids):\n bugids_not_found = set(bugids) - {b.id for b in bugs}\n msg = f\"Some bugs could not be fetched ({len(bugids)-len(bugs)}): {bugids_not_found}\"\n if permissive:\n print(msg)\n return bugs\n\n def client(self):\n return self._client\n\n def blocker_search(self, status, search_filter='default', verbose=False):\n query = _construct_query_url(self.config, status, search_filter, flag='blocker+')\n return self._search(query, verbose)\n\n def search(self, status, search_filter='default', verbose=False):\n query = _construct_query_url(self.config, status, search_filter)\n return self._search(query, verbose)\n\n def cve_tracker_search(self, status, search_filter='default', verbose=False):\n query = _construct_query_url(self.config, status, search_filter)\n query.addKeyword('SecurityTracking')\n return self._search(query, verbose)\n\n def _search(self, query, verbose=False):\n if verbose:\n logger.info(query)\n return [BugzillaBug(b) for b in _perform_query(self._client, query)]\n\n def remove_bugs(self, advisory_obj, bugids: List, noop=False):\n if noop:\n print(f\"Would've removed bugs: {bugids}\")\n return\n advisory_id = advisory_obj.errata_id\n return errata.remove_multi_bugs(advisory_id, bugids)\n\n def attach_bugs(self, bugids: List, advisory_id: int = 0, advisory_obj: Erratum = None, noop=False, verbose=False):\n if not advisory_obj:\n advisory_obj = Erratum(errata_id=advisory_id)\n return errata.add_bugzilla_bugs_with_retry(advisory_obj, bugids, noop=noop)\n\n def create_bug(self, title, description, target_status, keywords: List, noop=False) -> BugzillaBug:\n create_info = self._client.build_createbug(\n product=self.product,\n version=self.config.get('version')[0],\n target_release=self.config.get('target_release')[0],\n component=\"Release\",\n summary=title,\n keywords=keywords,\n description=description)\n if noop:\n logger.info(f\"Would have created 
BugzillaBug with status={target_status} and fields={create_info}\")\n            return\n        new_bug = self._client.createbug(create_info)\n        # change state to the target status (callers typically pass VERIFIED)\n        update = self._client.build_update(status=target_status)\n        try:\n            self._client.update_bugs([new_bug.id], update)\n        except Exception as ex:  # the bugzilla API intermittently fails on this call; log and retry once\n            print(ex)\n            sleep(5)\n            self._client.update_bugs([new_bug.id], update)\n\n        return BugzillaBug(new_bug)\n\n    def _update_bug_status(self, bugid, target_status):\n        if target_status == 'CLOSED':\n            return self._client.update_bugs([bugid], self._client.build_update(status=target_status,\n                                                                               resolution='WONTFIX'))\n        return self._client.update_bugs([bugid], self._client.build_update(status=target_status))\n\n    def add_comment(self, bugid, comment: str, private, noop=False):\n        self._client.update_bugs([bugid], self._client.build_update(comment=comment, comment_private=private))\n\n    def filter_bugs_by_cutoff_event(self, bugs: Iterable, desired_statuses: Iterable[str],\n                                    sweep_cutoff_timestamp: float, verbose=False) -> List:\n        \"\"\" Given a list of bugs, finds those that have changed to one of the desired statuses before the given timestamp.\n\n        According to @jupierce:\n\n        Let:\n        - Z be a non-closed BZ in a monitored component\n        - S2 be the current state (as in the moment we are scanning) of Z\n        - S1 be the state of the Z at the moment of the cutoff\n        - A be the set of state changes of Z after the cutoff\n        - F be the sweep states (MODIFIED, ON_QA, VERIFIED)\n\n        Then Z is swept in if all the following are true:\n        - S1 ∈ F\n        - S2 ∈ F\n        - ∄ v ∈ A : v <= S1\n\n        In prose: if a BZ seems to qualify for a sweep currently and at the cutoff event, then all state changes after the cutoff event must be to a state greater than the one which qualified the BZ at the cutoff event.\n\n        :param bugs: a list of bugs\n        :param desired_statuses: desired bug statuses\n        :param sweep_cutoff_timestamp: a unix timestamp\n        :return: a list of found bugs\n        \"\"\"\n        qualified_bugs = []\n        desired_statuses = set(desired_statuses)\n\n        # Filters out bugs that are created after the sweep cutoff timestamp\n        before_cutoff_bugs = [bug for bug in bugs if to_timestamp(bug.creation_time) <= sweep_cutoff_timestamp]\n        if len(before_cutoff_bugs) < len(bugs):\n            logger.info(\n                f\"{len(bugs) - len(before_cutoff_bugs)} of {len(bugs)} bugs are ignored because they were created after the sweep cutoff timestamp {sweep_cutoff_timestamp} ({datetime.utcfromtimestamp(sweep_cutoff_timestamp)})\")\n\n        # Queries bug history\n        bugs_history = self._client.bugs_history_raw([bug.id for bug in before_cutoff_bugs])\n\n        class BugStatusChange:\n            def __init__(self, timestamp: int, old: str, new: str) -> None:\n                self.timestamp = timestamp  # when this change was made\n                self.old = old  # old status\n                self.new = new  # new status\n\n            @classmethod\n            def from_history_ent(cls, history):\n                \"\"\" Converts from bug history dict returned from Bugzilla to BugStatusChange object.\n                The history dict returned from Bugzilla includes bug changes on all fields, but we are only interested in the \"status\" field change.\n                :return: BugStatusChange object, or None if the history doesn't include a \"status\" field change.\n                \"\"\"\n                status_change = next(filter(lambda change: change[\"field_name\"] == \"status\", history[\"changes\"]), None)\n                if not status_change:\n                    return None\n                return cls(to_timestamp(history[\"when\"]), status_change[\"removed\"], status_change[\"added\"])\n\n        for bug, bug_history in zip(before_cutoff_bugs, 
bugs_history[\"bugs\"]):\n assert bug.id == bug_history[\n \"id\"] # `bugs_history[\"bugs\"]` returned from Bugzilla API should have the same order as `before_cutoff_bugs`, but be safe\n\n # We are only interested in \"status\" field changes\n status_changes = filter(None, map(BugStatusChange.from_history_ent, bug_history[\"history\"]))\n\n # status changes after the cutoff event\n after_cutoff_status_changes = list(\n itertools.dropwhile(lambda change: change.timestamp <= sweep_cutoff_timestamp, status_changes))\n\n # determines the status of the bug at the moment of the sweep cutoff event\n if not after_cutoff_status_changes:\n sweep_cutoff_status = bug.status # no status change after the cutoff event; use current status\n else:\n sweep_cutoff_status = after_cutoff_status_changes[\n 0].old # sweep_cutoff_status should be the old status of the first status change after the sweep cutoff event\n\n if sweep_cutoff_status not in desired_statuses:\n logger.info(\n f\"BZ {bug.id} is ignored because its status was {sweep_cutoff_status} at the moment of sweep cutoff ({datetime.utcfromtimestamp(sweep_cutoff_timestamp)})\")\n continue\n\n # Per @Justin Pierce: If a BZ seems to qualify for a sweep currently and at the sweep cutoff event, then all state changes after the sweep cutoff event must be to a greater than the state which qualified the BZ at the sweep cutoff event.\n regressed_changes = [change.new for change in after_cutoff_status_changes if\n constants.VALID_BUG_STATES.index(change.new) <= constants.VALID_BUG_STATES.index(\n sweep_cutoff_status)]\n if regressed_changes:\n logger.warning(\n f\"BZ {bug.id} is ignored because its status was {sweep_cutoff_status} at the moment of sweep cutoff ({datetime.utcfromtimestamp(sweep_cutoff_timestamp)})\"\n f\", however its status changed back to {regressed_changes} afterwards\")\n continue\n\n qualified_bugs.append(bug)\n\n return qualified_bugs\n\n async def filter_attached_bugs(self, bugs: Iterable):\n bugs = list(bugs)\n api = AsyncErrataAPI()\n results = await asyncio.gather(*[api.get_advisories_for_bug(bug.id) for bug in bugs])\n attached_bugs = [bug for bug, advisories in zip(bugs, results) if advisories]\n await api.close()\n return attached_bugs\n\n @staticmethod\n def advisory_bug_ids(advisory_obj):\n return advisory_obj.errata_bugs\n\n @staticmethod\n def id_convert(id_string):\n return cli_opts.id_convert(id_string)\n\n def get_tracker_bugs(self, bug_ids: List, strict: bool = False, verbose: bool = False):\n fields = [\"target_release\", \"blocks\", 'whiteboard', 'keywords']\n return [b for b in self.get_bugs(bug_ids, permissive=not strict, include_fields=fields, verbose=verbose) if\n b.is_tracker_bug()]\n\n def get_flaw_bugs(self, bug_ids: List, strict: bool = True, verbose: bool = False):\n fields = [\"product\", \"component\", \"depends_on\", \"alias\", \"severity\", \"summary\"]\n return [b for b in self.get_bugs(bug_ids, permissive=not strict, include_fields=fields, verbose=verbose) if\n b.is_flaw_bug()]\n\n\ndef get_highest_impact(trackers, tracker_flaws_map):\n \"\"\"Get the highest impact of security bugs\n\n :param trackers: The list of tracking bugs you want to compare to get the highest severity\n :param tracker_flaws_map: A dict with tracking bug IDs as keys and lists of flaw bugs as values\n :return: The highest impact of the bugs\n \"\"\"\n severity_index = 0 # \"unspecified\" severity\n for tracker in trackers:\n tracker_severity = constants.BUG_SEVERITY_NUMBER_MAP[tracker.severity.lower()]\n if tracker_severity == 0:\n # 
When severity isn't set on the tracker, check the severity of the flaw bugs\n # https://jira.coreos.com/browse/ART-1192\n flaws = tracker_flaws_map[tracker.id]\n for flaw in flaws:\n flaw_severity = constants.BUG_SEVERITY_NUMBER_MAP[flaw.severity.lower()]\n if flaw_severity > tracker_severity:\n tracker_severity = flaw_severity\n if tracker_severity > severity_index:\n severity_index = tracker_severity\n if severity_index == 0:\n # When severity isn't set on all tracking and flaw bugs, default to \"Low\"\n # https://jira.coreos.com/browse/ART-1192\n logger.warning(\"CVE impact couldn't be determined for tracking bug(s); defaulting to Low.\")\n return constants.SECURITY_IMPACT[severity_index]\n\n\ndef is_viable_bug(bug_obj):\n \"\"\" Check if a bug is viable to attach to an advisory.\n\n A viable bug must be in one of MODIFIED and VERIFIED status. We accept ON_QA\n bugs as viable as well, as they will be shortly moved to MODIFIED while attaching.\n\n :param bug_obj: bug object\n :returns: True if viable\n \"\"\"\n return bug_obj.status in [\"MODIFIED\", \"ON_QA\", \"VERIFIED\"]\n\n\ndef _construct_query_url(config, status, search_filter='default', flag=None):\n query_url = SearchURL(config)\n query_url.fields = ['id', 'status', 'summary', 'creation_time', 'cf_pm_score', 'component',\n # the api expects \"sub_components\" for the field \"sub_component\"\n # https://github.com/python-bugzilla/python-bugzilla/blob/main/bugzilla/base.py#L321\n 'sub_components',\n 'external_bugs', 'whiteboard', 'keywords', 'target_release', 'depends_on']\n\n filter_list = []\n if config.get('filter'):\n filter_list = config.get('filter')\n elif config.get('filters'):\n filter_list = config.get('filters').get(search_filter)\n\n for f in filter_list:\n query_url.addFilter('component', 'notequals', f)\n\n # CVEs for this image get filed into component that we need to look at. As this is about a\n # deprecated system and fixing config is not an option, hard code this exclusion:\n query_url.addFilter('status_whiteboard', 'notsubstring', 'component:assisted-installer-container')\n\n for s in status:\n query_url.addBugStatus(s)\n\n for r in config.get('target_release', []):\n query_url.addTargetRelease(r)\n\n if flag:\n query_url.addFlagFilter(flag, \"substring\")\n\n return query_url\n\n\ndef _perform_query(bzapi, query_url):\n BZ_PAGE_SIZE = 1000\n\n def iterate_query(query):\n results = bzapi.query(query)\n\n if len(results) == BZ_PAGE_SIZE:\n query['offset'] += BZ_PAGE_SIZE\n results += iterate_query(query)\n return results\n\n include_fields = query_url.fields\n if not include_fields:\n include_fields = ['id']\n\n query = bzapi.url_to_query(str(query_url))\n query[\"include_fields\"] = include_fields\n query[\"limit\"] = BZ_PAGE_SIZE\n query[\"offset\"] = 0\n\n return iterate_query(query)\n\n\nclass SearchFilter(object):\n \"\"\"\n This represents a query filter. 
Each filter consists of three components:\n\n * field selector string\n * operator\n * field value\n \"\"\"\n\n pattern = \"&f{0}={1}&o{0}={2}&v{0}={3}\"\n\n def __init__(self, field, operator, value):\n self.field = field\n self.operator = operator\n self.value = value\n\n def tostring(self, number):\n return SearchFilter.pattern.format(\n number, self.field, self.operator, urllib.parse.quote(self.value)\n )\n\n\nclass SearchURL(object):\n\n url_format = \"https://{}/buglist.cgi?\"\n\n def __init__(self, config):\n self.bz_host = config.get('server', '')\n self.classification = config.get('classification', '')\n self.product = config.get('product', '')\n self.bug_status = []\n self.filters = []\n self.filter_operator = \"\"\n self.versions = []\n self.target_releases = []\n self.keyword = \"\"\n self.keywords_type = \"\"\n self.fields = []\n\n def __str__(self):\n root_string = SearchURL.url_format.format(self.bz_host)\n\n url = root_string + self._status_string()\n\n url += \"&classification={}\".format(urllib.parse.quote(self.classification))\n url += \"&product={}\".format(urllib.parse.quote(self.product))\n url += self._keywords_string()\n url += self.filter_operator\n url += self._filter_string()\n url += self._target_releases_string()\n url += self._version_string()\n\n return url\n\n def _status_string(self):\n return \"&\".join([\"bug_status={}\".format(i) for i in self.bug_status])\n\n def _version_string(self):\n return \"\".join([\"&version={}\".format(i) for i in self.versions])\n\n def _filter_string(self):\n return \"\".join([f.tostring(i) for i, f in enumerate(self.filters)])\n\n def _target_releases_string(self):\n return \"\".join([\"&target_release={}\".format(tr) for tr in self.target_releases])\n\n def _keywords_string(self):\n return \"&keywords={}&keywords_type={}\".format(self.keyword, self.keywords_type)\n\n def addFilter(self, field, operator, value):\n self.filters.append(SearchFilter(field, operator, value))\n\n def addFlagFilter(self, flag, operator):\n self.filters.append(SearchFilter(\"flagtypes.name\", operator, flag))\n\n def addTargetRelease(self, release_string):\n self.target_releases.append(release_string)\n\n def addVersion(self, version):\n self.versions.append(version)\n\n def addBugStatus(self, status):\n self.bug_status.append(status)\n\n def addKeyword(self, keyword, keyword_type=\"anywords\"):\n self.keyword = keyword\n self.keywords_type = keyword_type\n\n\ndef to_timestamp(dt: xmlrpc.client.DateTime):\n \"\"\" Converts xmlrpc.client.DateTime to timestamp \"\"\"\n return datetime.strptime(dt.value, \"%Y%m%dT%H:%M:%S\").replace(tzinfo=timezone.utc).timestamp()\n\n\nasync def approximate_cutoff_timestamp(basis_event: int, koji_api: ClientSession, metas: Iterable[Metadata]) -> float:\n \"\"\" Calculate an approximate sweep cutoff timestamp from the given basis event\n \"\"\"\n basis_timestamp = koji_api.getEvent(basis_event)[\"ts\"]\n builds: List[Dict] = await asyncio.gather(*[exectools.to_thread(meta.get_latest_build, default=None, complete_before_event=basis_event, honor_is=False) for meta in metas])\n nvrs = [b[\"nvr\"] for b in builds if b]\n rebase_timestamp_strings = filter(None, [isolate_timestamp_in_release(nvr) for nvr in nvrs]) # the timestamp in the release field of NVR is the approximate rebase time\n # convert to UNIX timestamps\n rebase_timestamps = [datetime.strptime(ts, \"%Y%m%d%H%M%S\").replace(tzinfo=timezone.utc).timestamp()\n for ts in rebase_timestamp_strings]\n return min(basis_timestamp, max(rebase_timestamps, 
default=basis_timestamp))\n\n\ndef get_highest_security_impact(bugs):\n security_impacts = set(bug.severity.lower() for bug in bugs)\n if 'urgent' in security_impacts:\n return 'Critical'\n if 'high' in security_impacts:\n return 'Important'\n if 'medium' in security_impacts:\n return 'Moderate'\n return 'Low'\n\n\ndef sort_cve_bugs(bugs):\n def cve_sort_key(bug):\n impact = constants.security_impact_map[get_highest_security_impact([bug])]\n year, num = bug.alias[0].split(\"-\")[1:]\n return impact, -int(year), -int(num)\n return sorted(bugs, key=cve_sort_key, reverse=True)\n\n\ndef is_first_fix_any(flaw_bug: BugzillaBug, tracker_bugs: Iterable[Bug], current_target_release: str):\n # all z stream bugs are considered first fix\n if current_target_release[-1] != '0':\n return True\n\n if not tracker_bugs:\n # This shouldn't happen\n raise ValueError(f'flaw bug {flaw_bug.id} does not seem to have trackers')\n\n if not (hasattr(flaw_bug, 'alias') and flaw_bug.alias):\n raise ValueError(f'flaw bug {flaw_bug.id} does not have an alias')\n\n alias = flaw_bug.alias[0]\n cve_url = f\"https://access.redhat.com/hydra/rest/securitydata/cve/{alias}.json\"\n response = requests.get(cve_url)\n response.raise_for_status()\n data = response.json()\n\n major, minor = util.minor_version_tuple(current_target_release)\n ocp_product_name = f\"Red Hat OpenShift Container Platform {major}\"\n components_not_yet_fixed = []\n pyxis_base_url = \"https://pyxis.engineering.redhat.com/v1/repositories/registry/registry.access.redhat.com\" \\\n \"/repository/{pkg_name}/images?page_size=1&include=data.brew\"\n\n if 'package_state' not in data:\n logger.info(f'{flaw_bug.id} ({alias}) not considered a first-fix because no unfixed components were found')\n return False\n\n for package_info in data['package_state']:\n # previously we were also checking `package_info['fix_state'] in ['Affected', 'Under investigation']`\n # but we don't need to verify that since according to @sfowler if a package has a tracker for a cve\n # and was found in the list of unfixed components then it is assumed to be `Affected`\n if ocp_product_name in package_info['product_name']:\n pkg_name = package_info['package_name']\n # for images `package_name` field is usually the container delivery repo\n # otherwise we assume it's the exact brew package name\n if '/' in pkg_name:\n pyxis_url = pyxis_base_url.format(pkg_name=pkg_name)\n response = requests.get(pyxis_url, auth=HTTPSPNEGOAuth())\n if response.status_code == requests.codes.ok:\n data = response.json()['data']\n if data:\n pkg_name = data[0]['brew']['package']\n else:\n logger.warn(f'could not find brew package info at {pyxis_url}')\n else:\n logger.warn(f'got status={response.status_code} for {pyxis_url}')\n components_not_yet_fixed.append(pkg_name)\n\n # get tracker components\n first_fix_components = []\n for t in tracker_bugs:\n component = t.whiteboard_component\n if component in components_not_yet_fixed:\n first_fix_components.append((component, t.id))\n\n if first_fix_components:\n logger.info(f'{flaw_bug.id} ({alias}) considered first-fix for these (component, tracker):'\n f' {first_fix_components}')\n return True\n\n logger.info(f'{flaw_bug.id} ({alias}) not considered a first-fix because newly fixed trackers '\n f'components {[t.whiteboard_component for t in tracker_bugs]}, were not found in unfixed components '\n f'{components_not_yet_fixed}')\n return 
False\n","repo_name":"openshift-eng/elliott","sub_path":"elliottlib/bzutil.py","file_name":"bzutil.py","file_ext":"py","file_size_in_byte":52373,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"35"} +{"seq_id":"12927944394","text":"# Tutorial 4: Quit Button\n\nimport sys\nfrom PySide import QtGui\nfrom PySide import QtCore\n\n\nclass Example(QtGui.QWidget):\n\n    def __init__(self):\n        # boring, nothing new:\n        super(Example, self).__init__()\n        self.init_ui()\n\n    def init_ui(self):\n        # we've seen all this before:\n        quitBtn = QtGui.QPushButton('Quit', self)\n        # gui_widget.type_of_action.connect = accepts a fn as argument\n        # which dictates behavior on that action?\n        # \n        # instance() returns an object representing the current \n        # QApplication? That we then get the quit method of, since we\n        # want the application to quit once the quit button is clicked.\n        quitBtn.clicked.connect(QtCore.QCoreApplication.instance().quit)\n        # ho hum:\n        quitBtn.setToolTip('This QPushButton quits the application.')\n        quitBtn.resize(quitBtn.sizeHint())\n\n        self.setGeometry(0, 0, 250, 300)\n        self.show()\n\n\ndef main():\n    app = QtGui.QApplication(sys.argv)\n    ex = Example()\n    sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"EFulmer/python_doodles","sub_path":"pyside_tut/ex_4.py","file_name":"ex_4.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"5545675179","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\n\nclass LocalModel(nn.Module):\n    def __init__(self, nums_word, word_dim, max_desc_len, max_neigh_len):\n        super(LocalModel, self).__init__()\n        self.attention = nn.Sequential(\n            nn.Linear(word_dim*2, word_dim, bias=False),\n            nn.ReLU(),\n            nn.Linear(word_dim, word_dim, bias=False),\n            nn.ReLU(),\n            nn.Linear(word_dim, 1, bias=False),\n            nn.Sigmoid(),\n        )\n        self.col_norm = nn.Softmax(dim=2)\n        self.col_pool = nn.MaxPool2d((1, max_neigh_len))\n        self.row_norm = nn.Softmax(dim=1)\n        self.row_pool = nn.MaxPool2d((max_neigh_len, 1))\n        self.word_embeddings = nn.Embedding(nums_word+1, word_dim)\n        self._init_weight()\n    \n    def _init_weight(self):\n        # init embedding\n        nn.init.xavier_uniform_(self.word_embeddings.weight, gain=nn.init.calculate_gain('relu'))\n        # init attention\n        for layer in self.attention:\n            if isinstance(layer,nn.Linear):\n                nn.init.xavier_uniform_(layer.weight, gain=nn.init.calculate_gain('relu'))\n    \n    \n    def forward(self, params, u_matrix, i_matrix, w_emb):\n        \"\"\"Compute co-attention features for user and item description matrices.\n\n        Args:\n            u_matrix (torch.Tensor): [batch, max_neigh_len, max_desc_len]\n            i_matrix (torch.Tensor): [batch, max_neigh_len, max_desc_len]\n\n        Returns:\n            tuple: (u_features, v_features) attention-pooled representations\n        \"\"\"\n        # learn a representation of each item's description\n        shape = u_matrix.shape\n        # # shape: [batch*max_neigh_len, max_desc_len, word_dim]\n        u_matrix = w_emb(u_matrix.view(shape[0]*shape[1], -1)).view(shape[0], shape[1], -1)\n        # # shape: [batch*max_neigh_len, max_desc_len, word_dim]\n        i_matrix = w_emb(i_matrix.view(shape[0]*shape[1], -1)).view(shape[0], shape[1], -1)\n        \n        # co-attention\n        u_matrix_repeat = torch.repeat_interleave(u_matrix, i_matrix.shape[1], axis=1)\n        i_matrix_repeat = i_matrix.repeat(1, u_matrix.shape[1], 1)\n        # shape: [batch, max_neigh_len*max_neigh_len, filter_num*2]\n        h = torch.cat([u_matrix_repeat, i_matrix_repeat], -1)\n        weights = self.attention(h).view(u_matrix.shape[0], u_matrix.shape[1], -1)\n        u_weight = self.col_pool(weights).view(u_matrix.shape[0], -1, 1)\n        
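# co-attention pooling: `weights` is a [batch, max_neigh_len, max_neigh_len]\n        # user-item affinity map; col_pool takes a row-wise max over the item axis to\n        # score each user entry (u_weight, above), while row_pool takes a column-wise\n        # max over the user axis to score each item entry (i_weight, below):\n        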
i_weight = self.row_pool(weights).view(i_matrix.shape[0], -1, 1)\n u_features = torch.mean(u_matrix*u_weight, axis=1).view(u_matrix.shape[0], -1)\n v_features = torch.mean(i_weight*i_matrix, axis=1).view(i_matrix.shape[0], -1)\n return u_features, v_features\n \n\n\n","repo_name":"scwu1008/KADM","sub_path":"models/LocalModel.py","file_name":"LocalModel.py","file_ext":"py","file_size_in_byte":2657,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"} +{"seq_id":"4197484870","text":"\"\"\"open_discussions utilities\"\"\"\nimport datetime\nimport os\nfrom enum import auto, Flag\nfrom itertools import islice\nimport logging\n\nimport pytz\nfrom django.conf import settings\n\nfrom bs4 import BeautifulSoup\nimport markdown2\n\n\nlog = logging.getLogger(__name__)\n\n# This is the Django ImageField max path size\nIMAGE_PATH_MAX_LENGTH = 100\n\n\nclass FeatureFlag(Flag):\n \"\"\"\n FeatureFlag enum\n\n Members should have values of increasing powers of 2 (1, 2, 4, 8, ...)\n\n \"\"\"\n\n EXAMPLE_FEATURE = auto()\n\n\ndef is_near_now(time):\n \"\"\"\n Returns true if time is within five seconds or so of now\n Args:\n time (datetime.datetime):\n The time to test\n Returns:\n bool:\n True if near now, false otherwise\n \"\"\"\n now = datetime.datetime.now(tz=pytz.UTC)\n five_seconds = datetime.timedelta(0, 5)\n return now - five_seconds < time < now + five_seconds\n\n\ndef now_in_utc():\n \"\"\"\n Get the current time in UTC\n Returns:\n datetime.datetime: A datetime object for the current time\n \"\"\"\n return datetime.datetime.now(tz=pytz.UTC)\n\n\ndef normalize_to_start_of_day(dt):\n \"\"\"\n Normalizes a datetime value to the start of it's day\n\n Args:\n dt (datetime.datetime): the datetime to normalize\n\n Returns:\n datetime.datetime: the normalized datetime\n \"\"\"\n return dt.replace(hour=0, minute=0, second=0, microsecond=0)\n\n\ndef chunks(iterable, *, chunk_size=20):\n \"\"\"\n Yields chunks of an iterable as sub lists each of max size chunk_size.\n\n Args:\n iterable (iterable): iterable of elements to chunk\n chunk_size (int): Max size of each sublist\n\n Yields:\n list: List containing a slice of list_to_chunk\n \"\"\"\n chunk_size = max(1, chunk_size)\n iterable = iter(iterable)\n chunk = list(islice(iterable, chunk_size))\n\n while len(chunk) > 0:\n yield chunk\n chunk = list(islice(iterable, chunk_size))\n\n\ndef merge_strings(list_or_str):\n \"\"\"\n Recursively go through through nested lists of strings and merge into a flattened list.\n\n Args:\n list_or_str (any): A list of strings or a string\n\n Returns:\n list of str: A list of strings\n \"\"\"\n\n list_to_return = []\n _merge_strings(list_or_str, list_to_return)\n return list_to_return\n\n\ndef _merge_strings(list_or_str, list_to_return):\n \"\"\"\n Recursively go through through nested lists of strings and merge into a flattened list.\n\n Args:\n list_or_str (any): A list of strings or a string\n list_to_return (list of str): The list the strings will be added to\n \"\"\"\n if isinstance(list_or_str, list):\n for item in list_or_str:\n _merge_strings(item, list_to_return)\n elif list_or_str is not None:\n list_to_return.append(list_or_str)\n\n\ndef filter_dict_keys(orig_dict, keys_to_keep, *, optional=False):\n \"\"\"\n Returns a copy of a dictionary filtered by a collection of keys to keep\n\n Args:\n orig_dict (dict): A dictionary\n keys_to_keep (iterable): Keys to filter on\n optional (bool): If True, ignore keys that don't exist in the dict. 
If False, raise a KeyError.\n \"\"\"\n return {\n key: orig_dict[key] for key in keys_to_keep if not optional or key in orig_dict\n }\n\n\ndef filter_dict_with_renamed_keys(orig_dict, key_rename_dict, *, optional=False):\n \"\"\"\n Returns a copy of a dictionary with keys renamed according to a provided dictionary\n\n Args:\n orig_dict (dict): A dictionary\n key_rename_dict (dict): Mapping of old key to new key\n optional (bool): If True, ignore keys that don't exist in the dict. If False, raise a KeyError.\n \"\"\"\n return {\n new_key: orig_dict[key]\n for key, new_key in key_rename_dict.items()\n if not optional or key in orig_dict\n }\n\n\ndef html_to_plain_text(html_str):\n \"\"\"\n Takes an HTML string and returns text with HTML tags removed and line breaks replaced with spaces\n\n Args:\n html_str (str): A string containing HTML tags\n\n Returns:\n str: Plain text\n \"\"\"\n soup = BeautifulSoup(html_str, features=\"html.parser\")\n return soup.get_text().replace(\"\\n\", \" \")\n\n\ndef markdown_to_plain_text(markdown_str):\n \"\"\"\n Takes a string and returns text with Markdown elements removed and line breaks\n replaced with spaces\n\n Args:\n markdown_str (str): A string containing Markdown\n\n Returns:\n str: Plain text\n \"\"\"\n html_str = markdown2.markdown(markdown_str)\n return html_to_plain_text(html_str).strip()\n\n\ndef prefetched_iterator(query, chunk_size=2000):\n \"\"\"\n This is a prefetch_related-safe version of what iterator() should do.\n It will sort and batch on the default django primary key\n\n Args:\n query (QuerySet): the django queryset to iterate\n chunk_size (int): the size of each chunk to fetch\n\n \"\"\"\n # walk the records in ascending id order\n base_query = query.order_by(\"id\")\n\n def _next(greater_than_id):\n \"\"\"Returns the next batch\"\"\"\n return base_query.filter(id__gt=greater_than_id)[:chunk_size]\n\n batch = _next(0)\n\n while batch:\n item = None\n # evaluate each batch query here\n for item in batch:\n yield item\n\n # next batch starts after the last item.id\n batch = _next(item.id) if item is not None else None\n\n\ndef generate_filepath(filename, directory_name, suffix, prefix):\n \"\"\"\n Generate and return the filepath for an uploaded image\n\n Args:\n filename(str): The name of the image file\n directory_name (str): A directory name\n suffix(str): 'small', 'medium', or ''\n prefix (str): A directory name to use as a prefix\n\n Returns:\n str: The filepath for the uploaded image.\n \"\"\"\n name, ext = os.path.splitext(filename)\n timestamp = now_in_utc().replace(microsecond=0)\n path_format = \"{prefix}/{directory_name}/{name}-{timestamp}{suffix}{ext}\"\n\n path_without_name = path_format.format(\n timestamp=timestamp.strftime(\"%Y-%m-%dT%H%M%S\"),\n prefix=prefix,\n directory_name=directory_name,\n suffix=suffix,\n ext=ext,\n name=\"\",\n )\n if len(path_without_name) >= IMAGE_PATH_MAX_LENGTH:\n raise ValueError(\n \"path is longer than max length even without name: {}\".format(\n path_without_name\n )\n )\n\n max_name_length = IMAGE_PATH_MAX_LENGTH - len(path_without_name)\n full_path = path_format.format(\n name=name[:max_name_length],\n timestamp=timestamp.strftime(\"%Y-%m-%dT%H%M%S\"),\n prefix=prefix,\n directory_name=directory_name,\n suffix=suffix,\n ext=ext,\n )\n\n return full_path\n\n\ndef extract_values(obj, key):\n \"\"\"\n Pull all values of specified key from nested JSON.\n\n Args:\n obj(dict): The JSON object\n key(str): The JSON key to search for and extract\n\n Returns:\n list of matching key values\n\n 
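Example:\n        >>> extract_values({'a': 1, 'b': {'a': 2}}, 'a')\n        [1, 2]\n    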
\"\"\"\n array = []\n\n def extract(obj, array, key):\n \"\"\"Recursively search for values of key in JSON tree.\"\"\"\n if isinstance(obj, dict):\n for k, v in obj.items():\n if k == key:\n array.append(v)\n if isinstance(v, (dict, list)):\n extract(v, array, key)\n elif isinstance(obj, list):\n for item in obj:\n extract(item, array, key)\n return array\n\n results = extract(obj, array, key)\n return results\n\n\ndef write_to_file(filename, contents):\n \"\"\"\n Write content to a file in binary mode, creating directories if necessary\n\n Args:\n filename (str): The full-path filename to write to.\n contents (bytes): What to write to the file.\n\n \"\"\"\n if not os.path.exists(os.path.dirname(filename)):\n os.makedirs(os.path.dirname(filename))\n if os.path.exists(filename):\n with open(filename, \"rb\") as infile:\n if infile.read() == contents:\n return\n with open(filename, \"wb\") as infile:\n infile.write(contents)\n\n\ndef write_x509_files():\n \"\"\"Write the x509 certificate and key to files\"\"\"\n write_to_file(settings.MIT_WS_CERTIFICATE_FILE, settings.MIT_WS_CERTIFICATE)\n write_to_file(settings.MIT_WS_PRIVATE_KEY_FILE, settings.MIT_WS_PRIVATE_KEY)\n\n\ndef get_field_names(model):\n \"\"\"\n Get field names which aren't autogenerated\n\n Args:\n model (class extending django.db.models.Model): A Django model class\n Returns:\n list of str:\n A list of field names\n \"\"\"\n return [\n field.name\n for field in model._meta.get_fields()\n if not field.auto_created # pylint: disable=protected-access\n ]\n","repo_name":"mitodl/open-discussions","sub_path":"open_discussions/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8740,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"35"} +{"seq_id":"19765633907","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom tensorflow.keras.preprocessing.image import img_to_array, load_img\n\n\n\ndef visualize_convolutions(model, img_prep):\n\n successive_outputs = [layer.output for layer in model.layers[1:]]\n visualization_model = tf.keras.models.Model(inputs = model.input, outputs = successive_outputs)\n layer_names = [layer.name for layer in model.layers[1:]]\n\n successive_feature_maps = visualization_model.predict(img_prep)\n\n for layer_name, feature_map in zip(layer_names, successive_feature_maps):\n if len(feature_map.shape) == 4:\n \n n_features = feature_map.shape[-1] \n \n size = feature_map.shape[1]\n \n display_grid = np.zeros((size, size * n_features))\n for i in range(n_features):\n \n x = feature_map[0, :, :, i]\n x -= x.mean()\n x /= x.std()\n x *= 64\n x += 128\n x = np.clip(x, 0, 255).astype('uint8')\n \n display_grid[:, i * size : (i + 1) * size] = x\n \n scale = 20. 
/ n_features\n plt.figure(figsize=(scale * n_features, scale))\n plt.title(layer_name)\n plt.grid(False)\n plt.imshow(display_grid, aspect='auto', cmap='viridis')","repo_name":"abhi8893/tfutils","sub_path":"tfutils/image/visualize_conv.py","file_name":"visualize_conv.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"20999666028","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\n#matplotlib 한글 폰트 오류해결\nfrom IPython.core.pylabtools import figsize\nfrom matplotlib import font_manager,rc\nfont_path = \"D:/5674-833_4th/part4/malgun.ttf\"\nfont_name = font_manager.FontProperties(fname= font_path).get_name()\nrc('font',family = font_name)\n\nplt.style.use('ggplot')\nplt.rcParams['axes.unicode_minus'] = False #마이너스 깨짐 방지\n\n#Excel 데이터를 데이터 프레임으로 변환\ndf = pd.read_excel(\"D:/5674-833_4th/part4/남북한발전전력량.xlsx\",engine = 'openpyxl',convert_float= True)\n\n\ndf = df.loc[5:9]\n\ndf.drop('전력량 (억㎾h)',axis = 'columns',inplace = True)\ndf.set_index('발전 전력별',inplace = True)\n\ndf = df.T\nprint(df)\nprint('\\n')\n\n#증강률(변동률 계산)\n\ndf = df.rename(columns={'합계':'총발전량'})\nprint(df)\ndf['총발전량 - 1년'] = df['총발전량'].shift(1)\ndf['증감률'] = ((df['총발전량']/df['총발전량 - 1년'])-1) * 100\n\n# 2축 그래프 그리기\nax1 = df[['수력','화력']].plot(figsize= (20,10),kind = 'bar' ,width = 0.7, stacked = True)\nax2 = ax1.twinx()\nax2.plot(df.index,df.증감률,ls = '--',marker = 'o',markersize =20, #ls = '--' 는 점선으로 그려라\n color ='red',label = '전년대비 증감률(%)')\n\nax1.set_ylim(0,500)\nax2.set_ylim(-50,50)\n\nax1.set_xlabel('연도',size =20)\nax1.set_ylabel('발전량 (억 KWh)')\nax2.set_ylabel('전년 대비 증감률(%)')\nplt.title('북한 전력 발전량(1990~2016)',size =30)\nax1.legend(loc = 'upper left')\n\nplt.show()\n\n","repo_name":"tls1403/PythonTest","sub_path":"part4/matplotlib/northkoreaelectricpower.py","file_name":"northkoreaelectricpower.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"33161594467","text":"import json\nimport asyncio\nimport unittest\n\nimport pytest\nfrom aiohttp import ClientSession\n\nimport aiohttpretty\n\n\ndef async_test(f):\n def wrapper(*args, **kwargs):\n coro = asyncio.coroutine(f)\n future = coro(*args, **kwargs)\n loop = asyncio.get_event_loop()\n loop.run_until_complete(future)\n return wrapper\n\n\nclass DummyAsyncStream(asyncio.StreamReader):\n\n def __init__(self, data):\n super().__init__()\n self.size = len(data)\n self.feed_data(data)\n self.feed_eof()\n\n\nclass TestGeneral(unittest.TestCase):\n\n def tearDown(self):\n aiohttpretty.clear()\n\n @async_test\n async def test_fake_request(self):\n desired_response = b'example data'\n url = 'http://example.com/'\n\n aiohttpretty.register_uri('GET', url, body=desired_response)\n\n response = await aiohttpretty.fake_request('GET', url)\n data = await response.read()\n assert data == desired_response\n\n def test_register_uri(self):\n url = 'http://example.com/'\n desired_response = b'example data'\n\n aiohttpretty.register_uri('GET', url, body=desired_response)\n options = aiohttpretty.registry[('GET', 'http://example.com/')]\n assert options == {'body': b'example data'}\n\n def test_register_json_uri(self):\n url = 'http://example.com/'\n desired_response = {'test_key': 'test_value'}\n\n aiohttpretty.register_json_uri('GET', url, body=desired_response)\n options = aiohttpretty.registry[('GET', 'http://example.com/')]\n assert json.loads(options['body'].decode('utf-8')) == 
desired_response\n\n @async_test\n async def test_param_handling(self):\n url = 'http://example-params.com/?test=test'\n desired_error_msg = (\n \"No URLs matching GET http://example-params.com/?test=test with params {'test': 'test'}. \"\n \"Not making request. Go fix your test.\"\n )\n try:\n await aiohttpretty.fake_request('GET', url)\n except Exception as exception:\n assert str(exception) == desired_error_msg\n\n @async_test\n async def test_params(self):\n desired_response = b'example data'\n url = 'http://example.com/'\n params = {'meow': 'quack', 'woof': 'beans'};\n\n aiohttpretty.register_uri('GET', url, params=params, body=desired_response)\n\n response = await aiohttpretty.fake_request('GET',\n 'http://example.com/?meow=quack&woof=beans')\n data = await response.read()\n assert data == desired_response\n\n @async_test\n async def test_str_response_encoding(self):\n aiohttpretty.register_uri('GET',\n 'http://example.com/',\n body='example résumé data')\n response = await aiohttpretty.fake_request('GET',\n 'http://example.com/')\n data = await response.read()\n assert data == 'example résumé data'.encode('utf-8')\n\n @async_test\n async def test_has_call(self):\n aiohttpretty.register_uri('GET',\n 'http://example.com/',\n params={'alpha': '1', 'beta': None},\n body='foo')\n response = await aiohttpretty.fake_request('GET',\n 'http://example.com/?alpha=1&beta=')\n assert await response.read() == b'foo'\n\n params_equivalent = [\n 'http://example.com/?alpha=1&beta=',\n 'http://example.com/?beta=&alpha=1',\n ]\n for uri in params_equivalent:\n assert aiohttpretty.has_call(method='GET', uri=uri)\n\n params_different = [\n 'http://example.com/',\n 'http://example.com/?alpha=2&beta=',\n # 'http://example.com/?alpha=1', # buggy atm\n 'http://example.com/?beta=',\n 'http://example.com/?alpha=1&beta=1',\n 'http://example.com/?alpha=&beta=',\n ]\n for uri in params_different:\n assert not aiohttpretty.has_call(method='GET', uri=uri)\n\n assert aiohttpretty.has_call(method='GET', uri='http://example.com/',\n params={'alpha': '1', 'beta': None})\n assert aiohttpretty.has_call(method='GET', uri='http://example.com/', check_params=False)\n assert not aiohttpretty.has_call(method='POST', uri='http://example.com/?alpha=1&beta=')\n assert not aiohttpretty.has_call(method='GET', uri='http://otherexample.com/')\n\n def test_activate(self):\n orig_real_id = id(ClientSession._request)\n orig_fake_id = id(aiohttpretty.fake_request)\n\n assert aiohttpretty.request is None\n assert ClientSession._request != aiohttpretty.fake_request\n assert id(ClientSession._request) == orig_real_id\n assert id(ClientSession._request) != orig_fake_id\n\n aiohttpretty.activate()\n\n assert aiohttpretty.request is not None\n assert id(aiohttpretty.request) == orig_real_id\n\n assert ClientSession._request == aiohttpretty.fake_request\n assert id(ClientSession._request) != orig_real_id\n assert id(ClientSession._request) == orig_fake_id\n\n aiohttpretty.deactivate()\n\n assert aiohttpretty.request is None\n assert ClientSession._request != aiohttpretty.fake_request\n assert id(ClientSession._request) == orig_real_id\n assert id(ClientSession._request) != orig_fake_id\n\n @async_test\n async def test_multiple_responses(self):\n aiohttpretty.register_uri(\n 'GET',\n 'http://example.com/',\n responses=[\n {\n 'status': 200,\n 'body': 'moo',\n },\n {\n 'status': 200,\n 'body': 'quack',\n },\n ],\n )\n\n first_resp = await aiohttpretty.fake_request('GET', 'http://example.com/')\n assert await first_resp.read() == b'moo'\n\n 
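# registered responses are consumed in order, so this second request gets the second body\n        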
second_resp = await aiohttpretty.fake_request('GET', 'http://example.com/')\n assert await second_resp.read() == b'quack'\n\n with pytest.raises(Exception) as exc:\n await aiohttpretty.fake_request('GET', 'http://example.com/')\n\n def test_no_params_in_responses(self):\n with pytest.raises(ValueError):\n aiohttpretty.register_uri(\n 'GET',\n 'http://example.com/',\n responses=[\n {\n 'status': 200,\n 'body': 'moo',\n 'params': {'alpha': '1', 'beta': None}\n },\n ],\n )\n\n with pytest.raises(ValueError):\n aiohttpretty.register_uri(\n 'GET',\n 'http://example.com/',\n responses=[\n {\n 'status': 200,\n 'body': 'woof',\n },\n {\n 'status': 200,\n 'body': 'moo',\n 'params': {'alpha': '1', 'beta': None}\n },\n ],\n )\n\n @async_test\n async def test_headers_in_response(self):\n aiohttpretty.register_uri('GET', 'http://example.com/',\n headers={'X-Magic-Header': '1'})\n\n first_resp = await aiohttpretty.fake_request('GET', 'http://example.com/')\n assert 'X-Magic-Header' in first_resp.headers\n\n @async_test\n async def test_async_streaming_body(self):\n stream = DummyAsyncStream(b'meow')\n aiohttpretty.register_uri('GET', 'http://example.com/', body=stream)\n\n resp = await aiohttpretty.fake_request('GET', 'http://example.com/')\n assert await resp.read() == b'meow'\n\n @async_test\n async def test_invalid_body(self):\n aiohttpretty.register_uri('GET', 'http://example.com/', body=1234)\n\n with pytest.raises(TypeError):\n await aiohttpretty.fake_request('GET', 'http://example.com/')\n\n @async_test\n async def test_passed_data_is_read(self):\n aiohttpretty.register_uri('GET', 'http://example.com/', body='woof')\n\n stream = DummyAsyncStream(b'meow')\n assert not stream.at_eof()\n\n resp = await aiohttpretty.fake_request('GET', 'http://example.com/', data=stream)\n\n assert stream.at_eof()\n assert await resp.read() == b'woof'\n\n @async_test\n async def test_aiohttp_request(self):\n aiohttpretty.register_uri('GET', 'http://example.com/', body=b'example data')\n\n aiohttpretty.activate()\n async with ClientSession() as session:\n async with session.get('http://example.com/') as response:\n assert await response.read() == b'example data'\n aiohttpretty.deactivate()\n","repo_name":"CenterForOpenScience/aiohttpretty","sub_path":"tests/test_general.py","file_name":"test_general.py","file_ext":"py","file_size_in_byte":8983,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"35"} +{"seq_id":"72804472742","text":"# -*- coding: utf-8 -*-\n\"\"\"\nPull some data for 10 stocks used for PCA FA and copula and extreme value theory later...\n\nCreated on Mon May 3 15:50:57 2021\n\n@author: MauritsOever\n\"\"\"\n\ndef DataPuller_assignment3():\n # specify packages just in case:\n import os\n import numpy as np\n import pandas as pd\n from pandas_datareader import data as pdr\n from datetime import date\n import yfinance as yf\n yf.pdr_override()\n \n # hardcode all the arguments bc one assignment anyways, next time i can automate more easily\n # main args\n ticker_list = ['ASML.AS', 'SONY', 'AKZA.AS', 'BAYN.DE', 'TSN', 'NTDOY', 'SQNXF', 'AMD','CSGN.SW', 'MUFG']\n path = r\"C:\\Users\\gebruiker\\Documents\\GitHub\\QFRM\\Data3\\\\\"\n \n # some more args\n start_date = '2011-04-20'\n end_date = '2021-04-20'\n files = []\n \n # check if function has run/downloaded stuff before:\n if 'data_main.csv' in os.listdir(r\"C:\\Users\\gebruiker\\Documents\\GitHub\\QFRM\\Data3\\\\\"):\n df = 
pd.read_csv(r\"C:\\Users\\gebruiker\\Documents\\GitHub\\QFRM\\Data3\\data_main.csv\",index_col=0)\n \n else:\n def SaveData(df, filename):\n df.to_csv(path +filename+\".csv\")\n \n \n def getData(ticker):\n print(ticker)\n data = pdr.get_data_yahoo(ticker, start=start_date, end=end_date)\n dataname = ticker\n files.append(dataname)\n SaveData(data, dataname)\n \n for tik in ticker_list:\n getData(tik)\n \n \n df = pd.read_csv(path+str(files[0])+\".csv\")\n df[str(files[0])] = df['Adj Close']\n # filter df on adjclose and date:\n df = df.iloc[:,list([0,-1])]\n \n for i in range(1, len(files)):\n #for i in range(1, 3):\n df1 = pd.read_csv(path+str(files[i])+\".csv\")\n df1[str(files[i])] = df1['Adj Close']\n df1 = df1.iloc[:,list([0,-1])]\n \n # now join those df1s to df for master dataset to get \n df = pd.merge(df, df1, how='left', on=['Date'])\n \n # clean it up a bit, remove nans by ffill\n df = df.iloc[1:,:]\n df = df.ffill(axis=0)\n \n # get log returns for every ticker\n \n for tic in df.columns[1:]:\n df[tic+'_ret'] = np.log(df[tic]) - np.log(df[tic].shift(1))\n \n # get some portfolio returns, assume average weight...\n df['port_ret'] = df.iloc[:,len(ticker_list)+1:len(df.columns)+1].mean(axis=1)\n df.to_csv(path+'data_main.csv')\n \n dfrets = df.iloc[1:,len(ticker_list)+1:len(df.columns)-1]\n return df, dfrets\n\n\n\ndf, dfrets = DataPuller_assignment3()\n\n\n\n\n\n","repo_name":"c0nn0rstevens/QFRM","sub_path":"Assignment 3/data_puller3000.py","file_name":"data_puller3000.py","file_ext":"py","file_size_in_byte":2686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"23825478750","text":"# Machine Learning courseby Andrew Ng\n# Python version\n# Solution by: Amin Ahmadi\n# Date: April 9, 2018\n\n#### Logistic regression, classification\n##################################################\nimport numpy as np\nimport matplotlib.pyplot as pl\n\n# Read admission data into an numpy array for analysis and plotting\ndata = np.loadtxt('ex2data1.txt', delimiter=',', dtype=float)\npl.plot(data[:,0], data[:,1], '*b')\n\n# To find admitted and rejected student\n\n#admitted students\ndp = data[data[:,2]==1,0:2]\n#rejected students\ndn = data[data[:,2]==0,0:2]\n\n\n# Plot admitted and rejected student with different colors\n# pl.plot(dp[:,0], dp[:,1], 'ko', label='admitted')\n# pl.plot(dn[:,0], dn[:,1], 'b*', label='rejected')\n# pl.legend()\n\n########################################\ndef sigmoid(x):\n \"\"\"Calculate sigmoid function of input paramter.\n input:\n ------\n x: float\n \n return:\n -------\n aux: float\n \"\"\"\n if x == 0:\n aux=0.5\n else:\n aux = 1./( 1+np.exp(-x) )\n \n return aux\n# to be applicable to an array, vectorize it \nsigmoid = np.vectorize(sigmoid)\n\n########################################\ndef costFun(x,y,theta):\n \"\"\"Calculate the cost function in logistic regression.\n input:\n ------\n x: array, float, features\n y: array, float, targets\n theta: array, float, hypothesis parameter\n \n return:\n -------\n float, the cost \n \"\"\"\n aux = 0\n M = x.shape[0]\n aux = np.sum(-y*np.log(sigmoid(np.dot(x,theta))) -\\\n (1-y)*np.log(1-sigmoid(np.dot(x,theta))))\n return aux/M\n\n########################################\ndef costGrad(x,y,theta):\n \"\"\"Gradient of the logistic regression cost function.\n input:\n ------\n x: array, float, features\n y: array, float, target\n theta: array, hypothesis paramters\n \n return:\n -------\n grad: array, float, dimension of features\n \"\"\"\n M = x.shape[0]\n 
grad = np.zeros((x.shape[1]),float)\n temp = sigmoid(np.dot(theta,x.T))\n error = temp - y\n grad = (1./M)*np.dot(x.T,error)\n return grad\n\n########################################\ndef g_des(x,y,theta_int,alpha):\n \"\"\"Gradient desendent to find the minimum of the cost function.\n input:\n ------\n x: array, float, features\n y: array, float, target\n theta_int: array, hypothesis paramters, initial guess.\n \n return:\n -------\n theta:array, hypothesis parameters, fitted\n Jarr: array, cost function in each iteration\n \"\"\"\n num_itr = 5000\n Jarr = np.zeros((num_itr), float)\n theta = theta_int\n print(Jarr.shape)\n for it in range(num_itr):\n theta -= alpha*costGrad(x,y,theta)\n Jarr[it] = costFun(x,y,theta)\n return theta, Jarr\n\n########################################\n\nx = data[:,0:2]\nm = x.shape[0]\n# add one column of 1s to the featurs for theta_0\nx = np.hstack((np.ones((m,1), float),x))\ny = data[:,2]\ntheta_int = np.array([-10, 0.1,0.1])\ntheta_f, J_arr = g_des(x,y,theta_int,0.001)\nprint(theta_f)\nprint(sigmoid(np.dot(theta_f,np.array([1,45,85],float))))\nd1 = np.linspace(0,1,5000)\npl.plot(d1,J_arr, 'k,')\n\n\n########################################\nfrom scipy.optimize import minimize\n# define one-paramter cost function for optimization\ndef cost_1param(theta):\n \"\"\"one parameter cost function for optimization\"\"\"\n x = data[:,0:2]\n m = x.shape[0]\n x1 = np.ones((m,1),float)\n x = np.hstack((x1,x))\n y = data[:,2]\n return costFun(x,y,theta)\n\n#optimization using \"minimize\" from scipy\nres = minimize(cost_1param,(0.,0.1,0.1))\nprint(res)\nprint(res.x)\n#prediction for test of (45,85) = 0.776\nsigmoid(np.dot(res.x,np.array([1,45,85])))\n\n########################################\nx = data[:,0:2]\ny = data[:,2]\ntheta = res.x\nx_pos = x[y==1,:]\nx_neg = x[y==0,:]\npl.plot(x_pos[:,0], x_pos[:,1], 'ko', label='admitted')\npl.plot(x_neg[:,0], x_neg[:,1], 'b*', label='rejected')\npl.legend()\nx1_fit = np.linspace(min(x[:,0]), max(x[:,0]),100)\nx2_fit = (-1./theta[2])* (theta[1]*x1_fit + theta[0])\npl.plot(x1_fit,x2_fit, 'r-')\npl.xlim(min(x[:,0]), max(x[:,0]))\npl.ylim(min(x[:,1]), max(x[:,1]))\npl.show()\n","repo_name":"khroushan/ML_Ng_Py","sub_path":"ex2/ML_ex2.py","file_name":"ML_ex2.py","file_ext":"py","file_size_in_byte":4110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"2310334166","text":"from flask import Flask, render_template, redirect, request\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/test.db'\ndb = SQLAlchemy(app)\n\n# go to python interpreter, import db and create it\n# import Pokemon class, add create some instances, add them and commit\nclass Pokemon(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(20), unique=True)\n element = db.Column(db.String(20), unique=False)\n\n def __init__(self, name, element):\n self.name = name \n self.element = element \n\n def __repr__(self):\n return '' % self.name\n\n\n@app.route('/')\ndef hello_world():\n return render_template(\"index.html\")\n\n# hits GET request by default\n@app.route('/pokemons')\ndef pokemons():\n # query db\n pokemons = Pokemon.query.all()\n\n return render_template(\"list.html\", pokemons=pokemons)\n\n\n@app.route('/add_pokemon', methods=['POST'])\ndef new_pokemon():\n\n name = request.form['name']\n element = request.form['element']\n\n new_pokemon = Pokemon(name, element)\n db.session.add(new_pokemon)\n 
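# commit writes the pending INSERT; until then the new Pokemon row only exists in the session\n    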
db.session.commit()\n\n return redirect('/pokemons')\n\n\n@app.route('/delete_pokemons', methods=['POST'])\ndef delete_pokemons():\n\n Pokemon.query.delete()\n db.session.commit()\n return redirect('/pokemons')\n\nif __name__ == '__main__':\n # always have debug true while developing\n app.run(debug=True)\n\n\n","repo_name":"psuong/FlaskWorkshop","sub_path":"pokemon_list/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"14926097915","text":"import glob,re,sys\r\n\r\n\r\nvarre = re.compile(\"(.*)VAR_([LBW])\")\r\n\r\nsizedict = {\"UBYTE\":1,\"UWORD\":2,\"ULONG\":4,\"STRUCT\":16}\r\nsize2dict = {\"B\":1,\"W\":2,\"L\":4}\r\n\r\nvarsize_dict = {}\r\n# open the .i file to get the declared types (AKA sizes)\r\n# check odd word/longs!! (assuming all structs are even)\r\n\r\nalign = 0\r\nfailed = False\r\n\r\nwith open(\"../include/jst_rel.i\") as f:\r\n for line in f:\r\n toks = line.split()\r\n if len(toks)>1:\r\n sz = sizedict.get(toks[0])\r\n if sz:\r\n if sz!=1 and align:\r\n print(\"Error: odd-aligned variable {}\".format(toks[1]))\r\n failed = True\r\n align = (align + sz) % 2\r\n if toks[1].startswith(\"RelVar_\"):\r\n varsize_dict[toks[1][len(\"RelVar_\"):]] = sz\r\n\r\n# now scan the .asm files\r\nfor asm_file in glob.glob(\"src/*.asm\"):\r\n with open(asm_file) as f:\r\n for i,line in enumerate(f,1):\r\n toks = line.split()\r\n if len(toks)>1:\r\n if toks[0].startswith(\";\"):\r\n continue\r\n m = varre.match(toks[0])\r\n if m:\r\n used_size = size2dict[m.group(2)]\r\n for x in toks[1].split(\",\"):\r\n if x in varsize_dict:\r\n if varsize_dict[x] != used_size:\r\n print(\"Error: {}:{}: {} declared size {}, used size {}\".format(asm_file,i,x,varsize_dict[x],used_size))\r\n failed = True\r\n\r\nsys.exit(int(failed))\r\n\r\n","repo_name":"jotd666/jst","sub_path":"scripts/check_reloc_vars.py","file_name":"check_reloc_vars.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"35"} +{"seq_id":"12584703086","text":"from settings import *\r\nimport json\r\nfrom flask import Flask, make_response, request\r\nfrom datetime import datetime, timedelta\r\n\r\n\r\n@app.route('/', methods=['GET'])\r\ndef get_item():\r\n request_data = request.get_json() # getting data from client\r\n resp = make_response(f\"The Cookie has been Set {request_data['itemname']}\")\r\n resp.set_cookie('itemname', request_data['itemname'])\r\n return resp\r\n\r\n@app.route('/', methods=['POST'])\r\ndef post_item():\r\n '''Function to add new movie to our database'''\r\n # timestring = \"17:00-21:00,7:00-10:00,10:45-14:45\"\r\n request_data = request.get_json() # getting data from client\r\n timestring = request_data[\"timestring\"]\r\n range_list = timestring.split(\",\")\r\n is_item_available = False\r\n current_time = request.cookies.get('itemname', None)\r\n item = request_data[\"item\"]\r\n current_time = datetime.strptime(current_time, \"%H:%M\")\r\n if current_time:\r\n for time_range in range_list:\r\n sample = time_range.split(\"-\")\r\n range1 = datetime.strptime(sample[0], \"%H:%M\")\r\n range2 = datetime.strptime(sample[1], \"%H:%M\")\r\n if(current_time.time()>=range1.time() and current_time.time()<=range2.time()):\r\n is_item_available = True\r\n break\r\n \r\n if is_item_available:\r\n response = Response(f\"{item} available\", 200, mimetype='application/json')\r\n else:\r\n response = 
Response(f\"{item} Not available\", 200, mimetype='application/json')\r\n else:\r\n resp = make_response(f\"Cookie not set\")\r\n return resp\r\n return response\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app.run(port=1234, debug=True)\r\n","repo_name":"ramanjaneyalu/Food-app-theme-flask-project","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"27855679673","text":"from pathlib import Path\n\nimport torch\nfrom torch.nn.utils.rnn import pad_sequence\n\nfrom .dataset import Wav2VecMelDataset\nfrom ..base import DataModuleBase\nfrom ..common import load_data_with_indices\n\n\nclass Wav2VecMelDataModule(DataModuleBase):\n\n def setup(self, stage=None):\n train, valid, train_i, valid_i = load_data_with_indices(Path(self.params.data_dir), ratio=self.params.train_ratio)\n self.train_x, self.valid_x = Wav2VecMelDataset(train, train_i), Wav2VecMelDataset(valid)\n\n def _collate_fn(self, batch):\n src, src_mel, tgt_mel = zip(*batch)\n src_lens = [x.size(0) for x in src]\n tgt_lens = [x.size(0) for x in tgt_mel]\n overlap_len = [min(src_len, tgt_len) for src_len, tgt_len in zip(src_lens, tgt_lens)]\n\n src = pad_sequence(src, batch_first=True)\n src_mask = torch.stack([torch.arange(src.size(1)) < l for l in src_lens])\n\n tgt_mel = pad_sequence(tgt_mel, batch_first=True, padding_value=-5)\n tgt_mel = tgt_mel.transpose(1, 2)\n tgt_mask = torch.stack([torch.arange(tgt_mel.size(1)) < l for l in tgt_lens])\n return src, tgt_mel, src_mel, src_mask, tgt_mask, overlap_len\n","repo_name":"reppy4620/VCon","sub_path":"dataset/wav2vec_mel/pl_data_module.py","file_name":"pl_data_module.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"35"} +{"seq_id":"33908524073","text":"from tree_node import TreeNode\nfrom error_messages import ErrorMessages\nfrom typing import Union\n\n\"\"\"\nPathHandler manages file system paths.\n\"\"\"\n\n\nclass PathHandler:\n def __init__(self, root: TreeNode):\n self.root = root\n self.current_directory = \"/\"\n self.previous_directory = None\n\n def is_absolute_path(self, path: str) -> bool:\n # Check if he given path is an absolute path (starts with the root directory separator character (\"/\"))\n return path.startswith(\"/\")\n\n def split_path(self, path: str) -> tuple:\n # Split the given path into parent directory and name\n if self.is_absolute_path(path):\n # Check if the path is absolute\n parent_dir_path, _, file_or_dir_name = path.rpartition(\"/\")\n if not parent_dir_path: # If parent_dir_path is empty, set it to \"/\"\n parent_dir_path = \"/\"\n else: # Relative path\n current_dir = self.current_directory # Get the current working directory name\n path_components = path.strip(\"/\").split(\"/\")\n if current_dir == \"/\":\n parent_dir_path = \"/\" + \"/\".join(path_components[:-1])\n else:\n parent_dir_path = current_dir + \"/\" + \"/\".join(path_components[:-1])\n file_or_dir_name = path_components[-1]\n return parent_dir_path, file_or_dir_name\n\n def get_node_by_path(self, path: str, show_errors: bool = True) -> Union[TreeNode, bool]:\n # Get the node corresponding to the given path\n if not path:\n return False\n elif path == \"/\":\n return self.root\n elif self.is_absolute_path(path):\n # Start searching from the root node\n current_node = self.root\n else:\n # Start searching from the current node\n current_node = 
self.get_node_by_path(self.current_directory) # start to search from the current node\n path_components = path.strip(\"/\").split(\"/\")\n for component in path_components:\n next_node = current_node.get_child_by_name(component)\n if not next_node:\n if show_errors: # show error if the node is not found.\n print(f\"{ErrorMessages.NotFoundError.value}{path}\")\n return False\n current_node = next_node\n return current_node\n\n def change_current_dir(self, new_cur_directory: str) -> bool:\n # change the current working directory to the specified directory\n self.previous_directory = self.current_directory\n self.current_directory = new_cur_directory\n return True\n\n def go_back_dir(self) -> bool:\n # change the current working directory to the previous directory\n if not self.previous_directory:\n print(f\"{ErrorMessages.NotFoundError.value}previous directory does not found\")\n return False\n cur_dir = self.current_directory\n self.current_directory = self.previous_directory\n self.previous_directory = cur_dir\n return True\n","repo_name":"AdielGoetschel/FileSystem","sub_path":"path_handler.py","file_name":"path_handler.py","file_ext":"py","file_size_in_byte":3065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"35661478631","text":"# Author: Janna Harding\n# File: breakcaesar.py\n# Usage: $ python3 breakcaesar.py to_decrypt.txt output_name.txt\n# Desc: this program takes data encrypted by a simple caesar cipher and\n# performs frequency analysis to break the encryption. \n# output is placed in code/output/ under the name of 3rd arg specified.\n#\n\n# import necessary packages\nimport sys\nsys.path.append(sys.path[0] + \"/../lib/\")\nfrom cryptotools import caesar, getShift\nfrom cryptotools import LOWERCASE_A_ASCII\n\n# check for correct argument list\nif len(sys.argv) != 3:\n print(\"Argument Error! 
Expected format:\\n\" + \n \"python3 breakcaesar.py to_decrypt.txt output_name.txt\")\n sys.exit()\n\n# define some important things\nOUTFILENAME = sys.argv[2]\nSRC = sys.argv[1]\nOUT = sys.path[0] + \"/../output/\" + OUTFILENAME\ninfile = open(SRC, 'r')\noutfile = open(OUT, 'w+')\n\n# provide some output\nprint(\"Breaking encryption on file \" + SRC + \"...\")\n\n# get data to decrypt\ndata = str(infile.read()).lower()\n\n# determine shift value\nshift = getShift(data)\n\n# provide some output\nprint(\"Shift value found: \" + chr(shift + LOWERCASE_A_ASCII))\n\n# perform decryption\nresult = caesar(data, -shift)\n\n# write result to file, clean up, provide output\noutfile.write(result)\ninfile.close()\noutfile.close()\nprint(\"Decrypted file path: ./output/\" + OUTFILENAME)","repo_name":"hackers4f/decryption-suite","sub_path":"breakcaesar.py","file_name":"breakcaesar.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"37848273781","text":"from __future__ import annotations\nfrom enum import Enum\nfrom typing import NamedTuple, List, Tuple, Optional, Union, Callable\nfrom random import uniform\nfrom queue import LifoQueue, Queue, PriorityQueue\nfrom math import sqrt\n\n\nclass Cell(str, Enum):\n EMPTY = \" \"\n BLOCKED = \"#\"\n START = \"S\"\n GOAL = \"G\"\n PATH = \".\"\n\n\nGrid = List[List[Cell]]\n\n\nclass MazeLocation(NamedTuple):\n row: int\n column: int\n\n def add(self, coord: Tuple[int, int]) -> MazeLocation:\n new_cell = MazeLocation(self.row + coord[0], self.column + coord[1])\n return new_cell\n\n @staticmethod\n def manhattan_distance(goal: MazeLocation) -> Callable[[MazeLocation], float]:\n def distance(ml: MazeLocation) -> float:\n return abs(ml.column - goal.column) + abs(ml.row - goal.row)\n\n return distance\n\n\nclass Node:\n def __init__(\n self,\n state: MazeLocation,\n parent: Optional[Node],\n cost: float = 0.0,\n heuristic: float = 0.0,\n ) -> None:\n self.state = state\n self.parent = parent\n self.cost = cost\n self.heuristic = heuristic\n\n def __lt__(self, other: Node) -> bool:\n return (self.cost + self.heuristic) < (other.cost + other.heuristic)\n\n\nclass Maze:\n moves = [(0, 1), (1, 0), (0, -1), (-1, 0)]\n\n def __init__(\n self,\n rows: int = 10,\n columns: int = 10,\n sparseness: float = 0.2,\n start: MazeLocation = MazeLocation(0, 0),\n goal: MazeLocation = MazeLocation(9, 9),\n ) -> None:\n # initialize basic instance variables\n self._rows = rows\n self._columns = columns\n self.start = start\n self.goal = goal\n # fill the grid with empty cells\n self._grid: Grid = [[Cell.EMPTY for c in range(columns)] for r in range(rows)]\n # populate the grid with blocked cells\n self._randomly_fill(sparseness)\n # fill the start and goal locations in\n self._grid[start.row][start.column] = Cell.START\n self._grid[goal.row][goal.column] = Cell.GOAL\n\n def _randomly_fill(self, sparseness: float) -> None:\n for row in range(self._rows):\n for column in range(self._columns):\n if uniform(0, 1.0) < sparseness:\n self._grid[row][column] = Cell.BLOCKED\n\n def goal_test(self, ml: MazeLocation) -> bool:\n \"\"\"Checks if goals is reached\"\"\"\n return ml == self.goal\n\n def _is_valid_point(self, ml: MazeLocation) -> bool:\n \"\"\"Checks if point inside of the maze\"\"\"\n return (\n 0 <= ml.row\n and ml.row < self._rows\n and ml.column < self._columns\n and 0 <= ml.column\n and self._grid[ml.row][ml.column] != Cell.BLOCKED\n )\n\n def successors(self, ml: 
MazeLocation) -> List[MazeLocation]:\n \"\"\"Finds the possible next locations from a given point.\"\"\"\n return [ml.add(l) for l in self.moves if self._is_valid_point(ml.add(l))]\n\n def mark(self, path: List[MazeLocation], marker: Cell) -> None:\n for ml in path:\n if ml != self.start and ml != self.goal:\n self._grid[ml.row][ml.column] = marker\n\n def __repr__(self) -> str:\n output = []\n for row in self._grid:\n output.append(\"\".join([c.value for c in row]))\n return \"\\n\".join(output)\n\n def calculate(self, frontier: Union[LifoQueue, Queue]) -> Optional[Node]:\n \"\"\"Uses BFS or DFS based on the storage type.\"\"\"\n # frontier is where we've yet to go\n frontier.put(Node(self.start, None))\n # explored is where we've been\n explored = {self.start}\n\n while not frontier.empty():\n current_node = frontier.get()\n current_state = current_node.state\n if self.goal_test(current_state):\n return current_node\n for child in self.successors(current_state):\n # skip children we already explored\n if child in explored:\n continue\n explored.add(child)\n frontier.put(Node(child, current_node))\n return None\n\n def astar(self) -> Optional[Node]:\n \"\"\"Uses A* algorithm to find the shortest distance\"\"\"\n frontier: PriorityQueue = PriorityQueue()\n distance = MazeLocation.manhattan_distance(self.goal)\n frontier.put(Node(self.start, None))\n explored = {self.start: 0.0}\n\n while not frontier.empty():\n current_node = frontier.get()\n current_state = current_node.state\n if self.goal_test(current_state):\n return current_node\n for child in self.successors(current_state):\n # 1 assumes a grid, need a cost function for more sophisticated apps\n new_cost = current_node.cost + 1\n if child not in explored or explored[child] > new_cost:\n explored[child] = new_cost\n frontier.put(\n Node(child, current_node, new_cost, distance(current_state))\n )\n return None\n\n @staticmethod\n def node_to_path(node: Node) -> List[MazeLocation]:\n path = [node.state]\n while node.parent:\n node = node.parent\n path.append(node.state)\n path.reverse()\n return path\n\n\ndef print_solution(maze: Maze) -> Callable[[Optional[Node]], None]:\n def printer(solution):\n if solution1 is None:\n print(maze)\n print(\"No solution found\")\n else:\n path = maze.node_to_path(solution)\n maze.mark(path, Cell.PATH)\n print(maze)\n maze.mark(path, Cell.EMPTY)\n print(f\"Path length: {len(path)}\")\n print(\"-\" * maze._columns, \"\\n\")\n\n return printer\n\n\nif __name__ == \"__main__\":\n maze = Maze()\n printer = print_solution(maze)\n\n # DFS\n solution1 = maze.calculate(LifoQueue())\n print(\"DFS\")\n printer(solution1)\n\n # BFS\n solution2 = maze.calculate(Queue())\n print(\"BFS\")\n printer(solution2)\n\n # A*\n solution3 = maze.astar()\n print(\"A*\")\n printer(solution3)\n","repo_name":"vlad-bezden/ClassicComputerScienceProblemsInPython","sub_path":"Chapter2/maze.py","file_name":"maze.py","file_ext":"py","file_size_in_byte":6182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"22230601963","text":"#!/usr/bin/python3\n\"\"\"0. 
Rain\"\"\"\n\n\ndef rain(walls):\n \"\"\"\n Prototype: def rain(walls)\n walls is a list of non-negative integers.\n Return: Integer indicating total amount of rainwater retained.\n Assume that the ends of the list (before index 0 and after index walls[-1])\n are not walls, meaning they will not retain water.\n If the list is empty return 0.\n \"\"\"\n\n if not walls or len(walls) < 3:\n return 0\n\n rainwater = 0\n\n for i in range(0, len(walls)):\n preWall = max(walls[:i + 1])\n postWall = max(walls[i:])\n rainwater += min(preWall, postWall) - walls[i]\n\n return rainwater\n","repo_name":"rubenoliveros/holbertonschool-interview","sub_path":"0x10-rain/0-rain.py","file_name":"0-rain.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"71766751142","text":"\"\"\"\nweb_server\n\"\"\"\nfrom socket import *\nfrom select import select\nimport re\n\n\nclass WebServer:\n def __init__(self, host='0.0.0.0', port=80, html=None):\n self.host = host\n self.port = port\n self.html = html\n self.rlist = []\n self.wlist = []\n self.xlist = []\n # 创建web服务器准备工作\n self.creat_socket()\n self.bind()\n\n # 创建tcp套接字\n def creat_socket(self):\n self.sock = socket()\n self.sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)\n # 将套接字设为非阻塞状态\n self.sock.setblocking(False)\n\n # 绑定地址\n def bind(self):\n self.address = (self.host, self.port)\n self.sock.bind(self.address)\n\n def handle(self, connfd):\n # 接收客户端请求\n request = connfd.recv(1024 * 10).decode()\n # 解析客户端请求\n pattern = \"[A-Z]+\\s+(?P/\\S*)\"\n result = re.match(pattern, request)\n if result:\n # 匹配到的内容\n info = result.group(\"info\")\n print(\"请求内容:\", info)\n self.send_html(connfd, info)\n else:\n connfd.close()\n self.rlist.remove(connfd)\n return\n\n # 创建发送服务\n def send_html(self, connfd, info):\n if info == \"/\":\n filename = self.html + \"/index.html\"\n else:\n filename = self.html + info\n try:\n f = open(filename, \"rb\")\n except:\n # 文件不存在\n response = \"HTTP/1.1 404 Not Found\\r\\n\"\n response += \"Content-Type:text/html\\r\\n\"\n response += \"\\r\\n\"\n response += \"Sorry...\"\n response = response.encode()\n else:\n data = f.read()\n response = \"HTTP/1.1 200 OK\\r\\n\"\n response += \"Content-Type:text/html\\r\\n\"\n response += \"Content-Length:%d\\r\\n\" % len(data)\n response += \"\\r\\n\"\n response = response.encode() + data\n finally:\n connfd.send(response)\n\n # 创建启动函数\n def start(self):\n # 设置监听\n self.sock.listen(5)\n print(\"listen the port %d\" % self.port)\n # 循环监控,初始监控监听套接字\n self.rlist.append(self.sock)\n while True:\n rs, ws, xs = select(self.rlist, self.wlist, self.xlist)\n for r in rs:\n if r is self.sock:\n connfd, addr = r.accept()\n print(\"connect from\", addr)\n connfd.setblocking(False)\n # 将客户端套接字添加进监听列表\n self.rlist.append(connfd)\n else:\n try:\n self.handle(r)\n except:\n r.close()\n self.rlist.remove(r)\n\n\nif __name__ == '__main__':\n # 实例化对象\n httpd = WebServer(host=\"0.0.0.0\", port=8005, html=\"./static\")\n # 启动服务\n httpd.start()\n","repo_name":"zxd-sys/web-server","sub_path":"web_server02.py","file_name":"web_server02.py","file_ext":"py","file_size_in_byte":3060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"25884802833","text":"import requests\nfrom bs4 import BeautifulSoup\nimport ssl\ncontext = ssl._create_unverified_context()\n\n\nnum = 0\nbaseurl='https://www.qiushibaike.com/8hr/page/'\nfor pagenum in range(1,5):\n headers = {\n 
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'\n }\n url=baseurl+str(pagenum)+'/'\n r=requests.get(url,headers=headers,verify=False)\n content=r.text\n\n soup = BeautifulSoup(content,'lxml')\n div1=soup.find_all(class_='article block untagged mb15 typs_hot')\n div2=soup.find_all(class_='article block untagged mb15 typs_long')\n div3=soup.find_all(class_='article block untagged mb15 typs_recent')\n div4=soup.find_all(class_='article block untagged mb15 typs_old')\n divs=[div1,div2,div3,div4]\n # print(div1)\n for d in divs:\n for div in d:\n joke = div.span.get_text()\n name= div.h2.get_text()\n if div.find_all(class_='thumb'):\n # image= div.find_all('img')\n # print(image)\n continue\n print(joke)\n # print(name)\n num+=1\n print('------------')\n print(\"===++++++****************+++++++=====\")\n\n print(\"===+++page+++*****page\",pagenum,\"*******page****+++++page++=====\")\n\nprint(num)","repo_name":"ahecate/pachong","sub_path":"qiushibaike.py","file_name":"qiushibaike.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"8592218455","text":"import argparse\n\nimport yaml\nfrom base_api import BaseAPI\n\n\ndef gen_header(header):\n return f\"\"\"\n#pragma once\n\n#include \n\n#include \n#include \n#include \n#include \n\nnamespace phi {{\n\n{header}\n\n}} // namespace phi\n\"\"\"\n\n\ndef gen_cc(cc):\n return f\"\"\"\n#include \"paddle_lazy/eager_backend/eager_ops_autogen.h\"\n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \"paddle_lazy/eager_backend/eager_ops.h\"\n\nnamespace phi {{\n\n{cc}\n\n}} // namespace phi\n\"\"\"\n\n\ndef generate_api(headers, ccs, args):\n header = ''.join(headers)\n header = gen_header(header)\n\n cc = ''.join(ccs)\n cc = gen_cc(cc)\n\n with open(args.api_header_path, 'w') as f:\n f.write(header)\n\n with open(args.api_source_path, 'w') as f:\n f.write(cc)\n\n\nclass API(BaseAPI):\n\n def header(self):\n api_declaration = f\"\"\"\nvoid dense_{self.api}({self.get_declare_args()});\n\"\"\"\n return api_declaration\n\n def cc(self):\n api_code = f\"\"\"\nvoid dense_{self.api}({self.get_define_args()}) {{\n\"\"\"\n return api_code + self.gen_kernel_code(self.api) + \"\"\"\n}\n\"\"\"\n\n def get_declare_args(self, inplace_flag=False):\n declare_args = self.get_input_tensor_args(inplace_flag)\n declare_args.extend(self.get_output_tensor_args())\n for name in self.attrs['names']:\n default_value = ''\n if self.attrs['attr_info'][name][1] is not None:\n default_value = ' = ' + self.attrs['attr_info'][name][1]\n declare_args.append(self.attrs['attr_info'][name][0] + ' ' + name +\n default_value)\n return \", \".join(declare_args)\n\n def get_define_args(self, inplace_flag=False):\n define_args = self.get_input_tensor_args(inplace_flag)\n define_args.extend(self.get_output_tensor_args())\n for name in self.attrs['names']:\n define_args.append(self.attrs['attr_info'][name][0] + ' ' + name)\n return \", \".join(define_args)\n\n def get_kernel_args(self, kernel_tensor_type=None, code_indent=''):\n dense_input_trans_map = {\n # 'const Tensor&':\n 'const DenseTensor*':\n 'const phi::DenseTensor&',\n 'const std::vector&':\n 'const std::vector&',\n 'const paddle::optional':\n 'paddle::optional',\n 'const paddle::optional&':\n 'const paddle::optional&',\n 'const paddle::optional>&':\n 'paddle::optional&>'\n 
}\n dense_out_trans_map = {\n 'Tensor': 'phi::DenseTensor*',\n 'std::vector': 'std::vector&'\n }\n sr_input_trans_map = {\n 'const Tensor&':\n 'const phi::SelectedRows&',\n 'const paddle::optional&':\n 'const paddle::optional&'\n }\n sr_out_trans_map = {'Tensor': 'phi::SelectedRows*'}\n input_names = self.inputs['names']\n input_infos = self.inputs['input_info']\n kernel_args_type_list = ['const phi::DeviceContext&']\n\n attr_names = self.attrs['names']\n kernel_param = input_names + attr_names\n\n kernel_args = [\"*dev_ctx\"]\n for param in kernel_param:\n if param in input_names:\n if param in self.optional_vars:\n kernel_args.append(param)\n else:\n if self.inputs['input_info'][param] == \"const DenseTensor*\":\n kernel_args.append(\"*\" + param)\n elif self.inputs['input_info'][\n param] == \"const std::vector&\":\n kernel_args.append(param)\n else:\n # do nothing\n pass\n # input is dense tensor\n if kernel_tensor_type is None or kernel_tensor_type[0][\n kernel_param.index(param)] == 'dense':\n kernel_args_type_list.append(\n dense_input_trans_map[input_infos[param]])\n else: # input is selected_rows\n kernel_args_type_list.append(\n sr_input_trans_map[input_infos[param]])\n elif param in attr_names:\n # set attr for kernel_context\n if 'IntArray' in self.attrs['attr_info'][param][0]:\n kernel_args_type_list.append('const phi::IntArray&')\n param = 'phi::IntArray(' + param + ')'\n elif 'Scalar' in self.attrs['attr_info'][param][0]:\n kernel_args_type_list.append('const phi::Scalar&')\n param = 'phi::Scalar(' + param + ')'\n else:\n kernel_args_type_list.append(\n self.attrs['attr_info'][param][0])\n kernel_args.append(param)\n elif isinstance(param, bool):\n kernel_args.append(str(param).lower())\n else:\n kernel_args.append(str(param))\n\n for i, out_type in enumerate(self.outputs['types']):\n # output is dense tensor\n if kernel_tensor_type is None or kernel_tensor_type[1][i] == 'dense':\n kernel_args_type_list.append(dense_out_trans_map[out_type])\n else: # output is selected_rows\n kernel_args_type_list.append(sr_out_trans_map[out_type])\n kernel_signature = \"void(*)(\" + \", \".join(kernel_args_type_list) + \")\"\n kernel_args = \", \".join(kernel_args)\n\n return kernel_args, kernel_signature\n\n def gen_kernel_code(self, kernel_name, code_indent='', inplace_flag=False):\n kernel_args, kernel_signature = self.get_kernel_args()\n outputs_args = ','.join(self.outputs['names'])\n return f\"\"\"\n Backend kernel_backend = Backend::CPU;\n DataLayout kernel_layout = {self.inputs['names'][0]}->layout();\n DataType kernel_data_type = {self.inputs['names'][0]}->dtype();\n\n\n{code_indent} VLOG(6) << \"{self.api} API kernel key: [\" << kernel_backend << \", \" << kernel_layout << \", \"<< kernel_data_type << \"]\";\n{code_indent} const auto& kernel_result = phi::KernelFactory::Instance().SelectKernelOrThrowError(\n{code_indent} \"{kernel_name}\", {{kernel_backend, kernel_layout, kernel_data_type}});\n{code_indent} const auto& kernel = kernel_result.kernel;\n{code_indent} VLOG(6) << \"{kernel_name} kernel: \" << kernel;\n\n{code_indent} auto* dev_ctx = GetDeviceContextByBackend(kernel_backend);\n{code_indent} using kernel_signature = {kernel_signature};\n{code_indent} auto* kernel_fn = kernel.GetVariadicKernelFn();\n{code_indent} {{\n{code_indent} (*kernel_fn)({kernel_args}, {outputs_args});\n{code_indent} }}\n\"\"\"\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description='Generate PaddlePaddle C++ API files')\n parser.add_argument('--api_yaml_path',\n help='path to api yaml 
file',\n nargs='+',\n default=f'codegen/test_api.yaml')\n parser.add_argument('--api_header_path',\n help='output of generated api header code file',\n default='paddle_lazy/eager_backend/eager_ops_autogen.h')\n parser.add_argument(\n '--api_source_path',\n help='output of generated api source code file',\n default='paddle_lazy/eager_backend/eager_ops_autogen.cc')\n args = parser.parse_args()\n # args.api_yaml_path = list(args.api_yaml_path)[0]\n\n with open(args.api_yaml_path, 'r') as f:\n yaml_list = yaml.load(f, Loader=yaml.FullLoader)\n\n headers = []\n ccs = []\n for api_yaml in yaml_list:\n api = API(api_yaml)\n headers.append(api.header())\n ccs.append(api.cc())\n generate_api(headers, ccs, args)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"gglin001/paddle-lazy","sub_path":"codegen/autogen_eager_ops.py","file_name":"autogen_eager_ops.py","file_ext":"py","file_size_in_byte":8438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"42072948489","text":"import time\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.chrome.service import Service\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions\n\noptions = webdriver.ChromeOptions()\noptions.add_experimental_option(\"detach\", True)\n\n# GOOGLE FORM FIELDS X_PATH ARE SAME EXCEPT one div\nX_PATH_START = f'//*[@id=\"mG61Hd\"]/div[2]/div/div[2]/div['\nX_PATH_END = ']/div/div/div[2]/div/div[1]/div/div[1]/input'\n\n# uploading data on Google form\ndef upload_data(space_flight, GOOGLE_FORM_URL):\n print(\"\\nPlease wait while data is being uploaded through Google Form.\")\n # creating Selenium driver\n driver = webdriver.Chrome(service=Service(executable_path=ChromeDriverManager().install()), options=options)\n driver.maximize_window()\n # loop though each address of house\n for i, space_craft_name_list in enumerate(space_flight.space_craft_name_list):\n # accessing Google form\n driver.get(GOOGLE_FORM_URL)\n # wait until first element is clickable\n WebDriverWait(driver, 10).until(expected_conditions.element_to_be_clickable((By.XPATH, f\"{X_PATH_START}1{X_PATH_END}\")))\n\n # uploading data for each question on Google form\n driver.find_element(By.XPATH, f'{X_PATH_START}1{X_PATH_END}').send_keys(space_craft_name_list if space_craft_name_list else \"\")\n driver.find_element(By.XPATH, f'{X_PATH_START}2{X_PATH_END}').send_keys(space_flight.card_header_list[i] if space_flight.card_header_list[i] else \"\")\n driver.find_element(By.XPATH, f'{X_PATH_START}3{X_PATH_END}').send_keys(space_flight.launching_location_list[i] if space_flight.launching_location_list[i] else \"\")\n driver.find_element(By.XPATH, f'{X_PATH_START}4{X_PATH_END}').send_keys(space_flight.next_space_fight_time_list[i] if space_flight.next_space_fight_time_list[i] else \"\")\n # clicking on submit button\n driver.find_element(By.XPATH, '//*[@id=\"mG61Hd\"]/div[2]/div/div[3]/div[1]/div[1]/div/span').click()\n # time.sleep(random.choice([0.1, 0.12, 0.15, 0.13]))\n time.sleep(5)","repo_name":"ma1526763/NEXT_SPACE_FLIGHT","sub_path":"upload_data_to_google_form.py","file_name":"upload_data_to_google_form.py","file_ext":"py","file_size_in_byte":2196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"437201119","text":"\"\"\"python port of polylabel algorithm found here: 
https://github.com/mapbox/polylabel\"\"\"\nfrom Queue import PriorityQueue\nfrom math import sqrt\n\nSQRT2 = 1.414213562\n\n\ndef polylabel(polygon, precision=1.0):\n \"\"\"find the 'pole of inaccessibility', the most distant point from the polygon outline\"\"\"\n min_x = float(\"inf\")\n min_y = float(\"inf\")\n max_x = float(\"-inf\")\n max_y = float(\"-inf\")\n for pt in polygon:\n min_x = min(min_x, pt[0])\n min_y = min(min_y, pt[1])\n max_x = max(max_x, pt[0])\n max_y = max(max_y, pt[1])\n\n width = max_x - min_x\n height = max_y - min_y\n cellsize = min(width, height)\n half = cellsize / 2.\n\n cell_queue = PriorityQueue()\n\n if cellsize == 0:\n return [min_x, min_y]\n\n x = min_x\n while x < max_x:\n y = min_y\n while y < max_y:\n cell = Cell(x + half, y + half, half, polygon)\n cell_queue.put((-cell.max, cell))\n y += cellsize\n x += cellsize\n\n best_cell = get_centroid_cell(polygon)\n\n bbox_cell = Cell(min_x + width / 2., min_y + height / 2., 0, polygon)\n if bbox_cell.dist > best_cell.dist:\n best_cell = bbox_cell\n\n while not cell_queue.empty():\n _, cell = cell_queue.get()\n # update the best cell if we found a better one\n if cell.dist > best_cell.dist:\n best_cell = cell\n\n # do not drill down further if there's no chance of a better solution\n if cell.max - best_cell.dist <= precision:\n continue\n\n # split the cell into four cells\n half = cell.half / 2.\n c1 = Cell(cell.x - half, cell.y - half, half, polygon)\n c2 = Cell(cell.x + half, cell.y - half, half, polygon)\n c3 = Cell(cell.x - half, cell.y + half, half, polygon)\n c4 = Cell(cell.x + half, cell.y + half, half, polygon)\n cell_queue.put((-c1.max, c1))\n cell_queue.put((-c2.max, c2))\n cell_queue.put((-c3.max, c3))\n cell_queue.put((-c4.max, c4))\n\n return best_cell.x, best_cell.y\n\n\nclass Cell(object):\n \"\"\"class for holding cell info\"\"\"\n def __init__(self, x, y, h, polygon):\n self.x = x\n self.y = y\n self.half = h\n self.dist = point_to_polygon_dist(x, y, polygon)\n self.max = self.dist + self.half * SQRT2\n\n\ndef point_to_polygon_dist(x, y, polygon):\n \"\"\"point to polygon dist\"\"\"\n inside = False\n min_dist_sq = float(\"inf\")\n\n ring = polygon\n n_pts = len(ring)\n\n for i, j in zip(range(n_pts), rotate(range(n_pts), 1)):\n pa = polygon[i]\n pb = polygon[j]\n if ((pa[1] > y) != (pb[1] > y)) and (x < (pb[0] - pa[0]) * (y - pa[1]) / (pb[1] - pa[1]) + pa[0]):\n inside = not inside\n min_dist_sq = min(min_dist_sq, get_seg_dist_sq(x, y, pa, pb))\n\n min_dist = sqrt(min_dist_sq)\n if not inside:\n min_dist *= -1\n\n return min_dist\n\n\ndef get_centroid_cell(polygon):\n \"\"\"get the centroid of a cell\"\"\"\n area = 0\n x = 0\n y = 0\n n_pts = len(polygon)\n for i, j in zip(range(n_pts), rotate(range(n_pts), 1)):\n pa = polygon[i]\n pb = polygon[j]\n f = pa[0] * pb[1] - pb[0] * pa[1]\n x += (pa[0] + pb[0]) * f\n y += (pa[1] + pb[1]) * f\n area += f * 3\n if area == 0:\n return Cell(polygon[0][0], polygon[0][1], 0, polygon)\n return Cell(x / area, y / area, 0, polygon)\n\n\ndef get_seg_dist_sq(px, py, p1, p2):\n \"\"\"squared distance from (px, py) to line segment [p1, p2]\"\"\"\n x = p1[0]\n y = p1[1]\n dx = p2[0] - x\n dy = p2[1] - y\n\n if dx != 0 or dy != 0:\n tt = ((px - x) * dx + (py - y) * dy) / (dx * dx + dy * dy)\n if tt > 1:\n x = p2[0]\n y = p2[1]\n elif tt > 0:\n x += dx * tt\n y += dy * tt\n\n dx = px - x\n dy = py - y\n\n return dx * dx + dy * dy\n\n\ndef rotate(arr, x):\n \"\"\"rotate an array by x\"\"\"\n return arr[-x:] + 
arr[:-x]\n","repo_name":"Justin-Kuehn/polylabel-py","sub_path":"polylabel/polylabel.py","file_name":"polylabel.py","file_ext":"py","file_size_in_byte":3933,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"70129397221","text":"print('setting up...')\r\nimport os\r\nimport numpy as np\r\nimport datetime as dt\r\nimport matplotlib as mp\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.dates as mpd\r\n\r\n\r\nfol = r'E:\\CRMS\\clean_daily'\r\noutf = r'E:\\CRMS\\clean_daily\\salinity_plots'\r\n\r\nsitefiles = os.listdir(fol)\r\n\r\nfor sitefile in sitefiles:\r\n if (sitefile[0:4] == 'CRMS'):\r\n \r\n site = sitefile.split('_')[0]\r\n \r\n print('plotting %s...' % site)\r\n\r\n plot_title = site\r\n\r\n dates = []\r\n vals = []\r\n\r\n\r\n fn = r'%s_daily_English.csv' % site\r\n\r\n csvf = r'%s\\%s' % (fol,fn)\r\n date_col = 1 # column with date in CRMS csv file\r\n val_col = 2 # column with value in CRMS csv file (2 = daily mean salinity, 8 = daily mean stage)\r\n\r\n y_txt = r'Daily Mean Salinity (ppt)'\r\n y_range = [0, 36]\r\n x_txt = r'Elapsed Year'\r\n x_range = [dt.date(2006,1,1),dt.date(2019,12,31)]\r\n serieslabel = site\r\n\r\n\r\n pngfile = r'%s\\%s_salinity.png' % (outf,site)\r\n\r\n f=np.genfromtxt(csvf,dtype=(str,str),skip_header=1,usecols=[date_col,val_col],delimiter=',')\r\n\r\n for row in f:\r\n if row[1] != 'na':\r\n dates.append(dt.date(int(row[0].split('/')[2]),int(row[0].split('/')[0]),int(row[0].split('/')[1])))\r\n vals.append(float(row[1]))\r\n\r\n\r\n dates_plt = mpd.date2num(dates)\r\n fig = plt.figure()\r\n fig.suptitle(plot_title)\r\n ax = fig.add_subplot(111,facecolor='whitesmoke')\r\n ax.set_ylabel(y_txt)\r\n #ax.set_xlabel(x_txt)\r\n ax.plot_date(dates_plt,vals,marker='o',markersize=2,linestyle='solid',linewidth=0,color='red',label=serieslabel)\r\n ax.legend(loc='upper right',edgecolor='none',facecolor='none')\r\n #ax.set_ylim(y_range)\r\n ax.set_xlim(x_range)\r\n ax.grid(True,which='both',axis='both',color='silver',linewidth=0.5)\r\n\r\n plt.savefig(pngfile)\r\n plt.close()\r\n\r\nprint('done.')\r\n\r\n\r\n\r\n","repo_name":"edwhite12/CRMS_processing","sub_path":"CRMS_plot_daily.py","file_name":"CRMS_plot_daily.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"39257228614","text":"from pytermrender import *\nfrom colorama import Fore, Back, Style\n\nScreen = TermScreen(height=24, width=80, framerate=30)\n\n\ndef setup():\n print(\"Setup\")\n print(\"Clearing\")\n clearScreen()\n return\n\n\ndef teardown():\n clearScreen()\n print(\"Teardown\")\n print(\"Teardown Complete\")\n return\n\n\n#not strictly needed, but useful\ndef tick():\n Screen.frame_no+=1\n return\n\ndef draw():\n clearScreen()\n clearAllBuffers()\n frame = Screen.frame_no\n\n drawBox(0,0,Screen.width,Screen.height,'#')\n printBuffer(1,1,str(Screen.framerate)+'fps -'+str(frame)+'-')\n for ix, c in enumerate(str(frame)):\n putBuffer(8+ix,1, Fore.BLUE, buffer=\"color\")\n return\n\n\nrun()\n","repo_name":"stephentoth/pytermrender","sub_path":"example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"9147913361","text":"import enum\nimport time\nimport config\nfrom zonedirector import ZoneDirector\nfrom database import Database\n\nclass Action(enum.Enum):\n CONNECT = 1\n DISCONNECT = 2\n MOVE = 
3\n\nclass Change:\n def __init__(self, user_mac, action, ap_source, ap_new=None):\n self.user_mac = user_mac\n self.action = action\n self.ap_source = ap_source\n self.ap_new = ap_new\n\nclass EventMonitor:\n def __init__(self):\n self.last_clients = None\n self.zd = ZoneDirector(config.server_address, config.username, config.password)\n self.db = Database()\n\n while True:\n self.update()\n time.sleep(2)\n\n def get_changes(self, old_clients, new_clients):\n changes = []\n\n for old_client in old_clients:\n \n # set if client hasn't disconnected\n matched_client = None\n\n for new_client in new_clients:\n if old_client['mac'] == new_client['mac']:\n matched_client = new_client\n break\n \n # client still connected\n if matched_client:\n # client has moved\n if matched_client['ap-name'] != old_client['ap-name']:\n changes.append(Change(\n old_client['mac'],\n Action.MOVE,\n old_client['ap-name'],\n matched_client['ap-name']\n ))\n\n # client has disconnected\n else:\n changes.append(Change(\n old_client['mac'],\n Action.DISCONNECT,\n old_client['ap-name'],\n ))\n\n # check for new clients\n for new_client in new_clients:\n isNewClient = True\n\n for old_client in old_clients:\n if old_client['mac'] == new_client['mac']:\n isNewClient = False\n break\n\n if isNewClient:\n self.db.add_client(new_client['mac'], new_client['user'])\n changes.append(Change(\n new_client['mac'],\n Action.CONNECT,\n new_client['ap-name'],\n ))\n \n return changes\n\n\n def update(self):\n new_clients = self.zd.get_clients()\n\n # first update\n if self.last_clients == None:\n for client in new_clients:\n self.db.add_client(client['mac'], client['user'])\n self.last_clients = new_clients\n return\n\n # later updates\n changes = self.get_changes(self.last_clients, new_clients)\n for change in changes:\n client_id = self.db.get_client_id(change.user_mac)\n self.db.add_activity(client_id, int(time.time()), str(change.action), change.ap_source, change.ap_new)\n print(f\"[{change.user_mac}] has {change.action} to the network\")\n\n\nEventMonitor()\n","repo_name":"ctrlsam/wireless_api","sub_path":"event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":2981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"37337507430","text":"import cv2\nimport numpy as np\n\nimg = cv2.imread(\"C:\\\\Users\\\\Eswar\\\\Desktop\\\\sample_noise.jpg\")\nout_path = \"C:\\\\Users\\\\Eswar\\\\Desktop\\\\noiss.jpg\"\n\nprint(img.shape)\n#rESIZING THE IMAGE\n# resized_image =cv2.resize(img,(512,385))\n# print(resized_image.shape)\n# denoise_3 = cv2.fastNlMeansDenoisingColored(img,None,3,3,7,21)\n\n\ndef denoise():\n denoise = cv2.fastNlMeansDenoisingColored(img,None,3,3,7,21)\n print(\"function executed\")\n # crop = cv2.sharpening[0:1540,100:2048] #height and width\n # cv2.imshow(\"denoise image\", denoise)\n # cv2.waitKey(0)\n cv2.imwrite(out_path,denoise)\n print(\"denoise executed\")\n\n\ndef blur_methods(self):\n blur_img = cv2.blur(img,(5,5))\n\n gaussian_blur = cv2.GaussianBlur(img, (7,7), 2)\n\n median_blur = cv2.medianBlur(img, 5)\n\n bilateral_blur = cv2.bilateralFilter(img, 9, 75, 75)\n\n sharpening = cv2.addWeighted(bilateral_blur, 1.6, gaussian_blur, -0.5, 0)\n\n # crop = sharpening[0:1540,100:2048] #height and width\n\n \ndef warp_cutting(self):\n\n width,height = 1508,1040\n\n points= np.float32([[570,507],[1520,500],[0,1000],[1700,1260]]) #writing the points of card to warp\n #above points [width,height] [left top corner],[right top corner],[left bottom 
corner],[right bottom corner]points\n tell_pos = np.float32([[0,0],[width,0],[0,height],[width,height]]) #telling the points given above points to place where [left,top][right,top][left,bottom][right,bottom]\n\n matrix = cv2.getPerspectiveTransform(points,tell_pos) #creating the matrix for image\n\n # out_image =cv2.warpPerspective(crop,matrix,(width,height)) #applying the warpprespective \n\n# cv2.imshow(\"original\",resized_image)\n# # cv2.imshow(\"blur\",blur_img)\n# # cv2.imshow(\"median blur\", median_blur)\n# cv2.imshow(\"gaussian blur\", gaussian_blur)\n# cv2.imshow(\"bilateral blur\", cv2.resize(bilateral_blur,(1024,770)))\n# cv2.imshow(\"Sharpening\", cv2.resize(sharpening,(1024,770)))\n# cv2.imshow(\"cropped\", crop)\n# cv2.imshow(\"wrapped\", out_image)\n# cv2.imshow(\"denoise\", denoise_3)\n\n# cv2.imwrite(\"E:\\\\Coding\\\\Python\\\\openCV\\\\noisy_img1a2000.jpg\",crop)\n# cv2.imwrite(\"E:\\\\Coding\\\\Python\\\\openCV\\\\noisy_img1adfwrap2000.jpg\",out_image)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\nprint(\"Done!\")\n\ndenoise()","repo_name":"Eswararaokotakota/Python-learning","sub_path":"openCV/noise_reduction.py","file_name":"noise_reduction.py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"33857634568","text":"\"\"\"Application configuration file.\nEvery field should be correctly filled\"\"\"\nimport os.path\nimport pathlib\n\nAPP_NAME: str = \"FlickTrendz Backend\"\nDEBUG: bool = True\nTESTING: bool = False\nSECRET_KEY: str = \"\"\nALGORITHM: str = \"\"\nEXP_TIME_MIN: float = 0\nSQLALCHEMY_DATABASE_URI: str = \"sqlite://///\" + os.path.join(pathlib.Path(__file__).parent.parent, \"sqlite.db\")\nOAUTHLIB_INSECURE_TRANSPORT: str = \"1\"\n\n\nGOOGLE_CLIENT_ID: str = \"\"\nGOOGLE_CLIENT_SECRET: str = \"\"\nGOOGLE_AUTH_URI: str = \"\"\nGOOGLE_TOKEN_URI: str = \"\"\nGOOGLE_USER_INFO_URI: str = \"https://www.googleapis.com/oauth2/v1/userinfo\"\n\nREDIRECT_URI = \"\"\n\nSCOPES = []\n","repo_name":"czyzewskijakub/PWSE","sub_path":"backend/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"797300540","text":"import pygame, random\r\n\r\n# Screen\r\nscreen_width = 1000\r\nscreen_height = 1000\r\nscreen = pygame.display.set_mode((screen_width, screen_height))\r\n# Enemy indicator\r\nenemy_indicator_image = pygame.image.load('graphics/EnemyIndicator.png').convert_alpha() # Image is originally 32 x 32 pixels\r\n# enemy_indicator_image = pygame.transform.scale(enemy_indicator_image, (32 * 1.5), (32 * 1.5))\r\n\r\n\r\n# Colours\r\nRED = (255,0,0)\r\nGREEN = (0,255,0)\r\nBLUE = (0,0,255)\r\nWHITE = (255,255,255)\r\nBLACK = (0,0,0)\r\n\r\nclass Enemy(pygame.sprite.Sprite):\r\n\tdef __init__(self, x, y):\r\n\t\tpygame.sprite.Sprite.__init__(self)\r\n\t\t# Animations\r\n\t\tself.animation_list = []\r\n\t\tself.animation_frame_index = 0\r\n\t\tself.animation_frame_displayed_time = pygame.time.get_ticks()\r\n\r\n\t\t# Load all the images for the animation\r\n\t\tfor num in range(0,5):\r\n\t\t\t# Note: Image is 110 x 120 pixels\r\n\t\t\timage = pygame.image.load(f'graphics/BlueArrow/BlueArrowV2{num}.png').convert_alpha()\r\n\t\t\tself.image = pygame.transform.scale(image, (165, 180))\r\n\t\t\tself.animation_list.append(self.image)\r\n\r\n\t\t# Assign animation\r\n\t\tself.image = self.animation_list[self.animation_frame_index]\r\n\t\tself.width = 
self.image.get_width()\r\n\t\tself.height = self.image.get_height()\r\n\t\tself.rect = self.image.get_rect()\r\n\t\tself.rect.center = (x,y)\r\n\r\n\t\t# Speed\r\n\t\tself.speed_multiplier = 1\r\n\r\n\tdef update(self):\r\n\t\tanimation_cooldown = 90 # Milliseconds\r\n\r\n\t\t#--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\r\n\t\t# ANIMATIONS\r\n\r\n\t\t# Set the image based on the animation frame index\r\n\t\tself.image = self.animation_list[self.animation_frame_index]\r\n\r\n\t\t# Check if enough time has passed since the last animation frame update\r\n\t\tif (pygame.time.get_ticks() - self.animation_frame_displayed_time) > animation_cooldown:\r\n\t\t\tself.animation_frame_displayed_time = pygame.time.get_ticks()\r\n\t\t\tself.animation_frame_index += 1\r\n\r\n\t\t# Check if the frame index is greater than the number of frames in the animation, if it is, reset the animation\r\n\t\tif self.animation_frame_index >= len(self.animation_list):\r\n\t\t\tself.animation_frame_index = 0\t\r\n\r\n\t\t#--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\r\n\t\t# Move down towards the bottom of the screen\r\n\t\tself.rect.y += 8 * self.speed_multiplier\r\n\r\n\t\t#--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\r\n\t\t# Draw the enemy onto the screen\r\n\r\n\t\t#screen.blit(enemy_indicator_image, (self.rect.centerx - 16 , self.rect.centery))\r\n\t\tscreen.blit(self.image, (self.rect.x + 2, self.rect.y)) # The plus 2 is because when I was drawing the animation, the image wasn't aligned properly\r\n\t\tscreen.blit(enemy_indicator_image, (self.rect.centerx - 14 , 8)) # minus 14 is to make the indicator be centered with the arrow. 
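(The indicator image is 32 x 32 pixels, so the exact half-width is 16; the arrow sprite is drawn about 2 pixels off-centre, which is presumably why 14 lines up better than 16.) 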
I want the indicator to be near the top of the screen so y = 8\r\n\t\t\r\n\t\t# pygame.draw.rect(screen, BLACK, self.rect, 2)\r\n\r\n\r\n# Note: Need to start drawing the enemy because theres no point sorting all of this out yet.\r\n# Think of ideas for an enemy.\r\n\r\n# I think the enemies will be an arrow (like an arrow being shot from celestials) \r\n#[ Yellow arrow]\r\n# Could have another enemy which is also an arrow but a different colour (maybe blue)\r\n# Which tracks Wukong and after 4 seconds it will disappear automatically if the player hasn't\r\n# destroyed it themselves","repo_name":"LyleW473/20-Second-Game-jam","sub_path":"enemy.py","file_name":"enemy.py","file_ext":"py","file_size_in_byte":3614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"4434473871","text":"# Definition for singly-linked list.\r\n# class ListNode:\r\n# def __init__(self, val=0, next=None):\r\n# self.val = val\r\n# self.next = next\r\nclass Solution:\r\n def getDecimalValue(self, head: ListNode) -> int:\r\n s = ''\r\n item = head\r\n while item:\r\n s += str(item.val)\r\n item = item.next\r\n return int(s, 2)","repo_name":"Ostanppro/Codewars","sub_path":"Leetcode/Convert Binary Number in a Linked List to Integer.py","file_name":"Convert Binary Number in a Linked List to Integer.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"72094676901","text":"#!/usr/bin/python3\n\nfrom collections import deque\n\ndroplets = set()\n\nwith open(\"18.txt\") as f:\n for line in f:\n droplet = tuple([int(x) for x in line.split(',')])\n droplets.add(droplet)\n\nmin_x = min({x[0] for x in droplets})-1\nmax_x = max({x[0] for x in droplets})+1\nmin_y = min({x[1] for x in droplets})-1\nmax_y = max({x[1] for x in droplets})+1\nmin_z = min({x[2] for x in droplets})-1\nmax_z = max({x[2] for x in droplets})+1\n\nstart = (min_x, min_y, min_z)\nassert start not in droplets\nexterior = {start}\n\nqueue = deque([start])\nwhile queue:\n x, y, z = queue.popleft()\n for n in [(x-1, y, z),\n (x+1, y, z),\n (x, y-1, z),\n (x, y+1, z),\n (x, y, z-1),\n (x, y, z+1)]:\n nx, ny, nz = n\n if nx < min_x or nx > max_x or ny < min_y or ny > max_y or nz < min_z or nz > max_z:\n continue\n if n not in exterior and n not in droplets:\n exterior.add(n)\n queue.append(n)\n\nsurface = 0\n\nfor droplet in droplets:\n x, y, z = droplet\n for n in [(x-1, y, z),\n (x+1, y, z),\n (x, y-1, z),\n (x, y+1, z),\n (x, y, z-1),\n (x, y, z+1)]:\n if n in exterior and n not in droplets:\n surface += 1\n\nprint(surface)\n","repo_name":"df7cb/aoc","sub_path":"2022/18b.py","file_name":"18b.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"21757915157","text":"string = input()\nn = len(string)\nres = []\nfor i in string:\n if(i != string[n-1]):\n res.append(i)\n else:\n break\nif(len(res) == 0):\n print(\"NULL\")\nelse:\n res.reverse()\n for i in res:\n print(i, end=\"\")\n","repo_name":"hashs-repo/Coding_Challenges","sub_path":"Company_Specific_Challenges/Accenture_Specific_Coding_Challenges/Convert_To_Palindrome.py","file_name":"Convert_To_Palindrome.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"71464794021","text":"import math\n\nimport numpy as 
np\n\n'''\nhttps://www.rapidtables.com/convert/color/hsv-to-rgb.html\nhttps://www.rapidtables.com/convert/color/rgb-to-hsv.html\n'''\n\ndef vector_to_munsell(u, v, max_v):\n '''\n convert a vector in 2D into Munsell color system\n '''\n \n # 映射到色调\n hue = math.degrees(np.arctan2(v, u))\n if hue < 0:\n hue += 360\n\n value = 100\n\n # 映射到饱和度\n chroma = math.sqrt(u ** 2 + v ** 2) / max_v * 100\n\n return hue, value, chroma\n\ndef munsell_to_rgb(hue: float, value: float, chroma: float):\n '''\n convert color in munsell system into RGB system\n \n 0 <= H < 360, 0 <= chroma <= 100, 0 <= value <= 100\n '''\n\n chroma = chroma / 100\n value = value / 100\n \n c = value * chroma\n x = c * (1 - abs((hue / 60) % 2 - 1))\n m = value - c\n \n if 0 <= hue < 60:\n R_, G_, B_ = (c, x, 0)\n elif 60 <= hue < 120:\n R_, G_, B_ = (x, c, 0)\n elif 120 <= hue < 180:\n R_, G_, B_ = (0, c, x)\n elif 180 <= hue < 240:\n R_, G_, B_ = (0, x, c)\n elif 240 <= hue < 300:\n R_, G_, B_ = (x, 0, c)\n elif 300 <= hue < 360:\n R_, G_, B_ = (c, 0, x)\n else:\n raise ValueError('hue should belong to [0, 360)')\n \n R = (R_ + m) * 255\n G = (G_ + m) * 255\n B = (B_ + m) * 255\n \n return (R, G, B)\n\n\ndef rgb_to_munsell(r, g, b):\n '''\n convert color in rgb system into munsell color system\n '''\n r_ = r / 255\n g_ = g / 255\n b_ = b / 255\n \n cmax = max(r_, g_, b_)\n cmin = min(r_, g_, b_)\n delta = cmax - cmin\n \n value = cmax\n \n if cmax == 0:\n chroma = 0\n else:\n chroma = delta / cmax\n \n if delta == 0:\n hue = 0\n elif cmax == r_:\n hue = 60 * (((g_ - b_) / delta) % 6)\n elif cmax == g_:\n hue = 60 * (((b_ - r_) / delta) + 2)\n elif cmax == b_:\n hue = 60 * (((r_ - g_) / delta) + 4)\n else:\n raise ValueError('can not calculate the hue')\n \n return hue, value * 100, chroma * 100\n\nif __name__ == \"__main__\":\n \n x, y, max_v = 4, 3, 10\n\n # 调用函数进行映射\n hue, value, chroma = vector_to_munsell(x, y, max_v)\n\n # 输出结果\n print(f\"色调(Hue): {hue:.2f}°\")\n print(f\"明度(Value): {value:.2f}\")\n print(f\"饱和度(Chroma): {chroma:.2f}\")\n\n # 调用函数进行转换\n r, g, b = munsell_to_rgb(hue, value, chroma)\n\n # 输出RGB值\n print(f\"对应的RGB值为:({r}, {g}, {b})\")\n \n # 再反向转换到munsell\n h, v, c = rgb_to_munsell(r, g, b)\n \n # 输出结果\n print(f\"色调(Hue): {h:.2f}°\")\n print(f\"明度(Value): {v:.2f}\")\n print(f\"饱和度(Chroma): {c:.2f}\")\n","repo_name":"glzhangzhi/DPIV","sub_path":"PIV_Image_Generator_V2/color_map.py","file_name":"color_map.py","file_ext":"py","file_size_in_byte":2689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"1032358239","text":"import numpy as np\nimport itertools\n\ndef subtraj_ind(s):\n\n NF, Nmolec = np.shape(s)\n\n num_regions = 2 # micro region is 1, meso region is 2)\n region = np.ones_like(s) # default to region 1\n\n for i in range(num_regions-1):\n radius = 17 # mesochannel has 17 A radius\n region += (s <= radius) # increase region number if closer to corner\n\n\n# myregions = {'micro': 1, 'meso': 2}\n myregions = {1: 'micro', 2: 'meso'}\n\n # get indices that bound the subtrajectories\n indices = {}\n indices['micro'] = []\n indices['meso'] = []\n first = 0\n last = 0\n for p in range(Nmolec):\n for v in itertools.groupby(region[:,p]):\n key, group = v\n N = len(list(group))\n last = first + N\n\n indices[myregions[int(key)]].append((first, last))\n first = last\n\n # convert bounding indices to actual indices\n hashes = {}\n hashes['micro'] = []\n hashes['meso'] = []\n for k,v in indices.items():\n for i in v:\n all_indices = np.arange(i[0], 
i[1]).tolist()\n hashes[k].append(all_indices)\n\n return hashes\n\n\n\ndef assemble_list_of_trjs():\n\n list_of_trjs = []\n","repo_name":"vargaslo/custom_format_lammpstrj","sub_path":"subtrajectories.py","file_name":"subtrajectories.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"99178381","text":"import os\nimport sys\nimport argparse\nimport torch\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets, transforms\nfrom torchvision.utils import save_image\nfrom model import Generator\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--test_dir', default='data/facades/test', help='test data directory')\nparser.add_argument('--log_dir', default='logs', help='log directory')\nparser.add_argument('--epoch', type=int, help='epoch')\nparser.add_argument('--input_size', type=int, default=256, help='input size')\nparser.add_argument('--sample_size', type=int, default=10, help='sample size')\nparser.add_argument('--ngf', type=int, default=64, help='number of generator filters')\nparser.add_argument('--order', default='A2B', help='order of conversion (A2B or B2A)')\nopt = parser.parse_args()\nopt.use_gpu = torch.cuda.is_available()\n\nif not os.path.exists(os.path.join(opt.log_dir, 'G_epoch{:03d}.pth'.format(opt.epoch))):\n print(os.path.join(opt.log_dir, 'G_epoch{:03d}.pth'.format(opt.epoch)) + ' does not exist.')\n sys.exit()\n\ntransform = transforms.Compose([\n transforms.Resize((opt.input_size, 2 * opt.input_size)),\n transforms.ToTensor(),\n transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))\n])\ntest_dataset = datasets.ImageFolder(opt.test_dir, transform=transform)\ntest_loader = DataLoader(test_dataset, batch_size=opt.sample_size, shuffle=True)\n\n# network\nG = Generator(opt.ngf)\nG.load_state_dict(torch.load(os.path.join(opt.log_dir, 'G_epoch{:03d}.pth'.format(opt.epoch))))\nif opt.use_gpu:\n G = G.cuda()\n\nfor data, _ in test_loader:\n if opt.order == 'A2B':\n X = data[:, :, :, 0:opt.input_size]\n Y = data[:, :, :, opt.input_size:]\n elif opt.order == 'B2A':\n X = data[:, :, :, opt.input_size:]\n Y = data[:, :, :, 0:opt.input_size]\n else:\n print('order should be A2B or B2A.')\n sys.exit()\n\n if opt.use_gpu:\n X = Variable(X.cuda())\n Y = Variable(Y.cuda())\n else:\n X = Variable(X)\n Y = Variable(Y)\n\n G_result = G(X)\n result = torch.cat((X, Y, G_result), dim=0)\n save_image(result, os.path.join(opt.log_dir, 'generated_epoch{:03d}.png'.format(opt.epoch)), nrow=opt.sample_size)\n\n break","repo_name":"takahiro-itazuri/pix2pix-pytorch","sub_path":"visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"37497575933","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n\n\ndef transient_plot(T_mat, mesh):\n \"\"\"This function will plot each of the arrays from the output of the\n transient model at each timestep on a single 2D graph.\n\n inputs\n ------\n - T_mat: The 2D matrix from the output of the transient_model function\n - mesh: the array of points for the physical location of nodes and\n the materials assigned to them\n\n output\n ------\n A plot of the temperature at each node, at each timestep\n \"\"\"\n\n mesh_radii = []\n counter = 0\n for element in mesh:\n for node in element[1]:\n mesh_radii += [node]\n\n mesh_radii_set = list(set(mesh_radii))\n mesh_radii_ordered = 
sorted(mesh_radii_set, key=float)\n\n fig = plt.figure()\n fig.suptitle('Transient Model')\n for i in range(0, len(T_mat[0, :])):\n plt.plot(mesh_radii_ordered, T_mat[:, i])\n\n plt.xlabel('Radial Position (m)')\n plt.ylabel('Temperature (K)')\n\n plt.legend()\n plt.show()\n","repo_name":"THETA476/THETA","sub_path":"src/transient_plot.py","file_name":"transient_plot.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14657899593","text":"from node.node import Node\nfrom datalog import Datalog as D\n\nclass NodeP(Node):\n def __init__(self, _id, ligne_pwr,datalog):\n self.enableAtTime = 0\n self.disableAtTime = 0\n super().__init__( _id, ligne_pwr,datalog) \n\n def disable_prod(self,t):\n print(self)\n if self.enable:\n self.enable = False\n self.disableAtTime = t\n return self._id\n\n return -1\n\n def enable_prod(self,t):\n if self.enable == False:\n self.enable = True\n self.enableAtTime = t\n return self._id\n \n return -1","repo_name":"FlorianLebecque/GridSimulator","sub_path":"simulator/node/NodeP.py","file_name":"NodeP.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"43500741397","text":"#!/usr/bin/env python3\n\nimport unittest\nfrom graphtheory.structures.edges import Edge\nfrom graphtheory.structures.graphs import Graph\nfrom graphtheory.hamiltonian.tspmst import *\n\n# http://en.wikipedia.org/wiki/Travelling_salesman_problem\n# Wagi krawedzi spelniaja warunek trojkata.\n# 20 Three Hamiltonian cycles:\n# 0 --- 1 0 1 3 2 = 108\n# | \\ / | 0 2 1 3 = 141\n# 42| X |34 0 1 2 3 = 97\n# | / \\ |\n# 2 --- 3\n# 12\n\nclass TestTSP(unittest.TestCase):\n\n def setUp(self):\n self.N = 4\n self.G = Graph(self.N)\n self.nodes = range(self.N)\n self.edges = [\n Edge(0, 1, 20), Edge(0, 3, 35), Edge(0, 2, 42), \n Edge(1, 2, 30), Edge(1, 3, 34), Edge(2, 3, 12)]\n for node in self.nodes:\n self.G.add_node(node)\n for edge in self.edges:\n self.G.add_edge(edge)\n self.best_weight = 97\n #self.G.show()\n\n def test_prim_tsp_with_edges(self):\n algorithm = PrimTSPWithEdges(self.G)\n algorithm.run(0)\n weight = sum(edge.weight for edge in algorithm.hamiltonian_cycle)\n self.assertEqual(weight, self.best_weight)\n\n def test_prim_tsp_with_cycle_graph(self):\n algorithm = PrimTSPWithGraph(self.G)\n algorithm.run(0)\n weight = sum(edge.weight for edge in algorithm.hamiltonian_cycle.iteredges())\n self.assertEqual(weight, self.best_weight)\n #algorithm.hamiltonian_cycle.show()\n self.assertEqual(algorithm.hamiltonian_cycle.e(),\n algorithm.hamiltonian_cycle.v())\n for node in algorithm.hamiltonian_cycle.iternodes():\n self.assertEqual(algorithm.hamiltonian_cycle.degree(node), 2)\n\n def test_exceptions(self):\n self.assertRaises(ValueError, PrimTSPWithEdges, Graph(5, True))\n self.assertRaises(ValueError, PrimTSPWithGraph, Graph(5, True))\n\n def tearDown(self): pass\n\nif __name__ == \"__main__\":\n\n unittest.main()\n\n# EOF\n","repo_name":"ufkapano/graphtheory","sub_path":"graphtheory/hamiltonian/tests/test_tspmst.py","file_name":"test_tspmst.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"37"} +{"seq_id":"25940658114","text":"import os\n\n\ndef get_map():\n \"\"\"Função que lê o arquivo 'map.txt' e devolve uma matriz de caracteres\n\n Returns\n -------\n Retorna uma matriz de caracteres que representam o 
mapa do jogo\n \"\"\"\n here = os.path.dirname(os.path.abspath(__file__))\n\n filename = os.path.join(here, \"map.txt\")\n file = open(filename)\n map = file.read()\n\n line = []\n map_result = []\n\n for char in map:\n if char != \"\\n\":\n line.append(char)\n else:\n map_result.append(line.copy())\n line.clear()\n\n map_result.append(line.copy())\n return map_result\n","repo_name":"FengYungz/Turtle","sub_path":"turtle_map.py","file_name":"turtle_map.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19890404200","text":"#!/usr/bin/env python3\n\nimport random\n\nimport math\n\nDESCRIPTION = 'Find the greatest common divisor of given numbers.'\n\n\ndef get_question_and_answer():\n num1 = random.randint(1, 50)\n num2 = random.randint(1, 50)\n question = f'{num1} {num2}'\n answer = str(math.gcd(num1, num2))\n return question, answer\n","repo_name":"MaximBaiborodin/python-project-lvl1","sub_path":"brain_games/games/gcd.py","file_name":"gcd.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20566826287","text":"from tensorflow.keras.models import load_model\nimport numpy as np\n\n# Load the saved model\nmodel = load_model('my_model.h5')\n\n# New player data\nnew_player = [[4, 4, 5, 6, 4], [6, 6, 3, 3, 4], [5, 5, 5, 6, 6], [4, 5, 6, 7, 8], [6, 6, 7, 6, 8]]\n\n# Compute the total scores\ntotal_player = []\nfor scores in new_player:\n total_player.append(sum(scores))\n\n# Create the input data for prediction\nX_new = np.array(new_player).reshape((5, 5))\n\n# Use the loaded model to predict the next total score\nnext_total = model.predict(X_new)\nprint(\"Next possible score: \", next_total[0][0])\n","repo_name":"Jaimy-monsuur/Next-score-prediction","sub_path":"tensorflow-keras/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30299388853","text":"'''\npractice question for chapter 4\n'''\n\ndef printSpam(spam):\n string = ''\n for i in range(len(spam)-1):\n string += spam[i]\n string += ', '\n\n last_item = 'and ' + spam[-1]\n string += last_item\n\n print(string)\n\nspam = []\n\nwhile True:\n input_str = input(\"Enter an object. Input nothing to stop.\")\n if input_str == '':\n break\n else:\n spam.append(input_str)\n\nprintSpam(spam)\n","repo_name":"nimiew/automate-the-boring-stuff","sub_path":"Part-1/Chapter4-Lists/commacode.py","file_name":"commacode.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72777155306","text":"class Employee:#class won't be empty always\n no_of_leaves= 8 # this is a object of class\n\n pass\n\nharry = Employee()\nrohan = Employee()\n\nharry.name = 'harry'\nharry.salary = 40384\nharry.role='instrucure'\n\nrohan.name =\"Rohan\"\nrohan.salary = 45555\nrohan.role = \"Student\"\n\nprint (harry.salary)\nprint(rohan.role)\nprint(harry.role)\n# we can acces the object of the class through the elements of class or variables\nprint(harry.no_of_leaves)\nprint(rohan.no_of_leaves)\nprint(Employee.no_of_leaves)\n\n# changing the objects\nprint (Employee.__dict__) # this will returns a dict. 
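It maps attribute names to their values, e.g. {'no_of_leaves': 8, ...}, and 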
holds only what is defined on the class itself (instance attributes like harry.name are not included) \nEmployee.no_of_leaves= 9\n# rohan.no_of_leaves = 9 # trying to change the class attribute through an instance (this will not work )\nprint('the new one is',Employee.no_of_leaves)\n\n\n\n\n\n","repo_name":"KojoAning/PYHTON_PRACTICE","sub_path":"oops2.py","file_name":"oops2.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28764950989","text":"# encoding=utf8\r\n\r\n\"\"\"\r\n2021/03/02\r\n\"\"\"\r\n\r\nfrom flask import Flask, session, render_template, request, redirect, url_for, flash\r\nfrom flask_sqlalchemy import SQLAlchemy\r\nfrom forms import RegistrationForm, LoginForm, AddForm\r\nfrom flask_login import UserMixin\r\nfrom datetime import datetime\r\nimport os\r\n\r\n# create the app\r\napp = Flask(__name__)\r\napp.config['SECRET_KEY'] = os.urandom(24)\r\napp.config['SESSION_TYPE'] = 'filesystem'\r\n\r\n\r\n# create a database to store user information and sentences\r\ndb = SQLAlchemy()\r\nDB_NAME = 'database.db'\r\napp.config['SQLALCHEMY_DATABASE_URI'] = f'sqlite:///{DB_NAME}'\r\ndb.init_app(app)\r\n\r\nclass User(db.Model, UserMixin):\r\n \"\"\"\r\n Define a User model to store the username and password \r\n that a new user creates via the form\r\n \"\"\"\r\n id = db.Column(db.Integer, primary_key=True)\r\n username = db.Column(db.String(25), unique=True)\r\n password = db.Column(db.String(25))\r\n\r\n def __repr__(self):\r\n return '<User %r>' % self.username\r\n\r\nclass Sentences(db.Model):\r\n \"\"\"\r\n Define a Sentences model to store the sentence \r\n that users enter via the form\r\n \"\"\"\r\n __tablename__ = 'sentences'\r\n id = db.Column(db.Integer, primary_key=True)\r\n sentence = db.Column(db.String(1000)) #The maximum characters that users can enter is 1000\r\n pub_date = db.Column(db.DateTime)\r\n\r\n # Initializes the fields with entered data\r\n def __init__(self, sentence):\r\n self.sentence = sentence\r\n self.pub_date = datetime.now()\r\n\r\nwith app.app_context():\r\n db.create_all()\r\n \r\n@app.route('/')\r\ndef index():\r\n \"\"\"\r\n Index Page: displaying all the sentences ordered in \r\n ascending order of post time\r\n sentences: all the sentences that users enter\r\n \"\"\"\r\n return render_template('index.html', sentences=Sentences.query.order_by(Sentences.pub_date.asc()).all())\r\n\r\n@app.route('/create', methods=['POST', 'GET'])\r\ndef create():\r\n \"\"\"\r\n create page: get sentences from users via form\r\n if the information is valid then redirect back to index page\r\n \"\"\"\r\n form = AddForm(request.form)\r\n # check if the information entered in the form is valid\r\n # if valid then store the sentence in the database\r\n if request.method == 'POST' and form.validate():\r\n sentence = Sentences(sentence=form.sentence.data)\r\n db.session.add(sentence)\r\n db.session.commit()\r\n return redirect(url_for('index'))\r\n return render_template('create.html', form = form)\r\n\r\n@app.route(\"/register\", methods=['GET', 'POST'])\r\ndef register():\r\n \"\"\"\r\n register page: get user information from new users via form\r\n if the information is valid then redirect to login page\r\n \"\"\"\r\n form = RegistrationForm(request.form)\r\n if request.method == 'POST' and form.validate(): # check if the information entered in the form is valid\r\n flash('The account is created, please log in')\r\n new_user = User(username=form.username.data, password=form.psw.data) # store the information in the database\r\n        
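# NOTE: the password is stored and compared in plain text here; a real app should hash it first (e.g. werkzeug.security.generate_password_hash / check_password_hash)\r\n        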
db.session.add(new_user)\r\n db.session.commit()\r\n return redirect(url_for('login'))\r\n return render_template('register.html', form = form)\r\n\r\n@app.route(\"/login\", methods=['GET', 'POST'])\r\ndef login():\r\n \"\"\"\r\n login page: get user information from new users via form\r\n if the information is valid then redirect to create page\r\n \"\"\"\r\n form = LoginForm(request.form)\r\n if request.method == 'POST' and form.validate():\r\n user = User.query.filter_by(username=form.username.data).first()\r\n if user:\r\n if user.password == form.psw.data: # check if the username and password entered match\r\n return redirect(url_for('create'))\r\n return render_template('login.html', form = form)\r\n\r\n\r\n\r\n\r\n\r\n# main\r\nif __name__ == '__main__':\r\n app.run(debug=True)\r\n","repo_name":"emmymmma/Final-Project","sub_path":"Python Project/app1.py","file_name":"app1.py","file_ext":"py","file_size_in_byte":3858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71332560109","text":"import json\nimport pickle\nimport numpy as np\n\n\nclass car_price ():\n\n def __init__(self,data):\n self.data=data\n\n\n def loading_files(self):\n with open('artifacts/car_info.json','r') as file:\n self.car_info=json.load(file)\n\n with open('artifacts/car_prise_pred.pkl','rb') as file:\n self.car_model=pickle.load(file) \n\n\n\n def price_prediction(self):\n self.loading_files()\n\n Year = self.data['html_year']\n Present_Price = self.data['html_Present_Price']\n Kms_Driven = self.data['html_Kms_Driven']\n FUEL = self.data['html_fuel_type']\n TYPE_OF_SELLER = self.data['html_seller_type']\n TRANSMISSION_TYPE = self.data['html_trasmission']\n Owner = self.data['html_owner']\n\n Fuel_Type = self.car_info[\"Fuel_Type\"][FUEL]\n Seller_Type = self.car_info[\"Seller_Type\"][TYPE_OF_SELLER]\n Transmission = self.car_info[\"Transmission\"][TRANSMISSION_TYPE]\n\n\n\n user_data=np.zeros(len(self.car_info['column_name']))\n\n user_data[0]= Year\n user_data[1]= Present_Price\n user_data[2]= Kms_Driven\n user_data[3]= Fuel_Type\n user_data[4]= Seller_Type\n user_data[5]= Transmission\n user_data[6]= Owner\n\n result=self.car_model.predict([user_data])[0]\n \n\n return result\n \n\n\n ","repo_name":"shubh-555/car","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31740857635","text":"#Passo 1: import the sales base\n\nimport pandas as pd\n\ntabela = pd.read_csv('telecom_users.csv')\n\n#Passo 2: changing to better\n\n#coluna inútil(unnamed)\n#axis = 0 --> linha\n#axis = 1 --> coluna\n\ntabela = tabela.drop('Unnamed: 0', axis=1)\nprint(tabela)\n\n#Date processing\n#transformando elementos no type errado (total gasto = object --> float)\n#Se a transformação de nome a número der erro --> valor vazio = 'coerce'\n\ntabela['TotalGasto'] = pd.to_numeric(tabela['TotalGasto'], errors='coerce')\nprint(tabela.info())\n\n#valores vazios(Nan)\n\n#colunas vazias\n#excluir colunas vazias\n#how= 'all'> coluna completamente vazia\n#how= 'any'-> coluna que tem pelo menos 1 valor vazio\n\ntabela = tabela.dropna(how='all', axis=1)\n\n#excluir linhas vazias\n\ntabela = tabela.dropna(how='any', axis=0)\n\n#Passo 3: Initial review\n\nprint(tabela['Churn'].value_counts())\nprint(tabela['Churn'].value_counts(normalize=True).map('{:.1%}'.format))\n\n# Passo 4: Detailed analisys\n\nimport plotly.express as px\n\nfor coluna in 
tabela.columns:\n grafico = px.histogram(tabela, x=coluna, color='Churn')\n grafico.show()\n","repo_name":"JoaoPedroDevOz/AulasIntensivopython","sub_path":"Aula 2.py","file_name":"Aula 2.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73389818986","text":"'''小工具(函数)大集合'''\r\n\r\n'''========字符处理========'''\r\n## 提取字符串中的汉字 ##\r\ndef get_hanzi(str):\r\n\timport re\r\n\tns = ['0','1','2','3','4','5','6','7','8','9']\r\n\tline = str.strip() # 处理前进行相关的处理,包括转换成Unicode等\r\n\tpattern = re.compile('[^\\u4e00-\\u9fa50-9]') # 中文的编码范围是:\\u4e00到\\u9fa5\r\n\tzh = \"\".join(pattern.split(line)).strip()\r\n\t# zh = \",\".join(zh.split())\r\n\toutStr = zh # 经过相关处理后得到中文的文本\r\n\tzis = []\r\n\t\r\n\tfor j in outStr:\r\n\t\tif j not in ns:\r\n\t\t\tzis.append(j)\r\n\to = ''.join(zis)\r\n\treturn o\r\n\r\n## 字符串转数字 ##\r\ndef word2num(str,c=10):\r\n\timport binascii\r\n\tstring = bytes(str,encoding='utf-8')\r\n\tout = int(binascii.hexlify(string),16)\r\n\tif c == 8:\r\n\t\treturn oct(out)[2:]\r\n\telif c == 16:\r\n\t\treturn hex(out)[2:]\r\n\telif c == 2:\r\n\t\treturn bin(out)[2:]\r\n\telse:\r\n\t\treturn out\r\n\r\n## 数字转字符串 ##\t\t\r\ndef num2word(num):\r\n\timport binascii\r\n\tnum = int(num)\r\n\tw = binascii.unhexlify(hex(num)[2:])\r\n\t# words = str(w)[2:-1]\r\n\t# out = words.encode('raw_unicode_escape').decode()\r\n\tout = w.decode()\r\n\treturn out\r\n\r\n## 文本转二维码图片 ##\r\ndef make_qr(str,save_path):\r\n\t\r\n\timport qrcode\r\n\tfrom PIL import Image\r\n\timport os\r\n\tqr=qrcode.QRCode(\r\n\tversion=4, #生成二维码尺寸的大小 1-40 1:21*21(21+(n-1)*4)\r\n\terror_correction=qrcode.constants.ERROR_CORRECT_M, #L:7% M:15% Q:25% H:30%\r\n\tbox_size=10, #每个格子的像素大小\r\n\tborder=2, #边框的格子宽度大小\r\n\t)\r\n\tqr.add_data(str)\r\n\tqr.make(fit=True)\r\n\timg=qr.make_image()\r\n\timg.save(save_path)\r\n\tos.system(save_path)\r\n\t\r\n## 生成文本朗读音频文件 ##\t\r\ndef speech(words):\r\n\tfrom aip import AipSpeech\r\n\tfrom random import randint\r\n\timport os\r\n\t# 配置百度AI-api\r\n\tAPP_ID = '15836817'\r\n\tAPI_KEY = 'Yw0nM6YReM6DNHndO4c5qn81'\r\n\tSECRET_KEY = 'fVAeZsQaGpbEfkGLsVF5SnTipsRmV7rj'\r\n\tclient = AipSpeech(APP_ID, API_KEY, SECRET_KEY)\r\n\t\r\n\tresult = client.synthesis(words, 'zh', 1, {\r\n\t\t'vol': 8,'per':5,'spd':4\r\n\t\t})\r\n\tmp3 = os.getcwd()+'\\\\data\\\\' + words + '.mp3'\r\n\t# 识别正确返回语音二进制 错误则返回dict 参照下面错误码\r\n\tif not isinstance(result, dict):\r\n\t\ttry:\r\n\t\t\twith open(mp3, 'wb') as f:\r\n\t\t\t\tf.write(result)\t\t\r\n\t\texcept:\r\n\t\t\tmp3 = os.getcwd()+'\\\\data\\\\' + str(randint(10000000,99999999)) + '.mp3'\r\n\t\t\twith open(mp3, 'wb') as f:\r\n\t\t\t\tf.write(result)\t\t\r\n\treturn mp3\r\n\r\n'''========文件处理========'''\r\n## gif拆分为多张png静态图片 ##\r\ndef split_gif(path,sp=''):\r\n\t# sp参数若自定义,需在结尾加上‘\\\\’,如 sp='sp\\\\'\r\n\timport os\r\n\tfrom PIL import Image\r\n\tdef analyseImage(path):\r\n\t\tim = Image.open(path)\r\n\t\tresults = {\r\n\t\t\t'size': im.size,\r\n\t\t\t'mode': 'full',\r\n\t\t}\r\n\t\ttry:\r\n\t\t\twhile True:\r\n\t\t\t\tif im.tile:\r\n\t\t\t\t\ttile = im.tile[0]\r\n\t\t\t\t\tupdate_region = tile[1]\r\n\t\t\t\t\tupdate_region_dimensions = update_region[2:]\r\n\t\t\t\t\tif update_region_dimensions != im.size:\r\n\t\t\t\t\t\tresults['mode'] = 'partial'\r\n\t\t\t\t\t\tbreak\r\n\t\t\t\tim.seek(im.tell() + 1)\r\n\t\texcept EOFError:\r\n\t\t\tpass\r\n\t\treturn results\r\n\t## main ##\r\n\tmode = analyseImage(path)['mode']\r\n\tim = Image.open(path)\r\n\ti = 0\r\n\tp = 
im.getpalette()\r\n\tlast_frame = im.convert('RGBA')\r\n\r\n\ttry:\r\n\t\twhile True:\r\n\t\t\tprint\r\n\t\t\t\"saving %s (%s) frame %d, %s %s\" % (path, mode, i, im.size, im.tile)\r\n\t\t\t'''\r\n\t\t\t如果GIF使用本地颜色表,每个框架都有自己的调色板。\r\n\t\t\t如果没有,我们需要将全局调色板应用到新框架上。\r\n\t\t\t'''\r\n\t\t\tif not im.getpalette():\r\n\t\t\t\tim.putpalette(p)\r\n\t\t\tnew_frame = Image.new('RGBA', im.size)\r\n\t\t\t'''\r\n\t\t\t这个文件是一个“部分”模式的GIF,其中帧更新一个不同大小的区域到整个图像?\r\n\t\t\t如果是这样,我们需要通过将新框架粘贴到前面的框架上来构建新框架。\r\n\t\t\t'''\r\n\t\t\tif mode == 'partial':\r\n\t\t\t\tnew_frame.paste(last_frame)\r\n\t\t\tnew_frame.paste(im, (0, 0), im.convert('RGBA'))\r\n\t\t\t#存储到gif所在目录\r\n\t\t\tspath = '\\\\'.join(path.split('\\\\')[:-1])+'\\\\'+sp+''.join(path.split('\\\\')[-1].split('.')[:-1])\r\n\t\t\tnew_frame.save('%s-%d.png' % (spath, i), 'PNG')\r\n\r\n\t\t\ti += 1\r\n\t\t\tlast_frame = new_frame\r\n\t\t\tim.seek(im.tell() + 1)\r\n\texcept EOFError:\r\n\t\tpass\r\n\r\n## 图片转文本风格 ##\r\ndef pic2txt(pic,txt,wh=(0.3,0.3),asciis = \"@%#&?*+=-. \"):\r\n\timport os\r\n\tfrom PIL import Image\r\n\timg = Image.open(pic)\r\n\tout = img.convert(\"L\")\r\n\tw = wh[0]\r\n\th = wh[1]\r\n\twidth,height = out.size\r\n\tout = out.resize((int(width * w),int(height * h)))\r\n\twidth,height = out.size\r\n\r\n\t# \r\n\t\r\n\ttexts = \"\"\r\n\tfor row in range(height):\r\n\t\tfor col in range(width):\r\n\t\t\tgray = out.getpixel((col,row))\r\n\t\t\ttexts += asciis[int (gray / 255 *(len(asciis)-1))]\r\n\t\ttexts += \"\\n\"\r\n\twith open(txt,\"w\") as file:\r\n\t\tfile.write(texts)\r\n\t\tout.close()\r\n\t\tos.system('start '+r'C:\\\\quickstart\\\\np.lnk '+txt)\r\n\r\n## 按像素剪裁图片 ##\r\ndef cut_img(img,edth=-24,edtw=0):\t\r\n\timport cv2\r\n\timport os\r\n\t# svimg = os.getcwd()+'\\\\data\\\\'+img.split('\\\\')[-1]\r\n\tsvimg = 'D:\\\\#My\\\\GiData\\\\Source\\\\arts\\\\images\\\\Pics\\\\TAGL\\\\cash\\\\cut\\\\'+img.split('\\\\')[-1]\r\n\timg = cv2.imread(img)\r\n\th = img.shape[0]\r\n\tw = img.shape[1]\r\n\tcropped = img[0:h+edth, 0:w+edtw] # 裁剪坐标为[y0:y1, x0:x1]\r\n\tcv2.imwrite(svimg, cropped)\r\n\r\n## Matplotlib绘制图表 ##\r\ndef draw_f():\r\n\timport matplotlib.pyplot as plt\t#常用的功能都包含在matplotlib的pyplot方法里面\r\n\timport numpy as np\r\n\tx = np.linspace(-10000,10000,1000)\r\n\r\n\t#定义函数(集)\r\n\t#-----------------#\r\n\tylist = [\r\n\t\t[3*x**3,'red',2.0,'-'],\r\n\t\t# [22*x+4,'grey',1.0,'-'],\r\n\t\t# [x**2,'blue',1.0,':']\r\n\t\t]\r\n\t#-----------------#\r\n\r\n\tplt.figure(num=3,figsize=(8,5))\t#它下面的plot显示在这个figure窗口中\r\n\t#num:自定义figure的编号,figsize:窗口长宽\r\n\tfor y in ylist:\r\n\t\tplt.plot(x,y[0],color=y[1],linewidth=y[2],linestyle=y[3])#'--'代表虚线\r\n\tplt.show()\t#显示图像\r\n\r\n'''========小功能========'''\r\n\r\n## 根据输入时间倒计时 ##\r\ndef timer():\r\n\t# 格式:秒.分.时(“.分.时”或“.时”可以省略)\r\n\timport datetime\r\n\timport time\r\n\timport pygame\r\n\tdef play_music():\r\n\t\t\tfilepath = \"D:\\\\#My\\\\python_work\\\\Gina\\\\sounds\\\\dudu.mp3\"\r\n\t\t\tpygame.mixer.init()\r\n\t\t\t# 加载音乐\r\n\t\t\tpygame.mixer.init(frequency=1550,size=-16,channels=4)\r\n\t\t\tpygame.mixer.music.load(filepath)\r\n\t\t\t# pygame.mixer.music.play(start=0.0)\r\n\t\t\t#播放时长,没有此设置,音乐不会播放,会一次性加载完\r\n\t\t\t# time.sleep(30)\r\n\t\t\t# pygame.mixer.music.stop()\r\n\t\t\tpygame.mixer.music.play()\r\n\tdef timer_end(s=0,min=0,h=0,d=0):\r\n\t\t# 参数:秒,分,时,天\r\n\t\t# 返回列表[当前时间,计时后时间](需格式化)\r\n\t\tcur_time = datetime.datetime.now() #获取当前(系统)时间\r\n\t\tgoal = cur_time + datetime.timedelta(days=d,hours=h,minutes=min,seconds=s)\r\n\t\treturn [cur_time,goal]\r\n\r\n\tdef er(ask):\r\n\t\tsplitasklist = 
ask.split('.')\r\n\t\tsptimes = []\r\n\t\ttry:\r\n\t\t\tfor i in splitasklist:\r\n\t\t\t\tsptimes.append(int(i))\r\n\t\t\tif len(sptimes)==1:\r\n\t\t\t\tgoallist = timer_end(sptimes[0])\r\n\t\t\telif len(sptimes)==2:\r\n\t\t\t\tgoallist = timer_end(sptimes[0],sptimes[1])\r\n\t\t\telif len(sptimes)==3:\r\n\t\t\t\tgoallist = timer_end(sptimes[0],sptimes[1],sptimes[2])\r\n\t\t\telif len(sptimes)==4:\r\n\t\t\t\tgoallist = timer_end(sptimes[0],sptimes[1],sptimes[2],sptimes[3])\r\n\t\t\ti_time = datetime.datetime.strftime(goallist[0],'%Y-%m-%d %H:%M:%S')\r\n\t\t\to_time = datetime.datetime.strftime(goallist[1],'%Y-%m-%d %H:%M:%S')\r\n\t\t\tprint('*'*50)\r\n\t\t\tprint('开始计时,计时将在',o_time,'结束。')\r\n\t\t\tprint('*'*50)\r\n\t\t\tprint('\\n\\t\\t _ _\\\\_ _ _/_ _\\n\\t\\t|| ||')\r\n\t\t\treturn goallist[1]\r\n\t\texcept:\r\n\t\t\tprint('咦?咦咦咦咦?')\r\n\t\t\treturn datetime.datetime.now()\r\n\tdef monitorkey(key):\r\n\t\tif keyboard.wait(key):\r\n\t\t\treturn True\r\n\t#=========TEST=========\r\n\task = input('计时时间:')\r\n\tdef counter(ask):\r\n\t\twhile True:\r\n\t\t\tend = 1\r\n\t\t\tswitch = 0\r\n\t\t\tgoaltime = er(ask)\r\n\t\t\twhile end != '0:00:00':\r\n\t\t\t\tshowrestdate = goaltime - datetime.datetime.now()\r\n\t\t\t\tshow = str(showrestdate).split('.')[0]\r\n\t\t\t\tprint('\\r\\t\\t||__'+show+'__||',end='')\r\n\t\t\t\tsecondtxt = str(showrestdate).split(':')[-1].split('.')[0]\r\n\t\t\t\tend = show\r\n\t\t\t\r\n\t\t\tplay_music()\r\n\t\t\task = input('\\n\\n\\n\\n计时时间:')\r\n\tcounter(ask)\r\n\r\n\r\n\r\n\r\n'''==========Test========='''\r\n# picpath = 'D:\\\\#My\\\\GiData\\\\Source\\\\Arts\\\\Images\\\\Pics\\\\anqr.liu.iahx.i.hvjp.iyr.o_s.1834.jpg'\r\n# picpath = 'C:\\\\Users\\\\111\\\\Documents\\\\桌面文件\\\\(1).jpg'\r\n# pic2txt(picpath,\r\n\t# \"C:\\\\Users\\\\111\\\\Documents\\\\桌面文件\\\\PIC.txt\",\r\n\t# asciis = \"@%#&?*+=-. \",wh=(0.2,0.1))\r\n\t\r\ns = bin(877523)\r\nprint(s)","repo_name":"ZacharyChiu/Gina","sub_path":"GiFun/GMain/Tools.py","file_name":"Tools.py","file_ext":"py","file_size_in_byte":8470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42234832713","text":"# Author: Alfredo Ceballos\n# Assignment: Lab #2\n# Completed: 1/8/2018\n\nfrom math import sqrt\n\n# Part 1\n\n# Created a function that takes in the rate as an argument and\n# finds the total of a savings account with an initial deposit\n# of $1000 with a time of 18 years to compound. 
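The compound-interest\n# formula used is total = deposit * (1 + rate/100) ** years. 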
Output is formatted\n# and the total is rounded to 2 decimal places\ndef calc_total(rate):\n initial_deposit = 1000\n num_years = 18\n total = initial_deposit * ((1 + (rate / 100)) ** num_years)\n print(\"At %d%% rate total is %.2f\" % (rate, total))\n\ncalc_total(3) # Rate 0f 3%\ncalc_total(5) # Rate of 5%\ncalc_total(12)# Rate of 12%\n\n\"\"\"\ninitial_deposit = 1000\nrate = 3\nnum_years = 18\ntotal = initial_deposit * ((1 + (rate / 100)) ** num_years)\nprint(\"At 3%% rate total is %.2f\" % total)\n\nrate = 5\ntotal = initial_deposit * ((1 + (rate / 100)) ** num_years)\nprint(\"At 5%% rate total is %.2f\" % total)\n\nrate = 12\ntotal = initial_deposit * ((1 + (rate / 100)) ** num_years)\nprint(\"At 12%% rate total is %.2f\" % total)\n\"\"\"\n\n# Part 2\n\n# Getting user input and saving those values as floating point numbers\n# using 'a' and 'b'\na = float(input(\"Input a number: \"))\nb = float(input(\"Input another number: \"))\n\n# Calculating the length of the hypotenuse of a triangle with sides of\n# that length\nhyp = sqrt((a * a) + (b * b))\n\n# Printing value of the hypotenuse and rounding it to 3 decimal places\nprint(\"The hypotenuse of a triangle with these sides would be %.3f\" % hyp)\n\n\"\"\" OUTPUT\nAt 3% rate total is 1702.43\nAt 5% rate total is 2406.62\nAt 12% rate total is 7689.97\nInput a number: 32\nInput another number: 12\nThe hypotenuse of a triangle with these sides would be 34.176\n\"\"\"\n","repo_name":"ace231/CS299_Labs","sub_path":"lyLab2.py","file_name":"lyLab2.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17118187103","text":"from PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\nfrom window.canvas import *\nfrom window.log_viewer import Logger, LogView\nfrom listeners.click_listener import *\nfrom components.window import *\nfrom objects.point import Point\nfrom factory.object_factory import ObjectType\nfrom factory.window_factory import *\n# creating class for main window\n\n\nclass MainWindow(QMainWindow):\n\n def __init__(self):\n super().__init__()\n\n self.window = Window(-400, -300, 400, 300)\n\n Logger.init(LogView())\n Logger.log(\"Hello World\")\n\n # Configuration\n title = \"Graphic Computing\"\n\n top = 400\n left = 200\n width = 1280\n height = 720\n\n self.setWindowTitle(title)\n self.setGeometry(top, left, width, height)\n self.setWindowFlags(Qt.Window | Qt.MSWindowsFixedSizeDialogHint)\n\n # Layout\n w = QWidget()\n l = QGridLayout()\n w.setLayout(l)\n\n self.objectList = QListWidget()\n self.objectList.setSelectionMode(QAbstractItemView.ExtendedSelection)\n\n buttonWidget = QWidget()\n buttonLayout = QVBoxLayout()\n buttonWidget.setLayout(buttonLayout)\n buttonWidget.setMaximumWidth(350)\n\n # Center Layout\n centerLayout = QHBoxLayout()\n centerWidget = QWidget()\n centerWidget.setLayout(centerLayout)\n\n addLabel(\"X:\", centerLayout)\n self.center_x_input = addLineEditDouble(centerLayout)\n\n addLabel(\"Y:\", centerLayout)\n self.center_y_input = addLineEditDouble(centerLayout)\n\n addButton(\"Apply\", lambda: self.canvas.updateCenter(Point(float(\n self.center_x_input.text()), float(self.center_y_input.text()))), centerLayout)\n\n # Center buttons layout\n btnCenterLayout = QHBoxLayout()\n btnCenterWidget = QWidget()\n btnCenterWidget.setLayout(btnCenterLayout)\n\n addButton(\"World Center\", lambda: self.canvas.updateCenter(\n Point(0, 0)), btnCenterLayout)\n addButton(\"Object Center\",\n 
lambda: self.canvas.updateCenter(), btnCenterLayout)\n\n # Canvas\n self.canvas = Canvas(self.window, w, self.objectList,\n (self.center_x_input, self.center_y_input))\n\n self.canvas.updateCenter()\n\n # Elements\n addLabel(\"Objects\", buttonLayout)\n buttonLayout.addWidget(self.objectList)\n addLabel(\"Create Objects\", buttonLayout)\n addButton(\"New Point\", lambda: self.canvas.initCreation(\n ObjectType.POINT), buttonLayout)\n addButton(\"New Line\", lambda: self.canvas.initCreation(\n ObjectType.LINE), buttonLayout)\n addButton(\"New Wireframe\", lambda: self.canvas.initCreation(\n ObjectType.WIREFRAME), buttonLayout)\n addButton(\"New Curve\", lambda: self.canvas.initCreation(\n ObjectType.CURVE2D), buttonLayout)\n addLabel(\"Rotation center:\", buttonLayout)\n\n # self.center_x_input = QDoubleSpinBox()\n\n buttonLayout.addWidget(centerWidget)\n buttonLayout.addWidget(btnCenterWidget)\n\n l.addWidget(buttonWidget, 0, 0, 1, 1)\n l.addWidget(self.canvas, 0, 1, 5, 5)\n l.addWidget(Logger.logger, 6, 0, 6, 6)\n\n self.setCentralWidget(w)\n\n def updateObjectList(self, l):\n self.objectList.clear()\n self.objectList.addItems(l)\n","repo_name":"flametuner/computational-graphics","sub_path":"src/window/main_window.py","file_name":"main_window.py","file_ext":"py","file_size_in_byte":3398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25210140124","text":"from django.shortcuts import render\nfrom django.contrib.sites.models import Site\nfrom middleware.language import LocaleMiddleware\nfrom django.shortcuts import get_object_or_404\nfrom Users.models import CustomUserModel\n# Create your views here.\n \n@LocaleMiddleware\ndef base(request):\n email_verification = True\n template_name = 'base.html'\n if request.user.is_authenticated:\n user = get_object_or_404(CustomUserModel, id = request.user.id)\n email_verification = user.email_verification\n return render(request, template_name ,{'email_verification':email_verification})\n \n@LocaleMiddleware\ndef about(request):\n email_verification = True\n template_name = 'about.html'\n if request.user.is_authenticated:\n user = get_object_or_404(CustomUserModel, id = request.user.id)\n email_verification = user.email_verification\n return render(request, template_name ,{'email_verification':email_verification})\n\ndef rate_limited(request):\n template_name = '429.html'\n return render(request, template_name)\n\ndef robots(request):\n template_name=\"bots/robots.txt\",\n current_site = Site.objects.get_current()\n return render(request, template_name, content_type=\"text/plain\", context={'site_domain':current_site})\n ","repo_name":"Shaxzodb/mysite","sub_path":"App/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23239898114","text":"from struct import pack\nfrom math import ceil\nimport base64\n\n# 2.4 Integer Encoding\n# I2OSP as well\n# Tested: OK\ndef encode(x, outlen=None):\n ret_x = b''\n while x != 0:\n ret_x = pack('=B', x & 0xff) + ret_x\n x >>= 8\n if outlen and len(ret_x) < outlen:\n ret_x = b'\\x00' * (outlen - len(ret_x)) + ret_x\n return ret_x\n\n\n# To test encoding\n# OS2IP as well\n# Tested: OK\ndef decode(ret_x):\n x = 0\n k = len(ret_x)\n for i in range(k):\n x = x + ret_x[i] * pow(2, 8 * (k - 1 - i))\n return x\n\n\n# Used only for formatting\ndef byte_to_str(i):\n if i == b'\\x00' or i is None:\n return '00'\n ret = ''\n ret = 
str(hex(i))\n ret = ret[2:]\n if len(ret) == 1:\n ret = '0' + ret\n return ret\n\n\n# Used only for formatting\ndef bytes_to_str(x):\n ret = ''\n for i in x:\n ret += byte_to_str(i)\n return ret\n\n\n# Base64 encoding without newlines or equality signs\ndef base64_custom_en(buf, with_equal):\n BASE64 = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/\"\n out = \"\"\n length = len(buf)\n off = 0\n while length >= 3:\n w = buf[off] & 0xFF\n off += 1\n w = (w << 8) + (buf[off] & 0xFF)\n off += 1\n w = (w << 8) + (buf[off] & 0xFF)\n off += 1\n out += BASE64[w >> 18]\n out += BASE64[(w >> 12) & 0x3F]\n out += BASE64[(w >> 6) & 0x3F]\n out += BASE64[w & 0x3F]\n length -= 3\n if length == 1:\n w1 = buf[off] & 0xFF\n out += BASE64[w1 >> 2]\n out += BASE64[(w1 << 4) & 0x3F]\n if with_equal:\n out += \"==\"\n elif length == 2:\n w2 = ((buf[off] & 0xFF) << 8) + (buf[off + 1] & 0xFF)\n out += BASE64[w2 >> 10]\n out += BASE64[(w2 >> 4) & 0x3F]\n out += BASE64[(w2 << 2) & 0x3F]\n if with_equal:\n out += \"==\"\n return out\n\ndef base64_custom_de(string, reject_bad, with_equal):\n out = bytearray()\n n = len(string)\n num_eq = int(0)\n acc = int(0)\n k = int(0)\n for i in range(n):\n d = ord(string[i])\n if ord('A') <= d <= ord('Z'):\n d -= ord('A')\n elif ord('a') <= d <= ord('z'):\n d -= ord('a') - 26\n elif ord('0') <= d <= ord('9'):\n d -= ord('0') - 52\n elif d == ord('+'):\n d = 62\n elif d == ord('/'):\n d = 63\n elif d == ord('='):\n if not with_equal or num_eq >= 2:\n raise IOError(\"unexpected '=' sign\")\n num_eq += 1\n d = -1\n else:\n if reject_bad:\n raise ValueError(\"invalid Base64 string\")\n continue\n if d < 0:\n d = 0\n else:\n if num_eq != 0:\n raise ValueError(\"invalid Base64 termination\")\n acc = (acc << 6) + d\n k += 1\n if k == 4:\n out.append(acc // pow(256, 2))\n out.append((acc // 256) % 256)\n out.append(acc % 256)\n acc = 0\n k = 0\n if k != 0:\n if k == 1 or with_equal:\n raise ValueError(\"truncated base64 input\")\n if k == 2:\n out.append(acc // 16)\n if k == 3:\n out.append(acc // 1024)\n out.append(acc // 4 % 256)\n return out\n\n\ndef mpi_en(integer):\n if integer < 0:\n raise ValueError(\"Cannot encode MPI: negative\")\n byte_len = integer.bit_length() / 8\n header_1 = int(ceil((integer.bit_length() / 8) / 256))\n header_2 = int(ceil(integer.bit_length() / 8) % 256)\n length = integer.bit_length()//8\n body = integer.to_bytes(header_1 * 256 + header_2, 'big')\n out = bytearray()\n\n out += header_1.to_bytes(1, 'big')\n out += header_2.to_bytes(1, 'big')\n out+=(body)\n return out\n\n\ndef mpi_de(byte):\n length = int(byte[0:3], 16)\n buf = byte[4:3+length] # or (len+4)-1\n return buf\n","repo_name":"mkkow/BDAN_Projekt_Makwa","sub_path":"implementacja/encoding.py","file_name":"encoding.py","file_ext":"py","file_size_in_byte":3878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16318323679","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n@ time : 2018/7/2\r\n@ author : Xieyz\r\n@ software: PyCharm\r\n\"\"\"\r\nimport datetime\r\nfrom django.contrib.contenttypes.models import ContentType\r\n\r\nfrom opscenter.models import RevisionLogs\r\n\r\ndef change(choice = None):\r\n def out_r(fun):\r\n def wapper(self, request, *args, **kwargs):\r\n user = request.user\r\n fields = self.get_object()._meta.fields\r\n app_label = self.get_object()._meta.app_label\r\n model_name = self.get_object()._meta.object_name\r\n content_type = ContentType.objects.only('id').get(app_label=app_label, 
model=model_name)\r\n content = []\r\n for i in fields:\r\n _name = i.name\r\n try: field_key = eval(\"self.get_object().{field}.id\".format(field=_name))\r\n except: field_key = eval(\"self.get_object().{field}\".format(field=_name))\r\n new_field_key = request.POST.get(i.name, '9*#**#*9')\r\n if new_field_key == '9*#**#*9': continue\r\n if isinstance(field_key, int):\r\n try: new_field_key = int(new_field_key)\r\n except: continue\r\n if isinstance(field_key, datetime.date):\r\n field_key = field_key.strftime(\"%Y-%m-%d\")\r\n if isinstance(field_key, datetime.datetime):\r\n field_key = field_key.strftime(\"%Y-%m-%d %H:%M:%S\")\r\n if field_key != new_field_key:\r\n try: f_name = i.verbose_name\r\n except: f_name = i.name\r\n if field_key or new_field_key:\r\n if choice:\r\n if _name in choice.keys():\r\n i_choice = eval(\"self.get_object().{CHOICES}\".format(CHOICES=choice[_name]))\r\n for v in i_choice:\r\n if v[0] == field_key:\r\n field_key = v[1]\r\n if v[0] == new_field_key:\r\n new_field_key = v[1]\r\n content.append({f_name: {'old': field_key, 'new': new_field_key}})\r\n if content:\r\n RevisionLogs.objects.create(\r\n content_type=content_type,\r\n content=content,\r\n object_id=self.get_object().id,\r\n user=user\r\n )\r\n return fun(self, request, *args,**kwargs)\r\n return wapper\r\n return out_r\r\n","repo_name":"fanmiao1/devops","sub_path":"cloudops/devops/lib/revision_log.py","file_name":"revision_log.py","file_ext":"py","file_size_in_byte":2584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42088230462","text":"'''\nMinimum number of cells that must be passed through -> BFS\n'''\n\nfrom collections import deque\n\n# Input\nn, m = map(int, input().split())\ngraph = [list(map(int, input())) for _ in range(n)]\n\ndx = [-1, 0, 1, 0]\ndy = [0, 1, 0, -1]\n\ndef bfs(x, y):\n queue = deque()\n queue.append((x, y))\n\n while queue:\n x, y = queue.popleft()\n\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n # If in range and the cell is walkable (1), store the previous cell's value + 1\n if 0 <= nx < n and 0 <= ny < m:\n if graph[nx][ny] == 1:\n graph[nx][ny] = graph[x][y] + 1\n queue.append((nx, ny))\n\n # Return the value at position (n, m)\n return graph[n-1][m-1]\n\nprint(bfs(0, 0))","repo_name":"subinmun1997/my_python-for-coding-test","sub_path":"BAEKJOON/백준 문제집/DFSBFS 필수 문제/solution9.py","file_name":"solution9.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"11155202237","text":"import os\nfrom os.path import dirname, abspath\nimport requests\nimport sys\n\n\ndef get_latest_db_migration_to_apply():\n project_dir = dirname(dirname(abspath(__file__))) # Get the main project directory\n migrations_dir = '{}/migrations/versions/'.format(project_dir)\n migration_files = [migration_file for migration_file in os.listdir(migrations_dir) if migration_file.endswith('py')]\n # sometimes there's a trailing underscore, if script was created with `flask db migrate --rev-id=...`\n latest_file = sorted(migration_files, reverse=True)[0].replace('_.py', '').replace('.py', '')\n return latest_file\n\n\ndef get_current_db_version():\n api_status_url = '{}/_status'.format(os.getenv('API_HOST_NAME'))\n response = requests.get(api_status_url)\n\n if response.status_code != 200:\n sys.exit('Could not make a request to the API: {}'.format(response.status_code))\n\n current_db_version = response.json()['db_version']\n return current_db_version\n\n\ndef run():\n if get_current_db_version() == 
get_latest_db_migration_to_apply():\n print('no')\n else:\n print('yes')\n\n\nif __name__ == \"__main__\":\n run()\n","repo_name":"LouisStAmour/notifications-api","sub_path":"scripts/check_if_new_migration.py","file_name":"check_if_new_migration.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16523390446","text":"import sqlite3\n\ndef create_database():\n conn = sqlite3.connect('users_db.db')\n\n cursor = conn.cursor()\n cursor.execute('''\n CREATE TABLE IF NOT EXISTS users (\n id INTEGER PRIMARY KEY,\n telegram_id INTEGER UNIQUE,\n full_name TEXT,\n birth_date TEXT,\n company TEXT,\n position TEXT,\n avatar_url TEXT\n )\n ''')\n\n conn.commit()\n conn.close()\n\ncreate_database()\n\n\nimport telebot\nimport sqlite3\nfrom telebot.types import InlineKeyboardMarkup, InlineKeyboardButton\n\nTOKEN = 'YOUR_TOKEN'\nbot = telebot.TeleBot(TOKEN)\n\n# Словарь для временного хранения данных пользователей\nuser_data = {}\n\n@bot.message_handler(commands=['start'])\ndef start_message(message):\n msg = bot.send_message(message.chat.id, \"Введите ваше ФИО:\")\n bot.register_next_step_handler(msg, process_full_name_step)\n\ndef process_full_name_step(message):\n user_data[message.chat.id] = {'full_name': message.text}\n msg = bot.send_message(message.chat.id, \"Введите вашу дату рождения (дд.мм.гггг):\")\n bot.register_next_step_handler(msg, process_birth_date_step)\n\ndef process_birth_date_step(message):\n user_data[message.chat.id]['birth_date'] = message.text\n msg = bot.send_message(message.chat.id, \"Введите название вашей компании:\")\n bot.register_next_step_handler(msg, process_company_step)\n\ndef process_company_step(message):\n user_data[message.chat.id]['company'] = message.text\n msg = bot.send_message(message.chat.id, \"Введите вашу должность:\")\n bot.register_next_step_handler(msg, process_position_step)\n\ndef process_position_step(message):\n user_data[message.chat.id]['position'] = message.text\n\n # Получение аватарки пользователя\n user_info = bot.get_user_profile_photos(message.chat.id)\n if user_info and user_info.total_count > 0:\n user_data[message.chat.id]['avatar_url'] = user_info.photos[0][0].file_id\n else:\n user_data[message.chat.id]['avatar_url'] = None\n\n save_to_db(user_data[message.chat.id], message.chat.id)\n bot.send_message(message.chat.id, \"Ваш профиль успешно создан!\")\n\ndef save_to_db(data, user_id):\n with sqlite3.connect(\"users_db.db\") as con:\n cursor = con.cursor()\n cursor.execute('''INSERT OR REPLACE INTO users (telegram_id, full_name, birth_date, company, position, avatar_url)\n VALUES (?, ?, ?, ?, ?, ?)''',\n (user_id, data['full_name'], data['birth_date'], data['company'], data['position'], data['avatar_url']))\n con.commit()\n\n\n\n\n\n@bot.message_handler(commands=['show_profiles'])\ndef show_profiles(message):\n with sqlite3.connect(\"users_db.db\") as con:\n cursor = con.cursor()\n cursor.execute(\"SELECT full_name, birth_date, company, position, avatar_url FROM users\")\n\n\n\n for row in cursor.fetchall():\n full_name, birth_date, company, position, avatar_url = row\n profile_info = f\"ФИО: {full_name}\\nДата рождения: {birth_date}\\nКомпания: {company}\\nДолжность: {position}\"\n bot.send_message(message.chat.id, profile_info)\n if avatar_url:\n bot.send_photo(message.chat.id, 
avatar_url)\n\n\nbot.polling(none_stop=True)\n","repo_name":"astaspank/business_tinder","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22948584496","text":"from setuptools import setup\n\nwith open(\"requirements.txt\", \"r\") as file:\n requirements = file.read().splitlines()\n\nsetup(\n name=\"hashtablebot\",\n version=\"0.1.0\",\n packages=[\n \"hashtablebot\",\n \"hashtablebot.entity\",\n \"hashtablebot.banking\",\n \"hashtablebot.persistence\",\n \"hashtablebot.memory_entity\",\n ],\n url=\"\",\n license=\"\",\n author=\"douglascdev\",\n author_email=\"\",\n description=\"\",\n entry_points={\n \"console_scripts\": [\n \"hashtablebot = hashtablebot.main:main\",\n ],\n },\n install_requires=requirements,\n extras_require={\n \"dev\": [\n \"sphinx\",\n \"sphinx_rtd_theme\",\n \"black\",\n \"isort\",\n \"mypy\",\n \"coverage\",\n \"pre-commit\",\n ]\n },\n)\n","repo_name":"douglascdev/hashtablebot","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"38333574290","text":"import socket\nimport threading\nimport json\nfrom cmd import Cmd\n\nclass Client(Cmd):\n\n prompt = ''\n intro = '欢迎来到多人聊天室,输入help可获取帮助\\n'\n\n def __init__(self):\n super().__init__()\n self.__socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.__id = None\n self.__username = None\n self.__isLogin = False\n\n def __receive_message_thread(self):\n while self.__isLogin:\n # noinspection PyBroadException\n try:\n buffer = self.__socket.recv(1024).decode()\n obj = json.loads(buffer)\n print('[' + str(obj['sender_username']) + '(' + str(obj['sender_id']) + ')' + ']', obj['message'])\n except Exception:\n print('[Client] 无法从服务器获取数据')\n\n def __send_message_thread(self, message):\n self.__socket.send(json.dumps({\n 'type': 'broadcast',\n 'sender_id': self.__id,\n 'message': message\n }).encode())\n\n def start(self):\n self.__socket.connect(('127.0.0.1', 8888))\n self.cmdloop()\n\n def do_login(self, args):\n username = args.split(' ')[0]\n self.__socket.send(json.dumps({\n 'type': 'login',\n 'username': username\n }).encode())\n try:\n buffer = self.__socket.recv(1024).decode()\n obj = json.loads(buffer)\n if obj['id']:\n self.__username = username\n self.__id = obj['id']\n self.__isLogin = True\n print('[Client] 成功登入聊天室')\n # 开启子线程用于接受数据\n thread = threading.Thread(target=self.__receive_message_thread)\n thread.setDaemon(True)\n thread.start()\n else:\n print('[Client] 无法登入聊天室')\n except Exception:\n print('[Client] 无法从服务器获取数据')\n\n def do_send(self, args):\n message = args\n # 显示自己发送的消息\n print('[' + str(self.__username) + '(' + str(self.__id) + ')' + ']', message)\n # 开启子线程用于发送数据\n thread = threading.Thread(target=self.__send_message_thread, args=(message,))\n thread.setDaemon(True)\n thread.start()\n\n def do_logout(self, args=None):\n self.__socket.send(json.dumps({\n 'type': 'logout',\n 'sender_id': self.__id\n }).encode())\n self.__isLogin = False\n return True\n\n def do_help(self, arg=None):\n print('[Help] login (username) - 登入聊天室')\n print('[Help] send (message) - 发送消息')\n print('[Help] logout - 
退出聊天室\\n')","repo_name":"Aiobisio/Linux-chatroom","sub_path":"code/Base/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"311513738","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 18 17:05:29 2021\n\n@author: DSC Handsome\n\"\"\"\n\ndata =[]\ncount = 0\nwith open(\"reviews.txt\",\"r\") as f:\n for line in f:\n data.append(line)\n count+=1\n if count % 10000==0:\n print(len(data))\nprint(\"總共有\",len(data),\"筆資料\")\n \nsumlen = 0\nfor d in data: #每一筆字串取出放到d\n sumlen = sumlen+len(d) #每一筆字串總和相加\n \"\"\"\n print(\"總共留言長度\",sumlen)\n \"\"\"\n \nprint(\"平均留言長度是: \",sumlen/len(data))\n\n\nnew = []\n\nfor d in data:\n if len(d) < 100:\n new.append(d)\nprint(\"一共有\",len(new),\"筆資料留言長度小於100\")\nprint(new[0]) #把第一筆留言長度小於100的資料印出來\n\n\n# good = [] #把留言有good的資料都裝進來list\n# for d in data:\n# if \"good\" in d:\n# good.append(d)\n# print(\"一共有\", len(good),\"筆資料提到good\") \n\n#清單快寫法只要資料有good就把1寫入good清單裡面\n# =============================================================================\n# good = [1 for d in data if \"good\" in d]\n# # print(good)\n# =============================================================================\n","repo_name":"herry50307/reviews-analytics","sub_path":"留言分析.py","file_name":"留言分析.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2195990552","text":"import os\nimport urllib.parse\nimport urllib.request\n\nfrom member import Requester\n\n\n_member_to_add = {\n 'first': 'first_name',\n 'last': 'last_name',\n 'addr1': 'address_1',\n 'addr2': 'address_2',\n 'city': 'city',\n 'state': 'state',\n 'zipcode': 'zip_code',\n 'email': 'email'\n }\n\n\n_datadir = '/var/spool/registration'\n_queue = '{}/new_shell_accts'.format(_datadir)\n_dbfile = '{}/account_queue.db'.format(_datadir)\ndatabase = _dbfile\n\n\ndef add_to_member_db(new_member):\n '''This currently backposts to the existing member tool on the\n steering site. 
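# A minimal, self-contained sketch of the form-encoded POST pattern that
# add_to_member_db below relies on. The URL and field names here are
# placeholders for illustration, not the project's real endpoint.
import urllib.parse
import urllib.request

def post_form(url, fields, auth_header=None):
    # urlencode the (key, value) pairs, then encode to bytes for the request body
    body = urllib.parse.urlencode(fields).encode('ascii')
    headers = {"Content-Type": "application/x-www-form-urlencoded"}
    if auth_header:
        headers["Authorization"] = auth_header  # forward the caller's auth header
    req = urllib.request.Request(url, body, headers)
    with urllib.request.urlopen(req) as resp:
        return resp.read()

# Hypothetical usage:
# post_form('https://example.org/member_tool/?cmd=add',
#           [('first_name', 'Ada'), ('submit', 'Submit')])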
It will eventually directly interface with the\n database, but this is the quickest way to get us up for now.'''\n _url = 'https://steering.trilug.org/member_tool/?cmd=add'\n\n _varlist = list(\n (_member_to_add[v], new_member[v])\n for v in new_member.field_names() if v in _member_to_add)\n _varlist.append(('submit', 'Submit'))\n _request_vars = urllib.parse.urlencode(_varlist).encode('ascii')\n\n _headers = {\n \"Content-Type\": \"application/x-www-form-urlencoded\"\n }\n\n try:\n auth_header = os.environ['HTTP_AUTHORIZATION']\n except KeyError:\n raise RuntimeError('Unauthorized')\n else:\n _headers['Authorization'] = auth_header\n req = urllib.request.Request(_url, _request_vars, _headers)\n try:\n results = urllib.request.urlopen(req)\n except:\n raise RuntimeError(\"Call to add script failed.\")\n\n\ndef queue_for_shell(new_member):\n '''Add info to the list of shell accounts to add.'''\n with open(_queue, 'a') as queue:\n queue.write('\\t'.join(new_member.shell_values())+'\\n')\n\n","repo_name":"trilug/registration","sub_path":"register.py","file_name":"register.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"23362026800","text":"# A Pythagorean triplet is a set of three natural numbers, a < b < c, for which,\r\n# a^2 + b^2 = c^2\r\n# For example, 3^2 + 4^2 = 9 + 16 = 25 = 5^2.\r\n# There exists exactly one Pythagorean triplet for which a + b + c = 1000.\r\n# Find the product abc.\r\n\r\ndef isPythagoreanTriplet(a, b, c):\r\n\treturn a**2 + b**2 == c**2\r\n\r\ndef findPythagoreanTripletTotaling(n):\r\n\tfor a in range(1, n):\r\n\t\tfor b in range(1, n-a):\r\n\t\t\tc = n - b - a\r\n\t\t\tif isPythagoreanTriplet(a, b, c):\r\n\t\t\t\treturn (a, b, c)\r\n\r\ntriplet = findPythagoreanTripletTotaling(1000)\r\nproduct = 1\r\nfor x in triplet:\r\n\tproduct *= x\r\n\r\nprint(product)\r\n","repo_name":"SaqibS/project-euler","sub_path":"problem9.py","file_name":"problem9.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"74182312746","text":"from captcha.image import ImageCaptcha\n# captcha package\nfrom PIL import Image\nimport random\nimport time\n# system module\nimport os\n\n\ndef random_captcha():\n captcha_text = []\n for i in range(4):\n c = random.choice(['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'])\n captcha_text.append(c)\n return ' '.join(captcha_text) # join the digits with spaces; they are stripped again in return_img_code\n\n\n# method that generates the captcha\ndef gen_captcha():\n image = ImageCaptcha()\n captcha_text = random_captcha()\n captcha_image = Image.open(image.generate(captcha_text))\n return captcha_text, captcha_image\n\n\ndef return_img_code():\n path = './static/img_code'\n if not os.path.exists(path):\n os.makedirs(path)\n\n # receive the text and the image\n text, image = gen_captcha()\n # define the image file name\n filename = 'img_code' + '.png'\n # save the image\n image.save(path + os.path.sep + filename)\n num = ''.join(text.split(' '))\n return path + os.path.sep + filename, num\n\n\n","repo_name":"yuzitao/ShoppingWebsite","sub_path":"common/img_code.py","file_name":"img_code.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24980306533","text":"import os\nimport sys\nimport termios\nimport fcntl\nimport pygame\n\nclass licker:\n \"\"\" this class handles the licks\n \"\"\"\n\n def getch(self):\n fd = sys.stdin.fileno()\n\n oldterm = termios.tcgetattr(fd)\n newattr = 
termios.tcgetattr(fd)\n newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO\n termios.tcsetattr(fd, termios.TCSANOW, newattr)\n\n oldflags = fcntl.fcntl(fd, fcntl.F_GETFL)\n fcntl.fcntl(fd, fcntl.F_SETFL, oldflags | os.O_NONBLOCK)\n\n try:\n while 1:\n try:\n c = sys.stdin.read(1)\n break\n except IOError:\n pass\n finally:\n termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)\n fcntl.fcntl(fd, fcntl.F_SETFL, oldflags)\n return c\n\n def lick(self):\n # response = self.getch()\n # if response.isdigit() and int(response) == 1:\n # return True\n # else:\n # return False\n events = pygame.event.get()\n for event in events:\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n return True\n else:\n return False\n\n","repo_name":"olakiril/Python","sub_path":"licker.py","file_name":"licker.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5083328787","text":"import sys\n \ndef wait_for_enter():\n raw_input(\"Press Enter to continue: \")\n\nclass DomainWideDelegation(object):\n def run(self, context):\n print(\"\\nIn the GCP console go to IAM > Service accounts:\")\n print(\" https://console.cloud.google.com/iam-admin/serviceaccounts\")\n print(\"Find the {0} service account (search for '{0}')\".format(context[\"app\"]))\n print(\"Enable domain-wide delegation for that account\\n\")\n wait_for_enter()\n\nclass AuthorizeDomain(object):\n def run(self, context):\n print(\"\\nIn the GCP console go to APIs & Services > Credentials > OAuth consent screen:\")\n print(\" https://console.cloud.google.com/apis/credentials/consent\")\n print((\n \"Add the root domain of {0} to the list of authorized domains\"\n \"if it's not already there\\n\"\n ).format(context[\"dns_domain\"])\n )\n wait_for_enter()\n\nclass Oauth(object):\n def run(self, context):\n print(\"\\nIn the GCP console go to APIs & Services > Credentials:\")\n print(\" https://console.cloud.google.com/apis/credentials\")\n print(\"Create a new 'OAuth client ID' credential, selecting 'Web application' for the type\")\n print(\" Name: {0} Oauth Credential \".format(context[\"app\"]))\n print(\" Add the authorized Javascript origins output by this profile\")\n print(\" Add the authorized redirect URIs output by this profile\\n\")\n wait_for_enter()\n print(\"\\nDownload the OAuth credential JSON\")\n print(\"Upload the JSON to vault:\")\n print((\n \" ./add_to_vault.sh \"\n \"[path to JSON] \"\n \"[vault path output by this profile]\"\n ).format(\n context[\"project_name\"],\n context[\"app\"]\n )\n )\n wait_for_enter()\n\nclass AddToGroups(object):\n def run(self, context):\n print(\"\\nIn the GSuite admin console (https://admin.google.com) for 'test.firecloud.org', go to:\")\n print(\" Groups -> Search for firecloud-project-editors-perf@test.firecloud.org -> Add members:\")\n print(\"Add {0} SA:\".format(context[\"app\"]))\n print(\" {0}-{1}@{2}.iam.gserviceaccount.com \".format(\n context[\"project_name\"],\n context[\"app\"],\n context[\"project_name\"]\n )\n )\n wait_for_enter()\n print(\"\\nIn the GSuite admin console (https://admin.google.com) for 'test.firecloud.org', go to:\")\n print(\" Groups -> Search for perfx-leo-service-accounts@test.firecloud.org -> Add members:\")\n print(\"Add {0} SA:\".format(context[\"app\"]))\n print(\" {0}-{1}@{2}.iam.gserviceaccount.com \".format(\n context[\"project_name\"],\n context[\"app\"],\n context[\"project_name\"]\n )\n )\n wait_for_enter()\n\n\nif __name__ == 
\"__main__\":\n context = {\n \"app\": \"leonardo\",\n \"dns_domain\": \"broadinstitute.org\",\n \"google_app_domain\": \"ephemeral.test.firecloud.org\",\n \"project_name\": sys.argv[1]\n }\n procedure = [\n DomainWideDelegation(),\n AuthorizeDomain(),\n Oauth(),\n AddToGroups()\n ]\n for step in procedure:\n step.run(context)\n print(\"\\nAll Done!\")\n","repo_name":"broadinstitute/terraform-terra","sub_path":"profiles/leonardo-sa/manual_steps.py","file_name":"manual_steps.py","file_ext":"py","file_size_in_byte":3282,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"34020414743","text":"from __future__ import annotations\n\nfrom l5r_auto.clans import BrotherhoodOfShinsei, DragonClan\nfrom l5r_auto.keywords import (\n Commander,\n Courtier,\n Destined,\n Duelist,\n Earth,\n Experienced,\n Magistrate,\n Monk,\n Resilient,\n Samurai,\n Shugenja,\n Tattooed,\n)\nfrom l5r_auto.legality import ModernEdition, OnyxEdition, TwentyFestivalsEdition\n\nfrom ..common import Personality\n\n\"(Draw a card after you Recruit a Destined card.)
Interrupt: After the action destroys a dishonorable Personality, gain 1 Honor.
Engage: Dishonor a target enemy Personality with lower Chi.\"\nKitsuki_Goshi = Personality(\n card_id=12452,\n title=\"Kitsuki Goshi\",\n force=3,\n chi=3,\n personal_honor=3,\n gold_cost=7,\n honor_requirement=5,\n clan=[DragonClan],\n keywords=[Destined, Courtier, Magistrate],\n traits=[],\n abilities=[],\n legality=[TwentyFestivalsEdition, OnyxEdition, ModernEdition],\n)\n\"(Once per game per card, a Resilient card does not die in battle resolution.)
Political Interrupt, :bow:: If you took an Honor Interrupt to the action, return the Honor card you discarded for it to your hand. You may not take Honor actions this turn.\"\nKitsuki_Goto = Personality(\n card_id=12453,\n title=\"Kitsuki Goto\",\n force=0,\n chi=3,\n personal_honor=3,\n gold_cost=4,\n honor_requirement=5,\n clan=[DragonClan],\n keywords=[Resilient, Courtier, Magistrate],\n traits=[],\n abilities=[],\n legality=[TwentyFestivalsEdition, OnyxEdition, ModernEdition],\n)\n\"(Duelists win tied duels versus non-Duelists.)
Battle: If Konoe is opposed, look at the top three cards of your Fate deck. You may discard a card to put one of them into your hand; if it is Iaijutsu, you may take an additional action to play it. Put the other cards back in any order.\"\nMirumoto_Konoe = Personality(\n card_id=12454,\n title=\"Mirumoto Konoe\",\n force=3,\n chi=3,\n personal_honor=1,\n gold_cost=5,\n honor_requirement=3,\n clan=[DragonClan],\n keywords=[Duelist, Samurai],\n traits=[],\n abilities=[],\n legality=[TwentyFestivalsEdition, OnyxEdition, ModernEdition],\n)\n'After Sannin enters play, permanently give them one of the following abilities (and its elemental keyword) not already on your Sannin in play: \"Void Dynasty, :bow:: Recruit a Sannin from your discard pile or Dynasty deck.\"; \"Fire Battle, :bow:: Melee 3 Attack.\"; or \"Air Battle: Straighten a target Sannin.\"'\nSannin = Personality(\n card_id=12455,\n title=\"Sannin\",\n force=3,\n chi=3,\n personal_honor=1,\n gold_cost=6,\n honor_requirement=3,\n clan=[DragonClan, BrotherhoodOfShinsei],\n keywords=[Monk, Tattooed],\n traits=[],\n abilities=[],\n legality=[TwentyFestivalsEdition, OnyxEdition, ModernEdition],\n)\n\"Invest :g2:, or :g0: if you have Courtesy: Search your deck for The Mountainborn or Bound Spirit, show it, and put it in your hand. (Courtesy does not take effect if you went first.)
Earth Battle, :bow:: Melee 3 Attack. You may bow one of Daiishu's Followers to straighten him.\"\nTamori_Daiishu_Experienced = Personality(\n card_id=12456,\n title=\"Tamori Daiishu\",\n force=3,\n chi=4,\n personal_honor=2,\n gold_cost=8,\n honor_requirement=2,\n clan=[DragonClan],\n keywords=[Commander, Earth, Experienced(\"1\"), Shugenja],\n traits=[],\n abilities=[],\n legality=[TwentyFestivalsEdition, OnyxEdition, ModernEdition],\n)\n\"(Draw a card after you Recruit a Destined card. Once per game per card, a Resilient card does not die in battle resolution.)
Gaitsuru's Earth attachments have Resilient.\"\nTamori_Gaitsuru = Personality(\n card_id=12457,\n title=\"Tamori Gaitsuru\",\n force=2,\n chi=3,\n personal_honor=1,\n gold_cost=5,\n honor_requirement=2,\n clan=[DragonClan],\n keywords=[Destined, Resilient, Commander, Earth, Shugenja],\n traits=[],\n abilities=[],\n legality=[TwentyFestivalsEdition, OnyxEdition, ModernEdition],\n)\n","repo_name":"aubustou/l5r","sub_path":"l5r_auto/cards/personalities/dragon/evil_portents.py","file_name":"evil_portents.py","file_ext":"py","file_size_in_byte":4074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35136926141","text":"from tkinter import messagebox, PhotoImage, Label, Frame, Button, BOTH, LEFT, CENTER\nfrom define.define_result import DefineResult\n\nclass Result:\n\n \"\"\"Class that initialises the view where the player meets the dragon, and the outcome of that encounter\n \"\"\"\n\n def __init__(self, root, hello, play,treasure, change, survive, write):\n\n \"\"\"Class constructor\n\n Args:\n root ([tk]): [tk object, the game display]\n hello ([function]): [command that opens the game's start view]\n play ([function]): [command that opens the game's play view]\n treasure ([int]): [the player's amount of money]\n change ([function]): [command that updates the player's money when they lose or win]\n survive ([function]): [when the player has died and wants to try to survive, opens a view where the player is given a task to complete]\n write ([function]): [command that opens a view where the player can write their details to the database]\n \"\"\"\n\n self._root=root\n self._frame=None\n self._open_hello=hello\n self._open_play=play\n self._survive=survive\n self._treasure=treasure\n self._change=change\n self._write=write\n self._status='alive'\n self._background_die=PhotoImage(file='src/background/inside.png')\n self._bg_loss=PhotoImage(file='src/background/loss.png')\n self._bg_win=PhotoImage(file='src/background/gold.png')\n\n self._determine()\n\n def _determine(self):\n \n \"\"\"Function that creates the class which determines whether the player died/survived/lost,\n and acts according to the result\n \"\"\"\n\n outcome= DefineResult(self._treasure)\n result=outcome.result()\n\n if result=='win':\n win=outcome.treasure_change()\n balance=outcome.balance()\n self._change(win)\n self._initialize_win(win, balance)\n\n elif result=='die':\n self._status='dead'\n self._initialize_die()\n\n elif result=='lose':\n loss=outcome.treasure_change()\n self._change(loss)\n balance=outcome.balance()\n self._initialize_loss(loss, balance)\n \n def pack(self):\n\n \"\"\"Function that packs the view onto the screen\n \"\"\"\n\n self._frame.pack(fill=BOTH, side=LEFT, expand=True)\n\n def destroy(self):\n\n \"\"\"Function that destroys the view before the next view is initialised\n \"\"\"\n\n self._frame.destroy()\n\n def _last_question(self):\n\n \"\"\"Function that, when the player wants to close the game, asks whether the player wants to write their details to the database\n \"\"\"\n\n question=messagebox.askyesno('book of hunters', 'Do you want to write your name in history?')\n\n if question:\n self._write(self._status)\n\n else:\n if self._status=='alive':\n message='Enjoy your life and treasures'\n\n else:\n message='Enjoy your afterlife'\n\n info=messagebox.showinfo('', message)\n if info:\n self._root.destroy()\n\n def _info(self):\n\n \"\"\"Function that creates the view's opening text, which is the same regardless of the result\n \"\"\"\n\n infolabel = 
Label(master=self._frame,\n text=\"You approach the cave...\\nIt is dark and spooky...\\nA large dragon jumps out in front of you! He opens his jaws and...\")\n \n infolabel.place(relx=0.5, rely=.1, anchor =CENTER)\n\n def _initialize_die(self):\n\n \"\"\"Function that initialises the text and button for the outcome where the player has died\n \"\"\"\n\n self._frame=Frame(master=self._root)\n\n background=Label(master=self._frame, image=self._background_die)\n background.place(x=0,y=0,relwidth=1, relheight=1)\n\n self._info()\n\n label=Label(master=self._frame, \n text='Gobbles you down in one bite!')\n\n survive_button=Button(master=self._frame,\n text='Try to survive?',\n command=self._survive)\n\n exit_button=Button(master=self._frame,\n text='Go to the afterworld', \n command=self._last_question)\n\n label.place(relx=0.5, rely=.5, anchor =CENTER)\n survive_button.place(relx=0.25, rely=.95, anchor =CENTER)\n exit_button.place(relx=0.75, rely=.95, anchor =CENTER)\n \n def _initialize_win(self, win, treasure_balance):\n\n \"\"\"Function that initialises the view when the player has won\n\n Args:\n win ([int]): [the player's winnings]\n treasure_balance ([int]): [the player's new money balance]\n \"\"\"\n\n self._frame=Frame(master=self._root)\n\n background=Label(master=self._frame, image=self._bg_win)\n background.place(x=0,y=0,relwidth=1, relheight=1)\n\n self._info()\n result_label=Label(master=self._frame, \n text='Gives you his treasure!\\n+'+str(win))\n\n treasure_balance_label=Label(master=self._frame, \n text='Treasures currently: '+str(treasure_balance))\n\n exit_button=Button(master=self._frame,\n text='Exit the realm \\n you will not find road back ever again', \n command=self._last_question)\n\n play_button=Button(master=self._frame,\n text='Go Further',\n command=self._open_play)\n\n\n result_label.place(relx=.5, rely=.5, anchor =CENTER)\n\n treasure_balance_label.place(relx=.5, rely=.85, anchor =CENTER)\n\n play_button.place(relx=.1, rely=.95, anchor =CENTER)\n\n exit_button.place(relx=.75, rely=.95, anchor =CENTER)\n\n\n def _initialize_loss(self, loss, treasure_balance):\n\n \"\"\"Function that initialises the view when the player has lost part of their money\n\n Args:\n loss ([int]): [the amount lost]\n treasure_balance ([int]): [the player's new current money balance]\n \"\"\"\n\n self._frame=Frame(master=self._root)\n\n background=Label(master=self._frame, image=self._bg_loss)\n background.place(x=0,y=0,relwidth=1, relheight=1)\n\n\n self._info()\n\n result_label=Label(master=self._frame, \n text='Takes part of your treasures\\n -'+str(loss))\n\n treasure_balance_label=Label(master=self._frame, \n text='Treasures left: '+str(treasure_balance))\n\n exit_button=Button(master=self._frame,\n text='Exit the realm \\n you will not find road back ever again', \n command=self._last_question)\n\n play_button=Button(master=self._frame,\n text='Go Further',\n command=self._open_play)\n\n result_label.place(relx=.5, rely=.5, anchor =CENTER)\n\n treasure_balance_label.place(relx=.5, rely=.75, anchor =CENTER)\n\n play_button.place(relx=.25, rely=.95, anchor =CENTER)\n \n exit_button.place(relx=.75, rely=.95, anchor =CENTER)\n\n","repo_name":"lina-ova/ot-harjoitystyo","sub_path":"src/ui/win_die_lose_views.py","file_name":"win_die_lose_views.py","file_ext":"py","file_size_in_byte":6980,"program_lang":"python","lang":"fi","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32827227583","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\" --- This is only the 
usage file ---\"\"\"\nimport argparse,sys,os\nif sys.argv[0].find('pydoc') > 0 :\n print(__doc__); sys.exit(0)\n\nfrom __main__ import prgname,dbg,cfg\nclass myFormatter(argparse.RawTextHelpFormatter,argparse.ArgumentDefaultsHelpFormatter): \n pass\n\nparser = argparse.ArgumentParser(formatter_class=myFormatter)\nparser.set_defaults(**cfg.argdefaults)\n\nparser.add_argument('-d', type=int,default=cfg.argdefaults.debug, \n metavar=\"debug\", dest=\"debug\", \n help=\"set debug level to num\\t\")\nparser.add_argument('-v', action='count', dest=\"verbose\", \n help=\"increase verbosity\\t\") \nparser.add_argument('-L', action='store_true', dest=\"log\",\n help=\"write log file \\t\\t\")\nparser.add_argument('-c', type=str,default=\"SRV-default\",metavar=\"Config\",\n dest=\"config\", \n help=\"configuration to use\\t\") \nparser.add_argument('-x', nargs='+',metavar=\"xargs\",default=[],\n dest=\"xargs\", \n help=\"filter args for module\\t\") \nreq = parser.add_argument_group('required')\ngroup = req.add_mutually_exclusive_group(required=True)\ngroup.add_argument('-l',action='store_true', dest=\"listmod\",default=argparse.SUPPRESS,\n help=\"show the available modules to execute\")\ngroup.add_argument('-e',nargs='+', default=argparse.SUPPRESS, \n metavar=(\"mod\",\"opt\") , dest=\"module\", \n help=\"module and options execute\\n\\n\")\n \nargs = parser.parse_args()\nglobals()['prgargs'] = args\ndbg._initlvl(prgargs.debug,verbose=prgargs.verbose) \ndbg.dprint(2, \"prgargs\" , prgargs)\n","repo_name":"py3sourcesForAdministration/uyuni","sub_path":"uyu_usg.py","file_name":"uyu_usg.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71020860586","text":"import os\nimport shutil\nimport sys\n\ndef gen_mini_train(data_dir):\n dirs = [s[0] for s in os.walk(data_dir)]\n for subdir in dirs:\n new_subdir = os.path.join('../mini-train/', subdir)\n os.makedirs(new_subdir)\n assert(os.path.isdir(new_subdir))\n for img in os.listdir(subdir)[:10]:\n if img.endswith(\".jpg\"):\n shutil.copy(os.path.join(subdir, img), new_subdir)\n\nif __name__ == '__main__':\n directory = sys.argv[1]\n gen_mini_train(directory)\n","repo_name":"lfvarela/LandmarkDetection","sub_path":"Scripts/gen_mini_train.py","file_name":"gen_mini_train.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29755801655","text":"from sys import stdin\n\ntotal_score: float = 0.0\ntotal_time: float = 0.0\n\nscore_map: dict[str, float] = {\n \"A+\": 4.5,\n \"A0\": 4.0,\n \"B+\": 3.5,\n \"B0\": 3.0,\n \"C+\": 2.5,\n \"C0\": 2.0,\n \"D+\": 1.5,\n \"D0\": 1.0,\n \"F\": 0.0\n}\n\nfor _ in range(20):\n name, time, score = stdin.readline().split()\n if score == \"P\":\n continue\n time = float(time)\n total_score += score_map[score] * time\n total_time += time\n\nprint(total_score / total_time)\n","repo_name":"Lapis0875/algorithm_datastructure","sub_path":"boj/boj25206.py","file_name":"boj25206.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"26562180020","text":"\"\"\"Update Email model with body_html\n\nRevision ID: 05c2a2ce8deb\nRevises: 8123d20b9cbb\nCreate Date: 2023-09-22 09:53:16.311981\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '05c2a2ce8deb'\ndown_revision = 
'8123d20b9cbb'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('emails', schema=None) as batch_op:\n batch_op.add_column(sa.Column('body_html', sa.Text(), nullable=True))\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('emails', schema=None) as batch_op:\n batch_op.drop_column('body_html')\n\n # ### end Alembic commands ###\n","repo_name":"mahouboy23/CS-Ia","sub_path":"migrations/versions/05c2a2ce8deb_update_email_model_with_body_html.py","file_name":"05c2a2ce8deb_update_email_model_with_body_html.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"75039542188","text":"import torch as torch\nimport torch.nn as nn\nimport h5py\nfrom models.encoder.pointnet_encoder import PointNet_Encoder\n\nclass PointNet_Decoder(nn.Module):\n\n def __init__(self,\n layers = [256, 256],\n input_dimension = 128,\n activation = nn.ReLU(),\n number_of_points = 2048):\n super(PointNet_Decoder, self).__init__()\n\n #### layers\n self.feature_dims = [input_dimension] + layers + [3 * number_of_points]\n self.activation = activation\n self.number_of_points = number_of_points\n\n self.forward_layers = nn.ModuleList(\n [ nn.Linear(self.feature_dims[i], self.feature_dims[i+1] )for i in range(len(self.feature_dims) - 1)]\n )\n\n def forward(self, x):\n\n # batch_size\n batch_size = x.size(0)\n\n ### forward pass\n for i in range(len(self.forward_layers) - 1):\n x = self.forward_layers[i](x)\n x = self.activation(x)\n\n ### no activation for last layer\n x = self.forward_layers[-1](x)\n\n x = x.view(batch_size, self.number_of_points, 3)\n\n return x\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nif __name__ == '__main__' :\n encoder = PointNet_Encoder()\n decoder = PointNet_Decoder()\n\n f = h5py.File('./data/train_data.h5')\n data_train = f['data'][:] ### [9840, 2048, 3]\n\n X = torch.from_numpy(data_train[:32]).float().to(device)\n\n X_code = encoder(X)\n\n X_reconstructed = decoder(X_code)\n\n print(X_reconstructed.size())\n\n\n","repo_name":"edward1997104/CSCI5210","sub_path":"Assignment_1/models/decoder/pointnet_decoder.py","file_name":"pointnet_decoder.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"17491311557","text":"\"\"\"\nOne Away: There are three types of edits that can be performed on strings:\ninsert a character, remove a character, or replace a character. 
Given two\nstrings, write a function to check if they are one edit (or zero edits) away.\n\"\"\"\n\n\ndef one_away_replace(first: str, second: str) -> bool:\n \"\"\"\n Checks whether a string can be reached by replacing a single character.\n\n Parameters\n ----------\n first : str\n The first string\n second : str\n The second string\n\n Returns\n -------\n bool\n True if the `first` can be transformed into `second` with one\n replacement\n \"\"\"\n # This is used to check whether a change has already been made\n change_found = False\n\n # Iterate over both strings and check whether changes line up\n for i, j in zip(first, second):\n if i != j:\n\n # It's fine if one change was found, but not more than one\n if change_found:\n return False\n\n # Since a change was definitely found, update change_found\n change_found = True\n\n # Made it to the end of the loop, so first\n # must be within one replacement of second\n return True\n\n\ndef one_away_insert(first: str, second: str) -> bool:\n \"\"\"\n Checks whether a string can be reached by inserting a single character.\n\n Parameters\n ----------\n first : str\n The first string\n second : str\n The second string\n\n Returns\n -------\n bool\n True if the `first` can be transformed into `second` with one insertion\n \"\"\"\n # This is used to check whether a change has already been made\n change_found = False\n\n # Iterate over both strings and check whether changes line up\n i, j = 0, 0\n while i < len(first) and j < len(second):\n if first[i] != second[j]:\n if change_found:\n return False\n change_found = True\n j += 1\n else:\n i += 1\n j += 1\n return True\n\n\ndef one_away(first: str, second: str) -> bool:\n \"\"\"\n Checks whether a string is within one edit of another\n\n Parameters\n ----------\n first : str\n The first string\n second : str\n The second string\n\n Returns\n -------\n bool\n True if the `first` can be transformed into `second` with one edit\n \"\"\"\n if len(first) == len(second):\n return one_away_replace(first, second)\n if len(first) + 1 == len(second):\n return one_away_insert(first, second)\n if len(first) - 1 == len(second):\n return one_away_insert(second, first)\n return False\n\n\nprint(f\"pale, ple -> {one_away('pale', 'ple')}\")\nprint(f\"pales, pale -> {one_away('pales', 'pale')}\")\nprint(f\"pale, bale -> {one_away('pale', 'bale')}\")\nprint(f\"pale, bake -> {one_away('pale', 'bake')}\")\n","repo_name":"jesse-toftum/python-projects","sub_path":"cracking_the_coding_interview/01_arrays_and_strings/question_5.py","file_name":"question_5.py","file_ext":"py","file_size_in_byte":2814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70768751788","text":"\"\"\"uuid to article\n\nRevision ID: 832888d7a6ad\nRevises: 1c2f2a87f1db\nCreate Date: 2021-09-06 15:02:20.503517\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '832888d7a6ad'\ndown_revision = '1c2f2a87f1db'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('article', sa.Column('uuid', sa.String(), nullable=False))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('article', 'uuid')\n # ### end Alembic commands ###\n","repo_name":"dkrussia/fdu_backend","sub_path":"migrations/versions/832888d7a6ad_uuid_to_article.py","file_name":"832888d7a6ad_uuid_to_article.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"20404441836","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 15 14:51:43 2019\n\n@author: giorgiod\n\"\"\"\n\nfrom horton import *\nimport numpy as np\n\ndef st3g_uhf(distance,pseudo_numbers):\n \n natoms=np.array([6,8] )\n coordinates=np.array([[-distance/2.,0.,0.],[distance/2.,0.,0.]])\n \n mol=IOData(title='CO')\n mol.numbers=natoms\n mol.coordinates=coordinates\n mol.pseudo_numbers=pseudo_numbers\n \n obasis1=get_gobasis(mol.coordinates,mol.numbers,'sto-3G')\n obasis2=get_gobasis(mol.coordinates,np.flipud(mol.numbers),'sto-3G')\n obasis=GOBasis.concatenate(obasis1,obasis2)\n \n \n \n lf=DenseLinalgFactory(obasis.nbasis)\n \n #orbital integrals\n olp=obasis.compute_overlap(lf)\n kin=obasis.compute_kinetic(lf)\n na=obasis.compute_nuclear_attraction(mol.coordinates,mol.pseudo_numbers,lf)\n er=obasis.compute_electron_repulsion(lf)\n \n exp_alpha=lf.create_expansion()\n exp_beta=lf.create_expansion()\n #print(exp_alpha.coeffs)\n\n\n \n guess_core_hamiltonian(olp, kin, na, exp_alpha,exp_beta)\n\n #print(exp_alpha.coeffs)\n \n occ_model=AufbauOccModel(7,7)\n occ_model.assign(exp_alpha, exp_beta)\n \n external = {'nn': compute_nucnuc(mol.coordinates, mol.pseudo_numbers)}\n terms = [\n UTwoIndexTerm(kin, 'kin'),\n UDirectTerm(er, 'hartree'),\n UExchangeTerm(er, 'x_hf'),\n UTwoIndexTerm(na, 'ne'),\n ]\n ham = UEffHam(terms, external)\n \n \n dm_alpha = exp_alpha.to_dm()\n dm_beta = exp_beta.to_dm()\n # - SCF solver\n scf_solver = EDIIS2SCFSolver(1e-5,maxiter=500)\n scf_solver(ham, olp, occ_model,dm_alpha,dm_beta)\n \n grid=BeckeMolGrid(mol.coordinates,mol.numbers,mol.numbers, random_rotate=False)\n\n dm_alpha = exp_alpha.to_dm()\n dm_beta = exp_beta.to_dm()\n \n rho_alpha = obasis.compute_grid_density_dm(dm_alpha, grid.points)\n rho_beta = obasis.compute_grid_density_dm(dm_beta, grid.points)\n rho=rho_alpha+rho_beta\n\n \n return(ham.cache['energy'],rho)\n\n\n\n\n\n\n","repo_name":"ferchault/APDFT","sub_path":"prototyping/hessian/Alchemy CO-N2/Carbon Monoxide/co_fun.py","file_name":"co_fun.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"37"} +{"seq_id":"31803193094","text":"from twilio.rest import Client\nfrom .worker import Worker\n\nfrom . 
import config\nimport os\n\nclass Msg:\n\n def __init__(self):\n self.account_sid = os.environ.get('TWILIO_ACCOUNT_SID')\n self.auth_token = os.environ.get('TWILIO_AUTH_TOKEN')\n self.twilio_number = os.environ.get('TWILIO_NUMBER')\n\n def parse(self, request):\n\n resp = {}\n\n # convert the custom type received from Twilio into a usable dictionary\n for key in request.form.keys():\n resp[key] = request.form[key]\n\n body_parts = request.form['Body'].split()\n if request.form['Body'].startswith(config.COMMAND_IDENTIFIER):\n command = body_parts[0].lstrip(config.COMMAND_IDENTIFIER).lower()\n else:\n command = None\n\n resp['command'] = command\n resp['args'] = body_parts[1:]\n\n return resp\n\n def send(self, resp, body=False):\n client = Client(self.account_sid, self.auth_token)\n\n message = client.messages.create(\n body = body,\n from_= self.twilio_number,\n to = resp['From']\n )\n\n return message.sid\n","repo_name":"jfray/smores","sub_path":"smores/msg.py","file_name":"msg.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"19102554466","text":"from azure.digitaltwins.core import DigitalTwinsClient\nfrom azure.identity import DefaultAzureCredential\nimport pandas\nimport Tools.RDF_PARSER.RDF_parser\nfrom aniso8601 import parse_date, parse_datetime\nimport datetime\nimport concurrent\n\ndata_types_map = {\n '#String': str,\n '#Simple_Float': float,\n '#Float': float,\n '#Boolean': bool,\n '#Reactance': float,\n '#Resistance': float,\n '#Voltage': float,\n '#Integer': int,\n '#ActivePower': float,\n '#ReactivePower': float,\n '#CurrentFlow': float,\n '#AngleDegrees': float,\n '#PerCent': float,\n '#Conductance': float,\n '#Susceptance': float,\n '#PU': float,\n '#Date': parse_date,\n '#Length': float,\n '#DateTime': parse_datetime,\n '#ApparentPower': float,\n '#Seconds': float,\n '#Inductance': float,\n '#Money': float,\n '#MonthDay': int,\n '#VoltagePerReactivePower': float,\n '#Capacitance': float,\n '#ActivePowerPerFrequency': float,\n '#ResistancePerLength': float,\n '#RotationSpeed': float,\n '#AngleRadians': float,\n '#InductancePerLength': float,\n '#ActivePowerPerCurrentFlow': float,\n '#CapacitancePerLength': float,\n '#Decimal': float,\n '#Frequency': float,\n '#Temperature': float}\n\n\n\n\nrdfs = pandas.read_RDF([r\"rdfs\\CGMES_2_4_15_09May2019_RDFS\\UNIQUE_RDFSAugmented-v2_4_15-09May2019.zip\"])\n\n# Get all relations\nall_data_links = rdfs.query(\"KEY == 'AssociationUsed' and VALUE == 'Yes'\").ID.str[1:].rename(\"KEY\")\n\n# Get all data types\ndata_types = rdfs.query(\"KEY == 'dataType'\")\n\n# Clean ID-s\ndata_types[\"ID\"] = data_types[\"ID\"].str.split(\"#\", expand=True)[1].str.replace(\".\", \"_\")\n\n# Create data types lookup table\ndata_types_dict = data_types.replace({\"VALUE\": data_types_map}).set_index(\"ID\")[\"VALUE\"].to_dict()\n\n\n\n\ninput_data = [r\"test_models\\TestConfigurations_packageCASv2.0\\MiniGrid\\NodeBreaker\\CGMES_v2.4.15_MiniGridTestConfiguration_BaseCase_Complete_v3.zip\"]\n\ndata = pandas.read_RDF(input_data)\n\n# DefaultAzureCredential supports different authentication mechanisms and determines the appropriate credential type based of the environment it is executing in.\n# It attempts to use multiple credential types in an order until it finds a working credential.\n\n# - AZURE_URL: The URL to the ADT in Azure\n#url = os.getenv(\"AZURE_URL\")\nurl = \"https://Test-Digikaksik01.api.neu.digitaltwins.azure.net\"\n#url = input(\"Please 
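# A hedged illustration of the converter-lookup pattern built just above:
# map each typed field to a parsing callable and fall back to str for
# anything unmapped, exactly like data_types_dict.get(key, str)(value) in
# add_object further below. The field names here are placeholders, not
# real CGMES attribute names.
converters = {"ratedVoltage": float, "phaseCount": int}

def convert(field, raw_value):
    # look up the parser for this field, defaulting to str
    return converters.get(field, str)(raw_value)

# convert("ratedVoltage", "110.0") -> 110.0
# convert("description", "main transformer") -> "main transformer"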
copy here your Azure DT URL\")\n\n# DefaultAzureCredential expects the following three environment variables:\n# - AZURE_TENANT_ID: The tenant ID in Azure Active Directory\n# - AZURE_CLIENT_ID: The application (client) ID registered in the AAD tenant\n# - AZURE_CLIENT_SECRET: The client secret for the registered application\ncredential = DefaultAzureCredential(exclude_interactive_browser_credential=False)\nservice_client = DigitalTwinsClient(url, credential)\n\n\"https://docs.microsoft.com/en-us/python/api/overview/azure/digitaltwins-core-readme?view=azure-python\"\n# Examples\ndef print_all_models():\n\n listed_models = service_client.list_models()\n for model in listed_models:\n print(model)\n\n\ndef add_object(data_object):\n # Get object id\n data_id = data_object.ID.iloc[0]\n\n # Convert to dictionary\n data_dict = data_object.set_index(\"KEY\")[[\"VALUE\"]].to_dict()[\"VALUE\"]\n\n # Extract and remove data/class type\n data_type = data_dict.pop(\"Type\")\n\n # Convert data types\n for key, value in data_dict.items():\n data_dict[key] = data_types_dict.get(key, str)(value)\n\n # Add object/instance ID\n data_dict[\"$dtId\"] = data_id\n\n # Create Azure TD format\n twin = {\"$metadata\": {\"$model\": f\"dtmi:iec:cim:schema:{data_type};16\"}}\n twin.update(data_dict)\n\n # Upload data\n return service_client.upsert_digital_twin(data_id, twin)\n\n\ndef add_relation(relation):\n twin_relation = {\n \"$relationshipId\": f\"dtmi:iec:cim:schema:{relation.KEY.replace('.', ':')};16\",\n \"$sourceId\": relation.ID_FROM,\n \"$relationshipName\": relation.KEY.replace('.', '_'),\n \"$targetId\": relation.ID_TO,\n }\n\n return service_client.upsert_relationship(twin_relation[\"$sourceId\"], twin_relation[\"$relationshipId\"], twin_relation)\n\n# Trigger Azure authentication\n\nlisted_models = service_client.list_models()\nprint(listed_models.__next__())\n\ninput(\"Press Enter once Azure auth is done\")\n\n\n# Filter out full model and Instance ID\ndata = data.merge(data.query(\"KEY == 'Type' and VALUE != 'FullModel'\").ID, on=\"ID\")[[\"ID\", \"KEY\", \"VALUE\"]]\n\n# Get all relations\nall_relations = data.references_all()\n\n# Filter out all relations\ndata = data.merge(all_data_links, on=\"KEY\", how=\"outer\", indicator=True).query(\"_merge == 'left_only'\")[[\"ID\", \"KEY\", \"VALUE\"]]\n\n# Replace . 
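# A minimal sketch of the submit-then-wait pattern this uploader follows
# below: fan tasks out to a ThreadPoolExecutor so slow Azure round-trips
# overlap, then block once on the collected futures. upload_one stands in
# for add_object/add_relation and is a placeholder name.
import concurrent.futures

def upload_all(items, upload_one, max_workers=200):
    with concurrent.futures.ThreadPoolExecutor(max_workers) as pool:
        futures = [pool.submit(upload_one, item) for item in items]
        concurrent.futures.wait(futures)  # block until every task finishes
        return [f.result() for f in futures]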
with _ for Azure\ndata.KEY = data.KEY.str.replace(\".\", \"_\")\n\n# Convert numbers\n#data.VALUE = pandas.to_numeric(data.VALUE, errors='ignore')\n\n# Convert boolians\n#data = data.replace({'VALUE': {'true': True, 'false': False}})\n\n# Group all data by ID to objects\ndata_objects = data.groupby(\"ID\")\n\n# Create thread pool to not wait for Azure response\nexecutor = concurrent.futures.ThreadPoolExecutor(200)\n\n# Time process start\nstart_time = datetime.datetime.now()\n\n# List to collect all thread results\nsubmitted_relations = []\n\n# Add objects\nnumber_of_objects = len(data_objects)\nfor count, _data in enumerate(data_objects):\n\n data_id, data_object = _data\n\n submitted_relations.append(executor.submit(add_object, data_object))\n\n print(f\"INFO - {count+1}/{number_of_objects} - Added {data_id}\")\n\n\n# Add relations\nnumber_of_relations = len(all_relations)\nfor count, relation in enumerate(all_relations.itertuples()):\n\n #add_relation(relation)\n submitted_relations.append(executor.submit(add_relation, relation))\n\n print(f\"INFO - {count+1}/{number_of_relations} - Submitted: {relation.KEY}\")\n\n_, start_time = Tools.RDF_PARSER.RDF_parser.print_duration(f\"INFO - {number_of_objects} objects and {number_of_relations} relations sent to Azure TD, waiting for submission confirmations\", start_time)\n\nconcurrent.futures.wait(submitted_relations)\n\n_, start_time = Tools.RDF_PARSER.RDF_parser.print_duration(f\"INFO - Submissions confirmed\", start_time)\n\n\n\n","repo_name":"Haigutus/USVDM","sub_path":"Tools/RDF_PARSER/Azure_DT_conection.py","file_name":"Azure_DT_conection.py","file_ext":"py","file_size_in_byte":6040,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"} +{"seq_id":"26086967306","text":"def findodd(array):\r\n res = 0\r\n for i in array:\r\n res = res ^ i\r\n\r\n return res\r\n\r\ndef findOdd1(array):\r\n res = None\r\n for i in array:\r\n count = array.count(i)\r\n if count % 2 != 0:\r\n res = i\r\n break\r\n return res\r\n\r\n\r\nif __name__ == \"__main__\":\r\n array = [10,20,20,30,10,10,30,10]\r\n print(findOdd1(array))\r\n print(findodd(array))","repo_name":"Akshay-Ravichandran-10/DataStructures-Algorithms","sub_path":"Arrays/findOdd.py","file_name":"findOdd.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11617427563","text":"import sys\r\nN = int(input())\r\n\r\nD = {}\r\n\r\nfor _ in range(N):\r\n a, b = map(str, sys.stdin.readline().split())\r\n b = int(b)\r\n if a not in D:\r\n D[a] = 0\r\n D[a] += b\r\nfor i in D.keys():\r\n if D[i] == 5:\r\n print(\"YES\")\r\n exit()\r\nprint(\"NO\")","repo_name":"KongUm/BOJ","sub_path":"백준/Bronze/27160. 
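# A worked note on the XOR trick in findodd() above: since a ^ a == 0 and
# 0 ^ a == a, values occurring an even number of times cancel out and only
# an odd-count value survives, e.g. 10 ^ 20 ^ 20 ^ 30 ^ 10 ^ 10 ^ 30 == 10
# (10 appears three times). Note that the sample array in __main__ has only
# even counts (10 four times, 20 and 30 twice each), so findodd returns 0
# and findOdd1 returns None for it.
from functools import reduce
from operator import xor

assert reduce(xor, [10, 20, 20, 30, 10, 10, 30]) == 10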
할리갈리/할리갈리.py","file_name":"할리갈리.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42542418072","text":"\nimport logging\nimport copy\nimport traceback\nimport datetime\nfrom evo.genome_editor import save_genome\nfrom evo.genome_validator import genome_validator\nfrom inf import runtime_data\n\nlogger = logging.getLogger(__name__)\n\n\ndef genome_ver_check(genome):\n try:\n if genome['version'] == \"2.0\":\n print(\"\\n\\n\\n************ Genome Version 2.0 has been detected **************\\n\\n\\n\")\n try:\n runtime_data.genome_validity = genome_validator(genome)\n print(\"Genome validity=\", runtime_data.genome_validity)\n except Exception as e:\n print(\"Error during genome validation!!\\n\", traceback.print_exc(), e)\n genome = genome_morphology_updator(genome)\n save_genome(genome=genome, file_name=runtime_data.connectome_path + \"genome.json\")\n genome1 = genome_2_1_convertor(flat_genome=genome['blueprint'])\n genome_2_hierarchifier(flat_genome=genome['blueprint'])\n genome['blueprint'] = genome1['blueprint']\n return genome\n else:\n print(\"ERROR! Genome is not compatible with 2.0 standard\")\n except KeyError as e:\n print(\"Exception during genome version check\", e, traceback.print_exc())\n pass\n\n\ndef genome_2_print(genome):\n for cortical_area in genome:\n print(cortical_area)\n for gene in genome[cortical_area]:\n try:\n print(\" \", genome_2_to_1[gene], \"\\n\\t\\t\\t\", genome[cortical_area][gene])\n except:\n pass\n\n\ndef genome_2_validator(genome_2):\n \"\"\"\n Conducts various test to ensure the stability of the Genome 2.0\n \"\"\"\n standard_gene_length = 27\n\n def structure_test_gene_lengths():\n \"\"\"\n Check length requirements for each gene\n \"\"\"\n gene_anomalies = 0\n for key in genome_2:\n if len(key) != standard_gene_length:\n print(\"Warning! Key did not meet length requirement:\", key)\n gene_anomalies += 1\n if gene_anomalies == 0:\n print(\"\\nGene length verification ...... PASSED!\")\n else:\n print(\"\\nGene length verification...... Failed! 
\", gene_anomalies, \" anomalies detected\")\n return gene_anomalies\n\n\ndef genome_2_hierarchifier(flat_genome):\n \"\"\"\n Converts Genome 2.0 to a hierarchical data structure\n \"\"\"\n hierarchical_genome = dict()\n for key in flat_genome:\n cortical_id = key[9:15]\n exon = key[16:]\n if key[7] == \"c\":\n if cortical_id not in hierarchical_genome:\n hierarchical_genome[cortical_id] = dict()\n if exon not in hierarchical_genome[cortical_id]:\n hierarchical_genome[cortical_id][exon] = flat_genome[key]\n # genome_2_print(hierarchical_genome)\n return hierarchical_genome\n\n\ndef genome_1_cortical_list(genome):\n cortical_list = list()\n for cortical_area in genome:\n cortical_list.append(cortical_area)\n return cortical_list\n\n\ndef genome_2_cortical_list(flat_genome):\n \"\"\"\n Generates a list of cortical areas inside genome\n \"\"\"\n try:\n cortical_list = list()\n for key in flat_genome:\n if json_comment_catcher(key):\n cortical_id = key[9:15]\n if cortical_id not in cortical_list and key[7] == \"c\":\n cortical_list.append(cortical_id)\n return cortical_list\n except Exception as e:\n print(\"Exception during genome_2_cortical_list\", e, traceback.print_exc())\n\n\ndef genome_1_cortical_list(genome):\n cortical_list = list()\n for cortical_area in genome['blueprint']:\n cortical_list.append(cortical_area)\n\n return cortical_list\n\n\ndef json_comment_catcher(key):\n if key[:1] == '/':\n return False\n else:\n return True\n\n\ndef genome_2_1_convertor(flat_genome):\n genome = dict()\n genome['blueprint'] = dict()\n cortical_list = genome_2_cortical_list(flat_genome)\n # Assign a blank template to each cortical area\n for cortical_area in cortical_list:\n genome['blueprint'][cortical_area] = copy.deepcopy(genome_1_template)\n # Populate each cortical area with\n for cortical_area in genome['blueprint']:\n try:\n for gene in flat_genome:\n if json_comment_catcher(gene):\n cortical_id = gene[9:15]\n exon = gene[19:]\n gene_type = gene[16:18]\n if exon in genome_2_to_1:\n if cortical_id == cortical_area:\n if genome_2_to_1[exon] == \"cortical_name\":\n genome['blueprint'][cortical_area][genome_2_to_1[exon]] = flat_genome[gene]\n elif genome_2_to_1[exon] == \"location_generation_type\":\n if flat_genome[gene]:\n genome['blueprint'][cortical_area][genome_2_to_1[exon]] = \"random\"\n else:\n genome['blueprint'][cortical_area][genome_2_to_1[exon]] = \"sequential\"\n elif genome_2_to_1[exon] == \"cortical_mapping_dst\":\n for destination in flat_genome[gene]:\n if json_comment_catcher(flat_genome[gene][destination]) and \\\n json_comment_catcher(destination):\n for mapping_recipe in flat_genome[gene][destination]:\n if destination not in genome['blueprint'][cortical_area][genome_2_to_1[exon]]:\n genome['blueprint'][cortical_area][genome_2_to_1[exon]][destination] = \\\n list()\n\n temp_dict = dict()\n\n temp_dict[\"morphology_id\"] = mapping_recipe[0]\n temp_dict[\"morphology_scalar\"] = mapping_recipe[1]\n temp_dict[\"postSynapticCurrent_multiplier\"] = mapping_recipe[2]\n temp_dict[\"plasticity_flag\"] = mapping_recipe[3]\n if mapping_recipe[3]:\n try:\n temp_dict[\"plasticity_constant\"] = mapping_recipe[4]\n temp_dict[\"ltp_multiplier\"] = mapping_recipe[5]\n temp_dict[\"ltd_multiplier\"] = mapping_recipe[6]\n except Exception as e:\n temp_dict[\"plasticity_constant\"] = 1\n temp_dict[\"ltp_multiplier\"] = 1\n temp_dict[\"ltd_multiplier\"] = 1\n else:\n temp_dict[\"plasticity_flag\"] = False\n temp_dict[\"plasticity_constant\"] = 1\n temp_dict[\"ltp_multiplier\"] = 1\n 
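# Recipes are positional lists ([morphology_id, scalar, psc_multiplier, plasticity_flag, ...]);\n # non-plastic entries omit the trailing plasticity values, so neutral defaults are filled in here.\n 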
temp_dict[\"ltd_multiplier\"] = 1\n\n genome['blueprint'][\n cortical_area][genome_2_to_1[exon]][destination].append(temp_dict)\n\n elif genome_2_to_1[exon] == \"block_boundaries\":\n if gene[24] == 'x':\n genome['blueprint'][cortical_area][\"block_boundaries\"][0] = \\\n flat_genome[gene]\n elif gene[24] == 'y':\n genome['blueprint'][cortical_area][\"block_boundaries\"][1] = \\\n flat_genome[gene]\n elif gene[24] == 'z':\n genome['blueprint'][cortical_area][\"block_boundaries\"][2] = \\\n flat_genome[gene]\n else:\n pass\n\n elif genome_2_to_1[exon] == \"relative_coordinate\":\n if gene[24] == 'x':\n genome['blueprint'][cortical_area][\"relative_coordinate\"][0] = \\\n flat_genome[gene]\n elif gene[24] == 'y':\n genome['blueprint'][cortical_area][\"relative_coordinate\"][1] = \\\n flat_genome[gene]\n elif gene[24] == 'z':\n genome['blueprint'][cortical_area][\"relative_coordinate\"][2] = \\\n flat_genome[gene]\n else:\n pass\n elif genome_2_to_1[exon] == \"2d_coordinate\":\n if gene[24] == 'x':\n genome['blueprint'][cortical_area][\"2d_coordinate\"][0] = \\\n flat_genome[gene]\n elif gene[24] == 'y':\n genome['blueprint'][cortical_area][\"2d_coordinate\"][1] = \\\n flat_genome[gene]\n else:\n pass\n\n else:\n try:\n genome['blueprint'][cortical_area][genome_2_to_1[exon]] = flat_genome[gene]\n except Exception as e:\n print(\"Key not processed: \", cortical_area, e, traceback.print_exc())\n\n except Exception as e:\n print(f\"Exception during gene translation of {cortical_area}\", e, traceback.print_exc())\n return genome\n\n\ndef genome_v1_v2_converter(genome_v1):\n genome_v2 = genome_v1.copy()\n genome_v2.pop('blueprint')\n genome_v2['blueprint'] = {}\n\n for cortical_area in genome_v1['blueprint']:\n for key in genome_v1['blueprint'][cortical_area]:\n if type(key) is not dict and key not in [\"cortical_mapping_dst\"]:\n if key in genome_1_to_2:\n gene = \"_____10c-\" + cortical_area + \"-\" + genome_1_to_2[key]\n genome_v2['blueprint'][gene] = genome_v1['blueprint'][cortical_area][key]\n else:\n if key not in [\"block_boundaries\", \"relative_coordinate\", \"2d_coordinate\"]:\n if key in genome_1_to_2:\n gene = \"_____10c-\" + cortical_area + \"-\" + genome_1_to_2[key]\n genome_v2['blueprint'][gene] = genome_v1['blueprint'][cortical_area][key]\n if key == \"block_boundaries\":\n genex = \"_____10c-\" + cortical_area + \"-\" + \"cx-___bbx-i\"\n geney = \"_____10c-\" + cortical_area + \"-\" + \"cx-___bby-i\"\n genez = \"_____10c-\" + cortical_area + \"-\" + \"cx-___bbz-i\"\n\n genome_v2['blueprint'][genex] = \\\n genome_v1['blueprint'][cortical_area][\"block_boundaries\"][0]\n genome_v2['blueprint'][geney] = \\\n genome_v1['blueprint'][cortical_area][\"block_boundaries\"][1]\n genome_v2['blueprint'][genez] = \\\n genome_v1['blueprint'][cortical_area][\"block_boundaries\"][2]\n if key == \"relative_coordinate\":\n genex = \"_____10c-\" + cortical_area + \"-\" + \"cx-rcordx-i\"\n geney = \"_____10c-\" + cortical_area + \"-\" + \"cx-rcordy-i\"\n genez = \"_____10c-\" + cortical_area + \"-\" + \"cx-rcordz-i\"\n\n genome_v2['blueprint'][genex] = \\\n genome_v1['blueprint'][cortical_area][\"relative_coordinate\"][0]\n genome_v2['blueprint'][geney] = \\\n genome_v1['blueprint'][cortical_area][\"relative_coordinate\"][1]\n genome_v2['blueprint'][genez] = \\\n genome_v1['blueprint'][cortical_area][\"relative_coordinate\"][2]\n if key == \"2d_coordinate\":\n genex = \"_____10c-\" + cortical_area + \"-\" + \"cx-2dcorx-i\"\n geney = \"_____10c-\" + cortical_area + \"-\" + \"cx-2dcory-i\"\n\n 
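# e.g. with a hypothetical cortical id \"CRT001\", the keys built above are\n # \"_____10c-CRT001-cx-2dcorx-i\" and \"_____10c-CRT001-cx-2dcory-i\".\n 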
genome_v2['blueprint'][genex] = \\\n genome_v1['blueprint'][cortical_area][\"2d_coordinate\"][0]\n genome_v2['blueprint'][geney] = \\\n genome_v1['blueprint'][cortical_area][\"2d_coordinate\"][1]\n\n elif key == \"cortical_mapping_dst\":\n gene = \"_____10c-\" + cortical_area + \"-cx-dstmap-d\"\n destination_map = {}\n for destination in genome_v1['blueprint'][cortical_area][\"cortical_mapping_dst\"]:\n destination_map[destination] = list()\n for entry in genome_v1['blueprint'][cortical_area][\"cortical_mapping_dst\"][destination]:\n morphology_id = entry[\"morphology_id\"]\n morphology_scalar = entry[\"morphology_scalar\"]\n postSynapticCurrent_multiplier = entry[\"postSynapticCurrent_multiplier\"]\n plasticity_flag = entry[\"plasticity_flag\"]\n\n if \"plasticity_constant\" in entry:\n plasticity_constant = entry[\"plasticity_constant\"]\n else:\n plasticity_constant = 1\n\n if \"ltp_multiplier\" in entry:\n ltp_multiplier = entry[\"ltp_multiplier\"]\n else:\n ltp_multiplier = 1\n\n if \"ltd_multiplier\" in entry:\n ltd_multiplier = entry[\"ltd_multiplier\"]\n else:\n ltd_multiplier = 1\n\n destination_map[destination].append([morphology_id,\n morphology_scalar,\n postSynapticCurrent_multiplier,\n plasticity_flag,\n plasticity_constant,\n ltp_multiplier,\n ltd_multiplier])\n\n genome_v2['blueprint'][gene] = destination_map\n else:\n print(\"Warning! \", key, \" not found in genome_1_template!\")\n\n return genome_v2\n\n\ndef morphology_convertor(morphology_in):\n morphology_out = dict()\n morphology_out[\"parameters\"] = dict()\n if \"type\" in morphology_in:\n return morphology_in\n else:\n if \"vectors\" in morphology_in:\n morphology_out[\"type\"] = \"vectors\"\n morphology_out[\"parameters\"][\"vectors\"] = morphology_in[\"vectors\"]\n print(\"morphology_out:\", morphology_out)\n elif \"patterns\" in morphology_in:\n morphology_out[\"type\"] = \"patterns\"\n morphology_out[\"parameters\"][\"patterns\"] = morphology_in[\"patterns\"]\n elif \"composite\" in morphology_in:\n morphology_out[\"type\"] = \"composite\"\n morphology_out[\"parameters\"][\"src_seed\"] = morphology_in[\"composite\"][\"parameters\"][\"src_seed\"]\n morphology_out[\"parameters\"][\"src_pattern\"] = morphology_in[\"composite\"][\"parameters\"][\"src_pattern\"]\n morphology_out[\"parameters\"][\"mapper_morphology\"] = morphology_in[\"composite\"][\"mapper_morphology\"]\n elif \"functions\" in morphology_in:\n morphology_out[\"type\"] = \"functions\"\n\n else:\n pass\n return morphology_out\n\n\ndef genome_morphology_updator(genome):\n try:\n for morphology in genome[\"neuron_morphologies\"]:\n genome[\"neuron_morphologies\"][morphology] = morphology_convertor(genome[\"neuron_morphologies\"][morphology])\n except Exception as e:\n print(\"Error during genome morphology update!\", e, traceback.print_exc())\n\n return genome\n\n\ngene_decoder = {\n \"_______b-_____s-__-__name-t\": \"species_name\",\n \"_______c-______-cx-__name-t\": \"cortical_name\",\n \"_______c-______-cx-_n_cnt-i\": \"cortical_neuron_count\",\n \"_______c-______-cx-gd_vis-b\": \"godot_visualization\",\n \"_______c-______-cx-rcordx-i\": \"relative_coordinate_x\",\n \"_______c-______-cx-rcordy-i\": \"relative_coordinate_y\",\n \"_______c-______-cx-rcordz-i\": \"relative_coordinate_z\",\n \"_______c-______-cx-2dcorx-i\": \"2d_coordinate_x\",\n \"_______c-______-cx-2dcory-i\": \"2d_coordinate_y\",\n \"_______c-______-cx-___bbx-i\": \"block_boundary_x\",\n \"_______c-______-cx-___bby-i\": \"block_boundary_y\",\n \"_______c-______-cx-___bbz-i\": 
\"block_boundary_z\",\n \"_______c-______-cx-synatt-i\": \"synapse_attractivity\",\n \"_______c-______-cx-__rand-b\": \"location_generation_type\",\n \"_______c-______-cx-dstmap-d\": \"cortical_mapping_dst\",\n \"_______c-______-cx-de_gen-f\": \"degeneration\",\n \"_______c-______-nx-pstcr_-f\": \"postsynaptic_current\",\n \"_______c-______-nx-pstcrm-f\": \"postsynaptic_current_max\",\n \"_______c-______-nx-fire_t-f\": 'firing_threshold',\n \"_______c-______-nx-ftincx-f\": \"firing_threshold_increment_x\",\n \"_______c-______-nx-ftincy-f\": \"firing_threshold_increment_y\",\n \"_______c-______-nx-ftincz-f\": \"firing_threshold_increment_z\",\n \"_______c-______-nx-fthlim-i\": \"firing_threshold_limit\",\n \"_______c-______-nx-mp_acc-b\": \"mp_charge_accumulation\",\n \"_______c-______-nx-refrac-i\": \"refractory_period\",\n \"_______c-______-nx-leak_c-f\": \"leak_coefficient\",\n \"_______c-______-nx-leak_v-i\": \"leak_variability\",\n \"_______c-______-nx-c_fr_c-i\": \"consecutive_fire_cnt_max\",\n \"_______c-______-nx-snooze-f\": \"snooze_length\"\n}\n\ngenome_1_template = {\n \"per_voxel_neuron_cnt\": 1,\n \"synapse_attractivity\": 100,\n \"degeneration\": 0,\n \"psp_uniform_distribution\": False,\n \"postsynaptic_current_max\": 99999,\n \"cortical_mapping_dst\": {},\n \"block_boundaries\": [\n None,\n None,\n None\n ],\n \"relative_coordinate\": [\n 0,\n 0,\n 0\n ],\n \"2d_coordinate\": [\n 0,\n 0\n ],\n \"visualization\": True,\n \"postsynaptic_current\": 1,\n 'firing_threshold': 1,\n \"refractory_period\": 0,\n \"leak_coefficient\": 0,\n \"leak_variability\": 0,\n \"consecutive_fire_cnt_max\": 0,\n \"snooze_length\": 0,\n \"firing_threshold_increment_x\": 0,\n \"firing_threshold_increment_y\": 0,\n \"firing_threshold_increment_z\": 0,\n \"firing_threshold_limit\": 0,\n \"mp_charge_accumulation\": True\n }\n\ngenome_2_to_1 = {\n \"_n_cnt-i\": \"per_voxel_neuron_cnt\",\n \"gd_vis-b\": \"visualization\",\n \"__name-t\": \"cortical_name\",\n \"rcordx-i\": \"relative_coordinate\",\n \"rcordy-i\": \"relative_coordinate\",\n \"rcordz-i\": \"relative_coordinate\",\n \"2dcorx-i\": \"2d_coordinate\",\n \"2dcory-i\": \"2d_coordinate\",\n \"___bbx-i\": \"block_boundaries\",\n \"___bby-i\": \"block_boundaries\",\n \"___bbz-i\": \"block_boundaries\",\n \"__rand-b\": \"location_generation_type\",\n \"synatt-i\": \"synapse_attractivity\",\n \"pstcr_-f\": \"postsynaptic_current\",\n \"pstcrm-f\": \"postsynaptic_current_max\",\n \"fire_t-f\": 'firing_threshold',\n \"ftincx-f\": \"firing_threshold_increment_x\",\n \"ftincy-f\": \"firing_threshold_increment_y\",\n \"ftincz-f\": \"firing_threshold_increment_z\",\n \"fthlim-i\": \"firing_threshold_limit\",\n \"refrac-i\": \"refractory_period\",\n \"leak_c-f\": \"leak_coefficient\",\n \"leak_v-f\": \"leak_variability\",\n \"c_fr_c-i\": \"consecutive_fire_cnt_max\",\n \"snooze-f\": \"snooze_length\",\n \"_group-t\": \"group_id\",\n \"dstmap-d\": \"cortical_mapping_dst\",\n \"de_gen-f\": \"degeneration\",\n \"pspuni-b\": \"psp_uniform_distribution\",\n \"mp_acc-b\": \"mp_charge_accumulation\"\n}\n\ngenome_1_to_2 = {\n \"cortical_name\": \"cx-__name-t\",\n \"group_id\": \"cx-_group-t\",\n \"per_voxel_neuron_cnt\": \"cx-_n_cnt-i\",\n \"visualization\": \"cx-gd_vis-b\",\n \"location_generation_type\": \"cx-__rand-b\",\n \"synapse_attractivity\": \"cx-synatt-i\",\n \"postsynaptic_current\": \"nx-pstcr_-f\",\n \"postsynaptic_current_max\": \"nx-pstcrm-f\",\n 'firing_threshold': \"nx-fire_t-f\",\n \"firing_threshold_increment_x\": \"nx-ftincx-f\",\n 
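# Values are the exon part of a 2.0 gene key; genome_v1_v2_converter prefixes\n # them with \"_____10c-<cortical_id>-\" to form the flat key.\n 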
\"firing_threshold_increment_y\": \"nx-ftincy-f\",\n \"firing_threshold_increment_z\": \"nx-ftincz-f\",\n \"firing_threshold_limit\": \"nx-fthlim-i\",\n \"refractory_period\": \"nx-refrac-i\",\n \"leak_coefficient\": \"nx-leak_c-f\",\n \"leak_variability\": \"nx-leak_v-f\",\n \"consecutive_fire_cnt_max\": \"nx-c_fr_c-i\",\n \"snooze_length\": \"nx-snooze-f\",\n \"degeneration\": \"cx-de_gen-f\",\n \"psp_uniform_distribution\": \"cx-pspuni-b\",\n \"cortical_mapping_dst\": \"cx-dstmap-d\",\n \"mp_charge_accumulation\": \"nx-mp_acc-b\"\n}\n","repo_name":"feagi/feagi","sub_path":"src/evo/genome_processor.py","file_name":"genome_processor.py","file_ext":"py","file_size_in_byte":21408,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"37"} +{"seq_id":"72486008108","text":"import numpy as np\r\nimport os\r\nimport sys\r\nfrom math import sqrt\r\nfrom pathlib import Path\r\nimport rasterio as rst\r\nimport matplotlib.pyplot as plt\r\nimport cv2 as cv\r\nfrom scipy.ndimage import gaussian_filter\r\n\r\nclass KMeansConverter:\r\n\r\n ###################################################\r\n ## Function: image_locations ##\r\n ## Retrieves file paths from the designated ##\r\n ## directory, and returns a numpy array of file ##\r\n ## names. ##\r\n ###################################################\r\n def image_locations(self, path=\"Original_Images\"):\r\n dir = Path(path)\r\n image_names = []\r\n for file in dir.glob('*.tif*'):\r\n file_name = os.path.dirname(file) + \"/\" + os.path.basename(file)\r\n image_names.append(file_name) \r\n image_names.sort() \r\n return np.asarray(image_names)\r\n\r\n ###################################################\r\n ## Function: get_file_name ##\r\n ## Parses the file name from a path string. ##\r\n ###################################################\r\n def get_file_name(self, image_path):\r\n index_slash = image_path.rfind('/')\r\n return image_path[index_slash+1:len(image_path)]\r\n\r\n\r\n ###################################################\r\n ## Function: apply_KMeans ##\r\n ## Applies 5 cluster k-means to images. If you ##\r\n ## would like to you can add max_accumulate to ##\r\n ## k-means. 
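When enabled, each new frame is merged into a running per-pixel maximum before clustering. 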
##\r\n ###################################################\r\n previous_image = []\r\n iteration = 0\r\n def apply_KMeans(self, image_path, save_directory, apply_max_accumulate = False, save = True):\r\n raster = rst.open(image_path)\r\n band = raster.read(1)\r\n meta = raster.profile\r\n\r\n image_array = []\r\n if(apply_max_accumulate):\r\n if(self.iteration == 0):\r\n self.previous_image = np.copy(band)\r\n self.iteration += 1\r\n current_image = np.copy(band)\r\n image_array.append(self.previous_image)\r\n image_array.append(current_image)\r\n image_array = np.asarray(image_array)\r\n max_accum = np.maximum.accumulate(image_array, axis=0)\r\n self.previous_image = np.copy(max_accum[1])\r\n band = np.copy(max_accum[1])\r\n\r\n\r\n blurred_image = gaussian_filter(band, sigma=5)\r\n \r\n float_image = np.float32(blurred_image)\r\n num_data = float_image.shape[0]*float_image.shape[1]\r\n float_image = float_image.reshape(num_data, 1)\r\n \r\n # Criteria determines when K-Means will stop\r\n criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 5, 0.0001)\r\n ret,label,center = cv.kmeans(float_image, 5, None, criteria, 5, cv.KMEANS_RANDOM_CENTERS)\r\n\r\n center = np.uint8(center)\r\n res = center[label.flatten()]\r\n k_applied_image = res.reshape((band.shape))\r\n un = np.unique(k_applied_image)\r\n\r\n k_applied_image[k_applied_image == np.max(k_applied_image)] = 255\r\n\r\n k_applied_image[k_applied_image < np.max(k_applied_image)] = 0\r\n\r\n meta['dtype'] = 'uint8'\r\n if(save):\r\n if not os.path.exists(save_directory):\r\n os.makedirs(save_directory)\r\n name = self.get_file_name(image_path)\r\n if not os.path.exists(save_directory + \"/\" + name):\r\n new_raster = rst.open(save_directory + '/' + name, 'w', **meta)\r\n new_raster.write(k_applied_image, 1)\r\n new_raster.close()\r\n return k_applied_image\r\n\r\n\r\n\r\ndef main():\r\n print(\"Notice: You can run all processes at once using Main.py. If you run scripts individually please note that this script is intended to run after CompressImage.py. If it is executed after a different script it will not work.\")\r\n km = KMeansConverter()\r\n if len(sys.argv) < 2:\r\n print(\"_________________________________\\n\")\r\n print(\"\\tK Means Converter\\n\")\r\n print(\"_________________________________\\n\\n\")\r\n print(\"1. Apply K Means\\n\")\r\n print(\"2. Help\\n\")\r\n print(\"press any key (other than 1 or 2) to quit.\\n\\n\")\r\n user_input = input(\">>> \")\r\n if(user_input == '1'):\r\n path = input(\"Directory to apply k means to: \")\r\n save = input(\"Would you like to save (y/n)? \")\r\n max_accumulate = input(\"Would you like to apply the max pixel value accumulation (y/n)? \")\r\n save_directory = \"\"\r\n if (save == 'y'):\r\n save = True\r\n save_directory = input(\"Directory to save to: \")\r\n else:\r\n save = False\r\n save_directory = \"Not Applicable\"\r\n if (max_accumulate == 'y'):\r\n max_accumulate = True\r\n else:\r\n max_accumulate = False\r\n print(\"Applying KMeans...\")\r\n for image_path in km.image_locations(path):\r\n km.apply_KMeans(image_path, save_directory, max_accumulate, save)\r\n elif(user_input == '2'):\r\n print(\"To apply k means you can use command line arguments, or if you do not insert anything\\n\")\r\n print(\"then you can run this program and enter input.\\n\")\r\n print(\"K Means Converter expects a directory of 1 band geotiffs.\")\r\n print(\"To apply kmeans to images please enter: kmeans [directory to apply] [save directory] [use max accumulate y or n] [save y or n]\")\r\n elif sys.argv[1].lower() == \"kmeans\" and len(sys.argv) == 6:\r\n max_accum = False\r\n save = False\r\n if sys.argv[4] == 'y':\r\n max_accum = True\r\n if sys.argv[5] == 'y':\r\n save = True\r\n for image_path in km.image_locations(path=sys.argv[2]):\r\n if save == True:\r\n name = km.get_file_name(image_path)\r\n new_path = os.path.join(sys.argv[3], name)\r\n if not os.path.exists(new_path):\r\n print(image_path)\r\n km.apply_KMeans(image_path, sys.argv[3], max_accum, save)\r\n else:\r\n print(\"Not Saving\")\r\n km.apply_KMeans(image_path, sys.argv[3], max_accum, save)\r\n elif sys.argv[1].lower() == \"kmeans\" and len(sys.argv) != 6:\r\n print(\"To apply kmeans to images please enter: kmeans image_directory_to_apply save_directory use_max_accumulate(y/n) save(y/n)\")\r\n else:\r\n print(\"To apply kmeans to images please enter: kmeans image_directory_to_apply save_directory use_max_accumulate(y/n) save(y/n)\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"kat09tails/FireContourResearch","sub_path":"KMeansConverter.py","file_name":"KMeansConverter.py","file_ext":"py","file_size_in_byte":6471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74456659619","text":"from flask_wtf import Form\nfrom wtforms import TextField, HiddenField, DecimalField, validators\n\nfrom .models import Console\n\ndef console_name_unique(form, field):\n if field.data in [c.name for c in Console.query.all()]:\n raise validators.ValidationError(message='Console name already exists')\n\nclass ConsoleForm(Form):\n type = HiddenField(default='console')\n name = TextField('Name', [validators.Required(), validators.Length(min=2, max=50), console_name_unique])\n\nclass CommandForm(Form):\n type = HiddenField(default='command')\n cmd = TextField('Command', [validators.Required(), validators.Length(min=1, max=255)])\n\nclass ButtonForm(CommandForm):\n type = HiddenField(default='button')\n\nclass ShellForm(CommandForm):\n type = HiddenField(default='shell')\n\nclass LoopForm(CommandForm):\n type = HiddenField(default='loop')\n interval = DecimalField('Interval', [validators.Required(), validators.NumberRange(min=0)])\n start_date = DecimalField('Start', [validators.Optional()])\n","repo_name":"d9w/joystick","sub_path":"joystick/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"21409145867","text":"import cv2\r\nfrom matplotlib import pyplot as plt\r\nimport numpy as np\r\nimport imutils\r\nimport easyocr\r\nimport os\r\n\r\nfrom django.conf.urls.static import 
static\r\nfrom django.conf import settings\r\n\r\nfrom .models import *\r\nfrom .SMSLab import sendsms\r\n\r\ndef trash():\r\n directory = settings.MEDIA_ROOT\r\n # Change the current directory \r\n # to specified directory \r\n os.chdir(directory)\r\n\r\n vid = cv2.VideoCapture(0)\r\n vid.set(cv2.CAP_PROP_POS_FRAMES, 1)\r\n\r\n count = 0\r\n\r\n prev_vehicle = None\r\n \r\n while (True):\r\n\r\n # Capture the video frame by frame\r\n ret, img = vid.read()\r\n\r\n # # Display the resulting frame\r\n # cv2.imshow('frame', img)\r\n\r\n # img = cv2.imread('image4.jpg')\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n # cv2.cvtColor(gray, cv2.COLOR_BGR2RGB)\r\n\r\n bfilter = cv2.bilateralFilter(gray, 11, 17, 17) # Noise reduction\r\n edged = cv2.Canny(bfilter, 30, 200) # Edge detection\r\n # plt.imshow(cv2.cvtColor(edged, cv2.COLOR_BGR2RGB))\r\n\r\n keypoints = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n contours = imutils.grab_contours(keypoints)\r\n contours = sorted(contours, key=cv2.contourArea, reverse=True)[:10]\r\n\r\n location = 0\r\n for contour in contours:\r\n approx = cv2.approxPolyDP(contour, 10, True)\r\n if len(approx) == 4:\r\n location = approx\r\n break\r\n\r\n mask = np.zeros(gray.shape, np.uint8)\r\n\r\n try:\r\n new_image = cv2.drawContours(mask, [location], 0, 255, -1)\r\n new_image = cv2.bitwise_and(img, img, mask=mask)\r\n\r\n # plt.imshow(cv2.cvtColor(new_image, cv2.COLOR_BGR2RGB))\r\n\r\n (x, y) = np.where(mask == 255)\r\n (x1, y1) = (np.min(x), np.min(y))\r\n (x2, y2) = (np.max(x), np.max(y))\r\n cropped_image = gray[x1:x2+1, y1:y2+1]\r\n\r\n # plt.imshow(cv2.cvtColor(cropped_image, cv2.COLOR_BGR2RGB))\r\n count=count+1\r\n videodirname = str(os.getcwd())\r\n cv2.imwrite(str(videodirname+\"\\\\\"+str(count)+\".jpg\"),new_image)\r\n\r\n reader = easyocr.Reader(['en'])\r\n result = reader.readtext(cropped_image)\r\n print(result)\r\n\r\n \r\n text = result[0][-2]\r\n\r\n curr_vehicle = Vehicle.objects.get(registration_no = text)\r\n # Guard against the first frame, where no vehicle has been seen yet\r\n if prev_vehicle is None or prev_vehicle.registration_no != curr_vehicle.registration_no:\r\n prev_vehicle=curr_vehicle\r\n sendsms(curr_vehicle.mobile)\r\n\r\n font = cv2.FONT_HERSHEY_SIMPLEX\r\n res = cv2.putText(img, text=text, org=(approx[0][0][0], approx[1][0][1]+60), fontFace=font, fontScale=1, color=(0,255,0), thickness=2, lineType=cv2.LINE_AA)\r\n res = cv2.rectangle(img, tuple(approx[0][0]), tuple(approx[2][0]), (0,255,0),3)\r\n # plt.imshow(cv2.cvtColor(res, cv2.COLOR_BGR2RGB))\r\n\r\n cv2.imshow('Processed Frame',res)\r\n\r\n # the 'q' button is set as the\r\n # quitting button you may use any\r\n # desired button of your choice\r\n if (cv2.waitKey(30) & 0xff) == ord('q'):\r\n break\r\n except Exception:\r\n continue\r\n\r\n # After the loop release the cap object\r\n vid.release()\r\n # Destroy all the windows\r\n cv2.destroyAllWindows()\r\n","repo_name":"NitishS05/Stolen-Vehicle-Detection","sub_path":"RCDetection/detection/trash.py","file_name":"trash.py","file_ext":"py","file_size_in_byte":3609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"43216319424","text":"#!/usr/bin/env python\n\nglobal_params = {\n \"temp_dir\" : \"data\",\n \"collectors\": [\"ineapi\"],\n \"loader\" : \"hdfs_loader\",\n \"branch_id\":\"BCN\"\n}\n \n## collector \ndolibarr = {\n \"datasource_name\": \"dolibarr\",\n \"host_path\" : '10.4.41.57:80',\n \"api_key\" : \"nd6hgbcr\",\n \"limit\": 1000\n}\n\nodoo = {\n \"datasource_name\": \"odoo\",\n \"server_url\" : 
\"https://demo.odoo.com/start\",\n \"limit\": 1000\n}\n\nweather_api = {\n \"datasource_name\": \"weatherapi\",\n \"server_url\" : 'http://api.weatherapi.com/v1',\n \"api_key\" : \"e3cf29be53a84b4d961224304231204\",\n \"forecast\":{\n \"city\" : \"Barcelona\",\n \"days\": 1\n },\n \"history\":{\n \"start_date\": \"2020-01-01\",\n \"end_date\" :\"2020-01-02\"\n }\n\n}\n\nine_api = {\n \"datasource_name\": \"ine\",\n \"server_url\" : \"https://servicios.ine.es/wstempus/js/ES\",\n \"provincial\": {\n \"start_date\": \"2014-01-01\",\n \"end_date\" :\"2018-01-01\" \n }\n}\n\n## persistent loader\n\nhdfs = {\n \"host_path\" : '10.4.41.57:27000/user/bdm'\n}\n\n","repo_name":"ahmadabusaiid/zen-bdma","sub_path":"BDM/landing_zone/configs/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"40392459764","text":"import math\n\nfrom utils import Log\nfrom utils.xmlx import _\n\nfrom hbd.core.DISTRICT_CAPITAL_LIST import DISTRICT_CAPITAL_LIST\n\nlog = Log(__name__)\n\n\nclass DrawNode:\n def draw_node_circle(self, sx, sy):\n return _(\n 'circle',\n None,\n self.styler.node_circle\n | dict(\n cx=sx,\n cy=sy,\n ),\n )\n\n def draw_node_blip(self, sx, sy, node, text_angle):\n text_angle = 0 if text_angle is None else text_angle\n\n color = self.config.node_to_color_set[node].pop()\n\n dx = math.cos(math.radians(text_angle))\n dy = -math.sin(math.radians(text_angle))\n return _(\n 'rect',\n None,\n self.styler.line_end_blip\n | dict(\n x=sx + self.styler.RADIUS * (dx - 1),\n y=sy + self.styler.RADIUS * (dy - 1),\n width=self.styler.RADIUS * 2,\n height=self.styler.RADIUS * 2,\n fill=color,\n ),\n )\n\n def draw_node_text(self, sx, sy, node, x, y, text_angle):\n text_anchor = 'start'\n space_dir = 1\n if 90 < text_angle <= 270:\n text_anchor = 'end'\n space_dir = -1\n text_angle -= 180\n text_angle = -text_angle\n\n transform = ' '.join(\n [\n f'translate({sx},{sy})',\n f'rotate({text_angle})',\n f'translate({-sx},{-sy})',\n ]\n )\n\n label = node\n # label = f'{node} ({x}, {y})'\n\n cmp = self.config.get_node_cmp_value(node)\n default_font_size = int(self.styler.node_text['font_size'])\n font_size = default_font_size * [1.4, 1.2, 1.1, 1][cmp]\n fill = 'black' if cmp < 2 else 'gray'\n\n for district_name in DISTRICT_CAPITAL_LIST:\n if district_name in label:\n label = label.replace(district_name, district_name.upper())\n\n return _(\n 'text',\n label,\n self.styler.node_text\n | dict(\n x=sx\n + space_dir * (self.styler.RADIUS * 1.5 + font_size * 0.5),\n y=sy,\n text_anchor=text_anchor,\n transform=transform,\n font_size=font_size,\n fill=fill,\n ),\n )\n\n def draw_node(self, node, x, y, t):\n sx, sy = t(x, y)\n inner_list = []\n text_angle = self.config.node_to_text_angle[node]\n\n if node in self.config.junction_list:\n inner_list.append(self.draw_node_circle(sx, sy))\n # else:\n # if node not in self.config.terminal_list:\n # inner_list.append(\n # self.draw_node_blip(sx, sy, node, text_angle)\n # )\n\n if text_angle is not None:\n inner_list.append(\n self.draw_node_text(sx, sy, node, x, y, text_angle)\n )\n\n return _(\n 'g',\n inner_list,\n )\n","repo_name":"nuuuwan/harry_beck_diagram","sub_path":"src/hbd/draw/draw_node.py","file_name":"draw_node.py","file_ext":"py","file_size_in_byte":3028,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"17720825029","text":"from celery import Celery\nimport time\n\napp = 
Celery(\n 'test_celery'\n ,broker= 'redis://localhost/0'\n)\n\napp.conf.update(\n CELERY_RESULT_BACKEND = 'redis://localhost/1'\n)\n\n@app.task\ndef add(x, y):\n time.sleep(2)\n return x + y\n\nif __name__ == '__main__':\n result = add.delay(4, 4)\n print( result.get() )","repo_name":"ehsansh84/python-celery-hello","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"9898095176","text":"__author__ = 'jgwall'\n\nimport argparse\nimport itertools\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import ttest_ind as t_test\nimport string\n\ndebug = False\n\n\ndef main():\n args = parse_args()\n print(\"Calculating day-night distances from\",args.infile)\n dist, labels = load_distances(args.infile)\n np.random.seed(args.seed)\n\n # Get pairs of samples to test\n pairs = get_sample_pairs(dist, labels) # Sample plot day-night pairs\n neighbors = get_neighbors(dist, labels) # Nieghboring plots, day-night pairs\n randoms = get_randoms(dist, labels, pairs, neighbors) # Random plots, day-night pairs\n\n # Plot\n fig = plt.figure(figsize=(10,5))\n grid = gridspec.GridSpec(nrows=10, ncols=3, wspace=0.2)\n ylim = [0, 1.05] if \".weighted_unifrac\" not in args.outprefix else [0, 1.25] # Weighted unifract distances can be >1, so have to adjust\n ax_8 = fig.add_subplot(grid[:8,0], title=\"August 8 Samples\")\n ax_26 = fig.add_subplot(grid[:8, 1], title=\"August 26 Samples\")\n ax_all = fig.add_subplot(grid[:8, 2], title=\"All Samples\")\n plot_violins(ax_8, pairs, neighbors, randoms, date=\"8\", pval_outfile=args.outprefix+\".pvals_aug8.txt\", ylim=ylim)\n plot_violins(ax_26, pairs, neighbors, randoms, date=\"26\", pval_outfile=args.outprefix+\".pvals_aug26.txt\", ylim=ylim)\n plot_violins(ax_all, pairs, neighbors, randoms, date=None, pval_outfile=args.outprefix+\".pvals_all.txt\", ylim=ylim)\n\n\n fig.savefig(args.outprefix +\".png\", dpi=100)\n fig.savefig(args.outprefix + \".svg\", dpi=100)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--infile\", help=\"QIIME distance matrix file\")\n parser.add_argument(\"-o\", \"--outprefix\", help=\"Output file prefix\")\n parser.add_argument(\"-s\", \"--seed\", type=int, default=1, help=\"Random seed\")\n parser.add_argument(\"--debug\", default=False, action=\"store_true\")\n args = parser.parse_args()\n\n # Handle debug flag\n global debug\n debug = args.debug\n\n return parser.parse_args()\n\ndef load_distances(infile):\n dist = pd.read_table(infile, index_col=0)\n rownames, colnames = dist.index, dist.columns\n mismatches = [r!=c for r,c in zip(rownames, colnames)]\n if any(mismatches):\n print(\"WARNING! 
Row and column labels do not match!\")\n\n labels=dict()\n for i in range(len(rownames)):\n sample = rownames[i]\n labels[sample]=i\n\n dist = np.array(dist)\n return dist, labels\n\ndef get_sample_pairs(dist, labels):\n print(\"\\tExtracting day-night pairs from the same samples\")\n pairs=dict()\n n=0\n for s1 in sorted(labels.keys()):\n s2=switch_day_night(s1)\n if s2 not in labels: continue\n pairs = add_to_sampleset(pairs, s1, s2, labels, dist)\n n+=1\n print(\"\\t\\tIdentified\",n,\"total pairs that collapse to\",len(pairs),\"unique pairs of the same plot in day-night combinations\")\n return pairs\n\ndef get_neighbors(dist, labels):\n print(\"\\tExtracting day-night pairs from neighboring samples\")\n pairs=dict()\n n=0\n for s1 in sorted(labels.keys()):\n s2 = switch_day_night(s1) # Change from day to night and vice-versa\n # Get neighboring by adding or subtracting 2 from the plot number\n s2_left = change_plot(s2, -2)\n s2_right = change_plot(s2, 2)\n\n # Check if corresponding samples exist, and add\n if s2_left in labels:\n pairs = add_to_sampleset(pairs, s1, s2_left, labels, dist)\n n+=1\n if s2_right in labels:\n pairs = add_to_sampleset(pairs, s1, s2_right, labels, dist)\n n+=1\n print(\"\\t\\tIdentified\",n,\"total pairs that collapse to\",len(pairs),\"unique pairs of neighboring plots in day-night combinations\")\n return pairs\n\ndef get_randoms(dist, labels, sample_pairs, neighbors):\n print(\"\\tExtracting day-night pairs from random samples that are not in the above sets\")\n pairs=dict()\n n=0\n samples = sorted(labels.keys())\n for i in range(len(samples)):\n s1 = samples[i]\n for j in range(i, len(samples)):\n s2=samples[j]\n\n if s1 == s2: continue # If samples are same, skip. (Shouldn't happen, but best to be safe)\n\n # If both are day or both are night, skip\n if s1.startswith(\"LMAN\") and s2.startswith(\"LMAN\"): continue\n if s1.startswith(\"LMAD\") and s2.startswith(\"LMAD\"): continue\n\n # If sample pair is part of either previous set, skip\n key = make_key(s1, s2)\n if key in sample_pairs or key in neighbors: continue\n\n # If samples are on different dates, skip\n if get_date(s1) != get_date(s2):\n # print(\"Plots\",s1,s2,\"are on different dates and so skipping\")\n continue\n # else: print(\"Plots\",s1,s2,\"are on the same date\")\n\n # Add pairs that made it this far to the set\n pairs = add_to_sampleset(pairs, s1, s2, labels, dist)\n n+=1\n print(\"\\t\\tIdentified\",n,\"total pairs that collapse to\",len(pairs),\"unique pairs of random plots in day-night combinations on same date\")\n return pairs\n\n\n# Do string substitution to get corresponding day or night value\ndef switch_day_night(s1):\n if \"LMAN\" in s1: s2 = s1.replace(\"LMAN\", \"LMAD\")\n if \"LMAD\" in s1: s2 = s1.replace(\"LMAD\", \"LMAN\")\n return s2\n\ndef change_plot(sample, increment):\n plot = int(sample[-4:])\n # print(\"Sample\",sample,\"has plot number\",plot)\n plot += increment\n newsample = sample[:-4] + \"{0:0>4}\".format(plot) # formatting is just how to get leading zeroes okay\n # print(\"\\tNeighbor has sample ID\",newsample)\n return newsample\n\n# Extract distance and add to a dictionary of\ndef add_to_sampleset(pairs, s1, s2, labels, dist):\n row, col = labels[s1], labels[s2]\n mydist = dist[row, col]\n # print(\"\\t\\tPair\", s1, s2, \"is at\", row, col, \"with distnace\", mydist)\n\n # Make unique by sorting and adding to dictionary\n key = make_key(s1, s2)\n if key in pairs: # If already added, check that distance values are the same\n if pairs[key] != mydist:\n 
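# The key is order-independent and the distance matrix is symmetric, so a\n # revisited pair should always carry the same distance; a mismatch means\n # the input matrix is inconsistent.\n 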
print(\"\\t\\tWARNING!! Pair\",key,\"already added but previous distance\",pairs[key],\"!= new distance\",mydist)\n else: pass\n else: # If not present, add\n pairs[key] = mydist\n\n return pairs\n\ndef make_key(s1, s2):\n return \"-\".join(sorted([s1, s2]))\n\ndef unmake_key(key):\n return key.split(\"-\")\n\ndef get_date(sample):\n return sample.split('.')[1]\n\n\ndef plot_violins(ax, pairs, neighbors, randoms, date=None, pval_outfile=None, ylim=[0, 1.05]):\n print(\"Plotting violins for date\",date)\n if date is not None: # Subset by date if required\n pairs = subset_pairs_by_date(pairs, date)\n neighbors = subset_pairs_by_date(neighbors, date)\n randoms = subset_pairs_by_date(randoms, date)\n\n # Randomly subset the randoms to same size as the pair data\n random_subset = np.random.choice(sorted(randoms.keys()), size=len(pairs))\n subset_data = [randoms[k] for k in random_subset]\n\n # Plot\n pair_data, neighbors_data, randoms_data = list(pairs.values()), list(neighbors.values()), list(randoms.values())\n plot_data = [pair_data, neighbors_data, randoms_data, subset_data]\n xvals = [1,2,3,4]\n violins = ax.violinplot(plot_data, positions=xvals)\n\n # Statistical tests\n lettercodes, pvals = get_significant_groupings(plot_data, labels=[\"Pairs\",\"Neighbors\",\"Randoms\",\"Randoms_subset\"], cutoff=0.01)\n maxes = [max(d) for d in plot_data]\n for x,y,s in zip(xvals, maxes, lettercodes):\n ax.text(x=x, y=y+0.02, s=s, horizontalalignment='center', fontweight='bold', fontsize='small')\n print(\"P-values for significance tests:\\n\",pvals)\n\n # Write out if requested\n if pval_outfile:\n pvals['dataset'] = ax.get_title()\n pvals.to_csv(pval_outfile, sep='\\t')\n\n\n # Clean up axes\n ax.set_xticks(xvals)\n ax.set_xticklabels([\"Same plots\",\"Neighbor plots\",\"Random plots\",\"Random plots\\n(subset)\"], rotation=\"vertical\", fontsize=\"x-small\", fontweight=\"bold\")\n ax.xaxis.set_ticks_position('none')\n ax.yaxis.set_ticks_position('left')\n ax.set_title(ax.get_title(), fontsize=\"large\", fontweight=\"bold\")\n\n # Y axis stuff\n ax.set_ylim(ylim)\n yticks = [0, 0.2, 0.4, 0.6, 0.8, 1.0]\n yticklabels=[str(y) for y in yticks]\n ax.set_yticks(yticks)\n ax.set_yticklabels(yticklabels, fontsize='small', weight='bold')\n\n # Clean up violins\n for body in violins['bodies']:\n body.set_color('cornflowerblue')\n body.set_alpha(1)\n body.set_linewidth(0)\n for component in ['cmins','cmaxes','cbars']:\n violins[component].set_color('midnightblue')\n\ndef subset_pairs_by_date(pairs, date):\n newpairs=dict()\n for key in pairs:\n s1, s2 = unmake_key(key)\n d1, d2 = get_date(s1), get_date(s2)\n if d1 != d2: print(\"\\t\\tWARNING! Dates are different for pair\",key)\n\n # If sample pair was on the indicated date, keep. 
Otherwise discarded\n if d1 == date:\n newpairs[key] = pairs[key]\n print(\"\\t\",len(newpairs),\"of\",len(pairs),\"sample pairs kept for date\",date)\n return(newpairs)\n\n# function to take a list of datapoints in a group, do pairwise t-tests, determine which groups are statistically significant, and return the letter codes for groupings\ndef get_significant_groupings(groupdata, labels=None, cutoff=0.05):\n print(\"\\t\\tDetermining significant letter groupings for plot\\n\")\n if labels==None: labels=[\"Data\" + str(i+1) for i in range(len(groupdata))] # Labels for each dataset\n \n # Calculate p-values\n pvals=np.empty(shape=(len(groupdata), len(groupdata)))\n pvals[:] = np.nan\n for i in range(len(groupdata)):\n myset = set()\n for j in range(i+1, len(groupdata)):\n t, p = t_test(groupdata[i], groupdata[j])\n pvals[i,j], pvals[j,i] = p, p\n pvals=pd.DataFrame(pvals, columns=labels, index=labels)\n print(\"P-values:\\n\",pvals)\n\n # Make all possible combinations of groups\n allgroups=list()\n for size in range(2, len(labels)+1):\n for mygroup in itertools.combinations(labels, size):\n allgroups.append(mygroup)\n #print(mygroup)\n \n # Subset to just the ones where all members are statistically indistinguishable\n goodgroups=list()\n for mygroup in allgroups:\n isgood=True\n for i in range(len(mygroup)):\n for j in range(i, len(mygroup)):\n if pvals.loc[mygroup[i], mygroup[j]] <= cutoff:\n isgood=False\n #print(isgood, mygroup)\n if isgood: goodgroups.append(mygroup)\n \n # Check if one-member groups exist by collapsing groups to a set of labels\n already_included = set(np.hstack(goodgroups))\n for l in labels:\n if l not in already_included: \n goodgroups.append([l]) #Append 1-item list of groups\n #print(goodgroups)\n\n # Now assign letter values; do in a loop over labels to preserve order\n lettercodes = {l:\"\" for l in labels} # Dictionary of which letter codes each label has\n letter_i=0 # Keeping track of which group label we're using\n for l in labels:\n for g in goodgroups:\n if l not in g: continue # Skip groups this label is not in\n for mylabel in g:\n lettercodes[mylabel] += string.ascii_lowercase[letter_i]\n goodgroups.remove(g) # Remove a group after it's been processed so it doesn't get processed a second time\n letter_i+=1\n #print(lettercodes)\n \n # Make letters in order of original list\n lettervals = [lettercodes[l] for l in labels]\n #print(lettervals)\n return lettervals, pvals\n \n \n\nif __name__ == '__main__': main()","repo_name":"wallacelab/paper-maize-phyllosphere-2014","sub_path":"0_Scripts/9e_PLotDistancesBetweenDayAndNightSamples.py","file_name":"9e_PLotDistancesBetweenDayAndNightSamples.py","file_ext":"py","file_size_in_byte":11805,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"18635406169","text":"# -*- coding:utf-8 -*-\n\n\n# Given a triangle, find the minimum path sum from top to bottom. 
Each step you may move to adjacent numbers on the row below.\r\n#\n#\n# For example, given the following triangle\r\n#\n# [\r\n# [2],\r\n# [3,4],\r\n# [6,5,7],\r\n# [4,1,8,3]\r\n# ]\r\n#\n#\n#\n# The minimum path sum from top to bottom is 11 (i.e., 2 + 3 + 5 + 1 = 11).\r\n#\n#\n#\n# Note:\r\n# Bonus point if you are able to do this using only O(n) extra space, where n is the total number of rows in the triangle.\r\n#\n\n\nclass Solution(object):\n def minimumTotal(self, triangle):\n \"\"\"\n :type triangle: List[List[int]]\n :rtype: int\n \"\"\"\n \n if not triangle:\n return 0\n \n# lists=triangle[0] \n# for i in range(1, len(triangle)):\n# pre = lists[0]\n# for j in range(0, i + 1):\n# cur = lists[j] if j < i else 0\n# if j < i:\n# lists[j] = triangle[i][j] + min(pre, lists[j])\n# else:\n# lists.append(triangle[i][j]+pre)\n# pre = cur\n\n# return min(lists)\n\n res = [0 for i in range(0, len(triangle)+1)]\n \n for i in range(len(triangle)-1, -1, -1):\n for j in range(0, len(triangle[i])):\n res[j] = min(res[j], res[j+1])+triangle[i][j]\n \n return res[0]\n \n","repo_name":"privateHmmmm/leetcode","sub_path":"120-triangle/triangle.py","file_name":"triangle.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"20695400101","text":"import urllib.request as url\r\nimport json\r\n\r\npath = \"https://data.covid19india.org/states_daily.json\"\r\nresponse = url.urlopen(path)\r\ndata = json.load(response)\r\n\r\nstates = data['states_daily']\r\n\r\nconfirmed_cases = []\r\nrecovered_cases = []\r\ndeceased_cases = []\r\n\r\nfor i in range(len(states)):\r\n if states[i]['status'] == 'Confirmed':\r\n confirmed_cases.append(states[i])\r\n elif states[i]['status'] == 'Recovered':\r\n recovered_cases.append(states[i])\r\n else:\r\n deceased_cases.append(states[i])\r\n\r\nconf_sum = 0\r\nrec_sum = 0\r\ndec_sum = 0\r\nfor i in range(len(confirmed_cases)):\r\n conf_sum += int(confirmed_cases[i]['dl'])\r\n rec_sum += int(recovered_cases[i]['dl'])\r\n dec_sum += int(deceased_cases[i]['dl'])\r\n\r\nprint(\"Total Confirmed Cases in Delhi till 16 Aug 2021 :\",conf_sum)\r\nprint(\"Total Recovered Cases in Delhi till 16 Aug 2021 :\",rec_sum)\r\nprint(\"Total Deceased Cases in Delhi till 16 Aug 2021 :\",dec_sum)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"brainmentorspvtltd/TP_Python2022","sub_path":"covid_api.py","file_name":"covid_api.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"28248667272","text":"# This is an alternative solution, slightly different from the template. The template for task 17 is in the file шаблон.py\n\n# The file contains a sequence of 10,000 positive integers.\n# Each number does not exceed 10,000. Determine and write in the answer first the number of pairs of\n# sequence elements whose difference is a multiple of 60 and where at least one of the elements is a\n# multiple of 15, then the maximum of the differences of the elements of such pairs.\n# In this problem a pair means two distinct elements of the sequence. The order of the elements in a pair does not matter.\n\n# https://inf-ege.sdamgia.ru/problem?id=37370\n\nf = open(\"37370.txt\")\ntext = f.readlines() # Read the file and split it into lines\n\na = []\nmaxDif = 0\ncount = 0\n\nfor i in text:\n a.append(int(i)) # Append each element of the sequence to list a\n\n# Since in this task a pair is not just two consecutive numbers, we use two nested for loops\n\nfor i in range(0, len(a)): # The outer loop picks the first element of the pair\n\n for j in range(i+1, len(a)): # The inner loop finds partners for the first number, scanning the whole list, but starts at index i+1 so there are no duplicates\n\n # Count the qualifying pairs\n if (abs(a[i] - a[j]) % 60 == 0) and ((a[i] % 15 == 0) or (a[j] % 15 == 0)):\n\n count += 1\n\n if abs(a[i] - a[j]) > maxDif: # Track the maximum difference\n\n maxDif = abs(a[i] - a[j])\n\nprint(count, maxDif)\n# |\n# |\n# ↓\n# 63517 9960\n","repo_name":"paracosm17/egeinformatics","sub_path":"17/37370/37370.py","file_name":"37370.py","file_ext":"py","file_size_in_byte":2323,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"26092616095","text":"# -*- coding: utf-8 -*-\r\n\r\nimport nltk\r\nfrom nltk.corpus import names\r\nimport random\r\n\r\ndef gender_features(word):\r\n return {'last_letter': word[-1]}\r\n\r\n\r\nnames = ([(name, 'male') for name in names.words('male.txt')] +\r\n [(name, 'female') for name in names.words('female.txt')]\r\n )\r\nrandom.shuffle(names)\r\n\r\nfeatureSets = [(gender_features(n), g) for (n,g) in names]\r\ntrain_set, test_set = featureSets[500:], featureSets[:500]\r\nclf = nltk.NaiveBayesClassifier.train(train_set)\r\npred_gender = clf.classify(gender_features('Neo'))\r\nprint(pred_gender)\r\n","repo_name":"leizhang-geo/machine_learning_algorithms","sub_path":"nlp/nltk/4_classify.py","file_name":"4_classify.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"35"} +{"seq_id":"15734093220","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.metrics.pairwise import linear_kernel\n\ndf = pd.read_csv('model/NetflixFlattened.csv')\n\n# Flattening the dataset\nsingle_col = ['type', 'rating']\nmulti_col = ['director', 'cast', 'country', 'listed_in']\n\nbinary_df = pd.DataFrame({'Index':df.index})\nbinary_df = binary_df.set_index('Index')\n\n \n# Single Value\nfor i in single_col:\n for j in df[i].unique():\n binary_df[j] = 0\nfor i in range(len(df)):\n row = df.index[i]\n for j in single_col:\n value = df[j][row]\n binary_df.loc[row,value] = 1\n \n# Multiple Value\n\nfor i in multi_col:\n unique_list = []\n for j in df[i]:\n for x in j:\n unique_list.append(x)\n unique_set = set(unique_list)\n \n for value in unique_set:\n binary_df[value] = 0 \n\nfor i in range(len(df)):\n row = df.index[i]\n for j in multi_col:\n for value in df[j][row]:\n binary_df.loc[row,value] = 1\n\n\n# Compute the cosine similarity matrix\ncosine_sim = linear_kernel(binary_df, binary_df)\n\n# Construct a reverse map of indices and movie titles\nindices = pd.Series(df.index, index=df['title']).drop_duplicates() \n\ndef results(title):\n \n title = title.lower()\n row_index = indices[title]\n sim_scores = list(enumerate(cosine_sim[row_index]))\n sim_scores = sorted(sim_scores, key = lambda x: x[1], reverse = True)\n sim_scores = sim_scores[1:11]\n movie_indices = [i[0] for i in sim_scores]\n return 
df.iloc[movie_indices]['title'].str.upper()\n","repo_name":"100piyushsingh/RecommenderSystem","sub_path":"recommendation.py","file_name":"recommendation.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"26654540989","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[83]:\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\nimport pickle as pkl\nimport os \n\n\n# In[84]:\n\n\n# labels=[]\n# with open(\"./labels.pickle\",\"rb\") as file:\n# labels=pkl.load(file)\n\n\n# In[85]:\n\n\n#translation or cropping 1\ndef translation(img,coor):\n coor=np.array(coor)\n coor=coor.reshape((-1,4))\n img=cv2.resize(img,(224,224))\n rows,cols,_ = img.shape\n dst=img\n M = np.float32([[1,0,0],[0,1,50]])\n dst = cv2.warpAffine(img,M,(cols,rows))\n M = np.float32([[1,0,0],[0,1,-50]])\n dst = cv2.warpAffine(dst,M,(cols,rows))\n dst=dst[50:-50,:,:]\n dst=cv2.resize(dst,(224,224))\n coor[:,1]=(coor[:,1]-50) * 224/ 124.0\n coor[:,3]=(coor[:,3]-50) * 224/ 124.0\n return dst,coor.reshape((-1,4))\n\n\n# In[86]:\n\n\n# Rotation and Scaling 2 \ndef rotationScale(img,coor,angle=5,scale=1.1):\n coor=np.array(coor)\n img=cv2.resize(img,(224,224))\n coor=coor.reshape((-1,2,2))\n rows,cols,_ = img.shape\n M = cv2.getRotationMatrix2D((cols/2,rows/2),angle,scale)\n dst = cv2.warpAffine(img,M,(cols,rows))\n dst=cv2.resize(dst,(224,224))\n coor=np.array(cv2.transform(coor,M))\n# print(coor)\n return dst,coor.reshape((-1,4))\n\n\n# In[87]:\n\n\n#FLIP 3\ndef flip(img,coor):\n coor=np.array(coor)\n img=cv2.resize(img,(224,224))\n coor=coor.reshape((-1,4))\n dst=cv2.flip(img,1)\n dst=cv2.resize(dst,(224,224))\n# print(coor)\n coor[:,0]=224-coor[:,0]\n coor[:,2]=224-coor[:,2]\n return dst,coor.reshape((-1,4))\n\n\n# In[90]:\n\n\ndef augmentation(labels):\n augment_labels=[]\n count=1\n for image in labels:\n if image[1][0]==0:\n continue\n img=cv2.imread(\"./images/\"+image[0])\n augs=[]\n augs.append(translation(img,image[1][1])) #1\n augs.append(rotationScale(img,image[1][1])) #2\n augs.append(rotationScale(img,image[1][1],angle=-7)) #3\n augs.append(rotationScale(img,image[1][1],angle=0)) #4\n augs.append(flip(img,image[1][1])) #5\n i=0\n for augim in augs:\n name=image[0].split(\".\")[0]+\"_\"+str(i+1)+\".jpg\"\n i+=1\n cv2.imwrite(\"./aug_images/\"+name,augim[0])\n augment_labels.append((name,(image[1][0],augim[1],image[1][-1])))\n# print(count,end=\" \")\n count+=1\n return augment_labels\n\n\n# In[91]:\n\n\n# augment_labels=augmentation(labels=labels)\n\n\n# In[93]:\n\n\n# len(augment_labels)+177\n\n\n# In[94]:\n\n\n# with open(\"./augments_label.pickle\",\"wb\") as file:\n# pkl.dump(augment_labels,file)\n\n\n# In[95]:\n\n\n# a=np.random.randint(0,135)\n# image=labels[a]\n# img=cv2.imread(\"./Data/images/\"+image[0])\n\n# fig=plt.figure(figsize=(20,20))\n\n# ax1=fig.add_subplot(1,4,2)\n# im,coor=flip(img,image[1][1])\n# for i in range(coor.shape[0]):\n# im=cv2.rectangle(im,(int(coor[i][0]),int(coor[i][1])),(int(coor[i][2]),int(coor[i][3])),(0,255,0),2)\n# ax1.imshow(cv2.cvtColor(im,cv2.COLOR_BGR2RGB))\n\n# ax1=fig.add_subplot(1,4,3)\n# im,coor=translation(img,image[1][1])\n# for i in range(coor.shape[0]):\n# im=cv2.rectangle(im,(int(coor[i][0]),int(coor[i][1])),(int(coor[i][2]),int(coor[i][3])),(0,255,0),2)\n# ax1.imshow(cv2.cvtColor(im,cv2.COLOR_BGR2RGB))\n\n# ax1=fig.add_subplot(1,4,4)\n# im,coor=rotationScale(img,image[1][1],angle=7)\n# for i in range(coor.shape[0]):\n# 
im=cv2.rectangle(im,(int(coor[i][0]),int(coor[i][1])),(int(coor[i][2]),int(coor[i][3])),(0,255,0),2)\n# ax1.imshow(cv2.cvtColor(im,cv2.COLOR_BGR2RGB))\n\n# ax1=fig.add_subplot(1,4,1)\n# coor=np.array(image[1][1])\n# img=cv2.resize(img,(224,224))\n# for i in range(coor.shape[0]):\n# img=cv2.rectangle(img,(int(coor[i][0]),int(coor[i][1])),(int(coor[i][2]),int(coor[i][3])),(0,255,0),2)\n\n# ax1.imshow(cv2.cvtColor(img,cv2.COLOR_BGR2RGB))\n\n\n# # In[ ]:\n\n\n\n\n","repo_name":"Jask-AI/Illegal_Parking","sub_path":"image_augmentations.py","file_name":"image_augmentations.py","file_ext":"py","file_size_in_byte":3798,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"35"} +{"seq_id":"71184269222","text":"\"\"\"\nSafe string\n\"\"\"\nimport re\nfrom datetime import date\nfrom unidecode import unidecode\n\nCONTRASENA_REGEXP = r\"^(?=.*[a-z])(?=.*[A-Z])(?=.*\\d)[a-zA-Z\\d]{8,48}$\"\nCURP_REGEXP = r\"^[A-Z]{4}\\d{6}[A-Z]{6}[A-Z0-9]{2}$\"\nEMAIL_REGEXP = r\"^[\\w.-]+@[\\w.-]+\\.\\w+$\"\nEXPEDIENTE_REGEXP = r\"^\\d+\\/[12]\\d\\d\\d(-[a-zA-Z0-9]+(-[a-zA-Z0-9]+)?)?$\"\nTOKEN_REGEXP = r\"^[a-zA-Z0-9_.=+-]+$\"\nFOLIO_REGEXP = r\"^\\d+/[12]\\d\\d\\d$\"\nNUMERO_PUBLICACION_REGEXP = r\"^\\d+/[12]\\d\\d\\d$\"\nSENTENCIA_REGEXP = r\"^\\d+/[12]\\d\\d\\d$\"\nURL_REGEXP = r\"^(https?:\\/\\/)[0-9a-z-_]*(\\.[0-9a-z-_]+)*(\\.[a-z]+)+(\\/[0-9a-z%-_]*)*?\\/?$\"\n\n\ndef safe_clave(input_str, max_len=16):\n \"\"\"Safe clave\"\"\"\n if not isinstance(input_str, str):\n return \"\"\n new_string = re.sub(r\"[^a-zA-Z0-9()-]+\", \" \", unidecode(input_str))\n removed_multiple_spaces = re.sub(r\"\\s+\", \" \", new_string)\n spaces_to_dashes = re.sub(r\"\\s+\", \"-\", removed_multiple_spaces)\n final = spaces_to_dashes.strip().upper()\n if len(final) > max_len:\n return final[:max_len]\n return final\n\n\ndef safe_email(input_str, search_fragment=False):\n \"\"\"Safe string\"\"\"\n if not isinstance(input_str, str):\n return \"\"\n final = input_str.strip().lower()\n if search_fragment:\n if re.match(r\"^[\\w.-]*@*[\\w.-]*\\.*\\w*$\", final) is None:\n return \"\"\n return final\n if re.match(EMAIL_REGEXP, final) is None:\n return \"\"\n return final\n\n\ndef safe_expediente(input_str):\n \"\"\"Safe expediente\"\"\"\n if not isinstance(input_str, str) or input_str.strip() == \"\":\n return \"\"\n elementos = re.sub(r\"[^a-zA-Z0-9]+\", \"|\", unidecode(input_str)).split(\"|\")\n try:\n numero = str(elementos[0])\n ano = int(elementos[1])\n except (IndexError, ValueError) as error:\n raise error\n if ano < 1950 or ano > date.today().year:\n raise ValueError\n extra_1 = \"\"\n if len(elementos) >= 3:\n extra_1 = \"-\" + elementos[2].upper()\n extra_2 = \"\"\n if len(elementos) >= 4:\n extra_2 = \"-\" + elementos[3].upper()\n limpio = f\"{str(numero)}/{str(ano)}{extra_1}{extra_2}\"\n if len(limpio) > 16:\n raise ValueError\n return limpio\n\n\ndef safe_string(input_str, max_len=250, do_unidecode=True, save_enie=False, to_uppercase=True):\n \"\"\"Safe string\"\"\"\n if not isinstance(input_str, str):\n return \"\"\n if do_unidecode:\n new_string = re.sub(r\"[^a-zA-Z0-9.()/-]+\", \" \", input_str)\n if save_enie:\n new_string = \"\"\n for char in input_str:\n if char == \"ñ\":\n new_string += \"ñ\"\n elif char == \"Ñ\":\n new_string += \"Ñ\"\n else:\n new_string += unidecode(char)\n else:\n if save_enie is False:\n new_string = re.sub(r\"[^a-záéíóúüA-ZÁÉÍÓÚÜ0-9.()/-]+\", \" \", input_str)\n else:\n new_string = re.sub(r\"[^a-záéíóúüñA-ZÁÉÍÓÚÜÑ0-9.()/-]+\", \" \", input_str)\n 
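# e.g. safe_string(\" juan perez \") -> \"JUAN PEREZ\"; with the defaults, accented\n # input like \"pérez\" becomes \"P REZ\" because the do_unidecode branch above\n # substitutes non-matching characters with spaces rather than transliterating them.\n 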
removed_multiple_spaces = re.sub(r\"\\s+\", \" \", new_string)\n final = removed_multiple_spaces.strip()\n if to_uppercase:\n final = final.upper()\n if max_len == 0:\n return final\n return (final[:max_len] + \"...\") if len(final) > max_len else final\n\n\ndef safe_text(input_str, max_len=4096, to_uppercase=True):\n \"\"\"Safe string\"\"\"\n if not isinstance(input_str, str):\n return \"\"\n new_string = re.sub(r\"[^a-zA-Z0-9@\\n()\\?=\\[\\]:/_.-]+\", \" \", unidecode(input_str))\n final = new_string.strip()\n if to_uppercase:\n final = final.upper()\n if max_len == 0:\n return final\n return (final[:max_len] + \"...\") if len(final) > max_len else final\n\n\ndef safe_message(input_str, max_len=250):\n \"\"\"Safe message\"\"\"\n message = str(input_str)\n if message == \"\":\n message = \"Sin descripción\"\n return (message[:max_len] + \"...\") if len(message) > max_len else message\n\n\ndef safe_numero_publicacion(input_str):\n \"\"\"Safe número publicación\"\"\"\n return safe_sentencia(input_str)\n\n\ndef safe_sentencia(input_str):\n \"\"\"Safe sentencia\"\"\"\n if not isinstance(input_str, str) or input_str.strip() == \"\":\n return \"\"\n elementos = re.sub(r\"[^0-9A-Z]+\", \"|\", unidecode(input_str)).split(\"|\")\n try:\n numero = int(elementos[0])\n ano = int(elementos[1])\n except (IndexError, ValueError) as error:\n raise error\n if numero <= 0:\n raise ValueError\n if ano < 1950 or ano > date.today().year:\n raise ValueError\n limpio = f\"{str(numero)}/{str(ano)}\"\n if len(limpio) > 16:\n raise ValueError\n return limpio\n\n\ndef safe_url(input_str):\n \"\"\"Safe URL\"\"\"\n if not isinstance(input_str, str) or input_str.strip() == \"\":\n return \"\"\n input_str = input_str.strip()\n if re.match(URL_REGEXP, input_str) is None:\n return \"\"\n return input_str\n\n\ndef safe_curp(input_str, max_len=32):\n \"\"\"Safe CURP\"\"\"\n if not isinstance(input_str, str):\n return None\n removed_spaces = re.sub(r\"\\s\", \"\", input_str)\n removed_simbols = re.sub(r\"[()\\[\\]:/.-]+\", \"\", removed_spaces)\n mayusculas_str = removed_simbols.upper()\n if re.fullmatch(CURP_REGEXP, unidecode(mayusculas_str)):\n final = mayusculas_str.strip()\n else:\n # raise ValueError(\"No es un valor permitido para una CURP\")\n return None\n return final\n\n\ndef safe_tel(input_str):\n \"\"\"Safe Teléfono\"\"\"\n if not isinstance(input_str, str):\n return None\n removed_spaces = re.sub(r\"\\s\", \"\", input_str)\n removed_simbols = re.sub(r\"[()\\[\\]:/.-]+\", \"\", removed_spaces)\n removed_letters = re.sub(r\"[a-zA-Z]+\", \"\", removed_simbols)\n return removed_letters.strip()\n","repo_name":"PJECZ/pjecz-citas-v2-admin","sub_path":"lib/safe_string.py","file_name":"safe_string.py","file_ext":"py","file_size_in_byte":5664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"16158303532","text":"import flask\nfrom flask import json\nfrom sqlalchemy import exc as sa_exc\nimport sqlalchemy.orm as sa_orm\n\nfrom dci.api.v1 import api\nfrom dci.api.v1 import base\nfrom dci.api.v1 import remotecis\nfrom dci.api.v1 import utils as v1_utils\nfrom dci import decorators\nfrom dci.common import exceptions as dci_exc\nfrom dci.common.schemas import (\n check_json_is_valid,\n clean_json_with_schema,\n create_team_schema,\n update_team_schema,\n check_and_get_args,\n)\nfrom dci.common import utils\nfrom dci.db import declarative as d\nfrom dci.db import models2\n\n\n@api.route(\"/teams\", methods=[\"POST\"])\n@decorators.login_required\n@decorators.log\ndef 
create_teams(user):\n values = flask.request.json\n check_json_is_valid(create_team_schema, values)\n values.update(v1_utils.common_values_dict())\n\n if user.is_not_super_admin() and user.is_not_epm():\n raise dci_exc.Unauthorized()\n\n try:\n t = models2.Team(**values)\n t_serialized = t.serialize()\n flask.g.session.add(t)\n flask.g.session.commit()\n except sa_exc.IntegrityError as ie:\n flask.g.session.rollback()\n raise dci_exc.DCIException(message=str(ie), status_code=409)\n except Exception as e:\n flask.g.session.rollback()\n raise dci_exc.DCIException(message=str(e))\n\n return flask.Response(\n json.dumps({\"team\": t_serialized}),\n 201,\n headers={\"ETag\": values[\"etag\"]},\n content_type=\"application/json\",\n )\n\n\n@api.route(\"/teams\", methods=[\"GET\"])\n@decorators.login_required\ndef get_all_teams(user):\n args = check_and_get_args(flask.request.args.to_dict())\n\n q = flask.g.session.query(models2.Team)\n\n if user.is_not_super_admin() and user.is_not_epm() and user.is_not_read_only_user():\n q = q.filter(models2.Team.id.in_(user.teams_ids))\n\n q = (\n q.filter(models2.Team.state != \"archived\")\n .options(sa_orm.selectinload(\"topics\"))\n .options(sa_orm.selectinload(\"remotecis\"))\n )\n q = d.handle_args(q, models2.Team, args)\n nb_teams = q.count()\n\n q = d.handle_pagination(q, args)\n teams = q.all()\n teams = list(map(lambda t: t.serialize(), teams))\n\n return flask.jsonify({\"teams\": teams, \"_meta\": {\"count\": nb_teams}})\n\n\n@api.route(\"/teams/\", methods=[\"GET\"])\n@decorators.login_required\ndef get_team_by_id(user, t_id):\n base.get_resource_orm(models2.Team, t_id)\n if user.is_not_in_team(t_id) and user.is_not_epm():\n raise dci_exc.Unauthorized()\n\n try:\n t = (\n flask.g.session.query(models2.Team)\n .filter(models2.Team.state != \"archived\")\n .filter(models2.Team.id == t_id)\n .options(sa_orm.selectinload(\"remotecis\"))\n .options(sa_orm.selectinload(\"topics\"))\n .one()\n )\n except sa_orm.exc.NoResultFound:\n raise dci_exc.DCIException(message=\"team not found\", status_code=404)\n\n return flask.Response(\n json.dumps({\"team\": t.serialize()}),\n 200,\n headers={\"ETag\": t.etag},\n content_type=\"application/json\",\n )\n\n\n@api.route(\"/teams//remotecis\", methods=[\"GET\"])\n@decorators.login_required\ndef get_remotecis_by_team(user, team_id):\n if user.is_not_in_team(team_id) and user.is_not_epm():\n raise dci_exc.Unauthorized()\n\n team = base.get_resource_orm(models2.Team, team_id)\n return remotecis.get_all_remotecis(team.id)\n\n\n@api.route(\"/teams/\", methods=[\"PUT\"])\n@decorators.login_required\ndef put_team(user, t_id):\n # get If-Match header\n if_match_etag = utils.check_and_get_etag(flask.request.headers)\n values = clean_json_with_schema(update_team_schema, flask.request.json)\n\n if user.is_not_super_admin() and user.is_not_epm():\n raise dci_exc.Unauthorized()\n\n base.get_resource_orm(models2.Team, t_id)\n\n values[\"etag\"] = utils.gen_etag()\n\n updated_team = (\n flask.g.session.query(models2.Team)\n .filter(models2.Team.id == t_id)\n .filter(models2.Team.etag == if_match_etag)\n .update(values)\n )\n flask.g.session.commit()\n\n if not updated_team:\n flask.g.session.rollback()\n raise dci_exc.DCIException(\n message=\"update failed, either team not found or etag not matched\",\n status_code=409,\n )\n\n t = flask.g.session.query(models2.Team).filter(models2.Team.id == t_id).one()\n if not t:\n raise dci_exc.DCIException(message=\"unable to return team\", status_code=400)\n\n return flask.Response(\n 
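# Hedged sketch of the optimistic-locking pattern the PUT handler above relies
# on: the UPDATE only matches when the stored etag equals the client's
# If-Match value, so a zero rowcount means a concurrent writer won. The table
# and column names here are illustrative, not the DCI schema.
import sqlite3
import uuid

def update_if_match(con, team_id, etag, name):
    new_etag = uuid.uuid4().hex
    cur = con.execute(
        "UPDATE team SET name = ?, etag = ? WHERE id = ? AND etag = ?",
        (name, new_etag, team_id, etag),
    )
    con.commit()
    return new_etag if cur.rowcount == 1 else None  # None -> 409 for the caller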
json.dumps({\"team\": t.serialize()}),\n 200,\n headers={\"ETag\": values[\"etag\"]},\n content_type=\"application/json\",\n )\n\n\n@api.route(\"/teams/\", methods=[\"DELETE\"])\n@decorators.login_required\ndef delete_team_by_id(user, t_id):\n # get If-Match header\n if_match_etag = utils.check_and_get_etag(flask.request.headers)\n base.get_resource_orm(models2.Team, t_id)\n\n if user.is_not_super_admin():\n raise dci_exc.Unauthorized()\n\n deleted_team = (\n flask.g.session.query(models2.Team)\n .filter(models2.Team.id == t_id)\n .filter(models2.Team.etag == if_match_etag)\n .update({\"state\": \"archived\"})\n )\n flask.g.session.commit()\n\n if not deleted_team:\n flask.g.session.rollback()\n raise dci_exc.DCIException(\n message=\"delete failed, either team already deleted or etag not matched\",\n status_code=409,\n )\n\n try:\n for model in [models2.File, models2.Remoteci, models2.Job]:\n flask.g.session.query(model).filter(model.team_id == t_id).update(\n {\"state\": \"archived\"}\n )\n flask.g.session.commit()\n except Exception as e:\n flask.g.session.rollback()\n raise dci_exc.DCIException(message=str(e), status_code=409)\n\n return flask.Response(None, 204, content_type=\"application/json\")\n\n\n@api.route(\"/teams/purge\", methods=[\"GET\"])\n@decorators.login_required\ndef get_to_purge_archived_teams(user):\n return base.get_to_purge_archived_resources(user, models2.Team)\n\n\n@api.route(\"/teams/purge\", methods=[\"POST\"])\n@decorators.login_required\ndef purge_archived_teams(user):\n return base.purge_archived_resources(user, models2.Team)\n","repo_name":"redhat-cip/dci-control-server","sub_path":"dci/api/v1/teams.py","file_name":"teams.py","file_ext":"py","file_size_in_byte":6233,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"35"} +{"seq_id":"3325672007","text":"\nfrom plugin import Plugin\n\nfrom PyQt5.QtWidgets import (QWidget, QLabel, QDoubleSpinBox, QCheckBox,\n QSpinBox, QSlider,\n QRadioButton, QLineEdit, QPushButton,\n QGridLayout, QHBoxLayout, QVBoxLayout,\n QStackedLayout,\n QFileDialog, QButtonGroup,\n QSizePolicy,\n )\nfrom PyQt5.QtCore import QDir, pyqtSignal, QObject, Qt\n\n\nimport cv2 as cv\nimport numpy as np\n\nfrom widget_helpers import radio_filler, spinbox_slider\n\ndef getInstance():\n ''' provides easy way for plugin finder to get an instance of this\n particular plugin\n '''\n instance = MovingAverage()\n instance.setSizeMarginSpacing()\n return instance\n \n# keep statusbar_message as module level variable\nstatusbar_message = 'display average over n frames. 
will take fps*n time to stabilize'\n\nclass MovingAverage(Plugin):\n def __init__(self, parent = None):\n \n super(MovingAverage, self).__init__()\n\n self.Params = {}\n \n self.setObjectName('moving_average') # should be plugin name\n \n # declare all parameter widgets below\n vbox1 = QVBoxLayout()\n self.spinFrames = QSpinBox(objectName = 'spinFrames')\n hbox1 = spinbox_slider(self.spinFrames, label = 'num frames', \n orientation = 'horizontal', min_ = 1, max_ = 10, \n single_step = 1, default_value = 1)\n \n vbox1.addLayout(hbox1)\n\n self.setLayout(vbox1)\n # ~ widget.img = None\n \n \n def radio_handler(self):\n bid = self.bgrpMethod.checkedId()\n self.stackedLayout.setCurrentIndex(bid)\n self.stackedLayout.setCurrentIndex(bid)\n \n def mainFunc(self, playing, scriptList, row):\n # dst = src1*alpha + src2*beta + gamma\n src1 = self.inputImg\n alpha = 1.0/float(self.spinFrames.value())\n \n if self.outputImg is not None:\n src2 = self.outputImg\n else:\n src2 = self.inputImg\n \n beta = 1.0 - alpha\n \n print('alpha: ', alpha, ' beta: ', beta)\n \n self.outputImg = cv.addWeighted(src1, alpha, src2, beta, 0.0)\n \n","repo_name":"wpeaton/cvrobot2","sub_path":"plugins/moving_average.py","file_name":"moving_average.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"72787821541","text":"import re\n\ncount = 0\n\ndef hasAbba(string):\n for i in range(0, len(string)-3):\n substr = string[i:i+4]\n if substr == ''.join(reversed(substr)):\n if substr.count(substr[0]) is not 4: # No same 4 chars\n return True\n return False\n\ndef tryList(list):\n for item in list:\n if hasAbba(item):\n return True\n return False\n\nwith open('day7.input', 'r') as f:\n for line in f:\n splitted = re.split(r'(\\[|\\])', line)\n\n hypernet = []\n regular = []\n\n hyper = False\n\n # Split in hypernet and regular and look for ABBAs separately\n for section in splitted:\n if section == '[' or section == ']':\n hyper = not hyper\n else:\n if hyper:\n hypernet.append(section)\n else:\n regular.append(section)\n\n if tryList(regular):\n if not tryList(hypernet):\n count += 1\n\nprint(count)\n","repo_name":"jberglinds/adventofcode2016","sub_path":"day7/day7.py","file_name":"day7.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"26521283653","text":"#!/usr/bin/env python3\n\"\"\"\nThis module provides classes to work with business models.\n\"\"\"\n\nfrom datetime import datetime\n\nfrom auxi.core.objects import NamedObject\nfrom auxi.core.time import Clock, TimePeriod\nfrom auxi.core.helpers import get_date\nfrom auxi.modelling.business.structure import Entity\n\n__version__ = '0.3.6'\n__license__ = 'LGPL v3'\n__copyright__ = 'Copyright 2016, Ex Mente Technologies (Pty) Ltd'\n__author__ = 'Johan Zietsman'\n__credits__ = ['Johan Zietsman']\n__maintainer__ = 'Johan Zietsman'\n__email__ = 'johan.zietsman@ex-mente.co.za'\n__status__ = 'Planning'\n\n\nclass TimeBasedModel(NamedObject):\n \"\"\"\n Represents an time based model class. An instance of this class is by\n default configured to run only once, thus functioning as a steady state\n model. 
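# The plugin above implements dst = src1*alpha + src2*(1-alpha) with
# alpha = 1/n, i.e. an exponential moving average over frames. A minimal
# numpy-only sketch of the same recurrence (no OpenCV needed):
import numpy as np

def ema_frames(frames, n=5):
    alpha, acc = 1.0 / n, None
    for frame in frames:
        f = frame.astype(np.float32)
        acc = f if acc is None else alpha * f + (1.0 - alpha) * acc
        yield acc.astype(frame.dtype)

# After roughly fps*n frames the accumulator stabilises, matching the
# status-bar note above.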
The instance's time based parameters must be configured for it to\n function as a time based model.\n\n :param name: The name.\n :param description: The description.\n :param start_datetime: The start datetime of the model.\n :param period_duration: The duration of the model's time period.\n e.g. month, day etc.\n :param period_count: The number of periods to execute the model for.\n \"\"\"\n\n def __init__(self,\n name,\n description=None,\n start_datetime=datetime.now(),\n period_duration=TimePeriod.year,\n period_count=1):\n self.clock = Clock(\n \"Clock\",\n start_datetime=get_date(start_datetime),\n timestep_period_duration=period_duration)\n self.period_count = period_count\n self.entities = []\n super(TimeBasedModel, self).__init__(name, description=description)\n\n def _update_childrens_parent_path(self):\n for e in self.entities:\n e.set_parent_path(self.name)\n\n @property\n def name(self):\n return self._name\n\n @name.setter\n def name(self, value):\n self._name = value\n self._update_childrens_parent_path()\n\n def create_entity(self, name, gl_structure, description=None):\n \"\"\"\n Create an entity and add it to the model.\n\n :param name: The entity name.\n :param gl_structure: The entity's general ledger structure.\n :param description: The entity description.\n\n :returns: The created entity.\n \"\"\"\n\n new_entity = Entity(name, gl_structure, description=description)\n self.entities.append(new_entity)\n return new_entity\n\n def remove_entity(self, name):\n \"\"\"\n Remove an entity from the model.\n\n :param name: The name of the entity to remove.\n \"\"\"\n\n entity_to_remove = None\n for e in self.entities:\n if e.name == name:\n entity_to_remove = e\n if entity_to_remove is not None:\n self.entities.remove(entity_to_remove)\n\n def prepare_to_run(self):\n \"\"\"\n Prepare the model for execution.\n \"\"\"\n\n self.clock.reset()\n for e in self.entities:\n e.prepare_to_run(self.clock, self.period_count)\n\n def run(self):\n \"\"\"\n Execute the model.\n \"\"\"\n\n self.prepare_to_run()\n for i in range(0, self.period_count):\n for e in self.entities:\n e.run(self.clock)\n self.clock.tick()\n\n def __getitem__(self, key):\n return [e for e in self.entities if e.name == key][0]\n\n\nif __name__ == \"__main__\":\n import unittest\n from auxi.modelling.business.models_test import TimeBasedModelUnitTester\n unittest.main()\n","repo_name":"Ex-Mente/auxi.0","sub_path":"auxi/modelling/business/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3572,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"35"} +{"seq_id":"9478124341","text":"from common_test_openrave import *\n\n@with_destroy\ndef test_pluginloading():\n RaveInitialize(load_all_plugins=False)\n assert(RaveLoadPlugin('ikfastsolvers'))\n assert(RaveLoadPlugin('libikfastsolvers'))\n env=Environment()\n assert(RaveCreateProblem(env,'ikfast') is not None)\n\n","repo_name":"Praveen-Ramanujam/MobRAVE","sub_path":"test/test_global.py","file_name":"test_global.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"17569848779","text":"# Imports\nimport cv2\nimport os\nimport glob\nimport shutil\nimport numpy as np\nimport tkinter as tk\nfrom tkinter import filedialog\n\n# ** IMPORTANT NOTICE **\n# Please run this program as administrator, otherwise the cleanup process will not be able to delete the remaining files\n\n# Program Description\n# This program will take 
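# Minimal sketch of the run loop TimeBasedModel uses: reset a clock, run every
# entity once per period, tick. With period_count=1 it degenerates to the
# steady-state case the docstring mentions. The Clock/entity stand-ins here
# are illustrative, not the auxi classes.
class TinyClock:
    def __init__(self):
        self.timestep_ix = 0

    def reset(self):
        self.timestep_ix = 0

    def tick(self):
        self.timestep_ix += 1

def run_model(entities, clock, period_count=1):
    clock.reset()
    for _ in range(period_count):
        for e in entities:
            e.run(clock)
        clock.tick()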
a video file as an input, and will output a trimmed version of the file\n# Speed is not a concern\n# The frames that need to be deleted are one's with no footage of the recorded tests\n\n# Algorithm Description\n# This algorithm will iterate through each frame of the video\n# Ex: Frame 1-3 need to be kept, frame 4-6 needs to be skipped, frame 7-8 kept\n# Frame 1 - Frame 2 = Difference, keep frame 1\n# Frame 2 - Frame 3 = Difference, keep frame 2\n# Frame 4 - Frame 3 = Difference, keep frame 3\n# Frame 5 - Frame 4 = No Difference, discard frame 4\n# Frame 6 - Frame 5 = No difference, discard frame 5\n# Frame 7 - Frame 6 = Difference, but frame 6 is last frame so discard\n# ** Will need to check that if the previous frame was discarded, and there is now a frame with a difference,\n# to discard that last frame **\n# Frame 8 - Frame 7 = Difference, keep frame 7\n# If last frame in whole video, keep it\n\n# Algo 2\n\n# This one is much simpler, since we know for sure that a if a certain area of the screen\n# is Completly black, then that is a frame we want to discard, hence the \"No picture\" seconds\n# of the video.\n# Open file with cv2, crop it to [250:300, 250:300] [y1, y2], [x1, x2] where\n# (x1,y1) is the top left of the image, the (x2,y2) is the bottom right\n# By cropping and getting this specific area, and summing up the pixel values of the image\n# if the value is = 0, then that is a frame we want to discard\n# This removes any issues that might happen with double iterations, and deleting files as the algorithm\n# works\n\n\n# This class will hold the file location of the video to process, the path of the frames folder\n# The path to the output video, and the frames per second of the video to process\nclass DirectoryInformation:\n def __init__(self, videotoprocess, pathtoframes, outputvideopath, fps):\n self.videotoprocess = videotoprocess\n self.pathtoframes = pathtoframes\n self.outputvideopath = outputvideopath\n self.fps = fps\n\n\n# Instance of empty DirectoryInformation\nmainDir = DirectoryInformation(None, None, None, None)\n\n\ndef convertVideoToImages(dirinfo):\n # eventually, user will be able to select multiple videos at once to process\n cap = cv2.VideoCapture(dirinfo.videotoprocess)\n framespersecond = cap.get(cv2.CAP_PROP_FPS)\n mainDir.fps = framespersecond\n\n path = dirinfo.pathtoframes\n i = 0\n # save each frame to its corresponding folder\n while cap.isOpened():\n ret, frame = cap.read()\n if not ret:\n break\n cv2.imwrite(os.path.join(path, 'frame' + str(i) + '.jpg'), frame)\n i += 1\n cap.release()\n cv2.destroyAllWindows()\n\n\ndef removeUnwantedFrames(dirinfo):\n path = dirinfo.pathtoframes\n\n # set i = number of frames\n listofframes = os.listdir(path) # dir is your directory path\n count = len(listofframes)\n # array of all frames that need to be deleted\n framestodelete = []\n\n # look at specific region in image, if it's black, then we want to delete it\n # This obviously isn't the best way to differenciate a frame we want to keep and want to delete,\n # especially if the resolution of the video feed changes, but because all the videos are the same size\n # it does the job well.\n imgcropselector = cv2.imread(path + '/frame' + str(5) + '.jpg')\n fromcenter = False\n\n roi = cv2.selectROI(\"Select Crop Area - Press Enter to Continue\", imgcropselector, fromcenter)\n cv2.destroyWindow('Select Crop Area - Press Enter to Continue')\n\n x1 = roi[0]\n x2 = roi[0] + roi[2]\n y1 = roi[1]\n y2 = roi[1] + roi[3]\n\n # img[250:300, 250:300]\n # this goes [y1:y+h, 
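# Condensed sketch of "Algo 2" described above: crop a fixed region, sum its
# pixels, and treat a zero sum as a blank frame to discard. A pure-numpy
# stand-in for the OpenCV indexing used in removeUnwantedFrames:
import numpy as np

def is_blank(frame, y1, y2, x1, x2):
    # frame[y1:y2, x1:x2] indexes rows (y) first, columns (x) second
    return int(np.sum(frame[y1:y2, x1:x2])) == 0

frame = np.zeros((480, 640), dtype=np.uint8)
assert is_blank(frame, 250, 300, 250, 300)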
x1:x+w]\n for i in range(0, count - 1):\n img = cv2.imread(path + '/frame' + str(i) + '.jpg')\n region = img[y1:y2, x1:x2]\n sumofpixels = np.sum(region)\n\n if sumofpixels == 0:\n framestodelete.append('frame' + str(i))\n\n # delete all frames from list\n for i in range(0, len(framestodelete)):\n os.remove(dirinfo.pathtoframes + str(framestodelete[i]) + '.jpg')\n\n\ndef makeVideo(dirinfo):\n videoname = filedialog.asksaveasfilename(\n parent=window,\n title=\"Save File\",\n initialdir=mainDir.outputvideopath,\n filetypes=[\n (\"All files\", \"*\")])\n\n # take all cleaned up frames and make into video\n frame_array = []\n size = 0\n\n # Because of the way glob.glob works, need to have separate for looks for frame names\n # Here, assuming that the program won't exceed 999,999 total frames, and if it does get that large,\n # Just add another for loop with more ?'s.\n # so frame? = frame[0-9], frame?? = frame[10-99], frame??? = frame[100-999], etc\n for filename in glob.glob(dirinfo.pathtoframes + 'frame?.jpg'):\n img = cv2.imread(filename)\n height, width, layers = img.shape\n size = (width, height)\n frame_array.append(img)\n\n for filename in glob.glob(dirinfo.pathtoframes + 'frame??.jpg'):\n img = cv2.imread(filename)\n height, width, layers = img.shape\n size = (width, height)\n frame_array.append(img)\n\n for filename in glob.glob(dirinfo.pathtoframes + 'frame???.jpg'):\n img = cv2.imread(filename)\n height, width, layers = img.shape\n size = (width, height)\n frame_array.append(img)\n\n for filename in glob.glob(dirinfo.pathtoframes + 'frame????.jpg'):\n img = cv2.imread(filename)\n height, width, layers = img.shape\n size = (width, height)\n frame_array.append(img)\n\n for filename in glob.glob(dirinfo.pathtoframes + 'frame?????.jpg'):\n img = cv2.imread(filename)\n height, width, layers = img.shape\n size = (width, height)\n frame_array.append(img)\n\n for filename in glob.glob(dirinfo.pathtoframes + 'frame??????.jpg'):\n img = cv2.imread(filename)\n height, width, layers = img.shape\n size = (width, height)\n frame_array.append(img)\n\n for filename in glob.glob(dirinfo.pathtoframes + 'frame???????.jpg'):\n img = cv2.imread(filename)\n height, width, layers = img.shape\n size = (width, height)\n frame_array.append(img)\n\n # Create a videowriter with size and framerate we want\n\n output = cv2.VideoWriter(videoname + '.avi', cv2.VideoWriter_fourcc(*'DIVX'), mainDir.fps,\n size)\n\n # This loop adds each frame into the videowriter, creating the video\n\n for i in range(len(frame_array)):\n output.write(frame_array[i])\n\n # Release all the frames from memory\n output.release()\n\n # Delete the frames folder that contains all the frames, since the video is made and complete, we don't need it\n # anymore. This will sometimes throw an error about \"Permission denied - read only\", which will mean the folder\n # will have to be deleted manually. Most of the time this line works, as I ignore. 
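# The stacked glob patterns below (frame?, frame??, ...) exist only to keep
# frames in numeric order, since plain lexicographic globbing puts frame10
# before frame2. A hedged alternative sketch — not what the original does,
# just the common idiom of one glob plus a numeric sort key:
import glob
import re

def frames_in_order(folder):
    num = lambda p: int(re.search(r"frame(\d+)\.jpg$", p).group(1))
    return sorted(glob.glob(folder + "frame*.jpg"), key=num)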
Main functionality\n # of the program remains, and this will not stop the final video from being made properly.\n shutil.rmtree(mainDir.pathtoframes, ignore_errors=True, onerror=None)\n\n\ndef main():\n newpath = mainDir.outputvideopath + '/frames/'\n if not os.path.exists(newpath):\n os.makedirs(newpath)\n mainDir.pathtoframes = newpath\n convertVideoToImages(mainDir)\n removeUnwantedFrames(mainDir)\n makeVideo(mainDir)\n\n\n# Tkinter Stuff\nwindow = tk.Tk()\nwindow.title(\"Video Trimmer - Roman Wicky van Doyer 2021\")\nwindow.geometry(\"500x300\")\n\n\n# Open File\ndef open_file():\n rep = filedialog.askopenfile(\n parent=window,\n title=\"Select TIFF File to Process\",\n initialdir='/',\n filetypes=[\n (\"All files\", \"*\")])\n mainDir.videotoprocess = rep.name\n return rep.name\n\n\n# Save file\ndef save_file():\n rep = filedialog.askdirectory(\n parent=window,\n title=\"Select Directory to Save File\",\n initialdir='/'\n )\n mainDir.outputvideopath = rep\n return rep\n\n\nframe_1 = tk.Frame()\nframe_2 = tk.Frame()\nframe_3 = tk.Frame()\nbutton = tk.Button(\n master=frame_1,\n text=\"Select File\",\n font=\"Nunito\",\n width=15,\n height=5,\n bg=\"#32a889\",\n fg=\"black\",\n command=lambda: open_file()\n)\nbutton2 = tk.Button(\n master=frame_2,\n text=\"Select Destination\",\n font=\"Nunito\",\n width=15,\n height=5,\n bg=\"#32a889\",\n fg=\"black\",\n command=lambda: save_file()\n)\nbutton3 = tk.Button(\n master=frame_2,\n text=\"Process\",\n font=\"Nunito\",\n width=15,\n height=5,\n bg=\"#32a889\",\n fg=\"black\",\n command=lambda: main()\n)\nframe_1.pack()\nframe_2.pack()\nframe_3.pack()\nbutton.pack()\nbutton2.pack()\nbutton3.pack()\nwindow.mainloop()\n","repo_name":"romanwicky/videocleanerumass","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"25538758580","text":"# -*- coding: utf-8 -*-\n# @Time : 2018年11月16日 4:27 PM\n# @Author : 李思原\n# @Email : shulisiyuan@163.com\n# @File : unsplashImageSpider.py\n# @Software: PyCharm\n# @Describe: 爬取unsplash上的图片.\n\nimport http.client\nimport hashlib\nfrom urllib import parse\nimport random\nimport json\nimport langid\nimport requests\nimport os\nimport datetime\nimport threading\nimport time\nfrom multiprocessing import Pool\nfrom retry import retry\n\nfileSaveBasePath = './unsplashImageSpider/'\n\n\n# type表示翻译类型\n# type=1表示中文翻译为英文\n# type=2表示英文翻译为中文\ndef Chinese2English(word='朋友', type=1):\n appid = '20180901000201564' # 你的appid\n secretKey = 'fV2gfeIQtEEVgol3PJ9R' # 你的密钥\n\n httpClient = None\n myurl = '/api/trans/vip/translate'\n fromLang = 'zh'\n toLang = 'en'\n salt = random.randint(32768, 65536)\n\n sign = appid + word + str(salt) + secretKey\n m1 = hashlib.new('md5')\n m1.update(sign.encode('utf-8'))\n sign = m1.hexdigest()\n if type == 1:\n myurl = myurl + '?appid=' + appid + '&q=' + parse.quote(word) + '&from=zh&to=en' + '&salt=' + str(\n salt) + '&sign=' + sign\n elif type == 2:\n myurl = myurl + '?appid=' + appid + '&q=' + parse.quote(word) + '&from=en&to=zh' + '&salt=' + str(\n salt) + '&sign=' + sign\n\n try:\n httpClient = http.client.HTTPConnection('api.fanyi.baidu.com')\n httpClient.request('GET', myurl)\n\n # response是HTTPResponse对象\n response = httpClient.getresponse()\n responseData = json.loads(response.read().decode())\n word = responseData['trans_result'][0]['dst']\n # print(responseData)\n return word\n except Exception as e:\n print(e)\n finally:\n if httpClient:\n 
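# Condensed sketch of the request-signing scheme Chinese2English uses above:
# sign = md5(appid + query + salt + secret), hex-encoded, sent alongside the
# query string. Credentials here are placeholders, not working keys.
import hashlib
import random
from urllib import parse

def baidu_sign(appid, secret, query):
    salt = str(random.randint(32768, 65536))
    sign = hashlib.md5((appid + query + salt + secret).encode("utf-8")).hexdigest()
    return {"appid": appid, "q": parse.quote(query), "salt": salt, "sign": sign}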
httpClient.close()\n\n\n# 判断语言种类,并获得文字的中英文\ndef judgeLanguageType(word):\n languageType = langid.classify(word)[0]\n\n if languageType == 'en':\n wordEnglish = word\n wordChinese = Chinese2English(word=word, type=2)\n else:\n wordChinese = word\n wordEnglish = Chinese2English(word=word, type=1)\n\n return wordEnglish, wordChinese\n\n\n# 根据URL地址,文件名,文件存储路径下载文件\n@retry(tries=5, delay=1, backoff=2, max_delay=8, jitter=1)\ndef saveFile(path, file_name, url):\n if not os.path.exists(os.path.join(path, file_name)):\n data = requests.get(url).content\n if data == None:\n return\n if not os.path.exists(path):\n os.makedirs(path)\n\n if (not path.endswith(\"/\")):\n path = path + \"/\"\n file = open(path + file_name, \"wb\")\n file.write(data)\n file.flush()\n file.close()\n\n\n# 爬取图片\ndef getImg(wordEnglish='boy', wordChinese='男孩', pageNum=1):\n url = \"https://unsplash.com/napi/search/photos\"\n\n headers = {\n 'accept': \"*/*\",\n 'accept-encoding': \"gzip, deflate, br\",\n 'accept-language': \"en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7\",\n 'cookie': \"xp-search-cervantes-v1=experiment; _ga=GA1.2.679081907.1542356444; _gid=GA1.2.1314320132.1542356444; lsnp=sigsrn9xi0A; uuid=83b1b910-e978-11e8-b31c-79173615a02c; xpos=%7B%7D; lux_uid=154235838759040539; _sp_ses.0295=*; _gat=1; _sp_id.0295=9b24233c-ddff-4962-bb9b-29642f7eea2a.1542356445.2.1542358413.1542356505.499207fd-2257-46f0-b05f-b9f8a43edd51\",\n 'dpr': \"2\",\n 'referer': \"https://unsplash.com/search/photos/boy\",\n 'user-agent': \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36\",\n 'viewport-width': \"813\"\n }\n\n for page in range(1, int(pageNum) + 1):\n print('正在爬取关键字:%s_%s第%s页' % (wordChinese, wordEnglish, page))\n querystring = {\"query\": '%s' % wordEnglish, \"page\": \"%s\" % page, \"per_page\": \"30\"}\n response = requests.request(\"GET\", url, headers=headers, params=querystring)\n responseData = response.text\n # print(responseData)\n\n pageNum = min(int(pageNum), int(json.loads(responseData)['total_pages']))\n responseDatas = json.loads(responseData)['results']\n\n def action(arg):\n startime = datetime.datetime.now()\n img = responseDatas[arg]\n try:\n imgDescription = img['description']\n except AttributeError:\n return\n imgUrl = img['urls']\n # print('关键词:%s_%s>>页面:%s爬取线程%s>>标题:%s' % (wordChinese, wordEnglish, page, arg, imgDescription))\n\n fileSavePath = os.path.join(fileSaveBasePath, '%s_%s') % (wordChinese, wordEnglish)\n fileSaveName = '%s_%s_%s.jpg' % (wordChinese, wordEnglish, imgDescription)\n\n # 图片从大到小尺寸:raw,full,regular,small,thumb\n saveFile(path=fileSavePath, file_name=fileSaveName, url=imgUrl['regular'])\n endtime = datetime.datetime.now()\n print('###关键词:%s_%s>>页面%s图片%s耗时:%s' % (wordChinese, wordEnglish, page, arg, endtime - startime))\n\n for i in range(len(responseDatas)):\n # print('*******关键词:%s_%s页面%s正在启动线程%s' % (wordChinese, wordEnglish, page, i))\n time.sleep(1)\n t = threading.Thread(target=action, args=(i,))\n t.start()\n\n\n# 处理爬取图片页面数\ndef spiderPageNum(keywords):\n try:\n pageNum = int(input('请输入要爬取主题\"%s\"的页面数目(每页大约30张图片): ' % keywords))\n except ValueError:\n print('输入错误,请输入数字')\n pageNum = spiderPageNum()\n return pageNum\n\n\nif __name__ == '__main__':\n keywords = input('请输入要下载的关键字(使用空格分开关键字): ').split(' ')\n print('共有%s个关键字' % str(len(keywords)))\n pageNum = spiderPageNum(keywords)\n\n\n def getImgByWord(keyword):\n wordEnglish, wordChinese = judgeLanguageType(word=keyword)\n getImg(wordEnglish=wordEnglish, 
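# The saveFile helper below leans on the `retry` decorator (tries=5, delay
# doubling from 1s, capped at 8s, plus jitter). A hand-rolled sketch of that
# backoff loop, for when the dependency is unavailable:
import random
import time
import requests

def fetch_with_backoff(url, tries=5, delay=1.0, backoff=2.0, max_delay=8.0):
    for attempt in range(tries):
        try:
            return requests.get(url, timeout=10).content
        except requests.RequestException:
            if attempt == tries - 1:
                raise
            time.sleep(min(delay * backoff ** attempt, max_delay) + random.random())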
wordChinese=wordChinese, pageNum=pageNum)\n print('\\n>>>>>>>>>>关键词:%s爬取完毕!\\n' % keyword)\n\n\n p = Pool(8)\n for i in keywords:\n p.apply_async(getImgByWord, args=(i,))\n print('Waiting for all subprocesses done...')\n p.close()\n p.join()\n print('All subprocesses done.')\n","repo_name":"monk678/someSpider","sub_path":"unsplashImageSpider.py","file_name":"unsplashImageSpider.py","file_ext":"py","file_size_in_byte":6354,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"35"} +{"seq_id":"28633208116","text":"def leggiMatr(name,x):\r\n m=[]\r\n for riga in range (0,x):\r\n l=[]\r\n for col in range(0,x):\r\n l.append(int(input(f\"{name}[{riga}][{col}]:\")))\r\n m.append(l)\r\n return m\r\n\r\ndef stampaMat(m):\r\n for riga in range(0, len(m)):\r\n for col in range(0, len(m[riga])):\r\n print(m[riga][col], end=\"\\t\")\r\n print()\r\n\r\ndef prodMat(m1,m2):\r\n m3=[]\r\n for riga in range(0, len(m1)):\r\n m3.append([])\r\n for col in range(0, len(m2[riga])):\r\n m3[riga].append(0)\r\n for z in range(0, len(m1[riga])):\r\n m3[riga][col]+=m1[riga][z]*m2[z][col]\r\n return m3\r\n\r\nx=int(input(\"quante righe?:\")); y=x\r\nm1=leggiMatr(\"m1:\",x)\r\nm2=leggiMatr(\"m2:\",x)\r\nstampaMat(m1)\r\nprint()\r\nstampaMat(m2)\r\nprint()\r\nstampaMat(prodMat(m1,m2) )","repo_name":"Daniiiele/Uni","sub_path":"Moltiplicazzioni tra matrici.py","file_name":"Moltiplicazzioni tra matrici.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"31284579751","text":"# -*- coding: utf-8 -*-\nimport os, sys, socket\nfrom setproctitle import setproctitle\nfrom multiprocessing import cpu_count,Process\nfrom Config.config import HOST, PORT, BACKLOG, PROCESSES_NUM\nfrom System.mylog import myLogging as logging\nfrom System.core import Epoll\n\nclass Server:\n def __init__(self):\n # 初始化进程名称 序号\n self.processNameNum = 1\n\n # 设置进程名\n setproctitle('SOWEB: master process %s'%self.getFullPath())\n\n # 创建socket监听服务\n self.createSocketServer()\n\n # 创建子进程\n self.createChildProcessing()\n\n # 开始日志处理器\n logging.startLogHandles()\n\n def createSocketServer(self):\n try:\n # 创建socket套接字\n # socket.SOCK_STREAM 流式socket\n # socket.AF_INET 服务器之间网络通信\n self.serverFd = serverFd = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n serverFd.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n # 将套接字绑定到地址, 在AF_INET下,以元组(host,port)的形式表示地址.\n serverFd.bind((HOST, PORT))\n\n # 开始监听TCP传入连接。backlog指定在拒绝连接之前,操作系统可以挂起的最大连接数量。该值至少为1,大部分应用程序设为5就可以了。\n serverFd.listen(BACKLOG)\n\n # 如果flag为0,则将套接字设为非阻塞模式,否则将套接字设为阻塞模式(默认值)。非阻塞模式下,如果调用recv()没有发现任何数据,或send()调用无法立即发送数据,那么将引起socket.error异常。\n serverFd.setblocking(0)\n # serverFd.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n\n except socket.error as message:\n # print('监听服务失败,请检查端口冲突,或者查看是否重复运行 %s' % message)\n logging.error('监听服务失败,请检查端口冲突,或者查看是否重复运行 %s' % message)\n sys.exit(0)\n\n def createChildProcessing(self):\n\n # 如果config.PROCESSES_NUM设置为0, 按照CPU核心数量来创建子进程\n if not PROCESSES_NUM:\n num = cpu_count()\n else:\n num = PROCESSES_NUM\n for i in range(num):\n self.forkProcess()\n self.processNameNum = self.processNameNum + 1\n\n\n def forkProcess(self):\n print('create')\n # 创建工作子进程\n p = Process(target=self.initProcess, args=())\n p.daemon = True\n p.start()\n\n def initProcess(self):\n setproctitle('SOWEB: worker process %d'%self.processNameNum)\n Epoll(self.serverFd)\n\n def getFullPath(self):\n pwd = ''\n arg = sys.argv[0]\n argSplit = 
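# The prodMat routine above computes c[i][j] = sum_k a[i][k] * b[k][j]. A
# compact stdlib-only restatement of the same triple loop, useful as a
# reference when checking it:
def matmul(a, b):
    n, m, p = len(a), len(b), len(b[0])
    return [[sum(a[i][k] * b[k][j] for k in range(m)) for j in range(p)]
            for i in range(n)]

assert matmul([[1, 2], [3, 4]], [[5, 6], [7, 8]]) == [[19, 22], [43, 50]]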
arg.split('/')\n file = argSplit[-1]\n if os.path.isfile(file):\n pwd = os.getcwd()\n return os.path.join(pwd, file)\n","repo_name":"nonenull/SOWEB","sub_path":"System/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2964,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"70171197221","text":"# 返回元素为字符串的list\n# 参数是[100,800]的list\nfrom sympy import *\nimport numpy as np\n\ndef p7_4_volume_xy(red_word):\n red_word = list(map(int,red_word))\n result = []\n x = symbols('x')\n y1 = red_word[0]/(x**2+red_word[1]*x+red_word[2])\n x0 = 0; x1 = 1;\n ##x\n a = pi * y1**2;\n vx = integrate(a,(x,0,1))\n result.append(latex(abs(simplify(vx))).replace('log','ln'))\n ##y\n c = 2*pi*x\n h = y1;\n v = integrate(c * h, (x,x0, x1))\n result.append(latex(abs(simplify(v))).replace('log','ln'))\n return result\n\nif __name__ == '__main__':\n red_word = [10,5,6];\n for i in p7_4_volume_xy(red_word):\n print(i)\n ","repo_name":"awesome-yyh/math_pyside2","sub_path":"mathplus/web/web113_py/py7_4/volume_xy.py","file_name":"volume_xy.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"15987394378","text":"import torch\nfrom torchvision import datasets\n\ndata = datasets.MNIST(root='study\\exercise\\임현경\\data', download=True)\n\nimage, label = data[0]\n\nprint(type(image))\nprint(type(label))\n\nimage.show()\nprint(label)\n","repo_name":"CarpeDM2017/StudyML","sub_path":"study/exercise/임현경/session06.py","file_name":"session06.py","file_ext":"py","file_size_in_byte":212,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"} +{"seq_id":"21851946804","text":"'''\r\nProgram name: Farm database\r\nAuthor: Samantha Kolb\r\nDate last updated: 7/21/2022\r\nFunctions: a createCon function that creates a connection with a database, \r\n a createCur function that creates a cursor with the connection given it\r\n a closeConAndCur function that closes a connection and cursor,\r\n an isInt function to check if a variable is an int\r\n an isPriceValid function to check if a variable is a valid price\r\n an isSQL function to check for if a variable is a string of sql statements\r\n a getItemID function to get an item's number based off the item's number,\r\n a deleteItem function to delete one single record from the database,\r\n an addItem function to add a single record to the database,\r\n an updateItem function to update a single record in the database\r\n a viewItems function to return a list of all the values in the item table\r\n\r\n the delete, add, and update functions will all return 0 if everything executes, and an error message if there is an error of some kind\r\n the view function will return a list if everything executes, and an error message if not\r\n the delete function parameters are: the database file, and the item_num to be deleted (2 parameters)\r\n the add function parameters are: the database file, the item name, quantity, description, category, unit, and price (7 parameters)\r\n the update function parameters are: the database file, the item number, name, quantity, description, category, unit, and price (8 parameters) \r\n it is almost the same as the add fuction, just with the item number in between the database file and the name\r\n\r\n'''\r\nimport re\r\nimport sqlite3 \r\nfrom sqlite3 import Error\r\n\r\n\r\n#this function checks if the parameter is an integer or not, returns True 
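# The volume helper above applies the standard solid-of-revolution formulas:
# about x (disks), V = pi * integral(y**2) dx; about y (shells),
# V = 2*pi * integral(x*y) dx. A tiny sympy check on y = 1/(x+1):
from sympy import symbols, pi, integrate, simplify

x = symbols('x')
y = 1 / (x + 1)
v_x = integrate(pi * y**2, (x, 0, 1))       # disk method  -> pi/2
v_y = integrate(2 * pi * x * y, (x, 0, 1))  # shell method -> 2*pi*(1 - log(2))
print(simplify(v_x), simplify(v_y))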
or False\r\ndef isInt(num):\r\n num = str(num)\r\n try:\r\n num = int(num)\r\n return True\r\n except ValueError:\r\n return False\r\n\r\n#this function checks for if the parameter is a valid price, returns true if the parameter is a valid price\r\ndef isPriceValid(num):\r\n pattern = re.compile(r\"\"\"^[0-9]+(\\.[0-9]{1,2})?$\"\"\")\r\n result = pattern.search(str(num))\r\n return bool(result)\r\n\r\n#this funtion checks a string for if it contains SQL which could cause problems, if everything is ok, will return None\r\ndef isSQL(s):\r\n pattern = r\"(SELECT|INSERT|UPDATE|DELETE|DROP|CREATE|JOIN|ALTER|GROUP BY|ORDER BY|;)\"\r\n result = re.search(pattern, str(s), re.IGNORECASE)\r\n if result != None:\r\n raise ValueError(\"SQL found\")\r\n return result\r\n\r\n#this function creates a connection with the database file passed to it, returns -1 if there's an error\r\ndef createCon(dbFile):\r\n con = -1\r\n try:\r\n con = sqlite3.connect(dbFile)\r\n except Exception:\r\n raise Exception(\"Connection issue\")\r\n \r\n return con \r\n\r\n#this function creates a cursor from the connection given to it, returns -1 if there's an error\r\ndef createCur(con):\r\n cursor = -1\r\n try:\r\n cursor = con.cursor()\r\n except Exception:\r\n raise Exception(\"Cursor issue\")\r\n return cursor\r\n\r\n#this function closes the connection and the cursor passed to it, returns -1 if there's an error\r\ndef closeConAndCur(con, cursor):\r\n try:\r\n #close the cursor\r\n cursor.close()\r\n #close the connection\r\n con.close()\r\n return\r\n except Exception:\r\n raise Exception(\"Closing issue\")\r\n\r\n#this function gets an item's id from the name\r\ndef getItemID(dbFile, name):\r\n\r\n try:\r\n con = createCon(dbFile)\r\n #create a cursor\r\n cursor = createCur(con)\r\n \r\n if type(name) != str:\r\n raise ValueError(\"Invalid name\")\r\n isSQL(name)\r\n #select statement to get the item id where the item name is the parameter\r\n sql = \"SELECT item_num \"\r\n sql += \"FROM item \"\r\n sql += \"WHERE item_name LIKE '\" + name + \"';\"\r\n cursor.execute(sql)\r\n\r\n l = cursor.fetchall()\r\n\r\n if len(l) == 0:\r\n raise (\"Item not found\")\r\n #elif len(l) > 1:\r\n #print(\"Warning: mulitple items discovered, only the first one will be altered\") \r\n id = l[0]\r\n num = id[0]\r\n return num \r\n\r\n except ValueError as error:\r\n e = \"Error: \" + str(error) + \". Incorrect value supplied.\"\r\n con.rollback()\r\n return e \r\n #if there is an error, return the error and rollback\r\n except Exception as error:\r\n e = \"Error: \" + str(error) + \". 
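# The helpers above guard string-built SQL with a keyword blacklist (isSQL).
# The standard alternative — shown here as a sketch, not as the original
# module's behaviour — is to let sqlite3 bind parameters, which sidesteps
# injection without any pattern matching:
import sqlite3

def delete_item_param(db_file, item_num):
    with sqlite3.connect(db_file) as con:
        cur = con.execute("DELETE FROM item WHERE item_num = ?", (item_num,))
    return cur.rowcount  # 0 means no such item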
Item not selected\"\r\n con.rollback()\r\n return e \r\n\r\n finally:\r\n closeConAndCur(con, cursor)\r\n\r\n#this function takes a db connection and an item name and deletes them, returns 0 if all goes well, and an error message otherwise\r\ndef deleteItem(dbFile, num):\r\n\r\n try:\r\n #create a connection\r\n con = createCon(dbFile)\r\n\r\n #create a cursor\r\n cursor = createCur(con)\r\n #\r\n if not isInt(num) or int(num) <= 0:\r\n raise Error(\"Item number not valid\")\r\n#\r\n isSQL(num) \r\n selectsql = \"SELECT MAX(rowid) from item;\"\r\n cursor.execute(selectsql)\r\n c = cursor.fetchone()\r\n id = c[0]\r\n if int(num) > id:\r\n raise Error(\"Item not in the database\")\r\n #delete statement \r\n sql = \"DELETE FROM item WHERE item_num = \" + str(num) + \";\"\r\n\r\n #execute the statement \r\n cursor.execute(sql)\r\n #commit the changes, SUCCESS!\r\n con.commit()\r\n #print(\"Item successfully deleted\")\r\n return 0\r\n #if there is an error, return the error and rollback\r\n except Error as error:\r\n e = \"Error: \" + str(error) + \". Item not deleted\"\r\n con.rollback()\r\n return e\r\n except ValueError:\r\n e = \"Error: the value supplied is not a correct value. \" + \"Item not deleted\"\r\n con.rollback()\r\n return e\r\n except NameError as error:\r\n e = \"Error: \" + str(error) + \". Item not deleted\"\r\n con.rollback()\r\n return e\r\n finally:\r\n closeConAndCur(con, cursor)\r\n\r\n#this funtion takes a db connection and all of the attributes of the item table and adds a record with those attributes, returns 0 if all goes well, and an error message otherwise\r\ndef addItem(dbFile, name, qnt, desc, cate, unit, price):\r\n\r\n try:\r\n con = createCon(dbFile)\r\n\r\n #create a cursor\r\n cursor = createCur(con)\r\n\r\n if not isInt(qnt):\r\n raise Error(\"Quantity must be a positive integer\")\r\n if int(qnt) < 0:\r\n raise Error(\"Quantity must be a positive integer\")\r\n if not isPriceValid(price):\r\n raise Error(\"Price must be a positive number, e.g. 14.20\")\r\n isSQL(name)\r\n isSQL(desc)\r\n isSQL(cate)\r\n isSQL(unit) \r\n \r\n #making sure there is an actual name\r\n if name == \"\":\r\n raise Error(\"No item name provided\")\r\n if unit == \"\":\r\n raise Error(\"No item unit provided\")\r\n #an insert into statement to insert the function parameters into the item table\r\n sql = \"INSERT INTO Item (item_name, item_qnt, item_desc, item_cate, item_unit, item_price) VALUES ( '\" + str(name) + \"', \" + str(qnt) + \", '\" + str(desc) + \"', '\" + str(cate) + \"', '\" + str(unit) + \"', \" + str(price) + \");\"\r\n #execute the statement \r\n cursor.execute(sql)\r\n #commit the changes, SUCCESS!\r\n con.commit()\r\n #print(\"Item successfully added\")\r\n return 0\r\n \r\n #if there is an error, return the error and rollback\r\n except Exception as error:\r\n e = \"Error: \" + str(error) + \". 
Item not added\"\r\n con.rollback()\r\n return e\r\n\r\n finally:\r\n closeConAndCur(con, cursor)\r\n \r\ndef updateName(dbFile, num, name):\r\n try:\r\n con = createCon(dbFile)\r\n cursor = createCur(con)\r\n\r\n if type(num) != int:\r\n raise TypeError(\"Item number is not valid\")\r\n if num < 0:\r\n raise ValueError(\"Item number is not valid\")\r\n isSQL(name) \r\n selectsql = \"SELECT MAX(rowid) from item;\"\r\n cursor.execute(selectsql)\r\n c = cursor.fetchone()\r\n id = c[0]\r\n if num > id:\r\n raise IndexError(\"Item not in the database\")\r\n #start the sql statement\r\n sql = \"UPDATE item Set item_name = '\" + str(name) + \"' WHERE item_num = \"\r\n sql += str(num)\r\n sql += \";\"\r\n cursor.execute(sql)\r\n #commit\r\n con.commit()\r\n return 0\r\n\r\n except Exception as error:\r\n e = \"Error: \" + str(error) + \". Item not updated\"\r\n con.rollback()\r\n return e \r\n finally:\r\n #close the cursor\r\n closeConAndCur(con, cursor) \r\n\r\n#function to update a record in the database, returns 0 if it works, and an error message otherwise\r\ndef updateItem(dbFile, num, qnt, desc, cate, unit, price):\r\n\r\n try:\r\n con = createCon(dbFile)\r\n if con == -1:\r\n raise Error(\"connection issue\")\r\n #create a cursor\r\n cursor = createCur(con)\r\n if cursor == -1:\r\n raise Error(\"cursor issue\")\r\n updateName = False\r\n updateQnt = False\r\n updateDesc = False\r\n updateCate = False\r\n updateUnit = False\r\n updatePrice = False \r\n\r\n #start the sql statement\r\n sql = \"UPDATE item Set \"\r\n if type(num) != int:\r\n raise TypeError(\"Item number is not valid\")\r\n if num < 0:\r\n raise ValueError(\"Item number is not valid\")\r\n selectsql = \"SELECT MAX(rowid) from item;\"\r\n cursor.execute(selectsql)\r\n c = cursor.fetchone()\r\n id = c[0]\r\n if num > id:\r\n raise IndexError(\"Item not in the database\")\r\n\r\n #if the variable is not null or \"\", we will update it, these variables will be used to create the update statement properly\r\n '''if name != \"\" and name != None:\r\n if isSQL(name) != None:\r\n raise Error(\"That is not a valid name\")'''\r\n updateName = True\r\n if qnt != \"\" and qnt != None: \r\n if not isInt(qnt):\r\n raise Error(\"Quantity must be a positive integer\")\r\n if int(qnt) < 0:\r\n raise Error(\"Quantity must be a positive integer\")\r\n updateQnt = True\r\n if desc != \"\" and desc != None: \r\n if isSQL(desc) != None:\r\n raise Error(\"That is not a valid description\")\r\n updateDesc = True\r\n if cate != \"\" and cate != None:\r\n if isSQL(cate) != None:\r\n raise Error(\"That is not a valid category\")\r\n updateCate = True\r\n if unit != \"\" and unit != None:\r\n if isSQL(unit) != None:\r\n raise Error(\"That is not a valid unit\")\r\n updateUnit = True\r\n if price != \"\" and price != None:\r\n if not isPriceValid(price):\r\n raise Error(\"Price must be a positive number with no more than 2 decimal places\")\r\n updatePrice = True \r\n if not updateName and not updateQnt and not updateDesc and not updateCate and not updateUnit and not updatePrice:\r\n raise Error(\"No information provided\")\r\n\r\n #these if statements go through and add the sql based on if there is another variable to be updated or not (if there is another variable then a comma is needed)\r\n '''if updateName and (updateQnt or updateDesc or updateCate or updateUnit or updatePrice):\r\n sql += \"item_name = '\" + str(name) + \"', \"\r\n elif updateName and not (updateQnt or updateDesc or updateCate or updateUnit or updatePrice):\r\n sql += \"item_name = 
'\" + str(name) + \"' \"'''\r\n\r\n if updateQnt and (updateDesc or updateCate or updateUnit or updatePrice):\r\n sql += \"item_qnt = \" + str(qnt) + \", \"\r\n elif updateQnt and not (updateDesc or updateCate or updateUnit or updatePrice):\r\n sql += \"item_qnt = \" + str(qnt) + \" \"\r\n\r\n if updateDesc and (updateCate or updateUnit or updatePrice):\r\n sql += \"item_desc = '\" + str(desc) + \"', \"\r\n elif updateDesc and not (updateCate or updateUnit or updatePrice):\r\n sql += \"item_desc = '\" + str(desc) + \"' \"\r\n\r\n if updateCate and (updateUnit or updatePrice):\r\n sql += \"item_cate = '\" + str(cate) + \"', \"\r\n elif updateCate and not (updateUnit or updatePrice):\r\n sql += \"item_cate = '\" + str(cate) + \"' \"\r\n \r\n if updateUnit and updatePrice:\r\n sql += \"item_unit = '\" + str(unit) + \"', \"\r\n elif updateUnit and not updatePrice:\r\n sql += \"item_unit = '\" + str(unit) + \"' \"\r\n\r\n if updatePrice:\r\n sql += \"item_price = \" + str(price) + \" \"\r\n\r\n sql += \" WHERE item_num = \" + str(num) + \";\" \r\n cursor.execute(sql)\r\n #commit\r\n con.commit()\r\n #print(\"Item \" + str(num) + \" successfully updated\")\r\n return 0\r\n except Exception as error:\r\n e = \"Error: \" + str(error) + \". Item not updated\"\r\n con.rollback()\r\n return e\r\n\r\n finally:\r\n #close the cursor\r\n closeConAndCur(con, cursor)\r\n\r\n#this function returns a list of all the items in the item table, if there is an issue or if there are no items, an error message is returned\r\ndef viewItems(dbFile):\r\n con = createCon(dbFile)\r\n\r\n cursor = createCur(con)\r\n\r\n try:\r\n cursor.execute(\"SELECT * FROM Item;\")\r\n items = cursor.fetchall()\r\n\r\n except Error as error:\r\n e = \"Error: \" + str(error) + \". Items not viewable\"\r\n return e\r\n\r\n finally:\r\n #close the cursor\r\n closeConAndCur(con, cursor)\r\n\r\n return items\r\n\r\ndef viewByCate(dbFile, cate):\r\n\r\n try:\r\n con = createCon(dbFile)\r\n\r\n cursor = createCur(con)\r\n if isSQL(cate) != None:\r\n raise ValueError(\"That is not a valid name\")\r\n sql =\"SELECT * FROM Item WHERE Item.item_cate LIKE '\" + cate + \"';\"\r\n cursor.execute(sql) \r\n items = cursor.fetchall() \r\n if len(items) == 0:\r\n raise IndexError(\"The category does not exist\")\r\n except Exception as error:\r\n e = \"Error: \" + str(error) + \". 
Items not viewable\"\r\n return e\r\n\r\n finally:\r\n #close the cursor\r\n closeConAndCur(con, cursor)\r\n\r\n return items\r\n\r\n#this function selects all the item names and quantities or if a category is specified then only names and quantites from that category will be selected\r\ndef viewqnt(dbFile, cate):\r\n\r\n try:\r\n con = createCon(dbFile)\r\n\r\n cursor = createCur(con)\r\n if type(cate) != str:\r\n raise ValueError(\"That is not a valid name.\")\r\n\r\n sql = \"SELECT item_name, item_qnt FROM Item \"\r\n if cate != \"\" and cate != None:\r\n if isSQL(cate) != None:\r\n raise ValueError(\"That is not a valid name.\")\r\n sql += \"WHERE item_cate LIKE '\" + cate + \"'\"\r\n sql += \";\"\r\n cursor.execute(sql)\r\n items = cursor.fetchall()\r\n if len(items) == 0:\r\n raise IndexError(\"The category does not exist.\")\r\n\r\n except Exception as error:\r\n e = \"Error: \" + str(error) + \" Error viewing quantities\"\r\n return e\r\n\r\n finally:\r\n #close the cursor\r\n closeConAndCur(con, cursor)\r\n\r\n return items\r\n\r\ndef view_an_item(dbFile, num):\r\n\r\n try:\r\n con = createCon(dbFile)\r\n\r\n cursor = createCur(con) \r\n if type(num) != int:\r\n raise TypeError(\"Item number is not valid.\")\r\n if num < 0:\r\n raise ValueError(\"Item number is not valid.\")\r\n selectsql = \"SELECT MAX(rowid) from item;\"\r\n cursor.execute(selectsql)\r\n c = cursor.fetchone()\r\n id = c[0]\r\n if num > id:\r\n raise IndexError(\"Item not in the database\")\r\n\r\n sql = \"SELECT * FROM Item WHERE Item_num = \" + str(num)\r\n sql += \";\"\r\n cursor.execute(sql)\r\n items = cursor.fetchone()\r\n\r\n except Exception as error:\r\n e = \"Error: \" + str(error) + \" Item not viewable\"\r\n return e\r\n\r\n finally:\r\n #close the cursor\r\n closeConAndCur(con, cursor)\r\n\r\n return items\r\n\r\n\r\ndbFile = 'Becker Farms Inventory.db'\r\n\r\n\r\n#print(updateItem(dbFile, getItemID(dbFile, 'Thick Hickory Bacon'), \"Thickory\", \"\", \"\", \"\", \"\", \"\"))\r\n'''\r\nprint(updateName(dbFile, '5', 5))\r\n\r\nprint(updateName(dbFile, 4, 'Cube'))\r\n\r\nprint(updateName(dbFile, 100, 'k'))'''\r\n'''\r\nprint(updateName(dbFile, 110, 'Whole Fresh Side'))\r\nprint(updateName(dbFile, 111, 'Whole Sm Belly'))\r\nprint(updateName(dbFile, 112, 'Bulk Jowl'))\r\nprint(updateName(dbFile, 113, 'Sm Ham'))\r\nprint(updateName(dbFile, 114, 'Fresh Ham'))\r\nprint(updateName(dbFile, 115, 'Bulk Wh Chicken'))\r\nprint(updateName(dbFile, 116, 'Bulk Ch Wings'))\r\nprint(updateName(dbFile, 117, 'Bulk LQ'))\r\nprint(updateName(dbFile, 118, 'Bulk B/S Thighs'))\r\nprint(updateName(dbFile, 119, 'Bulk B/S Breasts'))''''''\r\nprint(updateName(dbFile, 120, 'Bulk T Drums'))\r\nprint(updateName(dbFile, 121, 'Bulk Gr Turkey'))\r\nprint(updateName(dbFile, 122, '3L'))\r\nprint(updateName(dbFile, 126, 'Veal Chuck'))\r\nprint(updateName(dbFile, 128, 'Veal Sirloin Steak'))\r\nprint(updateName(dbFile, 129, 'Veal NY'))''''''\r\nprint(updateName(dbFile, 130, 'Veal Rib Chop'))\r\nprint(updateName(dbFile, 131, 'Veal Filet'))\r\nprint(updateName(dbFile, 132, 'Veal Gr Beef'))\r\nprint(updateName(dbFile, 133, 'Veal Ribs'))'''\r\n\r\n\r\n","repo_name":"minikin2/SoftwareProjectFarmDB","sub_path":"DBaccess.py","file_name":"DBaccess.py","file_ext":"py","file_size_in_byte":17589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"71180666662","text":"import pandas as pd\nimport urllib\nfrom pathlib import Path\n\n\ncsv = 
pd.read_csv(\"https://docs.google.com/spreadsheets/d/10T3lIOc5fZgsyvmYeWOlmgsaRiGI2BhYNBPY5YH0EFk/export?format=csv&id=10T3lIOc5fZgsyvmYeWOlmgsaRiGI2BhYNBPY5YH0EFk&gid=0\")\ncsv.dropna()\ncsv.drop_duplicates()\n\ncurr_dir = Path(__file__).parent.resolve()\n\nfor ind in csv.index:\n try:\n print(str(csv[\"titles\"][ind]))\n file_name = str(csv[\"total\"][ind])+\"_\"+str(csv[\"id\"][ind])+\".jpg\"\n urllib.request.urlretrieve(\"https://i.ytimg.com/vi/\"+str(csv[\"id\"][ind])+\"/hqdefault.jpg\", curr_dir / \"data\" / \"data_manualy_tag\" / file_name)\n except Exception as e:\n print(e)\n print(\"error on :\"+str(csv[\"titles\"][ind]))\n","repo_name":"Fredpwol/clean-tube-app","sub_path":"thumbnail_downloader.py","file_name":"thumbnail_downloader.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"40529714283","text":"def solution(m,n=64): #scales the matrix up!\n new = []\n inner = []\n data = enumerate(m)\n for i in data:\n for x in range(0,n):\n for d in i[1]:\n #print(round(int(n/2)))\n for x in range(0,round(int(n/2))):\n for x in range(0,round(int(n/2))):\n inner.append(d)\n new.append(inner)\n inner = []\n\n return new\ndef matrixfromnum(num:int):\n new = []\n inner = []\n for x in range(0,num):\n for z in range(1,num + 1):\n inner.append(z)\n new.append(inner)\n inner = []\n return new\ndef formatmatrix(m):\n strs = '['\n x = 0\n full = len(m)\n for data in m:\n x += 1\n if x >= full:\n strs += str(data) + ']'\n else:\n strs += str(data) + ',\\n'\n return strs\nif __name__ == \"__main__\":\n f = [[1,2,3,4], #the matrix to scale\n [5,6,7,8],\n [9,10,11,12],\n [13,14,15,16]]\n print(formatmatrix(matrix(f,4))) #SCALE THE MATRIX TO 16X16 from factors ","repo_name":"banana-galaxy/challenges","sub_path":"challenge1/feedfal8.py","file_name":"feedfal8.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"35"} +{"seq_id":"73864191781","text":"import sys\n\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.feature_selection import mutual_info_classif\nfrom scipy.stats import entropy, chi2_contingency\nfrom researchpy.correlation import corr_case\nfrom matplotlib import pyplot as plt\nfrom matplotlib.colors import LinearSegmentedColormap\nimport seaborn as sns\n\n\ndef compute_corr(train_load):\n # pearson corr\n\n train_load_df = pd.DataFrame(train_load)\n _, corr, pval = corr_case(train_load_df)\n corr = train_load_df.corr()\n corr = np.tril(corr.to_numpy())\n f, ax = plt.subplots(figsize=(11, 9))\n # Generate a custom diverging colormap\n cmap = sns.diverging_palette(229, 20, as_cmap=True)\n\n # Draw the heatmap with the mask and correct aspect ratio\n mask = np.zeros_like(corr, dtype=np.bool)\n mask[np.triu_indices_from(mask)] = True\n sns.heatmap(corr, cmap=cmap, mask=mask, vmin=0.8, center=0.8,\n square=True, linewidths=.5, cbar_kws={\"shrink\": .5})\n plt.savefig(\"plots/pearsoncorr.png\")\n\n\n# Mutual information\ndef compute_mi(train_load):\n def calc_MI(x, y, bins):\n c_xy = np.histogram2d(x, y, bins)[0]\n c_xy[c_xy == 0] = 1 / (bins * bins)\n\n g, p, dof, expected = chi2_contingency(c_xy, correction=False, lambda_=\"log-likelihood\")\n mi = 0.5 * g / c_xy.sum()\n return mi\n\n nbin = 50\n A = train_load\n n = A.shape[1]\n matMI = np.zeros((n, n))\n\n for ix in np.arange(n):\n for jx in np.arange(ix + 1, n):\n matMI[ix, jx] = calc_MI(A[:, ix], A[:, jx], nbin)\n matMI = matMI.T\n\n if __name__ == 
\"__main__\":\n listmatMI = matMI[np.tril_indices(n)]\n f, ax = plt.subplots(figsize=(11, 9))\n # Generate a custom diverging colormap\n cmap = sns.diverging_palette(229, 20, as_cmap=True)\n mask = np.zeros_like(matMI, dtype=np.bool)\n mask[np.triu_indices_from(mask)] = True\n # Draw the heatmap with the mask and correct aspect ratio\n sns.heatmap(matMI, cmap=cmap, mask=mask, center=0.5,\n square=True, linewidths=.1, cbar_kws={\"shrink\": .5})\n plt.savefig(\"plots/mutualinfo.png\")\n return matMI\n\n\ndef corr_zone(load):\n load_df = pd.DataFrame(load)\n load_corr = load_df.corr()\n load_corr = load_corr.to_numpy()\n\n zones_corr = np.zeros(shape=(8, 24, 24))\n names = ['CT', 'ME', 'NH', 'RI', 'VT', 'NEMASSBOST', 'WCMASS', 'SEMASS']\n for name, i in zip(names, np.arange(8)):\n f, ax = plt.subplots(figsize=(11, 9))\n # Generate a custom diverging colormap\n cmap = sns.diverging_palette(229, 20, as_cmap=True)\n sns.heatmap(load_corr[i * 24:(i + 1) * 24, i * 24:(i + 1) * 24], cmap=cmap, vmin=0, vmax=1, center=0.5,\n square=True, linewidths=.5, cbar_kws={\"shrink\": .5})\n plt.savefig(\"plots/corr_{}.png\".format(name))\n\n\ndef differentiatePCvsMI(train_load, PCthresh, MIthresh):\n mi = compute_mi(train_load)\n mi[mi >= MIthresh] = 1\n mi[mi < MIthresh] = 0\n A_MI = np.tril(mi, -1)\n\n train_load_df = pd.DataFrame(train_load.reshape(-1, 192))\n corr = train_load_df.corr().abs()\n corr[corr >= PCthresh] = 1\n corr[corr < PCthresh] = 0\n A_PC = corr.to_numpy()\n A_PC = np.tril(A_PC, -1)\n\n diff = np.zeros((A_PC.shape[0], A_PC.shape[1]))\n diff[(A_PC == A_MI) & (A_PC == 1)] = -2\n diff[(A_PC == A_MI) & (A_PC == 0)] = -1\n diff[(A_PC > A_MI)] = 1\n diff[(A_PC < A_MI)] = 2\n\n print(len(diff[diff == 1]))\n print(len(diff[diff == 2]))\n print(len(diff[diff == -1]))\n print(len(diff[diff == -2]))\n f, ax = plt.subplots(figsize=(11, 9))\n # Generate a custom discrete colormap\n myColors = ((0.8, 0.0, 0.0, 1.0), (0.8, 0.8, 0.8, 1.0), (0.0, 0.0, 0.8, 1.0), (0.0, 0.8, 0.0, 1.0))\n cmap = LinearSegmentedColormap.from_list('Custom', myColors, len(myColors))\n\n # Draw the heatmap with the mask and correct aspect ratio\n mask = np.zeros_like(corr, dtype=np.bool)\n mask[np.triu_indices_from(mask)] = True\n sns.heatmap(diff, cmap=cmap, mask=mask, square=True, linewidths=.5, vmin=-2, vmax=2)\n # Manually specify colorbar labelling after it's been generated\n colorbar = ax.collections[0].colorbar\n colorbar.set_ticks([-1.5, -0.5, 0.5, 1.5])\n colorbar.set_ticklabels(['PC & MI = 1', 'PC & MI = 0', 'PC > MI', 'PC < MI'])\n\n plt.savefig(\"plots/differentiatePC{}vsMI{}.png\".format(PCthresh, MIthresh))\n\n\ndef main():\n train_load = np.load(\"../../02_datasets/Sets/train_load.npy\")\n # compute_corr(train_load)\n # compute_mi(train_load)\n # corr_zone(train_load)\n differentiatePCvsMI(train_load, 0.9, 0.8)\n \"\"\"\n len_train_load = len(train_load)\n train_load_rnd = train_load\n np.random.shuffle(train_load_rnd.reshape((-1)))\n print(train_load_rnd)\n train_load_rnd = train_load_rnd.reshape((len_train_load, -1))\n compute_mi(train_load_rnd)\n \"\"\"\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"bendelv/Inge-M2TFE_load-forecast-GNF","sub_path":"03_scripts/EDA/correlations.py","file_name":"correlations.py","file_ext":"py","file_size_in_byte":4886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"28924416754","text":"from flask import Flask, render_template, request\nfrom flask_bootstrap import Bootstrap\nfrom werkzeug.utils import 
redirect\nimport os\nfrom PIL import Image\n\napp = Flask(__name__)\nBootstrap(app)\n\n\n@app.route(\"/\")\ndef home():\n file = 'static/myphoto.png'\n if os.path.exists(file) and os.path.isfile(file):\n os.remove(file)\n\n return render_template(\"index.html\")\n\n\n@app.route('/upload_file', methods=[\"GET\", \"POST\"])\ndef upload_file():\n if request.method == \"POST\":\n path_image = request.form.get('my_image')\n image = Image.open(path_image)\n image.save('static/myphoto.png')\n\n return redirect('/show_color_image')\n return render_template(\"index.html\")\n\n\n@app.route('/show_color_image', methods=[\"GET\", \"POST\"])\ndef show_color_image():\n img = Image.open(\"static/myphoto.png\")\n img = img.convert(\"RGB\")\n\n d = img.getdata()\n color_counter = {}\n\n for item in d:\n if item in color_counter:\n color_counter[item] += 1\n else:\n color_counter[item] = 1\n\n common_color = sorted(color_counter, key=color_counter.get, reverse=True)\n top_10 = common_color[:10]\n image_saved = 'yes'\n\n return render_template(\"index.html\", list_color=top_10, image_saved=image_saved)\n\n\n@app.route('/restart')\ndef restart():\n return redirect('/')\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"Glomsre/Image-Colour-Palette-Generator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"18619454386","text":"import pandas as pd\nfrom operator import add\ncombine_paths = ['../../output/Matching/Match_Data_1.csv','../../output/Matching/Match_Data_2.csv','../../output/Matching/output/Match_Data_3.csv','../../output/Matching/output/Match_Data_4.csv']\noutput_path = '../../output/Matchingoutput/Match_Data_Combined_final.csv'\ndef do(combine_paths,output_path):\n \n Code_Count = []\n Intent_Count = []\n Fuzzy_Code_Count = []\n Fuzzy_Intent_Count = []\n Ontology_Words = []\n for i in range(len(combine_paths)):\n \n df = pd.read_csv(combine_paths[i])\n if i == 0:\n Ontology_Words = df['Ontology_Words']\n Code_Count = df['Code_Count']\n Intent_Count = df['Intent_Count']\n Fuzzy_Code_Count = df['Fuzzy_Code_Count']\n Fuzzy_Intent_Count = df['Fuzzy_Intent_Count']\n else:\n Code_Count = list( map (add, Code_Count, list(df['Code_Count']))) \n Intent_Count = list( map (add, Intent_Count, list(df['Intent_Count']))) \n Fuzzy_Code_Count = list( map (add, Fuzzy_Code_Count, list(df['Fuzzy_Code_Count']))) \n Fuzzy_Intent_Count = list( map (add, Fuzzy_Intent_Count, list(df['Fuzzy_Intent_Count']))) \n \n dataframe = pd.DataFrame({'Ontology_Words':Ontology_Words,'Code_Count':Code_Count,'Intent_Count':Intent_Count,'Fuzzy_Code_Count':Fuzzy_Code_Count,'Fuzzy_Intent_Count':Fuzzy_Intent_Count})\n print(dataframe.head())\n dataframe.to_csv(output_path)\nif __name__ == '__main__':\n do(combine_paths,output_path)","repo_name":"kushazsehgal/FIM-OntologyMatching","sub_path":"src/Matching/combine.py","file_name":"combine.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"71577624742","text":"# -*- coding: utf-8 -*-\n# Import Python libs\nfrom __future__ import absolute_import, unicode_literals\nimport os\nimport sys\nimport logging\nimport subprocess\nimport tempfile\n\n\n# Import Salt Testing libs\nfrom tests.support.unit import TestCase, skipIf\nfrom tests.support.runtests import RUNTIME_VARS\nimport tests.support.helpers\n\n# Import Salt libs\nimport 
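# show_color_image below tallies pixel colours with a hand-rolled dict.
# collections.Counter expresses the same top-10 query directly — shown here
# as a hedged stdlib sketch, not as the route's actual code:
from collections import Counter
from PIL import Image

def top_colors(path, n=10):
    img = Image.open(path).convert("RGB")
    return [color for color, _ in Counter(img.getdata()).most_common(n)]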
salt\nimport salt.ext.six\nimport salt.modules.cmdmod\nimport salt.utils.platform\nimport salt.utils.path\nimport salt.utils.files\n\nlog = logging.getLogger(__name__)\n\n\n@skipIf(not salt.utils.path.which('bash'), 'Bash needed for this test')\nclass VendorTornadoTest(TestCase):\n    '''\n    Ensure we are not using any non-vendored tornado\n    '''\n\n    def test_import_override(self):\n        tmp = tempfile.mkdtemp()\n        test_source = tests.support.helpers.dedent('''\n        from __future__ import absolute_import, print_function\n        import salt\n        import tornado\n        print(tornado.__name__)\n        ''')\n        test_source_path = os.path.join(tmp, 'test.py')\n        tornado_source = tests.support.helpers.dedent('''\n        foo = 'bar'\n        ''')\n        tornado_source_path = os.path.join(tmp, 'tornado.py')\n        with salt.utils.files.fopen(test_source_path, 'w') as fp:\n            fp.write(test_source)\n        with salt.utils.files.fopen(tornado_source_path, 'w') as fp:\n            fp.write(tornado_source)\n        # Preserve the virtual environment\n        env = os.environ.copy()\n        if salt.utils.platform.is_windows():\n            if salt.ext.six.PY2:\n                env[b'PYTHONPATH'] = b';'.join([a.encode() for a in sys.path])\n            else:\n                env['PYTHONPATH'] = ';'.join(sys.path)\n        else:\n            env['PYTHONPATH'] = ':'.join(sys.path)\n        p = subprocess.Popen(\n            [sys.executable, test_source_path],\n            stderr=subprocess.PIPE,\n            stdout=subprocess.PIPE,\n            env=env\n        )\n        p.wait()\n        pout = p.stdout.read().strip().decode()\n        assert pout == 'salt.ext.tornado', pout\n\n    def test_vendored_tornado_import(self):\n        grep_call = salt.modules.cmdmod.run_stdout(\n            cmd='bash -c \\'grep -r \"import tornado\" ./salt/*\\'',\n            cwd=RUNTIME_VARS.CODE_DIR,\n            ignore_retcode=True,\n        ).split('\\n')\n        valid_lines = []\n        for line in grep_call:\n            if line == '':\n                continue\n            # Skip salt/ext/tornado/.. since there are a bunch of imports like\n            # this in docstrings.\n            if 'salt/ext/tornado/' in line:\n                continue\n            log.error(\"Test found bad line: %s\", line)\n            valid_lines.append(line)\n        assert valid_lines == [], len(valid_lines)\n\n    def test_vendored_tornado_import_from(self):\n        grep_call = salt.modules.cmdmod.run_stdout(\n            cmd='bash -c \\'grep -r \"from tornado\" ./salt/*\\'',\n            cwd=RUNTIME_VARS.CODE_DIR,\n            ignore_retcode=True,\n        ).split('\\n')\n        valid_lines = []\n        for line in grep_call:\n            if line == '':\n                continue\n            log.error(\"Test found bad line: %s\", line)\n            valid_lines.append(line)\n        assert valid_lines == [], len(valid_lines)\n\n    def test_regression_56063(self):\n        importer = salt.TornadoImporter()\n        importer.find_module('tornado') # for TypeError\n        #try:\n        #    importer.find_module('tornado')\n        #except TypeError:\n        #    assert False, 'TornadoImporter raised type error when one argument passed'\n","repo_name":"JohnnyPeng18/TypeFix","sub_path":"benchmarks/typebugs/salt/salt-56094/tests/unit/test_ext.py","file_name":"test_ext.py","file_ext":"py","file_size_in_byte":3533,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"35"} +{"seq_id":"9042481612","text":"def average(array):\n    a = set(array)\n    s = sum(a)\n    x = len(a)\n    avg = s / x\n    return avg\n\nif __name__ == '__main__':\n    n = int(input())\n    arr = list(map(int, input().split()))\n    result = average(arr)\n    print(result)\n","repo_name":"Pulkit3108/Coding","sub_path":"HackerRank Problems/Python/Sets.py","file_name":"Sets.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72071260268","text":"import sys\nimport cwiid\nimport time\n\nSEQUENCE = [\n    cwiid.LED1_ON,\n    cwiid.LED2_ON,\n    cwiid.LED3_ON,\n    
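# LED4 below is the apex of the sweep; the remaining entries walk back down (KITT-style scanner)\n    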
cwiid.LED4_ON,\n    cwiid.LED3_ON,\n    cwiid.LED2_ON,\n]\n\nprint('Put Wiimote in discoverable mode now (press 1+2)...')\nif len(sys.argv) > 1:\n    wiimote = cwiid.Wiimote(sys.argv[1])\nelse:\n    wiimote = cwiid.Wiimote()\ntry:\n    print(wiimote.state)\n    wiimote.mesg_callback = print\n    while True:\n        for led in SEQUENCE:\n            print(led)\n            wiimote.led = led\n            time.sleep(0.5)\nfinally:\n    wiimote.close()\n","repo_name":"AstraLuma/ppb-wiimote","sub_path":"kitt.py","file_name":"kitt.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10818669782","text":"import random\n\nclass Dice:\n    def __init__(self, sides):\n        self.sides = sides\n\n    def roll(self):\n        \"\"\" Returns the result of a single roll of the die.\"\"\"\n        roll = random.randint(1,self.sides)\n        return roll\n\n    def test(self, number_of_times):\n        \"\"\" Tests the probability of the dice with a given number of trials.\"\"\"\n        results = {}\n        for n in range(number_of_times):\n            roll = self.roll()\n            try:\n                results[roll] += 1\n            except KeyError:\n                results[roll] = 1\n        print(\"\\n{}\\n\".format(results))\n        print(\"{}-sided Dice Statistics:\".format(self.sides))\n        for k in results.keys():\n            prob = (results[k] / number_of_times) * 100\n            print(\"Probability of {}: {}/{} = {}%\".format(k, results[k], number_of_times, prob))\n\nclass Players:\n    def __init__(self):\n        self.players = {\n            \"Gandolf\": {\n                \"food\": 5,\n                \"grapefruit\": 10,\n                \"green potions\": 7,\n                \"red potions\": 8,\n                \"spells of enchantment\": 10\n            },\n            \"Frodo\": {\n                \"food\": 0,\n                \"kiwi\": 5,\n                \"wands of confusion\": 7,\n                \"green potions\": 8\n            },\n            \"Sauron\": {\n                \"bat wings\": 5,\n                \"evil spells\": 10,\n                \"fire wands\": 5\n            }\n        }\n\n    def create_player(self, name, inventory=None):\n        \"\"\" Creates a new player with a given inventory and adds them to the main dictionary.\"\"\"\n        # use None instead of a mutable default so players don't share one dict\n        if inventory is None:\n            inventory = {}\n        try:\n            i = self.players[name]\n            print(\"{} already exists!\\nInventory: {}\".format(name, i))\n        except KeyError:\n            self.players[name] = inventory\n    \n    def add_item(self, player, item, quantity=0):\n        \"\"\" Adds an item with a given quantity to a player's inventory \"\"\"\n        if player in self.players.keys():\n            try:\n                self.players[player][item] += quantity\n            except KeyError:\n                self.players[player][item] = quantity\n            print(\"{}: {}\".format(player, self.players[player]))\n        else:\n            print(\"{} does not exist in the game.\".format(player))\n\n    def remove_item(self, player, item, quantity):\n        if player in self.players.keys():\n            try:\n                n = self.players[player][item]\n                if n < quantity:\n                    print(\"{} does not have {} {}s.\".format(player, quantity, item))\n                    return None\n                else:\n                    self.players[player][item] -= quantity\n                    if self.players[player][item] == 0:\n                        print(\"{} is all out of {}s\".format(player, item))\n            except KeyError:\n                print(\"{} does not have any {}s.\".format(player, item))\n            print(\"{}: {}\".format(player, self.players[player]))\n        else:\n            print(\"{} does not exist in the game.\".format(player))\n\n\ndef main():\n    d = Dice(12)\n    d.test(10000)\n    print(\"\\n\\n\")\n    p = Players()\n\n    bilbo_inventory = {\n        \"food\": 0,\n        \"kiwi\": 5,\n        \"wands of confusion\": 7,\n        \"green potions\": 8\n    }\n\n    p.create_player(\"Bilbo\", bilbo_inventory)\n\n    p.add_item(\"Frodo\", \"food\", 1)\n    p.remove_item(\"Gandolf\", \"grapefruit\", 6)\n    p.remove_item(\"Sauron\", \"bat wings\", 5)\n\n\n\nif __name__ == \"__main__\":\n    
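# Demo: estimate the d12 distribution over 10,000 rolls, then exercise the inventory helpers above\n    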
main()","repo_name":"robertschaedler3/SSW-215","sub_path":"game_o_matic.py","file_name":"game_o_matic.py","file_ext":"py","file_size_in_byte":2964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39257825844","text":"import cv2\nfrom matplotlib import cm\nimport matplotlib.pyplot as plt\n\n\ndef draw_boxes(img, boxes, labels, class_names, scores):\n\n\tsize = img.shape\n\tprint(size)\n\n\tfor label, [x1, y1, x2, y2], class_name, score in zip(labels, boxes, class_names, scores):\n\n\t\tycv1 = size[0]-y1\n\t\tycv2 = size[0]-y2\n\t\txcv1 = x1\n\t\txcv2 = x2\n\n\t\tcmap_boxes = cm.get_cmap(\"Set1\") # colour map\n\t\trgba = cmap_boxes(int(label%9))\n\t\tbgr = rgba[2]*255, rgba[1]*255, rgba[0]*255\n\n\t\t#box\n\t\tline_thickness=5\n\t\ttl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness\n\t\tc1, c2 = (int(xcv1), int(ycv1)), (int(xcv2), int(ycv2))\n\t\tcv2.rectangle(img, c1, c2, color=bgr, thickness=tl, lineType=cv2.LINE_AA)\n\n\t\t#label on boxes\n\t\ttxt = class_name # + ' ' + str(round(score,2))\n\t\ttf = max(tl - 1, 1) # font thickness\n\t\tt_size = cv2.getTextSize(txt, 0, fontScale=tl / 3, thickness=tf)[0]\n\t\tc2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3\n\t\tcv2.rectangle(img, c1, c2, bgr, -1, cv2.LINE_AA) # filled\n\t\tcv2.putText(img, txt, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)\n\n\n\treturn img\n\n\n\n","repo_name":"Michnux/demo_datascience","sub_path":"YOLO/PDFReport/draw_boxes.py","file_name":"draw_boxes.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72062251948","text":"#!/usr/bin/env python\n\n'''\n@Author: Bilgehan NAL\nVoice Recogniser is a class that helps listen to speech and convert it to a string\n'''\n\nimport speech_recognition as sr\nimport io\nimport sys\n\nclass Voice_Recogniser:\n\n    # These variables are sample language codes\n    TURKEY = 'tr-TR'\n    US = 'en-US'\n    UK = 'en-GB'\n    FRANCE = 'fr-FR'\n    SPAIN = 'es-ES'\n    GERMAN = 'de-DE'\n    ITALY = 'it-IT'\n    RUSSIA = 'ru-RU'\n\n    def __init__(self, txt_path=None):\n        # Dictionary: all commands are stored in a dictionary\n        self.commands = {'---': '---'}\n        if txt_path is not None:\n            self.updateDictionary(txt_path)\n\n    '''\n    The dictionary is used to decide the action:\n    if the string (speech) includes any key, the matching command is returned.\n    The txt file should look like below:\n\n    hello.be_happy\n    wellcome.be_happy\n\n    If the string returned by the listen function includes 'hello' or\n    'wellcome', then 'be_happy' is returned.\n    '''\n    def updateDictionary(self, path):\n        #Read the data line by line from the txt file into a list.\n        with io.open(path, 'r', encoding='utf-8') as file:\n            my_list = file.readlines()\n        #Separating the expected speech from the command. 
The two parts are separated by a dot (.) character.\n        for row in my_list:\n            command = row.encode('utf-8').split(\".\")\n            if len(command) == 2:\n                self.commands[command[0]] = command[1]\n                print(\"Key: {}, Value: {}\".format(command[0], command[1]))\n\n    # listen_language is a voice recognition function; the language is given as a parameter.\n    def listen_language(self, language):\n        string = \"-\"\n        r = sr.Recognizer()\n        while string == \"-\":\n            with sr.Microphone() as source: \n                print(\"Baxter is listening to you...\") \n                audio = r.listen(source) \n                print(\"wait...\") \n            try:\n                string = r.recognize_google(audio, language=language) #Recognize speech\n                print(\"Baxter thinks you said -> \" + string)\n            except sr.UnknownValueError:\n                string = \"-\"\n            except sr.RequestError as e:\n                print(\"Could not request results from Google Speech Recognition service; {0}\".format(e))\n        print(\"Done...\")\n        return string\n\n    # Default listen function, it recognises US English\n    def listen(self):\n        string = \"-\"\n        r = sr.Recognizer()\n        while string == \"-\":\n            with sr.Microphone() as source: \n                print(\"Baxter is listening to you...\") \n                audio = r.listen(source) \n                print(\"wait...\") \n            try:\n                string = r.recognize_google(audio, language=self.US) #Recognize speech\n                print(\"Baxter thinks you said -> \" + string)\n            except sr.UnknownValueError:\n                string = \"-\"\n            except sr.RequestError as e:\n                print(\"Could not request results from Google Speech Recognition service; {0}\".format(e))\n        print(\"Done...\")\n        return string\n\n    def process(self, string):\n        enc = sys.getdefaultencoding()\n        result = \"Speech could not be processed\" #Default message\n        string = string.lower() #All cases are converted to lower case\n        # Search the commands in the dictionary\n        for key in self.commands.keys():\n            # if the key is a substring of the string -> key selects our command.\n            if key in string:\n                result = self.commands[key]\n                break\n        return result.rstrip().lower().encode('utf_8')","repo_name":"bilgehannal/baxter_face_software","sub_path":"baxter_face/scripts/Voice_Recogniser.py","file_name":"Voice_Recogniser.py","file_ext":"py","file_size_in_byte":3669,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"26095863045","text":"\"\"\"Simple DOM example (Go sciter example port).\"\"\"\n\nimport sciter, sys\n\nif __name__ == \"__main__\":\n    frame = sciter.Window(ismain=True, uni_theme=False)\n    frame.set_title(\"Inserting example\")\n\n    # load simple html\n    frame.load_html(b\"\"\"html\"\"\")\n\n    # create div and link as child of root node (<html>)\n    div = sciter.Element.create(\"div\", \"hello, world\")\n\n    root = frame.get_root()\n    root.insert(div, 0)\n\n    # show window and run app\n    frame.run_app()\n","repo_name":"sciter-sdk/pysciter","sub_path":"examples/insert.py","file_name":"insert.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":389,"dataset":"github-code","pt":"37"} +{"seq_id":"38778010551","text":"import unittest\nfrom pywikitools.fortraininglib import ForTrainingLib\nfrom pywikitools.resourcesbot.changes import ChangeLog\n\nfrom pywikitools.resourcesbot.consistency_checks import ConsistencyCheck\nfrom pywikitools.resourcesbot.data_structures import LanguageInfo\n\n\nclass TestConsistencyCheck(unittest.TestCase):\n    def setUp(self):\n        self.fortraininglib = ForTrainingLib(\"https://test.4training.net\")\n\n    def tearDown(self):\n        # workaround to remove annoying ResourceWarning: unclosed HHLLL' )\n\n        self.append( timestamp, 4 )\n        self.append( flags, 4 
)\n","repo_name":"CiscoSecurity/fp-05-firepower-cef-connector-arcsight","sub_path":"estreamer/message/eventstream.py","file_name":"eventstream.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"37"} +{"seq_id":"40124105447","text":"#!/usr/bin/env python\nimport os, sys\nimport re\nimport time\nimport math\n\nfrom glob import glob\nfrom datetime import datetime\n\n\n#-----------------------------------------------------------------------------------------------------------\n# move pics to archive\n#-----------------------------------------------------------------------------------------------------------\ndef move_pics_archive(pwd, newdir):\n for ext in ('jpg', 'jpeg', 'png', 'gif', 'jfif'):\n for file in glob(f'*.{ext}'):\n print(f'moved pic... {file}')\n \n dest = rf'{pwd}\\{newdir}\\{file}'\n # print(dest)\n\n # handles case if filename already exist\n try:\n os.rename(file, dest)\n except:\n os.remove(dest)\n\n os.rename(file, dest)\n\n print('>> cleaned')\n\n\n#-----------------------------------------------------------------------------------------------------------\n# move packages to downloads\n#-----------------------------------------------------------------------------------------------------------\ndef move_packs_dl(pwd):\n for ext in ('exe', ''):\n for file in glob(f'*.{ext}'):\n print(f'moved package... {file}')\n\n newdir2 = 'Downloads'\n\n try:\n os.mkdir(newdir2)\n except:\n pass\n\n pwd1 = pwd.replace('Desktop', newdir2)\n\n dest = rf'{pwd1}\\{file}'\n # print(dest)\n\n # handles case if filename already exist\n try:\n os.rename(file, dest)\n except:\n os.remove(dest)\n\n os.rename(file, dest)\n\n print('>> cleaned')\n\n\n#-----------------------------------------------------------------------------------------------------------\n# remove duplicate files\n#-----------------------------------------------------------------------------------------------------------\ndef del_dup_files():\n for i in range(1, 50):\n for file in glob(f'*({i}).*'):\n print(f'removed duplicate file... {file}')\n os.remove(file)\n\n print('>> cleaned')\n\n\n#-----------------------------------------------------------------------------------------------------------\n# move past pdf to archive\n#-----------------------------------------------------------------------------------------------------------\ndef move_past_pdf(weeks, pwd, newdir1):\n c = 0\n\n for ext in ('pdf', ''):\n for file in glob(f'*.{ext}'):\n # print(file)\n path = rf'{pwd}\\{file}'\n # print(path)\n\n create_epoch = os.path.getctime(path)\n mod_epoch = os.path.getmtime(path)\n\n now_epoch = datetime.now().timestamp()\n\n diff_days_create = math.floor((now_epoch - create_epoch) / (60 * 60 * 24))\n diff_days_modify = math.floor((now_epoch - mod_epoch) / (60 * 60 * 24))\n\n\n\n if diff_days_modify >= weeks * 7:\n c += 1\n\n # print(file)\n # print(f'days since create: {diff_days_create}')\n # print(f'days since modify: {diff_days_mod}')\n # print('-' * 100)\n\n print(f'moved pdf... 
{file}')\n\n dest = rf'{pwd}\\{newdir1}\\{file}'\n # print(dest)\n\n # handles case if filename already exist\n try:\n os.rename(file, dest)\n except:\n os.remove(dest)\n\n os.rename(file, dest)\n\n # print(c)\n print('>> cleaned')\n\n\ndef main():\n pwd = os.getcwd()\n # print(pwd)\n\n newdir = 'archive_pic'\n try:\n os.mkdir(newdir)\n except:\n pass\n \n newdir1 = 'archive_pdf'\n try:\n os.mkdir(newdir1)\n except:\n pass\n\n #-----------------------------------------------------------------------------------------------------------\n # execute fns\n #-----------------------------------------------------------------------------------------------------------\n ip0 = input('Delete duplicate files? (y/n) ')\n if ip0 == 'y':\n del_dup_files()\n \n ip1 = input('Move all pictures to archive? (y/n) ')\n if ip1 == 'y':\n move_pics_archive(pwd, newdir)\n\n ip2 = input('Move all Exes to downloads? (y/n) ')\n if ip2 == 'y':\n move_packs_dl(pwd)\n\n wk = 2 ## SPECIFY past weeks as criteria to move\n num_word = {\n 1: 'one'\n , 2: 'two'\n , 3: 'three'\n , 4: 'four'\n , 5: 'five'\n , 6: 'six'\n , 7: 'seven'\n , 8: 'eight'\n , 9: 'nine'\n , 10: 'ten'\n }\n\n ip3 = input(f'Move {num_word[wk]} weeks and older PDFs to archive? (y/n) ')\n if ip3 == 'y':\n move_past_pdf(wk, pwd, newdir1)\n\n\n input('\\n\\nPress any key to exit...')\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"jameswniu/fun-code","sub_path":"run_on_desktop_to_clean_dups_n_old_pics_exes_pdfs.py","file_name":"run_on_desktop_to_clean_dups_n_old_pics_exes_pdfs.py","file_ext":"py","file_size_in_byte":4875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6641202004","text":"import numpy as np\nfrom pyannote.database import get_annotated\nfrom pyannote.core import Segment\nfrom pyannote.core import Timeline\n\nfrom pyannote.generators.fragment import random_segment\nfrom pyannote.generators.fragment import random_subsegment\nfrom pyannote.generators.fragment import SlidingSegments\nfrom pyannote.generators.batch import batchify\n\nfrom pyannote.audio.features import RawAudio\n\nfrom .base import LabelingTask\nfrom .base import LabelingTaskGenerator\nfrom .base import TASK_MULTI_CLASS_CLASSIFICATION\n\n\nnormalize = lambda wav: wav / (np.sqrt(np.mean(wav ** 2)) + 1e-8)\n\n\nclass OverlapDetectionGenerator(LabelingTaskGenerator):\n \"\"\"Batch generator for training overlap detection\n\n Parameters\n ----------\n feature_extraction : `pyannote.audio.features.FeatureExtraction`\n Feature extraction\n protocol : `pyannote.database.Protocol`\n subset : {'train', 'development', 'test'}\n frame_info : `pyannote.core.SlidingWindow`, optional\n Override `feature_extraction.sliding_window`. This is useful for\n models that include the feature extraction step (e.g. SincNet) and\n therefore output a lower sample rate than that of the input.\n frame_crop : {'center', 'loose', 'strict'}, optional\n Which mode to use when cropping labels. This is useful for models\n that include the feature extraction step (e.g. SincNet) and\n therefore use a different cropping mode. Defaults to 'center'.\n duration : float, optional\n Duration of sub-sequences. Defaults to 3.2s.\n snr_min, snr_max : int, optional\n Defines Signal-to-Overlap Ratio range in dB. Defaults to [0, 10].\n batch_size : int, optional\n Batch size. 
Defaults to 32.\n per_epoch : float, optional\n Total audio duration per epoch, in days.\n Defaults to one day (1).\n parallel : int, optional\n Number of prefetching background generators. Defaults to 1.\n Each generator will prefetch enough batches to cover a whole epoch.\n Set `parallel` to 0 to not use background generators.\n \"\"\"\n\n def __init__(self, feature_extraction, protocol, subset='train',\n frame_info=None, frame_crop=None, duration=3.2,\n snr_min=0, snr_max=10,\n batch_size=32, per_epoch=1, parallel=1):\n\n self.snr_min = snr_min\n self.snr_max = snr_max\n self.raw_audio_ = RawAudio(sample_rate=feature_extraction.sample_rate)\n\n super().__init__(feature_extraction, protocol, subset=subset,\n frame_info=frame_info, frame_crop=frame_crop,\n duration=duration,\n batch_size=batch_size, per_epoch=per_epoch,\n parallel=parallel, shuffle=True)\n\n def overlap_samples(self):\n \"\"\"Random overlap samples\n\n Returns\n -------\n samples : generator\n Generator that yields {'waveform': ..., 'y': ...} samples\n indefinitely.\n \"\"\"\n\n uris = list(self.data_)\n durations = np.array([self.data_[uri]['duration'] for uri in uris])\n probabilities = durations / np.sum(durations)\n\n while True:\n\n # choose file at random with probability\n # proportional to its (annotated) duration\n uri = uris[np.random.choice(len(uris), p=probabilities)]\n\n datum = self.data_[uri]\n current_file = datum['current_file']\n\n # choose one segment at random with probability\n # proportional to its duration\n segment = next(random_segment(datum['segments'], weighted=True))\n\n # choose random subsegment\n # duration = np.random.rand() * self.duration\n sequence = next(random_subsegment(segment, self.duration))\n\n # get corresponding waveform\n X = self.raw_audio_.crop(current_file,\n sequence,\n mode='center',\n fixed=self.duration)\n\n # get corresponding labels\n y = datum['y'].crop(sequence,\n mode=self.frame_crop,\n fixed=self.duration)\n\n yield {'waveform': normalize(X),\n 'y': y}\n\n def sliding_samples(self):\n \"\"\"Sliding window\n\n Returns\n -------\n samples : generator\n Generator that yields {'waveform': ..., 'y': ...} samples\n indefinitely.\n \"\"\"\n\n uris = list(self.data_)\n durations = np.array([self.data_[uri]['duration'] for uri in uris])\n probabilities = durations / np.sum(durations)\n\n sliding_segments = SlidingSegments(duration=self.duration,\n step=self.duration,\n source='annotated')\n\n while True:\n\n # shuffle files\n np.random.shuffle(uris)\n\n # loop on shuffled files\n for uri in uris:\n\n datum = self.data_[uri]\n\n # make a copy of current file\n current_file = dict(datum['current_file'])\n\n # read waveform for the whole file\n waveform = self.raw_audio_(current_file)\n\n # randomly shift 'annotated' segments start time so that\n # we avoid generating exactly the same subsequence twice\n shifted_segments = [\n Segment(s.start + np.random.random() * self.duration, s.end)\n for s in get_annotated(current_file)]\n # deal with corner case where a shifted segment would be empty\n shifted_segments = [s for s in shifted_segments if s]\n annotated = Timeline(segments=shifted_segments)\n current_file['annotated'] = annotated\n\n if self.shuffle:\n samples = []\n\n for sequence in sliding_segments.from_file(current_file):\n\n X = waveform.crop(sequence, mode='center',\n fixed=self.duration)\n\n y = datum['y'].crop(sequence, mode=self.frame_crop,\n fixed=self.duration)\n\n sample = {'waveform': normalize(X),\n 'y': y}\n\n if self.shuffle:\n samples.append(sample)\n 
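# without shuffling, each subsequence is yielded immediately\n                    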
else:\n yield sample\n\n if self.shuffle:\n np.random.shuffle(samples)\n for sample in samples:\n yield sample\n\n @property\n def signature(self):\n return {'X': {'@': (None, np.stack)},\n 'y': {'@': (None, np.stack)}}\n\n @property\n def batches_per_epoch(self):\n \"\"\"Number of batches needed to complete an epoch\"\"\"\n duration_per_epoch = self.per_epoch * 24 * 60 * 60\n duration_per_batch = self.duration * self.batch_size\n return int(np.ceil(duration_per_epoch / duration_per_batch))\n\n def __call__(self):\n \"\"\"(Parallelized) batch generator\"\"\"\n\n # number of batches needed to complete an epoch\n batches_per_epoch = self.batches_per_epoch\n\n def generator():\n\n sliding_samples = self.sliding_samples()\n overlap_samples = self.overlap_samples()\n\n while True:\n\n # get fixed duration random sequence\n original = next(sliding_samples)\n\n if np.random.rand() < 0.5:\n pass\n\n else:\n # get random overlapping sequence\n overlap = next(overlap_samples)\n\n # select SNR at random\n snr = (self.snr_max - self.snr_min) * np.random.random_sample() + self.snr_min\n alpha = np.exp(-np.log(10) * snr / 20)\n\n original['waveform'] += alpha * overlap['waveform']\n original['y'] += overlap['y']\n\n speaker_count = np.sum(original['y'], axis=1, keepdims=True)\n original['y'] = np.int64(speaker_count > 1)\n\n # run feature extraction\n original['duration'] = self.duration\n original['X'] = self.feature_extraction.crop(\n original, Segment(0, self.duration), mode='center',\n fixed=self.duration)\n\n del original['waveform']\n del original['duration']\n\n yield original\n\n generators = []\n\n if self.parallel:\n for _ in range(self.parallel):\n\n # initialize one sample generator\n samples = generator()\n\n # batchify it and make sure at least\n # `batches_per_epoch` batches are prefetched.\n batches = batchify(samples, self.signature,\n batch_size=self.batch_size,\n prefetch=batches_per_epoch)\n\n # add batch generator to the list of (background) generators\n generators.append(batches)\n else:\n\n # initialize one sample generator\n samples = generator()\n\n # batchify it without prefetching\n batches = batchify(samples, self.signature,\n batch_size=self.batch_size, prefetch=0)\n\n # add it to the list of generators\n # NOTE: this list will only contain one generator\n generators.append(batches)\n\n # loop on (background) generators indefinitely\n while True:\n for batches in generators:\n # yield `batches_per_epoch` batches from current generator\n # so that each epoch is covered by exactly one generator\n for _ in range(batches_per_epoch):\n yield next(batches)\n\n @property\n def specifications(self):\n return {\n 'task': TASK_MULTI_CLASS_CLASSIFICATION,\n 'X': {'dimension': self.feature_extraction.dimension},\n 'y': {'classes': ['non_overlap', 'overlap']},\n }\n\n\nclass OverlapDetection(LabelingTask):\n \"\"\"Train overlap detection\n\n Parameters\n ----------\n duration : float, optional\n Duration of sub-sequences. Defaults to 3.2s.\n batch_size : int, optional\n Batch size. Defaults to 32.\n per_epoch : float, optional\n Total audio duration per epoch, in days.\n Defaults to one day (1).\n parallel : int, optional\n Number of prefetching background generators. 
Defaults to 1.\n Each generator will prefetch enough batches to cover a whole epoch.\n Set `parallel` to 0 to not use background generators.\n\n Usage\n -----\n >>> task = OverlapDetection()\n\n # precomputed features\n >>> from pyannote.audio.features import Precomputed\n >>> precomputed = Precomputed('/path/to/features')\n\n # model architecture\n >>> from pyannote.audio.labeling.models import StackedRNN\n >>> model = StackedRNN(precomputed.dimension, task.n_classes)\n\n # evaluation protocol\n >>> from pyannote.database import get_protocol\n >>> protocol = get_protocol('Etape.SpeakerDiarization.TV')\n\n # train model using protocol training set\n >>> for epoch, model in task.fit_iter(model, precomputed, protocol):\n ... pass\n\n \"\"\"\n\n def __init__(self, **kwargs):\n super(OverlapDetection, self).__init__(**kwargs)\n\n def get_batch_generator(self, feature_extraction, protocol, subset='train',\n frame_info=None, frame_crop=None):\n \"\"\"\n frame_info : `pyannote.core.SlidingWindow`, optional\n Override `feature_extraction.sliding_window`. This is useful for\n models that include the feature extraction step (e.g. SincNet) and\n therefore output a lower sample rate than that of the input.\n frame_crop : {'center', 'loose', 'strict'}, optional\n Which mode to use when cropping labels. This is useful for models\n that include the feature extraction step (e.g. SincNet) and\n therefore use a different cropping mode. Defaults to 'center'.\n \"\"\"\n return OverlapDetectionGenerator(\n feature_extraction,\n protocol, subset=subset,\n frame_info=frame_info,\n frame_crop=frame_crop,\n duration=self.duration,\n per_epoch=self.per_epoch,\n batch_size=self.batch_size,\n parallel=self.parallel)\n","repo_name":"jsalt-coml/pyannote-audio","sub_path":"pyannote/audio/labeling/tasks/overlap_detection.py","file_name":"overlap_detection.py","file_ext":"py","file_size_in_byte":12646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39263515357","text":"from flask import Flask\n\nfrom .config import app_config\nfrom .models import db, bcrypt\n#from createtable import db\n\nfrom .views.UserView import user_api as user_blueprint\nfrom .views.PriceView import price_api as price_blueprint\n\nenv_name = 'development'\n\n\ndef create_app(env_name):\n \"\"\"\n Create app\n \"\"\"\n\n # app initiliazation\n app = Flask(__name__)\n\n app.config.from_object(app_config[env_name])\n\n # initializing bcrypt and db\n bcrypt.init_app(app)\n db.init_app(app)\n\n app.register_blueprint(user_blueprint, url_prefix='/users')\n app.register_blueprint(price_blueprint, url_prefix='/prices')\n #app.register_blueprint(user_blueprint, url_prefix='/api/v1//users')\n #app.register_blueprint(price_blueprint, url_prefix='/api/v1//prices')\n\n @app.route('/', methods=['GET'])\n def index():\n print(\"testing!\")\n return 'Congratulations! 
Your API is working'\n\n @app.errorhandler(404)\n @app.route(\"/error404\")\n def page_not_found(error):\n return app.send_static_file('404.html')\n\n\n @app.errorhandler(500)\n @app.route(\"/error500\")\n def requests_error(error):\n return app.send_static_file('500.html')\n\n return app\n","repo_name":"rebootshen/loratech_test","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3451079891","text":"import numpy as np\n\n\ndef mutate(p):\n point_1, point_2 = sorted(np.random.choice(len(p), 2, replace=False))\n # Flip\n ret = np.copy(p)\n tmp = p[point_1:point_2 + 1]\n ret[point_1:point_2 + 1] = tmp[::-1]\n return ret\n\n\n# Partially Mapped Crossover Operator\ndef partially_mapped_crossover(p1, p2, point_1, point_2):\n def __crossover(p1, p2):\n child = np.empty_like(p1)\n child.fill(-1)\n child[point_1 + 1:point_2 + 1] = p2[point_1 + 1:point_2 + 1]\n for (index, value) in enumerate(child):\n if value != -1:\n continue\n p_value = p1[index]\n while p_value in child:\n p_value = p1[np.where(p2 == p_value)[0][0]]\n child[index] = p_value\n return child\n\n child_1 = __crossover(p1, p2)\n child_2 = __crossover(p2, p1)\n return child_1, child_2\n","repo_name":"dranhclub/Truck-drone-tandem-delivery-network","sub_path":"mfea_pkg/evolution_operators.py","file_name":"evolution_operators.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"10526624552","text":"from cobaya.model import get_model\nfrom cobaya.yaml import yaml_load\nfrom low_discrepancy import QuasiRandomSequence, Parameters\nimport chaospy as cp\nimport pyDOE\nfrom mpi4py import MPI\nimport numpy as np\nimport h5py\nimport yaml\nimport sys\n\n\n\ncomm = MPI.COMM_WORLD\nrank = comm.Get_rank()\nnproc = comm.Get_size()\n\nclass Parameters():\n \"\"\"Returns a vector of parameters to sample at.\"\"\"\n\n def __init__(self, pmin, pmax, scheme='qrs', ntot=None):\n \"\"\"Takes the range of parameters, only varying those where pmin!=pmax.\n The dimensions of pmin and pmax should equal the total number\n of parameters for the model.\"\"\"\n self.pmin = pmin\n self.pmax = pmax\n self.ndim = np.sum(np.abs(pmax-pmin) > 0)\n self.scheme = scheme\n if scheme == 'qrs':\n self.seq = QuasiRandomSequence(self.ndim)\n elif scheme == 'sobol':\n import chaospy as cp\n self.seq = cp.create_sobol_samples(ntot, self.ndim).T\n elif scheme == 'lhs':\n self.seq = pyDOE.lhs(self.ndim, samples=ntot)\n elif scheme == 'lhs_maximin':\n self.seq = pyDOE.lhs(self.ndim, samples=ntot, criterion='maximin')\n elif scheme == 'aemulus':\n self.seq = np.genfromtxt('aemulus_alpha_lh.txt')\n elif scheme == 'aemulus_lowsig8':\n self.seq = np.genfromtxt('aemulus_alpha_lowsig8_lh.txt') \n elif scheme == 'aemulus_test':\n self.seq = np.genfromtxt('aemulus_alpha_test.txt')\n\n def sample(self, n):\n \"\"\"Gets the n'th vector of parameter values.\"\"\"\n pars = self.pmin.copy()\n vary = np.abs(self.pmax-self.pmin) > 0\n if self.scheme == 'qrs':\n v = self.seq.get_vector(n)\n else:\n v = self.seq[n]\n\n if 'aemulus' not in self.scheme:\n pars[vary] = self.pmin[vary] + \\\n (self.pmax-self.pmin)[vary]*v\n else:\n pars = self.seq[n]\n return pars\n\n\ndef generate_models(params, param_names, model, emu_info, nstart=0, nend=10,\n params_fast=None, param_names_fast=None, nfast_per_slow=1):\n\n if params_fast is None:\n allslow = True\n if 
nfast_per_slow != 1:\n            print('Setting nfast_per_slow=1')\n            nfast_per_slow = 1\n    else:\n        allslow = False\n        assert(nfast_per_slow > 1) #otherwise no point in fast\n    \n    npars = nend - nstart\n    \n    if param_names_fast is not None:\n        param_names_all = param_names + param_names_fast\n    else:\n        param_names_all = param_names\n    \n    npars_slow = (npars + nfast_per_slow - 1) // nfast_per_slow\n    npars_this = ((npars_slow + nproc - 1) // nproc) * nfast_per_slow\n    \n    out = {'params': np.zeros((npars_this, len(param_names_all)))} \n    count = 0\n    for n in range(npars_slow):\n        if n % nproc == rank:\n            if rank==0:\n                print(n)\n                sys.stdout.flush()\n\n            pslow = params.sample(n)\n            for m in range(nfast_per_slow):\n                if not allslow:\n                    fvec = params_fast.sample(n * nfast_per_slow + m)\n                    pvec = np.concatenate([pslow, fvec])\n                else:\n                    pvec = pslow\n\n                pars = dict(zip(param_names_all, pvec))\n                out['params'][count] = pvec\n                model.logposterior(pars)\n\n                if 'provider' in emu_info:\n                    for thy in emu_info['provider']:\n                        pred = model.provider.get_result(thy)\n                        \n                        if thy not in out:\n                            tsize = [npars_this]\n                            [tsize.append(d) for d in pred.shape]\n                            out[thy] = np.zeros(tsize)\n                        out[thy][count] = pred\n\n                if 'likelihood' in emu_info:\n                    for like in emu_info['likelihood']:\n                        if hasattr(emu_info['likelihood'][like], '__iter__'):\n                            for l in emu_info['likelihood'][like]:\n                                pred = getattr(model.likelihood[like], l)\n                                name = '{}.{}'.format(like, l)\n                                if name not in out:\n                                    tsize = [npars_this]\n                                    [tsize.append(d) for d in pred.shape]\n                                    out[name] = np.zeros(tsize)\n                                out[name][count] = pred\n                        \n                        else:\n                            attr = emu_info['likelihood'][like]\n                            pred = getattr(model.likelihood[like], attr)\n                            name = '{}.{}'.format(like, attr)\n                            if name not in out:\n                                tsize = [npars_this]\n                                [tsize.append(d) for d in pred.shape]\n                                out[name] = np.zeros(tsize)\n                            out[name][count] = pred\n                count += 1\n\n\n    with h5py.File('{}.{}'.format(emu_info['output_filename'], rank), 'w') as fp:\n        for k in out:\n            shape = out[k].shape\n            fp.create_dataset(k, shape)\n            fp[k][:] = out[k] \n\n    comm.Barrier()\n    if rank==0:\n        with h5py.File(emu_info['output_filename'], 'w') as fp:\n            for k in out:\n                for n in range(nproc):\n                    fp['{}_{}'.format(k, n)] = h5py.ExternalLink('{}.{}'.format(emu_info['output_filename'], n), k)\n\nif __name__ == '__main__':\n\n\n\n    info_txt = sys.argv[1]\n    with open(info_txt, 'rb') as fp:\n        info = yaml.load(fp, Loader=yaml.FullLoader)\n\n    emu_info = info['emulate'] \n#    info['debug'] = emu_info.pop('debug', False)\n\n    model = get_model(info)\n\n    bounds = model.prior.bounds()\n    param_names = model.prior.params\n\n\n    nstart = emu_info.pop('nstart', 0)\n    nend = emu_info.pop('nend', 100)\n    param_names_fast = emu_info.pop('param_names_fast', None)\n    nfast = emu_info.pop('nfast_per_slow', 1)\n    design_scheme = emu_info.pop('design_scheme', 'qrs')\n    seed = emu_info.pop('seed', 0)\n\n    #seed with same params\n    np.random.seed(seed) \n\n    if param_names_fast is not None:\n        param_names = [p for p in param_names if p not in param_names_fast]\n        fast_idx = [model.prior.params.index(f) for f in param_names_fast]\n        slow_idx = [model.prior.params.index(f) for f in param_names]\n\n        bounds_fast = bounds[fast_idx]\n        bounds_slow = bounds[slow_idx]\n\n        params = Parameters(bounds_slow[:, 0], bounds_slow[:, 1], scheme=design_scheme, ntot=(nend - nstart))\n        params_fast = Parameters(bounds_fast[:, 0], bounds_fast[:, 1], scheme=design_scheme, ntot=(nend- nstart))\n    else:\n        params = Parameters(bounds[:, 0], bounds[:, 1], scheme=design_scheme, ntot=(nend - nstart))\n        params_fast = None\n    \n    
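# Evaluate the model at every design point and write the requested products to HDF5 (one file per MPI rank, stitched together by rank 0 via ExternalLinks)\n    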
generate_models(params, param_names, model, emu_info, nstart=nstart, nend=nend,\n                    params_fast=params_fast, param_names_fast=param_names_fast,\n                    nfast_per_slow=nfast)\n\n\n","repo_name":"sfschen/EmulateLSS","sub_path":"generate_training_data.py","file_name":"generate_training_data.py","file_ext":"py","file_size_in_byte":7216,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"31420919799","text":"from flask import request\nfrom flask_restplus import Resource\nimport logging\nfrom ..util.dto import OrderDto\nfrom ..service.order_service import get_all_available_orders, save_order, delete_order, get_order_byid, get_alluser_orders, update_order_status\n\napi = OrderDto.api\norderdetails = OrderDto.order_details\nupdate_status = OrderDto.updatestatus\n\n@api.route('/getall_orders/<int:page>/<int:row>')\nclass GetAvailableOrders(Resource):\n    \"\"\"\n    Get all available orders\n    \"\"\"\n    @api.doc('get all orders')\n    def get(self, page, row):\n        # get the post data\n        try:\n            return get_all_available_orders(page, row)\n        except Exception as e:\n            print(e)\n            print('get_all_available_orders controller error:' + str(e))\n\n\n@api.route('/save_order')\nclass SaveOrder(Resource):\n    \"\"\"\n    Save order\n    \"\"\"\n    @api.doc('save orders')\n    @api.expect(orderdetails, validate=True)\n    def post(self):\n        # get the post data\n        try:\n            data = request.json\n            return save_order(data)\n        except Exception as e:\n            print(e)\n            print('save_order controller error:' + str(e))\n\n\n@api.route('/delete_order/<int:order_id>')\nclass DeleteOrder(Resource):\n    @api.doc('Delete order from list')\n    def get(self, order_id):\n        try:\n            \"\"\"Delete order from list\"\"\"\n            return delete_order(order_id)\n        except Exception as e:\n            print(\"delete order controller error: \"+str(e))\n            return \"\"\n\n@api.route('/get_order_byid/<int:order_id>')\nclass AvailableOrders(Resource):\n    \"\"\"\n    Get order by id\n    \"\"\"\n    @api.doc('get order by id')\n    def get(self, order_id):\n        # get the post data\n        try:\n            return get_order_byid(order_id)\n        except Exception as e:\n            print(e)\n            print('get_order_byid controller error:' + str(e))\n\n\n@api.route('/get_alluser_orders/<int:page>/<int:row>')\nclass GetAlluserOrders(Resource):\n    \"\"\"\n    Get all available orders\n    \"\"\"\n    @api.doc('get all users orders')\n    def get(self, page, row):\n        # get the post data\n        try:\n            return get_alluser_orders(page, row)\n        except Exception as e:\n            print(e)\n            print('get_alluser_orders controller error:' + str(e))\n\n@api.route('/update_order_status')\nclass UpdateOrderstatus(Resource):\n    \"\"\"\n    Update order\n    \"\"\"\n    @api.doc('update order status')\n    @api.expect(update_status, validate=True)\n    def post(self):\n        # get the post data\n        try:\n            data = request.json\n            return update_order_status(data)\n        except Exception as e:\n            print(e)\n            print('update_order_status controller error:' + str(e))","repo_name":"NithiyananthamK/factoryapp","sub_path":"API/app/factoryapp/orders/controller/order_controller.py","file_name":"order_controller.py","file_ext":"py","file_size_in_byte":2740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71037325868","text":"\"\"\"SnowFox Utils: Google Calendar.\"\"\"\n\nimport datetime as dt\nimport logging\n\nfrom gcsa.calendar import CalendarListEntry\nfrom gcsa.event import Event\nfrom gcsa.google_calendar import GoogleCalendar\n\nfrom snowfox.utils.time import SFDate\n\n\ndef get_calendar_events_for_today(gc: GoogleCalendar) -> list[Event]:\n    \"\"\"Get events for today.\"\"\"\n    logging.info(\"Get events for 
today\")\n today = SFDate.today().get_date()\n result = []\n for event in gc.get_events(today, today, single_events=True):\n result.append(event)\n return result\n\n\ndef get_all_user_calendars(\n root_gc: GoogleCalendar,\n) -> list[tuple[CalendarListEntry, GoogleCalendar]]:\n \"\"\"Get all user calendars.\"\"\"\n logging.info(\"Get all user calendars\")\n result = []\n for cal_entry in root_gc.get_calendar_list():\n result.append((cal_entry, GoogleCalendar(cal_entry.id)))\n return result\n\n\ndef get_all_events_for_today() -> list[tuple[CalendarListEntry, Event]]:\n \"\"\"Get all events for today.\"\"\"\n logging.info(\"Get all events for today\")\n result = []\n gc = GoogleCalendar()\n for cal_entry, cal in get_all_user_calendars(gc):\n for event in get_calendar_events_for_today(cal):\n result.append((cal_entry, event))\n return result\n\n\ndef daily_events_to_markdown() -> str:\n \"\"\"Convert events to markdown.\"\"\"\n logging.info(\"Convert events to markdown\")\n result = \"| Event | Calendar | Start | End |\\n\"\n result += \"| :--- | :--- | :--- | :--- |\\n\"\n all_events = get_all_events_for_today()\n all_events = sorted(\n all_events,\n key=lambda x: x[1].start.time() if isinstance(x[1].start, dt.datetime) else dt.time(0, 0),\n )\n for cal_entry, event in all_events:\n start = (\n \"\"\n if event.start is None\n else (\n event.start.strftime(\"%H:%M\")\n if isinstance(event.start, dt.datetime)\n else event.start.strftime(\"%m-%d\")\n )\n )\n end = (\n \"\"\n if event.end is None\n else (\n event.end.strftime(\"%H:%M\")\n if isinstance(event.end, dt.datetime)\n else event.end.strftime(\"%m-%d\")\n )\n )\n # result += f\"* `{cal_entry.summary}` {event.summary} ({event.start} - {event.end})\\n\"\n result += f\"| {event.summary} | `{cal_entry.summary}` | {start} | {end} |\\n\"\n return result\n","repo_name":"LER0ever/Snowfox","sub_path":"src/snowfox/utils/gcal.py","file_name":"gcal.py","file_ext":"py","file_size_in_byte":2437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11325629972","text":"import csv\n\nfrom claseMenu import Menu\n\nfrom claseLista import Lista\n\nfrom clasePlanAhorro import PlanAhorro\n\ndef leer_datos() -> Lista:\n planes = Lista()\n archivo = open('planes.csv')\n reader = csv.reader(archivo, delimiter=';')\n for fila in reader:\n planes.agregarPlan(PlanAhorro(int(fila[0]),\n fila[1],\n fila[2],\n float(fila[3])))\n if PlanAhorro.getCantidadCuotas() != int(fila[4]):\n PlanAhorro.setCantidadCuotas(int(fila[4]))\n if PlanAhorro.getCantCuotasPagas() != int(fila[5]):\n PlanAhorro.setCantidadCuotasPagas(int(fila[5]))\n #print('Cantidad cuotas del plan: {}, Cantidad cutas pagas necesarias para...: {}'.format(PlanAhorro.getCantidadCuotas(),\n # PlanAhorro.getCantCuotasPagas()))\n archivo.close()\n return planes\n\ndef menu(planes):\n menu = Menu(planes)\n exit = False\n while not exit:\n print(\"---------------MENU---------------\\n\"\n \"1.Actualizar el valor del vehículo de cada plan.\\n\"\n \"2.Dado un valor, mostrar código del plan, modelo y versión del vehículo cuyo valor de la cuota sea inferior al valor dado.\\n\"\n \"3.Mostrar el monto que se debe haber pagado para licitar el vehículo.\\n\"\n \"4.Dado el código de un plan, modificar la cantidad cuotas que debe tener pagas para licitar..\\n\"\n \"0.Salir\")\n opt = int(input('Ingrese una opcion: '))\n menu.option(opt)\n exit = opt==0\n\nif __name__ == '__main__':\n planes = leer_datos()\n 
menu(planes)\n","repo_name":"manurdls/POO-Unidad-2","sub_path":"Ejercicio 5/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"23055385570","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 7 10:56:26 2021\n\n@author: matth\n\"\"\"\nfrom room import Room\n\nclass Floor:\n def __init__(self,longueur,largeur,num):\n \"\"\"\n Parameters\n ----------\n longueur : INT\n length of the floor, along the y axis.\n largeur : INT\n width of the floor, along the x axis.\n num : INT\n number of the floor.\n \"\"\"\n self.longueur=longueur\n self.largeur=largeur\n self.rooms=[]\n self.floorNb=num\n \n def addRoom(self,room):\n \"\"\"\n Parameters\n ----------\n room : Room\n room to add in the floor.\n \"\"\"\n if(room.x+room.largeur>self.largeur or room.y+room.longueur>self.longueur):\n print(\"room too large\")\n return\n for r in self.rooms:\n if((room.x>=r.x+r.largeur or room.x+room.largeur<=r.x or room.y>=r.y+r.longueur or room.y+room.longueur<=r.y)==False):\n print(\"impossible to add this room\")\n return\n self.rooms.append(room)\n \n ","repo_name":"Crazyp0/DigitalTwinCS","sub_path":"floor.py","file_name":"floor.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"70779496109","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 23 09:37:36 2019\n\n@author: lhjin\n\"\"\"\nfrom xlwt import *\nimport numpy as np\nimport os\nimport torch\nfrom torch.utils.data import DataLoader\nfrom skimage import io, transform\nimport random \n\nimport sys\npath = '/home/star/0_code_lhj/DL-SIM-github/Training_codes/UNet/'\nsys.path.append(path)\n\nfrom unet_model import UNet\n\nclass ToTensor(object):\n \"\"\"Convert ndarrays in sample to Tensors.\"\"\"\n\n def __call__(self, sample):\n data_in, data_out = sample['image_in'], sample['groundtruth']\n\n # swap color axis because\n # numpy image: H x W x C\n # torch image: C X H X W\n #image = image.transpose((2, 0, 1))\n #landmarks = landmarks.transpose((2, 0, 1))\n \n #return {'image': image, 'landmarks': torch.from_numpy(landmarks)}\n return {'image_in': torch.from_numpy(data_in),\n 'groundtruth': torch.from_numpy(data_out)}\n\nclass ReconsDataset(torch.utils.data.Dataset):\n def __init__(self, all_data_path, maximum_intensity_4normalization_path, transform, training_dataset, in_size, train_in_size):\n self.all_data_path = all_data_path\n self.maximum_intensity_4normalization_path = maximum_intensity_4normalization_path\n self.transform = transform\n self.in_size = in_size\n self.training_dataset = training_dataset\n self.dirs_data = os.listdir(self.all_data_path)\n self.train_in_size = train_in_size\n \n self.dirs_data.sort() # make sure that the filenames have a fixed order before shuffling\n random.seed(1000)\n random.shuffle(self.dirs_data) # shuffles the ordering of filenames (deterministic given the chosen seed)\n \n split_1 = int(0.9 * len(self.dirs_data))\n self.dirs_training = self.dirs_data[:split_1]\n self.dirs_testing = self.dirs_data[split_1+1:]\n \n def __len__(self):\n if self.training_dataset:\n dirs = self.dirs_training \n else:\n dirs = self.dirs_testing\n return len(dirs) \n\n def __getitem__(self, idx):\n if self.training_dataset:\n self.dirs = self.dirs_training \n else:\n self.dirs = self.dirs_testing\n \n max_intensity = 
np.load(self.maximum_intensity_4normalization_path,allow_pickle=True)\n        max_HE_SRRF = max_intensity[0]['objValue']\n        max_HE = max_intensity[2]['objValue']\n        \n        file_name = os.path.join(self.all_data_path, self.dirs[idx])\n        data_all = np.load(file_name)\n        \n        data_gt = data_all[:,:,0]/max_HE_SRRF\n        \n        train_in_size = self.train_in_size\n        \n        data_in = np.zeros((self.in_size, self.in_size, train_in_size))\n        for i in range(train_in_size):\n            data_in[:,:,i] = data_all[:,:,i+2]\n        data_in = data_in/max_HE\n        \n        sample = {'image_in': data_in, 'groundtruth': data_gt}\n        \n        if self.transform:\n            sample = self.transform(sample)\n        return sample\n\ndef get_learning_rate(epoch):\n    limits = [3, 8, 12]\n    lrs = [1, 0.1, 0.05, 0.005]\n    assert len(lrs) == len(limits) + 1\n    for lim, lr in zip(limits, lrs):\n        if epoch < lim:\n            return lr * learning_rate\n    return lrs[-1] * learning_rate\n\ndef val_during_training(dataloader):\n    model.eval()\n    \n    loss_all = np.zeros((len(dataloader)))\n    for batch_idx, items in enumerate(dataloader):\n        image = items['image_in']\n        gt = items['groundtruth']\n        \n        # (B, H, W, C) -> (B, C, H, W)\n        image = np.swapaxes(image, 1,3)\n        image = np.swapaxes(image, 2,3)\n        image = image.float()\n        image = image.cuda(cuda) \n        \n        gt = gt.squeeze()\n        gt = gt.float()\n        gt = gt.cuda(cuda)\n        \n        pred = model(image).squeeze()\n        loss0 = (pred-gt).abs().mean()\n        \n        loss_all[batch_idx] = loss0.item()\n        \n    mae_m, mae_s = loss_all.mean(), loss_all.std()\n    return mae_m, mae_s\n\n\nif __name__ == \"__main__\":\n    cuda = torch.device('cuda:0')\n    learning_rate = 0.001\n    # momentum = 0.99\n    # weight_decay = 0.0001\n    batch_size = 1\n    input_size = 5\n    output_size = 1\n    SRRFDATASET = ReconsDataset(all_data_path=\"/media/star/LuhongJin/UNC_data/SRRF/New_training_20190829/0NPY_Dataset/Dataset/Microtubule/\",\n                                maximum_intensity_4normalization_path=\"/home/star/0_code_lhj/DL-SIM-github/Training_codes/UNet/Max_intensity.npy\",\n                                transform = ToTensor(),\n                                training_dataset = True,\n                                in_size = 320,\n                                train_in_size = input_size)\n    train_dataloader = torch.utils.data.DataLoader(SRRFDATASET, batch_size=batch_size, shuffle=True, pin_memory=True) # better than for loop\n    \n    SRRFDATASET2 = ReconsDataset(all_data_path=\"/media/star/LuhongJin/UNC_data/SRRF/New_training_20190829/0NPY_Dataset/Dataset/Microtubule/\",\n                                maximum_intensity_4normalization_path=\"/home/star/0_code_lhj/DL-SIM-github/Training_codes/UNet/Max_intensity.npy\",\n                                transform = ToTensor(),\n                                training_dataset = False,\n                                in_size = 320,\n                                train_in_size = input_size)\n    validation_dataloader = torch.utils.data.DataLoader(SRRFDATASET2, batch_size=batch_size, shuffle=True, pin_memory=True) # better than for loop\n\n    model = UNet(n_channels=input_size, n_classes=output_size)\n\n    print(\"{} parameters in total\".format(sum(x.numel() for x in model.parameters())))\n    model.cuda(cuda)\n    optimizer = torch.optim.Adam(model.parameters(),lr=learning_rate, betas=(0.9, 0.999))\n\n    loss_all = np.zeros((2000, 4))\n    for epoch in range(2000):\n        \n        mae_m, mae_s = val_during_training(train_dataloader)\n        loss_all[epoch,0] = mae_m\n        loss_all[epoch,1] = mae_s\n        mae_m, mae_s = val_during_training(validation_dataloader) \n        loss_all[epoch,2] = mae_m\n        loss_all[epoch,3] = mae_s\n        \n        file = Workbook(encoding = 'utf-8')\n        table = file.add_sheet('loss_all')\n        for i,p in enumerate(loss_all):\n            for j,q in enumerate(p):\n                table.write(i,j,q)\n        file.save('/home/star/0_code_lhj/DL-SIM-github/Training_codes/UNet/loss_UNet_SRRF_microtubule.xls')\n\n        lr = get_learning_rate(epoch)\n        for p in optimizer.param_groups:\n            p['lr'] = lr\n            
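# apply the scheduled rate to every parameter group\n            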
print(\"learning rate = {}\".format(p['lr']))\n \n for batch_idx, items in enumerate(train_dataloader):\n \n image = items['image_in']\n gt = items['groundtruth']\n \n model.train()\n \n image = np.swapaxes(image, 1,3)\n image = np.swapaxes(image, 2,3)\n image = image.float()\n image = image.cuda(cuda) \n \n gt = gt.squeeze()\n gt = gt.float()\n gt = gt.cuda(cuda)\n \n pred = model(image).squeeze()\n\n loss = (pred-gt).abs().mean() + 5 * ((pred-gt)**2).mean()\n \n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n print (\"[Epoch %d] [Batch %d/%d] [loss: %f]\" % (epoch, batch_idx, len(train_dataloader), loss.item()))\n\n if epoch % 50 == 49:\n torch.save(model.state_dict(), \"/home/star/0_code_lhj/DL-SIM-github/Training_codes/UNet/UNet_SRRF_microtubule_\"+str(epoch+1)+\".pkl\")\n","repo_name":"drbeiliu/DeepLearning","sub_path":"Training_codes/UNet/training_SRRF.py","file_name":"training_SRRF.py","file_ext":"py","file_size_in_byte":7629,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"37"} +{"seq_id":"25918586758","text":"def PatternCount(Text, Pattern):\n #Counts the number of times input 'Pattern' appears in string 'Text'\n #by iterating through text.\n count = 0\n for i in range(len(Text)-len(Pattern)+1):\n #Iterates through each index in 'Pattern' while accounting\n #for Pattern length.\n if Text[i:i+len(Pattern)] == Pattern:\n #Assessess the string beginning with index that is of\n #the same length as Pattern.\n count = count+1\n #If the string of the same length as Pattern is equal to\n #the input Pattern, the PatternCount is increased.\n return count\n","repo_name":"mjmcandrew/Coursera_2020","sub_path":"Bioinformatics1_Spring2020/Func_PatternCount.py","file_name":"Func_PatternCount.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"1737237853","text":"from sys import stderr\nfrom typing import Iterable\n\nfrom turing.programs import PROGRAMS\nfrom .utils import TuringAction, TuringMemory\nfrom .types import Symbol, State\nfrom .errors import NoTuringActionFoundException\n\n\nclass TuringProgram():\n def __init__(self, pgrm: Iterable[TuringAction], input: Iterable[Symbol], head=0):\n self.pgrm = pgrm\n self.memory = TuringMemory(list(input), head=head)\n\n def _get_action(self):\n action_to_exec = None\n memory = self.memory\n for action in self.pgrm:\n if action.match(memory.get_step()):\n action_to_exec = action\n break\n if not action_to_exec:\n raise NoTuringActionFoundException(str(memory.get_step()))\n return action_to_exec\n\n def run(self):\n while self.memory.get_state() != State.END:\n action_to_exec = self._get_action()\n new_step, direction = action_to_exec.exec()\n self.memory.make_step(new_step, direction)\n print(self.memory)\n\n\ndef load_package():\n pgrm_keys = list(PROGRAMS.keys())\n pgrm_list = '\\n'.join(f'{i}) {name}' for i,\n name in enumerate(pgrm_keys))\n print(f'Select your program :\\n\\n{pgrm_list}\\n')\n while True:\n try:\n selected_index = int(input('> '))\n assert 0 <= selected_index < len(pgrm_keys)\n except (ValueError, AssertionError):\n print('Bad input', file=stderr)\n continue\n break\n return PROGRAMS[pgrm_keys[selected_index]]\n\n\ndef main():\n package = load_package()\n pgrm = TuringProgram(package.PROGRAM, package.INPUT)\n 
pgrm.run()\n","repo_name":"lucasmrdt/turing-machine","sub_path":"turing/turing.py","file_name":"turing.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70585620269","text":"import copy\nimport time\n\nimport jax\nimport jax.numpy as jnp\nimport jax.scipy as jsp\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy import integrate, interpolate\nfrom sklearn import metrics\n\n\ndef model(t, y, args):\n    \"\"\"\n    The model is a forced oscillator given by the second-order, scalar-valued\n    linear ODE y'' + c*y' + k*y = f * cos(w*t)\n\n    where:\n        y   : displacement\n        y'  : velocity\n        y'' : acceleration\n        c   : friction coefficient\n        k   : spring stiffness\n        f   : amplitude of the forcing\n        w   : frequency of the forcing\n        t   : time\n\n    This second-order scalar ODE is transformed into a first-order system.\n    \"\"\"\n    c, k, f, w = args\n\n    return jnp.array([\n        y[1],\n        f * jnp.cos(w*t) - k * y[0] - c*y[1]\n    ])\n\ndef forward_sensitivity_model(t, s, args):\n    \"\"\"\n    This solves a system of (P+1)*N differential equations (the original ODE\n    has to be solved alongside the sensitivities)\n    \"\"\"\n    y = s.reshape((2, 5))[:, 0]\n    sensitivities = s.reshape((2, 5))[:, 1:]\n\n    del_f__del_u = jax.jacobian(model, argnums=1)(t, y, args)\n    del_f__del_theta = jnp.array(jax.jacobian(model, argnums=2)(t, y, args)).T\n\n    original_rhs = model(t, y, args).reshape((-1, 1))\n    sensitivity_rhs = del_f__del_u @ sensitivities + del_f__del_theta\n\n    return jnp.concatenate((original_rhs, sensitivity_rhs), axis=1).flatten()\n\ndef adjoint_model_homogeneous(t, s, args):\n    \"\"\"\n    This solves a system of 2*N differential equations in reverse, first the\n    original ODE system (which is needed since we require the solution to\n    evaluate the Jacobian) and then the adjoint system.\n\n    THIS IS THE HOMOGENEOUS VERSION, which only works if the loss function is\n    evaluated at the end of the time horizon, because then del_J__del_u can be\n    used as a terminal condition, making the adjoint ODE homogeneous\n    \"\"\"\n    y = s.reshape((2, 2))[:, 0]\n    adjoint_variable = s.reshape((2, 2))[:, 1]\n\n    del_f__del_u = jax.jacobian(model, argnums=1)(t, y, args)\n\n    original_rhs = model(t, y, args).reshape((-1, 1))\n    adjoint_rhs = (- del_f__del_u.T @ adjoint_variable).reshape((-1, 1))\n\n    return jnp.concatenate((original_rhs, adjoint_rhs), axis=1).flatten()\n\ndef adjoint_model_general(t, s, args, y_at_t_ref, t_discrete):\n    \"\"\"\n    This solves a system of 2*N differential equations in reverse, first the\n    original ODE system (which is needed since we require the solution to\n    evaluate the Jacobian) and then the adjoint system.\n\n    This is the general version, which works if the loss function is evaluated\n    over the entire trajectory. 
It therefore requires the reference solution to\n    be given as part of the args, since it requires an interpolation of such.\n    Additionally, the temporal mesh on which the reference solution is given has\n    to be provided.\n    \"\"\"\n    # Unpack the state vector\n    y = s.reshape((2, 2))[:, 0]\n    adjoint_variable = s.reshape((2, 2))[:, 1]\n\n    # Interpolate the reference solution\n    y_at_current_t_ref = interpolate.interp1d(t_discrete, y_at_t_ref, axis=-1)(t)\n\n    del_f__del_u = jax.jacobian(model, argnums=1)(t, y, args)\n    del_J__del_u = (y - y_at_current_t_ref).T\n\n    original_rhs = model(t, y, args).reshape((-1, 1))\n    adjoint_rhs = (- del_f__del_u.T @ adjoint_variable - del_J__del_u.T).reshape((-1, 1))\n\n    return jnp.concatenate((original_rhs, adjoint_rhs), axis=1).flatten()\n\n\nif __name__ == '__main__':\n    # Defining the integration horizon and the discretization\n    integration_horizon = (0.0, 20.0)\n    time_points_inbetween = 100\n    t_discrete = np.linspace(integration_horizon[0], integration_horizon[1], time_points_inbetween)\n\n    # The initial condition is not subject to the parameters\n    initial_condition = [0.5, 0.0]\n\n    #################\n    ###### Creating a reference trajectory using \"true\" values\n    #################\n    c_true = 0.5\n    k_true = 2.0\n    f_true = 0.5\n    w_true = 1.05\n    parameters_true = [[\n        c_true,\n        k_true,\n        f_true,\n        w_true,\n    ],]\n\n    y_at_t_ref = integrate.solve_ivp(\n        fun=model, \n        t_span=integration_horizon,\n        y0=initial_condition,\n        t_eval=t_discrete,\n        args=parameters_true\n    )[\"y\"]\n\n    ##############\n    ##### Define Loss functions that rely on the reference solution\n    ##############\n\n    # Loss function that only considers the value at the very end of the\n    # integration horizon. It uses a quadratic loss to contract the dimension\n    def loss_function_at_end(y_at_t, theta):\n        y_at_terminal_point = y_at_t[:, -1]\n        y_at_terminal_point_ref = y_at_t_ref[:, -1]\n\n        return 0.5 * (y_at_terminal_point - y_at_terminal_point_ref).T @ (y_at_terminal_point - y_at_terminal_point_ref)\n\n    # Loss function that considers the quadratic loss over the entire trajectory\n    def loss_function_entire_trajectory(y_at_t, theta):\n        difference_at_t = y_at_t - y_at_t_ref\n        quadratic_loss_at_t = 0.5 * np.einsum(\"iN,iN->N\", difference_at_t, difference_at_t)\n\n        return integrate.trapezoid(quadratic_loss_at_t, t_discrete, axis=-1)\n\n    ##############\n    ##### Now work based on parameter guesses\n    ##############\n    c_guess = 0.4\n    k_guess = 1.8\n    f_guess = 0.6\n    w_guess = 1.1\n    parameters_guess = [[\n        c_guess,\n        k_guess,\n        f_guess,\n        w_guess\n    ],]\n\n    # Solve the \"classical\" system, i.e. solve forward with the current guess\n    # for the parameters. This would have to be solved anyway if we wanted to\n    # evaluate the prediction of the system or were interested in how well our\n    # parameter guesses hold (i.e. evaluate the loss). 
Keep in mind that the\n    # solution is queried at some intermediate points in order to then evaluate\n    # the (potentially) integral-based loss over the entire time.\n    time_classical_problem = time.time_ns()\n    y_at_t = integrate.solve_ivp(\n        fun=model, \n        t_span=integration_horizon,\n        y0=initial_condition,\n        t_eval=t_discrete,\n        args=parameters_guess\n    )[\"y\"]\n    time_classical_problem = time.time_ns() - time_classical_problem\n    \n    J_at_end = loss_function_at_end(y_at_t, parameters_guess)\n    J_entire_trajectory = loss_function_entire_trajectory(y_at_t, parameters_guess)\n\n\n    ########################\n    ########################\n    ##### Sensitivities by Finite Differences\n    ########################\n    ########################\n\n    ###########\n    #### (1) Finite Differences for loss_function at end\n    ##########\n    time_finite_differences__at_end = time.time_ns()\n\n    eps = 1.0e-4\n    d_J__d_theta__at_end__finite_differences = np.empty((1, 4))\n    for i in range(4):\n        parameters_guess_augmented = copy.deepcopy(parameters_guess)\n        parameters_guess_augmented[0][i] += eps\n\n        y_augmented_at_t = integrate.solve_ivp(\n            fun=model, \n            t_span=integration_horizon,\n            y0=initial_condition,\n            t_eval=t_discrete,\n            args=parameters_guess_augmented\n        )[\"y\"]\n\n        J_augmented_at_end = loss_function_at_end(y_augmented_at_t, parameters_guess_augmented)\n\n        d_J__d_theta__at_end__finite_differences[0, i] = (\n            J_augmented_at_end - J_at_end\n        ) / eps\n    \n    time_finite_differences__at_end = time.time_ns() - time_finite_differences__at_end\n    \n    ###########\n    #### (2) Finite Differences for loss_function over entire trajectory\n    ##########\n\n    eps = 1.0e-4\n    d_J__d_theta__entire_trajectory__finite_differences = np.empty((1, 4))\n    for i in range(4):\n        parameters_guess_augmented = copy.deepcopy(parameters_guess)\n        parameters_guess_augmented[0][i] += eps\n\n        y_augmented_at_t = integrate.solve_ivp(\n            fun=model, \n            t_span=integration_horizon,\n            y0=initial_condition,\n            t_eval=t_discrete,\n            args=parameters_guess_augmented\n        )[\"y\"]\n\n        J_augmented_entire_trajectory = loss_function_entire_trajectory(y_augmented_at_t, parameters_guess_augmented)\n\n        d_J__d_theta__entire_trajectory__finite_differences[0, i] = (\n            J_augmented_entire_trajectory - J_entire_trajectory\n        ) / eps\n\n    ########################\n    ########################\n    ##### Forward Sensitivities\n    ########################\n    ########################\n\n    ######\n    ### (1.1) Using an additional trajectory co-developed, loss function at end\n    ######\n\n    time_forward__at_end = time.time_ns()\n    # We cannot integrate over a two-array object. 
Therefore it has to be a flattened vector.\n    ic_forward_sensitivities = jnp.zeros((2, 5))\n    ic_forward_sensitivities = ic_forward_sensitivities.at[:, 0].set(initial_condition).flatten()\n    solution_and_solution_sensitivities_at_t = integrate.solve_ivp(\n        fun=forward_sensitivity_model, \n        t_span=integration_horizon,\n        y0=ic_forward_sensitivities,\n        t_eval=t_discrete,\n        args=parameters_guess,\n    )[\"y\"].reshape((2, 5, time_points_inbetween))\n\n    solution_sensitivities_at_t = solution_and_solution_sensitivities_at_t[:, 1:, :]\n    \n    del_J__del_u_at_end = (y_at_t[:, -1] - y_at_t_ref[:, -1]).T\n    d_u__d_theta_at_end = solution_sensitivities_at_t[:, :, -1]\n\n    d_J__d_theta__at_end__forward = (del_J__del_u_at_end @ d_u__d_theta_at_end).reshape((1, -1))\n\n    time_forward__at_end = time.time_ns() - time_forward__at_end\n\n\n    ######\n    ### (1.2) Using an additional trajectory co-developed, loss function over entire trajectory\n    ######\n\n    # We can just reuse the solution sensitivities from before\n    #\n    # d_u__d_theta has the shape (n_dim, n_params, n_time_points) \n    d_u__d_theta__entire_trajectory = solution_sensitivities_at_t\n    # del_J__del_u has the shape (1, n_dim, n_time_points) - the leading 1\n    # (i.e. a proxy axis) is required in order to mimic the batch of row vectors\n    del_J__del_u__entire_trajectory = (y_at_t - y_at_t_ref).reshape((1, 2, time_points_inbetween))\n\n    d_j__d_theta__entire_trajectory = np.einsum(\n        \"EiN,iPN->EPN\",\n        del_J__del_u__entire_trajectory,\n        d_u__d_theta__entire_trajectory,\n    )\n\n    # Integrate d_j__d_theta over entire trajectory to obtain d_J__d_theta\n    d_J__d_theta__entire_trajectory__forward = integrate.trapezoid(\n        d_j__d_theta__entire_trajectory,\n        t_discrete,\n        axis=-1,\n    )\n\n\n    ########################\n    ########################\n    ##### Adjoint Sensitivities\n    ########################\n    ########################\n\n    #######\n    # (1.1) Using an additional trajectory that runs backwards alongside with\n    # the backwards ODE for loss only at the end\n    #######\n    time_adjoint__at_end = time.time_ns()\n\n    terminal_condition_adjoint_sensitivities = jnp.zeros((2, 2))\n\n    # The reverse running original ODE of course starts where the forward running one ended\n    terminal_condition_adjoint_sensitivities = terminal_condition_adjoint_sensitivities.at[:, 0].set(y_at_t[:, -1])\n\n    # Since the loss function is only evaluated at the end of the integration\n    # horizon, we use a homogeneous adjoint ODE, but have to add something to the\n    # terminal condition\n    #\n    # Running the ODE backwards does not seem to be a problem, once t_span is\n    # set correctly and t_eval points are reversed (using np.flip)\n    del_J__del_u_at_end = (y_at_t[:, -1] - y_at_t_ref[:, -1]).T\n    terminal_condition_adjoint_sensitivities = terminal_condition_adjoint_sensitivities.at[:, 1].set(del_J__del_u_at_end.T)\n    solution_and_adjoint_variable_at_t = np.flip(integrate.solve_ivp(\n        fun=adjoint_model_homogeneous, \n        t_span=np.flip(integration_horizon),\n        y0=terminal_condition_adjoint_sensitivities.flatten(),\n        t_eval=np.flip(t_discrete),\n        args=parameters_guess,\n    )[\"y\"].reshape((2, 2, time_points_inbetween)), axis=2)\n\n    y_at_t__backwards = solution_and_adjoint_variable_at_t[:, 0, :]\n    adjoint_variable_at_t = solution_and_adjoint_variable_at_t[:, 1, :]\n\n    J_entire_trajectory__reverse_classical_solve = loss_function_entire_trajectory(y_at_t__backwards, parameters_guess)\n\n    # The initial condition was not dependent on the parameters\n    d_u0__d_theta = jnp.zeros((2, 4))\n\n    # We still have to do an integration over the time 
horizon\n    dynamic_sensitivity_jacobian = lambda t, y, params: jnp.array(jax.jacobian(model, argnums=2)(t, y, *params)).T\n    # The jit is not really advantageous, because we are only calling the function once\n    vectorized_dynamic_sensitivity_jacobian = jax.jit(jax.vmap(dynamic_sensitivity_jacobian, in_axes=(0, 1, None), out_axes=2))\n    del_f__del_theta__at_t = vectorized_dynamic_sensitivity_jacobian(t_discrete, y_at_t, parameters_guess)\n    \n    adjoint_variable_matmul_del_f__del_theta_at_t = jnp.einsum(\"iN,ijN->jN\", adjoint_variable_at_t, del_f__del_theta__at_t)\n\n    d_J__d_theta__at_end__adjoint = (\n        adjoint_variable_at_t[:, -1].T @ d_u0__d_theta\n        + \n        jnp.zeros((1, 4))\n        +\n        integrate.trapezoid(adjoint_variable_matmul_del_f__del_theta_at_t, t_discrete, axis=-1)\n    )\n\n    time_adjoint__at_end = time.time_ns() - time_adjoint__at_end\n\n    #######\n    # (1.2) Using an additional trajectory that runs backwards alongside with\n    # the backwards ODE for loss over the entire trajectory\n    #######\n\n    terminal_condition_adjoint_sensitivities = jnp.zeros((2, 2))\n\n    # The reverse running original ODE of course starts where the forward running one ended\n    terminal_condition_adjoint_sensitivities = terminal_condition_adjoint_sensitivities.at[:, 0].set(y_at_t[:, -1])\n\n    # In contrast to before, we now have the loss function valid over the entire\n    # trajectory. Therefore, the adjoint ODE is inhomogeneous, but has a\n    # homogeneous (=zero) terminal condition. Since the corresponding array is\n    # already initialized as zeros, nothing has to be done\n    #\n    # However, we have to provide the reference solution as well as its temporal\n    # mesh such that it can be interpolated in the evaluation of the dynamics\n    #\n    # Running the ODE backwards does not seem to be a problem, once t_span is\n    # set correctly and t_eval points are reversed (using np.flip)\n    solution_and_adjoint_variable_at_t = np.flip(integrate.solve_ivp(\n        fun=adjoint_model_general, \n        t_span=np.flip(integration_horizon),\n        y0=terminal_condition_adjoint_sensitivities.flatten(),\n        t_eval=np.flip(t_discrete),\n        args=(*parameters_guess, y_at_t_ref, t_discrete),\n    )[\"y\"].reshape((2, 2, time_points_inbetween)), axis=2)\n\n    adjoint_variable_at_t = solution_and_adjoint_variable_at_t[:, 1, :]\n\n    # The initial condition was not dependent on the parameters\n    d_u0__d_theta = jnp.zeros((2, 4))\n\n    dynamic_sensitivity_jacobian = lambda t, y, params: jnp.array(jax.jacobian(model, argnums=2)(t, y, *params)).T\n    # The jit is not really advantageous, because we are only calling the function once\n    vectorized_dynamic_sensitivity_jacobian = jax.jit(jax.vmap(dynamic_sensitivity_jacobian, in_axes=(0, 1, None), out_axes=2))\n    del_f__del_theta__at_t = vectorized_dynamic_sensitivity_jacobian(t_discrete, y_at_t, parameters_guess)\n    \n    adjoint_variable_matmul_del_f__del_theta_at_t = jnp.einsum(\"iN,ijN->jN\", adjoint_variable_at_t, del_f__del_theta__at_t)\n\n    d_J__d_theta__entire_trajectory__adjoint = (\n        adjoint_variable_at_t[:, -1].T @ d_u0__d_theta\n        + \n        jnp.zeros((1, 4))\n        +\n        integrate.trapezoid(adjoint_variable_matmul_del_f__del_theta_at_t, t_discrete, axis=-1)\n    )\n\n\n    ###########\n    #### Post-Processing\n    ###########\n\n    # Calculates how far the gradients are away from each other\n    collection_of_gradients__at_end = {\n        \"finite_differences\": d_J__d_theta__at_end__finite_differences,\n        \"forward\": d_J__d_theta__at_end__forward,\n        \"adjoint\": d_J__d_theta__at_end__adjoint,\n    }\n    gradients_distances__at_end = metrics.pairwise_distances(\n        
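# stack the three gradient estimates as rows and compute their pairwise Euclidean distances (small values mean the methods agree)\n        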
np.concatenate(list(collection_of_gradients__at_end.values()), axis=0)\n    )\n\n    collection_of_gradients__entire_trajectory = {\n        \"finite_differences\": d_J__d_theta__entire_trajectory__finite_differences,\n        \"forward\": d_J__d_theta__entire_trajectory__forward,\n        \"adjoint\": d_J__d_theta__entire_trajectory__adjoint,\n    }\n    gradients_distances__entire_trajectory = metrics.pairwise_distances(\n        np.concatenate(list(collection_of_gradients__entire_trajectory.values()), axis=0)\n    )\n\n    ###########\n    #### Reporting\n    ###########\n\n    print(\"----- Setup ------\")\n    print(\"Time Domain: \", integration_horizon)\n    print(\"Number of discretization points: \", time_points_inbetween)\n    \n    print()\n    print(\"----- Loss Values ------\")\n    print(\"Quadratic loss using the parameters guessed (classical solution) at end only\")\n    print(J_at_end)\n    print(\"Quadratic Loss using the parameters guessed (classical solution) over entire trajectory\")\n    print(J_entire_trajectory)\n    print(\"Quadratic Loss using the parameters guessed (reverse classical solution) over entire trajectory\")\n    print(J_entire_trajectory__reverse_classical_solve)\n\n    print()\n    print(\"----- Loss sensitivities -----\")\n    print(\"-> Loss Function end only\")\n    print(\"Finite Differences\")\n    print(d_J__d_theta__at_end__finite_differences)\n    print(\"Forward Method\")\n    print(d_J__d_theta__at_end__forward)\n    print(\"Adjoint Method\")\n    print(d_J__d_theta__at_end__adjoint)\n    print(\"Distances of Gradients\")\n    print(gradients_distances__at_end)\n    print(\"-> Loss Function entire trajectory\")\n    print(\"Finite Differences\")\n    print(d_J__d_theta__entire_trajectory__finite_differences)\n    print(\"Forward Method\")\n    print(d_J__d_theta__entire_trajectory__forward)\n    print(\"Adjoint Method\")\n    print(d_J__d_theta__entire_trajectory__adjoint)\n    print(\"Distances of Gradients\")\n    print(gradients_distances__entire_trajectory)\n\n    print()\n    print(\"----- Timings ----- [ns] - lower is better\")\n    print(\"Classical Solve\")\n    print(time_classical_problem)\n    print(\"-> Loss Function end only\")\n    print(\"Finite Differences\")\n    print(time_finite_differences__at_end)\n    print(\"Forward Method\")\n    print(time_forward__at_end)\n    print(\"Adjoint Method\")\n    print(time_adjoint__at_end)\n\n\n    # Plots\n\n    plt.figure()\n    plt.subplot(121)\n    plt.plot(t_discrete, y_at_t_ref[0, :], label=\"Reference solution\")\n    plt.plot(t_discrete, y_at_t[0, :], label=\"Guessed parameters\")\n    plt.legend()\n    plt.grid()\n    plt.subplot(122)\n    plt.plot(t_discrete, y_at_t_ref[1, :], label=\"Reference solution\")\n    plt.plot(t_discrete, y_at_t[1, :], label=\"Guessed parameters\")\n    plt.legend()\n    plt.grid()\n\n    plt.figure()\n    plt.title(\"Solution run reverse\")\n    plt.plot(t_discrete, y_at_t__backwards[0, :])\n    plt.plot(t_discrete, y_at_t__backwards[1, :])\n\n    plt.figure()\n    plt.plot(t_discrete, y_at_t__backwards[0, :])\n    plt.plot(t_discrete, y_at_t__backwards[1, :])\n    plt.title(\"Stuff\")\n\n    plt.show()\n","repo_name":"Ceyron/machine-learning-and-simulation","sub_path":"english/adjoints_sensitivities_automatic_differentiation/adjoint_ode.py","file_name":"adjoint_ode.py","file_ext":"py","file_size_in_byte":18997,"program_lang":"python","lang":"en","doc_type":"code","stars":521,"dataset":"github-code","pt":"37"} +{"seq_id":"73025867626","text":"import Pyro4\r\nfrom Pyro4.util import SerializerBase\r\nimport objeto_agenda\r\n\r\n\r\n@Pyro4.expose\r\nclass AgendaServidor:\r\n\r\n    def __init__(self):\r\n        self.contador_id = 0\r\n        self.agenda = []\r\n\r\n    def inserir(self, 
objeto:objeto_agenda.Objeto):\r\n        objeto.id = self.contador_id\r\n        self.contador_id += 1\r\n        self.agenda.append(objeto)\r\n        return True\r\n    \r\n    def atualizar(self, dados):\r\n        for i, cadastro in enumerate(self.agenda):\r\n            if (cadastro.id == dados.id):\r\n                self.agenda[i] = dados\r\n                return True\r\n        return False\r\n    \r\n    def buscar(self, id):\r\n        for objeto in self.agenda:\r\n            if (objeto.id == id):\r\n                return objeto\r\n        return None\r\n    \r\n    def listar_todos(self):\r\n        return self.agenda\r\n    \r\n    def excluir(self, id):\r\n        for i, cadastro in enumerate(self.agenda):\r\n            if (cadastro.id == id):\r\n                self.agenda.pop(i)\r\n                return True\r\n        return False\r\n\r\n    \r\ndef main():\r\n\r\n    daemon = Pyro4.Daemon() \r\n    uri = daemon.register(AgendaServidor)\r\n    print(\"URI do objeto: \", uri)\r\n    ns = Pyro4.locateNS()\r\n    ns.register(\"KLEIN_ROCHA\", uri)\r\n\r\n    SerializerBase.register_dict_to_class(\"objeto_agenda.Objeto\", objeto_agenda.dict_to_class)\r\n    SerializerBase.register_class_to_dict(objeto_agenda.Objeto, objeto_agenda.class_to_dict)\r\n\r\n    daemon.requestLoop()\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n","repo_name":"dev-marcos/Agenda-de-Contatos-usando-Pyro4","sub_path":"servidor_agenda.py","file_name":"servidor_agenda.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24814863523","text":"# SWEA 5215 Hamburger Diet\nfrom collections import deque\ndef dfs(idx,taste,cal):\n    global maxv\n    if cal >L:\n        return\n    if idx == N:\n        maxv = max(maxv,taste)\n        return\n    dfs(idx+1,taste+data[idx][0],cal+data[idx][1])\n    dfs(idx + 1, taste, cal)\nT = int(input())\n\nfor tc in range(1,T+1):\n    N,L = map(int,input().split())\n    data = []\n    ans = [0]*(L+1)\n    for i in range(N):\n        a,b = map(int,input().split())\n        data.append((a,b))\n    maxv = 0\n    dfs(0,0,0)\n    print(f'#{tc} {maxv}')","repo_name":"JiIJu/algorithm_algorithm","sub_path":"학사 지이주/2023/7월/0724/5215.py","file_name":"5215.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"43460633431","text":"# coding: utf-8\n# This file is part of Thomas Aquinas.\n#\n# Thomas Aquinas is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Thomas Aquinas is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Thomas Aquinas. 
If not, see <http://www.gnu.org/licenses/>.\n#\n# veni, Sancte Spiritus.\n\nimport logging\nimport nose\nfrom nose.tools import ok_, eq_, raises\nfrom lib.spritefactory import Entity\nfrom lib import media\n\nimport sfml\n\nclass TestEntity:\n    @classmethod\n    def setup_class(cls):\n        print (\"Creando una ventana SFML\")\n        global window\n        global tex\n        window = sfml.RenderWindow(sfml.VideoMode(320, 240),\n                                   \"Test_spritefactory\")\n        tex = media.loadimg(\"uniteststuff/test.png\", False)\n\n    def test_entitybuild(self):\n        entity = Entity(\"test\", tex, window, None, None)\n        eq_(entity.id, \"test\")\n        ok_(isinstance(entity.sprite, sfml.Sprite),\n            \"La entidad no tiene un objet sprite\")\n        ok_(isinstance(entity.clock, sfml.Clock),\n            \"La entidad no tiene un objeto clock\")\n        eq_(entity.zindex, None)\n\n","repo_name":"shackra/thomas-aquinas","sub_path":"notestno/test_spritefactory.py","file_name":"test_spritefactory.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"13938642170","text":"from setproctitle import setproctitle\nimport datetime\nimport json\nimport os\nimport redis\nimport serial\nimport time\n\n\nclass IndoorAirQualitySerial:\n    INPUT_MAP = {\n        \"14\": \"mq-3\",\n        \"15\": \"mq-2\",\n        \"16\": \"mq-9\",\n        \"17\": \"aq\",\n        \"20\": \"mq-5\",\n        \"21\": \"hcho\",\n    }\n\n    def __init__(self, serial_device, redis_host, redis_port):\n        self.redis_instance = redis.StrictRedis(host=redis_host, port=redis_port)\n        self.serial = serial.Serial(serial_device, 9600)\n\n    def run(self):\n        influx_fields = {}\n        last_updated_at = time.time()\n        while True:\n            current_time = datetime.datetime.utcnow()\n            line = self.serial.readline().strip()\n            print(\"Received '%s'\" % line)\n            line = line.strip().split(\":\")\n            if len(line) != 2:\n                continue\n            try:\n                k = self.INPUT_MAP[line[0]]\n            except KeyError:\n                print(\"Invalid key: %s\" % line[0])\n                continue\n            influx_fields[k] = round(float(line[1]), 1)\n            if time.time() - last_updated_at > 10:\n                influx_data = [\n                    {\n                        \"measurement\": \"gas_sensors\",\n                        \"tags\": {\n                            \"location\": \"display\",\n                        },\n                        \"time\": current_time.isoformat() + \"Z\",\n                        \"fields\": influx_fields\n                    }\n                ]\n                self.redis_instance.publish(\"influx-update-pubsub\", json.dumps(influx_data))\n                influx_fields = {}\n                last_updated_at = time.time()\n\n\ndef main():\n    setproctitle(\"indoor_air_quality - serial: run\")\n    serial_device = os.environ[\"SERIAL_DEVICE\"]\n    redis_host = os.environ[\"REDIS_HOST\"]\n    redis_port = os.environ[\"REDIS_PORT\"]\n    iqps = IndoorAirQualitySerial(serial_device, redis_host, redis_port)\n    iqps.run()\n\nif __name__ == '__main__':\n    main()\n","repo_name":"ojarva/home-info-display","sub_path":"indoor_air_quality_publisher/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"8463225082","text":"from statsmodels.tsa.stattools import grangercausalitytests\n\n\ndef get_granger_causality(X, neuron_idx, alpha=0.05):\n    \"\"\"\n    Estimates the lag-1 Granger causality of the given neuron on the other neurons in the system.\n\n    Args:\n        X (np.ndarray): the matrix holding our dynamical system of shape (n_neurons, timesteps)\n        neuron_idx (int): the index of the neuron we want to estimate Granger causality for\n        alpha (float): the significance level for the hypothesis tests, must be between 0 and 1.\n\n    Returns:\n        A tuple (reject_null, p_vals)\n        reject_null (list): a binary list of 
length n_neurons whether the null was \n            rejected for the selected neuron Granger-causing the other neurons\n        p_vals (list): a list of the p-values for the corresponding Granger causality tests\n    \"\"\"\n    n_neurons = X.shape[0]\n    max_lag = 1\n    alpha = alpha #/ (n_neurons* max_lag) # Bonferroni multiple comparisons correction\n\n    reject_null = [] \n    p_vals = []\n\n    for target_neuron in range(n_neurons):\n        ts_data = X[[neuron_idx, target_neuron], :].transpose()\n\n        \"\"\"solution\"\"\"\n        res = grangercausalitytests(ts_data, maxlag=max_lag, verbose=False)\n        # gets the p-value for the likelihood-ratio test \n        pval = res[1][0]['lrtest'][1]\n\n        p_vals.append(pval)\n        reject_null.append(int(pval < alpha))\n\n    return reject_null, p_vals","repo_name":"ddinesan/Neuroscience","sub_path":"tutorials/W3D3_NetworkCausality/solutions/W3D3_Tutorial3_Solution_8792fb84.py","file_name":"W3D3_Tutorial3_Solution_8792fb84.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31357554153","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jan 27 17:57:50 2018\n\n@author: Krishna mohan\n\"\"\"\n\nimport cv2\nimport numpy as np\nimport functions as fn\n\n\n\n\n\n\nroads = fn.read_img(\"download3.jpg\")\nroads = fn.resize(roads)\n#road = fn.img_bw(roads)\nroad = cv2.cvtColor(roads, cv2.COLOR_BGR2GRAY)\nroad = cv2.medianBlur(road, 1)\nfn.show_img(road)\n\n\ncont = cv2.Canny(road, 300, 450)\n\nlines = cv2.HoughLinesP(cont, 1, np.pi/180, 100, minLineLength=10, maxLineGap=20)\n\n\n\n\n\n\n\n#lines = cv2.HoughLines(cont, 1, np.pi/180, 200)\n'''\nfor i in lines:\n    rho, theta = i[0]\n    a = np.cos(theta)\n    b = np.sin(theta)\n    x0 = a*rho\n    y0 = b*rho\n    x1 = int(x0 + 1000*(-b))\n    y1 = int(y0 + 1000*(a))\n    x2 = int(x0 - 1000*(-b))\n    y2 = int(y0 - 1000*(a))\n    \n    cv2.line(roads,(x1,y1),(x2,y2),(0,0,255),2)\n\nfn.show_img(roads)\n\n'''\nmidX = roads.shape[1]\nmidY = roads.shape[0]\nfor i in lines:\n    x1, y1, x2, y2 = i[0]\n    \n    if (x2-x1 > 50 or y2-y1 > 120):\n        print(i)\n        cv2.line(roads,(x1,y1),(x2,y2),(0,255,0),2)\n        fn.show_img(roads)\n\n    \n","repo_name":"krishnardt/AutoDrive","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28454202123","text":"import test\r\n\r\nzeromatrix = [[0 for j in range(1, 10)] for i in range(1, 10)]\r\n\r\nstartcandidates = [[[x for x in range(1, 10)] for j in range(1, 10)]\r\n                   for i in range(1, 10)]\r\n\r\n\r\ndef printmatrix(matrix, start=zeromatrix):\r\n    for i in range(9):\r\n        for j in range(9):\r\n            if (j == 2 or j == 5):\r\n                if (matrix[i][j] == 0):  # TODO ternary experiment\r\n                    print(\"\\x1b[1;90m.\\x1b[0m\", end=\" \\x1b[1;90m|\\x1b[0m \")\r\n                else:\r\n                    if (start[i][j] == 0):\r\n                        print(\r\n                            f\"\\x1b[1;91m{matrix[i][j]}\\x1b[m\",\r\n                            end=\" \\x1b[1;90m|\\x1b[0m \")\r\n                    else:\r\n                        print(matrix[i][j], end=\" \\x1b[1;90m|\\x1b[0m \")\r\n            else:\r\n                if (matrix[i][j] == 0):\r\n                    print(\"\\x1b[1;90m.\\x1b[0m\", end=\" \")\r\n                else:\r\n                    if (start[i][j] == 0):\r\n                        print(f\"\\x1b[1;91m{matrix[i][j]}\\x1b[m\", end=\" \")\r\n                    else:\r\n                        print(matrix[i][j], end=\" \")\r\n        print(\"\")\r\n        if (i == 2 or i == 5):\r\n            print(\"\\x1b[1;90m- - - + - - - + - - -\\x1b[0m\")\r\n\r\n\r\ndef matrixfromcandidates(candidates):\r\n    matrix = zeromatrix\r\n    for i in range(9):\r\n        for j in range(9):\r\n            if len(candidates[i][j]) == 1:\r\n                matrix[i][j] = candidates[i][j][0]\r\n    return 
matrix\r\n\r\n\r\ndef zerocounter(current):\r\n count = 0\r\n for row in current:\r\n count += row.count(0)\r\n return count\r\n\r\n\r\ndef nonzerocounter(current):\r\n count = 81 - zerocounter(current)\r\n return count\r\n\r\n\r\ndef solve(start):\r\n # initialize\r\n global current, candidates, zerocounter, iterationcount\r\n current = zeromatrix\r\n candidates = startcandidates\r\n for i in range(9):\r\n for j in range(9):\r\n if start[i][j] != 0:\r\n current[i][j] = start[i][j]\r\n deleteCandidates(i, j)\r\n print(f\"========START========\")\r\n print(f\"{nonzerocounter(start)} clues\")\r\n printmatrix(current)\r\n while (zerocounter(current) > 0):\r\n zeroesbefore = zerocounter(current)\r\n iteration(candidates)\r\n checkCandidates()\r\n zeroesafter = zerocounter(current)\r\n if (zeroesbefore == zeroesafter):\r\n print(\"=========END=========\")\r\n s = (\"\" if iteration.count == 1 else \"s\")\r\n print(\r\n f\"Couldn't solve! {nonzerocounter(current)-nonzerocounter(start)} found in {iteration.count} iteration{s}\"\r\n )\r\n printmatrix(current, start)\r\n raise SystemExit\r\n\r\n else:\r\n print(\"=========END=========\")\r\n s = (\"\" if iteration.count == 1 else \"s\")\r\n print(f\"Solved in {iteration.count} iteration{s}\")\r\n printmatrix(current, start)\r\n\r\n\r\ndef deleteCandidates(i, j):\r\n deleteHorizontal(i, j)\r\n deleteVertical(i, j)\r\n deleteSubgrid(i, j)\r\n\r\n\r\ndef deleteHorizontal(i, j):\r\n disqualified = current[i][j]\r\n for x in range(9):\r\n if x == j:\r\n candidates[i][x] = [disqualified]\r\n else:\r\n if disqualified in candidates[i][x]:\r\n candidates[i][x].remove(disqualified)\r\n\r\n\r\ndef deleteVertical(i, j):\r\n disqualified = current[i][j]\r\n for x in range(9):\r\n if x == i:\r\n candidates[x][j] = [disqualified]\r\n else:\r\n if disqualified in candidates[x][j]:\r\n candidates[x][j].remove(disqualified)\r\n\r\n\r\ndef deleteSubgrid(i, j):\r\n disqualified = current[i][j]\r\n # get top left cell\r\n a = i - (i % 3)\r\n b = j - (j % 3)\r\n for x in range(3):\r\n for y in range(3):\r\n if a + x == i and b + y == j:\r\n candidates[i][j] = [disqualified]\r\n else:\r\n if disqualified in candidates[a + x][b + y]:\r\n candidates[a + x][b + y].remove(disqualified)\r\n\r\n\r\ndef checkCandidates():\r\n checkRows()\r\n checkColumns()\r\n checkSubgrids()\r\n\r\n\r\ndef checkRows():\r\n for x in range(1, 10):\r\n for i in range(9):\r\n count = 0\r\n for j in range(9):\r\n if x in candidates[i][j]:\r\n count += 1\r\n position = [i, j]\r\n if count == 1:\r\n candidates[position[0]][position[1]] = [x]\r\n iteration(candidates)\r\n\r\n\r\ndef checkColumns():\r\n for x in range(1, 10):\r\n for j in range(9):\r\n count = 0\r\n for i in range(9):\r\n if x in candidates[i][j]:\r\n count += 1\r\n position = [i, j]\r\n if count == 1:\r\n candidates[position[0]][position[1]] = [x]\r\n iteration(candidates)\r\n\r\n\r\ndef checkSubgrids():\r\n for x in range(1, 10):\r\n for i in range(0, 9, 3):\r\n for j in range(0, 9, 3):\r\n count = 0\r\n for a in range(3):\r\n for b in range(3):\r\n if x in candidates[i + a][j + b]:\r\n count += 1\r\n position = [i + a, j + b]\r\n if count == 1:\r\n candidates[position[0]][position[1]] = [x]\r\n iteration(candidates)\r\n\r\n\r\ndef iteration(candidates):\r\n iteration.count += 1\r\n current = matrixfromcandidates(candidates)\r\n for i in range(9):\r\n for j in range(9):\r\n if current[i][j] != 0:\r\n deleteCandidates(i, j)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n iteration.count = 0\r\n # solve(test.sudoku1) # 32 clues - Solved 
in 238 iterations\r\n # solve(test.sudoku2) # 28 clues - Solved in 153 iterations\r\n # solve(test.sudoku3) # 28 clues - Solved in 153 iterations\r\n # solve(test.sudoku4) # 26 clues - Solved in 185 iterations\r\n # solve(test.sudoku5) # 26 clues - Couldn't solve! 3 found in 175 iterations\r\n # solve(test.sudoku6) # 21 clues - Couldn't solve! 0 found in 64 iterations\r\n # solve(test.sudoku7) # 17 clues - Couldn't solve! 4 found in 183 iterations\r\n solve(test.sudoku5) # 17 clues - Solved in 349 iterations\r\n pass\r\n","repo_name":"tweakimp/littleprojects","sub_path":"sudoku/sudoku.py","file_name":"sudoku.py","file_ext":"py","file_size_in_byte":6221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27190927164","text":"# encoding: utf8\nfrom plan.session import SessionType\nfrom plan.training import TrainingPlan\nfrom plan.interval import NORMAL\nfrom plan.runner import Runner\n\n\ndef plan(\n race=SessionType.TEN,\n vma=18.5,\n weeks=8,\n spw=5,\n level=NORMAL,\n cross=False,\n age=43,\n max_hr=192,\n):\n runner = Runner(age, vma, max_hr)\n training = TrainingPlan(race, runner, level, cross=cross)\n training.build(weeks, spw)\n return training\n\n\ndef plan_from_hash(hash):\n return TrainingPlan.from_small_hash(hash)\n\n\nif __name__ == \"__main__\":\n p = plan(cross=True)\n hash = p.small_hash\n\n print(p.hash)\n print(len(p.small_hash))\n p2 = TrainingPlan.from_small_hash(hash)\n # data = zlib.decompress(base64.b64decode(p.small_hash))\n # print(data)\n # print(len(data))\n # print(plan().json())\n","repo_name":"tarekziade/vma","sub_path":"plan/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9968253280","text":"class String:\n def int_with_zero(self,int1,num):\n str2 = '0'\n print(f'integer with append zeros on left side:{str2 * num + str(int1)}')\n\n\nint1=99\nnum=int(input(\"enter number of zeros:\"))\ns1=String()\ns1.int_with_zero(int1,num)","repo_name":"SACHINKV14/MCS_00_Sachin_Core_Python","sub_path":"practice 04 Dec/oops task/oops_strings/_33_int_with_zeros_on_left_of_specified_width.py","file_name":"_33_int_with_zeros_on_left_of_specified_width.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70115779628","text":"from slack import WebClient\nimport os\nimport json\nimport warnings\n\n\ndef post_slack_message(channel, text):\n cred_path = os.path.expanduser(\"~/.credentials.json\")\n if not os.path.exists(cred_path):\n msg = \"Could not find ~/.credentials.json with Slack credentials, not posting message...\"\n warnings.warn(msg, UserWarning)\n return\n credentials = json.load(open(cred_path))\n if \"slack\" not in credentials or \"bot_token\" not in credentials[\"slack\"]:\n warnings.warn(\n \"Could not find Slack credentials in ~/.credentials.json\", UserWarning\n )\n return\n client = WebClient(token=credentials[\"slack\"][\"bot_token\"])\n client.chat_postMessage(channel=channel, text=text)\n","repo_name":"facebookresearch/covid19_spread","sub_path":"covid19_spread/lib/slack.py","file_name":"slack.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"37"} +{"seq_id":"8463225082","text":"#https://www.beecrowd.com.br/judge/pt/custom-problems/view/1759\n\nanoFim = 
int(input(''))\n\nsalarioInicio = 1000\nsalario = salarioInicio \nporcentual = 1.015\nif anoFim > 2005:\n for c in range(2005,anoFim):\n c +=1\n if c == 2006:\n salario = salario * 1.015\n else:\n porcentual = porcentual + 0.01\n salario = salario * porcentual\n print('Salário atual: R$%.2f' %salario)\nelse:\n print('O ano informado deverá ser > 2005!')\n","repo_name":"hugobr72/Algoritmos-e-programa--o-Unilavras","sub_path":"Lista4/Hugo/1759.py","file_name":"1759.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1374660921","text":"import uuid\n\nfrom django.contrib.auth import get_user_model\nfrom django.db import models\nfrom django.utils.translation import gettext_lazy as _\n\nUser = get_user_model()\n\n\nclass UUIDModelAbstract(models.Model):\n id = models.UUIDField(_(\"ID\"), default=uuid.uuid4, primary_key=True, editable=False)\n\n class Meta:\n abstract = True\n\n\nclass CreatedAtAbstract(models.Model):\n created_at = models.DateTimeField(_(\"Created at\"), auto_now_add=True)\n\n class Meta:\n abstract = True\n\n\nclass UpdatedAtAbstract(models.Model):\n updated_at = models.DateTimeField(_(\"Updated at\"), auto_now=True, null=True)\n\n class Meta:\n abstract = True\n\n\nclass OwnerAbstract(models.Model):\n owner = models.ForeignKey(User, verbose_name=_(\"Owner\"), on_delete=models.SET_NULL, null=True, blank=True)\n\n class Meta:\n abstract = True\n","repo_name":"postman17/backuper","sub_path":"project/helpers/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38428722189","text":"from fractions import Fraction\n\nfrom pyx import text, utils\nfrom pyx.graph.axis.tick import tick as Tick\n\n\nclass _texter:\n def labels(self, ticks):\n \"\"\"fill the label attribute of ticks\n - ticks is a list of instances of tick\n - for each element of ticks the value of the attribute label is set to\n a string or MultiEngineText instance appropriate to the attributes\n num and denom of that tick instance\n - label attributes of the tick instances are just kept, whenever they\n are not equal to None\n - the method might modify the labelattrs attribute of the ticks; be sure\n to not modify it in-place!\"\"\"\n raise NotImplementedError\n\n\nclass decimal(_texter):\n \"a texter creating decimal labels (e.g. 
'1.234' or even '0.\\overline{3}')\"\n\n    def __init__(self, prefix=\"\", infix=\"\", suffix=\"\", equalprecision=False,\n                 decimalsep=\".\", thousandsep=\"\", thousandthpartsep=\"\",\n                 plus=\"\", minus=\"-\", period=r\"\\overline{%s}\",\n                 labelattrs=[text.mathmode]):\n        r\"\"\"initializes the instance\n        - prefix, infix, and suffix (strings) are added at the begin,\n          immediately after the minus, and at the end of the label,\n          respectively\n        - equalprecision forces the same number of digits after decimalsep,\n          even when the trailing digits are zero\n        - decimalsep, thousandsep, and thousandthpartsep (strings)\n          are used as separators\n        - plus or minus (string) is inserted for non-negative or negative numbers\n        - period (string) is taken as a format string generating a period;\n          it has to contain exactly one string insert operator \"%s\" for the\n          period; usually it should be r\"\\overline{%s}\"\n        - labelattrs is a list of attributes to be added to the label attributes\n          given in the painter\"\"\"\n        self.prefix = prefix\n        self.infix = infix\n        self.suffix = suffix\n        self.equalprecision = equalprecision\n        self.decimalsep = decimalsep\n        self.thousandsep = thousandsep\n        self.thousandthpartsep = thousandthpartsep\n        self.plus = plus\n        self.minus = minus\n        self.period = period\n        self.labelattrs = labelattrs\n\n    def labels(self, ticks):\n        labeledticks = []\n        maxdecprecision = 0\n        for tick in ticks:\n            if tick.label is None and tick.labellevel is not None:\n                labeledticks.append(tick)\n                m, n = tick.num, tick.denom\n                if m < 0: m = -m\n                if n < 0: n = -n\n                quotient, remainder = divmod(m, n)\n                quotient = str(quotient)\n                if len(self.thousandsep):\n                    l = len(quotient)\n                    tick.label = \"\"\n                    for i in range(l):\n                        tick.label += quotient[i]\n                        if not ((l-i-1) % 3) and l > i+1:\n                            tick.label += self.thousandsep\n                else:\n                    tick.label = quotient\n                if remainder:\n                    tick.label += self.decimalsep\n                    oldremainders = []\n                    tick.temp_decprecision = 0\n                    while (remainder):\n                        tick.temp_decprecision += 1\n                        if remainder in oldremainders:\n                            tick.temp_decprecision = None\n                            periodstart = len(tick.label) - (len(oldremainders) - oldremainders.index(remainder))\n                            tick.label = tick.label[:periodstart] + self.period % tick.label[periodstart:]\n                            break\n                        oldremainders += [remainder]\n                        remainder *= 10\n                        quotient, remainder = divmod(remainder, n)\n                        if not ((tick.temp_decprecision - 1) % 3) and tick.temp_decprecision > 1:\n                            tick.label += self.thousandthpartsep\n                        tick.label += str(quotient)\n                else:\n                    if maxdecprecision < tick.temp_decprecision:\n                        maxdecprecision = tick.temp_decprecision\n        if self.equalprecision:\n            for tick in labeledticks:\n                if tick.temp_decprecision is not None:\n                    if tick.temp_decprecision == 0 and maxdecprecision > 0:\n                        tick.label += self.decimalsep\n                    for i in range(tick.temp_decprecision, maxdecprecision):\n                        if not ((i - 1) % 3) and i > 1:\n                            tick.label += self.thousandthpartsep\n                        tick.label += \"0\"\n        for tick in labeledticks:\n            if tick.num * tick.denom < 0:\n                plusminus = self.minus\n            else:\n                plusminus = self.plus\n            tick.label = \"%s%s%s%s%s\" % (self.prefix, plusminus, self.infix, tick.label, self.suffix)\n            tick.labelattrs = tick.labelattrs + self.labelattrs\n\n            # del tick.temp_decprecision # we've inserted this temporary variable ... and do not care any longer about it\n\n\n\nclass skipmantissaunity:\n    pass\n\nskipmantissaunity.never = 0\nskipmantissaunity.each = 1\nskipmantissaunity.all = 2\n\n\nclass default(_texter):\n\n    \"a texter creating regular (e.g. 
'2\\cdot10^5') labels\"\n\n    def __init__(self, multiplication_tex=r\"\\cdot{}\", multiplication_unicode=\"·\", base=Fraction(10),\n                 skipmantissaunity=skipmantissaunity.all, minusunity=\"-\",\n                 minexponent=4, minnegexponent=None, uniformexponent=True,\n                 mantissatexter=decimal(), basetexter=decimal(), exponenttexter=decimal(),\n                 labelattrs=[text.mathmode]):\n                 # , **kwargs): # future\n        r\"\"\"initializes the instance\n        - multiplication_tex and multiplication_unicode are the strings to\n          indicate the multiplication between the mantissa and the base\n          number for the TexEngine and the UnicodeEngine, respectively\n        - base is the number of the base of the exponent\n        - skipmantissaunity is either skipmantissaunity.never (never skip the\n          unity mantissa), skipmantissaunity.each (skip the unity mantissa\n          whenever it occurs for each label separately), or skipmantissaunity.all\n          (skip the unity mantissa when all labels happen to have a\n          unity mantissa)\n        - minusunity is used as the output of -unity for the mantissa\n        - minexponent is the minimal positive exponent value to be printed by\n          exponential notation\n        - minnegexponent is the minimal negative exponent value to be printed by\n          exponential notation, for None it is considered to be equal to minexponent\n        - uniformexponent forces all numbers to be written in exponential notation\n          when at least one label exceeds the limits for non-exponential\n          notation\n        - mantissatexter, basetexter, and exponenttexter generate the texts\n          for the mantissa, the base, and the exponent\n        - labelattrs is a list of attributes to be added to the label attributes\n          given in the painter\"\"\"\n        self.multiplication_tex = multiplication_tex\n        self.multiplication_unicode = multiplication_unicode\n        self.base = base\n        self.skipmantissaunity = skipmantissaunity\n        self.minusunity = minusunity\n        self.minexponent = minexponent\n        self.minnegexponent = minnegexponent if minnegexponent is not None else minexponent\n        self.uniformexponent = uniformexponent\n        self.mantissatexter = mantissatexter\n        self.basetexter = basetexter\n        self.exponenttexter = exponenttexter\n        self.labelattrs = labelattrs\n\n        # future:\n        # kwargs = utils.kwsplit(kwargs, ['mantissatexter', 'basetexter', 'exponenttexter'])\n        # self.mantissatexter = mantissatexter(a=1, **kwargs['mantissatexter'])\n        # self.basetexter = basetexter(**kwargs['basetexter'])\n        # self.exponenttexter = exponenttexter(**kwargs['exponenttexter'])\n\n    def labels(self, ticks):\n        labeledticks = []\n        for tick in ticks:\n            if tick.label is None and tick.labellevel is not None:\n                labeledticks.append(tick)\n\n                tick.labelattrs = tick.labelattrs + self.labelattrs\n\n                if tick.num:\n                    # express tick = tick.temp_sign * tick.temp_mantissa * self.base ** tick.temp_exponent with 1 <= temp_mantissa < self.base \n                    # and decide whether a tick is to be written in exponential notation\n                    tick.temp_sign = 1 if tick >= 0 else -1\n                    tick.temp_mantissa = abs(Fraction(tick.num, tick.denom))\n                    tick.temp_exponent = 0\n                    while tick.temp_mantissa >= self.base:\n                        tick.temp_exponent += 1\n                        tick.temp_mantissa /= self.base\n                    while tick.temp_mantissa < 1:\n                        tick.temp_exponent -= 1\n                        tick.temp_mantissa *= self.base\n                    tick.temp_wantexponent = not (-self.minnegexponent < tick.temp_exponent < self.minexponent)\n                else:\n                    tick.temp_mantissa = tick.temp_exponent = 0\n                    tick.temp_sign = 1\n                    tick.temp_wantexponent = not (-self.minnegexponent < 0 < self.minexponent)\n\n        # make decision on exponential notation uniform if requested\n        if self.uniformexponent and 
any(tick.temp_wantexponent for tick in labeledticks):\n for tick in labeledticks:\n if tick.num:\n tick.temp_wantexponent = True\n\n # mark mantissa == 1 to be not labeled\n if self.skipmantissaunity == skipmantissaunity.each:\n for tick in labeledticks:\n if tick.temp_wantexponent and tick.temp_mantissa == 1:\n tick.temp_mantissa = None\n elif self.skipmantissaunity == skipmantissaunity.all and all(tick.temp_mantissa == 1 for tick in labeledticks if tick.temp_wantexponent):\n for tick in labeledticks:\n if tick.temp_wantexponent:\n tick.temp_mantissa = None\n\n # construct labels\n basetick = Tick(self.base, labellevel=0)\n self.basetexter.labels([basetick])\n for tick in labeledticks:\n if tick.temp_wantexponent:\n if tick.temp_mantissa is not None:\n tick.temp_mantissatick = Tick(tick.temp_sign * tick.temp_mantissa, labellevel=0)\n tick.temp_exponenttick = Tick(tick.temp_exponent, labellevel=0)\n else:\n tick.temp_mantissatick = tick\n\n self.mantissatexter.labels([tick.temp_mantissatick for tick in labeledticks if tick.temp_mantissa is not None])\n self.exponenttexter.labels([tick.temp_exponenttick for tick in labeledticks if tick.temp_wantexponent])\n for tick in labeledticks:\n if tick.temp_wantexponent:\n if tick.temp_mantissa is not None:\n mantissalabel_tex = tick.temp_mantissatick.label + self.multiplication_tex\n mantissalabel_unicode = tick.temp_mantissatick.label + self.multiplication_unicode\n else:\n mantissalabel_tex = self.minusunity if tick.temp_sign == -1 else \"\"\n mantissalabel_unicode = self.minusunity if tick.temp_sign == -1 else \"\"\n tick.label = text.MultiEngineText(\"%s%s^{%s}\" % (mantissalabel_tex, basetick.label, tick.temp_exponenttick.label), [mantissalabel_unicode + basetick.label, text.Text(tick.temp_exponenttick.label, scale=0.8, shift=0.5)])\n\n\nclass rational(_texter):\n \"a texter creating rational labels (e.g. 
'a/b' or even 'a \\over b')\"\n    # we use divmod here to be more explicit\n\n    def __init__(self, prefix=\"\", infix=\"\", suffix=\"\",\n                 numprefix=\"\", numinfix=\"\", numsuffix=\"\",\n                 denomprefix=\"\", denominfix=\"\", denomsuffix=\"\",\n                 plus=\"\", minus=\"-\", minuspos=0, over=r\"{{%s}\\over{%s}}\",\n                 equaldenom=False, skip1=True, skipnum0=True, skipnum1=True, skipdenom1=True,\n                 labelattrs=[text.mathmode]):\n        r\"\"\"initializes the instance\n        - prefix, infix, and suffix (strings) are added at the begin,\n          immediately after the minus, and at the end of the label,\n          respectively\n        - numprefix, numinfix, and numsuffix (strings) are added\n          to the label's numerator correspondingly\n        - denomprefix, denominfix, and denomsuffix (strings) are added\n          to the label's denominator correspondingly\n        - plus or minus (string) is inserted for non-negative or negative numbers\n        - minuspos is an integer, which determines the position, where the\n          plus or minus sign has to be placed; the following values are allowed:\n            1 - writes the plus or minus in front of the numerator\n            0 - writes the plus or minus in front of the whole fraction\n           -1 - writes the plus or minus in front of the denominator\n        - over (string) is taken as a format string generating the\n          fraction bar; it has to contain exactly two string insert\n          operators \"%s\" -- the first for the numerator and the second\n          for the denominator; by far the most common examples are\n          r\"{{%s}\\over{%s}}\" and \"{{%s}/{%s}}\"\n        - usually the numerator and denominator are canceled; however,\n          when equaldenom is set, the least common multiple of all\n          denominators is used\n        - skip1 (boolean) just prints the prefix, the plus or minus,\n          the infix and the suffix, when the value is plus or minus one\n          and at least one of prefix, infix and the suffix is present\n        - skipnum0 (boolean) just prints a zero instead of\n          the whole fraction, when the numerator is zero;\n          no prefixes, infixes, and suffixes are taken into account\n        - skipnum1 (boolean) just prints the numprefix, the plus or minus,\n          the numinfix and the numsuffix, when the num value is plus or minus one\n          and at least one of numprefix, numinfix and the numsuffix is present\n        - skipdenom1 (boolean) just prints the numerator instead of\n          the whole fraction, when the denominator is one and none of the parameters\n          denomprefix, denominfix and denomsuffix are set and minuspos is not -1 or the\n          fraction is positive\n        - labelattrs is a list of attributes for a text engine's text method;\n          None is considered as an empty list; labelattrs might be changed\n          in the painter as well\"\"\"\n        self.prefix = prefix\n        self.infix = infix\n        self.suffix = suffix\n        self.numprefix = numprefix\n        self.numinfix = numinfix\n        self.numsuffix = numsuffix\n        self.denomprefix = denomprefix\n        self.denominfix = denominfix\n        self.denomsuffix = denomsuffix\n        self.plus = plus\n        self.minus = minus\n        self.minuspos = minuspos\n        self.over = over\n        self.equaldenom = equaldenom\n        self.skip1 = skip1\n        self.skipnum0 = skipnum0\n        self.skipnum1 = skipnum1\n        self.skipdenom1 = skipdenom1\n        self.labelattrs = labelattrs\n\n    def gcd(self, *n):\n        \"\"\"returns the greatest common divisor of all elements in n\n        - the elements of n must be non-negative integers\n        - return None if the number of elements is zero\n        - the greatest common divisor is not affected when some\n          of the elements are zero, but it becomes zero when\n          all elements are zero\"\"\"\n        if len(n) == 2:\n            i, j = n\n            if i < j:\n                i, j = j, i\n            while j > 0:\n                i, (dummy, j) = j, divmod(i, j)\n            return i\n        if 
len(n):\n res = n[0]\n for i in n[1:]:\n res = self.gcd(res, i)\n return res\n\n def lcm(self, *n):\n \"\"\"returns the least common multiple of all elements in n\n - the elements of n must be non-negative integers\n - return None if the number of elements is zero\n - the least common multiple is zero when some of the\n elements are zero\"\"\"\n if len(n):\n res = n[0]\n for i in n[1:]:\n res = divmod(res * i, self.gcd(res, i))[0]\n return res\n\n def labels(self, ticks):\n labeledticks = []\n for tick in ticks:\n if tick.label is None and tick.labellevel is not None:\n labeledticks.append(tick)\n tick.temp_rationalnum = tick.num\n tick.temp_rationaldenom = tick.denom\n tick.temp_rationalminus = 1\n if tick.temp_rationalnum < 0:\n tick.temp_rationalminus = -tick.temp_rationalminus\n tick.temp_rationalnum = -tick.temp_rationalnum\n if tick.temp_rationaldenom < 0:\n tick.temp_rationalminus = -tick.temp_rationalminus\n tick.temp_rationaldenom = -tick.temp_rationaldenom\n gcd = self.gcd(tick.temp_rationalnum, tick.temp_rationaldenom)\n (tick.temp_rationalnum, dummy1), (tick.temp_rationaldenom, dummy2) = divmod(tick.temp_rationalnum, gcd), divmod(tick.temp_rationaldenom, gcd)\n if self.equaldenom:\n equaldenom = self.lcm(*[tick.temp_rationaldenom for tick in ticks if tick.label is None])\n if equaldenom is not None:\n for tick in labeledticks:\n factor, dummy = divmod(equaldenom, tick.temp_rationaldenom)\n tick.temp_rationalnum, tick.temp_rationaldenom = factor * tick.temp_rationalnum, factor * tick.temp_rationaldenom\n for tick in labeledticks:\n rationalminus = rationalnumminus = rationaldenomminus = \"\"\n if tick.temp_rationalminus == -1:\n plusminus = self.minus\n else:\n plusminus = self.plus\n if self.minuspos == 0:\n rationalminus = plusminus\n elif self.minuspos == 1:\n rationalnumminus = plusminus\n elif self.minuspos == -1:\n rationaldenomminus = plusminus\n else:\n raise RuntimeError(\"invalid minuspos\")\n if self.skipnum0 and tick.temp_rationalnum == 0:\n tick.label = \"0\"\n elif (self.skip1 and self.skipdenom1 and tick.temp_rationalnum == 1 and tick.temp_rationaldenom == 1 and\n (len(self.prefix) or len(self.infix) or len(self.suffix)) and\n not len(rationalnumminus) and not len(self.numprefix) and not len(self.numinfix) and not len(self.numsuffix) and\n not len(rationaldenomminus) and not len(self.denomprefix) and not len(self.denominfix) and not len(self.denomsuffix)):\n tick.label = \"%s%s%s%s\" % (self.prefix, rationalminus, self.infix, self.suffix)\n else:\n if self.skipnum1 and tick.temp_rationalnum == 1 and (len(self.numprefix) or len(self.numinfix) or len(self.numsuffix)):\n tick.temp_rationalnum = \"%s%s%s%s\" % (self.numprefix, rationalnumminus, self.numinfix, self.numsuffix)\n else:\n tick.temp_rationalnum = \"%s%s%s%i%s\" % (self.numprefix, rationalnumminus, self.numinfix, tick.temp_rationalnum, self.numsuffix)\n if self.skipdenom1 and tick.temp_rationaldenom == 1 and not len(rationaldenomminus) and not len(self.denomprefix) and not len(self.denominfix) and not len(self.denomsuffix):\n tick.label = \"%s%s%s%s%s\" % (self.prefix, rationalminus, self.infix, tick.temp_rationalnum, self.suffix)\n else:\n tick.temp_rationaldenom = \"%s%s%s%i%s\" % (self.denomprefix, rationaldenomminus, self.denominfix, tick.temp_rationaldenom, self.denomsuffix)\n tick.label = text.MultiEngineText(\"%s%s%s%s%s\" % (self.prefix, rationalminus, self.infix, self.over % (tick.temp_rationalnum, tick.temp_rationaldenom), self.suffix),\n [\"%s%s%s\" % (self.prefix, rationalminus, self.infix)] + 
[text.StackedText([text.Text(tick.temp_rationalnum, shift=0.3), text.Text(tick.temp_rationaldenom, shift=-0.9)], frac=True, align=0.5)] + [self.suffix])\n            tick.labelattrs = tick.labelattrs + self.labelattrs\n\n            # del tick.temp_rationalnum # we've inserted those temporary variables ... and do not care any longer about them\n            # del tick.temp_rationaldenom\n            # del tick.temp_rationalminus\n\n","repo_name":"kevancress/MeasureIt_ARCH","sub_path":"libs/pyx/graph/axis/texter.py","file_name":"texter.py","file_ext":"py","file_size_in_byte":20851,"program_lang":"python","lang":"en","doc_type":"code","stars":220,"dataset":"github-code","pt":"37"} +{"seq_id":"30489851612","text":"import sys\nimport os\n\nargs = sys.argv\n\n# if the number of arguments is not 1, compile the given files\nif len(args) != 1:\n    # get the number of arguments\n    length = len(args)-1\n    # loop over each argument\n    for num in range(length):\n        # starting from the first argument\n        num += 1\n        filename = args[num]\n\n        os.system('platex ../' + filename + '.tex')\n        os.system('platex ../' + filename + '.tex')\n        os.system('dvipdfmx ' + filename + '.dvi')\n\n        os.system('mv ' + filename + '.pdf ../')\n\nos.system('rm ../*.log ../*.aux ../*.dvi')\nos.system('rm *.log *.aux *.dvi')\n","repo_name":"kindainoob/latex-s-compile","sub_path":"compile.py","file_name":"compile.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42386279777","text":"import numpy as np\nimport pandas as pd\n\ndef load_wine_quality() :\n    filePath_red = \"C:/Users/sidha/OneDrive/Documents/ml-course-project-f19/ml-course-project-f19/data/Wine Quality/winequality-red.csv\"\n    filePath_white = \"C:/Users/sidha/OneDrive/Documents/ml-course-project-f19/ml-course-project-f19/data/Wine Quality/winequality-white.csv\"\n\n    data_csv_red = pd.read_csv(filePath_red, delimiter=\";\")\n    data_csv_white = pd.read_csv(filePath_white, delimiter=\";\")\n\n    X_red = data_csv_red.iloc[:,:11]\n    y_red = data_csv_red.iloc[:,11]\n\n    X_white = data_csv_white.iloc[:,:11]\n    y_white = data_csv_white.iloc[:,11]\n\n    # DataFrame.append returns a new object instead of mutating in place,\n    # so keep the concatenated result\n    X = X_red.append(X_white)\n    y = y_red.append(y_white)\n\n    return X,y","repo_name":"AliD101v/ml-course-project-f19","sub_path":"rgs/data/wine_quality.py","file_name":"wine_quality.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72605281387","text":"import numpy as np\nfrom scipy.sparse import coo_matrix\nfrom config import *\n# xp = chainer.cuda.cupy if use_gpu else np\nxp = np\n\n\nclass Graph:\n    def __init__(self, n, is_dense):\n        self.n = n\n        self.m = 0\n        self.is_dense = is_dense\n        self.tmp = [set() for _ in range(n)]\n\n    def add_edge(self, a, b):\n        assert 0 <= a < self.n and 0 <= b < self.n\n        self.tmp[a].add(b)\n        self.tmp[b].add(a)\n\n    def build(self):\n        if self.is_dense:\n            self.adj = xp.zeros((self.n, self.n), dtype=xp.float32)\n            for a in range(self.n):\n                for b in self.tmp[a]:\n                    if a < b:\n                        self.adj[a, b] = 1\n                        self.adj[b, a] = 1\n                        self.m += 1\n        else:\n            x = []\n            y = []\n            for a in range(self.n):\n                for b in self.tmp[a]:\n                    if a < b:\n                        x.append(a)\n                        y.append(b)\n                        x.append(b)\n                        y.append(a)\n                        self.m += 1\n            self.adj = coo_matrix((np.ones(\n                2 * self.m, dtype=np.float32), (np.array(x), np.array(y))), shape=(self.n, self.n))\n\n\ndef generate_random_graph(n, m):\n    g = Graph(n, use_dense)\n    acc = 0\n    while acc < m:\n        # don't use xp!\n        a = np.random.randint(n)\n        b = np.random.randint(n)\n        if a != b and a not in g.tmp[b]:\n            g.add_edge(a, b)\n            acc += 1\n    g.build()\n    assert g.m == m\n    return 
g\n\n\ndef read_graph(filename):\n    f = open(filename)\n    text = f.readlines()\n    n, m = map(int, text[0].split())\n    g = Graph(n, use_dense)\n    print(\"Start reading file {}\".format(filename))\n    for i in range(m):\n        a, b = map(int, text[1 + i].split())\n        g.add_edge(a, b)\n    print(\"Finish reading file {}\".format(filename))\n    g.build()\n    f.close()\n    return g\n\n\ndef write_graph(graph, filename):\n    n, _ = graph.adj.shape\n    if graph.is_dense:\n        edges = []\n        for i in range(n):\n            for j in range(i + 1, n):\n                if graph.adj[i, j]:\n                    edges.append((i, j))\n    else:\n        edges = []\n        for i in range(graph.adj.row.size):\n            a = graph.adj.row[i]\n            b = graph.adj.col[i]\n            if a < b:\n                edges.append((a, b))\n    f = open(filename, 'w')\n    m = len(edges)\n    f.write(\"{} {}\\n\".format(n, m))\n    for i in range(m):\n        f.write(\"{} {}\\n\".format(edges[i][0], edges[i][1]))\n    f.close()\n\n\ndef read_test_graphs(size):\n    return [read_graph(\"data/random/{}_{}_{}\".format(size, int(size * 2.5), idx)).adj for idx in range(10)]\n\n\nif __name__ == \"__main__\":\n    # (n, m)\n    GRAPHS = [\n        (10000, 25000),\n        (1000, 2500),\n        (100, 250),\n        (10, 25),\n    ]\n    for graph in GRAPHS:\n        for idx in range(10):\n            g = generate_random_graph(graph[0], graph[1])\n            write_graph(\n                g, \"data/random/{}_{}_{}\".format(graph[0], graph[1], idx))\n","repo_name":"knshnb/MIS_solver","sub_path":"utils/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":3079,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"70399040109","text":"# Best Time to Buy and Sell Stock - https://leetcode.com/problems/best-time-to-buy-and-sell-stock/\nimport sys\nclass Solution(object):\n    def maxProfit(self, prices):\n        \"\"\"\n        :type prices: List[int]\n        :rtype: int\n        \"\"\"\n        res = 0\n        mn = sys.maxint\n        mx = 0\n        for i in xrange(len(prices)):\n            v = prices[i]\n            if v < mn:\n                mx = 0\n                mn = v\n            mx = max(mx, v)\n            res = max(res, mx - mn)\n        return res","repo_name":"igorsubbotin/leetcode_python","sub_path":"problem_121.py","file_name":"problem_121.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39214939246","text":"from mysql.connector import MySQLConnection, Error\nfrom mysql.connector.errors import IntegrityError\nfrom project_dbconfig import read_db_config\nfrom flask import Flask\n\nfrom datetime import date\nfrom datetime import datetime\n\n\nfrom flask import render_template\nfrom flask import abort, redirect, url_for\nfrom flask import flash\nfrom flask import request, session\n\napp = Flask(__name__)\napp.secret_key = b'\\x93\\xf3\\xb2x\\x81s\\xd9.\\x856iY\\xd6\\xf1;\\xad'\n\ndef connect():\n    \"\"\" Connect to MySQL database \"\"\"\n\n    db_config = read_db_config()\n    conn = None\n    try:\n        print('Connecting to MySQL database...')\n        conn = MySQLConnection(**db_config)\n\n        if conn.is_connected():\n            print('Connection established.')\n        else:\n            print('Connection failed.')\n            ## if the database connection fails, a 404 error should be shown\n    except Error as error:\n        print(error)\n\n    return conn\n\ndef authorize(user, password):\n    conn = connect()\n    row = None\n    try: \n        cursor = conn.cursor()\n        query = \"SELECT username,userid FROM users WHERE username = %s AND password = %s\"\n        args = (user, password)\n        print(\"Authenticating(user,pass): \", format(args))\n        cursor.execute(query, args)\n        row = cursor.fetchone()\n        print(\"Authentication Query Result:\", format(row))\n\n    except Error as error:\n        print(error)\n        return(\"Error: 
{}\".format(error))\n\n finally:\n cursor.close()\n conn.close()\n print('Connection closed.')\n \n return row\n\n@app.route(\"/\")\ndef index():\n return redirect(url_for(\"home\"))\n\n@app.route(\"/home\")\ndef home():\n ## OLD CODE IS HERE ##\n #if 'username' in session:\n # return redirect(url_for(\"user\"))\n #else:\n # return render_template(\"home.html\")\n \n #Before logging in we display all the blogs if any are available\n conn = connect()\n blogs = None\n \n try:\n cursor = conn.cursor()\n query = \"SELECT blogid,subject,description,pdate,username FROM blogs INNER JOIN users ON users.userid = blogs.userid\"\n print(\"Executing query: \" + query)\n cursor.execute(query)\n blogs = cursor.fetchall()\n print(\"Query result: \", len(blogs))\n\n except Error as error:\n print(error)\n return(\"Error: {}\".format(error))\n \n finally:\n cursor.close()\n conn.close()\n print('Connection closed.')\n return render_template('home.html', blogs = blogs)\n\n@app.route(\"/blog/\")\ndef blogcontent():\n conn = connect()\n tags = None\n blogid = request.args.get(\"blogid\")\n comments = None\n try:\n cursor = conn.cursor()\n query = \"SELECT tag FROM blogstags WHERE blogid = %s\"\n args = (blogid,)\n print(\"Executing query: \" + query)\n cursor.execute(query, args)\n tags = cursor.fetchall()\n print(\"Query result: \", len(tags))\n\n query = \"SELECT commentid,sentiment,description,cdate,username from comments inner join users where authorid = users.userid AND blogid = %s\"\n print(\"Executing query: \" + query)\n cursor.execute(query, args)\n comments = cursor.fetchall()\n print(\"Query result: \", format(comments))\n\n query = \"SELECT blogid,subject,description,pdate,username FROM blogs INNER JOIN users ON users.userid = blogs.userid WHERE blogid = %s\"\n print(\"Executing query: \" + query)\n cursor.execute(query,args)\n blog = cursor.fetchone()\n print(\"Query result: \", format(blog))\n \n except Error as error:\n print(error)\n return(\"Error: {}\".format(error))\n \n finally:\n cursor.close()\n conn.close()\n print('Connection closed.')\n return render_template(\"blogview.html\", blog=blog, comments = comments, tags = tags)\n\n@app.route(\"/blog/newcomment/\", methods = ['POST', 'GET'])\ndef newcomment():\n conn = connect()\n #RULES:\n #1) USER MUST BE LOGGED IN TO POST A COMMENT\n #2) USER CAN POST AT MOST 1 COMMENT FOR A BLOG\n #3) USER CANNOT COMMENT ON THEIR OWN BLOG\n if request.method == \"POST\":\n if \"username\" in session:\n #If logged in\n query = \"SELECT userid FROM blogs WHERE blogid = %s\"\n args = (request.args.get(\"blogid\"),)\n cursor = conn.cursor()\n cursor.execute(query,args)\n blogauthor = cursor.fetchone()\n #Satisfies Rule #3\n if blogauthor[0] == session['userid']:\n #Check if logged in user is the author of the blog\n flash(\"Cannot commment on your own blog!\")\n return redirect(\"/blog/?blogid=\"+request.args.get(\"blogid\")) \n else:\n #Check if user has already commented on the blog\n query = \"SELECT commentid FROM comments where authorid = %s AND blogid = %s\"\n args = (session['userid'], request.args.get(\"blogid\"))\n cursor.execute(query,args)\n commentlimit = cursor.fetchone()\n if commentlimit is None:\n #Post the comment\n query = \"INSERT INTO comments(sentiment,description,cdate,blogid,authorid) VALUES (%s,%s,%s,%s,%s)\"\n args = (request.form['sentiment'],request.form['description'], date.today().strftime(\"%Y-%m-%d\"), request.args.get(\"blogid\"), session['userid'])\n print(\"Inserting comment: {}\".format(args))\n cursor.execute(query,args)\n 
if cursor.lastrowid:\n print(\"Successfully inserted comment: \", cursor.lastrowid)\n conn.commit()\n return redirect(\"/blog/?blogid=\"+request.args.get(\"blogid\"))\n else:\n #Already commented on this blog\n flash(\"You've already commented on this blog\")\n return redirect(\"/blog/?blogid=\"+request.args.get(\"blogid\"))\n\n else:\n return render_template(\"login.html\",authError = \"Please log in before you comment\")\n\n@app.route(\"/login\", methods=['POST', 'GET'])\ndef login():\n error = \"\"\n\n if request.method == 'POST':\n user = authorize(request.form['username'], request.form['password'])\n if user is not None:\n session['username'] = user[0]\n session['userid'] = user[1]\n return redirect(\"/user/\"+user[0])\n else:\n error = 'Invalid username/password'\n # the code below is executed if the request method\n # was GET or the credentials were invalid\n return render_template('login.html', authError=error)\n\n@app.route(\"/queries/\")\ndef queries():\n return render_template('queries.html')\n\n@app.route(\"/displayUsers\", methods = ['POST', 'GET'])\ndef displayUsers():\n\n conn = connect()\n blogid = None\n if request.method == 'POST':\n try:\n cursor = conn.cursor()\n query = \"SELECT * from blogs where userid = %s and blogid NOT in (SELECT distinct blogid from comments where sentiment = 'negative')\"\n args = (request.form['userid'],)\n cursor.execute(query,args)\n print(\"Userid: \", format(args))\n \n #### fetch blog title ####\n blogid = cursor.fetchall()\n print(\"Title: \", format(blogid))\n\n except Error as error:\n print(error)\n return(\"Error: {}\".format(error))\n \n finally:\n cursor.close()\n conn.close()\n print('Connection closed.')\n return render_template('displayUsers.html', blogid = blogid, userid = request.form['userid'])\n else:\n return redirect ('/queries') \n\n@app.route(\"/blogcontaintag\", methods = ['POST', 'GET'])\ndef blogcontaintag():\n conn = connect()\n blogid = None\n if request.method == 'POST':\n try:\n ### get tag input ###\n cursor = conn.cursor()\n query = \"SELECT blogid FROM blogstags where tag = %s\" \n args = (request.form['blogstag'],)\n cursor.execute(query,args)\n print(\"Blogstag: \", format(args))\n\n #### fetch tag ####\n blogid = cursor.fetchall()\n print(\"Blogid: \", format(blogid))\n \n except Error as error:\n print(error)\n return(\"Error: {}\".format(error))\n \n finally:\n cursor.close()\n conn.close()\n print('Connection closed.')\n return render_template('blogcontaintag.html', blogid = blogid, blogstags = request.form['blogstag'])\n else:\n return redirect ('/queries')\n\n@app.route(\"/usersnevercomment/\")\ndef usersnevercomment():\n conn = connect()\n userid = None\n try:\n cursor = conn.cursor()\n query = \"select userid from users where userid NOT in (select distinct authorid from comments)\"\n cursor.execute(query)\n userid = cursor.fetchall()\n \n except Error as error:\n print(error)\n return(\"Error: {}\".format(error))\n \n finally:\n cursor.close()\n conn.close()\n print('Connection closed.')\n return render_template('usersnevercomment.html', userid = userid)\n\n@app.route(\"/dateUsers/\")\ndef dateUsers():\n conn = connect()\n userid = None\n \n try:\n ##### Get date from user input#######\n cursor = conn.cursor()\n\n ##### query by date#########\n query = \"select userid from blogs where pdate = '2021-08-25' group by userid Having Max(blogid)\"\n cursor.execute(query)\n userid = cursor.fetchall()\n\n except Error as error:\n print(error)\n return(\"Error: {}\".format(error))\n \n finally:\n 
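# Always release the cursor and connection, even when the query above failed.\n        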
cursor.close()\n conn.close()\n print('Connection closed.')\n return render_template('dateUsers.html', userid = userid)\n \n@app.route(\"/listFollowedUsers\", methods = ['POST','GET'])\ndef listFollowedUsers():\n conn = connect()\n leaderid = None\n followerid_1 = None\n followerid_2 = None\n if request.method == 'POST':\n try:\n ### get followers input ###\n cursor = conn.cursor()\n query = \"select distinct leaderid from follows where followerid = %s AND leaderid in ( select leaderid from follows where followerid = %s)\" \n args = (request.form['followerid_1'], request.form['followerid_2'])\n cursor.execute(query, args)\n\n #### fetch tag ####\n leaderid = cursor.fetchall()\n print(\"Leaderid: \", format(leaderid))\n \n except Error as error:\n print(error)\n return(\"Error: {}\".format(error))\n \n finally:\n cursor.close()\n conn.close()\n print('Connection closed.')\n return render_template('listFollowedUsers.html', leaderid = leaderid, followerid_1= request.form['followerid_1'], followerid_2 = request.form['followerid_2'])\n else:\n return redirect ('/queries')\n\n@app.route(\"/user/\")\n@app.route(\"/user/\")\ndef user(username=None):\n if 'username' in session:\n conn = connect()\n row = None\n blogs = None\n cursor = conn.cursor()\n\n try: \n query = \"SELECT username, email FROM users WHERE username = %s\"\n args = (session['username'],)\n print(\"Retrieving data for: \", format(args))\n cursor.execute(query, args)\n row = cursor.fetchone()\n print(\"Authentication Query Result:\", format(row))\n\n query = \"SELECT blogid,subject,description,pdate FROM blogs INNER JOIN users ON users.userid = blogs.userid WHERE users.username = %s\"\n print(\"Executing Query: \", query.format(args))\n cursor.execute(query,args)\n blogs = cursor.fetchall()\n print(\"Query Result: \", format(blogs))\n\n except Error as error:\n print(error)\n return(\"Error: {}\".format(error))\n\n finally:\n cursor.close()\n conn.close()\n print('Connection closed.')\n return render_template('user.html', username=session['username'], row=row, blogs=blogs)\n else:\n return render_template(\"login.html\", authError=\"Please log in\")\n\n@app.route(\"/user/newblogpost\", methods = ['POST','GET'])\ndef newblogpost():\n if 'username' in session:\n conn = connect()\n count = None\n try:\n cursor = conn.cursor()\n query = \"SELECT count(pdate) from blogs where userid = %s AND pdate = %s\"\n args = (session['userid'], date.today().strftime(\"%Y-%m-%d\"))\n print(\"Retreiving data for: \", format(args))\n cursor.execute(query, args)\n count = cursor.fetchone()\n print(\"Authentication Query Result:\", format(count))\n\n if count[0] != 2:\n #Allowed to blog post\n #Get next blogid\n query = \"SELECT MAX(blogid) FROM blogs\"\n cursor.execute(query)\n blogid = cursor.fetchone()\n blogid = blogid[0] + 1\n print(\"New blogid: \", blogid)\n subject = request.form[\"subject\"]\n description = request.form[\"description\"]\n #Insert blog\n query = \"INSERT INTO blogs(blogid,subject,description,pdate,userid) VALUES (%s,%s,%s,%s,%s)\"\n args = (blogid,subject,description,date.today().strftime(\"%Y-%m-%d\"),session['userid'])\n cursor.execute(query,args)\n print(\"Inserted blog at \", cursor.lastrowid)\n\n tags = request.form[\"blogstags\"].split(\";\")\n #Set up taginsert\n blogidlist = [blogid]*len(tags)\n tags = list(zip(blogidlist,tags))\n query = \"INSERT INTO blogstags(blogid,tag) VALUES(%s,%s)\"\n cursor.executemany(query,tags)\n print(\"Inserted tags at \", cursor.lastrowid)\n conn.commit()\n else:\n return \"Maximum of 2 
blogs per day\"\n\n except Error as error:\n print(error)\n return(\"Error: {}\".format(error))\n\n finally:\n cursor.close()\n conn.close()\n print('Connection closed.')\n return redirect(\"/user/\"+session['username'])\n else:\n return render_template(\"login.html\", authError=\"Please log in\")\n \n@app.route(\"/register\", methods=['POST', 'GET'])\ndef register():\n error = \"\"\n if request.method == 'POST':\n if request.form['password'] != request.form['password_confirm']:\n return render_template('register.html', show_reg_form = 1, passwordMismatch = \"Passwords do not match\", email=request.form['email'], username=request.form['username'])\n \n conn = connect()\n try: \n cursor = conn.cursor()\n query = \"INSERT INTO users(username, password, email) VALUES(%s,%s,%s)\"\n args = (request.form['username'], request.form['password'], request.form['email'])\n cursor.execute(query, args)\n print(\"Attempting to add to table: \", format(args))\n\n if cursor.lastrowid:\n print(\"Successfully inserted id: \", cursor.lastrowid)\n conn.commit()\n return render_template(\"register.html\", show_reg_form = 0, userid=cursor.lastrowid, email=request.form['email'], username=request.form['username'])\n\n except IntegrityError as error:\n print(error)\n return render_template(\"login.html\", authError=\"Username/email already exists. Please login\")\n except Error as error:\n print(error)\n return(\"Error: {}\".format(error))\n\n finally:\n cursor.close()\n conn.close()\n print('Connection closed.') \n\n return render_template('register.html', show_reg_form = 1)\n\n@app.route(\"/logout\")\ndef logout():\n session.pop('username',None)\n return redirect(url_for('index'))\n\n# run the app.\nif __name__ == \"__main__\":\n # Setting debug to True enables debug output. 
This line should be\n    # removed before deploying a production app.\n    app.debug = True\n    app.run()","repo_name":"mihai-mihailescu/COMP440-Website","sub_path":"beehive.py","file_name":"beehive.py","file_ext":"py","file_size_in_byte":16222,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"3257542397","text":"import sys\r\nimport os\r\nfrom cx_Freeze import setup, Executable\r\n\r\nfiles = ['main.ico']\r\n\r\ntarget = Executable(\r\n    script=\"app.py\",\r\n    base = \"Win32GUI\",\r\n    icon=\"main.ico\"\r\n)\r\n\r\n\r\nsetup(\r\n    name='MCORD GUI',\r\n    version=\"0.5\",\r\n    description = 'GUI for AFE Hubs',\r\n    author = \"MK\",\r\n    options = {'build_exe': {'include_files':files}},\r\n    executables = [target]\r\n)","repo_name":"kruksik-dev/pyside6_layout","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"70378571949","text":"#!/usr/bin/python3\n\"\"\"divide a matrix module\"\"\"\n\n\ndef matrix_divided(matrix, div):\n    \"\"\"function to divide a matrix\n\n    Args:\n        matrix: a list of lists\n        div(int): what to divide the matrix by\n    Returns:\n        the divided matrix\n    \"\"\"\n    new_matrix = []\n    if (not isinstance(matrix, list) or matrix == [] or\n            not all(isinstance(row, list) for row in matrix) or\n            not all((isinstance(ele, int) or isinstance(ele, float))\n                    for ele in [num for row in matrix for num in row])):\n        raise TypeError(\"matrix must be a matrix (list of lists) of \"\n                        \"integers/floats\")\n    if not isinstance(div, int) and not isinstance(div, float):\n        raise TypeError(\"div must be a number\")\n    if div == 0:\n        raise ZeroDivisionError(\"division by zero\")\n    for row in matrix:\n        first_row = len(matrix[0])\n        if len(row) != first_row:\n            raise TypeError(\"Each row of the matrix must have the same size\")\n        new_row = []\n        for element in row:\n            res = \"{:.2f}\".format(element/div)\n            new_row.append(float(res))\n        new_matrix.append(new_row)\n\n    return new_matrix\n","repo_name":"marsade/alx-higher_level_programming","sub_path":"0x07-python-test_driven_development/2-matrix_divided.py","file_name":"2-matrix_divided.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11097827156","text":"\"\"\"Test control_flow.bb: basic blocks and basic-block management\"\"\"\nfrom typing import Callable\nfrom xdis.bytecode import get_instructions_bytes\nfrom xdis.std import opc\nfrom control_flow.bb import basic_blocks\nfrom control_flow.cfg import ControlFlowGraph\nfrom control_flow.graph import write_dot\nfrom example_fns import two_basic_blocks, if_else_blocks\n\ndebug = True\nif debug:\n    import dis\n\n\ndef check_cfg(fn: Callable, cfg: ControlFlowGraph):\n    bytecode = fn.__code__.co_code\n\n    # Check that get_node returns the correct node\n    # for all instruction offsets in bytecode\n    current_block = cfg.offset2block[0]\n    end_offset = current_block.bb.end_offset\n    need_new_block = False\n    offset2block = cfg.offset2block\n    cached_offsets = len(offset2block)\n    cache_diff = 0\n    for inst in get_instructions_bytes(bytecode, opc):\n        offset = inst.offset\n        if need_new_block:\n            current_block = offset2block[offset]\n            end_offset = current_block.bb.end_offset\n        if offset == end_offset:\n            need_new_block = True\n        else:\n            # Increment number of entries added to cache after next cfg.get_node\n            cache_diff += 1\n
            need_new_block = False\n\n        assert current_block == cfg.get_node(offset)\n\n    # Next check that cfg.offset2block is populated\n    # for all instruction offsets in bytecode as a result of\n    # asking for each offset above\n    assert all(\n        (inst.offset in offset2block for inst in get_instructions_bytes(bytecode, opc))\n    )\n\n    # Assert offset originally was in offset2block or was added in cache\n    assert len(offset2block) == cached_offsets + cache_diff\n    return\n\n\ndef test_basic():\n    offset2inst_index = {}\n    for fn in (two_basic_blocks, if_else_blocks):\n        if debug:\n            print(fn.__name__)\n            dis.dis(fn)\n            print()\n        bb_mgr = basic_blocks(fn.__code__, offset2inst_index)\n        cfg = ControlFlowGraph(bb_mgr)\n        if debug:\n            write_dot(fn.__name__, \"/tmp/test_cfg-\", cfg.graph, write_png=True)\n        check_cfg(fn, cfg)\n\n\nif __name__ == \"__main__\":\n    test_basic()\n","repo_name":"rocky/python-control-flow","sub_path":"pytest/test_cfg.py","file_name":"test_cfg.py","file_ext":"py","file_size_in_byte":2152,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"37"} +{"seq_id":"38768662797","text":"# -*- encoding: utf-8 -*-\n'''\n@File    :   cheat_Server.py\n@Time    :   2020/05/04 11:41:31\n@Author  :   xdbcb8 \n@Version :   1.0\n@Contact :   838025538@qq.com\n'''\n\n# here put the import lib\nimport socket\nimport threading\nimport time\nimport os\nclass Server:\n    def __init__(self):\n        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        self.addr = ('192.168.1.103', 9999)\n        self.user = {}\n    def start_server(self):\n        try:\n            self.s.bind(self.addr)\n            self.s.listen(5)\n        except Exception as e:\n            print(e)\n        print('Server is up, waiting for connections')\n        print('Type \\'esc\\' to shut the server down')\n        threading.Thread(target=self.close).start()\n        self.accept_connect()\n\n\n    def accept_connect(self):\n        while True:\n            s, addr = self.s.accept()\n            self.user[addr] = s\n            num = len(self.user)\n            print('user:{} online. There are {} users. '.format(addr, num))\n            threading.Thread(target=self.recv_and_send2all, args=(addr, s,)).start()\n    def recv_and_send2all(self, addr, conn):\n        # Receive from one client and broadcast the message to every connected client.\n        while True:\n            try:\n                response = conn.recv(1024).decode('utf-8')\n                message = '{}: message from {}: {}'.format(time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()), addr, response)\n                for client in self.user.values():\n                    client.send(message.encode('utf-8'))\n            except ConnectionResetError:\n                print(\"User {} has left the chat!\".format(addr))\n                self.user.pop(addr)\n                break\n    def close(self):\n        while True:\n            cmd = input()\n            if cmd == 'esc':\n                for client in self.user.values():\n                    client.close()\n                self.s.close()\n                os._exit(0)\n            else:\n                print('Invalid command')\n\nif __name__ == \"__main__\":\n    server = Server()\n    server.start_server()\n    ","repo_name":"Liugenhao-gh/python_homework","sub_path":"homework9/4_cheat_Server.py","file_name":"4_cheat_Server.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"21715270610","text":"def dig_pow(n, p):\n    # split the int into its separate digits\n    na = [int(x) for x in str(n)]\n    c = []\n    for x in na:\n        calculation = x ** p\n        c.append(calculation)\n        p = p + 1\n\n    if sum(c) % n == 0:\n        return int(sum(c) / n)\n    else:\n        return -1\n\nprint(dig_pow(46288, 3))\n\n# def dig_pow(n, p):\n#     s = 0\n#     for i,c in enumerate(str(n)): # i for item c for counter\n#         s += pow(int(c),p+i) # here is sum and **\n#     return s/n if s%n==0 else -1 # smart way of if statement\n","repo_name":"lukan1u/pythonProject","sub_path":"codeWars/playingWithDigits.py","file_name":"playingWithDigits.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"11783236631","text":"import glob\n\n\nclass DataLoader:\n    \"\"\"Data Loader class\"\"\"\n\n    @staticmethod\n    def load_data(images_path, masks_path, n=2160):\n        \"\"\"Loads dataset from path\"\"\"\n        imgfiles = [f for f in glob.glob(images_path)]\n        print(images_path)\n        print(imgfiles)\n        mskfiles = [f for f in glob.glob(masks_path)]\n        imgfiles.sort()\n        mskfiles.sort()\n\n        n_pic = len(imgfiles) // n\n\n        whole_imgl = [{} for _ in range(n_pic)]\n        whole_mskl = [{} for _ in range(n_pic)]\n        for r, i in enumerate(zip(imgfiles, mskfiles)):\n            whole_imgl[r % n_pic][int(i[0].split('/')[-1].split('_')[0])] = (i[0])\n            whole_mskl[r % n_pic][int(i[0].split('/')[-1].split('_')[0])] = (i[1])\n        w_imgl = [[x.get(i) for i in range(1, len(x) + 1, 1)] for x in whole_imgl]\n        w_mskl = [[x.get(i) for i in range(1, len(x) + 1, 1)] for x in whole_mskl]\n        return [w_imgl, w_mskl]\n","repo_name":"yaochung/-NCU-RSS-1.3","sub_path":"src/data_loader/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9708888763","text":"import sys\nimport time\nimport uos\nimport uerrno\n\nCRITICAL = 50\nERROR = 40\nWARNING = 30\nINFO = 20\nDEBUG = 10\nNOTSET = 0\n\n_level_dict = {\n    CRITICAL: \"CRIT\",\n    ERROR: \"ERROR\",\n    WARNING: \"WARN\",\n    INFO: \"INFO\",\n    DEBUG: \"DEBUG\",\n}\n\nclass Logger:\n\n    level = NOTSET\n\n    def __init__(self, name):\n        self.name = name\n\n    def _level_str(self, level):\n        l = _level_dict.get(level)\n        if l is not None:\n            return l\n        return \"LVL%s\" % level\n\n    def setLevel(self, level):\n        self.level = level\n\n    def isEnabledFor(self, level):\n        return level >= (self.level or _level)\n\n    def check_log_size(self, log_file, max_log_size):\n
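        # Crude log rotation: uos.stat(log_file)[6] is the file size in bytes; while it\n        # exceeds max_log_size, rewrite the file without its oldest line.\n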
        while uos.stat(log_file)[6] > max_log_size:\n            with open(log_file, 'r') as original_file:\n                data = original_file.read().splitlines(True)\n\n            with open(log_file, 'w') as new_file:\n                for line in data[1:]:\n                    new_file.write(line)\n\n    def log(self, level, msg, *args):\n        if level >= (self.level or _level):\n            try:\n                # print(uos.stat(\"/main/web_files/log.html\")[6])\n                self.check_log_size(\"/main/web_files/log.html\", 2000)\n            except OSError as exc:\n                print(exc)\n                if exc.args[0] == uerrno.ENOENT:\n                    # The log file doesn't exist yet, so create an empty one.\n                    temp_file = open(\"/main/web_files/log.html\", \"w\")\n                    temp_file.close()\n\n            _stream = open(\"/main/web_files/log.html\", \"a+\")\n            print(\"[{}][{}][{}]: {}\".format(time.ticks_ms(), self._level_str(level), self.name, msg))\n            if not args:\n                try:\n                    print(\"[{}][{}][{}]: {}\".format(time.ticks_ms(), self._level_str(level), self.name, msg), file=_stream)\n                except Exception as err:\n                    print(\"Exception: %s\", err)\n            else:  # interpolate the arguments into the message before writing it out\n                print(msg % args, file=_stream)\n            _stream.close()\n\n    def debug(self, msg, *args):\n        self.log(DEBUG, msg, *args)\n\n    def info(self, msg, *args):\n        self.log(INFO, msg, *args)\n\n    def warning(self, msg, *args):\n        self.log(WARNING, msg, *args)\n\n    def error(self, msg, *args):\n        self.log(ERROR, msg, *args)\n\n    def critical(self, msg, *args):\n        self.log(CRITICAL, msg, *args)\n\n    def exc(self, e, msg, *args):\n        self.log(ERROR, msg, *args)\n        # _stream is local to log(), so print the traceback to stdout here.\n        sys.print_exception(e)\n\n    def exception(self, msg, *args):\n        self.exc(sys.exc_info()[1], msg, *args)\n\n_level = INFO\n_loggers = {}\n\ndef getLogger(name):\n    if name in _loggers:\n        return _loggers[name]\n    l = Logger(name)\n    _loggers[name] = l\n    return l\n\ndef info(msg, *args):\n    getLogger(None).info(msg, *args)\n\ndef debug(msg, *args):\n    getLogger(None).debug(msg, *args)\n\ndef basicConfig(level=INFO, filename=None, stream=None, format=None):\n    global _level, _stream\n    _level = level\n    if stream:\n        _stream = stream\n    if filename is not None:\n        print(\"logging.basicConfig: filename arg is not supported\")\n    if format is not None:\n        print(\"logging.basicConfig: format arg is not supported\")\n","repo_name":"DenisCrnic/SECCS_client","sub_path":"lib/logging.py","file_name":"logging.py","file_ext":"py","file_size_in_byte":3677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37358640189","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Apr 7 01:15:18 2019\r\n\r\n@author: hp\r\n\"\"\"\r\n\r\ndef isSubset(arr1, arr2, m, n):\r\n    # Returns 1 when every element of arr2 also appears in arr1, else 0.\r\n    for i in range(n):\r\n        found = False\r\n        for j in range(m):\r\n            if (arr2[i] == arr1[j]):\r\n                found = True\r\n                break\r\n        if not found:\r\n            return 0\r\n    return 1\r\nif __name__ == \"__main__\":\r\n    l1=input()\r\n    l2=input()\r\n    l3=l1.split()\r\n    l4=l2.split()\r\n    m=len(l3)\r\n    n=len(l4)\r\n    if(isSubset(l3,l4,m,n)==1):\r\n        print(\"YES\")\r\n    else:\r\n        print(\"NO\")","repo_name":"amritavarshi/guvi","sub_path":"hunter10.py","file_name":"hunter10.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24788326771","text":"\"\"\"Utilities for DETR trainer.\"\"\"\n\nimport copy\nimport 
json\nimport os\nfrom typing import Any, Dict, Optional, Set\n\nfrom absl import logging\nfrom flax import core as flax_core\nfrom flax import optim as optimizers\nfrom flax import traverse_util\nimport jax\nfrom jax.example_libraries import optimizers as experimental_optimizers\nimport jax.numpy as jnp\nimport numpy as np\nimport PIL\nimport PIL.ImageDraw\nimport PIL.ImageFont\nfrom scenic.common_lib import image_utils\nfrom scenic.dataset_lib.coco_dataset import coco_eval\nfrom scenic.model_lib.base_models import box_utils\nfrom scenic.train_lib_deprecated import optimizers as scenic_optimizers\nfrom scenic.train_lib_deprecated import train_utils\nimport scipy.special\nimport tensorflow as tf\n\n\nclass DetrGlobalEvaluator():\n \"\"\"An interface between the Scenic DETR implementation and COCO evaluators.\"\"\"\n\n def __init__(self, dataset_name: str, **kwargs):\n del dataset_name # Unused.\n\n self.coco_evaluator = coco_eval.DetectionEvaluator(\n threshold=0.0, **kwargs)\n self._included_image_ids = set()\n self._num_examples_added = 0\n\n def add_example(\n self, prediction: Dict[str, np.ndarray], target: Dict[str, np.ndarray]):\n \"\"\"Add a single example to the evaluator.\n\n Args:\n prediction: Model prediction dictionary with keys 'pred_img_ids',\n 'pred_probs' in shape of `[num_objects, num_classes]` and 'pred_boxes'\n in shape of `[num_objects, 4]`. Box coordinates should be in raw DETR\n format, i.e. [cx, cy, w, h] in range [0, 1].\n target: Target dictionary with keys 'orig_size', 'size', and 'image/id'.\n Must also contain 'padding_mask' if the input image was padded.\n \"\"\"\n if 'pred_boxes' not in prediction:\n # Add dummy to make eval work:\n prediction = copy.deepcopy(prediction)\n prediction['pred_boxes'] = np.zeros(\n (prediction['pred_logits'].shape[0], 4)) + 0.5\n\n # Convert from DETR [cx, cy, w, h] to COCO [x, y, w, h] bounding box format:\n boxes = box_utils.box_cxcywh_to_xyxy(prediction['pred_boxes'])\n boxes = np.array(boxes)\n boxes[:, 2] -= boxes[:, 0]\n boxes[:, 3] -= boxes[:, 1]\n\n # Scale from relative to absolute size:\n # Note that the padding is implemented such that such that the model's\n # predictions are [0,1] normalized to the non-padded image, so scaling by\n # `orig_size` will convert correctly to the original image coordinates. 
No\n # image flipping happens during evaluation.\n h, w = np.asarray(target['orig_size'])\n scale_factor = np.array([w, h, w, h])\n boxes = boxes * scale_factor[np.newaxis, :]\n boxes_np = np.asarray(boxes)\n\n # Get scores, excluding the background class:\n if 'pred_probs' in prediction:\n scores = prediction['pred_probs'][:, 1:]\n else:\n scores = scipy.special.softmax(prediction['pred_logits'], axis=-1)[:, 1:]\n\n # Add example to evaluator:\n self.coco_evaluator.add_annotation(\n bboxes=boxes_np,\n scores=np.asarray(scores),\n img_id=int(target['image/id']))\n\n self._num_examples_added += 1\n\n def compute_metrics(\n self,\n included_image_ids: Optional[Set[int]] = None,\n clear_annotations: Optional[bool] = True) -> Dict[str, Any]:\n \"\"\"Computes the metrics for all added predictions.\"\"\"\n if included_image_ids is not None:\n self.coco_evaluator.coco.reload_ground_truth(included_image_ids)\n return self.coco_evaluator.compute_coco_metrics(\n clear_annotations=clear_annotations)\n\n def clear(self):\n self.coco_evaluator.clear_annotations()\n self._num_examples_added = 0\n\n def __len__(self):\n return self._num_examples_added\n\n def write_pred_annotations_to_file(self,\n path: str,\n fname_app: Optional[str] = None,\n clear_annotations: Optional[bool] = True):\n \"\"\"Writes predictions to file in JSON format.\n\n Args:\n path: Path to write the prediction annotation JSON file.\n fname_app: Optional string to append to the file name.\n clear_annotations: Clear annotations after they are stored in a file.\n \"\"\"\n if not tf.io.gfile.exists(path):\n tf.io.gfile.makedirs(path)\n json_file_name = f\"predictions{fname_app if fname_app else ''}.json\"\n json_file_path = os.path.join(path, json_file_name)\n\n def _convert_to_serializable(obj):\n if isinstance(obj, np.ndarray):\n return obj.tolist()\n elif isinstance(obj, np.float32):\n return float(obj)\n else:\n raise TypeError(f'Unserializable object {obj} of type {type(obj)}')\n\n with tf.io.gfile.GFile(json_file_path, 'w') as f:\n f.write(\n json.dumps(\n self.coco_evaluator.annotations,\n default=_convert_to_serializable))\n logging.info('Predicted annotations are stored in %s.', json_file_path)\n if clear_annotations:\n self.coco_evaluator.clear_annotations()\n\n\ndef _unpad_and_resize(masks, padding_mask, orig_size):\n \"\"\"Removes padding and resizes masks.\"\"\"\n # Resize masks to the padding_mask size in case they have a lower resolution:\n masks = image_utils.resize_pil(\n masks,\n out_h=padding_mask.shape[0],\n out_w=padding_mask.shape[1],\n num_batch_dims=1,\n method='linear')\n # Remove padding:\n row_masks = np.any(padding_mask, axis=-1)\n col_masks = np.any(padding_mask, axis=-2)\n masks = masks[:, row_masks, :]\n masks = masks[:, :, col_masks]\n # Resize to original size:\n return image_utils.resize_pil(\n masks,\n out_h=orig_size[0],\n out_w=orig_size[1],\n num_batch_dims=1,\n method='linear')\n\n\ndef normalize_metrics_summary(metrics_summary, split,\n object_detection_loss_keys):\n \"\"\"Normalizes the metrics in the given metrics summary.\n\n Note that currently we only support metrics of the form 1/N sum f(x_i).\n\n Args:\n metrics_summary: dict; Each value is a sum of a calculated metric over all\n examples.\n split: str; Split for which we normalize the metrics. 
Used for logging.\n object_detection_loss_keys: list; A loss key used for computing the object\n detection loss.\n\n Returns:\n Normalized metrics summary.\n\n Raises:\n TrainingDivergedError: Due to observing a NaN in the metrics.\n \"\"\"\n for key, val in metrics_summary.items():\n metrics_summary[key] = val[0] / val[1]\n if np.isnan(metrics_summary[key]):\n raise train_utils.TrainingDivergedError(\n 'NaN detected in {}'.format(f'{split}_{key}'))\n\n # compute and add object_detection_loss using globally normalize terms\n object_detection_loss = 0\n for loss_term_key in object_detection_loss_keys:\n object_detection_loss += metrics_summary[loss_term_key]\n metrics_summary['object_detection_loss'] = object_detection_loss\n\n return metrics_summary\n\n\ndef process_and_fetch_to_host(pred_or_tgt, batch_mask):\n \"\"\"Used to collect predictions and targets of the whole valid/test set.\n\n Args:\n pred_or_tgt: pytree; A pytree of jnp-arrays where leaves are of shape\n `[num_devices, bs, X,...,Y]`.\n batch_mask: A nd-array of shape `[num_devices, bs]`, where zero values\n indicate padded examples.\n\n Returns:\n A list of length num_devices * bs of items, where each item is a tree with\n the same structure as `pred_or_tgt` and each leaf contains a single example.\n \"\"\"\n # Fetch to host in a single call.\n pred_or_tgt, batch_mask = jax.device_get((pred_or_tgt, batch_mask))\n batch_mask = np.array(batch_mask).astype(bool)\n\n def _split_mini_batches(x):\n # Filter out padded examples.\n x = x[batch_mask]\n # Split minibatch of examples into a list of examples.\n x_list = np.split(x, x.shape[0], axis=0)\n # Squeeze out the dummy dimension.\n return jax.tree_util.tree_map(lambda x: np.squeeze(x, axis=0), x_list)\n\n leaves, treedef = jax.tree_util.tree_flatten(pred_or_tgt)\n\n batch_shape = batch_mask.shape\n assert all([leaf.shape[:2] == batch_shape for leaf in leaves]), (\n 'Inconsistent batch shapes.')\n\n # Split batched leaves into lists of examples:\n leaves = list(map(_split_mini_batches, leaves))\n\n # Go from leaf-lists to list of trees:\n out = []\n if leaves:\n num_examples = np.sum(batch_mask, dtype=np.int32)\n for example_ind in range(num_examples):\n out.append(treedef.unflatten([leaf[example_ind] for leaf in leaves]))\n return out\n\n\ndef draw_boxes_side_by_side(pred: Dict[str, Any], batch: Dict[str, Any],\n label_map: Dict[int, str]) -> np.ndarray:\n \"\"\"Side-by-side visualization of detection predictions and ground truth.\"\"\"\n\n viz = []\n\n # unnormalizes images to be [0, 1]\n mean_rgb = np.reshape(np.array([0.48, 0.456, 0.406]), [1, 1, 1, 3])\n std_rgb = np.reshape(np.array([0.229, 0.224, 0.225]), [1, 1, 1, 3])\n imgs = ((batch['inputs'] * std_rgb + mean_rgb) * 255.0).astype(np.uint8)\n\n font = PIL.ImageFont.load_default()\n\n # iterates over images in the batch and makes visualizations\n for indx in range(imgs.shape[0]):\n h, w = batch['label']['size'][indx]\n\n # first for ground truth\n gtim = PIL.Image.fromarray(imgs[indx])\n gtdraw = PIL.ImageDraw.Draw(gtim)\n for bb, cls, is_crowd in zip(batch['label']['boxes'][indx],\n batch['label']['labels'][indx],\n batch['label']['is_crowd'][indx]):\n if cls == 0:\n continue # dummy object.\n\n bcx, bcy, bw, bh = bb * [w, h, w, h]\n bb = [bcx - bw / 2, bcy - bh / 2, bcx + bw / 2, bcy + bh / 2]\n if is_crowd:\n edgecolor = (255, 0, 0)\n else:\n edgecolor = (255, 255, 0)\n\n gtdraw.rectangle(bb, fill=None, outline=edgecolor, width=3)\n gtdraw.text([bb[0], max(bb[1] - 10, 0)],\n label_map[cls],\n font=font,\n fill=(0, 0, 
255))\n\n # second for model predictions\n predim = PIL.Image.fromarray(imgs[indx])\n preddraw = PIL.ImageDraw.Draw(predim)\n pred_lbls = np.argmax(pred['pred_logits'], axis=-1)\n for bb, cls in zip(pred['pred_boxes'][indx], pred_lbls[indx]):\n h, w = batch['label']['size'][indx]\n bcx, bcy, bw, bh = bb * [w, h, w, h]\n bb = [bcx - bw / 2, bcy - bh / 2, bcx + bw / 2, bcy + bh / 2]\n edgecolor = (0, 255, 0) if cls else (0, 150, 0)\n preddraw.rectangle(\n bb, fill=None, outline=edgecolor, width=3 if cls else 1)\n # Separate loop for text to prevent occlusion of text by boxes:\n for bb, cls in zip(pred['pred_boxes'][indx], pred_lbls[indx]):\n if not cls:\n continue\n h, w = batch['label']['size'][indx]\n bcx, bcy, bw, bh = bb * [w, h, w, h]\n bb = [bcx - bw / 2, bcy - bh / 2, bcx + bw / 2, bcy + bh / 2]\n preddraw.text([bb[0], max(bb[1] - 10, 0)],\n label_map[cls],\n font=font,\n fill=(0, 0, 255))\n\n gtim_np = np.asarray(gtim)\n predim_np = np.asarray(predim)\n composite = np.concatenate([predim_np, gtim_np], axis=1)\n\n viz.append(composite)\n return np.stack(viz, axis=0)\n\n\ndef get_detr_optimizer(config):\n \"\"\"Makes a Flax MultiOptimizer for DETR.\"\"\"\n other_optim = scenic_optimizers.get_optimizer(config)\n\n if config.get('backbone_training'):\n backbone_optim = scenic_optimizers.get_optimizer(config.backbone_training)\n else:\n backbone_optim = other_optim\n\n def is_bn(path):\n # For DETR we need to skip the BN affine transforms as well.\n names = ['/bn1/', '/bn2/', '/bn3/', '/init_bn/', '/proj_bn/']\n for s in names:\n if s in path:\n return True\n return False\n backbone_traversal = optimizers.ModelParamTraversal(\n lambda path, param: ('backbone' in path) and (not is_bn(path)))\n other_traversal = optimizers.ModelParamTraversal(\n lambda path, param: 'backbone' not in path)\n\n return MultiOptimizerWithLogging((backbone_traversal, backbone_optim),\n (other_traversal, other_optim))\n\n\nclass MultiOptimizerWithLogging(optimizers.MultiOptimizer):\n \"\"\"Adds logging to MultiOptimizer to show which params are trained.\"\"\"\n\n def init_state(self, params):\n self.log(params)\n return super().init_state(params)\n\n def log(self, inputs):\n for i, traversal in enumerate(self.traversals):\n params = _get_params_dict(inputs)\n flat_dict = traverse_util.flatten_dict(params)\n for key, value in _sorted_items(flat_dict):\n path = '/' + '/'.join(key)\n if traversal._filter_fn(path, value): # pylint: disable=protected-access\n logging.info(\n 'ParamTraversalLogger (opt %d): %s, %s', i, value.shape, path)\n\n\ndef _sorted_items(x):\n \"\"\"Returns items of a dict ordered by keys.\"\"\"\n return sorted(x.items(), key=lambda x: x[0])\n\n\ndef _get_params_dict(inputs):\n if isinstance(inputs, (dict, flax_core.FrozenDict)):\n return flax_core.unfreeze(inputs)\n else:\n raise ValueError(\n 'Can only traverse a flax Model instance or a nested dict, not '\n f'{type(inputs)}')\n\n\ndef clip_grads(grad_tree, max_norm):\n \"\"\"Clip gradients stored as a pytree of arrays to maximum norm `max_norm`.\"\"\"\n # jax.example_libraries.optimizers.clip_grads implements this differently.\n # First, it uses clip_coef of max_norm / norm without the 1e-6.\n # Second, the jnp.where condition and argument order are reversed. 
This should\n # normally be a no-change but we do not know what changes in XLA are triggered\n # as a result of this and how that effects precision etc.\n norm = experimental_optimizers.l2_norm(grad_tree)\n clip_coef = max_norm / (norm + 1e-6)\n normalize = lambda g: jnp.where(clip_coef < 1., g * clip_coef, g)\n return jax.tree_util.tree_map(normalize, grad_tree)\n","repo_name":"google-research/scenic","sub_path":"scenic/projects/baselines/detr/train_utils.py","file_name":"train_utils.py","file_ext":"py","file_size_in_byte":13674,"program_lang":"python","lang":"en","doc_type":"code","stars":2619,"dataset":"github-code","pt":"37"} +{"seq_id":"20043935952","text":"###############################################################################\n# Author: Declan Smyth\n# Email : declan.smyth@gmail.com\n###############################################################################\n# \n# Module to Provide Print functionality\n#\n###############################################################################\n\nimport boto3\nimport json\n\n###############################################################################\n# -- Function: Print Starting Title on Screen\n# Input: None\n# Return: None\ndef PrintTitle():\n PrintThickLine()\n print (\"\"\"\\\n\n \\t Welcome to the Chaos Monkey \n \\tAWS Test Harness\n \"\"\")\n PrintThickLine()\n\n\n\n###############################################################################\n# -- Function: Print a think line\n# Input: None\n# Return: None\ndef PrintThickLine():\n print (\"===========================================================================\")\n\n\n\n###############################################################################\n# -- Function: Print a thin line\n# Input: None\n# Return: None\ndef PrintThinLine():\n print (\"---------------------------------------------------------------------------\")\n\n\n\n###############################################################################\n# -- Function: Print Instance Informatgion on Screen\n# Input: List of Instances\n# Return: None\ndef PrintInformationToScreen(instLst):\n PrintThickLine()\n print (\"Instance ID\\t\\t Public IP\\t Instance State\\t Availability Zone\")\n PrintThinLine()\n # Print the list of Instances to the screen\n for instance in instLst:\n print(\"{0}\\t {1}\\t {2}\\t {3}\\t\".format(instance.id, instance.public_ip_address, instance.state[\"Name\"], instance.placement[\"AvailabilityZone\"]))\n print (\"\"\"\\\n\\nThere are %s instances running your environment\n \"\"\" % len(instLst))\n PrintThickLine()\n\n\n###############################################################################\n# -- Function: Print Instance Informatgion on Screen\n# Input: List of Instances\n# Return: None\ndef PrintSAutoScaleGroupInfo(instLst):\n PrintThickLine()\n print (\"Instance ID\\t\\t Health Status\\t LifeCycleState\\t Availability Zone\")\n PrintThinLine()\n \n # Print the list of Instances to the screen\n for instance in instLst:\n print(\"{0}\\t {1}\\t {2}\\t {3}\\t\".format(instance[\"InstanceId\"], instance[\"HealthStatus\"], instance[\"LifecycleState\"], instance[\"AvailabilityZone\"]))\n print (\"\"\"\\\n\\nThere are %s instances your auto scale group\n \"\"\" % len(instLst))\n PrintThickLine()\n\n\n\n\n###############################################################################\n# -- Function: Print List Informatgion on Screen\n# Input: A List of data\n# Return: None\ndef PrintListToScreen(aLst):\n # The content of a list\n for item in aLst:\n 
print(item)\n\n\n\n\n###############################################################################\n# -- Function: Print Results of testings\n# Input: JSON Results\n# Return: None\ndef PrintTestResult(notifymessage):\n PrintThickLine()\n print (\"\"\"\\\n\n Results of Test Run\n \"\"\")\n PrintThinLine()\n print ( \n \"Start Time: {0}\\n \\\n End Time: {1}\\n \\\n Instances Stopped: {2}\\n \\\n Instances Restarted: {3}\\n \\\n Elapsed Time: {4}\\n \\\n Test Status: {5}\\n \\\n New Instance IDs: {6}\\n\".format(\n notifymessage['starttime'], \n notifymessage['endtime'], \n notifymessage['instancesstopped'],\n notifymessage['instancesrestarted'], \n notifymessage['elapsedtime'], \n notifymessage['teststatus'], \n notifymessage['newinstanceid'])\n )\n PrintThickLine()\n\n\n\n###############################################################################\n# -- Function: Print Results of testings\n# Input: JSON Results\n# Return: None\ndef PrintTestStart(numDisrupt, disruptionList):\n PrintThickLine()\n print (\"\"\"\\\n \\t Starting Test Run\n \"\"\")\n PrintThinLine()\n print (\"This test will terminate {0} instances and monitor re-instatement time\".format(numDisrupt))\n print (\"\\nThe following instances will be disrupted:\\n\")\n print (\"\\t%s\\n\" %disruptionList)\n PrintThickLine()","repo_name":"declan-smyth/entarchdesign-ca-dev","sub_path":"scripts/printinfo.py","file_name":"printinfo.py","file_ext":"py","file_size_in_byte":4750,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70673153387","text":"import pandas as pd\r\nimport numpy as np\r\nfrom sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error\r\nfrom statistics import mean\r\nimport tensorflow as tf\r\nimport keras\r\nfrom keras.wrappers.scikit_learn import KerasRegressor\r\nfrom keras.layers import Dense\r\nfrom keras.layers import LSTM\r\nfrom keras.models import Sequential\r\nfrom keras.models import *\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.patches as mpatches\r\n\r\n#------------------------------------------Pre-Processing-------------------------------------\r\ndef pre_process(data):#Create data sets for current pollution level and next pollution level\r\n X = []\r\n y = []\r\n for i in range(len(data)-1-1):\r\n row = data[i:(i+1), 0]#X data - current polluition level at given time\r\n X.append(row)\r\n y.append(data[i+1, 0])#y data - pollution level at next time\r\n return np.array(X), np.array(y)\r\n\r\n#------------------------------------------Model----------------------------------------------\r\ndef Model1():\r\n model = Sequential()\r\n model.add(LSTM(16, input_shape=(1,1)))#Add LSTM layer and input shape\r\n model.add(Dense(8))\r\n model.add(Dense(1, 'sigmoid'))#Add output layer\r\n model.compile(loss='mean_squared_error', optimizer='adam')#Define accracy reading for each epcoh\r\n return model\r\n #model.fit(X, y, epochs=epochs, batch_size=batch, verbose=ver)#Pass data and paramters\r\n\r\n#----------------------------------------Read Data in----------------------------------------\r\ndf = pd.read_csv(\"Processed.csv\",usecols=[7])#Change column number to chnage pollutant being read in\r\ndf = df.sample(1000)\r\ndata = df.values #Extract values from column\r\ndata = data.astype('float32')#Conver to 
float\r\n#--------------------------------------------prediction---------------------------------------\r\nscaler = MinMaxScaler(feature_range=(0, 1))#Set a scalar\r\ndata = scaler.fit_transform(data)#Transform data from current states to numbers from 0 to 1\r\nX, y = pre_process(data)#Pass to function\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)#Split test train 30 70\r\n\r\n\r\n#Hyper parameter tunning - commented out to ave time and computational power\r\n\"\"\"\r\nparam_FCNN = {'batch_size':[10,20,50],'epochs':[50, 100, 200]}\r\n\r\nmodel_tunning = KerasRegressor(Model1)\r\nTUNE_model = GridSearchCV(estimator=model_tunning, param_grid=param_FCNN, cv=10, refit='accuracy')\r\ngs = TUNE_model.fit(X_train,y_train)\r\n\r\nprint(\"HyperParameter results\")\r\nprint(gs.cv_results_)\r\n\r\nprint(\"\")\r\nprint(\"Best HyperParameters:\")\r\nt = gs.best_estimator_\r\nprint(t)\r\n\"\"\"\r\n\r\nLSTM_Model = KerasRegressor(build_fn=Model1, epochs=50, batch_size=20)#build LTSM passing paraters found from hyper parater tunning\r\nModel_train = LSTM_Model.fit(X_train, y_train)#Fit training data\r\npred = LSTM_Model.predict(X_test)#Predict using trained model\r\n\r\n#Accuracy for model\r\nprint(\"Accracy untransformed:\")\r\nprint('R2: ',r2_score(y_test, pred))\r\nprint('MSE: ', (mean_squared_error(y_test,pred)))\r\nprint('RMSE: ', np.sqrt(mean_squared_error(y_test, pred)))\r\nprint('MAE: ', mean_absolute_error(y_test, pred))\r\n\r\n#Transform predcitions and test data back from 0 to 1 to original numbers\r\npred_reshape = pred.reshape(-1,1)\r\npred_transform = scaler.inverse_transform(pred_reshape)\r\ny_test_reshape = y_test.reshape(-1,1)\r\ny_test_transform = scaler.inverse_transform(y_test_reshape)\r\n\r\n#Accuracy based on origianl numbers\r\nprint(\"\")\r\nprint(\"Accracy transformed:\")\r\nprint('R2: ',r2_score(y_test_transform, pred_transform))\r\nprint('MSE: ', (mean_squared_error(y_test_transform,pred_transform)))\r\nprint('RMSE: ', np.sqrt(mean_squared_error(y_test_transform, pred_transform)))\r\nprint('MAE: ', mean_absolute_error(y_test_transform, pred_transform))\r\n\r\n\r\n#Plot accuracy\r\nplt.figure()\r\nred_patch = mpatches.Patch(color='red', label='Predicted Values')\r\nblue_patch = mpatches.Patch(color='blue', label='True values')\r\nplt.title(\"0-1 O3 Prediciton μg/m3\")\r\nplt.xlabel(\"Time\")\r\nplt.ylabel(\"Value\")\r\nplt.plot(y_test)\r\nplt.plot(pred, color='r')\r\nplt.legend(handles=[red_patch, blue_patch])\r\nplt.show()\r\n\r\nplt.figure()\r\nred_patch = mpatches.Patch(color='red', label='Predicted Values')\r\nblue_patch = mpatches.Patch(color='blue', label='True values')\r\nplt.title(\"True Values O3 Prediciton μg/m3\")\r\nplt.xlabel(\"Time\")\r\nplt.ylabel(\"Value\")\r\nplt.plot(y_test_transform)\r\nplt.plot(pred_transform, color='r')\r\nplt.legend(handles=[red_patch, blue_patch])\r\nplt.show()","repo_name":"j4ck23/Master_Project","sub_path":"FinishedProject/LSTM.py","file_name":"LSTM.py","file_ext":"py","file_size_in_byte":4572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71621625707","text":"\"\"\"\nUtilities for handling ZIP \"extra data\" fields\n\"\"\"\n\nfrom dataclasses import dataclass, replace, field\nfrom typing import List, Optional, Dict, Tuple, Type, TypeVar, ClassVar\n\nfrom atmfjstc.lib.binary_utils.BinaryReader import BinaryReader\nfrom atmfjstc.lib.iso_timestamp import ISOTimestamp, iso_from_unix_time\nfrom atmfjstc.lib.os_forensics.windows import 
iso_from_ntfs_time\nfrom atmfjstc.lib.os_forensics.windows.security import NTSecurityDescriptor\nfrom atmfjstc.lib.os_forensics.windows.security.parse import decode_nt_security_descriptor\nfrom atmfjstc.lib.os_forensics.posix import PosixUID, PosixGID, PosixDeviceID\n\nfrom atmfjstc.lib.archive_forensics.zip import decompress_now\n\n\ndef parse_zip_central_extra_data(field_bytes: bytes) -> List['ZipExtraHeader']:\n return _parse_zip_extra_data(field_bytes, is_local=False)\n\n\ndef parse_zip_local_extra_data(field_bytes: bytes) -> List['ZipExtraHeader']:\n return _parse_zip_extra_data(field_bytes, is_local=True)\n\n\ndef _parse_zip_extra_data(data: bytes, is_local: bool) -> List['ZipExtraHeader']:\n result = []\n\n reader = BinaryReader(data, big_endian=False)\n\n try:\n for header_id, value in reader.iter_tlv(type_bytes=2, length_bytes=2, meaning='ZIP extra headers'):\n result.append(ZipExtraHeader.parse_from_tlv(header_id, value, is_local))\n except Exception as e:\n raise MalformedZipExtraDataError(\n f\"Malformed binary data for ZIP {'local' if is_local else 'central'} extra field\"\n ) from e\n\n return result\n\n\n@dataclass(frozen=True)\nclass ZipExtraHeader:\n magic: int\n is_local: bool\n warnings: Tuple[str, ...]\n unconsumed_data: Optional[bytes]\n\n def description(self) -> str:\n return f\"ZIP {'local' if self.is_local else 'central'} extra header of type \" + \\\n (self.__class__.__name__ if not isinstance(self, ZXHUnrecognized) else f\"0x{self.magic:04x}\")\n\n @staticmethod\n def parse_from_tlv(header_id: int, data: bytes, is_local: bool) -> 'ZipExtraHeader':\n reader = BinaryReader(data, big_endian=False)\n\n header_class = ZipExtraHeader.get_header_class_for_magic(header_id)\n if header_class is None:\n return ZXHUnrecognized(header_id, is_local, (), None, reader.read_remainder())\n\n result = header_class.parse(reader, is_local)\n\n if not reader.eof():\n result = replace(\n result,\n warnings=(*result.warnings, \"Header was not fully consumed\"),\n unconsumed_data=reader.read_remainder()\n )\n\n return result\n\n @staticmethod\n def parse(reader: BinaryReader, is_local: bool) -> 'ZipExtraHeader':\n raise NotImplementedError(\"Must override this in concrete header classes\")\n\n _cached_header_index: ClassVar[Optional[Dict[int, Type['ZipExtraHeader']]]] = None\n\n @classmethod\n def get_header_class_for_magic(cls, magic: int) -> Optional[Type['ZipExtraHeader']]:\n if cls._cached_header_index is None:\n cls._cached_header_index = { header_class.magic: header_class for header_class in _ALL_HEADER_CLASSES }\n\n return cls._cached_header_index.get(magic)\n\n\n@dataclass(frozen=True)\nclass ZXHUnrecognized(ZipExtraHeader):\n raw_data: bytes\n\n\n@dataclass(frozen=True)\nclass ZXHZip64(ZipExtraHeader):\n magic: int = field(default=0x0001, init=False)\n sizes: Tuple[int, ...]\n disk_start_no: Optional[int]\n\n @staticmethod\n def parse(reader: BinaryReader, is_local: bool) -> 'ZXHZip64':\n # Due to the way this header works (subfields may be included or omitted depending on other fields in the\n # local/central directory record), we can't decode it here completely as we need a lot more context. 
So we just\n # return a list of unmarked 64-bit sizes, and the 32-bit disk start number.\n\n total_bytes = reader.bytes_remaining()\n\n if (total_bytes > 28) or (total_bytes % 4 != 0):\n raise MalformedZipExtraDataError(\n f\"ZIP64 extra header size should be a multiple of 4 between 0 and 28, but it is {total_bytes}\"\n )\n\n n_64bit_values = total_bytes >> 3\n sizes = reader.read_struct(f'{n_64bit_values}Q', '64-bit sizes') if n_64bit_values > 0 else ()\n disk_start_no = reader.read_uint32('disk start no.') if (total_bytes % 8 != 0) else None\n\n return ZXHZip64(is_local, (), None, sizes, disk_start_no)\n\n\nTagT = TypeVar('TagT', bound='NTFSInfoTag')\n\n\n@dataclass(frozen=True)\nclass ZXHPkWareNTFS(ZipExtraHeader):\n magic: int = field(default=0x000a, init=False)\n tags: Tuple['NTFSInfoTag', ...]\n reserved: int\n\n def get_single_tag(self, tag_type: Type[TagT]) -> Optional[TagT]:\n result = None\n\n for tag in self.tags:\n if isinstance(tag, tag_type):\n if result is not None:\n raise ValueError(f\"Found multiple {tag_type.__class__.__name__} tags\")\n\n result = tag\n\n return result\n\n @staticmethod\n def parse(reader: BinaryReader, is_local: bool) -> 'ZXHPkWareNTFS':\n reserved = reader.read_uint32('reserved field')\n\n tags = tuple(NTFSInfoTag.parse(tag, value) for tag, value in reader.iter_tlv(type_bytes=2, length_bytes=2))\n\n unhandled = set(tag.tag for tag in tags if isinstance(tag, NTFSInfoUnhandledTag))\n warnings = (f\"Unhandled tag(s) of type {', '.join(str(tag) for tag in unhandled)}\",) if len(\n unhandled) > 0 else ()\n\n return ZXHPkWareNTFS(is_local, warnings, None, tags, reserved)\n\n\n@dataclass(frozen=True)\nclass NTFSInfoTag:\n tag: int\n\n @staticmethod\n def parse(tag: int, value: bytes) -> 'NTFSInfoTag':\n reader = BinaryReader(value, big_endian=False)\n\n if tag == 1:\n return NTFSInfoTimestampsTag(*(\n iso_from_ntfs_time(raw_time) for raw_time in reader.read_struct('QQQ', 'timestamps')\n ))\n\n return NTFSInfoUnhandledTag(tag, value)\n\n\n@dataclass(frozen=True)\nclass NTFSInfoTimestampsTag(NTFSInfoTag):\n tag: int = field(default=1, init=False)\n mtime: ISOTimestamp\n atime: ISOTimestamp\n ctime: ISOTimestamp\n\n\n@dataclass(frozen=True)\nclass NTFSInfoUnhandledTag(NTFSInfoTag):\n raw_data: bytes\n\n\n@dataclass(frozen=True)\nclass ZXHPkWareUnix(ZipExtraHeader):\n magic: int = field(default=0x000d, init=False)\n atime: ISOTimestamp\n mtime: ISOTimestamp\n uid: PosixUID\n gid: PosixGID\n device: Optional[PosixDeviceID] = None\n link_target: Optional[bytes] = None\n\n @staticmethod\n def parse(reader: BinaryReader, is_local: bool) -> 'ZXHPkWareUnix':\n raw_atime, raw_mtime, uid, gid = reader.read_struct('IIHH')\n device = link_target = None\n\n special_data = reader.read_remainder()\n\n # KLUDGE: normally we'd need to know the type of the file to know whether special_data represents the link\n # target (for sym/hardlinks) or the major/minor device numbers (for char/block devices). 
We use a trick instead.\n # Given that the major number is almost certainly 16 bits at best, there will almost definitely be a \\x00 byte\n # in the special data, whereas for a link this will definitely not be the case.\n\n if (len(special_data) == 8) and (b'\\x00' in special_data):\n device = BinaryReader(special_data, big_endian=False).read_struct('II')\n else:\n link_target = special_data\n\n return ZXHPkWareUnix(\n is_local, (), None,\n atime=iso_from_unix_time(raw_atime),\n mtime=iso_from_unix_time(raw_mtime),\n uid=uid, gid=gid, device=device, link_target=link_target\n )\n\n\n@dataclass(frozen=True)\nclass ZXHNTSecurityDescriptor(ZipExtraHeader):\n magic: int = field(default=0x4453, init=False)\n uncompressed_data_size: int\n data: Optional['NTSecurityDescriptorData']\n\n @staticmethod\n def parse(reader: BinaryReader, is_local: bool) -> 'ZXHNTSecurityDescriptor':\n descriptor_size = reader.read_uint32('descriptor size')\n data = None\n warnings = ()\n\n if is_local:\n data = NTSecurityDescriptorData.parse(reader)\n\n if isinstance(data, NTSecurityDescriptorDataCompressed):\n warnings = (f\"Failed to decompress descriptor\",)\n elif data.__class__ == NTSecurityDescriptorDataDecompressed:\n warnings = (f\"Don't know how to decode this format version\",)\n\n return ZXHNTSecurityDescriptor(is_local, warnings, None, descriptor_size, data)\n\n\n@dataclass(frozen=True)\nclass NTSecurityDescriptorData:\n format_version: int\n compression_method: int\n\n @staticmethod\n def parse(reader: BinaryReader) -> 'NTSecurityDescriptorData':\n version, compress_type, crc = reader.read_struct('BHI')\n compressed_data = reader.read_remainder()\n\n try:\n raw_data = decompress_now(compressed_data, compress_type)\n except Exception:\n return NTSecurityDescriptorDataCompressed(version, compress_type, compressed_data)\n\n if version == 0:\n return NTSecurityDescriptorDataV0(compress_type, raw_data, decode_nt_security_descriptor(raw_data))\n\n return NTSecurityDescriptorDataDecompressed(version, compress_type, raw_data)\n\n\n@dataclass(frozen=True)\nclass NTSecurityDescriptorDataCompressed(NTSecurityDescriptorData):\n compressed_data: bytes\n\n\n@dataclass(frozen=True)\nclass NTSecurityDescriptorDataDecompressed(NTSecurityDescriptorData):\n raw_data: bytes\n\n\n@dataclass(frozen=True)\nclass NTSecurityDescriptorDataV0(NTSecurityDescriptorDataDecompressed):\n format_version: int = field(default=0, init=False)\n descriptor: NTSecurityDescriptor\n\n\n@dataclass(frozen=True)\nclass ZXHExtendedTimestamps(ZipExtraHeader):\n magic: int = field(default=0x5455, init=False)\n mtime: Optional[ISOTimestamp] = None\n atime: Optional[ISOTimestamp] = None\n ctime: Optional[ISOTimestamp] = None\n\n @staticmethod\n def parse(reader: BinaryReader, is_local: bool) -> 'ZXHExtendedTimestamps':\n flags = reader.read_uint8('flags')\n\n mtime = iso_from_unix_time(reader.read_uint32('mtime')) if flags & (1 << 0) else None\n atime = iso_from_unix_time(reader.read_uint32('atime')) if is_local and (flags & (1 << 1)) else None\n ctime = iso_from_unix_time(reader.read_uint32('ctime')) if is_local and (flags & (1 << 2)) else None\n\n return ZXHExtendedTimestamps(is_local, (), None, mtime=mtime, atime=atime, ctime=ctime)\n\n\n@dataclass(frozen=True)\nclass ZXHInfoZipUnixV1(ZipExtraHeader):\n magic: int = field(default=0x5855, init=False)\n mtime: ISOTimestamp\n atime: ISOTimestamp\n uid: Optional[PosixUID] = None\n gid: Optional[PosixGID] = None\n\n @staticmethod\n def parse(reader: BinaryReader, is_local: bool) -> 'ZXHInfoZipUnixV1':\n 
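# Info-ZIP Unix v1 extra field ('UX', 0x5855): 32-bit atime and mtime as Unix\n        # epoch seconds, optionally followed by 16-bit UID and GID.\n        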
raw_atime, raw_mtime = reader.read_struct('II', 'timestamps')\n\n if not reader.eof():\n uid, gid = reader.read_struct('HH', 'UID/GID')\n else:\n uid = gid = None\n\n return ZXHInfoZipUnixV1(\n is_local, (), None,\n mtime=iso_from_unix_time(raw_mtime), atime=iso_from_unix_time(raw_atime), uid=uid, gid=gid,\n )\n\n\n@dataclass(frozen=True)\nclass ZXHInfoZipUnicodeComment(ZipExtraHeader):\n magic: int = field(default=0x6375, init=False)\n data: Optional['IZUnicodeCommentData']\n\n @staticmethod\n def parse(reader: BinaryReader, is_local: bool) -> 'ZXHInfoZipUnicodeComment':\n data = IZUnicodeCommentData.parse(reader)\n\n warnings = ()\n if isinstance(data, IZUnicodeCommentDataUnsupported):\n warnings = (f\"Don't know how to decode this format version\",)\n\n return ZXHInfoZipUnicodeComment(is_local, warnings, None, data)\n\n\n@dataclass(frozen=True)\nclass IZUnicodeCommentData:\n format_version: int\n\n @staticmethod\n def parse(reader: BinaryReader) -> 'IZUnicodeCommentData':\n version = reader.read_uint8('version')\n\n if version == 1:\n standard_comment_crc32 = reader.read_uint32('CRC32')\n comment = reader.read_remainder().decode('utf-8')\n return IZUnicodeCommentDataV1(comment, standard_comment_crc32)\n else:\n return IZUnicodeCommentDataUnsupported(version, reader.read_remainder())\n\n\n@dataclass(frozen=True)\nclass IZUnicodeCommentDataUnsupported(IZUnicodeCommentData):\n raw_data: bytes\n\n\n@dataclass(frozen=True)\nclass IZUnicodeCommentDataV1(IZUnicodeCommentData):\n format_version: int = field(default=1, init=False)\n comment: str\n standard_comment_crc32: int\n\n\n@dataclass(frozen=True)\nclass ZXHInfoZipUnicodePath(ZipExtraHeader):\n magic: int = field(default=0x7075, init=False)\n data: Optional['IZUnicodePathData']\n\n @staticmethod\n def parse(reader: BinaryReader, is_local: bool) -> 'ZXHInfoZipUnicodePath':\n data = IZUnicodePathData.parse(reader)\n\n warnings = ()\n if isinstance(data, IZUnicodePathDataUnsupported):\n warnings = (f\"Don't know how to decode this format version\",)\n\n return ZXHInfoZipUnicodePath(is_local, warnings, None, data)\n\n\n@dataclass(frozen=True)\nclass IZUnicodePathData:\n format_version: int\n\n @staticmethod\n def parse(reader: BinaryReader) -> 'IZUnicodePathData':\n version = reader.read_uint8('version')\n\n if version == 1:\n standard_path_crc32 = reader.read_uint32('CRC32')\n path = reader.read_remainder().decode('utf-8')\n return IZUnicodePathDataV1(path, standard_path_crc32)\n else:\n return IZUnicodePathDataUnsupported(version, reader.read_remainder())\n\n\n@dataclass(frozen=True)\nclass IZUnicodePathDataUnsupported(IZUnicodePathData):\n raw_data: bytes\n\n\n@dataclass(frozen=True)\nclass IZUnicodePathDataV1(IZUnicodePathData):\n format_version: int = field(default=1, init=False)\n path: str\n standard_path_crc32: int\n\n\n@dataclass(frozen=True)\nclass ZXHInfoZipUnixV2(ZipExtraHeader):\n magic: int = field(default=0x7855, init=False)\n uid: Optional[PosixUID] = None\n gid: Optional[PosixGID] = None\n\n @staticmethod\n def parse(reader: BinaryReader, is_local: bool) -> 'ZXHInfoZipUnixV2':\n if is_local:\n uid, gid = reader.read_struct('HH', 'UID/GID')\n else:\n uid = gid = None\n\n return ZXHInfoZipUnixV2(is_local, (), None, uid=uid, gid=gid)\n\n\n@dataclass(frozen=True)\nclass ZXHInfoZipUnixV3(ZipExtraHeader):\n magic: int = field(default=0x7875, init=False)\n data: Optional['IZUnixV3Data']\n\n @staticmethod\n def parse(reader: BinaryReader, is_local: bool) -> 'ZXHInfoZipUnixV3':\n data = IZUnixV3Data.parse(reader)\n\n warnings = ()\n if 
isinstance(data, IZUnixV3DataUnsupported):\n warnings = (f\"Don't know how to decode this format version\",)\n\n return ZXHInfoZipUnixV3(is_local, warnings, None, data)\n\n\n@dataclass(frozen=True)\nclass IZUnixV3Data:\n format_version: int\n\n @staticmethod\n def parse(reader: BinaryReader) -> 'IZUnixV3Data':\n version = reader.read_uint8('version')\n\n if version == 1:\n uid_size = reader.read_uint8('UID size')\n uid = PosixUID(reader.read_fixed_size_int(uid_size, signed=False, meaning='UID'))\n gid_size = reader.read_uint8('GID size')\n gid = PosixGID(reader.read_fixed_size_int(gid_size, signed=False, meaning='GID'))\n\n return IZUnixV3DataV1(uid, gid)\n else:\n return IZUnixV3DataUnsupported(version, reader.read_remainder())\n\n\n@dataclass(frozen=True)\nclass IZUnixV3DataUnsupported(IZUnixV3Data):\n raw_data: bytes\n\n\n@dataclass(frozen=True)\nclass IZUnixV3DataV1(IZUnixV3Data):\n format_version: int = field(default=1, init=False)\n uid: PosixUID\n gid: PosixGID\n\n\n_ALL_HEADER_CLASSES : List[Type[ZipExtraHeader]] = [\n ZXHZip64, ZXHPkWareNTFS, ZXHPkWareUnix, ZXHNTSecurityDescriptor, ZXHExtendedTimestamps, ZXHInfoZipUnixV1,\n ZXHInfoZipUnicodeComment, ZXHInfoZipUnicodePath, ZXHInfoZipUnixV2, ZXHInfoZipUnixV3,\n]\n\n\nclass ZipExtraFieldsError(Exception):\n pass\n\n\nclass MalformedZipExtraDataError(ZipExtraFieldsError):\n pass\n","repo_name":"goc9000/python-library","sub_path":"archive-forensics/src/atmfjstc/lib/archive_forensics/zip/extra_headers.py","file_name":"extra_headers.py","file_ext":"py","file_size_in_byte":16110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25615797509","text":"import discord\nfrom discord.ext import commands\nfrom discord.utils import get\n\nclass c76(commands.Cog, name=\"c76\"):\n\n def __init__(self, bot: commands.Bot):\n self.bot = bot\n @commands.command(name='Enchanted_Forest_of_Springtime', aliases=['c76'])\n async def example_embed(self, ctx):\n embed = discord.Embed(title='Enchanted Forest of Springtime',\n color=0x1D9E74)\n embed.set_thumbnail(url='https://www.duelingbook.com/images/custom-pics/2300000/2321498.jpg')\n\n embed.add_field(name='Status (Archetype)', value='Casual:3/Tournament:3', inline=True)\n embed.add_field(name='Type', value='Spell/Continuous', inline=False)\n embed.add_field(name='Card Effect', value='When this card is activated: You can take 1 Plant monster from your GY and either place it on top or bottom of your Deck. Once per turn: You can Tribute 1 Plant monster you control; Special Summon 1 Level 4 or lower Plant monster from your GY, but its effects are negated this turn, also banish it when it leaves the field. 
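Backtracking to the Info-ZIP Unix v3 (0x7875) field parsed in the previous record: its UID/GID values are variable-width little-endian integers whose byte sizes are themselves encoded in the payload, which is what read_fixed_size_int consumes there. A self-contained decoding sketch (the layout follows the published Info-ZIP extra-field spec; the helper below is illustrative, not this library's API):

import io

def read_unix_v3_ids(payload: bytes):
    # Version-1 layout: version(1) | uid_size(1) | uid | gid_size(1) | gid,
    # with uid/gid stored little-endian in uid_size/gid_size bytes.
    buf = io.BytesIO(payload)
    if buf.read(1)[0] != 1:
        raise ValueError('unsupported version')
    uid_size = buf.read(1)[0]
    uid = int.from_bytes(buf.read(uid_size), 'little')
    gid_size = buf.read(1)[0]
    gid = int.from_bytes(buf.read(gid_size), 'little')
    return uid, gid

# Two-byte UID/GID pair, both 1000:
assert read_unix_v3_ids(bytes([1, 2, 0xE8, 0x03, 2, 0xE8, 0x03])) == (1000, 1000)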
During your Standby Phase: Gain 200 LP for each Plant monster in your GY.', inline=False)\n embed.set_footer(text='Set Code: ANCF')\n\n await ctx.send(embed=embed)\n\ndef setup(bot: commands.Bot):\n bot.add_cog(c76(bot))","repo_name":"ProfessorSean/Kasutamaiza","sub_path":"upcfcardsearch/c76.py","file_name":"c76.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10442895616","text":"import re\nimport seaborn as sns\nfrom matplotlib import pyplot as plt\nimport pandas as pd\nfrom datetime import datetime\nimport pytz\n\nlogFilename = './data/access.log'\n\n\n\ndef parse_str(x):\n \"\"\"\n Returns the string delimited by two characters.\n\n Example:\n `>>> parse_str('[my string]')`\n `'my string'`\n \"\"\"\n return x[1:-1]\n\ndef parse_datetime(x):\n '''\n Parses datetime with timezone formatted as:\n `[day/month/year:hour:minute:second zone]`\n\n Example:\n `>>> parse_datetime('13/Nov/2015:11:45:42 +0000')`\n `datetime.datetime(2015, 11, 3, 11, 45, 4, tzinfo=)`\n\n Due to problems parsing the timezone (`%z`) with `datetime.strptime`, the\n timezone will be obtained using the `pytz` library.\n '''\n dt = datetime.strptime(x[1:-1], '%d/%b/%Y:%H:%M:%S')\n return dt\n #this log file does not have a timezone so I can ignore this issue \n #dt_tz = int(x[-6:-3])*60+int(x[-3:-1])\n #return dt.replace(tzinfo=pytz.FixedOffset(dt_tz))\n\n\ndf = pd.read_csv(\n 'data/access.log',\n sep=r'\\s(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)(?![^\\[]*\\])',\n engine='python',\n na_values='-',\n header=None,\n usecols=[0, 3, 4, 5, 6, 7, 8],\n names=['ip', 'time', 'request', 'status', 'size', 'referer', 'user_agent'],\n converters={'time': parse_datetime,\n 'request': parse_str,\n 'status': int,\n 'size': int,\n 'referer': parse_str,\n 'user_agent': parse_str})\n\n\n# informaation about the request\nrequest = df.pop('request').str.split()\ndf['resource'] = request.str[1]\ndf['method'] = request.str[0]\n#yes I could have used regex for this \n# from the request get the string before the ?\ndf['url'] = request.str[1].str.split('?').str[0] \n\n# I did this to look at the data\n#excelFilename = './data/log.xlsx'\n#df.to_excel(excelFilename, index=False, sheet_name='data')\n\ndfbyhour=df.resample('H',on='time').sum()\n# now we can extract the hours and dates\n#df['time_of_day'] = df['time'].dt.time\n# the index of this data frame is a datetime object\ndfbyhour['hour'] = dfbyhour.index.hour\ndfbyhour['date'] = dfbyhour.index.date\n# now on to seaborn\n#sns.lmplot(x=\"hour\", y=\"size\", order=5 ,data=dfbyhour, x_jitter=0.5)\nsns.residplot(x=\"hour\", y=\"size\", data=dfbyhour, order=1)\n\nplt.show()\n\n\n\n","repo_name":"andrewbeattycourseware/pforcs2021","sub_path":"sem2-code/week06-regression/lab2-06.03-log.py","file_name":"lab2-06.03-log.py","file_ext":"py","file_size_in_byte":2310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34890476508","text":"import scipy.integrate;\nimport numpy;\n\n\ndef f2(xs):\n def g(a):\n b = numpy.arccosh(1/numpy.cos(a))\n return (1-numpy.exp(-b * abs(xs[0]-xs[1])) * numpy.cos(a * (xs[0] + xs[1]))) / (2 * numpy.pi * numpy.sin(a))\n return scipy.integrate.quad(g, 0, numpy.pi/2)[0]\n\n\ndef f(xs):\n n = len(xs)\n coef = 2 * (2 * numpy.pi)**(n - 1)\n def g(*args):\n a = numpy.arccosh(n - sum(numpy.cos(args)))\n return (1 - numpy.exp(-a*abs(xs[n-1])) * numpy.cos(sum([xs[i] * args[i] for i in range(n-1)])) ) / (coef * numpy.sinh(a))\n 
return scipy.integrate.nquad(g, [[0, 2*numpy.pi]] * (n - 1))[0]\n\ndef f_sym(xs):\n n = len(xs)\n X = numpy.array(xs)\n def f(*args):\n T = numpy.array(args)\n up = numpy.sin(numpy.sum(T * X))**2\n down = numpy.sum(numpy.sin(T)**2)\n return up/down \n\n return scipy.integrate.nquad(f, [[0, numpy.pi]] * n)[0] / (numpy.pi) ** n / 2\n\nP = [1,-2]\n\ndef I(xs):\n n = len(xs)\n return sum([ f(xs[0:i] + [xs[i]-1] + xs[i+1:n]) + f(xs[0:i] + [xs[i]+1] + xs[i+1:n]) for i in range(n) ]) - f(xs) * n * 2\n\n\nprint(f2(P))\nprint(f(P))\nprint(f_sym(P))\nprint(f\"{I(P):f}\")\nprint(\"====\")\n\nfor y in range(-5, 6):\n for x in range(-5, 6):\n print(f\"{f_sym([x,y]):+.5f} \", end = '')\n print(\"\")","repo_name":"wwylele/math-stuff","sub_path":"grid_circuit/grid-circ.py","file_name":"grid-circ.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15552539946","text":"'''\nYou are given a perfect binary tree where all leaves are on the same level, \nand every parent has two children. \nThe binary tree has the following definition:\nstruct Node {\n int val;\n Node *left;\n Node *right;\n Node *next;\n}\n\nPopulate each next pointer to point to its next right node. \nIf there is no next right node, the next pointer should be set to NULL.\nInitially, all next pointers are set to NULL.\n\nFollow up:\nYou may only use constant extra space.\nRecursive approach is fine, you may assume implicit stack space \ndoes not count as extra space for this problem.\n\nExample 1:\nInput: root = [1,2,3,4,5,6,7]\nOutput: [1,#,2,3,#,4,5,6,7,#]\nExplanation: Given the above perfect binary tree (Figure A), \nyour function should populate each next pointer to point to its next right node, just like in Figure B. \nThe serialized output is in level order as connected by the next pointers, \nwith '#' signifying the end of each level.\n'''\n\nclass Node:\n def __init__(self, val: int = 0, left: 'Node' = None, right: 'Node' = None, next: 'Node' = None):\n self.val = val\n self.left = left\n self.right = right\n self.next = next\n\n\ndef recursive(root, i):\n num_nodes = pow(2, i)\n print(\"-------\")\n print(\"num_nodes= \", num_nodes)\n print(\"-------\")\n print(root)\n if not root:\n return\n head = Node(root[0])\n for j in range(num_nodes):\n print(head.val)\n print(j)\n head.next = Node(root[j])\n head = head.next\n\n i += 1\n recursive(root[num_nodes:], i)\n# not solved\ndef connect(root):\n i = 0\n recursive(root, i)\n\n# others solutions\ndef connect(self, root):\n \"\"\"\n :type root: TreeLinkNode\n :rtype: nothing\n \"\"\"\n \n if not root:\n return None\n cur = root\n next = root.left\n\n while cur.left :\n cur.left.next = cur.right\n if cur.next:\n cur.right.next = cur.next.left\n cur = cur.next\n else:\n cur = next\n next = cur.left\n\n return root\n\n# dfs\ndef connect(self, root: 'Node') -> 'Node':\n \"\"\"\n 1 (1)\n 2 (2)-> 3(1)\n 4(4) -> 5(3) -> 6(2) -> 7(1)\n \"\"\"\n self.dfs(root)\n return root\n\n## (1). left child -> right child\n## (2). 
right child -> next.left child\ndef dfs(self,root):\n if root == None or root.left == None:\n return\n root.left.next = root.right\n if root.next != None: \n root.right.next = root.next.left\n self.dfs(root.left)\n self.dfs(root.right)\n\n\nroot = [1,2,3,4,5,6,7]\nconnect(root)\n\n","repo_name":"qscez2001/leetcode","sub_path":"116_Populating_Next_Right_Pointers_in_Each_Node.py","file_name":"116_Populating_Next_Right_Pointers_in_Each_Node.py","file_ext":"py","file_size_in_byte":2519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41690470855","text":"import os\r\nimport subprocess\r\nimport re\r\nimport json\r\nimport logging\r\nimport math # Required for the math.log function\r\nfrom orm.commit import *\r\nfrom ingester.commitFile import * # Represents a file\r\nfrom classifier.classifier import * # Used for classifying each commit\r\nimport time\r\nimport csv\r\n\r\n\"\"\"\r\nfile: repository.py\r\nauthors: Ben Grawi , Christoffer Rosen \r\ndate: October 2013\r\ndescription: Holds the repository git abstraction class\r\n\"\"\"\r\n\r\nclass Git():\r\n \"\"\"\r\n Git():\r\n pre-conditions: git is in the current PATH\r\n self.path is set in a parent class\r\n description: a very basic abstraction for using git in python.\r\n \"\"\"\r\n # Two backslashes to allow one backslash to be passed in the command.\r\n # This is given as a command line option to git for formatting output.\r\n\r\n # A commit mesasge in git is done such that first line is treated as the subject,\r\n # and the rest is treated as the message. We combine them under field commit_message\r\n\r\n # We want the log in ascending order, so we call --reverse\r\n # Numstat is used to get statistics for each commit\r\n LOG_FORMAT = '--pretty=format:\\\" CAS_READER_STARTPRETTY\\\r\n \\\\\"parent_hashes\\\\\"CAS_READER_PROP_DELIMITER: \\\\\"%P\\\\\",CAS_READER_PROP_DELIMITER2\\\r\n \\\\\"commit_hash\\\\\"CAS_READER_PROP_DELIMITER: \\\\\"%H\\\\\",CAS_READER_PROP_DELIMITER2\\\r\n \\\\\"author_name\\\\\"CAS_READER_PROP_DELIMITER: \\\\\"%an\\\\\",CAS_READER_PROP_DELIMITER2\\\r\n \\\\\"author_email\\\\\"CAS_READER_PROP_DELIMITER: \\\\\"%ae\\\\\",CAS_READER_PROP_DELIMITER2\\\r\n \\\\\"author_date\\\\\"CAS_READER_PROP_DELIMITER: \\\\\"%ad\\\\\",CAS_READER_PROP_DELIMITER2\\\r\n \\\\\"author_date_unix_timestamp\\\\\"CAS_READER_PROP_DELIMITER: \\\\\"%at\\\\\",CAS_READER_PROP_DELIMITER2\\\r\n \\\\\"commit_message\\\\\"CAS_READER_PROP_DELIMITER: \\\\\"%s%b\\\\\"\\\r\n CAS_READER_STOPPRETTY \\\" --numstat --reverse --before=\\\"2017-10-1\\\"'\r\n\r\n CLONE_CMD = 'git clone {!s} {!s}' # git clone command w/o downloading src code\r\n PULL_CMD = 'git pull' # git pull command\r\n RESET_CMD = 'git reset --hard FETCH_HEAD'\r\n CLEAN_CMD = 'git clean -df' # f for force clean, d for untracked directories\r\n DIFF_CMD = \"git diff {0}^ {1} \"\r\n DIFF_CMD_NAME = \"git diff {0}^ {1} --name-only\"\r\n DIFF_CMD_INIT = \"git diff {0} \"\r\n\r\n REPO_DIRECTORY = \"/CASRepos/git/\" # directory in which to store repositories\r\n DIFF_DIRECTORY = \"/CASRepos/diff/\" # directory in which to store diff information\r\n LEAST_CHARACTER = 10\r\n MAX_LINE = 10000 # if modified line of one commit is more then MAX_LINE, then ommit this commit \r\n\r\n\r\n def getCommitStatsProperties( stats, commitFiles, devExperience, author, unixTimeStamp ):\r\n \"\"\"\r\n getCommitStatsProperties\r\n Helper method for log. Caclulates statistics for each change/commit and\r\n returns them as a comma seperated string. 
Log will add these to the commit object\r\n properties\r\n\r\n @param stats These are the stats given by --numstat as an array\r\n @param commitFiles These are all tracked commit files\r\n @param devExperience These are all tracked developer experiences\r\n @param author The author of the commit\r\n @param unixTimeStamp Time of the commit\r\n \"\"\"\r\n\r\n statProperties = \"\"\r\n\r\n # Data structures to keep track of info needed for stats\r\n subsystemsSeen = [] # List of system names seen\r\n directoriesSeen = [] # List of directory names seen\r\n locModifiedPerFile = [] # List of modified loc in each file seen\r\n authors = [] # List of all unique authors seen for each file\r\n fileAges = [] # List of the ages for each file in a commit\r\n\r\n # Stats variables\r\n la = 0 # lines added\r\n ld = 0 # lines deleted\r\n nf = 0 # Number of modified files\r\n ns = 0 # Number of modified subsystems\r\n nd = 0 # number of modified directories\r\n entrophy = 0 # entrophy: distriubtion of modified code across each file\r\n lt = 0 # lines of code in each file (sum) before the commit\r\n ndev = 0 # the number of developers that modifed the files in a commit\r\n age = 0 # the average time interval between the last and current change\r\n exp = 0 # number of changes made by author previously\r\n rexp = 0 # experience weighted by age of files ( 1 / (n + 1))\r\n sexp = 0 # changes made previous by author in same subsystem\r\n totalLOCModified = 0 # Total modified LOC across all files\r\n nuc = 0 # number of unique changes to the files\r\n filesSeen = \"\" # files seen in change/commit\r\n\r\n for stat in stats:\r\n\r\n if( stat == ' ' or stat == '' ):\r\n continue\r\n\r\n fileStat = stat.split(\"\\\\t\")\r\n\r\n # Check that we are only looking at file stat (i.e., remove extra newlines)\r\n if( len(fileStat) < 2):\r\n continue\r\n # catch the git \"-\" line changes\r\n try:\r\n fileLa = int(fileStat[0])\r\n fileLd = int(fileStat[1])\r\n except:\r\n fileLa = 0\r\n fileLd = 0\r\n\r\n # Remove oddities in filename so we can process it\r\n fileName = (fileStat[2].replace(\"'\",'').replace('\"','').replace(\"\\\\\",\"\"))\r\n\r\n totalModified = fileLa + fileLd\r\n\r\n # have we seen this file already?\r\n if(fileName in commitFiles):\r\n prevFileChanged = commitFiles[fileName]\r\n prevLOC = getattr(prevFileChanged, 'loc')\r\n prevAuthors = getattr(prevFileChanged, 'authors')\r\n prevChanged = getattr(prevFileChanged, 'lastchanged')\r\n file_nuc = getattr(prevFileChanged, 'nuc')\r\n nuc += file_nuc\r\n lt += prevLOC\r\n\r\n for prevAuthor in prevAuthors:\r\n if prevAuthor not in authors:\r\n authors.append(prevAuthor)\r\n\r\n # Convert age to days instead of seconds\r\n age += ( (int(unixTimeStamp) - int(prevChanged)) / 86400 )\r\n fileAges.append(prevChanged)\r\n\r\n # Update the file info\r\n\r\n file_nuc += 1 # file was modified in this commit\r\n setattr(prevFileChanged, 'loc', prevLOC + fileLa - fileLd)\r\n setattr(prevFileChanged, 'authors', authors)\r\n setattr(prevFileChanged, 'lastchanged', unixTimeStamp)\r\n setattr(prevFileChanged, 'nuc', file_nuc)\r\n\r\n else:\r\n\r\n # new file we haven't seen b4, add it to file commit files dict\r\n if(author not in authors):\r\n authors.append(author)\r\n\r\n if(unixTimeStamp not in fileAges):\r\n fileAges.append(unixTimeStamp)\r\n\r\n fileObject = CommitFile(fileName, fileLa - fileLd, authors, unixTimeStamp)\r\n commitFiles[fileName] = fileObject\r\n\r\n # end of stats loop\r\n\r\n locModifiedPerFile.append(totalModified) # Required for entrophy\r\n 
totalLOCModified += totalModified\r\n fileDirs = fileName.split(\"/\")\r\n\r\n if( len(fileDirs) == 1 ):\r\n subsystem = \"root\"\r\n directory = \"root\"\r\n else:\r\n subsystem = fileDirs[0]\r\n directory = \"/\".join(fileDirs[0:-1])\r\n\r\n if( subsystem not in subsystemsSeen ):\r\n subsystemsSeen.append( subsystem )\r\n\r\n if( author in devExperience ):\r\n experiences = devExperience[author]\r\n exp += sum(experiences.values())\r\n\r\n if( subsystem in experiences ):\r\n sexp = experiences[subsystem]\r\n experiences[subsystem] += 1\r\n else:\r\n experiences[subsystem] = 1\r\n\r\n try:\r\n rexp += (1 / (age) + 1)\r\n except:\r\n rexp += 0\r\n\r\n else:\r\n devExperience[author] = {subsystem: 1}\r\n\r\n if( directory not in directoriesSeen ):\r\n directoriesSeen.append( directory )\r\n\r\n # Update file-level metrics\r\n la += fileLa\r\n ld += fileLd\r\n nf += 1\r\n filesSeen += fileName + \",CAS_DELIMITER,\"\r\n\r\n # End stats loop\r\n\r\n if( nf < 1):\r\n return \"\"\r\n\r\n # Update commit-level metrics\r\n ns = len(subsystemsSeen)\r\n nd = len(directoriesSeen)\r\n ndev = len(authors)\r\n lt = lt / nf\r\n age = age / nf\r\n exp = exp / nf\r\n rexp = rexp / nf\r\n\r\n # Update entrophy\r\n for fileLocMod in locModifiedPerFile:\r\n if (fileLocMod != 0 ):\r\n avg = fileLocMod/totalLOCModified\r\n entrophy -= ( avg * math.log( avg,2 ) )\r\n\r\n # Add stat properties to the commit object\r\n statProperties += ',\"la\":\"' + str( la ) + '\\\"'\r\n statProperties += ',\"ld\":\"' + str( ld ) + '\\\"'\r\n statProperties += ',\"fileschanged\":\"' + filesSeen[0:-1] + '\\\"'\r\n statProperties += ',\"nf\":\"' + str( nf ) + '\\\"'\r\n statProperties += ',\"ns\":\"' + str( ns ) + '\\\"'\r\n statProperties += ',\"nd\":\"' + str( nd ) + '\\\"'\r\n statProperties += ',\"entrophy\":\"' + str( entrophy ) + '\\\"'\r\n statProperties += ',\"ndev\":\"' + str( ndev ) + '\\\"'\r\n statProperties += ',\"lt\":\"' + str( lt ) + '\\\"'\r\n statProperties += ',\"nuc\":\"' + str( nuc ) + '\\\"'\r\n statProperties += ',\"age\":\"' + str( age ) + '\\\"'\r\n statProperties += ',\"exp\":\"' + str( exp ) + '\\\"'\r\n statProperties += ',\"rexp\":\"' + str( rexp ) + '\\\"'\r\n statProperties += ',\"sexp\":\"' + str( sexp ) + '\\\"'\r\n\r\n return statProperties\r\n # End stats\r\n\r\n def isComment(self, line):\r\n \"\"\"\r\n isComment():helper method for parsingDiff(), to decide whether a line is a comment or not\r\n :param line: a string\r\n :return: boolean\r\n \"\"\"\r\n if line.startswith('//') or line.startswith('/**') or line.startswith('*') or line.startswith(\r\n '/*') or line.endswith('*/'):\r\n return True\r\n else:\r\n return False\r\n\r\n def getBuggyLines(self,commit):\r\n bug = {}\r\n if commit.buggy_lines == 'NULL':\r\n return bug\r\n buggy_files = commit.buggy_lines.split('FILE_START:')[1:]\r\n\r\n for buggy_file in buggy_files:\r\n info = buggy_file.split(',')\r\n file_name = info[0]\r\n lines = info[1:]\r\n bug[file_name] = lines\r\n return bug\r\n\r\n def getBugLabel(self, file, line_num,buggy_lines):\r\n lines = buggy_lines.get(file,[])\r\n if lines:\r\n if str(line_num) in lines:\r\n return True\r\n else:\r\n return False\r\n else:\r\n return False\r\n\r\n\r\n def isOneLine(self,line,next_line,mode):\r\n # avoid combining a original line between tow modified lines\r\n if next_line == 'EndLine_DELIMITER':\r\n return True\r\n if mode =='+' and next_line.startswith(\"+\") == False:\r\n return True\r\n if mode == '-' and next_line.startswith('-') == False:\r\n return True\r\n # line.find(\"class 
\"): a line to define a class\r\n # line.find(\"throws “): a line to define exception\r\n if line.endswith(\"{\") or line.endswith(\"}\") or line.endswith(\";\") or line.startswith(\"@\") or\\\r\n line.endswith(\")\") or line.find(\"class \") != -1 or line.find(\"throws \") != -1:\r\n return True\r\n else:\r\n return False\r\n\r\n def parsingDiff(self, diff_info, commit):\r\n if len(diff_info.split('\\n')) > self.MAX_LINE:\r\n return []\r\n region_diff = {}\r\n # only link code source files as any type of README, etc typically have HUGE changes and reduces\r\n # the performance to unacceptable levels. it's very hard to blacklist everything; much easier just to whitelist\r\n # code source files endings.\r\n\r\n list_ext_dir = os.path.dirname(__file__)+ \"/../analyzer/code_file_extentions.txt\"\r\n with open(list_ext_dir,'r') as file:\r\n file_exts_to_include = file.read().splitlines()\r\n\r\n regions = diff_info.split('diff --git ')\r\n if len(regions) < 2:\r\n return [] # ignore commits without diff information like merge commit\r\n\r\n add_results = []\r\n del_results = []\r\n addresuluts_header = ['commit_hash', 'content', 'file_pre', 'file_new', 'line_num', 'author', 'time', 'bug_introducing','commit_label']\r\n delresuluts_header = ['commit_hash', 'content', 'file_pre', 'file_new', 'line_num', 'author', 'time', 'fix']\r\n # file to store results\r\n add_file = os.path.dirname(\r\n __file__) + self.DIFF_DIRECTORY + commit.repository_id + '/' + commit.repository_id + '_add.csv'\r\n del_file = os.path.dirname(\r\n __file__) + self.DIFF_DIRECTORY + commit.repository_id + '/' + commit.repository_id + '_del.csv'\r\n buggy_lines = self.getBuggyLines(commit)\r\n\r\n for region in regions[1:]:\r\n chunks = region.split('@@ -')\r\n # get the previous file name and new file name\r\n file_pre = re.search('\\-{3} (a/)?(.*)', chunks[0])\r\n if hasattr(file_pre, 'group'):\r\n file_pre = file_pre.group(2)\r\n else:\r\n continue\r\n file_new = re.search('\\+{3} (b)/?(.*)', chunks[0])\r\n if hasattr(file_new, 'group'):\r\n file_new = file_new.group(2)\r\n else:\r\n continue\r\n\r\n # only focus on \".java\" file\r\n file_info = file_new.split(\".\")\r\n if len(file_info) > 1: # get extentions\r\n file_ext = (file_info[1]).lower()\r\n if file_ext.upper() not in file_exts_to_include:# ensure these source code file endings\r\n continue\r\n else:\r\n continue\r\n line_am = '' # variable to process added multiple lines\r\n line_dm = '' # variable to process deleted multiple lines\r\n first_segm = True # helper variable to process multiple lines condition\r\n num_m = 0\r\n for chunk in chunks[1:]:\r\n lines = chunk.split('\\n')\r\n # get the line number of each change\r\n nums = re.match(r'^(\\d+),*\\d* \\+(\\d+),*\\d* @@', lines[0])\r\n if hasattr(nums, 'group'):\r\n pre_current = int(nums.group(1))\r\n new_current = int(nums.group(2))\r\n else:\r\n continue\r\n bug_introducing = False\r\n fix = False\r\n lines = lines[1:]\r\n for count, line in enumerate(lines):\r\n is_add = line.startswith('+') # this line add some code(missing in previous file but added to new file)\r\n is_del = line.startswith('-') # this line delete some code(appears in previous file but removed in new file)\r\n if is_add:\r\n line = line.lstrip('+').strip().strip('\\t').strip('\\r')\r\n # this line is a comment or not\r\n comment = self.isComment(line)\r\n if not comment:\r\n if len(line) < self.LEAST_CHARACTER:\r\n new_current += 1\r\n continue # escape those line without enought information\r\n bug_flag = self.getBugLabel(file_new, 
new_current, buggy_lines)\r\n if bug_flag:\r\n bug_introducing = True\r\n if count < len(lines) -1:\r\n next_line = lines[count+1]\r\n else:\r\n next_line = 'EndLine_DELIMITER'\r\n if self.isOneLine(line, next_line,'+'):\r\n line_am += ' ' + line # modified line in add.csv\r\n if first_segm:\r\n num_m = new_current\r\n\r\n result = (commit.commit_hash, line_am, file_pre, file_new, num_m, commit.author_name,\r\n commit.author_date, bug_introducing, commit.contains_bug)\r\n # bug all contain_bug became False\r\n add_results.append(result)\r\n line_am = '' # reset\r\n first_segm = True # reset\r\n bug_introducing = False # reset\r\n else:\r\n if first_segm:\r\n num_m = new_current\r\n line_am += line\r\n first_segm = False # set for the next segment, if exist.\r\n new_current += 1\r\n else:\r\n new_current += 1\r\n continue\r\n elif is_del:\r\n line = line.lstrip('-').strip().strip('\\t').strip('\\r') # remove some useless characters\r\n comment = self.isComment(line)\r\n if not comment:\r\n if len(line) < self.LEAST_CHARACTER:\r\n pre_current += 1\r\n continue # ignore blank lines\r\n fix_flag = commit.fix\r\n if fix_flag=='True':\r\n fix = True\r\n if count < len(lines) -1:\r\n next_line = lines[count+1]\r\n else:\r\n next_line = 'EndLine_DELIMITER'\r\n if self.isOneLine(line,next_line,'-'):\r\n line_dm += ' ' + line\r\n if first_segm:\r\n num_m = pre_current\r\n result = (commit.commit_hash, line_dm, file_pre, file_new, num_m, commit.author_name,\r\n commit.author_date, fix)\r\n del_results.append(result)\r\n line_dm = ''\r\n first_segm = True\r\n fix = False # reset\r\n else:\r\n if first_segm:\r\n num_m = pre_current\r\n line_dm += line\r\n first_segm = False\r\n pre_current += 1\r\n else:\r\n pre_current += 1\r\n continue\r\n else:\r\n pre_current += 1\r\n new_current += 1\r\n continue\r\n add_exist = os.path.isfile(add_file) # avoid write file header towice\r\n with open(add_file, 'a') as file:\r\n f_csv = csv.writer(file)\r\n if not add_exist:\r\n f_csv.writerow(addresuluts_header)\r\n f_csv.writerows(add_results)\r\n del_exist = os.path.isfile(del_file)\r\n with open(del_file, 'a') as file:\r\n f_csv = csv.writer(file)\r\n if not del_exist:\r\n f_csv.writerow(delresuluts_header)\r\n f_csv.writerows(del_results)\r\n\r\n\r\n def diff(self,repoId):\r\n repo_dir = os.chdir(os.path.dirname(__file__) + self.REPO_DIRECTORY + repoId)\r\n diff_dir = os.path.dirname(__file__)+ self.DIFF_DIRECTORY + repoId\r\n\r\n # check the directory exist or not\r\n if not os.path.isdir(diff_dir):\r\n os.mkdir(diff_dir)\r\n else:\r\n pass\r\n\r\n # get commit hash\r\n session = Session()\r\n commits = (session.query(Commit).filter((Commit.repository_id==repoId)&(Commit.diffed==False))\r\n .order_by( Commit.author_date_unix_timestamp.desc()).all())\r\n\r\n # diff\r\n logging.info('Starting get/parsing diff information.')\r\n for commit in commits:\r\n try:\r\n diff_info = (subprocess.check_output(self.DIFF_CMD.format(commit.commit_hash, commit.commit_hash),\\\r\n shell=True, cwd=repo_dir)).decode('utf-8','replace')\r\n\r\n self.parsingDiff(diff_info, commit)\r\n commit.diffed = True\r\n except:\r\n try:\r\n diff_info = (subprocess.check_output(self.DIFF_CMD_INIT.format(commit.commit_hash), \\\r\n shell=True, cwd=repo_dir)).decode('utf-8', 'replace')\r\n\r\n self.parsingDiff(diff_info, commit)\r\n commit.diffed = True\r\n #session.commit() # update diffed\r\n except Exception as e:\r\n logging.info(e)\r\n continue\r\n # the initial commit\r\n session.commit()\r\n session.close()\r\n logging.info('Done 
getting/parsing diff informations.')\r\n\r\n def log(self, repo, firstSync):\r\n \"\"\"\r\n log(): Repository, Boolean -> Dictionary\r\n arguments: repo Repository: the repository to clone\r\n firstSync Boolean: whether to sync all commits or after the\r\n ingestion date\r\n description: a very basic abstraction for using git in python.\r\n \"\"\"\r\n repo_dir = os.chdir(os.path.dirname(__file__) + self.REPO_DIRECTORY + repo.id)\r\n logging.info('Getting/parsing git commits: '+ str(repo) )\r\n # Spawn a git process and convert the output to a string\r\n if not firstSync and repo.ingestion_date is not None:\r\n cmd = 'git log --after=\"' + repo.ingestion_date + '\" '\r\n else:\r\n cmd = 'git log '\r\n\r\n log = str( subprocess.check_output(cmd + self.LOG_FORMAT, shell=True, cwd = repo_dir ) )\r\n log = log[2:-1] # Remove head/end clutter\r\n\r\n # List of json objects\r\n json_list = []\r\n\r\n # Make sure there are commits to parse\r\n if len(log) == 0:\r\n return []\r\n\r\n commitFiles = {} # keep track of ALL file changes\r\n devExperience = {} # Keep track of ALL developer experience\r\n classifier = Classifier() # classifier for classifying commits (i.e., corrective, feature addition, etc)\r\n\r\n commitList = log.split(\"CAS_READER_STARTPRETTY\")\r\n\r\n for commit in commitList:\r\n author = \"\" # author of commit\r\n unixTimeStamp = 0 # timestamp of commit\r\n fix = False # whether or not the change is a defect fix\r\n classification = None # classification of the commit (i.e., corrective, feature addition, etc)\r\n isMerge = False # whether or not the change is a merge\r\n\r\n commit = commit.replace('\\\\x', '\\\\u00') # Remove invalid json escape characters\r\n splitCommitStat = commit.split(\"CAS_READER_STOPPRETTY\") # split the commit info and its stats\r\n\r\n # The first split will contain an empty list\r\n if(len(splitCommitStat) < 2):\r\n continue\r\n\r\n prettyCommit = splitCommitStat[0]\r\n statCommit = splitCommitStat[1]\r\n commitObject = \"\"\r\n\r\n # Start with the commit info (i.e., commit hash, author, date, subject, etc)\r\n prettyInfo = prettyCommit.split(',CAS_READER_PROP_DELIMITER2 \"')\r\n for propValue in prettyInfo:\r\n props = propValue.split('\"CAS_READER_PROP_DELIMITER: \"')\r\n propStr = ''\r\n for prop in props:\r\n prop = prop.replace('\\\\','').replace(\"\\\\n\", '') # avoid escapes & newlines for JSON formatting\r\n propStr = propStr + '\"' + prop.replace('\"','') + '\":'\r\n\r\n values = propStr[0:-1].split(\":\")\r\n\r\n if(values[0] == '\" parent_hashes\"'):\r\n # Check to see if this is a merge change. Fix for Issue #26. 
\r\n # Detects merges by counting the # of parent commits\r\n \r\n parents = values[1].split(' ')\r\n if len(parents) == 2:\r\n isMerge = True\r\n\r\n if(values[0] == '\"author_name\"'):\r\n author = values[1].replace('\"', '')\r\n\r\n if(values[0] == '\"author_date_unix_timestamp\"'):\r\n unixTimeStamp = values[1].replace('\"','')\r\n\r\n # Classify the commit\r\n if(values[0] == '\"commit_message\"'):\r\n\r\n if (isMerge):\r\n classification = \"Merge\"\r\n else:\r\n classification = classifier.categorize(values[1].lower())\r\n\r\n # If it is a corrective commit, we induce it fixes a bug somewhere in the system\r\n if classification == \"Corrective\":\r\n fix = True\r\n\r\n\r\n commitObject += \",\" + propStr[0:-1]\r\n # End property loop\r\n # End pretty info loop\r\n\r\n # Get the stat properties\r\n stats = statCommit.split(\"\\\\n\")\r\n commitObject += self.getCommitStatsProperties(stats, commitFiles, devExperience, author, unixTimeStamp)\r\n\r\n # Update the classification of the commit\r\n commitObject += ',\"classification\":\"' + str( classification ) + '\\\"'\r\n\r\n # Update whether commit was a fix or not\r\n commitObject += ',\"fix\":\"' + str( fix ) + '\\\"'\r\n\r\n # Remove first comma and extra space\r\n commitObject = commitObject[1:].replace(' ','')\r\n # Add commit object to json_list\r\n json_list.append(json.loads('{' + commitObject + '}'))\r\n # End commit loop\r\n\r\n logging.info('Done getting/parsing git commits.')\r\n\r\n return json_list\r\n\r\n def clone(self, repo):\r\n \"\"\"\r\n clone(repo): Repository -> String\r\n description:Takes the current repo and clones it into the\r\n `clone_directory/the_repo_id`\r\n arguments: repo Repository: the repository to clone\r\n pre-conditions: The repo has not been already created\r\n \"\"\"\r\n repo_dir = os.chdir(os.path.dirname(__file__) + self.REPO_DIRECTORY)\r\n\r\n # Run the clone command and return the results\r\n\r\n logging.info('Git cloning repo: '+ str(repo) )\r\n cloneResult = str(subprocess.check_output(\r\n self.CLONE_CMD.format(repo.url, './' + repo.id),\r\n shell= True,\r\n cwd = repo_dir ) )\r\n logging.info('Done cloning.')\r\n #logging.debug(\"Git clone result:\\n\" + cloneResult)\r\n\r\n # Reset path for next repo\r\n\r\n # TODO: only return true on success, else return false\r\n return True\r\n\r\n def pull(self, repo):\r\n \"\"\"\r\n fetch(repo): Repository -> String\r\n description:Takes the current repo and pulls the latest changes.\r\n arguments: repo Repository: the repository to pull\r\n pre-conditions: The repo has already been created\r\n \"\"\"\r\n\r\n repo_dir = os.path.dirname(__file__) + self.REPO_DIRECTORY + repo.id\r\n\r\n # Weird sceneario where something in repo gets modified - reset all changes before pulling\r\n subprocess.call(self.RESET_CMD, shell=True, cwd= repo_dir)\r\n subprocess.call(self.CLEAN_CMD, shell=True, cwd= repo_dir)\r\n\r\n # Run the pull command and return the results\r\n logging.info('Pulling latest changes from repo: '+ str(repo) )\r\n fetchResult = str(subprocess.check_output(\r\n self.RESET_CMD + \"\\n\" + self.PULL_CMD ,\r\n shell=True,\r\n cwd= repo_dir ) )\r\n logging.info('Done fetching.')\r\n #logging.debug(\"Git pull result:\\n\" + cloneResult)\r\n\r\n # TODO: only return true on success, else return false\r\n return True\r\n","repo_name":"wenfengzCN/cas_vlis","sub_path":"ingester/git.py","file_name":"git.py","file_ext":"py","file_size_in_byte":29267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
+{"seq_id":"13080615401","text":"from collections import defaultdict\n\nclass Solution:\n def lengthOfLongestSubstring(self, s: str) -> int:\n if not s:\n return 0\n\n memo = defaultdict(int)\n\n # start together\n l = r = 0\n ret = 0\n\n while r < len(s):\n # check the window\n while l < r and memo[s[r]] > 0:\n memo[s[l]] -= 1\n l += 1\n\n memo[s[r]] += 1\n ret = max(ret, r - l + 1)\n r += 1\n\n return ret\n\n\nprint(Solution().lengthOfLongestSubstring(\"abcabcbb\"))\n\n\n","repo_name":"alfmunny/coding-practice","sub_path":"3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30082590926","text":"\n\ndef relationScore(a, b):\n inAnotB = 0\n inBnotA = 0\n inBoth = 0\n for item in a:\n if item in b:\n inBoth+=1\n else:\n inAnotB+=1\n for item in b:\n if item not in a:\n inBnotA+=1\n return min(inBnotA, inBnotA, inBoth)\n\n\n\"\"\"\n\n\"\"\"\ndef getLines(lineListh, lineListv):\n for i in range(lineListv):\n matchLocation = i\n bestMatch = -1\n if lineListv[i] == False:\n continue\n for k in range(i+1, lineListv):\n if lineListv[k] == False:\n continue\n if relationScore(lineListv[i][2],lineListv[k][2])>bestMatch:\n bestMatch = relationScore(lineListv[i][2],lineListv[k][2])\n matchLocation = k\n lineListv[i][2] = lineListv[i][2].union(lineListv[matchLocation][2])\n lineListv[i][0] = lineListv[i][0] + \" \"+lineListv[k][0]\n lineListv[i][1] = lineListv[i][1] + lineListv[k][1]\n lineListv[k] = False\n newList = []\n for i in range(lineListv):\n if lineListv[i] != False:\n newList.append(lineListv[i])\n lineListv = newList\n almostOutput = []\n\n\n for data in lineListv:\n bestMatch = -1\n matchLocation = -1 ##check\n for k in range(lineListh):\n matchLocation = k\n if lineListv[k] == False:\n continue\n if relationScore(data[2],lineListv[k][2]) > bestMatch:\n bestMatch = relationScore(data[2], lineListv[k][2])\n matchLocation = k\n almostOutput.append(data)\n almostOutput.append(lineListv[matchLocation])\n lineListv[k] = False\n\n return almostOutput\n\n\n\n\n","repo_name":"elkanatovey/NAND2TETRIS_10","sub_path":"workLists.py","file_name":"workLists.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"34038128330","text":"from tkinter import *\n\nwindow = Tk()\nwindow.geometry('420x420')\nwindow.title('First GUI')\nwindow.config(background='black')\n\n# label = Label(window,text='Hello world',font=('Arial',15,'bold '),fg='red', bd=10, padx=5, pady=5)\n# label.pack()\n# label.place(x=0,y=0)\n\nclicks = 0\ndef increase():\n global clicks\n clicks += 1\n print(clicks)\n\nbutton = Button(window,\n text='click me', \n command=increase, \n font=('Comic Sans', 25), \n activebackground='black', \n activeforeground='white')\nbutton.pack()\n#photoimage for images\nwindow.mainloop()","repo_name":"robert1811/30-days-python","sub_path":"day-8/buttons.py","file_name":"buttons.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4754301597","text":"import time\nimport json\nimport pathlib\nimport requests\nimport base64\n\nfrom PIL import Image\nimport cv2\nimport imutils\n\nfrom urllib.request import urlopen\n\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import 
WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.firefox.options import Options\n\n#url = \"https://imnotjuice.neocities.org/CYOA/Evil%20Empire%200.8/\"\nurl = \"https://radioarc.neocities.org/cyoa/Fire%20Emblem%20v2/\"\n# url = 'https://dragonswhore-cyoas.neocities.org/Lure_p4_Dalet/'\n\n# store the response of URL\n# response = urlopen(url + 'project.json')\nresponse = requests.get(url + 'project.json')\n# response = urlopen('https://imnotjuice.neocities.org/project.json')\n\n# storing the JSON response\n# from url in data\ndata_json = response.json()\n# print(data_json)\n\ndef inject_image(uri, url, filename):\n if not uri:\n return uri\n if 'data:image' in uri:\n return uri\n if 'http' in uri:\n absolute_url = uri\n else:\n absolute_url = url + uri\n print(absolute_url)\n r = requests.get(absolute_url)\n print(r)\n data_base64 = base64.b64encode(r.content) # encode to base64 (bytes)\n data_base64 = data_base64.decode()\n\n print(data_base64)\n #img.save(filename)\n return 'data:image/jpeg;base64,' + data_base64\n\n\n# Remove all required elements\ndata_rows = data_json['rows']\ni = 0\nfor row in data_rows:\n if 'requireds' in row:\n row['requireds'] = []\n if 'image' in row:\n row['image'] = inject_image(row['image'], url, f'{i}.jpg')\n i = i + 1\n if 'objects' in row:\n data_objects = row['objects']\n for object in data_objects:\n if 'requireds' in object:\n object['requireds'] = []\n if 'image' in object:\n object['image'] = inject_image(object['image'], url, f'{i}.jpg')\n i = i + 1\n\n# Generate new app.js file\nwith open('selenium/js/app.c533aa25.js', 'w') as f:\n with open('selenium/js/app_head.js', 'r') as hfile:\n f.write(hfile.read())\n f.write(json.dumps(data_json))\n with open('selenium/js/app_tail.js', 'r') as tfile:\n f.write(tfile.read())\n\n# Inject image urls\n# If image is data:image/jpeg;base64, don't touch\n\nindex_file = 'selenium/index.html'\noptions = Options()\noptions.add_argument(\"-headless\")\nwith webdriver.Firefox(options=options) as driver:\n driver.get(\"file://\" + str(pathlib.Path(index_file).resolve()))\n actions = ActionChains(driver)\n try:\n elem = WebDriverWait(driver, 30).until(\n EC.presence_of_element_located((By.CLASS_NAME, \"objectRow\")) #This is a dummy element\n )\n finally:\n driver.save_screenshot(\"image.png\")\n\n SCROLL_PAUSE_TIME = 0.5\n # wait_until_images_loaded(driver)\n\n # Get scroll height\n driver.set_window_size(1900, 3800)\n max_height = driver.execute_script(\"return document.body.scrollHeight\")\n\n print(max_height)\n if max_height > 30000:\n driver.set_window_size(1920, 30000)\n else:\n driver.set_window_size(1920, max_height)\n\n # Scroll to load images\n height = 0\n # image_filenames = []\n i = 0\n while height < max_height:\n # Scroll down to bottom\n driver.execute_script(f\"window.scrollTo(0, {height});\")\n\n # Wait to load page\n time.sleep(5)\n\n image_filename = f\"image_{i}.png\"\n driver.save_screenshot(image_filename)\n im = cv2.imread(image_filename)\n im_height, im_width, im_channels = im.shape\n\n # Calculate new scroll height and compare with last scroll height\n # image_filename = f\"image_{i}.png\"\n # driver.save_screenshot(image_filename)\n # image_filenames.append(image_filename)\n height = height + im_height\n i = i + 1\n\n # If last scroll, resize window again\n max_height = driver.execute_script(\"return document.body.scrollHeight\")\n print(max_height)\n if height + im_height > 
max_height:\n # Arbitrary extra 1000 pixels because screen height isn't same as window hieght\n driver.set_window_size(1920, max_height - height + 1000)\n\n driver.close()","repo_name":"anonekama/cyoa-archives","sub_path":"test_selenium.py","file_name":"test_selenium.py","file_ext":"py","file_size_in_byte":4492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29668710283","text":"#!/usr/bin/env python3\n\"\"\"\nScript that takes in input from the user with the prompt Q:\nand prints A: as a response. If the user inputs exit, quit,\ngoodbye, or bye, case insensitive, print A: Goodbye and exit.\n\"\"\"\nexits = [\"exit\", \"quit\", \"goodbye\", \"bye\"]\nwhile True:\n question = input(\"Q: \").lower().strip()\n if question in exits:\n print(\"A: Goodbye\")\n exit()\n else:\n print(\"A:\")\n","repo_name":"rolandoquiroz/holbertonschool-machine_learning","sub_path":"supervised_learning/0x13-qa_bot/1-loop.py","file_name":"1-loop.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20811390323","text":"from PyQt5.QtWidgets import QGraphicsItem\nfrom PyQt5.QtGui import QLinearGradient\nfrom PyQt5.QtGui import QColor\nfrom PyQt5.QtGui import QPixmap\nfrom PyQt5.QtCore import QPointF\nfrom PyQt5.QtCore import QRectF\nfrom PyQt5.QtCore import QTimer\nfrom PyQt5.QtCore import pyqtSignal\n\n\nclass FadingPixmapItem(QGraphicsItem):\n def __init__(self, pixmap = None, parent=None):\n super(FadingPixmapItem, self).__init__(parent)\n\n self.__pixmap = pixmap\n if self.__pixmap is None:\n self.__pixmap = QPixmap()\n\n self.__boundingRect = QRectF(self.__pixmap.rect())\n\n self.__opacity = 100\n self.__opacitySpeed = 3\n\n self.__timer = QTimer()\n self.__timer.timeout.connect(self.__changeOpacity)\n self.__timer.start(17)\n\n def setPixmap(pixmap):\n if pixmap is None:\n raise ValueError(\"pixmap is None\")\n self.__pixmap = pixmap\n self.__boundingRect = QRectF(__pixmap.rect())\n\n def setSize(width, height):\n self.__boundingRect = QRectF(0, 0, width, height)\n\n def __changeOpacity(self):\n self.__opacity -= self.__opacitySpeed\n if self.__opacity <= 0:\n self.__opacity = 100\n self.update(self.__boundingRect)\n\n def boundingRect(self):\n return self.__boundingRect\n\n def paint(self, painter, style_option, widget):\n painter.setOpacity(self.__opacity / 100)\n width = self.__boundingRect.width()\n height = self.__boundingRect.height()\n painter.drawPixmap(\n 0, 0, width, height,\n self.__pixmap,\n 0, 0, self.__pixmap.width(), self.__pixmap.height()\n )\n\n\nif __name__ == \"__main__\":\n pass","repo_name":"thehighestmath/sea_battle","sub_path":"src/Presenter/GraphcisItems/FadingPixmap.py","file_name":"FadingPixmap.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28441882075","text":"\"\"\"\nReplyMarkup keyboards (used as commands)\n\"\"\"\n\nfrom aiogram.types import ReplyKeyboardMarkup, KeyboardButton\n\n\nasync def get_main_menu_markup(user_type: str = None) -> ReplyKeyboardMarkup:\n \"\"\"\n Get main menu actions markup\n :returns: ReplyKeyboardMarkup\n \"\"\"\n\n if user_type == \"teacher\":\n keyboard = [\n [\n KeyboardButton(\"Create group\"),\n KeyboardButton(\"Managed groups\"),\n ]\n ]\n elif user_type == \"student\":\n keyboard = [\n [\n KeyboardButton(\"Add group\"),\n KeyboardButton(\"My groups\"),\n ],\n [\n 
KeyboardButton(\"My marks\"),\n KeyboardButton(\"Deadlines\"),\n ]\n ]\n else:\n keyboard = [\n [\n KeyboardButton(\"Register\")\n ]\n ]\n return ReplyKeyboardMarkup(keyboard=keyboard, resize_keyboard=True)\n","repo_name":"sesorov/Altedy","sub_path":"infrastructure/keyboards/reply_keyboards.py","file_name":"reply_keyboards.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15149575162","text":"#!/usr/bin/env python\n##############################################################################\n# EVOLIFE http://evolife.telecom-paristech.fr Jean-Louis Dessalles #\n# Telecom ParisTech 2017 www.dessalles.fr #\n# -------------------------------------------------------------------------- #\n# License: Creative Commons BY-NC-SA #\n##############################################################################\n\n\n##############################################################################\n# Window system #\n##############################################################################\n\nimport sys\nif __name__ == '__main__': sys.path.append('../..') # for tests\n\nfrom PyQt4 import QtGui, QtCore\nimport webbrowser # is user clicks on link\nimport math\nimport os.path\nimport random\n\nimport Evolife.QtGraphics.Plot_Area as Plot_Area\nimport Evolife.QtGraphics.Evolife_Graphic as Evolife_Graphic\nimport Evolife.QtGraphics.Simulation_Thread as Simulation_Thread\t # Thread to run the simulation in parallel\nfrom Evolife.Tools.Tools import EvolifeError\n\nDefaultIconName = 'QtGraphics/EvolifeIcon.png'\nHelpFileName = 'Help.txt'\n\n\n\n\n##################################################\n# Interface with the simulation thread #\n##################################################\n\nclass Simulation_Control:\n\t\"\"\" Controls the simulation, either step by step, or in\n\t\ta continuous mode.\n\t\"\"\"\n\n\tdef __init__(self, SimulationStep, Obs, method='timer'):\n\n\t\tself.Obs = Obs # simulation observer\n\t\tself.SimulationStep = SimulationStep # function that launches one step of the simulation\n\t\tself.method = method\t# should be either 'timer' or 'thread'\n\t\tself.timer = None # using a timer is one way of running simulation\n\t\t\n\t\t## Status of the simulation programme\n\t\tself.simulation = None \t\t\t# name of the simulation thread\n\t\tself.simulation_steady_mode = False\t# true when simulation is automatically repeated\n\t\tself.simulation_under_way = True\t# becomes false when the simulation thinks it's really over\n\t\tself.previous_Disp_period = self.Disp_period = Obs.DisplayPeriod()\t# display period\n\n\tdef RunButtonClick(self, event=None):\n\t\tself.Disp_period = self.previous_Disp_period\n\t\tself.Obs.DisplayPeriod(self.Disp_period)\t# let Obs know\n\t\tself.simulation_steady_mode = True\t # Continuous functioning\n\t\tself.Simulation_resume()\n\t\n\tdef StepButtonClick(self, event=None):\n\t\tself.Disp_period = 1\n\t\tself.Obs.DisplayPeriod(self.Disp_period)\t# let Obs know\n\t\tself.simulation_steady_mode = False\t# Stepwise functioning\n\t\tself.simulation_under_way = True\t# to allow for one more step\n\t\tself.Simulation_resume()\n\t\n\tdef Simulation_stop(self):\n\t\tif self.method == 'timer':\n\t\t\tif self.timer is not None and self.timer.isActive():\n\t\t\t\tself.timer.stop()\n\t\telif self.method == 'thread':\n\t\t\tif self.simulation is not None:\n\t\t\t\tself.simulation.stop()\n\t\t\t\tif self.simulation.isAlive():\n\t\t\t\t\t#print 
'strange...'\n\t\t\t\t\tself.simulation = None # well...\n\t\t\t\t\treturn False\n\t\t\t\tself.simulation = None\n\t\treturn True\n\t\t\n\tdef Simulation_launch(self,continuous_mode):\n\t\tself.Simulation_stop()\n\t\tif self.method == 'timer':\n\t\t\tif continuous_mode:\n\t\t\t\tif self.timer is None:\n\t\t\t\t\tself.timer = QtCore.QTimer()\n\t\t\t\t\tself.timer.timeout.connect(self.OneStep)\n\t\t\t\tself.timer.start()\n\t\t\telse:\n\t\t\t\tself.OneStep()\n\t\telif self.method == 'thread':\n\t\t\t# A new simulation thread is created\n\t\t\tself.simulation = Simulation_Thread.Simulation(self.SimulationStep,continuous_mode, self.ReturnFromThread)\n\t\t\tself.simulation.start()\n\t\treturn True\n\t\t\n\tdef Simulation_resume(self):\n\t\treturn self.Simulation_launch(self.simulation_steady_mode)\t# same functioning as before\t\t\t\n\t\t\n\tdef OneStep(self):\n\t\t# print('-', end=\"\", flush=True)\n\t\tif self.simulation_under_way:\n\t\t\ttry:\tself.simulation_under_way = self.SimulationStep()\n\t\t\texcept EvolifeError:\n\t\t\t\tself.Simulation_stop()\n\t\t\t\timport traceback\n\t\t\t\ttraceback.print_exc()\n\t\telse:\t\n\t\t\tself.StepButtonClick()\t# avoids to loop\n\t\t\tself.DecisionToEnd()\n\t\tif self.ReturnFromThread() < 0:\t\t# should return negative value only once, not next time\n\t\t# if self.ReturnFromThread() < 0:\n\t\t\t# The simulation is over\n\t\t\t#self.Simulation_stop()\n\t\t\tself.StepButtonClick()\n\t\t\n\tdef ReturnFromThread(self):\n\t\tpass\t# to be overloaded\n\t\n\tdef DecisionToEnd(self):\n\t\tpass\t# to be overloaded\n\t\n\n\n##################################################\n# Incremental definition of windows\t\t\t #\n##################################################\n\t\t\n\t\t\n#---------------------------#\n# Control panel #\n#---------------------------#\n\nclass Simulation_Control_Frame(Evolife_Graphic.Active_Frame, Simulation_Control):\n\t\"\"\" Minimal control panel with [Run] [Step] [Help] and [quit] buttons\n\t\"\"\"\n\t\n\tdef __init__(self, SimulationStep, Obs):\n\t\tself.Name = Obs.get_info('Title')\n\t\tself.IconName = Obs.get_info('Icon')\n\t\tif not self.IconName:\tself.IconName = DefaultIconName\n\t\tEvolife_Graphic.Active_Frame.__init__(self, parent=None, control=self)\n\t\tSimulation_Control.__init__(self, SimulationStep, Obs, method='timer')\n\t\tif self.Name:\n\t\t\tself.setWindowTitle(self.Name)\n\t\tself.setWindowIcon(QtGui.QIcon(os.path.join(self.Obs.get_info('EvolifeMainDir'),self.IconName)))\n\n\t\t## List and status of Satellite windows\n\t\tself.SWindows = dict()\n\t\tself.SWindowsPreferredGeometry = dict()\n\t\tself.Finish = False\n\t\tself.alive = True\n\t\tself.PhotoMode = 0 # no photo, no film\n\t\tself.CurrentFrame = 0 # keeps track of photo numbers\n\t\t\n\t\t# control frame\n\t\tself.control_frame = QtGui.QVBoxLayout()\n\t\t#self.control_frame.setGeometry(QtCore.QRect(0,0,60,100))\n\t\t\n\t\t# inside control_frame we create two labels and button_frames\n\t\tNameLabel = QtGui.QLabel(\"%s\" % self.Name.upper(), self)\n\t\tNameLabel.setAlignment(QtCore.Qt.AlignHCenter)\n\t\tself.control_frame.addWidget(NameLabel)\n\t\tAdrLabel = QtGui.QLabel(\"www.dessalles.fr/%s\" % (self.Name.replace(' ','_'), self.Name), self)\n\t\tAdrLabel.setAlignment(QtCore.Qt.AlignHCenter)\n\t\tAdrLabel.linkActivated.connect(self.EvolifeWebSite)\n\t\tself.control_frame.addWidget(AdrLabel)\n\n\t\t# Button names\n\t\tself.Buttons = dict()\n\n\t\t# button frame\n\t\tself.button_frame = 
QtGui.QVBoxLayout()\n\t\tself.control_frame.addLayout(self.button_frame)\n\n\t\t# Creating small button frame\n\t\tself.SmallButtonFrame = QtGui.QHBoxLayout()\n\t\tself.control_frame.addLayout(self.SmallButtonFrame)\n\n\t\t# Creating help button frame\n\t\tself.HelpButtonFrame = QtGui.QHBoxLayout()\n\t\tself.control_frame.addLayout(self.HelpButtonFrame)\n\n\t\t# Creating big buttons\n\t\tself.Buttons['Run'] = self.LocalButton(self.button_frame, QtGui.QPushButton, \"&Run\", \"Runs the simulation continuously\", self.RunButtonClick) # Run button\n\t\tself.Buttons['Step'] = self.LocalButton(self.button_frame, QtGui.QPushButton, \"&Step\", \"Pauses the simulation or runs it stepwise\", self.StepButtonClick)\n\t\tself.control_frame.addStretch(1)\n\t\tself.Buttons['Help'] = self.LocalButton(self.HelpButtonFrame, QtGui.QPushButton, \"&Help\", \"Provides help about this interface\", self.HelpButtonClick)\n\t\tself.Buttons['Quit'] = self.LocalButton(self.control_frame, QtGui.QPushButton, \"&Quit\", \"Quit the programme\", self.QuitButtonClick)\n\t\t\n\t\t# room for plot panel\t\t\t#\n\t\tself.plot_frame = QtGui.QHBoxLayout()\n\t\tself.plot_frame.addLayout(self.control_frame)\n\t\t#self.plot_frame.addStretch(1)\n\n\t\tself.setLayout(self.plot_frame)\n\t\tself.setGeometry(200, 200, 140, 300)\t\t\n\t\tself.show()\n\t\t\n\n\tdef LocalButton(self, ParentFrame, ButtonType, Text, Tip, ClickFunction, ShortCutKey=None):\n\t\tButton = ButtonType(Text, self)\n\t\tButton.setToolTip(Tip)\n\t\tButton.clicked.connect(ClickFunction)\n\t\tif ShortCutKey is not None:\n\t\t\tButton.setShortcut(QtGui.QKeySequence(ShortCutKey))\n\t\tParentFrame.addWidget(Button)\n\t\treturn Button\n\n\tdef EvolifeWebSite(self, e):\n\t\twebbrowser.open(e)\n\t\t\n\tdef HelpButtonClick(self, event=None):\n\t\t\" Displays a text file named: \"\n\t\tif not 'Help' in self.SWindows:\n\t\t\tself.SWindows['Help'] = Evolife_Graphic.Help_window(self)\n\t\t\tself.SWindows['Help'].setWindowIcon(QtGui.QIcon(os.path.join(self.Obs.get_info('EvolifeMainDir'),self.IconName)))\n\t\t\ttry:\n\t\t\t\tself.SWindows['Help'].display(os.path.join(self.Obs.get_info('EvolifeMainDir'), HelpFileName))\n\t\t\texcept IOError:\n\t\t\t\tself.Obs.TextDisplay(\"Unable to find help file %s\" % HelpFileName)\n\t\t\t\tdel self.SWindows['Help']\n\t\telse: self.SWindows['Help'].Raise()\n\n\tdef QuitButtonClick(self, event): \n\t\tself.close()\n##\t\tif self.closeEvent(None):\n##\t\t\tQtCore.QCoreApplication.instance().quit()\n\t\t\n\tdef Raise(self):\n\t\tif self.isActiveWindow():\n\t\t\tfor SWName in self.SWindows:\n\t\t\t\tself.SWindows[SWName].raise_()\n\t\t\tif self.SWindows:\n\t\t\t\tSWName = random.choice(list(self.SWindows.keys()))\n\t\t\t\tself.SWindows[SWName].Raise()\t\t\t\t\n\t\telse:\n\t\t\tself.raise_()\n\t\t\tself.activateWindow()\n\n\n\tdef closeEvent(self, event):\n\t\tself.Finish = True\n\t\tself.simulation_steady_mode = False\t# Stepwise functioning\t\t\n\t\tfor (SWName,SW) in list(self.SWindows.items()): # items() necessary here; list necessary for python 3\n\t\t\tself.SWindows[SWName].close()\t\t \n\t\t# No more satelite window left at this stage\n\t\tself.Simulation_stop()\n\t\tevent.accept()\n\n\tdef SWDestroyed(self, SW):\n\t\t# A satellite window has been destroyed\n\t\tfor SWName in self.SWindows:\n\t\t\tif self.SWindows[SWName] == SW:\n\t\t\t\tdel self.SWindows[SWName]\n\t\t\t\treturn\n\t\terror('Evolife_Window', 'Unidentified destroyed window')\n\n\tdef ReturnFromThread(self):\n\t\tSimulation_Control.ReturnFromThread(self)\t# parent class 
procedure\n\t\tif self.Obs.Visible():\tself.Process_graph_orders()\n\t\tif self.Obs.Over():\treturn -1\t# Stops the simulation thread\n\t\treturn False\n\n\tdef Process_graph_orders(self):\n\t\tself.Obs.displayed() # Let Obs know that display takes place\n\t\tself.CurrentFrame += 1\t\t\t \n\t\tif self.PhotoMode == 1:\n\t\t\t# single shot mode is over\n\t\t\tself.PhotoMode = 0\n\n\tdef keyPressEvent(self, e):\n\t\tif e.key() in [QtCore.Qt.Key_Q, QtCore.Qt.Key_Escape]:\n\t\t\tself.close()\t\t\n\t\telif e.key() in [QtCore.Qt.Key_S, QtCore.Qt.Key_Space]: # Space does not work...\n\t\t\tself.StepButtonClick()\n\t\telif e.key() in [QtCore.Qt.Key_R, QtCore.Qt.Key_C]:\n\t\t\tself.Buttons['Run'].animateClick()\n\t\telif e.key() in [QtCore.Qt.Key_H, QtCore.Qt.Key_F1]:\n\t\t\tself.Buttons['Help'].animateClick()\n\t\telif e.key() in [QtCore.Qt.Key_M]: # to avoid recursion\n\t\t\tself.Raise()\n\t\t# let Obs know\n\t\ttry:\tself.Obs.inform(str(e.text()))\n\t\texcept UnicodeEncodeError:\tpass\n\n\n#---------------------------#\n# Control panel + Slider #\n#---------------------------#\nclass Simulation_Display_Control_Frame(Simulation_Control_Frame):\n\t\"\"\" This class combines a control panel and a slider for controlling display period\n\t\"\"\"\n\n\tdef __init__(self, SimulationStep, Obs, Background=None):\n\n\t\tSimulation_Control_Frame.__init__(self, SimulationStep, Obs)\n\n\t\t# DisplayPeriod slider\n\t\tself.lcd = QtGui.QLCDNumber(self)\n\t\tself.lcd.setSegmentStyle(QtGui.QLCDNumber.Filled)\n\t\tlcdPalette = QtGui.QPalette()\n\t\tlcdPalette.setColor(QtGui.QPalette.Light, QtGui.QColor(200,10,10))\n\t\tself.lcd.setPalette(lcdPalette)\n\t\tself.button_frame.addWidget(self.lcd)\n\t\tself.DisplayPeriodSlider = QtGui.QSlider(QtCore.Qt.Horizontal, self)\n\t\tself.button_frame.addWidget(self.DisplayPeriodSlider)\n\t\tself.DisplayPeriodSlider.valueChanged.connect(self.DisplayPeriodChanged)\n\t\tself.DisplayPeriodSlider.setMinimum(0)\n\t\tself.sliderPrecision = 5\t# decimal precision, as now slider valueChanged events are integers\n\t\tself.DisplayPeriodSlider.setMaximum(3 * 10 ** self.sliderPrecision)\n\t\tself.DisplayPeriodSet(self.Obs.DisplayPeriod())\n\n\tdef DisplayPeriodChanged(self, event):\n\t\t\"\"\" The displayed value varies exponentially with the slider's position\n\t\t\"\"\"\n\t\tdisp = int(10 ** ((int(event)+1)/(10.0 ** self.sliderPrecision)))\n\t\tif (disp > 2999): disp = ((disp+500) // 1000) * 1000\n\t\telif (disp > 299): disp = ((disp+50) // 100) * 100\n\t\telif (disp > 29): disp = ((disp+5) // 10) * 10\n\t\telif (disp > 14): disp = ((disp+2) // 5) * 5\n\t\tdisp = int(disp)\n\t\tself.previous_Disp_period = disp\n\t\tself.Disp_period = disp\n\t\tself.lcd.display(str(disp))\n\t\tself.Obs.DisplayPeriod(self.Disp_period)\t# let Obs know\n\n\tdef DisplayPeriodSet(self, Period, FlagForce=True):\n\t\tif Period == 0: Period = 1\n\t\tPosition = math.log(abs(Period),10) * 10 ** self.sliderPrecision\n\t\tself.DisplayPeriodSlider.setSliderPosition(int(Position))\n\t\tself.lcd.display(Period)\n\n\n\n#---------------------------#\n# Control panel + Curves\t#\n#---------------------------#\n\nclass Simulation_Frame(Simulation_Display_Control_Frame):\n\t\"\"\" This class combines a control panel and a space to display curves\n\t\"\"\"\n\n\tdef __init__(self, SimulationStep, Obs, Background=None):\n\n\t\tSimulation_Display_Control_Frame.__init__(self, SimulationStep, Obs)\n\t\tself.setGeometry(50, 50, 700, 420)\t\t\n\n\t\t##################################\n\t\t# plot panel\t\t\t\t\t 
#\n\t\t##################################\n\t\tself.plot_area = Plot_Area.AreaView(Plot_Area.Plot_Area, image=Background)\n\t\tself.plot_frame.addWidget(self.plot_area, 1)\t \n\t\t#self.plot_area.show()\n\t\t#self.plot_area.Area.drawPoints()\n\t\t# self.Obs.TextDisplay(self.plot_area.Area.Curvenames(self.Obs.get_info('CurveNames')))\n\t\t\n\t\t# adding legend button\n\t\tself.Buttons['Legend'] = self.LocalButton(self.HelpButtonFrame, QtGui.QPushButton, \"Legen&d\", \"Displays legend for curves\", self.LegendButtonClick)\n\n\n\tdef LegendButtonClick(self, event=None):\n\t\t\" Displays the legend of the curves \"\n\t\tif 'Legend' not in self.SWindows:\n\t\t\tself.SWindows['Legend'] = Evolife_Graphic.Legend_window(self)\n\t\t\tself.SWindows['Legend'].setWindowIcon(QtGui.QIcon(os.path.join(self.Obs.get_info('EvolifeMainDir'),self.IconName)))\n\t\t\ttry:\n\t\t\t\tself.plot_area.Area.Curvenames(self.Obs.get_info('CurveNames'))\t# stores curve names\n\t\t\t\tComments = self.Obs.get_info('WindowLegends')\n\t\t\t\t# self.SWindows['Legend'].display(self.Obs.get_info('CurveNames'), Comments=Comments)\n\t\t\t\tself.SWindows['Legend'].display(self.plot_area.Area.Legend(), Comments=Comments)\n\t\t\texcept IOError:\n\t\t\t\tself.Obs.TextDisplay(\"Unable to find information on curves\")\n\t\t\t\tdel self.SWindows['Legend']\n\t\telse: self.SWindows['Legend'].Raise()\n\n\tdef Process_graph_orders(self):\n\t\tif self.Finish:\treturn\n\t\tif self.PhotoMode:\t# one takes a photo\n\t\t\tImgC = self.plot_area.photo('___Curves_', self.CurrentFrame, outputDir=self.Obs.get_info('OutputDir'))\n\t\t\tif self.PhotoMode == 1:\t# Photo mode, not film\n\t\t\t\tself.Obs.TextDisplay('%s Created' % ImgC)\n\t\t\t\tself.dump()\n\t\tPlotData = self.Obs.get_info('PlotOrders')\n\t\tif PlotData:\t\n\t\t\tfor (CurveId, Point) in PlotData:\n\t\t\t\tself.plot_area.Area.plot(CurveId, Point)\n\t\tSimulation_Control_Frame.Process_graph_orders(self)\n\n\tdef dump(self, verbose=False):\n\t\t\" store and print simulation results\t\"\n\t\t# creates a result file and writes parameter names into it\n\t\tRF = self.Obs.get_info('ResultFile')\n\t\tif RF:\n\t\t\tself.plot_area.Area.Curvenames(self.Obs.get_info('CurveNames'))\t# stores curve names - may have been updated\n\t\t\tAverageValues = self.plot_area.Area.dump(RF, self.Obs.get_info('ResultHeader'), \n\t\t\t\t\t\t\t\t\t\tself.Obs.get_info('ResultOffset', 0))\n\t\t\tif verbose:\n\t\t\t\tself.Obs.TextDisplay('\\n. ' + '\\n. '.join(['%s\\t%s' % (C, AverageValues[C]) for C in sorted(AverageValues)]))\n\t\t\t\tself.Obs.TextDisplay('\\nResults stored in %s*.csv' % os.path.normpath(RF))\n\t\t\n\tdef closeEvent(self, event):\n\t\tif self.alive:\tself.dump(verbose=True)\n\t\tself.alive = False\n\t\tSimulation_Control_Frame.closeEvent(self, event)\n\t\tevent.accept()\n\n#-------------------------------------------#\n# Control panel + Curves + Genomes + . . . 
#\n#-------------------------------------------#\n\t \nclass Evolife_Frame(Simulation_Frame):\n\t\"\"\" Defines Evolife main window by modification of the generic Simulation Frame\n\t\"\"\"\n\n\tdef __init__(self, SimulationStep, Obs, Capabilities='C', Options=[]):\n\n\t\t###################################\n\t\t# Creation of the main window #\n\t\t###################################\n\t\tself.Capabilities = list(Capabilities)\n\t\t# Determining backgrounds\n\t\tself.Background = dict()\n\t\tself.DOptions = dict(Options)\n\t\tif 'Background' in self.DOptions:\t# Default background for all windows\n\t\t\tself.Background['Default'] = dict(Options)['Background']\n\t\telse:\tself.Background['Default'] = Obs.get_info('Background')\n\t\tif self.Background['Default'] is None:\t\n\t\t\tself.Background['Default'] = \"#F0B554\"\n\t\tfor W in ['Curves', 'Genomes', 'Photo', 'Trajectories', 'Network', 'Field', 'Log', 'Image']:\n\t\t\tself.Background[W] = Obs.get_info(W + 'Wallpaper')\n\t\t\tif self.Background[W] is None:\tself.Background[W] = self.Background['Default']\n\n\t\tif 'C' in self.Capabilities:\n\t\t\tself.ParentClass = Simulation_Frame\n\t\t\tSimulation_Frame.__init__(self, SimulationStep, Obs, Background=self.Background['Curves'])\n\t\telif set('FRGNT') & set(Capabilities):\n\t\t\tself.ParentClass = Simulation_Display_Control_Frame\n\t\t\tSimulation_Display_Control_Frame.__init__(self, SimulationStep, Obs)\n\t\telse:\n\t\t\tself.ParentClass = Simulation_Control_Frame\n\t\t\tSimulation_Control_Frame.__init__(self, SimulationStep, Obs)\n\n\t\t##################################\n\t\t# Control panel #\n\t\t##################################\n\n\t\t# Creating small buttons\n\t\tif 'T' in self.Capabilities:\n\t\t\tself.Buttons['Trajectories'] = self.LocalButton(self.SmallButtonFrame, QtGui.QCheckBox, \"&T\", 'Displays trajectories', self.TrajectoryButtonClick, QtCore.Qt.Key_T)\n\t\tif 'N' in self.Capabilities:\n\t\t\tself.Buttons['Network'] = self.LocalButton(self.SmallButtonFrame, QtGui.QCheckBox, \"&N\", 'Displays social links', self.NetworkButtonClick, QtCore.Qt.Key_N)\n\t\tif set('FRI') & set(self.Capabilities):\n\t\t\t# Region is a kind of field\n\t\t\tself.Buttons['Field'] = self.LocalButton(self.SmallButtonFrame, QtGui.QCheckBox, \"&F\", 'Displays field', self.FieldButtonClick, QtCore.Qt.Key_F)\n\t\tif 'L' in self.Capabilities:\n\t\t\tself.Buttons['Log'] = self.LocalButton(self.SmallButtonFrame, QtGui.QCheckBox, \"&L\", 'Displays the log terminal', self.LogButtonClick, QtCore.Qt.Key_L)\n\n\t\tif 'R' in self.Capabilities:\tself.FieldOngoingDisplay = True\n\t\telse:\tself.FieldOngoingDisplay = False\n\n\t\t# Creating big buttons (they are big for historical reasons)\n\t\tif 'G' in self.Capabilities:\n\t\t\tself.Buttons['Genomes'] = self.LocalButton(self.button_frame, QtGui.QPushButton, \"&Genomes\", 'Displays genomes', self.GenomeButtonClick) # Genome button\n\t\tif 'P' in self.Capabilities:\n\t\t\tself.Buttons['Photo'] = self.LocalButton(self.button_frame, QtGui.QPushButton, \"&Photo\", 'Saves a .jpg picture', self.PhotoButtonClick) # Photo button\n\n\t\t# Activate the main satellite windows\n\t\tDefViews = self.Obs.get_info('DefaultViews')\n\t\tif DefViews:\n\t\t\tDefViews.reverse()\t# surprisingly necessary to get the last window active\n\t\t\tfor B in DefViews:\n\t\t\t\t# two syntaxes allowed: 'WindowName' or ('Windowname', width [,height])\n\t\t\t\tif type(B) == str:\tself.Buttons[B].animateClick()\n\t\t\t\telif type(B) == tuple:\n\t\t\t\t\tself.Buttons[B[0]].animateClick()\n\t\t\t\t\t# 
self.Buttons[B[0]].animateClick(*B[1:])\n\t\t\t\t\tself.SWindowsPreferredGeometry[B[0]] = B[1:]\n\t\telif DefViews is None:\n\t\t\tfor B in ['Trajectories', 'Field', 'Network', 'Genomes', 'Log']:\t# ordered list\n\t\t\t\tif B in self.Buttons:\n\t\t\t\t\tself.Buttons[B].animateClick()\n\t\t\t\t\tbreak\t# opening only one satellite window\n\t\t\t\t\t\n\t\t# start mode\n\t\tif 'Run' in self.DOptions and self.DOptions['Run'] == 'Yes':\tself.Buttons['Run'].animateClick()\n\t\n\tdef keyPressEvent(self, e):\n\t\tself.ParentClass.keyPressEvent(self, e)\n\t\t# Additional key actions\n\t\ttry:\n\t\t\tif e.key() == QtCore.Qt.Key_G: self.Buttons['Genomes'].animateClick()\n\t\t\tif e.key() == QtCore.Qt.Key_P: self.Buttons['Photo'].animateClick()\n\t\t\tif e.key() == QtCore.Qt.Key_T: self.Buttons['Trajectories'].animateClick()\n\t\t\tif e.key() == QtCore.Qt.Key_N: self.Buttons['Network'].animateClick()\n\t\t\tif e.key() == QtCore.Qt.Key_F: self.Buttons['Field'].animateClick()\n\t\t\tif e.key() == QtCore.Qt.Key_L: self.Buttons['Log'].animateClick()\t\t\n\t\t\tif e.key() == QtCore.Qt.Key_I: self.Buttons['Image'].animateClick()\t\t\n\t\t\tif e.key() == QtCore.Qt.Key_D: self.Buttons['Legend'].animateClick()\t\t\n\t\t\tif e.key() == QtCore.Qt.Key_V: self.FilmButtonClick(e)\n\t\texcept KeyError:\tpass\n\t\tself.checkButtonState()\n\n\tdef GenomeButtonClick(self, event):\n\t\tif 'Genomes' not in self.Buttons:\treturn\n\t\tif 'Genomes' not in self.SWindows:\n\t\t\tself.SWindows['Genomes'] = Evolife_Graphic.Genome_window(control=self, outputDir=self.Obs.get_info('OutputDir'), image=self.Background['Genomes'])\n\t\t\t# moving the window\n\t\t\tself.SWindows['Genomes'].move(800, 200)\t\t\n\t\t\tself.WindowActivation('Genomes')\n\t\telse:\tself.SWindows['Genomes'].Raise()\n\n\tdef PhotoButtonClick(self, event):\n\t\tif 'Photo' not in self.Buttons:\treturn\n\t\tif self.PhotoMode:\n\t\t\tself.Obs.TextDisplay('Photo mode ended\\n')\n\t\t\tself.PhotoMode = 0\n\t\telse:\n\t\t\tself.PhotoMode = 1 # take one shot\n\t\t\tself.StepButtonClick()\n\t\t\tself.Obs.TextDisplay('\\nPhoto mode' + self.Obs.__repr__() + '\\n' + 'Frame %d' % self.CurrentFrame)\n\t\t\tif not self.Obs.Visible():\tself.Process_graph_orders()\t# possible if photo event occurs between years\n\n\tdef FilmButtonClick(self, event):\n\t\tif 'Photo' not in self.Buttons:\treturn\n\t\t# at present, the button is not shown and is only accessible by pressing 'V' \n\t\tself.PhotoMode = 2 - self.PhotoMode\n\t\tif self.PhotoMode:\n\t\t\tself.setWindowTitle(\"%s (FILM MODE)\" % self.Name)\n\t\telse:\tself.setWindowTitle(self.Name)\n\t\n\tdef TrajectoryButtonClick(self, event):\n\t\tif 'Trajectories' not in self.Buttons:\treturn\n\t\tif 'Trajectories' not in self.SWindows:\n\t\t\tself.SWindows['Trajectories'] = Evolife_Graphic.Field_window(control=self, \n\t\t\t\t\t\t\t\t\t\t\t\tWtitle='Trajectories', \n\t\t\t\t\t\t\t\t\t\t\t\toutputDir=self.Obs.get_info('OutputDir'), \n\t\t\t\t\t\t\t\t\t\t\t\timage=self.Background['Trajectories'])\n\t\t\t# moving the window\n\t\t\tself.SWindows['Trajectories'].move(275, 500)\t\t\n\t\t\tself.WindowActivation('Trajectories')\n\t\telse:\tself.SWindows['Trajectories'].Raise()\n \n\tdef NetworkButtonClick(self, event):\n\t\tif 'Network' not in self.Buttons:\treturn\n\t\tif 'Network' not in self.SWindows:\n\t\t\tself.SWindows['Network'] = Evolife_Graphic.Network_window(control=self, \n\t\t\t\t\t\t\t\t\t\t\t\toutputDir=self.Obs.get_info('OutputDir'), 
\n\t\t\t\t\t\t\t\t\t\t\t\timage=self.Background['Network'])\n\t\t\tself.WindowActivation('Network')\n\t\t\tself.SWindows['Network'].move(790, 500)\t\t\n\t\telse:\tself.SWindows['Network'].Raise()\n\t\n\tdef FieldButtonClick(self, event):\n\t\tif 'Field' not in self.Buttons:\treturn\n\t\tif 'Field' not in self.SWindows:\n\t\t\tself.SWindows['Field'] = Evolife_Graphic.Field_window(control=self, \n\t\t\t\t\t\t\t\t\t\t\t\tWtitle=self.Name, \n\t\t\t\t\t\t\t\t\t\t\t\toutputDir=self.Obs.get_info('OutputDir'), \n\t\t\t\t\t\t\t\t\t\t\t\timage=self.Background['Field'])\n\t\t\t# moving the window\n\t\t\tself.SWindows['Field'].move(800, 100)\t\t\n\t\t\tself.WindowActivation('Field')\n\t\telse:\tself.SWindows['Field'].Raise()\n\t\t\n\tdef LogButtonClick(self, event):\n\t\tif 'Log' not in self.Buttons:\treturn\n\t\tself.Obs.TextDisplay('LogTerminal\\n')\n\t\tpass\t\t\t\n\t\n\tdef WindowActivation(self, WindowName):\t\t# complement after click\n\t\tself.SWindows[WindowName].setWindowIcon(QtGui.QIcon(os.path.join(self.Obs.get_info('EvolifeMainDir'),self.IconName)))\n\t\tself.Process_graph_orders()\n\t\tif WindowName in self.SWindowsPreferredGeometry:\tself.SWindows[WindowName].dimension(*self.SWindowsPreferredGeometry[WindowName])\n\t\n\tdef checkButtonState(self):\n\t\tfor B in self.Buttons:\n\t\t\tif B in ['Network','Field','Image','Trajectories','Log']:\n\t\t\t\tif self.Buttons[B].isEnabled() and B not in self.SWindows:\n\t\t\t\t\tself.Buttons[B].setCheckState(False)\n\t\t\t\tif self.Buttons[B].isEnabled() and B in self.SWindows:\n\t\t\t\t\tself.Buttons[B].setCheckState(True)\n\t\t\t\t\t\t\t \n\tdef Process_graph_orders(self):\n\t\tImgG, ImgN, ImgF, ImgT = ('',) * 4\n\t\tif 'Genomes' in self.SWindows:\n\t\t\tImgG = self.SWindows['Genomes'].genome_display(genome=self.Obs.get_data('DNA'),\n\t\t\t\t\t\t\t\t\t\t\t\t\tgene_pattern=self.Obs.get_info('GenePattern'),\n\t\t\t\t\t\t\t\t\t\t\t\t\tPhoto=self.PhotoMode, CurrentFrame=self.CurrentFrame)\n\t\tif 'Network' in self.SWindows:\n\t\t\tImgN = self.SWindows['Network'].Network_display(self.Obs.get_data('Positions', Consumption=False),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.Obs.get_data('Network'),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tPhoto=self.PhotoMode, CurrentFrame=self.CurrentFrame)\n\t\tif 'Field' in self.SWindows:\n\t\t\tself.SWindows['Field'].image_display(self.Obs.get_info('Image'), windowResize=True)\n\t\t\tImgF = self.SWindows['Field'].Field_display(self.Obs.get_data('Positions'), \n\t\t\t\t\t\t\t\t\t\t\t\t Photo=self.PhotoMode,\n\t\t\t\t\t\t\t\t\t\t\t\t CurrentFrame=self.CurrentFrame,\n\t\t\t\t\t\t\t\t\t\t\t\t Ongoing=self.FieldOngoingDisplay, Prefix='___Field_')\n\t\tif 'Trajectories' in self.SWindows:\n\t\t\tself.SWindows['Trajectories'].image_display(self.Obs.get_info('Pattern'), windowResize=True)\n\t\t\tImgT = self.SWindows['Trajectories'].Field_display(self.Obs.get_data('Trajectories'),\n\t\t\t\t\t\t\t\t\t\t\t\t Photo=self.PhotoMode,\n\t\t\t\t\t\t\t\t\t\t\t\t CurrentFrame=self.CurrentFrame, Ongoing=self.FieldOngoingDisplay, Prefix='___Traj_')\n\t\tif self.PhotoMode == 1:\t\n\t\t\tif ''.join([ImgG, ImgN, ImgF, ImgT]):\n\t\t\t\tself.Obs.TextDisplay('%s Created' % ' '.join([ImgG, ImgN, ImgF, ImgT]))\n\t\tself.ParentClass.Process_graph_orders(self) # draws curves (or not)\n\t\tself.checkButtonState()\n\n\tdef DecisionToEnd(self):\n\t\tif 'ExitOnEnd' in self.DOptions and self.DOptions['ExitOnEnd'] == 'Yes':\n\t\t\tself.PhotoMode = 1\t# taking photos\n\t\t\tself.Process_graph_orders()\n\t\t\tself.Buttons['Quit'].animateClick()\t# exiting\n\t\t\n\tdef 
SWDestroyed(self, SW):\n\t\tself.ParentClass.SWDestroyed(self, SW)\n\t\tself.checkButtonState()\t\t\n\t\t\t\t\n\tdef closeEvent(self, event):\n\t\tself.ParentClass.closeEvent(self, event)\n\t\tevent.accept()\n\t\t\t\t\n\n##################################################\n# Creation of the graphic application\t\t\t#\n##################################################\n\ndef Start(SimulationStep, Obs, Capabilities='C', Options=[]):\n\t\"\"\" SimulationStep is a function that performs a simulation step\n\t\tObs is the observer that stores statistics\n\t\tCapabilities (curves, genome display, trajectory display...)\n\t\t\t= any string of letters from: CFGILNPRT\n\t\"\"\"\n\n\tMainApp = QtGui.QApplication(sys.argv)\n\n\tif set(Capabilities) <= set('CFGILNPRT'):\n\t\tMainWindow = Evolife_Frame(SimulationStep, Obs, Capabilities, Options)\n\t\t \n\t\t# Entering main loop\n\t\tMainApp.exec_()\n\t\tif os.name != 'nt':\tMainApp.deleteLater()\t# Necessary to avoid problems on Unix\n\telse:\n\t\tMainWindow = None\n\t\tprint(\"\"\" Error: should be a string of letters taken from: \n\t\tC = Curves \n\t\tF = Field (2D seasonal display) (excludes R)\n\t\tI = Image (same as Field, but no slider)\n\t\tG = Genome display\n\t\tL = Log Terminal\n\t\tN = social network display\n\t\tP = Photo (screenshot)\n\t\tR = Region (2D ongoing display) (excludes F)\n\t\tT = Trajectory display\n\t\t\"\"\")\n\n\n\t\n\t\t\nif __name__ == '__main__':\n\n\tprint(__doc__)\n\n\n__author__ = 'Dessalles'\n","repo_name":"piochelepiotr/jump","sub_path":"Evolife/QtGraphics/Evolife_Window.py","file_name":"Evolife_Window.py","file_ext":"py","file_size_in_byte":26562,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"36189800144","text":"import docx\nfrom docx.oxml.ns import qn\nimport re\n\ndef regex_inc(regex_list, regex_counter, match_list, para):\n if regex_counter < len(regex_list):\n regex = re.findall(regex_list[regex_counter], para.text)\n if (regex):\n match_list.append(regex[0])\n #if (regex_counter < len(regex_list) - 1):\n regex_counter += 1\n return (regex_counter, match_list)\n\n\n## Change this file location to the location of the file you are parsing\ndoc = docx.Document(\"C:/Users/twins/Desktop/UNO classes/Spring 2021 Semester/CSCI 4970 - Capstone/Python tests/undergrad2018-regular.docx\")\n\n# 'C:\\Users\\twins\\Desktop\\UNO classes\\Spring 2021 Semester\\CSCI 4970 - Capstone\\Python tests'\n# https://github.com/ahesselgesser/TeamAAA\n\n#### Variables to capture\n## College, Department/School ?, Program, Degree Level, Academic Year of Report,\n## Date Range of Reported Data, Person Preparing the Report, SLOs (maybe in an array, or tuple, or something)\n## Bloom's Taxonomy (checkboxes)\n\n### Creating lists to hold all the different regexes and matches, then iterate through them.\nregex_counter = 0\nug18_regex_header_list = ['College:\\s*(.*)\\s*Department/School:', 'Department/School:\\s*(.*)', 'Program:\\s*(.*)\\s*Degree Level:', 'Degree Level:\\s*(.*)', 'Academic Year of Report:\\s*(.*)\\s*Date', 'Data:\\s*(.*)',\n'Person Preparing the Report:\\s(.*)'] \n\n### Match 1 is Colleges\n### Match 2 is Department/School\n### \nmatch_list = []\n\nall_paras = doc.paragraphs\n\nfor para in all_paras:\n # called twice on purpose: a single paragraph may match two consecutive patterns\n (regex_counter, match_list) = regex_inc(ug18_regex_header_list, regex_counter, match_list, para)\n (regex_counter, match_list) = regex_inc(ug18_regex_header_list, regex_counter, match_list, para)\n \"\"\"\n if (regex_counter >= 7):\n print(para.text)\n \"\"\"\nprint(match_list)\n\n### 
https://stackoverflow.com/questions/27861732/parsing-of-table-from-docx-file/27862205 ###\ndata = []\n\n### UG 2018 Regular Tables\n## SLO Table\n# Checkboxes\n## SLO communication to stakeholders\n## Assessment Methods\n# Separate table for EACH SLO\n# Will have to devise a method to determine when these tables end and the next category begins\n# Checkboxes in these tables\n## Data Collection And Analysis Table\n# Two tables\n# \n## Results communicated within program Table\n## Decisions & Actions Table\n### END\n\ntable = doc.tables[1]\nfor i, row in enumerate(table.rows):\n text = (cell.text for cell in row.cells)\n \n # Establish the mapping based on the first row\n # headers; these will become the keys of our dictionary\n if i == 0 and table != doc.tables[1]:\n keys = tuple(text)\n continue\n elif i == 4:\n keys = tuple(text)\n continue\n\n # Construct a dictionary for this row, mapping\n # keys to values for this row\n row_data = dict(zip(keys, text))\n data.append(row_data)\n\nprint(data)\n#print(data[0]['Student Learning Outcomes'])\n### https://stackoverflow.com/questions/27861732/parsing-of-table-from-docx-file/27862205 ###\n","repo_name":"ahesselgesser/TeamAAA","sub_path":"mysite/core/paser/read_doc_test.py","file_name":"read_doc_test.py","file_ext":"py","file_size_in_byte":2953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72883779948","text":"\"\"\"\nMain tool.\n\"\"\"\n\nimport argparse\n\nfrom .. import VERSION\nfrom ..config import (\n set_config,\n load_config,\n update_config,\n setup_config,\n)\nfrom ..item import make_item\nfrom ..modules import load_ref\nfrom ..search import (\n StopAtFirst,\n StopAtLast,\n StopBelowComplexity,\n)\n\nfrom .subcommands import (\n function_search,\n function_shell,\n function_test,\n function_compile,\n function_doc,\n function_tree,\n function_config_show,\n function_config_write,\n function_config_reset,\n)\n\n__all__ = [\n 'main',\n 'main_argparse',\n]\n\n\ndef type_stop_below_complexity(string):\n return StopBelowComplexity(int(string))\n\n\ndef type_config_key_value(string):\n key, value = string.split(\"=\", 1)\n value = eval(value)\n return (key, value)\n\n\ndef main_argparse():\n \"\"\"Main function\"\"\"\n top_level_parser = argparse.ArgumentParser(\n description=\"\"\"\\\nSequel v{version} - integer sequence finder\n\"\"\".format(version=VERSION))\n\n top_level_parser.set_defaults(\n function=function_shell,\n function_args=[])\n\n top_level_parser.add_argument(\n \"-l\", \"--load\",\n metavar=\"F\",\n dest=\"pymodules\", default=[], type=str, action=\"append\",\n help=\"python module\")\n\n top_level_parser.add_argument(\n \"-c\", \"--config\",\n metavar=\"F\",\n dest=\"config_filename\", default=None, type=str,\n help=\"config filename\")\n\n top_level_parser.add_argument(\n \"-k\", \"--set-key\",\n metavar=\"K=V\",\n dest=\"config_keys\", default=[], type=type_config_key_value,\n action=\"append\",\n help=\"set config key\")\n\n top_level_parser.add_argument(\n \"--version\",\n action=\"version\",\n version=VERSION)\n\n subparsers = top_level_parser.add_subparsers()\n\n common_display_args = [\n 'num_items', 'item_mode', 'separator', 'item_format', 'base', 'wraps',\n 'max_compact_digits', 'max_full_digits', 'colored',\n ]\n common_search_args = [\n 'handler', 'profile',\n ]\n search_description = \"\"\"\\\nSearch sequences matching items {}\n\nFor instance:\n\n$ sequel search 2 3 5 7 11\n 0] p\n 2 3 5 7 11 13 17 19 23 29 ...\n\nThe '??' 
symbol matches with any value:\n\n$ sequel search 2 3 5 7 ??\n 0] p\n 2 3 5 7 11 13 17 19 23 29 ...\n 1] m_exp\n 2 3 5 7 13 17 19 31 61 89 ...\n\nA value MIN..MAX matches with any value with MIN <= value <= MAX:\n\n$ sequel search 2 3 5 7 12..20\n 0] m_exp\n 2 3 5 7 13 17 19 31 61 89 ...\n\n\"\"\"\n search_parser = subparsers.add_parser(\n 'search',\n description=search_description.format(\"\")\n )\n search_parser.set_defaults(\n function=function_search,\n function_args=['items', 'limit', 'sort', 'reverse'] + common_search_args + ['display_kwargs'])\n\n doc_parser = subparsers.add_parser(\n 'doc',\n description=\"\"\"\\\nShow sequence documentation\"\"\")\n doc_parser.set_defaults(\n function=function_doc,\n function_args=['sources', 'simplify', 'full'] + ['display_kwargs'])\n\n compile_parser = subparsers.add_parser(\n 'compile',\n description=\"\"\"\\\nCompile a sequence\"\"\")\n compile_parser.set_defaults(\n function=function_compile,\n function_args=['sources', 'simplify', 'tree'] + ['display_kwargs'])\n\n tree_parser = subparsers.add_parser(\n 'tree',\n description=\"\"\"\\\nCompile a sequence and show it as a tree\"\"\")\n tree_parser.set_defaults(\n function=function_tree,\n function_args=['sources', 'simplify'] + ['display_kwargs'])\n\n config_parser = subparsers.add_parser(\n 'config',\n description=\"\"\"\\\nShow config file\"\"\")\n config_parser.set_defaults(\n function=function_config_show,\n function_args=[])\n\n config_subparsers = config_parser.add_subparsers()\n\n config_show_parser = config_subparsers.add_parser(\n 'show',\n description=\"\"\"\\\nShow config file\"\"\")\n config_show_parser.set_defaults(\n function=function_config_show,\n function_args=[])\n\n config_write_parser = config_subparsers.add_parser(\n 'write',\n description=\"\"\"\\\nWrite config file\"\"\")\n config_write_parser.set_defaults(\n function=function_config_write,\n function_args=[\"output_config_filename\", \"reset\"])\n\n config_write_parser.add_argument(\n \"-r\", \"--reset\",\n action=\"store_true\",\n default=False,\n help=\"reset to default values\")\n\n config_write_parser.add_argument(\n \"-o\", \"--output-config-filename\",\n type=str,\n default=None,\n help=\"output config filename\")\n\n config_reset_parser = config_subparsers.add_parser(\n 'reset',\n description=\"\"\"\\\nReset config file\"\"\")\n config_reset_parser.set_defaults(\n function=function_config_reset,\n function_args=[])\n\n shell_parser = subparsers.add_parser(\n 'shell',\n description=\"\"\"\\\nOpen an interactive shell\"\"\")\n shell_parser.set_defaults(\n function=function_shell,\n function_args=['display_kwargs'])\n\n test_parser = subparsers.add_parser(\n 'test',\n description=\"\"\"\\\nCompile a sequence and tries to search it\"\"\")\n test_parser.set_defaults(\n function=function_test,\n function_args=['sources', 'simplify', 'limit', 'sort', 'reverse'] + common_search_args + ['display_kwargs'])\n\n for parser in compile_parser, test_parser, doc_parser, tree_parser:\n parser.add_argument(\n \"-s\", \"--simplify\",\n action=\"store_true\",\n default=False,\n help=\"simplify expression\")\n\n for parser in search_parser, compile_parser, test_parser, doc_parser, tree_parser, shell_parser:\n parser.add_argument(\n \"-n\", \"--num-items\",\n metavar=\"N\",\n type=int,\n default=10,\n help=\"num items to show\")\n\n mode_group = parser.add_mutually_exclusive_group()\n mode_group.add_argument(\n \"-m\", \"--multiline\",\n dest=\"item_mode\", action=\"store_const\", const=\"multiline\",\n default=None,\n help=\"multiline 
item mode\")\n\n mode_group.add_argument(\n \"-o\", \"--oneline\",\n dest=\"item_mode\", action=\"store_const\", const=\"oneline\",\n default=None,\n help=\"oneline item mode\")\n\n wraps_group = parser.add_mutually_exclusive_group()\n wraps_group.add_argument(\n \"-w\", \"--wraps\",\n dest=\"wraps\", action=\"store_true\",\n default=None,\n help=\"wraps items line (oneline mode only)\")\n\n wraps_group.add_argument(\n \"-W\", \"--no-wraps\",\n dest=\"wraps\", action=\"store_false\",\n default=None,\n help=\"no wraps items line (oneline mode only)\")\n\n colored_group = parser.add_mutually_exclusive_group()\n colored_group.add_argument(\n \"-x\", \"--colored\",\n dest=\"colored\", action=\"store_true\",\n default=None,\n help=\"enables colored output\")\n\n colored_group.add_argument(\n \"-X\", \"--no-colored\",\n dest=\"colored\", action=\"store_false\",\n default=None,\n help=\"disable colored output\")\n\n parser.add_argument(\n \"-z\", \"--separator\",\n metavar=\"S\",\n type=str,\n default=None,\n help=\"item separator (oneline mode only)\")\n\n parser.add_argument(\n \"-f\", \"--item-format\",\n metavar=\"F\",\n type=str,\n default=None,\n help=\"item format\")\n\n parser.add_argument(\n \"-b\", \"--base\",\n metavar=\"B\",\n type=int,\n default=None,\n help=\"set base\")\n\n parser.add_argument(\n \"-C\", \"--max-compact-digits\",\n metavar=\"N\",\n type=int,\n default=None,\n help=\"maximum number of digits for compact item display\")\n\n parser.add_argument(\n \"-F\", \"--max-full-digits\",\n metavar=\"N\",\n type=int,\n default=None,\n help=\"maximum number of digits for full item display\")\n\n for parser in search_parser, test_parser:\n parser.add_argument(\n \"-l\", \"--limit\",\n metavar=\"L\",\n type=int,\n default=None,\n help=\"max number of results\")\n\n parser.add_argument(\n \"-t\", \"--sort\",\n action=\"store_true\",\n default=False,\n help=\"sort by complexity\")\n\n parser.add_argument(\n \"-R\", \"--reverse\",\n action=\"store_true\",\n default=False,\n help=\"reverse sorting\")\n\n for parser in search_parser, test_parser:\n parser.add_argument(\n \"-p\", \"--profile\",\n action=\"store_true\",\n default=False,\n help=\"show timing stats\")\n\n for parser in search_parser,:\n parser.add_argument(\n \"items\",\n nargs='+',\n type=make_item,\n help=\"sequence items\")\n\n compile_parser.add_argument(\n '-t', '--tree',\n action='store_true',\n default=False,\n help=\"show sequence tree\")\n\n for parser in test_parser, compile_parser, tree_parser:\n parser.add_argument(\n \"sources\",\n type=str,\n nargs='+',\n help=\"sequence source\")\n\n for parser in doc_parser,:\n parser.add_argument(\n \"sources\",\n type=str,\n nargs='*',\n help=\"sequence source\")\n\n parser.add_argument(\n \"-t\", \"--full\",\n default=False,\n action=\"store_true\",\n help=\"full output\")\n\n for parser in search_parser, test_parser:\n handler_group = parser.add_mutually_exclusive_group()\n handler_group.add_argument(\n \"--first\",\n dest=\"handler\", default=None,\n action=\"store_const\",\n const=StopAtFirst(),\n help=\"stop search at first results\")\n\n handler_group.add_argument(\n \"--last\",\n dest=\"handler\", default=None,\n action=\"store_const\",\n const=StopAtLast(),\n help=\"never stops search\")\n\n handler_group.add_argument(\n \"--complexity\",\n dest=\"handler\", default=None, metavar='C',\n type=type_stop_below_complexity,\n help=\"stop when below complexity\")\n\n namespace = top_level_parser.parse_args()\n if 'display_kwargs' in namespace.function_args:\n 
display_kwargs = {}\n for arg in common_display_args:\n display_kwargs[arg] = getattr(namespace, arg)\n namespace.display_kwargs = display_kwargs\n\n config = load_config(namespace.config_filename)\n setup_config(config)\n\n for pymodule in namespace.pymodules:\n load_ref(pymodule)\n\n for key, value in namespace.config_keys:\n update_config(config, key, value)\n set_config(config)\n kwargs = {arg: getattr(namespace, arg) for arg in namespace.function_args}\n result = namespace.function(**kwargs)\n return result\n\n\n# alias, so that 'main', listed in __all__, actually exists\nmain = main_argparse\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"simone-campagna/sequel","sub_path":"src/sequel/tool/main_argparse.py","file_name":"main_argparse.py","file_ext":"py","file_size_in_byte":11168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73995314987","text":"from sqlalchemy import Column, Integer, String, ForeignKey, Text, Float\nfrom sqlalchemy.orm import declarative_base, relationship\n\nBase = declarative_base()\n\nclass Subject(Base):\n __tablename__ = 'subjects'\n id = Column(Integer, primary_key=True)\n name = Column(String, unique=True, nullable=False)\n books = relationship('Book', order_by=\"Book.id\", back_populates='subject') \n problems = relationship('Problem', order_by=\"Problem.id\", back_populates='subject') \n\n def __init__(self, name):\n self.name = name\n\nclass Book(Base):\n __tablename__ = 'books'\n id = Column(Integer, primary_key=True)\n title = Column(String, nullable=False)\n subject_id = Column(Integer, ForeignKey('subjects.id')) \n subject = relationship('Subject', back_populates='books')\n problems = relationship('Problem', back_populates='book')\n\nclass Problem(Base):\n __tablename__ = 'problems'\n id = Column(Integer, primary_key=True)\n image_path = Column(String, nullable=True)\n problem_description = Column(String, nullable=False)\n book_id = Column(Integer, ForeignKey('books.id'), nullable=True)\n subject_id = Column(Integer, ForeignKey('subjects.id'), nullable=True)\n book = relationship(\"Book\", back_populates=\"problems\")\n subject = relationship(\"Subject\", back_populates=\"problems\")\n solution = relationship('Solution', uselist=False, back_populates='problem')\n solved = Column(Integer, default=0)\n time = Column(Float, nullable=True)\n\n def __init__(self, problem_description, image_path=None, book_id=None, book=None, subject_id=None, subject=None, solution=None):\n self.problem_description = problem_description\n self.image_path = image_path\n self.book_id = book_id\n self.book = book\n self.subject_id = subject_id\n self.subject = subject\n self.solution = solution\n\nclass Solution(Base):\n __tablename__ = 'solutions'\n id = Column(Integer, primary_key=True)\n description = Column(Text, nullable=False)\n image_path = Column(String, nullable=True) # Add this line to include image_path\n problem_id = Column(Integer, ForeignKey('problems.id'))\n problem = relationship('Problem', back_populates='solution')\n\n def __init__(self, description, problem_id=None, problem=None, image_path=None):\n self.description = description\n self.problem_id = problem_id\n self.problem = problem\n self.image_path = image_path # Add this line to handle image_path in the constructor\n","repo_name":"Ammarovenas/MathRev","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2507,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"70083788908","text":"from typing import List\n\n\nclass Solution:\n def 
reorderLogFiles(self, logs: List[str]) -> List[str]:\n letters, digits = [], []\n # Collect letter-logs and digit-logs separately.\n # Every log is either a letter-log or a digit-log, so it is enough to check only the first value after the identifier.\n for log in logs:\n if log.split()[1].isdigit():\n digits.append(log)\n else:\n letters.append(log)\n\n letters.sort(key=lambda x: (x.split()[1:], x.split()[0]))\n\n return letters + digits\n","repo_name":"jeongth9446/problem-solving","sub_path":"leetcode/python/reorder-data-in-log-files.py","file_name":"reorder-data-in-log-files.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"23076698596","text":"# python3\n\n\ndef lcm_naive(a, b):\n assert 1 <= a <= 2 * 10 ** 9 and 1 <= b <= 2 * 10 ** 9\n\n multiple = max(a, b)\n while multiple % a != 0 or multiple % b != 0:\n multiple += 1\n\n return multiple\n\n\ndef lcm(a, b):\n assert 1 <= a <= 2 * 10 ** 9 and 1 <= b <= 2 * 10 ** 9\n\n high = max(a, b)\n low = min(a, b)\n c = high\n # walk through multiples of the larger number until one is divisible by the smaller\n while True:\n if c % low == 0:\n return c\n else:\n c += high\n\n\nif __name__ == '__main__':\n input_a, input_b = map(int, input().split())\n print(lcm(input_a, input_b))\n","repo_name":"Tarbo/algo-data-structure","sub_path":"Algorithmic Warm Up/Least Common Multiple/lcm.py","file_name":"lcm.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16504961185","text":"import pygame\nimport sys\n# Test another test\npygame.init()\n\n# Set up the display\nwidth, height = 400, 400\nscreen = pygame.display.set_mode((width, height))\npygame.display.set_caption(\"Girl Drawing\")\n\n# Define colors\nwhite = (255, 255, 255)\nblack = (0, 0, 0)\npink = (255, 192, 203)\nblue = (0, 0, 255)\nbrown = (139, 69, 19)\nred = (255, 0, 0)\n\ndef draw_girl():\n # Draw the girl's head\n pygame.draw.circle(screen, pink, (200, 150), 60)\n\n # Draw the girl's body\n pygame.draw.rect(screen, blue, (170, 210, 60, 100))\n\n # Draw the girl's hair (simple representation)\n pygame.draw.arc(screen, brown, (140, 110, 120, 80), 0, 3.14)\n\n # Draw the girl's eyes\n pygame.draw.circle(screen, black, (175, 135), 10)\n pygame.draw.circle(screen, black, (225, 135), 10)\n\n # Draw the girl's mouth (simple representation)\n pygame.draw.arc(screen, red, (175, 150, 50, 30), 0, 3.14)\n\nrunning = True\nwhile running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n\n screen.fill(white)\n draw_girl()\n pygame.display.flip()\n\npygame.quit()\nsys.exit()\n","repo_name":"Sawrozzz/fun","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"22285152491","text":"# -*- coding: utf-8 -*-\n\nfrom airflow import DAG\nfrom airflow.operators.python_operator import PythonOperator\nfrom datetime import timedelta\nfrom utils.utils import trans_utc_datetime\nfrom utils.airflow_utils import set_valuation_date, set_calendar_import_year\n\n\n\n# -------------------------------------------------------------------------------\n# dag\n# these args will get passed on to each operator\n# you can override them on a per-task basis during operator initialization\ndefault_args = {\n 'owner': 'TongYu',\n 'catchup': False,\n 'start_date': trans_utc_datetime('0:00:00'),\n}\ndag = DAG(\n 'valuation_date_update_dag',\n catchup=False,\n default_args=default_args,\n schedule_interval='0,1 17 * 
* *',\n dagrun_timeout=timedelta(minutes=10),\n description='valuation date manager dag')\n# ----------------------------------------\n\n\nPythonOperator(\n task_id='valuation_date_update_task',\n python_callable=set_valuation_date,\n execution_timeout=timedelta(minutes=10),\n dag=dag)\n\nPythonOperator(\n task_id='calendar_import_year_update_task',\n python_callable=set_calendar_import_year,\n execution_timeout=timedelta(minutes=10),\n dag=dag)\n","repo_name":"zhanrendong/jkzx1","sub_path":"scripts/airflow/valuation_date_update_dag.py","file_name":"valuation_date_update_dag.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6264865715","text":"# -*- coding: utf-8 -*-\nfrom decimal import Decimal as D\n\nfrom django.test import TestCase\n\nfrom oscar.apps.basket.models import Basket\nfrom oscar.apps.catalogue.models import Option\nfrom oscar.apps.partner import availability, prices, strategy\nfrom oscar.test import factories\nfrom oscar.test.factories import (\n BasketFactory,\n BasketLineAttributeFactory,\n OptionFactory,\n ProductFactory,\n)\n\n\nclass TestANewBasket(TestCase):\n def setUp(self):\n self.basket = Basket()\n self.basket.strategy = strategy.Default()\n\n def test_has_zero_lines(self):\n self.assertEqual(0, self.basket.num_lines)\n\n def test_has_zero_items(self):\n self.assertEqual(0, self.basket.num_items)\n\n def test_doesnt_contain_vouchers(self):\n self.assertFalse(self.basket.contains_a_voucher)\n\n def test_can_be_edited(self):\n self.assertTrue(self.basket.can_be_edited)\n\n def test_is_empty(self):\n self.assertTrue(self.basket.is_empty)\n\n def test_is_not_submitted(self):\n self.assertFalse(self.basket.is_submitted)\n\n def test_has_no_applied_offers(self):\n self.assertEqual({}, self.basket.applied_offers())\n\n def test_is_tax_unknown(self):\n self.assertTrue(self.basket.is_empty)\n self.assertFalse(self.basket.is_tax_known)\n\n\nclass TestBasketLine(TestCase):\n def test_description(self):\n basket = BasketFactory()\n product = ProductFactory(title=\"A product\")\n basket.add_product(product)\n\n line = basket.all_lines()[0]\n self.assertEqual(line.description, \"A product\")\n\n def test_description_with_attributes(self):\n basket = BasketFactory()\n product = ProductFactory(title=\"A product\")\n basket.add_product(product)\n\n # pylint: disable=no-member\n line = basket.lines.first()\n BasketLineAttributeFactory(line=line, value=\"\\u2603\", option__name=\"with\")\n self.assertEqual(line.description, \"A product (with = '\\u2603')\")\n\n def test_create_line_reference(self):\n basket = BasketFactory()\n product = ProductFactory(title=\"A product\")\n option = OptionFactory(name=\"product_option\", code=\"product_option\")\n option_product = ProductFactory(title=\"Asunción\")\n options = [{\"option\": option, \"value\": str(option_product)}]\n basket.add_product(product, options=options)\n\n def test_basket_lines_queryset_is_ordered(self):\n # This is needed to make sure a formset is not performing the query\n # again with an order_by clause (losing all calculated discounts)\n basket = BasketFactory()\n product = ProductFactory(title=\"A product\")\n another_product = ProductFactory(title=\"Another product\")\n basket.add_product(product)\n basket.add_product(another_product)\n queryset = basket.all_lines()\n self.assertTrue(queryset.ordered)\n\n def test_line_tax_for_zero_tax_strategies(self):\n basket = Basket()\n basket.strategy = strategy.Default()\n 
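# the Default strategy prices products with zero tax, so the line tax\n # computed below is expected to be D(\"0\")\n 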
product = factories.create_product()\n # Tax for the default strategy will be 0\n factories.create_stockrecord(product, price=D(\"75.00\"), num_in_stock=10)\n basket.add(product, 1)\n\n self.assertEqual(basket.all_lines()[0].line_tax, D(\"0\"))\n\n def test_line_tax_for_unknown_tax_strategies(self):\n class UnknownTaxStrategy(strategy.Default):\n \"\"\"A test strategy where the tax is not known\"\"\"\n\n def pricing_policy(self, product, stockrecord):\n return prices.FixedPrice(\"GBP\", stockrecord.price, tax=None)\n\n basket = Basket()\n basket.strategy = UnknownTaxStrategy()\n product = factories.create_product()\n factories.create_stockrecord(product, num_in_stock=10)\n basket.add(product, 1)\n\n self.assertEqual(basket.all_lines()[0].line_tax, None)\n\n\nclass TestAddingAProductToABasket(TestCase):\n def setUp(self):\n self.basket = Basket()\n self.basket.strategy = strategy.Default()\n self.product = factories.create_product()\n self.record = factories.create_stockrecord(\n currency=\"GBP\", product=self.product, price=D(\"10.00\")\n )\n self.purchase_info = factories.create_purchase_info(self.record)\n self.basket.add(self.product)\n\n def test_creates_a_line(self):\n self.assertEqual(1, self.basket.num_lines)\n\n def test_sets_line_prices(self):\n line = self.basket.all_lines()[0]\n self.assertEqual(line.price_incl_tax, self.purchase_info.price.incl_tax)\n self.assertEqual(line.price_excl_tax, self.purchase_info.price.excl_tax)\n\n def test_adding_negative_quantity(self):\n self.assertEqual(1, self.basket.num_lines)\n self.basket.add(self.product, quantity=4)\n self.assertEqual(5, self.basket.line_quantity(self.product, self.record))\n self.basket.add(self.product, quantity=-10)\n self.assertEqual(0, self.basket.line_quantity(self.product, self.record))\n\n def test_means_another_currency_product_cannot_be_added(self):\n product = factories.create_product()\n factories.create_stockrecord(currency=\"USD\", product=product, price=D(\"20.00\"))\n with self.assertRaises(ValueError):\n self.basket.add(product)\n\n def test_cannot_add_a_product_without_price(self):\n product = factories.create_product(price=None)\n with self.assertRaises(ValueError):\n self.basket.add(product)\n\n def test_is_tax_known(self):\n self.assertTrue(self.basket.is_tax_known)\n\n\nclass TestANonEmptyBasket(TestCase):\n def setUp(self):\n self.basket = Basket()\n self.basket.strategy = strategy.Default()\n self.product = factories.create_product()\n self.record = factories.create_stockrecord(self.product, price=D(\"10.00\"))\n self.purchase_info = factories.create_purchase_info(self.record)\n self.basket.add(self.product, 10)\n\n def test_can_be_flushed(self):\n self.basket.flush()\n self.assertEqual(self.basket.num_items, 0)\n\n def test_returns_correct_product_quantity(self):\n self.assertEqual(10, self.basket.product_quantity(self.product))\n\n def test_returns_correct_line_quantity_for_existing_product_and_stockrecord(self):\n self.assertEqual(10, self.basket.line_quantity(self.product, self.record))\n\n def test_returns_zero_line_quantity_for_alternative_stockrecord(self):\n record = factories.create_stockrecord(self.product, price=D(\"5.00\"))\n self.assertEqual(0, self.basket.line_quantity(self.product, record))\n\n def test_returns_zero_line_quantity_for_missing_product_and_stockrecord(self):\n product = factories.create_product()\n record = factories.create_stockrecord(product, price=D(\"5.00\"))\n self.assertEqual(0, self.basket.line_quantity(product, record))\n\n def 
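test_product_quantity_is_zero_for_missing_product(self):\n # illustrative extra check (hypothetical, not part of the original\n # suite): a product that was never added reports a zero quantity\n missing_product = factories.create_product()\n self.assertEqual(0, self.basket.product_quantity(missing_product))\n\n def 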
test_returns_correct_quantity_for_existing_product_and_stockrecord_and_options(\n self,\n ):\n product = factories.create_product()\n record = factories.create_stockrecord(product, price=D(\"5.00\"))\n option = Option.objects.create(name=\"Message\")\n options = [{\"option\": option, \"value\": \"2\"}]\n\n self.basket.add(product, options=options)\n self.assertEqual(0, self.basket.line_quantity(product, record))\n self.assertEqual(1, self.basket.line_quantity(product, record, options))\n\n def test_total_sums_product_totals(self):\n product = factories.create_product()\n factories.create_stockrecord(product, price=D(\"5.00\"))\n self.basket.add(product, 1)\n self.assertEqual(self.basket.total_excl_tax, 105)\n\n def test_totals_for_free_products(self):\n basket = Basket()\n basket.strategy = strategy.Default()\n # Add a zero-priced product to the basket\n product = factories.create_product()\n factories.create_stockrecord(product, price=D(\"0.00\"), num_in_stock=10)\n basket.add(product, 1)\n\n self.assertEqual(len(basket.all_lines()), 1)\n self.assertEqual(basket.total_excl_tax, 0)\n self.assertEqual(basket.total_incl_tax, 0)\n\n def test_basket_prices_calculation_for_unavailable_pricing(self):\n new_product = factories.create_product()\n factories.create_stockrecord(new_product, price=D(\"5.00\"))\n self.basket.add(new_product, 1)\n\n class UnavailableProductStrategy(strategy.Default):\n \"\"\"A test strategy that makes a specific product unavailable\"\"\"\n\n def availability_policy(self, product, stockrecord):\n if product == new_product:\n return availability.Unavailable()\n return super().availability_policy(product, stockrecord)\n\n def pricing_policy(self, product, stockrecord):\n if product == new_product:\n return prices.Unavailable()\n return super().pricing_policy(product, stockrecord)\n\n self.basket.strategy = UnavailableProductStrategy()\n line = self.basket.all_lines()[1]\n self.assertEqual(\n line.get_warning(), \"'D\\xf9\\uff4d\\u03fb\\u03d2 title' is no longer available\"\n )\n self.assertIsNone(line.line_price_excl_tax)\n self.assertIsNone(line.line_price_incl_tax)\n self.assertIsNone(line.line_price_excl_tax_incl_discounts)\n self.assertIsNone(line.line_price_incl_tax_incl_discounts)\n self.assertIsNone(line.line_tax)\n self.assertEqual(self.basket.total_excl_tax, 100)\n self.assertEqual(self.basket.total_incl_tax, 100)\n self.assertEqual(self.basket.total_excl_tax_excl_discounts, 100)\n self.assertEqual(self.basket.total_incl_tax_excl_discounts, 100)\n\n def test_max_allowed_quantity(self):\n self.basket.add_product(self.product, quantity=3)\n\n # max allowed here is 7 (20-10+3)\n with self.settings(OSCAR_MAX_BASKET_QUANTITY_THRESHOLD=20):\n max_allowed, basket_threshold = self.basket.max_allowed_quantity()\n self.assertEqual(max_allowed, 7)\n self.assertEqual(basket_threshold, 20)\n\n # but we can also completely disable the threshold\n with self.settings(OSCAR_MAX_BASKET_QUANTITY_THRESHOLD=None):\n max_allowed, basket_threshold = self.basket.max_allowed_quantity()\n self.assertEqual(max_allowed, None)\n self.assertEqual(basket_threshold, None)\n\n def test_is_quantity_allowed(self):\n with self.settings(OSCAR_MAX_BASKET_QUANTITY_THRESHOLD=20):\n # 7 or below is possible\n allowed, message = self.basket.is_quantity_allowed(qty=7)\n self.assertTrue(allowed)\n self.assertIsNone(message)\n # but above it's not\n allowed, message = self.basket.is_quantity_allowed(qty=11)\n self.assertFalse(allowed)\n self.assertIsNotNone(message)\n\n with 
self.settings(OSCAR_MAX_BASKET_QUANTITY_THRESHOLD=None):\n # with the threshold disabled all quantities are possible\n allowed, message = self.basket.is_quantity_allowed(qty=7)\n self.assertTrue(allowed)\n self.assertIsNone(message)\n allowed, message = self.basket.is_quantity_allowed(qty=5000)\n self.assertTrue(allowed)\n self.assertIsNone(message)\n\n\nclass TestMergingTwoBaskets(TestCase):\n def setUp(self):\n self.product = factories.create_product()\n self.record = factories.create_stockrecord(self.product, price=D(\"10.00\"))\n self.purchase_info = factories.create_purchase_info(self.record)\n\n self.main_basket = Basket()\n self.main_basket.strategy = strategy.Default()\n self.main_basket.add(self.product, quantity=2)\n\n self.merge_basket = Basket()\n self.merge_basket.strategy = strategy.Default()\n self.merge_basket.add(self.product, quantity=1)\n\n self.main_basket.merge(self.merge_basket)\n\n def test_doesnt_sum_quantities(self):\n self.assertEqual(1, self.main_basket.num_lines)\n\n def test_changes_status_of_merge_basket(self):\n self.assertEqual(Basket.MERGED, self.merge_basket.status)\n\n\nclass TestASubmittedBasket(TestCase):\n def setUp(self):\n self.basket = Basket()\n self.basket.strategy = strategy.Default()\n self.basket.submit()\n\n def test_has_correct_status(self):\n self.assertTrue(self.basket.is_submitted)\n\n def test_can_be_edited(self):\n self.assertFalse(self.basket.can_be_edited)\n\n\nclass TestMergingAVoucherBasket(TestCase):\n def test_transfers_vouchers_to_new_basket(self):\n baskets = [factories.BasketFactory(), factories.BasketFactory()]\n voucher = factories.VoucherFactory()\n baskets[0].vouchers.add(voucher)\n baskets[1].merge(baskets[0])\n\n self.assertEqual(1, baskets[1].vouchers.all().count())\n","repo_name":"django-oscar/django-oscar","sub_path":"tests/integration/basket/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":12864,"program_lang":"python","lang":"en","doc_type":"code","stars":5941,"dataset":"github-code","pt":"37"} +{"seq_id":"21951813073","text":"from Game.Role.RoleName import *\r\n\r\nclass GameRule:\r\n def __init__(self):\r\n # Testament (last words)\r\n self.is_testament = True\r\n\r\n # Number of players assigned to each role\r\n self.assign_roles = {\r\n RoleNameTag.VILLAGER: 0,\r\n RoleNameTag.WAREWOLF: 1,\r\n RoleNameTag.FORTUNE_TELLER: 0,\r\n RoleNameTag.MEDIUM: 0,\r\n RoleNameTag.KNIGHT: 1,\r\n RoleNameTag.MAD_MAN: 0\r\n }\r\n\r\n # Whether some roles may be left unassigned\r\n self.is_role_lack = False\r\n\r\n\r\n # Assign a role\r\n def AssignRole(self, index):\r\n count_num = 0\r\n for key in self.assign_roles.keys():\r\n count_num += self.assign_roles[key]\r\n\r\n if index < count_num:\r\n return key\r\n\r\n return None\r\n\r\n \r\n # Set the number of players assigned to a role\r\n def SetNumRole(self, roleNameTag, num):\r\n self.assign_roles[roleNameTag] = num","repo_name":"yoppy104/JinroGMBot","sub_path":"Game/GameRule.py","file_name":"GameRule.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"44010628135","text":"\n\n#-------------------------------------------------------------------------------\n\nimport random\nimport cv2\nimport os\n\nimport numpy as np\nimport xml.etree.ElementTree as et\n\nfrom collections import namedtuple\nfrom glob import glob\n\n#-------------------------------------------------------------------------------\n# Labels\n#-------------------------------------------------------------------------------\n\n#def rgb2bgr(tpl):\n# return (tpl[2], tpl[1], tpl[0])\n\ndef rgb2bgr(tpl):\n 
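# label colours are declared as (R, G, B) tuples in colours.xml, while\n # OpenCV stores pixels in BGR order, hence the channel swap\n 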
return (tpl[2], tpl[1], tpl[0])\n\nLabel = namedtuple('Label', ['name', 'color'])\n#-------------------------------------------------------------------------------\n# Import labels colours from xml \n#-------------------------------------------------------------------------------\ntree = et.parse(os.path.join(os.path.dirname(os.path.abspath(__file__)), \"colours.xml\"))\np = tree.find(\"Labels\")\nnames = list(tree.iter(\"Name\"))\nrs = list(tree.iter(\"R\"))\ngs = list(tree.iter(\"G\"))\nbs = list(tree.iter(\"B\"))\nlabel_defs = []\n\nfor i in range(0, len(rs)): \n label = Label(names[i], rgb2bgr((int(rs[i].text), int(gs[i].text), int(bs[i].text))))\n label_defs.append(label) \n\n\n#-------------------------------------------------------------------------------\n#def build_file_list(images_root, labels_root, sample_name): \n# image_sample_root = images_root + '/' + sample_name\n# image_root_len = len(image_sample_root)\n# label_sample_root = labels_root + '/' + sample_name\n# image_files = glob(image_sample_root + '/**/*png')\n# file_list = []\n# for f in image_files:\n# f_relative = f[image_root_len:]\n# f_dir = os.path.dirname(f_relative)\n# f_base = os.path.basename(f_relative)\n# f_base_gt = f_base.replace('leftImg8bit', 'gtFine_color')\n# f_label = label_sample_root + f_dir + '/' + f_base_gt\n# if os.path.exists(f_label):\n# file_list.append((f, f_label))\n# return file_list\n\n\ndef build_file_list(images_root, labels_root, sample_name): \n image_sample_root = images_root + '/' + sample_name\n image_root_len = len(image_sample_root)\n label_sample_root = labels_root + '/' + sample_name\n image_files = glob(image_sample_root + '/*png')\n label_files = glob(label_sample_root + '/*png')\n file_list = []\n for f in range(len(image_files)):\n file_list.append((image_files[f], label_files[f]))\n\n# if sample_name == 'train':\n# g = open('D:/file_list_train.txt', 'w')\n# print(file_list, file = g)\n# g.close()\n\n# if sample_name == 'val':\n# g = open('D:/file_list_val.txt', 'w')\n# print(file_list, file = g)\n# g.close()\n \n return file_list\n\n\n#-------------------------------------------------------------------------------\nclass CityscapesSource:\n #---------------------------------------------------------------------------\n def __init__(self):\n# self.image_size = (1024, 480)\n self.image_size = (1120, 1024) #(416, 384) ##(800,736) \n self.num_classes = len(label_defs)\n\n self.label_colors = {i: np.array(l.color) for i, l \\\n in enumerate(label_defs)}\n\n self.num_training = None\n self.num_validation = None\n self.train_generator = None\n self.valid_generator = None\n\n #---------------------------------------------------------------------------\n def load_data(self, data_dir, valid_fraction):\n \"\"\"\n Load the data and make the generators\n :param data_dir: the directory where the dataset's files are stored\n :param valid_fraction: what fraction of the dataset should be used\n as a validation sample\n \"\"\"\n images_root = data_dir + '/images'\n labels_root = data_dir + '/labels'\n\n train_images = build_file_list(images_root, labels_root, 'train')\n valid_images = build_file_list(images_root, labels_root, 'val')\n\n if len(train_images) == 0:\n raise RuntimeError('No training images found in ' + data_dir)\n if len(valid_images) == 0:\n raise RuntimeError('No validation images found in ' + data_dir)\n\n self.num_training = len(train_images)\n self.num_validation = len(valid_images)\n self.train_generator = self.batch_generator(train_images)\n self.valid_generator = 
self.batch_generator(valid_images)\n\n #---------------------------------------------------------------------------\n def batch_generator(self, image_paths):\n def gen_batch(batch_size, names=False):\n\n# !!! NOTE: fill in here the mean channel values computed from the track images\n mean_track_R_channel = 0\n mean_track_G_channel = 0 \n mean_track_B_channel = 0\n#------------------------------------------------------------\n meanImagNet_RChannel = 0\n meanImagNet_GChannel = 0\n meanImagNet_BChannel = 0\n\n \n random.shuffle(image_paths)\n for offset in range(0, len(image_paths), batch_size):\n files = image_paths[offset:offset+batch_size]\n\n images = []\n labels = []\n names_images = []\n names_labels = []\n for f in files:\n image_file = f[0]\n label_file = f[1] \n image = cv2.resize(cv2.imread(image_file), self.image_size)\n label = cv2.resize(cv2.imread(label_file), self.image_size)\n \n#-------------------------------------------------------------------\n image = image.astype(np.float32)\n \n image[:, :, 0] = image[:, :, 0] - mean_track_B_channel + meanImagNet_BChannel\n image[:, :, 1] = image[:, :, 1] - mean_track_G_channel + meanImagNet_GChannel \n image[:, :, 2] = image[:, :, 2] - mean_track_R_channel + meanImagNet_RChannel\n#-------------------------------------------------------------------- \n label_bg = np.zeros([image.shape[0], image.shape[1]], dtype=bool)\n label_list = []\n for ldef in label_defs[1:]:\n label_current = np.all(label == ldef.color, axis=2)\n label_bg |= label_current\n label_list.append(label_current)\n\n label_bg = ~label_bg\n label_all = np.dstack([label_bg, *label_list])\n label_all = label_all.astype(np.float32)\n\n images.append(image.astype(np.float32))\n labels.append(label_all)\n\n if names:\n names_images.append(image_file)\n names_labels.append(label_file)\n\n if names:\n yield np.array(images), np.array(labels), \\\n names_images, names_labels\n else:\n yield np.array(images), np.array(labels)\n return gen_batch\n\n#-------------------------------------------------------------------------------\ndef get_source():\n return CityscapesSource()\n","repo_name":"kaleszczyk/VI.Segmentation","sub_path":"Resources/python/source_cityscapes.py","file_name":"source_cityscapes.py","file_ext":"py","file_size_in_byte":7340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71257351466","text":"import random\ncolors = [\"red\",\"blue\",\"yellow\",\"green\",\"black\",\"white\",\"pink\",\"brown\"]\n\n\nwhile True:\n\tsimon = random.choice(colors)\n\tprint(\"Simon says... 
\" + simon + \"(Exit to leave)\")\n\n\tstr = input()\n\n\tif str.casefold() == \"exit\":\n\t\tprint(\"Bye then.\")\n\t\tbreak\n\n\tif str.casefold() == simon.casefold():\n\t\tprint(\"Not bad, how about this...\")\n\telse :\n\t\tprint(\"DOOOOOOM!\")\n","repo_name":"Dargkkast/python-scripts","sub_path":"simon-says/simon-says.py","file_name":"simon-says.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19589102749","text":"from telegram import (\n Update,\n ParseMode,\n InlineKeyboardMarkup,\n InlineKeyboardButton,\n)\nfrom telegram.ext import (\n Updater,\n CommandHandler,\n MessageHandler,\n CallbackQueryHandler,\n Filters,\n CallbackContext,\n)\n\nimport keyboards\nimport settings\n\nfrom decorators import log_errors\n\n@log_errors\ndef start_message(update: Update, context: CallbackContext):\n message = update.message\n chat_id = message.chat_id\n message_id = message.message_id\n try:\n while True:\n context.bot.delete_message(chat_id, message_id)\n message_id -= 1\n except:\n pass\n text, markup = keyboards.get_message()\n update.message.reply_text(\n text=text,\n parse_mode=ParseMode.MARKDOWN_V2,\n reply_markup=markup,\n )\n\n\n@log_errors\ndef keyboard_callback_handler(update: Update, context: CallbackContext):\n query = update.callback_query\n text, markup = keyboards.get_message(query)\n\n query.edit_message_text(\n text=text,\n parse_mode='MarkdownV2',\n reply_markup=markup,\n )\n\n\ndef main():\n updater = Updater(\n token=settings.TOKEN,\n request_kwargs={\n 'connect_timeout': 0.5,\n 'read_timeout': 1.0,\n },\n use_context=True,\n )\n handlers = [\n CommandHandler(['start'], start_message),\n CommandHandler(['help'], start_message),\n MessageHandler(Filters.text, start_message),\n CallbackQueryHandler(callback=keyboard_callback_handler, pass_chat_data=True)\n ]\n\n for handler in handlers:\n updater.dispatcher.add_handler(handler)\n\n print('Bot starts...')\n updater.start_polling()\n updater.idle()\n\n\nif __name__ == '__main__':\n main()","repo_name":"altvik2503/two_steps_bot_project","sub_path":"two_steps_bot.py","file_name":"two_steps_bot.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10883001230","text":"import datetime\n\nfrom django.contrib import messages\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.views.generic import ListView, DetailView, UpdateView\nfrom .forms import *\nfrom .models import *\nfrom .permcheck import *\nfrom .shortcuts import *\n\nUser = get_user_model()\n\n@login_required\ndef index(request):\n context = {}\n if request.user.is_authenticated:\n user = request.user\n context['projects_managers'] = [(pro, get_collaborations(project=pro, is_manager=True)[0].user) for pro in\n get_projects(user=user)]\n context['notifications'] = Notification.objects.filter(user=user, has_seen=False)\n print(context['notifications'])\n if request.method == 'POST':\n create_form = ProjectCreationForm(request.POST)\n join_form = ProjectJoinForm(request.POST)\n\n if create_form.is_valid():\n new_project = create_form.save()\n\n if new_project.id:\n Collaboration.objects.create(project=new_project, user=user, is_manager=True)\n 
return redirect('project-detail', pk=new_project.id)\n return redirect('index')\n\n if join_form.is_valid():\n presumed_invitation = Invitation.objects.filter(key=request.POST['key'], is_active=True)\n print(presumed_invitation)\n if presumed_invitation.count() == 1:\n invitation = presumed_invitation[0]\n if invitation.id:\n project_to_join = invitation.project\n if project_to_join.id:\n if not is_user_in_project(user, project_to_join):\n Collaboration.objects.create(user=request.user, project=project_to_join, is_manager=False)\n invitation.receivers.add(user)\n invitation.save()\n return redirect('project-detail', pk=project_to_join.id)\n else:\n messages.error(request, \"Vous faites déjà partie du projet\")\n else:\n messages.error(request, \"Pas de projet correspondant\")\n else:\n messages.error(request, \"Clé invalide\")\n\n if 'notification-id' in request.POST:\n notification = Notification.objects.get(id=request.POST['notification-id'])\n notification.has_seen = True\n notification.save()\n\n return redirect('index')\n else:\n create_form = ProjectCreationForm()\n join_form = ProjectJoinForm()\n\n context['create_form'] = create_form\n context['join_form'] = join_form\n return render(request, 'www/index.html', context)\n\n\n@login_required\ndef project_detail(request, pk):\n project = get_object_or_404(Project, id=pk)\n user = request.user\n\n if is_user_in_project(request.user, project):\n collaborators = get_collaborators(project=project)\n is_manager = is_user_manager(request.user, project)\n tasks = get_active_tasks(project=project)\n context = {\n 'project': project,\n 'init_on_load': True,\n 'manager': get_manager(project),\n 'is_manager': is_manager,\n 'collaborators': collaborators,\n 'tasks': tasks,\n 'status': Status.objects.filter(project=project),\n 'team': get_team(project=project),\n }\n\n if is_manager:\n invitations = Invitation.objects.filter(project=project, is_active=True)\n if invitations.count() == 1:\n context['invitation_key'] = Invitation.objects.get(project=project, is_active=True)\n else:\n context['invitation_key'] = None\n\n task_form = TaskCreationForm()\n\n if request.method == 'POST':\n data = request.POST\n if 'new-task' in data:\n if is_manager:\n task_form = TaskCreationForm(data)\n if task_form.is_valid():\n new_task = task_form.save(commit=False)\n new_task.project = project\n new_task.user = None\n new_task.save()\n if new_task.id:\n TaskState.objects.create(task=new_task, user=user)\n Log.objects.create(type='add', task=new_task)\n return redirect('project-detail', pk=pk)\n else:\n task = get_object_or_404(Task, id=data['task-id'], is_active=True)\n if 'delete-task' in data and is_manager:\n task.get_last_task_state().close()\n task.is_active = False\n task.save()\n Log.objects.create(type='rem', task=task)\n elif 'update-task' in data:\n if user == task.user or is_manager:\n new_sts = None\n if data.get('task-st-id'):\n new_sts = Status.objects.get(id=data['task-st-id']) if int(data['task-st-id']) > 0 else None\n if task.status != new_sts:\n try:\n task.get_last_task_state().close()\n except:\n pass\n TaskState.objects.create(task=task, user=request.user, status=new_sts)\n Log.objects.create(type='upd', task=task)\n task.status = new_sts\n\n if is_manager and data['designation']:\n new_des = data['designation']\n task.designation = new_des\n if is_manager:\n if int(data['user-id']) > 0:\n new_task_user = User.objects.get(id=data['user-id'])\n task.user = new_task_user if is_user_in_project(new_task_user, project) else None\n else:\n task.user 
= None\n\n task.save()\n\n return redirect('project-detail', pk=pk)\n\n \"\"\"if request.method == 'POST':\n data = request.POST\n task_form = TaskCreationForm(data)\n if data.get('designation'):\n new_task = task_form.save(commit=False)\n new_task.user = request.user\n new_task.project = project\n new_task.save()\n else:\n context['t_form'] = TaskCreationForm()\"\"\"\n context['t_form'] = task_form\n return render(request, 'www/project_detail.html', context)\n else:\n return redirect('index')\n\n\n@login_required\ndef project_update(request, pk):\n project = get_object_or_404(Project, id=pk, is_active=True)\n\n if is_user_manager(request.user, project):\n context = {}\n context['id'] = pk\n context['project'] = project\n context['collaborators'] = collaborators = get_collaborators(project=project)\n context['form'] = ProjectUpdateForm(collaborators=collaborators, instance=project)\n context['status'] = Status.objects.filter(project=project)\n\n # Redirect after the form is submitted\n if request.method == 'POST':\n data = request.POST\n print(data)\n # Update of the general information\n if 'info-update' in data:\n context['form'] = ProjectUpdateForm(collaborators=collaborators, data=data, instance=project)\n\n # New title\n new_title = data['title']\n if new_title:\n project.title = new_title\n else:\n messages.error(request, \"Le nouveau titre ne peut être une chaîne vide.\")\n\n # New project manager\n new_manager_id = data['manager_id']\n if new_manager_id:\n new_manager = User.objects.get(id=new_manager_id)\n if is_user_collaborator(new_manager, project):\n # before 3.1 - project.collaborators.add(project.manager)\n # after 3.1 -\n\n update_manager(new_manager=new_manager, project=project)\n\n # before 3.1 - project.manager = new_manager\n # project.collaborators.remove(new_manager)\n\n project.description = data['description']\n project.save()\n\n elif 'collaborator-remove' in data:\n if data['user-id']:\n user_to_rem = User.objects.get(id=data['user-id'])\n if is_user_collaborator(user_to_rem, project):\n remove_collaborator(user=user_to_rem, project=project)\n else:\n messages.error(request, \"Une erreur est survenue.\")\n else:\n messages.error(request, \"Une erreur est survenue.\")\n\n elif 'append-status' in data:\n designation = data.get('designation')\n color = data.get('color')\n Status.objects.create(designation=designation, project=project, color=color)\n\n elif 'update-status' in data:\n status_id = data.get('update-status')\n designation = data.get('designation')\n color = data.get('color')\n status = get_object_or_404(Status, id=status_id)\n if 'delete-status' in data:\n status.delete()\n else:\n status.designation = designation\n status.color = color\n status.save()\n else:\n return render(request, 'www/project_update.html', context)\n return redirect('project-detail', pk=project.id)\n\n\n@login_required\ndef generate_new_key(request, pk):\n project = get_object_or_404(Project, id=pk, is_active=True)\n\n if is_user_manager(request.user, project):\n keys = Invitation.objects.filter(project=project, is_active=True)\n for k in keys:\n k.is_active = False\n k.save()\n\n Invitation.objects.create(key=secrets.token_hex(8), project=project, is_active=True, creator=request.user)\n\n return redirect('project-detail', pk=project.id)\n\n\n@login_required\ndef timeline(request, pk):\n user = request.user\n project = get_object_or_404(Project, id=pk, is_active=True)\n is_manager = is_user_manager(user, project)\n team = get_team(project)\n\n context = {\n 'project': project,\n 
'is_manager': is_manager,\n 'team': team\n }\n\n if is_manager:\n tasks = get_active_tasks(project=project)\n\n context['tasks'] = tasks\n context['status'] = Status.objects.filter(project=project)\n\n for s in context['status']:\n print(s.get_task_states())\n\n date_diff_btw_creation = datetime.datetime.now(datetime.timezone.utc) - project.datetime_created\n context['unit'] = \"day\" if (date_diff_btw_creation.seconds // 3600) > 24 else \"hour\"\n\n if request.method == 'POST':\n data = request.POST\n print(data)\n if 'analysis-user-id' in data:\n if data.get('analysis-user-id') != \"0\":\n analysis_user = get_object_or_404(User, id=data.get('analysis-user-id'))\n context['analysis_user'] = analysis_user\n\n analysis_user_tasks = get_active_tasks(project=project, user=analysis_user)\n analysis_user_data = {}\n for t in analysis_user_tasks:\n status_designation = \"Sans status\"\n color = \"#ffffff\"\n if t.status:\n status_designation = t.status.designation\n color = t.status.color\n if status_designation not in analysis_user_data:\n analysis_user_data[status_designation] = {\n 'nb': 0\n }\n analysis_user_data[status_designation]['nb'] += 1\n analysis_user_data[status_designation]['color'] = color\n context['analysis_user_data'] = analysis_user_data\n else:\n context['analysis_user'] = None\n return render(request, 'www/project_timeline.html', context=context)\n return redirect('project-detail', pk=pk)\n\n\n\"\"\"\nclass ProjectUpdateView(LoginRequiredMixin, UpdateView):\n model = Project\n fields = ['title', 'manager']\n template_name_suffix = \"_update\"\n\n def form_valid(self, form):\n form.instance.author = self.request.user\n return super().form_valid(form)\n\"\"\"\n","repo_name":"lcalcas/HELBManager","sub_path":"source/helbmanager/www/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19417128356","text":"import subprocess\nfrom subprocess import TimeoutExpired\n\nfrom pynvim.api.nvim import NvimError\n\nimport vim\nfrom ncm2 import Ncm2Source\nfrom ncm2 import Popen\nfrom ncm2 import getLogger\n\nlogger = getLogger(__name__)\n\nclass SourceException(Exception):\n pass\n\nclass Source(Ncm2Source):\n PROC_TIMEOUT = 5\n DEFUALT_COMMANDS = {\n 'accounts': ['ledger', 'accounts'],\n 'tags': ['ledger', 'tags'],\n 'payees': ['ledger', 'payees'],\n 'commodities': ['ledger', 'commodities']\n }\n DEFAULT_OPT_FILE = '--file'\n\n def __init__(self, nvim, name):\n super().__init__(nvim)\n\n self.name = name\n self.command = None\n self.candidates = []\n\n try:\n setting = 'g:ncm2_ledger_cmd_' + name\n self.command = self.nvim.eval(setting)\n except NvimError:\n self.command = Source.DEFUALT_COMMANDS[name]\n\n try:\n self.optfile = self.nvim.eval('g:ncm2_ledger_opt_file')\n except NvimError:\n self.optfile = self.DEFAULT_OPT_FILE\n\n logger.debug('Command for \"%s\" candidates is: %s', name, ' '.join(self.command))\n\n def on_complete(self, ctx):\n base = ctx['base']\n filepath = ctx['filepath']\n matcher = self.matcher_get(ctx)\n\n try:\n command = self.command + [self.optfile, filepath]\n\n proc = Popen(\n args=command,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE\n )\n outs, errs = proc.communicate(timeout=Source.PROC_TIMEOUT)\n\n if errs:\n raise SourceException(errs.decode('utf-8'))\n\n candidates = outs.decode('utf-8').splitlines()\n candidates = self.matches_formalize(ctx, candidates)\n\n if candidates:\n self.candidates = 
candidates\n logger.info('Found %s \"%s\" completion candidates',\n len(self.candidates), self.name\n )\n except SourceException as err:\n logger.error('Error collecting \"%s\" candidates', self.name)\n logger.error(err)\n except TimeoutExpired as err:\n proc.kill()\n logger.exception('Error collecting \"%s\" candidates', self.name)\n except Exception as err:\n logger.exception('Error collecting \"%s\" candidates', self.name)\n\n matches = [candidate for candidate in self.candidates if matcher(base, candidate)]\n logger.info('Found %s \"%s\" completion matches',\n len(matches), self.name\n )\n self.complete(ctx, ctx['startccol'], matches, True)\n\naccounts = Source(vim, 'accounts')\ntags = Source(vim, 'tags')\npayees = Source(vim, 'payees')\ncommodities = Source(vim, 'commodities')\n\naccounts_provider = accounts.on_complete\ntags_provider = tags.on_complete\npayees_provider = payees.on_complete\ncommodities_provider = commodities.on_complete\n","repo_name":"loonies/ncm2-ledger","sub_path":"pythonx/ncm2_ledger.py","file_name":"ncm2_ledger.py","file_ext":"py","file_size_in_byte":2959,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"70295011306","text":"from pygame.constants import K_LEFT, K_RIGHT\nimport game\nimport pygame\nfrom Controller import Controller\nimport os\nfrom Player import Player\npygame.init()\n\nscreen = pygame.display.set_mode([500, 500])\n\nclock = pygame.time.Clock()\ngame.PID = os.getpid()\nprint('PID : '+ str(game.PID))\n\nrunning = True\n\nplayer = Player(screen.get_height() / 2,screen.get_width()/2)\nplayer.set_animation('idle')\ncontroller = Controller(1)\n\nwhile running:\n game.DT = clock.tick(60)/1000\n\n screen.fill((0,0,0))\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n \n player_action = controller.get_controller_action(event)\n player.set_action(player_action)\n\n player.move()\n player.draw(screen)\n\n pygame.display.update()\n\npygame.quit()","repo_name":"damienmarchandfr/python-learn","sub_path":"game/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40503885025","text":"import gspread\nfrom google.oauth2.service_account import Credentials\nfrom datetime import datetime\nfrom termcolor import colored\nfrom tabulate import tabulate\nimport random\nimport os\nfrom time import sleep\n\n\n# Lists the API:s for access\n# Code from my Love Sandwiches project\n\nSCOPE = [\n \"https://www.googleapis.com/auth/spreadsheets\",\n \"https://www.googleapis.com/auth/drive.file\",\n \"https://www.googleapis.com/auth/drive\"\n ]\n\nCREDS = Credentials.from_service_account_file('creds.json')\nSCOPED_CREDS = CREDS.with_scopes(SCOPE)\nGSPREAD_CLIENT = gspread.authorize(SCOPED_CREDS)\nSHEET = GSPREAD_CLIENT.open('random-destination')\n\n\ndef ascii_art():\n \"\"\"\n Ascii art files do display the welcome text and the airplane.\n The files are openend and read, and if there is an error a print message\n will be displayed in the exception.\n \"\"\"\n # Ascii art files to print images\n welcome_text = \"ascii-art/welcome.txt\"\n airplane_file = \"ascii-art/airplane.txt\"\n\n # Prints welcome text\n try:\n with open(welcome_text, \"r\") as file:\n welcome = file.read()\n print(welcome)\n\n # Prints airplane image\n with open(airplane_file, \"r\") as file:\n airplane = file.read()\n print(colored(airplane, color=\"light_blue\"))\n print(\"\\n\")\n 
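# A missing art file is reported below rather than crashing the welcome banner.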
\n except FileNotFoundError:\n print(\" File not found\")\n\n\ndef user_input():\n \"\"\"\n Get input from the user and make sure the user is making a valid\n choice. The while loop will repeat and request data that is valid.\n The user can only enter a number starting from 1 with a limitation of\n 15 travelers.\n \"\"\"\n while True:\n user_choice = (input(\" How many travelers are there?: \\n \"\n \"(1 if you are traveling alone, 2 or\"\n \" more for traveling with company)\\n\\n \"))\n if user_choice.isdigit() and 0 < int(user_choice) <= 15:\n return int(user_choice)\n else:\n print(colored(\n \" Invalid choice. Please enter a valid number.\", \"red\"))\n\n\ndef travel_date():\n \"\"\"\n Get input from the user about travel date. A while loop\n that will repeat until a valid date format is entered.\n The datetime.strptime will parse the date. By using datetime.now().date()\n it will then check that the user's\n input is a date in the future and not in the past.\n The weekday corresponding to the date will also be printed.\n \"\"\"\n while True:\n departure = input(\" When do you want to travel? (YYYY-MM-DD):\\n\\n \")\n try:\n day_and_date = datetime.strptime(departure, \"%Y-%m-%d\")\n if day_and_date.date() >= datetime.now().date():\n weekday = day_and_date.strftime(\"%A\")\n print(f\" {weekday}\\n\")\n return day_and_date, weekday\n else:\n print(colored(\n \" Please enter a valid date in YYYY-MM-DD format\", \"red\"))\n except ValueError:\n print(colored(\n \" Please enter a valid date in YYYY-MM-DD format\", \"red\"))\n\n\ndef duration():\n \"\"\"\n Get input from the user about the duration of the stay with\n validation to make sure the user can only type in numbers,\n starting from 1 with a limitation of 90 days.\n \"\"\"\n while True:\n travel_duration = (input(\" How many days are you planning\"\n \" to stay?:\\n\\n \"))\n if travel_duration.isdigit() and 0 < int(travel_duration) <= 90:\n return int(travel_duration)\n else:\n print(colored(\" Invalid input. Please enter a number\", \"red\"))\n\n\ndef continent():\n \"\"\"\n Get user input. User needs to select a continent to get\n a random city returned in the random_destination function.\n The continents are each linked to a city and price in\n google sheets. Here it will access the worksheet for the\n chosen continent.\n \"\"\"\n while True:\n user_selection = input(\" Choose a continent (Africa, Asia, Europe,\"\n \" North America, South America): \\n\\n\"\n \" \").lower()\n try:\n SHEET.worksheet(user_selection)\n return user_selection\n except gspread.exceptions.WorksheetNotFound:\n print(colored(\n \" Invalid continent. Please choose from the \"\n \"continents listed\", \"red\"))\n\n\ndef random_destination(user_selection):\n \"\"\"\n Returns a random city and the price from the list of cities\n in the google worksheet to the user.\n Based on the user input in the continent\n function it will access the worksheet. 
It will\n then get all the rows from the worksheet,\n choose a random city and print the random city and price.\n \"\"\"\n try:\n worksheet = SHEET.worksheet(user_selection)\n data = worksheet.get_all_records()\n random_city = random.choice(data)\n the_city = random_city[\"City\"]\n the_price = random_city[\"Price\"]\n\n print(\" Your Random destination is: \\n \")\n print(f\" City: {the_city}\")\n print(f\" Price: ${the_price}\\n\")\n return the_city, the_price\n except gspread.exceptions.WorksheetNotFound:\n print(\"No data found\")\n\n\ndef another_choice(user_selection):\n \"\"\"\n Get user input if the user wants to get another city chosen.\n A while loop that will call the continent function if the user\n chooses 'y' and break the loop if the user chooses 'n'. Will then\n proceed to the next function. If neither Y nor N is entered a print\n message will display to prompt the user to make a choice.\n \"\"\"\n while True:\n new_choice = input(\" Do you want to choose another city?:\"\n \" (Y/N)\\n\\n \").upper()\n if new_choice == \"Y\":\n user_selection = continent()\n the_city, the_price = random_destination(user_selection)\n return the_city, the_price\n elif new_choice == \"N\":\n return None\n else:\n print(colored(\" Invalid choice. Please enter 'Y' or 'N'\", \"red\"))\n\n\ndef travel_package():\n \"\"\"\n Get user input if the user wants to add accommodation. A while loop will run\n until a valid choice is made ('y' or 'n').\n If 'y' the accommodation function is called. If 'n' the loop will break.\n If neither Y nor N is entered, a print message will display to prompt the\n user to make a choice.\n \"\"\"\n while True:\n accommodation = input(\" Do you want to add accommodation?: \"\n \"(Y/N)\\n\\n \").upper()\n if accommodation == \"Y\":\n return accommodation_choices()\n elif accommodation == \"N\":\n break\n else:\n print(colored(\" Invalid choice. Please enter 'Y' or 'N'\", \"red\"))\n\n\ndef accommodation_choices():\n \"\"\"\n Prints out the choices for accommodation to the user and the user can enter a\n number. If statements to return the correct value based on the user's choice.\n And validation if neither of the provided numbers is entered by the user.\n \"\"\"\n print(\" Select an option for accommodation\\n\")\n print(\" 1. Luxury Hotel\")\n print(\" 2. Budget Hotel\")\n print(\" 3. Airbnb\")\n print(\" 4. Hostel\\n\")\n\n option = input(\" Enter the number of your choice\\n \")\n\n if option == \"1\":\n print(\" Luxury Hotel\\n\")\n return \" Luxury Hotel\\n \"\n elif option == \"2\":\n print(\" Budget Hotel\\n\")\n return \" Budget Hotel\\n \"\n elif option == \"3\":\n print(\" Airbnb\\n\")\n return \" Airbnb\\n \"\n elif option == \"4\":\n print(\" Hostel\\n\")\n return \" Hostel\\n \"\n else:\n print(colored(\n \" Invalid choice. Please choose from the options \"\n \"provided\\n \", \"red\"))\n sleep(2)\n return accommodation_choices()\n\n\ndef transportation_service():\n \"\"\"\n Get user input if the user wishes to add transportation services.\n A while loop will run until a valid choice is made ('y' or 'n'). If\n 'y' the transportation_options function is called.\n If 'n' the loop will break. If neither Y nor N is entered,\n a print message will display to prompt the user to make a choice.\n \"\"\"\n while True:\n transportation = input(\" Do you want to add transportation?: \"\n \"(Y/N)\\n\\n \").upper()\n if transportation == \"Y\":\n return transportation_options()\n elif transportation == \"N\":\n break\n else:\n print(colored(\" Invalid choice. 
Please enter 'Y' or 'N'\", \"red\"))\n\n\ndef transportation_options():\n \"\"\"\n The options for transportation that will be displayed to the user.\n The choice will be printed out and the user has to type a valid number.\n \"\"\"\n print(\" Select an option for transportation\\n \")\n print(\" 1. Airport taxi\")\n print(\" 2. Car rental\")\n print(\" 3. Bus transfer\\n \")\n\n selection = input(\" Enter the number of your choice\\n \")\n\n if selection == \"1\":\n print(\" Airport taxi\\n \")\n return \" Airport taxi\\n \"\n elif selection == \"2\":\n print(\" Car rental\\n \")\n return \" Car rental\\n \"\n elif selection == \"3\":\n print(\" Bus transfer\\n \")\n return \" Bus transfer\\n \"\n else:\n print(colored(\n \" Invalid choice. Please choose from the options \"\n \"provided\\n \", \"red\"))\n sleep(2)\n return transportation_options()\n\n\n\"\"\"\nFunction to display a summary of the travel information, taking\nthe parameters related to the travel info, from the user's input and the\nrandom city generated. The summary will be displayed in a table using the\ntabulate module.\n\"\"\"\n\n\ndef summary(user_choice, departure, travel_duration, the_city, the_price,\n option, selection):\n \"\"\"\n Sum up the price with number of people, displayed in the table\n Define departure so both date and weekday are displayed in the table\n \"\"\"\n total_cost = user_choice * the_price\n departure, weekday = departure\n\n travel_details = [\n [\"Traveling on:\", f'{weekday}, {departure.strftime(\"%Y-%m-%d\")}'],\n [\"Number of people traveling:\", user_choice],\n [\"Duration of stay:\", f\"{travel_duration} days\"],\n [\"Destination:\", the_city],\n [\"Price:\", f\"${total_cost}\"],\n ]\n\n \"\"\"\n This will append the accommodation choice and transportation choice to\n the travel details table if the user makes a choice.\n \"\"\"\n if option:\n travel_details.append([\"Accommodation:\", option])\n\n if selection:\n travel_details.append([\"Transportation:\", selection])\n\n # Displays the travel details in a table\n table_style = \"grid\"\n table = tabulate(travel_details, tablefmt=table_style)\n\n print(\" Here is your travel information: \\n\")\n print(colored(table, color=\"yellow\", attrs=[\"reverse\", \"bold\"]))\n print(\" \\n\")\n\n\ndef final_step():\n \"\"\"\n Lets the user choose to start over or exit the program. If the user wants\n to start over, the main function is called. If the user wants to exit, the\n terminal is cleared and a print message is displayed. A while loop that\n will run until the user enters valid data.\n \"\"\"\n while True:\n print(\" What do you want to do next?:\\n \")\n print(\" 1. Start over\")\n print(\" 2. Exit\")\n\n choice = input(\" Enter the number of your choice\\n \")\n\n if choice == \"1\":\n os.system('clear')\n print(\" Okay...let's start from the beginning!\\n \")\n sleep(2)\n return main()\n elif choice == \"2\":\n os.system('clear')\n print(\" Have a nice trip!\\n \")\n break\n else:\n print(colored(\n \" Invalid choice. Please choose from the options\"\n \" provided\\n \", \"red\"))\n\n\ndef main():\n \"\"\"\n A welcome message followed by an input that will prompt the user to\n start the program. The input message will display after 1.5 seconds.\n \"\"\"\n ascii_art()\n print(\" Get a random travel destination based on your choice of \"\n \"continent.\\n ...Let's begin! 
\\n\")\n sleep(1.5)\n input(\" Press Enter to continue...\\n\\n \")\n \"\"\"\n The functions below are called in the right order to display information\n to the user and get the input from the user\n \"\"\"\n user_choice = user_input()\n departure = travel_date()\n travel_duration = duration()\n user_selection = continent()\n the_city, the_price = random_destination(user_selection)\n \"\"\"\n Prevents a NoneType error when the user chooses no in the another_choice\n function that returns None. A while loop that will keep running until the\n user chooses no on the choose another city question.\n \"\"\"\n while True:\n another_choice_input = another_choice(user_selection)\n if another_choice_input is not None:\n the_city, the_price = another_choice_input\n else:\n break\n\n accommodation = travel_package()\n transportation = transportation_service()\n summary(user_choice, departure, travel_duration, the_city, the_price,\n accommodation, transportation)\n final_step()\n\n\nif __name__ == \"__main__\":\n main()\n \n","repo_name":"Code-Institute-Submissions/random-destination","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":13364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"13584401358","text":"forbidden = [\n 'confiability', \n 'address', \n 'author',\n 'location',\n]\n\nonly_numbers = [\n 'year_publication', \n 'num_pages', \n 'number', \n 'cnpj',\n 'edition',\n 'stand',\n 'shelf',\n 'amount',\n]\n\nbig_text = [\n 'description'\n]\n\ndate = [\n 'birth_date',\n 'start_date',\n 'return_date',\n 'return_period'\n]\n\nlike = [\n 'name', \n 'title', \n 'publisher', \n 'author', \n 'user', \n 'book',\n 'confiability'\n]\n\nexact = [\n 'cpf', \n 'id', \n 'stand', \n 'shelf', \n 'year_publication', \n 'barcode', \n 'edition', \n 'birth_date', \n 'start_date', \n 'return_date',\n 'return_period'\n]","repo_name":"Dogaum1/FDB-Libraza","sub_path":"Util/Attributes.py","file_name":"Attributes.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28450438965","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils\nimport torch.distributions\nimport torchvision\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\n\n\ndef train_vae(epochs, vae, train_loader, val_loader, lr=0.001, is_cifar=False):\n '''\n training loop for vae and plot the loss curve\n\n param:\n epochs: the number of epochs\n vae: vae torch model\n train_loader, val_loader: data loader\n lr: learning rate, default: 0.001\n is_cifar: whether the dataset for training is cifar\n\n return None\n '''\n ep = []\n train_loss_list = []\n val_loss_list = []\n\n opt = torch.optim.Adam(vae.parameters(), lr=lr)\n # train\n \n for epoch in tqdm(range(epochs)):\n train_loss = 0\n val_loss = 0\n data_num = 0\n val_data_num = 0\n if is_cifar:\n for i, (x, y) in enumerate(train_loader):\n \n x = x.reshape((len(x), -1))\n opt.zero_grad()\n \n x_hat = vae(x)\n loss = ((x - x_hat)**2).mean() + vae.encoder.kl\n loss.backward()\n opt.step()\n train_loss += loss\n data_num += len(x)\n\n with torch.no_grad():\n for i, (x, y) in enumerate(val_loader):\n x = x.reshape((len(x), -1))\n x_hat_val = vae(x)\n val_loss += ((x - x_hat_val)**2).mean() + vae.encoder.kl\n val_data_num += len(x)\n else:\n for i, x in enumerate(train_loader):\n opt.zero_grad()\n x_hat = vae(x)\n loss = ((x - x_hat)**2).mean() + vae.encoder.kl\n 
\n loss.backward()\n opt.step()\n train_loss += loss\n data_num += len(x)\n\n\n with torch.no_grad():\n for i, x in enumerate(val_loader):\n x_hat_val = vae(x)\n val_loss += ((x - x_hat_val)**2).mean() + vae.encoder.kl\n val_data_num += len(x)\n\n ep.append(epoch + 1) \n train_loss = train_loss.item() / data_num\n train_loss_list.append(train_loss)\n val_loss = val_loss.item() / val_data_num\n val_loss_list.append(val_loss)\n # print('ep: ', epoch ,', train loss: ', train_loss, ', val loss: ', val_loss)\n\n plt.title(\"vae loss\")\n plt.xlabel(\"steps\")\n plt.ylabel(\"loss\")\n plt.plot(ep, train_loss_list, 'red', label='training loss')\n plt.plot(ep, val_loss_list, 'blue', label='testing loss')\n plt.legend(loc=\"upper right\")\n plt.show()\n\ndef train_resnet18(epochs, model, opt, train_loader, val_loader, batch_size): \n '''\n train a resnet18 and plot the result\n\n param:\n epochs: the number of epochs\n model: torch model\n opt: optimizer (adam, sgd)\n train_loader, val_loader: data loader\n batch_size: the size of a batch\n\n return None\n '''\n # Epoch axis for plotting; kept separate from the loop variable ep used below.\n ep_list = []\n train_loss_list = []\n test_loss_list = []\n\n N = len(train_loader) * batch_size\n loss_func = torch.nn.CrossEntropyLoss() \n for ep in range(epochs):\n loss_ep = 0\n for (x, y) in tqdm(train_loader):\n opt.zero_grad()\n y = y.to(torch.long)\n y_ = model(x)\n loss = loss_func(y_, y)\n loss.backward()\n opt.step()\n loss_ep += loss\n print('for ep ', ep, ' training loss: ', loss_ep / N)\n\n with torch.no_grad():\n for epoch in range(epochs):\n val_loss_ep = 0\n acc = 0\n for (x, y) in tqdm(val_loader):\n y = y.to(torch.long)\n y_ = model(x)\n loss = loss_func(y_, y)\n val_loss_ep += loss\n pred = torch.argmax(y_, axis=1)\n acc += torch.sum(pred == y) \n acc /= (len(val_loader) * batch_size)\n print('for ep ', epoch, ' val loss: ', val_loss_ep / (len(val_loader) * batch_size))\n print('for ep ', epoch, ' val acc: ', acc)\n\n ep_list.append(epoch + 1) \n train_loss_list.append(loss_ep.item() / (len(train_loader) * batch_size))\n test_loss_list.append(val_loss_ep.item() / (len(train_loader) * batch_size))\n\n plt.title(\"resnet18 loss\")\n plt.xlabel(\"steps\")\n plt.ylabel(\"loss\")\n plt.plot(ep_list, train_loss_list, 'red', label='training loss')\n plt.plot(ep_list, test_loss_list, 'blue', label='testing loss')\n plt.legend(loc=\"upper right\")\n plt.show()","repo_name":"HUWENBIN2024/ML_Crowd_Modeling_and_Simulation","sub_path":"final_proj/src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"32505511593","text":"from django.shortcuts import render, redirect, HttpResponseRedirect, reverse\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom appmart.forms import CreateUserForm\nfrom product.forms import TagForm, ColorForm, SizeForm, ProductForm\nfrom product.models import Product\n\ndef registerPage(request):\n\tif request.user.is_authenticated:\n\t\treturn redirect('dashboard')\n\telse:\n\t\tform = CreateUserForm()\n\t\tif request.method == 'POST':\n\t\t\tform = CreateUserForm(request.POST)\n\t\t\tif form.is_valid():\n\t\t\t\tform.save()\n\t\t\t\tuser = form.cleaned_data.get('username')\n\t\t\t\tmessages.success(request, 'Account was created for ' + user)\n\t\t\t\treturn redirect('loginPage')\n\t\t\t\n\tcontext = {'form': form}\n\treturn render(request, 'register.html', context)\n\ndef loginPage(request):\n\tif 
request.user.is_authenticated:\n\t\treturn redirect('dashboard')\n\telse:\n\t\tif request.method == 'POST':\n\t\t\tusername = request.POST.get('username')\n\t\t\tpassword = request.POST.get('password')\n\t\t\tuser = authenticate(request, username=username, password=password)\n\t\t\tif user is not None:\n\t\t\t\tlogin(request, user)\n\t\t\t\treturn redirect('dashboard')\n\t\t\telse:\n\t\t\t\tmessages.info(request, 'Username OR password is incorrect')\n\tcontext = {}\n\treturn render(request, 'login.html', context)\n\ndef logoutUser(request):\n\tlogout(request)\n\treturn redirect('loginPage')\n\n@login_required(login_url='loginPage')\ndef main(request):\n\treturn render(request, \"dashboard.html\")\n\n@login_required(login_url='loginPage')\ndef product(request):\n\tif request.method == 'POST':\n\t\ttagform = TagForm(request.POST)\n\t\tcolorform = ColorForm(request.POST)\n\t\tsizeform = SizeForm(request.POST)\n\t\tproductform = ProductForm(request.POST)\n\t\tif productform.is_valid():\n\t\t\tproductform.save()\n\t\t\tmessages.success(request, 'product data added')\n\t\t\treturn HttpResponseRedirect(reverse('product'))\n\t\telif colorform.is_valid():\n\t\t\tcolorform.save()\n\t\t\tmessages.success(request, 'product color added')\n\t\t\treturn HttpResponseRedirect(reverse('product'))\n\t\telif sizeform.is_valid():\n\t\t\tsizeform.save()\n\t\t\tmessages.success(request, 'product size added')\n\t\t\treturn HttpResponseRedirect(reverse('product'))\n\t\telif tagform.is_valid():\n\t\t\ttagform.save()\n\t\t\tmessages.success(request, 'product tag added')\n\t\t\treturn HttpResponseRedirect(reverse('product'))\n\t\telse:\n\t\t\tmessages.success(request, 'please fill relevant data')\n\t\t\treturn HttpResponseRedirect(reverse('product'))\n\telse:\n\t\ttagform = TagForm()\n\t\tcolorform = ColorForm()\n\t\tsizeform = SizeForm()\n\t\tproductform = ProductForm()\n\t\tdata = Product.objects.all()\n\tmessages.success(request, 'Add the element')\n\treturn render(request, 'product.html', {\n\t\t'tagform': tagform,\n\t\t'colorform': colorform,\n\t\t'sizeform': sizeform,\n\t\t'productform': productform,\n\t\t'data': data\n\n\n\t})\n\n@login_required(login_url='loginPage')\ndef customer(request):\n\treturn render(request, \"customer.html\")\n\n@login_required(login_url='loginPage')\ndef createOrder(request):\n\treturn render(request, \"orderform.html\")\n\n@login_required(login_url='loginPage')\ndef updateOrder(request):\n\n\treturn render(request, \"orderform.html\")\n\n@login_required(login_url='loginPage')\ndef deleteOrder(request):\n\treturn render(request, 'delete.html')\n\n@login_required(login_url='loginPage')\ndef productdelete(request, pk):\n\tproduct = Product.objects.get(id=pk)\n\tproduct.delete()\n\treturn render(request, 'product.html')\n\n@login_required(login_url='loginPage')\ndef productedit(request, pk):\n\tproduct = Product.objects.get(id=pk)\n\tform = ProductForm(instance=product)\n\tif request.method == 'POST':\n\t\tform = ProductForm(request.POST, instance=product)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\treturn redirect('product')\n\n\tcontext = {'form': form, 'product': product}\n\treturn render(request, 'edit.html', context)\n","repo_name":"ankitrrock/women_fashion","sub_path":"product/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3802,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"30922401015","text":"data = []\n\nwith open(\"python/Day 01/puzzleInput.txt\") as f:\n data = 
f.readlines()\nformattedData = []\nfor e in data:\n try:\n formattedData.append(int(e))\n except:\n formattedData.append(-1)\n\nformattedData.append(-1)\n\nrunningTotal = 0\nmaxValue = 0\nmaxValues = [0,0,0]\nfor e in formattedData:\n if e != -1:\n runningTotal += e\n \n else:\n maxValue = max(runningTotal, maxValue)\n\n for i in range(len(maxValues)):\n if (maxValues[i] < runningTotal):\n maxValues[i] = runningTotal\n break\n maxValues.sort()\n runningTotal = 0\n\nprint(maxValue)\nprint(maxValues)\nprint(sum(maxValues))","repo_name":"vrnprkh/AOC-2022","sub_path":"python/Day 01/Day 1.py","file_name":"Day 1.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32607422241","text":"import datetime\nfrom os import environ\nimport random\n\nimport MySQLdb\nfrom datadefs import ParticipantBrewerRecord\n\n_dry_run = (environ.get('DRY_RUN', 'false') == 'true')\n\n_db = None\n\nif _db is None and not _dry_run:\n _db = MySQLdb.connect(\n database=environ[\"DB_DATABASE\"],\n host=environ[\"DB_HOST\"],\n password=environ[\"DB_PASSWORD\"],\n port=int(environ[\"DB_PORT\"]),\n user=environ[\"DB_USER\"],\n )\n\ndef create_brew_judging_number() -> str:\n candidate = str(random.randint(100000, 999999))\n cur = _db.cursor()\n cur.execute(\"\"\"\nSELECT COUNT(*)\nFROM brewing\nWHERE brewJudgingNumber = %s;\n \"\"\", (candidate, ))\n exists = cur.fetchone()[0] > 0\n \n if exists:\n print(f\"Judging number {candidate} already exists, trying again...\")\n return create_brew_judging_number()\n\n return candidate\n \n\ndef create_entries(participant: ParticipantBrewerRecord):\n if _dry_run:\n return\n \n assert participant.user_account.user_id is not None\n cur = _db.cursor()\n entries = participant.entries\n \n for entry in entries:\n print(f\"Creating entry '{entry.name}'\")\n style = entry.style\n brew_info = None\n brew_judging_no = create_brew_judging_number()\n \n if style.brew_cat() == \"21\":\n brew_info = f\"Required info for {entry.name}\"\n cur.execute(\"\"\"\n INSERT INTO brewing (\n brewName, brewStyle, brewCategory, brewCategorySort, brewSubCategory,\n brewInfo,\n brewBrewerFirstName, brewBrewerLastName, brewBrewerID,\n brewPaid,\n brewReceived,\n brewJudgingNumber,\n brewUpdated,\n brewConfirmed)\n VALUES (\n %s, %s, %s, %s, %s,\n %s,\n %s, %s, %s,\n %s,\n %s,\n %s,\n %s,\n %s\n )\"\"\", (\n entry.name, style.brew_style_name, style.brew_cat(), style.brew_cat_sort(), style.brew_style_num,\n brew_info,\n participant.first_name, participant.last_name, participant.user_account.user_id,\n 1,\n 1,\n brew_judging_no,\n datetime.datetime.now(),\n 1\n ))\n\ndef create_participant(participant: ParticipantBrewerRecord):\n if _dry_run:\n return\n cur = _db.cursor()\n cur.execute(\"\"\"\nINSERT INTO users(user_name,password,userLevel,userCreated)\nVALUES (%s, %s, %s, %s);\n \"\"\", (\n participant.user_account.user_name,\n \"$2a$08$S.KNXq.yQL9uAOQ8zzUxtOL0dJ.srZRN.VDtVCOn7NzuntcPUW3oS\",\n participant.user_account.user_level,\n datetime.datetime.now()\n ))\n \n cur.execute(\"SELECT LAST_INSERT_ID();\")\n user_id = cur.fetchone()[0]\n \n print(f\"Created user {participant.user_account.user_name} ({user_id})\")\n participant.user_account.user_id = user_id\n # create brewer\n brewer_staff = 'Y' if participant.is_staff else 'N'\n brewer_steward = 'Y' if participant.is_steward else 'N'\n brewer_judge = 'Y' if participant.is_judge else 'N'\n brewer_judge_location = 'Y-1' if participant.is_judge else None\n 
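# BCOEM represents these role fields as 'Y'/'N' strings or 'Y-1' location codes rather than booleans.\n 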
brewer_steward_location = 'Y-1' if participant.is_steward else None\n brewer_judge_waiver = 'Y' if participant.is_judge else None\n brewer_judge_exp = '2' if participant.is_judge else None\n cur.execute(\"\"\"\nINSERT INTO brewer (\n uid, brewerFirstName, brewerLastName, \n brewerAddress, brewerCity, brewerState, brewerZip, brewerCountry, brewerPhone1, \n brewerClubs, brewerEmail, \n brewerStaff, brewerSteward, \n brewerJudge, brewerJudgeRank, brewerJudgeLocation, brewerJudgeExp,\n brewerStewardLocation, \n brewerJudgeWaiver, \n brewerDropOff\n) VALUES (\n %s, %s, %s, \n %s, %s, %s, %s, %s, %s,\n %s, %s,\n %s, %s,\n %s, %s, %s,%s,\n %s,\n %s,\n %s\n );\"\"\",\n (\n user_id, participant.first_name, participant.last_name,\n participant.street_address, participant.city, participant.state, participant.postcode, \"Australia\", participant.phone,\n participant.club, participant.email,\n brewer_staff, brewer_steward,\n brewer_judge, \"Non-BJCP\", brewer_judge_location,brewer_judge_exp,\n brewer_steward_location,\n brewer_judge_waiver,\n \"1\"\n ))\n # create staff record \n cur.execute(\"\"\"\n INSERT INTO staff (\n uid,staff_judge,staff_judge_bos,staff_steward,\n staff_organizer,staff_staff\n ) VALUES\n\t (%s,0,0,0,0,0);\n \"\"\",\n (user_id,))\n\ndef participant_exists(participant: ParticipantBrewerRecord) -> bool:\n if _dry_run:\n return False\n\n cur = _db.cursor()\n cur.execute(\"SELECT COUNT(*) FROM users WHERE user_name = %s\", (participant.user_account.user_name,))\n exists = cur.fetchone()[0] > 0\n \n if exists:\n return True\n \n cur.execute(\"SELECT COUNT(*) FROM brewer WHERE brewerFirstName = %s AND brewerLastName = %s\", (participant.first_name, participant.last_name,))\n exists = cur.fetchone()[0] > 0\n \n return exists\n","repo_name":"cgspeck/westgate-bcoem-testdata","sub_path":"src/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":4958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29799750945","text":"def make_table(rows,labels=None,centered=False):\r\n\r\n returnTabe = ''\r\n\r\n colsCount = len(rows[0]) \r\n rowsCount = len(rows)\r\n\r\n ##################### Find Longest Word ###############################\r\n\r\n largestLength = 0\r\n for x in range(rowsCount):\r\n for y in range(colsCount):\r\n element = rows[x][y] = str(rows[x][y])\r\n if len(element) > largestLength:\r\n largestLength = len(element)\r\n \r\n cellLength = largestLength+2\r\n\r\n ############# Header/Footer Padding ####################\r\n\r\n tCount = colsCount - 1\r\n middlPartheader = ''\r\n middlePartFooter = ''\r\n if labels:\r\n BottomPartLabel = ''\r\n \r\n for t in range(tCount):\r\n middlPartheader += cellLength*'─'\r\n middlePartFooter += cellLength*'─'\r\n middlPartheader += '┬'\r\n middlePartFooter += '┴'\r\n if labels:\r\n BottomPartLabel += cellLength*'─'\r\n BottomPartLabel += '┼'\r\n\r\n middlPartheader += cellLength*'─'\r\n middlePartFooter += cellLength*'─'\r\n if labels: BottomPartLabel += cellLength*'─'\r\n\r\n headerLine = f\"┌{middlPartheader}┐\"\r\n footerLine = f\"└{middlePartFooter}┘\"\r\n returnTabe += headerLine+'\\n'\r\n\r\n ################### Labels Padding ######################\r\n\r\n if labels:\r\n for label in labels: \r\n labelIndex = labels.index(label)\r\n padding = cellLength - len(label)\r\n\r\n if centered == True:\r\n lpadLen = padding//2\r\n rpadLen = padding-lpadLen\r\n paddedCell = f\"│{lpadLen*' '}{label}{rpadLen*' '}\"\r\n\r\n if labelIndex==len(labels)-1:\r\n paddedCell += 
\"│\"\r\n\r\n else:\r\n paddedCell = f\"│{' '}{label}{(padding-1)*' '}\"\r\n if labelIndex==len(labels)-1:\r\n paddedCell += \"│\"\r\n\r\n returnTabe += paddedCell\r\n\r\n middleLine = f\"├{BottomPartLabel}┤\"\r\n\r\n returnTabe += \"\\n\"+middleLine+\"\\n\"\r\n\r\n ################### Content Padding #####################\r\n\r\n for x in range(rowsCount):\r\n for y in range(colsCount):\r\n cellValue = rows[x][y]\r\n wlen = len(cellValue)\r\n \r\n #for centered\r\n padding = cellLength - wlen\r\n if centered == True:\r\n lpadLen = padding//2\r\n rpadLen = padding-lpadLen\r\n paddedCell = f\"│{lpadLen*' '}{cellValue}{rpadLen*' '}\"\r\n if y==colsCount-1:\r\n paddedCell += \"│\"\r\n else:\r\n paddedCell = f\"│{' '}{cellValue}{(padding-1)*' '}\"\r\n if y==colsCount-1:\r\n paddedCell += \"│\"\r\n\r\n returnTabe += paddedCell\r\n\r\n returnTabe += '\\n'\r\n\r\n#################### Returning Table #################\r\n\r\n returnTabe += footerLine\r\n return returnTabe","repo_name":"ShanTen/PyCodeJam","sub_path":"makeTable.py","file_name":"makeTable.py","file_ext":"py","file_size_in_byte":2957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71404997548","text":"# Emergent Objects\n# Analogin input/output\n\nimport time\nimport board\nimport analogio\nimport neopixel\n\npixels = neopixel.NeoPixel(board.NEOPIXEL, 10, brightness=0.1, auto_write=False)\n\nanalogin = analogio.AnalogIn(board.A1)\n\nprint(\"hello\")\n\nwhile True:\n\tprint(analogin.value)\n\n\tif analogin.value > 600:\n\t\tprint(\"hello\")\n\t\tpixels[9] = (255, 50, 50)\n\telse:\n\t\tpixels[9] = (0, 0, 0)\n\tif analogin.value > 3200:\n\t\tpixels[8] = (255, 50, 50)\n\telse:\n\t\tpixels[8] = (0, 0, 0)\n\tif analogin.value > 4000:\n\t\tpixels[7] = (255, 50, 50)\n\telse:\n\t\tpixels[7] = (0, 0, 0)\n\tif analogin.value > 52000:\n\t\tpixels[6] = (255, 50, 50)\n\telse:\n\t\tpixels[6] = (0, 0, 0)\n\tif analogin.value > 60000:\n\t\tpixels[5] = (255, 50, 50)\n\telse:\n\t\tpixels[5] = (0, 0, 0)\n\tpixels.show()\n\ttime.sleep(0.01)\n","repo_name":"arielchuri/emergentobjects","sub_path":"analogin/analog_in_neopixel.py","file_name":"analog_in_neopixel.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29831431011","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 31 17:33:52 2022\n\n@author: dan\n\"\"\"\n\n\nimport json\nimport os\nimport sys\nimport pandas as pd\nimport numpy as np\nimport nibabel as nib\nimport nilearn\n#import scilpy as scil\nsys.path.append('wma_pyTools')\nimport wmaPyTools.roiTools\nimport wmaPyTools.streamlineTools\nimport wmaPyTools.analysisTools \nfrom dipy.segment.tissue import TissueClassifierHMRF\n\n# load inputs from config.json\nwith open('config.json') as config_json:\n\tconfig = json.load(config_json)\n\n#identity reminder:\n# 1 = gm\n# 2 = subscortical\n# 3 = wm\n# 4 = CSF\n# 5 = Lesions\n# if os.path.exists(config['5tt']):\n# inputTissueNifti=nib.load(config['5tt'])\n# else:\n# inputTissueNifti=None\n\n# if os.path.exists(config['fa']):\n# fa_nifti=nib.load(config['fa'])\n# else:\n# fa_nifti=None\n \n# if os.path.exists(config['rd']):\n# rd_nifti=nib.load(config['rd'])\n# else:\n# rd_nifti=None\n\n# if os.path.exists(config['ad']):\n# ad_nifti=nib.load(config['ad'])\n# else:\n# ad_nifti=None\n \n# if os.path.exists(config['md']):\n# md_nifti=nib.load(config['md'])\n# else:\n# md_nifti=None\n \n# if 
os.path.exists(config['anat']):\n# refT1=nib.load(config['anat'])\n# else:\n# refT1=None\n\n\ninputTissueNifti=nib.load('5tt.nii.gz')\n\n#everything except dwi should have been created and on the main path\ndwi=nib.load(config['diff'])\nbval=config['bval']\nbvec=config['bvec']\nrefT1=nib.load(config['anat'])\n\n\ntissues=['cortGM','subCortGM','WM','CSF','Path']\n#run some clever conversions here for the 5TT\n\n#first resample it to the dwi data\n#WARNING this is going to cause all kinds of problems\nif not inputTissueNifti.shape[0:3]==dwi.shape[0:3]:\n print('resampling 5 tissue mask to fit diffusion data')\n resampledFiveTissue=nilearn.image.resample_img(inputTissueNifti,target_affine=dwi.affine,target_shape=dwi.shape[0:3])\nelse:\n #just rename it, I guess\n resampledFiveTissue=inputTissueNifti\n#if it's a 4d nifiti\nif len(resampledFiveTissue.shape)==4:\n print('4D input 5TT nifti detected, converting to int based 3D')\n outTissueData=np.zeros(resampledFiveTissue.shape[0:3])\n #iterate across the tissue labels\n for iTissues in range(resampledFiveTissue.shape[3]):\n roundedDataMask=np.around(resampledFiveTissue.get_data()[:,:,:,iTissues]).astype(bool)\n print(str(np.sum(roundedDataMask))+' voxels for ' + tissues[iTissues])\n outTissueData[roundedDataMask]=iTissues+1\n tissueNifti=nib.Nifti1Image(outTissueData, resampledFiveTissue.affine)\nelse:\n #figure out what to do for 3d vis based input\n print('3D input 5TT nifti detected, converting to int based 3D')\n\n#initialize the method\n#tissueClassifier=TissueClassifierHMRF()\n\n#initial_segmentation, final_segmentation, PVE=tissueClassifier.classify(refAnatT1.get_data(), nclasses=5, beta=0.1)\n\n#outTissue=nib.Nifti1Image(final_segmentation, refAnatT1.affine, refAnatT1.header)\n#nib.save(outTissue,'test5tt.nii.gz')\n\ndef compute_snr(dwi, bval, bvec, mask):\n #stealing from scilpy and dipy\n\n\n from dipy.segment.mask import median_otsu\n from scipy.ndimage.morphology import binary_dilation\n from dipy.io.gradients import read_bvals_bvecs\n import nibabel as nib\n \n #extract the mask if an input nifti was passed\n if isinstance(mask, nib.nifti1.Nifti1Image):\n mask=mask.get_data()\n \n \n bvals, bvecs = read_bvals_bvecs(bval, bvec)\n data = dwi.get_fdata(dtype=np.float32)\n #arbitrary value to compartmentalize bvals with\n bvalBinSize=500\n bvalBins=np.around(np. divide(bvals,bvalBinSize))\n roundedBvals=np.multiply(bvalBins,bvalBinSize)\n b0Indexes=np.where(roundedBvals==0)[0]\n\n #temporarily create and save down a noise mask. Helps speed up processing\n if not os.path.exists('noise_mask.nii.gz'):\n print('No noise mask found. Computing new noise mask')\n b0_mask, noise_mask = median_otsu(data, vol_idx=b0Indexes)\n \n # we inflate the mask, then invert it to recover only the noise\n noise_mask = binary_dilation(noise_mask, iterations=10).squeeze()\n \n # Add the upper half in order to delete the neck and shoulder\n # when inverting the mask\n noise_mask[..., :noise_mask.shape[-1]//2] = 1\n \n # Reverse the mask to get only noise\n noise_mask = (~noise_mask).astype('float32')\n \n noise_maskNifti=nib.Nifti1Image(noise_mask, dwi.affine)\n nib.save(noise_maskNifti,'noise_mask.nii.gz')\n else:\n print('Noise mask found in working directory. 
Loading...')\n noise_maskNifti=nib.load('noise_mask.nii.gz')\n noise_mask=noise_maskNifti.get_data()\n \n val = {0: {'bvec': [0, 0, 0], 'bval': 0, 'mean': 0, 'std': 0}}\n for idx in range(data.shape[-1]):\n val[idx] = {}\n val[idx]['bvec'] = bvecs[idx]\n val[idx]['bval'] = bvals[idx]\n val[idx]['mean'] = np.mean(data[..., idx:idx+1][mask > 0])\n #because I want a report about the std in the specific tissue as well\n noiseMaskSTD= np.std(data[..., idx:idx+1][noise_mask > 0])\n val[idx]['std'] = np.std(data[..., idx:idx+1][mask > 0])\n if noiseMaskSTD == 0:\n raise ValueError('Your noise mask does not capture any data'\n '(std=0). Please check your noise mask.')\n \n val[idx]['snr'] = val[idx]['mean'] / noiseMaskSTD\n\n return val\n\ndef fullSNR_report(dwi, bval, bvec, refT1=None, fiveTissue=None, other_niftiList=None,other_niftiNames=None):\n #test\n import nibabel as nib\n import numpy as np\n from dipy.io.gradients import read_bvals_bvecs\n import pandas as pd\n \n if np.logical_and(refT1==None,fiveTissue==None):\n raise ValueError('Either refT1 or five tissue type needed as input')\n \n if isinstance(dwi, str):\n dwi=nib.load(dwi)\n \n if isinstance(refT1, str):\n refT1=nib.load(refT1)\n if isinstance(fiveTissue, str):\n fiveTissue=nib.load(fiveTissue)\n \n #initialize a vector for the fa/rd/etc\n #additionalNiftiTypes=['fa','rd','ad','md']\n #additionalNiftiVec=[fa_nifti,rd_nifti,ad_nifti,md_nifti]\n additionalNiftiTypes=other_niftiNames\n additionalNiftiVec=other_niftiList\n \n #using the 5tt mrtrix convention\n tissues=['cortGM','subCortGM','WM','CSF','Path']\n metrics=['mean','std','snr']\n columnLabels=['source']\n #generate the column labels\n for iTissues in tissues:\n for iMetrics in metrics:\n columnLabels.extend([iTissues + '_'+ iMetrics])\n \n snrTable=pd.DataFrame(columns=columnLabels)\n print(str(range(len(np.unique(fiveTissue.get_data())))))\n #better have some information about the tissues in the 5tt mask\n for tissueIterator in range(len(np.unique(fiveTissue.get_data()))):\n currentMask=wmaPyTools.roiTools.multiROIrequestToMask(fiveTissue,tissueIterator+1,inflateIter=0)\n snrOut=compute_snr(dwi, bval, bvec, currentMask)\n #compute for dwi\n bvals, bvecs = read_bvals_bvecs(bval, bvec)\n #same as in the compute snr code \n bvalBinSize=500\n bvalBins=np.around(np. 
divide(bvals,bvalBinSize))\n roundedBvals=np.multiply(bvalBins,bvalBinSize)\n #b0Indexes=np.where(roundedBvals==0)[0]\n uniqueBvals= np.unique(roundedBvals).astype(int)\n \n print ('Computing stastics for tissue type ' + tissues[tissueIterator] )\n for bvalIterator,curBval in enumerate(uniqueBvals):\n #get the current indexes for this bval\n curBvalIndexes=np.where(roundedBvals==np.unique(roundedBvals)[bvalIterator])[0]\n #place the current bvalue in the source column\n snrTable.at[bvalIterator,'source']=curBval\n #get the mean, std, and snr respectively\n snrTable.at[bvalIterator,tissues[tissueIterator]+'_'+metrics[0]]=np.mean([snrOut[icurBvalIndexes][metrics[0]] for icurBvalIndexes in curBvalIndexes])\n snrTable.at[bvalIterator,tissues[tissueIterator]+'_'+metrics[1]]=np.mean([snrOut[icurBvalIndexes][metrics[1]] for icurBvalIndexes in curBvalIndexes])\n snrTable.at[bvalIterator,tissues[tissueIterator]+'_'+metrics[2]]=np.mean([snrOut[icurBvalIndexes][metrics[2]] for icurBvalIndexes in curBvalIndexes])\n print ('performing SNR analysis for bval level ' + str(curBval) )\n \n \n for iAddIterator,iAdditionalNifti in enumerate(additionalNiftiVec):\n if not iAdditionalNifti==None:\n if isinstance(iAdditionalNifti, str):\n currentAddNif=nib.load(iAdditionalNifti)\n else:\n currentAddNif=iAdditionalNifti\n #does this actually work\n print('Computing tissue-specific metrics for ' + additionalNiftiTypes[iAddIterator])\n currentSubsetFullData=currentAddNif.get_data()\n snrTable.at[iAddIterator+len(uniqueBvals),'source']=additionalNiftiTypes[iAddIterator]\n snrTable.at[iAddIterator+len(uniqueBvals),tissues[tissueIterator]+'_'+metrics[0]]=np.nanmean(currentSubsetFullData[currentMask.get_data()> 0])\n snrTable.at[iAddIterator+len(uniqueBvals),tissues[tissueIterator]+'_'+metrics[1]]=np.nanstd(currentSubsetFullData[currentMask.get_data()> 0])\n snrTable.at[iAddIterator+len(uniqueBvals),tissues[tissueIterator]+'_'+metrics[2]]='NaN' \n \n \n return snrTable\n\nother_niftiNames=['fa', 'md', 'ad', 'rd', 'cl', 'cp', 'cs', 'dk']\nother_niftiList=[iNames+'.nii.gz' for iNames in other_niftiNames]\n\ntestOut=fullSNR_report(dwi, bval, bvec, refT1, tissueNifti,other_niftiList,other_niftiNames)\n\noutDir='output'\nif not os.path.exists(outDir):\n os.makedirs(outDir)\n\ntestOut.to_csv(os.path.join(outDir,'snr_report.csv'))\n\n# get ROIS for each tissue type from NMT_segmentation_in_FA.nii.gz\n\n#get mean snr for FA AD RD MD + each shell\n\n","repo_name":"DanNBullock/app-SNR_Report","sub_path":"SNR_report.py","file_name":"SNR_report.py","file_ext":"py","file_size_in_byte":9915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8288017313","text":"def sort_list(lst):\n lst: list\n new_list = []\n while len(lst) > 0:\n new_list.append(min(lst))\n lst.remove(min(lst))\n return new_list\n\n\nif __name__ == \"__main__\":\n print(sort_list([3, 5, 2, 1, 8, 4]))\n","repo_name":"Lucas-vdr-Horst/Mastermind-extraExercises","sub_path":"exercise-1/Sorteren.py","file_name":"Sorteren.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8395394141","text":"import threading\n\nfrom wandb.sdk.internal.settings_static import SettingsStatic\nfrom wandb.sdk.internal.system.assets import Disk\nfrom wandb.sdk.internal.system.assets.disk import DiskIn, DiskOut\nfrom wandb.sdk.internal.system.system_monitor import AssetInterface\n\n\ndef test_disk_metrics(test_settings):\n interface = 
AssetInterface()\n settings = SettingsStatic(\n test_settings(\n dict(\n _stats_sample_rate_seconds=0.1,\n _stats_samples_to_average=2,\n )\n ).to_proto()\n )\n shutdown_event = threading.Event()\n\n disk = Disk(interface=interface, settings=settings, shutdown_event=shutdown_event)\n\n # Test that the probe() method returns the correct disk metrics\n expected_metrics = {\n \"disk\": {\n \"/\": {\n \"total\": disk.probe()[\"disk\"][\"/\"][\"total\"],\n \"used\": disk.probe()[\"disk\"][\"/\"][\"used\"],\n }\n }\n }\n\n assert disk.is_available()\n\n assert disk.probe() == expected_metrics\n\n # Test that the metrics_monitor was started & finished\n disk.start()\n\n shutdown_event.set()\n\n disk.finish()\n\n assert not interface.metrics_queue.empty()\n\n\ndef test_disk_in():\n disk_in = DiskIn()\n disk_in.sample()\n assert len(disk_in.samples) == 1\n\n\ndef test_disk_out():\n disk_out = DiskOut()\n disk_out.sample()\n assert len(disk_out.samples) == 1\n","repo_name":"wandb/wandb","sub_path":"tests/pytest_tests/unit_tests/test_system_metrics/test_disk.py","file_name":"test_disk.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","stars":7479,"dataset":"github-code","pt":"37"} +{"seq_id":"15132665822","text":"import os\r\nimport sys\r\n\r\nfolder1=input(\"Please enter the name of folder: \")\r\nlibsiz=input(\"Please enter the size of library: \")\r\nfileoutput=open(\"meta-data.txt\",\"w\")\r\nfileoutput.write(\"SampleID\"+\"\\t\"+\"Name\"+'\\t'+'Category'+'\\t'+'ReadLength'+'\\n')\r\n\r\nd1=[] # collect sample IDs with extensions stripped\r\nfor root,dirs,files in os.walk(folder1):\r\n for file in files:\r\n d1.append(str(file).replace(\".fa\",\"\").replace(\".gz\",\"\").replace(\"_1.fq\",\"\").replace(\"_2.fq\",\"\"))\r\nd2=list(set(d1))\r\n\r\nfor i in d2:\r\n num= d2.index(i)+1\r\n fileoutput.write(str(num)+ '\\t'+str(i)+'\\t'+'influent'+'\\t'+libsiz+'\\n')\r\n\r\n\r\nfileoutput.close()\r\n \r\n\r\n","repo_name":"MAXINELSX/bestpractice","sub_path":"make_meta_data_file.py","file_name":"make_meta_data_file.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7523298734","text":"import pandas as pd\nimport os\nimport gc\nimport math\nimport joblib\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nfrom collections import defaultdict\nfrom itertools import combinations\nimport copy\n\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.model_selection import KFold\nimport lightgbm as lgb\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nfrom tools import *\n\nfilename = './DATA/s1/'\ntrain_s1 = pd.read_csv(filename+'train.tsv', sep='\\t')\ntrain_5core_s1 = pd.read_csv(filename+'train_5core.tsv', sep='\\t')\nvalid_qrel_s1 = pd.read_csv(filename+'valid_qrel.tsv', sep='\\t') # validation set positive samples\nvalid_run_s1 = pd.read_csv(filename+'valid_run.tsv', sep='\\t', header=None) # validation candidate samples\nvalid_run_s1.columns = ['userId','itemIds']\ntrain_s1['source'] = 0\ntrain_5core_s1['rating']*=5\ntrain_5core_s1['source'] = 1\ntrain_s1 = pd.concat([train_s1,train_5core_s1])\n\nfilename = './DATA/s2/'\ntrain_s2 = pd.read_csv(filename+'train.tsv', sep='\\t')\ntrain_5core_s2 = pd.read_csv(filename+'train_5core.tsv', sep='\\t')\nvalid_qrel_s2 = pd.read_csv(filename+'valid_qrel.tsv', sep='\\t') # validation set positive samples\nvalid_run_s2 = pd.read_csv(filename+'valid_run.tsv', sep='\\t', header=None) # validation candidate samples\nvalid_run_s2.columns = ['userId','itemIds']\ntrain_s2['source'] = 
0\ntrain_5core_s2['rating']*=5\ntrain_5core_s2['source'] = 1\ntrain_s2 = pd.concat([train_s2,train_5core_s2])\n\nfilename = './DATA/s3/'\ntrain_s3 = pd.read_csv(filename+'train.tsv', sep='\\t')\ntrain_5core_s3 = pd.read_csv(filename+'train_5core.tsv', sep='\\t')\nvalid_qrel_s3 = pd.read_csv(filename+'valid_qrel.tsv', sep='\\t') # validation set positive samples\nvalid_run_s3 = pd.read_csv(filename+'valid_run.tsv', sep='\\t', header=None) # validation candidate samples\nvalid_run_s3.columns = ['userId','itemIds']\ntrain_s3['source'] = 0\ntrain_5core_s3['rating']*=5\ntrain_5core_s3['source'] = 1\ntrain_s3 = pd.concat([train_s3,train_5core_s3])\n\nfilename = './DATA/t1/'\ntrain_t1 = pd.read_csv(filename+'train.tsv', sep='\\t')\ntrain_5core_t1 = pd.read_csv(filename+'train_5core.tsv', sep='\\t')\nvalid_qrel_t1 = pd.read_csv(filename+'valid_qrel.tsv', sep='\\t') # validation set positive samples\nvalid_run_t1 = pd.read_csv(filename+'valid_run.tsv', sep='\\t', header=None) # validation candidate samples\nvalid_run_t1.columns = ['userId','itemIds']\ntest_run_t1 = pd.read_csv(filename+'test_run.tsv', sep='\\t', header=None) # test candidate samples\ntest_run_t1.columns = ['userId','itemIds']\ntrain_t1['source'] = 0\ntrain_5core_t1['rating']*=5\ntrain_5core_t1['source'] = 1\ntrain1 = pd.concat([train_t1,train_5core_t1])\n\nfilename = './DATA/t2/'\ntrain_t2 = pd.read_csv(filename+'train.tsv', sep='\\t')\ntrain_5core_t2 = pd.read_csv(filename+'train_5core.tsv', sep='\\t')\nvalid_qrel_t2 = pd.read_csv(filename+'valid_qrel.tsv', sep='\\t') # validation set positive samples\nvalid_run_t2 = pd.read_csv(filename+'valid_run.tsv', sep='\\t', header=None) # validation candidate samples\nvalid_run_t2.columns = ['userId','itemIds']\ntest_run_t2 = pd.read_csv(filename+'test_run.tsv', sep='\\t', header=None) # test candidate samples\ntest_run_t2.columns = ['userId','itemIds']\ntrain_t2['source'] = 0\ntrain_5core_t2['rating']*=5\ntrain_5core_t2['source'] = 1\ntrain2 = pd.concat([train_t2,train_5core_t2])\n\n\n### filter the s1, s2, s3 data\nvalid_qrel_s1['rating']*=5\nvalid_qrel_s2['rating']*=5\nvalid_qrel_s3['rating']*=5\n\nall_s1 = pd.concat([train_s1,valid_qrel_s1])\nall_s2 = pd.concat([train_s2,valid_qrel_s2])\nall_s3 = pd.concat([train_s3,valid_qrel_s3])\n\n# this part adds the validation positives and negatives\nvalid_t1 = get_Source(valid_qrel_t1,valid_run_t1)\nvalid_t2 = get_Source(valid_qrel_t2,valid_run_t2)\n\n# build the t1 and t2 test sets\ntest_t1 = get_init_Test(test_run_t1)\ntest_t2 = get_init_Test(test_run_t2)\n\nuse_t1 = pd.concat([valid_t1,test_t1])\nuse_t2 = pd.concat([valid_t2,test_t2])\n\nt1_all_items = set(use_t1['itemId'].values)\nt2_all_items = set(use_t2['itemId'].values)\n\ndef is_in_t1(x):\n if x in t1_all_items:\n return True\n else:\n return False\n\ndef is_in_t2(x):\n if x in t2_all_items:\n return True\n else:\n return False\n\n###\nall_s1_t1 = all_s1[all_s1['itemId'].apply(lambda x:is_in_t1(x))]\nall_s2_t1 = all_s2[all_s2['itemId'].apply(lambda x:is_in_t1(x))]\nall_s3_t1 = all_s3[all_s3['itemId'].apply(lambda x:is_in_t1(x))]\n\n###\nall_s1_t2 = all_s1[all_s1['itemId'].apply(lambda x:is_in_t2(x))]\nall_s2_t2 = all_s2[all_s2['itemId'].apply(lambda x:is_in_t2(x))]\nall_s3_t2 = all_s3[all_s3['itemId'].apply(lambda x:is_in_t2(x))]\n\nall_s_t1 = pd.concat([all_s1_t1,all_s2_t1,all_s3_t1])\nall_s_t2 = pd.concat([all_s1_t2,all_s2_t2,all_s3_t2])\n\ndel all_s_t1['source'], all_s_t2['source']\n\ntrain1_added = pd.concat([all_s_t1,train1])\ntrain2_added = pd.concat([all_s_t2,train2])\n\ndef mkdir(d):\n if not os.path.exists(d):\n os.makedirs(d)\nmkdir('./embeddings/')\n\n### embedding features\nre_train = True # set whether the word2vec embeddings should be retrained\n## build the embedding features: the first two are user embeddings, the last two are item embeddings\nfilename = './embeddings/'\nname1 = filename+'word2vec_t1_users'\nname2 = 
filename+'word2vec_t2_users'\nname3 = filename+'word2vec_t1_items'\nname4 = filename+'word2vec_t2_items'\n\nuserId = 'userId'\nitemId = 'itemId'\n\nembedding_t1_users = get_w2v_embedding(train1,name1,'userId','itemId',re_train,16)\nembedding_t2_users = get_w2v_embedding(train2,name2,'userId','itemId',re_train,16)\n\nembedding_t1_items = get_w2v_embedding(train1,name3,'itemId','userId',re_train,16)\nembedding_t2_items = get_w2v_embedding(train2,name4,'itemId','userId',re_train,16)\n\nname3 = filename+'word2vec_t1_items_test'\nname4 = filename+'word2vec_t2_items_test'\nembedding_t1_items_test = get_w2v_embedding(pd.concat([train1,valid_qrel_t1]),name3,'itemId','userId',re_train,16)\nembedding_t2_items_test = get_w2v_embedding(pd.concat([train2,valid_qrel_t2]),name4,'itemId','userId',re_train,16)\n\nall_s1_t1 = pd.concat([all_s1_t1,train1])\nall_s2_t1 = pd.concat([all_s2_t1,train1])\nall_s3_t1 = pd.concat([all_s3_t1,train1])\n\nall_s1_t2 = pd.concat([all_s1_t2,train2])\nall_s2_t2 = pd.concat([all_s2_t2,train2])\nall_s3_t2 = pd.concat([all_s3_t2,train2])\n\n# compute item similarities separately for s1, s2, s3\nname3 = filename+'word2vec_t1_items_s1'\nname4 = filename+'word2vec_t2_items_s1'\ns1_embedding_t1_items = get_w2v_embedding(all_s1_t1,name3,'itemId','userId',re_train,16)\ns1_embedding_t2_items = get_w2v_embedding(all_s1_t2,name4,'itemId','userId',re_train,16)\n\nname3 = filename+'word2vec_t1_items_s1_test'\nname4 = filename+'word2vec_t2_items_s1_test'\ns1_embedding_t1_items_test = get_w2v_embedding(pd.concat([all_s1_t1,valid_qrel_t1]),name3,'itemId','userId',re_train,16)\ns1_embedding_t2_items_test = get_w2v_embedding(pd.concat([all_s1_t2,valid_qrel_t2]),name4,'itemId','userId',re_train,16)\n\n\nname3 = filename+'word2vec_t1_items_s2'\nname4 = filename+'word2vec_t2_items_s2'\ns2_embedding_t1_items = get_w2v_embedding(all_s2_t1,name3,'itemId','userId',re_train,16)\ns2_embedding_t2_items = get_w2v_embedding(all_s2_t2,name4,'itemId','userId',re_train,16)\n\nname3 = filename+'word2vec_t1_items_s2_test'\nname4 = filename+'word2vec_t2_items_s2_test'\ns2_embedding_t1_items_test = get_w2v_embedding(pd.concat([all_s2_t1,valid_qrel_t1]),name3,'itemId','userId',re_train,16)\ns2_embedding_t2_items_test = get_w2v_embedding(pd.concat([all_s2_t2,valid_qrel_t2]),name4,'itemId','userId',re_train,16)\n\n\nname3 = filename+'word2vec_t1_items_s3'\nname4 = filename+'word2vec_t2_items_s3'\ns3_embedding_t1_items = get_w2v_embedding(all_s3_t1,name3,'itemId','userId',re_train,16)\ns3_embedding_t2_items = get_w2v_embedding(all_s3_t2,name4,'itemId','userId',re_train,16)\n\nname3 = filename+'word2vec_t1_items_s3_test'\nname4 = filename+'word2vec_t2_items_s3_test'\ns3_embedding_t1_items_test = get_w2v_embedding(pd.concat([all_s3_t1,valid_qrel_t1]),name3,'itemId','userId',re_train,16)\ns3_embedding_t2_items_test = get_w2v_embedding(pd.concat([all_s3_t2,valid_qrel_t2]),name4,'itemId','userId',re_train,16)\n\n\nname3 = filename+'word2vec_t1_items_added'\nname4 = filename+'word2vec_t2_items_added'\nembedding_t1_items_added = get_w2v_embedding(train1_added,name3,'itemId','userId',re_train,30)\nembedding_t2_items_added = get_w2v_embedding(train2_added,name4,'itemId','userId',re_train,30)\n\nname3 = filename+'word2vec_t1_items_added_test'\nname4 = filename+'word2vec_t2_items_added_test'\nembedding_t1_items_added_test = get_w2v_embedding(pd.concat([train1_added,valid_qrel_t1]),name3,'itemId','userId',re_train,30)\nembedding_t2_items_added_test = get_w2v_embedding(pd.concat([train2_added,valid_qrel_t2]),name4,'itemId','userId',re_train,30)\n\n# 
TF-IDF features\ndf_tfidf_emb_t1 = tfidf_svd(train1, f1='itemId', f2='userId', n_components=24)\ndf_tfidf_emb_t2 = tfidf_svd(train2, f1='itemId', f2='userId', n_components=24)\n\n# statistical features\nfeature_list_t1 = get_Statistical_Features(train_t1)\nfeature_list_t2 = get_Statistical_Features(train_t2)\n\n# statistical features on the source-augmented data\nfeature_list_t1_added = get_Statistical_Features_added(train1_added)\nfeature_list_t2_added = get_Statistical_Features_added(train2_added)\n\n# attach the features above\nTrain1 = get_Train(valid_qrel_t1, valid_run_t1, [feature_list_t1,feature_list_t1_added],[embedding_t1_users,embedding_t1_items,df_tfidf_emb_t1])\nTrain2 = get_Train(valid_qrel_t2, valid_run_t2,[feature_list_t2,feature_list_t2_added],[embedding_t2_users,embedding_t2_items,df_tfidf_emb_t2])\nTest1 = get_Test(test_run_t1, [feature_list_t1,feature_list_t1_added],[embedding_t1_users,embedding_t1_items,df_tfidf_emb_t1])\nTest2 = get_Test(test_run_t2, [feature_list_t2,feature_list_t2_added],[embedding_t2_users,embedding_t2_items,df_tfidf_emb_t2])\n\ndel feature_list_t1,feature_list_t2,feature_list_t1_added,feature_list_t2_added\n\n############# cos\nfeature_name = ['cf','cos']\n[Train1,Train2,Test1,Test2] = cal_sim_for_all(train1,train2,pd.concat([train1,valid_qrel_t1]),pd.concat([train2,valid_qrel_t2]),\\\n Train1,Train2,Test1,Test2,\\\n embedding_t1_items,embedding_t2_items,embedding_t1_items_test,embedding_t2_items_test,\\\n feature_name)\n\nfeature_name = ['all_Source_global_cf','all_Source_global_cos']\n[Train1,Train2,Test1,Test2] = cal_sim_for_all(train1_added,train2_added,pd.concat([train1_added,valid_qrel_t1]),pd.concat([train2_added,valid_qrel_t2]),\\\n Train1,Train2,Test1,Test2,\\\n embedding_t1_items_added,embedding_t2_items_added,embedding_t1_items_added_test,embedding_t2_items_added_test,\\\n feature_name)\n\nfeature_name = ['s1_cf','s1_cos']\n[Train1,Train2,Test1,Test2] = cal_sim_for_all(all_s1_t1,all_s1_t2,pd.concat([all_s1_t1,valid_qrel_t1]),pd.concat([all_s1_t2,valid_qrel_t2]),\\\n Train1,Train2,Test1,Test2,\\\n s1_embedding_t1_items,s1_embedding_t2_items,s1_embedding_t1_items_test,s1_embedding_t2_items_test,\\\n feature_name)\n\nfeature_name = ['s2_cf','s2_cos']\n[Train1,Train2,Test1,Test2] = cal_sim_for_all(all_s2_t1,all_s2_t2,pd.concat([all_s2_t1,valid_qrel_t1]),pd.concat([all_s2_t2,valid_qrel_t2]),\\\n Train1,Train2,Test1,Test2,\\\n s2_embedding_t1_items,s2_embedding_t2_items,s2_embedding_t1_items_test,s2_embedding_t2_items_test,\\\n feature_name)\n\nfeature_name = ['s3_cf','s3_cos']\n[Train1,Train2,Test1,Test2] = cal_sim_for_all(all_s3_t1,all_s3_t2,pd.concat([all_s3_t1,valid_qrel_t1]),pd.concat([all_s3_t2,valid_qrel_t2]),\\\n Train1,Train2,Test1,Test2,\\\n s3_embedding_t1_items,s3_embedding_t2_items,s3_embedding_t1_items_test,s3_embedding_t2_items_test,\\\n feature_name)\n\n\n# LightGCN stacking\nnn_valid_t1_df = pd.read_csv('./merge/t1/LightGCN_with_w2v_valid.tsv', sep='\\t')\nnn_test_t1_df = pd.read_csv('./merge/t1/LightGCN_with_w2v_test.tsv', sep='\\t')\n\nnn_valid_t2_df = pd.read_csv('./merge/t2/LightGCN_with_w2v_valid.tsv', sep='\\t')\nnn_test_t2_df = pd.read_csv('./merge/t2/LightGCN_with_w2v_test.tsv', sep='\\t')\n\n###\nTrain1 = Train1.merge(nn_valid_t1_df, on=['userId','itemId'], how='left')\nTest1 = Test1.merge(nn_test_t1_df, on=['userId','itemId'], how='left')\n\nTrain2 = Train2.merge(nn_valid_t2_df, on=['userId','itemId'], how='left')\nTest2 = Test2.merge(nn_test_t2_df, on=['userId','itemId'], how='left')\n\n# LightGCN stacking\nnn_valid_t1_df = pd.read_csv('./merge/t1/LightGCN_without_w2v_valid.tsv', 
sep='\\t')\nnn_test_t1_df = pd.read_csv('./merge/t1/LightGCN_without_w2v_test.tsv', sep='\\t')\n\nnn_valid_t2_df = pd.read_csv('./merge/t2/LightGCN_without_w2v_valid.tsv', sep='\\t')\nnn_test_t2_df = pd.read_csv('./merge/t2/LightGCN_without_w2v_test.tsv', sep='\\t')\n\n###\nTrain1 = Train1.merge(nn_valid_t1_df, on=['userId','itemId'], how='left')\nTest1 = Test1.merge(nn_test_t1_df, on=['userId','itemId'], how='left')\n\nTrain2 = Train2.merge(nn_valid_t2_df, on=['userId','itemId'], how='left')\nTest2 = Test2.merge(nn_test_t2_df, on=['userId','itemId'], how='left')\n\n\ndef lgb_ranker_model(train,test,k_fold=5,seed=2022):\n trn_df = train\n user_set = get_kfold_users(trn_df, n=k_fold,seed=seed)\n score_list = []\n sub_preds = np.zeros(test.shape[0])\n \n for n_fold, valid_user in enumerate(user_set):\n print('************************************ {} ************************************'.format(str(n_fold+1)))\n train_idx = trn_df[~trn_df['userId'].isin(valid_user)] # users outside this fold form the training split\n valid_idx = trn_df[trn_df['userId'].isin(valid_user)]\n\n # sort by user and compute the per-user group sizes for the train and validation splits\n train_idx.sort_values(by=['userId'], inplace=True)\n g_train = train_idx.groupby(['userId'], as_index=False).count()[\"label\"].values\n\n valid_idx.sort_values(by=['userId'], inplace=True)\n g_val = valid_idx.groupby(['userId'], as_index=False).count()[\"label\"].values\n \n # train the model\n lgb_ranker = lgb.LGBMRanker(boosting_type='gbdt', num_leaves=7, reg_alpha=0.0, reg_lambda=1,\n max_depth=-1, n_estimators=1000, subsample=0.8, colsample_bytree=0.8, \n subsample_freq=1,learning_rate=0.03, min_child_weight=10, random_state=2022, \n n_jobs=-1) \n \n feats = [f for f in train_idx.columns if f not in ['userId','itemId','label']]\n lgb_ranker.fit(train_idx[feats], train_idx['label'], group=g_train,\n eval_set=[(valid_idx[feats], valid_idx['label'])], eval_group= [g_val], \n eval_at=[10], eval_metric=['ndcg', ], early_stopping_rounds=200, verbose=100)\n \n # predict on the validation fold\n valid_idx['pred_score'] = lgb_ranker.predict(valid_idx[feats], num_iteration=lgb_ranker.best_iteration_)\n \n # normalise the output scores\n valid_idx['pred_score'] = valid_idx[['pred_score']].transform(lambda x: norm_sim(x))\n valid_idx.sort_values(by=['userId', 'pred_score'])\n valid_idx['pred_rank'] = valid_idx.groupby(['userId'])['pred_score'].rank(ascending=False, method='first')\n\n # collect the fold predictions in a list for later concatenation\n score_list.append(valid_idx[['userId', 'itemId', 'pred_score', 'pred_rank']])\n \n # predict on the test set\n sub_preds += lgb_ranker.predict(test[feats], num_iteration=lgb_ranker.best_iteration_)\n \n test['pred_score'] = sub_preds / k_fold\n test['pred_score'] = test['pred_score'].transform(lambda x: norm_sim(x))\n test.sort_values(by=['userId', 'pred_score'])\n test['pred_rank'] = test.groupby(['userId'])['pred_score'].rank(ascending=False, method='first')\n return score_list,test\n\n##### 5fold\nprint('====================================== t1 ======================================')\nscore_list_t1,test_t1_pred = lgb_ranker_model(Train1,Test1,5,2022)\nprint('====================================== t2 ======================================')\nscore_list_t2,test_t2_pred = lgb_ranker_model(Train2,Test2,5,2022)\n\n### t1\nscore_df_t1 = pd.concat(score_list_t1, axis=0)\noutput_cols = ['userId','itemId','pred_score']\n\nscore_df_t1 = score_df_t1[output_cols].rename(columns={'pred_score':'score'})\ntest_t1_pred = test_t1_pred[output_cols].rename(columns={'pred_score':'score'})\nscore_df_t1 = score_df_t1.sort_values(by=['userId','score'],ascending=[True,False])\ntest_t1_pred = 
test_t1_pred.sort_values(by=['userId','score'],ascending=[True,False])\n\nvalid_qrel_t1 = valid_qrel_t1.sort_values(by=['userId'],ascending=[True])\n\n### t2\nscore_df_t2 = pd.concat(score_list_t2, axis=0)\noutput_cols = ['userId','itemId','pred_score']\n\nscore_df_t2 = score_df_t2[output_cols].rename(columns={'pred_score':'score'})\ntest_t2_pred = test_t2_pred[output_cols].rename(columns={'pred_score':'score'})\nscore_df_t2 = score_df_t2.sort_values(by=['userId','score'],ascending=[True,False])\ntest_t2_pred = test_t2_pred.sort_values(by=['userId','score'],ascending=[True,False])\n\nvalid_qrel_t2 = valid_qrel_t2.sort_values(by=['userId'],ascending=[True])\n\noutput_dir = './merge/result_rank1/'\nscore_df_t1.to_csv(output_dir+'t1/valid_pred.tsv',index=False,sep='\\t')\ntest_t1_pred.to_csv(output_dir+'t1/test_pred.tsv',index=False,sep='\\t')\nscore_df_t2.to_csv(output_dir+'t2/valid_pred.tsv',index=False,sep='\\t')\ntest_t2_pred.to_csv(output_dir+'t2/test_pred.tsv',index=False,sep='\\t')\n\n\n##### 10fold\nprint('====================================== t1 ======================================')\nscore_list_t1,test_t1_pred = lgb_ranker_model(Train1,Test1,10,2021)\nprint('====================================== t2 ======================================')\nscore_list_t2,test_t2_pred = lgb_ranker_model(Train2,Test2,10,2021)\n\n### t1\nscore_df_t1 = pd.concat(score_list_t1, axis=0)\noutput_cols = ['userId','itemId','pred_score']\n\nscore_df_t1 = score_df_t1[output_cols].rename(columns={'pred_score':'score'})\ntest_t1_pred = test_t1_pred[output_cols].rename(columns={'pred_score':'score'})\nscore_df_t1 = score_df_t1.sort_values(by=['userId','score'],ascending=[True,False])\ntest_t1_pred = test_t1_pred.sort_values(by=['userId','score'],ascending=[True,False])\n\nvalid_qrel_t1 = valid_qrel_t1.sort_values(by=['userId'],ascending=[True])\n\n### t2\nscore_df_t2 = pd.concat(score_list_t2, axis=0)\noutput_cols = ['userId','itemId','pred_score']\n\nscore_df_t2 = score_df_t2[output_cols].rename(columns={'pred_score':'score'})\ntest_t2_pred = test_t2_pred[output_cols].rename(columns={'pred_score':'score'})\nscore_df_t2 = score_df_t2.sort_values(by=['userId','score'],ascending=[True,False])\ntest_t2_pred = test_t2_pred.sort_values(by=['userId','score'],ascending=[True,False])\n\nvalid_qrel_t2 = valid_qrel_t2.sort_values(by=['userId'],ascending=[True])\n\noutput_dir = './merge/result_rank2/'\nscore_df_t1.to_csv(output_dir+'t1/valid_pred.tsv',index=False,sep='\\t')\ntest_t1_pred.to_csv(output_dir+'t1/test_pred.tsv',index=False,sep='\\t')\nscore_df_t2.to_csv(output_dir+'t2/valid_pred.tsv',index=False,sep='\\t')\ntest_t2_pred.to_csv(output_dir+'t2/test_pred.tsv',index=False,sep='\\t')","repo_name":"bettenW/2022WSDM-Cup-Cross-market-Recommendation-Rank2","sub_path":"rank_lgb_model.py","file_name":"rank_lgb_model.py","file_ext":"py","file_size_in_byte":18854,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"37"} +{"seq_id":"44811320084","text":"def recur(A,S,n):\n if S==0:\n return 1\n if n==0:\n return 0\n if A[n-1]>S:\n return recur(A,S,n-1)\n else:\n return recur(A,S,n-1)+recur(A,S-A[n-1],n-1)\n\ndef tab(A,S,n):\n if n==0:\n dp[n][S]=0\n if S==0:\n dp[n][S]=1\n if dp[n][S]!=-1:\n return dp[n][S]\n if A[n-1]>S:\n dp[n][S]=tab(A,S,n-1)\n else:\n dp[n][S]=tab(A,S,n-1)+tab(A,S-A[n-1],n-1)\n return dp[n][S]\n\ndef printmat(dp):\n for row in dp:\n print(row)\n print('========================')\ndef mem(A,S,n):\n dp=[[-1 for _ in range(S+1)] for _ in range(n+1)]\n for i in range(n+1):\n for j in range(S+1):\n if i==0:\n dp[i][j]=0\n if j==0:\n dp[i][j]=1\n for i in range(1,n+1):\n for j in range(S+1):\n if A[i-1]>j:\n 
dp[i][j]=dp[i-1][j]\n else:\n dp[i][j]=dp[i-1][j]+dp[i-1][j-A[i-1]]\n printmat(dp)\n subset(dp,n,S,A)\n return dp[n][S]\ndef subset(dp,n,S,A):\n queue=[(n,S,[])]\n counter=10\n while queue:\n #print(queue)\n i,j,L=queue.pop(0)\n #print(i,j,id(L))\n \n if i==0 and j==0:\n print(L)\n continue\n exc=dp[i-1][j]\n #print(exc)\n if exc:\n B=list(L)\n queue.append((i-1,j,B.copy()))\n \n if (j>=A[i-1]):\n inc=dp[i-1][j-A[i-1]]\n if inc:\n B=list(L+[A[i-1]])\n queue.append((i-1,j-A[i-1],B.copy()))\n counter-=1\n \nif __name__=='__main__':\n A=[3, 34, 4, 12, 1,5,2]\n S=9\n n=len(A)\n\n dp=[[-1 for _ in range(S+1)] for _ in range(n+1)]\n print(mem(A,S,n))\n","repo_name":"ChiragSinghai/450-Questions","sub_path":"Chirag/Leetcode_problem/count_subset_sum_problem.py","file_name":"count_subset_sum_problem.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20375188088","text":"from selenium import webdriver\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.webdriver.support.ui import Select\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nimport time\r\n\r\nPATH = \"C:\\Program Files (x86)\\chromedriver.exe\"\r\ndriver = webdriver.Chrome(PATH)\r\n\r\ndriver.get(\"https://www.smv.gob.pe/Frm_EVCP?data=5A959494701B26421F184C081CACF55BFA328E8EBC\")\r\n\r\nxpath1 = '//select[@name = \"ctl00$MainContent$cboDenominacionSocial\"]'\r\n# Select the company\r\nselect1 = Select(driver.find_element_by_xpath(xpath1)) \r\nselect1.select_by_value('38036')\r\nwait = WebDriverWait(driver, 10)\r\n\r\n#Change the value \"0001\" to switch to another Fund\r\nnFondo = \"0001\"\r\nxpathFondo = '//option[@value = ' + nFondo + ']'\r\nelement = wait.until(EC.presence_of_element_located((By.XPATH, xpathFondo )))\r\n\r\n#Select the fund\r\nxpath2 = '//select[@name = \"ctl00$MainContent$cboFondo\"]'\r\nselect2 = Select(driver.find_element_by_xpath(xpath2))\r\nselect2.select_by_value('0001')\r\nprint(driver.find_element_by_xpath(xpathFondo).text)\r\n\r\n#Select the start date\r\nxpath3 = '//input[@name = \"ctl00$MainContent$txtFechDesde\"]'\r\nselect3 = driver.find_element_by_xpath(xpath3)\r\nselect3.clear()\r\nFI = input('Ingrese la fecha en formato \"dd/mm/aaaa\":')\r\n#FI = \"01/08/2020\"\r\nselect3.send_keys(FI)\r\n\r\n#Select the end date\r\nxpath4 = '//input[@name = \"ctl00$MainContent$txtFechHasta\"]'\r\nselect4 = driver.find_element_by_xpath(xpath4)\r\nselect4.clear()\r\nFF = input('Ingrese la fecha en formato \"dd/mm/aaaa\":')\r\n#FF = \"14/08/2020\"\r\nselect4.send_keys(FF)\r\n\r\n#Click the search button\r\nxpathBuscar = '//input[@name = \"ctl00$MainContent$btnBuscar\"]'\r\nselectBuscar = driver.find_element_by_xpath(xpathBuscar)\r\nselectBuscar.click()\r\n\r\n#Read the initial values\r\nxpathTabla = '//table[@id = \"MainContent_grdEVCP_grdDetalle_0\"]'\r\nelement2 = wait.until(EC.presence_of_element_located((By.XPATH, xpathTabla)))\r\nselectFechaI = driver.find_element_by_xpath(xpathTabla + '/tbody/tr[1]/td[1]').text\r\nselectCuotaI = driver.find_element_by_xpath(xpathTabla + '/tbody/tr[1]/td[4]').text\r\nprint('Valor cuota el',selectFechaI,'es',selectCuotaI)\r\n\r\n#Read the final values\r\nselectFechaF = driver.find_element_by_xpath(xpathTabla + '/tbody/tr[last()]/td[1]').text\r\nselectCuotaF = driver.find_element_by_xpath(xpathTabla + 
'/tbody/tr[last()]/td[4]').text\r\nprint('Valor cuota el',selectFechaF,'es',selectCuotaF)\r\n\r\n#Percentage change between the first and last quota values\r\nvar = round((float(selectCuotaF) -float(selectCuotaI))/float(selectCuotaI)*100,2)\r\nprint('Variación:', str(var) + '%')\r\n\r\ntime.sleep(5)\r\ndriver.quit()","repo_name":"enriquereategui94/WebScrapping-SMV-con-Selenium","sub_path":"smvscrap.py","file_name":"smvscrap.py","file_ext":"py","file_size_in_byte":2649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36665158187","text":"# Assignment Secure Coding (Kilo)\n# Author: Maurice Bauer\n# Date: 15.06.2021\n\nimport os\nfrom typing import Dict, List\nfrom .seeds import Seed\nfrom .fuzzers import GrammarFuzzer\nfrom .grammars import START_SYMBOL\n\n\nclass Seeder:\n    \"\"\" Dummy class to be extended. \"\"\"\n\n    def seeds(self) -> List[Seed]:\n        \"\"\"Get a list of seeds.\n\n        Returns:\n            List[Seed]: List of seeds.\n        \"\"\"\n        return []\n\n\nclass StringSeeder(Seeder):\n    def __init__(self, strings: List[str]) -> None:\n        \"\"\"Initializes the `StringSeeder`.\n\n        Args:\n            strings (List[str]): List of strings to be used as seeds.\n        \"\"\"\n        self.strings = strings\n\n    def seeds(self) -> List[Seed]:\n        \"\"\"Get a list of seeds.\n\n        Returns:\n            List[Seed]: List of seeds.\n        \"\"\"\n        return list(map(lambda s: Seed(s), self.strings))\n\n\nclass FileSeeder(Seeder):\n    def __init__(self, path: str) -> None:\n        \"\"\"Initializes the `FileSeeder`.\n\n        Args:\n            path (str): Path to a directory containing files to be used as seeds.\n\n        Raises:\n            NotADirectoryError: Path does not match a directory. Have you passed a file-path?\n        \"\"\"\n        if not os.path.isdir(path):\n            raise NotADirectoryError()\n        self.path = path\n\n    def seeds(self) -> List[Seed]:\n        \"\"\"Get a list of seeds from the directory.\n\n        Returns:\n            List[Seed]: List of seeds.\n        \"\"\"\n        seeds: List[Seed] = []\n        for root, _, files in os.walk(self.path):\n            for file_path in files:\n                with open(os.path.join(root, file_path), 'r') as file:\n                    seeds.append(Seed(file.read()))\n        if len(seeds) == 0:\n            raise FileNotFoundError(f\"No input seeds in {self.path}\")\n        return seeds\n\n\nclass GrammarSeeder(Seeder):\n    def __init__(self, grammar: Dict[str, List[str]], start_symbol: str = START_SYMBOL, max_nonterminals: int = 10, max_expansion_trials: int = 100, n: int = 10) -> None:\n        \"\"\"Initialize the `GrammarSeeder`.\n\n        Args:\n            grammar (Dict[str, List[str]]): Grammar to be used for grammar constructions to create seeds.\n            start_symbol (str, optional): Nonterminal which should be used to start the grammar constructions. Defaults to START_SYMBOL.\n            max_nonterminals (int, optional): Maximum number of nonterminals. Defaults to 10.\n            max_expansion_trials (int, optional): Maximum number of expansion trials. Defaults to 100.\n            n (int, optional): Number of seeds to be created using the grammar. 
Defaults to 10.\n        \"\"\"\n        self.n = n\n        self.grammar = grammar\n        self.start_symbol = start_symbol\n        self.max_nonterminals = max_nonterminals\n        self.max_expansion_trials = max_expansion_trials\n\n    def seeds(self) -> List[Seed]:\n        \"\"\"Get a list of seeds using the grammar.\n\n        Returns:\n            List[Seed]: List of seeds.\n        \"\"\"\n        seeds: List[Seed] = []\n        fuzzer = GrammarFuzzer(self.grammar,\n                               start_symbol=self.start_symbol,\n                               max_nonterminals=self.max_nonterminals,\n                               max_expansion_trials=self.max_expansion_trials)\n        for _ in range(self.n):\n            seeds.append(Seed(fuzzer.fuzz()))\n        return seeds\n","repo_name":"mauricebauer/SecCode_Kilo","sub_path":"baufuzz/seeders.py","file_name":"seeders.py","file_ext":"py","file_size_in_byte":3366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14468348901","text":"# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\n\n\n# useful for handling different item types with a single interface\nimport scrapy\nfrom scrapy.pipelines.images import ImagesPipeline\n\n\nclass MztModelPipeline(ImagesPipeline):\n    def get_media_requests(self, item, info):\n        yield scrapy.Request(\n            url=item['photo_url'],\n            meta={\n                'model_name': item['model_name'],\n                'album_title': item['album_title'],\n            },\n        )\n\n    def file_path(self, request, response=None, info=None, *, item=None):\n        fn = r'result-model/%s/%s/%s' % (request.meta['model_name'], request.meta['album_title'], request.url[-8:])\n        return fn\n","repo_name":"I-m-PhD/mztScrapy","sub_path":"mzt_model/mzt_model/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15577005408","text":"import csv, sys\nfrom configparser import ConfigParser\n\nconfig = ConfigParser()\nconfig.read('picLab.ini')\ndataset_thumbs = (config.get('CSV', 'csv_file'))\n\n\ndef count_img_in_csv(filename):\n    counter = 0\n    with open(filename, 'r') as csvfile:\n        reader = csv.reader(csvfile)\n        for row in reader:\n            counter = counter + 1\n    return counter\n\n\ndef write_pic(r,b,g,pic):\n    with open(dataset_thumbs, 'a') as csvfile:\n        csvwriter = csv.writer(csvfile, delimiter=',')\n        csvwriter.writerow([r, b, g, pic])\n\n\ndef read_csv():\n    with open(dataset_thumbs, 'r', encoding='utf-8') as csvfile:\n        reader = csv.reader(csvfile)\n        try:\n            for row in reader:\n                print(row)\n        except csv.Error as e:\n            sys.exit('file %s, line %d: %s' % (dataset_thumbs, reader.line_num, e))\n\n\ndef load_dataset_in_matrix():\n    m = [[[0 for k in range(256)] for j in range(256)] for i in range(256)]\n    with open(dataset_thumbs, 'r', encoding='utf-8') as csvfile:\n        reader = csv.reader(csvfile)\n        for row in reader:\n            if m[int(row[0])][int(row[1])][int(row[2])]:\n                m[int(row[0])][int(row[1])][int(row[2])] += ','+row[3]\n            else:\n                m[int(row[0])][int(row[1])][int(row[2])] = row[3]\n    return m\n\n","repo_name":"enricapq/picLab","sub_path":"dataset_thumbs.py","file_name":"dataset_thumbs.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15800517508","text":"from enum import Enum\n\nclass Win32LobAppMsiPackageType(str, Enum):\n    # Indicates a per-machine app package.\n    PerMachine = \"perMachine\",\n    # Indicates a per-user app package.\n    PerUser = \"perUser\",\n    # Indicates a dual-purpose app 
package.\n    DualPurpose = \"dualPurpose\",\n\n","repo_name":"microsoftgraph/msgraph-sdk-python","sub_path":"msgraph/generated/models/win32_lob_app_msi_package_type.py","file_name":"win32_lob_app_msi_package_type.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","stars":186,"dataset":"github-code","pt":"37"} +{"seq_id":"20637473687","text":"from typing import List\n\n\ndef bubble_sort(l: List):\n    for i in range(len(l)):\n        for j in range(len(l) - 1):\n            if l[j] > l[j + 1]:\n                l[j], l[j + 1] = l[j + 1], l[j]\n\n\ndef quicksort(A, lo, hi):\n    def partition(lo, hi):\n        pivot = A[hi]\n        left = lo\n        for right in range(lo, hi):\n            if A[right] < pivot:\n                A[left], A[right] = A[right], A[left]\n                left += 1\n\n        A[left], A[hi] = A[hi], A[left]\n        return left\n\n    if lo < hi:\n        pivot = partition(lo, hi)\n        quicksort(A, lo, pivot - 1)\n        quicksort(A, pivot + 1, hi)\n","repo_name":"PARKINHYO/Algorithm","sub_path":"python algorithm interview/17장 정렬/sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"70312806506","text":"#Python supports overloading but in a Pythonic way. Here’s an example:\ndef add(instanceOf, *args):\n    if instanceOf == 'int':\n        result = 0\n    if instanceOf == 'str':\n        result = ''\n    for i in args:\n        result = result + i\n    return result\n\nres=add('int', 3,4,5)\nprint(res)\noutStr=add('str', 'I',' am',' in', ' Python')\nprint(outStr)\n#print add('int', 3,4,5)\n#print add('str', 'I',' am',' in', ' Python')\n\n\n#===============================================\n#def add(a,b):\n    #return a+b\n\n#def add(a,b,c):\n#    return a+b+c\n\n#res=add(4,5)\n#print(res)\n\n\n#The result of the previous code\n#>>> print add(4,5)\n#Traceback (most recent call last):\n#  File \"<stdin>\", line 1, in <module>\n#TypeError: add() missing 1 required positional argument: 'c'\n\n#The reason is: \"TypeError: add() missing 1 required positional argument: 'c'\" (Python 2 reported this as \"add() takes exactly 3 arguments (2 given)\").\n#This is because Python only keeps the latest definition of add(), which takes three arguments, so calling it with two fails.\n\n\n#======================================\nclass Human:\n    def __init__(self,age):\n        self.__age=age\n    @property\n    def age(self):\n        return self.__age\n\n    @age.setter\n    def age(self,age):\n        if age > 0:\n            self.__age = age\n        if age <= 0:\n            self.__age =0\n\n\nman = Human(23)\nprint(man.age)\n\nman.age = -25\nprint(man.age)\n\n#The result of the code of this file until now :\n#asmaa@asmaa-Satellite-C850-C007:~/Documents$ python3 method_overloading.py\n#23\n#0\n#=================================We conclude that there is no method overloading in python======\n#So there is no method overloading in python\n","repo_name":"asmaaismaiel/Python_tasks","sub_path":"Reports/Report-02/method_overloading.py","file_name":"method_overloading.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33909223688","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jun  4 21:59:52 2016\n\n@author: pip5\n\"\"\"\n\nimport requests\nimport os\nimport sys\nimport sqlite3\nimport logging\nfrom functools import reduce\nfrom json import JSONDecodeError\n#TODO: move these settings into a config.cfg file to separate logic from configuration\nSQLITE3DATABASENAME='cc98.db'\nURL=\"http://api.cc98.org/Topic/Hot\"\nDELAYTIME=30\n\n\ndef constructureUpdate(pairOfdata):\n    strformat_int=' %s=%d '\n    strformat_str=\" %s='%s' \"\n    if type(pairOfdata[1])==int:\n        
res=strformat_int%(pairOfdata)\n    else:\n        res=strformat_str%(pairOfdata)\n    return res\n\nclass fetchAndWrite(object):\n    def __init__(self,url,sqlite3DatabaseName,delaytime):\n        self.url=url\n        self.sqlite3DatabaseName=sqlite3DatabaseName\n        self.delaytime=delaytime\n        self.hotdata=None\n        self.DBfileExist=False\n        self.conn=None\n        self.cursor=None\n        self.checkedDBmark=False\n    #just fetch hot pages\n    def fetch_hotpage(self):\n        \"\"\"\n        Fetch the hot page from http://api.cc98.org and parse the JSON body.\n        Returns the parsed list of topics, sorted by index.\n        \"\"\" \n        \n        hotpage=requests.get(self.url)\n        self.hotdata=hotpage.json()#in a list,sorted by index\n        return self.hotdata\n    def checkDBFilexist(self):\n        currentDir=os.listdir()\n        if self.sqlite3DatabaseName in currentDir:\n            #db in the list\n            return True\n        else:\n            return False\n    def createDB(self):\n        #init DB's table \n        self.conn = sqlite3.connect(self.sqlite3DatabaseName)\n        self.cursor = self.conn.cursor()\n        self.cursor.execute(\"\"\"create table hotpage (\n        id int not null,\n        hotRank int ,\n        hitCount int not null,\n        boardid int not null,\n        boardName varchar(30) not null,\n        authorName varchar(30),\n        replyCount int not null,\n        participantCount int not null,\n        title varchar(110) not null,\n        createTime varchar(20) not null,\n        primary key(id) );\"\"\")\n        self.cursor.close()\n        self.conn.close()\n    def constructQuery(self,targetDict):\n        if targetDict['authorName'] is None:\n            targetDict['authorName']='null'\n        keys=targetDict.keys()\n        values=targetDict.values()\n        #column_name=reduce( lambda x,y:x+','+y , keys)\n        column_name=str(tuple(keys)).replace(\"'\",\"\")#all with \"(xxxx)\"\n        values_data=str(tuple(values))\n        return (column_name,values_data )\n        \n    def insertDB(self,oneQuery):\n        #string format\n        \n        #construct the insert sql \n        readyforinsert=self.constructQuery(oneQuery)\n        self.cursor.execute(\"insert into hotpage %s values %s\"%readyforinsert)\n        \n        \n    def searchDB(self,oneQuery):\n        #search str\n        self.cursor.execute('select id from hotpage where id==?', \n                            (oneQuery['id'],)\n                            )\n        sql_result=self.cursor.fetchall()\n        if sql_result==[]:\n            #represent not exist in DB\n            return False\n        else:\n            return sql_result\n        \n        \n    def updateDB(self,oneQuery):\n        whereid=oneQuery['id']\n        del oneQuery['id']\n        keys=oneQuery.keys()\n        values=oneQuery.values()\n        newpair=tuple( map(constructureUpdate,zip(keys,values)) )\n        \n        self.cursor.execute(\n            'update hotpage set %s where id==?'%(\"%s,%s,%s,%s,%s, %s,%s,%s,%s\"%newpair),\n            (whereid,))\n        \n        \n    def writeLogicDB(self):\n        self.conn=sqlite3.connect(self.sqlite3DatabaseName)\n        self.cursor = self.conn.cursor()\n        for i in range(len(self.hotdata)):\n            perPost=self.hotdata[i]\n            perPost['hotRank']=i+1\n            if self.searchDB(perPost):#this query has existed in the DB.\n                self.updateDB(perPost)\n            else:\n                self.insertDB(perPost)\n        \n        self.cursor.close()\n        self.conn.commit()\n        self.conn.close()\n    def runOnce(self):\n        try:\n            self.fetch_hotpage()\n            for i in self.hotdata:\n                logging.info(i['createTime']+'\\t'+i['title'])\n        except JSONDecodeError as e:\n            logging.info('[-]JSONDecodeError:%s'%e)\n            return -1\n        except :\n            logging.info('[-]OtherError')\n            return -2\n        if self.checkedDBmark==False:\n            if self.checkDBFilexist()!=True:\n                self.createDB()\n            self.checkedDBmark=True\n        self.writeLogicDB()\n        return True\n        \n        \n        \n\nif __name__=='__main__':\n    fetchcore=fetchAndWrite(URL,SQLITE3DATABASENAME,DELAYTIME)\n    
fetchcore.runOnce()","repo_name":"6769/m14kabing","sub_path":"Python/CC98Issue/HotPageFetch/fetchData.py","file_name":"fetchData.py","file_ext":"py","file_size_in_byte":4977,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"31836570460","text":"\r\nimport gramatica as g\r\nimport turtle\r\nimport ts as TS\r\nfrom expresiones import *\r\nfrom instrucciones import *\r\nimport instrucciones as IS\r\n\r\ndef procesar_avanzar():\r\n    simbolo = IS.Avanzar.turtleA\r\n    return simbolo\r\n    \r\ndef procesar_girarderecha(instr, ts):\r\n    simbolo = TS.Simbolo(instr.PR , TS.TIPO_DATO.GIRARDERECHA)\r\n    ts.agregar(simbolo)\r\n    \r\ndef procesar_girarizquierda(instr, ts):\r\n    simbolo = TS.Simbolo(instr.PR , TS.TIPO_DATO.GIRARIZQUIERDA)\r\n    ts.agregar(simbolo)\r\n\r\n    \r\ndef procesar_instrucciones() :\r\n    if TS.TIPO_DATO.AVANZAR:\r\n        procesar_avanzar()\r\n    #if isinstance(instr, Avanzar) : procesar_avanzar(instr, ts)\r\n    #elif isinstance(instr,Girar_derecha) : procesar_girarderecha(instr, ts)\r\n    #elif isinstance(instr, Girar_izquierda) : procesar_girarizquierda(instr, ts)\r\n    #else : print('Error: instrucción no válida')\r\n    \r\n    \r\ntortuga = turtle.Turtle()\r\ntortuga.shape(\"turtle\")\r\nwn = turtle.Screen()\r\nwn.bgcolor(\"lightgreen\")\r\n\r\n\r\nf = open(\"program.txt\", \"r\")\r\ninput = f.read()\r\n\r\ninstrucciones = g.parse(input)\r\nts_global = TS.TablaDeSimbolos()\r\n\r\nprocesar_instrucciones()\r\nwn.mainloop()","repo_name":"Katy-Bejar/test","sub_path":"prueba-compi/principal.py","file_name":"principal.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5020283325","text":"from flask import jsonify, make_response\nfrom flask_restx import cors\nfrom flask_jwt_extended import jwt_required\nfrom flask_jwt_extended import verify_jwt_in_request\nfrom flask_jwt_extended import get_jwt\nfrom flask_cors import cross_origin\nfrom api.v1 import api\nfrom api.arguments.schedule_arguments import schedule_arguments\nfrom flask_restx import Resource\nfrom core import limiter, cache\nfrom utils import handle400error, handle404error, handle500error\nfrom api.models.User import User\nfrom api.models.Computer import Computer\nfrom api.models.Schedule import Schedule\nimport schedule\nimport time\n\nschedules_ns = api.namespace('schedule',description='Manages the schedule for when the computers have to power up',decorators=[cors.crossdomain(origin=\"*\")])\n\n\n@schedules_ns.route('',methods=['GET','POST','OPTIONS'])\n\nclass SchedulePowerUp(Resource):\n\t@limiter.limit('1000/hour')\n\t@api.expect(schedule_arguments)\n\t@api.response(200, 'OK')\n\t@api.response(404, 'Data not found')\n\t@api.response(500, 'Unhandled errors')\n\t@api.response(400, 'Invalid parameters')\n\t@cache.cached(timeout=1, query_string=True)\n\t@jwt_required()\n\t@cross_origin()\n\tdef post(self):\n\t\t\"\"\"\n\t\tRegisters time and days to power a computer\n\t\t\"\"\"\n\t\tresult = 0\n\t\ttry:\n\t\t\targs = schedule_arguments.parse_args()\n\t\t\tcomputer = args['computerId']\n\t\t\tdays = args['days']\n\t\t\ttime = args['time']\n\t\t\tuser = args['username']\n\t\t\tuserId = User.fetchByUsername(user)[0]\n\t\t\tscheduleobj = Schedule(userId,computer,time,days)\n\t\texcept:\n\t\t\thandle400error(schedules_ns,\"The provided arguments are not correct. 
Please, check the swagger documentation at /v1\")\n\t\ttry:\n\t\t\tresult = scheduleobj.insert()[0]\n\t\t\tcomputer = Computer.fetch(computer)\n\t\t\t#computer[2] has the mac\n\t\t\tfor day in days:\n\t\t\t\tprint(day)\n\t\t\t\tprint(schedule.get_jobs())\n\t\t\t\tif day.lower() == \"monday\":\n\t\t\t\t\tschedule.every().monday.at(time).do(Computer.powerOn,computer[2],user)\n\t\t\t\telif day.lower() == \"tuesday\":\n\t\t\t\t\tschedule.every().tuesday.at(time).do(Computer.powerOn,computer[2],user)\n\t\t\t\telif day.lower() == \"wednesday\":\n\t\t\t\t\tschedule.every().wednesday.at(time).do(Computer.powerOn,computer[2],user)\n\t\t\t\telif day.lower() == \"thursday\":\n\t\t\t\t\tschedule.every().thursday.at(time).do(Computer.powerOn,computer[2],user)\n\t\t\t\telif day.lower() == \"friday\":\n\t\t\t\t\tschedule.every().friday.at(time).do(Computer.powerOn,computer[2],user)\n\t\t\t\telif day.lower() == \"saturday\":\n\t\t\t\t\tschedule.every().saturday.at(time).do(Computer.powerOn,computer[2],user)\n\t\t\t\telif day.lower() == \"sunday\":\n\t\t\t\t\tschedule.every().sunday.at(time).do(Computer.powerOn,computer[2],user)\n\t\texcept:\n\t\t\thandle500error(schedules_ns)\n\t\tresponse = jsonify(result)\n\t\treturn make_response(response,200)\n\t\t","repo_name":"DarioGar/WakeOnLan","sub_path":"WakeOnLan-server/flask/api/namespaces/schedules_ns.py","file_name":"schedules_ns.py","file_ext":"py","file_size_in_byte":2680,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"34902648663","text":"\"\"\"\nFile: testbag.py\nAuthor: Ken Lambert\nA tester program for bag implementations.\n\"\"\"\n\nfrom arraybag import ArrayBag\nfrom linkedbag import LinkedBag\n\ndef test(bagType):\n \"\"\"Expects a bag type as an argument and runs some tests\n on objects of that type.\"\"\"\n print(\"Testing\", bagType)\n lyst = list(range(1, 11))\n print(\"The list of items added is:\", lyst)\n b = bagType(lyst)\n print(\"Expect the bag's string:\", b)\n print(\"Add 5 more items to test increasing the array size:\")\n for i in range(11, 16):\n b.add(i)\n print(\"Expect the bag's string:\", b)\n\ntest(ArrayBag)\n#test(LinkedBag)\n","repo_name":"Spaun12/CIAT","sub_path":"ASD103A/Student Files/Starter Code/Week 3/Ch 5 Project 3/testbag.py","file_name":"testbag.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"5391175146","text":"cartas=int(input())\nx=0\ndcartas=0\nocartas=0\ndocartas=0\ncuentapar=0\ncuentatercia=0\nwhile x//', views.activate, name='activate'),\n\n # Betklax\n path(\"home_betKlax/\", views.homeBetKlax, name=\"homeBetKlax\"),\n path(\"home_betKlax/betKlax/\", views.betKlax, name=\"betKlax\"),\n path(\"home_betKlax/ratingRecalculation\", views.ratingRecalculation, name=\"ratingRecalculation\"),\n path('home_betKlax/makeBetW//', views.makeBetW, name='makeBetW'),\n path('home_betKlax/makeBetL//', views.makeBetL, name='makeBetL'),\n path(\"home_betKlax/myBets/\", views.myBets, name=\"myBets\"),\n path(\"home_betKlax/addGains/[, ]\", views.addGains, name=\"addGains\"),\n path(\"home_betKlax/finalizeBet/\", views.finalizeBet, name=\"finalizeBet\"),\n\n # Event\n path(\"event/\", views.event, name=\"event\"),\n path(\"event/eventRegistration//\", views.eventRegistration, name=\"eventRegistration\"),\n path(\"event/eventUnregistration//\", views.eventUnregistration, name=\"eventUnregistration\"),\n path(\"event/Chasse_au_tresor/\", views.Chasse, 
name=\"Chasse\"),\n path(\"event/Tournois_des_foufous/\", views.Tournois, name=\"Tournois\"),\n path(\"event/Soiree_KFet/\", views.Kfet, name=\"Kfet\"),\n path(\"event/BBQ_Kermess/\", views.BBK, name=\"BBK\"),\n path(\"event/Master_Of_The_Grid/\", views.Master, name=\"Master\"),\n path(\"event/Wati_by_Chiche/\", views.Chiche, name=\"Chiche\"),\n path(\"event/Tournois_de_cartes/\", views.Cartes, name=\"Cartes\"),\n path(\"event/Tournois_de_billard_et_babyfoot/\", views.Billard, name=\"Billard\"),\n\n # Liste\n path(\"liste/\", views.liste, name=\"liste\"),\n\n # Klaxment\n path(\"klaxment/\", views.klaxment, name=\"klaxment\"),\n\n # Goals\n path(\"goals/\", views.goals, name=\"goals\"),\n\n # Sutom\n path(\"sutom/\", views.sutom, name=\"sutom\"),\n\n # Allos\n path(\"home_allos/\", views.homeAllos, name=\"homeAllos\"),\n path(\"home_allos/allos/\", views.allos, name=\"allos\"),\n path(\"home_allos/myAllos/\", views.myAllos, name=\"myAllos\"),\n path(\"home_allos/myAllos/removeAllo//\", views.removeAllo, name=\"removeAllo\"),\n path(\"home_allos/sendAllo/[, , ]/\", views.sendAllo, name=\"sendAllo\"),\n path(\"home_allos/alloRegistration//\", views.alloRegistration, name=\"alloRegistration\"),\n\n # Footer\n path(\"partners/\", views.partners, name=\"partners\"),\n path(\"thanks/\", views.ourThanks, name=\"thanks\"),\n path(\"values/\", views.ourValues, name=\"values\"),\n path(\"promises/\", views.promises, name=\"promises\"),\n\n # Staff\n path(\"staff/\", views.staff, name=\"staff\"),\n path(\"staff/users\", views.suUsers, name=\"suUsers\"),\n path(\"staff/betCreator/\", views.betCreator, name=\"betCreator\"),\n path(\"staff/betCreator/setVisbleBet//\", views.setVisibleBet, name=\"setVisibleBet\"),\n path(\"staff/eventCreator/\", views.eventCreator, name=\"eventCreator\"),\n path(\"staff/eventCreator/setVisbleEvent//\", views.setVisibleEvent, name=\"setVisibleEvent\"),\n path(\"staff/alloCreator/\", views.alloCreator, name=\"alloCreator\"),\n path(\"staff/alloCreator/setVisbleAllo//\", views.setVisibleAllo, name=\"setVisibleAllo\"),\n path(\"staff/alloRequested/\", views.alloRequested, name=\"alloRequested\"),\n path(\"staff/alloRequested/finalizeAllo//\", views.finalizeAllo, name=\"finalizeAllo\"),\n path(\"staff/alloRequested/alloEmailConfirmation//\", views.alloEmailConfirmation, name=\"alloEmailConfirmation\"),\n\n # SuperUser\n path(\"staff/betSuperUser/\", views.suBets, name=\"suBets\"),\n path(\"staff/betSuperUser/suCheckBet//\", views.suCheckBet, name=\"suCheckBet\"),\n path(\"staff/betSuperUser/closeBet//\", views.closeBet, name=\"closeBet\"),\n path(\"staff/betSuperUser/deleteBet//\", views.deleteBet, name=\"deleteBet\"),\n path(\"staff/betSuperUser/sendBetsKalxcoins/[, ]/\", views.sendBetsKalxcoins, name=\"sendBetsKalxcoins\"),\n path(\"staff/eventSuperUser/\", views.suEvents, name=\"suEvents\"),\n path(\"staff/betSuperUser/suCheckEvent//\", views.suCheckEvent, name=\"suCheckEvent\"),\n path(\"staff/eventSuperUser/closeEvent//\", views.closeEvent, name=\"closeEvent\"),\n path(\"staff/betSuperUser/deleteEvent//\", views.deleteEvent, name=\"deleteEvent\"),\n path(\"staff/alloSuperUser/\", views.suAllos, name=\"suAllos\"),\n path(\"staff/alloSuperUser/closeAllo//\", views.closeAllo, name=\"closeAllo\"),\n path(\"staff/betSuperUser/deleteAllo//\", views.deleteAllo, 
name=\"deleteAllo\"),\n]\n","repo_name":"Gurwan/Mafienssat","sub_path":"main_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":4938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7564684481","text":"import gym\r\nimport gym_bank_world\r\n# from gym_bank_world.Qlearning import train, test\r\nfrom gym_bank_world.Qlearning_without_options import train, test\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nif __name__ == \"__main__\":\r\n env = gym.make('bank_world-v0')\r\n\r\n # train(env, action_space=5, num_episodes=6000, q_path=None , verbose=False)\r\n rewards = []\r\n x = np.arange(1000)\r\n for i in range(10):\r\n _, rew = test(env, action_space=5, q_path=\"QTable_without_options\\QTable_without_options.pkl\", verbose=False)\r\n rew = np.append(rew, [rew[-1] for j in range(1000-len(rew))])\r\n rewards.append(rew)\r\n \r\n rewards = np.array(rewards)\r\n \r\n min_rewards = []\r\n max_rewards = []\r\n avg_rewards = []\r\n for i in range(1000):\r\n min_rewards.append(np.min(rewards[:,i]))\r\n max_rewards.append(np.max(rewards[:,i]))\r\n avg_rewards.append(np.average(rewards[:,i]))\r\n \r\n fig, ax = plt.subplots()\r\n ax.fill_between(x, min_rewards, max_rewards, facecolor=\"blue\", alpha=0.3)\r\n ax.plot(x, avg_rewards, \"r--\")\r\n plt.show()\r\n\r\n","repo_name":"petrichor1998/Multi_Agent_RL","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33251731102","text":"import os\nimport numpy as np\n\nfrom reprod_log import ReprodLogger, ReprodDiffHelper\n\nfrom utilities import load_config,build_paddle_data_pipeline, build_torch_data_pipeline\nfrom paddlevision.datasets.dataset import Words\n\ndef test_data_pipeline(params):\n image_path = os.path.join(params['base_dir'], params['train_image_path'])\n label_path = os.path.join(params['base_dir'], params['train_label_path'])\n words = Words(os.path.join(params['base_dir'], params['word_path']))\n\n # 构造paddle和torch两个数据集\n paddle_dataset, paddle_dataloader = build_paddle_data_pipeline(image_path, label_path, words)\n torch_dataset, torch_dataloader = build_torch_data_pipeline(image_path, label_path, words)\n\n # logger相关\n logger_paddle_data = ReprodLogger()\n logger_torch_data = ReprodLogger()\n\n logger_paddle_data.add(\"length\", np.array(len(paddle_dataset)))\n logger_torch_data.add(\"length\", np.array(len(torch_dataset)))\n\n for idx, (paddle_batch, torch_batch) in enumerate(zip(paddle_dataloader, torch_dataloader)):\n if idx >= 5:\n break\n logger_paddle_data.add(f\"dataloader_{idx}\", paddle_batch[0].numpy())\n logger_torch_data.add(f\"dataloader_{idx}\", torch_batch[0].detach().cpu().numpy())\n logger_paddle_data.save(os.path.join(params['result_dir'],'result/data_paddle.npy'))\n logger_torch_data.save(os.path.join(params['result_dir'],'result/data_ref.npy'))\n\n\nif __name__ == \"__main__\":\n params = load_config('alignment/step1-5/alignment_config.yaml')\n test_data_pipeline(params)\n\n # load data\n diff_helper = ReprodDiffHelper()\n torch_info = diff_helper.load_info(os.path.join(params['result_dir'],'data_ref.npy'))\n paddle_info = diff_helper.load_info(os.path.join(params['result_dir'],'data_paddle.npy'))\n\n # compare result and produce log\n diff_helper.compare_info(torch_info, paddle_info)\n 
diff_helper.report(path=os.path.join(params['result_dir'],'log/data_diff.log'))","repo_name":"Lllllolita/CAN_Paddle","sub_path":"alignment/step1-5/02_test_data.py","file_name":"02_test_data.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"37"} +{"seq_id":"5068794877","text":"# Identifier in Python\n\n\"\"\"\n In the identifier naming, only the following are ALLOWED:\n - English\n - Chinese\n - Number\n - Underline (_)\n\n Warning\n - Chinese is NOT recommended\n - Can't start with a number\n\"\"\"\n\nfoo = \"foo\"\nFoo = \"foo\"\n\n# foo and Foo are 2 different variables\n\n# The Keywords can't be an identifier\n# Such as False | True | None | and | ...\n\n# 名字 = \"张三\"\nname = \"Andrea\"\n\n_name = \"foo\"\n","repo_name":"ryanfawcett/py-quick-start","sub_path":"01-basic-syntax/06-identifier.py","file_name":"06-identifier.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16680704428","text":"# Function to validate json files:\n\nimport enum # For ValueType as enumerations\nimport ast # For converting string representation of list to actual list variable\n\n# creating enumerations using class\nclass ValueType(enum.Enum):\n string = 1\n number = 2\n list = 3\n jsonObject = 4\n\nspaceChars = [\"\",\" \",\"\\n\",\"\\t\"]\n\n\ndef validateJsonFile(jsonFilePath):\n print(\"Validating json file at: \",jsonFilePath)\n try:\n print(\"Attempting to read the json file\")\n jsonFile = open(jsonFilePath, 'r')\n\n jsonFileContent = jsonFile.read()\n print(\"Read Successfuly\")\n\n return validateJsonObject(jsonFileContent);\n\n\n\n\n except FileNotFoundError:\n print(\"Error. File to read not found at:\",jsonFilePath)\n except IOError:\n print(\"Error. Unable to read file:\",jsonFilePath)\n except:\n print(\"Unhandled Exception while attempting to read the file:\",jsonFilePath)\n\n return [False, \"Could not read json file\"];\n\n\ndef validateJsonObject(jsonobj):\n if jsonobj is None:\n return [False, \"Param jsonobj is Null\"];\n if not type(jsonobj) == str:\n return [False, \"Param jsonobj is of type: \"+str(type(jsonobj))+\" instead of a String type\"];\n\n # Trim the string:\n jsonobj = jsonobj.strip()\n\n if not jsonobj[0] == \"{\":\n return [False, \"Param jsonobj doesn't start with a '{' sign\"];\n if not jsonobj[-1] == \"}\":\n return [False, \"Param jsonobj doesn't end with a '}' sign\"];\n if len(jsonobj) == 2:\n return [True, jsonobj]\n\n subJsonObj = jsonobj[1:-1]\n curInd = 0;\n while (curInd < len(subJsonObj)):\n nextPair = getNextPair(curInd, subJsonObj);\n if nextPair[0] is None:\n return [False, nextPair[1]];\n\n print(nextPair)\n curInd = nextPair[1];\n getCommaResult = getNextCommaOrEndSignIndex(curInd+1,subJsonObj);\n\n curInd = getCommaResult[0]\n if curInd == -1:\n errorStr = getCommaResult[1];\n return [False , errorStr];\n\n curInd += 1;\n\n return [True, \"Json Object is Valid\"];\n\n\ndef getNextPair(curInd , jsonobj):\n pair = []\n\n key = getNextKey(curInd,jsonobj);\n if key[0] is None:\n errorStr = key[1]\n return [None,errorStr];\n\n pair.append(key[0]);\n curInd = key[1];\n\n colonResult = getNextColonSignIndex(curInd+1,jsonobj);\n curInd = colonResult[0]\n if curInd == -1:\n errorStr = \"Could not find Colon ':' sign. 
\"+colonResult[1]+\"\\nStart Iindex=\" + str(curInd) + \",jsonobj=\" + jsonobj\n return [None, errorStr];\n\n value = getNextValue(curInd+1,jsonobj)\n if value[0] is None:\n errorStr = value[1];\n return [None, errorStr];\n\n pair.append(value[0])\n curInd = value[1];\n return [pair,curInd];\n\n\ndef getNextKey(curInd , jsonObj):\n key = \"\";\n foundInd = -1;\n foundFirstQuote = False;\n errorStr = \"Could not get next key\";\n for i in range(curInd,len(jsonObj)):\n ch = jsonObj[i];\n\n # Continue until encounter the first quote sign \"\n if (not foundFirstQuote) and ch in spaceChars:\n continue;\n\n # When found - mark 'foundFirstQuote'\n elif ch == '\"' and (not foundFirstQuote):\n foundFirstQuote = True;\n\n # Everything inside first quite and second quite is a key\n # so add it to a string variable: key\n elif foundFirstQuote and ch != '\"':\n key += ch;\n\n # If found the second quote sign \" - mark found index, and break the loop\n elif ch == '\"' and foundFirstQuote:\n foundInd = i;\n break;\n\n else:\n errorStr = \" Found bad char: '\"+ch+\"' while attempting to get next Key. At index: \"+str(curInd) + \",jsonObj=\" + jsonObj;\n break;\n\n if foundInd != 1 and type(key) == str and len(key) > 0:\n return [key,foundInd]\n else:\n return [None,errorStr]\n\ndef getNextColonSignIndex(curInd , jsonObj):\n\n for i in range(curInd,len(jsonObj)):\n ch = jsonObj[i];\n\n if ch in spaceChars:\n continue;\n elif ch == ':':\n info = \"\"\n return [i,info];\n else:\n info = \"Found Bad Char: \"+ch;\n break;\n\n return [-1,info];\n\ndef getNextValue(curInd , jsonObj):\n readResult = [];\n valueType = -1;\n info = \"\";\n for i in range(curInd,len(jsonObj)):\n ch = jsonObj[i];\n\n # Looking for the value:\n if valueType == -1 and ch in spaceChars:\n continue;\n\n # String Value\n elif valueType == -1 and ch == '\"' :\n valueType = (ValueType.string).value;\n readResult = readStringValue(i , jsonObj)\n break;\n\n # Number Value\n elif (valueType == -1) and ch.isdigit():\n info += \" Found digit - Looking for Number value.\"\n valueType = (ValueType.number).value;\n readResult = readNumberValue(i, jsonObj)\n break;\n\n # List Value\n elif valueType == -1 and ch == '[':\n valueType = (ValueType.list).value;\n readResult = readListValue(i, jsonObj);\n break;\n\n # JsonObject Value\n elif valueType == -1 and ch == '{':\n valueType = (ValueType.jsonObject).value;\n readResult = readJsonObjectValue(i, jsonObj);\n break;\n else:\n continue;\n\n foundInd = readResult[0];\n value = readResult[1];\n info += readResult[2];\n if foundInd == -1:\n errorStr = info + \" Error while trying to read value: \" + value\n return [None, errorStr];\n\n validateResult = validateValue(value,valueType,foundInd)\n\n if validateResult[0] == True:\n return [value,foundInd];\n else:\n errorStr = info + \" Error: \"+validateResult[1];\n return [None,errorStr];\n\ndef readStringValue(curInd, jsonObj):\n value = \"\";\n info = \"\";\n foundFirstQoute = False\n foundInd = -1;\n for i in range(curInd,len(jsonObj)):\n ch = jsonObj[i];\n if not foundFirstQoute and ch in spaceChars:\n continue;\n\n elif not foundFirstQoute and ch == '\"':\n info += \" Found first quote sign '\"' - looking for a String value.'\n foundFirstQoute = True;\n continue;\n\n elif foundFirstQoute and not ch == '\"':\n value += ch;\n\n elif foundFirstQoute and ch == '\"':\n foundInd = i;\n break;\n\n return [foundInd , value , info]\n\ndef readNumberValue(curInd, jsonObj):\n value = \"\";\n info = \"\";\n foundInd = -1;\n for i in range(curInd,len(jsonObj)):\n 
ch = jsonObj[i];\n        if ch.isdigit():\n            foundInd = i\n            value += ch;\n        elif (ch in spaceChars) or ch == '}' or ch == ',':\n            break;\n        else:\n            # Bad value case\n            info += \" Found bad char: '\" + ch + \"' while reading the number: '\" + value + \"'.\";\n            foundInd = -1;\n            break;\n\n    return [foundInd , value , info]\n\ndef readJsonObjectValue(curInd, jsonObj):\n    value = \"\";\n    info = \"\";\n    foundInd = -1;\n    curlyBracketsCount = 0;\n    for i in range(curInd,len(jsonObj)):\n        ch = jsonObj[i];\n\n        value += ch;\n\n        if ch == '{':\n            curlyBracketsCount += 1;\n        elif ch == '}':\n            curlyBracketsCount -= 1;\n            if curlyBracketsCount == 0:\n                foundInd = i;\n                break;\n\n    return [foundInd , value , info]\n\ndef readListValue(curInd, jsonObj):\n    value = \"\";\n    info = \"\";\n    foundInd = -1;\n    squareBracketsCount = 0;\n    for i in range(curInd,len(jsonObj)):\n        ch = jsonObj[i];\n\n        value += ch;\n\n        if ch == '[':\n            squareBracketsCount += 1;\n        elif ch == ']':\n            squareBracketsCount -= 1;\n            if squareBracketsCount == 0:\n                foundInd = i;\n                break;\n\n    return [foundInd , value , info]\n\ndef validateValue(value,valueType,foundInd):\n    if foundInd == -1:\n        return [False, \"Found index is -1. Could not find value(or end of value)\"];\n\n    if valueType == (ValueType.jsonObject).value:\n        return (validateJsonObject(value));\n\n    if valueType == (ValueType.list).value:\n        return (validateListValue(value));\n\n    return [True,\"Value is ok\"];\n\n\ndef validateListValue(value):\n    if len(value) == 2:\n        return [True, \"List is empty\"]\n\n    try:\n        validateRes = ast.literal_eval(value)\n        return [True, \"\"]\n    except SyntaxError:\n        info = \"List: \"+value+\"\\nis of wrong syntax. Correct list values syntax is: [\\\"StringVal\\\" , Digits, True, False, [AnotherList] ]\"\n        return [False, info]\n    except:\n        info = \"Unknown error occurred while validating list: \"+value\n        return [False, info]\n\n\n\n\ndef getNextCommaOrEndSignIndex(curInd,jsonobj):\n    for i in range(curInd,len(jsonobj)):\n        ch = jsonobj[i]\n\n        if ch in spaceChars:\n            continue;\n        elif ch == ',':\n            # Check if have enough chars for another key\n            if (len(jsonobj) - i) >= 5:\n                info = \"\"\n                return [i, info];\n            else:\n                info = \"Found comma sign ',' but not enough space for another key after it before the end of the string.\"\n                return [-1, info];\n        else:\n            info = \"Found Bad Char: \"+ch;\n            return [-1, info];\n\n    # Default value - no comma\n    info = \"End of object - No comma found after char index \"+str(curInd)+\"\\nObject: \" + jsonobj;\n    return [len(jsonobj), info];\n\n\n\n\n\n\n\nprint(\"started\")\nmyFile = \"D:/aTraining_1.txt\"\nresult = validateJsonFile(myFile)\n\n\nif result[0]:\n    print(\"Success\",result[1])\nelse:\n    print(result[1])\n","repo_name":"Davis8988/PythonScripts","sub_path":"ValidateJsonFile/ValidateJsonFile.py","file_name":"ValidateJsonFile.py","file_ext":"py","file_size_in_byte":9705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13159096151","text":"import os\nfrom dataclasses import dataclass\nfrom typing import Union, List\n\nimport wandb\nfrom loguru import logger\n\nfrom data_tree.wandb_util.artifact_getter import WandbArtifactGetter\nfrom data_tree.wandb_util.artifact_identifier import ArtifactMetadata\nfrom data_tree.wandb_util.local_artifact import LocalArtifact\n\n\n@dataclass\nclass LoggableArtifact:\n    metadata: ArtifactMetadata\n    file_or_dirs: Union[str, List[str]]\n\n\n@dataclass\nclass ArtifactLogger:\n    wandb: wandb\n    wandb_artifact_getter: WandbArtifactGetter\n\n    def save_loggable_artifact(self, art: 
LoggableArtifact):\n return self.save2(art.file_or_dirs, art.metadata)\n\n def save2(self, file_or_dirs: Union[str, List[str]], metadata: ArtifactMetadata):\n if metadata.identifier.version is None:\n aliases = []\n elif metadata.identifier.version != \"latest\":\n aliases = [metadata.identifier.version]\n else:\n aliases = []\n aliases += metadata.aliases\n return self.save(name=metadata.identifier.name, file_or_dirs=file_or_dirs, description=metadata.description,\n aliases=aliases, type=metadata.identifier.type, metadata=metadata.metadata)\n\n def save(self, name, type,\n file_or_dirs: Union[str, List[str]],\n description: str = None,\n metadata: dict = None,\n aliases: list = None) -> LocalArtifact:\n logger.info(f\"making artifact with name:{name}, type:{type}\")\n art = self.wandb_artifact_getter.artifact_for_save(name, type=type, metadata=metadata, description=description)\n if isinstance(file_or_dirs, str):\n file_or_dirs = [file_or_dirs]\n for p in file_or_dirs:\n if os.path.isdir(p):\n logger.info(f\"adding artifact from dir:{p}\")\n art.add_dir(p)\n else:\n logger.info(f\"adding artifact from file:{p}\")\n art.add_file(p)\n logger.info(f\"logging artifact... name={name}/aliases={aliases}\")\n self.wandb.log_artifact(art, aliases=aliases)\n logger.info(f\"logging artifact...done.\")\n return LocalArtifact(self.wandb, art)\n\n def save_table(self, name, type, table, description: str = None, metadata=None):\n art = self.wandb_artifact_getter.artifact_for_save(name, type=type, metadata=metadata, description=description)\n art.add(table, type)\n logger.info(f\"logging artifact {name}...\")\n self.wandb.log_artifact(art)\n logger.info(f\"logging artifact...done.\")\n return LocalArtifact(self.wandb, art)\n\n def save_artifact(self,\n name,\n type,\n object,\n object_name: str,\n description=None,\n metadata=None):\n logger.info(f\"making artifact with name:{name}, type:{type}\")\n art = self.wandb_artifact_getter.artifact_for_save(name, type=type, metadata=metadata, description=description)\n art.add(object, object_name)\n logger.info(f\"logging artifact...\")\n self.wandb.log_artifact(art)\n logger.info(f\"logging artifact...done.\")\n return LocalArtifact(self.wandb, art)\n\n def save_artifact2(self, target, target_name, artifact_metadata: ArtifactMetadata):\n logger.info(f\"creating artifact with metadata:{artifact_metadata}\")\n art = self.wandb_artifact_getter.artifact_for_save(\n name=artifact_metadata.identifier.name,\n type=artifact_metadata.identifier.type,\n metadata=artifact_metadata.metadata,\n description=artifact_metadata.description,\n )\n art.add(target, target_name)\n self.wandb.log_artifact(art, aliases=artifact_metadata.aliases)\n logger.info(f\"logging artifact...done.\")\n return LocalArtifact(self.wandb, art)\n","repo_name":"proboscis/data_tree","sub_path":"data_tree/wandb_util/artifact_logger.py","file_name":"artifact_logger.py","file_ext":"py","file_size_in_byte":3869,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"36813705369","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\nCreated on March 7, 2014\n\n@author: Chunwei Yan @ PKU\n@mail: yanchunwei@outlook.com\n'''\nfrom __future__ import division\nimport numpy as np\n\nclass BaseNode(object):\n '''\n base model of tree's node\n '''\n def __init__(self, lchild=None, rchild=None):\n self.lchild = lchild\n self.rchild = rchild\n\n def is_leaf(self):\n return not (self.lchild or self.rchild)\n\n\nclass BinaryNode(BaseNode):\n def 
__init__(self, lchild=None, rchild=None, vector=None):\n        # index to determine whether to update vectors\n        BaseNode.__init__(self, lchild, rchild)\n        self.pred_index = 0\n        # count of children\n        self.n_children = np.float32(1.0)\n        self.vector = vector\n        self.delta_out1 = None\n        self.delta_out2 = None\n        self.parent_delta = None\n        self.Y1C1 = None\n        self.Y2C2 = None\n\n\n\nif __name__ == \"__main__\":\n    pass\n\n","repo_name":"Superjomn/NeuralNetworks","sub_path":"models/recursive_autoencoder/tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"1495439065","text":"first_n = int(input())\nnew_num = 0\ncount = 0\nn = first_n\nwhile True:\n    if int(n) < 10:\n        n = \"0\" + str(int(n))\n    else:\n        n = str(n)\n    n_arr = list(map(int, n))\n    a = n_arr[0] + n_arr[1]\n    if a < 10:\n        new_num = str(n_arr[1]) + str(a)\n    else:\n        new_num = str(n_arr[1]) + str(a % 10)\n    count += 1\n    n = new_num\n    if int(new_num) == first_n:\n        print(count)\n        break","repo_name":"junghyeonsu/algorithm-python","sub_path":"BAEKJOON/Step by step/while statement/1110.py","file_name":"1110.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22139908695","text":"from __future__ import print_function\nimport argparse\nimport datetime\nfrom utils.load_data.data_loader_instances import load_dataset\nfrom utils.utils import importing_model\nimport torch\nimport math\nimport os\nfrom utils.utils import save_model, load_model\nfrom utils.optimizer import AdamNormGrad\nimport time\nfrom utils.training import train_one_epoch\nfrom utils.evaluation import evaluate_loss, final_evaluation\nimport random\n\ndef str2bool(v):\n    if isinstance(v, bool):\n        return v\n    if v.lower() in ('yes', 'true', 't', 'y', '1'):\n        return True\n    elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n        return False\n    else:\n        raise argparse.ArgumentTypeError('Boolean value expected.')\n\n\nparser = argparse.ArgumentParser(description='VAE+VampPrior')\nparser.add_argument('--batch_size', type=int, default=100, metavar='BStrain',\n                    help='input batch size for training (default: 100)')\nparser.add_argument('--test_batch_size', type=int, default=100, metavar='BStest',\n                    help='input batch size for testing (default: 100)')\nparser.add_argument('--epochs', type=int, default=2000, metavar='E',\n                    help='number of epochs to train (default: 2000)')\nparser.add_argument('--lr', type=float, default=0.0005, metavar='LR',\n                    help='learning rate (default: 0.0005)')\nparser.add_argument('--early_stopping_epochs', type=int, default=50, metavar='ES',\n                    help='number of epochs for early stopping')\nparser.add_argument('--z1_size', type=int, default=40, metavar='M1',\n                    help='latent size')\nparser.add_argument('--z2_size', type=int, default=40, metavar='M2',\n                    help='latent size')\nparser.add_argument('--input_size', type=int, default=[1, 28, 28], metavar='D',\n                    help='input size')\nparser.add_argument('--number_components', type=int, default=50000, metavar='NC',\n                    help='number of pseudo-inputs')\nparser.add_argument('--pseudoinputs_mean', type=float, default=-0.05, metavar='PM',\n                    help='mean for init pseudo-inputs')\nparser.add_argument('--pseudoinputs_std', type=float, default=0.01, metavar='PS',\n                    help='std for init pseudo-inputs')\nparser.add_argument('--use_training_data_init', action='store_true', default=False,\n                    help='initialize pseudo-inputs with randomly chosen 
training data')\nparser.add_argument('--model_name', type=str, default='vae', metavar='MN',\n                    help='model name: vae, hvae_2level, convhvae_2level')\nparser.add_argument('--prior', type=str, default='vampprior', metavar='P',\n                    help='prior: standard, vampprior, exemplar_prior')\nparser.add_argument('--input_type', type=str, default='binary', metavar='IT',\n                    help='type of the input: binary, gray, continuous, pca')\nparser.add_argument('--S', type=int, default=5000, metavar='SLL',\n                    help='number of samples used for approximating log-likelihood,'\n                         'i.e. number of samples in IWAE')\nparser.add_argument('--MB', type=int, default=100, metavar='MBLL',\n                    help='size of a mini-batch used for approximating log-likelihood')\nparser.add_argument('--use_whole_train', type=str2bool, default=False,\n                    help='use whole training data points at test time')\nparser.add_argument('--dataset_name', type=str, default='freyfaces', metavar='DN',\n                    help='name of the dataset: static_mnist, dynamic_mnist, omniglot, caltech101silhouettes,'\n                         ' histopathologyGray, freyfaces, cifar10')\nparser.add_argument('--dynamic_binarization', action='store_true', default=False,\n                    help='allow dynamic binarization')\nparser.add_argument('--seed', type=int, default=14, metavar='S',\n                    help='random seed (default: 14)')\n\nparser.add_argument('--no_mask', action='store_true', default=False, help='no leave one out')\n\nparser.add_argument('--parent_dir', type=str, default='')\nparser.add_argument('--same_variational_var', type=str2bool, default=False,\n                    help='use same variance for different dimensions')\nparser.add_argument('--model_signature', type=str, default='', help='load from this directory and continue training')\nparser.add_argument('--warmup', type=int, default=100, metavar='WU',\n                    help='number of epochs for warm-up')\nparser.add_argument('--slurm_task_id', type=str, default='')\nparser.add_argument('--slurm_job_id', type=str, default='')\nparser.add_argument('--approximate_prior', type=str2bool, default=False)\nparser.add_argument('--just_evaluate', type=str2bool, default=False)\nparser.add_argument('--no_attention', type=str2bool, default=False)\nparser.add_argument('--approximate_k', type=int, default=10)\nparser.add_argument('--hidden_size', type=int, default=300)\nparser.add_argument('--base_dir', type=str, default='snapshots/')\nparser.add_argument('--continuous', type=str2bool, default=False)\nparser.add_argument('--use_logit', type=str2bool, default=False)\nparser.add_argument('--lambd', type=float, default=1e-4)\nparser.add_argument('--bottleneck', type=int, default=6)\nparser.add_argument('--training_set_size', type=int, default=50000)\n\n\ndef initial_or_load(checkpoint_path_load, model, optimizer, dir):\n    if os.path.exists(checkpoint_path_load):\n        model_loaded_str = \"******model is loaded*********\"\n        print(model_loaded_str)\n        with open(dir + 'whole_log.txt', 'a') as f:\n            print(model_loaded_str, file=f)\n        checkpoint = load_model(checkpoint_path_load, model, optimizer)\n        begin_epoch = checkpoint['epoch']\n        best_loss = checkpoint['best_loss']\n        e = checkpoint['e']\n    else:\n        torch.manual_seed(args.seed)\n        if args.device=='cuda':\n            torch.cuda.manual_seed(args.seed)\n        random.seed(args.seed)\n        begin_epoch = 1\n        best_loss = math.inf\n        e = 0\n    return begin_epoch, best_loss, e\n\n\ndef save_loss_files(folder, train_loss_history,\n                    train_re_history, train_kl_history, val_loss_history, val_re_history, val_kl_history):\n    torch.save(train_loss_history, folder + '.train_loss')\n    torch.save(train_re_history, folder + 
'.train_re')\n torch.save(train_kl_history, folder + '.train_kl')\n torch.save(val_loss_history, folder + '.val_loss')\n torch.save(val_re_history, folder + '.val_re')\n torch.save(val_kl_history, folder + '.val_kl')\n\n\ndef run_density_estimation(args, train_loader_input, val_loader_input, test_loader_input, model, optimizer, dir, model_name='vae'):\n torch.save(args, dir + args.model_name + '.config')\n train_loss_history, train_re_history, train_kl_history, val_loss_history, val_re_history, val_kl_history, \\\n time_history = [], [], [], [], [], [], []\n checkpoint_path_save = os.path.join(dir, 'checkpoint_temp.pth')\n checkpoint_path_load = os.path.join(dir, 'checkpoint.pth')\n best_model_path_load = os.path.join(dir, 'checkpoint_best.pth')\n decayed = False\n time_history = []\n # with torch.autograd.detect_anomaly():\n begin_epoch, best_loss, e = initial_or_load(checkpoint_path_load, model, optimizer, dir)\n if args.just_evaluate is False:\n for epoch in range(begin_epoch, args.epochs + 1):\n time_start = time.time()\n train_loss_epoch, train_re_epoch, train_kl_epoch \\\n = train_one_epoch(epoch, args, train_loader_input, model, optimizer)\n with torch.no_grad():\n val_loss_epoch, val_re_epoch, val_kl_epoch = evaluate_loss(args, model, val_loader_input,\n dataset=train_loader_input.dataset)\n time_end = time.time()\n time_elapsed = time_end - time_start\n content = {'epoch': epoch, 'state_dict': model.state_dict(),\n 'optimizer': optimizer.state_dict(), 'best_loss': best_loss, 'e': e}\n if epoch % 10 == 0:\n save_model(checkpoint_path_save, checkpoint_path_load, content)\n if val_loss_epoch < best_loss:\n e = 0\n best_loss = val_loss_epoch\n print('->model saved<-')\n save_model(checkpoint_path_save, best_model_path_load, content)\n else:\n e += 1\n if epoch < args.warmup:\n e = 0\n if e > args.early_stopping_epochs:\n break\n\n if math.isnan(val_loss_epoch):\n print(\"***** val loss is Nan *******\")\n break\n\n for param_group in optimizer.param_groups:\n learning_rate = param_group['lr']\n break\n\n time_history.append(time_elapsed)\n\n epoch_report = 'Epoch: {}/{}, Time elapsed: {:.2f}s\\n' \\\n 'learning rate: {:.5f}\\n' \\\n '* Train loss: {:.2f} (RE: {:.2f}, KL: {:.2f})\\n' \\\n 'o Val. 
loss: {:.2f} (RE: {:.2f}, KL: {:.2f})\\n' \\\n '--> Early stopping: {}/{} (BEST: {:.2f})\\n'.format(epoch, args.epochs, time_elapsed,\n learning_rate,\n train_loss_epoch, train_re_epoch,\n train_kl_epoch, val_loss_epoch,\n val_re_epoch, val_kl_epoch, e,\n args.early_stopping_epochs, best_loss)\n\n if args.prior == 'exemplar_prior':\n print(\"Prior Variance\", model.prior_log_variance.item())\n if args.continuous is True:\n print(\"Decoder Variance\", model.decoder_logstd.item())\n print(epoch_report)\n with open(dir + 'whole_log.txt', 'a') as f:\n print(epoch_report, file=f)\n\n train_loss_history.append(train_loss_epoch), train_re_history.append(\n train_re_epoch), train_kl_history.append(train_kl_epoch)\n val_loss_history.append(val_loss_epoch), val_re_history.append(val_re_epoch), val_kl_history.append(\n val_kl_epoch)\n\n save_loss_files(dir + args.model_name, train_loss_history,\n train_re_history, train_kl_history, val_loss_history, val_re_history, val_kl_history)\n\n with torch.no_grad():\n final_evaluation(train_loader_input, test_loader_input, val_loader_input,\n best_model_path_load, model, optimizer, args, dir)\n\n\ndef run(args, kwargs):\n print('create model')\n # importing model\n VAE = importing_model(args)\n print('load data')\n train_loader, val_loader, test_loader, args = load_dataset(args, use_fixed_validation=True, **kwargs)\n if args.slurm_job_id != '':\n args.model_signature = str(args.seed)\n # base_dir = 'checkpoints/final_report/'\n elif args.model_signature == '':\n args.model_signature = str(datetime.datetime.now())[0:19]\n\n if args.parent_dir == '':\n args.parent_dir = args.prior + '_on_' + args.dataset_name+'_model_name='+args.model_name\n model_name = args.dataset_name + '_' + args.model_name + '_' + args.prior \\\n + '_(components_' + str(args.number_components) + ', lr=' + str(args.lr) + ')'\n snapshots_path = os.path.join(args.base_dir, args.parent_dir) + '/'\n dir = snapshots_path + args.model_signature + '_' + model_name + '_' + args.parent_dir + '/'\n\n if args.just_evaluate:\n config = torch.load(dir + args.model_name + '.config')\n config.translation = False\n config.hidden_size = 300\n model = VAE(config)\n else:\n model = VAE(args)\n if not os.path.exists(dir):\n os.makedirs(dir)\n model.to(args.device)\n optimizer = AdamNormGrad(model.parameters(), lr=args.lr)\n print(args)\n config_file = dir+'vae_config.txt'\n with open(config_file, 'a') as f:\n print(args, file=f)\n run_density_estimation(args, train_loader, val_loader, test_loader, model, optimizer, dir, model_name = args.model_name)\n\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n args.device = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n kwargs = {'num_workers': 2, 'pin_memory': True} if args.device=='cuda' else {}\n run(args, kwargs)\n\n","repo_name":"sajadn/Exemplar-VAE","sub_path":"density_estimation.py","file_name":"density_estimation.py","file_ext":"py","file_size_in_byte":12583,"program_lang":"python","lang":"en","doc_type":"code","stars":67,"dataset":"github-code","pt":"37"} +{"seq_id":"43776428672","text":"\nimport sys, os\n#sys.path.insert(0, os.path.abspath(\"../detect_to_standard/\"))\n#from detection_visualizer import *\nimport numpy as np\n\n#sys.path.append(os.path.abspath(\"../keypoint_to_standard/\"))\n#from keypoint_visualizer import *\nimport torchvision\nfrom PIL import Image\nimport cv2\ndraw_threshold = 0.4\nclasses = ['person']\ndef rescale_img(img,rescale_img_factor):\n shape = img.size\n w = shape[0]\n h = shape[1]\n desired_h = 
h*rescale_img_factor\n    desired_w = w*rescale_img_factor\n    img = torchvision.transforms.Resize([int(desired_h), int(desired_w)])(img)\n    w_pad = (w - desired_w)/2.\n    h_pad = (h - desired_h)/2.\n    img = torchvision.transforms.Pad((int(w_pad),int(h_pad)))(img)\n    return(img)\n\n\n\ndef show_all_from_dict(keypoints_list_list, bbox_dets_list_list, classes, rescale_img_factor = 1., path = None, output_folder_path = None, flag_track= False, flag_method = False):\n    cap=cv2.VideoCapture(path)\n    ret,im=cap.read()\n\n    img_name=0\n    flag =0\n    for i,bbox_dets_list in enumerate(bbox_dets_list_list) :\n        _,img=cap.read()\n        bbox_dets_init = bbox_dets_list[0]\n\n        if flag == 0:\n            h,w=img.shape[0],img.shape[1]\n            fourcc=cv2.VideoWriter_fourcc(*'MPEG')\n            vid=cv2.VideoWriter(output_folder_path,fourcc,25.0,(w,h))\n            flag+=1\n\n\n        for j,candidate in enumerate(bbox_dets_list) :\n\n            bbox = np.array(candidate[\"bbox\"]).astype(int)\n            method = candidate[\"method\"]\n\n            if flag_track is True:\n                track_id = candidate[\"track_id\"]\n                img = draw_bbox(img, bbox, classes, method, track_id = track_id, flag_method = flag_method)\n        \n        img=np.uint8(img)\n        vid.write(img)\n        \n    cap.release() \n    vid.release() \n    return\n\ndef draw_bbox(img, bbox, classes, method, track_id = -1, img_id = -1, flag_method = False):\n    color=(0,0,255)\n    #print(color)\n\n    cv2.rectangle(img,\n                  (bbox[0], bbox[1]),\n                  (bbox[2], bbox[3]),\n                  color = color,\n                  thickness = 2)\n\n    cls_name = classes[0]\n    font = cv2.FONT_HERSHEY_SIMPLEX\n    color=(255,0,0)\n    if track_id == -1:\n        cv2.putText(img,\n                    #'{:s} {:.2f}'.format(cls_name, score),\n                    '{:s}'.format(cls_name),\n                    (bbox[0], bbox[1]-5),\n                    font,\n                    fontScale=0.8,\n                    color=color,\n                    thickness = 2,\n                    lineType = cv2.LINE_AA)\n    else:\n        cv2.putText(img,\n                    #'{:s} {:.2f}'.format(\"ID:\"+str(track_id), score),\n                    '{:s}'.format(\"ID:\"+str(track_id)),\n                    (bbox[0], bbox[1]-5),\n                    font,\n                    fontScale=0.8,\n                    color=color,\n                    thickness = 2,\n                    lineType = cv2.LINE_AA)\n\n    if flag_method and method is not None and method != 'spacial' :\n        cv2.putText(img,\n                    #'{:s} {:.2f}'.format(\"ID:\"+str(track_id), score),\n                    '{:s}'.format(method),\n                    (bbox[0], bbox[3] + 10),\n                    font,\n                    fontScale=0.5,\n                    color=color,\n                    thickness = 2,\n                    lineType = cv2.LINE_AA)\n\n    return img\n\n","repo_name":"MostafaAbdelfadil/Soccer-Players-Detection-and-Tracking","sub_path":"scripts/other_utils/track/visualizer/visualizer.py","file_name":"visualizer.py","file_ext":"py","file_size_in_byte":3391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72581630186","text":"import RPi.GPIO as GPIO\nimport time\nfrom rpi_ws281x import PixelStrip, Color\n\n# LED strip configuration\nLED_COUNT = 64        # number of pixels\nLED_PIN = 12          # GPIO pin the module is connected to\nLED_FREQ_HZ = 800000  # LED signal frequency\nLED_DMA = 10          # DMA channel used to generate the signal\nLED_BRIGHTNESS = 10   # brightness setting\nLED_INVERT = False    # signal inversion\nLED_CHANNEL = 0       # set to 1 if GPIOs 13, 19, 41, 45 or 53 are used\n\ntilt_pin = 22\n# set the GPIO mode\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(tilt_pin, GPIO.IN)\n\ndef colorWipe(strip, color, wait_ms=50):\n    \"\"\"Shift the color across the LEDs one pixel at a time\"\"\"\n    for i in range(strip.numPixels()):\n        strip.setPixelColor(i, color)\n        strip.show()\n        time.sleep(wait_ms / 1000.0)\n\nstrip = PixelStrip(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL)\nstrip.begin()\n\nsprite_number_left = 
[24,17,10,3,33,42,51,25,26,27,28,29,30]\nsprite_number_right = [31,22,13,4,38,45,52,30,29,28,27,26,25]\nsprite_number_del_left = [3,10,17,24,33,42,51]\nsprite_number_del_right = [4,13,22,31,38,45,52]\n\ncolorWipe(strip,Color(0,0,0),10)\ntry:\n    while True:\n        if(GPIO.input(tilt_pin) == True):\n            for i in sprite_number_del_right:\n                strip.setPixelColor(i,Color(0,0,0))\n            for i in sprite_number_left:\n                strip.setPixelColor(i,Color(255,0,0))\n            strip.show()\n        else:\n            for i in sprite_number_del_left:\n                strip.setPixelColor(i,Color(0,0,0))\n            for i in sprite_number_right:\n                strip.setPixelColor(i,Color(0,255,0))\n            strip.show()\nexcept KeyboardInterrupt:\n    colorWipe(strip,Color(0,0,0),10)\n    GPIO.cleanup()\n","repo_name":"gimfo/gimpublic","sub_path":"src_joy_py_prj/07_Tilt_reminder/Tilt_reminder.py","file_name":"Tilt_reminder.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27051980595","text":"from tkinter import *\n\nroot = Tk()\nroot.title(\"Complaint Form\")\nroot.iconbitmap('download.ico')\n\nlabel1 = Label(root, text = \"Enter Name:\")\nname = Entry(root, width = 50)\nlabel2 = Label(root, text = \"Enter Address:\")\naddress = Entry(root, width = 50)\nlabel3 = Label(root, text = \"Enter Complaint:\")\ncomm = Entry(root, width = 50)\nQuit = Button(root, text = \"Submit\", command = lambda: file_apend() )\n\nlabel1.grid(row = 0, column = 0)\nname.grid(row = 1, column = 0, padx = 70, pady = 70)\nlabel2.grid(row = 2, column = 0)\naddress.grid(row = 3, column = 0, padx = 70, pady = 70)\nlabel3.grid(row = 4, column = 0)\ncomm.grid(row = 5, column = 0, padx = 70, pady = 70)\nQuit.grid(row = 6, column = 0)\n\ndef file_apend():\n    name1 = name.get()\n    address1 = address.get()\n    comm1 = comm.get()\n    person = \"Name: \" + name1 + \" \" + \"Address: \" + address1 + \" \" + \"Complaint: \" + comm1\n\n    Dem = open(\"PROJECT Demo.txt\", 'a+')\n    Dem.write(person)\n    Dem.write(\"\\n\")\n    Dem.close()\n    label = Label(root, text = \"Information Saved!\")\n    label.grid(row = 7, column = 0)\n\n\nroot.mainloop()\n","repo_name":"TheCodingAstronomer/Python-3","sub_path":"Tkinter/COMPLAIN.exe.py","file_name":"COMPLAIN.exe.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18762460369","text":"import json\nfrom socket import *\nfrom config import *\n\ns = socket()\ns.setsockopt(SOL_SOCKET, SO_REUSEADDR, DEBUG)\ns.bind(('0.0.0.0', 8080))\ns.listen(3)\n\nc,addr = s.accept()\ndata = c.recv(1024).decode()\nprint(data)\n\nc.send(json.dumps({'status':'200', 'data':'http test'}).encode())\nc.close()\ns.close()","repo_name":"BruceDGit/Dictionary","sub_path":"httpserver_test.py","file_name":"httpserver_test.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"44460210524","text":"# CMPT459 Data Mining\n# Spring 2021 Milestone 2\n# Joshua Peng & Lucia Schmidt\n\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\nimport os\nfrom sklearn.preprocessing import LabelEncoder\nimport matplotlib.pyplot as plt\n\nimport RandomForests, LightGbm\n\n\ndef main():\n    directory = os.path.dirname('../models/')\n    if not os.path.exists(directory):\n        os.makedirs(directory)\n\n    df = pd.read_csv('../data/cases_train_processed.csv')\n\n    random_forest(df)\n    light_gbm(df)\n\ndef random_forest(df):\n    all_data = df[['age_filled', 
'filled_sex', 'province_filled',\n 'country_filled','Confirmed', 'Deaths', 'Recovered','Active',\n 'Incidence_Rate', 'Case-Fatality_Ratio']]\n\n # One hot encoding for categorical values\n category_clean = pd.get_dummies(all_data)\n\n # Attach outcome column back\n category_clean['outcome'] = df['outcome']\n category_clean['age_filled'] = df['age_filled']\n category_clean['filled_sex'] = df['filled_sex']\n category_clean['province_filled'] = df['province_filled']\n category_clean['country_filled'] = df['country_filled']\n\n print(\"Splitting data into training and validation sets\")\n train, validate = train_test_split(category_clean, test_size=0.2, random_state=42, shuffle=True)\n\n\n train_attr = train.drop(columns=['outcome', 'age_filled','filled_sex','province_filled','country_filled']) # Features\n train_outcomes = train[['outcome']]\n\n v_data = validate.drop(columns=['outcome', 'age_filled','filled_sex','province_filled','country_filled'])\n v_outcomes = validate[['outcome']]\n # 2.2 Training Model\n print(\"Training Random Forests\")\n RandomForests.rf_train(train_attr, train_outcomes)\n # 2.3 Evaluate performance\n print(\"Evaluating Random Forests Training\")\n RandomForests.rf_eval(train_attr,train_outcomes, True)\n\n print(\"Evaluating Random Forests Validation\")\n RandomForests.rf_eval(v_data,v_outcomes,False)\n\n RandomForests.investigate_deaths(validate)\n\n # 2.4 Vary hyperparameter and check for overfitting\n train_scores = []\n validation_scores = []\n depth_values = range(10,110,10)\n for depth in depth_values:\n print(\"Training RandomForest with depth\", depth)\n clf = RandomForests.overfit_rf_train(train_attr, train_outcomes, depth)\n train_accuracy = RandomForests.overfit_eval(train_attr, train_outcomes, clf)\n train_scores.append(train_accuracy)\n\n validation_accuracy = RandomForests.overfit_eval(v_data, v_outcomes, clf)\n validation_scores.append(validation_accuracy)\n plt.figure()\n plt.plot(depth_values, train_scores)\n plt.plot(depth_values, validation_scores)\n plt.title(\"Accuracy vs Max Depth Hyperparameter for Random Forests\")\n plt.ylabel(\"Accuracy\")\n plt.xlabel(\"Max Depth Hyperparameter\")\n plt.legend(['training scores', 'validation scores'])\n plt.savefig(\"../plots/overfitting_check_rf.png\")\n\ndef light_gbm(df):\n X = df[['age_filled', 'filled_sex', 'province_filled',\n 'country_filled','Confirmed', 'Deaths', 'Recovered','Active',\n 'Incidence_Rate', 'Case-Fatality_Ratio']]\n y = df[['outcome']]\n le = LabelEncoder()\n le.fit(y)\n y_encoded = le.transform(y)\n\n X_train, X_valid, y_train, y_valid = train_test_split(X, y_encoded, test_size=0.2, random_state=42, shuffle=True)\n\n # 2.2 Train Model\n LightGbm.boosted_train(X_train, y_train, 8)\n\n # 2.3 Evaluate performance\n LightGbm.boosted_eval(X_train, y_train, le, True, True)\n LightGbm.boosted_eval(X_valid, y_valid, le, True, False)\n\n # Find feature importance\n LightGbm.boosted_feature_importance(X_train)\n\n # 2.4 Vary hyperparameter and check for overfitting\n train_scores = []\n validation_scores = []\n depth_values = range(2,20,2)\n for depth in depth_values:\n print(\"Training LightGBM with depth\", depth)\n LightGbm.boosted_train(X_train, y_train, depth)\n train_accuracy = LightGbm.boosted_eval(X_train, y_train, le, False)\n train_scores.append(train_accuracy)\n\n validation_accuracy = LightGbm.boosted_eval(X_valid, y_valid, le, False)\n validation_scores.append(validation_accuracy)\n plt.figure()\n plt.plot(depth_values, train_scores)\n plt.plot(depth_values, validation_scores)\n 
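# annotate the train/validation accuracy curves so overfitting is visible at a glance\n    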
plt.title(\"Accuracy vs Max Depth Hyperparameter for LightGBD\")\n plt.ylabel(\"Accuracy\")\n plt.xlabel(\"Max Depth Hyperparameter\")\n plt.legend(['training scores', 'validation scores'])\n plt.savefig(\"../plots/overfitting_check_gbd.png\", bbox_inches = \"tight\")\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"joshpeng1999/MileStone-3-Cmpt-459","sub_path":"Milestone2/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24424773113","text":"\"\"\"\nConvenience methods for working with ruby types\n\"\"\"\n\nBASIC_RUBY_TYPES = ['Integer', 'Numeric', 'Bool', 'Array', 'String',\n 'self', 'Enumerator', 'nil',\n '%any', 'Hash', 'Time', 'Set', 'Symbol']\n\ndef generalize_type(t):\n \"\"\"\n Given a type t, generalize it to a number of basic Ruby types in order to\n make fewer classes for the machine learning algorithms to work over.\n \"\"\"\n \n # Generalize subclasses of Integer to Integer\n if t in ['Bignum', 'Fixnum']:\n return 'Integer'\n # Generalize other Numeric types to Numeric\n if t in ['Complex', 'Rational', 'Float']:\n return 'Numeric'\n \n # Convert NilClass -> nil\n if t == 'NilClass':\n return 'nil'\n \n # Otherwise, if t is not a basic Ruby type then return 'Object'\n if t in BASIC_RUBY_TYPES:\n return t\n else:\n return 'Object'\n","repo_name":"cassidylaidlaw/rdl-ml","sub_path":"src-python/rdlml/ruby.py","file_name":"ruby.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72547698668","text":"import cx_Oracle\n#import oracledb\n\nnumero = 2\n\n#Obter conexao\ndef conexao():\n try:\n conn = cx_Oracle.connect(user=\"xxxxxxx\", password=\"xxxxxx\", host=\"oracle.fiap.com.br\", port=\"1521\", \n service_name=\"orcl\")\n print(f\"Conexão: {conn.version}\")\n except Exception as e:\n print(\"Erro ao obter uma conexão\", e)\n return conn\n\ndef select():\n conn = conexao()\n cursor = conn.cursor()\n sql_query = \"SELECT * FROM \"\n cursor.execute(sql_query)\n for result in cursor:\n print(result)\n conn.commit()\n\ndef insert():\n conn = conexao()\n cursor = conn.cursor()\n sql_query = \"INSERT INTO ceo_details VALUES('Steve', 'Jobs', 'Apple', 50)\"\n cursor.execute(sql_query)\n conn.commit()\n\ndef update():\n try:\n conn = conexao()\n cursor = conn.cursor()\n sql_query = \"UPDATE ceo_details SET AGE = 50 WHERE first_name = 'Steve'\"\n cursor.execute()\n conn.commit()\n print(\"CEO updated!\")\n except Exception as e:\n print(f'Something went wrong - update: {e}')\n finally:\n conn.close()\n cursor.close()\n\ndef delete():\n try:\n conn = conexao()\n cursor = conn.cursor()\n sql_query = \"DELETE FROM ceo_details WHERE AGE = 50\"\n cursor.execute(sql_query)\n conn.commit()\n print(\"CEO removed!\")\n except Exception as e:\n print(f'Something went wrong - delete: {e}')\n finally:\n conn.close()\n\ndef close_connection(conn):\n try:\n conn.close()\n print(f'Connection closed!')\n except Exception as e:\n print(f'Something went wrong - close_connection {e}')\n\n#Programa pricipal\nprint(f'Obtendo dados do BD')\nconn = conexao()\nselect()\nclose_connection(conn)","repo_name":"gabrielrodri33/Python-Fiap","sub_path":"2° Semestre FIAP/crud.py","file_name":"crud.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"362712349","text":"# 
------------------------------------------------\nimport cv2 # first import cv2, then torch\ncv2.setNumThreads(0)\n# https://github.com/pytorch/pytorch/issues/1838\n# ------------------------------------------------\nimport os\nimport math\nimport argparse\nimport builtins\nimport shutil\nimport torch\nimport numpy as np\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nfrom torch.utils.data import DataLoader, DistributedSampler\n\nfrom scannet.scannet_pretrain import ScannetDepthPointDataset\nfrom train_dp_simsiam import SimSiam\nfrom meters import AverageMeter, ProgressMeter\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--save\", type=str, default=\"log/DP_simsiam\", help=\"save path [default:log/DP_simsiam]\")\nparser.add_argument(\"--momentum-encoder\", action=\"store_true\", help=\"use momentum encoders. Equivalent to BYOL\")\n\nparser.add_argument('-j', '--workers', default=8, type=int, metavar='N',\n help='number of data loading workers (default: 8)')\nparser.add_argument('--epochs', default=200, type=int, metavar='N',\n help='number of total epochs to run')\nparser.add_argument('--start-epoch', default=0, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\nparser.add_argument('-b', '--batch-size', default=32, type=int,\n metavar='N',\n help='mini-batch size (default: 32), this is the total '\n 'batch size of all GPUs on the current node when '\n 'using Data Parallel or Distributed Data Parallel')\nparser.add_argument('--lr', '--learning-rate', default=0.03, type=float,\n metavar='LR', help='initial learning rate', dest='lr')\nparser.add_argument('--schedule', default=[120, 160], nargs='*', type=int,\n help='learning rate schedule (when to drop lr by 10x)')\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M',\n help='momentum of SGD solver')\nparser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,\n metavar='W', help='weight decay (default: 1e-4)',\n dest='weight_decay')\nparser.add_argument('-p', '--print-freq', default=10, type=int,\n metavar='N', help='print frequency (default: 10)')\nparser.add_argument('--resume', default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\nparser.add_argument('--world-size', default=-1, type=int,\n help='number of nodes for distributed training')\nparser.add_argument('--rank', default=-1, type=int,\n help='node rank for distributed training')\nparser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,\n help='url used to set up distributed training')\nparser.add_argument('--dist-backend', default='nccl', type=str,\n help='distributed backend')\nparser.add_argument('--seed', default=None, type=int,\n help='seed for initializing training. ')\nparser.add_argument('--gpu', default=None, type=int,\n help='GPU id to use.')\nparser.add_argument('--multiprocessing-distributed', action='store_true',\n help='Use multi-processing distributed training to launch '\n 'N processes per node, which has N GPUs. 
This is the '\n 'fastest way to use PyTorch for either single node or '\n 'multi node data parallel training')\nparser.add_argument('--cos', action='store_true',\n help='use cosine lr schedule')\n\n\ndef main():\n args = parser.parse_args() \n SAVE_PATH = os.path.join(BASE_DIR, args.save)\n if not os.path.exists(SAVE_PATH):\n os.makedirs(SAVE_PATH)\n args.save = SAVE_PATH\n\n if args.dist_url == \"env://\" and args.world_size == -1:\n args.world_size = int(os.environ[\"WORLD_SIZE\"])\n\n args.distributed = args.world_size > 1 or args.multiprocessing_distributed\n\n ngpus_per_node = torch.cuda.device_count()\n if args.multiprocessing_distributed:\n # Since we have ngpus_per_node processes per node, the total world_size\n # needs to be adjusted accordingly\n args.world_size = ngpus_per_node * args.world_size\n # Use torch.multiprocessing.spawn to launch distributed processes: the\n # main_worker process function\n mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))\n else:\n # Simply call main_worker function\n main_worker(args.gpu, ngpus_per_node, args)\n\n\ndef main_worker(gpu, ngpus_per_node, args):\n args.gpu = gpu\n\n # suppress printing if not master\n if args.multiprocessing_distributed and args.gpu != 0:\n def print_pass(*args):\n pass\n builtins.print = print_pass\n\n if args.gpu is not None:\n print(\"Use GPU: {} for training\".format(args.gpu))\n\n if args.distributed:\n if args.dist_url == \"env://\" and args.rank == -1:\n args.rank = int(os.environ[\"RANK\"])\n if args.multiprocessing_distributed:\n # For multiprocessing distributed training, rank needs to be the\n # global rank among all the processes\n args.rank = args.rank * ngpus_per_node + gpu\n dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,\n world_size=args.world_size, rank=args.rank)\n # create model\n model = SimSiam(momentum_encoder=args.momentum_encoder)\n \n if args.distributed:\n # use SyncBN\n model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)\n \n # For multiprocessing distributed, DistributedDataParallel constructor\n # should always set the single device scope, otherwise,\n # DistributedDataParallel will use all available devices.\n if args.gpu is not None:\n torch.cuda.set_device(args.gpu)\n model.cuda(args.gpu)\n # When using a single GPU per process and per\n # DistributedDataParallel, we need to divide the batch size\n # ourselves based on the total number of GPUs we have\n args.batch_size = int(args.batch_size / ngpus_per_node)\n args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])\n else:\n model.cuda()\n # DistributedDataParallel will divide and allocate batch_size to all\n # available GPUs if device_ids are not set\n model = torch.nn.parallel.DistributedDataParallel(model)\n \n elif args.gpu is not None:\n torch.cuda.set_device(args.gpu)\n model = model.cuda(args.gpu)\n # comment out the following line for debugging\n raise NotImplementedError(\"Only DistributedDataParallel is supported.\")\n else:\n # AllGather implementation (batch shuffle, queue update, etc.) 
in\n # this code only supports DistributedDataParallel.\n raise NotImplementedError(\"Only DistributedDataParallel is supported.\")\n\n optimizer = optim.SGD(model.parameters(), args.lr, \n momentum=args.momentum,\n weight_decay=args.weight_decay)\n \n if args.resume:\n if os.path.isfile(args.resume):\n print(\"=> loading checkpoint '{}'\".format(args.resume))\n if args.gpu is None:\n checkpoint = torch.load(args.resume)\n else:\n # Map model to be loaded to specified single gpu.\n loc = 'cuda:{}'.format(args.gpu)\n checkpoint = torch.load(args.resume, map_location=loc)\n args.start_epoch = checkpoint['epoch']\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n print(\"=> loaded checkpoint '{}' (epoch {})\"\n .format(args.resume, checkpoint['epoch']))\n else:\n print(\"=> no checkpoint found at '{}'\".format(args.resume))\n \n train_dataset = ScannetDepthPointDataset(\"train\", diff_crop=True, augment=True, num_match=10)\n\n if args.distributed:\n train_sampler = DistributedSampler(train_dataset)\n else:\n train_sampler = None\n\n train_loader = DataLoader(\n train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),\n num_workers=args.workers, pin_memory=False, sampler=train_sampler)\n\n\n for epoch in range(args.start_epoch, args.epochs):\n if args.distributed:\n train_sampler.set_epoch(epoch)\n adjust_learning_rate(optimizer, epoch, args)\n \n # train for one epoch\n train(train_loader, model, optimizer, epoch, args)\n \n if not args.multiprocessing_distributed or (args.multiprocessing_distributed\n and args.rank % ngpus_per_node == 0):\n save_checkpoint({\n 'epoch': epoch + 1,\n 'state_dict': model.state_dict(),\n 'optimizer' : optimizer.state_dict(),\n }, is_best=False, filename='{:s}/checkpoint_{:04d}.pth.tar'.format(args.save, epoch))\n\n\ndef save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, 'model_best.pth.tar')\n \n\ndef train(train_loader, model, optimizer, epoch, args):\n progress = ProgressMeter(\n len(train_loader),\n prefix=\"Epoch: [{}]\".format(epoch))\n\n # switch to train mode\n model.train()\n for i, data in enumerate(train_loader):\n points = data[\"pcd\"].to(args.gpu)\n depth = data[\"depthmap\"].to(args.gpu)\n loss = model(depth=depth, points=points)\n progress.update(i, {\"loss\": loss.item()})\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n \n if i % args.print_freq == 0:\n progress.display(i)\n\n\ndef adjust_learning_rate(optimizer, epoch, args):\n \"\"\"Decay the learning rate based on schedule\"\"\"\n lr = args.lr\n if args.cos: # cosine lr schedule\n lr *= 0.5 * (1. 
+ math.cos(math.pi * epoch / args.epochs))\n    else:  # stepwise lr schedule\n        for milestone in args.schedule:\n            lr *= 0.1 if epoch >= milestone else 1.\n    for param_group in optimizer.param_groups:\n        param_group['lr'] = lr\n    \n\nif __name__ == \"__main__\":\n    main()\n    ","repo_name":"lilanxiao/Invar3D","sub_path":"train_dp_simsiam_ddp.py","file_name":"train_dp_simsiam_ddp.py","file_ext":"py","file_size_in_byte":10626,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"18572317812","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport handle\nimport config\nimport xls as excel\nimport json\nDEFAULT_WORKSPACE = {config.TapdRequestArg.REQ_WORKSPACE_ID_FIELD: config.TapdSearchContent.DEFAULT_WORKSPACE_ID}\n\n\n\ndef write_task(iter_name, task_filter, condits, xlspath, line_dict, row = 2, workspace = DEFAULT_WORKSPACE):\n    r\"\"\" Filter the tasks inside the given iteration and write out the task data.\n    :param iter_name: string, iteration name\n    :param task_filter: task fields to filter and write to the excel sheet; must contain ('owner',), e.g. ('owner', 'effort', 'name', ...)\n    :type task_filter: tuple\n    :param condits: list of owners to filter on, also used as the excel sheet tabs. Note: the sheet tabs in the target workbook must match this argument, e.g. (u'甘芳琳;', u'邹祖业;', ...)\n    :type condits: tuple\n    :param xlspath: string, path of the excel workbook to write\n    :param line_dict: row dict controlling each owner's starting row, e.g. {u'甘芳琳;': 2, u'邹祖业;': 2, ...}, where 2 is the starting row\n    :type line_dict: dict\n    :param row: int, starting column; one extra column is used per element of task_filter, default 2\n    :param workspace: int, default tapd workspace id, defaults to the Q6 workspace id\n    :return: None\n    \"\"\"\n    tapdHander = handle.TpadHandler()\n    wb = excel.open_xls(xlspath)\n    iter_id = tapdHander.get_iterid_by_name(iter_name, workspace)\n    for datas in tapdHander.get_data_by_iterid('task', iter_id, workspace):\n        for data in tapdHander.task_handle(datas, task_filter):\n            page = data['owner']\n            for condit in condits:\n                if condit in page:\n                    start_row = row\n                    for key, value in data.items():\n                        if key == 'effort':\n                            value = float(value)\n                        excel.write_xls(wb, condit, line_dict[condit], start_row, value)\n                        start_row += 1\n                    line_dict[condit] += 1\n    excel.save_xls(xlspath, wb)\n\n\ndef write_story_adnormal_data(iter_name, story_filter_tuple, xlspath, sheet, line = 2, row = 2, workspace = DEFAULT_WORKSPACE, need_new_date = False):\n    r\"\"\" Filter the abnormal stories in the given iteration and write the required story data to the excel sheet; abnormal times get a red warning cell.\n    :param iter_name: iteration name\n    :param story_filter_tuple: sequence of story fields to write to the excel sheet, containing at least ('name',), e.g. ('owner', 'effort', 'name', ...)\n    :type story_filter_tuple: tuple\n    :param xlspath: string, path of the excel workbook to write\n    :param sheet: string, excel sheet tab to write\n    :param line: int, starting row for the data, default 2\n    :param row: int, starting column for the data, default 2\n    :param workspace: int, default tapd workspace id, defaults to the Q6 workspace id\n    :param need_new_date: bool, whether to keep the latest time. The same tapd status can occur more than once (e.g. multiple 'ready for test' transitions); False (default) keeps only the first occurrence, True keeps the most recent one\n    :return: None\n    \"\"\"\n    # initialise the sheet-related dicts\n    info_story_status_dict = dict(zip(config.TapdSearchContent.CHANGE_STORY_STATUS_KEY_WORD, config.TapdSearchContent.CHANGE_STORY_TIME_LIMIT))\n    info_story_status_row_dict = dict(zip(config.TapdSearchContent.CHANGE_STORY_STATUS_KEY_WORD, config.TapdSearchContent.CHANGE_STORY_STATUS_ROW))\n    wb = excel.open_xls(xlspath)\n    tapdHander = handle.TpadHandler()\n    iter_id = tapdHander.get_iterid_by_name(iter_name)\n    story_ids = tapdHander.get_storyids_by_iterid(iter_id)\n    story_status = tapdHander.get_status_stream('story', workspace)\n\n    start_line = line\n\n    # iterate over every story id in the iteration\n    for story_id in story_ids:\n        # fetch each story's full change history and walk through it\n        for changes in tapdHander.get_story_change_by_storyid(story_id):\n            # is_abnormal_flag = False\n            info_story_status_time = 
dict(zip(config.TapdSearchContent.CHANGE_STORY_STATUS_KEY_WORD, config.TapdSearchContent.CHANGE_STORY_DEFAULT_TIME))\n\n            # walk every individual change inside the change history\n            for change in tapdHander.story_change_handle(changes, ('created', 'changes')):\n                change_field = json.loads(change['changes'])\n                for status in change_field:\n                    # if the change is a status change, check whether the new status is one of the abnormal statuses we track, and record it if so\n                    if status['field'] == 'status':\n                        change_time = change['created']\n                        if story_status[status['value_after']] in info_story_status_dict.keys():\n                            # find the abnormal story(the changed time lower than limit time).\n                            status_key = story_status[status['value_after']]\n                            # update the recorded time\n                            update_status_time(info_story_status_time, status_key, change_time, need_new_date)\n\n            # write the abnormal story's time data\n            story_json = tapdHander.get_story_data_by_storyid(story_id)\n            story_datas = tapdHander.story_handle(story_json, story_filter_tuple)\n            start_row = row\n            for story_data in story_datas:\n                for story_v in story_data.values():\n                    excel.write_xls(wb, sheet, start_line, start_row, story_v)\n                    start_row += 1\n            # write the time data\n            for k, v in info_story_status_time.items():\n                if is_abnormal_time(info_story_status_time[k], info_story_status_dict[k]):\n                    excel.write_abnromal_xls(wb, sheet, start_line, info_story_status_row_dict[k], time_to_date(v))\n                else:\n                    excel.write_xls(wb, sheet, start_line, info_story_status_row_dict[k], time_to_date(v))\n            start_line += 1\n    excel.save_xls(xlspath, wb)\n\n\ndef is_abnormal_time(changed_time, limit_time):\n    # return whether the changed time is abnormal\n    if changed_time > limit_time:\n        return True\n    return False\n\n\ndef time_to_date(times):\n    # split a tapd change timestamp and return only the date part\n    if times is None:\n        return None\n    t = times.split(' ')\n    d = t[0].split('-')\n    return '/'.join(d)\n\n\ndef update_status_time(dictionary, key, new_date, need_new):\n    # update the status-time dict\n    if dictionary[key] is None:\n        dictionary[key] = new_date\n        return\n    if need_new:\n        if dictionary[key] < new_date:\n            dictionary[key] = new_date\n    else:\n        if dictionary[key] > new_date:\n            dictionary[key] = new_date\n\n\nif __name__ == '__main__':\n    write_task(u'2018.05.18维护', config.TapdSearchContent.TASK_FILTER_LIST, config.TapdSearchContent.TASK_OWNER_Q6_QC, 'test.xls', {u'甘芳琳;': 2, u'邹祖业;': 2, u'赖增涛;': 2, u'肖兴亮;': 2})\n    write_story_adnormal_data(u'测试迭代', ('id', 'name', 'owner'), 'test.xls', u'甘芳琳;')\n\n","repo_name":"stcnchenxin/Tapd","sub_path":"tapd_req/count.py","file_name":"count.py","file_ext":"py","file_size_in_byte":7024,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"28806941974","text":"from bot import Telegram_Bot\n\nfs = open(\"token.config\", \"r\", encoding='utf-8')\ntoken = fs.read()\nfs.close()\n\nlala = Telegram_Bot(token)\nresponse = lala.validate()\n\nprint(\"\\nBOT - [\" + str(response[\"id\"]) + \"] \" + response[\"first_name\"] + '\\n')\n\nlala.start()\n","repo_name":"Remmoze/Komsborfair-telegram-bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15610224215","text":"def move_element_to_the_end(arr, ele):\n    i = 0\n    j = len(arr) - 1\n    while i < j:\n        while i < j and arr[j] == ele:\n            j -= 1\n        if arr[i] == ele:\n            swap(arr, i, j)\n        i += 1\n\n    return arr\n\n\ndef swap(arr, i, j):\n    arr[i], arr[j] = arr[j], arr[i]\n\n\n# print(move_element_to_the_end([2, 1, 3, 4, 2, 2, 2, 4], 2))\n# print(move_element_to_the_end([2, 1, 2, 2, 2, 3, 4, 2], 2))\n# print(move_element_to_the_end([4, 1, 2, 2, 2, 3, 2, 2], 2))\n\n\ndef two_sum(arr, target):\n    for i in 
range(len(arr)-1):\n        for j in range(i+1, len(arr)):\n            if arr[i] + arr[j] == target:\n                print([arr[i], arr[j]])\n    return []\n\n\n# two_sum([2, 4, -2, 0, 1, 4], target=2)\nprint(\".................\")\n\n\ndef twoSum(arr, t):\n    hashM = {}\n    for i in range(len(arr)):\n        if t - arr[i] in hashM:\n            print([arr[i], t - arr[i]])\n        else:\n            hashM[arr[i]] = True\n    return []\n\n\n\"\"\"\n[2, 4, -2, 0, 1, 4], target = 2\n\na + b = target\nb = target - a\n\n\"\"\"\n","repo_name":"Ewooral/Ewooral-Tutorials","sub_path":"tests/try.py","file_name":"try.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22926760531","text":"# write a program that asks for two numbers and prints the larger one\n\n\nlista = []\nnum = 1\nwhile num in range(1,2):\n    numeroadd = float(input('Enter a number :'))\n    resultado = lista.append(numeroadd)\n    tamanho_da_lista = len(lista)\n    num = tamanho_da_lista\n    \nprint(max(lista))\n\n\n\n","repo_name":"aalnunes/exercicios-python","sub_path":"Lista_de_exercicios_2.py","file_name":"Lista_de_exercicios_2.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"6231017352","text":"from typing import Callable\n\nfrom grid import Grid\nfrom gui import GUI\nfrom colors import BLACK, Color, WHITE\nfrom inspect import signature\nfrom point import Point\n\n\"\"\"Main file\n\nThis also acts as the controller of the visualizer\n\"\"\"\n_DEFAULT_CELL_COLOR = BLACK\n_DEFAULT_BG_COLOR = WHITE\n\n_DEFAULT_MARGIN_LENGTH = 2\n_DEFAULT_WINDOW_HEIGHT = 600\n\n\nclass Viz:\n    \n    def __init__(self, \n                 height: int,\n                 width: int = None,\n                 window_height: int = _DEFAULT_WINDOW_HEIGHT,\n                 window_width: int = None,\n                 margin_len: int = _DEFAULT_MARGIN_LENGTH,\n                 cell_color: Color = _DEFAULT_CELL_COLOR,\n                 bg_color: Color = _DEFAULT_BG_COLOR) -> None:\n        \n        self.__bg_color = bg_color\n        self.__cell_color = cell_color\n        \n        self.__grid = Grid(height, width, cell_color)\n        \n        self.__gui = GUI(self.__grid,\n                         window_height,\n                         window_width, \n                         margin_len, \n                         cell_color, \n                         bg_color)\n        \n        self.__try_render()\n\n    def display(self) -> None:\n        self.__try_render()\n\n    def resize(self, height: int, width: int = None) -> None:\n        assert type(height) is int\n        if width: assert type(width) is int\n        \"\"\" Resize height and width of number of cells.\n        Has no effect on the display window.\n\n        Args:\n            height (int): new number of cells vertically.\n            width (int, optional): new number of cells horizontally. 
Defaults to new height.\n        \"\"\"\n        if not width:\n            width = height\n        self.__grid.resize(height, width)\n        self.reset()\n\n\n    def reset(self) -> None:\n        \"\"\"Reset to original settings and redraw canvas\n        \"\"\"\n        self.__gui.reset()\n        self.__try_render()\n\n    def __process_func(self, func: Callable) -> Callable:\n        sig = signature(func)\n        params = sig.parameters\n        p_len = len(params)\n        if p_len == 1:\n            return lambda x, y, o: func(o)\n        elif p_len == 2:\n            return lambda x, y, o: func(x, y)\n        elif p_len == 3:\n            return func\n        else:\n            return None\n\n    def __try_render(self):\n        if not self.__gui.render():\n            print(\"Too many cells to display in window\")\n\n    def filter(self, func: Callable) -> None:\n        f = self.__process_func(func)\n        if not f:\n            print(\"Invalid input function\")\n            return\n        self.__grid.transform(self.__gui.bg_color(), f)\n        self.__gui.clear_screen()\n        self.__gui.render()\n    \n    \n    def map(self, func: Callable) -> None:\n        f = self.__process_func(func)\n        if not f:\n            print(\"Invalid input function\")\n            return\n        self.__grid.apply(f)\n        self.__gui.clear_screen()\n        self.__gui.render()\n\n    def scale_up(self) -> None:\n        self.__grid.scale_up({self.__bg_color: self.__bg_color})\n        self.__gui.clear_screen()\n        success = self.__gui.render()\n        if not success:\n            self.__grid.scale_down()\n            print(\"Too many cells to display in window\")\n    \n    def scale_down(self) -> None:\n        success = self.__grid.scale_down()\n        if not success:\n            print(\"Cannot scale down anymore\")\n        else:\n            self.__gui.clear_screen()\n            self.__gui.render()\n\n    def track(self, obj, mapping) -> None:\n        self.__tracked = obj\n        self.__mapping = mapping\n        self.reset()\n        # resize\n        self.resize(len(obj), len(obj[0]))\n        self.update()\n\n\n    def update(self):\n        colors = {'white': Color(255,255,255), \n                  'black': Color(0,0,0), 'red': Color(255,0,0), \n                  'green': Color(0,255,0), 'blue': Color(0,0,255)}\n        for r in range(len(self.__tracked)):\n            for c in range(len(self.__tracked[0])):\n                color = self.__mapping[self.__tracked[r][c]]\n                if color in colors:\n                    color = colors[color]\n                self.__grid[Point(c,r)] = color\n        self.__gui.render()\n\n    # ----------------------------------- Debug ---------------------------------- #\n    \n    def _print_grid(self) -> None:\n        print(self.__grid)\n","repo_name":"bojinyao/Pixel-Grid","sub_path":"gridviz.py","file_name":"gridviz.py","file_ext":"py","file_size_in_byte":4337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37448634130","text":"#!/usr/bin/python3\n\"\"\" Module containing a function that divides a matrix by a number. \"\"\"\n\n\ndef matrix_divided(matrix, div):\n    \"\"\" Divide every element of a matrix by div, rounding to 2 decimal places. 
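Raises TypeError for invalid input and ZeroDivisionError when div is 0. 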
\"\"\"\n\n err_message = \"matrix must be a matrix (list of lists) of integers/floats\"\n if not isinstance(matrix, list):\n raise TypeError(err_message)\n\n row_sz = []\n for row in matrix:\n if not isinstance(row, list):\n raise TypeError(err_message)\n row_sz.append(len(row))\n for item in row:\n if not isinstance(item, (int, float)):\n raise TypeError(err_message)\n\n if row_sz.count(row_sz[0]) != len(matrix):\n raise TypeError(\"Each row of the matrix must have the same size\")\n if not isinstance(div, (int, float)):\n raise TypeError(\"div must be a number\")\n if div == 0:\n raise ZeroDivisionError(\"division by zero\")\n return [\n list(map(lambda item: round(item / div, 2), row)) for row in matrix\n ]\n","repo_name":"AmiraWalid1/alx-higher_level_programming","sub_path":"0x07-python-test_driven_development/2-matrix_divided.py","file_name":"2-matrix_divided.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"884811919","text":"import re\nfrom typing import Pattern, Callable\n\nfrom atcodertools.codegen.code_generators import cpp, java, rust, python, nim, d, cs, swift, go, julia\nfrom atcodertools.codegen.models.code_gen_args import CodeGenArgs\nfrom atcodertools.tools.templates import get_default_template_path\nfrom atcodertools.codegen.code_style_config import INDENT_TYPE_TAB\nimport platform\n\n\nclass LanguageNotFoundError(Exception):\n pass\n\n\nclass CodeStyle:\n def __init__(self,\n indent_width=None,\n indent_type=None\n ):\n self.indent_width = indent_width\n self.indent_type = indent_type\n\n\nclass Language:\n def __init__(self,\n name: str,\n display_name: str,\n extension: str,\n submission_lang_pattern: Pattern[str],\n default_code_generator: Callable[[CodeGenArgs], str],\n default_template_path: str,\n default_code_style=None,\n compile_command=None,\n test_command=None,\n exec_filename=None\n ):\n self.name = name\n self.display_name = display_name\n self.extension = extension\n self.submission_lang_pattern = submission_lang_pattern\n self.default_code_generator = default_code_generator\n self.default_template_path = default_template_path\n self.default_code_style = default_code_style\n self.compile_command = compile_command\n self.test_command = test_command\n self.code_filename = \"{filename}.\" + extension\n if platform.system() == \"Windows\":\n self.exec_filename = exec_filename.replace(\n \"{exec_extension}\", \".exe\")\n else:\n self.exec_filename = exec_filename.replace(\"{exec_extension}\", \"\")\n\n def source_code_name(self, name_without_extension: str) -> str:\n # put extension to the name\n return \"{}.{}\".format(name_without_extension, self.extension)\n\n def get_compile_command(self, filename: str, base_command: str = None):\n if base_command is None:\n base_command = self.compile_command\n return base_command.format(filename=filename)\n\n def get_code_filename(self, filename: str):\n return self.code_filename.format(filename=filename)\n\n def get_exec_filename(self, filename: str):\n return self.exec_filename.format(filename=filename, capitalized_filename=filename.capitalize())\n\n def get_test_command(self, filename: str, cwd: str = '.'):\n exec_filename = cwd + '/'\n if platform.system() == \"Windows\":\n exec_filename += filename + \".exe\"\n else:\n exec_filename += filename\n capitalized_filename = filename.capitalize()\n return self.test_command.format(filename=filename, exec_filename=exec_filename, 
capitalized_filename=capitalized_filename)\n\n    @classmethod\n    def from_name(cls, name: str):\n        for lang in ALL_LANGUAGES:\n            if lang.name == name:\n                return lang\n        raise LanguageNotFoundError(\n            \"No language support for '{}'\".format(name))\n\n\nCPP = Language(\n    name=\"cpp\",\n    display_name=\"C++\",\n    extension=\"cpp\",\n    submission_lang_pattern=re.compile(\".*C\\\\+\\\\+ \\\\(GCC 9.*|.*C\\\\+\\\\+ 20 \"),\n    default_code_generator=cpp.main,\n    default_template_path=get_default_template_path('cpp'),\n    compile_command=\"g++ {filename}.cpp -o {filename} -std=c++20\",\n    test_command=\"{exec_filename}\",\n    exec_filename=\"{filename}{exec_extension}\"\n)\n\nJAVA = Language(\n    name=\"java\",\n    display_name=\"Java\",\n    extension=\"java\",\n    submission_lang_pattern=re.compile(\".*Java \\\\(OpenJDK .*\"),\n    default_code_generator=java.main,\n    default_template_path=get_default_template_path('java'),\n    compile_command=\"javac {filename}.java\",\n    test_command=\"java {capitalized_filename}\",\n    exec_filename=\"{capitalized_filename}.class\"\n)\n\nRUST = Language(\n    name=\"rust\",\n    display_name=\"Rust\",\n    extension=\"rs\",\n    submission_lang_pattern=re.compile(\".*Rust \\\\(1.*|.*Rust \\\\(rustc 1.*\"),\n    default_code_generator=rust.main,\n    default_template_path=get_default_template_path('rs'),\n    compile_command=\"rustc {filename}.rs -o {filename}\",\n    test_command=\"{exec_filename}\",\n    exec_filename=\"{filename}{exec_extension}\"\n)\n\nPYTHON = Language(\n    name=\"python\",\n    display_name=\"Python\",\n    extension=\"py\",\n    submission_lang_pattern=re.compile(\".*Python \\\\(3.*|.*Python \\\\(CPython 3.*\"),\n    default_code_generator=python.main,\n    default_template_path=get_default_template_path('py'),\n    compile_command=\"python3 -mpy_compile {filename}.py\",\n    test_command=\"python3 {filename}.py\",\n    exec_filename=\"{filename}.pyc\"\n)\n\nDLANG = Language(\n    name=\"d\",\n    display_name=\"D\",\n    extension=\"d\",\n    submission_lang_pattern=re.compile(\".*D \\\\(DMD.*\"),\n    default_code_generator=d.main,\n    default_template_path=get_default_template_path('d'),\n    compile_command=\"dmd {filename}.d -of={filename}\",\n    test_command=\"{exec_filename}\",\n    exec_filename=\"{filename}{exec_extension}\"\n)\n\nNIM = Language(\n    name=\"nim\",\n    display_name=\"NIM\",\n    extension=\"nim\",\n    submission_lang_pattern=re.compile(\".*Nim \\\\(1.*|.*Nim \\\\(Nim 1.*\"),\n    default_code_generator=nim.main,\n    default_template_path=get_default_template_path('nim'),\n    default_code_style=CodeStyle(indent_width=2),\n    compile_command=\"nim cpp -o:{filename} {filename}.nim\",\n    test_command=\"{exec_filename}\",\n    exec_filename=\"{filename}{exec_extension}\"\n)\n\nCSHARP = Language(\n    name=\"cs\",\n    display_name=\"C#\",\n    extension=\"cs\",\n    submission_lang_pattern=re.compile(\".*C# \\\\(Mono.*|.*C# 11.0 \"),\n    default_code_generator=cs.main,\n    default_template_path=get_default_template_path('cs'),\n    compile_command=\"mcs {filename}.cs -o {filename}\",\n    test_command=\"{exec_filename}\",\n    exec_filename=\"{filename}{exec_extension}\"\n)\n\nSWIFT = Language(\n    name=\"swift\",\n    display_name=\"Swift\",\n    extension=\"swift\",\n    submission_lang_pattern=re.compile(\".*Swift \\\\(5.*|.*Swift \\\\(swift 5.*\"),\n    default_code_generator=swift.main,\n    default_template_path=get_default_template_path('swift'),\n    compile_command=\"swiftc {filename}.swift -o {filename}\",\n    test_command=\"{exec_filename}\",\n    exec_filename=\"{filename}{exec_extension}\"\n)\n\nGO = Language(\n    name=\"go\",\n    display_name=\"Go\",\n    extension=\"go\",\n    
submission_lang_pattern=re.compile(\".*Go \\\\(1.*|.*Go \\\\(go 1.*\"),\n default_code_generator=go.main,\n default_template_path=get_default_template_path('go'),\n default_code_style=CodeStyle(indent_type=INDENT_TYPE_TAB),\n compile_command=\"go build -o {filename} {filename}.go\",\n test_command=\"{exec_filename}\",\n exec_filename=\"{filename}{exec_extension}\"\n)\n\nJULIA = Language(\n name=\"julia\",\n display_name=\"Julia\",\n extension=\"jl\",\n submission_lang_pattern=re.compile(\".*Julia \\\\(1.*|.*Julia \\\\(Julia 1.*\"),\n default_code_generator=julia.main,\n default_template_path=get_default_template_path('jl'),\n compile_command=\"\",\n test_command=\"julia {filename}.jl\",\n exec_filename=\"{filename}.jl\"\n)\n\nALL_LANGUAGES = [CPP, JAVA, RUST, PYTHON, NIM, DLANG, CSHARP, SWIFT, GO, JULIA]\nALL_LANGUAGE_NAMES = [lang.display_name for lang in ALL_LANGUAGES]\n","repo_name":"kyuridenamida/atcoder-tools","sub_path":"atcodertools/common/language.py","file_name":"language.py","file_ext":"py","file_size_in_byte":7311,"program_lang":"python","lang":"en","doc_type":"code","stars":367,"dataset":"github-code","pt":"37"} +{"seq_id":"5116778938","text":"import os\n\nfrom envyaml import EnvYAML\n\nfrom connectors.logger import logger\n\n\ndef load_config(config_file):\n logger.info(f\"Loading config from {config_file}\")\n yaml_config = EnvYAML(config_file, flatten=False).export()\n nested_yaml_config = {}\n for key, value in yaml_config.items():\n _nest_configs(nested_yaml_config, key, value)\n configuration = dict(_merge_dicts(_default_config(), nested_yaml_config))\n _ent_search_config(configuration)\n return configuration\n\n\n# Left - in Enterprise Search; Right - in Connectors\nconfig_mappings = {\n \"elasticsearch.host\": \"elasticsearch.host\",\n \"elasticsearch.username\": \"elasticsearch.username\",\n \"elasticsearch.password\": \"elasticsearch.password\",\n \"elasticsearch.headers\": \"elasticsearch.headers\",\n \"log_level\": \"service.log_level\",\n}\n\n# Enterprise Search uses Ruby and is in lower case always, so hacking it here for now\n# Ruby-supported log levels: 'debug', 'info', 'warn', 'error', 'fatal', 'unknown'\n# Python-supported log levels: 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL', 'NOTSET'\nlog_level_mappings = {\n \"debug\": \"DEBUG\",\n \"info\": \"INFO\",\n \"warn\": \"WARNING\",\n \"error\": \"ERROR\",\n \"fatal\": \"CRITICAL\",\n \"unknown\": \"NOTSET\",\n}\n\n\ndef _default_config():\n return {\n \"elasticsearch\": {\n \"host\": \"http://localhost:9200\",\n \"username\": \"elastic\",\n \"password\": \"changeme\",\n \"ssl\": True,\n \"bulk\": {\n \"queue_max_size\": 1024,\n \"queue_max_mem_size\": 25,\n \"display_every\": 100,\n \"chunk_size\": 1000,\n \"max_concurrency\": 5,\n \"chunk_max_mem_size\": 5,\n \"concurrent_downloads\": 10,\n \"max_retries\": 3,\n },\n \"retry_on_timeout\": True,\n \"request_timeout\": 120,\n \"max_wait_duration\": 120,\n \"initial_backoff_duration\": 1,\n \"backoff_multiplier\": 2,\n \"log_level\": \"info\",\n },\n \"service\": {\n \"idling\": 30,\n \"heartbeat\": 300,\n \"preflight_max_attempts\": 10,\n \"preflight_idle\": 30,\n \"max_errors\": 20,\n \"max_errors_span\": 600,\n \"max_concurrent_content_syncs\": 1,\n \"max_concurrent_access_control_syncs\": 1,\n \"job_cleanup_interval\": 300,\n \"log_level\": \"INFO\",\n },\n \"sources\": {\n \"azure_blob_storage\": \"connectors.sources.azure_blob_storage:AzureBlobStorageDataSource\",\n \"confluence\": \"connectors.sources.confluence:ConfluenceDataSource\",\n \"dir\": 
\"connectors.sources.directory:DirectoryDataSource\",\n \"dropbox\": \"connectors.sources.dropbox:DropboxDataSource\",\n \"github\": \"connectors.sources.github:GitHubDataSource\",\n \"gmail\": \"connectors.sources.gmail:GMailDataSource\",\n \"google_cloud_storage\": \"connectors.sources.google_cloud_storage:GoogleCloudStorageDataSource\",\n \"google_drive\": \"connectors.sources.google_drive:GoogleDriveDataSource\",\n \"jira\": \"connectors.sources.jira:JiraDataSource\",\n \"mongodb\": \"connectors.sources.mongo:MongoDataSource\",\n \"mssql\": \"connectors.sources.mssql:MSSQLDataSource\",\n \"mysql\": \"connectors.sources.mysql:MySqlDataSource\",\n \"network_drive\": \"connectors.sources.network_drive:NASDataSource\",\n \"onedrive\": \"connectors.sources.onedrive:OneDriveDataSource\",\n \"oracle\": \"connectors.sources.oracle:OracleDataSource\",\n \"outlook\": \"connectors.sources.outlook:OutlookDataSource\",\n \"postgresql\": \"connectors.sources.postgresql:PostgreSQLDataSource\",\n \"s3\": \"connectors.sources.s3:S3DataSource\",\n \"salesforce\": \"connectors.sources.salesforce:SalesforceDataSource\",\n \"servicenow\": \"connectors.sources.servicenow:ServiceNowDataSource\",\n \"sharepoint_online\": \"connectors.sources.sharepoint_online:SharepointOnlineDataSource\",\n \"sharepoint_server\": \"connectors.sources.sharepoint_server:SharepointServerDataSource\",\n \"slack\": \"connectors.sources.slack:SlackDataSource\",\n \"microsoft_teams\": \"connectors.sources.microsoft_teams:MicrosoftTeamsDataSource\",\n \"zoom\": \"connectors.sources.zoom:ZoomDataSource\",\n \"box\": \"connectors.sources.box:BoxDataSource\",\n },\n }\n\n\ndef _ent_search_config(configuration):\n if \"ENT_SEARCH_CONFIG_PATH\" not in os.environ:\n return\n logger.info(\"Found ENT_SEARCH_CONFIG_PATH, loading ent-search config\")\n ent_search_config = EnvYAML(os.environ[\"ENT_SEARCH_CONFIG_PATH\"])\n for es_field in config_mappings.keys():\n if es_field not in ent_search_config:\n continue\n\n connector_field = config_mappings[es_field]\n es_field_value = ent_search_config[es_field]\n\n if es_field == \"log_level\":\n if es_field_value not in log_level_mappings:\n msg = f\"Unexpected log level: {es_field_value}. Allowed values: {', '.join(log_level_mappings.keys())}\"\n raise ValueError(msg)\n es_field_value = log_level_mappings[es_field_value]\n\n _nest_configs(configuration, connector_field, es_field_value)\n\n logger.debug(f\"Overridden {connector_field}\")\n\n\ndef _nest_configs(configuration, field, value):\n \"\"\"\n Update configuration field value taking into account the nesting.\n\n Configuration is a hash of hashes, so we need to dive inside to do proper assignment.\n\n E.g. 
_nest_config({}, \"elasticsearch.bulk.queuesize\", 20) will result in the following config:\n {\n \"elasticsearch\": {\n \"bulk\": {\n \"queuesize\": 20\n }\n }\n }\n \"\"\"\n subfields = field.split(\".\")\n last_key = subfields[-1]\n\n current_leaf = configuration\n for subfield in subfields[:-1]:\n if subfield not in current_leaf:\n current_leaf[subfield] = {}\n current_leaf = current_leaf[subfield]\n\n if isinstance(current_leaf.get(last_key), dict):\n current_leaf[last_key] = dict(_merge_dicts(current_leaf[last_key], value))\n else:\n current_leaf[last_key] = value\n\n\ndef _merge_dicts(hsh1, hsh2):\n for k in set(hsh1.keys()).union(hsh2.keys()):\n if k in hsh1 and k in hsh2:\n if isinstance(hsh1[k], dict) and isinstance(\n hsh2[k], dict\n ): # only merge objects\n yield (k, dict(_merge_dicts(hsh1[k], hsh2[k])))\n else:\n yield (k, hsh2[k])\n elif k in hsh1:\n yield (k, hsh1[k])\n else:\n yield (k, hsh2[k])\n","repo_name":"elastic/connectors","sub_path":"connectors/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":6712,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"37"} +{"seq_id":"20253742562","text":"import torch\n\nclass Generator():\n def __init__(self, model, tokenizer) -> None:\n self.model = model\n self.tokenizer = tokenizer\n\n def __call__(self, text, **kwargs):\n self.model.eval()\n inputs = self.tokenizer(text, return_tensors='pt', padding=True).to(self.model.device)\n with torch.no_grad():\n hypotheses = self.model.generate(**inputs, **kwargs, )\n return self.tokenizer.batch_decode(hypotheses, skip_special_tokens=True)\n\n ","repo_name":"ArtemNechaev/enrut5","sub_path":"generation.py","file_name":"generation.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42623325354","text":"with open('Day7 input.txt') as f:\n lines = f.readlines()\n\ncrabs = [int(a) for a in lines[0].split(',')].copy()\ncrabs.sort()\n\nlowest_cost = float('inf')\nbest_loc = 0\nfor i in range(crabs[-1]):\n temp = sum([abs(c-i) for c in crabs])\n if temp < lowest_cost:\n lowest_cost = temp\n best_loc = i\n\nprint(best_loc, lowest_cost)\n\n\ndef binary_search(crabs,crab_list):\n if len(crabs)==1:\n return crabs[0]\n left = crabs[:(len(crabs)//2)]\n right = crabs[(len(crabs)//2):]\n left_cost = sum([abs(c-(sum(left)/len(left))) for c in crab_list])\n right_cost = sum([abs(c-(sum(right)/len(right))) for c in crab_list])\n\n if left_cost == right_cost:\n return min(binary_search(left,crab_list), binary_search(right,crab_list))\n elif left_cost < right_cost:\n return binary_search(left,crab_list)\n else:\n return binary_search(right, crab_list) \n\nbest_loc2 = binary_search(crabs,crabs)\nlowest_cost2 = sum([abs(c-best_loc2) for c in crabs])\n\nprint(best_loc2, lowest_cost2)\n","repo_name":"MichaelMKKang/AdventOfCode","sub_path":"Day7/Day7.py","file_name":"Day7.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28236890533","text":"import logging\nimport math\nimport inspect\n\nimport torch\nfrom flame.channel import VAL_CH_STATE_SEND\nfrom flame.common.constants import DeviceType\nfrom flame.common.util import weights_to_device\nfrom flame.common.custom_abcmeta import abstract_attribute\nfrom flame.mode.composer import Composer\nfrom flame.mode.horizontal.syncfl.trainer import TAG_FETCH, TAG_UPLOAD\nfrom 
flame.mode.horizontal.syncfl.trainer import Trainer as BaseTrainer\nfrom flame.mode.message import MessageType\nfrom flame.mode.tasklet import Loop, Tasklet\n\nlogger = logging.getLogger(__name__)\n\n\nclass Trainer(BaseTrainer):\n    \"\"\"Oort Trainer implements an ML training role.\"\"\"\n\n    @abstract_attribute\n    def loss_fn(self):\n        \"\"\"Abstract attribute for loss function.\"\"\"\n\n    def _send_weights(self, tag: str) -> None:\n        \"\"\"\n        Send local model weights to the aggregator, and the statistical\n        utility information of a trainer for Oort algorithm.\n\n        This method is overridden from one in horizontal trainer\n        (..trainer).\n        \"\"\"\n        logger.debug(\"calling _send_weights\")\n        channel = self.cm.get_by_tag(tag)\n        if not channel:\n            logger.debug(f\"[_send_weights] channel not found with {tag}\")\n            return\n\n        # this call waits for at least one peer to join this channel\n        channel.await_join()\n\n        # one aggregator is sufficient\n        end = channel.one_end(VAL_CH_STATE_SEND)\n\n        self._update_weights()\n\n        delta_weights = self._delta_weights_fn(self.weights, self.prev_weights)\n\n        delta_weights = self.privacy.apply_dp_fn(delta_weights)\n\n        msg = {\n            MessageType.WEIGHTS: weights_to_device(delta_weights, DeviceType.CPU),\n            MessageType.DATASET_SIZE: self.dataset_size,\n            MessageType.MODEL_VERSION: self._round,\n            MessageType.STAT_UTILITY: self._stat_utility,\n        }\n        channel.send(end, msg)\n        logger.debug(\"sending weights done\")\n\n    def init_oort_variables(self) -> None:\n        \"\"\"Initialize Oort variables.\"\"\"\n        self._stat_utility = 0\n\n        if 'reduction' not in inspect.signature(self.loss_fn).parameters:\n            msg = \"Parameter 'reduction' not found in loss function \"\n            msg += f\"'{self.loss_fn.__name__}', which is required for Oort\"\n            raise TypeError(msg)\n\n    def oort_loss(\n        self,\n        output: torch.Tensor,\n        target: torch.Tensor,\n        epoch: int,\n        batch_idx: int,\n        **kwargs\n    ) -> torch.Tensor:\n        \"\"\"\n        Measure the loss of a trainer during training.\n        The trainer's statistical utility is measured at epoch 1.\n        \"\"\"\n        if epoch == 1 and batch_idx == 0:\n            if 'reduction' in kwargs.keys():\n                reduction = kwargs['reduction']\n            else:\n                reduction = 'mean'  # default reduction policy is mean\n            kwargs_wo_reduction = {key: value for key, value in kwargs.items() if key != 'reduction'}\n\n            criterion = self.loss_fn(reduction='none', **kwargs_wo_reduction)\n            loss_list = criterion(output, target)\n            self._stat_utility += torch.square(loss_list).sum()\n            \n            if reduction == 'mean':\n                loss = loss_list.mean()\n            elif reduction == 'sum':\n                loss = loss_list.sum()\n        else:\n            criterion = self.loss_fn(**kwargs)\n            loss = criterion(output, target)\n\n        return loss\n\n    def normalize_stat_utility(self, epoch) -> None:\n        \"\"\"\n        Normalize statistical utility of a trainer based on the size\n        of the trainer's dataset, at epoch 1.\n        \"\"\"\n        if epoch == 1:\n            self._stat_utility = len(self.train_loader.dataset) * math.sqrt(\n                self._stat_utility / len(self.train_loader.dataset)\n            )\n        else:\n            return\n\n    def reset_stat_utility(self) -> None:\n        \"\"\"Reset the trainer's statistical utility to zero.\"\"\"\n        self._stat_utility = 0\n\n    def compose(self) -> None:\n        \"\"\"Compose role with tasklets.\"\"\"\n        with Composer() as composer:\n            self.composer = composer\n\n            task_internal_init = Tasklet(\"\", self.internal_init)\n\n            task_init_oort_variables = Tasklet(\"\", self.init_oort_variables)\n\n            task_load_data = Tasklet(\"\", self.load_data)\n\n            task_init = Tasklet(\"\", self.initialize)\n\n            task_get = Tasklet(\"\", self.get, TAG_FETCH)\n\n            task_train = 
Tasklet(\"\", self.train)\n\n task_eval = Tasklet(\"\", self.evaluate)\n\n task_put = Tasklet(\"\", self.put, TAG_UPLOAD)\n\n task_save_metrics = Tasklet(\"\", self.save_metrics)\n\n # create a loop object with loop exit condition function\n loop = Loop(loop_check_fn=lambda: self._work_done)\n (\n task_internal_init\n >> task_init_oort_variables\n >> task_load_data\n >> task_init\n >> loop(\n task_get >> task_train >> task_eval >> task_put >> task_save_metrics\n )\n )\n","repo_name":"cisco-open/flame","sub_path":"lib/python/flame/mode/horizontal/oort/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":5130,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"37"} +{"seq_id":"17202403028","text":"from typing import Literal\n\nimport sepal_ui.sepalwidgets as sw\nfrom component.scripts.gwb_version import get_gwb_version\n\n\nclass CustomAppBar(sw.AppBar):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n version = get_gwb_version()\n\n version_holder = sw.Flex(\n class_=\"d-inline-flex justify-end\",\n children=[\n sw.Html(\n tag=\"span\",\n class_=\"text--secondary mr-1\",\n children=[f\"Running GWB: v.{version}\"],\n ),\n ],\n )\n\n self.children = self.children[:3] + [version_holder] + self.children[3:]\n","repo_name":"sepal-contrib/gwb","sub_path":"component/widget/custom_widgets.py","file_name":"custom_widgets.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25022224797","text":"\"\"\"\nhttp://118.190.20.162/view.page?gpid=T142\n差分+前缀和\n\"\"\"\nn, m, k = map(int, input().split())\nspots = [[i for i in map(int, input().split())] for j in range(n)]\nnum = 0\nfor i in range(n):\n if num < spots[i][0]:\n num = spots[i][0]+1\n spots[i][0], spots[i][1] = max(1, spots[i][0]-spots[i][1]+1), spots[i][0]\ntimes = []\nfor i in range(m):\n point = int(input())\n times.append(point+k)\ntemp = [0 for i in range(num*2)]\nfor spot in spots:\n temp[spot[0]] += 1\n temp[spot[1]+1] -= 1\nfor i in range(1, num+1):\n temp[i] += temp[i-1]\nfor time in times:\n print(temp[time])\n\n","repo_name":"Beerander/CCF-CSP-python-","sub_path":"20220302.py","file_name":"20220302.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"19630103660","text":"import numpy as np\nimport pandas as pd\nfrom pandas_profiling import ProfileReport\nimport csv\nimport pandas as pd\nfrom pymongo import MongoClient\n\n\ndef pandas_profile(uploaded_file, path_to_file, DB_HOST, DB_PORT, DB_Name):\n print(uploaded_file)\n print(path_to_file)\n csvFile = open(path_to_file + \"/\" + uploaded_file, 'r')\n reader = csv.DictReader(csvFile)\n with open(path_to_file + \"/\" + uploaded_file) as csv_file:\n csvReader = csv.reader(csv_file, delimiter=',')\n list_of_columns = []\n # get list of columns\n for row in csvReader:\n list_of_columns = row\n break\n\n # open connection\n COLLECTION_NAME = uploaded_file.split('.')[0]\n connection = MongoClient(DB_HOST, DB_PORT)\n collection = connection[DB_Name][COLLECTION_NAME]\n\n profile = ProfileReport(reader, title=\"Pandas Profiling Report\", 
explorative=True)\n","repo_name":"Hamza-Salman/Data2Int","sub_path":"html/pandas_profiling_file.py","file_name":"pandas_profiling_file.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
+{"seq_id":"28297168050","text":"# Imports\nimport pygame \t# graphics\nimport Tile \t# tiles\nimport sys \t\t# sys.exit()\nimport random \t# new tile generation\nimport time \t# time\n\ndef coord2px(x, y):\n\t\"\"\" Converts array coordinates to pixels \"\"\"\n\treturn (353+155*x,16+155*y)\n\ndef getTimeStr():\n\t\"\"\" Returns the time string to display \"\"\"\n\ttimeNow = time.time() - GameBeginTime[0]\n\t\n\thours = int(timeNow // 3600)\n\tmint = int(timeNow // 60)\n\tsec = int(timeNow % 60)\n\n\tif sec < 10:\n\t\tsec = '0' + str(sec)\n\tif mint < 10:\n\t\tmint = '0' + str(mint)\n\n\treturn str(hours) + \":\" + str(mint) + \":\" + str(sec)\n\ndef display(fallTime = 0):\n\t\"\"\" Displays everything according to the fall time \"\"\"\n\tscreen.blit(backgroundImg,(0,0))\n\tx = 0\n\twhile x < 4 :\n\t\ty = 0\n\t\twhile y < 4 :\n\t\t\tif tiles[x][y] != None :\n\t\t\t\ttiles[x][y].display(screen, coord2px(x,y), fallTime) # need to calc real X and Y\n\t\t\ty += 1\n\t\tx += 1\n\n\tif won():\n\t\tscreen.blit(wonImg, (0,0))\n\n\t# Score and Time\n\tdisplayText(str(score[0]), (1025,200))\n\tdisplayText(getTimeStr(), (1000, 300))\n\tpygame.display.flip()\n\ndef displayText(text, pos):\n\t\"\"\" displays text at the given coordinates \"\"\"\n\tlabel = myfont.render(text, 1, (0,0,0))\n\tscreen.blit(label, pos)\n\ndef won():\n\t\"\"\" Determines whether the player has won or not \"\"\"\n\tfor tile in getTileList():\n\t\tif (tile.getValue() >= 2048):\n\t\t\treturn True\n\treturn False\n\ndef loose():\n\t\"\"\" Displays the game-over screen \"\"\"\n\tdisplay()\n\tscreen.blit(pygame.image.load(\"../img/lost.png\"), (0,0))\n\tpygame.display.flip()\n\twhile True:\n\t\tevent = pygame.event.poll()\n\t\tif event.type == pygame.QUIT:\n\t\t\tsys.exit()\n\t\tif event.type == pygame.KEYDOWN:\n\t\t\tif event.key == 8:\n\t\t\t\treturn Back()\n\ndef newRandomTile():\n\t\"\"\" Spawns a new tile\n\tand checks whether the game is lost \"\"\"\n\t# random.randrange(0,4) => 0,1,2,3\n\tx = random.randrange(0,4)\n\ty = random.randrange(0,4)\n\twhile tiles[x][y] != None :\n\t\tx = random.randrange(0,4)\n\t\ty = random.randrange(0,4)\n\tif random.randrange(0,10) == 1:\n\t\ttiles[x][y] = Tile.Tile(4)\n\telse :\n\t\ttiles[x][y] = Tile.Tile(2)\n\n\tfor x in range(0,4):\n\t\tfor y in range(0,4):\n\t\t\tif ( tiles[x][y] == None):\n\t\t\t\treturn\n\t# GRID FULL, CHECK FOR LOSS\n\tfor x in range(0,3):\n\t\tfor y in range(0,4):\n\t\t\tif (tiles[x][y].getValue() == tiles[x+1][y].getValue()):\n\t\t\t\treturn\n\tfor x in range(0,4):\n\t\tfor y in range(0,3):\n\t\t\tif (tiles[x][y].getValue() == tiles[x][y+1].getValue()):\n\t\t\t\treturn\n\tloose()\n\ndef getFallDistance(arg):\n\t\"\"\" Returns the fall distance\n\tdepending on the tiles in the way \"\"\"\n\tresult = 0\n\tlast = None\n\tfor tile in arg:\n\t\tif (tile == None):\n\t\t\tresult += 1\n\t\telif (last == None):\n\t\t\tlast = tile\n\t\telif (tile.getValue() == last.getValue()):\n\t\t\tresult += 1\n\t\t\tlast = None\n\t\telse:\n\t\t\tlast = tile\n\treturn result\n\ndef getTileList():\n\t\"\"\" Returns the tiles\n\tas a one-dimensional list \"\"\"\n\tresult = []\n\tfor x in range(0,4):\n\t\tfor y in range(0,4):\n\t\t\tif tiles[x][y] != None:\n\t\t\t\tresult.append(tiles[x][y])\n\treturn result\n\ndef setLastGrid(grid):\n\t\"\"\" saves the given grid\n\tas the previous-turn grid \"\"\"\n\tfor x in range(0, 4):\n\t\tfor y in range(0,4):\n\t\t\tlastGrid[x][y] = grid[x][y]\n\ndef setGrid(grid, pos, t):\n\t\"\"\" Replaces the element of the given grid\n\tat the given index with the given element \"\"\"\n\tgrid[pos[0]][pos[1]] = t\n\ndef getGrid(grid, pos):\n\t\"\"\" returns the value at the given index\n\tof the given grid \"\"\"\n\treturn grid[pos[0]][pos[1]]\n\ndef setTiles(grid):\n\t\"\"\" Replaces the main grid \n\twith the given grid \"\"\"\n\tx = 0\n\twhile x < 4:\n\t\ty = 0\n\t\twhile y < 4:\n\t\t\ttiles[x][y] = grid[x][y]\n\t\t\ty += 1\n\t\tx += 1\n\ndef fall(direction):\n\t\"\"\" Makes the tiles fall \n\tin the given direction \"\"\"\n\tspeed = 25\n\ttilesOnWay = []\n\tnewList = [[None for x in range(4)] for y in range(4)]\n\tliste = getTileList()\n\tnewScore = score[0]\n\n\ti = 0\n\tx = 0\n\t\n\t\"\"\" Sets a speed and the final position\n \tof each tile individually \"\"\"\n\n\tif direction == \"right\":\n\t\tfor x in range(0,4):\n\t\t\tfor y in range(0,4):\n\t\t\t\tif tiles[x][y] == None:\n\t\t\t\t\tcontinue\n\t\t\t\ttilesOnWay = []\n\t\t\t\tfor i in range(x,4):\n\t\t\t\t\ttilesOnWay.append(tiles[i][y])\n\n\t\t\t\ttiles[x][y].finalPos((x + getFallDistance(tilesOnWay), y))\n\t\t\t\ttiles[x][y].setSpeed((getFallDistance(tilesOnWay), 0))\n\n\tif direction == \"down\":\n\t\tfor x in range(0,4):\n\t\t\tfor y in range(0,4):\n\t\t\t\tif tiles[x][y] == None:\n\t\t\t\t\tcontinue\n\t\t\t\ttilesOnWay = []\n\t\t\t\tfor i in range(y,4):\n\t\t\t\t\ttilesOnWay.append(tiles[x][i])\n\n\t\t\t\ttiles[x][y].finalPos((x, y + getFallDistance(tilesOnWay)))\n\t\t\t\ttiles[x][y].setSpeed((0, getFallDistance(tilesOnWay)))\n\n\tif direction == \"left\":\n\t\tfor x in range(3,-1,-1):\n\t\t\tfor y in range(0,4):\n\t\t\t\tif tiles[x][y] == None:\n\t\t\t\t\tcontinue\n\t\t\t\ttilesOnWay = []\n\t\t\t\tfor i in range(0,x+1):\n\t\t\t\t\ttilesOnWay.append(tiles[i][y])\n\n\t\t\t\ttiles[x][y].finalPos((x - getFallDistance(tilesOnWay), y))\n\t\t\t\ttiles[x][y].setSpeed((-getFallDistance(tilesOnWay), 0))\n\n\tif direction == \"up\":\n\t\tfor x in range(0,4):\n\t\t\tfor y in range(3,-1,-1):\n\t\t\t\tif tiles[x][y] == None:\n\t\t\t\t\tcontinue\n\t\t\t\ttilesOnWay = []\n\t\t\t\tfor i in range(0,y+1):\n\t\t\t\t\ttilesOnWay.append(tiles[x][i])\n\n\t\t\t\ttiles[x][y].finalPos((x, y - getFallDistance(tilesOnWay)))\n\t\t\t\ttiles[x][y].setSpeed((0, -getFallDistance(tilesOnWay)))\n\n\t\"\"\" Builds the new grid ( after the move ) \"\"\"\n\n\tlastScore[0] = score[0]\n\tfor t in liste:\n\t\tif (getGrid(newList, t.finalPos()) != None):\n\t\t\tif (getGrid(newList, t.finalPos()).getValue() == t.getValue()):\n\t\t\t\tsetGrid(newList, t.finalPos(), Tile.Tile(t.getValue() * 2))\n\t\t\t\tnewScore += t.getValue() * 2\n\t\t\telse:\n\t\t\t\tcontinue\n\t\telse:\n\t\t\tsetGrid(newList, t.finalPos(), t)\n\n\tif ( newList == tiles): # forbidden move\n\t\treturn False\n\n\tsetLastGrid(tiles)\n\n\t# Animation\n\ttime = 1\n\twhile time < 155 : \n\t\tdisplay(time)\n\t\ttime += speed\n\n\n\tsetTiles(newList)\n\tscore[0] = newScore\n\treturn True\n\ndef right():\n\treturn fall(\"right\")\ndef left():\n\treturn fall(\"left\")\ndef up():\n\treturn fall(\"up\")\ndef down():\n\treturn fall(\"down\")\n\ndef NewGame():\n\t\"\"\" Resets the game \"\"\"\n\tsetTiles([[None for x in range(4)] for y in range(4)])\n\tscore[0] = 0\n\tGameBeginTime[0] = time.time()\n\treturn True\n\t\ndef Back():\n\t\"\"\" Undoes the last move \"\"\"\n\tsetTiles(lastGrid)\n\tscore[0] = lastScore[0]\n\tdisplay()\n\treturn False\n\ndef play():\n\t\"\"\" Handles events \"\"\"\n\tkeyEvents = {\n\t\t\t\t \t273:up,\n\t\t\t\t\t274:down,\n\t\t\t\t\t276:left,\n\t\t\t\t\t275:right,\n\t\t\t\t\t13:NewGame,\n\t\t\t\t\t8:Back\n\t\t\t\t}\n\twhile True:\n\t\tdisplay()\n\t\tevent = pygame.event.poll()\n\t\tif event.type == pygame.QUIT:\n\t\t\tsys.exit()\n\t\tif event.type == pygame.KEYDOWN:\n\t\t\tif event.key in keyEvents:\n\t\t\t\treturn keyEvents[event.key]()\n\t\t\t\t\n\n################### START #######################\n\npygame.init()\nwidth, height = 1152,648\n\nscreen = pygame.display.set_mode((width, height))\npygame.display.set_caption('2048')\nmyfont = pygame.font.SysFont(\"monospace\", 30)\n\nwonImg = pygame.image.load(\"../img/won.png\")\nbackgroundImg = pygame.image.load(\"../img/background.png\")\ntiles = [[None for x in range(4)] for y in range(4)]\nlastGrid = [[None for x in range(4)] for y in range(4)]\nscore = [0]\nlastScore = [0]\n\nGameBeginTime = [time.time()]\n\nif (not pygame.display):\n\tprint(\"Error during window creation\")\n\n# MAIN LOOP #\nwhile True :\n\tnewRandomTile()\n\tdisplay()\n\twhile not play():\n\t\tpass","repo_name":"Yanis-F/ProjetISN","sub_path":"1/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
+{"seq_id":"36222112492","text":"from data import question_data\nfrom quiz_brain import QuizBrain\nfrom question_model import Question\n\nquestion_bank = []\n\nfor item in question_data:\n    question_bank.append(Question(item[\"text\"], item[\"answer\"]))\n\nquiz = QuizBrain(question_bank)\nwhile quiz.still_has_questions():\n    quiz.next_question()\n\nprint(f\"You've completed the quiz.\\nYour final score is: {quiz.score}/{quiz.q_number}\")\n","repo_name":"MWah99d/100-Days-of-Code","sub_path":"Day 17 Project - The Quiz Project/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} 
+{"seq_id":"22895943885","text":"# heights and positions are available as lists\n\n# Import numpy\nimport numpy as np\n\n# Convert positions and heights to numpy arrays: np_positions, np_heights\nnp_heights = np.array(heights)\nnp_positions = np.array(positions)\n\n# Heights of the goalkeepers: gk_heights\ngk_heights = np_heights[np_positions == 'GK']\n\n# Heights of the other players: other_heights\nother_heights = np_heights[np_positions != 'GK']\n\n# Print out the median height of goalkeepers. Replace 'None'\nprint(\"Median height of goalkeepers: \" + str(np.median(gk_heights)))\n\n# Print out the median height of other players. 
Replace 'None'\nprint(\"Median height of other players: \" + str(np.median(other_heights)))\n","repo_name":"eightynine89/machine-learning-research","sub_path":"introduction/numpy/python/13.statistics_2.py","file_name":"13.statistics_2.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"37"} +{"seq_id":"24782744923","text":"#!/usr/bin/env python \r\n\r\n#\tCopyright 2018 Battelle Energy Alliance, LLC\r\n\r\nimport argparse\r\nimport sys\r\nimport re\r\nimport textwrap\r\nfrom argparse import RawTextHelpFormatter\r\nimport os\r\n#import subprocess \r\nimport getpass\r\n\r\nsys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../lib'))\r\nimport myldap\r\n\r\n### Arguments ######################################################################################################################\r\nparser = argparse.ArgumentParser(\r\n description='Search LDAP accross the following attributes: cn, uidNumber, gidNumber, uid, telephoneNumber, employeeNumber, givenName, sn',\r\n epilog=textwrap.dedent('''\r\n Examples:\r\n\r\n lsearch USERNAME\r\n lsearch \"myers*spencer\"\r\n lsearch -s USERNAME\r\n lsearch -s spencer\r\n lsearch -n \"spencer myers\"\r\n lsearch -s USERNAME -u -e\r\n lsearch -b -s USERNAME\r\n lsearch -s USERNAME -w dns\r\n lsearch -g -s USERNAME\r\n lsearch '*'\r\n lsearch -a -s boise\r\n '''),\r\n formatter_class=RawTextHelpFormatter\r\n)\r\n\r\nsearch_group = parser.add_mutually_exclusive_group()\r\nbind_group = parser.add_mutually_exclusive_group()\r\nparser.add_argument('-a', '--active', action='store_true', help=\"Only print active users\")\r\nparser.add_argument('-b', '--brief', action='store_true', help=\"Turn on brief search or shortened version of output\")\r\nparser.add_argument('-c', '--csv', action='store_true', help=\"print query in csv format\")\r\nparser.add_argument('-e', '--email', action='store_true', help=\"Print email info for account renewal\")\r\nparser.add_argument('-w', '--where', help=\"Name of ldap server: default is ldap://LDAPSERVER\")\r\nsearch_group.add_argument('-s', '--search', help=\"String used for people search\")\r\nsearch_group.add_argument('-g', '--group', help=\"String used for group search\")\r\nsearch_group.add_argument('-n', '--name', help=\"First and last of person to search for in quotes: 'spencer myers'\")\r\nbind_group.add_argument('-u', '--user', action='store_true', help=\"Bind to LDAP as a user\")\r\nbind_group.add_argument('-m', '--manager', action='store_true', help=\"Bind to LDAP the manager\")\r\n\r\n\r\nif len(sys.argv)==1:\r\n parser.print_help()\r\n sys.exit(1)\r\n\r\nif len(sys.argv)==2:\r\n orig = sys.argv[1]\r\n sys.argv.append(orig)\r\n sys.argv[1] = '-s'\r\n \r\nargs = parser.parse_args()\r\n##########################################################################################################################################\r\nwhere = \"\"\r\nif not args.where:\r\n where = 'ldap://LDAPSERVER'\r\nelse:\r\n where = args.where\r\nldap_obj = myldap.MyLDAP()\r\nldap_server = ldap_obj.select_server(where)\r\n\r\nconn = None\r\nif args.manager:\r\n conn = ldap_obj.manager_login(host=ldap_server)\r\nelif args.user:\r\n conn = ldap_obj.user_login(host=ldap_server)\r\nelse:\r\n conn = ldap_obj.anonymous_login(host=ldap_server)\r\n\r\n\r\nif args.search:\r\n entries = ldap_obj.people_search(conn, args.search, active=args.active)\r\n if (args.csv):\r\n print(ldap_obj.print_csv(entries))\r\n elif (args.email):\r\n 
print(ldap_obj.print_email(entries))\r\n    else:\r\n        if args.brief:\r\n            ldap_obj.print_brief_person_entries(conn, entries)\r\n        else:\r\n            ldap_obj.print_person_entries(conn, entries)\r\nelif args.group:\r\n    entries = ldap_obj.group_search(conn, args.group)\r\n    ldap_obj.print_entries(entries)\r\nelif args.name:\r\n    name_search = re.search(r'(\\S+)\\s+(\\S+)', args.name)\r\n    first_name = name_search.group(1)\r\n    last_name = name_search.group(2)\r\n    entries = ldap_obj._MyLDAP__get_query(conn, '(cn=*{}*{}*)'.format(last_name, first_name))\r\n    if args.brief:\r\n        ldap_obj.print_brief_person_entries(conn, entries)\r\n    else:\r\n        ldap_obj.print_person_entries(conn, entries)\r\n\r\n\r\nconn.unbind_s()\r\n","repo_name":"IdahoLabUnsupported/LinuxSA","sub_path":"LDAP/scripts/lsearch.py","file_name":"lsearch.py","file_ext":"py","file_size_in_byte":3826,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} 
+{"seq_id":"31496371699","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 12 23:11:49 2020\n\n@author: elyvic\n\"\"\"\n\nimport tkinter as tk\nimport random as rand\n\ndef RollD4(): \n    val = int(d4Spinbox.get())\n    total = 0\n    \n    for x in range(val):\n        roll = rand.randint(1, 4)\n        total += roll\n    \n    d4TextBox.config(state = tk.NORMAL)\n    d4TextBox.delete(0, \"end\")\n    d4TextBox.insert(0, str(total))\n    d4TextBox.configure(state = tk.DISABLED)\n    \n    allRollText.config(state = tk.NORMAL)\n    allRollText.see(tk.END)\n    allRollText.insert(tk.END, d4Spinbox.get() + \" x D4 - \" + str(total) + \"\\n\")\n    allRollText.config(state = tk.DISABLED)\n\n\ndef RollD6():\n    val = int(d6Spinbox.get())\n    total = 0\n    \n    for x in range(val):\n        roll = rand.randint(1, 6)\n        total += roll\n    \n    d6TextBox.config(state = tk.NORMAL) \n    d6TextBox.delete(0, \"end\")\n    d6TextBox.insert(0, str(total))\n    d6TextBox.config(state = tk.DISABLED)\n    \n    allRollText.config(state = tk.NORMAL)\n    allRollText.see(tk.END) \n    allRollText.insert(tk.END, d6Spinbox.get() + \" x D6 - \" + str(total) + \"\\n\")\n    allRollText.config(state = tk.DISABLED)\n\n    \ndef RollD8():\n    val = int(d8Spinbox.get())\n    total = 0\n    \n    for x in range(val):\n        roll = rand.randint(1, 8)\n        total += roll\n    \n    d8TextBox.config(state = tk.NORMAL)\n    d8TextBox.delete(0, \"end\")\n    d8TextBox.insert(0, str(total))\n    d8TextBox.config(state = tk.DISABLED)\n    \n    allRollText.config(state = tk.NORMAL)\n    allRollText.see(tk.END)\n    allRollText.insert(tk.END, d8Spinbox.get() + \" x D8 - \" + str(total) + \"\\n\")\n    allRollText.config(state = tk.DISABLED)\n\n\ndef RollD10():\n    val = int(d10Spinbox.get())\n    total = 0\n    \n    for x in range(val):\n        roll = rand.randint(1, 10)\n        total += roll \n    \n    d10TextBox.config(state = tk.NORMAL)\n    d10TextBox.delete(0, \"end\")\n    d10TextBox.insert(0, str(total))\n    d10TextBox.config(state = tk.DISABLED)\n    \n    allRollText.config(state = tk.NORMAL)\n    allRollText.see(tk.END)\n    allRollText.insert(tk.END, d10Spinbox.get() + \" x D10 - \" + str(total) + \"\\n\")\n    allRollText.config(state = tk.DISABLED)\n\n    \ndef RollD12():\n    val = int(d12Spinbox.get())\n    total = 0\n    \n    for x in range(val):\n        roll = rand.randint(1, 12)\n        total += roll\n    \n    d12TextBox.config(state = tk.NORMAL)\n    d12TextBox.delete(0, \"end\")\n    d12TextBox.insert(0, str(total)) \n    d12TextBox.config(state = tk.DISABLED)\n    \n    allRollText.config(state = tk.NORMAL)\n    allRollText.see(tk.END)\n    allRollText.insert(tk.END, d12Spinbox.get() + \" x D12 - \" + str(total) + \"\\n\")\n    allRollText.config(state = tk.DISABLED)\n\n\ndef RollD20():\n    val = int(d20Spinbox.get())\n    
total = 0\n \n for x in range(val):\n roll = rand.randint(1, 20)\n total += roll\n \n d20TextBox.config(state = tk.NORMAL)\n d20TextBox.delete(0, \"end\")\n d20TextBox.insert(0, str(total)) \n d20TextBox.config(state = tk.DISABLED)\n \n allRollText.config(state = tk.NORMAL)\n allRollText.insert(tk.END, d20Spinbox.get() + \" x D20 - \" + str(total) + \"\\n\")\n allRollText.see(tk.END)\n allRollText.config(state = tk.DISABLED)\n\n \ndef RollD100():\n val = int(d100Spinbox.get())\n total = 0\n \n for x in range(val):\n roll = rand.randint(1, 100)\n total += roll\n \n d100TextBox.config(state = tk.NORMAL)\n d100TextBox.delete(0, \"end\")\n d100TextBox.insert(0, str(total))\n d100TextBox.config(state = tk.DISABLED)\n \n allRollText.config(state = tk.NORMAL)\n allRollText.insert(tk.END, d100Spinbox.get() + \" x D100 - \" + str(total) + \"\\n\")\n allRollText.see(tk.END)\n allRollText.configure(state = tk.DISABLED)\n\n\n\ndef ClearText():\n allRollText.config(state = tk.NORMAL)\n allRollText.delete('1.0', tk.END)\n allRollText.config(state = tk.DISABLED)\n\nroot = tk.Tk();\n\nrightFrame = tk.Frame(root, height = 300, width = 120, padx = 5, pady = 5)\nleftFrame = tk.Frame(root, height = 300, width = 120, padx = 5, pady = 5)\n\n\n\nroot.title(\"Dice Roller\")\nroot.resizable(True, True)\n\n\n#all dice labels and buttons\nd4Label = tk.Label(leftFrame, text = \"D4\")\nd4TextBox = tk.Entry(leftFrame, width = 4)\nd4Button = tk.Button(leftFrame, text = \"Roll\", command = RollD4)\nd4Spinbox = tk.Spinbox(leftFrame, from_ = 1, to = 10, width = 4)\n\nd6Label = tk.Label(leftFrame, text = \"D6\")\nd6TextBox = tk.Entry(leftFrame, width = 4)\nd6Button = tk.Button(leftFrame, text = \"Roll\", command = RollD6)\nd6Spinbox = tk.Spinbox(leftFrame, from_ = 1, to = 10, width = 4)\n\nd8Label = tk.Label(leftFrame, text = \"D8\")\nd8TextBox = tk.Entry(leftFrame, width = 4)\nd8Button = tk.Button(leftFrame, text = \"Roll\", command = RollD8)\nd8Spinbox = tk.Spinbox(leftFrame, from_ = 1, to = 10, width = 4)\n\nd10Label = tk.Label(leftFrame, text = \"D10\")\nd10TextBox = tk.Entry(leftFrame, width = 4)\nd10Button = tk.Button(leftFrame, text = \"Roll\", command = RollD10)\nd10Spinbox = tk.Spinbox(leftFrame, from_ = 1, to = 10, width = 4)\n\nd12Label = tk.Label(leftFrame, text = \"D12\")\nd12TextBox = tk.Entry(leftFrame, width = 4)\nd12Button = tk.Button(leftFrame, text = \"Roll\", command = RollD12)\nd12Spinbox = tk.Spinbox(leftFrame, from_ = 1, to = 10, width = 4)\n\nd20Label = tk.Label(leftFrame, text = \"D20\")\nd20TextBox = tk.Entry(leftFrame, width = 4)\nd20Button = tk.Button(leftFrame, text = \"Roll\", command = RollD20)\nd20Spinbox = tk.Spinbox(leftFrame, from_ = 1, to = 10, width = 4)\n\nd100Label = tk.Label(leftFrame, text = \"D100\")\nd100TextBox = tk.Entry(leftFrame, width = 4)\nd100Button = tk.Button(leftFrame, text = \"Roll\", command = RollD100)\nd100Spinbox = tk.Spinbox(leftFrame, from_ = 1, to = 10, width = 4)\n\nallRollText = tk.Text(rightFrame, width = 16, height = 15, padx = 3, pady = 2)\nallRollClearButton = tk.Button(rightFrame, text = \"Clear\", command = ClearText, padx = 3 ,pady = 3)\n\n#make all entry and text not editable\nd4TextBox.config(state = tk.DISABLED)\nd6TextBox.config(state = tk.DISABLED)\nd8TextBox.config(state = tk.DISABLED)\nd10TextBox.config(state = tk.DISABLED)\nd12TextBox.config(state = tk.DISABLED)\nd20TextBox.config(state = tk.DISABLED)\nd100TextBox.config(state = tk.DISABLED)\nallRollText.config(state = tk.DISABLED)\n\n\n#labels and buttons placement\nrightFrame.grid(column = 1, 
row = 0)\nleftFrame.grid(column = 0, row = 0)\n\nd4Label.grid(row = 0, sticky = tk.W)\nd4Spinbox.grid(column = 1, row = 0)\nd4TextBox.grid(column = 2, row = 0)\nd4Button.grid(column = 3, row = 0, padx = 3, pady = 1)\n\nd6Label.grid(row = 1, sticky = tk.W)\nd6Spinbox.grid(column = 1, row = 1)\nd6TextBox.grid(column = 2, row = 1)\nd6Button.grid(column = 3, row = 1, pady = 1)\n\nd8Label.grid(row = 2, sticky = tk.W)\nd8Spinbox.grid(column = 1, row = 2)\nd8TextBox.grid(column = 2, row = 2)\nd8Button.grid(column = 3, row = 2, pady = 1)\n\nd10Label.grid(row = 3, sticky = tk.W)\nd10Spinbox.grid(column = 1, row = 3)\nd10TextBox.grid(column = 2, row = 3)\nd10Button.grid(column = 3, row = 3, pady = 1)\n\nd12Label.grid(row = 4, sticky = tk.W)\nd12Spinbox.grid(column = 1, row = 4)\nd12TextBox.grid(column = 2, row = 4)\nd12Button.grid(column = 3, row = 4, pady = 1)\n\nd20Label.grid(row = 5, sticky = tk.W)\nd20Spinbox.grid(column = 1, row = 5)\nd20TextBox.grid(column = 2, row = 5)\nd20Button.grid(column = 3, row = 5, pady = 1)\n\nd100Label.grid(row = 6, sticky = tk.W)\nd100Spinbox.grid(column = 1, row = 6)\nd100TextBox.grid(column = 2, row = 6)\nd100Button.grid(column = 3, row = 6, pady = 1)\n\n\n\nallRollText.grid(column = 0, row = 0, columnspan = 10, rowspan = 10, sticky = tk.E)\nallRollClearButton.grid(column = 9, row = 10, sticky = tk.E) \n\n\nroot.mainloop()","repo_name":"Elyvic/DND","sub_path":"DND.py","file_name":"DND.py","file_ext":"py","file_size_in_byte":7589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
+{"seq_id":"7324884333","text":"# import math\n\n# print(dir(math))\n# print(help(math))\n\n# import os\n#HOW TO PRINT VARIABLE NAME\n# print(dir(os))\n# print(help(os))\n# name = \"Embryo\"\n# HOW TO PRINT FUNCTION WHILE IMPORTING\n# def Embryo():\n#     return 7\n\n#HOW TO PRINT CLASS NAME.\n# class MyEmbryo:\n#     def __init__(self):\n#         self.name = \"embryo\"\n\n#     def Embryo(self):\n#         return 7\n\n# y = MyEmbryo\n\n# HOW TO BUILD OUR OWN MODULES\nclass MyEmbryo:\n    def __init__(self):\n        self.name = \"embryo\"\n\n    def add(self , x, y):\n        \"\"\"return add of x and y \"\"\"\n        return x + y\n\n    def sub(self , x, y):\n        \"\"\"return sub of x and y\"\"\"\n        return x - y\n\n    def mul(self , x, y):\n        \"\"\"return mul of x and y\"\"\"\n        return x * y\n\n    def mud(self, x, y):\n        \"\"\"return mod of x and y \"\"\"\n        return x % y\n\n    def exp(self, x, y):\n        \"\"\"return exp of x and y \"\"\"\n        return x ** y\n\n    def fac(self, x):\n        \"\"\"return fac of x\"\"\"\n        if x < 0:\n            return f'Operation not valid for negative int'\n        elif x == 0:\n            return 1\n        else:\n            fac = 1\n            for number in range(1, x+1):\n                fac *= number\n            return fac\n    \n    def perm(self, x, y):\n        \"\"\"return perm of x and y\"\"\"\n        if x < y:\n            return f'{x} must be greater than {y}'\n        elif x < 0 or y < 0:\n            return f'Operation not valid for negative int'\n        elif x == 0:\n            return 1\n        else:\n            fac = 1\n            for number in range(1, x+1):\n                fac *= number\n            fac1 = 1\n            for other_number in range(1, (x-y)+1):\n                fac1 *= other_number\n            perm = fac / fac1\n            return perm\n\n    def comm(self, x, y):\n        \"\"\"return comm of x and y\"\"\"\n        if x < y:\n            return f'{x} must be greater than {y}'\n        elif x < 0 or y < 0:\n            return f'Operation not valid for negative int'\n        elif x == 0:\n            return 1\n        else:\n            fac = 1\n            for number in range(1, x+1):\n                fac *= number\n            fac1 = 1\n            for other_number in range(1, (x-y)+1):\n                fac1 *= other_number\n            fac2 = 1\n            for another_number in range(1, y+1):\n                fac2 *= another_number\n            comm = fac / (fac1 * fac2)\n            return 
comm\n\n\n\n\n\n\n\n\n\n","repo_name":"codingembryo/allproject","sub_path":"MODULE/modulee.py","file_name":"modulee.py","file_ext":"py","file_size_in_byte":2436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20196130932","text":"\ndef solve(N, D, lights):\n\n    for loc, start, g, r in lights:\n        total = g+r\n        if (loc-start)%total > g or loc-start < 0: return 'NO'\n    return 'YES'\nif __name__ == '__main__':\n    ans = []\n    N, D = map(int, input().split())\n    lights = []\n    for _ in range(N):\n        lights.append(list(map(int, input().split())))\n    print(solve(N, D, lights))","repo_name":"miruts-xz/competitive-programming","sub_path":"contests/codeforces/week-1/Lights in the Morning.py","file_name":"Lights in the Morning.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18173791618","text":"from multiprocessing import context\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render, redirect, HttpResponse, reverse, get_object_or_404\nfrom django.contrib.auth.decorators import login_required\nfrom . import models\nfrom .forms import postartikel,postkategori, postaduan\nfrom django.views.generic import ListView\nfrom django.views.generic.edit import DeleteView, CreateView\nfrom django.urls import reverse_lazy\n\nfrom django.template.loader import get_template\nfrom xhtml2pdf import pisa\n\n# print pdf\ndef cetak_surat(request, *args, **kwargs):\n    pk = kwargs.get('pk')\n    aduan = get_object_or_404(models.aduan, pk=pk)\n\n    template_path = 'admin/surat.html'\n    context = {'aduan': aduan}\n    # Create a Django response object, and specify content_type as pdf\n    response = HttpResponse(content_type='application/pdf')\n    # for download\n    # response['Content-Disposition'] = 'attachment; filename=\"report.pdf\"'\n    # for display\n    response['Content-Disposition'] = 'filename=\"report.pdf\"'\n    # find the template and render it.\n    template = get_template(template_path)\n    html = template.render(context)\n\n    # create a pdf\n    pisa_status = pisa.CreatePDF(\n        html, dest=response)\n    # if error then show some funny view\n    if pisa_status.err:\n        return HttpResponse('We had some errors <pre>' + html + '</pre>')\n    return response\n\n# Create your views here.\ndef dashboard_admin(request): \n    if request.user.is_staff == 1:\n        posts = models.aduan.objects.all()\n        context = {\n            'posts':posts,\n            'total_pengaduan':len(models.aduan.objects.all()),\n            'total_konfirmasi':len(models.aduan.objects.filter(status = 'konfirmasi')),\n            'total_proses':len(models.aduan.objects.filter(status = 'proses')),\n            'total_tolak':len(models.aduan.objects.filter(status = 'tolak')),\n        }\n        return render(request, 'admin/dashboard.html',context)\n    else:\n        return render(request,'eror_404.html')\n\ndef berita(request): \n    if request.user.is_staff == 1:\n        posts = models.artikel.objects.all()\n        context={\n            'page_title': 'berita',\n            'posts': posts,\n        } \n        return render(request, 'admin/berita.html', context)\n    else:\n        return render(request,'eror_404.html')\n\ndef detail_berita(request, artikelslug):\n    if request.user.is_staff == 1:\n        posts = models.artikel.objects.get(slug=artikelslug)\n        context={\n            'page_title':'detail berita',\n            'posts':posts,\n        }\n        return render(request, 'admin/detail_berita.html', context)\n    else:\n        return render(request,'eror_404.html')\n\ndef tambah_berita(request): \n    if request.user.is_staff == 1:\n        post_artikel = postartikel() \n        if request.method =='POST':\n            post_artikel = postartikel(request.POST, request.FILES) \n            if post_artikel.is_valid(): \n                post_artikel.save()\n                return redirect ('berita_admin') \n        context={\n            'page_title':'tambah berita',\n            'post_artikel':post_artikel, \n        } \n        return render(request, 'admin/tambah_berita.html', context)\n    else:\n        return render(request,'eror_404.html')\n\ndef edit_berita(request, id):\n    if request.user.is_staff == 1:\n        berita = models.artikel.objects.get(id=id)\n        data = {\n            'judul':berita.judul,\n            'isi':berita.isi,\n            'sumber':berita.sumber,\n            'gambar':berita.gambar,\n        }\n        form = postartikel(request.POST or None, request.FILES or None, initial=data, instance=berita)\n        if request.method =='POST':\n            if form.is_valid(): \n                form.save()\n                return redirect ('berita_admin') \n        context = { \n            'form':form,\n            'berita':berita,\n        }\n        return render(request, 'admin/edit_berita.html', context)\n    else:\n        return render(request,'eror_404.html')\n\ndef hapusberita(request, delete_id): \n    models.artikel.objects.filter(id = delete_id).delete() \n    return redirect ('berita_admin')\n\ndef kategori(request):\n    if request.user.is_staff == 1:\n        posts = models.kategori.objects.all()\n        post_kategori = postkategori() \n        if request.method =='POST':\n            post_kategori = postkategori(request.POST) \n            if post_kategori.is_valid(): \n                post_kategori.save()\n                return redirect ('kategori_admin') \n        context={\n            'page_title':'kategori',\n            'post_kategori':post_kategori, \n            'posts':posts, \n        } \n        return render(request, 'admin/kategori.html', context)\n    else:\n        return render(request,'eror_404.html')\n\ndef edit_kategori(request, id):\n    if request.user.is_staff == 1:\n        kategori = models.kategori.objects.get(id = id)\n        data = {\n            'nama':kategori.nama,\n            'sifat':kategori.sifat,\n            'jenis':kategori.jenis, \n        }\n        form = postkategori(request.POST or None, initial=data, instance=kategori)\n        if request.method == 'POST':\n            if form.is_valid():\n                form.save()\n                return redirect('kategori_admin')\n        context = {\n            'form':form,\n            'kategori':kategori,\n        }\n        return render(request,'admin/edit_kategori.html', context)\n    else:\n        return render(request,'eror_404.html')\n\ndef hapuskategori(request, delete_id):\n    models.kategori.objects.filter(id = delete_id).delete()\n    return redirect('kategori_admin')\n\ndef pengaduan(request):\n    if 
request.user.is_staff == 1:\n posts = models.aduan.objects.all()\n context = {\n 'page_title': 'pengaduan',\n 'posts': posts,\n }\n return render(request, 'admin/pengaduan.html', context)\n else:\n return render(request,'eror_404.html')\n\ndef detail_pengaduan(request, id):\n if request.user.is_staff == 1:\n posts = models.aduan.objects.get(id =id)\n data = {\n 'nama':posts.nama,\n 'jalan':posts.jalan,\n 'kecamatan':posts.kecamatan,\n 'keterangan':posts.keterangan,\n 'status':posts.status,\n }\n form = postaduan(request.POST or None, request.FILES or None, initial=data, instance=posts)\n if request.method =='POST':\n if form.is_valid(): \n form.save()\n return redirect ('pengaduan_admin') \n context={\n 'page_title':'detail pengaduan',\n 'form':form,\n 'posts':posts,\n }\n return render(request, 'admin/detail_pengaduan.html',context)\n else:\n return render(request,'eror_404.html') \n\ndef edit_pengaduan(request, id):\n if request.user.is_staff == 1:\n posts = models.aduan.objects.get(id =id)\n data = {\n 'nama':posts.nama,\n 'jalan':posts.jalan,\n 'kecamatan':posts.kecamatan,\n 'keterangan':posts.keterangan,\n 'status':posts.status,\n }\n form = postaduan(request.POST or None, request.FILES or None, initial=data, instance=posts)\n if request.method =='POST':\n if form.is_valid(): \n form.save()\n return redirect ('pengaduan_admin') \n context={\n 'page_title':'edit pengaduan',\n 'form':form,\n 'posts':posts,\n }\n return render(request, 'admin/edit_pengaduan.html',context)\n else:\n return render(request,'eror_404.html')\n\ndef hapusaduan(request, delete_id): \n models.aduan.objects.filter(id = delete_id).delete() \n return redirect ('tolak') \n\ndef konfirmasi(request):\n if request.user.is_staff == 1:\n posts = models.aduan.objects.all()\n context = {\n 'page_title': 'konfirmasi',\n 'posts': posts,\n }\n return render(request, 'admin/konfirmasi.html', context)\n else:\n return render(request,'eror_404.html') \n\ndef detail_konfirmasi(request):\n context={}\n if request.user.is_staff == 1:\n return render(request, 'admin/detail_konfirmasi.html')\n else:\n return render(request,'eror_404.html')\n\ndef tolak(request):\n if request.user.is_staff == 1:\n posts = models.aduan.objects.all()\n context = {\n 'posts':posts\n }\n return render(request, 'admin/tolak.html', context)\n else:\n return render(request, 'eror_404.html')","repo_name":"alzahfariski/simpegal","sub_path":"petugas/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1438464865","text":"'''\n@article{IINet,\n title={Intra-Inter View Interaction Network for Light Field Image Super-Resolution},\n author={Liu, Gaosheng and Yue, Huanjing and Wu, Jiamin and Yang, Jingyu},\n journal={IEEE Transactions on Multimedia},\n year={2021},\n publisher={IEEE}\n}\n'''\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom math import sqrt\n\n\nclass get_model(nn.Module):\n def __init__(self, args):\n super(get_model, self).__init__()\n n_blocks, channel = 4, 32\n self.factor = args.scale_factor\n self.angRes = args.angRes_in\n self.IntraFeaExtract = FeaExtract(channel)\n self.InterFeaExtract = Extract_inter_fea(channel, self.angRes)\n self.MCB_1 = MCB(channel, self.angRes)\n self.MCB_2 = MCB(channel, self.angRes)\n self.MCB_3 = MCB(channel, self.angRes)\n self.MCB_4 = MCB(channel, self.angRes)\n self.Interact_1 = Intra_inter_FUM(channel, self.angRes)\n self.Interact_2 = 
","repo_name":"alzahfariski/simpegal","sub_path":"petugas/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1438464865","text":"'''\n@article{IINet,\n  title={Intra-Inter View Interaction Network for Light Field Image Super-Resolution},\n  author={Liu, Gaosheng and Yue, Huanjing and Wu, Jiamin and Yang, Jingyu},\n  journal={IEEE Transactions on Multimedia},\n  year={2021},\n  publisher={IEEE}\n}\n'''\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom math import sqrt\n\n\nclass get_model(nn.Module):\n    def __init__(self, args):\n        super(get_model, self).__init__()\n        n_blocks, channel = 4, 32\n        self.factor = args.scale_factor\n        self.angRes = args.angRes_in\n        self.IntraFeaExtract = FeaExtract(channel)\n        self.InterFeaExtract = Extract_inter_fea(channel, self.angRes)\n        self.MCB_1 = MCB(channel, self.angRes)\n        self.MCB_2 = MCB(channel, self.angRes)\n        self.MCB_3 = MCB(channel, self.angRes)\n        self.MCB_4 = MCB(channel, self.angRes)\n        self.Interact_1 = Intra_inter_FUM(channel, self.angRes)\n        self.Interact_2 = Intra_inter_FUM(channel, self.angRes)\n        self.Interact_3 = Intra_inter_FUM(channel, self.angRes)\n        self.Interact_4 = Intra_inter_FUM(channel, self.angRes, last=True)\n\n        self.FBM = FBM(channel*4)\n        self.UpSample = Upsample(channel, self.factor)\n\n    def forward(self, x, info=None):\n        x_multi = LFsplit(x, self.angRes)\n\n        intra_fea_initial = self.IntraFeaExtract(x_multi)\n        inter_fea_initial = self.InterFeaExtract(x_multi)\n\n        b, n, c, h, w = x_multi.shape\n        x_multi = x_multi.contiguous().view(b*n, -1, h, w)\n        x_upscale = F.interpolate(x_multi, scale_factor=self.factor, mode='bicubic', align_corners=False)\n        _, c, h, w = x_upscale.shape\n        x_upscale = x_upscale.unsqueeze(1).contiguous().view(b, -1, c, h, w)\n\n        intra_fea_0, inter_fea_1 = self.Interact_1(intra_fea_initial, inter_fea_initial)\n        intra_fea_0 = self.MCB_1(intra_fea_0)\n\n        intra_fea_1, inter_fea_2 = self.Interact_2(intra_fea_0.permute(0,2,1,3,4), inter_fea_1)\n        intra_fea_1 = self.MCB_2(intra_fea_1)\n\n        intra_fea_2, inter_fea_3 = self.Interact_3(intra_fea_1.permute(0,2,1,3,4), inter_fea_2)\n        intra_fea_2 = self.MCB_3(intra_fea_2)\n\n        intra_fea_3, _ = self.Interact_4(intra_fea_2.permute(0,2,1,3,4), inter_fea_3)\n        intra_fea_3 = self.MCB_4(intra_fea_3)\n\n        intra_fea = torch.cat((intra_fea_0, intra_fea_1, intra_fea_2, intra_fea_3), 1).permute(0,2,1,3,4)\n\n        intra_fea = self.FBM(intra_fea)\n        out_sv = self.UpSample(intra_fea)\n\n        out = FormOutput(out_sv) + FormOutput(x_upscale)\n\n        return out\n\n\nclass Upsample(nn.Module):\n    def __init__(self, channel, factor):\n        super(Upsample, self).__init__()\n        self.upsp = nn.Sequential(\n            nn.Conv2d(4*channel, channel * factor * factor, kernel_size=1, stride=1, padding=0, bias=False),\n            nn.PixelShuffle(factor),\n            nn.Conv2d(channel, 1, kernel_size=1, stride=1, padding=0, bias=False))\n\n    def forward(self, x):\n        b, n, c, h, w = x.shape\n        x = x.contiguous().view(b*n, -1, h, w)\n        out = self.upsp(x)\n        _, _, H, W = out.shape\n        out = out.contiguous().view(b, n, -1, H, W)\n        return out\n\n\nclass FeaExtract(nn.Module):\n    def __init__(self, channel):\n        super(FeaExtract, self).__init__()\n        self.FEconv = nn.Conv2d(1, channel, kernel_size=1, stride=1, padding=0, bias=False)\n        self.FERB_1 = ResASPP(channel)\n        self.FERB_2 = RB(channel)\n        self.FERB_3 = ResASPP(channel)\n        self.FERB_4 = RB(channel)\n\n    def forward(self, x_mv):\n        b, n, r, h, w = x_mv.shape\n        x_mv = x_mv.contiguous().view(b*n, -1, h, w)\n        intra_fea_0 = self.FEconv(x_mv)\n        intra_fea = self.FERB_1(intra_fea_0)\n        intra_fea = self.FERB_2(intra_fea)\n        intra_fea = self.FERB_3(intra_fea)\n        intra_fea = self.FERB_4(intra_fea)\n        _, c, h, w = intra_fea.shape\n        intra_fea = intra_fea.unsqueeze(1).contiguous().view(b, -1, c, h, w)  # intra_fea: B, N, C, H, W\n\n        return intra_fea\n\n\nclass Extract_inter_fea(nn.Module):\n    def __init__(self, channel, angRes):\n        super(Extract_inter_fea, self).__init__()\n        self.FEconv = nn.Conv2d(angRes*angRes, channel, kernel_size=1, stride=1, padding=0, bias=False)\n        self.FERB_1 = ResASPP(channel)\n        self.FERB_2 = RB(channel)\n        self.FERB_3 = ResASPP(channel)\n        self.FERB_4 = RB(channel)\n\n    def forward(self, x_mv):\n        b, n, r, h, w = x_mv.shape\n        x_mv = x_mv.contiguous().view(b, -1, h, w)\n        inter_fea_0 = self.FEconv(x_mv)\n        inter_fea = self.FERB_1(inter_fea_0)\n        inter_fea = self.FERB_2(inter_fea)\n        inter_fea = self.FERB_3(inter_fea)\n        inter_fea = self.FERB_4(inter_fea)\n        return inter_fea\n\n\nclass Intra_inter_FUM(nn.Module):\n    '''\n    Inter-assist-intra feature updating module & intra-assist-inter feature updating module\n    '''\n    def __init__(self, channel, angRes, last=False):\n        super(Intra_inter_FUM, self).__init__()\n        self.conv_fusing = nn.Conv2d(channel*2, channel, kernel_size=1, stride=1, padding=0)\n        self.conv_sharing = nn.Conv2d(angRes*angRes*channel, angRes*angRes*channel, kernel_size=1, stride=1, padding=0)\n        self.last = last\n\n        if not last:\n            self.conv_f1 = nn.Conv2d(angRes*angRes*channel, channel, kernel_size=1, stride=1, padding=0)\n            self.conv_f2 = nn.Conv2d(2*channel, channel, kernel_size=1, stride=1, padding=0)\n\n        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)\n\n    def forward(self, intra_fea, inter_fea):\n        b, n, c, h, w = intra_fea.shape\n\n        ## update the intra-view (per-view) features with the shared inter-view feature\n        upda_intra_feas = []\n        for i in range(n):\n            current_sv = intra_fea[:, i, :, :, :].contiguous()\n            buffer = torch.cat((current_sv, inter_fea), dim=1)\n            buffer = self.lrelu(self.conv_fusing(buffer))\n            upda_intra_feas.append(buffer)\n        upda_intra_feas = torch.cat(upda_intra_feas, dim=1)\n        fuse_fea = self.conv_sharing(upda_intra_feas)\n\n        ## update the inter-view feature with the fused intra-view features\n        if not self.last:\n            fea_c = self.conv_f1(upda_intra_feas)\n            out_c = self.conv_f2(torch.cat((fea_c, inter_fea), 1))\n        else:\n            out_c = inter_fea\n\n        fuse_fea = fuse_fea.unsqueeze(1).contiguous().view(b, -1, c, h, w).permute(0,2,1,3,4)\n\n        return fuse_fea, out_c\n\n\nclass MCB(nn.Module):\n    '''\n    Multi-view Context Block\n    '''\n    def __init__(self, channels, angRes):\n        super(MCB, self).__init__()\n        self.prelu1 = nn.LeakyReLU(0.02, inplace=True)\n        self.conv1 = nn.Conv3d(channels, channels, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1), bias=False)\n        self.ASPP = D3ResASPP(channels)\n        self.conv2 = nn.Conv3d(channels, channels, kernel_size=(1, 1, 1), stride=(1, 1, 1), padding=(0, 0, 0), bias=False)\n\n    def forward(self, x_init):\n        b, c, n, h, w = x_init.shape\n        x = self.conv1(x_init)\n        buffer = self.prelu1(x)\n        buffer = self.ASPP(buffer)\n        x = self.conv2(buffer) + x_init\n        return x\n\n\nclass RB(nn.Module):\n    '''\n    Residual Block\n    '''\n    def __init__(self, channel):\n        super(RB, self).__init__()\n        self.conv01 = nn.Conv2d(channel, channel, kernel_size=3, stride=1, padding=1)\n        self.lrelu = nn.LeakyReLU(0.1, inplace=True)\n        self.conv02 = nn.Conv2d(channel, channel, kernel_size=3, stride=1, padding=1)\n\n    def forward(self, x):\n        buffer = self.conv01(x)\n        buffer = self.lrelu(buffer)\n        buffer = self.conv02(buffer)\n        return buffer + x\n\n\nclass SELayer(nn.Module):\n    '''\n    Channel Attention\n    '''\n    def __init__(self, out_ch, g=16):\n        super(SELayer, self).__init__()\n        self.att_c = nn.Sequential(\n            nn.Conv2d(out_ch, out_ch//g, 1, 1, 0),\n            nn.ReLU(inplace=True),\n            nn.Conv2d(out_ch//g, out_ch, 1, 1, 0),\n            nn.Sigmoid()\n        )\n\n    def forward(self, fm):\n        # squeeze-and-excitation style channel attention\n        fm_pool = F.adaptive_avg_pool2d(fm, (1, 1))\n        att = self.att_c(fm_pool)\n        fm = fm * att\n        return fm\n\n\nclass FBM(nn.Module):\n    '''\n    Feature Blending\n    '''\n    def __init__(self, channel):\n        super(FBM, self).__init__()\n        self.FERB_1 = RB(channel)\n        self.FERB_2 = RB(channel)\n        self.FERB_3 = RB(channel)\n        self.FERB_4 = RB(channel)\n        self.att1 = SELayer(channel)\n        self.att2 = SELayer(channel)\n        self.att3 = SELayer(channel)\n        self.att4 = SELayer(channel)\n\n    def forward(self, x):\n        b, n, c, h, w = x.shape\n        buffer_init = x.contiguous().view(b*n, -1, h, w)\n        buffer_1 = self.att1(self.FERB_1(buffer_init))\n        buffer_2 = self.att2(self.FERB_2(buffer_1))\n        buffer_3 = self.att3(self.FERB_3(buffer_2))\n
        buffer_4 = self.att4(self.FERB_4(buffer_3))\n        buffer = buffer_4.contiguous().view(b, n, -1, h, w)\n        return buffer\n\ndef ChannelSplit(x):\n    # splits the channels into a 1/4 : 3/4 pair (not referenced elsewhere in this file)\n    _, C, _, _ = x.shape\n    c = C//4\n    output_1 = x[:, :c, :, :]\n    output_2 = x[:, c:, :, :]\n    return output_1, output_2\n\n\nclass ResASPP(nn.Module):\n    def __init__(self, channel):\n        super(ResASPP, self).__init__()\n        self.conv_1 = nn.Sequential(nn.Conv2d(channel, channel, kernel_size=3, stride=1, padding=1,\n                                              dilation=1, bias=False), nn.LeakyReLU(0.1, inplace=True))\n        self.conv_2 = nn.Sequential(nn.Conv2d(channel, channel, kernel_size=3, stride=1, padding=2,\n                                              dilation=2, bias=False), nn.LeakyReLU(0.1, inplace=True))\n        self.conv_3 = nn.Sequential(nn.Conv2d(channel, channel, kernel_size=3, stride=1, padding=4,\n                                              dilation=4, bias=False), nn.LeakyReLU(0.1, inplace=True))\n        self.conv_t = nn.Conv2d(channel*3, channel, kernel_size=1, stride=1, padding=0)\n\n    def forward(self, x):  # was __call__; forward keeps nn.Module's hook dispatch intact\n        buffer_1 = []\n        buffer_1.append(self.conv_1(x))\n        buffer_1.append(self.conv_2(x))\n        buffer_1.append(self.conv_3(x))\n        buffer_1 = self.conv_t(torch.cat(buffer_1, 1))\n        return x + buffer_1\n\n\nclass D3ResASPP(nn.Module):\n    def __init__(self, channel):\n        super(D3ResASPP, self).__init__()\n        self.conv_1 = nn.Sequential(nn.Conv3d(channel, channel, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1,1,1), bias=False),\n                                    nn.LeakyReLU(0.1, inplace=True))\n        self.conv_2 = nn.Sequential(nn.Conv3d(channel, channel, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(2, 1, 1), dilation=(2,1,1), bias=False),\n                                    nn.LeakyReLU(0.1, inplace=True))\n        self.conv_3 = nn.Sequential(nn.Conv3d(channel, channel, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(4, 1, 1), dilation=(4,1,1), bias=False),\n                                    nn.LeakyReLU(0.1, inplace=True))\n        self.conv_t = nn.Conv3d(channel*3, channel, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1,1,1))\n\n    def forward(self, x):  # was __call__; see the note on ResASPP above\n        buffer_1 = []\n        buffer_1.append(self.conv_1(x))\n        buffer_1.append(self.conv_2(x))\n        buffer_1.append(self.conv_3(x))\n        buffer_1 = self.conv_t(torch.cat(buffer_1, 1))\n        return x + buffer_1\n\n\ndef LFsplit(data, angRes):\n    # crop an angRes x angRes mosaic of sub-aperture images into a (b, angRes^2, c, h, w) stack\n    b, _, H, W = data.shape\n    h = int(H/angRes)\n    w = int(W/angRes)\n    data_sv = []\n    for u in range(angRes):\n        for v in range(angRes):\n            data_sv.append(data[:, :, u*h:(u+1)*h, v*w:(v+1)*w])\n\n    data_st = torch.stack(data_sv, dim=1)\n    return data_st\n\n\ndef FormOutput(intra_fea):\n    b, n, c, h, w = intra_fea.shape\n    angRes = int(sqrt(n+1))  # n == angRes**2 views, so floor(sqrt(n+1)) == angRes\n    out = []\n    kk = 0\n    for u in range(angRes):\n        buffer = []\n        for v in range(angRes):\n            buffer.append(intra_fea[:, kk, :, :, :])\n            kk = kk+1\n        buffer = torch.cat(buffer, 3)\n        out.append(buffer)\n    out = torch.cat(out, 2)\n\n    return out\n\n
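\n# --- Editor's sketch, not part of the original BasicLFSR file: a quick shape\n# sanity check for the two helpers above. LFsplit tiles an angRes x angRes\n# sub-aperture mosaic into a view stack and FormOutput reassembles it.\nif __name__ == '__main__':\n    lf = torch.randn(2, 1, 160, 160)  # hypothetical 5x5 mosaic of 32x32 views\n    stack = LFsplit(lf, 5)            # -> (2, 25, 1, 32, 32)\n    print(stack.shape, FormOutput(stack).shape)  # FormOutput -> (2, 1, 160, 160)\n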
","repo_name":"ZhengyuLiang24/BasicLFSR","sub_path":"model/SR/LF_IINet.py","file_name":"LF_IINet.py","file_ext":"py","file_size_in_byte":12371,"program_lang":"python","lang":"en","doc_type":"code","stars":78,"dataset":"github-code","pt":"37"} +{"seq_id":"11107893135","text":"import requests\nimport json\nimport pprint\nfrom datetime import datetime\nimport socket\n\nhost = \"127.0.0.1\"  # IP address of the server started from Processing\nport = 10001  # port number configured in Processing\n\ndef main():\n    socket_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # create the socket\n    socket_client.connect((host, port))  # connect to the server\n\n    url1 = 'https://www.showroom-live.com/api/live/gift_log?room_id=313757'\n    url2 = 'https://www.showroom-live.com/api/live/gift_list?room_id=313757'\n    headers = {'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36'}\n    r = requests.get(url1, headers=headers)\n    r2 = requests.get(url2, headers=headers)\n    json_log = r.json()\n    json_list = r2.json()\n\n    gift_master_list = []\n    gift_master_list.extend(json_list.get('enquete'))\n    gift_master_list.extend(json_list.get('normal'))\n\n    for a_gift in json_log['gift_log']:\n        time = datetime.fromtimestamp(a_gift['created_at'])\n\n        socket_client.send('name:{}\\n'.format(a_gift['name']).encode('utf-8'))  # send the data (Python 3)\n        socket_client.send('gift_id:{}\\n'.format(a_gift['gift_id']).encode('utf-8'))\n        socket_client.send('num:{}\\n'.format(a_gift['num']).encode('utf-8'))\n        socket_client.send('created_at:{}\\n'.format(time).encode('utf-8'))\n\n        pprint.pprint('name:{}'.format(a_gift['name']))\n        pprint.pprint('gift_id:{}'.format(a_gift['gift_id']))\n        pprint.pprint('num:{}'.format(a_gift['num']))\n        pprint.pprint('created_at:{}'.format(time))\n\n        gift_name = next(filter(lambda x: x.get('gift_id') == a_gift.get('gift_id'), gift_master_list)).get('gift_name', '')\n        if gift_name != '':\n            socket_client.send('gift_name:{}\\n\\n'.format(gift_name).encode('utf-8'))\n            pprint.pprint('gift_name:{}'.format(gift_name))\n\n    socket_client.close()\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"pain-de-azuki/SHOWROOM_gift","sub_path":"sr_gift.py","file_name":"sr_gift.py","file_ext":"py","file_size_in_byte":2047,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"24319120831","text":"def colorMe(root, n, x):\n    # sizes of the .left and .right subtrees of the node with value x\n    c = []\n    def recurse(root):\n        if not root: return 0\n        l = recurse(root.left)\n        r = recurse(root.right)\n\n        if root.val == x:\n            c.append(l)\n            c.append(r)\n        return l + r + 1\n\n    recurse(root)\n    # player 2's best region is the largest of: the left subtree of x, the right\n    # subtree of x, or the rest of the tree excluding x itself (hence the -1);\n    # player 2 wins iff that region holds a strict majority of the n nodes\n    canWin = n // 2 < max(max(c), n - sum(c) - 1)\n    # print(c)\n    return canWin\n\nclass TreeNode:\n    def __init__(self, val):\n        self.val = val\n        self.left = None\n        self.right = None\n\na = TreeNode(1)\nb = TreeNode(2)\nc = TreeNode(3)\nd = TreeNode(4)\ne = TreeNode(5)\nf = TreeNode(6)\ng = TreeNode(7)\nh = TreeNode(8)\ni = TreeNode(9)\nj = TreeNode(10)\nk = TreeNode(11)\n\n#         a\n#       /   \\\n#      b     c\n#     / \\   /\n#    d   e f\n#   / \\ / \\\n#  h  i j  k\n\na.left = b\na.right = c\n\nb.left = d\nb.right = e\n\nd.left = h\nd.right = i\n\nc.left = f\n# c.right = g\n\ne.left = j\ne.right = k\n\n\nprint(colorMe(a, 11, 3))\nprint(colorMe(a, 11, 5))\nprint(colorMe(a, 11, 2))
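\n# --- Editor's addition (hypothetical extra check, not in the original file):\n# on a 3-node tree the regions around the root are (1, 1, 0), so no second\n# colour can take a strict majority and colorMe should return False.\nr1, r2, r3 = TreeNode(12), TreeNode(13), TreeNode(14)\nr1.left, r1.right = r2, r3\nprint(colorMe(r1, 3, 12))  # expected: False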
","repo_name":"rjk79/Data-Structures-Algorithms-System-Design","sub_path":"engine/_tree_coloring_game.py","file_name":"_tree_coloring_game.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"933000417","text":"from models.schedulemodels import SchedulesModel\nfrom flask import Blueprint, request, render_template, jsonify\nfrom models.figureSkatingmodels import FigureSkatingModel\n# import jieba\n# from flask import Blueprint,render_template,request,g,redirect,url_for,flash\n# from stylecloud import stylecloud\n# from config.base_config import BASE_DIR\n\nbp = Blueprint('home', __name__, url_prefix=\"/\")\n\n\n# @bp.route('')\n# def world_cloud():\n#     with open(rf'{BASE_DIR}\\static\\test.txt', 'r', encoding='utf8') as f:\n#         word_list = jieba.cut(f.read())\n#         result = \" \".join(word_list)  # join the segmented words with spaces\n#\n#     stylecloud.gen_stylecloud(\n#         text=result,  # pass the segmentation result above as the text argument\n#         size=200,\n#         font_path='msyh.ttc',  # font setting\n#         background_color='black',\n#         palette='colorbrewer.diverging.Spectral_11',  # colour scheme, chosen from palettable\n#         gradient='horizontal',  # gradient direction\n#         icon_name='fas fa-circle',  # mask shape, chosen from Font Awesome\n#         output_name=f'{BASE_DIR}/static/img/ciyun.png')  # write out the word-cloud image\n#     return render_template('index.html', data='45')\n\n@bp.route('')\ndef index():\n    schedule_list = []\n    schedules = SchedulesModel.query.order_by(SchedulesModel.mtime.asc()).all()\n    for schedule in schedules:\n        schedule_list.append(schedule.to_dict())\n\n    figureskate_list = []\n    figureskatings = FigureSkatingModel.query.order_by(FigureSkatingModel.id.desc()).all()\n    for figureskate in figureskatings:\n        figureskate_list.append(figureskate.to_dict())\n\n    # print({'data': figureskate_list})\n    # return jsonify({'data': figureskate_list, 'schedule_list': schedule_list})\n\n    return render_template('index.html', result={'data': figureskate_list, 'schedule_list': schedule_list})\n\n# @bp.route('/aliyun')\n# def zhibo():\n#     return render_template('aliyunplay.html')\n\n# @bp.route(\"/search\")\n# def search():\n#     # /search?q=xxx\n#     q = request.args.get(\"put\")\n#     # filter_by: use the field name directly\n#     # filter: use Model.field_name\n#     # questions = QuestionModel.query.filter(or_(QuestionModel.title.contains(q), QuestionModel.content.contains(q))).order_by(db.text(\"-create_time\"))\n#     # return render_template(\"index.html\", data=q)\n#     return {f\"You searched for: {q}\"}\n","repo_name":"jackhao2018/flaskProject","sub_path":"api/page/home/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":2387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5041321912","text":"from adventofcode2021.utils import get_input, get_ints\n\n\n# We might need to switch to 'ranges' instead of actual cuboids to make it work\ndef is_valid(min_x, max_x):\n    return -50 <= min_x <= 50 or -50 <= max_x <= 50\n\n\ndef part1():\n    data = get_input(22, lambda line: line.split(\" \"))\n\n    cuboids = set()\n\n    for step in data:\n        switch = step[0]\n        min_x, max_x, min_y, max_y, min_z, max_z = map(int, get_ints(step[1]))\n        if is_valid(min_x, max_x) and is_valid(min_y, max_y) and is_valid(min_z, max_z):\n            min_x, max_x = limit(min_x, max_x)\n            min_y, max_y = limit(min_y, max_y)\n            min_z, max_z = limit(min_z, max_z)\n        else:\n            continue\n\n        # print(f\"Processing {switch}:\\t x: {min_x}:{max_x} y: {min_y}:{max_y} z: {min_z}:{max_z}\")\n\n        for x in range(min_x, max_x + 1):\n            for y in range(min_y, max_y + 1):\n                for z in range(min_z, max_z + 1):\n                    if switch == \"on\":\n                        cuboids.add((x, y, z))\n                    else:\n                        cuboids.discard((x, y, z))\n\n    return len(cuboids)\n\n\ndef limit(min_coord, max_coord):\n    return max([-50, min_coord]), min([50, max_coord])\n\n\n# Obviously the initial approach is not going to work for that type of input...\n# I should move to range-based calculations; a sketch of that idea follows below.\ndef part2():\n    # data = get_input(22, lambda line: line.split(\" \"))\n    #\n    # for step in data:\n    #     switch = step[0]\n    #     min_x, max_x, min_y, max_y, min_z, max_z = map(int, get_ints(step[1]))\n    pass\n
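\n# --- Editor's sketch of the range-based approach mentioned above (hypothetical,\n# not validated against the real puzzle input): keep a signed count per cuboid\n# so overlaps cancel out, then sum the signed volumes. Bounds are inclusive.\ndef _intersect(a, b):\n    # a, b: (x1, x2, y1, y2, z1, z2); returns the overlap cuboid or None\n    x1, x2 = max(a[0], b[0]), min(a[1], b[1])\n    y1, y2 = max(a[2], b[2]), min(a[3], b[3])\n    z1, z2 = max(a[4], b[4]), min(a[5], b[5])\n    if x1 <= x2 and y1 <= y2 and z1 <= z2:\n        return (x1, x2, y1, y2, z1, z2)\n    return None\n\n\ndef part2_sketch(steps):\n    # steps: iterable of (switch, cuboid) pairs parsed as in part1\n    from collections import Counter\n    counts = Counter()\n    for switch, cub in steps:\n        update = Counter()\n        for prev, sign in counts.items():\n            overlap = _intersect(cub, prev)\n            if overlap:\n                update[overlap] -= sign  # cancel whatever was already counted there\n        if switch == \"on\":\n            update[cub] += 1\n        counts.update(update)\n    return sum(sign * (c[1] - c[0] + 1) * (c[3] - c[2] + 1) * (c[5] - c[4] + 1)\n               for c, sign in counts.items())\n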
","repo_name":"laeith/comp-sci-excursions","sub_path":"aoc/adventofcode2021/day22.py","file_name":"day22.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28064168254","text":"import random\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom modules.multi_head_attention import MultiHeadAttention\nfrom modules.feedforward import PoswiseFeedForwardNet\nfrom modules.encoding import PositionalEncoding\n\n\nclass EncoderLayer(nn.Module):\n\n    def __init__(self, d_model, d_ff, d_k, d_v, n_heads,\n                 device, attn_type, seed, few_shot):\n        super(EncoderLayer, self).__init__()\n\n        np.random.seed(seed)\n        random.seed(seed)\n        torch.manual_seed(seed)\n\n        self.enc_self_attn = MultiHeadAttention(\n            d_model=d_model, d_k=d_k,\n            d_v=d_v, n_heads=n_heads, device=device,\n            attn_type=attn_type, seed=seed, few_shot=few_shot)\n        self.pos_ffn = PoswiseFeedForwardNet(\n            d_model=d_model, d_ff=d_ff, seed=seed)\n        self.layer_norm = nn.LayerNorm(d_model, elementwise_affine=False)\n        self.few_shot = few_shot\n\n    def forward(self, enc_inputs, enc_self_attn_mask=None):\n\n        if self.few_shot:\n            out, attn, loss = self.enc_self_attn(enc_inputs, enc_inputs, enc_inputs, attn_mask=enc_self_attn_mask)\n        else:\n            out, attn = self.enc_self_attn(enc_inputs, enc_inputs, enc_inputs, attn_mask=enc_self_attn_mask)\n        out = self.layer_norm(out + enc_inputs)\n        out_2 = self.pos_ffn(out)\n        out_2 = self.layer_norm(out_2 + out)\n        if self.few_shot:\n            return out_2, loss\n        else:\n            return out_2\n\n\nclass Encoder(nn.Module):\n\n    def __init__(self, d_model, d_ff, d_k, d_v, n_heads,\n                 n_layers, pad_index, device,\n                 attn_type, seed, few_shot):\n        super(Encoder, self).__init__()\n\n        np.random.seed(seed)\n        random.seed(seed)\n        torch.manual_seed(seed)\n\n        self.device = device\n        self.pad_index = pad_index\n        self.attn_type = attn_type\n        self.pos_emb = PositionalEncoding(\n            d_hid=d_model,\n            device=device)\n        self.n_layers = n_layers\n        self.layers = []\n        for _ in range(n_layers):\n            encoder_layer = EncoderLayer(\n                d_model=d_model, d_ff=d_ff,\n                d_k=d_k, d_v=d_v, n_heads=n_heads,\n                device=device,\n                attn_type=attn_type, seed=seed, few_shot=few_shot)\n            self.layers.append(encoder_layer)\n        self.layers = nn.ModuleList(self.layers)\n        self.few_shot = few_shot\n\n    def forward(self, enc_input):\n\n        enc_outputs = self.pos_emb(enc_input)\n\n        enc_self_attn_mask = None\n\n        for layer in self.layers:\n            if self.few_shot:\n                enc_outputs, loss = layer(enc_outputs, enc_self_attn_mask)\n            else:\n                enc_outputs = layer(enc_outputs, enc_self_attn_mask)\n\n        if self.few_shot:\n            return enc_outputs, loss\n        else:\n            return enc_outputs
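\n\n# --- Editor's sketch (hypothetical hyper-parameters; the valid attn_type\n# strings live in modules.multi_head_attention, which is not shown here):\n# the encoder maps a (batch, seq_len, d_model) tensor to the same shape, e.g.\n# enc = Encoder(d_model=32, d_ff=64, d_k=8, d_v=8, n_heads=4, n_layers=2,\n#               pad_index=0, device='cpu', attn_type=..., seed=0, few_shot=False)\n# out = enc(torch.randn(2, 16, 32))  # expected shape: (2, 16, 32)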
","repo_name":"SepKfr/few_shot_2","sub_path":"modules/encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":2874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26202526922","text":"import os\nimport re\nimport sys\nimport json\nimport time\nimport random\nimport requests\nfrom hashlib import md5\nfrom pyquery import PyQuery as pq\nimport socket\nimport socks\n\n\n# https://wallpaper.mob.org/image/downloadImage?id=40703&l=240&t=0&r=468&b=415&s=0.5932475884244373\n\n\"\"\"\nid: 40703\nl: 137\nt: 0\nr: 365\nb: 415\ns: 0.5928571428571429\n\nid: 47437\nl: 132\nt: 0\nr: 360\nb: 415\ns: 0.5937834941050375\n\n\n# wallpaper crawler site; its categories are:\n\n    ABSTRACT 475 images  ANIMALS 633 images  ART 464 images  CARS 451 images  FOOD & DRINK 285 images  GAMES 267 images  MOVIES 356 images\n\n    MUSIC 116 images  NATURE 1993 images  PHOTOS 829 images  PLACES 411 images  QUOTES 66 images  SPORTS 127 images\n\n\nhttp://www.mobileswall.com/#\n\nhttp://www.mobileswall.com/wallpaper/swan-2/\nhttp://www.mobileswall.com/wp-content/uploads/2015/12/901-Swan-l.jpg\nhttp://www.mobileswall.com/wallpaper/good-scenery-come-out-to-find-something-to-eat/\nhttp://www.mobileswall.com/wp-content/uploads/2015/12/901-Good-Scenery-Come-Out-to-Find-Something-to-Eat-l.jpg\n\nhttp://www.mobileswall.com/wallpaper/want-to-eat-something/\nhttp://www.mobileswall.com/wp-content/uploads/2015/12/901-Want-to-Eat-Something-l.jpg\n\nhttp://www.mobileswall.com/wp-content/uploads/2015/07/901-Red-whiskered-Bulbuls-l.jpg\n\n\nhttp://www.mobileswall.com/wp-content/uploads/2015/07/300-Ripples-l.jpg\nhttp://www.mobileswall.com/wp-content/uploads/2015/07/901-Ripples-l.jpg\n\"\"\"\n\n# route traffic through an overseas SOCKS proxy\nip = '127.0.0.1'  # change your proxy's ip\nport = 10801  # change your proxy's port\nsocks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, ip, port)\nsocket.socket = socks.socksocket\n\nurl_base = u'https://www.instagram.com/'\nuri = u'https://www.instagram.com/graphql/query/?query_hash=a5164aed103f24b03e7b7747a2d94e3c&variables=%7B%22id%22%3A%22{user_id}%22%2C%22first%22%3A12%2C%22after%22%3A%22{cursor}%22%7D'\n\n\nBASE_PATH = 'C:\\\\videos\\\\instagram\\\\'\n\nheaders = {\n    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',\n    'cookie': 'csrftoken=BoPC7agZgyLblrQxYtHWl2hKte0FApDI; ds_user_id=10147571424; fbm_124024574287414=base_domain=.instagram.com; mcd=2; mid=XDSttwALAAFV17Ay-mMBg9iDtO_8; rur=PRN; sessionid=10147571424%3AROjFKgQrAqP2lG%3A14; shbid=9207; shbts=1551788462.055799; urlgen=\"{\\\"45.77.128.242\\\": 20473}:1h1Gwv:Ije7VWOZYGJb4mp9kGs_aODfoxE\"'\n}\n\n\ndef get_html(url):\n    try:\n        response = requests.get(url, headers=headers)\n        print(response.text)\n        if response.status_code == 200:\n            return response.text\n        else:\n            print('Error requesting page source, status code:', response.status_code)\n    except Exception as e:\n        print(e)\n    return None\n\n\ndef get_json(url):\n    try:\n        response = requests.get(url, headers=headers, timeout=10)\n        if response.status_code == 200:\n            return response.json()\n        else:\n            print('Error requesting page JSON, status code:', response.status_code)\n    except Exception as e:\n        print(e)\n        # random 10-50 s back-off, then retry (note: retries without limit)\n        time.sleep(10 + float(random.randint(1, 4000))/100)\n        return get_json(url)\n\n\ndef get_content(url):\n    try:\n        response = requests.get(url, headers=headers, timeout=10)\n        if response.status_code == 200:\n            return response.content\n        else:\n            print('Error requesting photo binary stream, status code:', response.status_code)\n    except Exception as e:\n        print(e)\n    return None\n
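\n\n# --- Editor's sketch (hypothetical helper, not in the original script): the\n# percent-encoded blob baked into `uri` above is just URL-encoded JSON, so an\n# equivalent but easier-to-read construction would be:\ndef build_media_query_url(user_id, cursor):\n    from urllib.parse import quote\n    variables = json.dumps({'id': user_id, 'first': 12, 'after': cursor}, separators=(',', ':'))\n    return ('https://www.instagram.com/graphql/query/'\n            '?query_hash=a5164aed103f24b03e7b7747a2d94e3c&variables=' + quote(variables))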
\n\ndef get_urls(html):\n    urls = []\n    user_id = re.findall('\"profilePage_([0-9]+)\"', html, re.S)[0]\n    print('user_id:' + user_id)\n    doc = pq(html)\n    items = doc('script[type=\"text/javascript\"]').items()\n    for item in items:\n        if item.text().strip().startswith('window._sharedData'):\n            js_data = json.loads(item.text()[21:-1])  # (encoding kwarg dropped; it was removed from json.loads in Python 3.9)\n            edges = js_data[\"entry_data\"][\"ProfilePage\"][0][\"graphql\"][\"user\"][\"edge_owner_to_timeline_media\"][\"edges\"]\n            page_info = js_data[\"entry_data\"][\"ProfilePage\"][0][\"graphql\"][\"user\"][\"edge_owner_to_timeline_media\"]['page_info']\n            cursor = page_info['end_cursor']\n            flag = page_info['has_next_page']\n            for edge in edges:\n                if edge['node']['display_url']:\n                    display_url = edge['node']['display_url']\n                    print(display_url)\n                    urls.append(display_url)\n    print(cursor, flag)\n    while flag:\n        url = uri.format(user_id=user_id, cursor=cursor)\n        js_data = get_json(url)\n        infos = js_data['data']['user']['edge_owner_to_timeline_media']['edges']\n        cursor = js_data['data']['user']['edge_owner_to_timeline_media']['page_info']['end_cursor']\n        flag = js_data['data']['user']['edge_owner_to_timeline_media']['page_info']['has_next_page']\n        for info in infos:\n            if info['node']['is_video']:\n                video_url = info['node']['video_url']\n                if video_url:\n                    print(video_url)\n                    urls.append(video_url)\n            else:\n                if info['node']['display_url']:\n                    display_url = info['node']['display_url']\n                    print(display_url)\n                    urls.append(display_url)\n        print(cursor, flag)\n        # time.sleep(4 + float(random.randint(1, 800))/200)  # if count > 2000, turn on\n    return urls\n\n\ndef main(user):\n    url = url_base + user + '/'\n    html = get_html(url)\n    urls = get_urls(html)\n\n    with open('./urls.json', 'w') as f:\n        f.write(json.dumps(urls))\n\n    dirpath = BASE_PATH + '{0}'.format(user)\n    if not os.path.exists(dirpath):\n        os.mkdir(dirpath)\n    for i in range(len(urls)):\n        print('\\nDownloading item {0}: '.format(i) + urls[i], ' {0} left'.format(len(urls)-i-1))\n        try:\n            content = get_content(urls[i])\n\n            houzui = ''  # houzui = the file extension ('suffix')\n            print('---------------------------------------------')\n            print(urls[i])\n            if re.search(r'\\.jpg', urls[i]):\n                print('Matched a jpg file')\n                houzui = 'jpg'\n            elif re.search(r'\\.mp4\\?+', urls[i]):\n                print('Matched an mp4 file')\n                houzui = 'mp4'\n            else:\n                print('Nothing matched')\n            print(houzui)\n\n            # the filename prefix below means roughly 'high-quality beauty life selfies'\n            file_path = BASE_PATH + '{0}\\\\{1}.{2}'.format(user, '高质量-美女-生活自拍-' + str(i), houzui)  # md5(content).hexdigest()\n            print(file_path)\n            if not os.path.exists(file_path):\n                with open(file_path, 'wb') as f:\n                    print('Item {0} downloaded: '.format(i) + urls[i])\n                    f.write(content)\n            else:\n                print('Item {0} already downloaded'.format(i))\n        except Exception as e:\n            print(e)\n            print('Failed to download this image or video')\n\n\nif __name__ == '__main__':\n    user_name = sys.argv[1]\n    start = time.time()\n    main(user_name)\n    print('Complete!!!!!!!!!!')\n    end = time.time()\n    spend = end - start\n    hour = spend // 3600\n    minu = (spend - 3600 * hour) // 60\n    sec = spend - 3600 * hour - 60 * minu\n    print(f'Took {hour} h {minu} min {sec} s in total')\n","repo_name":"RiversCoder/ins-spider","sub_path":"instagram.py","file_name":"instagram.py","file_ext":"py","file_size_in_byte":7210,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}