diff --git "a/6666.jsonl" "b/6666.jsonl" new file mode 100644--- /dev/null +++ "b/6666.jsonl" @@ -0,0 +1,513 @@ +{"seq_id":"23339461603","text":"\nimport angr\nfrom angrutils import *\nimport os, sys\n\ndef main(argv):\n if len(argv) < 2:\n return\n \n prog_name = argv[1]\n project = angr.Project(prog_name, auto_load_libs=False)\n res = printGraph(project)\n print(res)\n\n \ndef printGraph(project: angr.Project):\n\n cfg = p.analyses.CFGEmulated()\n plot_cfg(cfg, \"ais3_cfg\", asminst=True, remove_imports=False, remove_path_terminator=False)\n\n\nif __name__ == \"__main__\":\n main(sys.argv)","repo_name":"blacksheeep/SwissSec","sub_path":"printGraph.py","file_name":"printGraph.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30166673615","text":"'''\nCreated on Mar. 2, 2023\n\n@author: cefect\n'''\nimport os, datetime, shutil\nimport numpy as np\nimport numpy.ma as ma\nimport pandas as pd\nimport rasterio as rio\nfrom rasterio import shutil as rshutil\n\n\n\nfrom hp.basic import now\nfrom hp.rio import (\n assert_rlay_simple, \n write_array, \n\n )\n\nfrom fdsc.simple import WetPartials\n\nfrom fdsc.base import (\n assert_dem_ar, assert_wse_ar, rlay_extract, assert_partial_wet\n )\n\n\nclass BufferGrowLoop(WetPartials):\n\n def __init__(self,\n loop_range=range(30),\n run_dsc_handle_d=None,\n **kwargs):\n \n if run_dsc_handle_d is None: run_dsc_handle_d=dict()\n \n self.loop_range = loop_range\n \n run_dsc_handle_d['BufferGrowLoop'] = self.run_bufferGrowLoop # add your main method to the caller dict\n \n super().__init__(run_dsc_handle_d=run_dsc_handle_d, **kwargs)\n \n def run_bufferGrowLoop(self, wse_fp=None, dem_fp=None,\n loop_range=None,\n **kwargs):\n \"\"\"run CostGrow pipeline\n \"\"\"\n method = 'BufferGrowLoop'\n log, tmp_dir, out_dir, ofp, resname = self._func_setup(self.nicknames_d[method], subdir=False, **kwargs)\n skwargs = dict(logger=log, out_dir=tmp_dir, tmp_dir=tmp_dir)\n meta_lib = dict()\n \n # skwargs, meta_lib, log, ofp, start = self._func_setup_dsc(nicknames_d[method], wse_fp, dem_fp, **kwargs)\n downscale = self.downscale\n \n #=======================================================================\n # p1: wet partials\n #======================================================================= \n wse1_wp_fp, meta_lib['p1_wp'] = self.p1_wetPartials(wse_fp, dem_fp, downscale=downscale, **skwargs)\n \n #=======================================================================\n # p2: dry partials\n #=======================================================================\n wse1_dp_fp, meta_lib['p2_DP'] = self.get_bufferGrowLoop_DP(wse1_wp_fp, dem_fp, ofp=ofp, \n loop_range=loop_range, **skwargs)\n \n return wse1_dp_fp, meta_lib\n \n def get_bufferGrowLoop_DP(self, wse1_fp, dem_fp,\n loop_range=None,\n min_growth_ratio=1.00001,\n **kwargs):\n \"\"\"loop of buffer + filter\n \n Parameters\n -------------\n loop_range: iterator, default range(30)\n buffer cell distance loop iterator. 
\n \n min_growth_ratio: float, default 1.00001\n minimum ratio of inundation count growth for buffer loop\n must be greater than 1.0\n values closer to 1.0 will allow the loop to continue\n \"\"\"\n #=======================================================================\n # defaults\n #=======================================================================\n if loop_range is None: loop_range = self.loop_range\n assert loop_range.__class__.__name__ == 'range'\n assert min_growth_ratio >= 1.0\n \n log, tmp_dir, out_dir, ofp, resname = self._func_setup('bg_dp', subdir=False, **kwargs)\n skwargs = dict(logger=log, out_dir=tmp_dir, tmp_dir=tmp_dir)\n meta_lib={'smry':{'wse_fp':wse1_fp}}\n start=now()\n \n #=======================================================================\n # preload\n #=======================================================================\n dem_stats_d, dem_ar = rlay_extract(dem_fp) \n assert_dem_ar(dem_ar)\n \n with rio.open(wse1_fp, mode='r') as ds:\n assert_rlay_simple(ds)\n wse1_ar = ds.read(1, masked=True)\n prof = ds.profile\n \n assert_wse_ar(wse1_ar)\n \n assert not np.any(wse1_ar < dem_ar)\n \n #=======================================================================\n # buffer loop\n #=======================================================================\n log.info(f'on {loop_range}')\n wse1j_ar = np.where(wse1_ar.mask, np.nan, wse1_ar.data) # drop mask\n for i in loop_range:\n if i > min(wse1_ar.shape):\n log.warning(f'loop {i} exceeds minimum dimension of array.. breaking')\n break\n \n meta_d = {'pre_null_cnt':np.isnan(wse1j_ar).sum()}\n \n log.info(f'{i} w/ {meta_d}')\n # buffer\n wse1jb_ar = ar_buffer(wse1j_ar)\n \n # filter\n wse1j_ar = np.where(wse1jb_ar <= dem_ar.data, np.nan, wse1jb_ar)\n \n # wrap\n \n meta_d.update({'post_buff_null_cnt':np.isnan(wse1jb_ar).sum(),\n 'post_filter_null_cnt':np.isnan(wse1j_ar).sum()})\n meta_d['growth_rate'] = meta_d['pre_null_cnt'] / meta_d['post_filter_null_cnt']\n assert meta_d['growth_rate'] >= 1.0, 'lost inundation somehow...'\n \n if meta_d['growth_rate'] < min_growth_ratio:\n log.warning(f'at i={i} growth_rate=%.2f failed to achieve minimum growth.. 
breaking' % (\n meta_d['growth_rate']))\n break\n \n meta_lib[str(i)] = meta_d\n \n #=======================================================================\n # to raster\n #=======================================================================\n write_array(wse1j_ar, ofp, **prof) \n \n log.info(f'wrote {wse1j_ar.shape} to \\n {ofp}')\n \n #=======================================================================\n # wrap\n #=======================================================================\n tdelta = (now() - start).total_seconds()\n meta_lib['smry']['tdelta'] = tdelta\n log.info(f'finished in {tdelta:.2f} secs')\n \n return ofp, meta_lib\n\n \ndef ar_buffer(wse_ar):\n \"\"\"disaggregate/downscale the array to the specified scale\n \n results in an array with scale = ar.shape*downscale\n \"\"\"\n \n res_ar = np.full(wse_ar.shape, np.nan)\n \n it = np.nditer([wse_ar, res_ar],\n flags=[\n 'multi_index'\n # 'external_loop', \n # 'buffered'\n ],\n op_flags=[['readonly'],\n ['writeonly',\n # 'allocate', #populate None \n # 'no_broadcast', #no aggregation?\n ]],\n # op_axes=[None, new_shape],\n )\n \n #===========================================================================\n # execute iteration\n #===========================================================================\n with it: \n for wse, res in it:\n \n # dry\n if np.isnan(wse):\n # retrieve neighbours\n nei_ar = get_neighbours_D4(wse_ar, it.multi_index)\n \n # all dry\n if np.isnan(nei_ar).all():\n res[...] = np.nan\n \n # wet neighbours\n else:\n res[...] = np.ravel(nei_ar[~np.isnan(nei_ar)]).mean()\n #===========================================================\n # #collapse to wet cells\n # nei_ar2 = np.ravel(nei_ar[~np.isnan(nei_ar)])\n # print(f'nei_ar2={nei_ar2}')\n # \n # #higher than dem\n # if nei_ar2.max()>dem: \n # res[...] = nei_ar2.max()\n # else:\n # res[...] = np.nan\n #===========================================================\n \n # wet\n else:\n res[...] 
= wse\n \n # print(f'{it.multi_index}: wse={wse} dem={dem}, res={res}')\n \n result = it.operands[-1]\n \n return result\n\n\ndef get_neighbours_D4(ar, mindex):\n \"\"\"get values of d4 neighbours\"\"\"\n \n res_ar = np.full((3, 3), np.nan)\n val_d, loc_d = dict(), dict()\n for i, shift in enumerate([\n (0, 1), (0, -1),\n (1, 0), (-1, 0)]):\n \n # get shifted location\n jx = mindex[0] + shift[0]\n jy = mindex[1] + shift[1]\n \n if jx < 0 or jx >= ar.shape[0]:\n res = ma.masked\n elif jy < 0 or jy >= ar.shape[1]:\n res = ma.masked\n else:\n try:\n res = ar[jx, jy]\n except Exception as e:\n raise IndexError(\n f'failed to retrieve value jy={jy} jx={jx} shape={ar.shape} w/ \\n {e}')\n \n res_ar[shift] = res\n \n #===========================================================================\n # loc_d[i] = (jx, jy) \n # val_d[i]= res\n # \n # \n # print(mindex)\n # print(loc_d)\n # print(val_d)\n #===========================================================================\n \n return res_ar\n","repo_name":"cefect/FloodDownscaler","sub_path":"fdsc/bufferLoop.py","file_name":"bufferLoop.py","file_ext":"py","file_size_in_byte":9362,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"7620201897","text":"from typing import OrderedDict\nfrom Expressions.Value import Value\nfrom SymbolsTable.Type import Type,LogicOperator\nfrom SymbolsTable.Exceptions import Exceptions\nfrom SymbolsTable.Symbols import Symbols\nfrom Abstract.Instruction import Instruction, Instruction\nfrom Abstract.NodoAST import NodoAST\n\n\nclass Logic(Instruction):\n def __init__(self, Operator, OperacionIzq, OperacionDer, Row, Column):\n self.Operator = Operator\n self.OperacionIzq = OperacionIzq\n self.OperacionDer = OperacionDer\n self.Row = Row\n self.Column = Column\n self.Type = Type.BOOLEANO\n self.true_label=''\n self.false_label=''\n\n # operaciones logicas \n # se inerpretan cada instruccion segun el Operator en este caso es el simbolo \n # lo cual debe de generar una respuesta booleana\n def interpreter(self, tree, table):\n self.checkLabels(tree)\n lbAndOr=''\n\n if self.Operator == LogicOperator.AND:\n lbAndOr=self.OperacionIzq.true_label=tree.addLiteral() \n self.OperacionDer.true_label=self.true_label\n self.OperacionIzq.false_label= self.OperacionDer.false_label=self.false_label\n\n\n elif self.Operator == LogicOperator.OR:\n self.OperacionIzq.true_label=self.OperacionDer.true_label=self.true_label\n lbAndOr= self.OperacionIzq.false_label= tree.addLiteral()\n self.OperacionDer.false_label=self.false_label\n \n izq = self.OperacionIzq.interpreter(tree, table)\n if izq.type!=Type.BOOLEANO:\n print('no se puede men')\n return \n \n tree.putLabel(lbAndOr)\n\n der = self.OperacionDer.interpreter(tree, table)\n if der.type!=Type.BOOLEANO:\n print('no se puede')\n return\n if isinstance(der, Exceptions): return der\n \n ret = Value(None,Type.BOOLEANO,False)\n ret.true_label=self.true_label\n ret.false_label= self.false_label\n return ret\n\n\n def obtenerVal(self, Type, val):\n if Type == Type.ENTERO:\n return int(val)\n elif Type == Type.DECIMAL:\n return float(val)\n elif Type == Type.BOOLEANO:\n return bool(val)\n return str(val)\n\n\n\n def getNodo(self):\n nodo= NodoAST(\"EXPRESSION\") \n if self.OperacionDer != None:\n nodo.Agregar_Hijo_Nodo(self.OperacionIzq.getNodo())\n nodo.Agregar_Hijo(self.simb(self.Operator.name))\n nodo.Agregar_Hijo_Nodo(self.OperacionDer.getNodo())\n else:\n nodo.Agregar_Hijo(self.simb(self.Operator.name))\n 
nodo.Agregar_Hijo_Nodo(self.OperacionIzq.getNodo())\n \n \n return nodo\n\n\n\n def simb(self, Operator):\n if Operator==\"NOT\":\n return \"!\"\n elif Operator==\"AND\":\n return \"&&\"\n elif Operator==\"OR\":\n return \"||\"\n\n def checkLabels(self,tree):\n if self.true_label=='':\n self.true_label = tree.addLiteral()\n if self.false_label=='':\n self.false_label= tree.addLiteral()\n\n\n def getRow(self):\n return self.Row\n\n def setRow(self, Row):\n self.Row= Row \n\n def getColumn(self):\n return self.Column\n\n def setColumn(self, Column):\n self.Column= Column\n \n def getType(self):\n return self.Type\n\n def setType(self, Type):\n self.Type=Type\n\n \n def getOperacionIzq(self):\n return self.OperacionIzq\n\n def setOperacionIzq(self, OperacionIzq):\n self.OperacionIzq=OperacionIzq\n\n def getOperacionDer(self):\n return self.OperacionDer\n\n def setOperacionDer(self, OperacionDer):\n self.OperacionDer=OperacionDer\n","repo_name":"MynorSaban1906/OLC2_P2","sub_path":"Expressions/Logic.py","file_name":"Logic.py","file_ext":"py","file_size_in_byte":3724,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27866626690","text":"# coding: utf-8\nimport sys\nfrom datetime import datetime, timedelta\n\ndelim = \"'\\\\''\"\ntable_name = \"executions\"\n\ndef error_argument():\n print('need \"yyyy-mm-dd\" format')\n sys.exit(1)\n\ndef dump_query(start):\n rfc3339 = '%Y-%m-%dT%H:%M:%SZ'\n end = start + timedelta(days=1)\n tmp = start\n after = start\n\n while tmp < end:\n after = tmp + timedelta(minutes=30)\n print(\"'select * from \" + table_name + \" where time >=\" + delim + tmp.strftime(rfc3339) + delim + \" and time < \" + delim + after.strftime(rfc3339) + delim + \"'\" )\n tmp = after\n\n\nif __name__ == '__main__':\n\n args = sys.argv\n if len(args) == 2:\n try:\n timestamp = datetime.strptime(args[1], '%Y-%m-%d')\n except:\n error_argument()\n\n dump_query(timestamp)\n\n else:\n error_argument()\n","repo_name":"kazudotdev/backtest-tools","sub_path":"query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"43680410477","text":"from diffusers import StableDiffusionPipeline\nfrom diffusers import StableDiffusionImg2ImgPipeline\nfrom diffusers import StableDiffusionControlNetPipeline, ControlNetModel\nfrom diffusers import DiffusionPipeline\nimport torch\nimport os\nimport uuid\nimport cv2\nfrom PIL import Image\nimport numpy as np\n\ndef prompts(name, description):\n def decorator(func):\n func.name = name\n func.description = description\n return func\n\n return decorator\n\nclass CyberPunkText2Image:\n \"\"\"\n Text2Image is a text prior generation model, which use clip embedding associate with text and image\n \"\"\"\n\n def __init__(self, device):\n\n self.device = device\n self.torch_dtype = torch.float16 if (\"cuda\" or \"mps\") in device else torch.float32\n # self.pipline = StableDiffusionPipeline.from_pretrained(\"../models--runwayml--stable-diffusion-v1-5/snapshots/39593d5650112b4cc580433f6b0435385882d819\", trust_remote_code=True)\n self.pipline = StableDiffusionPipeline.from_pretrained(\n \"runwayml/stable-diffusion-v1-5\",\n trust_remote_code=True)\n self.pipline.to(device)\n self.prompt = f\"Complex 3 D rendering \" \\\n f\"semi robot, in outer space, cyberpunk, robot parts, 150 mm, beautiful studio soft light, \" \\\n f\"fine face, vibrant details, luxury cyberpunk, surrealism, anatomy, 
facial muscles, cables, \" \\\n f\"microchips, electronic boards, current, elegance, beautiful background, octane rendering, H. R. Gig style, \" \\\n f\"8k, wallpaper, best quality, masterpiece, illustration, an extremely exquisite and beautiful, extremely detailed, CG, unified, wallpaper, \" \\\n f\"(realistic, photo realistic: 1.37), stunning, fine details, masterpiece, best quality, \" \\\n f\"official art, extremely detailed CG unified, 8k wallpaper, robot, silver dummy,\"\n\n @prompts(name=\"CyperPunk image\",\n description=\"useful when you want to generate an cyberpunk image from prefixed prompt setting. \"\n \"like: generate an image of an object or something, or generate an image that includes some objects. \"\n \"The input to this tool should be a string, representing the text used to generate image. \")\n\n def inference(self, text):\n\n prompt = text + \",\" + self.prompt\n image_name = f\"{str(uuid.uuid4())[:8]}.png\"\n image_filename = os.path.join('./image', image_name)\n print(\"the CyberPunk prompt is:\", prompt)\n image = self.pipline(prompt, num_inference_steps = 20).images[0]\n image.save(image_filename)\n print(\n f\"\\nProcessed Text2Image, Input Text: {text}, Output Image: {image_filename}\")\n return image_filename\n\n\nclass TextGuided_image2image:\n\n \"\"\"\n similar latent diffusion model, which change nosie to noise + img_embedding info\n \"\"\"\n\n def __init__(self, device):\n\n self.device = device\n self.torch_dtype = torch.float16 if (\"cuda\" or \"mps\") in device else torch.float32\n self.pipeline = StableDiffusionImg2ImgPipeline.from_pretrained(\"nitrosocke/Ghibli-Diffusion\",torch_dtype=torch.float16).to(device)\n self.pipeline.to(device)\n self.prompt = 'high resolution image, detailed riched'\n\n @prompts(name=\"Generate Image From User Input Text\",\n description=\"useful when you want to generate an image from a user input text and save it to a file. \"\n \"like: generate an image of an object or something, or generate an image that includes some objects. \"\n \"The input to this tool should be a string, representing the text used to generate image. \")\n\n def inference(self, text, image):\n\n prompt = text + \",\" + self.prompt\n image_filename = os.path.join('./image', f\"{str(uuid.uuid4())[:8]}.png\")\n image = self.pipeline(prompt=prompt, image=image, strength=0.75, guidance_scale=7.5).images[0]\n image.save(image_filename)\n print(\n f\"\\nProcessed Text2Image, Input Text: {text}, Output Image: {image_filename}\")\n return image_filename\n\nclass Text2Image:\n \"\"\"\n Text2Image is a text prior generation model, which use clip embedding associate with text and image\n \"\"\"\n\n def __init__(self, device):\n\n self.device = device\n self.torch_dtype = torch.float16 if (\"cuda\" or \"mps\") in device else torch.float32\n # self.pipline = StableDiffusionPipeline.from_pretrained(\"../models--runwayml--stable-diffusion-v1-5/snapshots/39593d5650112b4cc580433f6b0435385882d819\", trust_remote_code=True)\n self.pipline = StableDiffusionPipeline.from_pretrained(\n \"runwayml/stable-diffusion-v1-5\",\n trust_remote_code=True)\n self.pipline.to(device)\n # self.prompt = 'a high-quality, detailed, and professional image'\n self.prompt = \"a high-quality, background clean\"\n\n\n @prompts(name=\"Generate Image From User Input Text\",\n description=\"useful when you want to generate an image from a user input text and save it to a file. \"\n \"like: generate an image of an object or something, or generate an image that includes some objects. 
\"\n \"The input to this tool should be a string, representing the text used to generate image. \")\n\n def inference(self, text):\n\n prompt = text + \",\" + self.prompt\n image_name = f\"{str(uuid.uuid4())[:8]}.png\"\n image_filename = os.path.join('./image', image_name)\n\n image = self.pipline(prompt, num_inference_steps = 20).images[0]\n image.save(image_filename)\n print(\n f\"\\nProcessed Text2Image, Input Text: {text}, Output Image: {image_filename}\")\n return image_filename\n\nclass Dreambooth_Text2Image:\n \"\"\"\n Dreambooth_Text2Image is a strone control image generation method,\n which can maintain the user target object. It can generate the various user taget object images,\n guided by the text prompt embedding.\n It is need retrain the sd models and provide a special target identifier\n\n \"\"\"\n\n def __init__(self, device):\n\n self.device = device\n self.torch_dtype = torch.float16 if (\"cuda\" or \"mps\") in device else torch.float32\n model_id = \"path_to_saved_model\"\n self.pipline = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to(\"cuda\")\n # self.pipline = StableDiffusionPipeline.from_pretrained(\"../models--runwayml--stable-diffusion-v1-5/snapshots/39593d5650112b4cc580433f6b0435385882d819\", trust_remote_code=True)\n self.pipline.to(device)\n self.prompt = 'style, typical style, professional environment, cinematic lighting, 8K' \\\n 'style, typical style, professional environment, cinematic lighting, 8K' \\\n 'a high-quality, detailed, and professional image'\n @prompts(name=\"Generate Image From User Input Text\",\n description=\"useful when you want to generate an image from a user input text and save it to a file. \"\n \"like: generate an image of an object or something, or generate an image that includes some objects. \"\n \"The input to this tool should be a string, representing the text used to generate image. 
\")\n\n def inference(self, text):\n\n prompt = text + \",\" + self.prompt\n image_filename = os.path.join('./image', f\"{str(uuid.uuid4())[:8]}.png\")\n image = self.pipline(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]\n image.save(image_filename)\n print(\n f\"\\nProcessed Dreambooth_Text2Image, Input Text: {text}, Output Image: {image_filename}\")\n return image\n\n\ndef auto_canny(image, sigma=0.9):\n # compute the median of the single channel pixel intensities\n v = np.median(image)\n\n # apply automatic Canny edge detection using the computed median\n\n lower = int(max(0, (1.0 - sigma) * v))\n upper = int(min(255, (1.0 + sigma) * v))\n edged = cv2.Canny(image, lower, upper)\n\n # return the edged image\n return edged\n\nclass ControlText2Image:\n \"\"\"\n controlNet text2Image is a multi priors diffusion generation model, one is the text embedding\n prior, another one is the skeleton information which maintain the main component of the image\n \"\"\"\n\n def __init__(self, device):\n\n self.device = device\n self.torch_dtype = torch.float16 if (\"cuda\" or \"mps\") in device else torch.float32\n controlnet = ControlNetModel.from_pretrained(\"fusing/stable-diffusion-v1-5-controlnet-canny\", torch_dtype=torch.float16)\n # self.pipline = StableDiffusionControlNetPipeline.from_pretrained(\"/Users/ample/Downloads/models--runwayml--stable-diffusion-v1-5/snapshots/39593d5650112b4cc580433f6b0435385882d819\",\n # trust_remote_code=True, controlnet = controlnet, torch_dtype=torch.float16,safety_checker = None)\n self.pipline = StableDiffusionControlNetPipeline.from_pretrained(\n \"runwayml/stable-diffusion-v1-5\",\n trust_remote_code=True, controlnet=controlnet, torch_dtype=torch.float16, safety_checker=None)\n self.pipline.to(device)\n self.prompt = f'style, strong target environment style'\n\n @prompts(name=\"transfer Image style using input image skeleton\",\n description=\"useful when you want to generate an image from a user input text and maintain its component. \"\n \"like: generate an image of an object or something with the component . \"\n \"The input to this tool should be a string, representing the text used to generate image. 
\"\n \"The input to this tool should be a comma separated string of two, \")\n\n def inference(self, inputs):\n image_path, instruct_text = inputs.split(\",\")[0], ','.join(inputs.split(',')[1:])\n control_image = Image.open(image_path)\n image = np.array(control_image)\n\n kernel = int(np.median(image) // 20)\n if kernel % 2 == 0: kernel = kernel + 1\n image = cv2.GaussianBlur(image, (kernel, kernel), 0)\n\n # # transform img to edg map\n image = auto_canny(image)\n image = image[:, :, None]\n image = np.concatenate([image, image, image], axis=2)\n canny_image = Image.fromarray(image)\n prompt = instruct_text + \" \" + self.prompt\n\n image_filename = os.path.join('./image', f\"{str(uuid.uuid4())[:8]}.png\")\n image_canny_filename = os.path.join('./image', f\"{str(uuid.uuid4())[:8]}_canny.png\")\n canny_image.save(image_canny_filename)\n print(\"the control net input is:\",prompt)\n image = self.pipline(prompt, num_inference_steps=100, image=canny_image).images[0]\n image.save(image_filename)\n\n print(f\"\\nProcessed ControlNet_Text2Image, Input Text: {instruct_text}, Output Image: {image_filename}\"\n f\"canny Image: {image_canny_filename}\")\n return image_filename\n\nif __name__ == \"__main__\":\n\n # model = TextGuided_image2image(\"mps\")\n # text = \"rich detail, beautiful cloth\"\n # image = cv2.imread(\"../image/47d25199.png\")\n # model.inference(text, image)\n\n model = ControlText2Image(\"mps\")\n text = \"disco dancer with colorful lights\"\n # image = cv2.imread(\"../image/a20c4f74.png\")\n # image = np.array(image)\n model.inference(\"../image/650d66be.png\")\n\n # model = Dreambooth_Text2Image(\"mps\")\n # text = \"rich detail, beautiful cloth\"\n # image = cv2.imread(\"../image/47d25199.png\")\n # image = np.array(image)\n # model.inference(text, image)\n","repo_name":"AmpleFiled/Chat_Diffusers","sub_path":"image_generate/image_gen.py","file_name":"image_gen.py","file_ext":"py","file_size_in_byte":11726,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"3093222507","text":"import sys, io, fcntl\n\ndev = sys.argv[1]\ndither = int(sys.argv[2])\nbrightness = int(sys.argv[3])\nx = int(sys.argv[4])\ny = int(sys.argv[5])\nw = 128\nh = 64\n\n\nI2C_SLAVE=0x0703 # from i2c-dev.h\ni2caddr = 0x3c\n\nbus = io.open(dev, \"wb\", buffering=0)\nfcntl.ioctl(bus, I2C_SLAVE, i2caddr)\n\ndef ssd1306_cmd(cmd):\n bus.write(bytearray([0]+cmd))\n\ndef ssd1306_data(data):\n chunk = 256\n for i in range(0, len(data), chunk):\n bus.write(bytearray([0x40]+data[i:i+chunk]))\n\n\n# ssd1306 init\nssd1306_cmd([\n0xae, # Display off (reset)\n0xd5, # Set display clock div\n0x80, # Suggested ratio (reset: 0x80)\n0xa8, # Set mux ratio\n0x3f, # height-1 (reset: 63)\n0xd3, # Set display offset\n0x00, # No offset (reset: 0)\n0x40, # Set startline = 0 (0x40 | 0x00) (reset: 0x40)\n\n0x8d, # Charge pump - documented separately\n0x14, # charge pump enable\n\n0x20, # Memory mode\n0x00, # Horizontal addressing mode (reset: page address mode)\n0xa1, # Segment remap: col 127 is mapped to SEG0\n\n0xc8, # com scan direction remapped (reset: 0xc0)\n\n0xda, # Set com pins\n0x12, # reset: 0x12\n\n0x81, # Set contrast\nbrightness, # 0xcf for internal vcc (reset: 0x7f)\n\n0xd9, # Set precharge period\n0xf1, # for internal vcc (reset: 0x22)\n0xdb, # Set vcomh deselect level\n0x40, # undocumented (reset: 0x20)\n\n0xa4, # Resume entire display on (reset)\n0xa6, # Normal display (reset)\n0xaf]) # Display on\n\n\nfrom Xlib import display, X\nfrom PIL import Image\nimport 
numpy as np\n\nd = display.Display()\nroot = d.screen().root\n\nfrom pyxcursor import Xcursor\ncursor = Xcursor()\n\ncb = 10 # cursor boundary\n\ndef getFrameAsByteList():\n raw = root.get_image(x,y,w,h, X.ZPixmap, 0xffffffff)\n simg = Image.frombytes(\"RGB\", (w, h), raw.data, \"raw\", \"BGRX\").convert(\"1\", dither=dither)\n\n pointer = root.query_pointer()\n px, py = pointer.root_x, pointer.root_y\n if px >= x-cb and px<=x+w+cb and py >= y-cb and py < y+h+cb:\n iarray, xhot, yhot = cursor.getCursorImageArrayFast()\n cimg = Image.fromarray(iarray)\n simg.paste(cimg.convert(\"1\",dither=dither), (px-x-xhot,py-y-yhot), cimg) \n \n b = simg.rotate(-90,expand=True).tobytes()\n r = np.frombuffer(b, np.uint8).reshape(w,h//8)\n r = np.transpose(r)\n r = np.flip(r, 0)\n\n return list(r.tobytes())\n\n# There is no way to simply set the pointer, we have to reconfigure the draw area\n# There is an eight? byte cost to setting the draw area, plus setting it back afterwards\n\ndef setDrawArea(col,page):\n ssd1306_cmd([0x21, col, 127, 0x22, page, 7])\n\noldbuffer = getFrameAsByteList()\nsetDrawArea(0,0)\nssd1306_data(oldbuffer)\n\nimport time\n\ncost = 8\n\nwhile True:\n newbuffer = getFrameAsByteList()\n changed = list(map(lambda a,b: int(a!=b), newbuffer, oldbuffer))\n\n if sum(changed)==0:\n time.sleep(0.016)\n continue\n\n transactions = []\n i=0\n while i<1024:\n if changed[i]:\n # look for the next group of unchanged bytes larger than cost\n start = i\n while sum(changed[i:i+cost])!=0 and i<1024:\n i+=1\n end=i-1\n # at this point, we need to worry about if the transaction crosses a page boundary\n # if start_col != 0 and start_page != end_page, split transaction\n if start%128 != 0 and start//128 != end//128:\n split_point = ((start//128)+1)*128\n transactions.append((start, split_point-1))\n transactions.append((split_point, end))\n else:\n transactions.append((start, end))\n else:\n i+=1\n \n # future optimization: transactions on adjacent pages with similar start and end col can be combined\n\n for start, end in transactions:\n setDrawArea( start%128, start//128 )\n ssd1306_data( newbuffer[start:end+1] )\n\n oldbuffer = newbuffer\n\n\n","repo_name":"mitxela/ddc-oled","sub_path":"ssd1306.py","file_name":"ssd1306.py","file_ext":"py","file_size_in_byte":3836,"program_lang":"python","lang":"en","doc_type":"code","stars":126,"dataset":"github-code","pt":"81"} +{"seq_id":"41212235948","text":"import cv2\nimport numpy as np\n\n# Create a video capture object\ncap = cv2.VideoCapture(1)\n\n# Set the frame width and height\nframewidth = 640\nframeheight = 480\n\n# Set the camera properties\ncap.set(3, framewidth)\ncap.set(4, frameheight)\n\n# Define a color dictionary to map RGB values to color names\ncolor_dict = {\n (200, 160, 180): \"Fresh\",\n (170, 170, 180): \"Still Fresh\",\n (100, 200, 210): \"Past Best\",\n}\n\n# Define a function to get the closest color in the color dictionary\ndef get_closest_color(color):\n # Calculate the distance between the color and each color in the dictionary\n distances = {np.linalg.norm(np.array(color) - np.array(k)): v for k, v in color_dict.items()}\n \n # Return the color name with the minimum distance\n return distances[min(distances)]\n\n# Initialize variables for circle detection\nprevcircle = None\ndist = lambda x1, y1, x2, y2: (x1 - x2) ** 2 + (y1 - y2) ** 2\n\n# Loop until the user presses 'q'\nwhile True:\n # Read a frame from the camera\n ret, frame = cap.read()\n\n # Check if the frame is valid\n if not ret:\n print(\"Failed to grab frame\")\n 
break\n\n # Convert the frame to grayscale\n grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n # Apply Gaussian blur to the grayscale frame\n blurFrame = cv2.GaussianBlur(grayFrame, (17, 17), 0)\n\n # Detect circles in the blurred frame\n circles = cv2.HoughCircles(blurFrame, cv2.HOUGH_GRADIENT, 1.2, 100,\n param1=100, param2=40, minRadius=20, maxRadius=150)\n\n # Check if circles are detected\n if circles is not None:\n circles = np.uint16(np.around(circles))\n chosen = None\n\n # Find the closest circle to the previous circle\n for i in circles[0, :]:\n if chosen is None:\n chosen = i\n if prevcircle is not None:\n if dist(chosen[0], chosen[1], prevcircle[0], prevcircle[1]) <= dist(i[0], i[1], prevcircle[0], prevcircle[1]):\n chosen = i\n\n # Update the previous circle\n prevcircle = chosen\n\n # Extract the region of interest (circle) from the frame\n x, y, radius = chosen\n roi = frame[y - radius: y + radius, x - radius: x + radius]\n\n # Get the average color in the circular region\n average_color = cv2.mean(roi)[:3]\n\n # Get the closest color name from the color dictionary\n color_name = get_closest_color(average_color)\n print(average_color)\n\n # Draw the detected circle and display the color name\n cv2.circle(frame, (x, y), radius, (255, 0, 255), 3)\n cv2.putText(frame, f\"Color: {color_name}\", (int(x - radius), int(y - radius - 10)), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 255), 2)\n # Show the frame in a window\n cv2.imshow(\"Webcam\", frame)\n\n # Wait for a key press\n key = cv2.waitKey(1)\n\n # If 'q' is pressed, exit the loop\n if key == ord('q'):\n break\n\n# Release the camera and destroy the window\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"Reza75ma/cam","sub_path":"sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":3030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24540471370","text":"from selenium import webdriver\nfrom selenium.webdriver.common.action_chains import ActionChains\n\n\ndef hover_and_right_click(browser: webdriver.Chrome, element):\n hover = ActionChains(browser).move_to_element(element).context_click(element)\n hover.perform()\n\n\nfrom typing import Callable\nfrom functools import wraps\nimport datetime\nimport uuid\n\nimport logging\n\nlogger = logging.getLogger()\n\n\ndef add_meta(website: str, time_fmt: str = \"%Y-%m-%dT%H:%M:%SZ\") -> Callable:\n def dec_outer(fn):\n @wraps(fn)\n def somedec_inner(*args, **kwargs):\n meta = {}\n meta[\"parsing_started\"] = datetime.datetime.now().strftime(time_fmt)\n try:\n result, stat = fn(*args, **kwargs)\n meta[\"failed\"] = False\n except Exception as e:\n meta[\"failed\"] = True\n meta[\"exception\"] = str(e)\n logger.exception(e)\n stat = None\n result = None\n\n meta[\"parsing_ended\"] = datetime.datetime.now().strftime(time_fmt)\n meta[\"stat\"] = stat\n meta[\"website\"] = website\n meta[\"parsing_id\"] = str(uuid.uuid4())\n logger.info(f\"Parsing id generated {meta['parsing_id']}\")\n if \"browser\" in kwargs:\n kwargs.pop(\"browser\")\n meta[\"func_args\"] = kwargs\n return result, meta\n\n return somedec_inner\n\n return dec_outer\n\n\nimport os\nimport json\n\n\ndef safe_mkdir(path: str):\n if not os.path.exists(path):\n os.makedirs(path)\n\n\ndef dump_json(data: list, path: str):\n with open(path, \"w\", encoding=\"utf-8\") as file:\n json.dump(data, file)\n\n\ndef load_json(path: str):\n with open(path, \"r\", encoding=\"utf-8\") as file:\n data = json.load(file)\n return data\n\n\ndef 
dump_result_and_meta(base_dir: str, result: str, meta: dict):\n parsing_id = meta[\"parsing_id\"]\n dirname = os.path.join(base_dir, parsing_id)\n safe_mkdir(dirname)\n dump_json(meta, os.path.join(dirname, \"meta.json\"))\n with open(os.path.join(dirname, \"content.html\"), \"w\", encoding=\"utf-8\") as file:\n file.write(result)\n\n\nimport boto3\nfrom typing import Union\n\n\ndef load_to_s3(data: Union[str, dict, list], Key, Bucket, is_json=False):\n boto_session = boto3.session.Session(\n aws_access_key_id=os.environ[\"AWS_ACCESS_KEY_ID\"],\n aws_secret_access_key=os.environ[\"AWS_SECRET_ACCESS_KEY\"],\n )\n\n s3 = boto_session.client(\n service_name=\"s3\",\n endpoint_url=os.environ[\"AWS_ENDPOITNT_URL\"],\n region_name=os.environ[\"AWS_REGION_NAME\"],\n )\n\n if isinstance(data, list) or isinstance(data, dict):\n if is_json:\n data = json.dumps(data)\n else:\n raise ValueError(\"You should explicitly specify is_json option\")\n elif isinstance(data, str):\n pass\n else:\n raise ValueError(\"data should be str, list or dict\")\n s3.put_object(Body=data, Bucket=Bucket, Key=Key)\n\n\ndef dump_result_and_meta_s3(Bucket: str, base_dir: str, result: str, meta: dict):\n parsing_id = meta[\"parsing_id\"]\n dirname = os.path.join(base_dir, parsing_id)\n load_to_s3(\n meta, Key=os.path.join(dirname, \"meta.json\"), Bucket=Bucket, is_json=True\n )\n if result is not None:\n load_to_s3(result, Key=os.path.join(dirname, \"content.html\"), Bucket=Bucket)\n else:\n logger.info(\"Null result, not dumping to s3\")\n\n\nfrom itertools import product\n\n\ndef create_grid(params: dict) -> map:\n return map(lambda x: dict(zip(params.keys(), x)), product(*params.values()))\n\n\nimport sys\nimport logging\n\n\ndef config_logger(logger, name: str, folder: str) -> None:\n handler = logging.StreamHandler(sys.stdout)\n\n log_folder = os.path.join(folder, f\"logs_{name}\")\n safe_mkdir(log_folder)\n now = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n filename = os.path.join(log_folder, f\"log_{now}.log\")\n file_handler = logging.FileHandler(filename, mode=\"w\", encoding=\"utf-8\")\n\n formatter = logging.Formatter(\n \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"\n )\n handler.setFormatter(formatter)\n file_handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.addHandler(file_handler)\n logger.setLevel(logging.INFO)\n\n\nimport time\n\n\ndef infinite_scroll(browser, sleep_between_scroll, delta=0.25):\n logger.info(\"Scrolling\")\n old_height = 0\n delta = delta * browser.execute_script(\"return document.body.scrollHeight\")\n logger.info(f\"delta = {delta}\")\n second_chance = True\n while True:\n browser.execute_script(\n f\"window.scrollTo(0, document.body.scrollHeight - {delta});\"\n )\n time.sleep(sleep_between_scroll)\n browser.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n new_height = browser.execute_script(\"return document.body.scrollHeight\")\n logger.info(f\"New height is {new_height}\")\n time.sleep(sleep_between_scroll)\n if new_height == old_height:\n if second_chance:\n old_height = new_height\n browser.execute_script(\n f\"window.scrollTo(0, document.body.scrollHeight - {delta});\"\n )\n logger.info(\"Second chance\")\n second_chance = False\n time.sleep(6)\n else:\n logger.info(\"Scrolling finished!\")\n break\n else:\n old_height = new_height\n\n\ndef scroll_bottom(browser: webdriver.Chrome):\n browser.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n\n\ndef return_height(browser: webdriver.Chrome):\n return 
browser.execute_script(\"return document.body.scrollHeight\")\n","repo_name":"oldhroft/travel_scraper","sub_path":"travel_scraper/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5732,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"72457104585","text":"# 基础解析模块\r\n# 改进方向: 一套统一的表层接口, 再由指定的解析器具体分发\r\n\r\nimport os\r\nimport numpy\r\n\r\nfrom parse.link import LinkExcel\r\nfrom parse.linkjc import LinkJCExcel\r\n\r\n\r\nclass DataPre(object):\r\n \r\n def __init__(self, parser):\r\n self.parser = eval(parser)\r\n \r\n def load_all_files(self, tar_path, ref_path):\r\n self.tar_path = tar_path\r\n self.ref_path = ref_path\r\n \r\n self.__get_filenames()\r\n self.__get_instances()\r\n \r\n def __get_filenames(self):\r\n ref_files = list(filename for filename in os.listdir(self.ref_path))\r\n tar_files = list(filename for filename in os.listdir(self.tar_path))\r\n \r\n self.files = {self.ref_path: ref_files, self.tar_path: tar_files}\r\n \r\n def __get_instances(self):\r\n ref_instances = list(self.parser(os.path.join(self.ref_path, filename)) for filename in self.files[self.ref_path])\r\n tar_instances = list(self.parser(os.path.join(self.tar_path, filename)) for filename in self.files[self.tar_path])\r\n \r\n self.instances = {self.ref_path: ref_instances, self.tar_path: tar_instances}\r\n \r\n def filter_by_shape(self, shape_save=None, remove=False, printable=False):\r\n '''\r\n 以指定的shape过滤ref_path、tar_path下的excel文件\r\n shape: (bands_num, Grays_num)\r\n remove: 是否直接删除不合要求文件\r\n '''\r\n if not hasattr(self, 'instances'):\r\n raise Exception('>>> filter before load <<<')\r\n if not hasattr(self, '__shapes'):\r\n self.__get_shapes()\r\n \r\n sorted_shapes = sorted(list(self.__shapes.items()), key=lambda item: len(item[1]))\r\n self.shape_save = sorted_shapes[-1][0] if shape_save is None else tuple(shape_save)\r\n \r\n tmp_files = {self.ref_path: list(), self.tar_path: list()}\r\n tmp_instances = {self.ref_path: list(), self.tar_path: list()}\r\n \r\n for shape_curr, files_loc in sorted_shapes:\r\n if shape_curr != self.shape_save:\r\n for path, idx in files_loc:\r\n if remove:\r\n if printable:\r\n print('remove: {} --- {}'.format(self.files[path][idx], shape_curr))\r\n os.remove(os.path.join(path, self.files[path][idx]))\r\n else:\r\n if printable:\r\n print('noNeed: {} --- {}'.format(self.files[path][idx], shape_curr))\r\n tmp_files[path].append(self.files[path][idx])\r\n tmp_instances[path].append(self.instances[path][idx])\r\n else:\r\n if printable:\r\n for path, idx in files_loc:\r\n print('remain: {} --- {}'.format(self.files[path][idx], shape_curr))\r\n \r\n for path, files in tmp_files.items():\r\n for file in files:\r\n self.files[path].remove(file)\r\n \r\n for path, instances in tmp_instances.items():\r\n for instance in instances:\r\n self.instances[path].remove(instance)\r\n \r\n def __get_shapes(self):\r\n self.__shapes = dict()\r\n \r\n for path, instances in self.instances.items():\r\n for idx, ins in enumerate(instances):\r\n \r\n shape = ins.shape\r\n \r\n if shape not in self.__shapes:\r\n self.__shapes[shape] = [(path, idx)]\r\n else:\r\n self.__shapes[shape].append((path, idx))\r\n \r\n def get_D_and_A(self, tar_idx=0, ref_idx_list=None):\r\n '''\r\n 获取所有绑点的D矢量与A矩阵\r\n index_tar: 目标屏编号\r\n index_ref_list: 参考屏编号列表\r\n return: {(Gray, Lmax): (D, A)}\r\n '''\r\n if not hasattr(self, 'shape_save'):\r\n self.filter_by_shape()\r\n if ref_idx_list is None:\r\n ref_idx_list = 
range(len(self.instances[self.ref_path]))\r\n \r\n self.__get_D_and_A(tar_idx, ref_idx_list)\r\n \r\n return self.init_D_and_A, self.stop_D_and_A\r\n \r\n def __get_D_and_A(self, tar_idx, ref_idx_list):\r\n self.init_D_and_A = dict()\r\n self.stop_D_and_A = dict()\r\n \r\n self.tar_ins = self.instances[self.tar_path][tar_idx] # 当前目标屏实例 -> 索引顺序参考\r\n self.ref_ins_list = list(self.instances[self.ref_path][ref_idx] for ref_idx in ref_idx_list) # 当前参考屏实例列表\r\n \r\n for GrayLmax, stop_rgb in self.tar_ins.get_attrs()['stop_GrayLmax2rgb'].items():\r\n \r\n D_init = numpy.array(self.tar_ins.get_attrs()['init_GrayLmax2rgb'][GrayLmax]).reshape(-1, 1)\r\n D_stop = numpy.array(stop_rgb).reshape(-1, 1)\r\n \r\n A_init, A_stop = self.__extract_A(GrayLmax)\r\n \r\n self.init_D_and_A[GrayLmax] = (D_init, A_init)\r\n self.stop_D_and_A[GrayLmax] = (D_stop, A_stop)\r\n \r\n def __extract_A(self, GrayLmax):\r\n A_init, A_stop = list(), list()\r\n \r\n for ref_ins in self.ref_ins_list:\r\n A_init.append(ref_ins.get_attrs()['init_GrayLmax2rgb'][GrayLmax])\r\n A_stop.append(ref_ins.get_attrs()['stop_GrayLmax2rgb'][GrayLmax])\r\n \r\n return numpy.array(A_init).T, numpy.array(A_stop).T\r\n \r\n \r\n \r\nif __name__ == '__main__':\r\n obj = DataPre('LinkJCExcel')\r\n obj.load_all_files('screens_tar', 'screens_ref')\r\n # print(obj.instances)\r\n # obj.filter_by_shape()\r\n # print(obj.instances)\r\n init, stop = obj.get_D_and_A()\r\n print(stop)","repo_name":"xxhbdk/jojo","sub_path":"parse/gamma_parse.py","file_name":"gamma_parse.py","file_ext":"py","file_size_in_byte":5809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14391125496","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\nfrom matplotlib.widgets import Slider\nimport time\n\nimport matplotlib.animation as anim\nfrom rooms import *\n\n\n\nclass scenario:\n def __init__(self, infection_rate = 0.2, number_infected = 1, deathrate = 0.1, max_infected_time = 7):\n self.number_of_rooms = 5\n self.rooms = []\n self.opt_arangement = 0,0\n self.shape = (20,30)\n self.members = 100\n self.infection_rate = infection_rate\n self.number_infected = number_infected\n self.deathrate = deathrate\n self.max_infected_time = max_infected_time\n def find_opt_arangement(self):\n a, b = fig.get_size_inches()\n return find_opt_arangement(self.number_of_rooms, ratio=(self.shape[1] * a / 2) / (self.shape[0] * b))\n def create_rooms(self):\n i,j = self.find_opt_arangement()\n for l in range(self.number_of_rooms):\n newroom = room(self.number_infected, self.shape)\n newroom.show_on_fig(fig, i, 2 * j, (1 + (l // j)) * j + l + 1)\n newroom.compute_scale(fig)\n the_infected = random.sample(range(self.members), self.number_infected)\n for m in range(self.members):\n if m in the_infected:\n person = Person(newroom, infected = True)\n else:\n person = Person(newroom)\n person.register()\n self.rooms.append(newroom)\n def draw_all_rooms(self):\n for thisroom in self.rooms:\n thisroom.draw()\n def update_scatters(self):\n for room in self.rooms:\n room.clear_room()\n room.compute_scale(fig)\n for person in room.persons:\n person.position = person.new_random_pos()\n #person.wiggle()\n person.register()\n room.draw()\n #fig.canvas.restore_region(room.axbackground)\n #room.ax.draw_artist(room.scatter)\n #room.ax.draw_artist(room.scatter2)\n #fig.canvas.blit(room.ax.bbox)\n\n def calculate_infected(self):\n for space in self.rooms:\n for prsn in space.persons:\n if 
prsn.status == \"i\":\n for otherprsn in space.persons:\n if otherprsn.status != \"i\" and prsn is not otherprsn:\n distance = np.linalg.norm(np.array(prsn.position) - np.array(otherprsn.position))\n if distance <= prsn.radius or distance <= otherprsn.radius:\n if random.random() <= self.infection_rate:\n otherprsn.status = \"i\"\n otherprsn.infected_days = 0\n\n def calculate_death(self):\n for space in self.rooms:\n for prsn in space.persons:\n if prsn.status == \"i\":\n prsn.infected_days += 1\n if prsn.infected_days >= self.max_infected_time:\n if random.random() <= self.deathrate:\n prsn.status = \"d\"\n else:\n prsn.status = \"c\"\n\n\n\n def time_step(self):\n self.calculate_infected()\n self.calculate_death()\n\n\n\n\nplt.ion()\nfig = plt.figure(figsize=(11,4))\nfig.patch.set_facecolor('#123456')\nfig.patch.set_alpha(0.7)\n\nk = 9\nnewscenario = scenario()\nnewscenario.create_rooms()\nnewscenario.draw_all_rooms()\n\n\nt = time.time()\ncounter = 0\n\nfig.canvas.draw()\nwhile(True):\n '''if time.time()-t < 1:\n counter += 1\n else:\n print(\"fps: \", counter)\n #break'''\n\n #plt.waitforbuttonpress() #auskommentieren falls nicht erwünscht\n #insert some (positions/infections/ etc)- update function here\n newscenario.time_step()\n newscenario.update_scatters()\n fig.canvas.draw()\n fig.canvas.flush_events()\n\n\n\n\n\n","repo_name":"P4r4b0x/Corona-Simulator","sub_path":"corona-Simulator.py","file_name":"corona-Simulator.py","file_ext":"py","file_size_in_byte":3931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14325384758","text":"# from distutils import cmd\nfrom functools import cmp_to_key\n\n\n# while True:\n# response = input(\"Enter a number:\")\n# if int(response) % 7 == 0:\n# break\n\ndef custom_comparator(word1, word2):\n if word1 == word2:\n return 0\n\n p = 0\n \n while p <= len(word1) - 1 and p <= len(word2) - 1:\n diff = ord(word1[p]) - ord(word2[p]) \n if diff != 0:\n return diff\n \n p += 1\n \n return -1 if p == len(word1) else 1\n\narr = [\"bbb\", \"aaa\", \"abb\", \"abbbb\"]\n\nanswer = sorted(arr, key=cmp_to_key(custom_comparator))\nprint(answer)","repo_name":"InderdeepSync/art_of_programming","sub_path":"scratch.py","file_name":"scratch.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"16215257704","text":"\nfrom tkinter import *\n\nventana= Tk()\n\n#Estamos indicando que vamos a almacenar numeros enteros, contrario de 'StringVar()':\nvarOpcion= IntVar()\n\n\"\"\"Imprime por pantalla la opción seleccionada.\n\n\"\"\"\ndef imprimir_valor():\n\n #Preguntando si la opción seleccionada corresponde a masculino:\n if(varOpcion.get()==1):\n\n informacion.config(text=\"Masculino\")\n else:\n\n informacion.config(text='Femenino')\n\n#Un simple titulo informativo\ntitulo_informativo= Label(ventana,text=\"Seleccione una opción\").pack()\n\n#El Radiobutton me permite seleccionar una Única opcion entre varias disponibles, mediante 'value' es posible gestionar lo que se selecciona:\nRadiobutton(ventana, text=\"Masculino\", variable=varOpcion, value=1, command=imprimir_valor).pack()\n\nRadiobutton(ventana, text=\"Femenino\", variable=varOpcion, value=2, command=imprimir_valor).pack()\n\ninformacion= Label(ventana)\ninformacion.pack()\n\nventana.mainloop()","repo_name":"duran221/curso_python","sub_path":"50.InterfazGrafica 
(Radiobutton)/interfaces.py","file_name":"interfaces.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40837061735","text":"import dh\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom dh.dhsim import load_img, ft_phase_screen, ang_spec_multi_prop\nfrom dh.dhdefs import ift2, circ, cmagsq, ft2, cnormdb, forward, normdb\nfrom skimage.restoration import unwrap_phase\n\nfrom demo.demo_utils import new_ang_spec_multi_prop\n\n# pixel diameter of the object. Determines resolution\ndiam = 500\n\n# circle of 100s\nxx, yy = np.mgrid[:diam, :diam]\nImg = np.array(circ(xx - diam / 2, yy - diam / 2, diam)) * 10\n\n# AFRL sample image\n# Img=load_img(None, diam)+1\n\n\n# Arrays of 100s\n# Img=np.ones((diam,diam))*100\n\n# Amount of grid zero padding (not necessary w/o DH)\npadval = 0\n\n# make mask for unwrapper\nimage_mask = (np.pad(Img, padval) == 0)\n\n# real image\nImg = np.pad(Img, padval)\n\n# determining total image size\nN, _ = np.shape(Img)\nprint(N)\n\n# Field of view of our image in microns\nL = N * 20000 / diam # Ensuring that centered image is 20 mm in diameter to match the paper\n\n# 20000 microns= 20 mm = 2 centimeters\n\nconvert = 1e-6 # multiply by to convert microns to meters\n\ndx = L / N # microns per grid point\nx = np.linspace(-L / 2, L / 2, num=N, endpoint=False) # np.arange(-L / 2, L / 2, dx)\nfx = np.linspace(-1 / (2 * dx), 1 / (2 * dx), num=N, endpoint=False) # np.arange(-1 / (2 * dx), 1 / (2 * dx), 1 / L)\n\n[FX, FY] = np.meshgrid(fx, fx)\n\n# wavelength (microns)\nlam = 0.78\n\n# distance of propagation in microns (26 cm to match article roughly)\nz = 260000 * 1\n\n# wavenumber\nk = 2 * np.pi / lam\nkz = np.sqrt(k ** 2 - (2 * np.pi * FX) ** 2 - (2 * np.pi * FY) ** 2)\n\n# -- Angular spectrum propagation with num_phz_screen phase screen -- #\nnp.random.seed(2)\n# Make Phase screens (ro is 7000 microns (7 mm) to match article)\nA = ft_phase_screen(r0=7000 * convert, N=N, delta=dx * convert)\n\nB = ft_phase_screen(r0=7000 * convert, N=N, delta=dx * convert)\n\nC = ft_phase_screen(r0=7000 * convert, N=N, delta=dx * convert)\n\n# all phase screens together\nA_B_C = np.concatenate((A[np.newaxis,:,:], B[np.newaxis, :,:], C[np.newaxis, :,:]),axis=0)\n\n# propagate with each phase separately\nimgA, _, _ = new_ang_spec_multi_prop(Img, lam * convert, dx * convert, dx * convert,\n np.array([0, z * convert / 4, z * convert]), np.exp(1j * A))\n\nimgB, _, _ = new_ang_spec_multi_prop(Img, lam * convert, dx * convert, dx * convert,\n np.array([0, z * convert / 2, z * convert]), np.exp(1j * B))\n\nimgC, _, _ = new_ang_spec_multi_prop(Img, lam * convert, dx * convert, dx * convert,\n np.array([0, z * convert * 3 / 4, z * convert]), np.exp(1j * C))\n\n# propagated with all plates present\nimgABC, _, _ = new_ang_spec_multi_prop(Img, lam * convert, dx * convert, dx * convert,\n np.array(\n [0, z * convert / 4, z * convert / 2, z * convert * 3 / 4, z * convert]),\n np.exp(1j * A_B_C))\n\n# collect phase from all images\nwrapped_phzA = np.angle(imgA)\nunwrapped_phzA = unwrap_phase(np.ma.array(wrapped_phzA, mask=image_mask))\nunwrapped_phzA -= np.mean(unwrapped_phzA)\n\nwrapped_phzB = np.angle(imgB)\nunwrapped_phzB = unwrap_phase(np.ma.array(wrapped_phzB, mask=image_mask))\nunwrapped_phzB -= np.mean(unwrapped_phzB)\n\nwrapped_phzC = np.angle(imgC)\nunwrapped_phzC = unwrap_phase(np.ma.array(wrapped_phzC, mask=image_mask))\nunwrapped_phzC -= np.mean(unwrapped_phzC)\n\nwrapped_phzABC = 
np.angle(imgABC)\nunwrapped_phzABC = unwrap_phase(np.ma.array(wrapped_phzABC, mask=image_mask))\nunwrapped_phzABC -= np.mean(unwrapped_phzABC)\n\n\n\ncmap = 'jet'\n\nplt.figure()\nplt.imshow(unwrapped_phzA, cmap=cmap, extent=[0, L * 1e-4, 0, L * 1e-4])\nplt.title(\"Unwrapped phase w/ screen A @ 1/4 distance (cm)\")\nplt.colorbar()\n\nplt.figure()\nplt.imshow(A, cmap=cmap, extent=[0, L * 1e-4, 0, L * 1e-4])\nplt.title(\"Screen A prescription (cm)\")\nplt.colorbar()\n\nplt.figure()\nplt.imshow(unwrapped_phzB, cmap=cmap, extent=[0, L * 1e-4, 0, L * 1e-4])\nplt.title(\"Unwrapped phase w/ screen B @ 2/4 distance (cm)\")\nplt.colorbar()\n\nplt.figure()\nplt.imshow(B, cmap=cmap, extent=[0, L * 1e-4, 0, L * 1e-4])\nplt.title(\"Screen B prescription (cm)\")\nplt.colorbar()\n\n\nplt.figure()\nplt.imshow(unwrapped_phzC, cmap=cmap, extent=[0, L * 1e-4, 0, L * 1e-4])\nplt.title(\"Unwrapped phase w/ screen C @ 3/4 distance (cm)\")\nplt.colorbar()\n\nplt.figure()\nplt.imshow(C, cmap=cmap, extent=[0, L * 1e-4, 0, L * 1e-4])\nplt.title(\"Screen C prescription (cm)\")\nplt.colorbar()\n\nplt.figure()\nplt.imshow(unwrapped_phzA + unwrapped_phzB + unwrapped_phzC, cmap=cmap, extent=[0, L * 1e-4, 0, L * 1e-4])\nplt.title(\"Sum of unwrapped phases (cm)\")\nplt.colorbar()\n\nplt.figure()\nplt.imshow(unwrapped_phzABC, cmap=cmap, extent=[0, L * 1e-4, 0, L * 1e-4])\nplt.title(\"Unwrapped phase w/ all screens present (cm)\")\nplt.colorbar()\n\nplt.figure()\nplt.imshow(unwrapped_phzABC-(unwrapped_phzA + unwrapped_phzB + unwrapped_phzC), cmap=cmap, extent=[0, L * 1e-4, 0, L * 1e-4])\nplt.title(\"all screens - sum of individual (cm)\")\nplt.colorbar()\n\nplt.figure()\nplt.imshow(A+B+C, cmap=cmap, extent=[0, L * 1e-4, 0, L * 1e-4])\nplt.title(\"Sum of all screen prescriptions (cm)\")\nplt.colorbar()\n\nplt.show()\n\n\n","repo_name":"cabouman/wind_tomo","sub_path":"demo/phz_distortion_additivity.py","file_name":"phz_distortion_additivity.py","file_ext":"py","file_size_in_byte":5186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32362560887","text":"#\n# @lc app=leetcode id=88 lang=python3\n#\n# [88] Merge Sorted Array\n#\n# https://leetcode.com/problems/merge-sorted-array/description/\n#\n# algorithms\n# Easy (34.57%)\n# Total Accepted: 336K\n# Total Submissions: 960K\n# Testcase Example: '[1,2,3,0,0,0]\\n3\\n[2,5,6]\\n3'\n#\n# Given two sorted integer arrays nums1 and nums2, merge nums2 into nums1 as\n# one sorted array.\n#\n# Note:\n#\n#\n# The number of elements initialized in nums1 and nums2 are m and n\n# respectively.\n# You may assume that nums1 has enough space (size that is greater or equal to\n# m + n) to hold additional elements from nums2.\n#\n#\n# Example:\n#\n#\n# Input:\n# nums1 = [1,2,3,0,0,0], m = 3\n# nums2 = [2,5,6], n = 3\n#\n# Output: [1,2,2,3,5,6]\n#\n#\n#\nfrom typing import List\n\n\nclass Solution:\n def merge(self, a: List[int], m: int, b: List[int], n: int) -> None:\n \"\"\"\n Do not return anything, modify nums1 in-place instead.\n \"\"\"\n w = len(a) - 1\n l, r = m-1, n-1\n\n while l >= 0 and r >= 0:\n if a[l] >= b[r]:\n a[w] = a[l]\n l -= 1\n else:\n a[w] = b[r]\n r -= 1\n w -= 1\n while r >= 0:\n a[w] = b[r]\n r -= 1\n w -= 1\n while l >= 0:\n a[w] = a[l]\n l -= 1\n w -= 1\n\n # return a\n\n\ndef assert_eq(actual, expected):\n if actual != expected:\n raise AssertionError('expected: %s, actual: %s' % (expected, actual))\n\n\ndef test(input_, output):\n assert_eq(Solution().merge(*input_), output)\n\n\n# if __name__ == 
'__main__':\n# test(([1, 2, 3, 0, 0, 0], 3, [2, 5, 6], 3), [1, 2, 2, 3, 5, 6])\n","repo_name":"balta2ar/scratchpad","sub_path":"myleetcode/088_merge_sorted_array/88.merge-sorted-array.py","file_name":"88.merge-sorted-array.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"22949976679","text":"import numpy as np\n\n\nclass FrameFactory:\n\n @staticmethod\n def get_image(robot_bodies, sensor_lines, simulation_map, collis_ind=None):\n local_map = np.multiply(simulation_map.plane.astype(int), 255)\n local_map = np.dstack((local_map, local_map, local_map))\n for r in range(0, robot_bodies.shape[1]):\n # draw robot body\n for b in range(0, robot_bodies.shape[0]):\n if 0 <= robot_bodies[b, r, 0] < simulation_map.plane.shape[0] and 0 <= robot_bodies[b, r, 1] < \\\n simulation_map.plane.shape[1]:\n if collis_ind[0, r] == 1.0:\n local_map[robot_bodies[b, r, 0], robot_bodies[b, r, 1], 0] = 255\n local_map[robot_bodies[b, r, 0], robot_bodies[b, r, 1], 1] = 0\n local_map[robot_bodies[b, r, 0], robot_bodies[b, r, 1], 2] = 0\n else:\n local_map[robot_bodies[b, r, 0], robot_bodies[b, r, 1], 0] = r + 2\n local_map[robot_bodies[b, r, 0], robot_bodies[b, r, 1], 1] = r + 2\n local_map[robot_bodies[b, r, 0], robot_bodies[b, r, 1], 2] = r + 2\n\n # draw robot sensors\n for s in range(0, len(sensor_lines[r])):\n for lx, ly in sensor_lines[r][s]:\n if 0 <= lx < simulation_map.plane.shape[0] and 0 <= ly < simulation_map.plane.shape[1]:\n local_map[lx, ly] = r + 2\n\n # start point as blue\n local_map[simulation_map.start_point[0, 0, 0], simulation_map.start_point[0, 0, 1], 0] = 0\n local_map[simulation_map.start_point[0, 0, 0], simulation_map.start_point[0, 0, 1], 1] = 0\n local_map[simulation_map.start_point[0, 0, 0], simulation_map.start_point[0, 0, 1], 2] = 255\n\n # end point as green\n local_map[simulation_map.end_point[0, 0, 0], simulation_map.end_point[0, 0, 1], 0] = 0\n local_map[simulation_map.end_point[0, 0, 0], simulation_map.end_point[0, 0, 1], 1] = 255\n local_map[simulation_map.end_point[0, 0, 0], simulation_map.end_point[0, 0, 1], 2] = 0\n\n return local_map\n\n @staticmethod\n def get_image_zxy(robot_bodies, sensor_lines, simulation_map):\n xyz = FrameFactory.get_image(robot_bodies, sensor_lines, simulation_map)\n return FrameFactory.to_zxy(xyz)\n\n @staticmethod\n def to_zxy(xyz):\n xzy = np.swapaxes(xyz, 1, 2)\n zxy = np.swapaxes(xzy, 0, 1)\n return zxy\n","repo_name":"michalmlaticek/navigation_neat","sub_path":"src/evolution/FrameFactory.py","file_name":"FrameFactory.py","file_ext":"py","file_size_in_byte":2496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37878935895","text":"from django.shortcuts import redirect ,render\nfrom .models import Event\nfrom django.core.paginator import Paginator\nfrom .forms import ReservationForm , EventForm\nfrom django.urls import reverse\nfrom django.contrib.auth.decorators import login_required\nfrom .filters import EventFilter\nfrom datetime import datetime\n\ndef event_weekly(request):\n today = datetime.now().date()\n event_weekly= Event.objects.filter(eve_date_start__gte=today).order_by('eve_date_start')\n paginator = Paginator(event_weekly, 5) \n page_number = request.GET.get(\"page\")\n page_obj = paginator.get_page(page_number)\n\n context={\"event_weekly\":page_obj}\n return render(request, \"event/event_weekly.html\", context)\n\ndef events_list(request):\n events_list = 
Event.objects.order_by(\"eve_date_start\")\n my_filter = EventFilter(request.GET,queryset=events_list)\n events_list = my_filter.qs\n paginator = Paginator(events_list, 5) \n page_number = request.GET.get(\"page\")\n page_obj = paginator.get_page(page_number)\n\n context={\"events_list\":page_obj,\"my_filter\":my_filter}\n return render(request, \"event/events_list.html\", context)\n\ndef event_detail(request,eve_slug):\n event_detail = Event.objects.get(eve_slug=eve_slug)\n if request.method == 'POST':\n form = ReservationForm(request.POST)\n if form.is_valid():\n myform = form.save(commit=False)\n myform.res_event = event_detail\n myform.save()\n form = ReservationForm()\n context = {'event_detail':event_detail,'form':form}\n return render(request,'event/event_detail.html',context)\n else:\n form = ReservationForm()\n context = {'event_detail':event_detail,'form':form}\n return render(request,'event/event_detail.html',context)\n\n@login_required\ndef add_event(request):\n if request.method == 'POST':\n form = EventForm(request.POST ,request.FILES)\n if form.is_valid():\n myform=form.save(commit=False)\n myform.eve_owner = request.user\n myform.save()\n\n return redirect(reverse('event:events_list'))\n else:\n form = EventForm()\n return render(request,'event/add_event.html',{'form':form})","repo_name":"KinanWehba/partea","sub_path":"event/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12745988745","text":"nums1 = [1,2,3,0,0,0]\r\nm = 3\r\nnums2 = [2,5,6]\r\nn = 3\r\n\r\nclass Sorted():\r\n def mergeSorted(self, nums1, nums2):\r\n nums3 = (nums1 + nums2)\r\n a = len(nums3)\r\n for i in range(a - 1):\r\n for j in range (i + 1, a):\r\n if nums3[i] > nums3[j]:\r\n nums3[i], nums3[j] = nums3[j], nums3[i]\r\n return nums3\r\n\r\nz = Sorted()\r\nprint (z.mergeSorted(nums1, nums2))\r\n\r\nclass Sorted1():\r\n def mergeSorted1(self, nums1, m, nums2, n):\r\n nums1_copy = nums1[:m]\r\n nums1 = []\r\n p1 = 0\r\n p2 = 0\r\n while p1 < m and p2 < n:\r\n if nums1_copy[p1] < nums2[p2]:\r\n nums1.append(nums1_copy[p1])\r\n p1 += 1\r\n else:\r\n nums1.append(nums2[p2])\r\n p2 += 1\r\n if p1 < m:\r\n nums1[p1 + p2:] = nums1_copy[p1:]\r\n if p2 < n:\r\n nums1[p1 + p2:] = nums2[p2:]\r\n return nums1\r\n\r\nz = Sorted1()\r\nprint (z.mergeSorted1(nums1, m, nums2, n))","repo_name":"derahul9/Python","sub_path":"Programming Interview/Programs/Easy/Merge Sorted Array.py","file_name":"Merge Sorted Array.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13527231996","text":"from datetime import datetime\n\nimport pytz\nfrom dateutil.tz import tzutc\n\nfrom app import db\n\n\nclass User(db.Model):\n __tablename__ = 'users'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(256), nullable=False)\n father = db.Column(db.String(256), nullable=False)\n timestamp = db.Column(db.DateTime, server_default=db.func.now(),)\n\n def __init__(self, name,father):\n self.name = name\n self.father=father\n self.timestamp = datetime.utcnow()\n\n\n def serialize(self):\n return {\n 'id': str(self.id),\n 'name': self.name,\n 'created_at': str(self.timestamp),\n\n }\n\n def save(self):\n db.session.add(self)\n 
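        # commit() flushes the pending INSERT so the new user row is persisted;
        # hypothetical usage sketch: User(name='Ada', father='Babbage').save()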
db.session.commit()\n","repo_name":"MohamadAbdUlaziz938/flask-my_sql","sub_path":"app/models/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"11938235548","text":"# for i in range(4):\n#     print(\"# \"*4)\nfor j in range(4):\n    for i in range (4):\n        print(\"# \",end=\" \")\n    print() # print a newline after finishing the nested loop\nprint(\"^\"*20)\n\nfor i in range(4):\n    for j in range(4-i):\n        print(\"*\", end=\" \")\n    print()\nprint(\"^\"*20)\nfor i in range(4):\n    for j in range(i+1):\n        print(\"%\", end =\" \")\n    print()\n\nprint(\"^\"*20)\nfor i in range(4):\n    for j in range(i,4):\n        print(\"%\", end =\" \")\n    print()\n\nfor j in range(1,6):\n    for i in range(1, 7 - j):\n        print(f\"{i}\",end =\" \")\n    print()\n\nprint(list(range(21)))\nsum20 = 0\nfor i in list(range(21)) :\n    if i % 2==0:\n        sum20+=i\n\nprint(sum20)\n\ndice_result = [5,6,4,4,2,5,4,4,5,3,3,2,6,1,2,1,1,6,5]\nn6=0\nn1 = 0\nfor i in dice_result :\n    if i == 6 :\n        n6+=1\n    elif i==1 :\n        n1+=1\n\nif n6 == 0 or n1 == 0 : # report when 6 or 1 never appeared\n    print(\"it doesn't appear\")\nprint(f\" 6 appeared {n6} times \")\nprint(f\" 1 appeared {n1} times \")\nprint(len(dice_result))\nfours = 0\nle = len(dice_result)\nfor i in range(le - 1): # stop one short so dice_result[i + 1] stays in range\n    if dice_result[i] == 4 and dice_result[i + 1] == 4:\n        fours += 1\n\nprint(f\"4s came two times in a row {fours} times\")\n\n# two_6s = 0\n# l = len(dice_result)\n#\n# for i in range(l - 1):\n#     if dice_result[i] == 4 and dice_result[i + 1] == 4:\n#         two_6s += 1\n# print(two_6s)\n\nprint(\"^\"* 30 )\n\nfor i in range(60):\n    i+=10\n    if i == 50 :\n        print(\"Congrats you have completed the challenge\")\n        break\n    elif i % 10 == 0 :\n        answer = input(\"Are you tired ? Yes/NO ? 
\").lower().strip()\n if answer == \"yes\":\n print(f\"You did {i} push ups \")\n break\n else :\n print(f\"You completed {i+10} push up \")\n\n","repo_name":"Motasem-mk/code-basics-python","sub_path":"for exercises.py","file_name":"for exercises.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21883676648","text":"import os, random, string\nfrom test_framework.util import findBitcoind, expectException, JSONRPCException\nimport test_framework.loginit\nfrom test_framework.test_framework import ComparisonTestFramework\nfrom test_framework.util import waitFor, satoshi_round, assert_equal, assert_raises_rpc_error, start_node, standardFlags\nfrom test_framework.comptool import TestManager, TestInstance, RejectResult\nfrom test_framework.blocktools import *\nfrom test_framework.script import *\n\n# far into the future\nNOV152018_START_TIME = 2000000000\n\n# Error due to invalid opcodes\nBAD_OPCODE_ERROR = b'mandatory-script-verify-flag-failed (Opcode missing or not understood)'\nRPC_BAD_OPCODE_ERROR = \"16: \" + \\\n BAD_OPCODE_ERROR.decode(\"utf-8\")\n\n\nclass PreviousSpendableOutput():\n def __init__(self, tx=CTransaction(), n=-1):\n self.tx = tx\n self.n = n # the output we're spending\n\n\nclass CheckDataSigActivationTest(ComparisonTestFramework):\n def __init__(self):\n super().__init__()\n self.num_nodes = 1\n self.setup_clean_chain = True\n\n def setup_network(self):\n self.nodes = []\n self.nodes.append(start_node(0, self.options.tmpdir,\n ['-debug', '-whitelist=127.0.0.1',\n \"-consensus.forkNov2018Time=%d\" % NOV152018_START_TIME]))\n self.is_network_split = False\n\n def create_checkdatasig_tx(self, count):\n node = self.nodes[0]\n utxos = node.listunspent()\n assert(len(utxos) > 0)\n utxo = utxos[0]\n tx = CTransaction()\n value = int(satoshi_round(utxo[\"amount\"]) * COIN) // count\n tx.vin = [CTxIn(COutPoint(int(utxo[\"txid\"], 16), utxo[\"vout\"]))]\n tx.vout = []\n\n # test using the signdata rpc\n addr = node.getnewaddress()\n msg = \"Joo Janta 200 Super-Chromatic Peril Sensitive Sunglasses\"\n message = bytearray(msg.encode())\n rpc = node.signdata(addr, \"string\", msg,\"verbose\")\n\n signature = bytearray.fromhex(rpc[\"signature\"])\n pubkey = bytearray.fromhex(rpc[\"pubkey\"])\n\n # test with hard-coded values\n #signature = bytearray.fromhex(\n # '30440220256c12175e809381f97637933ed6ab97737d263eaaebca6add21bced67fd12a402205ce29ecc1369d6fc1b51977ed38faaf41119e3be1d7edfafd7cfaf0b6061bd07')\n #pubkey = bytearray.fromhex(\n # '038282263212c609d9ea2a6e3e172de238d8c39cabd5ac1ca10646e23fd5f51508')\n\n for _ in range(count):\n tx.vout.append(CTxOut(value, CScript(\n [signature, message, pubkey, OP_CHECKDATASIG])))\n tx.vout[0].nValue -= min(value/100, 1000) #node.calculate_fee(tx.toHex())\n tx_signed = node.signrawtransaction(tx.toHex())[\"hex\"]\n return tx_signed\n\n def testDatasigRpc(self):\n node = self.nodes[0]\n # bad address\n expectException(lambda: node.signdata(\"bad\",\"string\", \"foo\"), JSONRPCException)\n # address I can't sign\n expectException(lambda: node.signdata('bchreg:qq0ndugr327fwxucntduem4t3jvvmjtdevmdry0lqc',\"string\", \"foo\"), JSONRPCException)\n addr = node.getnewaddress()\n # bad message format\n expectException(lambda: node.signdata(addr,\"bad\", \"foo\"), JSONRPCException)\n expectException(lambda: node.signdata(addr,\"hex\", \"zzbad\"), JSONRPCException)\n expectException(lambda: node.signdata(addr,\"hash\", \"ba0d\"), JSONRPCException) # 
its hex but wrong length\n expectException(lambda: node.signdata(addr,\"hash\", \"z\"*32), JSONRPCException) # not hex correct length\n\n # check same sig for same input of different format (works because using rfc6979 deterministic sigs)\n s = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(20))\n sig0 = node.signdata(addr,\"string\", s)\n sig1 = node.signdata(addr,\"hex\", hexlify(s.encode()).decode())\n sig2 = node.signdata(addr,\"hash\", hexlify(sha256(s.encode())).decode())\n assert_equal(sig0, sig1)\n assert_equal(sig0, sig2)\n\n # good signatures\n node.importprivatekeys(\"no-rescan\", 'cU4WAhpniFvwT8Z13MjNyE1tkzp8n7wDPwwe8WzqqBAejZXq948J')\n sig = node.signdata('bchreg:qq3srvg7hrzf9wu5h33du5l7n3fpx7jw5gdhhk390u',\"string\", \"foo\")\n assert_equal(sig, '3045022100C548DFF1354117D260B03E167BD25B5F79F6AB17219CAF5FC9CA1C94F0B9BB3C022049D9521CC04A0AD4DE42F81A1623D1F179A47B21B814AB7FBC2FF192AF023ECF')\n sig2 = node.signdata('bchreg:qq3srvg7hrzf9wu5h33du5l7n3fpx7jw5gdhhk390u',\"hex\", \"0102030405060708090a\")\n assert_equal(sig2, '3044022061E179B30A3B6ACD31F21A77ACB162FBB998473ACE4AAA56CC808FFD2415963B0220793AF5402AD9534D2D5F26248421499520E6B571C4BE31B90CBBFF602C0A7FDC')\n\n # If I use the \"hash\" format and calculate the sha256 myself, it ought to create the same signature as \"signdata\"\n assert_equal(sig, node.signdata('bchreg:qq3srvg7hrzf9wu5h33du5l7n3fpx7jw5gdhhk390u',\"hash\", hexlify(sha256(b\"foo\")).decode()))\n\n\n def run_test(self):\n self.testDatasigRpc()\n self.test = TestManager(self, self.options.tmpdir)\n self.test.add_all_connections(self.nodes)\n # Start up network handling in another thread\n NetworkThread().start()\n self.test.run()\n\n def get_tests(self):\n node = self.nodes[0]\n\n # First, we generate some coins to spend.\n node.generate(125)\n\n # Create various outputs using the OP_CHECKDATASIG\n # to check for activation.\n tx_hex = self.create_checkdatasig_tx(25)\n txid = node.sendrawtransaction(tx_hex)\n assert(txid in set(node.getrawmempool()))\n\n node.generate(1)\n assert(txid not in set(node.getrawmempool()))\n\n # register the spendable outputs.\n tx = FromHex(CTransaction(), tx_hex)\n tx.rehash()\n spendable_checkdatasigs = [PreviousSpendableOutput(tx, i)\n for i in range(len(tx.vout))]\n\n def spend_checkdatasig():\n outpoint = spendable_checkdatasigs.pop()\n out = outpoint.tx.vout[outpoint.n]\n tx = CTransaction()\n tx.vin = [CTxIn(COutPoint(outpoint.tx.sha256, outpoint.n))]\n tx.vout = [CTxOut(out.nValue, CScript([])),\n CTxOut(0, CScript([random.getrandbits(800), OP_RETURN]))]\n tx.vout[0].nValue -= min(tx.vout[0].nValue/100, 1000) # node.calculate_fee(tx)\n tx.rehash()\n return tx\n\n # Check that transactions using checkdatasig are not accepted yet.\n logging.info(\"Try to use the checkdatasig opcodes before activation\")\n\n tx0 = spend_checkdatasig()\n tx0_hex = ToHex(tx0)\n assert_raises_rpc_error(-26, RPC_BAD_OPCODE_ERROR,\n node.sendrawtransaction, tx0_hex)\n\n # Push MTP forward just before activation.\n logging.info(\"Pushing MTP just before the activation and check again\")\n node.setmocktime(NOV152018_START_TIME)\n\n # returns a test case that asserts that the current tip was accepted\n def accepted(tip):\n return TestInstance([[tip, True]])\n\n # returns a test case that asserts that the current tip was rejected\n def rejected(tip, reject=None):\n if reject is None:\n return TestInstance([[tip, False]])\n else:\n return TestInstance([[tip, reject]])\n\n def next_block(block_time):\n # get block height\n 
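            # the height for the new coinbase comes from the node's current chain tip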
blockchaininfo = node.getblockchaininfo()\n height = int(blockchaininfo['blocks'])\n\n # create the block\n coinbase = create_coinbase(height)\n coinbase.rehash()\n block = create_block(\n int(node.getbestblockhash(), 16), coinbase, block_time)\n\n # Do PoW, which is cheap on regnet\n block.solve()\n return block\n\n for i in range(6):\n b = next_block(NOV152018_START_TIME + i - 1)\n yield accepted(b)\n\n # Check again just before the activation time\n assert_equal(node.getblockheader(node.getbestblockhash())['mediantime'],\n NOV152018_START_TIME - 1)\n assert_raises_rpc_error(-26, RPC_BAD_OPCODE_ERROR,\n node.sendrawtransaction, tx0_hex)\n\n def add_tx(block, tx):\n block.vtx.append(tx)\n block.hashMerkleRoot = block.calc_merkle_root()\n block.solve()\n\n b = next_block(NOV152018_START_TIME + 6)\n add_tx(b, tx0)\n yield rejected(b, RejectResult(16, b'bad-blk-signatures'))\n\n logging.info(\"Activates checkdatasig\")\n fork_block = next_block(NOV152018_START_TIME + 6)\n yield accepted(fork_block)\n\n assert_equal(node.getblockheader(node.getbestblockhash())['mediantime'],\n NOV152018_START_TIME)\n\n tx0id = node.sendrawtransaction(tx0_hex)\n assert(tx0id in set(node.getrawmempool()))\n\n # Transactions can also be included in blocks.\n nov152018forkblock = next_block(NOV152018_START_TIME + 7)\n add_tx(nov152018forkblock, tx0)\n yield accepted(nov152018forkblock)\n\n logging.info(\"Cause a reorg that deactivate the checkdatasig opcodes\")\n\n # Invalidate the checkdatasig block, ensure tx0 gets back to the mempool.\n assert(tx0id not in set(node.getrawmempool()))\n\n node.invalidateblock(format(nov152018forkblock.sha256, 'x'))\n waitFor(3, lambda: tx0id in set(node.getrawmempool()), \"Transaction shoud be included in the mempool\")\n\n node.invalidateblock(format(fork_block.sha256, 'x'))\n waitFor(3, lambda: tx0id not in set(node.getrawmempool()), \"Transaction should not be included in the memopool\")\n\n\nif __name__ == '__main__':\n CheckDataSigActivationTest().main()\n\n# Create a convenient function for an interactive python debugging session\ndef Test():\n t = CheckDataSigActivationTest()\n t.drop_to_pdb = True\n bitcoinConf = {\n \"debug\": [\"blk\", \"mempool\", \"net\", \"req\"],\n \"logtimemicros\": 1\n }\n\n flags = standardFlags()\n t.main(flags, bitcoinConf, None)\n","repo_name":"BitcoinUnlimited/BitcoinUnlimited","sub_path":"qa/rpc-tests/checkdatasig_activation.py","file_name":"checkdatasig_activation.py","file_ext":"py","file_size_in_byte":10124,"program_lang":"python","lang":"en","doc_type":"code","stars":453,"dataset":"github-code","pt":"81"} +{"seq_id":"41383209570","text":"inputFile = open(r'C:\\Users\\imars\\Documents\\Advent of Code\\Advent-of-Code\\Day9\\PuzzleInput.txt', 'r')\n\n#defining necessary variables for figuring out grid size and knot starting position\nrTotal = 0\nlTotal = 0\nuTotal = 0\ndTotal = 0\n\nvTotal = 0\nvMax = 0\nvMin = 0\nhTotal = 0\nhMax = 0\nhMin = 0\n\nfor line in inputFile:\n instructions = (line.strip()).split()\n direction = instructions[0]\n count = int(instructions[1])\n\n #figure out how far the head moves in every direction\n if direction == \"R\":\n rTotal += count\n hTotal += count\n if hTotal > hMax:\n hMax = hTotal\n if hTotal < hMin:\n hMin = hTotal\n \n elif direction == \"L\":\n lTotal += count\n hTotal -= count\n if hTotal > hMax:\n hMax = hTotal\n if hTotal < hMin:\n hMin = hTotal\n\n elif direction == \"U\":\n uTotal += count\n vTotal -= count\n if vTotal > vMax:\n vMax = vTotal\n if vTotal < vMin:\n vMin = vTotal\n\n elif 
direction == \"D\":\n dTotal += count\n vTotal += count\n if vTotal > vMax:\n vMax = vTotal\n if vTotal < vMin:\n vMin = vTotal\n\n#reset the input file so we can read through it again later\ninputFile.seek(0)\n\n#build the grid\ngridHeight = abs(vMax - vMin + 1)\ngridWidth = abs(hMax - hMin + 1)\n\ngrid = []\n\nfor row in range(gridHeight):\n newRow = []\n for col in range(gridWidth):\n newRow.append('.')\n grid.append(newRow)\n\nstartingPosition = [abs(vMin), abs(hMin)]\n\n# make a knot class and instantiate the head and the tail, just to make it easier to reference their positions\nclass Knot:\n def __init__(self, row, col):\n self.row = row\n self.col = col\n\nhead = Knot(startingPosition[0], startingPosition[1])\ntail = Knot(startingPosition[0], startingPosition[1])\n\n#function to move the head movement - not actually marking the head on the grid because it's not necessary at least right now\ndef moveHead(direction):\n if direction == 'R':\n head.col += 1\n elif direction =='L':\n head.col -= 1\n elif direction == 'U':\n head.row -= 1\n elif direction == 'D':\n head.row += 1\n\n#function to move the tail according to the movement of the head\ndef moveTail(direction):\n #only move the tail if the head is more than 2 units away - tail should always be aligned behind the head in direction of movement\n if abs(head.row - tail.row) > 1 or abs(head.col - tail.col) > 1:\n if direction == 'R':\n tail.col = head.col-1\n tail.row = head.row\n elif direction =='L':\n tail.col = head.col+1\n tail.row = head.row\n elif direction == 'U':\n tail.row = head.row+1\n tail.col = head.col\n elif direction == 'D':\n tail.row = head.row-1\n tail.col = head.col\n #mark each tail position on the grid\n grid[tail.row][tail.col] = '#'\n\n# read the file again and move the head and tail according to the instructions\nfor line in inputFile:\n instructions = (line.strip()).split()\n direction = instructions[0]\n count = int(instructions[1])\n\n for i in range(count):\n moveHead(direction)\n moveTail(direction)\n\ntailCount = 0\n\n# look at each square on the grid and count how many squares have had the tail in them\nfor row in grid:\n for col in row:\n if col == '#':\n tailCount += 1\n\n# Part 1 Solution\nprint(tailCount)","repo_name":"IMarshall/Advent-of-Code","sub_path":"Day9/9_RopeBridge.py","file_name":"9_RopeBridge.py","file_ext":"py","file_size_in_byte":3486,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"30672670774","text":"def make_table_readonly(lua, tbl, error_format=\"Cannot set value '%s' on %s\"):\n \"\"\"Wrap a Lua table with a metatable that prevents writes.\"\"\"\n return lua.eval('''\n function(tbl, error_format)\n local new_table = {}\n setmetatable(new_table, {\n __index=tbl,\n __newindex=function(t, k, v)\n error(string.format(error_format, k, new_table))\n end,\n })\n return new_table\n end\n ''')(tbl, error_format)\n\ndef make_table_readonly_recursive(lua, tbl, *args, **kwargs):\n \"\"\"Run LuaSandbox.make_table_readonly() recursively, so that any tables\n that are members of this one are also read-only.\n \"\"\"\n new_table = lua.table()\n for key, value in tbl.items():\n if lua.eval('type')(value) == 'table':\n new_table[key] = make_table_readonly_recursive(lua, value, *args, **kwargs)\n else:\n new_table[key] = value\n return make_table_readonly(lua, new_table, *args, 
**kwargs)\n","repo_name":"HactarCE/cellua","sub_path":"src/utils/lua.py","file_name":"lua.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"9471583724","text":"t = int(input())\nfor _ in range(t):\n\tn = int(input())\n\tarr = []\n\ts = input()\n\tfor x in s:\n\t\tif x == 'L' or x == 'R':\n\t\t\tarr += [x]\n\t\telif x == 'D':\n\t\t\tarr += ['U']\n\t\telse:\n\t\t\tarr += ['D']\n\n\tprint(*arr, sep='')\n\t\n\n","repo_name":"qazz625/Competitive-Programming-Codes","sub_path":"Codeforces/1567/1567a.py","file_name":"1567a.py","file_ext":"py","file_size_in_byte":213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31283303467","text":"import machine\nimport utime\n\n# Konfiguration der Pins\npin_GP26 = machine.ADC(26)\npin_GP27 = machine.ADC(27)\nadc_width = 3.3 # Maximale Spannung in Volt (3.3V für Raspberry Pi Pico)\n\ndef measure_voltage(pin):\n raw_value = pin.read_u16()\n voltage = (raw_value / 65535) * adc_width\n return voltage\n\n# Anzahl der Messungen\nnum_measurements = 2\n\n# Benutzereingabe für die zusätzliche Zeile\ndatName = input(\"Bitte geben Sie den Name Datei mit Dateierwartungen '.txt' ein: \")\nprint(\"'\\n\")\nprint(\"Die Datei namens \", datName, \" wird geschafft.\")\nprint(\"\\n\")\nuser_line = input(\"Bitte geben Sie den Name Messungen ein: \")\n\n# dateiaufgaben nennen\nwith open(datName, \"a\") as file: # \"a\" zum Anhängen an die Datei\n if file.tell() != 0: # Überprüfen, ob die Datei nicht leer ist\n file.write(\"\\n\" * 3) # Drei Zeilenumbrüche hinzufügen\n file.write(user_line + \"\\n\") # Benutzerdefinierte Zeile hinzufügen\n\nwhile True:\n input(\"Drücken Sie Enter, um eine Messung durchzuführen...\")\n \n results = []\n for _ in range(num_measurements):\n voltage1 = measure_voltage(pin_GP26) # Annahme: 3.3V Referenzspannung\n voltage2 = measure_voltage(pin_GP27) # Annahme: 3.3V Referenzspannung\n diff_voltage = voltage1 - voltage2\n current = voltage2 / 0.098 # Annahme: Umrechnungsfaktor 0.098\n results.append((diff_voltage, current))\n utime.sleep(1) # Eine Sekunde warten\n print(f\"Differenzspannung: {diff_voltage:.4f} V, Strom: {current:.4f} A\")\n\n # Ergebnisse in Datei speichern\n with open(datName, \"a\") as file: # \"a\" zum Anhängen an die Datei\n for diff_voltage, current in results:\n file.write(f\"Differenzspannung: {diff_voltage:.4f} V, Strom: {current:.4f} A\\n\")\n\n print(\"Messungen abgeschlossen und Ergebnisse in 'kennlinien.txt' gespeichert.\")\n","repo_name":"WilhelmBelov/PicoLED-VoltLogger","sub_path":"flash.py","file_name":"flash.py","file_ext":"py","file_size_in_byte":1850,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5893988636","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n'''A module that manages rezzurect and Rez package distribution.'''\n\n# IMPORT STANDARD LIBRARIES\nfrom distutils import dir_util\nimport functools\nimport getpass\nimport logging\nimport glob\nimport imp\nimport os\n\n# IMPORT THIRD-PARTY LIBRARIES\nfrom rez.vendor.version import version as rez_version\nfrom rez import package_maker__ as package_maker\nfrom rez import resolved_context\nfrom rez import exceptions\nimport rezzurect\n\n# IMPORT LOCAL LIBRARIES\nfrom .utils import multipurpose_helper\n\n\n_DEFAULT_VALUE = object()\nPACKAGE_EXCEPTIONS = (\n # This happens if no versions of a package have been installed yet\n 
exceptions.PackageFamilyNotFoundError,\n\n # This happens if a package has at least one installed version but it is\n # not the version that you trying to query in `packages`\n #\n exceptions.PackageNotFoundError,\n)\nLOGGER = logging.getLogger('rezzurect.manager')\n\n\ndef copy_rezzurect_to(path):\n '''Copy rezzurect into the given directory.\n\n rezzurect needs to be available for import when a package is executed so\n this function should be run (ideally always but only) once on-build.\n\n Args:\n path (str): The absolute path to a package's python folder.\n\n '''\n root = os.path.dirname(os.path.realpath(rezzurect.__file__))\n name = os.path.basename(root) # Should always just be 'rezzurect'\n\n if not os.path.isdir(path):\n os.makedirs(path)\n\n destination = os.path.join(path, name)\n dir_util.copy_tree(root, destination)\n\n\ndef sort_by_version(version, item):\n '''int: If the given version is found, send it to the front.\n\n Sort the other non-matching items alphabetically.\n\n '''\n base = os.path.basename(item)\n\n if base == version:\n return 1\n\n return 2\n\n\ndef get_version_path(root, package, version=''):\n '''Get the absolute path to a package's version-install folder.\n\n Args:\n root (str):\n The absolute path to where all installed packages live.\n package (str):\n The name of the package to get the version path of.\n version (str, optional):\n The specific version to get the path of. If no version is given,\n the latest version is used, instead. Default: \"\".\n\n Returns:\n str: The found path, if any.\n\n '''\n if not version and '-' in package:\n package, version = package.split('-')\n\n path = os.path.join(root, package)\n\n versions = [path_ for path_ in glob.glob(os.path.join(path, '*'))\n if os.path.isdir(path_)]\n versions = sorted(versions, key=functools.partial(sort_by_version, version))\n\n try:\n return versions[0]\n except IndexError:\n return ''\n\n\ndef get_package_definition(root, package, version=''):\n '''Import a Rez package's package.py file as a Python module.\n\n Args:\n root (str):\n The absolute path to where all installed packages2live.\n package (str):\n The name of the package to get the version path of.\n version (str, optional):\n The specific version to get the path of. If no version is given,\n the latest version is used, instead. 
Default: \"\".\n\n Raises:\n EnvironmentError:\n If the given package has missing or incomplete data.\n This is usually an indicator of a much bigger problem that requires\n intervention of a developer so, instead of returning early, we\n raise an exception instead.\n\n Returns:\n module or NoneType: The found package.py file, if any.\n\n '''\n if not version and '-' in package:\n # Note: This section assumes that the package name NEVER contains a \"-\"\n # (which, as far as I know, is enforced by Rez everywhere)\n #\n items = package.split('-')\n package = items[0]\n version = '-'.join(items[1:])\n\n version_path = get_version_path(root, package, version=version)\n\n if not version_path:\n raise EnvironmentError('No version could be found')\n\n package_path = os.path.join(version_path, 'package.py')\n\n if not os.path.isfile(package_path):\n raise EnvironmentError('Package path \"{package_path}\" does not exist.'\n ''.format(package_path=package_path))\n\n try:\n return imp.load_source(\n 'rez_{package}_definition'.format(package=package),\n package_path,\n )\n except ImportError:\n pass\n\n\ndef make_package(definition, build_path):\n '''Install our predefined Rez package.py file to a separate build folder.\n\n Args:\n definition (module): The package.py to make a copy to into `build_path`.\n build_path (str): The absolute path to install `definition` to.\n\n Returns:\n `rez.package_maker__.PackageMaker`: The created package.\n\n '''\n with package_maker.make_package(definition.name, build_path, skip_existing=True) as pkg:\n mirror('authors', definition, pkg, default=[getpass.getuser()])\n mirror('commands', definition, pkg)\n mirror('description', definition, pkg)\n mirror('help', definition, pkg, default='')\n mirror('name', definition, pkg)\n mirror('requires', definition, pkg, default=[])\n mirror('timestamp', definition, pkg)\n mirror('tools', definition, pkg)\n # mirror('uuid', definition, pkg, default=str(uuid.uuid4()))\n pkg.version = rez_version.Version(definition.version)\n\n return pkg\n\n\ndef build_package_recursively(root, package, version='', build_path=''):\n '''Build a package by building its required packages recursively.\n\n Basically the logic goes like this:\n - Evaluate if a package has been built. 
If it has been built, do nothing.\n - If a package has no requirements, build it.\n - If a package has requirements and that requirement isn't installed,\n then evaluate and build it, too.\n - Repeat until all packages are built.\n\n Raises:\n RuntimeError:\n If a package was build incorrectly or\n if an attempt to build a package failed.\n\n '''\n def build_definition(definition, build_path):\n try:\n multipurpose_helper.build(\n os.path.dirname(definition.__file__),\n install_path=build_path,\n )\n except Exception:\n # TODO : Consider deleting the contents of\n # `os.path.dirname(definition.__file__)`\n # before erroring out, here\n #\n message = 'Definition \"%s\" failed to build.'\n LOGGER.exception(message, definition.__file__)\n raise RuntimeError(message % definition.__file__)\n\n LOGGER.debug('Building package \"%s\".', package)\n\n definition = get_package_definition(root, package, version)\n\n if not definition:\n raise RuntimeError('No definition file could be loaded.')\n\n pkg = make_package(definition, build_path)\n requirements = pkg.get_package().requires\n\n for requirement in requirements:\n try:\n resolved_context.ResolvedContext([requirement])\n except PACKAGE_EXCEPTIONS:\n requirement_package, requirement_version = str(requirement).split('-')\n\n build_package_recursively(\n root, requirement_package, requirement_version, build_path=build_path)\n\n # Now that all of the requirements are installed, install this package\n build_definition(definition, build_path)\n\n\ndef mirror(attribute, module, package, default=_DEFAULT_VALUE):\n '''Set set an attribute from one object onto another, if needed.\n\n Args:\n attribute (str): The attribute to mirror.\n module (object): The object which will be used to mirror `attribute`.\n package (object): The object to mirror `attribute` onto.\n default (object, optional):\n A value to use if `attribute` does not exist. If no default is\n specified then `attribute` is not mirrored.\n\n '''\n try:\n value = getattr(module, attribute)\n except AttributeError:\n if default == _DEFAULT_VALUE:\n return\n\n value = default\n\n setattr(package, attribute, value)\n\n\ndef install(package, root, build_path, version=''):\n '''Install a given package into `build_path`.\n\n Args:\n root (str):\n The absolute path to where all installed packages live.\n build_path (str):\n The absolute path to install the package to.\n version (str, optional):\n The specific version to get the path of. If no version is given,\n the latest version is used, instead. 
Default: \"\".\n\n '''\n package = package.split('-')[0]\n\n LOGGER.debug('Installing \"%s\".', package)\n\n try:\n # Request the specific package-version\n resolved_context.ResolvedContext(['{}-{}'.format(package, version)])\n except PACKAGE_EXCEPTIONS:\n build_package_recursively(root, package, version=version, build_path=build_path)\n","repo_name":"ColinKennedy/rezzurect","sub_path":"manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":9009,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"25804252113","text":"import math\nimport torch\nfrom torch import nn\n\nclass TokenEmbedding(nn.Module):\n def __init__(self, c_in, d_model=64):\n super(TokenEmbedding, self).__init__()\n \n self.conv = nn.Conv1d(in_channels=c_in, out_channels=d_model, kernel_size=3, padding=1, padding_mode=\"circular\", bias=False)\n for m in self.modules():\n if isinstance(m, nn.Conv1d):\n nn.init.kaiming_normal_(m.weight, mode=\"fan_in\", nonlinearity=\"relu\")\n\n def forward(self, x):\n return self.conv(x.permute(0, 2, 1)).transpose(1, 2)\n \n\nclass PositionEmbedding(nn.Module):\n def __init__(self, d_model=64, max_len=5000):\n super(PositionEmbedding, self).__init__()\n \n div = (torch.arange(0, d_model, 2) / d_model * math.log(10000.0)).exp()\n t = torch.arange(0, max_len).unsqueeze(1)\n pe = torch.zeros(max_len, d_model)\n pe[:, 0::2] = torch.sin(t / div)\n pe[:, 1::2] = torch.cos(t / div)\n \n pe = pe.unsqueeze(0)\n self.register_buffer(\"pe\", pe)\n\n def forward(self, x):\n return self.pe[:, :x.size(1)]\n\n\nclass FixedEmbedding(nn.Module):\n def __init__(self, c_in, d_model=64):\n super(FixedEmbedding, self).__init__()\n \n div = (torch.arange(0, d_model, 2) / d_model * math.log(10000.0)).exp()\n t = torch.arange(0, c_in).unsqueeze(1)\n w = torch.zeros(c_in, d_model)\n w.requires_grad = False\n w[:, 0::2] = torch.sin(t / div)\n w[:, 1::2] = torch.cos(t / div)\n\n self.emb = nn.Embedding(num_embeddings=c_in, embedding_dim=d_model)\n self.emb.weight = nn.Parameter(w, requires_grad=False)\n\n\n def forward(self, x):\n return self.emb(x).detach()\n\n\nclass TemporalEmbedding(nn.Module):\n def __init__(self, d_model=64, embed_type='fixed', freq='h'):\n super(TemporalEmbedding, self).__init__()\n \n if embed_type == \"fixed\":\n Emb = FixedEmbedding\n else:\n Emb = nn.Embedding\n if freq == 't':\n self.minute_emb = Emb(c_in=4, d_model=d_model)\n self.hour_emb = Emb(c_in=24, d_model=d_model)\n self.day_emb = Emb(c_in=32, d_model=d_model)\n self.week_emb = Emb(c_in=7, d_model=d_model)\n self.month_emb = Emb(c_in=13, d_model=d_model)\n\n def forward(self, x):\n x = x.long()\n if hasattr(self, 'minute_emb'):\n minute_x = self.minute_emb(x[:, :, 4])\n else:\n minute_x = 0\n hour_x = self.hour_emb(x[:, :, 3])\n day_x = self.day_emb(x[:, :, 2])\n week_x = self.week_emb(x[:, :, 1])\n month_x = self.month_emb(x[:, :, 0])\n print(minute_x)\n return minute_x + hour_x + day_x + week_x + month_x\n \n\nclass DataEmbedding(nn.Module):\n def __init__(self, c_in, d_model=64, embed_type='fixed', freq='h', dropout=0.1):\n super(DataEmbedding, self).__init__()\n \n self.value_emb = TokenEmbedding(c_in=c_in, d_model=d_model)\n self.position_emb = PositionEmbedding(d_model=d_model)\n self.temporal_emb = TemporalEmbedding(d_model=d_model, embed_type=embed_type, freq=freq)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x, x_mark):\n y = self.value_emb(x) + self.position_emb(x)\n if x_mark != None:\n y += self.temporal_emb(x_mark)\n return 
self.dropout(y)\n","repo_name":"Jameci/tmsnt","sub_path":"embedding.py","file_name":"embedding.py","file_ext":"py","file_size_in_byte":3367,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"34987371422","text":"import tensorflow as tf\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nfrom tensorflow.keras.utils import to_categorical\nfrom tensorflow.keras import Model, Input\nfrom tensorflow.keras.layers import LSTM, Embedding, Dense\nfrom tensorflow.keras.layers import TimeDistributed, SpatialDropout1D, Bidirectional\nfrom tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping\nimport numpy as np\n\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nfrom tensorflow.keras.utils import to_categorical\n\nfrom sklearn.metrics import classification_report\n\nclass NERTextPrepper():\n def __init__(self, data, max_len=50, case=\"lower\", punctuation=\"trailing\"):\n self.data = data\n\n #\n # Text regularization\n #\n if case == \"lower\":\n data['Word'] = data['Word'].str.lower()\n\n if punctuation == \"trailing\":\n data['Word'] = data['Word'].str.replace(\"'s\", \"\");\n data['Word'] = data['Word'].str.replace(\"\\([a-z0-9]+\\)(\\.|,|!|\\?)\\+\", \"\\1\", regex=True)\n data['Word'] = data['Word'].str.replace(\"~\", \"\", regex=True)\n elif punctuation == \"all\":\n data['Word'] = data['Word'].str.replace(\"[^a-z0-9]\", \" \", regex=True)\n\n #\n # Create vocabulary\n #\n self.words = list(set(self.data['Word'].values))\n self.words.append(\"\")\n self.words.append(\"ENDPAD\")\n self.num_words = len(self.words) + 1\n\n\n # Create tag list\n\n # We do not have enough 'nat', 'art' or 'eve' tags to predict them\n # with any meaningful accuracy, so we will create a new 'misc' category\n self.data['Tag'] = self.data['Tag'].str.replace(\"(nat|art|eve)\", \"misc\", regex=True)\n\n self.tags = list(set(self.data['Tag'].values))\n self.num_tags = len(self.tags)\n \n\n # Create vocab and tag dictionary for word -> integer lookup\n\n self.word2idx = {w : i+1 for i,w in enumerate(self.words)}\n self.tag2idx = {t : i for i,t in enumerate(self.tags)}\n \n\n\n # Split 'Word' series to arrays of sentences\n\n self.data['Sentence #'] = self.data['Sentence #'].fillna(method='ffill')\n agg_func = lambda s:[(w,t) for w,t in zip(s['Word'].tolist(),s['Tag'].tolist())]\n self.grouped = self.data.groupby(\"Sentence #\").apply(agg_func)\n self.sentences = [s for s in self.grouped]\n \n\n # Convert data to form needed by Tensorflow model\n\n self.max_len = 50\n \n self.X = [[self.word2idx[w[0]] for w in s]for s in self.sentences]\n self.X = pad_sequences(maxlen=self.max_len , sequences=self.X, padding='post', value=self.num_words-1)\n\n self.y = [[self.tag2idx[w[1]] for w in s]for s in self.sentences]\n self.y = pad_sequences(maxlen=max_len , sequences=self.y, padding='post', value=self.tag2idx[\"O\"])\n self.y = [to_categorical(i, num_classes=self.num_tags) for i in self.y]\n\n def fit(self, x_train, y_train, epochs=10):\n \n # Layers\n\n input_word = Input(shape = (self.max_len,))\n self.model = Embedding(input_dim=self.num_words,output_dim=self.max_len,input_length=self.max_len)(input_word)\n self.model = SpatialDropout1D(0.1)(self.model)\n self.model = Bidirectional(LSTM(units=100, return_sequences=True, recurrent_dropout=0.1))(self.model)\n out = TimeDistributed(Dense(self.num_tags,activation='softmax'))(self.model)\n self.model = Model(input_word,out)\n self.model.summary()\n\n # Compile\n\n 
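        # categorical cross-entropy matches the one-hot tag targets built with
        # to_categorical during preprocessing; 'accuracy' here is per-token tag accuracy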
self.model.compile(optimizer=\"adam\",loss='categorical_crossentropy',metrics=['accuracy'])\n\n # Fit controls and start fit\n\n early_stopping = EarlyStopping(monitor='val_accuracy',patience=1,verbose=0,mode='max',restore_best_weights=True)\n callbacks = [early_stopping]\n\n history = self.model.fit(x_train,np.array(y_train), validation_split = 0.2, batch_size = 64, epochs = epochs, verbose = 1,\n callbacks=callbacks)\n\n return history\n \n def evaluate(self, x_test, y_test):\n\n self.model.evaluate(x_test, np.array(y_test))\n\n # Convert y_true and model predictions to integers that\n # match our tag2idx dictionary\n\n self.y_pred_probs = self.model.predict(np.array(x_test))\n self.y_true = np.argmax(np.array(y_test), axis=-1)\n self.y_true_flat = [item for sublist in self.y_true for item in sublist]\n\n y_pred = np.argmax(self.y_pred_probs, axis=-1)\n self.y_pred = [item for sublist in y_pred for item in sublist]\n\n print(self.tags)\n print(classification_report(self.y_true_flat, self.y_pred, target_names=self.tags))\n","repo_name":"rafael-ariascalles/nlp-glg-clustering-pipeline","sub_path":"notebooks/nerutils.py","file_name":"nerutils.py","file_ext":"py","file_size_in_byte":4679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24144809948","text":"import primer3 as p3\n\n#Read Coding sequence from text file\ncds = ''\nwith open(\"cds.txt\", 'r') as f:\n strin = f.readlines()\n for a in strin:\n a = a.strip()\n cds += a\n\n#Creating the dictionary that results from Primer3's algorithms of 200 possible primers\nres = p3.bindings.designPrimers(\n {'SEQUENCE_TEMPLATE': cds},\n {'PRIMER_PRODUCT_SIZE_RANGE': [[300, 400], [250, 500], [500, 1000]],\n 'PRIMER_NUM_RETURN': 200,\n 'PRIMER_MIN_SIZE': 18,\n 'PRIMER_OPT_SIZE': 24,\n 'PRIMER_MAX_SIZE': 30,\n 'PRIMER_MIN_TM': 50,\n 'PRIMER_OPT_TM': 60,\n 'PRIMER_MAX_TM': 65,\n 'PRIMER_MIN_GC': 40,\n 'PRIMER_OPT_GC': 50,\n 'PRIMER_MAX_GC': 60,\n 'PRIMER_MAX_SELF_ANY': 3,\n 'PRIMER_MAX_SELF_END': 3})\n\n#Creating a readable version of the result dictionary that can be sorted based on certain parameters\ncounter = -7\nuseful = {}\nfor a in res.keys():\n if str(counter) not in a:\n counter += 1\n if counter > 0 and counter not in useful.keys():\n useful[counter] = [res[a]]\n elif counter not in useful.keys():\n useful[counter] = [res[a]]\n else:\n useful[counter].append(res[a])\n\n#0=Pair Penalty, 1= Left Penalty, 2= Right penalty, 3= left sequence, 4= right sequence, 5= primer left, 6= primer right, 7=primer left TM, 8=primer right TM, 9=Primer left GC%, 10= primer right gc%, 11= primer left self anyTH, 12= primer right self anyTH, 13= primer left self endTH, 14=PRimerRight self endTH, 15= left hairpinTH, 16= right hairpinTH, 17=leftendStability,18=rightEndStability,19=pairComplAnyTH,20=pairComplyEndTH, 21=pairProductSize\n#sortby = some int based on which number was picked or what not\nuseful = (sorted(useful.items(), key=lambda items: items[1][0]))\nprint(useful)\n\n","repo_name":"yipengyou/primer3","sub_path":"PrimerPipeLine.py","file_name":"PrimerPipeLine.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"855441013","text":"\nimport RPi.GPIO as GPIO\nfrom time import sleep\nimport datetime\n#from firebase import firebase\n\n\n#import urllib2, urllib, httplib\nimport json\nimport os \nfrom functools import partial\n\nimport spi\n\ndef SPI_Rasp():\n # Open file descriptor for \n # spi 
device 0 using the CE0 pin for chip select\n device_0 = spi.openSPI(device=\"/dev/spidev0.0\",\n mode=0,\n speed=1000000)\n\n \n\n # Transact data\n data_out = (0x05,)\n\n # This is not necessary, not just demonstrate loop-back\n data_in = (0x00,)\n data_in = spi.transfer(device_0, data_out)\n #print(data_in)\n while(1):\n data_in = spi.transfer(device_0, data_out)\n if data_in == (0x20,):\n print(\"Received from device 0:\")\n print(data_in)\n\n \n # Close file descriptors\n #spi.closeSPI(device_0)\n \n\n\ndef update_firebase():\n \n firebase = firebase.FirebaseApplication('https://curt-dashboard.firebaseio.com/', None)\n Speedometer=20\n Battery=50\n Temprature=10\n Suspension=30\n Yaw=30\n Pitch=45\n Roll=90\n longAcc=20\n latAcc=10\n distance=100\n Mode=\"Racing\"\n Faultmsg=\"No Fault ystaaaa\"\n throttle=10\n \n\n \n #data = {\"Speedometer\": Speedometer, \"Fault Message\": Faultmsg, \"Throttle Position\": throttle \"Battery Percentage\": Battery, \"Temprature\": Temprature, \"Suspension Travel \": Suspension, \"Yaw angle\": Yaw, \"Pitch angle\": Pitch, \"Roll angle\": Roll, \"Longitude Acceleration\": longAcc, \"lateral\": latAcc, \"Covered Distance\": distance, \"Mode\": Mode}\n #firebase.post('/dashboard', data)\n \n\n\n\n #update_firebase()\nSPI_Rasp()\n \n \n\n\n\n\n\n\n\n\n","repo_name":"curtfs/CURTEV-19","sub_path":"Dashboard/Dashboard/RaspSPI/SPI-Py-master/dashboard.py","file_name":"dashboard.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"34225299977","text":"from re import I\nfrom turtle import Pen\nfrom PySide6.QtCore import (QCoreApplication, QDate, QDateTime, QLocale,\n QMetaObject, QObject, QPoint, QRect,SIGNAL,Signal,QItemSelectionModel,Slot,\n QSize, QTime, QUrl, Qt) \nfrom PySide6.QtGui import (QBrush, QColor, QConicalGradient, QCursor,QAction,QActionGroup,\n QFont, QFontDatabase, QGradient, QIcon,QTransform,\n QImage, QKeySequence, QLinearGradient, QPainter,\n QPalette, QPixmap, QRadialGradient, QTransform,QCloseEvent)\nfrom PySide6.QtWidgets import (QApplication, QCheckBox, QComboBox, QPushButton,QListWidgetItem,QTableWidgetItem,QGridLayout,\n QAbstractItemView,QToolButton,QSizePolicy, QWidget, QFileDialog,QMenu)\n\n\n\nclass ROIToolButton(QToolButton):\n toolButtonClicked_signal = Signal(object,object)\n def __init__(self,parent=None):\n super(ROIToolButton, self).__init__(parent)\n # Set up the user interface from Designer.\n self.setupToolButton()\n self.setPopupMode(QToolButton.MenuButtonPopup)\n self.setToolButtonStyle(Qt.ToolButtonTextOnly)\n self.setAutoRaise(True)\n self.setArrowType(Qt.RightArrow) \n\n def setupToolButton(self): \n if not self.menu():\n self.menu= QMenu(self) \n self.linearRegion_menu = QMenu('Add Linear Region')\n\n self.linearRegionMenu_HorizontalAction = QAction('LR Horizontal')\n self.linearRegionMenu_HorizontalAction.setData('LR_H')\n self.linearRegionMenu_HorizontalAction.triggered.connect(self.actionClicked)\n self.linearRegion_menu.addAction(self.linearRegionMenu_HorizontalAction)\n\n self.linearRegionMenu_VerticalAction = QAction('LR Vertical')\n self.linearRegionMenu_VerticalAction.setData('LR_V')\n self.linearRegionMenu_VerticalAction.triggered.connect(self.actionClicked)\n self.linearRegion_menu.addAction(self.linearRegionMenu_VerticalAction) \n\n self.infiniteLine_menu = QMenu('Add Infinite Line')\n\n self.infiniteLineMenu_HorizontalAction = QAction('IL Horizontal')\n 
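            # each action carries a short code via setData() ('IL_H', 'LR_V', ...)
            # so actionClicked() can tell the ROI type and orientation apart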
self.infiniteLineMenu_HorizontalAction.setData('IL_H')\n self.infiniteLineMenu_HorizontalAction.triggered.connect(self.actionClicked)\n self.infiniteLine_menu.addAction(self.infiniteLineMenu_HorizontalAction)\n\n self.infiniteLineMenu_VerticalAction = QAction('IL Vertical')\n self.infiniteLineMenu_VerticalAction.setData('IL_V')\n self.infiniteLineMenu_VerticalAction.triggered.connect(self.actionClicked)\n self.infiniteLine_menu.addAction(self.infiniteLineMenu_VerticalAction) \n\n self.menu.addMenu(self.linearRegion_menu) \n self.menu.addMenu(self.infiniteLine_menu)\n self.setMenu(self.menu)\n self.setDefaultAction(self.infiniteLine_menu.actions()[1]) \n\n @Slot(QAction)\n def actionClicked(self,action):\n action = self.sender()\n self.toolButtonClicked_signal.emit(self,action)\n print(action.text())\n print(action.data()) \n self.setDefaultAction(action) \n\n\ndef main():\n import sys\n app = QApplication([])\n tof = ROIToolButton()\n # tof.show()\n win =QWidget()\n layout = QGridLayout()\n win.setLayout(layout)\n layout.addWidget(tof)\n win.show()\n app.exec()\n\nif __name__==\"__main__\":\n main()\n","repo_name":"Attolab/GUI_ToF","sub_path":"CustomToolButton.py","file_name":"CustomToolButton.py","file_ext":"py","file_size_in_byte":3423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7711823219","text":"# -*- coding: utf-8 -*-`\r\n\r\nINSERTS = {}\r\n\r\n\r\nINSERTS['nastavnik'] = (\"INSERT INTO nastavnik \"\r\n \"(ime, prezime, tip, email) \"\r\n \"VALUES (%s, %s, %s, %s)\")\r\n\r\nINSERTS['predmet'] = (\"INSERT INTO predmet \"\r\n \"(naziv) \"\r\n \"VALUES (%s)\")\r\n\r\nINSERTS['predmetsifra'] = (\"INSERT INTO predmet \"\r\n \"(sifra, naziv) \"\r\n \"VALUES (%s, %s)\")\r\n\r\n\r\nINSERTS['drzi_predmet'] =(\"INSERT INTO drzi_predmet \"\r\n \"(id_nastavnika, id_predmeta, grupa, id_semestra) \"\r\n \"VALUES (%s, %s, %s, %s)\")\r\n\r\nINSERTS['pitanje'] = (\"INSERT INTO pitanje \"\r\n \"(tekst, tip, format) \"\r\n \"VALUES (%s, %s, %s)\")\r\n\r\n\r\nINSERTS['semestar'] = (\"INSERT INTO `semestar` (`skolska_godina`, `tip_semestra`) VALUES \"\r\n \"(%s, %s)\")\r\n\r\n\r\n\r\nINSERTS['student'] = (\"INSERT INTO `student` (`ime`, `prezime`, `smer`, `indeks`, `godinaUpisa`, `username` ) VALUES\"\r\n \"(%s, %s, %s, %s, %s, %s)\")\r\n\r\nINSERTS['student1'] = (\"INSERT INTO `student` (`ime`, `prezime`, `smer`, `indeks`, `godinaUpisa`, `username`, `usernamestari` ) VALUES\"\r\n \"(%s, %s, %s, %s, %s, %s, %s)\")\r\n\r\n\r\n\r\nINSERTS['studentski_nalog'] = (\"INSERT INTO `studentski_nalog` (`id_studenta`, `username`, `password`) VALUES\"\r\n \"(1, 'ivan', 'ivan'),\"\r\n \"(2, 'marko', 'marko'),\"\r\n \"(3, 'nikola', 'nikola');\")\r\n\r\n\r\n\r\nINSERTS['student_predmet_semestar'] = (\"INSERT INTO `student_predmet_semestar` \"\r\n \"(`student_id`, `predmet_id`,`semestar_id` ) VALUES (%s, %s, %s)\")\r\n","repo_name":"RAFSoftLab/studentske-ankete-init-and-reports","sub_path":"insert_queries.py","file_name":"insert_queries.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12456778778","text":"#https://leetcode.com/problems/unique-binary-search-trees/description/\n\n#Given an integer n, return the number of structurally unique BST's (binary search trees) which has exactly n nodes of unique values from 1 to n.\n\"\"\"\nExample 1:\n\nInput: n = 3\nOutput: 5\nExample 2:\n\nInput: n = 1\nOutput: 1\n\nConstraints:\n\n1 <= n <= 
19\n\"\"\"\n\nclass Solution:\n def numTrees(self, n: int) -> int:\n\n arr = [1,1,2]\n len_arr = 3\n while (n+1)>len_arr:\n #do something\n combo_count = 0\n # i-> 0,1,2 and len_arr = 3\n for i in range(0,len_arr):\n combo_count += arr[i]*arr[len_arr-1-i]\n arr.append(combo_count)\n len_arr+=1\n \n return arr[n]","repo_name":"yp3722/DSAGuide","sub_path":"Recursion 1/UniqueBST.py","file_name":"UniqueBST.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26647088378","text":"import os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom keras.datasets import mnist\nfrom keras.layers import Input, Dense, Reshape, Flatten, BatchNormalization\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom keras.models import Sequential, Model\nfrom keras.optimizers import Adam\n\n\nif not os.path.exists('images'):\n os.mkdir('images')\n\n\nimg_rows = 28\nimg_cols = 28\nchannels = 1\nimg_shape = (img_rows, img_cols, channels)\nlatent_dim = 100\nepochs = 5000\nbatch_size = 128\nsample_interval = 50\ncolab = False\n\n\n(x_train, _), (_, _) = mnist.load_data()\n\nx_train = x_train / 127.5 - 1\nx_train = np.expand_dims(x_train, axis=3)\n\nvalid = np.ones((batch_size, 1))\nfake = np.zeros((batch_size, 1))\n\n\ndef build_generator():\n model = Sequential()\n model.add(Dense(units=256, input_dim=latent_dim))\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n model.add(Dense(units=512))\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n model.add(Dense(units=1024))\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n model.add(Dense(units=28 * 28, activation='tanh'))\n model.add(Reshape(target_shape=img_shape))\n\n model.summary()\n\n noise = Input(shape=(latent_dim,))\n img = model(noise)\n\n return Model(noise, img)\n\n\ndef build_discriminator():\n model = Sequential()\n model.add(Flatten(input_shape=img_shape))\n model.add(Dense(units=512))\n model.add(LeakyReLU(alpha=0.2))\n model.add(Dense(units=256))\n model.add(LeakyReLU(alpha=0.2))\n model.add(Dense(units=1, activation='sigmoid'))\n\n model.summary()\n img = Input(shape=img_shape)\n validity = model(img)\n\n return Model(inputs=img, outputs=validity)\n\n\noptimizer = Adam(0.0002, 0.5)\n\ndiscriminator = build_discriminator()\ndiscriminator.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])\n\ngenerator = build_generator()\n\nz = Input(shape=(latent_dim,))\nimg = generator(z)\n\ndiscriminator.trainable = False\nvalidity = discriminator(img)\n\ncombined = Model(z, validity)\ncombined.compile(loss='binary_crossentropy', optimizer=optimizer)\n\n\ndef sample_images(epoch):\n r, c = 5, 5\n noise = np.random.normal(0, 1, (r * c, latent_dim))\n gen_imgs = generator.predict(noise)\n\n gen_imgs = 0.5 * gen_imgs + 0.5\n\n fig, axs = plt.subplots(r, c)\n cnt = 0\n for i in range(r):\n for j in range(c):\n axs[i,j].imshow(gen_imgs[cnt, :,:,0], cmap='gray')\n axs[i,j].axis('off')\n cnt += 1\n\n if not colab:\n fig.savefig(\"images/%d.png\" % epoch)\n else:\n plt.show()\n plt.close()\n\n\nfor epoch in range(epochs):\n idx = np.random.randint(0, x_train.shape[0], batch_size)\n imgs = x_train[idx]\n noise = np.random.normal(0, 1, (batch_size, latent_dim))\n gen_imgs = generator.predict(noise)\n\n d_loss_real = discriminator.train_on_batch(imgs, valid)\n d_loss_fake = discriminator.train_on_batch(gen_imgs, fake)\n\n d_loss = 0.5 * 
np.add(d_loss_real, d_loss_fake)\n noise = np.random.normal(0, 1, (batch_size, latent_dim))\n g_loss = combined.train_on_batch(noise, valid)\n\n print(\"%d [D loss: %f, acc.: %.2f%%] [G loss: %f]\" % (epoch, d_loss[0], 100 * d_loss[1], g_loss))\n\n if epoch % sample_interval == 0:\n sample_images(epoch)\n\n","repo_name":"urgeekneighbor/jubilant-barnacle","sub_path":"source.py","file_name":"source.py","file_ext":"py","file_size_in_byte":3401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27729237630","text":"'''\nAPS-2020\nProblem Description, Input, Output : https://leetcode.com/problems/maximum-69-number/\nCode by : Sagar Kulkarni\n'''\n\nclass Solution:\n def maximum69Number (self, num: int) -> int:\n currMax=num\n str1=str(num)\n list1=list(str1)\n\n for i in range(len(str1)):\n list2=list1[:]\n if list2[i]=='6':\n list2[i]='9'\n print(list2)\n res=int(\"\".join(list2))\n if res>currMax:\n currMax=res\n continue\n\n elif list2[i]=='9':\n list2[i]='6'\n print(list2)\n res=int(\"\".join(list2))\n if res>currMax:\n currMax=res\n continue\n return currMax\n","repo_name":"SagarKulk539/APS_Lib_2020","sub_path":"LeetCode/max69.py","file_name":"max69.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36170398455","text":"#!/usr/bin/env python3\n\nchallenge= [\"science\", \"turbo\", [\"goggles\", \"eyes\"], \"nothing\"]\n\n\ntrial= [\"science\", \"turbo\", {\"eyes\": \"goggles\", \"goggles\": \"eyes\"}, \"nothing\"]\n\n\nnightmare= [{\"slappy\": \"a\", \"text\": \"b\", \"kumquat\": \"goggles\", \"user\":{\"awesome\": \"c\", \"name\": {\"first\": \"eyes\", \"last\": \"toes\"}},\"banana\": 15, \"d\": \"nothing\"}]\n\n# First Challenge\nx = challenge[2][1]\ny = challenge[2][0]\nz = challenge[3]\n\nprint(f\"My {x}! The {y} do {z}!\")\n\n# Second Challenge\nx = trial[2][\"goggles\"]\ny = trial[2][\"eyes\"]\nz = trial[-1]\n\nprint(f\"My {x}! The {y} do {z}!\")\n\n\n# Third Challenge\nx = nightmare[0][\"user\"][\"name\"][\"first\"]\ny = nightmare[0][\"kumquat\"]\nz = nightmare[0][\"d\"]\n\nprint(f\"My {x}! 
The {y} do {z}!\")\n","repo_name":"mvillagracia11/mycode","sub_path":"dict_challenge.py","file_name":"dict_challenge.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"42445584897","text":"import time\nimport os\nimport animation\nimport subprocess\nimport sqlite3\nimport pandas as pd\nimport ast\n\nthreshold = 45\nth = threshold * (-1)\n\nduration = 0.125 # The duration of time in seconds between each frame.\ncycles = 10 # The number of cycles the animation plays through.\n\n\n\ndef animate():\n    \"\"\"Iterate through the frames, printing then clearing each one to create an animation.\"\"\"\n    print(os.system('clear'))\n    for frame in animation.frames:\n        print('\\033[1;34m' , frame, '\\033[1;m') # Print the frame in color blue.\n        time.sleep(duration)\n        print(os.system('clear'))\n\ndef good_postion():\n    print(os.system('clear'))\n    print(animation.frame_good)\n    time.sleep(1)\n    print(os.system('clear'))\n\n\nif __name__ == \"__main__\":\n    # Connect to databases\n    db_parity = sqlite3.connect(\"/home/pi/Workspace/receiver_server_docker_parity/parity_0.sqlite\")\n    db_0 = sqlite3.connect(\"/home/pi/Workspace/receiver_server_docker_parity/content_0.sqlite\")\n    db_1 = sqlite3.connect(\"/home/pi/Workspace/receiver_server_docker_parity/content_1.sqlite\")\n\n\n    while(1):\n        ## parity bits\n        parity_df = pd.read_sql('SELECT * FROM parity ORDER BY time DESC LIMIT 1', db_parity)\n        ## First Half Content\n        for i in range(0,7):\n            queryString = \"select content, time from segment{} ORDER BY time DESC LIMIT 1\".format(i)\n            temp_df = pd.read_sql(queryString, db_0)\n            parity_df = parity_df.merge(temp_df, on='time')\n            if i != 0:\n                parity_df['content'] = parity_df['content_x'] + parity_df['content_y']\n                parity_df.drop(['content_x', 'content_y'], axis=1, inplace=True)\n        ## Second Half Content\n        for i in range(0,7):\n            queryString = \"select content, time from segment{} ORDER BY time DESC LIMIT 1\".format(i)\n            temp_df = pd.read_sql(queryString, db_1)\n            parity_df = parity_df.merge(temp_df, on='time')\n            parity_df['content'] = parity_df['content_x'] + parity_df['content_y']\n            parity_df.drop(['content_x', 'content_y'], axis=1, inplace=True)\n\n        # correct and store the corrected result in column\n        parity_df['corrected'] = \"0\"\n        for index, row in parity_df.iterrows():\n            res = subprocess.Popen([\"/home/pi/Workspace/RSCODEC/rs_decoder\", \n                row['content'], row['parity']], stdout=subprocess.PIPE)\n            out = res.communicate()\n            row['corrected'] = out[0].decode().strip('\\n')\n        \n        \n        cnt = 0\n        for index, row in parity_df.iterrows():\n            xypair_tuple = ast.literal_eval(row['corrected'])\n            for xypair in xypair_tuple: \n                xx = xypair[0]\n                yy = xypair[1]\n                if (yy > th or xx > threshold or xx < th):\n                    cnt += 1\n        \n        if (cnt > 10):\n            animate()\n        else:\n            good_postion()\n","repo_name":"xph3120/EC544_Project","sub_path":"data_server/terminal_animation.py","file_name":"terminal_animation.py","file_ext":"py","file_size_in_byte":2959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"39717292654","text":"import random\n\n\n# Hand-rolled O(1) insert and delete. Deletion needs to know which index to\n# remove, so a dict tracks each value's index; after a delete, the last element\n# is swapped into the gap so the remaining indices stay valid.\n\nclass RandomizedSet:\n\n    def __init__(self):\n        \"\"\"\n        Initialize your data structure here.\n        \"\"\"\n        self.val = []\n        self.index = {}\n\n    def insert(self, val: int) -> bool:\n        \"\"\"\n        Inserts a value to the set. Returns true if the set did not already contain the specified element.\n        \"\"\"\n        if val not in self.index:\n            self.val.append(val)\n            self.index[val] = len(self.val) - 1\n            return True\n        return False\n\n    def remove(self, val: int) -> bool:\n        \"\"\"\n        Removes a value from the set. Returns true if the set contained the specified element.\n        \"\"\"\n        if val in self.index:  # O(1) membership test against the index dict\n            i = self.index[val]\n            self.val[i] = self.val[-1]\n            self.index[self.val[-1]] = i\n            self.val.pop(-1)\n            del self.index[val]\n            return True\n        return False\n\n    def getRandom(self) -> int:\n        \"\"\"\n        Get a random element from the set.\n        \"\"\"\n\n        return int(random.sample(self.val, 1)[0])\n\n\n# Your RandomizedSet object will be instantiated and called as such:\nobj = RandomizedSet()\nval = 1\nparam_1 = obj.insert(val)\n# param_2 = obj.remove(val)\nparam_3 = obj.getRandom()\nprint(param_3, param_1)\n","repo_name":"shiyutang/DL-Prep","sub_path":"04_Algorithms/Leetcode/L380. Insert Delete GetRandom O(1).py","file_name":"L380. Insert Delete GetRandom O(1).py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"81"}
{"seq_id":"17138503193","text":"from resticweb.tools.local_session import LocalSession\nfrom resticweb.models.general import SavedJobs, JobParameter, Repository, BackupObject\nfrom resticweb.misc import job_queue\nfrom resticweb.misc.credential_manager import credential_manager\nfrom resticweb.engine_classes.class_name_map import get_class_from_name\nimport resticweb.tools.job_callbacks as job_callbacks\nfrom resticweb.dictionary.resticweb_variables import Config\n\ndef delete_jobs(ids):\n    with LocalSession() as session:\n        for id in ids:\n            session.query(SavedJobs).filter_by(id=id).delete()\n        session.commit()\n\n\ndef add_job(info):\n    with LocalSession() as session:\n        jobs = SavedJobs(\n            name=info['name'],\n            notes=info.get('notes'),\n            engine_class=info['engine_class']\n        )\n        session.add(jobs)\n        session.commit()\n        if info.get('params'):\n            for key, value in info.get('params').items():\n                parameter = JobParameter(\n                    param_name=key,\n                    param_value=value,\n                    job_id=jobs.id\n                )\n                session.add(parameter)\n            session.commit()\n        return jobs.id\n\ndef update_job(info):\n    with LocalSession() as session:\n        job = session.query(SavedJobs).filter_by(id=info['saved_job_id']).first()\n        job.name = info['name']\n        job.notes = info['notes']\n        session.commit()\n        another_commit = False\n        # this will just update the existing parameters\n        # if a new parameter is passed in through the info dictionary argument\n        # it will not be added at this time\n        if info.get('params'):\n            for param in job.parameters:\n                if info['params'].get(param.param_name) != param.param_value:\n                    param.param_value = info['params'].get(param.param_name)\n                    another_commit = True\n        if another_commit:\n            session.commit()\n\n\ndef get_job_info(id):\n    with LocalSession() as session:\n        job = session.query(SavedJobs).filter_by(id=id).first()\n        if job:\n            info_dict = dict(\n                name=job.name,\n                notes=job.notes,\n                engine_class=job.engine_class,\n                params=job.params,\n                last_attempted_run=job.last_attempted_run,\n                last_successful_run=job.last_successful_run,\n                time_added=job.time_added\n            )\n            return info_dict\n\n\ndef update_job_times(id, info):\n    with LocalSession() as session:\n        job = session.query(SavedJobs).filter_by(id=id).first()\n        if info.get('last_attempted_run'):\n            job.last_attempted_run = info['last_attempted_run']\n        if info.get('last_successful_run'):\n            job.last_successful_run = info['last_successful_run']\n        
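        # a single commit persists whichever of the two timestamps were supplied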
session.commit()\n","repo_name":"XXL6/resticweb","sub_path":"resticweb/interfaces/saved_jobs.py","file_name":"saved_jobs.py","file_ext":"py","file_size_in_byte":2872,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"25651013749","text":"# -*- coding=utf-8 -*-\nimport logging\nimport os\nimport subprocess\nimport tempfile\n\nlogger = logging.getLogger(__name__)\n\n__all__ = [\"run_editor\"]\n\nEDITOR = os.environ.get(\"EDITOR\", \"mcedit\")\n\n\ndef run_editor(text):\n    with tempfile.NamedTemporaryFile(mode=\"w\", encoding=\"utf-8\", prefix=\"midcli-\") as f:\n        f.write(text)\n        f.flush()\n\n        try:\n            subprocess.run([EDITOR, f.name])\n        except FileNotFoundError:\n            input(f\"EDITOR={EDITOR} not found. Press any key to continue.\")\n            return \"\"\n\n        with open(f.name, encoding=\"utf-8\") as fr:\n            return fr.read()\n","repo_name":"truenas/midcli","sub_path":"midcli/editor/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"81"}
+{"seq_id":"672841123","text":"from fs import open_fs\n\ndef count_python_loc(fs):\n    \"\"\"Count non-blank lines of Python code.\n\n    The argument must be an FS object created with open_fs, not a plain\n    directory path: the 'walk' helper used below only exists on FS objects,\n    so passing an ordinary path string would fail.\n    \"\"\"\n    count = 0\n    for path in fs.walk.files(filter=['*.py']): # visit every *.py file in the tree\n        with fs.open(path) as python_file: # open each match as a text file\n            count += sum(1 for line in python_file if line.strip()) # count only non-blank lines\n    return count\n\n# open the directory to scan as an FS object so count_python_loc can walk it\nprojects_fs = open_fs('.') \nprint(count_python_loc(projects_fs))\n\n\n","repo_name":"shirepsh/homeWork_25_08","sub_path":"fs_func.py","file_name":"fs_func.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"16524349295","text":"import numpy as np\nimport gym\n\n\nclass POMDPWrapper(gym.ObservationWrapper):\n    def __init__(self, env_name, pomdp_type='POMDP-RV',\n                 flicker_prob=0.2, random_noise_sigma=0.1, random_sensor_missing_prob=0.1):\n        \"\"\"\n        :param env_name:\n        :param pomdp_type:\n            1. POMDP-RV: remove velocity related observation\n            2. POMDP-FLK: obscure the entire observation with a certain probability at each time step with the\n                probability flicker_prob.\n            3. POMDP-RN: each sensor in an observation is disturbed by a random noise Normal ~ (0, sigma).\n            4. POMDP-RSM: each sensor in an observation will miss with a relatively low probability sensor_miss_prob\n            5. POMDP-RV_and_POMDP-FLK:\n            6. POMDP-RV_and_POMDP-RN:\n            7. POMDP-RV_and_POMDP-RSM:\n            8. POMDP-FLK_and_POMDP-RN:\n            9. POMDP-RN_and_POMDP-RSM\n            10. 
POMDP-RSM_and_POMDP-RN:\n\n \"\"\"\n super().__init__(gym.make(env_name))\n self.pomdp_type = pomdp_type\n self.flicker_prob = flicker_prob\n self.random_noise_sigma = random_noise_sigma\n self.random_sensor_missing_prob = random_sensor_missing_prob\n\n if pomdp_type == 'POMDP-RV':\n # Remove Velocity info, comes with the change in observation space.\n self.remain_obs_idx, self.observation_space = self._remove_velocity(env_name)\n elif pomdp_type == 'POMDP-FLK':\n pass\n elif self.pomdp_type == 'POMDP-RN':\n pass\n elif self.pomdp_type == 'POMDP-RSM':\n pass\n elif self.pomdp_type == 'POMDP-RV_and_POMDP-FLK':\n # Remove Velocity Info, comes with the change in observation space.\n self.remain_obs_idx, self.observation_space = self._remove_velocity(env_name)\n elif self.pomdp_type == 'POMDP-RV_and_POMDP-RN':\n # Remove Velocity Info, comes with the change in observation space.\n self.remain_obs_idx, self.observation_space = self._remove_velocity(env_name)\n elif self.pomdp_type == 'POMDP-RV_and_POMDP-RSM':\n # Remove Velocity Info, comes with the change in observation space.\n self.remain_obs_idx, self.observation_space = self._remove_velocity(env_name)\n elif self.pomdp_type == 'POMDP-FLK_and_POMDP-RN':\n pass\n elif self.pomdp_type == 'POMDP-RN_and_POMDP-RSM':\n pass\n elif self.pomdp_type == 'POMDP-RSM_and_POMDP-RN':\n pass\n else:\n raise ValueError(\"pomdp_type was not specified!\")\n\n def observation(self, obs):\n # Single source of POMDP\n if self.pomdp_type == 'POMDP-RV':\n return obs.flatten()[self.remain_obs_idx]\n elif self.pomdp_type == 'POMDP-FLK':\n # Note: POMDP-FLK is equivalent to:\n # POMDP-FLK_and_POMDP-RSM, POMDP-RN_and_POMDP-FLK, POMDP-RSM_and_POMDP-FLK\n if np.random.rand() <= self.flicker_prob:\n return np.zeros(obs.shape)\n else:\n return obs.flatten()\n elif self.pomdp_type == 'POMDP-RN':\n return (obs + np.random.normal(0, self.random_noise_sigma, obs.shape)).flatten()\n elif self.pomdp_type == 'POMDP-RSM':\n obs[np.random.rand(len(obs)) <= self.random_sensor_missing_prob] = 0\n return obs.flatten()\n # Multiple source of POMDP\n elif self.pomdp_type == 'POMDP-RV_and_POMDP-FLK':\n # Note: POMDP-RV_and_POMDP-FLK is equivalent to POMDP-FLK_and_POMDP-RV\n # Remove velocity\n new_obs = obs.flatten()[self.remain_obs_idx]\n # Flickering\n if np.random.rand() <= self.flicker_prob:\n return np.zeros(new_obs.shape)\n else:\n return new_obs\n elif self.pomdp_type == 'POMDP-RV_and_POMDP-RN':\n # Note: POMDP-RV_and_POMDP-RN is equivalent to POMDP-RN_and_POMDP-RV\n # Remove velocity\n new_obs = obs.flatten()[self.remain_obs_idx]\n # Add random noise\n return (new_obs + np.random.normal(0, self.random_noise_sigma, new_obs.shape)).flatten()\n elif self.pomdp_type == 'POMDP-RV_and_POMDP-RSM':\n # Note: POMDP-RV_and_POMDP-RSM is equivalent to POMDP-RSM_and_POMDP-RV\n # Remove velocity\n new_obs = obs.flatten()[self.remain_obs_idx]\n # Random sensor missing\n new_obs[np.random.rand(len(new_obs)) <= self.random_sensor_missing_prob] = 0\n return new_obs\n elif self.pomdp_type == 'POMDP-FLK_and_POMDP-RN':\n # Flickering\n if np.random.rand() <= self.flicker_prob:\n new_obs = np.zeros(obs.shape)\n else:\n new_obs = obs\n # Add random noise\n return (new_obs + np.random.normal(0, self.random_noise_sigma, new_obs.shape)).flatten()\n elif self.pomdp_type == 'POMDP-RN_and_POMDP-RSM':\n # Random noise\n new_obs = (obs + np.random.normal(0, self.random_noise_sigma, obs.shape)).flatten()\n # Random sensor missing\n new_obs[np.random.rand(len(new_obs)) <= self.random_sensor_missing_prob] = 0\n 
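# the masking above runs after the noise step, so dropped sensors read exactly 0\n            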
return new_obs\n elif self.pomdp_type == 'POMDP-RSM_and_POMDP-RN':\n # Random sensor missing\n obs[np.random.rand(len(obs)) <= self.random_sensor_missing_prob] = 0\n # Random noise\n return (obs + np.random.normal(0, self.random_noise_sigma, obs.shape)).flatten()\n else:\n raise ValueError(\"pomdp_type was not in ['POMDP-RV', 'POMDP-FLK', 'POMDP-RN', 'POMDP-RSM']!\")\n\n def _remove_velocity(self, env_name):\n # OpenAIGym\n # 1. Classic Control\n if env_name == \"Pendulum-v0\":\n remain_obs_idx = np.arange(0, 2)\n elif env_name == \"Acrobot-v1\":\n remain_obs_idx = list(np.arange(0, 4))\n elif env_name == \"MountainCarContinuous-v0\":\n remain_obs_idx = list([0])\n # 1. MuJoCo\n elif env_name == \"HalfCheetah-v3\" or env_name == \"HalfCheetah-v2\":\n remain_obs_idx = np.arange(0, 8)\n elif env_name == \"Ant-v3\" or env_name == \"Ant-v2\":\n remain_obs_idx = list(np.arange(0, 13)) + list(np.arange(27, 111))\n elif env_name == 'Walker2d-v3' or env_name == \"Walker2d-v2\":\n remain_obs_idx = np.arange(0, 8)\n elif env_name == 'Hopper-v3' or env_name == \"Hopper-v2\":\n remain_obs_idx = np.arange(0, 5)\n elif env_name == \"InvertedPendulum-v2\":\n remain_obs_idx = np.arange(0, 2)\n elif env_name == \"InvertedDoublePendulum-v2\":\n remain_obs_idx = list(np.arange(0, 5)) + list(np.arange(8, 11))\n elif env_name == \"Swimmer-v3\" or env_name == \"Swimmer-v2\":\n remain_obs_idx = np.arange(0, 3)\n elif env_name == \"Thrower-v2\":\n remain_obs_idx = list(np.arange(0, 7)) + list(np.arange(14, 23))\n elif env_name == \"Striker-v2\":\n remain_obs_idx = list(np.arange(0, 7)) + list(np.arange(14, 23))\n elif env_name == \"Pusher-v2\":\n remain_obs_idx = list(np.arange(0, 7)) + list(np.arange(14, 23))\n elif env_name == \"Reacher-v2\":\n remain_obs_idx = list(np.arange(0, 6)) + list(np.arange(8, 11))\n elif env_name == 'Humanoid-v3' or env_name == \"Humanoid-v2\":\n remain_obs_idx = list(np.arange(0, 22)) + list(np.arange(45, 185)) + list(np.arange(269, 376))\n elif env_name == 'HumanoidStandup-v2':\n remain_obs_idx = list(np.arange(0, 22)) + list(np.arange(45, 185)) + list(np.arange(269, 376))\n # PyBulletEnv:\n # The following is not implemented:\n # HumanoidDeepMimicBulletEnv - v1\n # CartPoleBulletEnv - v1\n # MinitaurBulletEnv - v0\n # MinitaurBulletDuckEnv - v0\n # RacecarBulletEnv - v0\n # RacecarZedBulletEnv - v0\n # KukaBulletEnv - v0\n # KukaCamBulletEnv - v0\n # PusherBulletEnv - v0\n # ThrowerBulletEnv - v0\n # StrikerBulletEnv - v0\n # HumanoidBulletEnv - v0\n # HumanoidFlagrunBulletEnv - v0\n # HumanoidFlagrunHarderBulletEnv - v0\n elif env_name == 'HalfCheetahBulletEnv-v0':\n remain_obs_idx = list(set(np.arange(0, 26)) - set(np.arange(3, 6)))\n elif env_name == 'AntBulletEnv-v0':\n remain_obs_idx = list(set(np.arange(0, 28)) - set(np.arange(3, 6)))\n elif env_name == 'HopperBulletEnv-v0':\n remain_obs_idx = list(set(np.arange(0, 15)) - set(np.arange(3, 6)))\n elif env_name == 'Walker2DBulletEnv-v0':\n remain_obs_idx = list(set(np.arange(0, 22)) - set(np.arange(3, 6)))\n elif env_name == 'InvertedPendulumBulletEnv-v0':\n remain_obs_idx = list(set(np.arange(0, 5)) - set([1, 4]))\n elif env_name == 'InvertedDoublePendulumBulletEnv-v0':\n remain_obs_idx = list(set(np.arange(0, 9)) - set([1, 5, 8]))\n elif env_name == 'InvertedPendulumSwingupBulletEnv-v0':\n pass\n elif env_name == 'ReacherBulletEnv-v0':\n remain_obs_idx = list(set(np.arange(0, 9)) - set([6, 8]))\n # PyBulletGym\n # 1. 
MuJoCo\n elif env_name == 'HalfCheetahMuJoCoEnv-v0':\n remain_obs_idx = np.arange(0, 8)\n elif env_name == 'AntMuJoCoEnv-v0':\n remain_obs_idx = list(np.arange(0, 13)) + list(np.arange(27, 111))\n elif env_name == 'Walker2DMuJoCoEnv-v0':\n remain_obs_idx = np.arange(0, 8)\n elif env_name == 'HopperMuJoCoEnv-v0':\n remain_obs_idx = np.arange(0, 7)\n elif env_name == 'InvertedPendulumMuJoCoEnv-v0':\n remain_obs_idx = np.arange(0, 3)\n elif env_name == 'InvertedDoublePendulumMuJoCoEnv-v0':\n remain_obs_idx = list(np.arange(0, 5)) + list(np.arange(8, 11))\n # 2. Roboschool\n elif env_name == 'HalfCheetahPyBulletEnv-v0':\n remain_obs_idx = list(set(np.arange(0, 26)) - set(np.arange(3, 6)))\n elif env_name == 'AntPyBulletEnv-v0':\n remain_obs_idx = list(set(np.arange(0, 28)) - set(np.arange(3, 6)))\n elif env_name == 'Walker2DPyBulletEnv-v0':\n remain_obs_idx = list(set(np.arange(0, 22)) - set(np.arange(3, 6)))\n elif env_name == 'HopperPyBulletEnv-v0':\n remain_obs_idx = list(set(np.arange(0, 15)) - set(np.arange(3, 6)))\n elif env_name == 'InvertedPendulumPyBulletEnv-v0':\n remain_obs_idx = list(set(np.arange(0, 5)) - set([1, 4]))\n elif env_name == 'InvertedDoublePendulumPyBulletEnv-v0':\n remain_obs_idx = list(set(np.arange(0, 9)) - set([1, 5, 8]))\n elif env_name == 'ReacherPyBulletEnv-v0':\n remain_obs_idx = list(set(np.arange(0, 9)) - set([6, 8]))\n else:\n raise ValueError('POMDP for {} is not defined!'.format(env_name))\n\n # Redefine observation_space\n obs_low = np.array([-np.inf for i in range(len(remain_obs_idx))], dtype=\"float32\")\n obs_high = np.array([np.inf for i in range(len(remain_obs_idx))], dtype=\"float32\")\n observation_space = gym.spaces.Box(obs_low, obs_high)\n return remain_obs_idx, observation_space\n\n def reset(self):\n observation = self.env.reset()\n return self.observation(observation)\n\nif __name__ == '__main__':\n # import pybulletgym\n import pybullet_envs\n import gym\n # POMDP-FLK, POMDP-RV, POMDP-RN\n env = POMDPWrapper(\"HalfCheetahBulletEnv-v0\", 'POMDP-RN') # Pendulum-v0 AntBulletEnv-v0 HalfCheetahBulletEnv-v0\n obs = env.reset()\n env.step(env.action_space.sample())\n print('action_space: {}'.format(env.action_space))\n print('observation_space: {}'.format(env.observation_space))\n print('obs: {}'.format(obs))\n","repo_name":"LinghengMeng/rl-teacher-copy","sub_path":"rl_teacher/pomdp_env.py","file_name":"pomdp_env.py","file_ext":"py","file_size_in_byte":11907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9182053557","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nfrom timm.models.vision_transformer import Block\n\nfrom src.models.components.tokenized_base import TokenizedBase\nfrom src.utils.pos_embed import (\n get_1d_sincos_pos_embed_from_grid,\n get_2d_sincos_pos_embed,\n)\n\n\nclass TokenizedVideoMAE(TokenizedBase):\n \"\"\"Masked Autoencoder with VisionTransformer backbone.\"\"\"\n\n def __init__(\n self,\n timesteps=8,\n img_size=[128, 256],\n patch_size=16,\n drop_path=0.0,\n drop_rate=0.0,\n learn_pos_emb=False,\n mask_type='uniform',\n default_vars=[\n \"geopotential_1000\",\n \"geopotential_850\",\n \"geopotential_500\",\n \"geopotential_50\",\n \"relative_humidity_850\",\n \"relative_humidity_500\",\n \"u_component_of_wind_1000\",\n \"u_component_of_wind_850\",\n \"u_component_of_wind_500\",\n \"v_component_of_wind_1000\",\n \"v_component_of_wind_850\",\n \"v_component_of_wind_500\",\n \"temperature_850\",\n \"temperature_500\",\n \"2m_temperature\",\n 
\"10m_u_component_of_wind\",\n \"10m_v_component_of_wind\",\n ],\n channel_agg=\"mean\",\n embed_dim=1024,\n depth=24,\n num_heads=16,\n decoder_embed_dim=512,\n decoder_depth=8,\n decoder_num_heads=16,\n mlp_ratio=4.0,\n init_mode=\"xavier\",\n ):\n super().__init__(\n img_size,\n patch_size,\n drop_path,\n drop_rate,\n learn_pos_emb,\n embed_dim,\n depth,\n num_heads,\n mlp_ratio,\n init_mode,\n default_vars,\n channel_agg,\n )\n\n self.timesteps = timesteps\n assert mask_type in ['space', 'time', 'uniform']\n self.mask_type = mask_type\n\n # time embedding\n self.time_pos_embed = nn.Parameter(torch.zeros(1, timesteps, embed_dim), requires_grad=learn_pos_emb)\n\n # --------------------------------------------------------------------------\n # MAE decoder specifics\n self.decoder_embed = nn.Linear(embed_dim, decoder_embed_dim, bias=True)\n\n self.mask_token = nn.Parameter(torch.zeros(1, 1, decoder_embed_dim))\n # TODO: each channel has its own mask token\n\n self.decoder_pos_embed = nn.Parameter(\n torch.zeros(1, self.num_patches, decoder_embed_dim), requires_grad=learn_pos_emb\n )\n self.decoder_time_pos_embed = nn.Parameter(\n torch.zeros(1, timesteps, decoder_embed_dim), requires_grad=learn_pos_emb\n )\n\n self.decoder_blocks = nn.ModuleList(\n [\n Block(\n decoder_embed_dim,\n decoder_num_heads,\n mlp_ratio,\n qkv_bias=True,\n norm_layer=nn.LayerNorm,\n )\n for i in range(decoder_depth)\n ]\n )\n\n self.decoder_norm = nn.LayerNorm(decoder_embed_dim)\n self.decoder_pred = nn.Linear(\n decoder_embed_dim, len(default_vars) * patch_size**2, bias=True\n ) # decoder to token\n # --------------------------------------------------------------------------\n\n self.initialize_weights()\n\n def initialize_weights(self):\n # initialization\n super().initialize_weights()\n\n # time embedding\n time_pos_embed = get_1d_sincos_pos_embed_from_grid(self.time_pos_embed.shape[-1], np.arange(self.timesteps))\n self.time_pos_embed.data.copy_(torch.from_numpy(time_pos_embed).float().unsqueeze(0))\n\n # decoder pos embedding\n decoder_pos_embed = get_2d_sincos_pos_embed(\n self.decoder_pos_embed.shape[-1],\n int(self.img_size[0] / self.patch_size),\n int(self.img_size[1] / self.patch_size),\n cls_token=False,\n )\n self.decoder_pos_embed.data.copy_(torch.from_numpy(decoder_pos_embed).float().unsqueeze(0))\n decoder_time_pos_embed = get_1d_sincos_pos_embed_from_grid(\n self.decoder_time_pos_embed.shape[-1], np.arange(self.timesteps)\n )\n self.decoder_time_pos_embed.data.copy_(torch.from_numpy(decoder_time_pos_embed).float().unsqueeze(0))\n\n # timm's trunc_normal_(std=.02) is effectively normal_(std=0.02) as cutoff is too big (2.)\n torch.nn.init.normal_(self.mask_token, std=0.02)\n\n def unpatchify(self, x):\n \"\"\"\n x: (B, L, patch_size**2 * C)\n return: (B, C, H, W)\n \"\"\"\n p = self.patch_size\n c = len(self.default_vars)\n h = self.img_size[0] // p\n w = self.img_size[1] // p\n assert h * w == x.shape[1]\n\n x = x.reshape(shape=(x.shape[0], h, w, p, p, c))\n x = torch.einsum(\"nhwpqc->nchpwq\", x)\n imgs = x.reshape(shape=(x.shape[0], c, h * p, w * p))\n return imgs\n\n def aggregate_channel(self, x: torch.Tensor):\n \"\"\"\n x: B, C, L, D\n \"\"\"\n b, _, l, _ = x.shape\n x = torch.einsum(\"bcld->blcd\", x)\n x = x.flatten(0, 1) # BxTxL, C, D\n\n if self.channel_agg is not None:\n channel_query = self.channel_query.repeat_interleave(x.shape[0], dim=0)\n x, _ = self.channel_agg(channel_query, x, x) # BxTxL, D\n x = x.squeeze()\n else:\n x = torch.mean(x, dim=1) # BxTxL, D\n\n x = x.unflatten(dim=0, 
sizes=(b, l)) # BxT, L, D\n return x\n\n def space_masking(self, x, mask_ratio):\n # x: B, T, L, D\n N, T, L, D = x.shape\n len_keep = L - int(L * mask_ratio)\n\n noise = torch.rand(N, L, device=x.device)\n\n ids_shuffle = torch.argsort(noise, dim=1) # N, L\n ids_restore = torch.argsort(ids_shuffle, dim=1) # N, L\n\n # # keep the first subset\n ids_keep = ids_shuffle[:, :len_keep] # N, L*(1-mask_ratio)\n x_masked = torch.gather(x, dim=2, index=ids_keep.unsqueeze(-1).unsqueeze(1).repeat(1, T, 1, D)) # B, T, L*(1-mask_ratio), D\n\n mask = torch.ones([N, L], device=x.device)\n mask[:, :len_keep] = 0\n # unshuffle to get the binary mask\n mask = torch.gather(mask, dim=1, index=ids_restore)\n\n x_masked = x_masked.flatten(1, 2)\n mask = mask.unsqueeze(1).repeat(1, T, 1).flatten(1, 2) # B, T*L\n return x_masked, mask, ids_restore\n\n def time_masking(self, x, mask_ratio):\n # x: B, T, L, D\n N, T, L, D = x.shape\n len_keep = T - int(T * mask_ratio)\n\n noise = torch.rand(N, T, device=x.device)\n\n ids_shuffle = torch.argsort(noise, dim=1) # N, T\n ids_restore = torch.argsort(ids_shuffle, dim=1) # N, T\n\n # # keep the first subset\n ids_keep = ids_shuffle[:, :len_keep] # N, T*(1-mask_ratio)\n x_masked = torch.gather(x, dim=1, index=ids_keep.unsqueeze(-1).unsqueeze(-1).repeat(1, 1, L, D)) # B, T*(1-mask_ratio), L, D\n\n mask = torch.ones([N, T], device=x.device)\n mask[:, :len_keep] = 0\n # unshuffle to get the binary mask\n mask = torch.gather(mask, dim=1, index=ids_restore)\n\n x_masked = x_masked.flatten(1, 2)\n mask = mask.unsqueeze(-1).repeat(1, 1, L).flatten(1, 2) # B, TxL\n return x_masked, mask, ids_restore\n\n def append_mask_tokens(self, x, ids_restore):\n if self.mask_type == 'space':\n # ids_restore: B, L\n x = x.unflatten(1, sizes=(self.timesteps, -1)) # B, T, L*(1-mask_ratio), D\n # mask_tokens: 1, 1, D\n mask_tokens = self.mask_token.unsqueeze(1).repeat(x.shape[0], x.shape[1], ids_restore.shape[1] - x.shape[2], 1) # B, T, L*mask_ratio, D\n x = torch.cat([x, mask_tokens], dim=2) # B, T, L, D\n x = torch.gather(x, dim=2, index=ids_restore.unsqueeze(-1).unsqueeze(1).repeat(1, x.shape[1], 1, x.shape[-1])) # unshuffle, B, T, L, D\n elif self.mask_type == 'time':\n # ids_restore: B, T\n x = x.unflatten(1, sizes=(-1, self.num_patches)) # B, T*(1-mask_ratio), L, D\n # mask_tokens: 1, 1, D\n mask_tokens = self.mask_token.unsqueeze(2).repeat(x.shape[0], ids_restore.shape[1] - x.shape[1], x.shape[2], 1) # B, T*mask_ratio, L, D\n x = torch.cat([x, mask_tokens], dim=1) # B, T, L, D\n x = torch.gather(x, dim=1, index=ids_restore.unsqueeze(-1).unsqueeze(-1).repeat(1, 1, x.shape[2], x.shape[-1])) # unshuffle, B, T, L, D\n else:\n mask_tokens = self.mask_token.repeat(x.shape[0], ids_restore.shape[1] + 1 - x.shape[1], 1)\n x = torch.cat([x[:, :, :], mask_tokens], dim=1) # no cls token\n x = torch.gather(x, dim=1, index=ids_restore.unsqueeze(-1).repeat(1, 1, x.shape[2])) # unshuffle, B, TxL, D\n x = x.unflatten(dim=1, sizes=(self.timesteps, self.num_patches)) # B, T, L, D\n return x\n\n def forward_encoder(self, x, variables, mask_ratio):\n \"\"\"\n x: B, T, C, H, W\n \"\"\"\n # embed tokens\n b, t, _, _, _ = x.shape\n x = x.flatten(0, 1) # BxT, C, H, W\n\n embeds = []\n var_ids = self.get_channel_ids(variables)\n for i in range(len(var_ids)):\n id = var_ids[i]\n embeds.append(self.token_embeds[id](x[:, i : i + 1]))\n x = torch.stack(embeds, dim=1) # BxT, C, L, D\n\n # add channel embedding, channel_embed: 1, C, D\n channel_embed = self.get_channel_emb(self.channel_embed, variables)\n x = x + 
channel_embed.unsqueeze(2) # BxT, C, L, D\n\n x = self.aggregate_channel(x) # BxT, L, D\n\n x = x.unflatten(dim=0, sizes=(b, t)) # B, T, L, D\n\n # add pos embedding, pos_emb: 1, L, D\n x = x + self.pos_embed.unsqueeze(1)\n # add time pos embedding, time_emb: 1, T, D\n x = x + self.time_pos_embed.unsqueeze(2)\n\n x = self.pos_drop(x)\n\n if self.mask_type == 'space':\n x, mask, ids_restore = self.space_masking(x, mask_ratio)\n elif self.mask_type == 'time':\n x, mask, ids_restore = self.time_masking(x, mask_ratio)\n else:\n x = x.flatten(1, 2) # B, TxL, D\n x, mask, ids_restore = self.random_masking(x, mask_ratio)\n\n # apply Transformer blocks\n for blk in self.blocks:\n x = blk(x)\n x = self.norm(x)\n\n return x, mask, ids_restore\n\n def forward_decoder(self, x, variables, ids_restore):\n # embed tokens\n x = self.decoder_embed(x) # B, T x L x mask_ratio, D\n\n # append mask tokens to sequence\n x = self.append_mask_tokens(x, ids_restore)\n\n # add pos embedding, pos_emb: 1, L, D\n x = x + self.decoder_pos_embed.unsqueeze(1)\n # add time pos embedding, time_emb: 1, T, D\n x = x + self.decoder_time_pos_embed.unsqueeze(2)\n\n x = x.flatten(1, 2) # B, TxL, D\n\n # apply Transformer blocks\n for blk in self.decoder_blocks:\n x = blk(x)\n x = self.decoder_norm(x)\n\n # predictor projection\n x = self.decoder_pred(x)\n\n return x\n\n def forward_loss(self, imgs, pred, out_variables, metric, lat, mask, reconstruct_all):\n \"\"\"\n imgs: [B, T, C, H, W]\n pred: [B, TxL, p*p*C]\n mask: [B, TxL], 0 is keep, 1 is remove,\n \"\"\"\n\n imgs = imgs.flatten(0, 1) # BxT, C, H, W\n\n img_mask = mask.unsqueeze(-1).repeat(1, 1, pred.shape[-1]) # [B, TxL, p*p]\n img_mask = img_mask.unflatten(dim=1, sizes=(self.timesteps, self.num_patches)) # [B, T, L, p*p*C]\n img_mask = img_mask.flatten(0, 1) # [BxT, L, p*p*C]\n img_mask = self.unpatchify(img_mask)[:, 0] # [BxT, H, W]\n\n pred = pred.unflatten(dim=1, sizes=(self.timesteps, self.num_patches)) # [B, T, L, p*p*C]\n pred = pred.flatten(0, 1) # [BxT, L, p*p*C]\n img_pred = self.unpatchify(pred) # [BxT, C, H, W]\n var_ids = self.get_channel_ids(out_variables)\n img_pred = img_pred[:, var_ids] # only compute loss over present variables\n\n if metric is None:\n return None, img_pred, img_mask\n\n if reconstruct_all:\n loss_dict = metric(img_pred, imgs, out_variables, lat, None)\n else:\n loss_dict = metric(img_pred, imgs, out_variables, lat, img_mask)\n\n return loss_dict, img_pred, img_mask\n\n def forward(self, imgs, gt_imgs, variables, out_variables, metric, lat, mask_ratio=0.75, reconstruct_all=False):\n latent, mask, ids_restore = self.forward_encoder(imgs, variables, mask_ratio)\n pred = self.forward_decoder(latent, variables, ids_restore) # [B, TxL, p*p]\n loss, pred, mask = self.forward_loss(gt_imgs, pred, out_variables, metric, lat, mask, reconstruct_all)\n return loss, pred, mask\n\n def pred(self, imgs, gt_imgs, variables, out_variables, mask_ratio):\n _, pred, mask = self.forward(imgs, gt_imgs, variables, out_variables, None, None, mask_ratio)\n return pred, mask\n\n\n# model = TokenizedMAE(depth=4, decoder_depth=2).cuda()\n# x = torch.randn(2, 3, 128, 256).cuda()\n# loss, pred, mask = model(x)\n# print (loss)\n\n# x = torch.randn(2, 2, 3, 2)\n# mask_ratio = 0.5\n\n# N, T, L, D = x.shape\n# len_keep = L - int(L * mask_ratio)\n\n# noise = torch.rand(N, L, device=x.device) # noise in [0, 1]\n\n# ids_shuffle = torch.argsort(noise, dim=1) # ascend: small is keep, large is remove\n# ids_restore = torch.argsort(ids_shuffle, dim=1)\n\n# # # keep the first 
subset\n# ids_keep = ids_shuffle[:, :len_keep]\n# x_masked = torch.gather(x, dim=2, index=ids_keep.unsqueeze(-1).unsqueeze(1).repeat(1, T, 1, D))\n\n# mask = torch.ones([N, L], device=x.device)\n# mask[:, :len_keep] = 0\n# # unshuffle to get the binary mask\n# mask = torch.gather(mask, dim=1, index=ids_restore)\n\n# len_keep = T - int(T * mask_ratio)\n\n# noise = torch.rand(N, T, device=x.device)\n\n# ids_shuffle = torch.argsort(noise, dim=1) # N, T\n# ids_restore = torch.argsort(ids_shuffle, dim=1) # N, T\n\n# # # keep the first subset\n# ids_keep = ids_shuffle[:, :len_keep] # N, T*(1-mask_ratio)\n# x_masked = torch.gather(x, dim=1, index=ids_keep.unsqueeze(-1).unsqueeze(-1).repeat(1, 1, L, D)) # B, T*(1-mask_ratio), L, D\n\n# mask = torch.ones([N, T], device=x.device)\n# mask[:, :len_keep] = 0\n# # unshuffle to get the binary mask\n# mask = torch.gather(mask, dim=1, index=ids_restore)\n\n# for i in range(N):\n# print ('x', x[i])\n# print ('ids shuffle', ids_shuffle[i])\n# print ('id restore', ids_restore[i])\n# print ('ids keep', ids_keep[i])\n# print ('x masked', x_masked[i])\n# print ('=' * 50)\n","repo_name":"tung-nd/climax_all","sub_path":"src/models/components/tokenized_video_mae.py","file_name":"tokenized_video_mae.py","file_ext":"py","file_size_in_byte":14565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21622227650","text":"import fnmatch\nimport mimetypes\nimport os\nfrom datetime import datetime, timedelta, timezone\nfrom threading import Thread\nfrom time import sleep, time\n\nimport requests\nfrom dateutil import parser\nfrom requests import auth\n\nimport config\nfrom db_handler import db_queue\nfrom frameioclient.utils import calculate_hash\nfrom logger import logger\nfrom main import authenticated_client\n\n\nclass SyncLoop(Thread):\n def __init__(self, db, project, asset, ignore_folder, **kwargs):\n super().__init__(**kwargs)\n self.daemon = True\n self.db = db\n self.Project = project\n self.Asset = asset\n self.IgnoreFolder = ignore_folder\n\n @staticmethod\n def wildcard_match(name, ignore_list):\n for ignore in ignore_list:\n if fnmatch.fnmatch(name, ignore):\n return True\n\n return False\n\n def update_projects(self):\n \"\"\"Get all projects from Frame.io and add new to DB.\"\"\"\n projects = []\n try:\n for team in authenticated_client().get_all_teams():\n # print(f\"{team['name']}: {team['id']}\")\n projects += authenticated_client().get_projects(team['id'])\n except (requests.exceptions.ConnectionError,\n requests.exceptions.HTTPError):\n raise\n\n # try:\n # projects += authenticated_client().get_shared_projects() # could take a while to run\n # except (requests.exceptions.ConnectionError,\n # requests.exceptions.HTTPError):\n # raise\n\n for project in projects:\n try:\n db_project = self.Project.get(\n self.Project.project_id == project['id'])\n\n # Check if project has been renamed\n if db_project.name != project['name']:\n db_project.name = project['name']\n db_queue.put([db_project, 'save'])\n\n logger.info(\n 'Renamed project {} to {}'.format(db_project.name,\n project['name']))\n\n except self.Project.DoesNotExist:\n logger.info('New project found: {}'.format(project['name']))\n\n new_project = self.Project(name=project['name'],\n project_id=project['id'],\n root_asset_id=project['root_asset_id'],\n team_id=project['team_id'],\n on_frameio=True)\n db_queue.put([new_project, 'save'])\n\n # Check if any projects have been deleted\n active_projects = [project['id'] for project in projects]\n\n 
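# any project still marked active in the DB but absent from the fetched list was deleted remotely\n        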
for db_project in self.Project.select().where(\n self.Project.deleted_from_frameio == False):\n if db_project.project_id not in active_projects:\n db_project.deleted_from_frameio = True\n db_project.sync = False\n db_queue.put([db_project, 'save'])\n\n logger.info(\n \"Project {} has been deleted, \"\n \"turning off sync.\".format(db_project.name))\n\n def calculate_missing_paths(self, project, parent_id, parent_path,\n ignore_folders, ignore):\n \"\"\"Recurse through DB and add missing paths and if folder should be\n ignored.\n \"\"\"\n children = self.Asset.select().where(\n (self.Asset.project_id == project.project_id) &\n (self.Asset.is_file == False) &\n (self.Asset.parent_id == parent_id))\n\n for child in children:\n if child.name in ignore_folders or ignore or self.wildcard_match(\n child.name, ignore_folders):\n ignore = True\n\n if child.path == '':\n child.path = os.path.join(parent_path, child.name)\n child.ignore = ignore\n db_queue.put([child, 'save'])\n logger.info('Added path to folder {}'.format(child.path))\n\n self.calculate_missing_paths(project, child.asset_id, child.path,\n ignore_folders, ignore)\n\n def update_frameio_assets(self, project, ignore_folders):\n \"\"\"Fetch assets that've been added since last scan and add them to DB.\n\n :Args:\n project (DB Project)\n ignore_folders (List)\n \"\"\"\n # Always overscan by 10 minutes to help avoid missing assets.\n new_scan_timestamp = (\n datetime.now(timezone.utc) - timedelta(minutes=10)).isoformat()\n\n try:\n account_id = authenticated_client().get_project(project.project_id)['root_asset']['account_id']\n updated_assets = authenticated_client().get_updated_assets(\n account_id, project.project_id, project.last_frameio_scan)\n\n except (requests.exceptions.ConnectionError,\n requests.exceptions.HTTPError):\n raise\n\n # Find assets not already in DB.\n new_assets = []\n for asset in updated_assets:\n try:\n self.Asset.get(self.Asset.asset_id == asset['id'])\n\n except self.Asset.DoesNotExist:\n new_assets.append(asset)\n\n new_folders = [a for a in new_assets if a['type'] == 'folder']\n new_folders.sort(key=lambda a: a['inserted_at']) # Oldest first\n new_files = [a for a in new_assets if a['type'] == 'file']\n\n added_folders = 0\n added_files = 0\n duplicates_folders = 0\n duplicates_files = 0\n\n # Filter out duplicate folders with same name/path\n new_folders_filtered = []\n for folder in new_folders:\n if (folder['name'] + folder['parent_id']) not in [\n f['name'] + f['parent_id'] for f in new_folders_filtered]:\n new_folders_filtered.append(folder)\n\n for folder in new_folders_filtered:\n ignore = False\n path = ''\n\n if folder['name'] in ignore_folders or self.wildcard_match(\n folder['name'], ignore_folders):\n ignore = True\n\n if folder['parent_id'] == project.root_asset_id:\n path = folder['name']\n\n else:\n try:\n parent = self.Asset.get(\n self.Asset.asset_id == folder['parent_id'])\n\n if parent.path != '':\n path = os.path.join(parent.path, folder['name'])\n\n if parent.ignore:\n ignore = True\n\n except self.Asset.DoesNotExist:\n pass\n\n # If folder has the same path/name as an existing one, ignore it\n try:\n self.Asset.get(self.Asset.path == path,\n self.Asset.project_id == project.project_id)\n ignore = True\n duplicates_folders += 1\n\n except self.Asset.DoesNotExist:\n pass\n\n new_asset = self.Asset(name=folder['name'],\n project_id=project.project_id,\n path=path,\n asset_id=folder['id'],\n parent_id=folder['parent_id'],\n ignore=ignore,\n on_frameio=True)\n db_queue.put([new_asset, 
'save'])\n added_folders += 1\n\n # If folders are out of order from Frame.io we need to calc paths.\n if self.Asset.select().where(self.Asset.path == ''):\n self.calculate_missing_paths(project=project,\n parent_id=project.root_asset_id,\n parent_path='',\n ignore_folders=ignore_folders,\n ignore=False)\n\n for file in new_files:\n if file['upload_completed_at'] is not None:\n ignore = False\n if file['parent_id'] == project.root_asset_id:\n parent_path = ''\n\n else:\n try:\n parent = self.Asset.get(\n self.Asset.asset_id == file['parent_id'])\n\n if parent.path == '':\n logger.info(\n \"Parent to {} path is not set, retry\".format(\n file['name']))\n continue\n\n parent_path = parent.path\n\n if parent.ignore:\n ignore = True\n\n except self.Asset.DoesNotExist:\n logger.info('Parent to {} not found, retry'.format(\n file['name']))\n continue\n\n # Only add files with unique path and name.\n asset_path = os.path.join(parent_path, file['name'])\n try:\n self.Asset.get(self.Asset.path == asset_path,\n self.Asset.project_id == project.project_id)\n duplicates_files += 1\n\n except self.Asset.DoesNotExist:\n new_asset = self.Asset(name=file['name'],\n project_id=project.project_id,\n path=asset_path,\n is_file=True,\n asset_id=file['id'],\n parent_id=file['parent_id'],\n original=file['original'],\n ignore=ignore,\n on_frameio=True)\n db_queue.put([new_asset, 'save'])\n added_files += 1\n\n if added_folders - duplicates_folders != 0:\n logger.info(\n 'New folders on Frame.io for project {}'.format(project.name))\n if added_files - duplicates_files != 0:\n logger.info(\n 'New files on Frame.io for project {}'.format(project.name))\n\n if len(new_files) == added_files: # All done. Moving up timestamp.\n project.last_frameio_scan = new_scan_timestamp\n\n db_queue.put([project, 'save'])\n\n def update_local_assets(self, project, ignore_folders):\n \"\"\"Scan local storage for assets and creates new ones in DB.\n\n :Args:\n project (DB project)\n ignore_folders (List)\n \"\"\"\n abs_project_path = os.path.abspath(project.local_path)\n if not os.path.isdir(abs_project_path):\n logger.info(\n 'Local folder for {} not found, turning off sync'.format(\n project.name))\n self.delete_db_project(project)\n return\n\n new_scan_time = int(time()) - 500 # Overscan to avoid missing assets.\n all_assets_ready = True\n new_folders = False\n new_files = False\n\n for root, dirs, files in os.walk(abs_project_path, topdown=True):\n dirs[:] = [d for d in dirs if\n d not in ignore_folders and not d.startswith(\n \".\") and not self.wildcard_match(d, ignore_folders)]\n\n for name in files:\n full_path = os.path.join(root, name)\n if not name.startswith('.'):\n try:\n create_time = os.path.getctime(full_path)\n except FileNotFoundError:\n all_assets_ready = False\n continue\n\n # Add new file criteria\n # - Created since last complete scan (all_assets_ready)\n # - Not changed in the last 60 secs\n # - Size not 0\n if create_time > project.last_local_scan:\n if (time() - create_time) < 60 or os.path.getsize(\n full_path) == 0:\n all_assets_ready = False\n continue\n\n path = os.path.relpath(full_path, abs_project_path)\n\n try:\n db_asset = self.Asset.get(\n self.Asset.project_id == project.project_id,\n self.Asset.path == path)\n\n # Already synced\n if db_asset.on_local_storage is False:\n db_asset.on_local_storage = True\n db_queue.put([db_asset, 'save'])\n\n # New file\n except self.Asset.DoesNotExist:\n try:\n file_hash = calculate_hash(full_path)\n except FileNotFoundError:\n all_assets_ready = False\n 
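# file vanished between os.walk() and hashing; all_assets_ready stays False so it is rescanned next loop\n                                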
continue\n\n new_files = True\n new_asset = self.Asset(name=name,\n project_id=project.project_id,\n path=path,\n is_file=True,\n on_local_storage=True,\n local_xxhash=file_hash)\n db_queue.put([new_asset, 'save'])\n\n for name in dirs:\n full_path = os.path.join(root, name)\n try:\n create_time = os.path.getctime(full_path)\n except FileNotFoundError:\n all_assets_ready = False\n continue\n\n if create_time > project.last_local_scan:\n path = os.path.relpath(full_path, abs_project_path)\n\n try:\n db_asset = self.Asset.get(\n self.Asset.project_id == project.project_id,\n self.Asset.path == path)\n\n # Already synced\n if db_asset.on_local_storage is False:\n db_asset.on_local_storage = True\n db_queue.put([db_asset, 'save'])\n\n except self.Asset.DoesNotExist:\n new_folders = True\n new_asset = self.Asset(name=name,\n project_id=project.project_id,\n on_local_storage=True,\n path=path)\n db_queue.put([new_asset, 'save'])\n\n if all_assets_ready:\n project.last_local_scan = new_scan_time\n db_queue.put([project, 'save'])\n\n if new_folders:\n logger.info(\n 'New local folders for project {}'.format(project.name))\n if new_files:\n logger.info(\n 'New local files for project {}'.format(project.name))\n\n def download_new_assets(self, project):\n \"\"\"Get new assets from DB and download them\"\"\"\n new_folders = self.Asset.select().where(\n (self.Asset.on_local_storage == False) &\n (self.Asset.is_file == False) &\n (self.Asset.project_id == project.project_id) &\n (self.Asset.ignore == False) &\n (self.Asset.path != ''))\n\n new_files = self.Asset.select().where(\n (self.Asset.on_local_storage == False) &\n (self.Asset.is_file == True) &\n (self.Asset.project_id == project.project_id) &\n (self.Asset.ignore == False))\n\n if len(new_folders) == 0 and len(new_files) == 0:\n return\n\n for folder in new_folders:\n logger.info('Creating local folder: {}'.format(\n os.path.join(project.local_path, folder.path)))\n\n os.makedirs(os.path.join(\n project.local_path, folder.path),\n exist_ok=True)\n\n folder.on_local_storage = True\n db_queue.put([folder, 'save'])\n\n for file in new_files:\n try:\n asset = authenticated_client().get_asset(file.asset_id)\n except requests.exceptions.HTTPError:\n logger.info('File removed from Frame.io')\n db_queue.put(file, 'delete')\n continue\n\n if asset['checksums'] is None:\n logger.info('No checksum for {}'.format(file.name))\n\n # Allow Frame.io some time to calculate hash, retry next loop\n asset_uploaded_epoch = parser.parse(\n asset['upload_completed_at']).timestamp()\n if time() - asset_uploaded_epoch < 300:\n logger.info('Waiting for checksum'.format(file.name))\n continue\n\n download_folder = os.path.join(project.local_path,\n os.path.dirname(file.path))\n\n if os.path.isdir(download_folder):\n logger.info('Downloading: {}'.format(file.path))\n\n try:\n authenticated_client().download(asset,\n download_folder=download_folder,\n replace=False)\n\n except FileExistsError:\n logger.info('{} already exists.'.format(file.path))\n\n # Add local props to new file\n file.on_local_storage = True\n db_queue.put([file, 'save'])\n else:\n logger.info('Download folder not found: {}'.format(file.path))\n db_queue.put([file, 'delete'])\n\n @staticmethod\n def new_frameio_folder(name, parent_asset_id):\n \"\"\"Create single folder on Frame.io\"\"\"\n logger.info('Creating Frame.io folder: {}'.format(name))\n\n asset = authenticated_client().create_asset(\n parent_asset_id=parent_asset_id,\n name=name,\n type=\"folder\",\n )\n\n return asset['id']\n\n def 
create_frameio_folder_tree(self, project, new_folder_path):\n \"\"\"Step up folder tree in DB to find the closest parent folder on\n Frame.io. Then creates new folders on Frame.io down to the\n new folder and save all new asset_ids to DB.\n\n :Args:\n project (Project): In what project to create\n new_folder (string): Path to folder to create\n \"\"\"\n current_path = os.path.dirname(new_folder_path)\n while current_path != '': # While not at root\n try:\n db_asset = self.Asset.get(\n self.Asset.project_id == project.project_id,\n self.Asset.path == current_path,\n self.Asset.on_frameio == True)\n break\n except self.Asset.DoesNotExist:\n # Not found, continue up in tree\n current_path = os.path.dirname(current_path)\n\n # Parent folder was found\n try:\n parent_asset_id = db_asset.asset_id\n parent_path = db_asset.path\n\n # No parent folder found, root is closest\n except NameError:\n parent_asset_id = project.root_asset_id\n parent_path = ''\n\n new_tree = os.path.relpath(new_folder_path, parent_path)\n for folder in new_tree.split('/'):\n asset_id = self.new_frameio_folder(folder, parent_asset_id)\n\n path = os.path.join(parent_path, folder)\n asset = self.Asset.get(self.Asset.path == path,\n self.Asset.project_id == project.project_id)\n asset.asset_id = asset_id\n asset.on_frameio = True\n db_queue.put([asset, 'save'])\n\n parent_asset_id = asset_id\n parent_path = path\n\n @staticmethod\n def upload_asset(abs_path, parent_asset_id):\n \"\"\"Upload single asset to Frame.io.\"\"\"\n file_mime = mimetypes.guess_type(abs_path)[0]\n new_asset = authenticated_client().create_asset(\n parent_asset_id=parent_asset_id,\n name=os.path.basename(abs_path),\n type=\"file\",\n filetype=file_mime,\n filesize=os.path.getsize(abs_path)\n )\n\n with open(abs_path, \"rb\") as ul_file:\n authenticated_client().upload(new_asset, ul_file)\n\n return new_asset\n\n def upload_new_assets(self, project):\n \"\"\"Upload new local assets to Frame.io and save new asset ids to DB.\"\"\"\n new_folders = self.Asset.select().where(\n (self.Asset.on_frameio == False) &\n (self.Asset.is_file == False) &\n (self.Asset.project_id == project.project_id))\n\n new_files = self.Asset.select().where(\n (self.Asset.on_frameio == False) &\n (self.Asset.is_file == True) &\n (self.Asset.project_id == project.project_id))\n\n if len(new_folders) == 0 and len(new_files) == 0:\n return\n\n for folder in new_folders:\n if not os.path.isdir(\n os.path.join(project.local_path, folder.path)):\n logger.info(\"Can't find {}, skipping.\".format(folder.name))\n db_queue.put([folder, 'delete'])\n continue\n\n self.create_frameio_folder_tree(project=project,\n new_folder_path=folder.path)\n\n for file in new_files:\n if not os.path.isfile(os.path.join(project.local_path, file.path)):\n logger.info(\"Can't find {}\".format(file.name))\n db_queue.put([file, 'delete'])\n continue\n\n logger.info('Uploading asset: {}'.format(file.path))\n abs_path = os.path.abspath(\n os.path.join(project.local_path, file.path))\n\n if os.path.dirname(file.path) == '':\n parent_asset_id = project.root_asset_id\n else:\n parent_asset_id = self.Asset.get(\n self.Asset.project_id == project.project_id,\n self.Asset.path == os.path.dirname(\n file.path)).asset_id\n\n new_asset = self.upload_asset(abs_path, parent_asset_id)\n logger.info('Upload done')\n\n file.asset_id = new_asset['id']\n file.original = new_asset['original']\n file.uploaded_at = int(time())\n file.on_frameio = True\n file.upload_verified = False\n db_queue.put([file, 'save'])\n\n def 
delete_and_reupload(self, project, asset):\n \"\"\"Delete and re-upload asset to Frame.io. Max attempts: 3.\"\"\"\n logger.info('Deleting and re-uploading: {}'.format(asset.name))\n\n if asset.upload_retries == 2:\n logger.info(\n 'Asset already uploaded 3 times. Marking as successful anyway')\n asset.upload_verified = True\n db_queue.put([asset, 'save'])\n return\n\n try:\n authenticated_client().delete_asset(asset.asset_id)\n except requests.exceptions.HTTPError: # Deleted by user already.\n pass\n\n abs_path = os.path.abspath(\n os.path.join(project.local_path, asset.path))\n\n if not os.path.isfile(abs_path):\n logger.info('{} not found'.format(asset.name))\n db_queue.put([asset, 'delete'])\n return\n\n if os.path.dirname(asset.path) == '':\n parent_asset_id = project.root_asset_id\n else:\n parent_asset_id = self.Asset.get(\n self.Asset.project_id == project.project_id,\n self.Asset.path == os.path.dirname(\n asset.path)).asset_id\n\n new_asset = self.upload_asset(abs_path, parent_asset_id)\n asset.asset_id = new_asset['id']\n asset.original = new_asset['original']\n asset.uploaded_at = int(time())\n asset.on_frameio = True\n asset.upload_verified = False\n asset.upload_retries += 1\n db_queue.put([asset, 'save'])\n\n def verify_new_uploads(self):\n \"\"\"Get xxhash from Frame.io and compare it to local hash in DB.\n Call delete and re-upload if hashes don't match.\n \"\"\"\n new_assets = self.Asset.select().where(\n (self.Asset.upload_verified == False))\n\n if len(new_assets) == 0:\n return\n\n for asset in new_assets:\n if int(time()) - asset.uploaded_at < 100:\n continue # Giving Frame.io time to calculate hash.\n\n logger.info('New upload to verify: {}'.format(asset.path))\n\n project = self.Project.get(\n self.Project.project_id == asset.project_id)\n\n try:\n frameio_asset = authenticated_client().get_asset(\n asset.asset_id)\n except requests.exceptions.HTTPError:\n logger.info('Asset deleted from Frame.io, skipping')\n asset.upload_verified = True\n db_queue.put([asset, 'save'])\n continue\n\n if frameio_asset.get('upload_completed_at') is None:\n logger.info('Upload failed')\n self.delete_and_reupload(project=project, asset=asset)\n\n else:\n try:\n frameio_hash = frameio_asset['checksums']['xx_hash']\n if frameio_hash != asset.local_xxhash:\n logger.info('Hash mismatch')\n self.delete_and_reupload(project=project, asset=asset)\n\n else:\n logger.info('Upload succeeded')\n asset.frameio_xxhash = frameio_hash\n asset.upload_verified = True\n db_queue.put([asset, 'save'])\n\n except (KeyError, TypeError):\n logger.info('No calculated checksum yet')\n\n # Edge cases where Frame.io fails to calculate a checksum.\n # Mark as successful anyway.\n if (time() - asset.uploaded_at) > 1800:\n logger.info(\n \"\"\"30 mins since upload and no checksum on \n Frame.io, marking as successful anyway\"\"\")\n asset.upload_verified = True\n db_queue.put([asset, 'save'])\n\n def delete_db_project(self, project):\n \"\"\"Delete project and its associated assets from DB.\"\"\"\n logger.info('Deleting project {} from DB.'.format(project.name))\n\n for asset in self.Asset.select().where(\n self.Asset.project_id == project.project_id):\n db_queue.put([asset, 'delete'])\n\n db_queue.put([project, 'delete'])\n\n def delete_assets_from_db(self, project):\n \"\"\"Delete all assets from DB.\"\"\"\n for asset in self.Asset.select().where(\n self.Asset.project_id == project.project_id):\n db_queue.put([asset, 'delete'])\n\n def remove_ignore_flag(self, assets, ignore_folders):\n for asset in assets:\n if 
asset.is_file:\n asset.ignore = False\n db_queue.put([asset, 'save'])\n\n if not asset.is_file:\n if asset.name not in ignore_folders and not self.wildcard_match(\n asset.name, ignore_folders):\n asset.ignore = False\n db_queue.put([asset, 'save'])\n children = self.Asset.select().where(\n self.Asset.parent_id == asset.asset_id)\n self.remove_ignore_flag(children, ignore_folders)\n\n def update_ignored_assets(self):\n \"\"\"Find changes to ignore folders and un-flag blocked assets.\n\n Frame.io assets are added to DB even if they match an ignore folder.\n Remove the ignore flag, and download_new_assets will pick them up.\n\n Local assets are not added to DB if they match an ignore folder, just\n skipped. Reset last_scan will add/upload them.\n \"\"\"\n removed_ignore_folders = self.IgnoreFolder.select().where(\n self.IgnoreFolder.removed == True)\n\n active_ignore_folders = [folder.name for folder in\n self.IgnoreFolder.select().where(\n self.IgnoreFolder.removed == False)]\n\n all_blocked_folders = self.Asset.select().where(\n (self.Asset.ignore == True) &\n (self.Asset.is_file == False))\n\n for ignore_folder in removed_ignore_folders:\n logger.info('Removing ignore folder {}'.format(ignore_folder.name))\n\n blocked_folders = [f for f in all_blocked_folders if\n f.name == ignore_folder.name or self.wildcard_match(\n f.name, [ignore_folder.name])]\n\n self.remove_ignore_flag(blocked_folders, active_ignore_folders)\n db_queue.put([ignore_folder, 'delete'])\n\n for project in self.Project.select(): # Trigger re-scan of all\n project.last_local_scan = 0\n db_queue.put([project, 'save'])\n\n def run(self):\n while True:\n if authenticated_client():\n try:\n logger.info('Checking for updates')\n self.update_projects()\n\n ignore_folders = [folder.name for folder in\n self.IgnoreFolder.select().where(\n self.IgnoreFolder.removed == False)]\n\n for project in self.Project.select().where(\n self.Project.sync == True):\n self.update_frameio_assets(\n project=project,\n ignore_folders=ignore_folders)\n\n self.update_local_assets(\n project=project,\n ignore_folders=ignore_folders)\n\n if config.SyncSetting.ASSETS_LOCAL_TO_FRAME:\n self.upload_new_assets(project)\n if config.SyncSetting.ASSETS_FRAMEIO_TO_LOCAL:\n self.download_new_assets(project)\n\n self.verify_new_uploads()\n\n except (requests.exceptions.ConnectionError,\n requests.exceptions.HTTPError):\n logger.info('Could not connect, retrying in {}'.format(\n config.SCAN_INTERVAL))\n\n # Delete project from DB if requested by user.\n for project in self.Project.select().where(\n self.Project.db_delete_requested == True):\n self.delete_db_project(project)\n\n # Delete assets to redo sync from scratch if path has been changed.\n for project in self.Project.select().where(\n self.Project.local_path_changed == True):\n logger.info('Path changed, deleting and recreating assets in db')\n\n self.delete_assets_from_db(project)\n project.local_path_changed = False\n project.last_local_scan = 0\n project.last_frameio_scan = '2014-02-07T00:00:01.000000+00:00'\n db_queue.put([project, 'save'])\n\n # Updated ignored assets if an ignore folder has been removed.\n if self.IgnoreFolder.select().where(\n self.IgnoreFolder.removed == True):\n self.update_ignored_assets()\n\n self.db.close()\n sleep(config.SCAN_INTERVAL)\n","repo_name":"strombergdev/frameio-python-sync","sub_path":"server/sync.py","file_name":"sync.py","file_ext":"py","file_size_in_byte":32113,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"81"} 
+{"seq_id":"73792581386","text":"# create character strings with both \"\" and ''\nnombre = 'Macarena'\nnombre\n'Macarena'\nnombre2 = \"Daniel\"\n# strings can be concatenated\nnombre + nombre2\n'MacarenaDaniel'\n# and multiplied\nnombre * 4\n'MacarenaMacarenaMacarenaMacarena'\n# nicer formatting for the message\nnombre + \", \" + nombre2\n'Macarena, Daniel'\n# decimal numbers\nnumero_decimal = 3.4\nnumero_decimal\n3.4\n# creating booleans\nes_estudiante = True\nes_estudiante\nTrue\nprint(es_estudiante)\nTrue\n# and creating the false one\ntrabaja = False\ntrabaja\nFalse\n","repo_name":"kolodani/basico_de_Python","sub_path":"conceptos_basicos_de_python/03-tipos_de_datos.py","file_name":"03-tipos_de_datos.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"71557894026","text":"import json\nimport os\nfrom chronosynth import chronogram\n\nsc = chronogram.find_trees(value='ot:substitutionCount')\n#un = chronogram.find_trees(value='ot:undefined')\n#cc = chronogram.find_trees(value='ot:changesCount')\n#ot = chronogram.find_trees(value='ot:other')\n\n#source = 'ot_303@Tr62284'\n\n#source = 'pg_1098@tree2158'\nsource = sc[12]\n\n#source = 'pg_61@tree816'\n\nsource = 'pg_2551@tree6180'\n\nmaps = chronogram.map_conflict_nodes(source)\n\n\n##Example node prior format\n#A,B ln(5,10,50)\n#C,D ln(10,3,10)\n\n\n\n##Next create priors file using fastdate\ndates = json.load(open(\"node_ages.json\"))\n\n#for taxon in maps['tree'].taxon_namespace:\n#    taxon._label = 'ott' + str(taxon.ott_id)\n\nmaps['tree'].resolve_polytomies()\n\nfor edge in maps['tree'].inorder_edge_iter():\n    if (edge.tail_node is not None) and (edge.length is None):\n        edge.length = 0.001\n    if edge.length == 0:\n        edge.length = 0.001\n\n\n\nmaps['tree'].write(path='test.tre', schema='newick')\n\nmrca_dict = {}\nfor node_label in maps['matched_nodes']:\n    if maps['matched_nodes'][node_label] in dates['node_ages'].keys():\n        mrca_dict[node_label] = {}\n        mrca_dict[node_label]['synth_node'] = maps['matched_nodes'][node_label]\n        mrca_dict[node_label]['ages'] = dates['node_ages'][maps['matched_nodes'][node_label]]\n        nd = maps['tree'].find_node_with_label(node_label)\n        mrca_dict[node_label]['tips'] = [ti.taxon.label.replace(' ','_') for ti in nd.leaf_iter()]\n#        assert(maps['tree'].mrca(taxon_labels=mrca_dict[node_label]['tips']).label == node_label)\n\n##\nif len(mrca_dict) == 0:\n    print(\"no calibrations\")\n    exit()\n\nfi=open('test_prior.txt','w')\nfor dated_node in mrca_dict:\n    fi.write(\"'\")\n    fi.write(\"','\".join(mrca_dict[dated_node]['tips']))\n    fi.write(\"'\")\n    fi.write(' ')\n    ages = [source['age'] for source in mrca_dict[dated_node]['ages']]\n    avgage = sum(ages)/len(ages)\n    fi.write('norm({},{},{})\\n'.format(0, 0.01*avgage, 0.8*avgage))\n\nfi.close()\n\n\nott_taxa = []\nfor taxon in maps['tree'].taxon_namespace:\n    ott_taxa.append('ott' + str(taxon.ott_id))\n\n\n##Set some sort of uniform prior on the root informed by taxonomy?\n## And we actually have a good estimate on bd_rho!!\n\n#os.system(\"fastdate --method_nodeprior --tree_file test.tre --prior_file test_prior.txt --out_file node_prior.tre --max_age 100 --bd_rho 1 --show_tree --grid 100\")\n\n\n###### Remind self what the calibrations even mean","repo_name":"OpenTreeOfLife/chronosynth","sub_path":"examples/roughdate.py","file_name":"roughdate.py","file_ext":"py","file_size_in_byte":2432,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"30817336284","text":"from django.conf.urls.defaults import patterns, url\nfrom www.authentification import stop_anon\nfrom profile.views import my_profile, some_profile\n\nurlpatterns = patterns('',\n    url(r'^me$', \n        stop_anon(my_profile), \n        name='profile_me'),\n\n    url(r'^get/(?P<username>[^/]+)$',\n        stop_anon(some_profile),\n        name='profile_get'),\n)\n","repo_name":"Bouska/resp402","sub_path":"profile/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"43702379709","text":"from flask import Blueprint, abort, jsonify\nfrom backend.models.wineries import Winery\n\nwineries = Blueprint('wineries', __name__,\n                     template_folder='templates')\n\n@wineries.route('/<country>')\ndef get_wineries_by_country(country):\n    wineries = Winery.query.filter_by(country=country).all()\n    if len(wineries) == 0:\n        abort(404)\n\n    formatted_wineries = [winery.format() for winery in wineries]\n    return jsonify({\n        'success': True,\n        'wineries': formatted_wineries\n    })\n\n@wineries.route('/US/<state>')\ndef get_wineries_by_state(state):\n    wines = Winery.query.filter_by(state=state).all()\n    if len(wines) == 0:\n        abort(404)\n\n    formatted_wineries = [winery.format() for winery in wines]\n    return jsonify({\n        'success': True,\n        'wineries': formatted_wineries\n    })\n\n@wineries.route('/<winery_id>/wines')\ndef get_wines_by_winery_id(winery_id):\n    winery = Winery.query.filter_by(id=winery_id).first()\n    if not winery:\n        abort(404)\n\n    formatted_wines = [wine.format() for wine in winery.wines]\n    return jsonify({\n        'success': True,\n        'wines': formatted_wines\n    })","repo_name":"qinghaoyang/LocalWine","sub_path":"backend/api/wineries.py","file_name":"wineries.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"33342589758","text":"import pytest\n\nimport deeplake\nfrom deeplake.util.exceptions import InvalidSourcePathError, TokenPermissionError\n\n\n@pytest.mark.slow\ndef test_connect_dataset_api(\n    hub_cloud_dev_token,\n    hub_cloud_path,\n    hub_cloud_dev_managed_creds_key,\n    s3_ds_generator,\n):\n    s3_ds = s3_ds_generator()\n    s3_ds.create_tensor(\"x\")\n    s3_ds.x.append(10)\n\n    ds = deeplake.connect(\n        s3_ds.path,\n        creds_key=hub_cloud_dev_managed_creds_key,\n        dest_path=hub_cloud_path,\n        token=hub_cloud_dev_token,\n    )\n\n    assert ds is not None\n    assert ds.path.startswith(\"hub://\")\n    assert \"x\" in ds.tensors\n    assert ds.x[0].numpy() == 10\n\n\n@pytest.mark.slow\ndef test_in_place_dataset_connect(\n    hub_cloud_dev_token,\n    hub_cloud_path,\n    hub_cloud_dev_managed_creds_key,\n    s3_ds_generator,\n):\n    s3_ds = s3_ds_generator()\n    s3_ds.create_tensor(\"x\")\n    s3_ds.x.append(10)\n\n    s3_ds.connect(\n        creds_key=hub_cloud_dev_managed_creds_key,\n        dest_path=hub_cloud_path,\n        token=hub_cloud_dev_token,\n    )\n    s3_ds.add_creds_key(hub_cloud_dev_managed_creds_key, managed=True)\n\n    assert s3_ds.path.startswith(\"hub://\")\n    assert \"x\" in s3_ds.tensors\n    assert s3_ds.x[0].numpy() == 10\n\n\n@pytest.mark.slow\ndef test_connect_dataset_cases(local_ds, memory_ds, hub_cloud_ds):\n    # Connecting Local or Memory datasets makes no sense.\n    with pytest.raises(InvalidSourcePathError):\n        local_ds.connect(creds_key=\"some_creds\", dest_path=\"hub://someorg/somename\")\n\n    with pytest.raises(InvalidSourcePathError):\n        memory_ds.connect(creds_key=\"some_creds\", dest_path=\"hub://someorg/somename\")\n\n    # Connecting a cloud dataset is not permitted as 
it's already available via a Deep Lake path\n    with pytest.raises(InvalidSourcePathError):\n        hub_cloud_ds.connect(creds_key=\"some_creds\", dest_path=\"hub://someorg/somename\")\n\n\n@pytest.mark.slow\ndef test_connect_user_not_in_org(s3_ds_generator, hub_cloud_dev_token):\n    with s3_ds_generator() as ds:\n        ds.create_tensor(\"x\")\n        ds.x.append(10)\n\n        with pytest.raises(TokenPermissionError) as e:\n            ds.connect(\n                creds_key=\"some_creds\",\n                dest_path=\"hub://bad-org/some-name\",\n                token=hub_cloud_dev_token,\n            )\n        assert \"dataset path\" in str(e)\n\n        with pytest.raises(TokenPermissionError) as e:\n            ds.connect(\n                creds_key=\"some_creds\",\n                org_id=\"bad-org\",\n                ds_name=\"some-name\",\n                token=hub_cloud_dev_token,\n            )\n        assert \"organization id\" in str(e)\n","repo_name":"activeloopai/deeplake","sub_path":"deeplake/api/tests/test_connect_datasets.py","file_name":"test_connect_datasets.py","file_ext":"py","file_size_in_byte":2562,"program_lang":"python","lang":"en","doc_type":"code","stars":7141,"dataset":"github-code","pt":"81"}
+{"seq_id":"7555924266","text":"# Neural Network Assignment - Week 8\n# Written by Chase Jacobs\n\nimport math\nimport numpy as np\nimport random\nfrom sklearn import datasets\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\n\n# Each node contains certain variables\n# value - This is \"a\" in terms of our reading and equations given.\n# weights - This is a list of weights that correspond to the nodes on the previous layer. Randomized.\n# error - This is the error value. Default is 0 but we go back and calculate it during back propagation.\n# isBiasNode - This is an easy way to keep track of whether a node is a bias node or not, as\n#\t\t\t   bias nodes don't have weights that connect to the previous layer.\nclass NeuralNode:\n\tdef __init__(self, value, prevLength, isBiasNode):\n\t\tself.value = value\n\t\tself.weights = []\n\t\tself.error = 0\n\t\tself.isBiasNode = isBiasNode\n\t\tif isBiasNode == False:\n\t\t\tfor x in range(prevLength):\n\t\t\t\tself.weights.append(random.uniform(-1,1))\n\t\t\n\nclass NeuralNetwork:\n\tdef __init__(self, inputValues, expectedOutput, hiddenLayersArray, nodesInOutputLayer):\n\t\tprint(\"Creating neural network with \", len(inputValues[0]), \" input nodes, \", len(hiddenLayersArray), \" hidden layers, and \", nodesInOutputLayer, \" nodes in the output layer.\")\n\t\tself.inputValues = inputValues\n\t\tself.expectedOutput = expectedOutput\n\t\tself.hiddenLayersArray = hiddenLayersArray\n\t\tself.networkLayers = []\n\t\t\n\t\t# add bias node to input layer with value -1\n\t\tinputLayer = [(NeuralNode(-1, False, True))]\n\t\t\n\t\tlastArray = len(inputValues[0])\n\t\tfor x in range(lastArray):\n\t\t\tinputLayer.append(NeuralNode(False, False, False))\n\t\tself.networkLayers.append(inputLayer)\n\t\t\n\t\t# Create hidden layers\n\t\thiddenLayers = []\n\t\tlastArray = len(inputLayer)\n\t\tfor x in hiddenLayersArray:\n#\t\t\tStart with bias node with value -1\n\t\t\tlayer = [(NeuralNode(-1, lastArray, False))]\n\t\t\tfor nodes in range(x):\n\t\t\t\tlayer.append(NeuralNode(False, lastArray, False))\n\t\t\tlastArray = len(layer)\n\t\t\tself.networkLayers.append(layer)\n\t\t\n\t\t# Create output layer\n\t\toutputLayer = []\n\t\tfor j in range(nodesInOutputLayer):\n\t\t\toutputLayer.append((NeuralNode(False, lastArray,False)))\n\t\tself.networkLayers.append(outputLayer)\t\t\n\t\t\n\tdef calculateResult(self, nInput):\n\t\tresultArray = []\n\t\tfor index, layer in enumerate(self.networkLayers):\n
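\t\t\t# layer 0 is the input layer: its non-bias nodes are loaded straight from the sample's features\n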
== 0:\n\t\t\t\tfor index2, node in enumerate(layer):\n\t\t\t\t\tif index2 != 0:\n\t\t\t\t\t\tnode.value = nInput[index2-1]\n\t\t\telse:\n\t\t\t\tfor nodeIndex, node in enumerate(layer):\n\t\t\t\t\tif node.isBiasNode == False:\t\n\t\t\t\t\t\tsumTotal = 0\n\t\t\t\t\t\tfor weightindex, weight in enumerate(node.weights):\n\t\t\t\t\t\t\tsumTotal = sumTotal + (weight * self.networkLayers[index - 1][weightindex].value)\n\t\t\t\t\t\t#add together all weights and values from previous node and run it through sigmoid\n\t\t\t\t\t\tnode.value = self.sigmoid(sumTotal)\n\t\t\t\t\t\tif index == len(self.networkLayers) - 1:\n\t\t\t\t\t\t\tresultArray.append(node.value)\n\t\treturn resultArray\n\t\n#\tSigmoid function.\n\tdef sigmoid(self, h):\n\t\treturn (1 / (1 + math.exp(-h)))\n\t\t\n\tdef backProp(self, expectedResult, result, learningRate):\n\t\t# loop through each layer and calculate error of each node\n\t\terrors = []\n\t\tfor i, layer in reversed(list(enumerate(self.networkLayers))):\n\t\t\tif i == len(self.networkLayers) - 1:\n\t\t\t\t# This is the output layer\n\t\t\t\tfor nodeIndex, node in enumerate(layer):\n\t\t\t\t\tnode.error = node.value * (1 - node.value) * (node.value - expectedResult[nodeIndex])\n\t\t\t\t\terrors.append(node.error)\n\t\t\telif i == 0:\n\t\t\t\t# This is the first layer. Skip the back propagation for this layer.\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\t# This is every other layer\n\t\t\t\tfor indexNode, node in enumerate(layer):\n\t\t\t\t\tsumOfErrors = 0\n\t\t\t\t\tfor prevIndex, previousNode in enumerate(self.networkLayers[i + 1]):\n\t\t\t\t\t\tif prevIndex != 0 or (prevIndex == 0 and i == len(self.networkLayers) - 2):\n\t\t\t\t\t\t\tsumOfErrors = sumOfErrors + (previousNode.error * previousNode.weights[indexNode])\n\t\t\t\t\tnode.error = node.value * (1 - node.value) * (sumOfErrors)\n\t\t\t\t\n\t\t# loop through again and change weights based on errors\n\t\tfor layerIndex, layer in enumerate(self.networkLayers):\n\t\t\tif layerIndex != 0:\n\t\t\t\tfor nodeIndex, node in enumerate(layer):\n#\t\t\t\t\tprint(\"Before node weights: \", node.weights)\n\t\t\t\t\tnewWeights = []\n\t\t\t\t\tfor weightIndex, weight in enumerate(node.weights):\n#\t\t\t\t\t\tprint(learningRate, node.error, node.value)\n\t\t\t\t\t\tnewWeights.append(weight - (learningRate * node.error * self.networkLayers[layerIndex - 1][weightIndex].value))\n\t\t\t\t\tnode.weights = list(newWeights)\n\t\treturn errors\n\t\t\t\n\t\nprint(\"Starting neural network tests. This takes a few minutes due to multiple tests on the iris dataset with a large number of training iterations.\")\nx1 = [[1.2, .6],\n\t\t[1.9, 2.1],\n\t\t[-1, 0.05],\n\t\t[0.2,-0.2]]\ny = [[1],[0],[1],[1]]\n\n# The hidden layers array is how we define how many hidden layers\n# there are and how many nodes are in each layer. For each integer\n# in the array there is a hidden layer, and that integer defines\n# how many nodes are in that layer.\nhiddenLayers = [2,4,8,8]\nepoch = 5000\nprint(\"Training with simple data set with \", epoch, \" iterations.\")\nnetwork = NeuralNetwork(x1, y, hiddenLayers, 1)\n\n# Train simple data set\nfor _ in range(epoch):\n\tfor x in range(len(x1)):\n\t\tresult = [network.calculateResult(x1[x])]\n\t\tnetwork.backProp(y[x], result, .1)\n\n# Test simple data set. 
Also add another point to see the output.\nfor x in range(len(x1)):\n\tresult = [network.calculateResult(x1[x])]\n\tprint(\"For \", x1[x], \" the result is : \", result, \" and the expected output is: \", y[x])\nresult = network.calculateResult([-1,0])\nprint(\"At data point (-1,0) the predicted result is: \", result)\n\t\n\t\n\n# This function serves two purposes. First, it can find the result\n# of the neural network based on the given arrays by subtracting the result\n# from 1. So if the 3 output nodes give [.01, .97, .02] then the correct\n# classifier is the 2nd one. This function will return that classifier.\n# It also serves a purpose to undo the preparation of the result data.\n# For example, I turned each expected output of \"1\" to [0,1,0] so it\n# could be used to calculate the error of the 3 output nodes. This\n# function will turn [0,1,0] back to \"1\" as the same calculation\n# works.\ndef findResults(possibleResults, results):\n\thighestDifference = 1\n\treturnIndex = len(results)\n\tfor index, result in enumerate(results):\n\t\ttempDiff = 1 - result\n\t\tif tempDiff < highestDifference:\n\t\t\thighestDifference = tempDiff\n\t\t\treturnIndex = index\n\treturn possibleResults[returnIndex]\n\t\ndef testIris(epoch, hiddenLayers, graphNumber):\n\tiris = datasets.load_iris()\n\txtrain, xtest, ytrain, ytest = train_test_split(iris.data, iris.target, test_size=0.3, random_state=42)\n\n\t# Prepare iris datasets\n\ttempYtrain = []\n\ttempYtest = []\n\tfor y in ytrain:\n\t\ttemp = []\n\t\tif y == 0:\n\t\t\ttemp = [1,0,0]\n\t\telif y == 1:\n\t\t\ttemp = [0,1,0]\n\t\telif y == 2:\n\t\t\ttemp = [0,0,1]\n\t\ttempYtrain.append(temp)\n\tytrain = tempYtrain\n\tfor y in ytest:\n\t\ttemp = []\n\t\tif y == 0:\n\t\t\ttemp = [1,0,0]\n\t\telif y == 1:\n\t\t\ttemp = [0,1,0]\n\t\telif y == 2:\n\t\t\ttemp = [0,0,1]\n\t\ttempYtest.append(temp)\n\tytest = tempYtest\n\t#xtrain matches with ytrain\n\t#xtest matches with ytest\n\n\tpossibleResults = [0,1,2]\n\tprint(\"Training with iris data set with \", epoch, \" iterations.\")\n\tnetwork2 = NeuralNetwork(xtrain, ytrain, hiddenLayers, 3)\n\terrorOverTime = []\n\t# Train the network with the xtrain and ytrain part of the data set (70% of original dataset)\n\tfor epoch2 in range(epoch):\n\t\tfor y in range(len(xtrain)):\n\t\t\tresult2 = network2.calculateResult(xtrain[y])\n\t\t\terror = network2.backProp(ytrain[y], result2, .1)\n\t\t\terror = (sum(error) / len(error))\n\t\t\terrorOverTime.append(error)\n\t\t\t\n\t\t\t\n\t# Here is where I graph the error. There is an error recorded for every\n\t# time we back propagate. Meaning the x axis will always be\n\t# (# of epochs * number of data). For the 1st and 3rd tests the error\n\t# smooths out over time (hence the high success rate of the network)\n\t# but in the 2nd one it doesn't (hence the low success rate). 
This is probably\n\t# due to the low number of epochs.\n\tfig = plt.figure(graphNumber)\n\tt = np.linspace(0, len(errorOverTime), len(errorOverTime), endpoint=True)\n\tplt.plot(t, errorOverTime, 'b-', label='error')\n\tplt.legend(loc = 'upper right')\n\tplt.xlabel('# of times backpropagated')\n\tplt.ylabel('y')\n\tfig.savefig(\"irisErrorPlot\" + str(graphNumber))\n\t\t\t\n\tcorrectCounter = 0\n\tfor y in range(len(xtest)):\n\t\tresult2 = network2.calculateResult(xtest[y])\n\t\tactualResult = findResults(possibleResults, result2)\n\t\texpectedResult = findResults(possibleResults, ytest[y])\n\t\tif expectedResult == actualResult:\n\t\t\tcorrectCounter = correctCounter + 1\n#\t\tprint(\"For \", xtest[y], \" the result is: \", actualResult, \" and the expected output is: \", expectedResult)\n\n\tprint(\"For the iris dataset with hidden arrays: \",hiddenLayers , \"and\", epoch ,\"iterations we correctly predict the data of the testing set \", ((correctCounter / len(xtest)) * 100), \"% of the time.\")\n\t\nepoch = 2000\nhiddenLayers2 = [2,4,3]\ntestIris(epoch, hiddenLayers2, 2)\n\nepoch = 1000\nhiddenLayers2 = [1,2,3,4,5]\ntestIris(epoch, hiddenLayers2, 3)\n\nepoch = 5000\nhiddenLayers2 = [8,8,8,2]\ntestIris(epoch, hiddenLayers2, 4)\n# Test the neural network with the xtest and ytest part of the data set (30% of the original dataset)\n\n\t\n\n\n\n\n","repo_name":"ChaseTJacobs/cs450","sub_path":"week8/neuralNetwork.py","file_name":"neuralNetwork.py","file_ext":"py","file_size_in_byte":9214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11947421899","text":"import email.message\nimport os\nimport sys\nimport re\nimport logging\n\n# get valid email addresses using regex\ndef get_valid_email_address(prompt):\n \"\"\"\n This function takes an email address prompt as input and returns a valid email address. \n It uses a regex to check whether the email address is in a valid format or not.\n \"\"\"\n\n email_regex = r\"[^@]+@[^@]+\\.[^@]+\"\n while True:\n email_address = input(prompt)\n if re.match(email_regex, email_address):\n return email_address\n print(\"Invalid email address format. Please enter a valid email address\")\n\n# add attachment to email message\ndef add_attachment(msg):\n \"\"\"\n This function takes an email message as input and adds attachments to it.\n It prompts the user to choose whether they want to attach a single file or multiple files.\n It then asks for the file/folder path and iterates through each file in the folder to add it as an attachment.\n If there are any errors (e.g. file not found, permission denied), it logs the error and continues with the next file.\n \"\"\"\n # attachment mode\n print(\"1.Single File\\n2.Multiple File\")\n while True:\n try:\n attach_mode = int(input(\"Choose 1 or 2: \"))\n # validating user input\n if attach_mode in (1,2):\n break\n except ValueError:\n pass\n print(\"Invalid Input.\")\n # Single File Mode\n if attach_mode == 1:\n file_path = input(\"File Path: \").strip('\"')\n print(\"\\n\")\n while not os.path.isfile(file_path):\n file_path = input(\"A directory path was provided instead of a file path. 
Please enter File Path: \").strip('\"')\n # resolve the file name up front so the except handler can reference it\n file_name = os.path.basename(file_path)\n try:\n # open file in binary mode\n with open(file_path, 'rb') as f:\n file_data = f.read()\n # Gets File extension and removes dot from the extension\n file_extension= os.path.splitext(file_name)[1].strip(\".\")\n # add file data as attachment to message\n msg.add_attachment(file_data,maintype='application',subtype=file_extension,filename=file_name)\n except (FileNotFoundError, PermissionError) as e:\n # log error and print error message\n logging.error(f\"Error: {str(e)} - {file_name}\")\n print(f\"Error: {file_name} not found or permission denied.\")\n # Multiple File Mode\n elif attach_mode == 2:\n success_file_count = 0\n failure_file_count = 0\n folderPath = input(\"\\nFolder Path: \").strip('\"')\n try:\n # traverse directory structure recursively\n for root,dirs,files in os.walk(folderPath):\n #iterate over the files in current directory\n for file_name in files:\n #construct file path\n file_path = os.path.join(root,file_name)\n try:\n with open(file_path, 'rb') as f:\n file_data = f.read()\n file_extension= os.path.splitext(file_name)[1].strip(\".\")\n msg.add_attachment(file_data,maintype='application',subtype=file_extension,filename=file_name)\n success_file_count+=1\n except PermissionError:\n logging.error(f\"Error: Permission denied {file_path}\")\n print(f\"File {file_name} not attached due to permission error.\")\n failure_file_count += 1\n except FileNotFoundError as e:\n logging.error(f\"{file_path} not found, {file_name}\")\n print(\"File Not Found.\")\n failure_file_count += 1\n except Exception as e:\n logging.error(f\"Error: {str(e)} - {file_name}\")\n print(\"Error occurred. Please check log.\")\n failure_file_count += 1\n except Exception as e:\n logging.error(f\"Error: {str(e)} - {folderPath}\")\n print(\"Error occurred. Please check log.\")\n if success_file_count > 0:\n print(f\"{success_file_count} files attached successfully.\")\n if failure_file_count > 0:\n print(f\"{failure_file_count} files failed to attach.\")\n else:\n print(\"Invalid Input. Please enter '1' or '2'.\")\n\nif __name__==\"__main__\":\n # configure logging settings\n logging.basicConfig(filename=\"attachment.log\",level=logging.ERROR, format='%(asctime)s %(levelname)s: %(message)s')\n \n attachment_confirmation = [\"y\",\"n\",\"yes\",\"no\"]\n attachment_positive_confirm= [\"y\",\"yes\"]\n \n print(\"\\n\\tEML Creation\\n\")\n\n if len(sys.argv) > 1:\n sender=sys.argv[1]\n receiver = sys.argv[2]\n subject = sys.argv[3]\n else:\n sender = get_valid_email_address(\"From: \")\n receiver = get_valid_email_address(\"To: \")\n subject = input(\"Subject: \")\n\n #creating empty message\n msg = email.message.EmailMessage()\n\n #setting message header\n msg['From'] = sender\n msg['To'] = receiver\n msg['Subject'] = subject\n\n #setting message body\n msg_body=input(\"Message Body: \")\n msg.set_content(msg_body)\n\n #attachment prompt\n attach_file= input(\"\\nAttach File To Email? (y/n): \").lower()\n while attach_file not in attachment_confirmation:\n attach_file = input(\"Invalid input. Attach File To Email? 
(y/n): \").lower()\n if(attach_file in attachment_positive_confirm):\n add_attachment(msg)\n \n #EML File Creation\n eml_file_name=input(\"\\nEnter the EML File Name: \")\n full_eml_file_name = eml_file_name + \".eml\"\n try:\n with open(full_eml_file_name, 'w') as f:\n f.write(str(msg))\n print(f\"'{eml_file_name}' has been saved as '{full_eml_file_name}'.\")\n except Exception as e:\n logging.error(f\"Error: {str(e)}\")\n print(f\"An error occurred while saving '{eml_file_name}' as '{full_eml_file_name}' : {str(e)}\")","repo_name":"V4n-q/create-eml","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27330878308","text":"import os\n\nimport requests\nimport shutil\n\n\ndef save_image_url(url, path, fn):\n options_list = []\n if url:\n r = requests.get(url, stream=True)\n if r.status_code == 200:\n with open(os.path.join(path, f'{fn}.jpg'), 'wb') as f:\n r.raw.decode_content = True\n shutil.copyfileobj(r.raw, f)\n options_list.append(['img_path', os.path.join(path, f'{fn}.jpg')])\n return options_list\n","repo_name":"backupalisher/parser_price","sub_path":"profit_msk/html_parser/save_image_url.py","file_name":"save_image_url.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29332604237","text":"from itertools import combinations\n\n\ndef get_dist(a, b):\n return abs(a[0]-b[0]) + abs(a[1]-b[1])\n\n\nn, m = map(int, input().split())\nbbq, houses = [], []\ncity = [list(map(int, input().split())) for _ in range(n)]\n\nfor i, r in enumerate(city):\n for j, c in enumerate(r):\n if c == 1:\n houses.append([i, j])\n elif c == 2:\n bbq.append([i, j])\nres = 10**10\nfor case in combinations(bbq, m):\n sum_dist = 0\n for house in houses:\n dist = 101\n for c in case:\n dist = min(get_dist(c, house), dist)\n sum_dist += dist\n res = min(sum_dist, res)\nprint(res)\n","repo_name":"wookkl/backjoon-problemsolving","sub_path":"[15686]치킨 배달.py","file_name":"[15686]치킨 배달.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"878873517","text":"'''\nCreated on Apr 25, 2019\n\n@author: NOOK\n'''\n\"\"\" Taus for full range valid thetas [2e-16, 1-2e-16]\nGamma: {0: 1.1102230246251565e-16, 1: 1.651158036900312e-08, 2: 0.2898979485566357, 3: 0.4627475401696348, 4: 0.5697090329565893, 5: 0.6416277095878444}\nVRF: {0: 3.0518509447574615e-05, 1: 1.4142135623842478, 2: 2.5495097571983933, 3: 3.8369548115879297, 4: 5.583955190144479, 5: 8.241797269321978}\n\"\"\"\n\nimport unittest\n\nfrom astropy.io.ascii.tests.common import assert_almost_equal\nfrom docutils.nodes import target\nfrom netCDF4 import Dataset, Group\nfrom nose.tools import assert_equal\nfrom numpy import arange, array2string, cov, log, var, zeros, trace, mean, std, transpose, \\\n concatenate, allclose, min, max, nonzero, cumsum, histogram, where, diag, ones\nfrom numpy import array, array as vector, linspace, any, diff, log10\nfrom numpy import sqrt\nfrom numpy.core.defchararray import isdecimal\nfrom numpy.linalg import inv\nfrom numpy.random import randn, seed, get_state\nfrom numpy.random.mtrand import multivariate_normal\nfrom TestUtilities import assert_allclose, assert_almost_equal, assert_array_less\nfrom overrides import overrides\nfrom runstats import Statistics\nfrom scipy.optimize.zeros import 
brentq\nfrom scipy.stats import kstest, chi2, lognorm, norm, anderson\nfrom typing import List;\n\nfrom TestData import TestData\nfrom TestSuite import slow, TestCaseBase\nfrom TestSuite import testDataPath;\nfrom TestUtilities import generateTestPolynomial, generateTestData, createTestGroup, writeTestVariable, A2S, \\\n covarianceToCorrelation, assert_report, assert_clear\nfrom polynomialfiltering.Main import AbstractFilter, FilterStatus\nfrom polynomialfiltering.PythonUtilities import assert_not_empty\nfrom polynomialfiltering.PythonUtilities import ignore, testcase, testclass, testclassmethod, forcestatic\nfrom polynomialfiltering.components.Emp import makeEmp, makeEmpCore, nSwitch, nUnitLastVRF\nfrom polynomialfiltering.components.Fmp import makeFmp, makeFmpCore\nfrom polynomialfiltering.components.ICore import ICore\nfrom polynomialfiltering.filters.RecursivePolynomialFilter import RecursivePolynomialFilter\n\n\nclass Fmp_test(TestCaseBase):\n\n\n def setUp(self):\n pass\n\n\n def tearDown(self):\n pass\n \n def getMinTheta(self, order: int, tau: float) -> float:\n if (order == 0) :\n return 1e-6;\n thresholds = array([0, 1.414213562, 2.549509757, 3.836954811, 5.583955189, 8.241797269]);\n if (tau > thresholds[order]) :\n return 0.5;\n v = 1.0;\n def targetMaxDiag(t :float ) -> float:\n c = makeFmpCore(order, tau, t);\n return max(diag(c.getVRF(0))) - v;\n \n t0 = brentq( targetMaxDiag, 1e-6, 1-1e-8 );\n return t0\n\n def generateStates(self, testData : TestData) -> None:\n# print(\"generateStates\")\n# N = array([64, 128, 5120, 512, 1024, 2048])\n setup = array([ # order, tau\n [0, 0.01],[0, 0.1], [0, 1.0], [0, 10.0], \n [1, 0.01],[1, 0.1], [1, 1.0], [1, 10.0], \n [2, 0.01], [2, 0.1], [2, 1.0], [2, 10.0], \n [3, 0.01], [3, 0.1], [3, 1.0], [3, 10.0], \n [4, 0.01],[4, 0.1], [4, 1.0], [4, 10.0], \n [5, 0.01],[5, 0.1], [5, 1.0], [5, 10.0]\n ])\n\n nPass = 0\n nFail = 0\n group = testData.createTestGroup( 'States')\n writeTestVariable(group, 'setup', setup)\n iCase = 1;\n for i in range(0,setup.shape[0]) :\n order = int(setup[i,0])\n tau = setup[i,1]\n minTheta = self.getMinTheta(order, tau);\n thetas = linspace(minTheta, 1-1e-6, 10 )\n for theta in thetas :\n t0 = 0.0\n n = 2*max((200, int(nSwitch(order, theta)) ))\n S = zeros([order+1, order+1])\n# print(order, tau, theta, n, int(nSwitch(order, theta)))\n if (n > 1000*(order+1) ) : # skip extremely long runs\n break;\n caseGroup = testData.createTestGroup( 'States_Case_%d' % iCase)\n iCase += 1\n nTrials = 25;\n # variance of sample variances for a standard normal distribtion\n varVar = (nTrials-1)**2/nTrials**3 * 3 - (nTrials-1)*(nTrials-3)/nTrials**3 * 1**2\n for it in range(0,nTrials) :\n f = makeFmp(order, tau, theta);\n# Y = generateTestPolynomial( order, n, t0, tau )\n Y = f.getCore().getGamma(0.0, 1.0) * randn(1,order+1)\n Y.shape = [order+1]\n R = 10.0\n (times, truth, observations, noise) = generateTestData(order, n, 0.0, Y[0:order+1], tau, sigma=R)\n expected = zeros([n, order+1]) \n residuals = zeros([n, order+1]) \n R = std(noise)\n V = R**2 * f.core.getVRF(0)\n# D = sqrt(diag(V))\n Y0 = multivariate_normal(Y, V); # Y + D * randn(Y.shape[0]) # \n if (it == 0) :\n testData.putInteger(caseGroup, 'order', order)\n testData.putScalar(caseGroup, 'tau', tau)\n testData.putScalar(caseGroup, 'theta', theta)\n testData.putArray(caseGroup, 'Y0', Y0)\n testData.putArray(caseGroup, 'times', times)\n testData.putArray(caseGroup, 'observations', observations)\n testData.putArray(caseGroup, 'truth', truth)\n f.start(0.0, Y0)\n# tchi2 = 
chi2.ppf(0.95, df=order+1)\n for j in range(0,times.shape[0]) :\n Zstar = f.predict(times[j][0])\n e = observations[j] - Zstar[0]\n f.update(times[j][0], Zstar, e)\n expected[j,:] = f.getState();\n residuals[j,:] = f.getState() - truth[j,:]\n# print(A2S(residuals))\n if (it == 0) :\n testData.putArray(caseGroup, 'expected', expected)\n C = cov(residuals, rowvar=False)\n# if (it == 0) :\n# print(A2S(V))\n# print(A2S(C))\n# if (any(C < 0.0)) :\n# print(A2S(C))\n# print(A2S(V))\n CV = (C/V);\n S = S + CV;\n# print('%4d %8.6f, %d, %6.3f, %6.2f, %6.3f, %6.3f' % \n# (it, 1-theta, order, tau, R, min(CV.flatten()), max(CV.flatten())))\n S = S / nTrials;\n S = S.flatten();\n stdVar = sqrt(varVar)\n lB = 1 - stdVar\n uB = 1 + stdVar\n pf = 'FAIL'\n if (lB <= mean(S) and mean(S) <= uB) :\n pf = 'PASS'\n nPass += 1\n else:\n nFail += 1\n print( '%5d %10.3f %10.8f %7d %10.3f, %10.3f, %10.3f, %10.3f %s' % \n (order, tau, theta, n, mean(S), std(S)/stdVar, min(S)/lB, max(S)/uB, pf) );\n print('%d passed; %d failed' % (nPass, nFail))\n assert_equal(nFail,0)\n \n def generateGammas(self, testData : TestData) -> None:\n \n self.iCase = 0;\n \"\"\"\n Gamma: {0: 1.1102230246251565e-16, 1: 1.651158036900312e-08, 2: 0.2898979485566357, 3: 0.4627475401696348, 4: 0.5697090329565893, 5: 0.6416277095878444}\n \"\"\"\n self.gammaTaus = {new_list: [] for new_list in range(0,5+1)}\n \n def isValid(G : array) -> bool:\n return (1 - max(G)) >= 0.0 and min(G) > 0.0\n \n def isMonotonic(G : array) -> bool:\n for ig in range(1, len(G)) :\n if (G[ig-1] <= G[ig]) :\n return False;\n return True;\n \n def analyze(group : Group, order:int, tau:float, theta:float) -> array:\n f = makeFmp( order, tau, theta );\n c = f.getCore();\n G = c.getGamma(0, 1.0)\n q = 'MONOTONIC'\n for ig in range(1, len(G)) :\n if (G[ig-1] <= G[ig]) :\n q = 'IRREGULAR'\n break;\n if (isValid(G)) :\n v = 'STABLE'\n else :\n v = 'UNSTABLE'\n if (q == 'IRREGULAR' or v == 'UNSTABLE') :\n return G\n caseGroup = createTestGroup(group, 'Gammas_Case_%d' % self.iCase)\n self.iCase += 1\n testData.putInteger(caseGroup, 'order', order)\n testData.putScalar(caseGroup, 'tau', tau)\n testData.putScalar(caseGroup, 'theta', theta)\n testData.putScalar(caseGroup, 'nS', nSwitch(order, theta))\n testData.putArray(caseGroup, 'G', G)\n print('%2d %8.2g %15.8g %15.8g %s %s %s' % (order, nSwitch(order, theta), theta, 1-theta, A2S(G), q, v))\n return G;\n \n group = testData.createTestGroup('Gammas')\n \n delta = 2.0**-32;\n while ((1-delta) != 1.0) :\n delta = delta/2\n delta = delta*2\n \n \n tau = 1.0 # gamma vectors are not dependant on tau\n for order in range(0,5+1) :\n \n def targetValid(theta: float) -> float:\n f = makeFmp( order, tau, theta );\n c = f.getCore();\n G = c.getGamma(0, 1.0)\n return (1-2*delta - max(G))\n \n t0 = brentq( targetValid, delta, 1.0-delta, xtol=delta );\n \n def targetMonotonic( theta: float) -> bool:\n f = makeFmp( order, tau, theta );\n c = f.getCore();\n G = c.getGamma(0, 1.0)\n return isMonotonic(G)\n \n if (order > 0) :\n if (not targetMonotonic(t0)) :\n t1 = 1.0 - delta;\n while (abs(t0-t1) > 2*delta) :\n th = 0.5 * (t0 + t1)\n if (targetMonotonic(th)) :\n t1 = th;\n else :\n t0 = th;\n t0 = t1\n \n theta = t0;\n self.gammaTaus[order] = theta;\n while (theta > 0.0) :\n G = analyze(group, order, tau, theta)\n if (any(G) <= 0.0) :\n break;\n theta = theta/2\n theta = 0.5;\n while (theta > 0.0) :\n G = analyze(group, order, tau, 1-theta)\n if (any(G) <= 0.0) :\n break;\n theta = theta/2\n else :\n self.gammaTaus[order] = delta;\n theta = 
0.999;\n while (theta > 0.0) :\n G = analyze(group, order, tau, 1-theta)\n if (any(G) <= 0.0) :\n break;\n theta = theta/2\n print('Gamma: ', self.gammaTaus )\n \n \n def generateVrfs(self, testData: TestData) -> None:\n \"\"\"\n {0: 3.0518509447574615e-05, 1: 1.4142135623842478, 2: 2.5495097571983933, 3: 3.8369548115879297, 4: 5.583955190144479, 5: 8.241797269321978}\n \"\"\"\n print('test8VRF')\n self.vrfTaus = {new_list: [0.0] for new_list in range(0,5+1)}\n group = testData.createTestGroup('Vrfs')\n self.iCase = 0;\n \n def isValid(V : array) -> bool:\n try :\n if (any(V.flatten() == 0) or any(V.flatten() > 1.0)) :\n return False\n except :\n return False\n return True\n \n def isDecreasing(V : array) -> bool:\n if (not isValid(V)) :\n return False\n for i in range(0, V.shape[0]) :\n if (any(V[i,:] != V[:,i])) :\n return False; # not symmetric\n for j in range(i+1, V.shape[0]) :\n if (V[i,j-1] <= V[i,j]) :\n return False\n return True\n \n delta = 2.0**-32;\n while ((1-delta) != 1.0) :\n delta = delta/2\n delta = delta*2\n for order in range(0, 5+1) :\n print(self.vrfTaus[order])\n for ptau in range(-14, 14+1) :\n tau = 2**ptau\n \n def targetDecreasing(theta : float) -> bool:\n try :\n f = makeFmp( order, tau, theta );\n V = f.getCore().getVRF(0)\n return isDecreasing(V)\n except :\n return False\n \n t0 = delta\n t1 = 1 - delta\n if (not targetDecreasing(t0)) :\n t1 = 1.0 - delta;\n while (abs(t0-t1) > 2*delta) :\n th = 0.5 * (t0 + t1)\n if (targetDecreasing(th)) :\n t1 = th;\n else :\n t0 = th;\n t0 = t1\n \n theta = t0; # first valid theta for tau at order\n f = makeFmp( order, tau, theta );\n V = f.getCore().getVRF(0)\n lowTheta = theta;\n \n# print(order, tau, theta, isDecreasing(V))\n if (self.vrfTaus[order] == 0.0 and theta == delta) :\n tau1 = tau # at tau1 is good VRF at theta == delta\n tau0 = tau/2 # was not good at tau/2\n while (abs(tau1-tau0) > 1e-9) :\n tau = 0.5*(tau1 + tau0)\n f = makeFmp( order, tau, delta );\n V = f.getCore().getVRF(0)\n# print(order, tau, isDecreasing(V))\n if (isDecreasing(V)) :\n tau1 = tau\n else :\n tau0 = tau\n f = makeFmp( order, tau1, delta );\n V = f.getCore().getVRF(0)\n# print(order, tau1, delta, isDecreasing(V)) \n self.vrfTaus[order] = tau1 \n \n t0 = lowTheta\n t1 = 1-delta\n if (not targetDecreasing(t1)) :\n while (abs(t0-t1) > 2*delta) :\n th = 0.5 * (t0 + t1)\n if (targetDecreasing(th)) :\n t1 = th;\n else :\n t0 = th;\n t0 = t1\n else :\n t0 = t1\n highTheta = t0\n print(order, tau, lowTheta, highTheta, log10(1-lowTheta), log10(1-highTheta)) \n for ps in linspace(log10(1-lowTheta), log10(1-highTheta), 10, endpoint=True) :\n theta = 1-10**ps;\n f = makeFmp( order, tau, theta );\n V = f.getCore().getVRF(0)\n caseGroup = testData.createTestGroup( 'Vrfs_Case_%d' % self.iCase)\n self.iCase += 1\n testData.putInteger(caseGroup, 'order', order)\n testData.putScalar(caseGroup, 'tau', tau)\n testData.putScalar(caseGroup, 'theta', theta)\n testData.putArray(caseGroup, 'V', V)\n print(order, tau, theta, isDecreasing(V))\n \n# tauFullRange = self.vrfTaus[order]\n# for ptau in range(-14, 14+1) :\n# tau = 2**ptau\n# # for theta in (0.1, 0.25, 0.5, 0.75, 0.90, 0.95, 0.99, 0.999) :\n# if (theta < t0) : \n# continue\n# f = makeFmp( order, tau, theta );\n# V = f.getCore().getVRF(0)\n# if (isValid(V)) :\n# C, d = covarianceToCorrelation(V)\n# print(order, tau, theta, isDecreasing(V))\n# print(A2S(C))\n print('VR: ', self.vrfTaus)\n\n \n def xstep0Generate(self):\n# order = 5\n# tau = 0.01\n# for i in range(1,6) :\n# theta = 1-10**-i\n# f = 
makeFmp(order, tau, theta);\n# print('VRF', order, tau, theta)\n# print(A2S(f.core._getVRF(tau, theta)))\n# \n print(\"test0Generate\")\n testData = TestData('testFMP.nc', \"w\");\n self.generateStates(testData)\n self.generateGammas(testData)\n self.generateVrfs(testData)\n testData.close()\n\n\n \n @testcase\n def step1CheckStates(self):\n '''@testData : TestData'''\n '''@matches : List[str]'''\n '''@order : int'''\n '''@setup : array'''\n '''@states : Group'''\n '''@cases : List[str]'''\n '''@caseName : str'''\n '''@caseGroup : Group'''\n '''@tau : float'''\n '''@theta : float'''\n '''@Y0 : array'''\n '''@times : array'''\n '''@observations : array'''\n '''@truth : array'''\n '''@expected : array'''\n '''@actual : array'''\n '''@f : RecursivePolynomialFilter'''\n '''@i : int'''\n '''@j : int'''\n '''@Zstar : array'''\n '''@e : float'''\n \n# print(\"test1CheckStates\")\n assert_clear()\n testData = TestData('testFMP.nc')\n matches = testData.getMatchingGroups('States')\n assert_not_empty(matches)\n setup = testData.getGroupVariable(matches[0], 'setup')\n states = testData.getGroup(matches[0])\n cases = testData.getMatchingGroups(\"Case_\");\n for i in range(0, len(cases)) :\n caseName = cases[i]\n caseGroup = testData.getGroup(caseName)\n order = testData.getInteger(caseGroup, 'order')\n tau = testData.getScalar(caseGroup, 'tau')\n theta = testData.getScalar(caseGroup, 'theta')\n Y0 = testData.getArray(caseGroup, 'Y0')\n times = testData.getArray(caseGroup, 'times')\n observations = testData.getArray(caseGroup, 'observations')\n truth = testData.getArray(caseGroup, 'truth')\n \n expected = testData.getArray(caseGroup, 'expected')\n actual = zeros([times.shape[0], order+1]) \n \n# print('%2d %6.3f %15.8g %8.2g %s' % (order, tau, theta, nSwitch(order,theta), A2S(Y0)) )\n \n f = makeFmp(order, tau, theta);\n f.start(0.0, Y0)\n for j in range(0,times.shape[0]) :\n Zstar = f.predict(times[j,0])\n e = observations[j] - Zstar[0]\n f.update(times[j,0], Zstar, e)\n actual[j,:] = transpose(f.getState());\n \n assert_allclose(actual, expected)\n assert_report(\"Fmp_test/test1CheckStates\", -1)\n testData.close()\n \n @testcase \n def step1CheckGammas(self) -> None:\n '''@testData : TestData'''\n '''@matches : List[str]'''\n '''@order : int'''\n '''@setup : array'''\n '''@states : Group'''\n '''@cases : List[str]'''\n '''@i : int'''\n '''@caseName : str'''\n '''@caseGroup : Group'''\n '''@tau : float'''\n '''@theta : float'''\n '''@Y0 : array'''\n '''@times : array'''\n '''@observations : array'''\n '''@truth : array'''\n '''@expectedG : array'''\n '''@actualG : array'''\n '''@nS : float'''\n '''@f : RecursivePolynomialFilter'''\n \n# print(\"test1CheckGammas\")\n assert_clear()\n testData = TestData('testFMP.nc')\n matches = testData.getMatchingGroups('Gammas')\n assert_not_empty(matches)\n states = testData.getGroup(matches[0])\n cases = testData.getMatchingGroups( \"Gammas_Case_\");\n for i in range(0, len(cases)) :\n caseName = cases[i]\n caseGroup = testData.getGroup(caseName)\n order = testData.getInteger(caseGroup, 'order')\n tau = testData.getScalar(caseGroup, 'tau')\n theta = testData.getScalar(caseGroup, 'theta')\n nS = testData.getScalar(caseGroup, 'nS')\n expectedG = testData.getArray(caseGroup, 'G')\n f = makeFmp( order, tau, theta );\n actualG = f.getCore().getGamma(0, 1.0)\n assert_allclose(nS, nSwitch(order, theta))\n assert_allclose(actualG, expectedG)\n assert_report(\"Fmp_test/test1CheckGammas\", -1)\n testData.close()\n \n \n @testcase \n def step1CheckVRF(self) -> None:\n 
'''@testData : TestData'''\n '''@matches : List[str]'''\n '''@order : int'''\n '''@setup : array'''\n '''@states : Group'''\n '''@cases : List[str]'''\n '''@i : int'''\n '''@caseName : str'''\n '''@caseGroup : Group'''\n '''@tau : float'''\n '''@theta : float'''\n '''@Y0 : array'''\n '''@times : array'''\n '''@observations : array'''\n '''@truth : array'''\n '''@expectedV : array'''\n '''@actualV : array'''\n '''@f : RecursivePolynomialFilter'''\n# print(\"test1CheckVRF\")\n assert_clear()\n testData = TestData('testFMP.nc')\n matches = testData.getMatchingGroups('Vrfs')\n assert_not_empty(matches)\n states = testData.getGroup(matches[0])\n cases = testData.getMatchingGroups(\"Vrfs_Case_\");\n for i in range(0, len(cases)) :\n caseName = cases[i]\n caseGroup = testData.getGroup(caseName)\n order = testData.getInteger(caseGroup, 'order')\n tau = testData.getScalar(caseGroup, 'tau')\n theta = testData.getScalar(caseGroup, 'theta')\n expectedV = testData.getArray(caseGroup, 'V')\n f = makeFmp( order, tau, theta );\n actualV = f.getCore().getVRF(0)\n assert_allclose(actualV, expectedV)\n assert_report(\"Fmp_test/test1CheckVrfs\", -1)\n testData.close()\n \n \n \n @testcase \n def step9CoreBasic(self) -> None:\n '''@core90 : ICore'''\n '''@core95 : ICore'''\n '''@core95half : ICore'''\n '''@core95double : ICore'''\n '''@ad : array'''\n '''@ah : array'''\n# print(\"test9CoreBasic\")\n assert_clear()\n core90 = makeFmpCore(3, 1.0, 0.90)\n core95 = makeFmpCore(3, 1.0, 0.95)\n core95half = makeFmpCore(3, 2.0, 0.95)\n core95double = makeFmpCore(3, 0.5, 0.95)\n \n assert_allclose( core90.getVRF(1), core90.getVRF(10) ) # should be time invariate\n assert_array_less( core95.getVRF(1), core90.getVRF(1))\n \n ad = (core95double.getVRF(1) / core95.getVRF(1))\n ah = (core95half.getVRF(1) / core95.getVRF(1))\n assert_allclose( ones([3+1,3+1]), ad * ah )\n \n assert_allclose( core90.getGamma(10.0, 5.0), core90.getGamma(11.0, 5.0) )\n assert_allclose( core90.getGamma(10.0, 5.0), core90.getGamma(10.0, 6.0) )\n assert_allclose( core95.getGamma(10.0, 5.0), core95half.getGamma(10.0, 5.0) ) \n assert_allclose( core95.getGamma(10.0, 5.0), core95double.getGamma(10.0, 5.0) ) \n assert_report(\"Fmp_test/test9CoreBasic\", -1)\n \n @testcase\n def step9Basic(self) -> None:\n '''@order : int'''\n '''@tau : float'''\n '''@theta : float'''\n '''@Y0 : array'''\n '''@observations : array'''\n '''@f : RecursivePolynomialFilter'''\n '''@t : float'''\n '''@Zstar : array'''\n '''@e : float'''\n '''@actual : array'''\n assert_clear()\n order = 5\n tau = 0.01\n theta = 0.9885155283985784\n actual = zeros([ order+1, 1])\n Y0 = array([ -5.373000000000E+00,-1.125200000000E+01,-1.740600000000E+01,-1.565700000000E+01,-7.458400000000E+00,-1.467800000000E+00 ]);\n observations = array([-5.2565E+00,-2.8652E+00,-1.4812E+01, 4.6590E+00, 4.7380E+00,-7.3765E+00, 1.3271E+01, 7.3593E+00, 3.4308E+00,-1.1329E+00,-1.5789E+00])\n f = makeFmp(order, tau, theta);\n f.start(0.0, Y0)\n t = 0\n Zstar = f.predict(t)\n assert_almost_equal(Zstar, array([ -5.373000000000E+00,-1.125200000000E-01,-1.740600000000E-03,-1.565700000000E-05,-7.458400000000E-08,-1.467800000000E-10 ])) \n e = observations[0] - Zstar[0]\n assert_almost_equal(e, 0.11650000000000027)\n actual = f.update(t, Zstar, e)\n assert_almost_equal(actual, array([ 7.800661521666E-03,2.252446577781E-04,3.468911273583E-06,3.005115515478E-08,1.388453238252E-10,2.672957382342E-13 ]))\n\n actual = f.getState();\n assert_almost_equal(actual, array([-5.365199338478335, -11.229475534222193, 
-17.37131088726417, -15.62694884484522, -7.444515467617483, -1.4651270426176584]))\n t += 0.01\n Zstar = f.predict(t)\n assert_almost_equal(Zstar, array([-5.47836527e+00, -1.14039712e-01, -1.75279528e-03, -1.57014673e-05, -7.45916674e-08, -1.46512704e-10]))\n e = observations[1] - Zstar[0]\n assert_almost_equal(e, 2.6131652669594962)\n f.update(t, Zstar, e)\n actual = f.getState();\n assert_almost_equal(actual, array([ -5.30339172, -10.89873388, -16.74985512, -15.02740172, -7.1477283, -1.405171 ]))\n assert_report(\"Fmp_test/test9Basic\", -1)\n \n @testcase \n def step9NSwitch(self) -> None:\n '''@emp : ICore'''\n '''@fmp : ICore'''\n '''@order : int'''\n '''@tau : float'''\n '''@itau : int'''\n '''@taus : array'''\n '''@theta : float'''\n '''@itheta : int'''\n '''@thetas : array'''\n '''@n : int'''\n '''@nThetas : int'''\n '''@nTaus : int'''\n \n# print(\"test9NSwitch\")\n assert_clear()\n taus = array([0.01, 0.1, 1, 10, 100]);\n nTaus = len(taus);\n thetas = array([0.90, 0.95, 0.99, 0.999])\n nThetas = len(thetas);\n for order in range(0,5+1) :\n for itheta in range(0, nThetas):\n theta = thetas[itheta]\n for itau in range(0, nTaus):\n tau = taus[itau]\n emp = makeEmpCore(order, tau)\n fmp = makeFmpCore(order, tau, theta)\n n = int(nSwitch( order, theta ))\n assert( fmp.getFirstVRF(0)/emp.getFirstVRF(n) < 1.25 )\n# print('%2d, %8.3f, %7.5f, %8.1f, %6.2f, %6.2f, %6.2f' %\n# (order, tau, theta, n, fmp.getFirstVRF(0)/emp.getFirstVRF(n-1), fmp.getFirstVRF(0)/emp.getFirstVRF(n), fmp.getFirstVRF(0)/emp.getFirstVRF(n+1)))\n \n \nif __name__ == \"__main__\":\n #import sys;sys.argv = ['', 'Test.testName']\n unittest.main()","repo_name":"lintondf/MorrisonPolynomialFiltering","sub_path":"Python/test/components/Fmp_test.py","file_name":"Fmp_test.py","file_ext":"py","file_size_in_byte":27053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13734940516","text":"import os\nimport argparse\n\nfrom src.util.spark_util import get_spark_session\nfrom pyspark.sql.functions import *\n\nspark = get_spark_session()\n\n\ndef read_and_normalize_streets(path, district_name):\n streets_df = spark.read.option(\"header\", True).csv(path)\n\n return streets_df.select(\"Crime ID\", \"Longitude\", \"Latitude\", \"Crime type\", \"Last outcome category\") \\\n .withColumnRenamed(\"Crime ID\", \"crimeId\") \\\n .withColumnRenamed(\"Crime type\", \"crimeType\") \\\n .withColumnRenamed(\"Last outcome category\", \"streets_lastOutcome\") \\\n .withColumn(\"district_name\", lit(district_name))\n\n\ndef read_and_normalize_outcomes(path):\n outcomes_df = spark.read.option(\"header\", True).csv(path)\n\n return outcomes_df.select(\"Crime ID\", \"Outcome type\") \\\n .withColumnRenamed(\"Crime ID\", \"crimeId\") \\\n .withColumnRenamed(\"Outcome type\", \"outcomeType\")\n\n\ndef write_dataset_as_parquet(df, path):\n df.coalesce(1) \\\n .write \\\n .partitionBy(\"district_name\", \"crimeType\", \"lastOutcome\") \\\n .mode(\"append\") \\\n .parquet(path, compression='gzip')\n\n\ndef extract_district(line):\n return line.replace(\"-street.csv\", \"\")[8:]\n\n\ndef main(input_csv_root_folder, output_parquet_folder):\n for item in os.listdir(input_csv_root_folder):\n if \".DS_Store\" not in item:\n nested = os.listdir(f\"{input_csv_root_folder}/{item}\")\n outcomes = set()\n streets = set()\n\n for element in nested:\n if 'outcomes' in element:\n outcomes.add(element)\n elif 'street' in element:\n streets.add(element)\n else:\n continue\n\n for street_path in streets:\n 
district_name = extract_district(street_path)\n\n current_street_df = read_and_normalize_streets(f\"{input_csv_root_folder}/{item}/{street_path}\",\n district_name)\n result_df = current_street_df\n\n outcome_path = street_path.replace(\"-street\", \"-outcomes\")\n\n if outcome_path in outcomes:\n current_outcome_df = read_and_normalize_outcomes(f\"{input_csv_root_folder}/{item}/{outcome_path}\")\n result_df = result_df.join(current_outcome_df, ['crimeId'], \"left\")\n\n result_df = result_df.select(\"crimeId\", \"district_name\", \"Longitude\", \"Latitude\", \"crimeType\",\n when(col(\"outcomeType\").isNotNull(), col(\"outcomeType\")).otherwise(\n col(\"streets_lastOutcome\")).alias(\"lastOutcome\"))\n\n write_dataset_as_parquet(result_df, output_parquet_folder)\n\n else:\n result_df = result_df.withColumnRenamed(\"streets_lastOutcome\", \"lastOutcome\")\n write_dataset_as_parquet(result_df, output_parquet_folder)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Args for script')\n parser.add_argument('--input_csv_path', type=str)\n parser.add_argument('--output_parquet_path', type=str)\n\n args = parser.parse_args()\n main(args.input_csv_path, args.output_parquet_path)\n","repo_name":"DiNoWaR/TRG_Task","sub_path":"src/util/convertor.py","file_name":"convertor.py","file_ext":"py","file_size_in_byte":3282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35366413945","text":"from lib.config import ClusterConfig, BoundBox, TimeWindow\nfrom lib.utils import GoogleArchive, Data\nfrom lib.models import MockModel\nimport ray\nfrom collections import defaultdict\nimport time as clock\nimport argparse\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"Sample application for running models\")\n parser.add_argument(\"--urls\", dest=\"urlCount\", help=\"Number of urls to download\", type=int, required=True)\n args = parser.parse_args()\n runtime_env = {\"working_dir\": \".\"}\n ray.init(address=\"auto\", runtime_env =runtime_env)\n global_timers = defaultdict(list)\n global_timers['e2e'].append(clock.time())\n global_timers['clusterCreation'].append(clock.time())\n c = ClusterConfig(len(ray.nodes()), ray.cluster_resources()['CPU'], int(ray.cluster_resources()['CPU']))\n c.initialize_workers()\n global_timers['clusterCreation'].append(clock.time())\n bound_box = BoundBox(12, 31, 12, 31)\n time_window = TimeWindow(1)\n global_timers['archiveDownload'].append(clock.time())\n archive = GoogleArchive(c)\n archive.query(bound_box, time_window).fetch_urls(args.urlCount).download()\n global_timers['archiveDownload'].append(clock.time())\n global_timers['redistribute'].append(clock.time())\n imageData = archive.redistribute()\n global_timers['redistribute'].append(clock.time())\n global_timers['applyModel'].append(clock.time())\n r_model = MockModel()\n imageData.apply_model(r_model)\n global_timers['applyModel'].append(clock.time())\n global_timers['collectFiles'].append(clock.time())\n imageData.collect_data()\n global_timers['collectFiles'].append(clock.time())\n global_timers['e2e'].append(clock.time())\n for key in global_timers.keys():\n print(f'{key}: {global_timers[key][1] - global_timers[key][0]}')\n","repo_name":"abhi6689-develop/TerraSight","sub_path":"src/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41801343344","text":"from 
textual.app import ComposeResult\nfrom textual.containers import Grid\nfrom textual.screen import ModalScreen\nfrom textual.widgets import Button, Label\n\n\nclass DeleteTask(ModalScreen[bool]):\n    \"\"\"Screen with a dialog to return yes or no\"\"\"\n\n    BINDINGS = [\n        (\"y\", \"say_yes\", \"\"),\n        (\"c\", \"say_no\", \"\"),\n    ]\n\n    def compose(self) -> ComposeResult:\n        \"\"\"Compose the screen\"\"\"\n        yield Grid(\n            Label(\"Delete task?\", id=\"question\"),\n            Button(\"(Y)es\", variant=\"error\", id=\"yes\"),\n            Button(\"(C)ancel\", variant=\"primary\", id=\"cancel\"),\n            id=\"dialog\",\n        )\n\n    def action_say_yes(self) -> None:\n        \"\"\"Press y key\"\"\"\n        self.dismiss(True)\n\n    def action_say_no(self) -> None:\n        \"\"\"Press c key\"\"\"\n        self.dismiss(False)\n\n    def on_button_pressed(self, event: Button.Pressed) -> None:\n        if event.button.id == \"yes\":\n            self.dismiss(True)\n        else:\n            self.dismiss(False)\n","repo_name":"justafewwords4/kultimate","sub_path":"kultimate/screens/deleteTask.py","file_name":"deleteTask.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2875978109","text":"import os\nimport glob\nimport Exercise1\n\n#1\ndef takes_a_path_to_folder(folder_path, out_file):\n    lines = [f for f in os.listdir(folder_path) if os.path.isfile(os.path.join(folder_path, f))]\n    \n    with open(out_file, 'w') as file_object:\n        for line in lines:\n            file_object.write(line + '\\n')\n\n#2\ndef find_files_in_all_subfolders(folder_path, ext):\n    #lines = glob.glob(folder_path + r'\\**\\*.py', recursive=True)\n    \n    subfolders, files = [], []\n    for f in os.scandir(folder_path):\n        if f.is_dir():\n            subfolders.append(f.path)\n        if f.is_file():\n            files.append(f.name)\n\n    for dir in list(subfolders):\n        sf, f = find_files_in_all_subfolders(dir, ext)\n        subfolders.extend(sf)\n        files.extend(f)\n\n    return subfolders, files \n\ndef files_in_subfolders(folder_path, out_file):\n    sf, f = find_files_in_all_subfolders(folder_path, [])\n\n    with open(out_file, 'w') as file_object:\n        for line in f:\n            file_object.write(line + '\\n')\n\n#3\ndef list_of_filenames(list_filenames):\n    for filename in list_filenames:\n        if not (filename.endswith('.py')):\n            with open(os.path.join(filename)) as f_obj:\n                print(f_obj.read().splitlines())\n\n#4\ndef contains_an_email(list_filenames):\n    for filename in list_filenames:\n        if not (filename.endswith('.py')):\n            with open(os.path.join(filename)) as f_obj:\n                for line in f_obj:\n                    if '@' in line:\n                        print(line)\n\n#5\ndef takes_list_of_md_files(md_list, md_out_file):\n    headers = []\n    for md_file in md_list:\n        if (md_file.endswith('.md')):\n            with open(md_file) as f_obj:\n                for line in f_obj:\n                    if '#' in line:\n                        headers.append(line)\n\n    with open(md_out_file, 'w') as file_object:\n        for header in headers:\n            file_object.write(header + '\\n')\n\nif __name__ == \"__main__\" :\n    folder_path = 'C:/Users/rasmu/Desktop/Python/Week-6/02-Exercise'\n    out_file = 'out_file.txt'\n    filenames = Exercise1.read_csv(folder_path+'/'+out_file)\n    md_list = ['C:/Users/rasmu/Desktop/Python/README.md','C:/Users/rasmu/Documents/github/testPipeline/README.md','C:/Users/rasmu/Documents/github/TestExamExerciseType2/README.md']\n    md_out_file = 'md_out_file.txt'\n\n    files_in_subfolders(folder_path, out_file)\n    takes_a_path_to_folder(folder_path, out_file)\n    list_of_filenames(filenames)\n    contains_an_email(filenames)\n    takes_list_of_md_files(md_list, 
md_out_file)","repo_name":"Rasm-P/Python","sub_path":"Week-6/02-Exercise/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2556,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"72766743945","text":"import argparse\nimport os\nimport sys\nimport uuid\n\nfrom common.s2n_test_common import wait_for_output\nfrom common.s2n_test_openssl import run_openssl_connection_test\nfrom common.s2n_test_scenario import get_scenarios, Mode, Cipher, Version, Curve\nfrom common.s2n_test_reporting import Result, Status\nimport common.s2n_test_common as util\nimport time\n\ndef verify_hrr_random_data(server, client):\n \"\"\"\n This callback verifies a HelloRetryRequest was sent from the S2N\n server. If the rest of the integration test passes as well, then\n the handshake completed after the HelloRetryRequest was sent.\n \"\"\"\n result = Result()\n result.status = Status.FAILED\n\n # Start of HRR random data which will be printed in the\n # client process output\n marker_found = False\n hello_count = 0\n finished_count = 0\n marker = b\"cf 21 ad 74 e5 9a 61 11 be 1d\"\n\n for line in client.stdout:\n if marker in line:\n marker_found = True\n if b'ClientHello' in line:\n hello_count += 1\n if b'], Finished' in line:\n finished_count += 1\n if marker_found and hello_count == 2 and finished_count == 2:\n result.status = Status.PASSED\n break\n\n\n return result\n\ndef key_update_send_and_receive(client, server, key_update_request):\n openssl_msg = \"Message:\" + str(uuid.uuid4())\n s2n_msg = \"Message:\" + str(uuid.uuid4())\n\n # Trigger Openssl to send a KeyUpdate message\n client.stdin.write((key_update_request + \"\\n\\n\").encode(\"utf-8\"))\n client.stdin.flush()\n\n # Confirm that the KeyUpdate was sent\n time.sleep(0.01)\n if not wait_for_output(client.stderr, 'KEYUPDATE', 100):\n return False\n\n # Write a message from Openssl\n client.stdin.write((openssl_msg + \"\\n\\n\").encode(\"utf-8\"))\n client.stdin.flush()\n\n # Confirm that s2n can decrypt msg\n if not (wait_for_output(server.stdout, openssl_msg, 100)):\n return False \n\n # Write a message from s2n\n server.stdin.write((s2n_msg + \"\\n\\n\").encode(\"utf-8\"))\n server.stdin.flush()\n\n # Confirm that Openssl can decrypt msg\n if not (wait_for_output(client.stdout, s2n_msg, 100)):\n return False\n \n return True\n\ndef key_update_test(server, client):\n '''\n This test proves that both a s2n server and an Openssl client can continue to encrypt and decrypt \n messages after a key update. 
It tests both update_not_requested\n and update_requested functionality described in RFC:\n https://tools.ietf.org/html/rfc8446#section-4.6.3 \n '''\n result = Result()\n result.status = Status.PASSED\n\n # 'K' triggers an update_requested message from Openssl\n if not key_update_send_and_receive(client, server, 'K'):\n result.status = Status.FAILED\n return result\n\n # 'k' triggers an update_not_requested message from Openssl\n if not key_update_send_and_receive(client, server, 'k'):\n result.status = Status.FAILED\n return result\n return result\n\ndef main():\n parser = argparse.ArgumentParser(description='Runs TLS1.3 minimal handshake integration tests against Openssl')\n parser.add_argument('host', help='The host to connect to')\n parser.add_argument('port', type=int, help='The port to bind to')\n\n args = parser.parse_args()\n host = args.host\n port = args.port\n\n failed = 0\n\n print(\"\\n\\tRunning TLS1.3 handshake tests with openssl: %s\" % os.popen('openssl version').read())\n failed += run_openssl_connection_test(get_scenarios(host, port, versions=[Version.TLS13], s2n_modes=Mode.all(), ciphers=Cipher.all()))\n print(\"\\n\\tRunning TLS1.3 HRR tests with openssl: %s\" % os.popen('openssl version').read())\n failed += run_openssl_connection_test(get_scenarios(host, port, versions=[Version.TLS13], s2n_modes=[Mode.server], ciphers=Cipher.all(),\n peer_flags=['-msg', '-curves', 'X448:P-256']), test_func=verify_hrr_random_data)\n print(\"\\n\\tRunning TLS1.3 key update tests with openssl: %s\" % os.popen('openssl version').read())\n failed += run_openssl_connection_test(get_scenarios(host, port, versions=[Version.TLS13], s2n_modes=[Mode.server], ciphers=Cipher.all()),\n test_func=key_update_test)\n\n return failed\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n\n","repo_name":"inxware/ert-contrib-middleware","sub_path":"contrib/s2n-tls/tests/integration/s2n_tls13_handshake_tests.py","file_name":"s2n_tls13_handshake_tests.py","file_ext":"py","file_size_in_byte":4358,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"43800930465","text":"# This problem was asked by Apple.\n# Implement the function fib(n), which returns the nth number in the Fibonacci sequence, \n# using only O(1) space.\n####\ndef fib(n):\n a = 1\n b = 1\n while n:\n a, b = b, a + b\n n -= 1\n return b\n####\nprint(fib(6))","repo_name":"whoophee/DCP","sub_path":"233.py","file_name":"233.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"38472882435","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Feb 25 02:53:39 2018\r\n\r\n@author: HP\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib as mplot\r\n\r\n#Task2F\r\ndef polyfit(dates, levels, p):\r\n x = mplot.dates.date2num(dates)\r\n d0 = np.min(x)\r\n \r\n p_coeff = np.polyfit(x - d0, levels, p)\r\n \r\n poly = np.poly1d(p_coeff)\r\n \r\n return poly, d0\r\n\r\n","repo_name":"andreusjh99/CamCwk","sub_path":"IA_Flood_Warning_System/floodsystem/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"72911564746","text":"from spikingjelly import datasets as sjds\nfrom typing import Callable, Dict, Optional, Tuple\nimport numpy as np\nimport os\nimport warnings\nfrom spikingjelly.datasets import configure\nnp_savez = np.savez_compressed if 
configure.save_datasets_compressed else np.savez\nwarnings.filterwarnings(\"always\", category=UserWarning, message='Detected a time step with no events within.')\nwarnings.filterwarnings(\"always\", category=UserWarning, message='Detected a time step possibly corrupted. t_min != t[0]')\n\ndef exp_decay_func(amp,t,t_now,tau):\n \"\"\"\n t: map of times or a given value of time\n t_0: actual time\n \"\"\"\n outp = amp * np.exp((t-t_now)/tau)\n return outp\n\ndef mycal_fixed_frames_number_segment(events_t: np.ndarray, split_by: str, frames_num: int) -> tuple:\n '''\n :param events_t: events' t\n :type events_t: numpy.ndarray\n :param split_by: 'time' or 'number'\n :type split_by: str\n :param frames_num: the number of frames\n :type frames_num: int\n :return: a tuple ``(j_l, j_r, dt)`` #Devuelve dos arrays con las posiciones de los índices del array tiempo donde empieza(j_l) y acaba(j_r) cada frame. Además devuelve el paso temporal en unidades de tiempo\n :rtype: tuple\n Denote ``frames_num`` as :math:`M`\n .. math::(split by time)\n \\\\Delta T & = [\\\\frac{t_{N-1} - t_{0}}{M}] \\\\\\\\\n j_{l} & = \\\\mathop{\\\\arg\\\\min}\\\\limits_{k} \\\\{t_{k} | t_{k} \\\\geq t_{0} + \\\\Delta T \\\\cdot j\\\\} \\\\\\\\\n j_{r} & = \\\\begin{cases} \\\\mathop{\\\\arg\\\\max}\\\\limits_{k} \\\\{t_{k} | t_{k} < t_{0} + \\\\Delta T \\\\cdot (j + 1)\\\\} + 1, & j < M - 1 \\\\cr N, & j = M - 1 \\\\end{cases}\n '''\n j_r = np.zeros(shape=[frames_num], dtype=int)\n N = events_t.size\n \n if split_by == 'number':\n j_l = np.zeros(shape=[frames_num], dtype=int)\n di = N // frames_num\n for i in range(frames_num):\n j_l[i] = i * di\n j_r[i] = j_l[i] + di\n j_r[-1] = N\n return j_l,j_r\n\n elif split_by == 'time':\n j_l = np.zeros(shape=[frames_num], dtype=int)\n dt = (events_t[-1] - events_t[0]) // frames_num\n #print(f't_0: {events_t[0]}, t_fin: {events_t[-1]}')\n idx = np.arange(N)\n for i in range(frames_num):\n t_l = dt * i + events_t[0]\n t_r = t_l + dt\n mask = np.logical_and(events_t >= t_l, events_t < t_r)\n #print(f' t_i: {t_l}, t_fi: {t_r} ', np.all(mask ==False))\n idx_masked = idx[mask]\n if len(idx_masked) == 0: #Este if es añadido por mi. \n warnings.warn('Detected a time step with no events within.',UserWarning)\n j_l[i] = j_r[i-1]\n j_r[i] = j_l[i]\n else:\n j_l[i] = idx_masked[0]\n j_r[i] = idx_masked[-1] + 1\n if events_t[j_l[i]] != events_t[ j_l[i] : j_r[i]].min():\n warnings.warn('Detected a time step possibly corrupted. t_min != t[0]',UserWarning)\n\n j_r[-1] = N \n return j_l, j_r\n \n elif split_by == 'exp_decay':\n dt = (events_t[-1] - events_t[0]) // frames_num\n idx = np.arange(N)\n for i in range(frames_num):\n #t_l = dt * i + events_t[0]\n t_r = events_t[0] + dt*(i +1)\n mask = np.logical_and(events_t >= events_t[0], events_t < t_r)\n idx_masked = idx[mask]\n if i == 0:\n j_l_0 = idx_masked[0]\n j_r[i] = idx_masked[-1] + 1\n\n j_r[-1] = N\n return j_l_0, j_r, dt\n else:\n raise NotImplementedError\n\ndef leave_last_xyp_values_in_time(x,y,p):\n #np.unique puede devolver los índices de las primeras apariciones de los valores. Le doy la vuelta al array para obtener las posiciones de los últimos. 
Estas posiciones estarán 'invertidas'\n uniq_values, positions_inverted = np.unique(list(zip(x,y,p))[::-1],axis=0,return_index=True)\n #Revertimos posiciones\n positions = len(x) - 1 - positions_inverted\n return uniq_values,positions\n\ndef integrate_events_segment_to_frame_bydecay(t: np.ndarray, x: np.ndarray, y: np.ndarray, p: np.ndarray, H: int, W: int,\n j_l_0: int = 0, j_r: int = -1,dt:float = 1e6,factor_tau: float = 1.5, scale_factor: int = 50) -> np.ndarray:\n '''\n :param x: x-coordinate of events\n :type x: numpy.ndarray\n :param y: y-coordinate of events\n :type y: numpy.ndarray\n :param p: polarity of events\n :type p: numpy.ndarray\n :param H: height of the frame\n :type H: int\n :param W: weight of the frame\n :type W: int\n :param j_l: the start index of the integral interval, which is included\n :type j_l: int\n :param j_r: the right index of the integral interval, which is not included\n :type j_r:\n :return: frames\n :rtype: np.ndarray\n Denote a two channels frame as :math:`F` and a pixel at :math:`(p, x, y)` as :math:`F(p, x, y)`, the pixel value is integrated from the events data whose indices are in :math:`[j_{l}, j_{r})`:\n '''\n\n frame = np.zeros([2, H, W])\n t = t[j_l_0:j_r]\n x = x[j_l_0:j_r]\n y = y[j_l_0:j_r]\n p = p[j_l_0:j_r]\n if t.min() != t[0]:\n warnings.warn('Detected a time step possibly corrupted. t_min != t[0]',UserWarning)\n #Taking unique (x,y,p) values. Leaving only last values of (x,y,p) if repeated(keeping last in time).\n uniq_values, idx_positions = leave_last_xyp_values_in_time(x,y,p)\n #Asigning time values in x,y,p positions obtained\n x_uniq, y_uniq, p_uniq = uniq_values[:,0], uniq_values[:,1], uniq_values[:,2]\n frame[p_uniq, y_uniq, x_uniq] = t[idx_positions]\n #Calculating time decay\n frame_decayed = exp_decay_func(amp = scale_factor,t = frame, t_now = t[-1],tau = factor_tau*dt).astype(int)\n return frame_decayed\n\n\ndef myintegrate_events_by_fixed_frames_number(events: Dict, split_by: str, frames_num: int, H: int, W: int, factor_tau: float = 0.8, scale_factor: int = 50) -> np.ndarray:\n '''\n :param events: a dict whose keys are ``['t', 'x', 'y', 'p']`` and values are ``numpy.ndarray``\n :type events: Dict\n :param frames_num: the number of frames\n :type frames_num: int\n :param H: the height of frame\n :type H: int\n :param W: the weight of frame\n :type W: int\n :return: frames\n :rtype: np.ndarray\n Integrate events to frames by fixed frames number. 
\n '''\n t, x, y, p = (events[key] for key in ('t', 'x', 'y', 'p'))\n #Putting time from us to ms.(ASUMING TIME IN US(MICROSECONDS)) (Avoid memmory leakage)\n #t = t*1e-3\n if split_by == 'exp_decay':\n j_l_0, j_r, dt = mycal_fixed_frames_number_segment(events_t = t, split_by = split_by, frames_num = frames_num)\n else:\n j_l, j_r = mycal_fixed_frames_number_segment(events_t = t, split_by = split_by, frames_num = frames_num)\n\n frames = np.zeros([frames_num, 2, H, W]).astype('int')\n for i in range(frames_num): \n if split_by == 'exp_decay':\n frames[i] = integrate_events_segment_to_frame_bydecay(t = t, x = x, y = y, p = p, H = H, W = W, j_l_0 = j_l_0, j_r = j_r[i],\n dt = dt, factor_tau = factor_tau, scale_factor = scale_factor)\n else:\n frames[i] = sjds.integrate_events_segment_to_frame(x, y, p, H, W, j_l[i], j_r[i])\n \n return frames\n\n#A continuation of integrate_events_file_to_frames_file_by_fixed_frames_number\ndef integrate_events_to_frame_wfixed_frames_num(loader: Callable, events_np_file: str, output_dir: str, split_by: str, frames_num: int, H: int, W: int,\n print_save: bool = False, factor_tau: float = 0.8, scale_factor: int = 50) -> None:\n if events_np_file.endswith('.npy'):\n fname = os.path.join(output_dir, os.path.basename(events_np_file[:-4]))\n else:\n fname = os.path.join(output_dir, os.path.basename(events_np_file))\n np_savez(fname, frames=myintegrate_events_by_fixed_frames_number(loader(events_np_file),split_by, frames_num, H, W, factor_tau, scale_factor))\n if print_save:\n print(f'Frames [{fname}] saved.')\n","repo_name":"marcosesgonz/SNN_VisProc","sub_path":"src/event_integration_to_frame.py","file_name":"event_integration_to_frame.py","file_ext":"py","file_size_in_byte":8119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18134023861","text":"import email\nfrom flask_login import current_user\nfrom .models import *\nfrom datetime import datetime, date, timedelta \nfrom dateutil.relativedelta import relativedelta\nimport base64\nfrom werkzeug.security import generate_password_hash, check_password_hash\nfrom flask import make_response, redirect, render_template, request, url_for\n\ndef obtener_encuestas():\n \"\"\"Consulta para obtener todas las encuestas de la bd\"\"\"\n lista_encuestas = []\n if db.session.query(Encuesta).first() == None:\n return lista_encuestas\n else:\n tuplas_de_encuestas = db.session.query(Encuesta).order_by(Encuesta.id_encuesta).all()\n for tupla_encuesta in tuplas_de_encuestas:\n crea_encuesta_aux = db.session.query(Crea_Encuesta).filter_by(id_encuesta=tupla_encuesta.id_encuesta).first()\n admin_aux = db.session.query(Admin).filter_by(id_admin=crea_encuesta_aux.id_admin).first()\n datos_encuesta_aux = {}\n datos_encuesta_aux.clear()\n if tupla_encuesta.fecha_fin == None:\n datos_encuesta_aux = {\n \"id_survey\": tupla_encuesta.id_encuesta,\n \"title\": tupla_encuesta.titulo,\n \"description\": tupla_encuesta.descripcion,\n \"start_date\": tupla_encuesta.fecha_inicio.strftime(\"%d-%m-%Y\"),\n \"end_date\": \"\",\n \"active\" : tupla_encuesta.activa,\n \"visits\": tupla_encuesta.visitas,\n \"answers\": {\"total\":tupla_encuesta.total_asignados,\"current_answers\": tupla_encuesta.respuestas},\n \"author\": admin_aux.nombre,\n \"asigned\" : tupla_encuesta.total_asignados\n }\n else:\n datos_encuesta_aux = {\n \"id_survey\": tupla_encuesta.id_encuesta,\n \"title\": tupla_encuesta.titulo,\n \"description\": tupla_encuesta.descripcion,\n \"start_date\": 
tupla_encuesta.fecha_inicio.strftime(\"%d-%m-%Y\"),\n \"end_date\": tupla_encuesta.fecha_fin,\n \"active\" : tupla_encuesta.activa,\n \"visits\": tupla_encuesta.visitas,\n \"answers\": {\"total\":tupla_encuesta.total_asignados,\"current_answers\": tupla_encuesta.respuestas},\n \"author\": admin_aux.nombre,\n \"asigned\" : tupla_encuesta.total_asignados\n }\n lista_encuestas.append(datos_encuesta_aux)\n return lista_encuestas\n\ndef obtener_usuarios():\n \"\"\"Consulta para obtener todos los usuarios encuestados (invitados y registrados)\"\"\"\n dataUsers = []\n if db.session.query(Encuestado).first() == None:\n return dataUsers\n else:\n tuplas_encuestados = db.session.query(Encuestado).order_by(Encuestado.email).all()\n i = 0\n for tupla_encuestado in tuplas_encuestados:\n datos_encuestado_aux = {}\n datos_encuestado_aux.clear()\n if db.session.query(Registrado).filter_by(email=tupla_encuestado.email).first() == None:\n datos_encuestado_aux = {\n \"id_user\": i,\n \"name\": \"Invitado\",\n \"lastName\": \"None\",\n \"email\": tupla_encuestado.email,\n \"age\": \"None\",\n \"registration_date\": None,\n \"gender\": None,\n \"state\": tupla_encuestado.activo,\n \"rut\": \"xx.xxx.xxx-x\"\n }\n else:\n tupla_registrado_aux = db.session.query(Registrado).filter_by(email=tupla_encuestado.email).first()\n datos_encuestado_aux = {\n \"id_user\": i,\n \"name\": tupla_registrado_aux.nombre,\n \"lastName\": tupla_registrado_aux.apellidos,\n \"email\": tupla_encuestado.email,\n \"age\": relativedelta(datetime.now(),datetime.combine(tupla_registrado_aux.fecha_nacimiento, datetime.min.time())).years,\n \"registration_date\": tupla_registrado_aux.fecha_registro.strftime(\"%d-%m-%Y\"),\n \"gender\": tupla_registrado_aux.genero,\n \"state\": tupla_encuestado.activo,\n \"rut\": tupla_registrado_aux.rut\n }\n dataUsers.append(datos_encuestado_aux)\n i = i + 1\n return dataUsers\n\ndef obtener_cantidad_registrados_e_invitados():\n cantidad_registrados = db.session.query(Registrado).count()\n cantidad_invitados = db.session.query(Encuestado).count()\n dataChart = {\n \"anonymous\": cantidad_invitados - cantidad_registrados,\n \"registered\": cantidad_registrados\n }\n return dataChart\n\ndef obtener_encuesta_creada(id_encuesta):\n \"\"\"Consulta para obtener datos de las preguntas\"\"\"\n ids_preguntas_alternativas = []\n numeros_preguntas_alternativas = []\n enunciados_preguntas_alternativas = []\n cantidad_opciones_por_pregunta = []\n ids_opciones = []\n strings_opciones = []\n if db.session.query(Alternativa_Encuesta).filter_by(id_encuesta=id_encuesta).first() != None:\n tuplas_alternativa_encuesta = db.session.query(\n Alternativa_Encuesta).filter_by(id_encuesta=id_encuesta).all()\n for tupla_alternativa_encuesta in tuplas_alternativa_encuesta:\n ids_preguntas_alternativas.append(\n tupla_alternativa_encuesta.id_pregunta_alternativa)\n tuplas_pregunta_alternativa = db.session.query(Pregunta_Alternativa).filter(\n Pregunta_Alternativa.id_pregunta_alternativa.in_(ids_preguntas_alternativas)).order_by(Pregunta_Alternativa.numero).all()\n ids_preguntas_alternativas.clear()\n for tupla_pregunta_alternativa in tuplas_pregunta_alternativa:\n ids_preguntas_alternativas.append(tupla_pregunta_alternativa.id_pregunta_alternativa)\n numeros_preguntas_alternativas.append(\n tupla_pregunta_alternativa.numero)\n enunciados_preguntas_alternativas.append(\n tupla_pregunta_alternativa.enunciado)\n for id_pregunta_alternativa in ids_preguntas_alternativas:\n if 
db.session.query(Alternativas).filter_by(id_pregunta_alternativa=id_pregunta_alternativa).first() != None:\n tuplas_alternativas_aux = db.session.query(Alternativas).filter_by(\n id_pregunta_alternativa=id_pregunta_alternativa).all()\n k = 0\n for tupla_alternativa in tuplas_alternativas_aux:\n ids_opciones.append(tupla_alternativa.id_opcion)\n tupla_opcion_aux = db.session.query(Opcion).filter_by(id_opcion=tupla_alternativa.id_opcion).first()\n strings_opciones.append(tupla_opcion_aux.opcion)\n k = k + 1\n cantidad_opciones_por_pregunta.append(k)\n # id_opciones[i] y string_opciones[i] es una i-esima opcion\n\n \"\"\"Se crea diccionario con las listas que contienen datos de la encuesta\"\"\"\n datos_encuesta_creada = {\n \"ids_preguntas_alternativas\": ids_preguntas_alternativas,\n \"numeros_preguntas_alternativas\": numeros_preguntas_alternativas,\n \"enunciados_preguntas_alternativas\": enunciados_preguntas_alternativas,\n \"cantidad_opciones_por_pregunta\": cantidad_opciones_por_pregunta,\n \"ids_opciones\": ids_opciones,\n \"strings_opciones\": strings_opciones\n }\n return datos_encuesta_creada\n\ndef guardar_encuesta(surveyData,id_admin):\n if surveyData[\"title\"] == \"\":\n encuesta_aux = Encuesta(titulo=\"titulo encuesta por defecto\", descripcion=surveyData[\"description\"],\n fecha_inicio=date.today(), activa=False, visitas=0, respuestas=0, total_asignados=0, asunto_mail=\"\", mensaje_mail=\"\")\n else:\n encuesta_aux = Encuesta(titulo=surveyData[\"title\"], descripcion=surveyData[\"description\"],\n fecha_inicio=date.today(), activa=False, visitas=0, respuestas=0, total_asignados=0, asunto_mail=\"\", mensaje_mail=\"\")\n db.session.add(encuesta_aux)\n db.session.commit()\n crea_encuesta_aux = Crea_Encuesta.insert().values(\n id_admin=id_admin, id_encuesta=encuesta_aux.id_encuesta)\n db.engine.execute(crea_encuesta_aux)\n db.session.commit()\n i = 1\n for pregunta in surveyData[\"questions\"]:\n pregunta_alternativa_aux = Pregunta_Alternativa(\n enunciado=pregunta[\"statement\"], numero=i)\n db.session.add(pregunta_alternativa_aux)\n db.session.commit()\n alternativa_encuesta_aux = Alternativa_Encuesta.insert().values(\n id_encuesta=encuesta_aux.id_encuesta, id_pregunta_alternativa=pregunta_alternativa_aux.id_pregunta_alternativa)\n db.engine.execute(alternativa_encuesta_aux)\n db.session.commit()\n for alternativa in pregunta[\"alternatives\"]:\n opcion_aux = Opcion(opcion=alternativa[\"textAlt\"])\n db.session.add(opcion_aux)\n db.session.commit()\n alternativas_aux = Alternativas.insert().values(\n id_pregunta_alternativa=pregunta_alternativa_aux.id_pregunta_alternativa, id_opcion=opcion_aux.id_opcion)\n db.engine.execute(alternativas_aux)\n db.session.commit()\n i = i + 1\n return \"Encuesta Guardada\"\n\ndef modificar_encuesta(surveyData):\n\n # Modifica titulo y descripcion de la encuesta\n nueva_encuesta = db.session.query(Encuesta).filter_by(id_encuesta=surveyData[\"id\"]).first()\n if surveyData[\"title\"] == \"\":\n nueva_encuesta.titulo = \"titulo encuesta por defecto\"\n else:\n nueva_encuesta.titulo = surveyData[\"title\"]\n nueva_encuesta.descripcion = surveyData[\"description\"]\n db.session.commit()\n\n # Elimina las preguntas de la encuesta que se quitaron en la interfaz\n ids_preguntas_a_borrar = []\n ids_preguntas_surveyData = []\n for pregunta in surveyData[\"questions\"]:\n ids_preguntas_surveyData.append(pregunta[\"id\"])\n\n if db.session.query(Alternativa_Encuesta).filter_by(id_encuesta=surveyData[\"id\"]).first() != None:\n tuplas_alternativa_encuesta = 
db.session.query(Alternativa_Encuesta).filter_by(id_encuesta=surveyData[\"id\"]).all()\n for tupla_alternativa_encuesta in tuplas_alternativa_encuesta:\n if tupla_alternativa_encuesta.id_pregunta_alternativa not in ids_preguntas_surveyData:\n borra_tupla_alternativa_encuesta = Alternativa_Encuesta.delete().where(Alternativa_Encuesta.c.id_pregunta_alternativa == tupla_alternativa_encuesta.id_pregunta_alternativa)\n db.engine.execute(borra_tupla_alternativa_encuesta)\n db.session.commit()\n ids_preguntas_a_borrar.append(tupla_alternativa_encuesta.id_pregunta_alternativa)\n\n ids_opciones_a_borrar = []\n for id_pregunta_a_borrar in ids_preguntas_a_borrar:\n if db.session.query(Alternativas).filter_by(id_pregunta_alternativa=id_pregunta_a_borrar).first() != None:\n tuplas_alternativas_aux = db.session.query(Alternativas).filter_by(\n id_pregunta_alternativa=id_pregunta_a_borrar).all()\n for tupla_alternativa in tuplas_alternativas_aux:\n ids_opciones_a_borrar.append(tupla_alternativa.id_opcion)\n borra_tuplas_alternativas = Alternativas.delete().where(Alternativas.c.id_pregunta_alternativa == id_pregunta_a_borrar)\n db.engine.execute(borra_tuplas_alternativas)\n db.session.commit()\n\n tuplas_opcion = db.session.query(Opcion).filter(Opcion.id_opcion.in_(ids_opciones_a_borrar)).all()\n for tupla_opcion in tuplas_opcion:\n db.session.delete(tupla_opcion)\n db.session.commit()\n\n tuplas_pregunta_alternativa = db.session.query(Pregunta_Alternativa).filter(\n Pregunta_Alternativa.id_pregunta_alternativa.in_(ids_preguntas_a_borrar)).all()\n for tupla_pregunta_alternativa in tuplas_pregunta_alternativa:\n db.session.delete(tupla_pregunta_alternativa)\n db.session.commit()\n\n # Crea lista de preguntas a modificar\n ids_preguntas_a_modificar = []\n if db.session.query(Alternativa_Encuesta).filter_by(id_encuesta=surveyData[\"id\"]).first() != None:\n tuplas_alternativa_encuesta = db.session.query(Alternativa_Encuesta).filter_by(id_encuesta=surveyData[\"id\"]).all()\n for tupla_alternativa_encuesta in tuplas_alternativa_encuesta:\n ids_preguntas_a_modificar.append(tupla_alternativa_encuesta.id_pregunta_alternativa)\n\n i = 1\n for pregunta in surveyData[\"questions\"]:\n # Modifica preguntas de acuerdo a la lista\n if pregunta[\"id\"] in ids_preguntas_a_modificar:\n pregunta_alternativa_aux = db.session.query(Pregunta_Alternativa).filter_by(id_pregunta_alternativa=pregunta[\"id\"]).first()\n pregunta_alternativa_aux.numero = i\n pregunta_alternativa_aux.enunciado = pregunta[\"statement\"]\n db.session.commit()\n # Elimina las alternativas que ya no incluye la pregunta\n ids_opciones_surveyData = []\n ids_opciones_a_borrar2 = []\n\n for alternativa in pregunta[\"alternatives\"]:\n ids_opciones_surveyData.append(alternativa[\"id\"])\n \n tuplas_alternativas_aux = db.session.query(Alternativas).filter_by(\n id_pregunta_alternativa=pregunta[\"id\"]).all()\n for tupla_alternativa in tuplas_alternativas_aux:\n if tupla_alternativa.id_opcion not in ids_opciones_surveyData:\n ids_opciones_a_borrar2.append(tupla_alternativa.id_opcion)\n borra_tupla_alternativas = Alternativas.delete().where(Alternativas.c.id_opcion == tupla_alternativa.id_opcion)\n db.engine.execute(borra_tupla_alternativas)\n db.session.commit()\n tuplas_opcion = db.session.query(Opcion).filter(Opcion.id_opcion.in_(ids_opciones_a_borrar2)).all()\n \n for tupla_opcion in tuplas_opcion:\n db.session.delete(tupla_opcion)\n db.session.commit()\n \n ids_opciones_a_borrar2.clear()\n\n # Crea lista de opciones a modificar\n ids_opciones_a_modificar = 
[]\n if db.session.query(Alternativas).filter_by(id_pregunta_alternativa=pregunta[\"id\"]).first() != None:\n tuplas_alternativas_aux = db.session.query(Alternativas).filter_by(\n id_pregunta_alternativa=pregunta[\"id\"]).all()\n for tupla_alternativa_aux in tuplas_alternativas_aux:\n if tupla_alternativa_aux.id_opcion in ids_opciones_surveyData:\n ids_opciones_a_modificar.append(tupla_alternativa_aux.id_opcion)\n \n for alternativa in pregunta[\"alternatives\"]:\n # Modifica opciones de acuerdo a la lista\n if alternativa[\"id\"] in ids_opciones_a_modificar:\n opcion_aux = db.session.query(Opcion).filter_by(id_opcion=alternativa[\"id\"]).first()\n opcion_aux.opcion = alternativa[\"textAlt\"]\n db.session.commit()\n # Crea las opciones que no estaban en la lista\n else:\n opcion_aux = Opcion(opcion=alternativa[\"textAlt\"])\n db.session.add(opcion_aux)\n db.session.commit()\n alternativas_aux = Alternativas.insert().values(\n id_pregunta_alternativa=pregunta[\"id\"], id_opcion=opcion_aux.id_opcion)\n db.engine.execute(alternativas_aux)\n db.session.commit()\n\n ids_opciones_a_modificar.clear()\n ids_opciones_surveyData.clear()\n # Crea las preguntas que no estaban en la lista\n else:\n pregunta_alternativa_aux = Pregunta_Alternativa(\n enunciado=pregunta[\"statement\"], numero=i)\n db.session.add(pregunta_alternativa_aux)\n db.session.commit()\n\n alternativa_encuesta_aux = Alternativa_Encuesta.insert().values(\n id_encuesta=surveyData[\"id\"], id_pregunta_alternativa=pregunta_alternativa_aux.id_pregunta_alternativa)\n db.engine.execute(alternativa_encuesta_aux)\n db.session.commit()\n for alternativa in pregunta[\"alternatives\"]:\n opcion_aux = Opcion(opcion=alternativa[\"textAlt\"])\n db.session.add(opcion_aux)\n db.session.commit()\n alternativas_aux = Alternativas.insert().values(\n id_pregunta_alternativa=pregunta_alternativa_aux.id_pregunta_alternativa, id_opcion=opcion_aux.id_opcion)\n db.engine.execute(alternativas_aux)\n db.session.commit()\n i = i + 1\n\n return(\"Modificacion Exitosa\")\n\n\ndef eliminar_encuesta(id_encuesta):\n ids_preguntas_alternativas = []\n if db.session.query(Alternativa_Encuesta).filter_by(id_encuesta=id_encuesta).first() != None:\n tuplas_alternativa_encuesta = db.session.query(\n Alternativa_Encuesta).filter_by(id_encuesta=id_encuesta).all()\n for tupla_alternativa_encuesta in tuplas_alternativa_encuesta:\n ids_preguntas_alternativas.append(\n tupla_alternativa_encuesta.id_pregunta_alternativa)\n borra_tuplas_alternativa_encuesta = Alternativa_Encuesta.delete().where(Alternativa_Encuesta.c.id_encuesta == id_encuesta)\n db.engine.execute(borra_tuplas_alternativa_encuesta)\n db.session.commit()\n ids_opciones = []\n for id_pregunta_alternativa in ids_preguntas_alternativas:\n if db.session.query(Alternativas).filter_by(id_pregunta_alternativa=id_pregunta_alternativa).first() != None:\n tuplas_alternativas_aux = db.session.query(Alternativas).filter_by(\n id_pregunta_alternativa=id_pregunta_alternativa).all()\n for tupla_alternativa in tuplas_alternativas_aux:\n ids_opciones.append(tupla_alternativa.id_opcion)\n borra_tuplas_alternativas = Alternativas.delete().where(Alternativas.c.id_pregunta_alternativa == id_pregunta_alternativa)\n db.engine.execute(borra_tuplas_alternativas)\n db.session.commit()\n tuplas_opcion = db.session.query(Opcion).filter(\n Opcion.id_opcion.in_(ids_opciones)).all()\n for tupla_opcion in tuplas_opcion:\n db.session.delete(tupla_opcion)\n db.session.commit()\n tuplas_pregunta_alternativa = 
db.session.query(Pregunta_Alternativa).filter(\n Pregunta_Alternativa.id_pregunta_alternativa.in_(ids_preguntas_alternativas)).all()\n for tupla_pregunta_alternativa in tuplas_pregunta_alternativa:\n db.session.delete(tupla_pregunta_alternativa)\n db.session.commit()\n # Elimina relacion encuesta-admin\n crea_encuesta = Crea_Encuesta.delete().where(Crea_Encuesta.c.id_encuesta == id_encuesta)\n db.engine.execute(crea_encuesta)\n db.session.commit()\n # Con la tupla que encuesta-admin eliminada, se puede borrar la tupla de encuesta\n encuesta = db.session.query(Encuesta).filter_by(id_encuesta=id_encuesta).first()\n db.session.delete(encuesta)\n db.session.commit()\n return \"Borrada Correctamente\"\n\ndef guardar_respuesta(responses):\n # Incrementa la cantidad de respuestas de una encuesta\n encuesta_aux = db.session.query(Encuesta).filter_by(id_encuesta=responses[\"id\"]).first()\n respuestas_actuales = encuesta_aux.respuestas\n encuesta_aux.respuestas = respuestas_actuales + 1\n db.session.commit()\n # Almacena en la base de datos las respuestas de un usuario\n for i in range(0, len(responses[\"respuestas\"])):\n respuesta_alternativa_aux = Respuesta_Alternativa.insert().values(\n id_opcion=responses[\"respuestas\"][i][\"response\"][\"idOpcion\"], email=responses[\"correo\"])\n db.engine.execute(respuesta_alternativa_aux)\n db.session.commit()\n\n # Se guarda la fecha en donde el encuestado contesto la encuesta\n encuestar_aux = Encuestar.update().values(\n fecha_contestada=date.today(), contestada=True).where(Encuestar.c.email == responses[\"correo\"])\n db.engine.execute(encuestar_aux)\n db.session.commit()\n return \"Respuestas Guardada\"\n\ndef obtener_numero_encuestados_activos():\n record = db.session.query(Encuestado).filter_by(activo=True).all()\n return (len(record))\n\ndef obtener_numero_encuestados_responden(id_encuesta):\n\n total_responden = db.session.query(Encuestar).filter_by(id_encuesta = id_encuesta, contestada = True).count()\n return total_responden\n\n#Se obtiene el título de la encuesta\ndef obtener_titulo_encuesta(id_encuesta):\n record = db.session.query(Encuesta).filter_by(id_encuesta = id_encuesta).first()\n\n if (record != None):\n titulo_encuesta = {\n \"titulo\" : record.titulo\n }\n else:\n titulo_encuesta = {\n \"titulo\" : None\n }\n return (titulo_encuesta)\n\n#Se obtienen las respuestas de cada pregunta con la cantidad por cada opción\ndef obtener_respuestas_opcion(id_encuesta):\n \n record = db.session.query(Alternativa_Encuesta).filter_by(id_encuesta = id_encuesta).all()\n\n if record == None:\n\n datos_pregunta = {\n \"id_pregunta\": None,\n \"numero\": None,\n \"enunciado\": None,\n \"opciones\": []\n }\n \n return (datos_pregunta)\n\n lista_preguntas = []\n\n for rec in record:\n\n pregunta = db.session.query(Pregunta_Alternativa).filter_by(id_pregunta_alternativa = rec[1]).all()\n \n for p in pregunta:\n \n datos_pregunta = {}\n datos_pregunta.clear() \n\n opciones = db.session.query(Alternativas).filter_by(id_pregunta_alternativa = p.id_pregunta_alternativa).all()\n\n lista_opciones = []\n\n for op in opciones:\n resultado_opcion = {}\n resultado_opcion.clear()\n respuestas = db.session.query(Respuesta_Alternativa).filter_by(id_opcion = op.id_opcion).all()\n numero_respuestas = len(respuestas)\n\n opcion = db.session.query(Opcion).filter_by(id_opcion = op.id_opcion).first()\n\n resultado_opcion = {\n \"id_opcion\": op.id_opcion,\n \"opcion\": opcion.opcion,\n \"respuestas\": numero_respuestas,\n \"porcentaje\": 0\n }\n\n 
lista_opciones.append(resultado_opcion)\n \n total_respuestas = 0\n for i in lista_opciones:\n total_respuestas = total_respuestas + i.get(\"respuestas\")\n \n if (total_respuestas != 0):\n for i in lista_opciones:\n i[\"porcentaje\"] = round((i.get(\"respuestas\")/total_respuestas)*100,1)\n \n datos_pregunta = {\n \"id_pregunta\":p.id_pregunta_alternativa,\n \"numero\": p.numero,\n \"enunciado\": p.enunciado,\n \"opciones\": lista_opciones,\n \"total_respuestas\": total_respuestas\n }\n\n lista_preguntas.append(datos_pregunta)\n\n # sort in place; a bare sorted(...) call would discard its result\n lista_preguntas.sort(key = lambda i: i['numero'])\n\n return lista_preguntas\n\n#Fetch the data of the users who answer the survey\n#It is assumed that surveys cannot be left half-answered\ndef obtener_encuestados_responden(id_encuesta):\n\n responden = db.session.query(Encuestar).filter_by(id_encuesta = id_encuesta).all() \n\n list_encuestados = []\n\n for i in responden:\n\n datos_encuestado = {}\n datos_encuestado.clear()\n\n if (i.contestada == True):\n estado = \"Contestada\"\n else:\n estado = \"No Contestada\"\n\n encuestado = db.session.query(Registrado).filter_by(email=i.email).first()\n\n if(encuestado == None):\n datos_encuestado={\n \"id_registrado\": None,\n \"email\": i.email,\n \"nombre\": \"Invitado\",\n \"apellido\": \"-\", \n \"genero\": \"-\",\n \"edad\": \"-\",\n \"estado\": estado\n }\n\n else:\n datos_encuestado={\n \"id_registrado\": encuestado.id_registrado,\n \"email\": i.email,\n \"nombre\": encuestado.nombre,\n \"apellido\": encuestado.apellidos,\n \"genero\": encuestado.genero,\n \"edad\": calcular_edad(encuestado.fecha_nacimiento),\n \"estado\": estado\n }\n \n list_encuestados.append(datos_encuestado)\n\n return (list_encuestados)\n\ndef calcular_edad(fecha_nacimiento):\n \n hoy = date.today()\n edad = hoy.year - fecha_nacimiento.year - ((hoy.month, hoy.day) < (fecha_nacimiento.month, fecha_nacimiento.day))\n if(edad < 0):\n edad = 0\n return edad\n\ndef agregar_invitado(responses):\n encuestado_invitado = Encuestado(email=responses[\"email\"],activo=False)\n db.session.add(encuestado_invitado)\n db.session.commit()\n return print(\"Usuario invitado se ha agregado correctamente\")\n\ndef cambiar_estado_invitado(responses):\n encuestado_invitado = db.session.query(Encuestado).filter_by(email=responses[\"email\"]).first()\n encuestado_invitado.activo = responses[\"state\"]\n db.session.commit()\n return print(\"Estado de usuario invitado cambiado correctamente\")\n\ndef desunscribir_registrado(response):\n if db.session.query(Registrado).filter_by(id_registrado=response[\"id_survey\"]).first() != None:\n registrado_aux = db.session.query(Registrado).filter_by(id_registrado=response[\"id_survey\"]).first()\n db.session.delete(registrado_aux)\n db.session.commit()\n return \"USUARIO DESUSCRITO\"\n else:\n return \"Error: este usuario no esta registrado\"\n\n#Build the personalized link token from the email\ndef codificar_mail(mail):\n str_bytes = mail.encode(\"ascii\") \n base_bytes = base64.b64encode(str_bytes)\n return base_bytes.decode(\"ascii\")\n\n#Recover the email from the personalized link token\ndef decodificar_mail(code):\n base_bytes = code.encode(\"ascii\")\n str_bytes = base64.b64decode(base_bytes)\n return str_bytes.decode(\"ascii\")\n
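Note that `codificar_mail`/`decodificar_mail` above are plain Base64, i.e. a reversible encoding rather than encryption; a quick round-trip check (the address is illustrative):

```python
import base64

token = base64.b64encode("persona@example.com".encode("ascii")).decode("ascii")
print(token)  # cGVyc29uYUBleGFtcGxlLmNvbQ==
print(base64.b64decode(token.encode("ascii")).decode("ascii"))  # persona@example.com
```

Standard Base64 output can contain '+', '/' and '=' that need escaping when embedded in a URL; `base64.urlsafe_b64encode` would sidestep that.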
#Check whether the respondent has already answered this survey\n#Also check whether the survey's end date has passed\n#True: already answered (or not allowed to answer)\ndef comprobar_encuestado_encuesta(id_encuesta, email):\n\n respondida = db.session.query(Encuestar).filter_by(id_encuesta=id_encuesta, email=email).first()\n\n #If the survey was never sent to this email, the user cannot answer it\n if (respondida == None):\n return True\n\n if (respondida.contestada == True):\n return True\n\n else:\n #date check\n encuesta = db.session.query(Encuesta).filter_by(id_encuesta=id_encuesta).first()\n\n #If fecha_fin is earlier than the current date, deactivate the survey\n if(encuesta.fecha_fin != None and encuesta.fecha_fin < date.today()):\n encuesta.activa = False\n db.session.commit()\n\n return True\n \n else:\n return False\n\ndef cambiar_estado_encuesta(responses):\n\n encuesta = db.session.query(Encuesta).filter_by(id_encuesta = responses[\"id_survey\"]).first()\n\n if encuesta != None:\n\n encuesta.activa = responses[\"status\"]\n db.session.commit()\n \n return \"Estado cambiado con exito!\"\n\ndef comprobar_tipo_encuestado(email):\n\n encuestado = db.session.query(Registrado).filter_by(email = email).first()\n\n if(encuestado == None):\n return (\"anonimo\")\n \n else:\n return (\"registrado\")\n\ndef registrar_encuestado(dataRegister):\n\n if (db.session.query(Registrado).filter_by(email = dataRegister.get(\"email\")).first() != None):\n return \"Email ya registrado\"\n \n if (db.session.query(Encuestado).filter_by(email = dataRegister.get(\"email\")).first() == None):\n encuestado = Encuestado(email = dataRegister.get(\"email\"), activo = True)\n db.session.add(encuestado)\n db.session.commit()\n\n password = generate_password_hash(dataRegister.get(\"password\"))\n\n if(dataRegister.get(\"genero\") == \"1\"):\n genero = \"M\"\n elif(dataRegister.get(\"genero\") == \"2\"):\n genero = \"F\"\n else:\n genero = \"O\"\n\n registrado = Registrado(email = dataRegister.get(\"email\"), password = password, \n nombre = dataRegister.get(\"name\"), apellidos = dataRegister.get(\"apellido\"),\n rut = dataRegister.get(\"rut\"), genero = genero, fecha_nacimiento = dataRegister.get(\"fecha_nacimiento\"),\n fecha_registro = date.today() )\n\n db.session.add(registrado)\n db.session.commit()\n\n return redirect(url_for(\"dashboard_user\"))\n\ndef aumentar_visitas(id_encuesta):\n encuesta = db.session.query(Encuesta).filter_by(id_encuesta = id_encuesta).first()\n\n if (encuesta != None):\n\n encuesta.visitas = encuesta.visitas + 1\n db.session.commit()\n \n return \"Visitas actualizadas\"\n\ndef modificar_tiempo_limite(responses):\n from dateutil import parser\n encuesta = db.session.query(Encuesta).filter_by(id_encuesta = responses[\"id\"]).first()\n if responses[\"end_date\"] == \"\":\n return \"error: Fecha de termino vacia\"\n\n dt = parser.parse(responses[\"end_date\"])\n fecha_termino = datetime.strftime(dt, '%Y-%m-%d')\n fecha_inicio = datetime.strftime(encuesta.fecha_inicio, '%Y-%m-%d')\n\n # ISO '%Y-%m-%d' strings compare in the same order as the dates themselves\n if fecha_inicio >= fecha_termino:\n return \"error: Fecha de termino menor o igual a la de inicio\"\n encuesta.fecha_fin = responses[\"end_date\"]\n db.session.commit()\n return \"Tiempo limite de encuesta modificado correctamente\"\n\ndef asignar_asunto_y_mensaje(responses):\n encuesta = db.session.query(Encuesta).filter_by(id_encuesta = responses[\"id\"]).first()\n \n encuesta.asunto_mail = responses[\"mail_subject\"]\n encuesta.mensaje_mail = responses[\"mail_body\"]\n db.session.commit()\n\n return \"Asunto y mensaje actualizados\"\n\ndef desunscribir_encuestado(email):\n\n encuestado = db.session.query(Encuestado).filter_by(email = email).first()\n\n if(encuestado != None):\n encuestado.activo = False\n db.session.commit()\n\n return \"Usuario desuscrito\"\n\n else: \n return \"Email no existe\"\n
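`modificar_tiempo_limite` compares the two dates as '%Y-%m-%d' strings, which only works because ISO-formatted strings sort lexicographically in date order; a small illustration (comparing `date` objects is the more robust choice):

```python
from datetime import datetime

print("2010-12-01" < "2011-01-09")  # True, matches the real date order
print("01-12-2010" < "09-01-2011")  # also True, but only because the day fields happen to compare that way

d1 = datetime.strptime("01-12-2010", "%d-%m-%Y").date()
d2 = datetime.strptime("09-01-2011", "%d-%m-%Y").date()
print(d1 < d2)  # True: comparing date objects is format-independent
```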
\ndef get_dataUser():\n dataUser = {}\n if current_user.rol == \"admin\":\n admin = db.session.query(Admin).filter_by(id_admin=current_user.id).first()\n dataUser = {\n \"name\": admin.nombre,\n \"lastName\": \"None\",\n \"rut\": \"None\",\n \"gender\": \"None\",\n \"birthday\": \"dd-mm-aaaa\",\n \"email\": admin.email,\n \"avatar\": \"None\",\n \"encuestas\": []\n }\n\n if current_user.rol == \"encuestado\":\n encuestado = db.session.query(Encuestado).filter_by(email=current_user.email).first()\n dataUser = {\n \"name\": \"Invitado\",\n \"lastName\": \"None\",\n \"rut\": \"None\",\n \"gender\": \"None\",\n \"birthday\": \"dd-mm-aaaa\",\n \"email\": encuestado.email,\n \"avatar\": \"None\",\n \"encuestas\": []\n }\n\n if current_user.rol == \"registrado\":\n registrado = db.session.query(Registrado).filter_by(id_registrado=current_user.id).first()\n encuestado = db.session.query(Encuestado).filter_by(email=current_user.email).first()\n genero = \"\"\n if registrado.genero == \"M\":\n genero = \"Masculino\"\n elif registrado.genero == \"F\":\n genero = \"Femenino\"\n else:\n genero = \"No especificado\"\n\n encuestas = db.session.query(Encuestar).filter_by(email=current_user.email, contestada=True).all()\n lista_encuestas = []\n\n if (encuestas != None):\n\n for l in encuestas:\n\n e = db.session.query(Encuesta).filter_by(id_encuesta = l.id_encuesta).first()\n encuesta={\n \"title\": e.titulo,\n \"date\": (l.fecha_contestada).strftime(\"%d-%m-%Y\")\n }\n\n lista_encuestas.append(encuesta)\n \n\n dataUser = {\n \"name\": registrado.nombre,\n \"lastName\": registrado.apellidos,\n \"rut\": registrado.rut,\n \"gender\": genero,\n \"birthday\": registrado.fecha_nacimiento.strftime(\"%d-%m-%Y\"),\n \"email\": registrado.email,\n \"avatar\": registrado.avatar,\n \"estado\": encuestado.activo,\n \"encuestas\": lista_encuestas\n }\n return dataUser\n\n# Testing still pending\ndef cambiar_password(user, password):\n\n usuario = db.session.query(Admin).filter_by(email=user).first()\n\n if (usuario == None):\n usuario = db.session.query(Registrado).filter_by(email=user).first()\n\n if(usuario == None):\n return \"mail incorrecto\"\n \n usuario.password = generate_password_hash(password)\n db.session.commit()\n\n return \"password cambiada exitosamente\"\n\n# Changes the user's avatar image\ndef cambiar_avatar(user, url):\n\n usuario = db.session.query(Admin).filter_by(email=user).first()\n\n if (usuario == None):\n usuario = db.session.query(Registrado).filter_by(email=user).first()\n\n if(usuario == None):\n return \"usuario incorrecto\"\n \n usuario.avatar = url\n db.session.commit()\n\n return \"avatar cambiado correctamente\"\n\n","repo_name":"mellokx/Proyecto-Semestral-IS2","sub_path":"application/consultas.py","file_name":"consultas.py","file_ext":"py","file_size_in_byte":33538,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
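`registrar_encuestado` and `cambiar_password` above store only a salted hash via werkzeug, never the plain password; the round-trip looks like this:

```python
from werkzeug.security import generate_password_hash, check_password_hash

stored = generate_password_hash("s3cret")      # e.g. 'pbkdf2:sha256:...$<salt>$<hash>'
print(check_password_hash(stored, "s3cret"))   # True
print(check_password_hash(stored, "wrong"))    # False
```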
{"seq_id":"72096229706","text":"import boto3\n\ncloudwatch_client = boto3.client('cloudwatch', region_name='ap-southeast-1')\n# NOTE: CloudWatch alarm actions must reference an SNS topic in the alarm's own\n# region; this topic lives in ap-south-1 while the client above uses ap-southeast-1.\nsns_topic_arn = 'arn:aws:sns:ap-south-1:605536185498:devops'\n\nmetricName = 'HTTPCode_ELB_5XX_Count'\n\nalblist = boto3.client('elbv2')\nbals = alblist.describe_load_balancers()\n\n\n#def get_albs():\n# alb_list = []\n# for alb in bals['LoadBalancers']:\n# alb_list.append(alb['LoadBalancerName'])\n\n# print(alb_list)\n# return alb_list\n\nalb_list=['meesho-production-alb']\n\ndef create_alarm():\n print('Creating alarm for ALB')\n for alb_name in alb_list:\n response = cloudwatch_client.put_metric_alarm(\n AlarmName='%s-High-%s' % (alb_name, metricName),\n AlarmDescription='alarm for testing',\n MetricName=metricName,\n Namespace='AWS/ApplicationELB',\n Statistic='Sum',  # 5XX responses are a count; Sum per period is the meaningful statistic\n Dimensions=[\n {\n 'Name': 'LoadBalancer',  # AWS/ApplicationELB uses 'LoadBalancer', not 'LoadBalancerName'\n 'Value': alb_name  # must be the ARN suffix 'app/<name>/<id>', see sketch below\n }\n ],\n Period=300,\n Unit='Count',\n EvaluationPeriods=1,\n Threshold=1.0,\n ComparisonOperator='GreaterThanOrEqualToThreshold',\n ActionsEnabled=True,\n AlarmActions=[sns_topic_arn],\n )\n\n\nif __name__ == \"__main__\":\n create_alarm()\n","repo_name":"pankajmeesho/Scripts","sub_path":"AlbCloudwatch.py","file_name":"AlbCloudwatch.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
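On the dimension fix above: in the AWS/ApplicationELB namespace the 'LoadBalancer' dimension value is not the friendly name but the ARN suffix 'app/&lt;name&gt;/&lt;id&gt;'. A sketch of deriving it from describe_load_balancers (region and names are illustrative):

```python
import boto3

elbv2 = boto3.client("elbv2", region_name="ap-southeast-1")

def alb_dimension_values():
    for alb in elbv2.describe_load_balancers()["LoadBalancers"]:
        # arn:aws:elasticloadbalancing:<region>:<account>:loadbalancer/app/<name>/<id>
        yield alb["LoadBalancerArn"].split(":loadbalancer/")[1]

print(list(alb_dimension_values()))  # e.g. ['app/meesho-production-alb/0123456789abcdef']
```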
{"seq_id":"22845617955","text":"import flask\nfrom numpy.random.mtrand import random\nimport pandas as pd\nimport numpy as np\nfrom flask import jsonify\nimport pickle\nfrom ibmcloudant.cloudant_v1 import CloudantV1\nfrom ibmcloudant import CouchDbSessionAuthenticator\nfrom pymongo import MongoClient\nfrom flask import request\nfrom flask_cors import CORS, cross_origin\nimport json\nfrom local_functions import deployable_callable\nfrom ibmcloudant.cloudant_v1 import Document\nfrom local_arima import arima_go\n\nLAST_ID = \"1\"\nURI = r\"mongodb+srv://anhnd:Dota2fan@cluster0.3myha.mongodb.net/myFirstDatabase?retryWrites=true&w=majority\"\nmongo_client = MongoClient(URI)\nmongo_db = mongo_client.get_database(\"products\")\n\nCLOUDANT_URL = \"https://apikey-v2-1go7e481wm20t9cbozt1iwtu40pr0q8bqiro5zz82b6l:989e7571b0708a89cac8aab73990e7eb@38ea2994-be4a-4a49-a6b9-2e7fc862b20f-bluemix.cloudantnosqldb.appdomain.cloud\"\nCLOUDANT_APIKEY = \"15woWAc-BnP_R3-TGN9NhLFlCxPQFEYIPz1V7v9PQLsi\"\n\nauthenticator = CouchDbSessionAuthenticator(\n 'apikey-v2-1go7e481wm20t9cbozt1iwtu40pr0q8bqiro5zz82b6l', '989e7571b0708a89cac8aab73990e7eb')\nSERVICE = CloudantV1(authenticator=authenticator)\nSERVICE.set_service_url(CLOUDANT_URL)\nCLOUDANT_DB = \"rfm_cluster_test\"\n\nTAKEN = False\n\nwith open(\"./data/datapickle\", \"rb\") as f:\n df = pickle.load(f)\nwith open(\"./data/datapickle_2\", \"rb\") as f:\n df2 = pickle.load(f)\nwith open(\"./data/datapickle_dict\", \"rb\") as f:\n df3 = pickle.load(f)\nwith open(\"./data/datapickle_category_dict\", \"rb\") as f:\n df4 = pickle.load(f)\n\nMAX_WAS_BOUGHT = 5\ndata_recommended = df\ndata_cus_pro = df2\ndict_products = df3\ndict_pro_cate = df4\n\nclass NpEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, np.integer):\n return int(obj)\n if isinstance(obj, np.floating):\n return float(obj)\n if isinstance(obj, np.ndarray):\n return obj.tolist()\n return super(NpEncoder, self).default(obj)\n\ndef default(obj):\n if type(obj).__module__ == np.__name__:\n if isinstance(obj, np.ndarray):\n return obj.tolist()\n else:\n return obj.item()\n raise TypeError('Unknown type:', type(obj))\n\n\ndef recommend_products(customerId, numpro_recommended):\n try:\n temp_cus = data_recommended.loc[customerId].sort_values(\n ascending=False)\n temp_cus_his = data_cus_pro.loc[customerId]\n except:\n return pd.DataFrame({\"Error\": [\"No data for user {}\".format(customerId)]})\n list_product_recommend = []\n list_ratings = []\n list_category = []\n list_was_bought = []\n\n count = 0\n count_max_was_bought = 0\n for index, rating in temp_cus.items():\n if rating <= 0:\n break\n history_rating = temp_cus_his[index]\n\n if history_rating != 0:\n if count_max_was_bought >= MAX_WAS_BOUGHT:\n continue\n count_max_was_bought += 1\n list_was_bought.append(True)\n else:\n list_was_bought.append(False)\n\n count = count + 1\n\n product_name = dict_products[index]\n category = dict_pro_cate[product_name[0].upper()]\n list_product_recommend.append(product_name)\n list_ratings.append(np.round(rating, 2))\n list_category.append(category)\n\n if count >= numpro_recommended:\n break\n\n df_product_recommend = pd.DataFrame(data={\"Product\": list_product_recommend,\n \"Rating\": list_ratings,\n \"Category\": list_category,\n \"Bought\": list_was_bought})\n return df_product_recommend\n\n\napp = flask.Flask(__name__)\n\nCORS(app)\n\n\napp.config[\"DEBUG\"] = True\nRFM_AND_ID = pd.read_csv(r'./data/rfm_and_id.csv')\n\n\ndef normalize(lower, upper):\n def norm(x):\n l, h = min(x), max(x)\n return [lower + (upper - lower) * (ele - l)/(h - l) for ele in x]\n return norm\n\n\n@app.route('/user/get', methods=['GET'])\ndef home():\n re = list(map(lambda x: {\"id\": str(x)}, RFM_AND_ID.CustomerID.tolist()))\n response = jsonify(re)\n response.headers.add('Access-Control-Allow-Origin', '*')\n return response\n\n\n@app.route(\"/cluster/get\", methods=['GET'])\ndef get_rfm():\n tmp = RFM_AND_ID.loc[:, [\"Recency\", \"Frequency\",\n \"MonetaryValue\", \"KMeans_Label\"]]\n re = [{\"x\": tmp.loc[tmp.KMeans_Label == i].Recency.tolist(),\n \"y\":tmp.loc[tmp.KMeans_Label == i].Frequency.tolist(),\n \"z\":tmp.loc[tmp.KMeans_Label == i].MonetaryValue.tolist(),\n \"label\":i\n } for i in range(4)]\n\n response = jsonify(re)\n response.headers.add('Access-Control-Allow-Origin', '*')\n return response\n\n\n@app.route(\"/cluster_online/get\")\ndef get_rfm_online():\n data = SERVICE.get_document(CLOUDANT_DB, \"1\").result\n rfm_df = pd.DataFrame(data[\"data\"])\n re = [{\"x\": rfm_df.loc[rfm_df.KMeans_Label == i].Recency.tolist(),\n \"y\":rfm_df.loc[rfm_df.KMeans_Label == i].Frequency.tolist(),\n \"z\":rfm_df.loc[rfm_df.KMeans_Label == i].MonetaryValue.tolist(),\n \"label\":i\n } for i in range(4)]\n\n response = jsonify(re)\n response.headers.add('Access-Control-Allow-Origin', '*')\n return response\n\n\n@app.route(\"/recommend/<idx>\")\ndef recommend_by_id(idx):\n DF = pd.read_csv(\"./data/data.csv\",encoding=\"unicode_escape\")\n def get_bought(idx):\n tmp = DF.loc[DF.CustomerID == float(idx)].loc[:,[\"Description\", \"Quantity\"]].groupby(\"Description\").count()\n bought = list(tmp.to_dict()[\"Quantity\"].items())\n bought.sort(key = lambda x:x[1],reverse = True)\n items = [ele[0] for ele in bought[:5]]\n return items\n #try:\n idx = str(idx) + '.0'\n df_outcome = recommend_products(idx, 5)\n df_outcome.Product = df_outcome.Product.apply(lambda x: x[0])\n re = [df_outcome.loc[i, [\"Product\", \"Category\"]].to_dict()\n for i in range(len(df_outcome))]\n bought = get_bought(idx)\n bought_items = [{\"Product\":item,\"Category\":\"\"} for item in bought]\n print(bought_items)\n mongo_client = MongoClient(URI)\n mongo_client.get_database(\"products\")\n col = mongo_db[\"images\"]\n\n for i, ele in enumerate(re):\n re_img = list(col.find({\"name\": re[i][\"Product\"]}))\n if len(re_img) != 0:\n print(\"hi\")\n re[i][\"src\"] = re_img[0][\"src\"]\n else:\n re[i][\"src\"] = \"\"\n\n for i, ele in enumerate(bought_items):\n re_img = list(col.find({\"name\": ele[\"Product\"]}))\n if len(re_img) != 0:\n print(\"hi\")\n bought_items[i][\"src\"] = re_img[0][\"src\"]\n else:\n bought_items[i][\"src\"] = \"\"\n \n mongo_client.close()\n #except:\n # re = []\n # bought_items = []\n \n \n\n res = 
{\"recommend\":re,\"bought\":bought_items}\n response = jsonify(res)\n response.headers.add('Access-Control-Allow-Origin', '*')\n return response\n\n\n@app.route(\"/products/get_all\", methods=[\"GET\"])\ndef get_all_product():\n with open(\"./data/all_products\", \"rb\") as f:\n re = pickle.load(f)\n response = jsonify(re)\n response.headers.add('Access-Control-Allow-Origin', '*')\n return response\n\n\n@app.route(\"/stimulate\", methods=[\"POST\"])\ndef stimulate_product():\n re = request.data\n re = json.loads(re)\n print(re)\n count = re[\"count\"]\n items = re[\"items\"]\n\n def random_time():\n date = np.random.randint(1, 11)\n month = np.random.randint(1, 11)\n year = 2010\n return \"{}/{}/{}\".format(month, date, year)\n\n def epoch_converter(readable_time):\n import calendar\n import time\n return calendar.timegm(time.strptime(readable_time, '%m/%d/%Y'))\n\n def random_buy(count, items):\n n = len(items)\n users = []\n for i in range(count):\n t = random_time()\n idx = np.random.randint(1,100000000)\n buy = np.random.randint(1, n-1)\n \n\n unique = epoch_converter(t)\n\n for j in range(buy):\n bought = np.random.randint(1, 4)\n\n users.append({\"InvoiceNo\": unique, \"InvoiceDate\": t, \"CustomerID\": idx,\n \"Description\": items[j], \"UnitPrice\": str(1),\n \"Country\": \"Vietnam\", \"StockCode\": str(unique) + \"G\", \"Quantity\": bought})\n return pd.DataFrame(users)\n\n tmp = random_buy(int(count), items)\n prices = []\n for item in tmp.Description:\n try:\n prices.append(df.loc[df.Description == item].UnitPrice[0])\n except:\n prices.append(1)\n\n tmp.UnitPrice = prices\n payload_scoring = {\"input_data\": [{\"values\": [tmp.to_dict()\n\n ]}]}\n deployable_callable()(payload_scoring)\n print(\"Done\")\n return \"ok\"\n\n\n@app.route(\"/arima/<previous>&<ahead>\")\ndef arima(previous, ahead):\n re = arima_go()(int(previous), int(ahead))\n response = json.dumps(re, default = default)\n response = jsonify(json.loads(response))\n response.headers.add('Access-Control-Allow-Origin', '*')\n return response\n \n@app.route(\"/arima/listen\")\ndef arima_listen():\n import pickle\n with open(\"./data/arima\",\"rb\") as f:\n re = pickle.load(f)\n\n response = json.dumps(re, default = default)\n response = jsonify(json.loads(response))\n response.headers.add('Access-Control-Allow-Origin', '*')\n return response\n\n@app.route('/serve/table')\ndef serve():\n table = pd.read_csv(\"./data/rule_EIRE.csv\")\n re = []\n for i in range(table.shape[0]):\n re.append(table.iloc[i,[1,2,4]].to_list())\n response = json.dumps(re, cls = NpEncoder)\n response = jsonify(json.loads(response))\n response.headers.add('Access-Control-Allow-Origin', '*')\n return response\napp.run(host='0.0.0.0', port='8090', threaded=True)\n","repo_name":"nduc4nh/IBM-customer-segmnetation-app","sub_path":"backend/src/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":9959,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
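The NpEncoder class near the top of this file exists because json.dumps() raises TypeError on numpy scalars and arrays; usage is just a matter of passing cls= (a self-contained sketch mirroring the class above):

```python
import json
import numpy as np

class NpEncoder(json.JSONEncoder):  # same shape as the encoder defined in api.py above
    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return super().default(obj)

payload = {"count": np.int64(3), "scores": np.array([0.5, 1.5])}
# json.dumps(payload) alone raises: TypeError: Object of type int64 is not JSON serializable
print(json.dumps(payload, cls=NpEncoder))  # {"count": 3, "scores": [0.5, 1.5]}
```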
{"seq_id":"32588695815","text":"from unittest import case\nfrom pathlib import Path\nfrom mdclasses.utils.path_resolver import get_path_resolver\nfrom mdclasses.configuration_enums import Format, ObjectType\n\n\nclass TestPathResolverConfiguration(case.TestCase):\n\n def setUp(self) -> None:\n self.test_data_root = Path(Path(__file__).parent).joinpath('test_data', 'config')\n self.file_format = Format.CONFIGURATOR\n self.path_resolver = get_path_resolver(self.file_format)\n\n def test_config_file_path(self):\n paths = {\n 'FilterCriteria': ObjectType.FILTER_CRITERION,\n 'ChartsOfCharacteristicTypes': ObjectType.CHART_OF_CHARACTERISTIC_TYPES,\n 'ChartsOfAccounts': ObjectType.CHART_OF_ACCOUNTS,\n 'BusinessProcesses': ObjectType.BUSINESS_PROCESS,\n 'ChartsOfCalculationTypes': ObjectType.CHART_OF_CALCULATION_TYPES,\n 'Catalogs': ObjectType.CATALOG,\n\n }\n name = 'Test'\n\n for path in paths:\n test_path = self.path_resolver.conf_file_path(paths[path], name)\n self.assertEqual(test_path, Path(path).joinpath(f'{name}.xml'),\n f'Wrong description file path resolved for type {paths[path]}.')\n\n def test_ext_path(self):\n name = 'test'\n paths = {\n 'FilterCriteria': ObjectType.FILTER_CRITERION,\n 'ChartsOfCharacteristicTypes': ObjectType.CHART_OF_CHARACTERISTIC_TYPES,\n 'ChartsOfAccounts': ObjectType.CHART_OF_ACCOUNTS,\n 'BusinessProcesses': ObjectType.BUSINESS_PROCESS,\n 'ChartsOfCalculationTypes': ObjectType.CHART_OF_CALCULATION_TYPES,\n 'Catalogs': ObjectType.CATALOG,\n\n }\n\n for path in paths:\n test_path = self.path_resolver.ext_path(paths[path], name)\n self.assertEqual(\n test_path,\n Path(path).joinpath(name).joinpath('Ext'),\n f'Wrong path resolved for type {paths[path]}.'\n )\n\n def test_type_path(self):\n\n paths = {\n '': ObjectType.CONFIGURATION,\n 'FilterCriteria': ObjectType.FILTER_CRITERION,\n 'ChartsOfCharacteristicTypes': ObjectType.CHART_OF_CHARACTERISTIC_TYPES,\n 'ChartsOfAccounts': ObjectType.CHART_OF_ACCOUNTS,\n 'BusinessProcesses': ObjectType.BUSINESS_PROCESS,\n 'ChartsOfCalculationTypes': ObjectType.CHART_OF_CALCULATION_TYPES,\n 'Catalogs': ObjectType.CATALOG,\n\n }\n\n for path in paths:\n test_path = self.path_resolver.type_path(paths[path])\n self.assertEqual(test_path, Path(path), f'Wrong path resolved for type {paths[path]}.')\n\n def test_form_path(self):\n\n types_without_forms = [\n ObjectType.CONFIGURATION,\n ObjectType.COMMAND_GROUP,\n ObjectType.COMMON_MODULE,\n ObjectType.COMMON_ATTRIBUTE,\n ObjectType.COMMON_PICTURE,\n ObjectType.COMMON_TEMPLATE\n ]\n\n for obj_type in types_without_forms:\n with self.assertRaises(ValueError) as ex:\n self.path_resolver.form_path(obj_type, 'form')\n self.assertIn(f'{obj_type}', ex.exception.args[0], 'Unexpected exception.')\n\n constant_path = self.path_resolver.form_path(ObjectType.CONSTANT, 'Конст')\n self.assertEqual(constant_path, Path('CommonForms/ФормаКонстант'), 'Wrong path resolved for the constants form')\n\n comm_form_path = self.path_resolver.form_path(ObjectType.COMMON_FORM, 'Конст')\n self.assertEqual(comm_form_path, Path('CommonForms/Конст'), 'Wrong path resolved for the common form')\n\n catalog_path = self.path_resolver.form_path(ObjectType.CATALOG, 'Конст', 'Form')\n self.assertEqual(catalog_path, Path('Catalogs/Конст/Forms/Form'),\n 'Wrong path resolved for the catalog form')","repo_name":"AlexanderNiMo/mdclasses_1c","sub_path":"mdclasses/tests/test_path_resolver.py","file_name":"test_path_resolver.py","file_ext":"py","file_size_in_byte":4134,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"}
{"seq_id":"21245998867","text":"import fnmatch\nimport hashlib\nimport os\nimport re\nfrom urllib.parse import urlparse\n\nimport requests\nfrom lxml import etree\nfrom copy import deepcopy\n\nfrom corpustools import (\n adder,\n corpuspath,\n crawler,\n namechanger,\n text_cat,\n versioncontrol,\n)\n\n\ndef make_digest(bytestring):\n \"\"\"Make an MD5 hash to identify possible dupes.\"\"\"\n hasher = hashlib.md5()\n hasher.update(bytestring)\n return hasher.hexdigest()\n\n\nclass 
SamediggiFiPage:\n \"\"\"Save a www.samediggi.fi page to the corpus.\"\"\"\n\n address_re = re.compile(r\"((http(s)):\\/\\/)www.samediggi.fi\")\n unwanted_endings = (\n \".pdf\",\n \".jpg\",\n \".docx\",\n \".xlsx\",\n \".csv\",\n \".pptx\",\n \".eps\",\n \".doc\",\n \".png\",\n \".xls\",\n )\n language_mapper = {\n \"fi\": \"fin\",\n \"dav\": \"sme\",\n \"an\": \"smn\",\n \"nuo\": \"sms\",\n \"en\": \"eng\",\n }\n corpus_dir = os.getenv(\"GTLANGS\")\n\n def __init__(self, result, dupe_table):\n \"\"\"Initialise the SamediggiFiPage class.\"\"\"\n self.result = result\n self.url = result.url\n self.parsed_url = urlparse(self.url)\n self.tree = etree.HTML(result.text)\n self.dupe = False\n\n filename = namechanger.normalise_filename(self.create_filename())\n\n fullpath = os.path.join(\n self.corpus_dir,\n f\"corpus-{self.lang}-orig\",\n \"admin/sd/www.samediggi.fi\",\n filename,\n )\n if os.path.isfile(fullpath):\n basepath = os.path.split(fullpath)[0]\n name = filename[:-5]\n tag = filename[-5:]\n # print(f\"\\nFile already exists: {fullpath}\\n\")\n i = 0\n while os.path.isfile(os.path.join(basepath, f\"{name}({i}){tag}\")):\n i += 1\n\n fullpath = os.path.join(os.path.join(basepath, f\"{name}({i}){tag}\"))\n\n possible_dupe = dupe_table.get(make_digest(self.content_string), fullpath)\n self.corpuspath = corpuspath.make_corpus_path(possible_dupe)\n if fullpath == possible_dupe:\n self.set_initial_metadata()\n else:\n print(f\"\\nDupe! {self.url} is dupe of {possible_dupe}\\n\")\n self.dupe = True\n\n def create_filename(self):\n if self.tree is not None:\n title = (\n self.tree.findtext(\".//title\")\n .strip()\n .replace(\"/\", \"_\")\n .replace(\".\", \"_\")\n )\n if title:\n return title.rsplit(\" - S\")[0] + \".html\"\n return adder.url_to_filename(self.url)\n\n @property\n def content(self):\n \"\"\"Extract only the content that is interesting from the web page.\"\"\"\n content = etree.Element(\"html\")\n body = etree.SubElement(content, \"body\")\n doc_type = self.tree.find('.//meta[@property=\"og:type\"]').get(\"content\")\n if doc_type == \"article\":\n for xpath_directive in [\n './/main[starts-with(@class, \"template-page content\") or starts-with(@class, \"content\")]',\n './/div[starts-with(@class, \"template-page content\")]',\n ]:\n for element in self.tree.xpath(xpath_directive):\n body.append(self.filter_content((deepcopy(element))))\n\n return content\n\n @staticmethod\n def filter_content(element):\n \"\"\"Remove elements without interesting content.\"\"\"\n for unwanted in [\n './/div[starts-with(@class, \"avia-content-slider\")]', # menu showing other sites\n './/div[starts-with(@class, \"avia-slideshow\")]',\n './/div[starts-with(@class, \"avia-button\")]', # buttons\n './/div[starts-with(@class, \"avia-video\")]', # viedos\n './/div[starts-with(@class, \"big-preview\")]', # large pictures\n './/div[starts-with(@class, \"avia-image\")]', # images\n \".//img\", # images\n \".//figure\", # images\n './/div[starts-with(@class, \"av-alb-blogposts\")]', # news list\n './/div[contains(@class, \"table\")]', # tables\n \".//table\",\n \".//form\", # forms\n \".//fieldset\",\n \".//footer\", # share buttons\n './/span[@class=\"post-meta-infos\"]',\n './/span[@class=\"hidden\"]',\n ]:\n for unwanted_element in element.xpath(unwanted):\n unwanted_element.getparent().remove(unwanted_element)\n\n return element\n\n @property\n def content_string(self):\n \"\"\"This will be the content of the saved file.\"\"\"\n return etree.tostring(self.content, encoding=\"utf8\", 
pretty_print=True)\n\n def set_initial_metadata(self):\n \"\"\"Extract metadata from the web page.\"\"\"\n self.corpuspath.metadata.set_variable(\n \"title\", self.tree.find(\".//title\").text.strip()\n )\n self.corpuspath.metadata.set_variable(\"filename\", self.url)\n self.corpuspath.metadata.set_variable(\"genre\", \"admin\")\n self.corpuspath.metadata.set_variable(\"mainlang\", self.lang)\n self.corpuspath.metadata.set_variable(\"license_type\", \"free\")\n if self.lang not in (\"fin\", \"eng\"):\n self.corpuspath.metadata.set_variable(\"translated_from\", \"fin\")\n time = self.tree.find('.//meta[@property=\"article:modified_time\"]')\n if time is not None:\n self.corpuspath.metadata.set_variable(\"year\", time.get(\"content\")[:4])\n\n @property\n def basename(self):\n \"\"\"Get the basename of the corpus filename.\"\"\"\n return os.path.basename(self.corpuspath.orig)\n\n def sanity_test(self):\n \"\"\"Check if the pages seem to have the expected structure.\"\"\"\n for parallel_link in self.parallel_links:\n if not parallel_link.startswith(\"https://www.samediggi.fi\"):\n raise SystemExit(\n f\"The links to parallel documents has changed {self.url}\"\n )\n if self.lang is None:\n raise SystemExit(\"Language format has changed.\")\n\n @property\n def parallel_links(self):\n \"\"\"Get links to the parallels of this document.\"\"\"\n links = []\n\n for a in self.tree.xpath('.//link[@rel=\"alternate\"]'):\n if (\n a.get(\"hreflang\") in self.language_mapper.keys()\n and self.language_mapper[a.get(\"hreflang\")] != self.lang\n ):\n links.append(a.get(\"href\"))\n\n return links\n\n @property\n def saveable(self):\n \"\"\"Check if the content of this file is worth saving.\"\"\"\n return self.result.ok and len(self.content) and len(self.body_text.split()) > 40\n\n @property\n def lang(self):\n \"\"\"Return the language of the file.\"\"\"\n if self.tree.attrib.get(\"lang\") == \"en-US\":\n return self.language_mapper[\"en\"]\n return self.language_mapper[self.tree.attrib.get(\"lang\")]\n\n def is_valid_address(self, href):\n \"\"\"Check if this is an address that should be crawled.\"\"\"\n match = self.address_re.match(href)\n return (\n match\n and not re.search(\n \"#|tuote|shop|kauppa\",\n href,\n )\n and not href.endswith(self.unwanted_endings)\n )\n\n @property\n def links(self):\n \"\"\"Get all the links found in a file.\"\"\"\n link_set = set()\n for address in self.tree.xpath(\".//a[@href]\"):\n url = address.get(\"href\")\n if self.is_valid_address(url.lower()):\n link_set.add(url.split(\"?\")[0])\n\n return link_set\n\n @property\n def body_text(self):\n \"\"\"Get all the text inside 'body'.\"\"\"\n return \" \".join(self.content.xpath(\".//text()\"))\n\n def set_parallel_file(self, lang, name):\n \"\"\"Update metadata info on parallel files.\"\"\"\n self.corpuspath.metadata.set_parallel_text(lang, name)\n\n def save(self):\n \"\"\"Save html and metadata.\"\"\"\n with open(self.corpuspath.orig, \"wb\") as xml:\n xml.write(self.content_string)\n self.corpuspath.metadata.write_file()\n\n\nclass SamediggiFiCrawler(crawler.Crawler):\n \"\"\"Crawl www.samediggi.fi and save html documents to the corpus.\"\"\"\n\n langs = [\"fin\", \"sme\", \"smn\", \"sms\", \"eng\"]\n languageguesser = text_cat.Classifier()\n\n def __init__(self):\n \"\"\"Initialise the SamediggiFiCrawler class.\"\"\"\n super().__init__()\n self.unvisited_links.add(\"https://www.samediggi.fi/\")\n self.vcs = {}\n\n for lang in self.langs:\n self.vcs[lang] = versioncontrol.vcs(self.goaldir / f\"corpus-{lang}-orig\")\n 
self.dupe_table = {digest: name for digest, name in self.make_dupe_tuple()}\n\n def make_dupe_tuple(self):\n \"\"\"Yield (hash, filename) tuples used to seed the dupe table.\"\"\"\n for lang in self.langs:\n root = os.path.join(\n os.getenv(\"GTLANGS\"), f\"corpus-{lang}-orig\", \"admin/sd/www.samediggi.fi\"\n )\n for path, _, filelist in os.walk(root):\n for name in fnmatch.filter(filelist, \"*.html\"):\n fullpath = os.path.join(path, name)\n with open(fullpath, \"rb\") as html_stream:\n yield make_digest(html_stream.read()), fullpath\n\n def crawl_page(self, link):\n \"\"\"Collect links from a page.\"\"\"\n self.visited_links.add(link)\n result = requests.get(link)\n\n if result.ok and \"html\" in result.headers[\"content-type\"].lower():\n orig_page = SamediggiFiPage(result, self.dupe_table)\n orig_page.sanity_test()\n self.visited_links.add(orig_page.url)\n self.unvisited_links.update(orig_page.links)\n if orig_page.dupe or orig_page.parsed_url.path.startswith(\"/category\"):\n return None\n return orig_page\n\n return None\n\n def crawl_site(self):\n \"\"\"Crawl www.samediggi.fi.\"\"\"\n while self.unvisited_links:\n link = self.unvisited_links.pop()\n\n if link not in self.visited_links:\n self.crawl_pageset(link)\n\n self.unvisited_links.difference_update(self.visited_links)\n\n print(f\"Links in queue: {len(self.unvisited_links)}\")\n\n def add_page(self, page, parallel_pages):\n \"\"\"Add a page to the list of parallel pages.\"\"\"\n if page is not None and page.saveable:\n body_lang = self.languageguesser.classify(page.body_text, langs=self.langs)\n if page.lang == body_lang:\n if body_lang in (\"fin\", \"eng\"):\n parallel_pages.append(page)\n else:\n parallel_pages.insert(0, page)\n\n @staticmethod\n def set_parallel_info(parallel_pages):\n \"\"\"Set the parallels for this set of parallel pages.\"\"\"\n for parallel_page1 in parallel_pages:\n for parallel_page2 in parallel_pages:\n if parallel_page1 != parallel_page2:\n parallel_page1.set_parallel_file(\n parallel_page2.lang, parallel_page2.basename\n )\n\n def crawl_pageset(self, link):\n \"\"\"Crawl a pageset that link gives us.\"\"\"\n pages = []\n\n print(link)\n orig_page = self.crawl_page(link)\n if orig_page is not None:\n self.add_page(orig_page, pages)\n for parallel_link in orig_page.parallel_links:\n self.add_page(self.crawl_page(parallel_link), pages)\n\n if pages and pages[0].lang not in (\"fin\", \"eng\"):\n self.set_parallel_info(pages)\n for parallel_page in pages:\n print(f\"\\t{parallel_page.corpuspath.orig}\")\n self.dupe_table[\n make_digest(parallel_page.content_string)\n ] = parallel_page.corpuspath.orig\n parallel_page.save()\n self.vcs[parallel_page.lang].add(parallel_page.corpuspath.orig)\n self.vcs[parallel_page.lang].add(parallel_page.corpuspath.xsl)\n print()\n","repo_name":"giellalt/CorpusTools","sub_path":"corpustools/samediggi_fi_crawler.py","file_name":"samediggi_fi_crawler.py","file_ext":"py","file_size_in_byte":12075,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"}
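The crawler's dupe detection hinges on make_digest: identical page bodies hash to the same MD5, so a content-digest-to-filename table spots re-downloads regardless of URL. A standalone sketch with illustrative file names:

```python
import hashlib

def make_digest(bytestring):
    return hashlib.md5(bytestring).hexdigest()

dupe_table = {}
for name, payload in [("a.html", b"<p>same</p>"), ("b.html", b"<p>same</p>")]:
    digest = make_digest(payload)
    if digest in dupe_table:
        print(f"{name} is a dupe of {dupe_table[digest]}")  # b.html is a dupe of a.html
    else:
        dupe_table[digest] = name
```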
{"seq_id":"9400427677","text":"'''\nTwo elements of a binary search tree (BST) are swapped by mistake.\n\nRecover the tree without changing its structure.\n'''\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\nclass Solution:\n def recoverTree(self, root: TreeNode) -> None:\n \"\"\"\n Do not return anything, modify root in-place instead.\n \"\"\"\n self.a, self.b = None, None\n self.pre = None\n def inorder(node):\n if node:\n inorder(node.left)\n # an inversion in the inorder sequence marks the swapped pair:\n # the first violation fixes `a` (the earlier node); every violation updates `b`\n if self.pre and node.val < self.pre.val:\n self.a = self.a if self.a else self.pre\n self.b = node\n self.pre = node\n inorder(node.right)\n inorder(root)\n # swap the two values back\n self.a.val, self.b.val = self.b.val, self.a.val","repo_name":"Jason003/interview","sub_path":"Amazon/Recover Binary Search Tree.py","file_name":"Recover Binary Search Tree.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"}
{"seq_id":"29939010851","text":"from django.shortcuts import render\nimport json\nimport urllib.request\n\n# Create your views here.\ndef index(request):\n if request.method == 'POST':\n city = request.POST['city']\n city_str = city.replace(' ', '%20')\n new_city = [word.capitalize() for word in city.split()]\n city = ' '.join(new_city)\n try:\n res = urllib.request.urlopen('http://api.openweathermap.org/data/2.5/weather?q='+city_str+'&appid=35bed192a5b122829bd8299a7d8be01a').read()\n json_data = json.loads(res)\n data = {\n 'country_code': str(json_data['sys']['country']),\n 'coordinate': str(json_data['coord']['lon']) + ' ' +\n str(json_data['coord']['lat']),\n 'temp': str(-273 + int(json_data['main']['temp']))+'ºC',\n 'pressure': str(json_data['main']['pressure']),\n 'humidity': str(json_data['main']['humidity']),\n }\n except:\n data = 'Not found'\n \n \n else:\n city = ''\n data = {}\n return render(request, 'index.html', {'city': city, 'data': data})","repo_name":"FilipeRaitz/django-weather-app","sub_path":"weather_app/weather/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"17091662400","text":"data = [\n {\"name\": \"Alisa\", \"country\": \"UA\", \"item\": \"socks\", \"price\": 15},\n {\"name\": \"Alisa\", \"country\": \"UA\", \"item\": \"socks\", \"price\": 25},\n {\"name\": \"Ben\", \"country\": \"US\", \"item\": \"pencil\", \"price\": 95},\n {\"name\": \"Alisa\", \"country\": \"UA\", \"item\": \"pencil\", \"price\": 45},\n {\"name\": \"Oleg\", \"country\": \"GB\", \"item\": \"socks\", \"price\": 100},\n {\"name\": \"Ben\", \"country\": \"US\", \"item\": \"pencil\", \"price\": 10}\n]\n\nworkers = {sale[\"name\"] for sale in data}\n\nmoney = {name: sum(sale[\"price\"] for sale in data if sale[\"name\"] == name) for name in workers}\n\nprint(\"Set of workers: \", workers)\n\nprint(\"Dictionary \")\nfor name, full_money in money.items():\n print(f\"{name}: {full_money}\")","repo_name":"sharuys/homework","sub_path":"task4.py","file_name":"task4.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"12507200137","text":"#import time\nimport io\nimport threading\nimport picommon\nfrom pistrobecam import PiStrobeCam\nfrom PIL import Image\n\nclass Camera(object):\n thread = None\n frame = None\n# last_access = 0\n \n def __init__( self, exit_event, socketio ):\n print( \"Init Camera Pi -------------------\" )\n self.exit_event = exit_event\n self.close_request = False\n self.socketio = socketio\n self.strobe_cam = PiStrobeCam( picommon.PORT_STROBE, 0.1 )\n self.camera = self.strobe_cam.camera\n \n self.strobe_data = { 'hold':0, 'enable':0, 'wait_ns':0, 'period_ns':100000, 'framerate':0, 'cam_read_time_us':0 }\n self.strobe_period_ns = int( self.strobe_data['period_ns'] )\n valid = 
self.strobe_cam.strobe.set_enable( self.strobe_data['enable'] )\n self.strobe_cam.strobe.set_hold( self.strobe_data['hold'] )\n# self.strobe_cam.strobe.set_timing( self.strobe_data['wait_ns'], self.strobe_period_ns )\n self.set_timing()\n self.enabled = valid\n \n self.cam_read_time_us = 0\n \n self.cam_data = { 'camera': 'rpi', 'status': '' }\n \n @self.socketio.on( 'cam' )\n def on_cam( data ):\n self.on_cam( data )\n\n @self.socketio.on( 'strobe' )\n def on_strobe( data ):\n self.on_strobe( data )\n\n def initialize( self ):\n if self.thread is None:\n # start background frame thread\n self.thread = threading.Thread( target=self._thread )\n self.thread.daemon = True\n self.thread.start()\n \n # wait until frames start to be available\n while self.frame is None:\n# time.sleep(0)\n pass\n\n def get_frame( self ):\n# self.last_access = time.time()\n if self.close_request:\n return None\n else:\n self.initialize()\n return self.frame\n \n def save( self ):\n img=Image.open( io.BytesIO( self.frame ) )\n img.save( \"/home/pi/snapshots/snapshot.jpg\", \"JPEG\" )\n# self.strobe_cam.camera.capture( \"snapshot.jpg\" )\n \n# @classmethod\n def _thread( self ):\n# with picamera.PiCamera() as camera:\n# with self.camera as camera:\n # camera setup\n# self.camera.resolution = ( 320, 240 )\n self.camera.resolution = ( 1024, 768 )\n# self.camera.hflip = True\n# self.camera.vflip = True\n self.camera.awb_mode = 'auto'\n self.camera.exposure_mode = 'off'\n #ISO will not adjust gains when exposure_mode='off'\n #self.camera.iso = 800\n\n # let camera warm up\n# camera.start_preview()\n# time.sleep(2)\n\n stream = io.BytesIO()\n for foo in self.camera.capture_continuous( stream, 'jpeg', use_video_port=True ):\n # store frame\n stream.seek(0)\n self.frame = stream.read()\n\n # reset stream for next frame\n stream.seek(0)\n stream.truncate()\n\n# time.sleep( 0.1 )\n \n # if there hasn't been any clients asking for frames in\n # the last 10 seconds stop the thread\n# if time.time() - self.last_access > 10:\n# print( \"Stop Camera thread\" )\n# break\n if self.exit_event.isSet() or self.close_request:\n print( \"Pi Camera thread exiting\" )\n break\n self.camera.close()\n self.thread_copy = self.thread\n self.thread = None\n# self.close_request = False\n \n def close( self ):\n self.close_request = True\n \n while self.thread != None:\n pass\n \n self.thread_copy.join()\n print( \"Pi Camera thread exited\" )\n \n def emit( self ):\n self.socketio.emit( 'cam', self.cam_data )\n self.socketio.emit( 'strobe', self.strobe_data )\n \n def set_timing( self ):\n try:\n self.strobe_period_ns = min( self.strobe_period_ns, 16000000 )\n valid = self.strobe_cam.set_timing( 32, self.strobe_period_ns, 20000000 )\n self.optimize_fps_btn_enabled = True\n if valid:\n self.enabled = True\n except:\n pass\n \n def optimize_fps( self ):\n get_cam_read_time_us = 10000\n get_cam_read_time_us_prev = 0\n strobe_post_padding_ns = 1000000\n tries = 10\n while ( abs( get_cam_read_time_us - get_cam_read_time_us_prev ) > 1000 ):\n get_cam_read_time_us_prev = get_cam_read_time_us\n self.strobe_cam.set_timing( 32, self.strobe_period_ns, strobe_post_padding_ns )\n #print( \"strobe wait={}ns, strobe period={}ns, strobe padding={}ns\".format( self.strobe_cam.strobe_wait_ns, self.strobe_cam.strobe_period_ns, strobe_post_padding_ns ) )\n valid, get_cam_read_time_us = self.strobe_cam.strobe.get_cam_read_time();\n #print( \"get_cam_read_time_us={}\".format( get_cam_read_time_us ) )\n strobe_post_padding_ns = ( get_cam_read_time_us + 100 ) * 1000\n 
\n tries = tries - 1\n if tries <= 0:\n break\n \n def update( self ):\n valid, cam_read_time_us = self.strobe_cam.strobe.get_cam_read_time()\n if valid:\n self.cam_read_time_us = cam_read_time_us\n \n self.strobe_framerate = int( self.strobe_cam.camera.framerate )\n# self.strobe_time_text = \"{} us\".format( round( float( self.strobe_cam.strobe_period_ns ) / 1000, 3 ) )\n \n def update_strobe_data( self ):\n self.update()\n self.strobe_data['cam_read_time_us'] = self.cam_read_time_us\n# self.strobe_data['strobe_time_text'] = self.strobe_time_text\n# self.strobe_data['wait_ns'] = wait_ns\n self.strobe_data['period_ns'] = self.strobe_period_ns\n self.strobe_data['framerate'] = self.strobe_framerate\n \n def on_strobe( self, data ):\n if ( data['cmd'] == 'hold' ):\n hold_on = 1 if ( data['parameters']['on'] != 0 ) else 0\n valid = self.strobe_cam.strobe.set_hold( hold_on )\n self.strobe_data['hold'] = hold_on\n elif ( data['cmd'] == 'enable' ):\n enabled = data['parameters']['on'] != 0\n valid = self.strobe_cam.strobe.set_enable( enabled )\n self.strobe_data['enable'] = enabled\n elif ( data['cmd'] == 'timing' ):\n# wait_ns = int( data['parameters']['wait_ns'] )\n period_ns = int( data['parameters']['period_ns'] )\n# valid = self.strobe_cam.set_timing( 32, period_ns, 20000000 )\n self.strobe_period_ns = period_ns\n self.set_timing()\n \n self.update_strobe_data()\n self.socketio.emit( 'strobe', self.strobe_data )\n \n def on_cam( self, data ):\n if ( data['cmd'] == 'snapshot' ):\n print( \"Snapshot\" )\n self.save()\n elif ( data['cmd'] == 'optimize' ):\n print( \"Optimize FPS\" )\n self.optimize_fps()\n \n self.update_strobe_data()\n self.socketio.emit( 'strobe', self.strobe_data )\n \n","repo_name":"wenzel-lab/open-microfluidics-workstation","sub_path":"module-fast-imaging/strobe_python/camera_pi.py","file_name":"camera_pi.py","file_ext":"py","file_size_in_byte":6983,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"13714647069","text":"import time\nfrom typing import Any, Dict, List\n\nfrom transformers.pipelines.base import Pipeline # type: ignore\nfrom transformers import AutoModelForSeq2SeqLM # type: ignore\nfrom gtmcore.data.db.results.issue import Issue\nfrom gtmcore.data.db.results.comment import Comment\nfrom gtmcore.logic.dbmanager import DBManager\nfrom gtmprocessing.logic.models.basemodel import BaseModel\n\n\nclass Summarization(BaseModel):\n \"\"\" Class in charge of applying the NLP Summarization model.\n \"\"\"\n # pylint: disable=too-many-instance-attributes\n\n def __init__(self, dbmanager: DBManager) -> None:\n super().__init__()\n self.__dbmanager: DBManager = dbmanager\n self.__pipeline: Pipeline = self.get_pipeline()\n\n self.__with_comments: bool = False\n self.__max_length: int = 0\n self.__min_length: int = 0\n\n def set_params(self, params: Dict[str, Any]) -> None:\n super().set_params(params)\n\n self.__with_comments = bool(params.get(\"with_comments\", False))\n self.__max_length = int(params.get(\"max_length\", 150))\n self.__min_length = int(params.get(\"min_length\", 50))\n\n def preprocess(self) -> None:\n try:\n issue: Issue = self.__dbmanager.get_issue(\n self._repo_dir, self._issue_id)\n\n inputs: List[str] = [issue.description]\n\n if self.__with_comments:\n comments: List[Comment] = self.__dbmanager.get_comments(\n self._repo_dir, self._issue_id)\n inputs += [comment.body for comment in comments]\n\n self._inputs = self.chunk_input(inputs, self.__pipeline.tokenizer)\n except ValueError as 
err:\n            raise ValueError(\"Missing, incorrect or damaged model parameters.\") from err\n\n    def apply(self) -> None:\n\n        start_time: float = time.time()\n\n        summarized_inputs: str = \"\"\n\n        for text in self._inputs:\n            max_length = len(text) if self.__max_length > len(text) else self.__max_length\n            min_length = 0 if self.__min_length > len(text) else self.__min_length\n            summarized_input = self.__pipeline(\n                text,\n                max_length=max_length,\n                min_length=min_length,\n                do_sample=False\n            )[0].get(\"summary_text\", \"\")\n            summarized_inputs += (\" \" + summarized_input)\n\n        self._exec_time = time.time() - start_time\n\n        self._outcome = {\n            \"input_chunks\": self._inputs,\n            \"summarized_text\": summarized_inputs\n        }\n\n    def get_model(self) -> AutoModelForSeq2SeqLM:\n        # pylint: disable=no-value-for-parameter\n        return AutoModelForSeq2SeqLM.from_pretrained(self.get_model_path(), local_files_only=True)\n\n    def get_task_str(self) -> str:\n        return \"summarization\"\n","repo_name":"MrpYA45/github-text-mining-tfg","sub_path":"src/backend/gtmprocessing/gtmprocessing/logic/models/summarization.py","file_name":"summarization.py","file_ext":"py","file_size_in_byte":2811,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"}
{"seq_id":"71602082185","text":"import os.path as osp\nimport uuid\n\nfrom .git import GitRepoFs\nfrom flask import current_app, jsonify, request, Response\nfrom ..error import InvalidRequest\n\n\ndef create_repo():\n    from ..database import db\n    from .model import GitRepo, UserGitReposAssoc, GitRepoFullSchema\n    from ..model import User\n    body = request.data\n    from ..decorators import requires_auth\n    requires_auth()\n    auth = request.authorization\n    user = User.user_by_username(auth['username'])\n    if not body:\n        raise InvalidRequest('Empty body', 'local_path')\n    data = request.get_json(force=True)\n    if 'local_path' not in data or not data['local_path']:\n        raise InvalidRequest('Empty body', 'local_path')\n    local_path = data['local_path']\n    if 'remote_name' not in data or not data['remote_name']:\n        raise InvalidRequest('Empty body', 'remote_name')\n    remote_name = data['remote_name']\n    if 'server_repo_name' in data:\n        repo_name = data['server_repo_name']\n    else:\n        repo_name = osp.basename(local_path)\n    if repo_name[-4:] != '.git':\n        repo_name += '.git'\n    if data.get('server_parent_dir_relative', None):\n        server_parent_dir_rel = data['server_parent_dir_relative']\n    else:\n        server_parent_dir_rel = \"\"\n    if data.get('client_env', None):\n        client_env_name = data['client_env']\n    else:\n        client_env_name = 'default'\n    client_env_entity = None\n    for client_env in user.client_envs:\n        if client_env.env_name == client_env_name:\n            client_env_entity = client_env\n            break\n    if not client_env_entity:\n        raise InvalidRequest(f\"User has no environment with name '{client_env_name}' configured.\", 'client_env')\n    if data.get('all_client_envs', False):\n        desired_client_env_entities = user.client_envs\n    else:\n        desired_client_env_entities = [client_env_entity]\n    server_path_rel = osp.join(server_parent_dir_rel, repo_name)\n    git_repo_entity = GitRepo.load_by_server_path(\n        _server_path_rel=GitRepo.get_server_path_rel(server_path_rel, user.id))\n    if not git_repo_entity:\n        git_repo_entity = GitRepo(server_path_rel=server_path_rel, user_id=user.id)\n    else:\n        git_repo_entity.user_id = user.id\n        # only on initial creation of the repo can all client environments be referenced\n        desired_client_env_entities = [client_env_entity]\n    fs_git_repo = GitRepoFs(git_repo_entity)\n    
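# assumption: creating the bare repo is treated as safe to repeat here, since this\n    # code path is also reached for repos that already exist on the server\n    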
fs_git_repo.create_bare_repo()\n    new_reference = git_repo_entity.add(_local_path_rel=local_path, _remote_name=remote_name,\n                                        _client_envs=desired_client_env_entities)\n    if not new_reference:\n        is_env_referenced = False\n        git_user_repo_assoc = None\n        git_user_repo_assoc_ref = None\n        for user_info in git_repo_entity.userinfo:\n            if user_info.local_path_rel == local_path:\n                git_user_repo_assoc = user_info\n            else:\n                continue\n            referenced_envs = [env.env_name for env in user_info.client_envs]\n            if client_env_name in referenced_envs:\n                is_env_referenced = True\n                git_user_repo_assoc_ref = user_info\n                # existing reference found, abort lookup\n                break\n        if not is_env_referenced:\n            if git_user_repo_assoc:\n                git_user_repo_assoc.client_envs.append(client_env_entity)\n                db.session.add(git_user_repo_assoc)\n                db.session.commit()\n                print(\"The existing local reference at \" +\n                      f\"{git_user_repo_assoc.local_path_rel} with remote \" +\n                      f\"{git_user_repo_assoc.remote_name} has been added to your current environment.\")\n            else:\n                id = uuid.uuid4()\n                git_user_repo_assoc = UserGitReposAssoc(id=id, user_id=user.id, repo_id=git_repo_entity.id,\n                                                        remote_name=remote_name, local_path_rel=local_path)\n                git_user_repo_assoc.client_envs.append(client_env_entity)\n                git_repo_entity.userinfo.append(git_user_repo_assoc)\n                db.session.add(git_repo_entity)\n                db.session.commit()\n                remote_name = git_user_repo_assoc.remote_name\n                # a new reference has been created on the server for this environment, so client git config must be updated\n                new_reference = True\n        else:\n            print(\"No new reference has been created. The existing local reference at \" +\n                  f\"{git_user_repo_assoc_ref.local_path_rel} with remote \" +\n                  f\"{git_user_repo_assoc_ref.remote_name} already includes this remote repo.\")\n    gitrepo_schema = GitRepoFullSchema()\n    response = gitrepo_schema.dump(git_repo_entity)\n    response['remote_name'] = remote_name\n    response['is_new_reference'] = new_reference\n    # ToDo distinguish status codes: new created or already existing\n    return response\n\n\ndef update_repo_for_clientenv(repo_id, client_env):\n    from .model import GitRepo, UserGitReposAssoc, GitRepoFullSchema\n    from ..model import User\n    from ..decorators import requires_auth\n    from ..database import db\n    requires_auth()\n    auth = request.authorization\n    user = User.user_by_username(auth['username'])\n    data = request.get_json(force=True)\n    git_repo_entity = GitRepo.get_repo_by_id_and_user_id(repo_id, user.id)\n    if not git_repo_entity:\n        message = f\"The repo {repo_id} does not exist for your user.\"\n        raise InvalidRequest(message=message, field='repo_id', status_code=404)\n    git_user_repo_assoc = find_git_user_repo_assoc(git_repo_entity, client_env)\n    # guard before dereferencing: the association may not exist for this environment\n    if not git_user_repo_assoc:\n        raise InvalidRequest(f\"The repo is not referenced in the given environment {client_env}\", 'client_env', 404)\n    referenced_envs = [env.env_name for env in git_user_repo_assoc.client_envs]\n    client_env_index = referenced_envs.index(client_env)\n    if 'server_path_rel' in data and data['server_path_rel']:\n        if git_repo_entity.server_path_rel != data['server_path_rel']:\n            fs_git_repo = GitRepoFs(git_repo_entity)\n            success = fs_git_repo.move_repo(data['server_path_rel'])\n            if success:\n                git_repo_entity.server_path_rel = data['server_path_rel']\n                db.session.add(git_repo_entity)\n                db.session.commit()\n    if 'local_path' in data and data['local_path']:\n        if git_user_repo_assoc.local_path_rel != data['local_path']:\n            if len(git_user_repo_assoc.client_envs) > 1:\n                client_env_entity = 
git_user_repo_assoc.client_envs.pop(client_env_index)\n new_git_user_repo_assoc = find_git_user_repo_assoc_ref_by_local_path(git_repo_entity.userinfo,\n data['local_path'])\n if not new_git_user_repo_assoc:\n id = uuid.uuid4()\n new_git_user_repo_assoc = UserGitReposAssoc(id=id, user_id=user.id, repo_id=git_repo_entity.id,\n remote_name=git_user_repo_assoc.remote_name,\n local_path_rel=data['local_path'])\n new_git_user_repo_assoc.client_envs.append(client_env_entity)\n db.session.add(new_git_user_repo_assoc)\n else:\n # Todo improve: delete git_user_repo_assoc if new local_path is already accommodated by another reference\n git_user_repo_assoc.local_path_rel = data['local_path']\n db.session.add(git_user_repo_assoc)\n db.session.commit()\n # else nothing changed\n user_gitrepo_schema = GitRepoFullSchema(many=False)\n return user_gitrepo_schema.dump(git_repo_entity)\n\ndef find_git_user_repo_assoc(git_repo_entity, client_env):\n # find the reference to git repo for this user\n git_user_repo_assoc = None\n for ind, user_info in enumerate(git_repo_entity.userinfo):\n referenced_envs = [env.env_name for env in user_info.client_envs]\n if client_env in referenced_envs:\n git_user_repo_assoc = user_info\n # existing reference found, abort lookup\n break\n return git_user_repo_assoc\n\ndef delete_repo(repo_id):\n from .model import GitRepo\n from ..model import User\n from ..decorators import requires_auth\n requires_auth()\n auth = request.authorization\n user = User.user_by_username(auth['username'])\n git_repo_entity = GitRepo.get_repo_by_id_and_user_id(repo_id, user.id)\n if not git_repo_entity:\n return\n git_repo_entity.remove()\n\ndef delete_repo_assoc_for_clientenv(repo_id, client_env):\n from .model import GitRepo\n from ..model import User\n from ..decorators import requires_auth\n requires_auth()\n auth = request.authorization\n user = User.user_by_username(auth['username'])\n git_repo_entity = GitRepo.get_repo_by_id_and_user_id(repo_id, user.id)\n if not git_repo_entity:\n message = f\"The repo {repo_id} does not exist for your user.\"\n raise InvalidRequest(message=message, field='repo_id', status_code=404)\n git_user_repo_assoc = find_git_user_repo_assoc(git_repo_entity, client_env)\n if not git_user_repo_assoc:\n return\n git_user_repo_assoc.remove()\n\ndef find_git_user_repo_assoc_ref_by_local_path(user_infos, local_path):\n for user_info in user_infos:\n if user_info.local_path_rel == local_path:\n return user_info\n return None\n\n\ndef get_repos(full_info=False):\n from .model import UserGitReposAssoc, UserGitReposAssocSchema, UserGitReposAssocFullSchema\n user = get_user()\n if full_info:\n user_gitrepo_assoc_schema = UserGitReposAssocFullSchema(many=True)\n else:\n user_gitrepo_assoc_schema = UserGitReposAssocSchema(many=True)\n repos = UserGitReposAssoc.get_user_repos(_user_id=user.id)\n repo_list = dict()\n for client_env in user.client_envs:\n repo_list[client_env.env_name] = []\n if not repos:\n return jsonify(repo_list)\n for repo, client_env_name in repos:\n repo_list[client_env_name].append(repo)\n for client_env_name in repo_list:\n repo_list[client_env_name] = user_gitrepo_assoc_schema.dump(repo_list[client_env_name])\n return repo_list\n\n\ndef get_repos_by_clientenv(client_env, full_info=False):\n from .model import UserGitReposAssoc, UserGitReposAssocSchema, UserGitReposAssocFullSchema\n from ..model import ClientEnv\n user = get_user()\n client_env_entity = ClientEnv.get_client_env(_user_id=user.id, _env_name=client_env)\n if not client_env_entity:\n message = 
f\"The client environment {client_env} does not exist for your user.\"\n raise InvalidRequest(message=message, field='client_env', status_code=404)\n if full_info:\n user_gitrepo_assoc_schema = UserGitReposAssocFullSchema(many=True)\n else:\n user_gitrepo_assoc_schema = UserGitReposAssocSchema(many=True)\n repos = UserGitReposAssoc.get_user_repos_by_client_env_name(_user_id=user.id, _client_env_name=client_env)\n if not repos:\n return jsonify([])\n return user_gitrepo_assoc_schema.dump(repos)\n\n\ndef get_user():\n from ..model import User\n from ..decorators import requires_auth\n requires_auth()\n auth = request.authorization\n return User.user_by_username(auth['username'])\n\n# TODO: enrich class with logic \nclass GitRepoUpdate:\n \n git_repo_dao = None\n \n def __init__(self, repo_id, user, GitRepo):\n self.user = user\n self.repo_id = repo_id\n self.git_repo = None\n __class__.git_repo_dao = GitRepo\n \n def load_repo(self):\n self.git_repo_entity = __class__.git_repo_dao.get_repo_by_id_and_user_id(self.repo_id, self.user.id) \n ","repo_name":"Grid-LTS/syncmanager","sub_path":"syncmanagerapi/syncmanagerapi/git/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":11893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33785721610","text":"import numpy as np\nimport numba as nb\nimport torch\nimport inspect\nfrom functools import wraps\n\ndef numba_wrapper(cast_args=[], list_args=[], keep_torch=False, ref_arg=None):\n '''\n Function which wraps a *numba* function with some checks on the input\n to make the relevant conversions to numpy where necessary.\n\n Args:\n type_arg (str) : Argument name which determines the data type and device location\n list_args ([str]) : List of arguments which need to be cast to a numba list\n Returns:\n Function\n '''\n def outer(fn):\n @wraps(fn)\n def inner(*args, **kwargs):\n # Convert the positional arguments in args into key:value pairs in kwargs\n keys = list(inspect.signature(fn).parameters.keys())\n for i, val in enumerate(args):\n kwargs[keys[i]] = val\n\n # Extract the default values for the remaining parameters\n for key, val in inspect.signature(fn).parameters.items():\n if key not in kwargs and val.default != inspect.Parameter.empty:\n kwargs[key] = val.default\n\n # If a torch output is request, register the input dtype and device location\n if keep_torch:\n assert ref_arg in kwargs\n dtype, device = None, None\n if isinstance(kwargs[ref_arg], torch.Tensor):\n dtype = kwargs[ref_arg].dtype\n device = kwargs[ref_arg].device\n\n # If the cast data is not a numpy array, cast it\n for arg in cast_args:\n assert arg in kwargs\n if not isinstance(kwargs[arg], np.ndarray):\n assert isinstance(kwargs[arg], torch.Tensor)\n kwargs[arg] = kwargs[arg].detach().cpu().numpy() # For now cast to CPU only\n\n # If there is a reflected list in the input, type it\n for arg in list_args:\n assert arg in kwargs\n kwargs[arg] = nb.typed.List(kwargs[arg])\n\n # Get the output\n ret = fn(**kwargs)\n if keep_torch and dtype:\n if isinstance(ret, np.ndarray):\n ret = torch.tensor(ret, dtype=dtype, device=device)\n elif isinstance(ret, list):\n ret = [torch.tensor(r, dtype=dtype, device=device) for r in ret]\n else:\n raise TypeError('Return type not recognized, cannot cast to torch')\n return ret\n return inner\n return outer\n\n\n@nb.njit(cache=True)\ndef unique_nb(x: nb.int64[:]) -> (nb.int64[:], nb.int64[:]):\n b = np.sort(x.flatten())\n unique = list(b[:1])\n counts = [1 for _ in unique]\n 
for x in b[1:]:\n if x != unique[-1]:\n unique.append(x)\n counts.append(1)\n else:\n counts[-1] += 1\n return unique, counts\n\n\n@nb.njit(cache=True)\ndef submatrix_nb(x:nb.float64[:,:],\n index1: nb.int64[:],\n index2: nb.int64[:]) -> nb.float64[:,:]:\n \"\"\"\n Numba implementation of matrix subsampling\n \"\"\"\n subx = np.empty((len(index1), len(index2)), dtype=x.dtype)\n for i, i1 in enumerate(index1):\n for j, i2 in enumerate(index2):\n subx[i,j] = x[i1,i2]\n return subx\n\n\n@nb.njit(cache=True)\ndef cdist_nb(x1: nb.float64[:,:],\n x2: nb.float64[:,:]) -> nb.float64[:,:]:\n \"\"\"\n Numba implementation of Eucleadian cdist in 3D.\n \"\"\"\n res = np.empty((x1.shape[0], x2.shape[0]), dtype=x1.dtype)\n for i1 in range(x1.shape[0]):\n for i2 in range(x2.shape[0]):\n res[i1,i2] = np.sqrt((x1[i1][0]-x2[i2][0])**2+(x1[i1][1]-x2[i2][1])**2+(x1[i1][2]-x2[i2][2])**2)\n return res\n\n\n@nb.njit(cache=True)\ndef mean_nb(x: nb.float64[:,:],\n axis: nb.int64) -> nb.float64[:]:\n \"\"\"\n Numba implementation of np.mean(x, axis)\n \"\"\"\n assert axis == 0 or axis == 1\n mean = np.empty(x.shape[1-axis], dtype=x.dtype)\n if axis == 0:\n for i in range(len(mean)):\n mean[i] = np.mean(x[:,i])\n else:\n for i in range(len(mean)):\n mean[i] = np.mean(x[i])\n return mean\n\n\n@nb.njit(cache=True)\ndef argmin_nb(x: nb.float64[:,:],\n axis: nb.int64) -> nb.int64[:]:\n \"\"\"\n Numba implementation of np.argmin(x, axis)\n \"\"\"\n assert axis == 0 or axis == 1\n argmin = np.empty(x.shape[1-axis], dtype=np.int64)\n if axis == 0:\n for i in range(len(argmin)):\n argmin[i] = np.argmin(x[:,i])\n else:\n for i in range(len(argmin)):\n argmin[i] = np.argmin(x[i])\n return argmin\n\n\n@nb.njit(cache=True)\ndef argmax_nb(x: nb.float64[:,:],\n axis: nb.int64) -> nb.int64[:]:\n \"\"\"\n Numba implementation of np.argmax(x, axis)\n \"\"\"\n assert axis == 0 or axis == 1\n argmax = np.empty(x.shape[1-axis], dtype=np.int64)\n if axis == 0:\n for i in range(len(argmax)):\n argmax[i] = np.argmax(x[:,i])\n else:\n for i in range(len(argmax)):\n argmax[i] = np.argmax(x[i])\n return argmax\n\n\n@nb.njit(cache=True)\ndef all_nb(x: nb.float64[:,:],\n axis: nb.int64) -> nb.int64[:]:\n \"\"\"\n Numba implementation of np.all(x, axis)\n \"\"\"\n assert axis == 0 or axis == 1\n all = np.empty(x.shape[1-axis], dtype=np.bool_)\n if axis == 0:\n for i in range(len(all)):\n all[i] = np.all(x[:,i])\n else:\n for i in range(len(all)):\n all[i] = np.all(x[i])\n return all\n\n\n@nb.njit(cache=True)\ndef softmax_nb(x: nb.float64[:,:],\n axis: nb.int64) -> nb.float64[:,:]:\n assert axis == 0 or axis == 1\n exps = np.exp(x)\n if axis == 0:\n return exps/np.sum(exps,axis=0)\n else:\n return exps/np.sum(exps,axis=1).reshape(-1,1)\n\n\n@nb.njit(cache=True)\ndef log_loss_nb(x1: nb.boolean[:], x2: nb.float64[:]) -> nb.float64:\n if len(x1) > 0:\n return -(np.sum(np.log(x2[x1])) + np.sum(np.log(1.-x2[~x1])))/len(x1)\n else:\n return 0.","repo_name":"andriiknu/lartpc_mlreco3d_old","sub_path":"mlreco/utils/numba.py","file_name":"numba.py","file_ext":"py","file_size_in_byte":6019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74880989703","text":"from produto import produto\nfrom datetime import date\n\nclass limpeza(produto):\n \n def __init__(self,id,nome,preco,peso,quantidade,marca,ano,mes,dia,data_aquisicao):\n super().__init__(id,nome,preco,peso,quantidade,marca,ano,mes,dia,data_aquisicao)\n self.type = \"Limpeza\"\n\n def dados(self, limpezas):\n print(\"\\n\"*30)\n 
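# push the previous screen contents out of view before listing every item\n        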
print(\"LIMPEZA: \")\n for limpeza in limpezas:\n print(\"NOME: {}\".format(limpeza.nome))\n print(\"ID: {}\".format(limpeza.id))\n print(\"PREÇO: {}\".format(limpeza.preco))\n print(\"PESO: {}\".format(limpeza.peso))\n print(\"QUANTIDADE: {}\".format(limpeza.quantidade))\n print(\"MARCA: {}\".format(limpeza.marca))\n print(\"VALIDADE: {}\".format(limpeza.validade.strftime('%d/%m/%Y')))\n print()\n \n freeze = input(\"ENTER para continuar\")\n\n def validade(self,limpezas):\n data_hoje = date.today()\n print(\"\\n\"*30)\n print(\"Os seguintes produtos de limpeza apresentam data de validade proxima: \")\n\n for limpeza in limpezas:\n if((limpeza.validade - data_hoje).days <= 30):\n print(\"NOME: {}\".format(limpeza.nome))\n print(\"ID: {}\".format(limpeza.id))\n print()\n freeze = input(\"ENTER para continuar\")\n \n def remover(self,limpezas,id):\n for limpeza in limpezas:\n if(limpeza.id == id):\n print(\"Produto de limpeza {} removido.\".format(limpeza.nome))\n limpezas.remove(limpeza)\n freeze = input(\"ENTER para continuar\")\n return True\n \n return False\n\n def buscar(self,limpezas,id):\n for limpeza in limpezas:\n if(limpeza.id == id):\n limpeza.Alterar_dado()\n return True\n \n return False","repo_name":"tuliocl/Minicurso-python---POO","sub_path":"limpeza.py","file_name":"limpeza.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14023238488","text":"import logging\nfrom logging.handlers import TimedRotatingFileHandler\n\n\"\"\"Creates/gets a logger and changes it so it shows all logs at the info level and formats it nicely\"\"\"\ndef setup_logger(name: str) -> logging.Logger:\n logger = logging.getLogger(name)\n logger.setLevel(logging.INFO)\n handler =TimedRotatingFileHandler(\n filename=\"logs/discord.log\",\n encoding=\"utf-8\",\n interval=1,\n when=\"D\",\n backupCount=5\n )\n dt_fmt = \"%Y-%m-%d %H:%M:%S\"\n formatter = logging.Formatter(f'[{{asctime}}] [{{levelname}}] {{name}}: {{message}}', dt_fmt, style='{')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n \n return logger","repo_name":"grqphical07/stock_bot","sub_path":"stock_bot/logging.py","file_name":"logging.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6588565376","text":"import cv2 as cv\nimport os\n\nimport numpy as np\n\nvideo_format = [\".avi\"]\nimage_format = [\".png\"]\n\n\n# Original\ntest_original_path = \"Test_images/Original\"\n# imageenes te pruebas\ntest_input_path = \"Test_images\"\n# Imagenes preporcesadas\ntest_out_pre_path = \"Test_images/Output/Pre\"\n# Resultados de la red\ntest_net_path = \"Test_images/Output/Net\"\n# Resultados de SR\ntest_SR_path = \"Test_images/Final\"\n# Mediciones\ntest_metric_path = \"Test_images/Final/Medidas\"\n\ndef image_load(path, name) -> np.array:\n image = cv.imread(os.path.join(path, name))\n return image\n\n\ndef video_load(path, function=None, range_frame=None):\n cap = cv.VideoCapture(path)\n totalFrames = cap.get(cv.CAP_PROP_FRAME_COUNT)\n if range_frame is None:\n range_frame = (0, totalFrames)\n\n image_list = []\n for frame in range(range_frame[0], range_frame[1]):\n cap.set(1, frame)\n _, image = cap.read()\n if function is not None:\n image = function(image)\n image_list.append(image)\n\n return image_list\n\n\ndef _aux_load(path, **params) -> list or dict:\n image_list = []\n image_dict = {} # En caso de ser un video.\n function 
= params.get(\"function\", None)\n name_data_list = os.listdir(path)\n for name in name_data_list:\n aux_path = os.path.join(path, name)\n data_format = os.path.splitext(name)[-1]\n if data_format in image_format:\n image_list.append(image_load(aux_path, function=function))\n elif data_format in video_format:\n range_frame = params.get(\"range_frame\", None)\n name = os.path.splitext(name)[0]\n image_dict[\"/{}\".format(name)] = video_load(aux_path, function=function, range_frame=range_frame)\n return []\n\n\ndef load_data(load_path, load_sub_path: list = None, **params) -> dict:\n data_dict = {}\n if load_sub_path is not None:\n for sub_path in load_sub_path:\n path = os.path.join(load_path, sub_path)\n data_dict[\"/{}\".format(sub_path)] = _aux_load(path, **params)\n else:\n name = os.path.split(load_path)[-1]\n data_dict[name] = _aux_load(load_path, **params)\n\n\ndef save(path, name, image):\n if not os.path.exists(path):\n os.makedirs(path)\n\n cv.imwrite(\"{}/{}.png\".format(path, name), image)\n","repo_name":"Ignacio2M/TFG","sub_path":"Utils/file_tools.py","file_name":"file_tools.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27012904165","text":"from django.contrib import admin\r\nfrom .models import *\r\n# 修改title和header\r\nadmin.site.site_title = '我的音乐后台管理系统'\r\nadmin.site.site_header = '我的音乐'\r\n\r\n@admin.register(Label)\r\nclass LabelAdmin(admin.ModelAdmin):\r\n # 设置模型字段,用于Admin后台数据的表头设置\r\n list_display = ['label_id', 'label_name']\r\n # 设置可搜索的字段并在Admin后台数据生成搜索框,如有外键应使用双下划线连接两个模型的字段\r\n search_fields = ['label_name']\r\n # 设置排序方式\r\n ordering = ['label_id']\r\n\r\n@admin.register(Song)\r\nclass SongAdmin(admin.ModelAdmin):\r\n # 设置模型字段,用于Admin后台数据的表头设置\r\n list_display = ['song_id','song_name','song_singer','song_album','song_languages','song_release']\r\n # 设置可搜索的字段并在Admin后台数据生成搜索框,如有外键应使用双下划线连接两个模型的字段\r\n search_fields = ['song_name','song_singer','song_album','song_languages']\r\n # 设置过滤器,在后台数据的右侧生成导航栏,如有外键应使用双下划线连接两个模型的字段\r\n list_filter = ['song_singer','song_album','song_languages']\r\n # 设置排序方式\r\n ordering = ['song_id']\r\n\r\n@admin.register(Dynamic)\r\nclass DynamicAdmin(admin.ModelAdmin):\r\n # 设置模型字段,用于Admin后台数据的表头设置\r\n list_display = ['dynamic_id','song','dynamic_plays','dynamic_search','dynamic_down']\r\n # 设置可搜索的字段并在Admin后台数据生成搜索框,如有外键应使用双下划线连接两个模型的字段\r\n search_fields = ['song']\r\n # 设置过滤器,在后台数据的右侧生成导航栏,如有外键应使用双下划线连接两个模型的字段\r\n list_filter = ['dynamic_plays','dynamic_search','dynamic_down']\r\n # 设置排序方式\r\n ordering = ['dynamic_id']\r\n\r\n@admin.register(Comment)\r\nclass CommentAdmin(admin.ModelAdmin):\r\n # 设置模型字段,用于Admin后台数据的表头设置\r\n list_display = ['comment_id','comment_text','comment_user','song','comment_date']\r\n # 设置可搜索的字段并在Admin后台数据生成搜索框,如有外键应使用双下划线连接两个模型的字段\r\n search_fields = ['comment_user','song','comment_date']\r\n # 设置过滤器,在后台数据的右侧生成导航栏,如有外键应使用双下划线连接两个模型的字段\r\n list_filter = ['song','comment_date']\r\n # 设置排序方式\r\n ordering = ['comment_id']","repo_name":"Rockyzsu/CodePool","sub_path":"玩转Django2源代码/第11章/music/index/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2552,"program_lang":"python","lang":"zh","doc_type":"code","stars":19,"dataset":"github-code","pt":"81"} +{"seq_id":"24769904078","text":"n = int(input())\nlen_n = len(str(n))\n\nm = n - len_n * 9\nif m < 1:\n m = 1\n\nfor i in range(m, n):\n tmp = i\n tmpArr = []\n for _ in range(len_n):\n tmpArr.append(tmp % 10)\n tmp //= 10\n \n if i + sum(tmpArr) == n:\n 
print(i)\n        break\n\nelse:\n    print(0)","repo_name":"sleepyMS/-","sub_path":"2231.py","file_name":"2231.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"25600611043","text":"\"\"\"\ninterface.py\n\"\"\"\nimport os\n\nimport numpy as np\nimport pandas as pd\n\nimport config\nimport src.algorithms.integrity\nimport src.functions.directories\nimport src.functions.streams\n\n\nclass Interface:\n    \"\"\"\n    Class Interface\n    \"\"\"\n\n    def __init__(self):\n        \"\"\"\n        The constructor\n        \"\"\"\n\n        self.__years = config.Config().years\n\n        # Streams\n        self.__streams = src.functions.streams.Streams()\n\n        # The water integrity data, and hydrological data, warehouse paths\n        self.__segments = ['integrity']\n        self.__pathstr = os.path.join(os.getcwd(), 'warehouse', '{segment}', '{year}')\n\n    def __directories(self):\n        \"\"\"\n        Prepare directories\n\n        :return:\n        \"\"\"\n\n        directories = src.functions.directories.Directories()\n\n        [directories.cleanup(self.__pathstr.format(segment=segment, year=year))\n         for segment in self.__segments for year in self.__years]\n        [directories.create(self.__pathstr.format(segment=segment, year=year))\n         for segment in self.__segments for year in self.__years]\n\n    def __areas(self) -> np.ndarray:\n        \"\"\"\n\n        :return:\n        \"\"\"\n\n        filepath = os.path.join(os.getcwd(), 'warehouse', 'references', 'environment_agency_area.csv')\n        frame = self.__streams.read(uri=filepath, usecols=['area_id'])\n        areas = frame['area_id'].array\n\n        return areas[~areas.isin(['R'])]\n\n    def __sampled_material_types(self) -> pd.DataFrame:\n        \"\"\"\n\n        :return:\n        \"\"\"\n\n        filepath = os.path.join(os.getcwd(), 'warehouse', 'references', 'sampled_material_types.csv')\n        return self.__streams.read(uri=filepath,\n                                   usecols=['sampled_material_type_id', 'sampled_material_type_desc'])\n\n    def __purposes(self) -> pd.DataFrame:\n        \"\"\"\n\n        :return:\n        \"\"\"\n\n        filepath = os.path.join(os.getcwd(), 'warehouse', 'references', 'purposes.csv')\n        return self.__streams.read(uri=filepath,\n                                   usecols=['purpose_id', 'purpose_desc'])\n\n    def exc(self):\n        \"\"\"\n\n        :return:\n        \"\"\"\n\n        # Prepare directories\n        self.__directories()\n\n        # Water integrity measures\n        src.algorithms.integrity.Integrity(\n            sampled_material_types=self.__sampled_material_types(), purposes=self.__purposes()) \\\n            .exc(years=self.__years, areas=self.__areas())\n","repo_name":"thirdreading/water","sub_path":"src/algorithms/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":2435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"13922317677","text":"# Insertion sort: sorts by taking each element in turn and inserting it at its proper position.\n\narray = [7, 5, 9, 0, 3, 1, 6, 2, 4, 8]\n\nfor i in range(1, len(array)): # from the second element onward\n    for j in range(i, 0, -1): # j walks down from i to 1 to find the insertion position\n        if array[j] < array[j - 1]:\n            array[j], array[j - 1] = array[j - 1], array[j] # smaller than its left neighbour: swap\n        else: # insertion is complete\n            break # move on to the next element\n\nprint(array)\n\n
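# worked trace of the first passes: [7, 5, 9, ...] -> [5, 7, 9, ...] -> unchanged\n# (9 is already in place); insertion sort costs O(n^2) comparisons in the worst\n# case but only O(n) when the input is nearly sorted.\n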
\n'''\n// Insertion sort in C\n\n#include \n#include \n\nvoid insertion(int a[], int n)\n{\n    int i, j;\n    for(i = 1; i < n; i++){ // from the second element onward\n        int tmp = a[i]; // stash the element to insert in tmp (if not using a swap function)\n        for(j = i; j > 0 && a[j - 1] > tmp; j--) // j walks down from i to 1 to find the insertion position\n            a[j] = a[j - 1]; // keep shifting larger elements one slot to the right\n        a[j] = tmp; // insertion complete: assign the saved tmp to a[j] once the right position is reached\n    }\n}\n\n'''\n","repo_name":"kyj91032/mainalgorithm","sub_path":"insertion sort.py","file_name":"insertion sort.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"17764525373","text":"from http import HTTPStatus\nfrom uuid import uuid4\n\nimport pytest\nfrom pydantic import AnyHttpUrl\nfrom tests.functional.settings import test_settings\nfrom tests.functional.test_data import PERSONS_DATA\nfrom tests.functional.utils import PersonTest\n\npytestmark = pytest.mark.asyncio\n\nPERSONS_URL: AnyHttpUrl = test_settings.service_dsn + '/api/v1/persons'\n\nPERSONS_SHORT: list[dict] = [PersonTest(**row).dict() for row in PERSONS_DATA['persons']]\n\n\nPERSONS_SORTED_BY_FULL_NAME_ASC: list[dict] = sorted(PERSONS_SHORT, key=lambda x: x['full_name'])\nPERSONS_SORTED_BY_FULL_NAME_DESC: list[dict] = sorted(PERSONS_SHORT, key=lambda x: x['full_name'], reverse=True)\n\n\n@pytest.mark.parametrize(\n    'query_data, expected_answer',\n    [\n        (\n            {'person_id': PERSONS_DATA['persons'][0]['id']},\n            {'status': HTTPStatus.OK, 'body': PERSONS_DATA['persons'][0]},\n        ),\n        ({'person_id': str(uuid4())}, {'status': HTTPStatus.NOT_FOUND, 'body': {'detail': 'not found'}}),\n        (\n            {'person_id': 'non-existent'},\n            {\n                'status': HTTPStatus.UNPROCESSABLE_ENTITY,\n                'body': {\n                    'detail': [\n                        {'loc': ['path', 'person_id'], 'msg': 'value is not a valid uuid', 'type': 'type_error.uuid'}\n                    ]\n                },\n            },\n        ),\n    ],\n)\nasync def test_person_details(make_get_request, redis_clean_data, query_data, expected_answer):\n    \"\"\"Test the person details API endpoint.\"\"\"\n\n    body, status = await make_get_request(\n        url='{}/<{}:UUID>/'.format(PERSONS_URL, query_data['person_id']), query_data=query_data\n    )\n\n    assert status == expected_answer['status']\n    assert body == expected_answer['body']\n\n\n@pytest.mark.parametrize(\n    'query_data, expected_answer',\n    [\n        (\n            {},\n            {'status': HTTPStatus.OK, 'body': PERSONS_SORTED_BY_FULL_NAME_ASC[:50], 'length': 50},\n        ),\n        ({'sort': 'full_name'}, {'status': HTTPStatus.OK, 'body': PERSONS_SORTED_BY_FULL_NAME_ASC[:50], 'length': 50}),\n        (\n            {'sort': '-full_name'},\n            {'status': HTTPStatus.OK, 'body': 
PERSONS_SORTED_BY_FULL_NAME_DESC[:50], 'length': 50},\n ),\n ({'sort': 'non-existent'}, {'status': HTTPStatus.UNPROCESSABLE_ENTITY, 'body': [], 'length': 0}),\n ({'page': 5}, {'status': HTTPStatus.OK, 'body': [], 'length': 0}),\n (\n {'page': 1, 'page_size': 2},\n {'status': HTTPStatus.OK, 'body': PERSONS_SORTED_BY_FULL_NAME_ASC[:2], 'length': 2},\n ),\n ],\n)\nasync def test_persons_search(make_get_request, redis_clean_data, query_data, expected_answer):\n \"\"\"Test the persons search API endpoint.\"\"\"\n\n body, status = await make_get_request(url=f'{PERSONS_URL}/search', query_data=query_data)\n\n assert status == expected_answer['status']\n assert body.get('persons', []) == expected_answer['body']\n assert len(body.get('persons', [])) <= expected_answer['length']\n","repo_name":"TRUEVORO/Async_API_sprint_2","sub_path":"fastapi-solution/tests/functional/src/api/test_persons.py","file_name":"test_persons.py","file_ext":"py","file_size_in_byte":4404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6850151827","text":"from datetime import datetime, date, time, timedelta\nfrom gmail import GMail, Message\nimport myfitnesspal\nimport math\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nuname = ''\npw = ''\ntoday = date.today()\n\nclient = myfitnesspal.Client(uname, pw)\n\ntodays_mfp = client.get_date(today).totals\ngoals = client.get_date(today).goals\n\nemail_str = f\"Ivis stats for {today}\\n\"\nemail_str += f\"Cals {todays_mfp['calories']}/{goals['calories']}\\n\"\nemail_str += f\"Carbs {todays_mfp['carbohydrates']}/{goals['carbohydrates']}\\n\"\nemail_str += f\"Protein {todays_mfp['protein']}/{goals['protein']}\\n\"\nemail_str += f\"Fat {todays_mfp['fat']}/{goals['fat']}\\n\"\nprint(email_str)\n\nstart_day = date(2019,2,19)\nnumDays = (today - start_day).days + 1\ndate_list = [today - timedelta(days=x) for x in range(0, numDays)]\ndate_list.reverse()\ncals = []\ncolors = []\nfor day in date_list:\n\tstats = client.get_date(day.year, day.month, day.day)\n\t# want to plot the calories from day to day\n\tif 'calories' in stats.totals:\n\t\tdiff = stats.totals['calories'] - goals['calories']\n\t\tif diff > 0:\n\t\t\tcolors.append('r')\n\t\telse:\n\t\t\tcolors.append('g')\n\t\tcals.append(diff)\n\telse:\n\t\tcals.append(0)\n\nxlabels = [f'{date}' for date in date_list]\nindex = range(0, numDays)\ns = pd.Series(cals, index=xlabels)\nplt.xlabel('Date')\nplt.ylabel('Calories')\nplt.xticks(index, xlabels, rotation=30)\nplt.title(\"Calories over Time since 2/19/19\")\ns.plot(kind='bar', color=colors)\nplt.savefig('calories.png')\n\ngmail = GMail('Me ', '')\nmsg = Message('MFP Stats', to='', text=email_str, attachments=['calories.png'])\ngmail.send(msg)\n","repo_name":"tjohnsen8/mfp-data","sub_path":"mfp_slim.py","file_name":"mfp_slim.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16530514947","text":"#!/usr/bin/env python\nimport os\nimport sys\n\nenvironment = os.environ.get('ENVIRONMENT')\nif environment == 'production':\n config = 'django_test_demo.settings.prod'\nelif environment == 'development':\n config = 'django_test_demo.settings.dev'\nelif environment == 'staging':\n config = 'django_test_demo.settings.staging'\nelse:\n config = 'django_test_demo.settings.common'\n\n\ndef main():\n os.environ.setdefault('DJANGO_SETTINGS_MODULE', config)\n try:\n from django.core.management import execute_from_command_line\n 
except ImportError as exc:\n        raise ImportError(\n            \"Couldn't import Django. Are you sure it's installed and \"\n            \"available on your PYTHONPATH environment variable? Did you \"\n            \"forget to activate a virtual environment?\"\n        ) from exc\n    execute_from_command_line(sys.argv)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"Jane-Terziev/django-template","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"25038979394","text":"class MenuItem:\n    def __init__(self, name, water, coffee, cost, milk=0):\n        self.name = name\n        self.cost = cost\n        self.ingredients = {\n            \"water\": water,\n            \"coffee\": coffee,\n            \"milk\": milk,\n        }\n\n    def __str__(self):\n        return self.name\n\n\n\n","repo_name":"SibindiK/python-useful","sub_path":"_04_day1_100/day16/menu_item.py","file_name":"menu_item.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"71907519944","text":"from typing import Any\n\n\nclass Array:\n    def __init__(self):\n        self.max_size = 1\n        self.values = [None for _ in range(self.max_size)]\n        self.size = 0\n        self.sorted = False\n\n    def __len__(self):\n        return self.size\n\n    def sort(self):\n        self.values.sort()\n        self.sorted = True\n\n    def insert(self, index: int, item: Any):\n        if index < 0 or index > self.size:\n            raise IndexError()\n        if self.size == self.max_size:\n            self.max_size = self.max_size * 2\n            new_values = [None for _ in range(self.max_size)]\n            for i, value in enumerate(self.values):\n                new_values[i] = value\n            self.values = new_values\n            del new_values\n        if index == self.size or self.is_empty():\n            self.values[index] = item\n        else:\n            # shift right starting from the end so earlier values are not overwritten\n            for i in range(self.size, index, -1):\n                self.values[i] = self.values[i - 1]\n            self.values[index] = item\n        self.size += 1\n        return self.values\n\n    def is_empty(self):\n        return self.size == 0\n\n    def __contains__(self, item):\n        for i in self.values:\n            if i == item:\n                return True\n        return False\n\n    def __setitem__(self, index, value): # real signature unknown\n        if index < 0 or index > self.size:\n            raise IndexError()\n        self.insert(item=value, index=self.size)\n\n    def __getitem__(self, index):\n        if index < 0 or index > self.size:\n            raise IndexError()\n        return self.values[index]\n\n    def index(self, item):\n        for i, value in enumerate(self.values):\n            if item == value:\n                return i\n        return -1\n\n    def append(self, item):\n        self[self.size] = item\n\n    def show(self):\n        return self.values[:self.size]\n\n    def remove(self, index):\n        if index < 0 or index > self.size:\n            raise IndexError()\n        if index < self.size - 1:\n            for i in range(index, self.size-1):\n                self.values[i] = self.values[i+1]\n        self.values[self.size-1] = None\n        self.size -= 1\n\n    def clear(self):\n        \"\"\" Remove all items from list. \"\"\"\n        for i, value in enumerate(self.values):\n            if value is not None:\n                self.values[i] = None\n\n    def copy(self):\n        \"\"\" Return a shallow copy of the list. \"\"\"\n        new_values = [i for i in self.values]\n        return new_values\n\n    def count(self, item): # real signature unknown\n        \"\"\" Return number of occurrences of value. \"\"\"\n        count = 0\n        for value in self.values:\n            if value == item:\n                count += 1\n        return count\n\n    def extend(self, *args, **kwargs): # real signature unknown\n        \"\"\" Extend list by appending elements from the iterable. 
\"\"\"\n pass\n\n\n def pop(self, index): # real signature unknown\n \"\"\"\n Remove and return item at index (default last).\n\n Raises IndexError if list is empty or index is out of range.\n \"\"\"\n pass\n\n def reverse(self, *args, **kwargs): # real signature unknown\n \"\"\" Reverse *IN PLACE*. \"\"\"\n pass\n\n\nif __name__ == '__main__':\n a = list()\n lst = Array()\n lst.append(1)\n assert lst[0] == 1\n lst.append(3)\n assert lst[1] == 3\n lst.append(5)\n assert lst[2] == 5\n assert lst.show() == [1, 3, 5]\n lst.remove(1)\n assert lst.show() == [1, 5]\n lst.remove(1)\n assert lst.show() == [1]\n lst.remove(0)\n assert lst.show() == []\n","repo_name":"confar/python_challenges","sub_path":"py_list.py","file_name":"py_list.py","file_ext":"py","file_size_in_byte":3534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16452940639","text":"class MediaType:\n AUDIO = \"audio\"\n IMAGE = \"image\"\n ARCHIVE = \"archive\"\n PHOTO = \"photograph\"\n MAP = \"map\"\n TRAVEL = \"travel\"\n VIDEO = \"video\"\n SHEET = \"sheet\"\n OTHER = \"other\"\n MANUSCRIPT = \"manuscript\"\n EBOOK = \"ebook\"\n\n choices = (\n (OTHER, \"אחר\"),\n (AUDIO, \"שיר\"),\n (IMAGE, \"תמונה\"),\n (ARCHIVE, \"פריט ארכיון\"),\n (PHOTO, \"צילום\"),\n (MAP, \"מפה\"),\n (TRAVEL, \"יומן מסע\"),\n (VIDEO, \"קטע וידאו\"),\n (SHEET, \"גליון\"),\n (MANUSCRIPT, \"כתב יד\"),\n (EBOOK, \"ספר אלקטרוני\"),\n )\n\n\n all = {x[0] for x in choices}\n","repo_name":"buzush/MapApp","sub_path":"librarian/consts.py","file_name":"consts.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34303662274","text":"import sys\nimport nltk\nimport nltk.data\nimport nltk.tree\nfrom stat_parser import Parser\nimport re\n\n\nparser = Parser()\n\n\ndef getNodes(parent):\n for node in parent:\n if type(node) is nltk.Tree:\n if not getNodes(node):\n if node.label() == \"VP\":\n # we want to remove some sentences describing\n # environment\n sentence = \" \".join(node.leaves()).lower()\n commands = re.split(\n r';|\\,|\\.|\\>|\\band\\b|\\bor\\b|\\bthen\\b', sentence)\n done = False\n for command in commands:\n if re.match(r'^[a-zA-Z0-9\\;\\,\\.\\-\\*\\:\\'\\\"\\/\\s]{1,80}$',\n command) \\\n and re.search(r'[a-zA-Z]', command):\n tokens = nltk.word_tokenize(command)\n if len(tokens) > 0 and len(tokens) <= 5:\n tagged = nltk.pos_tag(tokens)\n if tagged[0][1] not in \\\n [\"VBZ\", \"VBN\", \"VBD\", \"VBP\", \"VBG\",\n \"MD\", \"NNS\", \"DT\", \"JJ\"]:\n print(command)\n done = True\n return done\n else:\n return True\n return False\n\n\ngameText = sys.argv[1]\ntokenizer = nltk.data.load('tokenizers/punkt/english.pickle')\nwith open(gameText, \"r\") as f:\n for line in f.readlines():\n splitted = line.strip().split(\" \")\n #print(splitted)\n try:\n if '0' <= splitted[0][0] <= '9' and splitted[0][-1] == ':':\n line = \" \".join(splitted[1:])\n except:\n pass\n tokenized = tokenizer.tokenize(line.strip())\n for token in tokenized:\n #print \"T\", token.strip()\n token = token.strip().split()\n for i in range(len(token)-5):\n fragment = \" \".join(token[i:i+5])\n try:\n #print \"F\", fragment\n tree = parser.parse(fragment.strip())\n getNodes(tree)\n except:\n 
pass\n","repo_name":"Kostero/text_rpg_ai","sub_path":"commands/parser/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":2262,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"73080113225","text":"#!/usr/bin/env python\n\nimport os\nimport tweepy\n\nconsumer_key = os.environ.get('TWITTER_API_KEY')\nconsumer_secret = os.environ.get('TWITTER_API_SECRET')\naccess_token = os.environ.get('TWITTER_ACCESS_TOKEN')\naccess_token_secret = os.environ.get('TWITTER_ACCESS_TOKEN_SECRET')\n\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\n\napi = tweepy.API(auth)\n\nDWTS_COUPLES = [\n ['candacecbure', 'MarkBallas'],\n ['Meryl_Davis', 'MaksimC'],\n ['CharlieaWhite', 'SharnaBurgess'],\n ['jamesmaslow', 'PetaMurgatroyd'],\n ['AmyPurdyGurl', 'derekhough']\n]\n\ndef follower_count(screen_name):\n user = api.get_user(screen_name)\n return user.followers_count\n\nfor couple in DWTS_COUPLES:\n couple_followers = 0\n for person in couple:\n followers = follower_count(person)\n couple_followers += followers\n print(\"{0} has {1} followers\".format(person, followers))\n print(\"Couple {0} has {1} followers\".format(couple, couple_followers))\n print(\"\")\n","repo_name":"audreyfeldroy/dwts-analysis","sub_path":"dwts_followers.py","file_name":"dwts_followers.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"20341911211","text":"# 227. Basic Calculator II\n# Medium\n#\n# Implement a basic calculator to evaluate a simple expression string.\n#\n# The expression string contains only non-negative integers, +, -, *, / operators and empty spaces . The integer division should truncate toward zero.\n#\n# Example 1:\n#\n# Input: \"3+2*2\"\n# Output: 7\n# Example 2:\n#\n# Input: \" 3/2 \"\n# Output: 1\n# Example 3:\n#\n# Input: \" 3+5 / 2 \"\n# Output: 5\n\nclass Solution:\n\n def extend(self, char, index, string):\n p = index + 1\n\n while p < len(string) and string[p].isnumeric():\n char += string[p]\n p += 1\n\n index = p - 1\n\n return index, char\n\n def calculate(self, s: str) -> int:\n s = s.replace(\" \", \"\")\n stack = []\n operators = \"\"\n i = 0\n while i < len(s):\n ch = s[i]\n if ch.isnumeric():\n i, ch = self.extend(ch, i, s)\n stack.append(int(ch))\n elif ch in ['+', '-']:\n operators += ch\n elif ch == \"*\":\n i += 1\n next_ch = s[i]\n i, next_ch = self.extend(next_ch, i, s)\n stack.append(stack.pop() * int(next_ch))\n elif ch == \"/\":\n i += 1\n next_ch = s[i]\n i, next_ch = self.extend(next_ch, i, s)\n stack.append(stack.pop() // int(next_ch))\n\n i += 1\n\n if not operators:\n return stack.pop()\n else:\n stack.reverse()\n i = 0\n while i < len(operators):\n ch = operators[i]\n if ch == \"+\":\n stack.append(stack.pop() + stack.pop())\n else:\n stack.append(stack.pop() - stack.pop())\n i += 1\n return stack.pop()\n\n\ns = Solution()\n\n\ntests = [[\"3+2*2\", 7], [\" 3/2 \", 1], [\" 3+5 / 2 \", 5], [\"42\", 42], [\"1*2-3/4+5*6-7*8+9/10\", -24]]\n\nfor i in range (len(tests)):\n ans = tests[i][1]\n value = s.calculate(tests[i][0])\n print(\"result \", ans, value)\n print (ans == value)\n","repo_name":"addherbs/LeetCode","sub_path":"Medium/227. Basic Calculator II.py","file_name":"227. 
Basic Calculator II.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1504828618","text":"'''DESAFIO 17:\nFaça um programa que leia o comprimento do cateto oposto e do cateto adjacente de um triângulo retângulo\ne calcule sua hipotenusa.'''\n\nfrom math import hypot\n\nco = float(input('Qual o valor do cateto oposto: ')) # recebe o cateto oposto\nca = float(input('Qual o valor do cateto adjacente: ')) # recebe o cateto adjacente\nhipot = hypot(co, ca) # calcula a hipotenusa\n\n# resultado\nprint(f'A hipotenusa desse triângulo retângulo é {hipot:.2f}.')\n","repo_name":"AlexJr22/Exercicios-Python","sub_path":"Exercicios/Módulo 1/EX017/EX017.py","file_name":"EX017.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"72807874825","text":"import sys\n\ndiary = {'0': '1110111',\n '1': '0010010',\n '2': '1011101',\n '3': '1011011',\n '4': '0111010',\n '5': '1101011',\n '6': '1101111',\n '7': '1110010',\n '8': '1111111',\n '9': '1111011',\n ' ': '0000000'}\n\n\ndef solution(T: int, arr: list):\n answer = []\n for A, B in arr:\n cnt = 0\n A = str(A)\n B = str(B)\n A = ' ' * (5 - len(A)) + A\n B = ' ' * (5 - len(B)) + B\n\n for i in range(5):\n diary_A = diary[A[i]]\n diary_B = diary[B[i]]\n for j in range(7):\n if diary_A[j] != diary_B[j]:\n cnt += 1\n\n answer.append(cnt)\n return answer\n\n\ndef main():\n T = int(sys.stdin.readline())\n arr = []\n for i in range(T):\n arr.append(list(map(int, sys.stdin.readline().split())))\n\n answer = solution(T, arr)\n for element in answer:\n print(element)\n\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"fineman999/Algorithm","sub_path":"Softeer/Level2/electronic_display.py","file_name":"electronic_display.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17395304046","text":"import bisect, collections, copy, functools, heapq, itertools, math, random, string\nfrom operator import xor\nfrom typing import List, Optional, Tuple\nfrom heapq import heappushpop, heapreplace\n\nfrom sortedcontainers import SortedList, SortedDict, SortedSet\n\nfrom test_tool import ListNode, TreeNode, null, tree2array, parseInput\n\n\"\"\"\nSome hint\n\n10**6 = 1000000\n1 << 20 = 1048576\n10**9 = 1000000000\n1 << 30 = 1073741824\n\"\"\"\n\n\n# https://leetcode.cn/contest/weekly-contest-326/problems/count-the-digits-that-divide-a-number/\nclass Solution:\n def solve(self, num: int) -> int:\n cnt = collections.defaultdict(int)\n x = num\n while x:\n cnt[x % 10] += 1\n x //= 10\n ans = 0\n for k, v in cnt.items():\n if num % k == 0:\n ans += v\n return ans\n\n\n# print(Solution().solve())\n# return\n\n\ntestcase = \"\"\"\nnum = 7\nnum = 121\nnum = 1248\n\"\"\"\n\nobj = Solution()\nfor i, args in enumerate(parseInput(testcase)):\n print(f\"\\nTestcase {i}: {args}\\n\")\n print(obj.solve(*args))\n","repo_name":"bcvi/leetcode","sub_path":"lc_Python/contest/T4.py","file_name":"T4.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"10757750651","text":"from collections import deque\n\n\n# Perform BFS/DFS on graph `g` starting from vertex `v`\n# Input parameters: Graph \"g\"\n# Source vertex \"src\"\n# Destination vertex \"dest\"\n# Number of hops allowed \"m\"\n# 
Output : return number of paths from source to destination\ndef search(g, src, dest, m):\n    # (Your code here)\n    paths = []\n\n    def dfs(curr, path):\n        if curr == dest and len(path) == m:\n            paths.append([x for x in path])\n            return\n        if curr == dest:\n            return\n        for c in g[curr]: # traverse the graph passed in as a parameter, not the module-level one\n            if c not in path:\n                path.append(c)\n                dfs(c, path)\n                path.pop()\n        return\n\n    dfs(src, [src])\n    for path in paths:\n        print(path)\n    return len(paths)\n\n\nif __name__ == '__main__':\n    edges = [\n        (0, 1),\n        (0, 6),\n        (0, 7),\n        (1, 2),\n        (1, 5),\n        (1, 6),\n        (2, 3),\n        (3, 4),\n        (4, 11),\n        (5, 4),\n        (6, 5),\n        (6, 10),\n        (6, 9),\n        (7, 6),\n        (7, 9),\n        (7, 8),\n        (8, 6),\n        (8, 9),\n        (9, 10),\n        (10, 5),\n        (10, 4),\n        (10, 11),\n    ]\n    graph = {}\n    for a, b in edges:\n        if a in graph:\n            graph[a].append(b)\n        else:\n            graph[a] = [b]\n    print(search(graph, 0, 11, m=5))\n    # To Do : Insert List of graph edges \n    # To Do : Construct graph \n    # To Do : Search traversal from the source vertex src\n","repo_name":"abhinand5ai/Fluency","sub_path":"AI/HW3/starter_code_Q1.py","file_name":"starter_code_Q1.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"20244565490","text":"import inspect\nfrom typing import Callable, Iterable\n\nimport pytest\n\nfrom closure_optimizer.utils import (\n    NameGenerator,\n    ast_parameters,\n    get_function_ast,\n    is_inlinable,\n    is_unrollable,\n    map_parameters,\n    rename,\n)\nfrom tests.utils import compare_ast, compare_func_body\n\n\ndef func_params(func: Callable) -> Iterable[inspect.Parameter]:\n    return inspect.signature(func).parameters.values()\n\n\n@pytest.mark.parametrize(\n    \"args, kwargs, partial, expected\",\n    [\n        ((0, 1), {\"d\": 2}, False, {\"__a\": 0, \"b\": 1, \"c\": (), \"d\": 2, \"e\": {}}),\n        ((0, 1), {\"d\": 2}, True, {\"__a\": 0, \"b\": 1, \"d\": 2}),\n        (\n            (0, 1, 2, 3),\n            {\"d\": 4, \"e\": 5, \"f\": 6},\n            False,\n            {\"__a\": 0, \"b\": 1, \"c\": (2, 3), \"d\": 4, \"e\": {\"e\": 5, \"f\": 6}},\n        ),\n        ((0, 1, 2, 3), {\"d\": 4, \"e\": 5, \"f\": 6}, True, {\"__a\": 0, \"b\": 1, \"d\": 4}),\n        ((), {}, True, {}),\n    ],\n)\ndef test_map_parameters(args, kwargs, partial, expected):\n    def func(__a, b, *c, d, **e):\n        ...\n\n    assert map_parameters(func_params(func), args, kwargs, partial=partial) == expected\n\n\n@pytest.mark.parametrize(\n    \"args, kwargs, a, b\", [((), {}, 0, 1), ((2,), {}, 2, 1), ((), {\"b\": 2}, 0, 2)]\n)\ndef test_map_parameters_default(args, kwargs, a, b):\n    def func(a=0, b=1):\n        ...\n\n    assert map_parameters(func_params(func), args, kwargs) == dict(a=a, b=b)\n\n\ndef test_ast_parameters():\n    def f(a, b=0, *c, d, e=0, **f):\n        ...\n\n    def g(a, b=0, *, d, e=0):\n        ...\n\n    for func in (f, g):\n        assert [\n            p.replace(default=0) if p.default is not inspect.Parameter.empty else p\n            for p in ast_parameters(get_function_ast(func))\n        ] == list(inspect.signature(func).parameters.values())\n\n\ndef func1(a, b):\n    c = 0\n    return a + c\n\n\ndef func2(a, b):\n    _closure_optimizer_1 = 0\n    return a + _closure_optimizer_1\n\n\ndef func3(a, b):\n    d = 0\n    return a + d\n\n\ndef func4(a, b):\n    ...\n\n\n@pytest.mark.parametrize(\n    \"other_func, expected\", [(func2, True), (func3, False), (func4, False)]\n)\ndef test_compare_func(other_func, expected, monkeypatch):\n    assert compare_func_body(func1, other_func) == expected\n\n\ndef test_is_pure_for_loop():\n    def for_loops():\n        for _ in ():\n            ...\n        for _ in ():\n            break\n        for _ in ():\n            continue\n        for _ in ():\n            ...\n        else:\n            ...\n\n    pure, with_break, with_continue, with_else = get_function_ast(for_loops).body\n    
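# .body lists the four for statements in source order, matching the unpack above\n    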
assert is_unrollable(pure)\n    assert not is_unrollable(with_break)\n    assert not is_unrollable(with_continue)\n    assert not is_unrollable(with_else)\n\n\ndef test_inlinable():\n    def f():\n        pass\n\n    assert is_inlinable(get_function_ast(f))\n\n    def f():\n        global test_inlinable\n\n    assert not is_inlinable(get_function_ast(f))\n\n    def f():\n        nonlocal f\n\n    assert not is_inlinable(get_function_ast(f))\n\n    def f():\n        for _ in ...:\n            return\n\n    assert not is_inlinable(get_function_ast(f))\n\n    def f():\n        while ...:\n            return\n\n    assert not is_inlinable(get_function_ast(f))\n\n    def f():\n        with ...:\n            return\n\n    assert not is_inlinable(get_function_ast(f))\n\n    def f():\n        if ...:\n            return\n\n    assert not is_inlinable(get_function_ast(f))\n\n    def f():\n        if ...:\n            ...\n        else:\n            return\n\n    assert not is_inlinable(get_function_ast(f))\n\n    def f():\n        if ...:\n            ...\n        else:\n            ...\n        if ...:\n            return\n        else:\n            return\n\n    assert is_inlinable(get_function_ast(f))\n\n\ndef test_rename():\n    captured = ...\n\n    def f(a):\n        b = abs(a)\n\n        def closure(*arg):\n            return arg\n\n        return closure(a, b, captured)\n\n    _6 = ...\n\n    def _1(_2):\n        _3 = abs(_2)\n\n        def _4(*_5):\n            return _5\n\n        return _4(_2, _3, _6)\n\n    renamed = get_function_ast(f)\n    assert rename(renamed, NameGenerator(\"_\").replace) == {\n        \"f\": \"_1\",\n        \"a\": \"_2\",\n        \"b\": \"_3\",\n        \"closure\": \"_4\",\n        \"arg\": \"_5\",\n        \"captured\": \"_6\",\n    }\n    assert compare_ast(renamed, get_function_ast(_1))\n\n    def _1(_2):\n        _3 = abs(_2)\n\n        def _4(*_5):\n            return _5\n\n        return _4(_2, _3, captured)\n\n    renamed = get_function_ast(f)\n    assert rename(renamed, NameGenerator(\"_\").replace, only_declared=True) == {\n        \"f\": \"_1\",\n        \"a\": \"_2\",\n        \"b\": \"_3\",\n        \"closure\": \"_4\",\n        \"arg\": \"_5\",\n    }\n    assert compare_ast(renamed, get_function_ast(_1))\n\n\ndef test_normalization():\n    def f(a, b, c, d, e):\n        if a:\n            return\n        else:\n            a = ...\n        if b:\n            ...\n        elif c:\n            return\n        for _ in d:\n            if e:\n                continue\n            e = ...\n\n    def g(a, b, c, d, e):\n        if a:\n            return\n        else:\n            a = ...\n        if b:\n            ...\n        elif c:\n            return\n        for _ in d:\n            if e:\n                pass\n            else:\n                e = ...\n\n    assert compare_func_body(f, g)\n","repo_name":"wyfo/closure-optimizer","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":5230,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"37711865378","text":"# Determine whether a linked list contains a cycle\nfrom typing import List, Optional\n\n\nclass ListNode:\n    def __init__(self, x):\n        self.val = x\n        self.next = None\n\n\n# # Approach 1: use a set of visited nodes\n# class Solution:\n#     def hasCycle(self, head: ListNode) -> bool:\n#         record = set()\n#         while head:\n#             if head in record:\n#                 return True\n#             else:\n#                 record.add(head)\n#                 head = head.next\n#         return False\n\n#\n# # Approach 2: two pointers, one fast and one slow (Floyd's cycle detection)\n# class Solution:\n#     def hasCycle(self, head: ListNode) -> bool:\n#         slow, fast = head, head\n#         while fast and fast.next:\n#             slow, fast = slow.next, fast.next.next\n#             if slow is fast:\n#                 return True\n#         return False\n\nclass Solution:\n    # def hasCycle(self, head: ListNode) -> bool:\n    #     slow, fast = head, head\n    #     while fast and fast.next:\n    #         slow, fast = slow.next, fast.next.next\n    #         if slow is fast:\n    #             return True\n    #     return False\n\n    def hasCycle(self, head: Optional[ListNode]) -> bool:\n        slow = fast = head\n        while fast and fast.next:\n            slow, fast = slow.next, fast.next.next\n            if slow is fast:\n                return True\n        return False\n\n\n\ndef generate_link(nums: List[int]) -> ListNode:\n    head 
= ListNode(0)\n cur = head\n for num in nums:\n cur.next = ListNode(num)\n cur = cur.next\n return head.next\n\n\nx = generate_link([1, 2, 2, 1])\ns = Solution()\nprint(s.hasCycle(x))\n","repo_name":"BruceHi/leetcode","sub_path":"LinkedList/6hasCycle.py","file_name":"6hasCycle.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"6447873089","text":"#!/usr/bin/env python\n\n\"\"\"challenge4.py: Response to Challenge 4 of Rackspace API Challenge\"\"\"\n\"\"\"redirect stderr to eliminate status messages\"\"\"\n\n__author__ = \"david.wilde@rackspace.com\"\n\nimport pyrax\nimport sys\nimport json\nimport argparse\nimport os\n\npyrax.set_credential_file(\"/home/cdw/.rackspace_cloud_credentials\")\ndns = pyrax.cloud_dns\nsys.stdout.flush()\nsys.stderr.flush()\n\nparser = argparse.ArgumentParser(description='Challenge 4: Create a DNS A record')\nparser.add_argument('fqdn', help='Fully qualified domain name of A record to create')\nparser.add_argument('ip', help='IP address to point the A record at')\nargs = parser.parse_args()\n\n# we need to figure out where our fqdn goes\nfqdn_list = args.fqdn.split('.')\nfqdn_tld = []\ndomain_id = []\n\n# we loop through the domains and split them into reverse order lists\n# then we compare the current tld to the one specified in the arguments\n# and determine the existing domain (if any) to add the fqdn to\nfor domain in dns.list():\n domain_name = domain.name.split('.')\n for i in range(len(fqdn_list) - 1):\n try:\n if domain_name[::-1][i] == fqdn_list[::-1][i]:\n if len(fqdn_tld) <= i:\n fqdn_tld.append(domain_name[::-1][i])\n domain_id = domain.id\n except IndexError:\n sys.stderr.write(\"Index out of range, you probably want to create a sub-domain first.\\n\")\n quit()\n\n#print \"TLD Length: %d FQDN Length: %d Diff: %d\" %(len(fqdn_tld), len(fqdn_list), (len(fqdn_list) - len(fqdn_tld)))\nif len(fqdn_tld) <= 1:\n try:\n raise IndexError\n except:\n sys.stderr.write(\"Index out of range, it doesn't look like you have registered that domain.\\n\")\n quit()\n\ndom = dns.get(domain_id)\n \n# build our A record\nrecord = [{\n \"type\": \"A\",\n \"name\": args.fqdn,\n \"data\": args.ip,\n \"ttl\": 6000,\n \"comment\": \"challenge-4\"\n }]\n \nnew_rec = dom.add_records(record)\n\nsys.stdout.write(json.dumps({new_rec}))\nsys.stdout.flush()","repo_name":"d34dh0r53/APIChallenge","sub_path":"challenge4.py","file_name":"challenge4.py","file_ext":"py","file_size_in_byte":2010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39961832577","text":"from model.users import Users\nfrom model.group import Group\nimport random\n\n\ndef test_add_user_to_group(app, db, orm):\n app.open_home_page()\n new_user = Users(users_name=\"специальный юзер\", users_lastname=\"Никалаев\", home_phone=\"2345678\",\n mobile_phone=\"89655783498\", work_phone=\"812-567-90-89\", phone2=\"812-789-56-37\")\n new_group = Group(group_name=\"test-test\")\n if len(db.get_user_list()) == 0:\n app.user.add_new_user(new_user)\n if len(db.get_group_list()) == 0:\n app.group.init_group(new_group)\n groups = db.get_group_list()\n if orm.all_users_in_groups(groups):\n app.group.init_group(new_group)\n for x in groups:\n users = orm.get_users_not_in_group(x)\n user = random.choice(users)\n app.user.add_to_group(user.id, x.id)\n user_in_group = orm.get_users_in_group(x)\n assert user in user_in_group\n 
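        # only one randomly chosen group is exercised per run, hence the break below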
break\n","repo_name":"annadenils/python_training","sub_path":"test/test_add_user_to_group.py","file_name":"test_add_user_to_group.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"26385948560","text":"'''\nRepetition\nwhile loop\nExecutes an action while a condition is true\nInfinite loop -> when the code never ends\n'''\n\ncondição = True\n\n# while condição:\n#     print(1)\n#     print(2)\n#     print(3)\n#     break\n\nwhile condição:\n    nome = input('What is your name? ')\n    print(f'Your name is {nome.capitalize()}')\n\n","repo_name":"JaymersonFerreira/Curso_Python_Otavio_Miranda","sub_path":"seção_3/aula34.py","file_name":"aula34.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"23060699767","text":"import logging\nimport os.path\nimport random\nfrom collections import OrderedDict\nfrom typing import List, Iterable, Tuple, BinaryIO, Union\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport skimage\nimport skimage.color\nimport skimage.feature\nimport skimage.io\nimport skimage.transform\nimport sklearn.neighbors\nimport torch\nimport torch.nn.functional as F\nfrom numpy.typing import ArrayLike\nfrom torch import nn\nfrom torchvision.models import resnet18, wide_resnet50_2\n# device setup\nfrom typing.io import IO\n\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device('cuda' if use_cuda else 'cpu')\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\n\nclass MatchMeMaker(nn.Module):\n    arch = 'resnet18'\n    t_d = 448\n    d = 100\n    max_feature_dist = 0.85\n\n    def __init__(self):\n        super().__init__()\n        self.feature_extractor = self.init_feature_extractor()\n        self.feature_extractor.to(device)\n        self.feature_extractor.eval()\n\n        random.seed(1024)\n        torch.manual_seed(1024)\n        if use_cuda:\n            torch.cuda.manual_seed_all(1024)\n\n        self.dim_idxs = torch.tensor(random.sample(range(0, self.t_d), self.d))\n\n        # set model's intermediate outputs\n        self.outputs = []\n\n        def hook(module, input, output):\n            self.outputs.append(output)\n\n        self.feature_extractor.layer1[-1].register_forward_hook(hook)\n        self.feature_extractor.layer2[-1].register_forward_hook(hook)\n        self.feature_extractor.layer3[-1].register_forward_hook(hook)\n\n        self.images = []\n        self.lengths = []\n        self.borders = np.array(tuple(), dtype=int)\n        self.embeddings = []\n        self.keypoints = []\n        self.kd_tree = None\n\n    def save(self, file: Union[str, os.PathLike, BinaryIO, IO[bytes]]):\n        state = {\n            'feature_extractor': self.feature_extractor.state_dict(),\n            'images': self.images,\n            'lengths': self.lengths,\n            'borders': self.borders,\n            'embeddings': self.embeddings,\n            'keypoints': self.keypoints,\n            'kd_tree': self.kd_tree,\n        }\n        torch.save(state, file)\n\n    def load_state_dict(self, state_dict: 'OrderedDict[str, torch.Tensor]',\n                        strict: bool = True):\n        self.feature_extractor.load_state_dict(state_dict['feature_extractor'])\n        self.images = state_dict['images']\n        self.lengths = state_dict['lengths']\n        self.borders = state_dict['borders']\n        self.embeddings = state_dict['embeddings']\n        self.keypoints = state_dict['keypoints']\n        self.kd_tree = state_dict['kd_tree']\n\n    def rebuild_kd_tree(self):\n        self.borders = np.cumsum(self.lengths)\n        self.kd_tree = sklearn.neighbors.KDTree(np.concatenate(self.embeddings))\n\n    def init_feature_extractor(self):\n        if self.arch == 'resnet18':\n            return resnet18(pretrained=True, 
progress=True)\n elif self.arch == 'wide_resnet50_2':\n return wide_resnet50_2(pretrained=True, progress=True)\n\n @staticmethod\n def embedding_concat(x, y):\n B, C1, H1, W1 = x.size()\n _, C2, H2, W2 = y.size()\n s = int(H1 / H2)\n x = F.unfold(x, kernel_size=s, dilation=1, stride=s)\n x = x.view(B, C1, -1, H2, W2)\n z = torch.zeros(B, C1 + C2, x.size(2), H2, W2)\n for i in range(x.size(2)):\n z[:, :, i, :, :] = torch.cat((x[:, :, i, :, :], y), 1)\n z = z.view(B, -1, H2 * W2)\n z = F.fold(z, kernel_size=s, output_size=(H1, W1), stride=s)\n\n return z\n\n def extract_all_features(self, images: np.array) -> np.array:\n \"\"\"\n Extracts the feature vector from all images.\n\n :param images: the images as (B, H, W, C) numpy array\n :return: the vectors as (B/2*H/2, d) array\n \"\"\"\n outputs = OrderedDict([('layer1', []), ('layer2', []), ('layer3', [])])\n # forward pass\n image = torch.as_tensor(images).permute((0, 3, 1, 2)).to(dtype=torch.float)\n self.outputs = []\n with torch.no_grad():\n _ = self.feature_extractor(image.to(next(self.feature_extractor.parameters()).device))\n # get intermediate layer outputs\n for k, v in zip(outputs.keys(), self.outputs):\n outputs[k].append(v.cpu().detach())\n\n # Merge features into feature vector\n train_outputs_concat = OrderedDict()\n for k, v in outputs.items():\n train_outputs_concat[k] = torch.cat(v, 0)\n embedding_vectors = train_outputs_concat['layer1']\n for layer_name in ['layer2', 'layer3']:\n embedding_vectors = self.embedding_concat(embedding_vectors, train_outputs_concat[layer_name])\n\n # randomly select d dimension\n embedding_vectors = torch.index_select(embedding_vectors, 1, self.dim_idxs)\n return embedding_vectors.numpy()\n\n def get_interesting_embeddings(self, images: ArrayLike) -> Tuple[List[np.array], List[np.array]]:\n \"\"\"\n Gets the embeddings at interest points for each image in the input.\n\n :param images: the images as (B, H, W, C) numpy array\n :return: a tuple of\n list of embeddings with shape (N_i, d) for i = 1..B\n list of keypoints with shape (N_i, 2) for i = 1..B\n \"\"\"\n images = np.asarray(images)\n assert len(images.shape) == 4\n keypoints = []\n embeddings = []\n for image in images:\n logger.debug('Finding harris corners.')\n harris_response = skimage.feature.corner_harris(skimage.color.rgb2gray(image), method='k', k=0.05,\n eps=1e-06, sigma=1)\n interest_points = skimage.feature.corner_peaks(harris_response, threshold_rel=0.001, min_distance=5)\n logger.debug('Extracting all embeddings.')\n all_embeddings = self.extract_all_features([image])\n interesting_embeddings = all_embeddings[:, :, interest_points[:, 0] // 4, interest_points[:, 1] // 4]\n # Get features at interest points\n interesting_embeddings = interesting_embeddings.transpose((0, 2, 1))[0]\n keypoints.append(interest_points)\n embeddings.append(interesting_embeddings)\n return keypoints, embeddings\n\n def add_images_to_db(self, images: Iterable[np.array]):\n for i, image in enumerate(images):\n self.images.append(image)\n logger.info(\"Getting interest points of {i}th image.\")\n keypoints, embeddings = self.get_interesting_embeddings([image])\n self.embeddings += embeddings\n self.keypoints += keypoints\n self.lengths.append([len(im_embeddings) for im_embeddings in embeddings])\n logger.info(\"Rebuilding KD Tree\")\n self.rebuild_kd_tree()\n\n def count_matches_per_image(self, indices):\n return np.unique(np.digitize(indices, self.borders), return_counts=True)\n\n def query_images(self, image: np.ndarray) -> Tuple[np.ndarray, np.ndarray, 
np.ndarray, np.ndarray]:\n \"\"\"\n Queries the database to find the best image overlap.\n Returns keypoints and match indices.\n\n :param image: the image as (H, W, C) numpy array, for which a match should be searched\n :return: tuple of:\n matched image as array of shape (H, W, C)\n keypoints in queried image as array of shape (N_q, 2)\n keypoints in queried image as array of shape (N_p, 2)\n match indices as uint-array of shape (M, 2)\n \"\"\"\n logger.info(\"Extracting features from test image.\")\n query_keypoints, query_features = self.get_interesting_embeddings([image])\n query_keypoints, query_features = query_keypoints[0], query_features[0]\n logger.info(\"Querying KD-Tree.\")\n dists, indices = self.kd_tree.query(query_features, k=1, return_distance=True)\n dists, indices = dists.squeeze(), indices.squeeze()\n logger.info(\"Searching best image match.\")\n image_idx, counts = self.count_matches_per_image(indices)\n image_idx = counts.argmax()\n # Select indices from matched images\n lower = 0 if image_idx == 0 else self.borders[image_idx - 1]\n upper = self.borders[image_idx]\n # Only take keypoints of matched image\n matches_mask = np.logical_and(lower <= indices, indices < upper)\n query_indices = np.argwhere(matches_mask).squeeze()\n match_indices = indices[matches_mask] - self.borders[image_idx]\n dists = dists[matches_mask]\n plt.hist(dists, bins=100)\n plt.show()\n # Filter out matches with high distance\n dists_mask = dists < self.max_feature_dist\n query_indices = query_indices[dists_mask]\n match_indices = match_indices[dists_mask]\n matches12 = np.stack([query_indices, match_indices], axis=1)\n return self.images[image_idx], query_keypoints, self.keypoints[image_idx], matches12\n\n\ndef make_divideable(image, div_factor=16):\n H, W = image.shape[:2]\n W = (W // div_factor) * div_factor\n H = (H // div_factor) * div_factor\n return skimage.transform.resize(image, output_shape=(H, W))\n\n\ndef prepare_image(path):\n image = skimage.io.imread(path)\n image = skimage.transform.rescale(image, [0.25, 0.25, 1.])\n image = make_divideable(image)\n return image\n\n\ndef train() -> MatchMeMaker:\n train_path = os.path.expanduser('~/data/photogrammetry/Uttenhofen Bach_inputs/DJI_0054.JPG')\n\n use_saved = False\n save_path = './model.pkl'\n if use_saved:\n mmm = MatchMeMaker()\n mmm.load_state_dict(torch.load(save_path))\n else:\n mmm = MatchMeMaker()\n mmm.add_images_to_db([prepare_image(train_path)])\n mmm.save(save_path)\n return mmm\n\n\ndef main():\n test_path = os.path.expanduser('~/data/photogrammetry/Uttenhofen Bach_inputs/DJI_0058.JPG')\n mmm = train()\n\n test_image = prepare_image(test_path)\n match_image, keypoints1, keypoints2, matches12 = mmm.query_images(test_image)\n matched_keypoints_1 = keypoints1[matches12[:, 0]]\n matched_keypoints_2 = keypoints2[matches12[:, 1]]\n num_matches = matched_keypoints_1.shape[0]\n aligned_matches = np.asarray([np.arange(num_matches), np.arange(num_matches)]).T\n #aligned_matches = np.array([])\n aligned_matches = aligned_matches[:20]\n\n plt.imshow(test_image)\n plt.show()\n plt.imshow(match_image)\n plt.show()\n\n fig, axes = plt.subplots()\n # skimage.feature.plot_matches(axes, test_image, match_image, keypoints1, keypoints2, matches12)\n skimage.feature.plot_matches(axes, test_image, match_image, matched_keypoints_1, matched_keypoints_2, aligned_matches)\n fig.show()\n print(\"finish\")\n\n\nif __name__ == '__main__':\n 
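    # demo: train() builds the keypoint-feature database from one image; main() queries a second image and plots the matched keypoints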
main()","repo_name":"jfehre/MatchMe","sub_path":"utils/cnn_matching.py","file_name":"cnn_matching.py","file_ext":"py","file_size_in_byte":10785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24420924994","text":"import model \nimport csv\n\ndef load_users(session):\n # use u.user\n f1 = csv.reader(open(\"./seed_data/u.user\"), delimiter=\"|\")\n\n for row in f1:\n\n user = model.User()\n user.id = int(row[0])\n user.age = int(row[1])\n user.gender = row[2]\n user.occupation = row[3]\n user.zipcode = row[4]\n\n\n session.add(user)\n\n session.commit()\n\ndef load_movies(session):\n # use u.item\n\n f2 = csv.reader(open(\"./seed_data/u.item\"), delimiter=\"|\") \n # movie id | movie title | release date |\n\n for row in f2:\n\n movie = model.Movie()\n movie.id = int(row[0])\n movie_title = row[1]\n movie.movie_title = movie_title.decode('latin-1')\n movie.release_date = row[2]\n\n session.add(movie) \n\n session.commit()\n\n\ndef load_ratings(session):\n # use u.data\n # user id | item id | rating | timestamp. \n f3 = csv.reader(open(\"./seed_data/u.data\"), delimiter=\"\\t\")\n\n for row in f3:\n\n rating = model.Rating()\n rating.user_id = int(row[0])\n rating.movie_id = int(row[1])\n rating.rating = int(row[2])\n rating.timestamp = int(row[3])\n\n session.add(rating) \n \n session.commit()\n\n\ndef main():\n # You'll call each of the load_* functions with the session as an argument\n \n load_users(session)\n load_movies(session)\n load_ratings(session)\n\n\nif __name__ == \"__main__\":\n session = model.connect()\n main()\n \n\n \n\n\n","repo_name":"NoraLou/Movie_Rating_App","sub_path":"seed.py","file_name":"seed.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12368264497","text":"import conductor.common.prometheus_metrics as PC\nfrom conductor.i18n import _LE, _LI\n# Conductor imports\nfrom conductor.solver.optimizer.constraints import constraint\n# Third-party library imports\nfrom oslo_log import log\n\nLOG = log.getLogger(__name__)\n\n\nclass HPA(constraint.Constraint):\n def __init__(self, _name, _type, _demand_list, _priority=0,\n _properties=None):\n constraint.Constraint.__init__(\n self, _name, _type, _demand_list, _priority)\n self.properties = _properties\n\n def solve(self, _decision_path, _candidate_list, _request):\n '''\n Solver for HPA constraint type.\n :param _decision_path: decision tree\n :param _candidate_list: List of candidates\n :param _request: solver request\n :return: candidate_list with hpa features and flavor label mapping\n '''\n # call conductor engine with request parameters\n cei = _request.cei\n demand_name = _decision_path.current_demand.name\n LOG.info(_LI(\"Solving constraint type '{}' for demand - [{}]\").format(\n self.constraint_type, demand_name))\n vm_label_list = self.properties.get('evaluate')\n for vm_demand in vm_label_list:\n id = vm_demand['id']\n type = vm_demand['type']\n directives = vm_demand['directives']\n flavorProperties = vm_demand['flavorProperties']\n response = (cei.get_candidates_with_hpa(id,\n type,\n directives,\n _candidate_list,\n flavorProperties))\n _candidate_list = response\n if not response:\n LOG.error(_LE(\"No matching candidates for HPA exists\").format(\n id))\n\n # Metrics to Prometheus\n PC.HPA_CLOUD_REGION_UNSUCCESSFUL.labels('ONAP', 'N/A',\n 'ALL').inc()\n break\n # No need to continue.\n\n return 
_candidate_list\n","repo_name":"onap/optf-has","sub_path":"conductor/conductor/solver/optimizer/constraints/hpa.py","file_name":"hpa.py","file_ext":"py","file_size_in_byte":2171,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"}
{"seq_id":"29266925196","text":"# The isBadVersion API is already defined for you.\n# @param version, an integer\n# @return an integer\ndef isBadVersion(version):\n    if version > 3:\n        return True\n    else:\n        return False\n\n\nclass Solution:\n    def firstBadVersion(self, n):\n        \"\"\"\n        :type n: int\n        :rtype: int\n        \"\"\"\n\n        def binary_search(start, end):\n            # recurse while the search interval is non-empty\n            if start <= end:\n                # mid = start + (end - 1) // 2\n                mid = (start + end) // 2\n                if isBadVersion(mid):\n                    if not isBadVersion(mid - 1):\n                        return mid\n                    else:\n                        return binary_search(start, mid - 1)\n                else:\n                    return binary_search(mid + 1, end)\n            else:\n                return -1\n\n        return binary_search(1, n)\n\n\nif __name__ == '__main__':\n    n = 4\n\n    s = Solution()\n    print(s.firstBadVersion(n))\n","repo_name":"Zer0xPoint/LeetCode","sub_path":"Top Interview Questions/5.Sorting and Searching/First Bad Version.py","file_name":"First Bad Version.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"36983916500","text":"print(\"\"\"\n****************************************\n\nFactorial Calculator\n\n(Press Q to quit.)\n\n****************************************\"\"\")\n\n\n\nwhile True:\n    sayi = input(\"Please enter the number whose factorial will be calculated: \")\n\n    if(sayi == \"q\"):\n        print(\"Exiting program...\")\n        break\n\n    else:\n        sayi = int(sayi)\n\n        faktoriyel = 1\n\n        for i in range(2,sayi+1):\n            print(\"Factorial\",faktoriyel,\"value at i: \",i)\n            faktoriyel *= i\n\n        print(\"Factorial value: \",faktoriyel)\n","repo_name":"EmrahGK/python-eski","sub_path":"Kodlama Egzersizleri/Bölüm_5/faktoriyel.py","file_name":"faktoriyel.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"10935839498","text":"import requests\nfrom requests_toolbelt.adapters import host_header_ssl\nimport sys\nimport collections.abc\nimport os\nimport errno\nimport uuid\n\nfrom stix_shifter_utils.utils import logger\n\n# This is a simple HTTP client that can be used to access the REST API\n\n\nclass RestApiClient:\n    # cert_verify can be\n    #  True -- do proper signed cert check that is in trust store,\n    #  False -- skip all cert checks,\n    #  or the string content of your self-signed cert required for TLS communication\n    def __init__(self, host, port=None, headers={}, url_modifier_function=None, cert_verify=True, sni=None):\n        self.logger = logger.set_logger(__name__)\n        unique_file_handle = uuid.uuid4()\n        self.server_cert_name = \"/tmp/{0}-server_cert.pem\".format(unique_file_handle)\n        server_ip = host\n        if port is not None:\n            server_ip += \":\" + str(port)\n        self.server_ip = server_ip\n        # sni is none unless we are using a server cert\n        self.sni = None\n\n        if isinstance(cert_verify, bool):\n            # verify certificate non self signed case\n            if cert_verify:\n                self.server_cert_content = True\n                self.server_cert_file_content_exists = False\n            # ignore certificates all together\n            else:\n                self.server_cert_content = False\n                self.server_cert_file_content_exists = False\n        # self signed cert provided\n        elif isinstance(cert_verify, str):\n            self.server_cert_content = self.server_cert_name\n            self.server_cert_file_content_exists = True\n            self.server_cert_file_content = cert_verify\n            if sni is not None:\n                self.sni = sni\n\n        self.headers = headers\n        self.url_modifier_function = url_modifier_function\n\n    # This method is used to set up an HTTP request and send it to the server\n    def call_api(self, endpoint, method, headers=None, params=[], data=None, urldata=None, timeout=None):\n        try:\n            # convert server cert to file\n            if self.server_cert_file_content_exists is True:\n                with open(self.server_cert_name, 'w') as f:\n                    try:\n                        f.write(self.server_cert_file_content)\n                    except IOError:\n                        self.logger.error('Failed to setup certificate')\n\n            url = None\n            actual_headers = self.headers.copy()\n            if headers is not None:\n                for header_key in headers:\n                    actual_headers[header_key] = headers[header_key]\n\n            if self.url_modifier_function is not None:\n                url = self.url_modifier_function(\n                    self.server_ip, endpoint, actual_headers)\n            else:\n                url = 'https://' + self.server_ip + '/' + endpoint\n            try:\n                call = getattr(requests, method.lower())\n\n                # only use the tool belt session in case of SNI for safety\n                if self.sni is not None:\n                    session = requests.Session()\n                    call = getattr(session, method.lower())\n                    session.mount('https://', host_header_ssl.HostHeaderSSLAdapter())\n                    actual_headers[\"Host\"] = self.sni\n\n                response = call(url, headers=actual_headers, params=urldata, data=data, verify=self.server_cert_content,\n                                timeout=timeout)\n\n                if 'headers' in dir(response) and isinstance(response.headers, collections.abc.Mapping) and \\\n                        'Content-Type' in response.headers and \"Deprecated\" in response.headers['Content-Type']:\n                    self.logger.error(\"WARNING: \" + response.headers['Content-Type'])\n                return ResponseWrapper(response)\n            except Exception as e:\n                self.logger.error('exception occurred while requesting url: ' + str(e))\n                raise e\n        finally:\n            if self.server_cert_file_content_exists is True:\n                try:\n                    os.remove(self.server_cert_name)\n                except OSError as e:\n                    if e.errno != errno.ENOENT:\n                        raise\n\n    # Simple getters that can be used to inspect the state of this client.\n    def get_headers(self):\n        return self.headers.copy()\n\n    def get_server_ip(self):\n        return self.server_ip\n\n\nclass ResponseWrapper:\n    def __init__(self, response):\n        self.response = response\n\n    def read(self):\n        return self.response.content\n\n    @property\n    def bytes(self):\n        return self.response.content\n\n    @property\n    def code(self):\n        return self.response.status_code\n","repo_name":"daniel-herrera-ibm/stix-shifter","sub_path":"stix_shifter_utils/stix_transmission/utils/RestApiClient.py","file_name":"RestApiClient.py","file_ext":"py","file_size_in_byte":4751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"}
{"seq_id":"39336810988","text":"import modelnet40\n\n#\"\"\"\n#HW1 Part II\n#\"\"\"\n#\nimport os, os.path\nimport pprint\nimport glob\nimport random\nfrom keras.applications.resnet50 import preprocess_input, decode_predictions\nfrom keras.preprocessing import image\nimport numpy as np\nimport numpy\n#\"\"\"\n#from keras.applications.resnet50 import ResNet50\n#from keras.preprocessing import image\n#from keras.applications.resnet50 import preprocess_input, decode_predictions\n#\"\"\"\nfrom keras import applications\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras import optimizers\nfrom keras.models import Sequential\nfrom keras.layers import Dropout, Flatten, Dense,Input,concatenate,Maximum,Conv2D\nfrom keras.models import Model\nfrom keras import models\nfrom keras.layers.normalization import 
BatchNormalization\ndef rotateImage(im,row_axis, col_axis, channel_axis,fill_mode, cval,RangeRotation=True):\n\tif(RangeRotation==True):\n\t\tim=image.random_rotation(im,45, row_axis, col_axis, channel_axis,fill_mode, cval)\n\telse:\n\t\tangle= random.choice([-45,0,+45])\n#\t\tprint(\"angle is :\",angle)\n\t\ttheta = np.pi / 180 * angle\n\t\trotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],[np.sin(theta), np.cos(theta), 0],[0, 0, 1]])\n\t\th, w = im.shape[row_axis], im.shape[col_axis]\n\t\ttransform_matrix = image.transform_matrix_offset_center(rotation_matrix, h, w)\n\t\tim = image.apply_transform(im, transform_matrix, channel_axis, fill_mode, cval)\n\treturn im\n\ndef batchGenerator( SingleGenerator, batchSize, dataSetSize,single=True,augment=True,RangeRotation=True):\n\tdef nestedGen():\n\t\twhile True:\n\t\t\tfor i in range(batchSize):\n\t\t\t\t(a,b)=SingleGenerator.__next__() \n#\t\t\t\tprint(\"len a is\",len(a))\n#\t\t\t\tprint(a[0])\n\t\t\t\trow_axis=0\n\t\t\t\tcol_axis=1\n\t\t\t\tchannel_axis=2\n\t\t\t\tfill_mode='nearest'\n\t\t\t\tcval=0.\n\t\t\t\tif(augment==True and single==True):\n\t\t\t\t\ta[0]=rotateImage(a[0],row_axis, col_axis, channel_axis,fill_mode, cval,RangeRotation)\n\t\t\t\tif(augment==True and single==False):\n\t\t\t\t\tfor j in range(len(a)):\n\t\t\t\t\t#random horizontal flip along columns\n\t\t\t\t\t\ta[j][0]=rotateImage(a[j][0],row_axis, col_axis, channel_axis,fill_mode, cval,RangeRotation)\n\t\t\t\tif(augment==True and single==True):\n\t\t\t\t\trnd=np.random.uniform(0,1)\n\t\t\t\t\tif(rnd<0.5):\n\t\t\t\t\t\ta=image.flip_axis(a, 2)\n\t\t\t\tif(augment==True and single==False):\n\t\t\t\t\tfor j in range(len(a)):\n\t\t\t\t\t\trnd=np.random.uniform(0,1)\n\t\t\t\t\t\tif(rnd<0.5):\n\t\t\t\t\t\t\ta[j]=image.flip_axis(a[j], 2)\n\t\t\t\tif(i==0):\n\t\t\t\t\t(array_a,array_b)=(a,b)\n\t\t\t\telse:\n\t#\t\t\t\tprint(\"hi\")\n\t\t\t\t\tif(single==True):\n\t\t\t\t\t\tarray_a=np.append(array_a,a,axis=0)\n\t\t\t\t\t\tarray_b=np.append(array_b,b,axis=0)\n\t\t\t\t\telse:\n#\t\t\t\t\t\tprint(len(a))\n\t\t\t\t\t\tfor j in range(len(a)):\n\t\t\t\t\t\t\tarray_a[j]=np.append(array_a[j],a[j],axis=0)\n\t\t\t\t\t\tarray_b=np.append(array_b,b,axis=0)\n#\t\t\t\t\tprint(a.shape, b.shape)\n\t#\t\t\t\t\t\tprint(array_a[j].shape)\n\t\t\tyield (array_a,array_b)\n\treturn (nestedGen(),dataSetSize )\n","repo_name":"Marialean1985/3DHw1","sub_path":"batchGenerator.py","file_name":"batchGenerator.py","file_ext":"py","file_size_in_byte":2828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7035815454","text":"import json\nimport logging\nimport os\nimport sys\n\nimport github\n\nlogging.basicConfig(level=logging.INFO)\nlogging.info(\"Running TFsum\")\n\ntry:\n # format: myname/something\n repo_name = os.environ[\"GITHUB_REPOSITORY\"]\n github_token = os.environ[\"INPUT_GITHUB_TOKEN\"]\n # this should be mounted once the action is running, it contains the root of the checkout\n workspace = os.environ[\"GITHUB_WORKSPACE\"]\n # format: refs/pull/1/merge\n ref = os.environ[\"GITHUB_REF\"]\n event = os.environ[\"GITHUB_EVENT_NAME\"]\n skip_noop = bool(os.environ.get(\"INPUT_SKIP_NOOP\", True))\nexcept Exception as e:\n logging.error(f\"Failed to load key from environment: {e}\")\n sys.exit()\n\nif event != \"pull_request\":\n logging.info(f\"Not responding to event of type {event}\")\n sys.exit()\n\nplan_file = workspace + \"/\" + os.environ.get(\"INPUT_PLAN_FILE\", \"tf.plan.json\")\n\ntry:\n with open(plan_file) as jf:\n plan = 
json.load(jf)\nexcept Exception as e:\n logging.error(f\"Failed to open {plan_file}: {e}\")\n sys.exit()\n\nactions = {}\n\nlogging.info(f\"Parsing plan in {plan_file}\")\nfor r in plan[\"resource_changes\"]:\n c = r[\"change\"]\n for action in c[\"actions\"]:\n if skip_noop and (action == \"no-op\"):\n continue\n if action in actions:\n actions[action] += 1\n else:\n actions[action] = 1\n\nactions_performed = list(map(lambda l: f\"{l}: {actions[l]}\", actions))\nsummary = \", \".join(actions_performed)\n\nlogging.info(f\"Actions found: {summary}\")\n\ntry:\n # ref format: refs/pull//merge\n logging.info(f\"Extracting PR number from {ref}\")\n pr_number = int(ref.split(\"/\")[2])\nexcept Exception as e:\n logging.error(f\"Failed: {e}\")\n sys.exit()\n\nlogging.info(f\"Connecting to GitHub for PR{pr_number} in {repo_name}\")\ntry:\n gh = github.Github(github_token)\n repo = gh.get_repo(repo_name)\n pr = repo.get_pull(pr_number)\n logging.info(\"Creating comment\")\n pr.create_issue_comment(f\":construction: Terraform plan summary: `{summary}`\")\n\nexcept Exception as e:\n logging.error(f\"Failed to connect: {e}\")\n sys.exit()\n\nlogging.info(\"Done\")\n","repo_name":"harmw/tfsum-action","sub_path":"src/tfsum.py","file_name":"tfsum.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2307347076","text":"from json import dumps, loads\nfrom os import path, remove\nfrom unittest import TestCase\nfrom src.three_way_diff.levenshtein import get_operations\nfrom src.three_way_diff.three_way_diff import (_compute_accuracy,\n _find_element, _format_input,\n _parse_operations,\n _three_way_diff_compute,\n three_way_diff_JSON)\nfrom test_class import TestClass\n\n\nclass ThreeWayDiffTest(TestCase):\n TEST_RESULTS_LOCATION = ''\n TEST_BASE_STRING_CAPS = TestClass.TEST_STRING_BASE.upper()\n TEST_OPERATION_LIST = [('replace', index, index)\n for index in range(TestClass.TEST_BASE_LEN)]\n TEST_OPERATION_LIST_HALF = TEST_OPERATION_LIST[:TestClass.TEST_BASE_LEN // 2]\n TEST_OPERATION_LIST_EXTRA = TEST_OPERATION_LIST[:TestClass.TEST_BASE_LEN // 2] + [\n ('replace', 3, 3),\n ('replace', 4, 4)\n ]\n TEST_OPERATION_LIST_DOUBLE = TEST_OPERATION_LIST[:-2] + [\n ('replace', 4, 4),\n ('replace', 5, 5)\n ]\n TEST_OPERATION_LIST_NOTHING = [\n ('nothing', 2, 2),\n ('nothing', 3, 3)\n ]\n TEST_OPERATION_LIST_EXTRA_NOTHING = TEST_OPERATION_LIST_HALF + \\\n TEST_OPERATION_LIST_NOTHING\n TEST_REPLACE_GROUP = TestClass.TEST_LIST_BASE[:-2] + [\n 'double', 'false']\n TEST_REPEATED = ['test' if element ==\n 'string' else element for element in TestClass.TEST_LIST_BASE]\n TEST_REPLACE_REPEATED = ['false' if element ==\n 'test' else element for element in TestClass.TEST_LIST_REPLACE]\n TEST_REPEATED_MULTIPLE = TEST_REPEATED + ['list', 'test']\n TEST_REPLACE_REPEATED_MULTIPLE = TEST_REPLACE_REPEATED + \\\n ['list', 'false']\n\n def tearDown(self):\n if self.TEST_RESULTS_LOCATION:\n remove(self.TEST_RESULTS_LOCATION)\n\n def test__format_input_returns_expected(self):\n lowered = _format_input(self.TEST_BASE_STRING_CAPS,\n self.TEST_BASE_STRING_CAPS)\n expected = (TestClass.TEST_LIST_BASE, TestClass.TEST_LIST_BASE)\n\n self.assertEqual(lowered, expected)\n\n def test_accuracy_returns_expected_without_nothing_operation(self):\n accuracy = _compute_accuracy(\n self.TEST_OPERATION_LIST_HALF, TestClass.TEST_LIST_BASE)\n expected = 0.5\n\n self.assertEqual(expected, accuracy)\n\n def 
test_accuracy_returns_rounded_up_correctly(self):\n accuracy = _compute_accuracy(\n self.TEST_OPERATION_LIST_EXTRA, TestClass.TEST_LIST_BASE)\n expected = 0.17\n\n self.assertEqual(expected, accuracy)\n\n def test_accuracy_returns_rounded_down_correctly(self):\n accuracy = _compute_accuracy(\n self.TEST_OPERATION_LIST_EXTRA[:4], TestClass.TEST_LIST_BASE)\n expected = 0.33\n self.assertEqual(expected, accuracy)\n\n def test_accuracy_returns_expected_with_nothing_operation(self):\n accuracy = _compute_accuracy(\n self.TEST_OPERATION_LIST_EXTRA_NOTHING, TestClass.TEST_LIST_BASE)\n expected = 0.5\n\n self.assertEqual(expected, accuracy)\n\n def test__find_element_returns_expected_without_nested(self):\n found_index = _find_element(TestClass.TEST_LIST_BASE, 'test')\n expected_index = TestClass.TEST_LIST_BASE.index('test')\n\n self.assertEqual(expected_index, found_index)\n\n def test__find_element_returns_expected_with_nested(self):\n searched = ['tested', 3]\n to_search = TestClass.TEST_LIST_BASE + [['tested', 3]]\n found_index = _find_element(to_search, 'tested')\n expected_index = to_search.index(searched)\n\n self.assertEqual(expected_index, found_index)\n\n def test_parse_returns_expected_with_same(self):\n correct, diff = _parse_operations(TestClass.TEST_LIST_BASE, TestClass.TEST_LIST_BASE,\n get_operations(TestClass.TEST_LIST_BASE, TestClass.TEST_LIST_BASE))\n expected_correct = TestClass.TEST_LIST_BASE\n expected_diff = {}\n\n self.assertEqual(expected_correct, correct)\n self.assertEqual(expected_diff, diff)\n\n def test_parse_returns_expected_with_different_replace(self):\n correct, diff = _parse_operations(TestClass.TEST_LIST_REPLACE, TestClass.TEST_LIST_BASE,\n get_operations(TestClass.TEST_LIST_REPLACE, TestClass.TEST_LIST_BASE))\n expected_correct = TestClass.TEST_LIST_BASE[:-1]\n expected_diff = {'false': 'string'}\n\n self.assertEqual(expected_correct, correct)\n self.assertEqual(expected_diff, diff)\n\n def test_parse_returns_expected_with_different_insert(self):\n correct, diff = _parse_operations(TestClass.TEST_LIST_INSERT, TestClass.TEST_LIST_BASE,\n get_operations(TestClass.TEST_LIST_INSERT, TestClass.TEST_LIST_BASE))\n expected_correct = TestClass.TEST_LIST_BASE[:-1]\n expected_diff = {'': 'string'}\n\n self.assertEqual(expected_correct, correct)\n self.assertEqual(expected_diff, diff)\n\n def test_parse_returns_expected_with_different_delete(self):\n correct, diff = _parse_operations(TestClass.TEST_LIST_DELETE, TestClass.TEST_LIST_BASE,\n get_operations(TestClass.TEST_LIST_DELETE, TestClass.TEST_LIST_BASE))\n expected_correct = TestClass.TEST_LIST_BASE\n expected_diff = {'extra': ''}\n\n self.assertEqual(expected_correct, correct)\n self.assertEqual(expected_diff, diff)\n\n def test_parse_returns_expected_with_different_replace_groups(self):\n correct, diff = _parse_operations(self.TEST_REPLACE_REPEATED, self.TEST_REPEATED,\n get_operations(self.TEST_REPLACE_REPEATED, TestClass.TEST_LIST_BASE))\n expected_correct = [\n element for element in self.TEST_REPLACE_REPEATED if element != 'false']\n expected_diff = {'false': ['test', 2]}\n\n self.assertEqual(expected_correct, correct)\n self.assertEqual(expected_diff, diff)\n\n def test_parse_returns_expected_with_different_replace_groups_multiple(self):\n base = self.TEST_REPEATED_MULTIPLE\n correct, diff = _parse_operations(self.TEST_REPLACE_REPEATED_MULTIPLE, base,\n get_operations(self.TEST_REPLACE_REPEATED_MULTIPLE, base))\n expected_correct = [\n element for element in self.TEST_REPLACE_REPEATED if element != 'false']\n 
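        # grouped-diff convention: repeated identical replacements map to [element, count]; here 'false' replaced 'test' three times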
expected_diff = {'false': ['test', 3]}\n\n self.assertEqual(expected_correct, correct)\n self.assertEqual(expected_diff, diff)\n\n def test_parse_returns_expected_with_different_replace_multiple_for_same(self):\n correct, diff = _parse_operations(self.TEST_REPLACE_REPEATED, TestClass.TEST_LIST_BASE,\n get_operations(self.TEST_REPLACE_REPEATED, TestClass.TEST_LIST_BASE))\n expected_correct = [\n element for element in self.TEST_REPLACE_REPEATED if element != 'false']\n expected_diff = {'false': ['test', 'string']}\n\n self.assertEqual(expected_correct, correct)\n self.assertEqual(expected_diff, diff)\n\n def test_parse_returns_expected_with_different_list_replace_multiple_for_same(self):\n base = TestClass.TEST_LIST_BASE + ['list', 'test']\n correct, diff = _parse_operations(self.TEST_REPLACE_REPEATED_MULTIPLE, base,\n get_operations(self.TEST_REPLACE_REPEATED_MULTIPLE, base))\n expected_correct = [\n element for element in base if element not in ['test', 'string']]\n expected_diff = {'false': ['string', ['test', 2]]}\n\n self.assertEqual(set(expected_correct), set(correct))\n self.assertEqual(expected_diff, diff)\n\n def test_parse_returns_expected_with_different_list_replace_multiple_for_same_list_multiple_first(self):\n comparable = ['not', 'is', 'not', 'is', 'not', 'is', 'not']\n base = ['it', 'is', 'it', 'is', 'are', 'is', 'it']\n correct, diff = _parse_operations(comparable, base,\n get_operations(comparable, base))\n expected_correct = ['is']\n expected_diff = {'not': [['it', 3], 'are']}\n\n self.assertEqual(set(expected_correct), set(correct))\n self.assertEqual(expected_diff, diff)\n\n def test_parse_returns_expected_with_different_list_replace_multiple_list(self):\n comparable = ['not', 'is', 'not', 'is', 'not']\n base = ['it', 'is', 'a', 'is', 'test']\n correct, diff = _parse_operations(comparable, base,\n get_operations(comparable, base))\n expected_correct = ['is']\n expected_diff = {'not': ['it', 'a', 'test']}\n\n self.assertEqual(set(expected_correct), set(correct))\n self.assertEqual(expected_diff, diff)\n\n def test_json_computed_correctly_string(self):\n comparable, base = _format_input(\n TestClass.TEST_STRING_REPLACE, TestClass.TEST_STRING_BASE)\n operations = get_operations(comparable,\n base)\n result = _three_way_diff_compute(TestClass.TEST_STRING_REPLACE,\n TestClass.TEST_STRING_REPLACE,\n TestClass.TEST_STRING_BASE)\n\n comparable_result, comparable_diff = _parse_operations(comparable,\n base,\n operations)\n\n assert result['correct']['comparable1'] == comparable_result\n assert result['correct']['comparable2'] == comparable_result\n assert result['diff']['comparable1'] == comparable_diff\n assert result['diff']['comparable2'] == comparable_diff\n\n accuracy = _compute_accuracy(operations, base)\n assert result['accuracy']['comparable1'] == accuracy\n assert result['accuracy']['comparable2'] == accuracy\n\n def test_json_file_created_string(self):\n three_way_diff_JSON(\n 'comparable1', 'comparable2', 'base')\n self.TEST_RESULTS_LOCATION = 'results.json'\n\n assert path.exists(self.TEST_RESULTS_LOCATION)\n","repo_name":"bedia-tv/lippy","sub_path":"tests/three_way_diff/test_three_way_diff.py","file_name":"test_three_way_diff.py","file_ext":"py","file_size_in_byte":10378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30010215061","text":"import os\nimport pytest\n\nfrom xcalar.compute.util.Qa import XcalarQaDatasetPath\nfrom xcalar.external.client import Client\n\nfrom xcalar.external.LegacyApi.XcalarApi 
import XcalarApi\nfrom xcalar.external.LegacyApi.Dataset import JsonDataset\nfrom xcalar.external.LegacyApi.Operators import Operators\nfrom xcalar.external.result_set import ResultSet\n\nfrom xcalar.compute.coretypes.OrderingEnums.constants import XcalarOrderingT\n\ntestSanitySessionName = \"TestSanity\"\n\n\n@pytest.mark.first\nclass TestSanity(object):\n    def setup_class(cls):\n        cls.client = Client()\n        cls.xcalarApi = XcalarApi()\n        cls.session = cls.client.create_session(testSanitySessionName)\n        cls.xcalarApi.setSession(cls.session)\n        cls.operators = Operators(cls.xcalarApi)\n\n    def teardown_class(cls):\n        assert cls.session.name == testSanitySessionName\n        cls.client.destroy_session(testSanitySessionName)\n        cls.xcalarApi.setSession(None)\n        cls.session = None\n        cls.xcalarApi = None\n        pass\n\n    def testVersion(self):\n        output = self.client.get_version()\n        assert output.version\n\n    def testFunWithYelp(self):\n        targetName = \"Default Shared Root\"\n        path = os.path.join(XcalarQaDatasetPath, \"yelp/user\")\n        dataset = JsonDataset(self.xcalarApi, targetName, path, \"yelpUsers\")\n        dataset.load()\n        assert dataset.record_count() > 0\n\n        self.operators.indexDataset(\".XcalarDS.yelpUsers\", \"yelpUsersIndex\",\n                                    \"yelping_since\", \"p\")\n        self.operators.groupBy(\"yelpUsersIndex\", \"yelpUsersGroupBy\",\n                               [\"count(p::user_id)\"], [\"countYelpingSince\"])\n        self.operators.indexTable(\n            \"yelpUsersGroupBy\",\n            \"yelpUsersGroupBySortDesc\",\n            \"countYelpingSince\",\n            ordering=XcalarOrderingT.XcalarOrderingDescending)\n        first = next(\n            ResultSet(self.client, table_name=\"yelpUsersGroupBySortDesc\", session_name=testSanitySessionName).record_iterator())\n        print(first)\n        assert first[\"p-yelping_since\"] == \"2011-07\"\n        assert first[\"countYelpingSince\"] == 1600\n\n        self.operators.aggregate(\"yelpUsersIndex\", \"yelpUsersIndexAgg\",\n                                 \"count(p::user_id)\")\n\n        self.operators.dropTable(\"yelpUsers*\")\n        self.operators.dropConstants(\"*\")\n        dataset.delete()\n","repo_name":"varlogtim/xcalar","sub_path":"src/bin/tests/pyTest/test_sanity.py","file_name":"test_sanity.py","file_ext":"py","file_size_in_byte":2402,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"20811329752","text":"import os\nimport time\nimport json\nfrom flask import Flask, render_template, request, redirect, url_for\nfrom flask_socketio import SocketIO, emit\nfrom werkzeug.utils import secure_filename\n\nUPLOAD_FOLDER = 'C:/Users/ACER/OneDrive/Рабочий стол/2-семестр/ввпд/file_upload/uploads'\nALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif', 'py'}\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\napp.config['SECRET_KEY'] = 'jsbcfsbfjefebw237u3gdbdc'\nsocketio = SocketIO(app)\n\n\ndef allowed_file(filename):\n    return '.' in filename and \\\n           filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n\n\ndef snake_game_module(filename):\n    \"\"\"Snake game module\"\"\"\n\n    def eat_tail(head, arr):\n        \"\"\"Check whether the snake has bitten itself\"\"\"\n        for arr_i in arr:\n            if (head[\"x\"] == arr_i[\"x\"]) and (head[\"y\"] == arr_i[\"y\"]):\n                pass\n                # game over\n                # clearInterval(game)\n\n    # Size of one cell in px.\n    box = 32\n\n    # Score.\n    score = 0\n\n    # Food object.\n    # food = {\n    #     \"x\": random.randint(1, 15) * box,\n    #     \"y\": random.randint(1, 15) * box\n    # }\n    food = {\n        \"x\": 10 * box,\n        \"y\": 10 * box\n    }\n\n    # The snake, represented as a list of coordinate objects.\n    snake = [{}]\n    snake[0] = {\n        \"x\": 10 * box,\n        \"y\": 10 * box\n    }\n\n    # Direction of the snake.\n    direction = \"\"\n\n    while True:\n\n        # The snake's next move (user-supplied code)\n        pm_name = 'uploads.' + filename\n        uploads = __import__(pm_name)\n        # import uploads.main\n        direction = uploads.main.user_algorithm(snake, direction)\n\n        # Coordinates of the snake's head.\n        snake_x = snake[0][\"x\"]\n        snake_y = snake[0][\"y\"]\n\n        # If the snake has eaten the food.\n        if (snake_x == food[\"x\"]) and (snake_y == food[\"y\"]):\n            score += 1\n            # food = {\n            #     \"x\": random.randint(1, 15) * box,\n            #     \"y\": random.randint(1, 15) * box\n            # }\n            food = {\n                \"x\": 10 * box,\n                \"y\": 10 * box\n            }\n        else:\n            snake.pop()\n\n        # If the snake has hit a wall.\n        if (snake_x < box) or (snake_x > box * 17) or (snake_y < 3 * box) or (snake_y > box * 17):\n            # game over\n            # clearInterval(game)\n            pass\n\n        # Update the head coordinates, i.e. move the snake.\n        if direction == \"left\":\n            snake_x -= box\n        if direction == \"right\":\n            snake_x += box\n        if direction == \"up\":\n            snake_y -= box\n        if direction == \"down\":\n            snake_y += box\n\n        # New head.\n        new_head = {\n            \"x\": snake_x,\n            \"y\": snake_y\n        }\n\n        # Check whether the snake has bitten itself.\n        eat_tail(new_head, snake)\n\n        # Prepend the new head.\n        snake.insert(0, new_head)\n\n        # Prepare the data to send to the client\n        shipping_customer = [snake, [food]]\n\n        # Serialize the object to a string\n        shipping_customer_text = json.dumps(shipping_customer)\n\n        # Send the values to the client\n        try:\n            socketio.emit('my response', shipping_customer_text, callback=messageRecived)\n            # print(snake_text)\n            time.sleep(0.1)\n        except:\n            print(\"!\")\n\n\n@app.route('/upload_file', methods=['GET', 'POST'])\ndef upload_file():\n    if request.method == 'POST':\n        file = request.files['file']\n        if file and allowed_file(file.filename):\n            filename = secure_filename(file.filename)\n            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n            return redirect(url_for('broadcast_game', filename=filename))\n    return render_template('upload_file.html')\n\n\n@app.route('/broadcast_game')\ndef broadcast_game():\n    \"\"\"ggg\"\"\"\n    data = dict(filename=request.args.get('filename'))\n    return render_template('broadcast_game.html', data=data)\n\n\n@app.route('/game', methods=['POST', 'GET'])\ndef game():\n    \"\"\"Start the broadcast\"\"\"\n    if request.method == \"POST\":\n        filename = request.form['filename'][0:-3]\n        snake_game_module(filename)\n    else:\n        return render_template('game.html')\n\n\ndef messageRecived():\n    print('message was received!!!')\n\n\n@socketio.on('my event')\ndef handle_my_custom_event(json):\n    print('received my event: ' + str(json))\n    socketio.emit('my response', json, callback=messageRecived)\n\n\nif __name__ == '__main__':\n    socketio.run(app, debug=True)\n\n","repo_name":"fktif228/websockets_snake","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5022,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"26354478340","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport h5py\nimport argparse\n\nfrom tqdm import tqdm\n\n\ndef merge(config):\n    dir_name = os.path.join('datasets/', config.dir_name)\n    check_path(dir_name)\n\n    f = h5py.File(os.path.join(dir_name, 'data.hdf5'), 'w')\n    id_file = open(os.path.join(dir_name, 'id.txt'), 'w')\n\n    new_dataset_paths = 
list(set(config.dataset_paths))\n if len(new_dataset_paths) != len(config.dataset_paths):\n raise ValueError('There is overlap in the dataset paths')\n\n num_train, num_test, num_val = 0, 0, 0\n h, w, c = None, None, None\n max_demo_length = 0\n max_program_length = 0\n num_program_tokens = None\n num_demo_per_program = 0\n num_test_demo_per_program = 0\n num_action_tokens = None\n percepts = None\n vizdoom_pos_keys = None\n vizdoom_max_init_pos_len = 0\n perception_type = None\n print('data_info checking')\n for i, dataset_path in enumerate(config.dataset_paths):\n print('dataset [{}/{}]'.format(i, len(config.dataset_paths)))\n fs = h5py.File(os.path.join(dataset_path, 'data.hdf5'), 'r')\n fs_max_demo_length = fs['data_info']['max_demo_length'].value\n fs_max_program_length = fs['data_info']['max_program_length'].value\n fs_num_program_tokens = fs['data_info']['num_program_tokens'].value\n fs_num_demo_per_program = fs['data_info']['num_demo_per_program'].value\n fs_num_test_demo_per_program = fs['data_info']['num_test_demo_per_program'].value\n fs_num_action_tokens = fs['data_info']['num_action_tokens'].value\n fs_num_train = fs['data_info']['num_train'].value\n fs_num_test = fs['data_info']['num_test'].value\n fs_num_val = fs['data_info']['num_val'].value\n fs_h = fs['data_info']['s_h_h'].value\n fs_w = fs['data_info']['s_h_w'].value\n fs_c = fs['data_info']['s_h_c'].value\n fs_percepts = list(fs['data_info']['percepts'].value)\n fs_vizdoom_pos_keys = list(fs['data_info']['vizdoom_pos_keys'].value)\n fs_vizdoom_max_init_pos_len = fs['data_info']['vizdoom_max_init_pos_len'].value\n fs_perception_type = fs['data_info']['perception_type'].value\n\n max_demo_length = max(max_demo_length, fs_max_demo_length)\n max_program_length = max(max_program_length, fs_max_program_length)\n if num_program_tokens is None: num_program_tokens = fs_num_program_tokens\n elif num_program_tokens != fs_num_program_tokens:\n raise ValueError('program token mismatch: {}'.format(dataset_path))\n num_demo_per_program = max(num_demo_per_program, fs_num_demo_per_program)\n num_test_demo_per_program = max(num_test_demo_per_program,\n fs_num_test_demo_per_program)\n if num_action_tokens is None: num_action_tokens = fs_num_action_tokens\n elif num_action_tokens != fs_num_action_tokens:\n raise ValueError('num action token mismatch: {}'.format(dataset_path))\n num_train += fs_num_train\n num_test += fs_num_test\n num_val += fs_num_val\n if h is None: h = fs_h\n elif h != fs_h: raise ValueError('image height mismatch: {}'.format(dataset_path))\n if w is None: w = fs_w\n elif w != fs_w: raise ValueError('image width mismatch: {}'.format(dataset_path))\n if c is None: c = fs_c\n elif c != fs_c: raise ValueError('image channel mismatch: {}'.format(dataset_path))\n if percepts is None: percepts = fs_percepts\n elif percepts != fs_percepts:\n raise ValueError('percepts mismatch: {}'.format(dataset_path))\n if vizdoom_pos_keys is None: vizdoom_pos_keys = fs_vizdoom_pos_keys\n elif vizdoom_pos_keys != fs_vizdoom_pos_keys:\n raise ValueError('vizdoom_pos_keys mismatch: {}'.format(dataset_path))\n vizdoom_max_init_pos_len = max(vizdoom_max_init_pos_len, fs_vizdoom_max_init_pos_len)\n if perception_type is None: perception_type = fs_perception_type\n elif perception_type != fs_perception_type:\n raise ValueError('perception_type mismatch: {}'.format(dataset_path))\n fs.close()\n print('copy data')\n for i, dataset_path in enumerate(config.dataset_paths):\n print('dataset [{}/{}]'.format(i, len(config.dataset_paths)))\n fs = 
h5py.File(os.path.join(dataset_path, 'data.hdf5'), 'r')\n ids = open(os.path.join(dataset_path, 'id.txt'),\n 'r').read().splitlines()\n for id in tqdm(ids):\n new_id = '{}_{}'.format(i, id)\n\n id_file.write(new_id+'\\n')\n grp = f.create_group(new_id)\n for key in fs[id].keys():\n grp[key] = fs[id][key].value\n fs.close()\n grp = f.create_group('data_info')\n grp['max_demo_length'] = max_demo_length\n grp['max_program_length'] = max_program_length\n grp['num_program_tokens'] = num_program_tokens\n grp['num_demo_per_program'] = num_demo_per_program\n grp['num_test_demo_per_program'] = num_test_demo_per_program\n grp['num_action_tokens'] = num_action_tokens\n grp['num_train'] = num_train\n grp['num_test'] = num_test\n grp['num_val'] = num_val\n grp['s_h_h'] = h\n grp['s_h_w'] = w\n grp['s_h_c'] = c\n grp['percepts'] = percepts\n grp['vizdoom_pos_keys'] = vizdoom_pos_keys\n grp['vizdoom_max_init_pos_len'] = vizdoom_max_init_pos_len\n grp['perception_type'] = perception_type\n f.close()\n id_file.close()\n print('Dataset generated under {} with {}'\n ' samples ({} for training and {} for testing '\n 'and {} for val'.format(dir_name, num_train + num_test + num_val,\n num_train, num_test, num_val))\n\n\ndef check_path(path):\n if not os.path.exists(path):\n os.makedirs(path)\n else:\n raise ValueError('Be careful, you are trying to overwrite some dir')\n\n\ndef get_args():\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--dir_name', type=str, default='vizdoom_dataset')\n parser.add_argument('--dataset_paths', nargs='+',\n help='list of existing dataset paths')\n args = parser.parse_args()\n return args\n\n\nif __name__ == '__main__':\n args = get_args()\n\n merge(args)\n","repo_name":"shaohua0116/demo2program","sub_path":"vizdoom_env/merge_datasets.py","file_name":"merge_datasets.py","file_ext":"py","file_size_in_byte":6346,"program_lang":"python","lang":"en","doc_type":"code","stars":100,"dataset":"github-code","pt":"81"} +{"seq_id":"13531102993","text":"from io import BytesIO\nimport cv2\nfrom flask import Flask, render_template, request, jsonify\nfrom matplotlib import pyplot as plt\nfrom src.modeling.run_model_single import (\n load_model, load_inputs, process_augment_inputs, batch_to_tensor\n)\nfrom src.optimal_centers.get_optimal_center_single import get_optimal_center_single\nfrom src.cropping.crop_single import crop_single_mammogram\nfrom urllib.request import urlopen\nfrom PIL import Image\nimport numpy as np\nfrom pydicom import dcmread \nimport urllib \n\n# Initializations for the model\nshared_parameters = {\n\"device_type\": \"gpu\",\n\"gpu_number\": 0,\n\"max_crop_noise\": (100, 100),\n\"max_crop_size_noise\": 100,\n\"batch_size\": 1,\n\"seed\": 0,\n\"augmentation\": True,\n\"use_hdf5\": True,\n}\nrandom_number_generator = np.random.RandomState(shared_parameters[\"seed\"])\nimage_only_parameters = shared_parameters.copy()\n\n\n\napp = Flask(__name__)\n\n@app.route('/', methods=['GET'])\ndef home():\n return render_template('index.html')\n\n\n@app.route('/mammo/predict/', methods=['POST'])\ndef predict():\n data = request.get_json()\n imageURL = data['imageURL']\n viewData = data['view']\n # imageURL = request.args.get('imageURL')\n # token = request.args.get('token')\n # viewData = request.args.get('view')\n # Initializations for the model\n image_only_parameters[\"view\"] = viewData[0]+'-'+viewData[2:]\n image_only_parameters[\"use_heatmaps\"] = False\n image_only_parameters[\"model_path\"] = 
\"models/ImageOnly__ModeImage_weights.p\"\n model, device = load_model(image_only_parameters)\n # File Paths\n cropped_img_path = 'sample_single_output/' + 'out_file.png'\n metadata_path = 'sample_single_output/' + 'metadata' + '.pkl'\n # Preprocessing\n crop_single_mammogram(imageURL, \"NO\", viewData, cropped_img_path, metadata_path, 100, 50)\n get_optimal_center_single(cropped_img_path, metadata_path)\n # Load Inputs\n model_input = load_inputs(\n image_path=imageURL,\n metadata_path=metadata_path,\n use_heatmaps=False,\n )\n batch = [\n process_augment_inputs(\n model_input=model_input,\n random_number_generator=random_number_generator,\n parameters=image_only_parameters,\n ),\n ]\n # Classification\n tensor_batch = batch_to_tensor(batch, device)\n y_hat = model(tensor_batch)\n predictions = np.exp(y_hat.cpu().detach().numpy())[:, :2, 1]\n predictions_dict = {\n \"benign\": float(predictions[0][0]),\n \"malignant\": float(predictions[0][1]),\n }\n\n predBen = round(predictions_dict['benign'], 3) * 100\n predMal = round(predictions_dict['malignant'], 3) * 100\n \n result = {\n 'benign': predBen,\n 'malignant': predMal\n }\n\n return jsonify(result)\n\n@app.route('/mammo/dicom-to-png/', methods=['POST'])\ndef dicomToPng():\n data = request.get_json()\n imageURL = data['imageURL']\n image = dcmread(BytesIO(data), force=True)\n # image = (image - image.min()) / (image.max() - image.min()) * 255.0 \n # image = image.astype(np.uint8)\n # cv2.imwrite('hello.png', image)\n return ''\n\n\nif __name__ == '__main__':\n app.run(port=5000, debug=True)","repo_name":"Ahmed-Ragab11/test1_gp","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35457128988","text":"import streamlit as st\n\n\ndef add_wegits(ls_skill, inputs):\n add_selectbox = st.sidebar.selectbox(\"Pick a Custom group to custmize\", ls_skill)\n for line in inputs:\n if line.skill == add_selectbox:\n select_skill = line\n\n if st.sidebar.radio(f\"Remove {add_selectbox} Group\", [\"No\", \"Yes\"]) == \"Yes\":\n inputs = [line for line in inputs if line.skill != add_selectbox]\n\n return select_skill, inputs\n","repo_name":"AaronRoethe/rankingETL","sub_path":"src/frontEnd/sidebar.py","file_name":"sidebar.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71172249865","text":"import argparse\n\nimport matplotlib.pyplot as plt\nimport torch\nfrom torchvision.utils import make_grid\n\nfrom gans.models.builder import build_generator\nfrom gans.pretrained import PRETRAINED_CKPTS, autoload_ckpt\nfrom gans.utils import init_random_seed\n\nif __name__ == \"__main__\":\n torch.set_grad_enabled(False)\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--arch\", choices=list(PRETRAINED_CKPTS.keys()), required=True)\n parser.add_argument(\"--batch_size\", type=int, default=8)\n parser.add_argument(\"--truncation_psi\", type=float, default=0.7)\n parser.add_argument(\"--seed\", type=int, default=0)\n parser.add_argument(\"--device\", choices=[\"cuda\", \"cpu\"], default=\"cuda\")\n args = parser.parse_args()\n\n init_random_seed(args.seed)\n\n # load checkpoint\n ckpt = autoload_ckpt(args.arch)\n z_dim = ckpt[\"cfg\"].model.generator.mapping_kwargs.in_ch\n\n # setup model\n G = build_generator(ckpt[\"cfg\"].model.generator)\n G.load_state_dict(ckpt[\"G_ema\"])\n G.eval().to(args.device)\n\n # 
generate\n    z = torch.randn(args.batch_size, z_dim).to(args.device)\n    angle = ckpt[\"angle\"].repeat_interleave(args.batch_size, dim=0).to(args.device)\n    imgs = G(z=z, angle=angle, truncation_psi=args.truncation_psi)\n\n    # visualize\n    grid = make_grid(imgs[\"image\"], nrow=2, pad_value=float(\"nan\"))[0].cpu()\n    plt.imshow(grid, cmap=\"turbo\", vmin=-1, vmax=1, interpolation=\"none\")\n    plt.axis(\"off\")\n    plt.show()\n","repo_name":"kazuto1011/dusty-gan-v2","sub_path":"quick_demo.py","file_name":"quick_demo.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"81"}
{"seq_id":"6114509457","text":"# Importing libraries\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\n# Opening the data files\ndf = pd.read_csv('enso.csv', delimiter=' ',nrows=253)\n\n# Converting an object column to string\ndf['MES'] = df['MES'].astype('|S')\n\nx1 = df['MES']\n#print(x1)\n\n# Selecting the ENSO column\ny1 = df['ENSO']\n#print(y1)\n\n# Selecting the year columns\ny2 = df['YEAR']\n#print(y2)\n\n# Plotting the ENSO curve\ny1.plot(color='grey')\n\n# This was the last command of the code!!!\n# Create 252 values on the x axis from 0 to 250\nx1 = np.linspace(0,250,252)\n\n# Fill the positive areas above 0.5 in red\nplt.fill_between(x1,y1,0.5,where=y1>0.5,color='red',interpolate=True)\n\n# Fill the negative areas below -0.5 in blue\nplt.fill_between(x1,y1,-0.5,where=y1<-0.5,color='blue',interpolate=True)\n\n# Adding a title to the figure\nplt.title('Monthly evolution of ENSO')\n\n# Adding a label to the x axis\nplt.xlabel('Years')\n\n# Adding a label to the y axis\nplt.ylabel('ENSO index')\n\n# Commands to plot horizontal reference lines across the figure\nplt.axhline(0.5, color='black', lw= 0.5, linestyle= '-')\nplt.axhline(0, color='black', lw= 0.5, linestyle= '-')\nplt.axhline(-0.5, color='black', lw= 0.5, linestyle= '-')\n\n# Adjusting the value range on the y axis\nplt.ylim([-3.0, 3.0])\n\n# Setting the tick values on the y axis\nplt.yticks([-3.0,-2.0,-1.0,0,1.0,2.0,3.0])\n\n# Adjusting the tick values on the x axis\nplt.xticks([0,24,48,72,96,120,144,168,192,216,240],['2000','2002','2004','2006','2008','2010','2012','2014','2016','2018','2020'])\n\n# Adjusting the start and end positions on the x axis\nplt.xlim([0,250])\n\nplt.show()\n\n#########################################################################\n\n# RUN: python enso.py\n\n#########################################################################\n","repo_name":"wilmorales21/Scripts","sub_path":"Python/enso.py","file_name":"enso.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"42586508441","text":"from random import choice\n\n\ndef ask_destination(context=None):\n    \"\"\"Prompts the user to specify their destination.\n    \"\"\"\n    options = [\n        'Where are you travelling to?',\n        'Where do you want to go?',\n        'Where to?',\n        'Where are you going?',\n        'Where are you getting off?',\n        'What\\'s your final stop?',\n        'What\\'s your destination?',\n    ]\n\n    # Check for context\n    if context:\n        # Departing From\n        if context['departing_from']:\n            dep_from = context['departing_from'].title()\n            options.append('From {} to where?'.format(dep_from))\n            options.append('Where are you going from {}?'.format(dep_from))\n\n    # Select a random question.\n    return 
choice(options)\n","repo_name":"jessola/AI_CW","sub_path":"questions/ask_destination.py","file_name":"ask_destination.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29885458416","text":"\ndef NoOf1(num):\n n = int(num)\n arr = []\n k = 0\n while n >= 1:\n if len(arr) < k:\n arr.append(0)\n if pow(2, k) >= n:\n if pow(2, k) > n:\n k -= 1\n else:\n arr.append(0)\n x = pow(2, k)\n n -= x\n arr[k] = 1\n k = 0\n k += 1\n print(\"Binary of given no. is:\", arr[::-1])\n\n result = arr.count(1)\n print(\"Number of 1 in\", num, \"is\", result)\n\n\nnum = int(input(\"Enter a no.: \"))\nNoOf1(num)\n","repo_name":"Ashutoshkr007/Spartans","sub_path":"Day1_Q2_No_of_1s.py","file_name":"Day1_Q2_No_of_1s.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38784107448","text":"import requests\nimport json\nimport re\nimport functools as ft\nimport sys\n\nfrom perso.params import *\n\nif len(sys.argv) == 1:\n from perso.params import *\nelif len(sys.argv) == 3:\n KEY = sys.argv[1],\n TOKEN = sys.argv[2],\ndef QUERY(): \n return {\n 'key': KEY,\n 'token': TOKEN,\n }\n\ntodoID = \"5fba9f4bb0753909be8d285a\"\ndoingID = \"5fba9f4e09b8760b0f131125\"\ndone2ID = \"5fba9f4f86a9890a88678fb9\"\npostponedID = \"5fba9f6dbb81f309ccd6e14d\"\nwaitingListID = \"5fbc1e0f1633c9696300d071\"\noldListID = \"5fbc4634ea01c87b03b78af8\"\ndone3ID = \"5fc2324b5c48267177d9b0b4\"\ntoFixID = \"5fc8bd523b78808f9f230247\"\n\nlistIDs = [todoID, doingID, done2ID, done3ID, postponedID, waitingListID, toFixID]\ndoneListsIDs = [done2ID, done3ID, oldListID]\n\naliceID = \"59a8725109a8dd7ff0455f52\"\nlouisID = \"5fbaa828b0cfcd0afbb2d7fb\"\nwilfriedID = \"5f73516ad7177427945901c4\"\n\n# nom , US , description, dépendances , charge, charge estimee , dev , status\n# 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7\n\ndef listToAdd(task):\n switcher = { \n \"TODO\" : todoID,\n \"DOING\" : doingID,\n \"DONE\" : done2ID\n }\n return switcher[task[7]]\n\ndef makeDesc(task):\n res = ( f\"US concernée : {task[1]:s}\\n\"\n f\"Dépendances : {task[3]:s}\\n\"\n f\"Charge estimée : {task[4]:s}\\n\"\n f\"Charge réelle : {task[5]:s}\\n\\n\"\n \"----------\\n\"\n f\"{task[2]:s}\" )\n return res\n\ndef members(task):\n switcher = {\n \"cartoonnerie\": aliceID,\n \"ljolliet\": louisID,\n \"wilfried\": wilfriedID\n }\n return ([switcher[task[6]]] if task[6] else [])\n\ndef labels(task):\n switcher = {\n \"TTES\": \"5fba9f34cdabcf46c0660326\",\n \"TDEV\": \"5fba9f34cdabcf46c0660328\",\n \"TDES\": \"5fba9f34cdabcf46c0660332\"\n }\n typeTask = task[0].split('-')[0]\n return [switcher[typeTask]]\n\ndef getAttachedCardsUrl(card):\n url = f\"https://api.trello.com/1/cards/{(card if isinstance(card, str) else card['id'])}/attachments\"\n headers = {\n \"Accept\": \"application/json\"\n }\n localQuery = QUERY()\n localQuery[\"fields\"] = [\"url\"]\n response = requests.request(\n \"GET\",\n url,\n headers=headers,\n params=localQuery\n )\n if response.status_code != 200:\n print(f\"ERROR {response.status_code} : {response.text}\")\n resMap = map( lambda x : x['url'], json.loads(response.text))\n return list(resMap)\n\ndef urlToCard(link):\n url = re.sub(r'trello.com/c/([a-zA-Z0-9]+)/.*', r'trello.com/1/cards/\\1/', link)\n localQuery = QUERY()\n response = requests.request(\n \"GET\",\n url,\n params=localQuery\n )\n if response.status_code != 200:\n print(f\"ERROR 
{response.status_code} : {response.text}\")\n return json.loads(response.text)\n\ndef getChecklists(card):\n url = f\"https://api.trello.com/1/cards/{card['id']}/checklists\"\n headers = {\n \"Accept\": \"application/json\"\n }\n localQuery = QUERY()\n response = requests.request(\n \"GET\",\n url,\n headers=headers,\n params=localQuery\n )\n if response.status_code != 200:\n print(f\"ERROR {response.status_code} : {response.text}\")\n def elt2dic(dic, elt):\n dic[elt['name']] = elt['id']\n return dic\n resMap = ft.reduce( elt2dic, json.loads(response.text), {} )\n return resMap\n\ndef getCardsFromListId(listID):\n url = f\"https://api.trello.com/1/lists/{listID}/cards\"\n localQuery = QUERY()\n response = requests.request(\n \"GET\",\n url,\n params=localQuery\n )\n if response.status_code != 200:\n print(f\"ERROR {response.status_code} : {response.text}\")\n return json.loads(response.text)\n\ndef getAllCards():\n return ft.reduce(\n lambda res, listID : res + getCardsFromListId(listID), \n listIDs, []\n )\n\ndef isDone(card):\n return card['idList'] in [done2ID, oldListID, done3ID]\n\ndef isTodo(card):\n return card['idList'] in [todoID, toFixID]\n\ndef isWaiting(card):\n return card['idList'] == waitingListID\n\n\n\n","repo_name":"cartoonnerie/g1-eq5-Release","sub_path":"trelloTools/trelloTools.py","file_name":"trelloTools.py","file_ext":"py","file_size_in_byte":4007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38213216909","text":"G_CONST = 9.81 # acceleration of gravity (m/sec^2)\nRHO_AIR = 1.2 # air density (kg/m^3)\n\n# rotation angle of footprint by wind direction index\nROTATION_BY_WIND_IDX = {0: 90.0, 4: 90.0, # S, N\n 1: 45.0, 5: 45.0, # SW, NE\n 2: 0.0, 6: 0.0, # E, W\n 3: -45.0, 7: -45.0} # SE, NW\n\n# wind direction\nWIND_DIR = ['S', 'SW', 'W', 'NW', 'N', 'NE', 'E', 'SE', 'RANDOM']\n\n# debris types\nDEBRIS_TYPES_KEYS = ['Rod', 'Compact', 'Sheet']\n\n# debris attributes\nDEBRIS_TYPES_ATTS = ['mass', 'frontal_area', 'cdav', 'ratio', 'flight_time']\n\nDEBRIS_TYPES_CLRS = {'Rod': 'r', 'Compact': 'c', 'Sheet': 'g'}\n\nBLDG_SPACING = [20.0, 40.0] # building spacing for debris impact model\n\nFLAGS_PRESSURE = ['cpe_str', 'cpe']\nFLAGS_DIST_DIR = ['row', 'col', 'patch', 'none']\nCOVERAGE_FAILURE_KEYS = ['failure_strength_in', 'failure_strength_out',\n 'failure_momentum']\n\n# CPI table\nDOMINANT_OPENING_RATIO_THRESHOLDS = [0.5, 1.5, 2.5, 6.0]\nCPI_TABLE_FOR_DOMINANT_OPENING = \\\n {0: {'windward': -0.3, 'leeward': -0.3, 'side1': -0.3, 'side2': -0.3},\n 1: {'windward': 0.2, 'leeward': -0.3, 'side1': -0.3, 'side2': -0.3},\n 2: {'windward': 0.7, 'leeward': 1.0, 'side1': 1.0, 'side2': 1.0},\n 3: {'windward': 0.85, 'leeward': 1.0, 'side1': 1.0, 'side2': 1.0},\n 4: {'windward': 1.0, 'leeward': 1.0, 'side1': 1.0, 'side2': 1.0}}\n\n# prob. dist. 
and shielding categories\nSHIELDING_MULTIPLIERS = {'full': 0.85,\n 'partial': 0.95,\n 'no': 1.0}\nSHIELDING_MULTIPLIERS_KEYS = ['full', 'partial', 'no']\nSHIELDING_MULTIPLIERS_PROB = [0.63, 0.15, 0.22]\n\n# debris flight distance\nFLIGHT_DISTANCE_COEFF = {2: {'Compact': [0.011, 0.2060],\n 'Sheet': [0.3456, 0.072],\n 'Rod': [0.2376, 0.0723]},\n 5: {'Compact': [0.405, -0.036, -0.052, 0.008],\n 'Sheet': [0.456, -0.148, 0.024, -0.0014],\n 'Rod': [0.4005, -0.16, 0.036, -0.0032]}}\n\nFLIGHT_DISTANCE_POWER = {2: [1, 2],\n 5: [2, 3, 4, 5]}\n\nCOSTING_FORMULA_TYPES = [1, 2]\n\nDEBRIS_VULNERABILITY = ['Weibull', 'Lognorm']\n\nVUL_DIC = {'Capital_city': {'alpha': 0.1586, 'beta': 3.8909},\n 'Tropical_town': {'alpha': 0.1030, 'beta': 4.1825}}\n\n","repo_name":"GeoscienceAustralia/vaws","sub_path":"vaws/model/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"}
{"seq_id":"2808157738","text":"# Lab No.2, Server.py\n# 29/07/2021\n# Team members:\n# - Abril Palencia, 18198\n# - Cristina Bautista, 161260\n\n# socket library\nfrom socket import *\nfrom ruido import Deteccion\n\nr = Deteccion()\n\n# Server port\nserverPort = 12000\n# Server socket\nserverSocket = socket(AF_INET, SOCK_DGRAM)\n# Bind address\nserverSocket.bind(('', serverPort))\n# server startup message\nprint(\"el server ya esta escuchando en el puerto: \" , serverPort)\n\nwhile True:\n mensaje, clientAddress = serverSocket.recvfrom(2048)\n mensajeCodificado = mensaje.decode()\n print(\"Mensaje recibido: \", mensajeCodificado)\n numero_f = str(r.fletcher32(mensajeCodificado, len(mensajeCodificado)))\n print(\"Mensaje enviado:\",numero_f)\n serverSocket.sendto(numero_f.encode(), clientAddress)\n","repo_name":"AbrilPal/Lab2_Redes","sub_path":"Server/Servidor.py","file_name":"Servidor.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"4653841971","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nfrom collections import defaultdict\nfrom typing import List, Optional\n\nclass Solution:\n def findDuplicateSubtrees(self, root: Optional[TreeNode]) -> List[Optional[TreeNode]]:\n def traverse(node):\n if not node:\n return \"\"\n representation = (\n \"(\" + traverse(node.left) + \")\" + str(node.val) + \"(\" + traverse(node.right) + \")\"\n )\n counter[representation] += 1\n if counter[representation] == 2:\n output.append(node)\n return representation\n counter = defaultdict(int)\n output = []\n traverse(root)\n return output\n","repo_name":"Xrenya/Algorithms","sub_path":"Leetcode/Python/_652.py","file_name":"_652.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"3053499104","text":"# Markhus Dammar\r\n# 22 September 2021\r\n# This program will take user input, append numbers to a list, and then find min and max\r\n\r\nuser_list = []\r\n\r\namt = int(input(\"How many numbers do you want in this list? 
>>>\")) # asks user how many values will be in list\r\n\r\nfor amt in range(0, amt): # appends list with each number inputted by user\r\n nums = input(f\"Enter integer number {amt} >>>\")\r\n user_list.append(nums)\r\n\r\nprint(user_list) # prints list\r\nprint(\"the minimum is: \" + min(user_list) + \"\\nthe maximum is: \" + max(user_list)) # prints min/max\r\n","repo_name":"dammara/findMinMaxList.py","sub_path":"findMinMaxList.py","file_name":"findMinMaxList.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9227143428","text":"import pyttsx3\nimport random\n\nengine = pyttsx3.init()\nengine.setProperty('rate', 300)\n\ndef get_tts(sentence):\n file_name = \"resources/sounds/\" + str(random.randint(0, 999999)) + \".mp3\"\n engine.save_to_file(sentence, file_name)\n engine.runAndWait()\n return file_name\n","repo_name":"teky1/ortho","sub_path":"speak.py","file_name":"speak.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74406527626","text":"#!/usr/bin/env python3.1\n\nfrom tkinter import *\n\nclass Application(object):\n def __init__(self, parent):\n\n self.parent= parent\n\n self.img=PhotoImage(file='goldenstar.gif')\n self.ims=PhotoImage(file='silverstar.gif')\n self.imq=PhotoImage(file='exit.gif')\n\n self.StarButton=Button(\n self.parent,\n image=self.img,\n command=self.Toggle\n )\n\n self.isGold=True\n self.StarButton.pack()\n \n self.QuitButton=Button(\n self.parent,\n image=self.imq,\n command=self.Quit\n )\n self.QuitButton.pack()\n\n def Toggle(self):\n self.isGold = not self.isGold\n self.StarButton['image']=self.img if self.isGold else self.ims\n \n\n def Quit(self):\n self.parent.destroy()\n \n\ndef main():\n root = Tk()\n myapp = Application(root)\n \n root.mainloop() \n\nif __name__=='__main__':\n main()\n","repo_name":"loristissino/oopython","sub_path":"lessons/19/icon_button.pyw","file_name":"icon_button.pyw","file_ext":"pyw","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"43436319797","text":"#\n# Little script to check the WMATA API and find out\n# how many elevators are out of service on the Green Line\n# / when is the last train at a given stop\n#\nfrom __future__ import print_function\nimport json\ntry:\n from urllib2 import urlopen, Request\nexcept ImportError:\n from urllib.request import urlopen, Request\n\napi_key = '6b700f7ea9db408e9745c207da7ca827'\ntimings_url = 'https://api.wmata.com/Rail.svc/json/jStationTimes' # [?StationCode]'\n\nhdrs = {'api_key': api_key}\nreq = Request(timings_url, headers=hdrs)\nresult = urlopen(req) # timeout=0.001\ncontent = result.read().decode('utf8') # 'cp1252'\ndata = json.loads(content)\n\nstation_times = data['StationTimes']\nfor time_info in station_times:\n code = time_info['Code']\n name = time_info['StationName']\n today_info = time_info['Saturday']\n last_trains = today_info['LastTrains']\n print(code, name)\n for train_info in last_trains:\n print(\"\\t\", train_info['Time'], train_info['DestinationStation'])\n","repo_name":"dcpylab/classes","sub_path":"2016-10/22/wmata.py","file_name":"wmata.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"81"} +{"seq_id":"6591726221","text":"import sys\nimport os\nimport argparse\n\n\ndef main():\n parser = 
argparse.ArgumentParser(description='Utility to list the includes and sources of a directory for populating a .slcc file')\n parser.add_argument('type', choices=['sources', 'headers'])\n parser.add_argument('--recursive', action='store_true')\n parser.add_argument('dir_path')\n\n args = parser.parse_args()\n\n def should_include_source(fn):\n if not fn.endswith(('.c', '.cc', '.cpp')):\n return False\n if fn.endswith('_test.cc'):\n return False\n\n return True\n\n def should_include_header(fn):\n if not fn.endswith(('.h', '.hpp')):\n return False\n\n return True\n\n\n lines = []\n if args.type == 'sources':\n if not args.recursive:\n for fn in os.listdir(args.dir_path):\n if should_include_source(fn):\n lines.append(fn)\n else:\n for root, _, files in os.walk(args.dir_path):\n for fn in files:\n if should_include_source(fn):\n p = f'{root}/{fn}'\n p = os.path.relpath(p, args.dir_path).replace('\\\\', '/')\n lines.append(p)\n\n for p in sorted(lines):\n print(f' - path: {p}')\n\n elif args.type == 'headers':\n if not args.recursive:\n for fn in os.listdir(args.dir_path):\n if should_include_header(fn):\n lines.append(fn)\n else:\n for root, _, files in os.walk(args.dir_path):\n for fn in files:\n if should_include_header(fn):\n p = f'{root}/{fn}'\n p = os.path.relpath(p, args.dir_path).replace('\\\\', '/')\n lines.append(p)\n\n for p in sorted(lines):\n print(f' - path: {p}')\n\n\n\nif __name__ == '__main__':\n main()","repo_name":"SiliconLabs/mltk","sub_path":"cpp/tools/utils/slcc_list.py","file_name":"slcc_list.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"81"}
{"seq_id":"17993100227","text":"\"\"\"\r\nThe open() function is used to open and create files\r\n\r\nUsage : open(file, file_access_mode)\r\nfile_access_mode => specifies the purpose for which we open the file\r\n\"\"\"\r\n\r\n# \"w\": (Write) write mode. \r\n\"\"\" \r\n **Creates the file at the given location\r\n **Deletes the file contents and rewrites them\r\n\"\"\"\r\n# file=open(\"newfile.txt\",\"w\")\r\n# file = open(\"C:/Users/teknikservis/Desktop/a/newfile2.txt\",\"w\")\r\n# file.close()\r\n\r\n# file = open(\"newfile.txt\",\"w\",encoding=\"utf-8\")\r\n# file.write(\"Mehmet Alp Varna ığöüç\")\r\n# file.close()\r\n\r\n# \"a\": (Append) append mode. Creates the file if it does not exist at the location\r\n\r\n# file = open(\"newfile.txt\",\"a\",encoding=\"utf-8\")\r\n# file.write(\" Mehmet Alp Varna\\n \")\r\n# file.close()\r\n\r\n# \"x\": (Create) create mode. Raises an error if the file already exists\r\nfile = open(\"newfile.txt\",\"x\",encoding=\"utf-8\")\r\nfile.close()\r\n# \"r\": (Read) read mode. The default. Raises an error if the file does not exist at the location\r\n","repo_name":"AlpVrn/Python","sub_path":"Pythonda Dosya Yönetimi/files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"8879737912","text":"import time\nfrom avocado import Test\nfrom avocado_cloud.app import Setup\nfrom avocado_cloud.app.azure import AzureAccount\n\n\nclass AzureTimeKeeping(Test):\n def setUp(self):\n self.casestatus = False\n account = AzureAccount(self.params)\n account.login()\n cloud = Setup(self.params, self.name)\n self.vm = cloud.vm\n self.session = cloud.init_vm()\n status, output = self.session.cmd_status_output('sudo su -')\n self.assertEqual(status, 0,\n \"User [root] login failed\\n{}\".format(str(output)))\n if self.name.name.endswith(\"test_chrony\"):\n status, output = self.session.cmd_status_output(\n 'systemctl is-enabled chronyd')\n self.assertEqual(status, 0, \"There isn't chrony installed\")\n self.assertEqual(output.strip(), \"enabled\", \"Chrony isn't enabled\")\n elif self.name.name.endswith(\"test_clocksource_performance\"):\n status, output = self.session.cmd_status_output('which gcc')\n if status != 0:\n status, output = self.session.cmd_status_output(\n 'rpm -qa | grep rhui-azure')\n if status != 0:\n status, output = self.session.cmd_status_output(\n 'rpm -ivh `ls /root/rhui-azure*`')\n self.assertEqual(status, 0, \"Failed to install rhui-azure\")\n status, output = self.session.cmd_status_output(\n 'yum -y install gcc', timeout=400)\n self.assertEqual(status, 0, \"Failed to install gcc\")\n\n def test_chrony(self):\n status, output = self.session.cmd_status_output(\n \"sed -i 's/^makestep.*$/makestep 10 6/g' /etc/chrony.conf\")\n self.assertEqual(status, 0, \"Failed to modify chrony conf file\")\n self.session.cmd_output('echo \"\" > /var/log/messages')\n status, output = self.session.cmd_status_output(\n 'systemctl restart chronyd')\n self.assertEqual(status, 0, \"Failed to restart chrony\")\n time.sleep(30)\n current_time = self.session.cmd_output('date +\"%s\"')\n # Modify time to 10 mins later\n target_time = str(int(current_time) + 630)\n format_time = self.session.cmd_output(\n 'date -d @{} +\"%Y%m%d %H:%M:%S\"'.format(target_time))\n status, output = self.session.cmd_status_output(\n \"date -s '{}'\".format(format_time))\n self.assertEqual(status, 0, \"Failed to modify system time\")\n cmd = \"cat /var/log/messages| grep 'System clock was stepped by'| awk \\\n-F ']:' '{print $2}' | awk -F 'by -' '{print $2}' | awk -F ' ' '{print $1}'\"\n\n for interval in [300, 180, 120]:\n time.sleep(interval)\n status, output = self.session.cmd_status_output(cmd)\n self.assertEqual(status, 0, \"Failed to execute command\")\n if len(output):\n stepby_time = output.strip().split()\n for item in stepby_time:\n ret = 400 < int(float(item)) < 800\n if ret is True:\n self.casestatus = True\n break\n if self.casestatus is True:\n break\n self.assertEqual(self.casestatus, True, \"Failed to sync time\")\n\n def test_clocksource_performance(self):\n script_content = [\n \"#include <time.h>\", \"main()\", \"{ int rc;\", \"long i;\",\n \"struct timespec ts;\", \"for(i=0; i<100000000; i++)\",\n \"{ rc = clock_gettime(CLOCK_MONOTONIC, &ts); } }\"\n ]\n script_path = \"/tmp/clock_gettime.c\"\n with open(script_path, \"w+\") as f:\n for content in script_content:\n f.write(content + \"\\n\")\n self.session.copy_files_to(local_path=script_path,\n remote_path=script_path)\n status, output = 
self.session.cmd_status_output(\n \"gcc {} -o /tmp/clock\".format(script_path))\n self.assertEqual(status, 0, \"Failed to compile\")\n status, output = self.session.cmd_status_output(\"cat \\\n/sys/devices/system/clocksource/clocksource0/current_clocksource\")\n self.assertEqual(status, 0, \"Failed to get current clocksource\")\n self.assertEqual(\n output.strip(), \"hyperv_clocksource_tsc_page\",\n \"Current clocksource isn't hyperv_clocksource_tsc_page\\n{}\".format(\n output.strip()))\n status, output = self.session.cmd_status_output(\"time /tmp/clock\")\n self.assertEqual(status, 0, \"Failed to execute program\")\n\n def test_unbind_clocksource(self):\n cmd = \"cat /sys/devices/system/clocksource/clocksource0/\\\ncurrent_clocksource\"\n\n status, output = self.session.cmd_status_output(cmd)\n self.assertEqual(status, 0, \"Failed to get current clocksource\")\n self.assertEqual(\n output.strip(), u\"hyperv_clocksource_tsc_page\",\n \"Current clocksource isn't hyperv_clocksource_tsc_page\\n{}\".format(\n output.strip()))\n cmd = \"cat /sys/devices/system/clocksource/clocksource0/\\\navailable_clocksource\"\n\n status, output = self.session.cmd_status_output(cmd)\n self.assertEqual(status, 0, \"Failed to get available clocksource\")\n cmd = \"echo hyperv_clocksource_tsc_page > /sys/devices/system/\\\nclocksource/clocksource0/unbind_clocksource\"\n\n status, output = self.session.cmd_status_output(cmd)\n self.assertEqual(status, 0, \"Failed to unbind clocksource\")\n cmd = \"cat /sys/devices/system/clocksource/clocksource0/\\\ncurrent_clocksource\"\n\n status, output = self.session.cmd_status_output(cmd)\n self.assertEqual(status, 0, \"Failed to get current clocksource\")\n self.assertNotEqual(output.strip(), u'hyperv_clocksource_tsc_page',\n \"Failed to change to another clocksource\")\n cmd = \"dmesg | grep clock | egrep -i 'fail|error'\"\n output = self.session.cmd_output(cmd)\n self.assertEqual(\n len(output), 0,\n \"There are errors in dmesg about clocksource\\n{}\".format(\n output.strip()))\n\n def test_check_clocksource(self):\n # Check CPU vendor,not can only support Intel CPU\n cmd = \"lscpu | grep 'Vendor ID:' | awk -F ' ' '{print $3}'\"\n status, output = self.session.cmd_status_output(cmd)\n self.assertEqual(status, 0, \"Failed to get vendor id\")\n self.assertEqual(output.strip(), u'GenuineIntel',\n \"CPU is not Intel,now this case only support Intel\")\n cmd = \"cat /sys/devices/system/clocksource/clocksource0/\\\ncurrent_clocksource\"\n\n status, output = self.session.cmd_status_output(cmd)\n self.assertEqual(status, 0, \"Failed to get current clocksource\")\n self.assertEqual(\n output.strip(), u\"hyperv_clocksource_tsc_page\",\n \"Current clocksource isn't hyperv_clocksource_tsc_page\\n{}\".format(\n output.strip()))\n cmd = \"grep tsc /proc/cpuinfo\"\n status, output = self.session.cmd_status_output(cmd)\n self.assertEqual(0, status, \"CPU doesn't contain 'tsc' flag\")\n cmd = \"dmesg | egrep -i 'tsc|clock'\"\n status, output = self.session.cmd_status_output(cmd)\n self.assertEqual(0, status, \"Failed to get dmesg log\")\n self.log.info(\"\\n{}\".format(output))\n\n def test_check_clockevent(self):\n # =====This is test case of RHEL-82815=====\n cmd = \"cat /sys/devices/system/clockevents/clockevent0/current_device\"\n status, output = self.session.cmd_status_output(cmd)\n self.assertEqual(status, 0, \"Failed to get cloudevent\")\n self.assertEqual(output.strip(), u'Hyper-V clockevent',\n \"Current clockevent isn't Hyper-V clockevent\")\n # =====End of test case 
RHEL-82815=====\n\n def tearDown(self):\n if self.name.name.endswith(\"test_unbind_clocksource\"):\n self.session.send_line(\"reboot\")\n","repo_name":"virt-s1/avocado-cloud","sub_path":"tests/azure/test_timekeeping.py","file_name":"test_timekeeping.py","file_ext":"py","file_size_in_byte":7983,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"81"} +{"seq_id":"18304965381","text":"import re\nimport sys\n\nfrom pyglet.gl import *\nfrom pyglet import event\nfrom pyglet import graphics\nfrom pyglet.text import runlist\n\nfrom pyglet.font.base import _grapheme_break\n\n_is_pyglet_doc_run = hasattr(sys, \"is_pyglet_doc_run\") and sys.is_pyglet_doc_run\n\n_distance_re = re.compile(r'([-0-9.]+)([a-zA-Z]+)')\n\n\ndef _parse_distance(distance, dpi):\n \"\"\"Parse a distance string and return corresponding distance in pixels as\n an integer.\n \"\"\"\n if isinstance(distance, int):\n return distance\n elif isinstance(distance, float):\n return int(distance)\n\n match = _distance_re.match(distance)\n assert match, 'Could not parse distance %s' % distance\n if not match:\n return 0\n\n value, unit = match.groups()\n value = float(value)\n if unit == 'px':\n return int(value)\n elif unit == 'pt':\n return int(value * dpi / 72.0)\n elif unit == 'pc':\n return int(value * dpi / 6.0)\n elif unit == 'in':\n return int(value * dpi)\n elif unit == 'mm':\n return int(value * dpi * 0.0393700787)\n elif unit == 'cm':\n return int(value * dpi * 0.393700787)\n else:\n assert False, 'Unknown distance unit %s' % unit\n\n\nclass _Line:\n align = 'left'\n\n margin_left = 0\n margin_right = 0\n\n length = 0\n\n ascent = 0\n descent = 0\n width = 0\n paragraph_begin = False\n paragraph_end = False\n\n x = None\n y = None\n\n def __init__(self, start):\n self.vertex_lists = []\n self.start = start\n self.boxes = []\n\n def __repr__(self):\n return '_Line(%r)' % self.boxes\n\n def add_box(self, box):\n self.boxes.append(box)\n self.length += box.length\n self.ascent = max(self.ascent, box.ascent)\n self.descent = min(self.descent, box.descent)\n self.width += box.advance\n\n def delete(self, layout):\n for vertex_list in self.vertex_lists:\n vertex_list.delete()\n self.vertex_lists = []\n\n for box in self.boxes:\n box.delete(layout)\n\n\nclass _LayoutContext:\n def __init__(self, layout, document, colors_iter, background_iter):\n self.colors_iter = colors_iter\n underline_iter = document.get_style_runs('underline')\n self.decoration_iter = runlist.ZipRunIterator((background_iter, underline_iter))\n self.baseline_iter = runlist.FilteredRunIterator(\n document.get_style_runs('baseline'),\n lambda value: value is not None, 0)\n\n\nclass _StaticLayoutContext(_LayoutContext):\n def __init__(self, layout, document, colors_iter, background_iter):\n super().__init__(layout, document, colors_iter, background_iter)\n self.vertex_lists = layout._vertex_lists\n self.boxes = layout._boxes\n\n def add_list(self, vertex_list):\n self.vertex_lists.append(vertex_list)\n\n def add_box(self, box):\n self.boxes.append(box)\n\n\nclass _IncrementalLayoutContext(_LayoutContext):\n line = None\n\n def add_list(self, vertex_list):\n self.line.vertex_lists.append(vertex_list)\n\n def add_box(self, box):\n pass\n\n\nclass _AbstractBox:\n owner = None\n\n def __init__(self, ascent, descent, advance, length):\n self.ascent = ascent\n self.descent = descent\n self.advance = advance\n self.length = length\n\n def place(self, layout, i, x, y, context):\n raise NotImplementedError('abstract')\n\n def 
delete(self, layout):\n raise NotImplementedError('abstract')\n\n def get_position_in_box(self, x):\n raise NotImplementedError('abstract')\n\n def get_point_in_box(self, position):\n raise NotImplementedError('abstract')\n\n\nclass _GlyphBox(_AbstractBox):\n def __init__(self, owner, font, glyphs, advance):\n \"\"\"Create a run of glyphs sharing the same texture.\n\n :Parameters:\n `owner` : `pyglet.image.Texture`\n Texture of all glyphs in this run.\n `font` : `pyglet.font.base.Font`\n Font of all glyphs in this run.\n `glyphs` : list of (int, `pyglet.font.base.Glyph`)\n Pairs of ``(kern, glyph)``, where ``kern`` gives horizontal\n displacement of the glyph in pixels (typically 0).\n `advance` : int\n Width of glyph run; must correspond to the sum of advances\n and kerns in the glyph list.\n\n \"\"\"\n super().__init__(font.ascent, font.descent, advance, len(glyphs))\n assert owner\n self.owner = owner\n self.font = font\n self.glyphs = glyphs\n self.advance = advance\n\n def place(self, layout, i, x, y, context):\n assert self.glyphs\n try:\n group = layout.groups[self.owner]\n except KeyError:\n group = layout.groups[self.owner] = TextLayoutTextureGroup(self.owner, layout.foreground_group)\n\n n_glyphs = self.length\n vertices = []\n tex_coords = []\n x1 = x\n for start, end, baseline in context.baseline_iter.ranges(i, i + n_glyphs):\n baseline = layout._parse_distance(baseline)\n assert len(self.glyphs[start - i:end - i]) == end - start\n for kern, glyph in self.glyphs[start - i:end - i]:\n x1 += kern\n v0, v1, v2, v3 = glyph.vertices\n v0 += x1\n v2 += x1\n v1 += y + baseline\n v3 += y + baseline\n vertices.extend(map(int, [v0, v1, v2, v1, v2, v3, v0, v3]))\n t = glyph.tex_coords\n tex_coords.extend(t)\n x1 += glyph.advance\n\n # Text color\n colors = []\n for start, end, color in context.colors_iter.ranges(i, i + n_glyphs):\n if color is None:\n color = (0, 0, 0, 255)\n colors.extend(color * ((end - start) * 4))\n\n vertex_list = layout.batch.add(n_glyphs * 4, GL_QUADS, group,\n ('v2f/dynamic', vertices),\n ('t3f/dynamic', tex_coords),\n ('c4B/dynamic', colors))\n context.add_list(vertex_list)\n\n # Decoration (background color and underline)\n #\n # Should iterate over baseline too, but in practice any sensible\n # change in baseline will correspond with a change in font size,\n # and thus glyph run as well. 
So we cheat and just use whatever\n # baseline was seen last.\n background_vertices = []\n background_colors = []\n underline_vertices = []\n underline_colors = []\n y1 = y + self.descent + baseline\n y2 = y + self.ascent + baseline\n x1 = x\n for start, end, decoration in context.decoration_iter.ranges(i, i + n_glyphs):\n bg, underline = decoration\n x2 = x1\n for kern, glyph in self.glyphs[start - i:end - i]:\n x2 += glyph.advance + kern\n\n if bg is not None:\n background_vertices.extend([x1, y1, x2, y1, x2, y2, x1, y2])\n background_colors.extend(bg * 4)\n\n if underline is not None:\n underline_vertices.extend([x1, y + baseline - 2, x2, y + baseline - 2])\n underline_colors.extend(underline * 2)\n\n x1 = x2\n\n if background_vertices:\n background_list = layout.batch.add(\n len(background_vertices) // 2, GL_QUADS,\n layout.background_group,\n ('v2f/dynamic', background_vertices),\n ('c4B/dynamic', background_colors))\n context.add_list(background_list)\n\n if underline_vertices:\n underline_list = layout.batch.add(\n len(underline_vertices) // 2, GL_LINES,\n layout.foreground_decoration_group,\n ('v2f/dynamic', underline_vertices),\n ('c4B/dynamic', underline_colors))\n context.add_list(underline_list)\n\n def delete(self, layout):\n pass\n\n def get_point_in_box(self, position):\n x = 0\n for (kern, glyph) in self.glyphs:\n if position == 0:\n break\n position -= 1\n x += glyph.advance + kern\n return x\n\n def get_position_in_box(self, x):\n position = 0\n last_glyph_x = 0\n for kern, glyph in self.glyphs:\n last_glyph_x += kern\n if last_glyph_x + glyph.advance // 2 > x:\n return position\n position += 1\n last_glyph_x += glyph.advance\n return position\n\n def __repr__(self):\n return '_GlyphBox(%r)' % self.glyphs\n\n\nclass _InlineElementBox(_AbstractBox):\n def __init__(self, element):\n \"\"\"Create a glyph run holding a single element.\n \"\"\"\n super().__init__(element.ascent, element.descent, element.advance, 1)\n self.element = element\n self.placed = False\n\n def place(self, layout, i, x, y, context):\n self.element.place(layout, x, y)\n self.placed = True\n context.add_box(self)\n\n def delete(self, layout):\n # font == element\n if self.placed:\n self.element.remove(layout)\n self.placed = False\n\n def get_point_in_box(self, position):\n if position == 0:\n return 0\n else:\n return self.advance\n\n def get_position_in_box(self, x):\n if x < self.advance // 2:\n return 0\n else:\n return 1\n\n def __repr__(self):\n return '_InlineElementBox(%r)' % self.element\n\n\nclass _InvalidRange:\n def __init__(self):\n self.start = sys.maxsize\n self.end = 0\n\n def insert(self, start, length):\n if self.start >= start:\n self.start += length\n if self.end >= start:\n self.end += length\n self.invalidate(start, start + length)\n\n def delete(self, start, end):\n if self.start > end:\n self.start -= end - start\n elif self.start > start:\n self.start = start\n if self.end > end:\n self.end -= end - start\n elif self.end > start:\n self.end = start\n\n def invalidate(self, start, end):\n if end <= start:\n return\n self.start = min(self.start, start)\n self.end = max(self.end, end)\n\n def validate(self):\n start, end = self.start, self.end\n self.start = sys.maxsize\n self.end = 0\n return start, end\n\n def is_invalid(self):\n return self.end > self.start\n\n\n# Text group hierarchy\n#\n# top_group [Scrollable]TextLayoutGroup(Group)\n# background_group OrderedGroup(0)\n# foreground_group TextLayoutForegroundGroup(OrderedGroup(1))\n# [font textures] 
TextLayoutTextureGroup(Group)\n# [...] TextLayoutTextureGroup(Group)\n# foreground_decoration_group\n# TextLayoutForegroundDecorationGroup(OrderedGroup(2))\n\nclass TextLayoutGroup(graphics.Group):\n \"\"\"Top-level rendering group for :py:func:`~pyglet.text.layout.TextLayout`.\n\n The blend function is set for glyph rendering (``GL_SRC_ALPHA`` /\n ``GL_ONE_MINUS_SRC_ALPHA``). The group is shared by all :py:func:`~pyglet.text.layout.TextLayout`\n instances as it has no internal state.\n \"\"\"\n\n def set_state(self):\n glPushAttrib(GL_ENABLE_BIT | GL_CURRENT_BIT)\n glEnable(GL_BLEND)\n glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)\n\n def unset_state(self):\n glPopAttrib()\n\n def __hash__(self):\n return hash((id(self.parent), GL_BLEND, GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA))\n\n def __eq__(self, other):\n return self.__class__ is other.__class__ and self.parent is other.parent\n\n def __repr__(self):\n return '%s(%r)' % (self.__class__.__name__, self.parent)\n\n\nclass ScrollableTextLayoutGroup(graphics.Group):\n \"\"\"Top-level rendering group for :py:class:`~pyglet.text.layout.ScrollableTextLayout`.\n\n The group maintains internal state for setting the clipping planes and\n view transform for scrolling. Because the group has internal state\n specific to the text layout, the group is never shared.\n \"\"\"\n x = 0\n y = 0\n width = 0\n height = 0\n view_x = 0\n view_y = 0\n\n def set_state(self):\n glPushAttrib(GL_ENABLE_BIT | GL_TRANSFORM_BIT | GL_CURRENT_BIT)\n\n glEnable(GL_BLEND)\n glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)\n\n glEnable(GL_SCISSOR_TEST)\n glScissor(self.x, self.y, self.width, self.height)\n\n glTranslatef(-self.view_x, -self.view_y, 0)\n\n def unset_state(self):\n glTranslatef(self.view_x, self.view_y, 0)\n glDisable(GL_SCISSOR_TEST)\n glPopAttrib()\n\n def __eq__(self, other):\n return self is other\n\n def __hash__(self):\n return id(self)\n\n\nclass IncrementalTextLayoutGroup(graphics.Group):\n \"\"\"Top-level rendering group for :py:class:`~pyglet.text.layout.IncrementalTextLayout`.\n\n The group maintains internal state for setting the clipping planes and\n view transform for scrolling. 
Because the group has internal state\n specific to the text layout, the group is never shared.\n \"\"\"\n _clip_x = 0\n _clip_y = 0\n _clip_width = 0\n _clip_height = 0\n _view_x = 0\n _view_y = 0\n translate_x = 0 # x - view_x\n translate_y = 0 # y - view_y\n\n def set_state(self):\n glPushAttrib(GL_ENABLE_BIT | GL_TRANSFORM_BIT | GL_CURRENT_BIT)\n\n glEnable(GL_BLEND)\n glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)\n\n glEnable(GL_SCISSOR_TEST)\n glScissor(self._clip_x, self._clip_y - self._clip_height, self._clip_width, self._clip_height)\n\n glTranslatef(self.translate_x, self.translate_y, 0)\n\n def unset_state(self):\n glTranslatef(-self.translate_x, -self.translate_y, 0)\n glDisable(GL_SCISSOR_TEST)\n glPopAttrib()\n\n @property\n def top(self):\n return self._clip_y\n\n @top.setter\n def top(self, top):\n \"\"\"Top edge of the text layout (measured from the\n bottom of the graphics viewport).\n\n :type: int\n \"\"\"\n self._clip_y = top\n self.translate_y = self._clip_y - self._view_y\n\n @property\n def left(self):\n return self._clip_x\n\n @left.setter\n def left(self, left):\n \"\"\"Left edge of the text layout.\n\n :type: int\n \"\"\"\n self._clip_x = left\n self.translate_x = self._clip_x - self._view_x\n\n @property\n def width(self):\n return self._clip_width\n\n @width.setter\n def width(self, width):\n \"\"\"Width of the text layout.\n\n :type: int\n \"\"\"\n self._clip_width = width\n\n @property\n def height(self):\n return self._clip_height\n\n @height.setter\n def height(self, height):\n \"\"\"Height of the text layout.\n\n :type: int\n \"\"\"\n self._clip_height = height\n\n @property\n def view_x(self):\n return self._view_x\n\n @view_x.setter\n def view_x(self, view_x):\n \"\"\"Horizontal scroll offset.\n\n :type: int\n \"\"\"\n self._view_x = view_x\n self.translate_x = self._clip_x - self._view_x\n\n @property\n def view_y(self):\n return self._view_y\n\n @view_y.setter\n def view_y(self, view_y):\n \"\"\"Vertical scroll offset.\n\n :type: int\n \"\"\"\n self._view_y = view_y\n self.translate_y = self._clip_y - self._view_y\n\n def __eq__(self, other):\n return self is other\n\n def __hash__(self):\n return id(self)\n\n\nclass TextLayoutForegroundGroup(graphics.OrderedGroup):\n \"\"\"Rendering group for foreground elements (glyphs) in all text layouts.\n\n The group enables ``GL_TEXTURE_2D``.\n \"\"\"\n\n def set_state(self):\n glEnable(GL_TEXTURE_2D)\n\n # unset_state not needed, as parent group will pop enable bit\n\n\nclass TextLayoutForegroundDecorationGroup(graphics.OrderedGroup):\n \"\"\"Rendering group for decorative elements (e.g., glyph underlines) in all\n text layouts.\n\n The group disables ``GL_TEXTURE_2D``.\n \"\"\"\n\n def set_state(self):\n glDisable(GL_TEXTURE_2D)\n\n # unset_state not needed, as parent group will pop enable bit\n\n\nclass TextLayoutTextureGroup(graphics.Group):\n \"\"\"Rendering group for a glyph texture in all text layouts.\n\n The group binds its texture to ``GL_TEXTURE_2D``. 
The group is shared\n between all other text layout uses of the same texture.\n \"\"\"\n\n def __init__(self, texture, parent):\n assert texture.target == GL_TEXTURE_2D\n super().__init__(parent)\n\n self.texture = texture\n\n def set_state(self):\n glBindTexture(GL_TEXTURE_2D, self.texture.id)\n\n # unset_state not needed, as next group will either\n # bind a new texture or pop enable bit.\n\n def __hash__(self):\n return hash((self.texture.id, self.parent))\n\n def __eq__(self, other):\n return (self.__class__ is other.__class__ and\n self.texture.id == other.texture.id and\n self.parent == other.parent)\n\n def __repr__(self):\n return '%s(%d, %r)' % (self.__class__.__name__, self.texture.id, self.parent)\n\n\nclass TextLayout:\n \"\"\"Lay out and display documents.\n\n This class is intended for displaying documents that do not change\n regularly -- any change will cost some time to lay out the complete\n document again and regenerate all vertex lists.\n\n The benefit of this class is that texture state is shared between\n all layouts of this class. The time to draw one :py:func:`~pyglet.text.layout.TextLayout` may be\n roughly the same as the time to draw one :py:class:`~pyglet.text.layout.IncrementalTextLayout`; but\n drawing ten :py:func:`~pyglet.text.layout.TextLayout` objects in one batch is much faster than drawing\n ten incremental or scrollable text layouts.\n\n :py:func:`~pyglet.text.Label` and :py:func:`~pyglet.text.HTMLLabel` provide a convenient interface to this class.\n\n :Ivariables:\n `content_width` : int\n Calculated width of the text in the layout. This may overflow\n the desired width if word-wrapping failed.\n `content_height` : int\n Calculated height of the text in the layout.\n `top_group` : `~pyglet.graphics.Group`\n Top-level rendering group.\n `background_group` : `~pyglet.graphics.Group`\n Rendering group for background color.\n `foreground_group` : `~pyglet.graphics.Group`\n Rendering group for glyphs.\n `foreground_decoration_group` : `~pyglet.graphics.Group`\n Rendering group for glyph underlines.\n\n \"\"\"\n _document = None\n _vertex_lists = ()\n _boxes = ()\n\n top_group = TextLayoutGroup()\n background_group = graphics.OrderedGroup(0, top_group)\n foreground_group = TextLayoutForegroundGroup(1, top_group)\n foreground_decoration_group = TextLayoutForegroundDecorationGroup(2, top_group)\n\n _update_enabled = True\n _own_batch = False\n _origin_layout = False # Lay out relative to origin? Otherwise to box.\n\n _x = 0\n _y = 0\n _width = None\n _height = None\n _anchor_x = 'left'\n _anchor_y = 'bottom'\n _content_valign = 'top'\n _multiline = False\n _visible = True\n\n def __init__(self, document, width=None, height=None,\n multiline=False, dpi=None, batch=None, group=None,\n wrap_lines=True):\n \"\"\"Create a text layout.\n\n :Parameters:\n `document` : `AbstractDocument`\n Document to display.\n `width` : int\n Width of the layout in pixels, or None\n `height` : int\n Height of the layout in pixels, or None\n `multiline` : bool\n If False, newline and paragraph characters are ignored, and\n text is not word-wrapped.\n If True, text is wrapped only if the `wrap_lines` is True.\n `dpi` : float\n Font resolution; defaults to 96.\n `batch` : `~pyglet.graphics.Batch`\n Optional graphics batch to add this layout to.\n `group` : `~pyglet.graphics.Group`\n Optional rendering group to parent all groups this text layout\n uses. 
Note that layouts with different\n groups will not be rendered simultaneously in a batch.\n `wrap_lines` : bool\n If True and `multiline` is True, the text is word-wrapped using\n the specified width.\n\n \"\"\"\n self.content_width = 0\n self.content_height = 0\n\n self.groups = {}\n self._init_groups(group)\n\n if batch is None:\n batch = graphics.Batch()\n self._own_batch = True\n self._batch = batch\n\n self._width = width\n if height is not None:\n self._height = height\n if multiline:\n self._multiline = multiline\n\n self._wrap_lines_flag = wrap_lines\n self._wrap_lines_invariant()\n\n if dpi is None:\n dpi = 96\n self._dpi = dpi\n self.document = document\n\n @property\n def dpi(self):\n \"\"\"Get DPI used by this layout.\n\n :type: float\n \"\"\"\n return self._dpi\n\n @property\n def document(self):\n \"\"\"Document to display.\n\n For :py:class:`~pyglet.text.layout.IncrementalTextLayout` it is\n far more efficient to modify a document in-place than to replace\n the document instance on the layout.\n\n :type: `AbstractDocument`\n \"\"\"\n return self._document\n\n @document.setter\n def document(self, document):\n if self._document:\n self._document.remove_handlers(self)\n self._uninit_document()\n document.push_handlers(self)\n self._document = document\n self._init_document()\n\n @property\n def batch(self):\n \"\"\"The Batch that this Layout is assigned to.\n\n If no Batch is assigned, an internal Batch is \n created and used.\n\n :type: :py:class:`~pyglet.graphics.Batch`\n \n \"\"\"\n return self._batch\n\n @batch.setter\n def batch(self, batch):\n if self._batch == batch:\n return\n\n if batch is None:\n self._batch = graphics.Batch()\n self._own_batch = True\n self._update()\n else:\n self._batch = batch\n self._own_batch = False\n self._update()\n\n @property\n def x(self):\n \"\"\"X coordinate of the layout.\n\n See also :py:attr:`~pyglet.text.layout.TextLayout.anchor_x`.\n\n :type: int\n \"\"\"\n return self._x\n\n @x.setter\n def x(self, x):\n self._set_x(x)\n\n def _set_x(self, x):\n if self._boxes:\n self._x = x\n self._update()\n else:\n dx = x - self._x\n for vertex_list in self._vertex_lists:\n vertices = vertex_list.vertices[:]\n vertices[::2] = [x + dx for x in vertices[::2]]\n vertex_list.vertices[:] = vertices\n self._x = x\n\n @property\n def y(self):\n \"\"\"Y coordinate of the layout.\n\n See also `anchor_y`.\n\n :type: int\n \"\"\"\n return self._y\n\n @y.setter\n def y(self, y):\n self._set_y(y)\n\n def _set_y(self, y):\n if self._boxes:\n self._y = y\n self._update()\n else:\n dy = y - self._y\n for vertex_list in self._vertex_lists:\n vertices = vertex_list.vertices[:]\n vertices[1::2] = [y + dy for y in vertices[1::2]]\n vertex_list.vertices[:] = vertices\n self._y = y\n\n @property\n def position(self):\n \"\"\"The (X, Y) coordinates of the layout, as a tuple.\n\n See also :py:attr:`~pyglet.text.layout.TextLayout.anchor_x`,\n and :py:attr:`~pyglet.text.layout.TextLayout.anchor_y`.\n\n :type: (int, int)\n \"\"\"\n return self._x, self._y\n\n @position.setter\n def position(self, position):\n self.update(*position)\n\n def update(self, x, y):\n \"\"\"Change both X and Y positions of the layout at once for faster performance.\n\n :Parameters:\n `x` : int\n X coordinate of the layout.\n `y` : int\n Y coordinate of the layout.\n \"\"\"\n if self._boxes:\n self._x = x\n self._y = y\n self._update()\n else:\n dx = x - self._x\n dy = y - self._y\n for vertex_list in self._vertex_lists:\n vertices = vertex_list.vertices[:]\n vertices[::2] = [x + dx for x in 
vertices[::2]]\n vertices[1::2] = [y + dy for y in vertices[1::2]]\n vertex_list.vertices[:] = vertices\n self._x = x\n self._y = y\n\n @property\n def visible(self):\n \"\"\"True if the layout will be drawn.\n\n :type: bool\n \"\"\"\n return self._visible\n\n @visible.setter\n def visible(self, value):\n if value != self._visible:\n self._visible = value\n\n if value:\n self._update()\n else:\n self.delete()\n\n @property\n def width(self):\n \"\"\"Width of the layout.\n\n This property has no effect if `multiline` is False or `wrap_lines` is False.\n\n :type: int\n \"\"\"\n return self._width\n\n @width.setter\n def width(self, width):\n self._width = width\n self._wrap_lines_invariant()\n self._update()\n\n @property\n def height(self):\n \"\"\"Height of the layout.\n\n :type: int\n \"\"\"\n return self._height\n\n @height.setter\n def height(self, height):\n self._height = height\n self._update()\n\n @property\n def multiline(self):\n \"\"\"Set if multiline layout is enabled.\n\n If multiline is False, newline and paragraph characters are ignored and\n text is not word-wrapped.\n If True, the text is word-wrapped only if the `wrap_lines` is True.\n\n :type: bool\n \"\"\"\n return self._multiline\n\n @multiline.setter\n def multiline(self, multiline):\n self._multiline = multiline\n self._wrap_lines_invariant()\n self._update()\n\n @property\n def anchor_x(self):\n \"\"\"Horizontal anchor alignment.\n\n This property determines the meaning of the `x` coordinate.\n It is one of the enumerants:\n\n ``\"left\"`` (default)\n The X coordinate gives the position of the left edge of the layout.\n ``\"center\"``\n The X coordinate gives the position of the center of the layout.\n ``\"right\"``\n The X coordinate gives the position of the right edge of the layout.\n\n For the purposes of calculating the position resulting from this\n alignment, the width of the layout is taken to be `width` if `multiline`\n is True and `wrap_lines` is True, otherwise `content_width`.\n\n :type: str\n \"\"\"\n return self._anchor_x\n\n @anchor_x.setter\n def anchor_x(self, anchor_x):\n self._anchor_x = anchor_x\n self._update()\n\n @property\n def anchor_y(self):\n \"\"\"Vertical anchor alignment.\n\n This property determines the meaning of the `y` coordinate.\n It is one of the enumerants:\n\n ``\"top\"``\n The Y coordinate gives the position of the top edge of the layout.\n ``\"center\"``\n The Y coordinate gives the position of the center of the layout.\n ``\"baseline\"``\n The Y coordinate gives the position of the baseline of the first\n line of text in the layout.\n ``\"bottom\"`` (default)\n The Y coordinate gives the position of the bottom edge of the layout.\n\n For the purposes of calculating the position resulting from this\n alignment, the height of the layout is taken to be the smaller of\n `height` and `content_height`.\n\n See also `content_valign`.\n\n :type: str\n \"\"\"\n return self._anchor_y\n\n @anchor_y.setter\n def anchor_y(self, anchor_y):\n self._anchor_y = anchor_y\n self._update()\n\n @property\n def content_valign(self):\n \"\"\"Vertical alignment of content within larger layout box.\n\n This property determines how content is positioned within the layout\n box when ``content_height`` is less than ``height``. 
It is one\n of the enumerants:\n\n ``top`` (default)\n Content is aligned to the top of the layout box.\n ``center``\n Content is centered vertically within the layout box.\n ``bottom``\n Content is aligned to the bottom of the layout box.\n\n This property has no effect when ``content_height`` is greater\n than ``height`` (in which case the content is aligned to the top) or when\n ``height`` is ``None`` (in which case there is no vertical layout box\n dimension).\n\n :type: str\n \"\"\"\n return self._content_valign\n\n @content_valign.setter\n def content_valign(self, content_valign):\n self._content_valign = content_valign\n self._update()\n\n def _wrap_lines_invariant(self):\n self._wrap_lines = self._multiline and self._wrap_lines_flag\n assert not self._wrap_lines or self._width, \\\n \"When the parameters 'multiline' and 'wrap_lines' are True,\" \\\n \"the parameter 'width' must be a number.\"\n\n def _parse_distance(self, distance):\n if distance is None:\n return None\n return _parse_distance(distance, self._dpi)\n\n def begin_update(self):\n \"\"\"Indicate that a number of changes to the layout or document\n are about to occur.\n\n Changes to the layout or document between calls to `begin_update` and\n `end_update` do not trigger any costly relayout of text. Relayout of\n all changes is performed when `end_update` is called.\n\n Note that between the `begin_update` and `end_update` calls, values\n such as `content_width` and `content_height` are undefined (i.e., they\n may or may not be updated to reflect the latest changes).\n \"\"\"\n self._update_enabled = False\n\n def end_update(self):\n \"\"\"Perform pending layout changes since `begin_update`.\n\n See `begin_update`.\n \"\"\"\n self._update_enabled = True\n self._update()\n\n def delete(self):\n \"\"\"Remove this layout from its batch.\n \"\"\"\n for vertex_list in self._vertex_lists:\n vertex_list.delete()\n self._vertex_lists = []\n\n for box in self._boxes:\n box.delete(self)\n\n def draw(self):\n \"\"\"Draw this text layout.\n\n Note that this method performs very badly if a batch was supplied to\n the constructor. 
If you add this layout to a batch, you should\n ideally use only the batch's draw method.\n \"\"\"\n if self._own_batch:\n self._batch.draw()\n else:\n self._batch.draw_subset(self._vertex_lists)\n\n def _init_groups(self, group):\n if group:\n self.top_group = TextLayoutGroup(group)\n self.background_group = graphics.OrderedGroup(0, self.top_group)\n self.foreground_group = TextLayoutForegroundGroup(1, self.top_group)\n self.foreground_decoration_group = TextLayoutForegroundDecorationGroup(2, self.top_group)\n # Otherwise class groups are (re)used.\n\n def _get_lines(self):\n len_text = len(self._document.text)\n glyphs = self._get_glyphs()\n owner_runs = runlist.RunList(len_text, None)\n self._get_owner_runs(owner_runs, glyphs, 0, len_text)\n lines = [line for line in self._flow_glyphs(glyphs, owner_runs, 0, len_text)]\n self.content_width = 0\n self._flow_lines(lines, 0, len(lines))\n return lines\n\n def _update(self):\n if not self._update_enabled:\n return\n\n for _vertex_list in self._vertex_lists:\n _vertex_list.delete()\n for box in self._boxes:\n box.delete(self)\n self._vertex_lists = []\n self._boxes = []\n self.groups.clear()\n\n if not self._document or not self._document.text:\n return\n\n lines = self._get_lines()\n\n colors_iter = self._document.get_style_runs('color')\n background_iter = self._document.get_style_runs('background_color')\n\n if self._origin_layout:\n left = top = 0\n else:\n left = self._get_left()\n top = self._get_top(lines)\n\n context = _StaticLayoutContext(self, self._document, colors_iter, background_iter)\n for line in lines:\n self._create_vertex_lists(left + line.x, top + line.y, line.start, line.boxes, context)\n\n def _update_color(self):\n colors_iter = self._document.get_style_runs('color')\n colors = []\n for start, end, color in colors_iter.ranges(0, colors_iter.end):\n if color is None:\n color = (0, 0, 0, 255)\n colors.extend(color * ((end - start) * 4))\n\n start = 0\n for _vertex_list in self._vertex_lists:\n _vertex_list.colors = colors[start:start+len(_vertex_list.colors)]\n start += len(_vertex_list.colors)\n\n def _get_left(self):\n if self._multiline:\n width = self._width if self._wrap_lines else self.content_width\n else:\n width = self.content_width\n\n if self._anchor_x == 'left':\n return self._x\n elif self._anchor_x == 'center':\n return self._x - width // 2\n elif self._anchor_x == 'right':\n return self._x - width\n else:\n assert False, '`anchor_x` must be either \"left\", \"center\", or \"right\".'\n\n def _get_top(self, lines):\n if self._height is None:\n height = self.content_height\n offset = 0\n else:\n height = self._height\n if self._content_valign == 'top':\n offset = 0\n elif self._content_valign == 'bottom':\n offset = max(0, self._height - self.content_height)\n elif self._content_valign == 'center':\n offset = max(0, self._height - self.content_height) // 2\n else:\n assert False, '`content_valign` must be either \"top\", \"bottom\", or \"center\".'\n\n if self._anchor_y == 'top':\n return self._y - offset\n elif self._anchor_y == 'baseline':\n return self._y + lines[0].ascent - offset\n elif self._anchor_y == 'bottom':\n return self._y + height - offset\n elif self._anchor_y == 'center':\n if len(lines) == 1 and self._height is None:\n # This \"looks\" more centered than considering all of the descent.\n line = lines[0]\n return self._y + line.ascent // 2 - line.descent // 4\n else:\n return self._y + height // 2 - offset\n else:\n assert False, '`anchor_y` must be either \"top\", \"bottom\", \"center\", or 
\"baseline\".'\n\n def _get_bottom(self, lines):\n height = self._height or self.content_height\n\n if self._anchor_y == 'top':\n return self._y - height\n elif self._anchor_y == 'bottom':\n return self._y\n elif self._anchor_y == 'center':\n return self._y - height // 2\n elif self._anchor_y == 'baseline':\n return self._y - height + lines[0].ascent\n else:\n assert False, '`anchor_y` must be either \"top\", \"bottom\", \"center\", or \"baseline\".'\n\n def _init_document(self):\n self._update()\n\n def _uninit_document(self):\n pass\n\n def on_insert_text(self, start, text):\n \"\"\"Event handler for `AbstractDocument.on_insert_text`.\n\n The event handler is bound by the text layout; there is no need for\n applications to interact with this method.\n \"\"\"\n if self._visible:\n self._init_document()\n else:\n if self.document.text:\n # Update content width and height, since text may change while hidden.\n self._get_lines()\n\n def on_delete_text(self, start, end):\n \"\"\"Event handler for `AbstractDocument.on_delete_text`.\n\n The event handler is bound by the text layout; there is no need for\n applications to interact with this method.\n \"\"\"\n if self._visible:\n self._init_document()\n\n def on_style_text(self, start, end, attributes):\n \"\"\"Event handler for `AbstractDocument.on_style_text`.\n\n The event handler is bound by the text layout; there is no need for\n applications to interact with this method.\n \"\"\"\n if len(attributes) == 1 and 'color' in attributes.keys():\n self._update_color()\n else:\n self._init_document()\n\n def _get_glyphs(self):\n glyphs = []\n runs = runlist.ZipRunIterator((\n self._document.get_font_runs(dpi=self._dpi),\n self._document.get_element_runs()))\n text = self._document.text\n for start, end, (font, element) in runs.ranges(0, len(text)):\n if element:\n glyphs.append(_InlineElementBox(element))\n else:\n glyphs.extend(font.get_glyphs(text[start:end]))\n return glyphs\n\n def _get_owner_runs(self, owner_runs, glyphs, start, end):\n owner = glyphs[start].owner\n run_start = start\n # TODO avoid glyph slice on non-incremental\n for i, glyph in enumerate(glyphs[start:end]):\n if owner != glyph.owner:\n owner_runs.set_run(run_start, i + start, owner)\n owner = glyph.owner\n run_start = i + start\n owner_runs.set_run(run_start, end, owner)\n\n def _flow_glyphs(self, glyphs, owner_runs, start, end):\n # TODO change flow generator on self, avoiding this conditional.\n if not self._multiline:\n for line in self._flow_glyphs_single_line(glyphs, owner_runs,\n start, end):\n yield line\n else:\n for line in self._flow_glyphs_wrap(glyphs, owner_runs, start, end):\n yield line\n\n def _flow_glyphs_wrap(self, glyphs, owner_runs, start, end):\n \"\"\"Word-wrap styled text into lines of fixed width.\n\n Fits `glyphs` in range `start` to `end` into `_Line` s which are\n then yielded.\n \"\"\"\n owner_iterator = owner_runs.get_run_iterator().ranges(start, end)\n\n font_iterator = self._document.get_font_runs(dpi=self._dpi)\n\n align_iterator = runlist.FilteredRunIterator(\n self._document.get_style_runs('align'),\n lambda value: value in ('left', 'right', 'center'),\n 'left')\n if self._width is None:\n wrap_iterator = runlist.ConstRunIterator(\n len(self.document.text), False)\n else:\n wrap_iterator = runlist.FilteredRunIterator(\n self._document.get_style_runs('wrap'),\n lambda value: value in (True, False, 'char', 'word'),\n True)\n margin_left_iterator = runlist.FilteredRunIterator(\n self._document.get_style_runs('margin_left'),\n lambda value: value 
is not None, 0)\n margin_right_iterator = runlist.FilteredRunIterator(\n self._document.get_style_runs('margin_right'),\n lambda value: value is not None, 0)\n indent_iterator = runlist.FilteredRunIterator(\n self._document.get_style_runs('indent'),\n lambda value: value is not None, 0)\n kerning_iterator = runlist.FilteredRunIterator(\n self._document.get_style_runs('kerning'),\n lambda value: value is not None, 0)\n tab_stops_iterator = runlist.FilteredRunIterator(\n self._document.get_style_runs('tab_stops'),\n lambda value: value is not None, [])\n\n line = _Line(start)\n line.align = align_iterator[start]\n line.margin_left = self._parse_distance(margin_left_iterator[start])\n line.margin_right = self._parse_distance(margin_right_iterator[start])\n if start == 0 or self.document.text[start - 1] in u'\\n\\u2029':\n line.paragraph_begin = True\n line.margin_left += self._parse_distance(indent_iterator[start])\n wrap = wrap_iterator[start]\n if self._wrap_lines:\n width = self._width - line.margin_left - line.margin_right\n\n # Current right-most x position in line being laid out.\n x = 0\n\n # Boxes accumulated but not yet committed to a line.\n run_accum = []\n run_accum_width = 0\n\n # Amount of whitespace accumulated at end of line\n eol_ws = 0\n\n # Iterate over glyph owners (texture states); these form GlyphBoxes,\n # but broken into lines.\n font = None\n for start, end, owner in owner_iterator:\n font = font_iterator[start]\n\n # Glyphs accumulated in this owner but not yet committed to a\n # line.\n owner_accum = []\n owner_accum_width = 0\n\n # Glyphs accumulated in this owner AND also committed to the\n # current line (some whitespace has followed all of the committed\n # glyphs).\n owner_accum_commit = []\n owner_accum_commit_width = 0\n\n # Ignore kerning of first glyph on each line\n nokern = True\n\n # Current glyph index\n index = start\n\n # Iterate over glyphs in this owner run. `text` is the\n # corresponding character data for the glyph, and is used to find\n # whitespace and newlines.\n for (text, glyph) in zip(self.document.text[start:end], glyphs[start:end]):\n if nokern:\n kern = 0\n nokern = False\n else:\n kern = self._parse_distance(kerning_iterator[index])\n\n if wrap != 'char' and text in u'\\u0020\\u200b\\t':\n # Whitespace: commit pending runs to this line.\n for run in run_accum:\n line.add_box(run)\n run_accum = []\n run_accum_width = 0\n\n if text == '\\t':\n # Fix up kern for this glyph to align to the next tab stop\n for tab_stop in tab_stops_iterator[index]:\n tab_stop = self._parse_distance(tab_stop)\n if tab_stop > x + line.margin_left:\n break\n else:\n # No more tab stops, tab to 100 pixels\n tab = 50.\n tab_stop = (((x + line.margin_left) // tab) + 1) * tab\n kern = int(tab_stop - x - line.margin_left - glyph.advance)\n\n owner_accum.append((kern, glyph))\n owner_accum_commit.extend(owner_accum)\n owner_accum_commit_width += owner_accum_width + glyph.advance + kern\n eol_ws += glyph.advance + kern\n\n owner_accum = []\n owner_accum_width = 0\n\n x += glyph.advance + kern\n index += 1\n\n # The index at which the next line will begin (the\n # current index, because this is the current best\n # breakpoint).\n next_start = index\n else:\n new_paragraph = text in u'\\n\\u2029'\n new_line = (text == u'\\u2028') or new_paragraph\n if (wrap and self._wrap_lines and x + kern + glyph.advance >= width) or new_line:\n # Either the pending runs have overflowed the allowed\n # line width or a newline was encountered. 
Either\n # way, the current line must be flushed.\n\n if new_line or wrap == 'char':\n # Forced newline or char-level wrapping. Commit\n # everything pending without exception.\n for run in run_accum:\n line.add_box(run)\n run_accum = []\n run_accum_width = 0\n owner_accum_commit.extend(owner_accum)\n owner_accum_commit_width += owner_accum_width\n owner_accum = []\n owner_accum_width = 0\n\n line.length += 1\n next_start = index\n if new_line:\n next_start += 1\n\n # Create the _GlyphBox for the committed glyphs in the\n # current owner.\n if owner_accum_commit:\n line.add_box(\n _GlyphBox(owner, font, owner_accum_commit,\n owner_accum_commit_width))\n owner_accum_commit = []\n owner_accum_commit_width = 0\n\n if new_line and not line.boxes:\n # Empty line: give it the current font's default\n # line-height.\n line.ascent = font.ascent\n line.descent = font.descent\n\n # Flush the line, unless nothing got committed, in\n # which case it's a really long string of glyphs\n # without any breakpoints (in which case it will be\n # flushed at the earliest breakpoint, not before\n # something is committed).\n if line.boxes or new_line:\n # Trim line width of whitespace on right-side.\n line.width -= eol_ws\n if new_paragraph:\n line.paragraph_end = True\n yield line\n try:\n line = _Line(next_start)\n line.align = align_iterator[next_start]\n line.margin_left = self._parse_distance(\n margin_left_iterator[next_start])\n line.margin_right = self._parse_distance(\n margin_right_iterator[next_start])\n except IndexError:\n # XXX This used to throw StopIteration in some cases, causing the\n # final part of this method not to be executed. Refactoring\n # required to fix this\n return\n if new_paragraph:\n line.paragraph_begin = True\n\n # Remove kern from first glyph of line\n if run_accum and hasattr(run_accum, 'glyphs') and run_accum.glyphs:\n k, g = run_accum[0].glyphs[0]\n run_accum[0].glyphs[0] = (0, g)\n run_accum_width -= k\n elif owner_accum:\n k, g = owner_accum[0]\n owner_accum[0] = (0, g)\n owner_accum_width -= k\n else:\n nokern = True\n\n x = run_accum_width + owner_accum_width\n if self._wrap_lines:\n width = (self._width -\n line.margin_left - line.margin_right)\n\n if isinstance(glyph, _AbstractBox):\n # Glyph is already in a box. 
XXX Ignore kern?\n run_accum.append(glyph)\n run_accum_width += glyph.advance\n x += glyph.advance\n elif new_paragraph:\n # New paragraph started, update wrap style\n wrap = wrap_iterator[next_start]\n line.margin_left += \\\n self._parse_distance(indent_iterator[next_start])\n if self._wrap_lines:\n width = (self._width -\n line.margin_left - line.margin_right)\n elif not new_line:\n # If the glyph was any non-whitespace, non-newline\n # character, add it to the pending run.\n owner_accum.append((kern, glyph))\n owner_accum_width += glyph.advance + kern\n x += glyph.advance + kern\n index += 1\n eol_ws = 0\n\n # The owner run is finished; create GlyphBoxes for the committed\n # and pending glyphs.\n if owner_accum_commit:\n line.add_box(_GlyphBox(owner, font, owner_accum_commit,\n owner_accum_commit_width))\n if owner_accum:\n run_accum.append(_GlyphBox(owner, font, owner_accum,\n owner_accum_width))\n run_accum_width += owner_accum_width\n\n # All glyphs have been processed: commit everything pending and flush\n # the final line.\n for run in run_accum:\n line.add_box(run)\n\n if not line.boxes:\n # Empty line gets font's line-height\n if font is None:\n font = self._document.get_font(0, dpi=self._dpi)\n line.ascent = font.ascent\n line.descent = font.descent\n\n yield line\n\n def _flow_glyphs_single_line(self, glyphs, owner_runs, start, end):\n owner_iterator = owner_runs.get_run_iterator().ranges(start, end)\n font_iterator = self.document.get_font_runs(dpi=self._dpi)\n kern_iterator = runlist.FilteredRunIterator(\n self.document.get_style_runs('kerning'),\n lambda value: value is not None, 0)\n\n line = _Line(start)\n font = font_iterator[0]\n\n if self._width:\n align_iterator = runlist.FilteredRunIterator(\n self._document.get_style_runs('align'),\n lambda value: value in ('left', 'right', 'center'),\n 'left')\n line.align = align_iterator[start]\n\n for start, end, owner in owner_iterator:\n font = font_iterator[start]\n width = 0\n owner_glyphs = []\n for kern_start, kern_end, kern in kern_iterator.ranges(start, end):\n gs = glyphs[kern_start:kern_end]\n width += sum([g.advance for g in gs])\n width += kern * (kern_end - kern_start)\n owner_glyphs.extend(zip([kern] * (kern_end - kern_start), gs))\n if owner is None:\n # Assume glyphs are already boxes.\n for kern, glyph in owner_glyphs:\n line.add_box(glyph)\n else:\n line.add_box(_GlyphBox(owner, font, owner_glyphs, width))\n\n if not line.boxes:\n line.ascent = font.ascent\n line.descent = font.descent\n\n line.paragraph_begin = line.paragraph_end = True\n\n yield line\n\n def _flow_lines(self, lines, start, end):\n margin_top_iterator = runlist.FilteredRunIterator(\n self._document.get_style_runs('margin_top'),\n lambda value: value is not None, 0)\n margin_bottom_iterator = runlist.FilteredRunIterator(\n self._document.get_style_runs('margin_bottom'),\n lambda value: value is not None, 0)\n line_spacing_iterator = self._document.get_style_runs('line_spacing')\n leading_iterator = runlist.FilteredRunIterator(\n self._document.get_style_runs('leading'),\n lambda value: value is not None, 0)\n\n if start == 0:\n y = 0\n else:\n line = lines[start - 1]\n line_spacing = self._parse_distance(line_spacing_iterator[line.start])\n leading = self._parse_distance(leading_iterator[line.start])\n\n y = line.y\n if line_spacing is None:\n y += line.descent\n if line.paragraph_end:\n y -= self._parse_distance(margin_bottom_iterator[line.start])\n\n line_index = start\n for line in lines[start:]:\n if line.paragraph_begin:\n y -= 
self._parse_distance(margin_top_iterator[line.start])\n line_spacing = self._parse_distance(line_spacing_iterator[line.start])\n leading = self._parse_distance(leading_iterator[line.start])\n else:\n y -= leading\n\n if line_spacing is None:\n y -= line.ascent\n else:\n y -= line_spacing\n if line.align == 'left' or line.width > self.width:\n line.x = line.margin_left\n elif line.align == 'center':\n line.x = (self.width - line.margin_left - line.margin_right - line.width) // 2 + line.margin_left\n elif line.align == 'right':\n line.x = self.width - line.margin_right - line.width\n\n self.content_width = max(self.content_width, line.width + line.margin_left)\n\n if line.y == y and line_index >= end:\n # Early exit: all invalidated lines have been reflowed and the\n # next line has no change (therefore subsequent lines do not\n # need to be changed).\n break\n line.y = y\n\n if line_spacing is None:\n y += line.descent\n if line.paragraph_end:\n y -= self._parse_distance(margin_bottom_iterator[line.start])\n\n line_index += 1\n else:\n self.content_height = -y\n\n return line_index\n\n def _create_vertex_lists(self, x, y, i, boxes, context):\n for box in boxes:\n box.place(self, i, x, y, context)\n x += box.advance\n i += box.length\n\n\nclass ScrollableTextLayout(TextLayout):\n \"\"\"Display text in a scrollable viewport.\n\n This class does not display a scrollbar or handle scroll events; it merely\n clips the text that would be drawn in :py:func:`~pyglet.text.layout.TextLayout`\n to the bounds of the layout given by `x`, `y`, `width` and `height`;\n and offsets the text by a scroll offset.\n\n Use `view_x` and `view_y` to scroll the text within the viewport.\n \"\"\"\n\n def __init__(self, document, width, height, multiline=False, dpi=None, batch=None, group=None, wrap_lines=True):\n super().__init__(document, width, height, multiline, dpi, batch, group, wrap_lines)\n self.top_group.width = self._width\n self.top_group.height = self._height\n\n def _init_groups(self, group):\n # Scrollable layout never shares group becauase of translation.\n self.top_group = ScrollableTextLayoutGroup(group)\n self.background_group = graphics.OrderedGroup(0, self.top_group)\n self.foreground_group = TextLayoutForegroundGroup(1, self.top_group)\n self.foreground_decoration_group = TextLayoutForegroundDecorationGroup(2, self.top_group)\n\n # Properties\n\n @property\n def x(self):\n return self._x\n\n @x.setter\n def x(self, x):\n super()._set_x(x)\n self.top_group.x = self._get_left()\n\n @property\n def y(self):\n return self._y\n\n @y.setter\n def y(self, y):\n super()._set_y(y)\n self.top_group.y = y\n\n @property\n def position(self):\n return self._x, self._y\n\n @position.setter\n def position(self, position):\n self.x, self.y = position\n\n @property\n def anchor_x(self):\n return self._anchor_x\n\n @anchor_x.setter\n def anchor_x(self, anchor_x):\n self._anchor_x = anchor_x\n super()._update()\n self.top_group.x = self._get_left()\n\n @property\n def anchor_y(self):\n return self._anchor_y\n\n @anchor_y.setter\n def anchor_y(self, anchor_y):\n self._anchor_y = anchor_y\n super()._update()\n self.top_group.y = self._get_bottom(self._get_lines())\n\n # Offset of content within viewport\n\n @property\n def view_x(self):\n \"\"\"Horizontal scroll offset.\n\n The initial value is 0, and the left edge of the text will touch the left\n side of the layout bounds. A positive value causes the text to \"scroll\"\n to the right. 
Values are automatically clipped into the range\n ``[0, content_width - width]``\n\n :type: int\n \"\"\"\n return self.top_group.view_x\n\n @view_x.setter\n def view_x(self, view_x):\n view_x = max(0, min(self.content_width - self.width, view_x))\n self.top_group.view_x = view_x\n\n @property\n def view_y(self):\n \"\"\"Vertical scroll offset.\n\n The initial value is 0, and the top of the text will touch the top of the\n layout bounds (unless the content height is less than the layout height,\n in which case `content_valign` is used).\n\n A negative value causes the text to \"scroll\" upwards. Values outside of\n the range ``[height - content_height, 0]`` are automatically clipped in\n range.\n\n :type: int\n \"\"\"\n return self.top_group.view_y\n\n @view_y.setter\n def view_y(self, view_y):\n # view_y must be negative.\n view_y = min(0, max(self.height - self.content_height, view_y))\n self.top_group.view_y = view_y\n\n\nclass IncrementalTextLayout(TextLayout, event.EventDispatcher):\n \"\"\"Displayed text suitable for interactive editing and/or scrolling\n large documents.\n\n Unlike :py:func:`~pyglet.text.layout.TextLayout` and\n :py:class:`~pyglet.text.layout.ScrollableTextLayout`, this class generates\n vertex lists only for lines of text that are visible. As the document is\n scrolled, vertex lists are deleted and created as appropriate to keep\n video memory usage to a minimum and improve rendering speed.\n\n Changes to the document are quickly reflected in this layout, as only the\n affected line(s) are reflowed. Use `begin_update` and `end_update` to\n further reduce the amount of processing required.\n\n The layout can also display a text selection (text with a different\n background color). The :py:class:`~pyglet.text.caret.Caret` class implements a visible text cursor and\n provides event handlers for scrolling, selecting and editing text in an\n incremental text layout.\n \"\"\"\n _selection_start = 0\n _selection_end = 0\n _selection_color = [255, 255, 255, 255]\n _selection_background_color = [46, 106, 197, 255]\n\n _origin_layout = False\n\n def __init__(self, document, width, height, multiline=False, dpi=None, batch=None, group=None, wrap_lines=True):\n\n self.glyphs = []\n self.lines = []\n\n self.invalid_glyphs = _InvalidRange()\n self.invalid_flow = _InvalidRange()\n self.invalid_lines = _InvalidRange()\n self.invalid_style = _InvalidRange()\n self.invalid_vertex_lines = _InvalidRange()\n self.visible_lines = _InvalidRange()\n\n self.owner_runs = runlist.RunList(0, None)\n\n super().__init__(document, width, height, multiline, dpi, batch, group, wrap_lines)\n\n self.top_group.width = width\n self.top_group.left = self._get_left()\n self.top_group.height = height\n self.top_group.top = self._get_top(self._get_lines())\n\n def _init_groups(self, group):\n # Scrollable layout never shares group becauase of translation.\n self.top_group = IncrementalTextLayoutGroup(group)\n self.background_group = graphics.OrderedGroup(0, self.top_group)\n self.foreground_group = TextLayoutForegroundGroup(1, self.top_group)\n self.foreground_decoration_group = TextLayoutForegroundDecorationGroup(2, self.top_group)\n\n def _init_document(self):\n assert self._document, 'Cannot remove document from IncrementalTextLayout'\n self.on_insert_text(0, self._document.text)\n\n def _uninit_document(self):\n self.on_delete_text(0, len(self._document.text))\n\n def _get_lines(self):\n return self.lines\n\n def delete(self):\n for line in self.lines:\n line.delete(self)\n self._batch = None\n if 
self._document:\n self._document.remove_handlers(self)\n self._document = None\n\n def on_insert_text(self, start, text):\n len_text = len(text)\n self.glyphs[start:start] = [None] * len_text\n\n self.invalid_glyphs.insert(start, len_text)\n self.invalid_flow.insert(start, len_text)\n self.invalid_style.insert(start, len_text)\n\n self.owner_runs.insert(start, len_text)\n\n for line in self.lines:\n if line.start >= start:\n line.start += len_text\n\n self._update()\n\n def on_delete_text(self, start, end):\n self.glyphs[start:end] = []\n\n self.invalid_glyphs.delete(start, end)\n self.invalid_flow.delete(start, end)\n self.invalid_style.delete(start, end)\n\n self.owner_runs.delete(start, end)\n\n size = end - start\n for line in self.lines:\n if line.start > start:\n line.start = max(line.start - size, start)\n\n if start == 0:\n self.invalid_flow.invalidate(0, 1)\n else:\n self.invalid_flow.invalidate(start - 1, start)\n\n self._update()\n\n def on_style_text(self, start, end, attributes):\n if 'font_name' in attributes or 'font_size' in attributes or 'bold' in attributes or 'italic' in attributes:\n self.invalid_glyphs.invalidate(start, end)\n elif 'color' in attributes or 'background_color' in attributes:\n self.invalid_style.invalidate(start, end)\n else: # Attributes that change flow\n self.invalid_flow.invalidate(start, end)\n\n self._update()\n\n def _update(self):\n if not self._update_enabled:\n return\n\n trigger_update_event = (self.invalid_glyphs.is_invalid() or\n self.invalid_flow.is_invalid() or\n self.invalid_lines.is_invalid())\n\n # Special care if there is no text:\n if not self.glyphs:\n for line in self.lines:\n line.delete(self)\n del self.lines[:]\n self.lines.append(_Line(0))\n font = self.document.get_font(0, dpi=self._dpi)\n self.lines[0].ascent = font.ascent\n self.lines[0].descent = font.descent\n self.lines[0].paragraph_begin = self.lines[0].paragraph_end = True\n self.invalid_lines.invalidate(0, 1)\n\n self._update_glyphs()\n self._update_flow_glyphs()\n self._update_flow_lines()\n self._update_visible_lines()\n self._update_vertex_lists()\n self.top_group.top = self._get_top(self.lines)\n\n # Reclamp view_y in case content height has changed and reset top of content.\n self.view_y = self.view_y\n self.top_group.top = self._get_top(self._get_lines())\n\n if trigger_update_event:\n self.dispatch_event('on_layout_update')\n\n def _update_glyphs(self):\n invalid_start, invalid_end = self.invalid_glyphs.validate()\n\n if invalid_end - invalid_start <= 0:\n return\n\n # Find grapheme breaks and extend glyph range to encompass.\n text = self.document.text\n while invalid_start > 0:\n if _grapheme_break(text[invalid_start - 1], text[invalid_start]):\n break\n invalid_start -= 1\n\n len_text = len(text)\n while invalid_end < len_text:\n if _grapheme_break(text[invalid_end - 1], text[invalid_end]):\n break\n invalid_end += 1\n\n # Update glyphs\n runs = runlist.ZipRunIterator((\n self._document.get_font_runs(dpi=self._dpi),\n self._document.get_element_runs()))\n for start, end, (font, element) in runs.ranges(invalid_start, invalid_end):\n if element:\n self.glyphs[start] = _InlineElementBox(element)\n else:\n text = self.document.text[start:end]\n self.glyphs[start:end] = font.get_glyphs(text)\n\n # Update owner runs\n self._get_owner_runs(self.owner_runs, self.glyphs, invalid_start, invalid_end)\n\n # Updated glyphs need flowing\n self.invalid_flow.invalidate(invalid_start, invalid_end)\n\n def _update_flow_glyphs(self):\n invalid_start, invalid_end = 
self.invalid_flow.validate()\n\n if invalid_end - invalid_start <= 0:\n return\n\n # Find first invalid line\n line_index = 0\n for i, line in enumerate(self.lines):\n if line.start >= invalid_start:\n break\n line_index = i\n\n # Flow from previous line; fixes issue with adding a space into\n # overlong line (glyphs before space would then flow back onto\n # previous line).\n # TODO: Could optimise this by keeping track of where the overlong lines are.\n line_index = max(0, line_index - 1)\n\n # (No need to find last invalid line; the update loop below stops\n # calling the flow generator when no more changes are necessary.)\n\n try:\n line = self.lines[line_index]\n invalid_start = min(invalid_start, line.start)\n line.delete(self)\n line = self.lines[line_index] = _Line(invalid_start)\n self.invalid_lines.invalidate(line_index, line_index + 1)\n except IndexError:\n line_index = 0\n invalid_start = 0\n line = _Line(0)\n self.lines.append(line)\n self.invalid_lines.insert(0, 1)\n\n content_width_invalid = False\n next_start = invalid_start\n\n for line in self._flow_glyphs(self.glyphs, self.owner_runs, invalid_start, len(self._document.text)):\n try:\n old_line = self.lines[line_index]\n old_line.delete(self)\n old_line_width = old_line.width + old_line.margin_left\n new_line_width = line.width + line.margin_left\n if old_line_width == self.content_width and new_line_width < old_line_width:\n content_width_invalid = True\n self.lines[line_index] = line\n self.invalid_lines.invalidate(line_index, line_index + 1)\n except IndexError:\n self.lines.append(line)\n self.invalid_lines.insert(line_index, 1)\n\n next_start = line.start + line.length\n line_index += 1\n\n try:\n next_line = self.lines[line_index]\n if next_start == next_line.start and next_start > invalid_end:\n # No more lines need to be modified, early exit.\n break\n except IndexError:\n pass\n else:\n # The last line is at line_index - 1, if there are any more lines\n # after that they are stale and need to be deleted.\n if next_start == len(self._document.text) and line_index > 0:\n for line in self.lines[line_index:]:\n old_line_width = old_line.width + old_line.margin_left\n if old_line_width == self.content_width:\n content_width_invalid = True\n line.delete(self)\n del self.lines[line_index:]\n\n if content_width_invalid:\n # Rescan all lines to look for the new maximum content width\n content_width = 0\n for line in self.lines:\n content_width = max(line.width + line.margin_left, content_width)\n self.content_width = content_width\n\n def _update_flow_lines(self):\n invalid_start, invalid_end = self.invalid_lines.validate()\n if invalid_end - invalid_start <= 0:\n return\n\n invalid_end = self._flow_lines(self.lines, invalid_start, invalid_end)\n\n # Invalidate lines that need new vertex lists.\n self.invalid_vertex_lines.invalidate(invalid_start, invalid_end)\n\n def _update_visible_lines(self):\n start = sys.maxsize\n end = 0\n for i, line in enumerate(self.lines):\n if line.y + line.descent < self.view_y:\n start = min(start, i)\n if line.y + line.ascent > self.view_y - self.height:\n end = max(end, i) + 1\n\n # Delete newly invisible lines\n for i in range(self.visible_lines.start, min(start, len(self.lines))):\n self.lines[i].delete(self)\n for i in range(end, min(self.visible_lines.end, len(self.lines))):\n self.lines[i].delete(self)\n\n # Invalidate newly visible lines\n self.invalid_vertex_lines.invalidate(start, self.visible_lines.start)\n self.invalid_vertex_lines.invalidate(self.visible_lines.end, end)\n\n 
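# Commit the newly computed visible range: the delete/invalidate loops above diff the new [start, end) window against the previous self.visible_lines values, so the stored range must be updated only after both passes have run.\n 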
self.visible_lines.start = start\n self.visible_lines.end = end\n\n def _update_vertex_lists(self):\n # Find lines that have been affected by style changes\n style_invalid_start, style_invalid_end = self.invalid_style.validate()\n self.invalid_vertex_lines.invalidate(\n self.get_line_from_position(style_invalid_start),\n self.get_line_from_position(style_invalid_end) + 1)\n\n invalid_start, invalid_end = self.invalid_vertex_lines.validate()\n if invalid_end - invalid_start <= 0:\n return\n\n colors_iter = self.document.get_style_runs('color')\n background_iter = self.document.get_style_runs('background_color')\n if self._selection_end - self._selection_start > 0:\n colors_iter = runlist.OverriddenRunIterator(\n colors_iter,\n self._selection_start,\n self._selection_end,\n self._selection_color)\n background_iter = runlist.OverriddenRunIterator(\n background_iter,\n self._selection_start,\n self._selection_end,\n self._selection_background_color)\n\n context = _IncrementalLayoutContext(self, self._document, colors_iter, background_iter)\n\n for line in self.lines[invalid_start:invalid_end]:\n line.delete(self)\n context.line = line\n y = line.y\n\n # Early out if not visible\n if y + line.descent > self.view_y:\n continue\n elif y + line.ascent < self.view_y - self.height:\n break\n\n self._create_vertex_lists(line.x, y, line.start, line.boxes, context)\n\n @property\n def x(self):\n return self._x\n\n @x.setter\n def x(self, x):\n self._x = x\n self.top_group.left = self._get_left()\n\n @property\n def y(self):\n return self._y\n\n @y.setter\n def y(self, y):\n self._y = y\n self.top_group.top = self._get_top(self._get_lines())\n\n @property\n def position(self):\n return self._x, self._y\n\n @position.setter\n def position(self, position):\n self.x, self.y = position\n\n @property\n def anchor_x(self):\n return self._anchor_x\n\n @anchor_x.setter\n def anchor_x(self, anchor_x):\n self._anchor_x = anchor_x\n self.top_group.left = self._get_left()\n\n @property\n def anchor_y(self):\n return self._anchor_y\n\n @anchor_y.setter\n def anchor_y(self, anchor_y):\n self._anchor_y = anchor_y\n self.top_group.top = self._get_top(self._get_lines())\n\n @property\n def width(self):\n return self._width\n\n @width.setter\n def width(self, width):\n # Invalidate everything when width changes\n if width == self._width:\n return\n self._width = width\n super()._update()\n self.invalid_flow.invalidate(0, len(self.document.text))\n self.top_group.left = self._get_left()\n self.top_group.width = self._width\n\n @property\n def height(self):\n return self._height\n\n @height.setter\n def height(self, height):\n # Recalculate visible lines when height changes\n if height == self._height:\n return\n self._height = height\n super()._update()\n self.top_group.top = self._get_top(self._get_lines())\n self.top_group.height = self._height\n if self._update_enabled:\n self._update_visible_lines()\n self._update_vertex_lists()\n\n @property\n def multiline(self):\n return self._multiline\n\n @multiline.setter\n def multiline(self, multiline):\n self.invalid_flow.invalidate(0, len(self.document.text))\n self._multiline = multiline\n self._wrap_lines_invariant()\n self._update()\n\n @property\n def view_x(self):\n \"\"\"Horizontal scroll offset.\n\n The initial value is 0, and the left edge of the text will touch the left\n side of the layout bounds. A positive value causes the text to \"scroll\"\n to the right. 
Values are automatically clipped into the range\n ``[0, content_width - width]``\n\n :type: int\n \"\"\"\n return self.top_group.view_x\n\n @view_x.setter\n def view_x(self, view_x):\n view_x = max(0, min(self.content_width - self.width, view_x))\n self.top_group.view_x = view_x\n\n @property\n def view_y(self):\n \"\"\"Vertical scroll offset.\n\n The initial value is 0, and the top of the text will touch the top of the\n layout bounds (unless the content height is less than the layout height,\n in which case `content_valign` is used).\n\n A negative value causes the text to \"scroll\" upwards. Values outside of\n the range ``[height - content_height, 0]`` are automatically clipped in\n range.\n\n :type: int\n \"\"\"\n return self.top_group.view_y\n\n @view_y.setter\n def view_y(self, view_y):\n # Invalidate invisible/visible lines when y scrolls\n # view_y must be negative.\n view_y = min(0, max(self.height - self.content_height, view_y))\n self.top_group.view_y = view_y\n self._update_visible_lines()\n self._update_vertex_lists()\n\n # Visible selection\n\n def set_selection(self, start, end):\n \"\"\"Set the text selection range.\n\n If ``start`` equals ``end`` no selection will be visible.\n\n :Parameters:\n `start` : int\n Starting character position of selection.\n `end` : int\n End of selection, exclusive.\n\n \"\"\"\n start = max(0, start)\n end = min(end, len(self.document.text))\n if start == self._selection_start and end == self._selection_end:\n return\n\n if end > self._selection_start and start < self._selection_end:\n # Overlapping, only invalidate difference\n self.invalid_style.invalidate(min(start, self._selection_start),\n max(start, self._selection_start))\n self.invalid_style.invalidate(min(end, self._selection_end),\n max(end, self._selection_end))\n else:\n # Non-overlapping, invalidate both ranges\n self.invalid_style.invalidate(self._selection_start,\n self._selection_end)\n self.invalid_style.invalidate(start, end)\n\n self._selection_start = start\n self._selection_end = end\n\n self._update()\n\n @property\n def selection_start(self):\n \"\"\"Starting position of the active selection.\n\n :see: `set_selection`\n\n :type: int\n \"\"\"\n return self._selection_start\n\n @selection_start.setter\n def selection_start(self, start):\n self.set_selection(start, self._selection_end)\n\n @property\n def selection_end(self):\n \"\"\"End position of the active selection (exclusive).\n\n :see: `set_selection`\n\n :type: int\n \"\"\"\n return self._selection_end\n\n @selection_end.setter\n def selection_end(self, end):\n self.set_selection(self._selection_start, end)\n\n @property\n def selection_color(self):\n \"\"\"Text color of active selection.\n\n The color is an RGBA tuple with components in range [0, 255].\n\n :type: (int, int, int, int)\n \"\"\"\n return self._selection_color\n\n @selection_color.setter\n def selection_color(self, color):\n self._selection_color = color\n self.invalid_style.invalidate(self._selection_start, self._selection_end)\n\n @property\n def selection_background_color(self):\n \"\"\"Background color of active selection.\n\n The color is an RGBA tuple with components in range [0, 255].\n\n :type: (int, int, int, int)\n \"\"\"\n return self._selection_background_color\n\n @selection_background_color.setter\n def selection_background_color(self, background_color):\n self._selection_background_color = background_color\n self.invalid_style.invalidate(self._selection_start, self._selection_end)\n\n # Coordinate translation\n\n def 
get_position_from_point(self, x, y):\n \"\"\"Get the closest document position to a point.\n\n :Parameters:\n `x` : int\n X coordinate\n `y` : int\n Y coordinate\n\n \"\"\"\n line = self.get_line_from_point(x, y)\n return self.get_position_on_line(line, x)\n\n def get_point_from_position(self, position, line=None):\n \"\"\"Get the X, Y coordinates of a position in the document.\n\n The position that ends a line has an ambiguous point: it can be either\n the end of the line, or the beginning of the next line. You may\n optionally specify a line index to disambiguate the case.\n\n The resulting Y coordinate gives the baseline of the line.\n\n :Parameters:\n `position` : int\n Character position within document.\n `line` : int\n Line index.\n\n :rtype: (int, int)\n :return: (x, y)\n \"\"\"\n if line is None:\n line = self.lines[0]\n for next_line in self.lines:\n if next_line.start > position:\n break\n line = next_line\n else:\n line = self.lines[line]\n\n x = line.x\n\n baseline = self._document.get_style('baseline', max(0, position - 1))\n if baseline is None:\n baseline = 0\n else:\n baseline = self._parse_distance(baseline)\n\n position -= line.start\n for box in line.boxes:\n if position - box.length <= 0:\n x += box.get_point_in_box(position)\n break\n position -= box.length\n x += box.advance\n\n return x + self.top_group.view_x, line.y + self.top_group.view_y + baseline\n\n def get_line_from_point(self, x, y):\n \"\"\"Get the closest line index to a point.\n\n :Parameters:\n `x` : int\n X coordinate.\n `y` : int\n Y coordinate.\n\n :rtype: int\n \"\"\"\n x -= self.top_group.translate_x\n y -= self.top_group.translate_y\n\n line_index = 0\n for line in self.lines:\n if y > line.y + line.descent:\n break\n line_index += 1\n if line_index >= len(self.lines):\n line_index = len(self.lines) - 1\n return line_index\n\n def get_point_from_line(self, line):\n \"\"\"Get the X, Y coordinates of a line index.\n\n :Parameters:\n `line` : int\n Line index.\n\n :rtype: (int, int)\n :return: (x, y)\n \"\"\"\n line = self.lines[line]\n return line.x + self.top_group.translate_x, line.y + self.top_group.translate_y\n\n def get_line_from_position(self, position):\n \"\"\"Get the line index of a character position in the document.\n\n :Parameters:\n `position` : int\n Document position.\n\n :rtype: int\n \"\"\"\n line = -1\n for next_line in self.lines:\n if next_line.start > position:\n break\n line += 1\n return line\n\n def get_position_from_line(self, line):\n \"\"\"Get the first document character position of a given line index.\n\n :Parameters:\n `line` : int\n Line index.\n\n :rtype: int\n \"\"\"\n return self.lines[line].start\n\n def get_position_on_line(self, line, x):\n \"\"\"Get the closest document position for a given line index and X\n coordinate.\n\n :Parameters:\n `line` : int\n Line index.\n `x` : int\n X coordinate.\n\n :rtype: int\n \"\"\"\n line = self.lines[line]\n x -= self.top_group.translate_x\n\n if x < line.x:\n return line.start\n\n position = line.start\n last_glyph_x = line.x\n for box in line.boxes:\n if 0 <= x - last_glyph_x < box.advance:\n position += box.get_position_in_box(x - last_glyph_x)\n break\n last_glyph_x += box.advance\n position += box.length\n\n return position\n\n def get_line_count(self):\n \"\"\"Get the number of lines in the text layout.\n\n :rtype: int\n \"\"\"\n return len(self.lines)\n\n def ensure_line_visible(self, line):\n \"\"\"Adjust `view_y` so that the line with the given index is visible.\n\n :Parameters:\n `line` : int\n Line index.\n\n 
\"\"\"\n line = self.lines[line]\n y1 = line.y + line.ascent\n y2 = line.y + line.descent\n if y1 > self.view_y:\n self.view_y = y1\n elif y2 < self.view_y - self.height:\n self.view_y = y2 + self.height\n\n def ensure_x_visible(self, x):\n \"\"\"Adjust `view_x` so that the given X coordinate is visible.\n\n The X coordinate is given relative to the current `view_x`.\n\n :Parameters:\n `x` : int\n X coordinate\n\n \"\"\"\n if x <= self.view_x + 10:\n self.view_x = x - 10\n elif x >= self.view_x + self.width:\n self.view_x = x - self.width + 10\n elif x >= self.view_x + self.width - 10 and self.content_width > self.width:\n self.view_x = x - self.width + 10\n\n if _is_pyglet_doc_run:\n def on_layout_update(self):\n \"\"\"Some or all of the layout text was reflowed.\n\n Text reflow is caused by document edits or changes to the layout's\n size. Changes to the layout's position or active selection, and\n certain document edits such as text color, do not cause a reflow.\n\n Handle this event to update the position of a graphical element\n that depends on the laid out position of a glyph or line.\n\n :event:\n \"\"\"\n\n\nIncrementalTextLayout.register_event_type('on_layout_update')\n","repo_name":"jtl1207/comic-translation","sub_path":"pyglet/text/layout.py","file_name":"layout.py","file_ext":"py","file_size_in_byte":83961,"program_lang":"python","lang":"en","doc_type":"code","stars":476,"dataset":"github-code","pt":"81"} +{"seq_id":"4398332406","text":"import pygame as game\nimport random\nimport sys\n\n# solid contribution\n\n# Game state class, holds all information regarding the game\nclass GameState:\n\n # Defining the initial variables of the game window, colours and score\n \n screen_width = 1400\n screen_height = 800\n half_width = screen_width / 2\n half_height = screen_height / 2\n\n bg_colour = (0, 0, 0)\n white = (255, 255, 255)\n\n p1_score = 0\n p2_score = 0\n score_message = \" \"\n restart_message = \"Press R to restart, or Q to quit\"\n welcome_message = \" \"\n\n screen = game.display.set_mode((screen_width, screen_height))\n\n # Function to initialise the game window\n def pygame_init(self):\n game.init()\n game.font.init()\n\n def window_init(self):\n self.pygame_init()\n game.display.set_caption('Pong AI')\n self.screen.fill(self.bg_colour)\n game.display.flip()\n\n # Function to draw the dividing line of the game\n def middle_line(self):\n \n # For the range of the screen height, a white pixel is drawn if the floor division of 10 of that number is even\n # Therefore the middle dashed line should be 10 pixels of white followed by 10 pixels of black etc\n for x in range(0, self.screen_width):\n if (x // 10) % 2 == 0:\n game.draw.rect(self.screen, paddle.white, [self.screen_width/2, x, 1, 1])\n else: \n game.draw.rect(self.screen, self.bg_colour, [self.screen_width/2, x, 1, 1])\n \n game.draw.rect(self.screen, paddle.white, [self.screen_width/2, 799, 1, 30])\n\n # Function to display the game score\n def game_score_display(self):\n\n p1_score_font = game.font.SysFont(\"calibri\", 70)\n p1_score_text = p1_score_font.render(str(self.p1_score), True, self.white)\n self.screen.blit(p1_score_text, (self.half_width - 100 , (self.screen_height / 2) - 300))\n\n p2_score_font = game.font.SysFont(\"calibri\", 70)\n p2_score_text = p2_score_font.render(str(self.p2_score), True, self.white)\n self.screen.blit(p2_score_text, (self.half_width + 68, (self.screen_height / 2) - 300))\n\n score_font = game.font.SysFont(\"calibri\", 32)\n score_text = score_font.render(game_state.score_message, 
True, self.white)\n self.screen.blit(score_text, (self.half_width - 150, self.half_height))\n\n if self.game_over() == 1:\n game_over_font = game.font.SysFont(\"calibri\", 32)\n game_over_text = game_over_font.render(self.restart_message, True, self.white)\n self.screen.blit(game_over_text, (self.half_width - 200, 10))\n\n if paddle.level_select == 0:\n self.welcome_message = \"Welcome to Pong, please select your difficulty: [1] - [2] - [3] or [4] for multiplayer\"\n else:\n self.welcome_message = \" \"\n \n welcome_font = game.font.SysFont(\"calibri\", 32)\n welcome_text = welcome_font.render(self.welcome_message, True, self.white)\n self.screen.blit(welcome_text, (200, 10))\n\n\n # Function to reset the game's values back to the init state\n def game_reset(self):\n \n ball.x_ball_dir = GameState.screen_width / 2\n ball.y_ball_dir = GameState.screen_height / 2\n\n paddle.x_direction = paddle.x_origin\n paddle.y_direction = paddle.y_origin\n\n paddle.x_direction_op = 1250\n paddle.y_direction_op = paddle.y_origin\n\n paddle.level_select = 0\n\n ball.ball_velocity = 3\n ball.ball_movement_x = ball.ball_velocity\n ball.init_y = 2.5\n\n ball.rally_count = 0\n ball.special_ability_y = 0\n\n game_state.p1_score = 0\n game_state.p2_score = 0\n\n game_state.score_message = \" \"\n \n game.time.delay(1000)\n \n def game_over(self):\n\n if ball.win_detect() == 1 or ball.win_detect() == 2:\n return 1\n\n # The game loop\n def game_loop(self):\n \n self.window_init()\n running = True\n while running:\n game.time.delay(2)\n paddle.move()\n for event in game.event.get():\n\n if event.type == game.QUIT: \n running = False\n game.quit()\n sys.exit()\n\n game_state.screen.fill(self.bg_colour)\n game_state.middle_line()\n game_state.game_score_display()\n game_state.game_over()\n ball.ball_init()\n ball.point_detect()\n ball.special_ability()\n ball.ball_move()\n ball.ball_deflect_y_direction()\n ball.win_detect()\n paddle.move()\n paddle.paddle_create()\n paddle.border_check()\n paddle.paddle_AI()\n\n# Class to hold all information regarding the pong ball\nclass Ball:\n \n green = (0, 255, 0)\n black = (0, 0, 0)\n special_colour = (0, 0, 0)\n ball_colour =(255, 255, 255)\n x_ball_dir = GameState.half_width\n y_ball_dir = GameState.half_height\n\n ball_velocity = 4\n init_y = 2.5\n ball_movement_x = ball_velocity\n ball_movement_y = init_y\n\n rally_count = 0\n special_ability_y = 0\n special_ability_x = 0\n special_velocity = 0\n special_hit = 0\n special_1_count = 0\n special_3_count = 0\n special_3_count_ai = 0\n abilities = random.randint(1,2)\n\n ability_text = \" \"\n\n def ball_init(self):\n game.draw.circle(game_state.screen, ((ball.ball_colour)), (self.x_ball_dir, self.y_ball_dir), 8)\n\n if self.special_1_count == 1:\n ball.ball_colour = (255, 0, 0)\n \n if self.special_1_count == 0:\n ball.ball_colour = (255, 255, 255)\n\n def ball_move(self):\n \n if game_state.game_over() == 1 or paddle.level_select == 0:\n self.x_ball_dir = game_state.half_width\n self.y_ball_dir = game_state.half_height\n else:\n self.x_ball_dir += self.ball_movement_x\n self.y_ball_dir += self.ball_movement_y\n\n # If the ball hits the middle of the paddle, the x velocity is increased by 25%, if the ball hits the ends of the paddle\n # the y velocity is increased by 50% and the x velocity is decreased by 10%\n\n #---------------------------- Ball movement for player paddle -----------------------------------#\n if self.x_ball_dir - 8 < paddle.x_direction + 20 and self.x_ball_dir - 8 > paddle.x_direction + 10:\n if 
self.y_ball_dir > paddle.y_direction and self.y_ball_dir < paddle.y_direction + 38:\n self.ball_movement_x = 0.9 * self.ball_velocity\n\n if self.ball_movement_y > 0:\n self.init_y = 1.1 * self.init_y\n self.ball_movement_y = 1.1 * self.init_y\n elif self.ball_movement_y < 0:\n self.init_y = 1.1 * self.init_y\n self.ball_movement_y = - (1.1 * self.init_y)\n\n self.rally_count += 1\n self.special_3_count = 0\n self.special_1_count = 0\n \n if self.x_ball_dir - 8 < paddle.x_direction + 20 and self.x_ball_dir - 8 > paddle.x_direction + 10:\n if self.y_ball_dir > paddle.y_direction + 39 and self.y_ball_dir < paddle.y_direction + 77:\n self.ball_movement_x = 1.2 * self.ball_velocity\n\n if self.ball_movement_y > 0:\n self.init_y = 0.9 * self.init_y\n self.ball_movement_y = 0.9 * self.init_y\n elif self.ball_movement_y < 0:\n self.init_y = 0.9 * self.init_y\n self.ball_movement_y = - (0.9 * self.init_y)\n\n self.rally_count += 1\n self.special_3_count = 0\n self.special_1_count = 0\n\n if self.x_ball_dir - 8 < paddle.x_direction + 20 and self.x_ball_dir - 8 > paddle.x_direction + 10:\n if self.y_ball_dir > paddle.y_direction + 78 and self.y_ball_dir < paddle.y_direction + 115:\n self.ball_movement_x = 0.9 * self.ball_velocity\n \n if self.ball_movement_y > 0:\n self.init_y = 1.1 * self.init_y\n self.ball_movement_y = 1.1 * self.init_y\n elif self.ball_movement_y < 0:\n self.init_y = 1.1 * self.init_y\n self.ball_movement_y = - (1.1 * self.init_y)\n \n self.rally_count += 1\n self.special_3_count = 0\n self.special_1_count = 0\n \n #---------------------------- Ball movement for computer paddle -----------------------------------#\n if self.x_ball_dir + 8 > paddle.x_direction_op and self.x_ball_dir + 8 < paddle.x_direction_op + 10:\n if self.y_ball_dir > paddle.y_direction_op + 39 and self.y_ball_dir < paddle.y_direction_op + 77:\n self.ball_movement_x = - ( 1.2 * self.ball_velocity)\n\n if self.ball_movement_y > 0:\n self.init_y = 0.9 * self.init_y\n self.ball_movement_y = 0.9 * self.init_y\n elif self.ball_movement_y < 0:\n self.init_y = 0.9 * self.init_y\n self.ball_movement_y = - (0.9 * self.init_y)\n\n self.rally_count += 1\n self.special_3_count_ai = 0\n self.special_1_count = 0\n\n if self.x_ball_dir + 8 > paddle.x_direction_op and self.x_ball_dir + 8 < paddle.x_direction_op + 10: \n if self.y_ball_dir > paddle.y_direction_op + 78 and self.y_ball_dir < paddle.y_direction_op + 115:\n self.ball_movement_x = - (0.9 * self.ball_velocity)\n \n if self.ball_movement_y > 0:\n self.init_y = 1.1 * self.init_y\n self.ball_movement_y = 1.1 * self.init_y\n elif self.ball_movement_y < 0:\n self.init_y = 1.1 * self.init_y\n self.ball_movement_y = - (1.1 * self.init_y)\n \n self.rally_count += 1\n self.special_3_count_ai = 0\n self.special_1_count = 0\n \n if self.x_ball_dir + 8 > paddle.x_direction_op and self.x_ball_dir + 8 < paddle.x_direction_op + 10: \n if self.y_ball_dir > paddle.y_direction_op and self.y_ball_dir < paddle.y_direction_op + 38:\n self.ball_movement_x = - (0.9 * self.ball_velocity)\n \n if self.ball_movement_y > 0:\n self.init_y = 1.1 * self.init_y\n self.ball_movement_y = 1.1 * self.init_y\n elif self.ball_movement_y < 0:\n self.init_y = 1.1 * self.init_y\n self.ball_movement_y = - (1.1 * self.init_y)\n\n self.rally_count += 1\n self.special_3_count_ai = 0\n self.special_1_count = 0\n \n # Function to handle the ball deflection on the y axis\n def ball_deflect_y_direction(self):\n\n if self.y_ball_dir > game_state.screen_height:\n self.ball_movement_y = -1 * self.init_y\n if 
self.y_ball_dir < 0:\n self.ball_movement_y = self.init_y\n\n # Function to handle the game state when a point is scored by either opponent \n def point_detect(self):\n\n # If a point is scored by either side, the game state will re-position the ball to the \n # centre of the screen and re - initialise the ability iterator and ball movement velocities\n if self.x_ball_dir > game_state.screen_width + 10:\n self.x_ball_dir = game_state.half_width\n self.y_ball_dir = game_state.half_height\n game_state.p1_score += 1\n self.rally_count = 0\n self.special_hit = 0\n self.ball_movement_x = 4\n self.special_3_count_ai = 0\n self.special_1_count = 0\n self.special_ability_y = 0\n self.init_y = random.randint(1,3)\n self.ball_movement_y = self.init_y\n self.ball_velocity = 4\n game.time.delay(1000)\n \n if self.x_ball_dir < -10:\n self.x_ball_dir = game_state.half_width\n self.y_ball_dir = game_state.half_height\n game_state.p2_score += 1\n ball.rally_count = 0\n self.special_hit = 0\n self.ball_movement_x = - 4\n self.special_3_count = 0\n self.special_1_count = 0\n self.special_ability_y = 0\n self.init_y = random.randint(1,3)\n self.ball_movement_y = self.init_y\n self.ball_velocity = 4\n game.time.delay(1000)\n\n \n # Function to detect a win, if any side scores 7 points they win\n def win_detect(self):\n\n if game_state.p1_score >= 7:\n game_state.score_message = \"Game Over Player 1 wins\"\n return 1\n \n if game_state.p2_score >= 7:\n game_state.score_message = \"Game Over Player 2 wins\"\n return 2 \n \n # Function to handle the special abilities of the game\n def special_ability(self):\n\n self.special_ability_y += self.special_velocity\n\n if self.abilities == 1:\n self.special_colour = (250, 0, 0)\n self.ability_text =\"2x Speed\"\n \n # if self.abilities == 2:\n # self.special_colour = (0, 255, 0)\n # self.ability_text = \"Not\"\n \n if self.abilities == 2:\n self.special_colour = (0, 255, 255)\n self.ability_text = \"Freeze\"\n\n if self.rally_count > 7:\n game.draw.circle(game_state.screen, (self.special_colour), (game_state.half_width, self.special_ability_y), 30)\n\n ability_font = game.font.SysFont(\"calibri\", 12)\n ability_text = ability_font.render(self.ability_text, True, self.black)\n game_state.screen.blit(ability_text, (game_state.half_width - 15, self.special_ability_y - 5))\n\n self.special_hit = 1\n\n if self.special_ability_y > GameState.screen_height:\n self.special_velocity = -1\n \n if self.special_ability_y < 1:\n self.special_velocity = 1\n\n # Conditions to make the special ability circle dissapear \n if self.x_ball_dir < game_state.half_width + 30 and self.x_ball_dir > game_state.half_width - 30 and self.special_hit == 1:\n if self.y_ball_dir < self.special_ability_y + 30 and self.y_ball_dir > self.special_ability_y - 30:\n self.special_hit += 1\n #self.rally_count = 0\n self.special_colour = (0, 0, 0)\n self.special_ability_y = -35\n self.special_velocity = 0\n\n if self.abilities == 1 and self.ball_movement_x < 0:\n self.ball_movement_x = - 8\n self.special_1_count += 1\n #self.special_hit = 0\n \n if self.abilities == 1 and self.ball_movement_x > 0:\n self.ball_movement_x = 8\n self.special_1_count += 1\n #self.special_hit = 0\n \n if self.abilities == 2 and self.ball_movement_x < 0:\n self.special_3_count += 1\n paddle.paddle_colour = (0, 255, 255)\n #self.special_hit = 0\n \n if self.abilities == 2 and self.ball_movement_x > 0:\n self.special_3_count_ai += 1\n paddle.paddle_colour_ai = (0, 255, 255)\n #self.special_hit = 0\n \n self.abilities = 
random.randint(1,2)\n\n# Class to handle the paddle state of the game \nclass Paddle:\n\n white = (255, 255, 255)\n paddle_colour = (255, 255, 255)\n paddle_colour_ai = (255, 255, 255)\n velocity = 3\n\n x_origin = GameState.screen_width/2 - 590\n y_origin = GameState.screen_height/2 - 100\n\n x_direction_op = 1290\n y_direction_op = y_origin\n\n x_direction = x_origin\n y_direction = y_origin\n\n ai_speed = 0\n\n level_select = 0\n\n # Function to draw the paddles\n def paddle_create(self):\n\n if ball.special_3_count == 0:\n self.paddle_colour = (255, 255, 255)\n\n if ball.special_3_count_ai == 0:\n self.paddle_colour_ai = (255, 255, 255)\n\n game.draw.rect(GameState.screen, (self.paddle_colour), [self.x_direction, self.y_direction, 20, 115] )\n game.draw.rect(GameState.screen, (self.paddle_colour_ai), [self.x_direction_op, self.y_direction_op, 20, 115] )\n\n game.display.flip()\n \n # Simple AI that tracks the position of the ball's y position\n def paddle_AI(self):\n\n if ball.x_ball_dir > game_state.half_width:\n\n if ball.y_ball_dir > paddle.y_direction_op + 50 and ball.special_3_count_ai == 0:\n self.y_direction_op += self.ai_speed\n\n if ball.y_ball_dir < paddle.y_direction_op and ball.special_3_count_ai == 0:\n self.y_direction_op -= self.ai_speed\n\n\n # Function to handle keybinds of the game\n def move(self):\n \n keys = game.key.get_pressed()\n #----- Movement --------#\n if keys[game.K_w]:\n\n if game_state.game_over() != 1 and paddle.level_select != 0 and ball.special_3_count == 0:\n self.y_direction -= self.velocity\n\n if keys[game.K_s]:\n if game_state.game_over() != 1 and paddle.level_select != 0 and ball.special_3_count == 0:\n self.y_direction += self.velocity \n\n if keys[game.K_UP]:\n self.y_direction_op -= self.velocity\n\n if keys[game.K_DOWN]:\n self.y_direction_op += self.velocity\n \n #----- Level selector --------#\n\n # The AI level is selected by pressing 1 - 3, pressing 4 will activate multiplayer mode\n if keys[game.K_1]:\n if self.level_select == 0:\n self.ai_speed = 3.2\n self.level_select += 1\n \n if keys[game.K_2]:\n if self.level_select == 0:\n self.ai_speed = 4.5\n self.level_select += 1\n\n if keys[game.K_3]:\n if self.level_select == 0:\n self.ai_speed = 5\n self.level_select += 1\n \n if keys[game.K_4]:\n if self.level_select == 0:\n self.ai_speed = 0\n self.level_select += 1\n \n #----- Restart and quit --------#\n if keys[game.K_r]:\n game_state.game_reset()\n\n if keys[game.K_q]:\n game.quit()\n \n # Function to ensure the paddles do not move off the screen\n def border_check(self):\n\n if self.y_direction < 0:\n self.y_direction = 1\n \n if self.y_direction > game_state.screen_height - 115:\n self.y_direction = game_state.screen_height - 120\n\n if self.y_direction_op < 0:\n self.y_direction_op = 1\n \n if self.y_direction_op > game_state.screen_height - 115:\n self.y_direction_op = game_state.screen_height - 114\n\ngame_state = GameState()\npaddle = Paddle()\nball = Ball()\n\ngame_state.game_loop()","repo_name":"n-dai/pong","sub_path":"pong.py","file_name":"pong.py","file_ext":"py","file_size_in_byte":19199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10181033396","text":"import torch\nimport gensim\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nEMBEDDING_SIZE = 500\nNUM_FILTERS = 10\nOUTPUT_FOLDER = './all_outputs/'\nclass CnnTextClassifier(nn.Module):\n def __init__(self, vocab_size, num_classes, window_sizes=(1,2,3,5)):\n super(CnnTextClassifier, 
self).__init__()\n w2vmodel = gensim.models.KeyedVectors.load(OUTPUT_FOLDER + 'models/' + 'word2vec_500_PAD.model')\n weights = w2vmodel.wv\n # With pretrained embeddings\n self.embedding = nn.Embedding.from_pretrained(torch.FloatTensor(weights.vectors), padding_idx=w2vmodel.wv.vocab['pad'].index)\n # Without pretrained embeddings\n # self.embedding = nn.Embedding(vocab_size, EMBEDDING_SIZE)\n\n self.convs = nn.ModuleList([\n nn.Conv2d(1, NUM_FILTERS, [window_size, EMBEDDING_SIZE], padding=(window_size - 1, 0))\n for window_size in window_sizes\n ])\n\n self.fc = nn.Linear(NUM_FILTERS * len(window_sizes), num_classes)\n\n def forward(self, x):\n x = self.embedding(x) # [B, T, E]\n\n # Apply a convolution + max_pool layer for each window size\n x = torch.unsqueeze(x, 1)\n xs = []\n for conv in self.convs:\n x2 = torch.tanh(conv(x))\n x2 = torch.squeeze(x2, -1)\n x2 = F.max_pool1d(x2, x2.size(2))\n xs.append(x2)\n x = torch.cat(xs, 2)\n\n # FC\n x = x.view(x.size(0), -1)\n logits = self.fc(x)\n\n probs = F.softmax(logits, dim = 1)\n\n return probs","repo_name":"Skanda-Bharadwaj/CSE582","sub_path":"HW2/cnn_model.py","file_name":"cnn_model.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5179381981","text":"# Write a Python program that reads any integer and asks the user to choose the conversion base: 1 for binary, 2 for octal or 3 for hexadecimal.\n\nnumero = int(input(\"Enter a number: \"))\nescolha = int(input(\"1) Binary 2) Octal 3) Hexadecimal: \"))\n\nif escolha == 1:\n print(f\"The number {numero} in binary is: {bin(numero)[2:]}\") # bin converts the number to binary\nelif escolha == 2:\n print(f\"The number {numero} in octal is: {oct(numero)[2:]}\") # oct converts the number to octal\nelif escolha == 3:\n print(f\"The number {numero} in hexadecimal is: {hex(numero)[2:]}\") # hex converts the number to hexadecimal\nelse:\n print(\"Invalid option! 
Try again.\")","repo_name":"Nandabdev/Python3-Mundo2","sub_path":"if-elif-else.py/conversorbasesnumericas.py","file_name":"conversorbasesnumericas.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"32550559691","text":"from django.conf.urls import url\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nfrom stories.views import (\n StoryViewSet,\n StoryAttributeViewSet,\n QueryViewSet,\n QueryNERViewSet,\n StoryRetrainViewSet\n)\n\n# NOTE: the named-group identifiers below (story_name, attribute_name, query_id) are reconstructed guesses; the original (?P<...>) names were lost to markup stripping.\nurlpatterns = [\n url(r'^stories/$', StoryViewSet.list_mapping(), name='story_list'),\n url(r'^stories/(?P<story_name>[A-Za-z0-9-]+)/$', StoryViewSet.detail_mapping(), name='story_detail'),\n\n url(r'^stories/(?P<story_name>[A-Za-z0-9-]+)/retrain/$', StoryRetrainViewSet.list_mapping(), name='story_retrain'),\n\n url(r'^stories/(?P<story_name>[A-Za-z0-9-]+)/attributes/$', StoryAttributeViewSet.list_mapping(),\n name='story_attribute_list'),\n url(r'^stories/(?P<story_name>[A-Za-z0-9-]+)/attributes/(?P<attribute_name>[A-Za-z0-9]+)/$',\n StoryAttributeViewSet.detail_mapping(),\n name='story_attribute_detail'),\n\n url(r'^stories/(?P<story_name>[A-Za-z0-9-]+)/queries/$', QueryViewSet.list_mapping(),\n name='query_list'),\n url(r'^stories/(?P<story_name>[A-Za-z0-9-]+)/queries/(?P<query_id>[0-9]+)/$', QueryViewSet.detail_mapping(),\n name='query_detail'),\n\n url(r'^stories/(?P<story_name>[A-Za-z0-9-]+)/queries/(?P<query_id>[0-9]+)/ner/$', QueryNERViewSet.list_mapping(),\n name='query_ner_list'),\n]\n","repo_name":"kendricktan/laice","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","stars":172,"dataset":"github-code","pt":"81"} +{"seq_id":"32191982690","text":"\"\"\"\nWhat's the big idea?\n\nAn endpoint that traverses all restful endpoints producing a swagger 2.0 schema\nIf a swagger yaml description is found in the docstrings for an endpoint\nwe add the endpoint to swagger specification output\n\n- Added base_path parameter\n\"\"\"\nimport inspect\nimport yaml\nimport re\nimport os\n\nfrom collections import defaultdict\n\n\ndef _sanitize(comment):\n return comment.replace('\\n', '<br/
>') if comment else comment\n\n\ndef _find_from_file(full_doc, from_file_keyword, base_path):\n \"\"\"\n Finds a line in <full_doc> like\n\n <from_file_keyword>: <path>\n\n and returns the path\n \"\"\"\n path = None\n\n for line in full_doc.splitlines():\n if from_file_keyword in line:\n parts = line.strip().split(':')\n if len(parts) == 2 and parts[0].strip() == from_file_keyword:\n path = parts[1].strip()\n break\n path = path if not path else os.path.join(base_path, path)\n return path\n\n\ndef _doc_from_file(path):\n doc = None\n with open(path) as f:\n doc = f.read()\n return doc\n\n\ndef _parse_docstring(obj, process_doc, from_file_keyword, base_path):\n first_line, other_lines, swag = None, None, None\n full_doc = inspect.getdoc(obj)\n if full_doc:\n if from_file_keyword is not None:\n from_file = _find_from_file(full_doc, from_file_keyword, base_path)\n if from_file:\n full_doc_from_file = _doc_from_file(from_file)\n if full_doc_from_file:\n full_doc = full_doc_from_file\n line_feed = full_doc.find('\\n')\n if line_feed != -1:\n first_line = process_doc(full_doc[:line_feed])\n yaml_sep = full_doc[line_feed+1:].find('---')\n if yaml_sep != -1:\n other_lines = process_doc(full_doc[line_feed+1:line_feed+yaml_sep])\n swag = yaml.full_load(full_doc[line_feed+yaml_sep:])\n else:\n other_lines = process_doc(full_doc[line_feed+1:])\n else:\n first_line = full_doc\n return first_line, other_lines, swag\n\n\ndef _extract_definitions(alist, level=None):\n \"\"\"\n Since we couldn't be bothered to register models elsewhere\n our definitions need to be extracted from the parameters.\n We require an 'id' field for the schema to be correctly\n added to the definitions list.\n \"\"\"\n\n def _extract_array_defs(source):\n # extract any definitions that are within arrays\n # this occurs recursively\n ret = []\n items = source.get('items')\n if items is not None and 'schema' in items:\n ret += _extract_definitions([items], level+1)\n return ret\n\n # for tracking level of recursion\n if level is None:\n level = 0\n\n defs = list()\n if alist is not None:\n for item in alist:\n schema = item.get(\"schema\")\n if schema is not None:\n schema_id = schema.get(\"id\")\n if schema_id is not None:\n defs.append(schema)\n ref = {\"$ref\": \"#/definitions/{}\".format(schema_id)}\n\n # only add the reference as a schema if we are in a response or\n # a parameter i.e. at the top level\n # directly ref if a definition is used within another definition\n if level == 0:\n item['schema'] = ref\n else:\n item.update(ref)\n del item['schema']\n\n # extract any definitions that are within properties\n # this occurs recursively\n properties = schema.get('properties')\n if properties is not None:\n defs += _extract_definitions(properties.values(), level+1)\n\n defs += _extract_array_defs(schema)\n\n defs += _extract_array_defs(item)\n\n return defs\n\n\ndef swagger(app, prefix=None, process_doc=_sanitize,\n from_file_keyword=None, template=None, base_path=\"\"):\n \"\"\"\n Call this from an @app.route method like this\n @app.route('/spec.json')\n def spec():\n return jsonify(swagger(app))\n\n We go through all endpoints of the app searching for swagger endpoints\n We provide the minimum required data according to swagger specs\n Callers can and should add and override at will\n\n Arguments:\n app -- the flask app to inspect\n\n Keyword arguments:\n process_doc -- text sanitization method, the default simply replaces \\n with <br/>
+{"seq_id":"22005046758","text":"import car_world\nimport os\n\n\nif __name__ == '__main__':\n    car = car_world.CarWorld()\n    input_f = os.path.join(\"test_vid\", \"project_video.mp4\")\n    output_f = os.path.join(car.results_dir, \"project_video_output.mp4\")\n\n    car.process_video(input_f, output_f)","repo_name":"supertetelman/sdc-vehicle-detection","sub_path":"runme.py","file_name":"runme.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"36405488394","text":"import ipfshttpclient\nimport pickle\n\nfrom status_codes import StatusCode\n\n# Path to db file (binary dict with : key-value pairs)\ndb_path = \"db.data\"\n\n\ndef set_user_ipfs_link(user, link):\n    db_dict = {}\n    # Read dict from file\n    try:\n        with open(db_path, \"rb\") as db:\n            db_dict = pickle.load(db)\n    except:\n        # Nothing has been saved so far\n        pass\n    # Update dict\n    db_dict[user] = link\n    with open(db_path, \"wb\") as db:\n        pickle.dump(db_dict, db)\n\n\ndef get_ipfs_link_by_uid(user):\n    db_dict = {}\n    # Read dict from file\n    try:\n        with open(db_path, \"rb\") as db:\n            db_dict = pickle.load(db)\n    except:\n        return None, StatusCode.ERROR_TABLE_NOT_EXISTS\n    link = db_dict.get(user)\n    return link, StatusCode.ERROR_NO_SUCH_USER_IN_TABLE if link is None else StatusCode.OK\n\n\ndef get_data_by_ipfs_link(link):\n    try:\n        client = ipfshttpclient.connect()\n    except:\n        print(\"Error while getting data: ipfs daemon is not running!\")\n        return None, StatusCode.ERROR_DAEMON_IS_NOT_RUNNING\n    try:\n        data = client.cat(link).decode()\n    except:\n        print(\"Error while getting data: wrong link!\")\n        return None, StatusCode.ERROR_WRONG_LINK\n    return data, StatusCode.OK\n","repo_name":"PiroDev/ipfs-profiles-name-service","sub_path":"ipfs/ipfs_client.py","file_name":"ipfs_client.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"70610546825","text":"from wsgiref.simple_server import make_server # the wsgiref webserver (default with Python)\nfrom pyramid.config import Configurator\n\nfrom pyramid.response import Response\nfrom pyramid.response import FileResponse\nfrom pyramid.renderers import render_to_response\n\n''' Routes '''\ndef basic_route(req):\n    data = {}\n    return render_to_response('landingpage.html', data, request=req)\n\ndef analytics_route(req):\n    return FileResponse('analytics.html')\n\n''' Main Application '''\ndef main() :\n    with Configurator() as config:\n\n        # basic_route\n        config.add_route('landing', '/')\n        config.add_view(basic_route, route_name='landing')\n\n        # view_route\n        config.add_route('analytics', '/analytics')\n        config.add_view(analytics_route, route_name='analytics')\n\n        # for template_route / template_route2\n        config.include('pyramid_jinja2')\n        config.add_jinja2_renderer('.html')\n\n\n        # add static folder to search path\n        config.add_static_view(name='/', path='./public', cache_max_age=3600)\n\n        # create the webserver config\n        app = config.make_wsgi_app()\n\n        # run the server\n        server = make_server('127.0.0.1', 8080, app)\n        print(\"The server is now running on: http://127.0.0.1:8080\")\n    \n    try:\n        server.serve_forever()\n    except KeyboardInterrupt:\n        print(\"\\nExiting...\")\n        exit(0)\n\nif __name__ == '__main__':\n    main()\n","repo_name":"faraznshaikh/pill-buddy-website","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"25853349993","text":"\nfrom kivymd.uix.responsivelayout import MDResponsiveLayout\n\nfrom View.AdminScreen.DockhandScreen.components import (\n    DockhandMobileScreen,\n    DockhandTabletScreen,\n    DockhandDesktopScreen,\n)\nfrom View.base_screen import BaseScreenView\n\n\nclass DockhandScreenView(MDResponsiveLayout, BaseScreenView):\n    def __init__(self, **kw):\n        super().__init__(**kw)\n        self.mobile_view = DockhandMobileScreen(controller=self.controller)\n        self.tablet_view = DockhandTabletScreen(controller=self.controller)\n        self.desktop_view = DockhandDesktopScreen()\n\n    def model_is_changed(self) -> None:\n        \"\"\"\n        Called whenever any change has occurred in the data model.\n        The view in this method tracks these changes and updates the UI\n        according to these changes.\n        \"\"\"\n        print('dockhand model changed')\n        self.on_water_list()\n        self.rider_list()\n\n    def rider_list(self):\n        rider_list = []\n        for rider in self.model.riders_on_deck():\n            profile_image = self.model.get_image(rider.profile_image) if rider.profile_image else 'assets/images/default_profile.png'\n            bib_color = rider.bib_color if rider.bib_color else 'white'\n            rider_list.append({\n                'rider': rider,\n                'rider_name': rider.full_name,\n                'profile_image': profile_image,\n                'controller': self.controller,\n                'bib_color': bib_color\n            })\n\n        # Update for mobile view\n        self.mobile_view.ids.participant_list.data = rider_list\n\n        # Update for tablet view\n        self.tablet_view.ids.participant_list.data = rider_list\n\n    def on_water_list(self):\n        carriers = [2, 4, 6, 8]\n\n        if self.model.live_contest:\n            for carrier in carriers:\n                carrier_data = self.controller.get_carrier(carrier)\n                if carrier_data.rider:  # Check if the carrier has a rider\n                    self.mobile_view.ids[f'carrier{carrier}'].carrier = carrier_data\n                    self.tablet_view.ids[f'carrier{carrier}'].carrier = carrier_data\n                else:  # If no rider, set to None\n                    self.mobile_view.ids[f'carrier{carrier}'].carrier = None\n                    self.tablet_view.ids[f'carrier{carrier}'].carrier = None\n        else:\n            for carrier in carriers:\n                self.mobile_view.ids[f'carrier{carrier}'].carrier = None\n                self.tablet_view.ids[f'carrier{carrier}'].carrier = None\n","repo_name":"MNwake/TheCWA","sub_path":"View/AdminScreen/DockhandScreen/dockhand_screen.py","file_name":"dockhand_screen.py","file_ext":"py","file_size_in_byte":2520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"15905797240","text":"import os\nimport glob\nimport click\nimport pickle\nimport shutil\nimport pandas\nfrom nastools.nastools import retrieve_nas_files\nfrom externalretrieve import upload_to_ftp\n\n\n@click.command()\n@click.option('--redmine_instance', help='Path to pickled Redmine API instance')\n@click.option('--issue', help='Path to pickled Redmine issue')\n@click.option('--work_dir', help='Path to Redmine issue work directory')\n@click.option('--description', help='Path to pickled Redmine description')\ndef minimap2_redmine(redmine_instance, issue, work_dir, description):\n    try:\n        # Unpickle Redmine objects\n        redmine_instance = pickle.load(open(redmine_instance, 'rb'))\n        issue = pickle.load(open(issue, 'rb'))\n        description = pickle.load(open(description, 'rb'))\n\n        # Variable to hold supplied arguments\n        argument_dict = {\n            'k-mer': 15\n        }\n\n        # Parse description to figure out what SEQIDs we need to run on.\n        seqids = list()\n        analysistype = \"\"  # Just for the output at the end\n        for item in description:\n            item = item.rstrip()\n            if 'k-mer' in item:\n                argument_dict['k-mer'] = int(item.split('=')[1].lower())\n                continue\n            # Otherwise the item should be a SEQID\n            seqids.append(item)\n\n        # No SEQIDs\n        if len(seqids) == 0:\n            redmine_instance.issue.update(resource_id=issue.id, status_id=4,\n                                          notes='You did not include any SEQIDs.')\n            return\n\n        # Create folder to drop FASTQ files\n        seq_folder = os.path.join(work_dir, 'sequences')\n        os.mkdir(seq_folder)\n\n        # Create output folder which will be zipped later\n        output_folder = os.path.join(work_dir, 'output')\n        os.makedirs(output_folder)\n\n        # Extract FASTQ files.\n        retrieve_nas_files(seqids=seqids, outdir=seq_folder, filetype='fastq', copyflag=False)\n        missing_fastqs = check_fastqs_present(seqids, seq_folder)\n        if missing_fastqs:\n            redmine_instance.issue.update(resource_id=issue.id,\n                                          notes='WARNING: Could not find the following requested SEQIDs on '\n                                                'the OLC NAS: {}'.format(missing_fastqs))\n\n        # These unfortunate hard coded paths appear to be necessary\n        activate = 'source /home/ubuntu/miniconda3/bin/activate /mnt/nas2/virtual_environments/minimap2_sarah'\n        targets_file = '/mnt/nas2/virtual_environments/minimap2_sarah/serotype_combined_targets.fasta'\n\n        #now for each fastq file in the list, going to run the minimap2 command using the chosen targets file\n        for fastq in glob.glob(os.path.join(seq_folder, '*.fastq.gz')):\n            seqid = os.path.split(fastq)[1].split('.')[0]\n            # Prepare command\n            cmd = 'minimap2 -ax map-ont {target} {fastq} -o {output_file}'\\\n                .format(output_file='{seqid}-aln.sam'.format(seqid=seqid),\n                        fastq=fastq,\n                        seqid=seqid,\n                        target=targets_file)\n\n            # Create another shell script to execute within the roary conda environment\n            template = \"#!/bin/bash\\n{} && cd {} && {}\".format(activate, seq_folder, cmd) #this line takes the activate command (above) and the command for minimap2 and writes it into bash file\n            minimap2_script = os.path.join(work_dir, 'run_minimap2.sh')\n            with open(minimap2_script, 'w+') as file:\n                file.write(template)\n            make_executable(minimap2_script)\n\n            # Run shell script\n            os.system(minimap2_script)\n\n            #move the alignment files to the output folder\n            sam_file = '{seqid}-aln.sam'.format(seqid=seqid)\n            output_sam = os.path.join(output_folder, sam_file)\n            shutil.copyfile(os.path.join(seq_folder,sam_file), output_sam)\n\n        # Zip output\n        output_filename = 'minimap2_output_{}'.format(issue.id)\n        zip_filepath = zip_folder(results_path=output_folder,\n                                  output_dir=work_dir,\n                                  output_filename=output_filename)\n        zip_filepath += '.zip'\n\n        upload_successful = upload_to_ftp(local_file=zip_filepath)\n        # Prepare upload\n        if upload_successful:\n            redmine_instance.issue.update(resource_id=issue.id, status_id=4,\n                                          notes='Minimap2 process complete!\\n\\n'\n                                                'Results are available at the following FTP address:\\n'\n                                                'ftp://ftp.agr.gc.ca/outgoing/cfia-ac/{}'\n                                          .format(os.path.split(zip_filepath)[1]))\n        else:\n            redmine_instance.issue.update(resource_id=issue.id, status_id=4,\n                                          notes='Upload of result files was unsuccessful due to FTP connectivity '\n                                                'issues. '\n                                                'Please try again later.')\n        # Remove the zip file\n#        os.remove(zip_filepath)\n\n        # Clean up files\n        shutil.rmtree(output_folder)\n#        os.remove(zip_filepath)\n    except Exception as e:\n        redmine_instance.issue.update(resource_id=issue.id,\n                                      notes='Something went wrong! Send this error traceback to your friendly '\n                                            'neighborhood bioinformatician: {}'.format(e))\n\n\ndef make_executable(path):\n    \"\"\"\n    Takes a shell script and makes it executable (chmod +x)\n    :param path: path to shell script\n    \"\"\"\n    mode = os.stat(path).st_mode\n    mode |= (mode & 0o444) >> 2\n    os.chmod(path, mode)\n\n\ndef verify_fasta_files_present(seqid_list, fasta_dir):\n    missing_fastas = list()\n    for seqid in seqid_list:\n        if len(glob.glob(os.path.join(fasta_dir, seqid + '*.fasta'))) == 0:\n            missing_fastas.append(seqid)\n    return missing_fastas\n\ndef check_fastqs_present(fastq_list, fastq_dir):\n    missing_fastqs = list()\n    for seqid in fastq_list:\n        if len(glob.glob(os.path.join(fastq_dir, seqid + '*.fastq.gz'))) < 2:\n            # JAS adding an extra if statement here to allow for Nanopore (SE) reads\n            if len(glob.glob(os.path.join(fastq_dir, seqid + \".fastq.gz\"))) == 0:\n                missing_fastqs.append(seqid)\n    return missing_fastqs\n\n\ndef zip_folder(results_path, output_dir, output_filename):\n    output_path = os.path.join(output_dir, output_filename)\n    shutil.make_archive(output_path, 'zip', results_path)\n    return output_path\n\n\nif __name__ == '__main__':\n    minimap2_redmine()\n","repo_name":"OLC-LOC-Bioinformatics/OLCRedmineAutomator","sub_path":"automators/minimap2.py","file_name":"minimap2.py","file_ext":"py","file_size_in_byte":6767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"17113080085","text":"from visualization_msgs.msg import Marker, MarkerArray\nfrom geometry_msgs.msg import Quaternion, Pose, Point, Vector3\n\nfrom std_msgs.msg import Header, ColorRGBA\nfrom viz import FRAME_ID\nfrom rk.utils import find_route\nimport rospy\nimport numpy as np\n\ndef ulink_to_marker_array(ulink) -> MarkerArray:\n    marker_array = MarkerArray()\n    \n    # Represent joint positions by Marker.SPHERE\n    marker_dict = dict()\n    for joint in ulink.values():\n        marker = Marker(\n            header=Header(frame_id=FRAME_ID),\n            id=joint.id,\n            type=Marker.SPHERE,\n            action=Marker.ADD,\n            pose=Pose(Point(joint.p[0], joint.p[1], joint.p[2]), Quaternion(0, 0, 0, 1)),\n            scale=Vector3(0.1, 0.1, 0.1),\n            color=ColorRGBA(1, 0, 0, 1),\n            lifetime=rospy.Duration()\n        )\n        marker_dict[joint.id] = marker\n        marker_array.markers.append(marker)\n\n        if joint.is_leaf:\n            \n            link = Marker(\n                header=Header(frame_id=FRAME_ID),\n                action=Marker.ADD,\n                id=int(str(joint.id) + '01'),\n                type=Marker.LINE_STRIP,\n                color=ColorRGBA(1.0, 1.0, 1.0, 1.0)\n            )\n            link.scale.x = 0.02\n            link.pose.orientation.w = 1.0\n\n            traj = np.append([1], find_route(ulink, joint.id))\n            for i in traj:\n                link.points.append(marker_dict[i].pose.position)\n            marker_array.markers.append(link)\n    return marker_array\n","repo_name":"jihoonerd/robot_kinematics","sub_path":"src/viz/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"72651181066","text":"a,b=map(int, input().split())\nd=b-a\nacc = 0\nfor i in range(2**16):\n    threshold = i\n    acc = i*(i+1)\n    if d <= i*(i+1):\n        break\n\nif d == 0:\n    print(0)\nelse:\n    print(2*threshold - 1 if d <= acc-threshold else 2*threshold)\n","repo_name":"malkoG/polyglot-cp","sub_path":"BOJ/Mathematics/Python/1669.py","file_name":"1669.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"4757396213","text":"import yaml,os,sys\nimport dill\nimport pandas as pd \n\nfrom app_logger.logger import App_Logger\nfrom app_exception.exception import AppException\n\n\n\nutil_logger = App_Logger('util_logger')\n\ndef write_yaml_file(file_path,data=None):\n    \"\"\"\n    Create yaml file \n    file_path: str\n    data: dict\n    \"\"\"\n    try:\n        os.makedirs(os.path.dirname(file_path), exist_ok=True)\n        with open(file_path,\"w\") as yaml_file:\n            if data is not None:\n                yaml.dump(data,yaml_file)\n    except Exception as e:\n        raise AppException(e,sys) from e\n\n\ndef read_yaml_file(file_path:str)->dict:\n    \"\"\"\n    Reads a YAML file and returns the contents as a dictionary.\n    file_path: str\n    \"\"\"\n    try:\n        with open(file_path, 'rb') as yaml_file:\n            return yaml.safe_load(yaml_file)\n    except Exception as e:\n        raise AppException(e,sys) from e\n\ndef save_object(file_path:str,obj):\n    \"\"\"\n    file_path: str\n    obj: Any sort of object\n    \"\"\"\n    try:\n        dir_path = os.path.dirname(file_path)\n        os.makedirs(dir_path, exist_ok=True)\n        with open(file_path, \"wb\") as file_obj:\n            dill.dump(obj, file_obj)\n    except Exception as e:\n        raise AppException(e,sys) from e\n\n\ndef load_object(file_path:str):\n    \"\"\"\n    file_path: str\n    \"\"\"\n    try:\n        with open(file_path, \"rb\") as file_obj:\n            return dill.load(file_obj)\n\n    except Exception as e:\n        raise AppException(e,sys) from e\n\n\ndef Read_data_MONGO(Connection , Del_id = True , Query = {}):\n    \"\"\" This function take the connection object and read the data from the database and return the dataframe\n    \"\"\"\n    util_logger.info(\"Reading the data from the database\")\n    try:\n        data = Connection.Find_Many(query=Query)\n    except Exception as e:\n        raise AppException(e,sys) from e\n    dataFrame = pd.DataFrame(list(data))\n    util_logger.info(\"Dataframe created successfully\")\n    if Del_id:\n        dataFrame.drop(['_id'], axis=1, inplace=True)\n    return dataFrame","repo_name":"pk1308/Forest-Fire","sub_path":"app_util/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"37029073350","text":"import pygame\r\nimport random\r\npygame.init()\r\n\r\nwindow = pygame.display.set_mode((700, 500))\r\nbg = pygame.transform.scale(pygame.image.load(\"Scorched_Stone-Map.webp\"), (800, 600))\r\nclock = pygame.time.Clock()\r\n\r\n\r\nclass GameSprite(pygame.sprite.Sprite):\r\n    def __init__(self, player_image, player_x, player_y, size_x, size_y, player_speed):\r\n        pygame.sprite.Sprite.__init__(self)\r\n        self.image = pygame.transform.scale(pygame.image.load(player_image), (size_x, size_y))\r\n        self.speed = player_speed\r\n        self.rect = self.image.get_rect()\r\n        self.rect.x = player_x\r\n        self.rect.y = player_y\r\n    \r\n    def reset(self):\r\n        window.blit(self.image, (self.rect.x, self.rect.y))\r\n\r\nclass Player(GameSprite):\r\n    def update(self):\r\n        keys_pressed = pygame.key.get_pressed()\r\n        if keys_pressed[pygame.K_LEFT] and self.rect.x > 5:\r\n            self.rect.x -= self.speed\r\n        if keys_pressed[pygame.K_RIGHT] and self.rect.x < 620:\r\n            self.rect.x += self.speed\r\n    def lol(self):\r\n        pyla = Pyla(\"pixilart-drawing.png\", self.rect.centerx, self.rect.top, 50, 50, 15)\r\n        pyls.add(pyla)\r\n\r\nclass Rusar(GameSprite):\r\n    def update(self):\r\n        self.rect.y += self.speed\r\n        if self.rect.y >= 399:\r\n            self.rect.y = 0\r\n            self.rect.x = random.randint(20, 600)\r\n\r\nclass Pyla(GameSprite):\r\n    def update(self):\r\n        self.rect.y -= self.speed\r\n        if self.rect.y < 0:\r\n            self.kill()\r\n\r\nplayer = Player(\"Piper.webp\", 300, 360, 70, 140, 10) \r\nrusars = pygame.sprite.Group()\r\npyls = pygame.sprite.Group()\r\n\r\nscore = 0\r\n\r\nfor i in range(4):\r\n    rusar = Rusar(\"Mexo.webp\", random.randint(20, 600), random.randint(-50, 0), 100, 120, random.randint(1, 2))\r\n    rusars.add(rusar)\r\n\r\nfont = pygame.font.Font(None, 80)\r\ntext = font.render(\"Ты лутше всех!!!\", True, (0, 0, 0))\r\n\r\ngame = True\r\nwhile game:\r\n    window.blit(bg, (-50, -50))\r\n    player.reset()\r\n    player.update()\r\n    rusars.draw(window)\r\n    rusars.update()\r\n    pyls.draw(window)\r\n    pyls.update()\r\n\r\n    if score >= 20:\r\n        window.blit(text, (100, 100))\r\n\r\n    for event in pygame.event.get():\r\n        if event.type == pygame.QUIT:\r\n            pygame.quit()\r\n        if event.type == pygame.KEYDOWN:\r\n            if event.key == pygame.K_SPACE:\r\n                player.lol()\r\n\r\n    collides = pygame.sprite.groupcollide(rusars, pyls, True, True)\r\n    for i in collides:\r\n        score += 1\r\n        rusar = Rusar(\"Mexo.webp\", random.randint(20, 600), random.randint(-50, 0), 100, 120, random.randint(3, 5))\r\n        rusars.add(rusar)\r\n\r\n    pygame.display.update()\r\n    clock.tick(50)","repo_name":"JmuhBobr228/test","sub_path":"ШУТЕРР.py","file_name":"ШУТЕРР.py","file_ext":"py","file_size_in_byte":2669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"25987815112","text":"# https://www.codewars.com/kata/583726b8aa6717a718000002/train/python\n\"\"\"\n思路: 用字典存相对坐标, 代替预开过大数组。\n      大小位置的时候, dict更合适。\n\"\"\"\nimport collections\n\n\n\ndef spiralize(word):\n    # goDown, goRight, goUp, goLeft\n    dir4 = [[1, 0], [0, 1], [-1, 0], [0, -1]]\n    res = collections.defaultdict(str)\n    x, y = 0, 0\n    for i, c in enumerate(word):\n        dx, dy = dir4[i % 4]\n        for idx in range(ord(c) - ord(\"a\") + 1):\n            x += dx\n            y += dy\n            res[x, y] = c\n\n    L, R = min(k[1] for k in res.keys()), max(k[1] for k in res.keys())\n    T, D = min(k[0] for k in res.keys()), max(k[0] for k in res.keys())\n    grid = [[\" \"] * (R - L + 1) for _ in range(T, D + 1)]\n    for k, v in res.items():\n        r = k[0] - T\n        c = k[1] - L\n        grid[r][c] = v\n\n    return \"\\n\".join(\"\".join(_) for _ in grid)\n\n\n# print(spiralize(\"wordspiral\"))\n# print(spiralize(\"lbhrptkeiqtkzdamhjpfjaddtffgrpsjigcpfgxopltyxufcvrfspkcxcmazoyertwawhvokwdcsqjexnzejvxkciiyfwuyzbnie\"))\nprint(spiralize(\"adbbeb\") == \"\"\"\n  bbb\n  e b\nadedd\ne\ne\nebb\"\"\")\n\n# print(spiralize(\"abcdefghijklmnopqrstuvwxyz\") ==\n#       \"\"\"xxxxxxxxxxxxxxxxxxxxxxxxw\n# y w\n# y tttttttttttttttttttts w\n# y u s w\n# y u ppppppppppppppppo s w\n# y u q o s w\n# y u q llllllllllllk o s w\n# y u q m k o s w\n# y u q m hhhhhhhhg k o s w\n# y u q m i g k o s w\n# y u q m i ddddc g k o s w\n# y u q m i e c g k o s w\n# y u q m i e c g k o s w\n# y u q m i e abb g k o s w\n# y u q m i e g k o s w\n# y u q m i effffff k o s w\n# y u q m i k o s w\n# y u q m ijjjjjjjjjj o s w\n# y u q m o s w\n# y u q mnnnnnnnnnnnnnn s w\n# y u q s w\n# y u qrrrrrrrrrrrrrrrrrr w\n# y u w\n# y uvvvvvvvvvvvvvvvvvvvvvv\n# y\n# yzzzzzzzzzzzzzzzzzzzzzzzzzz\"\"\")\n","repo_name":"IsolatedRain/code_wars","sub_path":"completed/Word Spiral.py","file_name":"Word Spiral.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"11267901077","text":"from flask import Flask, jsonify, request, g, render_template, abort, make_response, redirect, url_for\n\nfrom model.User import User\nfrom model.Pred import Pred\n\nfrom validation.Validator import *\n\nimport numpy as np\nimport joblib\nimport re\n\nfrom flask_cors import CORS\n\napp = Flask(__name__)\n\nCORS(app)\n\n# landing page\n@app.route(\"/\")\n@app.route(\"/index\")\n@app.route(\"/home\")\ndef index():\n    return render_template(\"index.html\")\n\n\n# register page\n@app.route(\"/register\")\ndef register():\n    return render_template(\"register.html\")\n\n\n# register form\n@app.route('/registerUser', methods=['GET', 'POST'])\ndef registerUser():\n    try:\n        user_name = request.form['user_name']\n        email = request.form['email']\n        pwd = request.form['pwd']\n        pwd2 = request.form['pwd2']\n\n        print(user_name)\n        print(email)\n        print(pwd)\n        print(pwd2)\n\n        msg = \"\"\n\n        if pwd != pwd2:\n            msg = \"Password and Confirm Password do not match! Please try again.\"\n            return render_template(\"register.html\", message = msg)\n\n        elif not re.match(r'[A-Za-z0-9]+', user_name):\n            msg = \"User Name must contain only characters and numbers! Please try again.\"\n            return render_template(\"register.html\", message = msg)\n\n        else:\n            userJson = {\n                \"user_name\": user_name,\n                \"email\": email,\n                \"password\": pwd,\n                \"role\": \"user\"\n            }\n            print(userJson)\n\n            output = User.insertUser(userJson)\n\n            return render_template(\"register.html\", message = \"Thank you! You have successfully registered.\")\n\n    except Exception as err:\n        print(err)\n        return render_template(\"register.html\", message = \"Error! Please try again.\")\n\n\n# login page\n@app.route(\"/login\")\ndef login():\n    return render_template(\"login.html\")\n\n\n# login form\n@app.route('/loginUser', methods=['POST'])\ndef loginUser():\n    try:\n        email = request.form['email']\n        pwd = request.form['pwd']\n\n        print(email)\n        print(pwd)\n\n        output = User.loginUser({\"email\": email, \"password\":pwd})\n        print(output)\n\n        jsonUser = User.getUserId(email)\n        user_id = jsonUser[0]['user_id']\n        user_name = jsonUser[0]['user_name']\n\n        print(user_id)\n        print(user_name)\n        \n        if output[\"jwt\"] == \"\":\n            return render_template(\"login.html\", message = \"Invalid Login Credentials! Please try again.\")\n        \n        else:\n            resp = make_response(render_template(\"viewPreds.html\", user_id = user_id, user_name = user_name))\n            resp.set_cookie('jwt', output[\"jwt\"])\n            \n            return resp\n    except Exception as err:\n        print(err)\n        return render_template(\"login.html\",message=\"Error! Please try again.\")\n\n\n# prediction page\n@app.route('/viewPreds.html')\n@login_required\ndef searchPred():\n    try:\n        user_id=request.args.get(\"user_id\")\n        user_name = request.args.get(\"user_name\")\n\n        print(user_id)\n        print(user_name)\n        \n        jsonPreds = Pred.getPredByUser(user_id)\n        #print(jsonPreds)\n\n        return render_template(\"viewPreds.html\", preds = jsonPreds, user_id = user_id, user_name = user_name)\n    \n    except Exception as err:\n        print(err)\n        return render_template(\"viewPreds.html\", user_id = user_id, user_name = user_name)\n\n\n# predict form\n@app.route('/predict', methods = ['GET', 'POST'])\n@login_required\ndef predict():\n    try:\n        sepal_length = request.form['sepal_length']\n        sepal_width = request.form['sepal_width']\n        petal_length = request.form['petal_length']\n        petal_width = request.form['petal_width']\n        user_id = request.form['user_id']\n        user_name = request.form['user_name']\n        \n        msg = \"\"\n\n        print(sepal_length, sepal_width, petal_length, petal_width, user_id)\n\n        # keep all inputs in array\n        test_data = [sepal_length, sepal_width, petal_length, petal_width]\n        print(test_data)\n        \n        # convert value data into numpy array\n        test_data = np.array(test_data)\n        \n        # reshape array\n        test_data = test_data.reshape(1,-1)\n        print(test_data)\n        \n        # open file\n        file = open(\"randomforest_model.pkl\",\"rb\")\n        \n        # load trained model\n        trained_model = joblib.load(file)\n        \n        # predict\n        prediction = trained_model.predict(test_data)\n        \n        print(prediction[0])\n        \n        # save data to sql\n        jsonPreds = Pred.insertPred(user_id, sepal_length, sepal_width, petal_length, petal_width, prediction[0])\n        print(jsonPreds)\n        \n        return render_template(\"viewPreds.html\", prediction = prediction,\n                               preds = jsonPreds,\n                               sepal_length = sepal_length,\n                               sepal_width = sepal_width,\n                               petal_length = petal_length,\n                               petal_width = petal_width,\n                               user_id = user_id,\n                               user_name = user_name)\n\n    except Exception as err:\n        print(err)\n        msg = \"To make prediction, please fill in all fields with numeric values from 0 to 8, then click on 'Predict' button. To see results, click on 'View Prediction Results' button.\"\n        return render_template(\"viewPreds.html\", user_id = user_id, user_name = user_name, message = msg)\n\n\n# delete predict\n@app.route('/deletePred', methods = ['POST'])\n@login_required\ndef deletePred():\n    try:\n        user_id = request.args.get(\"user_id\")\n        user_name = request.args.get(\"user_name\")\n\n        print(user_id)\n        print(user_name)\n        \n        pred_id = request.args.get(\"pred_id\")\n        print(pred_id)\n\n        output = Pred.deletePred(pred_id)\n        print(output)\n\n        resp = make_response(render_template(\"viewPreds.html\", user_id = user_id, user_name = user_name))\n        return resp\n    \n    except Exception as err:\n        print(err)\n        return render_template(\"login.html\")\n\n\n# request reset password page\n@app.route(\"/reset\")\ndef reset():\n    return render_template(\"resetReq.html\")\n\n\n# resetReq form\n@app.route('/resetReq', methods=['GET', 'POST'])\ndef resetReq():\n    try:\n        email = request.form['email']\n        print(email)\n        \n        msg = \"\"\n\n        output = User.resetReq({\"email\": email})\n        print(output)\n\n        if output[\"valid\"] == \"yes\":\n            msg = \"Reset email sent! > testing: /resetPwd.html\"\n        else:\n            msg = \"Account not found! Please try again\"\n        \n        return render_template(\"resetReq.html\", message = msg)\n\n    except Exception as err:\n        print(err)\n        return render_template(\"resetReq.html\", message = \"Error! Please try again\")\n\n\n# change password\n@app.route('/reset_pwd', methods=['GET', 'POST'])\ndef resetPwd():\n    try:\n        # hard-coded user_name and email for testing use\n        user_name = request.form['user_name']\n        email = request.form['email']\n        \n        password = request.form['pwd']\n        password2 = request.form['pwd2']\n\n        print(user_name)\n        print(email)\n        print(password)\n        print(password2)\n\n        msg = \"\"\n\n        if password != password2:\n            msg = \"Password and Confirm Password do not match! Please try again.\"\n            return render_template(\"resetPwd.html\", message = msg)\n\n        else:\n            output = User.resetPwd(password, email)\n            print(output)\n            return render_template(\"resetPwd.html\", message = \"You have successfully reset your password!\")\n\n    except Exception as err:\n        print(err)\n        return render_template(\"resetPwd.html\", message = \"Error! Please try again.\")\n\n\n# admin page\n@app.route('/admin.html')\n@login_required\n@admin_required\ndef searchUser():\n    try:\n        user_name = request.args.get(\"search\")\n        # print(user_name)\n\n        jsonUsers = User.searchUser(user_name)\n        # print(jsonUsers)\n\n        return render_template(\"admin.html\", users=jsonUsers, search=user_name)\n    except Exception as err:\n        print(err)\n        return render_template(\"admin.html\")\n\n\n# user details page\n@app.route('/userDetails.html')\n@login_required\n@admin_required\ndef getUserDetails():\n    try:\n        user_id = request.args.get(\"user_id\")\n        \n        jsonUser = User.getUser(user_id)\n        # print(jsonUser)\n\n        jsonPreds = Pred.getPredByUser(user_id)\n        # print(jsonPreds)\n\n        return render_template(\"userDetails.html\", user=jsonUser[0], preds=jsonPreds)\n    except Exception as err:\n        print(err)\n        return redirect(\"admin.html?search=\")\n\n\n# remove predict record\n@app.route('/removePred', methods = ['POST'])\n@login_required\n@admin_required\ndef removePred():\n    try:\n        pred_id = request.args.get(\"pred_id\")\n        print(pred_id)\n\n        output = Pred.deletePred(pred_id)\n        print(output)\n\n        return redirect(\"admin.html?search=\")\n    except Exception as err:\n        print(err)\n        return redirect(\"admin.html?search=\")\n\n\n# remove user\n@app.route('/removeUser', methods = ['POST'])\n@login_required\n@admin_required\ndef removeUser():\n    try:\n        user_id = request.args.get(\"user_id\")\n        print(user_id)\n\n        output = User.deleteUser(user_id)\n        print(output)\n\n        return redirect(\"admin.html?search=\")\n    except Exception as err:\n        print(err)\n        return redirect(\"admin.html?search=\")\n\n\n# change user's role to 'admin'\n@app.route('/changeRole', methods = ['POST'])\n@login_required\n@admin_required\ndef changeRole():\n    try:\n        user_id = request.args.get(\"user_id\")\n        print(user_id)\n\n        # role is 'admin'\n        role = \"admin\"\n\n        output = User.changeRole(role, user_id)\n        print(output)\n\n        return redirect(\"admin.html?search=\")\n    except Exception as err:\n        print(err)\n        return redirect(\"admin.html?search=\")\n\n\n# logout\n@app.route('/logout')\ndef logout():\n    resp = make_response(redirect(\"/\"))\n    resp.delete_cookie('jwt')\n    \n    return resp\n\n\n@app.route('/<url>')\ndef staticPage(url):\n    try:\n        return render_template(url)\n    except Exception as err:\n        abort(404)\n\n\n@app.errorhandler(404)\ndef page_not_found(e):\n    # note that we set the 404 status explicitly\n    return render_template('404.html'), 404\n\n\nif __name__ == '__main__':\n    app.run(debug=True)\n\n","repo_name":"22lirong/sgus-iris","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":10658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"15260670689","text":"import matplotlib.pyplot as plt \nimport numpy as np \n\n#Derivada Progressiva\n\ndef f(x):\n return x**2\n\ndef f2(x):\n return x**3\n\ndef derivada_progressiva(funcao,h,x):\n return (funcao(x+h) - funcao(x)) / h\n\nh = 0.0001\nx = 2\ndfdx = derivada_progressiva(f,h,x)\nprint(f'{dfdx:.3f}')\nprint('\\n')\n\nh_2 = 0.1\nx_2 = 2\ndfdx_2 = derivada_progressiva(f2,h,x)\nprint(f'{dfdx_2:.3f}')\nprint('\\n')\n\nvalor_real = 12\nerro_relativo = np.abs(valor_real - dfdx_2) / np.abs(valor_real)*100\nprint(f'{erro_relativo}')\nprint('\\n')\n#Derivada Regressiva\n\ndef derivada_regressiva(funcao,h,x):\n return (funcao(x) - funcao(x-h)) / h\n\nh = 0.1 \nx_3 = 2\ndfdx_3 = derivada_regressiva(f2,h,x)\nprint(f'{dfdx:.3f}')\nprint('\\n')\n\nerro_relativo = np.abs(valor_real - dfdx_3) / np.abs(valor_real)*100\nprint(f'{erro_relativo}')\nprint('\\n')\n\n#Derivada Central\n\ndef derivada_central(funcao,h,x):\n return (funcao(x+h) - funcao(x-h)) / (2*h)\n\nh = 0.1\nx_4 = 2\ndfdx_4 = derivada_central(f2,h,x)\nprint(f'{dfdx_4:.3f}')\nprint('\\n')\n\n#Gráfico\n\nh = 0.1\nx = np.arange(0,2*np.pi,h)\ndfdx_5 = derivada_central(np.cos,h,x)\ndfdx_6 = derivada_progressiva(np.cos,h,x)\ndfdx_7 = derivada_regressiva(np.cos,h,x)\n\nplt.figure(figsize=(12,8))\nplt.plot(x,dfdx_5,label='Central')\nplt.plot(x,dfdx_6,label='Progressiva')\nplt.plot(x,dfdx_7,label='Regressiva')\nplt.scatter(x,-np.sin(x),label='-Seno')\nplt.legend()\n\nplt.grid()\nplt.show()\n\n#\nt = np.arange(6)\np = np.array([0,5,20,45,80,125], dtype=float)\nidx = 2\n\n\ndef derivada_discreta(x,y):\n v = []\n for i in range(len(x)):\n if i!=0 and i!=len(x)-1:\n d = (y[i+1] - y[i-1]) / (x[i+1] - x[i-1])\n elif i==0:\n d = (y[i+1] - y[i]) / (x[i+1] - x[i])\n elif i == len(x) - 1:\n d = (y[i] - y[i-1]) / (x[i] - x[i-1])\n v.append(d)\n return v\n\ndxdy_8 = derivada_discreta(t,p)\n\nx = np.linspace(0,2*np.pi,20)\ny = np.cos(x)\n\ndxdy_9 = derivada_discreta(x,y)\n\nplt.figure(figsize=(12,8))\nplt.scatter(x,dxdy_9,s=100,c='red')\nplt.plot(x,-np.sin(x))\nplt.grid()\nplt.show()","repo_name":"RankracerBR/Matplotlib-Numpy","sub_path":"COLLEGE/Derivadas_Numericas.py","file_name":"Derivadas_Numericas.py","file_ext":"py","file_size_in_byte":1971,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10757753341","text":"from queue import PriorityQueue as pq\nfrom copy import deepcopy\n\n# Create a class called Node\nclass Node(object):\n # Constructor\n def __init__(self, board: list[list[int]], move, parent=None):\n self.board = [\n [x for x in row] for row in board\n ] # 2D matrix representing checker baord\n self.move = move\n self.parent = parent\n self.f = move + self.manhattan_distance() if move else self.manhattan_distance()\n\n # Returns the position of the empty block\n def zero_position(self):\n # Your code here\n board = self.board\n for i, row in enumerate(board):\n for j, val in enumerate(row):\n if val == 0:\n return (i, j)\n return None\n\n # Returns the manhattan distance (You can implement any other alternative too)\n def manhattan_distance(self):\n # Your code here\n board = self.board\n n = len(board)\n manhattan = 0\n for i, row in enumerate(board):\n for j, val in enumerate(row):\n ei, ej = val // n, val % n\n manhattan += abs(i - ei) + abs(j - ej)\n return manhattan\n\n # This function will generate and return nodes.\n def generate_nodes(self):\n allnodes = [] # an array that will store nodes.\n zero_position = self.zero_position()\n board = self.board\n zi, zj = 
self.zero_position()\n moves = [(0, 1), (0, -1), (1, 0), (-1, 0)]\n for di, dj in moves:\n si, sj = zi + di, zj + dj\n if 0 <= si < len(board) and 0 <= sj < len(board):\n board[zi][zj], board[si][sj] = board[si][sj], board[zi][zj]\n allnodes.append(Node(board, self.move + 1, self))\n board[zi][zj], board[si][sj] = board[si][sj], board[zi][zj]\n return allnodes\n\n # Store all the parents \"traceback\" of a node.\n def traceback(self):\n path = []\n curr = self\n while curr:\n path.append(curr)\n curr = curr.parent\n path.reverse()\n return path # returns a list of objects.\n\n def __lt__(self, value):\n return self.f < value.f\n\n\ndef is_end(board):\n n = len(board)\n for i, row in enumerate(board):\n for j, val in enumerate(row):\n if val != n * i + j:\n return False\n return True\n\n\ndef invert_random_pair(board):\n board = [[x for x in row] for row in board]\n i1, j1 = None, None\n i2, j2 = None, None\n\n for i, row in enumerate(board):\n for j, val in enumerate(row):\n if val != 0:\n if i1 is not None:\n i1, j1 = i, j\n else:\n i2, j2 = i, j\n board[i1][j1], board[i2][j2] = board[i2][j2], board[i1][j1]\n return board\n\n\n# A-star implementation\ndef Astar(initial_node):\n q1 = pq()\n q1.put(initial_node)\n while not q1.empty():\n curr = q1.get()\n if is_end(curr.board):\n return curr.traceback()\n for ne in curr.generate_nodes():\n q1.put(ne)\n return None\n\n\n# Prints the board\ndef draw_board(board):\n # Your code here\n print(\"\\n\".join([\" \".join(map(str, row)) for row in board]))\n\n\n# initial_node = Node([[5, 7, 8], [2, 3, 1], [4, 0, 6]], 0) # Initial state 1\ninitial_node = Node([[2, 3, 8], [4, 0, 6], [1, 7, 5]], 0) # Initial state 1\n\npath = Astar(initial_node)\nprint(\"num of moves :\" + str(len(path)))\nfor p in path:\n print(\"----------------\")\n draw_board(p.board)\n\n# draw_board(initial_node.board)\n# for ne in initial_node.generate_nodes():\n# print(\"-------------------\")\n# print(\"manhattan: \" + str(ne.manhattan_distance()))\n# print(\"zero_position: \" + str(ne.zero_position()))\n# draw_board(ne.board)\n","repo_name":"abhinand5ai/Fluency","sub_path":"AI/HW3/starter_code_Q2.py","file_name":"starter_code_Q2.py","file_ext":"py","file_size_in_byte":3771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33065455196","text":"import plotly.express as px\r\nimport csv \r\nimport numpy as np \r\n\r\nwith open(\"data2.csv\") as f:\r\n df=csv.DictReader(f)\r\n fig=px.scatter(df,x=\"Size of TV\",y=\"Time\")\r\n fig.show()\r\n\r\ndef getdatasource(dp):\r\n size=[]\r\n time=[]\r\n with open(dp) as g:\r\n reader=csv.DictReader(g)\r\n for r in reader:\r\n size.append(float(r[\"Size of TV\"]))\r\n time.append(float(r[\"Time\"]))\r\n return{\"x\":size,\"y\":time}\r\ndef findcorrelation(ds):\r\n cl=np.corrcoef(ds[\"x\"],ds[\"y\"])\r\n print(\"correalation between size v/s time :\",cl[0,1])\r\ndef setup():\r\n dp=\"data2.csv\"\r\n ds=getdatasource(dp)\r\n findcorrelation(ds)\r\nsetup()","repo_name":"Saikat1462/P-106","sub_path":"main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37988539951","text":"from nltk.tag import TaggerI, untag\n\nclass ClassifierTagger(TaggerI):\n def __init__(self, feature_extractor, classifier):\n self.feature_extractor = feature_extractor\n self.classifier = classifier\n\n def tag(self,sent):\n history=[]\n\n for i,word in enumerate(sent):\n featureset = 
+{"seq_id":"15630631762","text":"from errors import CompilerError\nfrom node_utils import ExplorationError, NodeOptimizer\nfrom evm_asm import evm_opcodes, LATEST_VERSION  # type: ignore\n\nimport lll_ast\n\n\noptimizer: NodeOptimizer = NodeOptimizer(lll_ast.LLLNode)\n\n\ndef optimize(\n    node: lll_ast.LLLNode, version: str = LATEST_VERSION, num_iterations: int = 200\n) -> lll_ast.LLLNode:\n    if version not in evm_opcodes:\n        raise CompilerError(f\"Fork '{version}' is not supported\")\n\n    vm = evm_opcodes[version]\n\n    for _ in range(num_iterations):\n        try:\n            node = optimizer.update(node, vm)\n        except ExplorationError as e:\n            raise CompilerError(\"Optimization Failed\") from e\n\n        if node is None:\n            raise CompilerError(\"Optimization Failed\")\n\n    return node\n","repo_name":"fubuloubu/lll-compiler","sub_path":"lll_optimizer.py","file_name":"lll_optimizer.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"22014222424","text":"import pandas as pd\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nplt.style.use(\"seaborn-deep\")\r\nimport sys\r\nsys.path.append(\"..\")\r\n\r\ndef plot_labels_distribution(labels):\r\n    sumlabs = labels.aggregate(\"sum\").sort_values()\r\n    plt.barh(sumlabs.index, sumlabs, align=\"center\")\r\n    plt.ylabel(\"Compounds\")\r\n    plt.xticks(rotation=45)\r\n    plt.xlabel(\"Count in Training Set\")\r\n    plt.title(\"Compounds Represented in Training Set\")\r\n    plt.show()\r\n\r\ndef plot_heatmap(labels):\r\n    sns.heatmap(labels.corr())\r\n    plt.show()\r\n\r\nif __name__ == \"__main__\":\r\n    labels = pd.read_csv(\"data/train_labels.csv\", index_col=\"sample_id\")\r\n    plot_labels_distribution(labels)\r\n    plot_heatmap(labels)","repo_name":"RaviShah1/Mars-Spectrometry-Data-Driven","sub_path":"plots/labels_plots.py","file_name":"labels_plots.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"41695596664","text":"#!/usr/bin/python3\n\"\"\"\nAlice and Bob have candy bars of different sizes: A[i] is the size of the i-th\nbar of candy that Alice has, and B[j] is the size of the j-th bar of candy that\nBob has.\n\nSince they are friends, they would like to exchange one candy bar each so that\nafter the exchange, they both have the same total amount of candy. (The total\namount of candy a person has is the sum of the sizes of candy bars they have.)\n\nReturn an integer array ans where ans[0] is the size of the candy bar that Alice\nmust exchange, and ans[1] is the size of the candy bar that Bob must exchange.\n\nIf there are multiple answers, you may return any one of them. It is guaranteed\nan answer exists.\n\nExample 1:\n\nInput: A = [1,1], B = [2,2]\nOutput: [1,2]\nExample 2:\n\nInput: A = [1,2], B = [2,3]\nOutput: [1,2]\nExample 3:\n\nInput: A = [2], B = [1,3]\nOutput: [2,3]\nExample 4:\n\nInput: A = [1,2,5], B = [2,4]\nOutput: [5,4]\n\nNote:\n\n1 <= A.length <= 10000\n1 <= B.length <= 10000\n1 <= A[i] <= 100000\n1 <= B[i] <= 100000\nIt is guaranteed that Alice and Bob have different total amounts of candy.\nIt is guaranteed there exists an answer.\n\"\"\"\nfrom typing import List\nimport bisect\n\n\nclass Solution:\n    def fairCandySwap(self, A: List[int], B: List[int]) -> List[int]:\n        \"\"\"\n        It is a search problem. Use set as search.\n        \"\"\"\n        sum_A = sum(A)\n        sum_B = sum(B)\n        diff = (sum_B - sum_A) // 2  # it can be negative or positive\n        set_B = set(B)\n        for a in A:\n            if a + diff in set_B:\n                return [a, a + diff]\n        \n        raise\n\n    def fairCandySwap_complex(self, A: List[int], B: List[int]) -> List[int]:\n        \"\"\"\n        sum, to figure out the target O(N)\n        exchange one\n        exchange is (sum - target) + constant\n        it is a search problem\n        \"\"\"\n        sum_A = sum(A)\n        sum_B = sum(B)\n        if sum_A > sum_B:\n            return self.fairCandySwap(B, A)[::-1]\n\n        A.sort()\n        B.sort()\n        diff = (sum_B - sum_A) // 2\n        for a in A:\n            i = bisect.bisect_left(B, a + diff)\n            if i < len(B) and B[i] == a + diff:\n                return [a, a + diff]\n\n        raise\n","repo_name":"algorhythms/LeetCode","sub_path":"888 Fair Candy Swap.py","file_name":"888 Fair Candy Swap.py","file_ext":"py","file_size_in_byte":2188,"program_lang":"python","lang":"en","doc_type":"code","stars":843,"dataset":"github-code","pt":"81"}
+{"seq_id":"7887612024","text":"from sys import stdin, stdout\n\n# 0 1 2 3 4 5 6 7 8 9\n# 9 8 7 6 5 4 3 2 1 (0) 9\n# 1 2 3 4 5 6 7 8 (9) 8\n# 8 7 6 5 4 3 2 (1) 7\n# 2 3 4 5 6 7 (8) 6\n# 7 6 5 4 3 (2) 5\n# ..........\nt = int(stdin.readline())\nfor _ in range(t):\n    n = int(stdin.readline())\n    a = stdin.readline().strip()\n    b = stdin.readline().strip()\n\n    ans = []\n    idx = 0\n    flip = False\n    for i in range(n-1, -1, -1):\n        if (not flip and a[idx] == b[i]) or (flip and a[idx] != b[i]):\n            ans.append(1)\n        ans.append(i+1)\n\n        if flip:\n            idx -= i\n        else:\n            idx += i\n\n        flip = not flip\n\n    stdout.write(str(len(ans)) + ' ')\n    if len(ans) > 0:\n        stdout.write(' '.join(map(str, ans)) + '\\n')\n","repo_name":"tycyd/codeforces","sub_path":"implementation/1381A2 Prefix Flip.py","file_name":"1381A2 Prefix Flip.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"3274289010","text":"import torch\nimport torchvision.models as models\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\n\n# 加载 ImageNet 数据集\ntraindir = '/path/to/train/data'\nvaldir = '/path/to/validation/data'\ntrain_dataset = datasets.ImageFolder(traindir, transforms.Compose([\n    transforms.RandomResizedCrop(224),\n    transforms.RandomHorizontalFlip(),\n    transforms.ToTensor(),\n    transforms.Normalize(mean=[0.485, 0.456, 0.406],\n                         std=[0.229, 0.224, 0.225])\n]))\ntest_dataset = datasets.ImageFolder(valdir, transforms.Compose([\n    transforms.Resize(256),\n    transforms.CenterCrop(224),\n    transforms.ToTensor(),\n    transforms.Normalize(mean=[0.485, 0.456, 0.406],\n                         std=[0.229, 0.224, 0.225])\n]))\ntrain_loader = torch.utils.data.DataLoader(train_dataset, batch_size=64, shuffle=True, num_workers=4, pin_memory=True)\ntest_loader = torch.utils.data.DataLoader(test_dataset, batch_size=64, shuffle=False, num_workers=4, pin_memory=True)\n\n# 加载预训练的 VGG-16 模型\nmodel = models.vgg16(pretrained=True)\n\n# 设置模型为评估模式\nmodel.eval()\n\n# 在测试集上进行测试\ncorrect = 0\ntotal = 0\nwith torch.no_grad():\n    for data in test_loader:\n        images, labels = data\n        outputs = model(images)\n        _, predicted = torch.max(outputs.data, 1)\n        total += labels.size(0)\n        correct += (predicted == labels).sum().item()\nprint('Accuracy of the VGG-16 model on the ImageNet test images: %d %%' % (100 * correct / total))\n","repo_name":"guanrenyang/TW_TVM","sub_path":"Pruning/VGG_PyTorch/sensitivity_analysis.py","file_name":"sensitivity_analysis.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"25664716522","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jan 10 22:38:01 2019\r\n\r\n@author: Mehdi\r\ne\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jan 10 19:23:54 2019\r\nersres\r\n@author: Mehdi\r\n\"\"\"\r\n#importation des bibliotheques à utiliser:\r\n#importing all the needed modules\r\nimport numpy as np\r\nimport mesFonctions as fct\r\nfrom nltk.tokenize import sent_tokenize\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nfrom sklearn.metrics import accuracy_score,classification_report\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.neural_network import MLPClassifier\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.pipeline import Pipeline\r\nfrom sklearn.model_selection import ShuffleSplit\r\nfrom joblib import dump, load\r\nfrom sklearn.model_selection import cross_val_score\r\nimport tkinter as tk\r\n\r\n\r\n#dictionnaire des langues avec leurs keys:\r\n#dictionary of languages and their keys:\r\n\r\ndict_langues = {1.0 : \"Français\", 2.0 : \"Portugais\" , 3.0 : \"Italien\" , 4.0 : \"Espagnol\"}\r\n\r\n\r\n#programme appelant:\r\n#Main program:\r\nif __name__ == \"__main__\":\r\n\r\n#fonction de la bibliotheque mesFonctions effectuant le cleaning:\r\n#method from 'mesFonctions module for the cleaning process:\r\n    \r\n    textFrancais = fct.fileCleaning('txt/fr/ep-00-01-18.txt',\"fracaisClean3\")\r\n    textItalien = fct.fileCleaning('txt/it/ep-00-04-11.txt',\"italienClean2\")\r\n    textEspagnol = fct.fileCleaning('txt/es/ep-00-04-12.txt',\"espagnolClean\")\r\n    textPortugais = fct.fileCleaning('txt/pt/ep-00-01-18.txt',\"portugaisClean\")\r\n    #print(text)\r\n    listSentenceFrancais = sent_tokenize(textFrancais)\r\n    listSentenceItalien = sent_tokenize(textItalien)\r\n    listSentenceEspagnol = sent_tokenize(textEspagnol)\r\n    listSentencePortugais = sent_tokenize(textPortugais)\r\n    \r\n    #print(listSentence)\r\n    \r\n\r\n#La fonction 'listSentenceEtiquete' permet d'etiqueter les textes:\r\n#function for labeling the each sentetence to its language:\r\n\r\n    TFrancais = fct.listSetenceEtiquete(listSentenceFrancais,1)\r\n    TPortugais = fct.listSetenceEtiquete(listSentencePortugais,2)\r\n    TItalien = fct.listSetenceEtiquete(listSentenceItalien,3)\r\n    TEspagnol = fct.listSetenceEtiquete(listSentenceEspagnol,4)\r\n    \r\n    \r\n#Pour regrouper tous les phrases des textes en une seule matrice. Cette matrice par la suite sera utilisée comme data set:\r\n#concatenate all the sentences to use it as one corpus for the training part:\r\n    langues = np.concatenate((TFrancais,TPortugais,TItalien,TEspagnol))\r\n\r\n    #print(langues)\r\n\r\n#preparer la dataset/dataFrame pour extraire les n-grams en utilisant le tf-idfVectorizer:\r\n#preparing the dataframe for the extaction of the n-gram letters, but before that we have to schuffle the sentences:\r\n    \r\n    languesModel=fct.melangerLangues(langues)\r\n    df_langues = pd.DataFrame(languesModel)\r\n    df_langues.columns = ['Phrases', 'clé_langue']\r\n    df_langues['clé_langue'] = df_langues['clé_langue'].apply(float)\r\n    df_langues['langue'] = df_langues['clé_langue'].map(dict_langues)\r\n\r\n\r\n    X = df_langues['Phrases']\r\n    y_labels = df_langues['clé_langue']\r\n\r\n    #pour visualiser le dataFrame:\r\n    '''print(df_langues.head(10))'''\r\n\r\n    #on voit si on a un nombre equitable de phrases pour chaque langue\r\n    #here we have to check that there is the same number of sentences for each language:\r\n    '''\r\n    clé_langue, nombrePhrase = np.unique(y_labels, return_counts=True)\r\n    print(\"\\n\\n\",dict(zip(clé_langue, nombrePhrase)))\r\n    \r\n    '''\r\n#Phase d'apprentissage:\r\n#Training set:\r\n    \r\n    \r\n    #on fait le split du dataset, 0.7 pour l'entrainement et 0.3 pour le test:\r\n    X_train, X_test, y_train, y_test = train_test_split(X, y_labels, test_size = 0.3, random_state = 0)\r\n    \r\n\r\n    \r\n    #print(\"La taille des features : \",len(X))\r\n    \r\n    #TfIdfvectorizer donne des features qui seront les n-grams du dataset: \r\n    #algo = MLPClassifier(solver='sgd', hidden_layer_sizes=(12,),activation = 'identity')\r\n    algo = MLPClassifier(solver='lbfgs', hidden_layer_sizes=(100,), activation = 'relu')\r\n    vectorizer = TfidfVectorizer(analyzer='char',token_pattern = r\"(?u)\\b\\w+\\b\", ngram_range=(2,3))\r\n    \r\n    #On utilise le Pipeline pour combiner le return du TfidVectorizer avec le input de MLPClassifier:\r\n    classification_text = Pipeline([('tfidfVectorizer', vectorizer),('algo', algo)])\r\n\r\n    #l'algo MLPClassifier effectue la phase de l'apprentissage\r\n    classification_text.fit(X_train, y_train)\r\n    \r\n    #Pour la validation set:\r\n    \r\n    cv = ShuffleSplit(n_splits=4, test_size=0.1, random_state=0)\r\n    cv_score = cross_val_score(classification_text, X_train, y_train, cv=cv)  \r\n    print(\"\\n\\nValidation set : \",cv_score)\r\n    \r\n    #Sauvegarder le modele:\r\n    \r\n    filename = 'D:/ENSIAS_M/2ème année/ProjetIA/modele'\r\n    '''\r\n    dump(classification_text, filename+\".joblib\")\r\n    '''\r\n    \r\n    #Phase de test:\r\n    \r\n    modele = classification_text\r\n    #modele = load(filename+\".joblib\")\r\n    \r\n\r\n\r\n    \r\n    prediction = modele.predict(X_test)\r\n    \r\n\r\n    matConf = confusion_matrix(y_test,prediction)\r\n    #print(matConf)\r\n    \r\n#affichage de la matrice de confusion:\r\n    \r\n    \r\n    fig, ax = plt.subplots(figsize=(2.5, 2.5))\r\n    ax.matshow(matConf, cmap=plt.cm.Blues, alpha=0.3)\r\n    \r\n    for i in range(matConf.shape[0]):\r\n        for j in range(matConf.shape[1]):\r\n            ax.text(x=j, y=i,s=matConf[i, j], va='center', ha='center')\r\n\r\n    plt.xlabel('la classe predite (predict labels)')\r\n    plt.ylabel('les vrais labels (True labels)')\r\n    plt.show()\r\n\r\n    \r\n#la precision de l'algo et le rapport de classification:\r\n#the accuracy and the classification report \r\n\r\n    \r\n    \r\n    print(\"\\n\\nLa précison de l'algorithme utilisé : \",accuracy_score(y_test,prediction))\r\n    print(\"\\n\\n\",classification_report(y_test, prediction, target_names=dict_langues.values()))\r\n    #print(prediction)\r\n    \r\n    \r\n\r\n#phase de prediction:\r\n#predecting set:\r\n\r\n    def predictionPhase():\r\n        phrase = entry_field1.get()\r\n        phrase = sent_tokenize(phrase)\r\n        phrase = fct.listSetenceEtiquete(phrase,0)\r\n        #print(phrase)\r\n        df_phrase = pd.DataFrame(phrase)\r\n        #print(df_phrase)\r\n        \r\n        #df_phrase.clomns = ['phrase_input', 'clé']\r\n        #display(df_phrase)\r\n        \r\n        X_aPredire = df_phrase[0]\r\n        \r\n        \r\n        #print(X_aPredire)\r\n        prediction = modele.predict(X_aPredire)\r\n        for e in prediction:\r\n            cle = e\r\n        return(\"\\n\\nLa phrase saisie est du : \"+dict_langues[cle])\r\n    \r\n    def predictionDisplay():\r\n        \r\n        pred = predictionPhase()\r\n        \r\n        #pour l'affichage du text field dans l'interface:\r\n        pred_display = tk.Text(master = window, height=5, width=50,bg='light cyan')\r\n        \r\n        pred_display.grid(column = 2, row = 6)\r\n        \r\n        pred_display.insert(tk.END, pred)\r\n    \r\n    \r\n    window = tk.Tk()\r\n    window.title(\"Projet Machine Learning pour detection des langues\")\r\n    window.geometry(\"1200x400\")\r\n    window.configure(background = \"azure\")\r\n    #Label:\r\n    label1 = tk.Label(text = \"Insérez une phrase : \", font = (\"Times New Roman\",13), bg = \"LightSkyBlue1\", width = 20)\r\n    label1.grid(column = 0, row = 2)\r\n\r\n    #button\r\n    bouton1 = tk.Button(text = 'Click', command = predictionDisplay, width = 11, font=('Tahoma',10))\r\n    bouton1.grid(column = 2, row =3)\r\n\r\n    #entry field:\r\n    entry_field1 = tk.Entry(width = 100)\r\n    entry_field1.grid(column=2,row=2)\r\n\r\n\r\n    window.mainloop()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"MBelch/Language-identification-ML-NeuralNet","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":7601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"5909162751","text":"\nTaxonPipeline = [\n\t{\"$lookup\":\n\t\t{\"from\": \"taxon_node\",\n\t\t\"localField\": \"children\",\n\t\t\"foreignField\": \"_id\",\n\t\t\"as\": \"children\",\n\t\t}\n\t},\n\t{\"$project\": \n\t\t{\"_id\":0}\n\t}\n]\n\nOrganismPipeline = [\n\t{\"$lookup\":\n\t\t{\"from\": \"genome\",\n\t\t\"localField\": \"genomes\",\n\t\t\"foreignField\": \"_id\",\n\t\t\"as\": \"genomes\",\n\t\t}\n\t},\n\t{\"$lookup\":\n\t\t{\"from\": \"annotation\",\n\t\t\"localField\": \"annotations\",\n\t\t\"foreignField\": \"_id\",\n\t\t\"as\": \"annotations\",\n\t\t}\n\t},\n\t{\"$lookup\":\n\t\t{\"from\": \"param_file\",\n\t\t\"localField\": \"assemblies\",\n\t\t\"foreignField\": \"param_files\",\n\t\t\"as\": \"param_files\",\n\t\t}\n\t},\n\t# {\"$lookup\":\n\t# \t{\"from\": \"taxon_node\",\n\t# \t\"localField\": \"taxon_lineage\",\n\t# \t\"foreignField\": \"_id\",\n\t# \t\"as\": \"taxon_lineage\",\n\t# \t}\n\t# },\n\t{\"$project\": \n\t\t{\"_id\":0, \n\t\t'taxon_lineage':0,\n\t\t\"genomes\" : {\"_id\":0},\n\t\t\"annotations\": {\"_id\":0, \"created\":0},\n\t\t\"param_files\":{\"_id\":0}\n\t\t}\n\t}\n]\n\nSamplePipeline = [\n\t{\"$lookup\":\n\t\t{\"from\": \"secondary_organism\",\n\t\t\"localField\": \"specimens\",\n\t\t\"foreignField\": \"_id\",\n\t\t\"as\": \"specimens\",\n\t\t}\n\t},\n\t{\"$lookup\":\n\t\t{\"from\": \"experiment\",\n\t\t\"localField\": \"experiments\",\n\t\t\"foreignField\": \"_id\",\n\t\t\"as\": \"experiments\",\n\t\t}\n\t},\n\t{\"$lookup\":\n\t\t{\"from\": \"assembly\",\n\t\t\"localField\": \"assemblies\",\n\t\t\"foreignField\": \"_id\",\n\t\t\"as\": \"assemblies\",\n\t\t}\n\t},\n\t{\"$project\": \n\t\t{\"_id\":0, \n \"created\":0,\n\t\t\"specimens\": {\"_id\":0,\"assemblies\":0,\"experiments\":0,\"specimens\":0},\n\t\t\"assemblies\" : {\"_id\":0},\n\t\t\"experiments\": {\"_id\":0}\n\n\t\t}\n\t}\n]","repo_name":"guigolab/gene-predictions-web","sub_path":"server/utils/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
\"created\":0,\n\t\t\"specimens\": {\"_id\":0,\"assemblies\":0,\"experiments\":0,\"specimens\":0},\n\t\t\"assemblies\" : {\"_id\":0},\n\t\t\"experiments\": {\"_id\":0}\n\n\t\t}\n\t}\n]","repo_name":"guigolab/gene-predictions-web","sub_path":"server/utils/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72496298826","text":"'''\nAuthor: Aaron Mc Nany\nDate: 4/19/2018\nClass: ISTA 130\nSection Leader: Sam Hoeger\n\nDescription:\nAppending existing dictionaries with new information from a file\nand creating a report based on the most common emoticon.\n'''\n\n\n\n \n \ndef load_twitter_dicts_from_file(filename, emoticons_to_ids, ids_to_emoticons):\n '''\n This function takes three parameters, which are all strings.\n The first represents the name of a file from which we will be\n gathering information to append the following two parameters, \n which are existing dictionaries that are fed to us. All the\n function does is add any values from the file, that are not\n in their current respective dictionaries, to said respective\n dictionaries.\n '''\n \n f_in = open(filename)\n \n for line in f_in:\n line = line.replace(\"\\\"\", \"\")\n line = line.strip().split()\n \n if line[0] not in emoticons_to_ids:\n emoticons_to_ids[line[0]]=[]\n emoticons_to_ids[line[0]].append((line[2]))\n \n if (line[2] not in ids_to_emoticons):\n ids_to_emoticons[line[2]]=[]\n ids_to_emoticons[line[2]].append((line[0]))\n \n f_in.close()\n\ndef find_most_common(dict):\n '''\n This function requires one parameter that is the dictionary\n through which we will search in order to find the most\n frequently used emoticon in said dictionary. The function\n will print out the emoticon with the number of times it\n occurs as well as return the name of the key.\n '''\n \n length=0\n key_name=\"\"\n \n for key in dict:\n \n if (length len(input_list.data):\n\n original_index = input_list.cur\n input_list.cur += gap\n temp_index = input_list.cur\n print(input_list.cur)\n\n if input_list.eol():\n return\n if input_list.elem() > target:\n return bs(original_index, temp_index, target, input_list)\n if input_list.elem() == target:\n return\n if input_list.elem() < target:\n gap += gap\n # return len(input_list) -1\n\ndef bs(start, end, target, array):\n if start - end >= 0:\n return\n\n current = (start + end)//2\n\n if end - start == 1:\n current = end\n return\n if array.data[current] == target:\n return\n if array.data[current]>target:\n return bs(start, current, target, array)\n if array.data[current] < target:\n return bs(current, end, target, array)\n\n\n##########################################################################\n# Question No. 
2:\n\n# do not change the function heading\n\ndef Logarithmic_merge(index, cut_off, buffer_size):\n def process(LL):\n L_copy = LL[:]\n if len(LL) == 1:\n return L_copy\n for i in range(len(L_copy) - 1):\n if len(L_copy[i]) == len(L_copy[i+1]):\n i_copy = sorted(L_copy[i] + L_copy[i+1])\n L_copy.pop(i)\n L_copy.pop(i)\n L_copy.insert(i, i_copy)\n return L_copy\n return L_copy\n result = [[]]\n index = index[:cut_off]\n# print(index)\n while len(index) != 0:\n j = index.pop(0)\n while 1:\n new = process(result)\n if result == new:\n break\n else:\n result = new\n if len(result[0]) < buffer_size:\n result[0].append(j)\n if len(result[0]) == buffer_size:\n # result.insert(0, [])\n while True:\n new = process(result)\n if result == new:\n break\n else:\n result = new\n result.insert(0, [])\n RR = result[:]\n for k in range(len(result)):\n RR[k] = sorted(result[k])\n return RR\n\ndef decode_gamma(input_str):# do not change the function heading\n output_list = []\n while len(input_str) > 0:\n first_index = input_str.find('0')\n indicator = input_str[:first_index]\n count = 1 + len(indicator)\n binary_str = input_str[len(indicator) + 1: len(indicator) + count]\n binary_str = \"1\" + binary_str\n output = int(binary_str, 2)\n output_list.append(output)\n input_str = input_str[count:]\n input_str = input_str[len(binary_str) - 1:]\n return output_list\n\ndef decode_delta(input_str):# do not change the function heading\n output_list = []\n while len(input_str) > 0:\n first_index = input_str.find('0')\n unary_bin_indicator = input_str[:first_index]\n length_bin_indicator = len(unary_bin_indicator)\n bin_indicator = input_str[first_index + 1: 2*first_index + 1]\n input_str = input_str[2*first_index + 1:]\n bin_indicator = \"1\" + bin_indicator\n indicator = int(bin_indicator, 2)\n indicator = indicator - 1\n bin_output = \"1\" + input_str[:indicator]\n output = int(bin_output, 2)\n input_str = input_str[indicator:]\n output_list.append(output)\n\n return output_list\n\ndef decode_rice(inputs, b):# do not change the function heading\n from math import log\n output_list = []\n while len(inputs) > 0:\n first_index = inputs.find('0')\n indicator = first_index\n # print(indicator)\n q = int(log(b, 2))\n # print(q)\n bin_remain = inputs[indicator + 1: indicator + 1 + q]\n # print(bin_remain)\n remain = int(bin_remain, 2)\n # print(remain)\n output = indicator * b + remain\n # print(output)\n inputs = inputs[first_index + q + 1:]\n # print(inputs)\n output_list.append(output)\n return output_list\n\n##########################################################################\n# Question No. 
1:\ndef binary_search(c, start, end, val):\n index = (start + end) // 2\n current = c.data[index]\n if current == val:\n c.cur = index\n return\n if end - start == 1:\n c.cur = end\n return\n if current < val:\n return binary_search(c, index, end, val)\n else:\n return binary_search(c, start, index, val)\n\n\ndef gallop_to(c, val):\n gap = 1\n while True:\n temp = c.cur\n c.cur += gap\n if c.eol():\n c.cur = len(c.data)-1\n if c.elem() < val:\n c.cur += 1\n return\n current = c.cur\n value = c.elem()\n if value > val:\n return binary_search(c, temp, current, val)\n if value == val:\n return\n gap += gap\n","repo_name":"ZiqinZhouGit/UNSW","sub_path":"Information Retrieval/ass_1.py","file_name":"ass_1.py","file_ext":"py","file_size_in_byte":5142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40194390975","text":"\"\"\"\nproblem tier : Silver 2 (solved.ac)\n\"\"\"\n\nstack = []\n\ndef search(d):\n if d == 6:\n print(' '.join(map(lambda x: str(x), stack)))\n else:\n for s in S:\n if d == 0 or stack[-1] < s:\n stack.append(s)\n search(d+1)\n stack.pop()\n\n\nwhile True:\n inp = list(map(lambda x: int(x), input().split()))\n if inp[0] == 0:\n break\n else:\n global S\n k = inp[0]\n S = inp[1:]\n search(0)\n print()\n\n","repo_name":"hyeongrokheo/baekjoon","sub_path":"solved/[6603] 로또.py","file_name":"[6603] 로또.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11831638915","text":"#!/usr/bin/env python\nimport os\nimport re\nfrom bs4 import BeautifulSoup\n\nroot = os.path.realpath(\n os.path.join(os.getcwd(), os.path.dirname(__file__), \"../build/firefox\")\n)\nregex = re.compile(\"^popup.*\\.js$\")\nold_name = \"\"\n\nfor root, dirs, files in os.walk(root):\n for file in files:\n if regex.match(file):\n old_name = file\n\n\nroot = os.path.realpath(\n os.path.join(os.getcwd(), os.path.dirname(__file__), \"../build/firefox\")\n)\n\nold_hash = old_name.split(\"js\")[0]\nnew_name = old_hash + \"mjs\"\n\nold_path = os.path.join(root, old_name)\nnew_path = os.path.join(root, new_name)\n\nos.rename(old_path, new_path)\n\nwith open(os.path.join(root, \"popup.html\"), \"r+\", encoding=\"utf8\") as html_string:\n data = html_string.read()\n\n soup = BeautifulSoup(data, features=\"html.parser\")\n\n for script in soup.find_all(\"script\"):\n script[\"src\"] = f\"/{new_name}\"\n\n html_string.seek(0)\n html_string.truncate(1)\n html_string.write(str(soup))\n\n html_string.close()\n\nprint(\"\\nConverted to modules.\")\n","repo_name":"XenoverseUp/fractions","sub_path":"scripts/rename_firefox_modules.py","file_name":"rename_firefox_modules.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"} +{"seq_id":"16367877455","text":"from shimpy.core import Extension, Block, NavBlock\nfrom shimpy.core.context import context\nfrom shimpy.core.utils import ReCheck\n\nimport os\nimport re\nimport structlog\n\nlog = structlog.get_logger()\n\n\nclass Handle404(Extension):\n priority = 99\n\n @staticmethod\n def _get_path(path):\n \"\"\"\n >>> Handle404._get_path(\"/static/foo.txt\")\n 'foo.txt'\n >>> Handle404._get_path(\"/foo.txt\")\n 'foo.txt'\n >>> Handle404._get_path(\"/post/list\")\n \"\"\"\n rc = ReCheck()\n if rc.match(\"^(/static)?/([a-zA-Z0-9\\.\\-]+)$\", path):\n return rc.group(2)\n return None\n\n @staticmethod\n def _get_disk_path(path):\n if path:\n theme 
= context.config.get(\"theme\", \"default\")\n            options = [\n                os.path.abspath(os.path.join(\"shimpy\", \"theme\", theme, path)),\n                os.path.abspath(os.path.join(\"shimpy\", \"static\", path)),\n            ]\n            existing = [p for p in options if os.path.isfile(p)]\n            if existing:\n                return existing[0]\n\n    def onPageRequest(self, event, request):\n        if context.page.mode != \"page\" or self.count_main(context) != 0:\n            return  # something has already handled this request\n\n        path = self._get_path(request.path)\n        disk_path = self._get_disk_path(path)\n\n        if disk_path:\n            log.info(\"Serving static file\", path=disk_path)\n            context.page.set_expiration(600)\n            context.page.mode = \"data\"\n            # 'file()' is Python 2 only; open in binary mode so non-text assets survive the read\n            context.page.data = open(disk_path, \"rb\").read()\n\n            context.page.content_type = {\n                \"txt\": \"text/plain\",\n                \"ico\": \"image/x-icon\",\n                \"png\": \"image/png\",\n                \"css\": \"text/css\",\n                \"js\": \"application/javascript\",\n            }.get(disk_path.split(\".\")[-1], \"text/plain\")\n        else:\n            log.info(\"404 handler called\", path=context.request.path)\n            context.page.status = 404\n            context.page.title = \"404\"\n            context.page.heading = \"404 - no handler found\"\n            context.page.add_block(NavBlock())\n            context.page.add_block(Block(\"Explanation\", \"%r not found\" % context.request.path))\n\n    def count_main(self, context):\n        return len([b for b in context.page.blocks if b.section == \"main\"])\n","repo_name":"shish/shimpy","sub_path":"shimpy/ext/handle_404/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2335,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"44494832187","text":"# -*- coding:utf-8 -*-\n\nimport re\nfrom datetime import datetime, timezone, timedelta\n\ndef to_timestamp(dt_str, tz_str):\n    # parse the naive datetime, then attach the 'UTC+/-h:mm' offset as tzinfo;\n    # calling .timestamp() on a naive datetime would interpret it in the local\n    # timezone and make the asserts below fail\n    dt = datetime.strptime(dt_str, '%Y-%m-%d %H:%M:%S')\n    m = re.match(r'^UTC([+-])(\\d{1,2}):(\\d{2})$', tz_str)\n    sign = 1 if m.group(1) == '+' else -1\n    tz = timezone(sign * timedelta(hours=int(m.group(2)), minutes=int(m.group(3))))\n    return dt.replace(tzinfo=tz).timestamp()\n\nt1 = to_timestamp('2015-6-1 08:10:30', 'UTC+7:00')\nassert t1 == 1433121030.0, t1\n\nt2 = to_timestamp('2015-5-31 16:10:30', 'UTC-09:00')\nassert t2 == 1433121030.0, t2\n\nprint('ok')","repo_name":"NEUlaowang/Python_test","sub_path":"test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"23832320717","text":"# PU Occupancy Behavior Estimation\r\n# Third iteration: Complete Information and Markovian across both time and frequency\r\n# Author: Bharath Keshavamurthy\r\n# Organization: School of Electrical and Computer Engineering, Purdue University\r\n# Copyright (c) 2019. 
All Rights Reserved.\r\n\r\n# For the math behind this algorithm, refer to:\r\n# This url may change - Please contact the author at for more details.\r\n# https://github.rcac.purdue.edu/bkeshava/Minerva/tree/master/latex\r\n\r\nfrom enum import Enum\r\nimport numpy\r\nimport scipy.stats\r\nfrom collections import namedtuple\r\nfrom matplotlib import pyplot as plt\r\n\r\n\r\n# Occupancy state enumeration\r\nclass OccupancyState(Enum):\r\n # Channel Idle\r\n idle = 0\r\n # Channel Occupied\r\n occupied = 1\r\n\r\n\r\n# Main class: PU Occupancy Behavior Estimation\r\nclass PUOccupancyBehaviorEstimatorIII(object):\r\n # Number of samples for this simulation\r\n # Also referred to as the Number of Sampling Rounds\r\n NUMBER_OF_SAMPLES = 1000\r\n\r\n # Variance of the Additive White Gaussian Noise Samples\r\n VARIANCE_OF_AWGN = 1\r\n\r\n # Variance of the Channel Impulse Response which is a zero mean Gaussian\r\n # SNR = 10log_10(80/1) = 19.03 dB\r\n VARIANCE_OF_CHANNEL_IMPULSE_RESPONSE = 80\r\n\r\n # Number of frequency bands/channels in the wideband spectrum of interest\r\n NUMBER_OF_FREQUENCY_BANDS = 18\r\n\r\n # Start probabilities of PU occupancy per frequency band\r\n BAND_START_PROBABILITIES = namedtuple('BandStartProbabilities', ['idle', 'occupied'])\r\n\r\n # Occupancy States (IDLE, OCCUPIED)\r\n OCCUPANCY_STATES = (OccupancyState.idle, OccupancyState.occupied)\r\n\r\n # Value function named tuple\r\n VALUE_FUNCTION_NAMED_TUPLE = namedtuple('ValueFunction',\r\n ['current_value', 'previous_temporal_state', 'previous_spatial_state'])\r\n\r\n # Number of trials to smoothen the Detection Accuracy v/s P(1|0) curve\r\n # Iterating the estimation over numerous trials to average out the inconsistencies\r\n NUMBER_OF_CYCLES = 50\r\n\r\n # Initialization Sequence\r\n def __init__(self):\r\n print('[INFO] PUOccupancyBehaviorEstimatorIII Initialization: Bringing things up ...')\r\n # AWGN samples\r\n self.noise_samples = {}\r\n # Channel Impulse Response samples\r\n self.channel_impulse_response_samples = {}\r\n # True PU Occupancy state\r\n self.true_pu_occupancy_states = []\r\n # The parameters of observations of all the bands are stored in this dict here\r\n self.observations_parameters = {}\r\n # The observed samples at the SU receiver\r\n self.observation_samples = []\r\n # The start probabilities\r\n # This is gonna be same in either dimension because P(X_i = 1) is same in either dimension\r\n self.start_probabilities = self.BAND_START_PROBABILITIES(occupied=0.6, idle=0.4)\r\n # The complete state transition matrix is defined as a Python dictionary taking in named-tuples\r\n # Both spatially and temporally, I'm using the same transition probability matrix\r\n # TODO: Maybe use different transition probability matrices and see what happens...\r\n # This will be defined in the Run Trigger call because of the 'p' variations\r\n self.transition_probabilities_matrix = {}\r\n\r\n # Generate the initial states for k = 0 across time\r\n def get_initial_states_temporal_variation(self, p_val, q_val, pi_val):\r\n initial_state_vector = []\r\n previous = 1\r\n # Initial state generation -> band-0 at time-0 using pi_val\r\n if numpy.random.random_sample() > pi_val:\r\n previous = 0\r\n initial_state_vector.append(previous)\r\n # Based on the state of band-0 at time-0 and the (p_val,q_val) values, generate the states of the remaining...\r\n # ...bands\r\n for _loop_iterator in range(1, self.NUMBER_OF_SAMPLES):\r\n seed = numpy.random.random_sample()\r\n if previous == 1 and seed < q_val:\r\n previous = 
0\r\n elif previous == 1 and seed > q_val:\r\n previous = 1\r\n elif previous == 0 and seed < p_val:\r\n previous = 1\r\n else:\r\n previous = 0\r\n initial_state_vector.append(previous)\r\n return initial_state_vector\r\n\r\n # Generate the true states with Markovian across channels and Markovian across time\r\n # Arguments: p -> P(1|0); q -> P(0|1); and pi -> P(1)\r\n def generate_true_pu_occupancy_states(self, p_val, q_val, pi_val):\r\n # True PU Occupancy states collection will be a list of self.NUMBER_OF_CHANNELS rows, each row ...\r\n # ... with self.NUMBER_OF_SAMPLING_ROUNDS columns\r\n self.true_pu_occupancy_states.append(self.get_initial_states_temporal_variation(p_val, q_val, pi_val))\r\n # t = 0 and k = 0\r\n previous_state = self.true_pu_occupancy_states[0][0]\r\n for channel_index in range(1, self.NUMBER_OF_FREQUENCY_BANDS):\r\n seed = numpy.random.random_sample()\r\n if previous_state == 1 and seed < q_val:\r\n previous_state = 0\r\n elif previous_state == 1 and seed > q_val:\r\n previous_state = 1\r\n elif previous_state == 0 and seed < p_val:\r\n previous_state = 1\r\n else:\r\n previous_state = 0\r\n self.true_pu_occupancy_states.append([previous_state])\r\n # Let's fill in the other states\r\n for channel_index in range(1, self.NUMBER_OF_FREQUENCY_BANDS):\r\n for round_index in range(1, self.NUMBER_OF_SAMPLES):\r\n previous_temporal_state = self.true_pu_occupancy_states[channel_index][round_index - 1]\r\n previous_spatial_state = self.true_pu_occupancy_states[channel_index - 1][round_index]\r\n probability_occupied_temporal = \\\r\n self.transition_probabilities_matrix[previous_temporal_state][1]\r\n probability_occupied_spatial = self.transition_probabilities_matrix[previous_spatial_state][1]\r\n probability_occupied = (probability_occupied_spatial * probability_occupied_temporal) / pi_val\r\n seed = numpy.random.random_sample()\r\n if seed < probability_occupied:\r\n previous_state = 1\r\n else:\r\n previous_state = 0\r\n self.true_pu_occupancy_states[channel_index].append(previous_state)\r\n\r\n # Get the observations vector\r\n # Generate the observations of all the bands for a number of observation rounds or cycles\r\n def allocate_observations(self):\r\n # Each frequency band is observed ${NUMBER_OF_SAMPLES} times.\r\n # The sampling of each frequency band involves a noise sample corresponding to that SU receiver\r\n # ... 
and a channel impulse response sample between that SU receiver and the radio environment\r\n for frequency_band in range(0, self.NUMBER_OF_FREQUENCY_BANDS):\r\n mu_noise, std_noise = 0, numpy.sqrt(self.VARIANCE_OF_AWGN)\r\n # The Re and Im parts of the noise samples are IID and distributed as N(0,\\sigma_V^2/2)\r\n real_noise_samples = numpy.random.normal(mu_noise, (std_noise / numpy.sqrt(2)), self.NUMBER_OF_SAMPLES)\r\n img_noise_samples = numpy.random.normal(mu_noise, (std_noise / numpy.sqrt(2)), self.NUMBER_OF_SAMPLES)\r\n self.noise_samples[frequency_band] = real_noise_samples + (1j * img_noise_samples)\r\n mu_channel_impulse_response, std_channel_impulse_response = 0, numpy.sqrt(\r\n self.VARIANCE_OF_CHANNEL_IMPULSE_RESPONSE)\r\n # The Re and Im parts of the channel impulse response samples are IID and distributed as N(0,\\sigma_H^2/2)\r\n real_channel_impulse_response_samples = numpy.random.normal(mu_channel_impulse_response,\r\n (std_channel_impulse_response / numpy.sqrt(2)),\r\n self.NUMBER_OF_SAMPLES)\r\n img_channel_impulse_response_samples = numpy.random.normal(mu_channel_impulse_response,\r\n (std_channel_impulse_response / numpy.sqrt(2)),\r\n self.NUMBER_OF_SAMPLES)\r\n self.channel_impulse_response_samples[frequency_band] = real_channel_impulse_response_samples + (\r\n 1j * img_channel_impulse_response_samples)\r\n # Re-arranging the vectors\r\n for band in range(0, self.NUMBER_OF_FREQUENCY_BANDS):\r\n obs_per_band = list()\r\n for count in range(0, self.NUMBER_OF_SAMPLES):\r\n obs_per_band.append(self.channel_impulse_response_samples[band][\r\n count] * self.true_pu_occupancy_states[band][count] + self.noise_samples[\r\n band][count])\r\n self.observation_samples.append(obs_per_band)\r\n # The observation_samples member is a kxt matrix\r\n return self.observation_samples\r\n\r\n # Get the start probabilities from the named tuple - a simple getter utility method exclusive to this class\r\n def get_start_probabilities(self, state):\r\n # name or value\r\n if state == 'occupied' or state == '1':\r\n return self.start_probabilities.occupied\r\n else:\r\n return self.start_probabilities.idle\r\n\r\n # Return the transition probabilities from the transition probabilities matrix - two dimensions\r\n def get_transition_probabilities(self, temporal_prev_state, spatial_prev_state, current_state):\r\n return (self.transition_probabilities_matrix[temporal_prev_state][current_state] *\r\n self.transition_probabilities_matrix[spatial_prev_state][current_state]) / self.get_start_probabilities(\r\n str(temporal_prev_state))\r\n\r\n # Return the transition probabilities from the transition probabilities matrix - single dimension\r\n def get_transition_probabilities_single(self, row, column):\r\n return self.transition_probabilities_matrix[row][column]\r\n\r\n # Get the Emission Probabilities -> P(y|x)\r\n def get_emission_probabilities(self, state, observation_sample):\r\n # Idea: P(A, B|C) = P(A|B, C)P(B|C) = P(A|C)P(B|C) because the real and imaginary components are independent\r\n # P(\\Re(y_k(i)), \\Im(y_k(i))|x_k) = P(\\Re(y_k(i))|x_k)P(\\Im(y_k(i)|x_k)\r\n # The emission probability from the real component\r\n emission_real_gaussian_component = scipy.stats.norm(0, numpy.sqrt(\r\n ((self.VARIANCE_OF_CHANNEL_IMPULSE_RESPONSE / 2) * state) + (self.VARIANCE_OF_AWGN / 2))).pdf(\r\n observation_sample.real)\r\n # The emission probability from the imaginary component\r\n emission_imaginary_gaussian_component = scipy.stats.norm(0, numpy.sqrt(\r\n ((self.VARIANCE_OF_CHANNEL_IMPULSE_RESPONSE / 
2) * state) + (self.VARIANCE_OF_AWGN / 2))).pdf(\r\n observation_sample.imag)\r\n return emission_real_gaussian_component * emission_imaginary_gaussian_component\r\n\r\n # Evaluate the Estimation Accuracy\r\n # P(\\hat{X_k} = x_k | X_k = x_k) \\forall k \\in \\{0, 1, 2, ...., K-1\\} and x_k \\in \\{0, 1\\}\r\n # Relative Frequency approach to estimate this parameter\r\n def get_estimation_accuracy(self, estimated_states):\r\n total_count = 0\r\n accuracies = 0\r\n for _counter in range(0, self.NUMBER_OF_FREQUENCY_BANDS):\r\n for _round in range(0, self.NUMBER_OF_SAMPLES):\r\n total_count += 1\r\n if self.true_pu_occupancy_states[_counter][_round] == estimated_states[_counter][_round]:\r\n accuracies += 1\r\n return accuracies / total_count\r\n\r\n # Evaluate the Probability of Detection P_D\r\n # P(\\hat{X_k} = 1 | X_k = 1) \\forall k \\in \\{0, 1, 2, ...., K-1\\}\r\n # Relative Frequency approach to estimate this parameter\r\n def get_probability_of_detection(self, estimated_states):\r\n occupancies = 0\r\n number_of_detections = 0\r\n for channel_index in range(0, self.NUMBER_OF_FREQUENCY_BANDS):\r\n for time_index in range(0, self.NUMBER_OF_SAMPLES):\r\n pu_state = self.true_pu_occupancy_states[channel_index][time_index]\r\n if pu_state == 1:\r\n occupancies += 1\r\n if estimated_states[channel_index][time_index] == pu_state:\r\n number_of_detections += 1\r\n if occupancies == 0:\r\n return 1\r\n return number_of_detections / occupancies\r\n\r\n # Evaluate the Probability of False Alarm P_FA\r\n # P(\\hat{X_k} = 1 | X_k = 0) \\forall k \\in \\{0, 1, 2, ...., K-1\\}\r\n # Relative Frequency approach to estimate this parameter\r\n def get_probability_of_false_alarm(self, estimated_states):\r\n idle_count = 0\r\n number_of_false_alarms = 0\r\n for channel_index in range(0, self.NUMBER_OF_FREQUENCY_BANDS):\r\n for time_index in range(0, self.NUMBER_OF_SAMPLES):\r\n pu_state = self.true_pu_occupancy_states[channel_index][time_index]\r\n if pu_state == 0:\r\n idle_count += 1\r\n if estimated_states[channel_index][time_index] == 1:\r\n number_of_false_alarms += 1\r\n if idle_count == 0:\r\n return 0\r\n return number_of_false_alarms / idle_count\r\n\r\n # Evaluate the Probability of Missed Detection P_MD\r\n # P(\\hat{X_k} = 0 | X_k = 1) \\forall k \\in \\{0, 1, 2, ...., K-1\\}\r\n # Relative Frequency approach to estimate this parameter\r\n def get_probability_of_missed_detection(self, estimated_states):\r\n occupancies = 0\r\n number_of_missed_detections = 0\r\n for channel_index in range(0, self.NUMBER_OF_FREQUENCY_BANDS):\r\n for time_index in range(0, self.NUMBER_OF_SAMPLES):\r\n pu_state = self.true_pu_occupancy_states[channel_index][time_index]\r\n if pu_state == 1:\r\n occupancies += 1\r\n if estimated_states[channel_index][time_index] == 0:\r\n number_of_missed_detections += 1\r\n if occupancies == 0:\r\n return 0\r\n return number_of_missed_detections / occupancies\r\n\r\n # Output the estimated state of the frequency bands in the wideband spectrum of interest\r\n # Output the collection consisting of the parameters of interest\r\n def estimate_pu_occupancy_states(self):\r\n # Estimated states - kxt matrix\r\n estimated_states = []\r\n previous_state_spatial = None\r\n previous_state_temporal = None\r\n for x in range(0, self.NUMBER_OF_FREQUENCY_BANDS):\r\n estimated_states.append([])\r\n # A value function collection to store and index the calculated value functions across t and k\r\n value_function_collection = []\r\n for k in range(0, self.NUMBER_OF_FREQUENCY_BANDS):\r\n 
temp_array = []\r\n for x in range(self.NUMBER_OF_SAMPLES):\r\n temp_array.append(dict())\r\n value_function_collection.append(temp_array)\r\n # t = 0 and k = 0 - No previous state to base the Markovian Correlation on in either dimension\r\n for state in OccupancyState:\r\n current_value = self.get_emission_probabilities(state.value, self.observation_samples[0][0]) * \\\r\n self.get_start_probabilities(state.name)\r\n value_function_collection[0][0][state.name] = self.VALUE_FUNCTION_NAMED_TUPLE(current_value=current_value,\r\n previous_temporal_state=None,\r\n previous_spatial_state=None)\r\n # First row - Only temporal correlation\r\n i = 0\r\n for j in range(1, self.NUMBER_OF_SAMPLES):\r\n # Trying to find the max pointer here ...\r\n for state in OccupancyState:\r\n # Again finishing off the [0] index first\r\n max_pointer = self.get_transition_probabilities_single(OccupancyState.idle.value,\r\n state.value) * \\\r\n value_function_collection[i][j - 1][OccupancyState.idle.name].current_value\r\n # Using IDLE as the confirmed previous state\r\n confirmed_previous_state = OccupancyState.idle.name\r\n for candidate_previous_state in OccupancyState:\r\n if candidate_previous_state.name == OccupancyState.idle.name:\r\n # Already done\r\n continue\r\n else:\r\n pointer = self.get_transition_probabilities_single(candidate_previous_state.value,\r\n state.value) * \\\r\n value_function_collection[i][j - 1][\r\n candidate_previous_state.name].current_value\r\n if pointer > max_pointer:\r\n max_pointer = pointer\r\n confirmed_previous_state = candidate_previous_state.name\r\n current_value = max_pointer * self.get_emission_probabilities(state.value,\r\n self.observation_samples[\r\n i][j])\r\n value_function_collection[i][j][state.name] = self.VALUE_FUNCTION_NAMED_TUPLE(\r\n current_value=current_value, previous_temporal_state=confirmed_previous_state,\r\n previous_spatial_state=None)\r\n # First column - Only spatial correlation\r\n j = 0\r\n for i in range(1, self.NUMBER_OF_FREQUENCY_BANDS):\r\n # Trying to find the max pointer here ...\r\n for state in OccupancyState:\r\n # Again finishing off the [0] index first\r\n max_pointer = self.get_transition_probabilities_single(OccupancyState.idle.value,\r\n state.value) * \\\r\n value_function_collection[i - 1][j][\r\n OccupancyState.idle.name].current_value\r\n confirmed_previous_state = OccupancyState.idle.name\r\n for candidate_previous_state in OccupancyState:\r\n if candidate_previous_state.name == OccupancyState.idle.name:\r\n # Already done\r\n continue\r\n else:\r\n pointer = self.get_transition_probabilities_single(candidate_previous_state.value,\r\n state.value) * \\\r\n value_function_collection[i - 1][j][\r\n candidate_previous_state.name].current_value\r\n if pointer > max_pointer:\r\n max_pointer = pointer\r\n confirmed_previous_state = candidate_previous_state.name\r\n current_value = max_pointer * self.get_emission_probabilities(state.value,\r\n self.observation_samples[\r\n i][j])\r\n value_function_collection[i][j][state.name] = self.VALUE_FUNCTION_NAMED_TUPLE(\r\n current_value=current_value, previous_temporal_state=None,\r\n previous_spatial_state=confirmed_previous_state)\r\n # I'm done with the first row and first column\r\n # Moving on to the other rows and columns\r\n for i in range(1, self.NUMBER_OF_FREQUENCY_BANDS):\r\n # For every row, I'm going across laterally (across columns) and populating the value_function_collection\r\n for j in range(1, self.NUMBER_OF_SAMPLES):\r\n for state in OccupancyState:\r\n # Again finishing 
off the [0] index first\r\n max_pointer = self.get_transition_probabilities(OccupancyState.idle.value,\r\n OccupancyState.idle.value, state.value) * \\\r\n value_function_collection[i][j - 1][OccupancyState.idle.name].current_value * \\\r\n value_function_collection[i - 1][j][OccupancyState.idle.name].current_value\r\n confirmed_previous_state_temporal = OccupancyState.idle.name\r\n confirmed_previous_state_spatial = OccupancyState.idle.name\r\n for candidate_previous_state_temporal in OccupancyState:\r\n for candidate_previous_state_spatial in OccupancyState:\r\n if candidate_previous_state_temporal.name == OccupancyState.idle.name and \\\r\n candidate_previous_state_spatial.name == OccupancyState.idle.name:\r\n # Already done\r\n continue\r\n else:\r\n pointer = self.get_transition_probabilities(candidate_previous_state_temporal.value,\r\n candidate_previous_state_spatial.value,\r\n state.value) * \\\r\n value_function_collection[i][j - 1][\r\n candidate_previous_state_temporal.name].current_value * \\\r\n value_function_collection[i - 1][j][\r\n candidate_previous_state_spatial.name].current_value\r\n if pointer > max_pointer:\r\n max_pointer = pointer\r\n confirmed_previous_state_temporal = candidate_previous_state_temporal.name\r\n confirmed_previous_state_spatial = candidate_previous_state_spatial.name\r\n # Now, I have the value function for this i and this j\r\n # Populate the value function collection with this value\r\n # I found maximum of Double Markov Chain value functions from the past and now I'm multiplying it...\r\n # ...with the emission probability of this particular observation\r\n current_value = max_pointer * self.get_emission_probabilities(state.value,\r\n self.observation_samples[\r\n i][j])\r\n value_function_collection[i][j][state.name] = self.VALUE_FUNCTION_NAMED_TUPLE(\r\n current_value=current_value, previous_temporal_state=confirmed_previous_state_temporal,\r\n previous_spatial_state=confirmed_previous_state_spatial)\r\n # I think the forward path is perfect\r\n # I have doubts in the backtrack path\r\n max_value = 0\r\n # Finding the max value among the named tuples\r\n for _value in value_function_collection[-1][-1].values():\r\n if _value.current_value > max_value:\r\n max_value = _value.current_value\r\n # Finding the state corresponding to this max_value and using this as the final confirmed state to ...\r\n # ...backtrack and find the previous states\r\n for k, v in value_function_collection[-1][-1].items():\r\n if v.current_value == max_value:\r\n estimated_states[self.NUMBER_OF_FREQUENCY_BANDS - 1].append(\r\n self.value_from_name(k))\r\n previous_state_temporal = k\r\n previous_state_spatial = k\r\n break\r\n # Backtracking\r\n for i in range(self.NUMBER_OF_FREQUENCY_BANDS - 1, -1, -1):\r\n for j in range(self.NUMBER_OF_SAMPLES - 1, -1, -1):\r\n if len(estimated_states[i]) == 0:\r\n estimated_states[i].insert(0, self.value_from_name(\r\n value_function_collection[i + 1][j][previous_state_spatial].previous_spatial_state))\r\n previous_state_temporal = value_function_collection[i][j][\r\n previous_state_spatial].previous_temporal_state\r\n continue\r\n estimated_states[i].insert(0, self.value_from_name(\r\n value_function_collection[i][j][previous_state_temporal].previous_temporal_state))\r\n previous_state_temporal = value_function_collection[i][j][\r\n previous_state_temporal].previous_temporal_state\r\n previous_state_spatial = value_function_collection[i][self.NUMBER_OF_SAMPLES - 1][\r\n previous_state_spatial].previous_spatial_state\r\n return 
{'Estimation_Accuracy': self.get_estimation_accuracy(estimated_states),\r\n 'Detection_Probability': self.get_probability_of_detection(estimated_states),\r\n 'False_Alarm_Probability': self.get_probability_of_false_alarm(estimated_states),\r\n 'Missed_Detection_Probability': self.get_probability_of_missed_detection(estimated_states)}\r\n\r\n # Get enumeration field value from name\r\n @staticmethod\r\n def value_from_name(name):\r\n if name == 'occupied':\r\n return OccupancyState.occupied.value\r\n else:\r\n return OccupancyState.idle.value\r\n\r\n # Reset every collection for the next run\r\n def reset(self):\r\n self.true_pu_occupancy_states.clear()\r\n self.transition_probabilities_matrix.clear()\r\n self.noise_samples.clear()\r\n self.channel_impulse_response_samples.clear()\r\n self.observation_samples.clear()\r\n\r\n # Exit strategy\r\n def __exit__(self, exc_type, exc_val, exc_tb):\r\n print('[INFO] PUOccupancyBehaviorEstimatorIII Clean-up: Cleaning things up ...')\r\n\r\n\r\n# Visualize a plot of Estimation Accuracies v/s P(Occupied|Idle)\r\ndef visualize_estimation_accuracy_plot(p_estimation_x_axis, estimation_accuracies_for_plotting):\r\n estimation_fig, estimation_axes = plt.subplots()\r\n estimation_axes.plot(p_estimation_x_axis, estimation_accuracies_for_plotting, linestyle='--', linewidth=1.0,\r\n marker='o', color='r')\r\n estimation_fig.suptitle(\r\n 'Estimation Accuracy v P(Occupied|Idle) at P(Occupied)=0.6 with Markovian Correlation across channels and '\r\n 'across observation rounds', fontsize=7.5)\r\n estimation_axes.set_xlabel('P(Occupied | Idle)', fontsize=10)\r\n estimation_axes.set_ylabel('Estimation Accuracy', fontsize=10)\r\n plt.show()\r\n\r\n\r\n# Visualize a plot of False Alarm Probabilities v/s P(Occupied|Idle)\r\ndef visualize_false_alarm_probability_plot(p_false_alarm_x_axis, false_alarm_probabilities_for_plotting):\r\n false_alarm_fig, false_alarm_axes = plt.subplots()\r\n false_alarm_axes.plot(p_false_alarm_x_axis, false_alarm_probabilities_for_plotting, linestyle='--', linewidth=1.0,\r\n marker='o', color='b')\r\n false_alarm_fig.suptitle('False Alarm Probability v P(Occupied|Idle) at P(Occupied)=0.6 with Markovian Correlation '\r\n 'across channels and across observation rounds', fontsize=7.5)\r\n false_alarm_axes.set_xlabel('P(Occupied | Idle)', fontsize=10)\r\n false_alarm_axes.set_ylabel('Probability of False Alarm', fontsize=10)\r\n plt.show()\r\n\r\n\r\n# Visualize a plot of Missed Detection Probabilities v/s P(Occupied|Idle)\r\ndef visualize_missed_detection_probability_plot(p_missed_detection_x_axis, missed_detection_probabilities_for_plotting):\r\n missed_detection_fig, missed_detection_axes = plt.subplots()\r\n missed_detection_axes.plot(p_missed_detection_x_axis, missed_detection_probabilities_for_plotting, linestyle='--',\r\n linewidth=1.0, marker='o', color='m')\r\n missed_detection_fig.suptitle(\r\n 'Missed Detection Probability v P(Occupied|Idle) at P(Occupied)=0.6 with Markovian Correlation '\r\n 'across channels and across observation rounds', fontsize=7.5)\r\n missed_detection_axes.set_xlabel('P(Occupied | Idle)', fontsize=10)\r\n missed_detection_axes.set_ylabel('Probability of Missed Detection', fontsize=10)\r\n plt.legend(loc='upper right', prop={'size': 8})\r\n plt.show()\r\n\r\n\r\n# Cyclic average evaluation\r\ndef cyclic_average(collection, number_of_internal_collections, number_of_cycles):\r\n collection_for_plotting = []\r\n for pass_counter in range(0, number_of_internal_collections):\r\n _sum = 0\r\n for _entry in 
collection:\r\n _sum += _entry[pass_counter]\r\n collection_for_plotting.append(_sum / number_of_cycles)\r\n return collection_for_plotting\r\n\r\n\r\n# Run Trigger\r\nif __name__ == '__main__':\r\n # External estimation accuracies array\r\n external_estimation_accuracies = []\r\n # External detection probabilities array\r\n external_detection_probabilities = []\r\n # External false_alarm_probabilities array\r\n external_false_alarm_probabilities = []\r\n # External missed detection probabilities array\r\n external_missed_detection_probabilities = []\r\n puOccupancyBehaviorEstimator = PUOccupancyBehaviorEstimatorIII()\r\n # P(1)\r\n pi = puOccupancyBehaviorEstimator.start_probabilities.occupied\r\n p_initial = 0.03\r\n for cycle in range(0, puOccupancyBehaviorEstimator.NUMBER_OF_CYCLES):\r\n # Internal estimation accuracies array\r\n internal_estimation_accuracies = []\r\n # Internal detection probabilities array\r\n internal_detection_probabilities = []\r\n # Internal false_alarm_probabilities array\r\n internal_false_alarm_probabilities = []\r\n # Internal missed detection probabilities array\r\n internal_missed_detection_probabilities = []\r\n # P(Occupied|Idle)\r\n p = p_initial\r\n # Varying p all the way up to independence\r\n for iteration in range(0, int(pi / p)):\r\n q = (p * (1 - pi)) / pi\r\n puOccupancyBehaviorEstimator.transition_probabilities_matrix = {\r\n 1: {1: (1 - q), 0: q},\r\n 0: {1: p, 0: (1 - p)}\r\n }\r\n # True PU Occupancy State\r\n puOccupancyBehaviorEstimator.generate_true_pu_occupancy_states(p, q, pi)\r\n puOccupancyBehaviorEstimator.allocate_observations()\r\n obtained_collection = puOccupancyBehaviorEstimator.estimate_pu_occupancy_states()\r\n internal_estimation_accuracies.append(obtained_collection['Estimation_Accuracy'])\r\n internal_detection_probabilities.append(obtained_collection['Detection_Probability'])\r\n internal_false_alarm_probabilities.append(obtained_collection['False_Alarm_Probability'])\r\n internal_missed_detection_probabilities.append(obtained_collection['Missed_Detection_Probability'])\r\n p += p_initial\r\n puOccupancyBehaviorEstimator.reset()\r\n external_estimation_accuracies.append(internal_estimation_accuracies)\r\n external_detection_probabilities.append(internal_detection_probabilities)\r\n external_false_alarm_probabilities.append(internal_false_alarm_probabilities)\r\n external_missed_detection_probabilities.append(internal_missed_detection_probabilities)\r\n x_axis = []\r\n for value in range(1, int(pi / p_initial) + 1):\r\n x_axis.append(value * p_initial)\r\n final_frontier = int(pi / p_initial)\r\n # Start Plotting\r\n visualize_estimation_accuracy_plot(x_axis,\r\n cyclic_average(external_estimation_accuracies, final_frontier,\r\n puOccupancyBehaviorEstimator.NUMBER_OF_CYCLES))\r\n visualize_false_alarm_probability_plot(x_axis, cyclic_average(external_false_alarm_probabilities, final_frontier,\r\n puOccupancyBehaviorEstimator.NUMBER_OF_CYCLES))\r\n visualize_missed_detection_probability_plot(x_axis,\r\n cyclic_average(external_missed_detection_probabilities, final_frontier,\r\n puOccupancyBehaviorEstimator.NUMBER_OF_CYCLES))\r\n","repo_name":"bharathkeshavamurthy/Minerva","sub_path":"src/PUOccupancyBehaviorEstimatorIII.py","file_name":"PUOccupancyBehaviorEstimatorIII.py","file_ext":"py","file_size_in_byte":32836,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"71870520584","text":"from common_schemas.http_response import Response\n\n\nclass BookDetails:\n def 
__init__(self, code, title, author, image, epub_link, mobi_link, pdf_link, description):\n        self.code = code\n        self.title = title\n        self.author = author\n        self.image = image\n        self.epub_link = epub_link\n        self.mobi_link = mobi_link\n        self.pdf_link = pdf_link\n        self.description = description\n\n\nclass GetBookDetailsResponse(Response):\n    def __init__(self, status: int, book_detail: BookDetails):\n        super().__init__(status, book_detail)\n","repo_name":"enzobonggio/le-libros-serverless","sub_path":"lambdas/get_book_details/get_book_details/schemas/response.py","file_name":"response.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"9219710788","text":"import serial\nimport sys\nfrom time import sleep\n\ni = 0\nwhile(True):\n    try:\n        ESP32 = serial.Serial(('COM'+str(i)), 921600)\n        serialEnd = ('COM'+str(i))\n        print(\"Serial device connected on \" + serialEnd)\n        ESP32.close()\n        break\n    except:\n        print(sys.exc_info())\n        sleep(2)\n        i = i + 1\n","repo_name":"Pcpastre/Controller","sub_path":"serialFind.py","file_name":"serialFind.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
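pyserial also ships a port-enumeration helper, which avoids probing COM0, COM1, ... in a try/except loop the way serialFind.py does above. A sketch; the 921600 baud rate is carried over from the script, the selection of the first port is illustrative:

import serial
from serial.tools import list_ports

# list the ports the OS actually exposes instead of guessing names
available = list_ports.comports()
for port in available:
    print(port.device, '-', port.description)

if available:
    # open the first detected port at the same baud rate the script uses
    esp32 = serial.Serial(available[0].device, 921600)
    print('Serial device connected on ' + esp32.port)
    esp32.close()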
{"seq_id":"2211919326","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport torch\nimport numpy as np\n# never used\ndef square_distance(src, dst):\n    B, N, _ = src.shape\n    _, M, _ = dst.shape\n    # matmul: tensor multiply; -1: let torch infer that dimension\n    dist = -2 * torch.matmul(src, dst.permute(0,2,1))\n    # torch.view: resize\n    dist += torch.sum(src**2, -1).view(B, N, 1)\n    dist += torch.sum(dst**2, -1).view(B, 1, M)\n    return dist\n\n# points:BxNx3, idx: BxSxK\n# [[x,y,z] [[1,8,7,6,5,15,26,35....]\n# [x,y,z] [1,8,7,6,5,15,26,35....]\n# [x,y,z] [1,8,7,6,5,15,26,35....]\n# [x,y,z] [1,8,7,6,5,15,26,35....]\n# [x,y,z] [1,8,7,6,5,15,26,35....]\n# [x,y,z] [1,8,7,6,5,15,26,35....]\n# [x,y,z] [1,8,7,6,5,15,26,35....]]\n# [x,y,z]\n# [x,y,z]\n# [x,y,z]\n# [x,y,z]\n# [x,y,z]\n# [x,y,z]]\ndef index_points(points, idx):\n    \"\"\"\n    Input:\n        points: input point cloud [B, N, 3] tensor\n        idx: point index [B, S, K] tensor\n    output:\n        indexed points: [B, S, K, 3] tensor\n    \"\"\"\n    device = points.device # keep the index tensors on the same device\n    B = points.shape[0] # batch size\n    view_shape = list(idx.shape) # view_shape: [B, S, K]; note it has exactly three elements, one per dimension\n    view_shape[1:] = [1]*(len(view_shape)-1) # [1]*2 --> [1, 1] view_shape now: [B, 1, 1]\n    repeat_shape = list(idx.shape) # repeat_shape: [B, S, K]\n    repeat_shape[0] = 1 # repeat_shape now: [1, S, K]\n    '''\n    batch_indices: [B, S, K]\n    dim 0 (B): B point-cloud frames\n    dim 1 (S): S sampled points per frame\n    dim 2 (K): K nearest neighbours per sampled point\n    the trick behind new_points: the first index of points walks batch_indices to pick the frame,\n    the second walks idx to pick the sampled point within that frame; this enumerates B*S*K points,\n    and ':' then fetches each point's coordinates and features\n    new_points: [B, S, K, 3]\n    '''\n    '''\n    batch_indices looks like (K entries per row, S rows per frame):\n    [[[0,0,0,0,0....0],\n      [0,0,0,0,0....0],\n      [0,0,0,0,0....0],\n      [0,0,0,0,0....0],\n      [0,0,0,0,0....0]],\n\n     [[1,1,1,1,1....1],\n      [1,1,1,1,1....1],\n      [1,1,1,1,1....1],\n      [1,1,1,1,1....1],\n      [1,1,1,1,1....1]],\n     .\n     .\n     .\n     .\n     [[B-1,B-1....B-1],\n      [B-1,B-1....B-1],\n      [B-1,B-1....B-1],\n      [B-1,B-1....B-1],\n      [B-1,B-1....B-1]]]\n    '''\n    batch_indices = torch.arange(B, dtype=torch.long).to(device).view(view_shape).repeat(repeat_shape) # [B, S, K]\n    new_points = points[batch_indices, idx, :]\n    return new_points\n\ndef query_knn_point(k, query, pc):\n    \"\"\"\n    Input:\n        k: number of neighbor points\n        query: query points [B, S, 3]\n        pc: point cloud [B, N, 3]\n        points: point features\n    Output:\n        normed_knn_points: [B, S, k, 3]\n        knn_ids: [B, S, k] index of knn points\n    \"\"\"\n    # permute: exchange dim order\n    '''\n    before: query: [B, S, 3] pc: [B, N, 3]\n    after: query: [B, 3, S, 1] database: [B, 3, 1, N]\n    '''\n    query = query.permute(0,2,1).unsqueeze(3)\n    database = pc.permute(0,2,1).unsqueeze(2)\n    # after broadcasting, query-database: [B, 3, S, N]; the size-3 dim no longer holds coordinates but the per-component differences between point pairs\n    # norm: [B, S, N]; each element is now the Euclidean distance between a point pair, dim=1: the second dim is reduced\n    norm = torch.norm(query-database, dim=1, keepdim=False)\n    '''\n    knn_ids: [B, S, k] tensor: S sampled points, each with its k nearest neighbours\n    topk returns the k smallest distance values and the corresponding indices\n    '''\n    knn_d, knn_ids = torch.topk(norm, k=k, dim=2, largest=False, sorted=True)\n    \"\"\"\n    Input:\n        pc: input point cloud [B, N, 3] tensor\n        knn_ids: point index of [B, S, K] tensor\n    output:\n        knn_points: [B, S, K, 3] tensor\n    \"\"\"\n    knn_points = index_points(pc, knn_ids)\n    # centroids: [B, S, 3]; B frames, S key points per frame, i.e. S clusters with S centroids\n    centroids = torch.mean(knn_points, dim=2)\n    # broadcast centroids to the shape of knn_points and take each neighbour's offset from its centroid\n    # by cjf: normed_knn_points and centroids are in fact never used downstream\n    centroids = centroids.unsqueeze(2).repeat(1,1,k,1)\n    normed_knn_points = knn_points - centroids\n    return normed_knn_points, knn_ids\n\ndef random_dilation_encoding(x_sample, x, k, n):\n    '''\n    Input:\n        x_sample: [B, nsample, 3+C]\n        x: [B, N, 3+C]\n        k: number of neighbors\n        n: dilation ratio\n    Output:\n        dilation_group: random dilation cluster [B, 4+C, nsample, k]\n        dilation_xyz: xyz of random dilation cluster [B, 3, nsample, k]\n    '''\n    xyz_sample = x_sample[:,:,:3] # first three of the 3+C channels (xyz)\n    xyz = x[:,:,:3] # first three of the 3+C channels (xyz)\n    # feature = x[:,:,3:] # remaining C feature channels, unused for now\n    '''\n    query the knn indices (and centroid-relative coordinates) of the dilated neighbourhood;\n    note that k*n neighbours are queried here, i.e. the set after dilation\n    knn_idx: [B, S, k*n]\n    '''\n    _, knn_idx = query_knn_point(int(k*n), xyz_sample, xyz)\n    # randperm returns a random permutation of 0 ~ k*n-1; keep k random indices out of it\n    rand_idx = torch.randperm(int(k*n))[:k]\n    dilation_idx = knn_idx[:,:,rand_idx] # [B, S, K]\n    # gather the original coordinates (and features) of the kept neighbours: [B, S, K, 3] || [B, S, K, C]\n    dilation_xyz = index_points(xyz, dilation_idx)\n    # dilation_feature = index_points(feature, dilation_idx) # unused for now\n    # [B, S, 3] --> [B, S, K, 3], matching dilation_xyz for the subtraction below\n    xyz_expand = xyz_sample.unsqueeze(2).repeat(1,1,k,1)\n    # aggregate the distances, the coordinates relative to the randomly sampled candidate key points and (later) the features of the dilated neighbours into the final dilation_group, ordered as distance, relative xyz, features\n    dilation_xyz_resi = dilation_xyz - xyz_expand # [B, S, K, 3]\n    dilation_xyz_dis = torch.norm(dilation_xyz_resi,dim=-1,keepdim=True) # norm over the last dim, i.e. sqrt(x^2+y^2+z^2), [B, S, K, 1]\n    dilation_group = torch.cat((dilation_xyz_dis, dilation_xyz_resi),dim=-1) # [B, S, K, 4]\n    # dilation_group = torch.cat((dilation_group, dilation_feature), dim=-1) # [B, S, K, 4+C] not yet used\n    dilation_group = dilation_group.permute(0,3,1,2) # [B, 4+C, S, K]\n    return dilation_group, dilation_xyz","repo_name":"cloudcjf/KP-Net","sub_path":"model/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6515,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"}
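A quick smoke test for the two helpers above, using random tensors; the shapes follow the docstrings (B batches, N cloud points, S samples, k neighbours, dilation ratio n). The import path assumes the repo layout (model/utils.py):

import torch
from model.utils import query_knn_point, random_dilation_encoding

B, N, S, k, n = 2, 1024, 128, 16, 2
pc = torch.rand(B, N, 3)       # point cloud, [B, N, 3]
sample = pc[:, :S, :]          # pretend the first S points are the sampled key points

normed, ids = query_knn_point(k, sample, pc)
print(normed.shape, ids.shape)   # torch.Size([2, 128, 16, 3]) torch.Size([2, 128, 16])

group, xyz = random_dilation_encoding(sample, pc, k, n)
print(group.shape, xyz.shape)    # torch.Size([2, 4, 128, 16]) torch.Size([2, 128, 16, 3])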
{"seq_id":"73388358666","text":"# Permutations\nfrom typing import List\n\nclass Solution:\n    def permute(self, nums: List[int]) -> List[List[int]]:\n        if not nums: return [[]]\n        ans = []\n        s = set(nums)\n        for i in range(len(nums)):\n            scopy = s.copy()\n            scopy.remove(nums[i])\n            rec = [[nums[i]]+p for p in self.permute(list(scopy))]\n            ans += rec\n        return ans\n\ns = Solution()\nprint(s.permute([1,2,3]))\n","repo_name":"GavinPHR/code","sub_path":"phase3/46.py","file_name":"46.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
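For reference, the standard library produces the same result; the recursive set-based version above works, but it re-copies the candidate set at every level and, because of set(nums), silently drops duplicate inputs, so the two agree only for distinct elements:

from itertools import permutations

print([list(p) for p in permutations([1, 2, 3])])
# [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 1, 2], [3, 2, 1]]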
{"seq_id":"24503088436","text":"#%% import various libraries\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\nfrom kaggle.api.kaggle_api_extended import KaggleApi\r\nfrom zipfile import ZipFile\r\nimport os\r\n# %% \r\n# import data from kaggle \r\napi = KaggleApi()\r\napi.authenticate()\r\napi.dataset_download_files('unsdsn/world-happiness')\r\n\r\n#%%\r\n# unzip data \r\nzf = ZipFile('world-happiness.zip')\r\nos.makedirs('world-happiness')\r\nzf.extractall('world-happiness/') # save files in selected folder\r\nzf.close()\r\n# %%\r\n# because the data columns have different names in each year, we will only use the income column and the life expectancy column as an example\r\nfiles = os.listdir('world-happiness')\r\ndf = pd.DataFrame()\r\nfor file in files:\r\n    df_file = pd.read_csv(os.path.join('world-happiness',file))\r\n    ## get the columns we need - country, gdp and life expectancy\r\n    cols_get = [x for x in df_file.columns.tolist() if 'gdp' in x.lower() or 'expectancy' in x.lower() or 'country' in x.lower()]\r\n    ## sort and rename the columns for consistency \r\n    cols_get.sort()\r\n    dict_rename = dict(zip(cols_get,['Country','GDP','Life Expectancy']))\r\n    df_file = df_file[cols_get].copy()\r\n    df_file.rename(columns=dict_rename,inplace=True)\r\n    ## add the year column for reference \r\n    df_file['year'] = os.path.splitext(file)[0]\r\n    ## concatenate the data \r\n    df = pd.concat([df,df_file])\r\n    \r\n\r\n# %%\r\n# the question is whether we can find a linear relation between GDP and life expectancy \r\n# we will first look into some basic stats of the data to determine if we need to process the data before feeding it to a model \r\ndf.describe()\r\n# want to make sure the distribution in each year is relatively the same so they can be trained as a whole dataset \r\n# aka: we want to make sure time does not affect the data very much \r\ndf.boxplot(column=['GDP','Life Expectancy'],by=['year'],figsize=(12,5))\r\n\r\n# here we can see that the two variables have different maximum values, yet their distributions are quite similar judging from the percentiles. We will just scale the data to make the range of the variables 0-1 in this case \r\n# in addition, based on the boxplot grouped by year we can see that time is not an obvious influence factor here. So we can use the data as a whole \r\n#%%\r\n## import normalizer from sklearn \r\nfrom sklearn.preprocessing import MinMaxScaler\r\nscaler = MinMaxScaler()\r\n# transform data\r\nX = df['GDP'].values\r\nX_scaled = scaler.fit_transform(X.reshape(-1,1))\r\ny = df['Life Expectancy'].values\r\ny_scaled = scaler.fit_transform(y.reshape(-1,1))\r\n\r\n# %%\r\nrandom_state = 17\r\n# start building the model \r\nfrom sklearn.model_selection import train_test_split\r\nX_train, X_test, y_train, y_test = train_test_split(X_scaled, y_scaled, test_size=0.3, random_state=random_state)\r\n# %%\r\n# build the model \r\nfrom sklearn.linear_model import LinearRegression\r\nreg = LinearRegression().fit(X_train, y_train)\r\n#%%\r\nfrom sklearn.metrics import mean_squared_error\r\n# RMSE is the square root of the MSE; the original printed the raw MSE under an RMSE label\r\nprint('Model RMSE on training data set: {:.2f}'.format(np.sqrt(mean_squared_error(reg.predict(X_train),y_train))))\r\nprint('Model RMSE on test data set: {:.2f}'.format(np.sqrt(mean_squared_error(reg.predict(X_test),y_test))))\r\n# %%\r\n## evaluate the model by visualization \r\nX_model = np.linspace(0,1,100)\r\ny_model = reg.predict(X_model.reshape(-1,1))\r\nplt.figure(figsize=(12,5))\r\nplt.scatter(X_train,y_train,color='tab:blue',label='train')\r\nplt.scatter(X_test,y_test,color='tab:orange',label='test')\r\nplt.plot(X_model,y_model, 'r--',label='fitted')\r\nplt.xlabel('GDP')\r\nplt.ylabel('Life Expectancy')\r\nplt.title('GDP vs Life Expectancy')\r\nplt.legend()\r\n\r\n# from the figure we can see a clear trend: the higher the GDP, the larger the life expectancy. However, correlation does not necessarily mean causation. We will not dive deep into this topic here. \r\n# %%\r\n## some statistical exploration \r\n## TBC \r\n## prediction interval, confidence interval ","repo_name":"JieyuZhang97/ML_projects","sub_path":"linear_regression/linear_regression.py","file_name":"linear_regression.py","file_ext":"py","file_size_in_byte":3821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
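The notebook above leaves prediction and confidence intervals as TBC; one way to get both is to refit the same regression with statsmodels, sketched here on the notebook's scaled arrays (X_scaled and y_scaled are assumed to exist as defined above):

import statsmodels.api as sm

X_ols = sm.add_constant(X_scaled)   # statsmodels does not add an intercept by itself
ols_fit = sm.OLS(y_scaled, X_ols).fit()

pred = ols_fit.get_prediction(X_ols).summary_frame(alpha=0.05)
# mean_ci_lower / mean_ci_upper: 95% confidence interval of the fitted mean
# obs_ci_lower / obs_ci_upper: 95% prediction interval for new observations
print(pred[['mean', 'mean_ci_lower', 'mean_ci_upper', 'obs_ci_lower', 'obs_ci_upper']].head())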
button \\n\",size=(200, 100),style=wx.TE_MULTILINE|wx.TE_RICH2)\r\n\r\n\t\tself.richText.SetInsertionPoint(0)\r\n\t\tself.richText.SetStyle(44,52,wx.TextAttr(\"white\", \"black\"))\r\n\t\tpoints = self.richText.GetFont().GetPointSize()\t\t\r\n\t\tf = wx.Font(points + 3, wx.ROMAN, wx.ITALIC, wx.BOLD, True) #创建一个字体 \r\n\t\tself.richText.SetStyle(68, 82, wx.TextAttr(\"blue\", wx.NullColour, f)) #用新字体设置样式 \r\n\t\tsizer = wx.FlexGridSizer(cols=2, hgap=6, vgap=6)\t\t\r\n\t\tsizer.AddMany([self.multiLabel1, self.multiText1, self.richLabel, self.richText,self.multiLabel2, self.multiText2]) \r\n\t\tself.panel.SetSizer(sizer)\r\n\r\n\t\tbutton2=wx.Button(self.panel,label=\"Get Good Reputation\",pos=(288,140))\r\n\t\tself.btnId2=button2.GetId()\r\n\t\tself.Bind(wx.EVT_BUTTON,self.getRepu,button2)\r\n\t\tbutton3=wx.Button(self.panel,label=\"Get different model\",pos=(288,240))\r\n\t\tself.btnId3=button2.GetId()\r\n\t\tself.Bind(wx.EVT_BUTTON,self.getBrand,button3)\r\n\r\n\tdef getPrice(self,event):\r\n\t\tfor price in pyspi.getPrice():\r\n\t\t\tself.multiText1.AppendText(price.text+\"\\n\")\r\n\t\t\r\n\r\n\tdef getRepu(self,event):\r\n\t\t\r\n\r\n\t\tself.richText.AppendText(pyspi.getRepu()) \r\n\t\t\r\n\tdef getBrand(self,event):\t\t\r\n\t\tfor t in pyspi.getBrand():\r\n\t\t\tself.multiText2.AppendText(t.text+\"\\n\")\r\n\t\t\t\r\n\r\n\r\nif __name__==\"__main__\":\r\n\tapp = MyApp(False) \r\n\tapp.MainLoop() ","repo_name":"Eggwardhan/pythonhomework","sub_path":"wxp.py","file_name":"wxp.py","file_ext":"py","file_size_in_byte":2749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14333527297","text":"\"\"\"\nuSpherum ESC Console Client\n\nUsage:\n cli.py config list\n cli.py config set \n cli.py ll list [--sort=] [--filter=] [--group=]\n cli.py ll status [--sort=] [--filter=] [--group=]\n cli.py ll show \n cli.py ll add [--lat ] [--lng ]\n cli.py ll cmd (--relay= | --mode=)\n cli.py llgr list\n cli.py (--interactive|-i)\n cli.py show \n\nOptions:\n -h --help Show this help screen\n --lat Object latitude\n --lng Object longitude\n\"\"\"\nfrom json import load, dump, loads, dumps\nfrom uuid import UUID\nimport pkg_resources\nimport pickle\nimport datetime\nfrom urllib.parse import urlparse\nfrom random import randint\nfrom typing import Sequence\nfrom os.path import dirname, join, exists\nfrom collections import OrderedDict, abc\nfrom asyncio import get_event_loop\nfrom urllib.parse import urljoin\nfrom pprint import pprint\nfrom jsonschema import validate\nfrom docopt import docopt, DocoptExit, printable_usage\nfrom tabulate import tabulate\nfrom websockets.client import Connect\nimport websockets\nimport requests\n\n\ndef deep_update(d, u):\n for k, v in u.items():\n if isinstance(v, abc.Mapping):\n d[k] = deep_update(d.get(k, {}), v)\n else:\n d[k] = v\n return d\n\n\ndef utc_dt() -> datetime.datetime:\n return datetime.datetime.utcnow().astimezone()\n\n\ndef utc_ts(mult=1e6) -> int:\n \"\"\"Микросекунды\"\"\"\n return int(utc_dt().timestamp()*mult)\n\n\ndef sid() -> int:\n return randint(1, 1 << 128)\n\n\nclass Config:\n config_path = join(dirname(__file__), '.esc-cli.json')\n config_schema = {\n 'type': 'object',\n 'properties': {\n 'api.url': {\n 'type': 'string'\n },\n 'api.http.auth.token': {\n 'type': 'string'\n },\n 'api.ws.auth.token': {\n 'type': 'string'\n }\n },\n 'required': ['api.url', 'api.http.auth.token', 'api.ws.auth.token']\n }\n\n @classmethod\n def default_config(cls) -> dict:\n return {\n 'api.url': 
{"seq_id":"14333527297","text":"\"\"\"\nuSpherum ESC Console Client\n\nUsage:\n    cli.py config list\n    cli.py config set <key> <value>\n    cli.py ll list [--sort=<field>] [--filter=<filter>] [--group=<group_id>]\n    cli.py ll status [--sort=<field>] [--filter=<filter>] [--group=<group_id>]\n    cli.py ll show <id>\n    cli.py ll add [--lat <lat>] [--lng <lng>]\n    cli.py ll cmd (--relay=<state> | --mode=<mode>)\n    cli.py llgr list\n    cli.py (--interactive|-i)\n    cli.py show <what>\n\nOptions:\n    -h --help    Show this help screen\n    --lat <lat>  Object latitude\n    --lng <lng>  Object longitude\n\"\"\"\nfrom json import load, dump, loads, dumps\nfrom uuid import UUID\nimport pkg_resources\nimport pickle\nimport datetime\nfrom urllib.parse import urlparse\nfrom random import randint\nfrom typing import Sequence\nfrom os.path import dirname, join, exists\nfrom collections import OrderedDict, abc\nfrom asyncio import get_event_loop\nfrom urllib.parse import urljoin\nfrom pprint import pprint\nfrom jsonschema import validate\nfrom docopt import docopt, DocoptExit, printable_usage\nfrom tabulate import tabulate\nfrom websockets.client import Connect\nimport websockets\nimport requests\n\n\ndef deep_update(d, u):\n    for k, v in u.items():\n        if isinstance(v, abc.Mapping):\n            d[k] = deep_update(d.get(k, {}), v)\n        else:\n            d[k] = v\n    return d\n\n\ndef utc_dt() -> datetime.datetime:\n    return datetime.datetime.utcnow().astimezone()\n\n\ndef utc_ts(mult=1e6) -> int:\n    \"\"\"Microseconds\"\"\"\n    return int(utc_dt().timestamp()*mult)\n\n\ndef sid() -> int:\n    return randint(1, 1 << 128)\n\n\nclass Config:\n    config_path = join(dirname(__file__), '.esc-cli.json')\n    config_schema = {\n        'type': 'object',\n        'properties': {\n            'api.url': {\n                'type': 'string'\n            },\n            'api.http.auth.token': {\n                'type': 'string'\n            },\n            'api.ws.auth.token': {\n                'type': 'string'\n            }\n        },\n        'required': ['api.url', 'api.http.auth.token', 'api.ws.auth.token']\n    }\n\n    @classmethod\n    def default_config(cls) -> dict:\n        return {\n            'api.url': 'http://localhost:8000',\n            'api.http.auth.token': 'SECRET_AUTH_TOKEN',\n            'api.ws.auth.token': 'SECRET_WS_AUTH_TOKEN',\n        }\n\n    @classmethod\n    def _load_current_config(cls) -> dict:\n        cfg = cls.default_config()\n        if exists(cls.config_path):\n            with open(cls.config_path) as f:\n                deep_update(cfg, load(f))\n\n        validate(cfg, cls.config_schema)\n        return cfg\n\n    @classmethod\n    def list(cls) -> dict:\n        return {\n            'config.path': cls.config_path,\n            **cls._load_current_config()\n        }\n\n    @classmethod\n    def get(cls, key):\n        return cls._load_current_config()[key]\n\n    @classmethod\n    def set(cls, key, value) -> dict:\n        cfg = cls._load_current_config()\n        cfg[key] = value\n\n        validate(cfg, cls.config_schema)\n        with open(cls.config_path, 'w') as f:\n            dump(cfg, f, indent=4)\n\n\nclass Cache:\n    cache_path = '.cli-cache'\n\n    @classmethod\n    def _current_cache(cls) -> dict:\n        cache = {}\n\n        if exists(cls.cache_path):\n            with open(cls.cache_path, 'rb') as cache_file:\n                cache = pickle.load(cache_file)\n        return cache\n\n    @classmethod\n    def get(cls, key, default=None):\n        return cls._current_cache().get(key, default)\n\n    @classmethod\n    def set(cls, key, value):\n        current_cache = cls._current_cache()\n        current_cache[key] = value\n        with open(cls.cache_path, 'wb') as cache_file:\n            pickle.dump(current_cache, cache_file)\n\n\ndef ws_uri_from_url(url: str) -> str:\n    url = urlparse(url)\n    return dict(https='wss', http='ws')[url.scheme] + '://' + url.netloc\n\n\ndef config_list(args):\n    pprint(Config.list())\n\n\ndef config_set(args):\n    Config.set(args['<key>'], args['<value>'])\n\n\ndef _get_line_state(line_id):\n    url = urljoin(Config.get('api.url'), 'api/v1/lighting-line-state')\n    token = Config.get('api.http.auth.token')\n    lines_state, = tuple(\n        filter(\n            lambda st: st['line_id'] == line_id,\n            requests.get(\n                url, headers=dict(Authorization=f'Bearer {token}')).json()\n        )\n    )\n    del lines_state['line_id']\n    return lines_state\n\n\ndef _get_line(line_id):\n    url = urljoin(Config.get('api.url'), f'api/v1/lighting-line/{line_id}')\n    token = Config.get('api.http.auth.token')\n    line = requests.get(\n        url, headers=dict(Authorization=f'Bearer {token}')).json()\n    return line\n\n\ndef _get_lines():\n    url = urljoin(Config.get('api.url'), 'api/v1/lighting-line')\n    token = Config.get('api.http.auth.token')\n    lines = requests.get(\n        url, headers=dict(Authorization=f'Bearer {token}')).json()\n    return lines\n\n\ndef _get_lines_status():\n    url = urljoin(Config.get('api.url'), 'api/v1/lighting-line-state')\n    token = Config.get('api.http.auth.token')\n    lines_status = requests.get(\n        url, headers=dict(Authorization=f'Bearer {token}')).json()\n    return lines_status\n\n\ndef _get_line_groups():\n    url = urljoin(Config.get('api.url'), 'api/v1/lighting-line-group')\n    token = Config.get('api.http.auth.token')\n    groups = requests.get(\n        url, headers=dict(Authorization=f'Bearer {token}')).json()\n    return groups\n\n\n
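The five _get_* helpers above repeat the same GET-plus-Bearer-token-plus-json() pattern; a possible shared helper, where the name api_get is mine and not part of the original client:

import requests
from urllib.parse import urljoin

def api_get(path: str):
    # one place for the base URL, the auth header and the JSON decode
    url = urljoin(Config.get('api.url'), path)
    token = Config.get('api.http.auth.token')
    return requests.get(url, headers={'Authorization': f'Bearer {token}'}).json()

# e.g. _get_lines() would reduce to: return api_get('api/v1/lighting-line')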
lines = sorted(\n        lines,\n        key=lambda l: UUID(l[sort_by]) if sort_by == 'id' else l[sort_by])\n\n    return [l for l in lines if ll_filter(l)]\n\n\ndef ll_list(args):\n    lines = _get_lines()\n    sort_by = args['--sort'] or 'id'\n    group_id = args['--group']\n    required_filter = args['--filter']\n\n    if isinstance(lines, (tuple, list)):\n        Cache.set('ll', lines)\n        lines = apply_line_filters_and_sorting(\n            lines, sort_by, group_id, required_filter)\n\n        Cache.set('last_choosed_ll', lines)\n\n        tab = tabulate([list(l.values()) for l in lines],\n                       headers=['Id', 'Name', 'Lat', 'Lng',\n                                'In maintenance', 'Groups'])\n        Cache.set('last_showed_ll', tab)\n        Cache.set('last_showed', tab)\n        print(tab)\n    else:\n        print(lines)\n\n\ndef ll_status(args):\n    lines = Cache.get('ll')\n    if lines is None:\n        lines = _get_lines()\n\n    lines_status = _get_lines_status()\n    lines_status_by_id = {l_st['line_id']: l_st for l_st in lines_status}\n\n    sort_by = args['--sort'] or 'id'\n    group_id = args['--group']\n    required_filter = args['--filter']\n\n    if isinstance(lines_status, (tuple, list)):\n        Cache.set('ll', lines)\n        lines = apply_line_filters_and_sorting(\n            lines, sort_by, group_id, required_filter)\n\n        Cache.set('last_choosed_ll', lines)\n\n        for line in lines:\n            del line['lat']\n            del line['lng']\n            del line['groups']\n\n            line_st = lines_status_by_id.get(line['id'], {})\n            el_params = line_st.get('el_params', {}).get('val', {}) or {}\n            pwr_actv = el_params.get('pwr_actv', ['-'])[0]\n            if isinstance(pwr_actv, float):\n                pwr_actv = round(pwr_actv, 3)\n\n            eap = el_params.get('eap', ['-'])[0]\n            if isinstance(eap, float):\n                eap = round(eap, 3)\n\n            line.update(dict(\n                relay=line_st.get('relay', {}).get('val', '-'),\n                ctl_mode=line_st.get('ctl_mode', {}).get('val', '-'),\n                pwr_actv=pwr_actv, eap=eap))\n\n        tab = tabulate([list(l.values()) for l in lines],\n                       headers=['Id', 'Name', 'In maintenance', 'Relay',\n                                'Ctl. mode', 'Pwr. 
Active', 'Energy Sum.'])\n Cache.set('last_showed_ll_st', tab)\n Cache.set('last_showed', tab)\n print(tab)\n else:\n print(lines_status)\n\n\ndef ll_groups_list(args):\n groups = _get_line_groups()\n\n if isinstance(groups, (tuple, list)):\n groups = sorted(groups, key=lambda g: g['group_id'])\n Cache.set('last_choosed_llgr', groups)\n\n tab = tabulate([\n [v for k, v in gr.items() if k != 'lines'] for gr in groups\n ], headers=['Id', 'Name', 'Child groups'])\n\n Cache.set('last_showed_llgr', tab)\n Cache.set('last_showed', tab)\n print(tab)\n\n else:\n print(groups)\n\n\ndef ll_show(args):\n lines = Cache.get('ll')\n id_ = args['']\n\n if lines is None:\n lines = _get_lines()\n\n if isinstance(lines, (tuple, list)):\n choose, *_ = tuple(filter(lambda l: l['id'].startswith(id_), lines))\n line = _get_line(choose['id'])\n line.update(_get_line_state(choose['id']))\n pprint(line)\n\n\ndef show_last(args):\n nothing = 'Nothing to show'\n if args[''] == '_':\n print(Cache.get('last_showed', nothing))\n\n elif args[''] == 'll':\n print(Cache.get('last_showed_ll', nothing))\n\n elif args[''] == 'll_st':\n print(Cache.get('last_showed_ll_st', nothing))\n\n elif args[''] == 'llgr':\n print(Cache.get('last_showed_llgr', nothing))\n\n\nasync def _auth(ws) -> dict:\n await ws.send(dumps({\n \"ver\": 1,\n \"proto\": \"authReq\",\n \"ts\": utc_ts(),\n \"sid\": sid(),\n \"authToken\": Config.get('api.ws.auth.token')\n }))\n return loads(await ws.recv())\n\n\nasync def lines_relay_cmd(lines: Sequence[dict], relay_state: str):\n relay_state = relay_state.lower()\n assert relay_state in ('on', 'off')\n\n uri = f\"{ws_uri_from_url(Config.get('api.url'))}/ws\"\n async with websockets.connect(uri) as ws:\n print(await _auth(ws))\n await ws.send(dumps({\n \"ver\": 1,\n \"proto\": \"objSendCtlCmdReq\",\n \"ts\": utc_ts(),\n \"sid\": sid(),\n \"cmd\": \"relayEnable\" if relay_state == 'on' else 'relayDisable',\n \"cmdParams\": {},\n \"objects\": [\n {\n \"id\": line['id'],\n \"type\": \"line\",\n } for line in lines\n ]\n }\n ))\n print(loads(await ws.recv()))\n\n\nasync def lines_mode_cmd(lines: Sequence[dict], mode: str):\n assert mode in ('auto:sch', 'manual')\n\n uri = f\"{ws_uri_from_url(Config.get('api.url'))}/ws\"\n async with websockets.connect(uri) as ws:\n print(await _auth(ws))\n await ws.send(dumps({\n \"ver\": 1,\n \"proto\": \"setCtlModeReq\",\n \"ts\": utc_ts(),\n \"sid\": sid(),\n \"objects\": [\n {\n \"id\": line['id'],\n \"type\": \"line\",\n \"mode\": mode\n } for line in lines\n ]\n }\n ))\n print(loads(await ws.recv()))\n\n\ndef ll_cmd(args):\n lines = Cache.get('last_choosed_ll')\n if lines is None:\n print('No lines choosed')\n return\n\n if args['--relay'] is not None:\n resp = input(\n f'Set relay {args[\"--relay\"]} for {len(lines)} lines? [y/N]: ')\n\n if resp.lower() == 'y':\n get_event_loop().run_until_complete(\n lines_relay_cmd(lines, args['--relay']))\n\n if args['--mode'] is not None:\n resp = input(\n f'Set mode {args[\"--mode\"]} for {len(lines)} lines? 
[y/N]: ')\n\n        if resp.lower() == 'y':\n            get_event_loop().run_until_complete(\n                lines_mode_cmd(lines, args['--mode']))\n\n\ndef determine_command(commands: OrderedDict, arguments: dict):\n    for cmd in commands:\n        if all(arguments.get(token, False) for token in cmd.split('.')):\n            return commands[cmd]\n\n\ndef start_interactive():\n    while True:\n        command = input('> ')\n        if command in ('exit', 'quit', '\\q'):\n            return\n\n        try:\n            arguments = docopt(\n                __doc__,\n                argv=command.split(),\n                version=pkg_resources.get_distribution(\n                    'uspherum-esc-cli').version\n            )\n            dispatch_command(arguments, is_interactive=True)\n        except DocoptExit:\n            print(printable_usage(__doc__))\n\n\ndef dispatch_command(arguments, is_interactive=False):\n    commands = OrderedDict({\n        'config.list': config_list,\n        'config.set': config_set,\n        'll.list': ll_list,\n        'll.status': ll_status,\n        'll.show': ll_show,\n        'll.cmd': ll_cmd,\n        'llgr.list': ll_groups_list,\n        'show': show_last\n    })\n\n    command = determine_command(commands, arguments)\n    if command is not None:\n        command(arguments)\n    else:\n        print('Unknown command requested!', printable_usage(__doc__))\n\n\ndef main():\n    arguments = docopt(\n        __doc__,\n        version=pkg_resources.get_distribution('uspherum-esc-cli').version\n    )\n\n    # Interactive mode using WebSocket\n    if arguments['-i'] or arguments['--interactive']:\n        start_interactive()\n        return\n\n    dispatch_command(arguments)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"Viktorokh96/usp_esc_cli","sub_path":"esc_cli/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":13848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73114594825","text":"import json\nimport os\nimport csv\nimport io\n\nfrom datetime import datetime\nfrom decimal import Decimal\nfrom typing import Dict, List\n\nimport boto3\n\n\nS3_CLIENT = boto3.client(\"s3\")\n\n\ndef lambda_handler(event: dict, context):\n    \"\"\"Handler that will find the weather station that has the highest average temperature by month.\n\n    Returns a dictionary with \"year-month\" as the key and dictionary (weather station info) as value.\n\n    \"\"\"\n    input_bucket_name = os.environ[\"INPUT_BUCKET_NAME\"]\n\n    high_by_month: Dict[str, Dict] = {}\n\n    for item in event[\"Items\"]:\n        csv_data = get_file_from_s3(input_bucket_name, item[\"Key\"])\n        dict_data = get_csv_dict_from_string(csv_data)\n\n        for row in dict_data:\n            avg_temp = float(row[\"TEMP\"])\n\n            date = datetime.fromisoformat(row[\"DATE\"])\n            month_str = date.strftime(\"%Y-%m\")\n\n            monthly_high_record = high_by_month.get(month_str) or {}\n\n            if not monthly_high_record:\n                row[\"TEMP\"] = avg_temp\n                high_by_month[month_str] = row\n                continue\n\n            if avg_temp > float(monthly_high_record[\"TEMP\"]):\n                high_by_month[month_str] = row\n\n    return high_by_month\n\n\ndef reducer_handler(event: dict, context: dict):\n    \"\"\"Reducer function will read all of the mapped results from S3 and write to DDB.\n\n    Args:\n        event (dict): The event payload that arrives after the distributed map run has the\n        following structure:\n\n        {\n            \"MapRunArn\": \"arn-of-the-map-run\",\n            \"ResultWriterDetails\": {\n                \"Bucket\": \"bucket-name-where-results-are-written\",\n                \"Key\": \"results/dee8fb57-3653-3f09-88dd-4f39225d2367/manifest.json\",\n            },\n        }\n        context (dict): Lambda context\n    \"\"\"\n    print(event)\n    results_bucket = event[\"ResultWriterDetails\"][\"Bucket\"]\n    manifest = get_file_from_s3(\n        results_bucket,\n        event[\"ResultWriterDetails\"][\"Key\"],\n    )\n\n    
manifest_json = json.loads(manifest)\n\n    high_by_month: Dict[str, Dict] = {}\n\n    for result in manifest_json[\"ResultFiles\"].get(\"SUCCEEDED\", []):\n        results = get_file_from_s3(results_bucket, result[\"Key\"])\n\n        for json_result in json.loads(results):\n\n            monthly_highs: Dict[str, Dict] = json.loads(json_result[\"Output\"])\n\n            for month_str, row in monthly_highs.items():\n                high_temp = float(row[\"TEMP\"])\n\n                monthly_high = high_by_month.get(month_str)\n\n                if not monthly_high:\n                    high_by_month[month_str] = row\n                    continue\n\n                if high_temp > float(monthly_high[\"TEMP\"]):\n                    high_by_month[month_str] = row\n\n    _write_results_to_ddb(high_by_month)\n\n\ndef _write_results_to_ddb(high_by_month: Dict[str, Dict]):\n    dynamodb = boto3.resource(\"dynamodb\")\n    table = dynamodb.Table(os.environ[\"RESULTS_DYNAMODB_TABLE_NAME\"])\n\n    for month_str, row in high_by_month.items():\n        row[\"pk\"] = month_str\n        row[\"TEMP\"] = round(Decimal(row[\"TEMP\"]), 1)\n        table.put_item(Item=row)\n\n\ndef get_file_from_s3(input_bucket_name: str, key: str) -> str:\n    resp = S3_CLIENT.get_object(Bucket=input_bucket_name, Key=key)\n    return resp[\"Body\"].read().decode(\"utf-8\")\n\n\ndef get_csv_dict_from_string(csv_string: str) -> dict:\n    return csv.DictReader(io.StringIO(csv_string))\n","repo_name":"aws-samples/aws-stepfunctions-examples","sub_path":"sam/demo-distributed-map-data-processing/functions/temps/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3414,"program_lang":"python","lang":"en","doc_type":"code","stars":191,"dataset":"github-code","pt":"81"} +{"seq_id":"23766671436","text":"import matplotlib.pyplot as mpl\nimport MathModule\nfrom numpy import linspace\n\n\nclass Graph(object):\n    def __init__(self):\n        pass\n\n    def create_window(self, interval_a, interval_b, root_list):\n        mpl.title('Function graph')\n        mpl.axhline(0, color='black')\n        mpl.axvline(0, color='black')\n\n        x = [i for i in linspace(interval_a, interval_b, 300)]\n        y = [MathModule.function(i) for i in x]\n        # dy = [derivative(MathModule.function, i) for i in x]\n\n        values_in_roots = [MathModule.function(i) for i in root_list]\n\n        mpl.plot(x, y, 'b')\n        mpl.plot(root_list, values_in_roots, 'ro')\n\n        extremum_points = []\n        extremum_values = []\n\n        for i in range(1, len(y) - 1):\n            if y[i - 1] <= y[i] > y[i + 1] or y[i - 1] >= y[i] < y[i + 1]:\n                extremum_points.append(x[i])\n                extremum_values.append(y[i])\n\n        mpl.plot(extremum_points, extremum_values, 'go')\n\n        mpl.grid()\n        mpl.show()\n","repo_name":"TheFunGun36/bmstu-python","sub_path":"py_lab03/Graph.py","file_name":"Graph.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4749998796","text":"import unittest\nimport random\nimport time\nimport sys\nimport os\nfrom fake_useragent import UserAgent\nfrom pprint import pprint\n\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\", \"src\")))\nfrom site_controllers import LinkedInController\n\n\nclass LoginForm(unittest.TestCase):\n\n    def test_LinkedIn(self):\n        ou_name = \"ÔỐỒỔỖỘÔỐỒỔỖỘôốồổỗộôố ƯỨỪỬỮỰƯỨỪỬỮỰưứừửữựưứ\"\n        linkedin = LinkedInController(ou_name, \"linkedin.test11@facade-technologies.com\", \"linkedin.test11\")\n        linkedin.start()\n\n        linkedin.sendMessageTo(\"Mary-Ann Johnson\", \"What's up Mary-Ann?\")\n        linkedin.sendMessageTo(\"George d'tousla canil-bater\", \"Hey man, what's up?\")\n\n        linkedin.stop()\n\n    def test_backAndForthConvo(self):\n        mary_name = \"Mary-Ann Johnson\"\n        ou_name = 
\"ÔỐỒỔỖỘÔỐỒỔỖỘôốồổỗộôố ƯỨỪỬỮỰƯỨỪỬỮỰưứừửữựưứ\"\n\n ou = LinkedInController(ou_name, \"linkedin.test11@facade-technologies.com\", \"linkedin.test11\", options=[f'{UserAgent().random}'])\n mary = LinkedInController(mary_name, \"linkedin.test2@facade-technologies.com\", \"linkedintest2\", options=[f'{UserAgent().random}'])\n ou.start()\n mary.start()\n\n ou.sendMessageTo(mary_name, \"What's \\nup \\nMary-Ann?\")\n mary.sendMessageTo(ou_name, \"Nothing much, \\njust living the dream! \\n\\n\\nHow are your kids?\")\n ou.sendMessageTo(mary_name, \"They're great! And how about your wife?\")\n mary.sendMessageTo(ou_name, \"Well....\")\n mary.sendMessageTo(ou_name, \"She cheated on me with my sister.\")\n ou.sendMessageTo(mary_name, \"Ah that sucks!\")\n mary.sendMessageTo(ou_name, \"Well, what are you going to do?\")\n ou.sendMessageTo(mary_name, \"I don't know girl, what can you do?\")\n mary.sendMessageTo(ou_name, \"Turn lemons into lemonade I guess!\")\n ou.sendMessageTo(mary_name, \"True. You're really inspiring, you know that?\")\n mary.sendMessageTo(ou_name, \"That's what my wife said... right before she cheated on me...\")\n\n ou.stop()\n mary.stop()\n\n def test_getConversationHistory(self):\n mary_name = \"Mary-Ann Johnson\"\n ou_name = \"ÔỐỒỔỖỘÔỐỒỔỖỘôốồổỗộôố ƯỨỪỬỮỰƯỨỪỬỮỰưứừửữựưứ\"\n\n ou = LinkedInController(ou_name, \"linkedin.test11@facade-technologies.com\", \"linkedin.test11\", options=[f'{UserAgent().random}'])\n ou.start()\n\n numMessages = 1\n conversation = ou.getConversationHistory(mary_name, 1)\n assert len(conversation) == numMessages\n\n ou.stop()\n\n def test_dateTimeConversions(self):\n from common.datetime import convertToDate, convertToTime, combineDateAndTime\n\n saturday = convertToDate(\"Saturday\")\n\n today = convertToDate(\"Today\")\n yesterday = convertToDate(\"Yesterday\")\n\n jun_22 = convertToDate(\"JUN 22\")\n jun_22_2019 = convertToDate(\"JUN 22, 2019\")\n\n eight_thirty_am = convertToTime(\"8:30 AM\")\n nine_thirty_am = convertToTime(\"09:30 AM\")\n twelve_thirty_pm = convertToTime(\"12:30 PM\")\n eleven_thirty_pm = convertToTime(\"11:30 PM\")\n\n comb1 = combineDateAndTime(saturday, eight_thirty_am)\n comb2 = combineDateAndTime(jun_22, twelve_thirty_pm)\n comb3 = combineDateAndTime(jun_22_2019, nine_thirty_am)\n comb4 = combineDateAndTime(today, eleven_thirty_pm)\n\n print(comb1)\n print(comb2)\n print(comb3)\n print(comb4)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"smalbadger/social","sub_path":"tests/turnin.py","file_name":"turnin.py","file_ext":"py","file_size_in_byte":3653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"44439746369","text":"from os.path import join\nfrom ob2.dockergrader.helpers import (\n copy,\n copytree,\n download_repository,\n ensure_files_exist,\n ensure_no_binaries,\n extract_repository,\n get_working_directory,\n safe_get_results,\n)\nfrom ob2.dockergrader.job import JobFailedError\nfrom ob2.dockergrader.rpc import DockerClient, TimeoutError\nfrom ob2.util.hooks import register_job\n\nfrom cs162.common import RESOURCES_DIR\n\n# This is the docker image we'll use to run our autograder\ndocker_image = \"cs162:latest\"\n\n\n@register_job(\"hw1\")\ndef build(source, commit):\n # We're using 2 Python context managers here:\n # - get_working_directory() of them creates a temporary directory on the host\n # - DockerClient().start() creates a new Docker container for this autograder job\n # We mount our temporary directory to \"/host\" inside the Docker 
container, so we can\n # communicate with it.\n with get_working_directory() as wd, \\\n DockerClient().start(docker_image, volumes={wd: \"/host\"}) as container:\n\n # These helper functions download and extract code from GitHub.\n # try:\n # download_repository(source, commit, join(wd, \"hw1.tar.gz\"))\n # extract_repository(container, join(\"/host\", \"hw1.tar.gz\"), \"/home/vagrant/ag\",\n # user=\"vagrant\")\n # except TimeoutError:\n # raise JobFailedError(\"I was downloading and extracting your code from GitHub, but I \"\n # \"took too long to finish and timed out. Try again?\")\n\n # For testing purposes, we can use solution code instead of GitHub code to test our\n # autograder. You're free to leave in commented code in order to support this.\n container.bash(\"mkdir -p /home/vagrant/ag/hw1\", user=\"vagrant\")\n copytree(join(RESOURCES_DIR, \"hw-exec\", \"solutions\"), join(wd, \"solutions\"))\n container.bash(\"cp -R /host/solutions/. /home/vagrant/ag/hw1/\", user=\"vagrant\")\n\n # These functions will raise JobFailedError if they find problems with the student code\n ensure_no_binaries(container, \"/home/vagrant/ag\")\n ensure_files_exist(container, \"/home/vagrant/ag\", [\"./hw1/Makefile\", \"./hw1/main.c\",\n \"./hw1/map.c\", \"./hw1/wc.c\"])\n\n # Our autograder consists of 2 Python scripts. You are free to use whatever language you\n # want in your autograders, as long as you make sure your Docker container can run them.\n copy(join(RESOURCES_DIR, \"hw-exec\", \"check.py\"), join(wd, \"check.py\"))\n copy(join(RESOURCES_DIR, \"common.py\"), join(wd, \"common.py\"))\n\n # We run our autograder, which produces 2 outputs: a build log and a score.\n try:\n container.bash(\"\"\"cd /host\n python2.7 check.py /home/vagrant/ag/hw1 &>build_log 162>score\n \"\"\", user=\"vagrant\", timeout=60)\n except TimeoutError:\n raise JobFailedError(\"I was grading your code, but the autograder took too long to \" +\n \"finish and timed out. 
Try again?\")\n\n # This function will safely retrieve the build log and the score value, without throwing\n # unexpected exceptions.\n return safe_get_results(join(wd, \"build_log\"), join(wd, \"score\"))\n","repo_name":"octobear2/my-ob2-config","sub_path":"cs162/hw1.py","file_name":"hw1.py","file_ext":"py","file_size_in_byte":3356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14432212642","text":"from django import forms\nfrom django.utils.translation import gettext_lazy as _\n\nfrom .models import BidWindow, Bid\n\n\nclass BidWindowAuctionForm(forms.Form):\n bid_window_id = forms.IntegerField()\n\n def save(self):\n try:\n window = BidWindow.objects.get(id=self.cleaned_data.get('bid_window_id'))\n except:\n raise forms.ValidationError(_('No Bid Window found'))\n window.auction()\n\n\nclass BidForm(forms.ModelForm):\n class Meta:\n model = Bid\n fields = ['number_of_tokens', 'bidding_price', 'user', 'bid_window']\n widgets = {\n 'user': forms.HiddenInput(),\n 'bid_window': forms.HiddenInput(),\n }\n","repo_name":"oktavianustjoea/crypcentra","sub_path":"ico/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"85509299","text":"def solution(n):\n num = list(map(int, (str(n))))#int형 리스트\n num.sort(reverse=True) # 각숫자들을 역sort\n num2 = list(map(str, num))#역sort한 결과를 다시 str형태 리스트로\n\n answer = int(\"\".join(num2))#문자열 리스트를 이어 int형으로 변환\n return answer\n\n\nn=118732\nsolution(n)","repo_name":"Areum0921/Abox","sub_path":"num desc.py","file_name":"num desc.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"29747620038","text":"# -*- coding: utf-8 -*-\nimport json\nfrom threading import Thread\nfrom windows import BaseDialog\nfrom apis.tmdb_api import tmdb_people_info, tmdb_people_full_info\nfrom apis.imdb_api import imdb_videos\nfrom indexers import dialogs\nfrom indexers.images import Images\nfrom modules.utils import calculate_age\nfrom modules.kodi_utils import translate_path, notification, show_text, local_string as ls\nfrom modules.settings import extras_enable_scrollbars, extras_exclude_non_acting, get_resolution\nfrom modules.settings_reader import get_setting\n# from modules.kodi_utils import logger\n\ntmdb_image_base = 'https://image.tmdb.org/t/p/%s%s'\nfanart = translate_path('special://home/addons/plugin.video.ezra/fanart.png')\nbackup_thumbnail = translate_path('special://home/addons/plugin.video.ezra/resources/media/box_office.png')\nbackup_cast_thumbnail = translate_path('special://home/addons/plugin.video.ezra/resources/media/genre_family.png')\nroles_exclude = ('himself', 'herself', 'self', 'narrator', 'voice (voice)')\nbutton_ids = [10, 11, 50]\ngenres_exclude = (10763, 10764, 10767)\t\t\ngender_dict = {0: '', 1: ls(32844), 2: ls(32843), 3: ls(32466)}\nmore_from_movies_id, more_from_tvshows_id, imdb_videos_id, more_from_director_id = 2050, 2051, 2052, 2053\n\nclass People(BaseDialog):\n\tdef __init__(self, *args, **kwargs):\n\t\tBaseDialog.__init__(self, args)\n\t\tself.control_id = None\n\t\tself.kwargs = kwargs\n\t\tself.set_starting_constants()\n\t\tself.make_person_data()\n\t\tself.set_properties()\n\n\tdef onInit(self):\n\t\tThread(target=self.make_imdb_videos).start()\n\t\tThread(target=self.make_more_from, args=('movie',)).start()\n\t\tThread(target=self.make_more_from, 
args=('tvshow',)).start()\n\t\tThread(target=self.make_more_from, args=('director',)).start()\n\n\tdef run(self):\n\t\tself.doModal()\n\t\tself.clearProperties()\n\n\tdef onClick(self, controlID):\n\t\tself.control_id = None\n\t\tif controlID in button_ids:\n\t\t\tif controlID == 10:\n\t\t\t\tparams = {'mode': 'people_image_results', 'actor_name': self.person_name, 'actor_id': self.person_id,\n\t\t\t\t\t\t'actor_image': self.person_image, 'page_no': 1, 'rolling_count': 0}\n\t\t\t\tImages().run(params)\n\t\t\telif controlID == 11:\n\t\t\t\tparams = {'mode': 'people_tagged_image_results', 'actor_name': self.person_name, 'actor_id': self.person_id}\n\t\t\t\tImages().run(params)\n\t\t\telif controlID == 50:\n\t\t\t\tshow_text(self.person_name, self.person_biography, font_size='large')\n\t\telse: self.control_id = controlID\n\n\tdef onAction(self, action):\n\t\tif action in self.closing_actions: self.close()\n\t\tif not self.control_id: return\n\t\tif action in self.selection_actions:\n\t\t\tchosen_listitem = self.get_listitem(self.control_id)\n\t\t\tchosen_var = chosen_listitem.getProperty(self.item_action_dict[self.control_id])\n\t\t\tif self.control_id in (2050, 2051, 2053):\n\t\t\t\tif self.control_id in (2050, 2053): media_type = 'movie'\n\t\t\t\telse: media_type = 'tvshow'\n\t\t\t\tparams = {'tmdb_id': chosen_var, 'media_type': media_type, 'is_widget': 'false'}\n\t\t\t\treturn dialogs.extras_menu(params)\n\t\t\telif self.control_id == 2052:\n\t\t\t\tparams = json.loads(chosen_var)\n\t\t\t\tchosen = dialogs.imdb_videos_choice(params['videos'], params['thumb'])\n\t\t\t\tif not chosen: return\n\t\t\t\treturn self.open_window(('windows.videoplayer', 'VideoPlayer'), 'videoplayer.xml', video=chosen)\n\n\tdef make_person_data(self):\n\t\tif self.kwargs['query']:\n\t\t\ttry: self.person_id = tmdb_people_info(self.kwargs['query'])[0]['id']\n\t\t\texcept: notification(32760)\n\t\telse: self.person_id = self.kwargs['actor_id']\n\t\tperson_info = tmdb_people_full_info(self.person_id)\n\t\tif person_info.get('biography', None) in ('', None): person_info = tmdb_people_full_info(self.person_id, 'en')\n\t\tself.person_name = person_info['name']\n\t\timage_path = person_info['profile_path']\n\t\tif image_path: self.person_image = tmdb_image_base % ('h632', image_path)\n\t\telse: self.person_image = backup_cast_thumbnail\n\t\ttry: self.person_gender = gender_dict[person_info.get('gender')]\n\t\texcept: self.person_gender = ''\n\t\tplace_of_birth = person_info.get('place_of_birth')\n\t\tif place_of_birth: self.person_place_of_birth = place_of_birth\n\t\telse: self.person_place_of_birth = ''\n\t\tbiography = person_info.get('biography')\n\t\tif biography: self.person_biography = biography\n\t\telse: self.person_biography = ls(32760)\n\t\tbirthday = person_info.get('birthday')\n\t\tif birthday: self.person_birthday = birthday\n\t\telse: self.person_birthday = ''\n\t\tdeathday = person_info.get('deathday')\n\t\tif deathday: self.person_deathday = deathday\n\t\telse: self.person_deathday = ''\n\t\tif self.person_deathday: self.person_age = calculate_age(self.person_birthday, '%Y-%m-%d', self.person_deathday)\n\t\telif self.person_birthday: self.person_age = calculate_age(self.person_birthday, '%Y-%m-%d')\n\t\telse:self.person_age = ''\n\t\tself.imdb_id = person_info['imdb_id']\n\t\tmore_from_data = person_info['combined_credits']\n\t\tacting_data = more_from_data['cast']\n\t\tdirecting_data = more_from_data['crew']\n\t\tself.movie_data = [i for i in acting_data if i['media_type'] == 
'movie']\n\t\tself.tvshow_data = [i for i in acting_data if i['media_type'] == 'tv']\n\t\tself.director_data = [i for i in directing_data if i['job'].lower() == 'director']\n\n\tdef make_more_from(self, media_type):\n\t\ttry:\n\t\t\tif media_type == 'movie':\n\t\t\t\tlist_type = media_type\n\t\t\t\t_id = more_from_movies_id\n\t\t\t\tdata = self.movie_data\n\t\t\t\tif self.exclude_non_acting:\n\t\t\t\t\ttry: data = [i for i in data if not 99 in i['genre_ids'] and not i['character'].lower() in roles_exclude]\n\t\t\t\t\texcept: pass\n\t\t\telif media_type == 'tvshow':\n\t\t\t\tlist_type = media_type\n\t\t\t\t_id = more_from_tvshows_id\n\t\t\t\tdata = self.tvshow_data\n\t\t\t\tif self.exclude_non_acting:\n\t\t\t\t\ttry: data = [i for i in data if not any(x in genres_exclude for x in i['genre_ids']) and not i['character'].lower() in roles_exclude]\n\t\t\t\t\texcept: pass\n\t\t\telse:#director\n\t\t\t\tlist_type = 'movie'\n\t\t\t\t_id = more_from_director_id\n\t\t\t\tdata = self.director_data\n\t\t\titem_list = list(self.make_tmdb_listitems(data, list_type))\n\t\t\tself.setProperty('tikiskins.person.more_from_%s.number' % media_type, '(x%02d)' % len(item_list))\n\t\t\tself.item_action_dict[_id] = 'tikiskins.person.tmdb_id'\n\t\t\tcontrol = self.getControl(_id)\n\t\t\tcontrol.addItems(item_list)\n\t\texcept: pass\n\n\tdef make_imdb_videos(self):\n\t\tdef builder():\n\t\t\tfor count, item in enumerate(data, 1):\n\t\t\t\ttry:\n\t\t\t\t\tlistitem = self.make_listitem()\n\t\t\t\t\tlistitem.setProperty('tikiskins.person.name', '%01d. %s' % (count, item['title']))\n\t\t\t\t\tlistitem.setProperty('tikiskins.person.thumbnail', item['poster'])\n\t\t\t\t\tlistitem.setProperty('tikiskins.person.params', json.dumps({'videos': json.dumps(item['videos']), 'thumb': item['poster']}))\n\t\t\t\t\tyield listitem\n\t\t\t\texcept: pass\n\t\ttry:\n\t\t\tdata = imdb_videos(self.imdb_id)\n\t\t\titem_list = list(builder())\n\t\t\tself.setProperty('tikiskins.person.imdb_videos.number', '(x%02d)' % len(item_list))\n\t\t\tself.item_action_dict[imdb_videos_id] = 'tikiskins.person.params'\n\t\t\tcontrol = self.getControl(imdb_videos_id)\n\t\t\tcontrol.addItems(item_list)\n\t\texcept: pass\n\n\tdef make_tmdb_listitems(self, data, media_type):\n\t\tused_ids = []\n\t\tappend = used_ids.append\n\t\tname_key = 'title' if media_type == 'movie' else 'name'\n\t\trelease_key = 'release_date' if media_type == 'movie' else 'first_air_date'\n\t\tfor item in data:\n\t\t\ttry:\n\t\t\t\ttmdb_id = item['id']\n\t\t\t\tif tmdb_id in used_ids: continue\n\t\t\t\tlistitem = self.make_listitem()\n\t\t\t\tposter_path = item['poster_path']\n\t\t\t\tif not poster_path: thumbnail = backup_thumbnail\n\t\t\t\telse: thumbnail = tmdb_image_base % (self.poster_resolution, poster_path)\n\t\t\t\tyear = item.get(release_key)\n\t\t\t\tif year in (None, ''): year = 'N/A'\n\t\t\t\telse:\n\t\t\t\t\ttry: year = year.split('-')[0]\n\t\t\t\t\texcept: pass\n\t\t\t\tlistitem.setProperty('tikiskins.person.name', item[name_key])\n\t\t\t\tlistitem.setProperty('tikiskins.person.release_date', year)\n\t\t\t\tlistitem.setProperty('tikiskins.person.vote_average', '%.1f' % item['vote_average'])\n\t\t\t\tlistitem.setProperty('tikiskins.person.thumbnail', thumbnail)\n\t\t\t\tlistitem.setProperty('tikiskins.person.tmdb_id', str(tmdb_id))\n\t\t\t\tappend(tmdb_id)\n\t\t\t\tyield listitem\n\t\t\texcept: pass\n\n\tdef set_starting_constants(self):\n\t\tself.highlight = get_setting('ezra_interface_color')\n\t\tself.item_action_dict = {}\n\t\tself.enable_scrollbars = 
extras_enable_scrollbars()\n\t\tself.exclude_non_acting = extras_exclude_non_acting()\n\t\tself.poster_resolution = get_resolution()['poster']\n\t\tself.plugin_runner = 'RunPlugin(%s)'\n\n\tdef set_properties(self):\n\t\tself.setProperty('tikiskins.person.name', self.person_name)\n\t\tself.setProperty('tikiskins.person.id', str(self.person_id))\n\t\tself.setProperty('tikiskins.person.image', self.person_image)\n\t\tself.setProperty('tikiskins.person.fanart', fanart)\n\t\tself.setProperty('tikiskins.person.gender', self.person_gender)\n\t\tself.setProperty('tikiskins.person.place_of_birth', self.person_place_of_birth)\n\t\tself.setProperty('tikiskins.person.biography', self.person_biography)\n\t\tself.setProperty('tikiskins.person.birthday', self.person_birthday)\n\t\tself.setProperty('tikiskins.person.deathday', self.person_deathday)\n\t\tself.setProperty('tikiskins.person.age', str(self.person_age))\n\t\tself.setProperty('tikiskins.highlight',self.highlight)\n\t\tself.setProperty('tikiskins.person.enable_scrollbars', self.enable_scrollbars)\n","repo_name":"abittencookie/abittencookie.github.io","sub_path":"repo/plugin.video.ezra/resources/lib/windows/people.py","file_name":"people.py","file_ext":"py","file_size_in_byte":9142,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"71027542026","text":"# https://www.acmicpc.net/problem/7576\n# Tomato\n\n\n\nfrom sys import stdin\nfrom collections import deque\n\n\nM, N = map(int, stdin.readline().split(\" \"))\narr = []\ncheck_zero = False\nqueue = deque()\nday = 0\n\nfor i in range(N):\n    N_arr = list(map(int, stdin.readline().split(\" \")))\n    arr.append(N_arr)\n    for index, j in enumerate(N_arr):\n        if j == 0:\n            check_zero = True\n        elif j == 1:\n            queue.append([index, i, 0])\n\n\ndef bfs():\n    global day\n    dx = [0, 0, 1, -1]\n    dy = [1, -1, 0, 0]\n    while queue:\n        x, y, cnt = queue.popleft()\n        if cnt > day:\n            day = cnt\n\n        for i in range(4):\n            nx = x + dx[i]\n            ny = y + dy[i]\n            if nx >= 0 and nx < M and ny >= 0 and ny < N and arr[ny][nx] == 0:\n                queue.append([nx, ny, cnt + 1])\n                arr[ny][nx] = 1\n            \n\n\nif not check_zero:\n    print(0)\nelse:\n    check_finished = True\n    bfs()\n\n    for i in range(N):\n        for j in range(M):\n            if arr[i][j] == 0:\n                check_finished = False\n\n    if check_finished:\n        print(day)\n    else:\n        print(-1)","repo_name":"Guk0/TIL","sub_path":"algorithm/4주차/7576.py","file_name":"7576.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4241405793","text":"#!/usr/bin/python3\nimport spidev\nimport time\nimport csv\nimport os\nimport logging\nspi=spidev.SpiDev()\nspi.open(0,1)\n\n#logfile\nlogDate=time.time()\nlogDate1=time.localtime(logDate)\nlogDate2=str(time.strftime(\"%d%m%y_%H%M%S\", logDate1))\nlogfile='/home/pi/Documents/BerniHeizung/python3/log/LOG_HzngT.log'\nlogging.basicConfig(filename=logfile, level=logging.DEBUG)\n\nla11=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] # list for antwort1[1]\nla12=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] # list for antwort1[2]\nla21=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] # list for antwort2[1]\nla22=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] # list for antwort2[2]\nlt=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] # list of timestamps\ntStep=1\nmtStep=10\neps=1e-20\nfile='/home/pi/Documents/BerniHeizung/python3/Heizung_T.csv'\nDatum0_0=0\n\nopen(logfile, 'w').close() # clear the logfile\n\nwhile True:\n\n    if os.path.isfile(logfile) is True:\n        
logSize=os.path.getsize(logfile)\n        print(logSize)\n        if logSize > 500000:\n            open(logfile, 'w').close() # clear the logfile\n\n# time check so that a new .csv file gets created\n    Datum0=time.time()\n    print(Datum0)\n    ZeitDiff=Datum0-Datum0_0\n    print(ZeitDiff)\n    print(os.path.isfile(file))\n    if ZeitDiff > 60*60*5 and os.path.isfile(file) is True: # archives the .csv file once it exceeds a certain age \n        logging.debug('.csv file is being archived')\n        Datum0_0=Datum0\n        Datum1=time.localtime(Datum0)\n        Datum2=str(time.strftime(\"%d%m%y_%H%M%S\", Datum1))\n        print(Datum2) \n        print(\"Date updated\")\n        file_old=\"/home/pi/Documents/BerniHeizung/python3/T_log/\"+Datum2+\".csv\"\n        print(str(file_old))\n        os.rename(file, file_old)\n\n# fill the lists with n=mtStep values\n    logging.debug('Start capturing T values')\n    for i in range(0,mtStep): \n\n        #0-128 1-144 2-160 3-176 4-192 5-208 6-224 7-240\n        antwort1=spi.xfer([1,128,0])\n        antwort2=spi.xfer([1,176,0])\n        t=time.time()\n\n        # remove the first value, append the new value\n        del la11[0]\n        la11.append(antwort1[1])\n        del la12[0]\n        la12.append(antwort1[2])\n        del la21[0]\n        la21.append(antwort2[1])\n        del la22[0]\n        la22.append(antwort2[2])\n        del lt[0]\n        lt.append(t) \n        time.sleep(tStep)\n\n# sort lists by size and remove the 3 highest and 3 lowest values\n    logging.debug('sort t-lists and delete values')\n    La11=list(la11)\n    La11.sort()\n    del La11[20]\n    del La11[19]\n    del La11[18]\n    del La11[0]\n    del La11[0]\n    del La11[0]\n    \n    La12=list(la12)\n    La12.sort()\n    del La12[20]\n    del La12[19]\n    del La12[18]\n    del La12[0]\n    del La12[0]\n    del La12[0]\n    \n    La21=list(la21)\n    La21.sort()\n    del La21[20]\n    del La21[19]\n    del La21[18]\n    del La21[0]\n    del La21[0]\n    del La21[0]\n    \n    La22=list(la22)\n    La22.sort()\n    del La22[20]\n    del La22[19]\n    del La22[18]\n    del La22[0]\n    del La22[0]\n    del La22[0]\n    \n# mean of the reduced lists\n    logging.debug('calculate mean values')\n    La11m=sum(La11)/len(La11) \n    La12m=sum(La12)/len(La12)\n    \n    La21m=sum(La21)/len(La21) \n    La22m=sum(La22)/len(La22) \n    \n    if 0<= antwort1[1]<=3:\n        logging.debug('convert mean value 1 to T')\n        vMax1=3.29406\n        vMin1=0.00322\n        vRange1=vMax1-vMin1\n        rAdd1=1000\n\n        wert1=((La11m*256)+La12m)*0.00322+eps\n        print(wert1,\" V\",)\n\n        R1=(vRange1-wert1)/(wert1)*rAdd1 \n        T1=round(1.1072597754889e-5*R1*R1+0.2331563968*R1-244.1819649032,2)\n        print(T1,\" °C\",)\n    \n    if 0<= antwort2[1]<=3:\n        logging.debug('convert mean value 2 to T')\n        vMax2=3.29406\n        vMin2=0.00322\n        vRange2=vMax2-vMin2\n        rAdd2=1000\n\n        wert2=((La21m*256)+La22m)*0.00322+eps\n        print(wert2,\" V\",)\n    \n        R2=(vRange2-wert2)/(wert2)*rAdd2\n        T2=round(1.1072597754889e-5*R2*R2+0.2331563968*R2-244.1819649032,2)\n        round(T2,2)\n        print(T2,\" °C\",)\n\n    logging.debug('read out the timestamp of the middle temperature')\n    timeLoc=time.localtime(lt[11])\n    date=time.strftime(\"%d/%m/%y %H:%M:%S\", timeLoc)\n    print(date)\n\n# write temperatures and time to the .csv if sensible values are present\n    \n    if la21[0]>0 or la11[0]>0:\n        logging.debug('Values are present --> writing to .csv t=%s T1=%s T2=%s', lt[11], T1, T2)\n        with open(file, 'a') as out:\n            cw=csv.writer(out, delimiter=',', lineterminator='\\n',quotechar='\"')\n            cw.writerow([lt[11],T1,T2])\n            logging.debug('Values written to .csv')\n    \n    \n    
\n","repo_name":"RasHeiz/BerniHeizung","sub_path":"python3/Heizung_Temperatur.py","file_name":"Heizung_Temperatur.py","file_ext":"py","file_size_in_byte":4724,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41155778146","text":"\"\"\"\nTitle: 0008 - String to Integer (atoi)\nTags: String\nTime: O(n)\nSpace: O(1)\nSource: https://leetcode.com/problems/string-to-integer-atoi/\nDifficulty: Medium\n\"\"\"\n\nclass Solution:\n def myAtoi(self, s: str) -> int:\n INT_MAX = 2147483647\n INT_MIN = -2147483648\n result = 0\n\n if not s:\n return result\n\n i = 0\n while i < len(s) and s[i].isspace():\n i += 1\n\n if len(s) == i:\n return result\n\n sign = 1\n if s[i] == \"+\":\n i += 1\n elif s[i] == \"-\":\n sign = -1\n i += 1\n\n while i < len(s) and '0' <= s[i] <= '9':\n if result > (INT_MAX - int(s[i])) / 10:\n return INT_MAX if sign > 0 else INT_MIN\n result = result * 10 + int(s[i])\n i += 1\n\n return sign * result\n","repo_name":"tejeshreddy/competitive-programming","sub_path":"leetcode/python/string_to_integer_atoi.py","file_name":"string_to_integer_atoi.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"71229342986","text":"import json\nimport os\nimport re\nimport sys\nimport threading\nfrom datetime import datetime\n\nimport httpx\n\nimport lib.cnblogs_api as api\n\n\nclass CnblogsDownloader:\n \"\"\"\n 下载器类,日志均print到控制台\\n\n \"\"\"\n\n _FLAG_FILE_NAME = \".CnblogsDownloaderFlag.json\"\n\n _IMG_PATTERN = re.compile(r'(!\\[[^\\]]*?\\]\\()([^\\)]*/([^\\)]*?))(\\))|(]*?src=\")([^\"]*/([^\"]*?))(\"[^>]*?>)')\n \"\"\"\n 预先编译正则\n called by :py:func:`CnblogsDownloader._download_replace_img`\\n\n 此处正则中使用(?:)非捕获元无效\n \"\"\"\n\n def __init__(self, cnblogs_cookie, workdir, download_img=False):\n \"\"\"\n 初始化下载器类\n\n :param str cnblogs_cookie: 博客园Cookie ``.Cnblogs.AspNetCore.Cookies`` 的值\n :param str workdir: 工作目录,即下载目录\n :param bool download_img: 是否离线随笔中引用的图片\n \"\"\"\n self._total_essay = 0\n self._updated_essay = 0\n self._is_first_run = True\n self._last_update = None\n self._workdir = workdir\n self._download_img = download_img\n self._lock = threading.Lock()\n self._http_headers = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) \"\n \"Chrome/99.0.4844.74 Safari/537.36\",\n \"Referer\": \"https://i.cnblogs.com/\",\n \"Cookie\": rf\".Cnblogs.AspNetCore.Cookies={cnblogs_cookie}\"}\n self._category = api.get_category_list(self._http_headers)\n if type(self._category) == dict:\n errors = self._category.get(\"errors1\")\n if errors is not None and errors[0] == \"Unauthorized\":\n print(\"Cookie 已过期,请重新配置Cookie\")\n sys.exit()\n\n flag_path = rf\"{workdir}\\{self._FLAG_FILE_NAME}\"\n if os.path.isfile(flag_path):\n self._is_first_run = False\n flag = None\n with open(flag_path, \"r\", encoding=\"utf-8\") as f:\n flag = json.load(f)\n pass\n # download_to_subdir最后还有写入操作\n last_update = flag[\"last_update\"]\n self._last_update = datetime.strptime(last_update, \"%Y-%m-%dT%H:%M:%S\")\n\n def download_to_subdir(self):\n \"\"\"\n 开始下载\\n\n 主函数,多线程下载随笔及图片,线程数为随笔的分类数\n\n :rtype: int\n :return: 更新的随笔数量\n \"\"\"\n current_path = os.getcwd()\n os.chdir(self._workdir)\n self._category.append({\"categoryId\": 0, \"title\": \"未分类\"})\n download_threads = []\n for category in self._category:\n # (category,) 一个元素的元组 (category)是列表转元组\n download_thread = 
threading.Thread(target=self._category_download_thread, args=(category,))\n            download_thread.start()\n            download_threads.append(download_thread)\n        for download_thread in download_threads:\n            download_thread.join()\n        print(rf\"{self._total_essay} posts in total, {self._updated_essay} updated\")\n        now = datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S\")\n        with open(rf\"{self._workdir}\\{self._FLAG_FILE_NAME}\", \"w\", encoding=\"utf-8\") as f:\n            f.write(rf'{{\"last_update\": \"{now}\"}}')\n        os.chdir(current_path)\n        return self._updated_essay\n\n    def _category_download_thread(self, category):\n        \"\"\"\n        One thread per category downloads the post contents and, if requested, the images\\n\n        All threads share one working directory; any thread calling os.chdir changes the working directory of the whole program\n\n        :param dict category: basic information of the category\n        \"\"\"\n        dirname = category[\"title\"]\n        dirname = re.sub(rf'(\\\\|/|\\?|\\||\"|:|\\*|<|>)', \" \", dirname)\n        if not os.path.isdir(dirname):\n            os.mkdir(dirname)\n        write_absolute_path = rf\"{self._workdir}\\{dirname}\"\n\n        essays = api.get_posts_list(self._http_headers, category_id=str(category[\"categoryId\"]))\n        self._lock.acquire()\n        self._total_essay = self._total_essay + essays[\"postsCount\"]\n        self._lock.release()\n        for essay_pre in essays[\"postList\"]:\n            filename = essay_pre[\"title\"]\n            # replace special characters; Windows file names must not contain: \\/:*?\"<>|\n            filename = re.sub(rf'(\\\\|/|\\?|\\||\"|:|\\*|<|>)', \" \", filename)\n            filename = rf'{filename}{\"[非公开]\" if not essay_pre[\"isPublished\"] else \"\"}' \\\n                       rf'{\"[草稿]\" if essay_pre[\"isDraft\"] else \"\"}.md'\n\n            essay_date_updated = datetime.strptime(essay_pre[\"dateUpdated\"], \"%Y-%m-%dT%H:%M:%S\")\n            if (not self._is_first_run) and os.path.isfile(rf\"{write_absolute_path}\\{filename}\") and \\\n                    (self._last_update - essay_date_updated).total_seconds() > 0:\n                self._lock.acquire()\n                print(rf\"Already up to date: {dirname}\\{filename}\")\n                self._lock.release()\n                continue\n            essay = api.get_post_by_id(self._http_headers, str(essay_pre[\"id\"]))\n\n            essay_content = essay[\"blogPost\"][\"postBody\"]\n            if self._download_img:\n                essay_content = CnblogsDownloader._download_replace_img(filename, essay_content, write_absolute_path)\n\n            with open(rf\"{write_absolute_path}\\{filename}\", \"w\", encoding=\"utf-8\") as f:\n                f.write(essay_content)\n            self._lock.acquire()\n            self._updated_essay = self._updated_essay + 1\n            print(rf\"Downloaded post: {dirname}\\{filename}\")\n            self._lock.release()\n\n    @staticmethod\n    def _download_replace_img(essay_title, essay_content, workdir):\n        \"\"\"\n        Replace the images in the post content, covering both the ``![]()`` and ```` formats\\n\n        Other attributes of the img tag are preserved, e.g. after replacement: ```` \\n\n        Finally download the images from their links\n\n        :rtype: str\n        :return: the post content after replacement\n        \"\"\"\n        img_url = []\n\n        # bug: when written as a lambda joining two statements with or, only the last expression runs, presumably because the first statement returns no value\n        def replace(m):\n            img_url.append(m.group(2) if m.group(2) else m.group(6))\n            return rf\"{m.group(1)}./img/{m.group(3)}{m.group(4)}\" if m.group(\n                3) else rf\"{m.group(5)}./img/{m.group(7)}{m.group(8)}\"\n\n        essay_content = CnblogsDownloader._IMG_PATTERN.sub(replace, essay_content)\n        http_headers = {\"Referer\": \"https://i.cnblogs.com/\"}\n        if len(img_url) > 0 and (not os.path.isdir(rf\"{workdir}\\img\")):\n            os.mkdir(rf\"{workdir}\\img\")\n        for url in img_url:\n            # no longer validates that the file name is legal\n            img_name = url.split(\"/\")[-1]\n            img_path = rf\"{workdir}/img/{img_name}\"\n            if os.path.isfile(img_path):\n                print(rf\"Image already exists: {img_name}\")\n                continue\n            try:\n                r = httpx.get(url, headers=http_headers, timeout=api.TIMEOUT)\n                with open(img_path, \"wb\") as f:\n                    f.write(r.content)\n                print(rf\"Downloaded image for 《{essay_title}》: {img_name}\")\n            except Exception as e:\n                print(f\"error: failed to download image for 《{essay_title}》, url: {url}\")\n        return 
essay_content\n","repo_name":"Charles94jp/cnblogs-blogger-downloader","sub_path":"cnblogs_downloader.py","file_name":"cnblogs_downloader.py","file_ext":"py","file_size_in_byte":7485,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"81"} +{"seq_id":"12104657945","text":"from django.db.models import OuterRef, Subquery\nfrom django.db.models.functions import Coalesce\nfrom django.utils import timezone\nfrom django.views.generic import RedirectView\nfrom rest_framework.exceptions import NotFound\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework import viewsets\nfrom drf_yasg.utils import swagger_auto_schema\nfrom drf_yasg import openapi\n\nfrom .models import Product, ProductsPrices\nfrom .serializers import ProductSerializer, ProductResponseSerializer\nfrom .services import CalculateAveragePriceService, ProductService\n\n\n# Create your views here.\nclass RootRedirectView(RedirectView):\n pattern_name = 'schema-swagger-ui'\n permanent = False\n\n\n\nclass ProductsViewSet(viewsets.ModelViewSet):\n queryset = Product.objects.all()\n serializer_class = ProductSerializer\n\n def list(self, request, *args, **kwargs):\n queryset = Product.objects.all()\n current_time = timezone.now()\n\n latest_price_subquery = ProductsPrices.objects.filter(\n product=OuterRef('pk')\n ).order_by('-created_at').values('price')[:1]\n\n current_price_subquery = ProductsPrices.objects.filter(\n product=OuterRef('pk'),\n start_date__lte=current_time,\n end_date__gte=current_time\n ).values('price')[:1]\n\n subquery = Coalesce(Subquery(current_price_subquery), Subquery(latest_price_subquery))\n\n serialized_data = queryset.annotate(\n price=subquery\n ).values('id', 'name', 'price')\n\n return Response(serialized_data)\n\n def retrieve(self, request, *args, **kwargs):\n instance = self.get_object()\n serializer = ProductResponseSerializer(instance)\n return Response(serializer.data)\n\n @swagger_auto_schema(request_body=ProductSerializer)\n def create(self, request, *args, **kwargs):\n try:\n serializer = ProductSerializer(data=request.data, context={'request': request})\n serializer.is_valid(raise_exception=True)\n\n product = ProductService.create_product_with_price(\n name=serializer.validated_data['name'],\n price=serializer.validated_data.get('price'),\n start_date=serializer.validated_data.get('start_date'),\n end_date=serializer.validated_data.get('end_date'),\n )\n response_serializer = ProductResponseSerializer(product)\n return Response(response_serializer.data, status=status.HTTP_201_CREATED)\n except Exception as e:\n return Response({'error': str(e)}, status=status.HTTP_400_BAD_REQUEST)\n\n @swagger_auto_schema(\n request_body=ProductSerializer,\n operation_description=\"If your start_date & end_date will match any existing product's price date range, \"\n \"it will change its price value. 
If any of start_date or end_date overlaps with \"\n                              \"any product's existing price date range, it will raise an error; if not, a new price range will be created \"\n    )\n    def update(self, request, *args, **kwargs):\n        try:\n            instance = self.get_object()\n            serializer = ProductSerializer(instance, data=request.data)\n            serializer.is_valid(raise_exception=True)\n\n            price = serializer.validated_data.get('price')\n            start_date = serializer.validated_data.get('start_date')\n            end_date = serializer.validated_data.get('end_date')\n\n            ProductService.update_product_with_price(\n                instance=instance,\n                price=price,\n                start_date=start_date,\n                end_date=end_date,\n            )\n\n            return Response(serializer.data, status=status.HTTP_200_OK)\n\n        except Exception as e:\n            return Response({'error': str(e)}, status=status.HTTP_400_BAD_REQUEST)\n\n    def partial_update(self, request, *args, **kwargs):\n        return self.update(request, *args, **kwargs)\n\n\n\nclass CalculateAveragePriceView(APIView):\n    @swagger_auto_schema(\n        manual_parameters=[\n            openapi.Parameter('product_id', openapi.IN_PATH, type=openapi.TYPE_INTEGER),\n            openapi.Parameter('start_date', openapi.IN_QUERY, type=openapi.TYPE_STRING, format=openapi.FORMAT_DATE),\n            openapi.Parameter('end_date', openapi.IN_QUERY, type=openapi.TYPE_STRING, format=openapi.FORMAT_DATE),\n        ],\n    )\n    def get(self, request, product_id, *args, **kwargs):\n        start_date = request.query_params.get('start_date')\n        end_date = request.query_params.get('end_date')\n\n        try:\n            average_price = CalculateAveragePriceService.calculate_average_price(product_id, start_date, end_date)\n            return Response({'average_price': average_price}, status=status.HTTP_200_OK)\n\n        except NotFound as e:\n            return Response({'error': str(e)}, status=status.HTTP_404_NOT_FOUND)","repo_name":"nistreanmario/product_price_monitor","sub_path":"src/monitor/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38588383051","text":"# Breadth First Search\n\nfrom collections import deque\n\ngraph_list = {1: set([3,4]),\n              2: set([3,4,5]),\n              3: set([1,5]),\n              4: set([1]),\n              5: set([2,6]),\n              6: set([3,5])}\nroot_node = 1\n# iterate over the adjacency dict directly (the original loop called ctypes.sizeof on a dict, which raises a TypeError)\nfor node in graph_list:\n    print(graph_list[node])\n\ndef BFS(graph, root):\n    visited = []\n    queue = deque([root])\n    while queue:\n        n = queue.popleft()\n        if n not in visited:\n            visited.append(n)\n            queue += graph[n] - set(visited)\n    return visited\n\n# print(BFS(graph_list, root_node))\n\n","repo_name":"Popo-Jin/Algorithm","sub_path":"자료구조_any/BFS.py","file_name":"BFS.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26593398668","text":"from pathlib import Path\n\nfrom load.dataset import build_datasets, DataConfig\nfrom networks.crnn import CRNN, CRNNConfig, CRNNTrainConfig\n\n\ndef main():\n    dataset_path = Path(\"data\", \"catswhoyell\")\n    data_config = DataConfig(dataset_path, 16, 0.2)\n    train_loader, val_loader = build_datasets(data_config)\n\n\n    model_config = CRNNConfig()\n    train_config = CRNNTrainConfig(100, 1e-3, 5)\n    model = CRNN(model_config)\n    model.train(train_config, train_loader, val_loader)\n\n\nif __name__ == \"__main__\":\n    
main()\n","repo_name":"dogeplusplus/meow-mix","sub_path":"segment_trainer.py","file_name":"segment_trainer.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23880875213","text":"import numpy as np\r\nimport gym\r\nimport random\r\nimport time\r\n\r\nenv = gym.make(\"FrozenLake-v0\", is_slippery=True)\r\n'''\r\nActions:\r\nLEFT = 0\r\nDOWN = 1\r\nRIGHT = 2\r\nUP = 3'''\r\n\r\n'''\r\nMAPS = {\r\n\"4x4\": [\r\n \"SFFF\",\r\n \"FHFH\",\r\n \"FFFH\",\r\n \"HFFG\"]'''\r\n\r\n \r\nV = np.zeros(env.nS)\r\nS = np.arange(0,16)\r\nthreshold = 1e-3\r\ngamma = 0.9\r\nactions = {'LEFT':0,'DOWN':1,'RIGHT':2,'UP':3}\r\n\r\n\r\ndef policy_evaluation(env, V, S, policy):\r\n while True:\r\n delta = 0\r\n for s in S:\r\n v_old = V[s]\r\n expected_r = 0\r\n expected_v = 0\r\n transition = env.P[s][policy[s]]\r\n for (probs, state_prime, r, done) in transition:\r\n expected_r += probs * r\r\n expected_v += probs * V[state_prime]\r\n V[s] = expected_r + (gamma*expected_v)\r\n delta = max(delta, abs(v_old - V[s]))\r\n if delta <= threshold:\r\n break\r\n return(V)\r\n\r\ndef random_policy(env, S, actions):\r\n policy_random = {}\r\n for s in S:\r\n s_a = random.choice(list(actions.values()))\r\n policy_random[s] = s_a\r\n return(policy_random)\r\n\r\n\r\ndef policy_improvement_iteration(env, V, S):\r\n policy = random_policy(env, S, actions)\r\n while True:\r\n V = policy_evaluation(env, V, S, policy)\r\n new_policy = dict()\r\n for s in S:\r\n best_a = None\r\n best_p = float('-inf')\r\n for a in actions.values():\r\n expected_r = 0\r\n expected_v = 0\r\n transition = env.P[s][a]\r\n for (probs, state_prime, r, done) in transition:\r\n expected_r += probs * r\r\n expected_v += probs * V[state_prime]\r\n V_new = expected_r + (gamma*expected_v)\r\n if V_new >= best_p:\r\n best_p = V_new\r\n best_a = a\r\n new_policy[s] = best_a\r\n if new_policy == policy:\r\n break\r\n else:\r\n policy = new_policy\r\n return(policy)\r\n \r\nstart_time = time.time()\r\noptimal_policy = policy_improvement_iteration(env, V, S)\r\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\r\noptimal_actions = np.asarray(list(optimal_policy.values()))\r\nprint(optimal_actions.reshape(4,4), actions, sep='\\n')\r\n\r\n","repo_name":"kmaciver/Reinforcement_Learning","sub_path":"policy_iteration/Frozen_lake_policy_iteration.py","file_name":"Frozen_lake_policy_iteration.py","file_ext":"py","file_size_in_byte":2336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1250577884","text":"import json\nfrom time import time\nfrom pdx.logger import logger\nfrom pdx.models.model import Model\nfrom pdx.prompt.prompt_session import PromptSession\nfrom pdx.models.elevenlabs.client import ElevenLabsClient\nfrom pdx.models.metadata import ModelResponse, ResponseMetadata\n\n\nclass TextToSpeechModel(Model):\n def __init__(self,\n api_key: str,\n **kwargs,\n ):\n\n self._provider = \"elevenlabs\"\n self._client = ElevenLabsClient(api_key)\n # Currently: eleven_monolingual_v1, eleven_multilingual_v1\n self._model = kwargs.get('model', 'eleven_multilingual_v1')\n # https://api.elevenlabs.io/v1/voices\n self._voice_id = kwargs.get('voice_id', '21m00Tcm4TlvDq8ikWAM')\n # 0, 1, 2, 3, 4\n self._optimize_streaming_latency = kwargs.get(\n 'optimize_streaming_latency', 0)\n self._api_url = f\"/v1/text-to-speech/{self._voice_id}?optimize_streaming_latency={self._optimize_streaming_latency}\"\n # Voice 
settings overriding stored settings for the given voice.\n        # They are applied only on the given TTS request.\n        self._stability = kwargs.get('stability', 0.5)\n        self._similarity_boost = kwargs.get('similarity_boost', 0.5)\n\n        self._retries = kwargs.get('retries', 2)\n\n    def _preprocess(self, prompt: PromptSession) -> dict:\n        _prompt = prompt.text_prompt({})\n\n        request_params = {\n            \"model\": self._model,\n            \"text\": _prompt,\n            \"voice_settings\": {\n                \"stability\": self._stability,\n                \"similarity_boost\": self._similarity_boost\n            }\n        }\n\n        return request_params\n\n    def _postprocess(self, response: dict, request_params: dict, request_time: float) -> ModelResponse:\n        _prompt = request_params.pop('text', None)\n\n        request_params['voice_id'] = self._voice_id\n        request_params['optimize_streaming_latency'] = self._optimize_streaming_latency\n\n        response_metadata = ResponseMetadata(\n            model=request_params['model'],\n            stop='tts_completed',\n            stop_reason='tts_completed',\n            latency=request_time)\n\n        _data_type = f'audio_bytes'\n\n        model_response = ModelResponse(\n            metadata=response_metadata,\n            request_params=request_params,\n            data=response,\n            data_type=_data_type\n        )\n\n        return model_response\n","repo_name":"pdx-labs/pdx","sub_path":"src/pdx/models/elevenlabs/tts.py","file_name":"tts.py","file_ext":"py","file_size_in_byte":2445,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"81"} +{"seq_id":"13806846909","text":"import re\nimport subprocess\nimport time\nfrom datetime import datetime, timedelta\nfrom typing import Tuple, List, Dict, Any, Optional\nfrom _config import Config\nfrom log import Log\n\n\nclass Videos:\n    MAX_RANGE = 2000\n    DT_ROOT_FORMAT = '%Y-%m-%d'\n    DT_FORMAT = '%Y-%m-%d/%H/%M'\n    DT_WEB_FORMAT = '%Y%m%d%H%M%S'\n    DEPTH = 3\n    MIN_FILE_SIZE = 1000\n    MD_AVERAGE_LEN = 10\n\n    def __init__(self, cam_hash: str):\n        self._hash = cam_hash\n        self._cam_path = f'{Config.storage_path}/{Config.cameras[cam_hash][\"folder\"]}'\n        self._range = self.MAX_RANGE\n        self._root_folder = []\n        self._date_time = ''\n\n    def get_days(self) -> int:\n        return round((datetime.now() - self._get_start_date()).total_seconds() / 86400)\n\n    def get(self, args: List[Any]) -> Tuple[str, int]:\n        date_time = args['dt'][0] if 'dt' in args else ''\n\n        if args['video'][0] == 'next':\n            step = int(args['step'][0]) if 'step' in args else 0\n            sensitivity = int(args['md'][0]) if 'md' in args else -1\n            return self._get_next(step, date_time, sensitivity)\n\n        elif args['video'][0] == 'range':\n            rng = int(args['range'][0]) if 'range' in args else self.MAX_RANGE\n            return self._get_by_range(rng)\n\n        return self._get_live(date_time)\n\n    def get_datetime_by_path(self, path: str) -> str:\n        return re.sub(r'(-|/|.mp4)', '', path[len(self._cam_path) + 1:])\n\n    def get_range_by_path(self, path: str) -> str:\n        if self._range > self.MAX_RANGE:\n            return self._range\n        start_date = self._get_start_date()\n        delta_seconds = (\n            datetime.strptime(self.get_datetime_by_path(path), self.DT_WEB_FORMAT) - start_date\n        ).total_seconds()\n        total_seconds = (datetime.now() - start_date).total_seconds()\n        return str(round(self.MAX_RANGE * delta_seconds / total_seconds))\n\n    def _get_live(self, date_time: Optional[str] = '') -> Tuple[str, int]:\n        self._range = self.MAX_RANGE + 1\n\n        path, size = self._get_live_file()  # checks now and last minute folder\n        if not size:\n            fallback = (datetime.now() - timedelta(minutes=1)).strftime(self.DT_FORMAT).split('/')\n            return self._find_nearest_file('/'.join(fallback[0:-1]), 
fallback[-1], -1)\n\n segment_date_time = self.get_datetime_by_path(path)\n if not date_time or segment_date_time > date_time or not Config.storage_enabled:\n return path, size\n\n time.sleep(0.5)\n return self._get_live(date_time)\n\n def _get_by_range(self, rng: int) -> Tuple[str, int]:\n rng = min(max(rng, 0), self.MAX_RANGE)\n\n start_date = self._get_start_date()\n time_range = datetime.now() - start_date\n delta_minutes = int(time_range.total_seconds() * rng / self.MAX_RANGE / 60)\n wd = (start_date + timedelta(minutes=delta_minutes)).strftime(self.DT_FORMAT)\n\n parts = wd.split('/')\n return self._find_nearest_file('/'.join(parts[0:-1]), parts[-1], 1)\n\n def _get_next(self, step: int, date_time: str, sensitivity: int) -> Tuple[str, int]:\n if not date_time:\n return self._get_live()\n\n self._date_time = date_time\n\n if sensitivity >= 0:\n return self._get_next_motion(sensitivity, step)\n\n file_path = self._get_path_by_datetime(date_time)\n parts = file_path.split('/')\n wd = '/'.join(parts[0:-1])\n\n files = []\n if -10 < step < 0:\n prev_dir = (datetime.strptime(date_time, self.DT_WEB_FORMAT) - timedelta(minutes=1)\n ).strftime(self.DT_FORMAT)\n files = self._get_files_by_folders([prev_dir, wd])\n elif 0 < step < 10:\n next_dir = (datetime.strptime(date_time, self.DT_WEB_FORMAT) + timedelta(minutes=1)\n ).strftime(self.DT_FORMAT)\n files = self._get_files_by_folders([wd, next_dir])\n if files and abs(step) < len(files):\n arr = files if step > 0 else reversed(files)\n working_path = f'{self._cam_path}/{file_path}'\n i = 0\n for file in arr:\n f = file.split()\n path = f[1]\n if (step > 0 and path <= working_path) or (step < 0 and path >= working_path):\n continue\n i += 1\n if i < abs(step):\n continue\n if int(f[0]) > self.MIN_FILE_SIZE:\n return path, int(f[0])\n\n sign = 1 if step > 0 else -1\n seconds = max(60, abs(step))\n folder = (\n datetime.strptime(wd, self.DT_FORMAT) + timedelta(seconds=seconds) * sign\n ).strftime(self.DT_FORMAT)\n\n if step > 0 and folder > datetime.now().strftime(self.DT_FORMAT):\n return self._get_live(date_time)\n\n step = -2 if step < 0 else 1\n parts = folder.split('/')\n return self._find_nearest_file('/'.join(parts[0:-1]), parts[-1], step)\n\n def _get_start_date(self) -> datetime:\n return datetime.strptime(self._get_folders()[0], self.DT_ROOT_FORMAT)\n\n def _find_nearest_file(self, parent: str, folder: str, step: int) -> Tuple[str, int]:\n \"\"\" If folder is set shift left (to parent folder); else shift right (to child folder) \"\"\"\n parts = parent.split('/') if parent else []\n\n if (folder and len(parts) == self.DEPTH - 1) or (not folder and len(parts) == self.DEPTH):\n path = f'{parent}/{folder}'.rstrip('/')\n position = step - 1 if step > 0 else step\n file, size = self._get_file(path, position)\n if size:\n return file, size\n\n folders = self._get_folders(parent)\n if not folders and len(parts) > 0:\n return self._find_nearest_file('/'.join(parts[0:-1]), parts[-1], step) # shift left\n\n if folder:\n if step < 0: # find the largest element of folders less than folder\n rest = [i for i in folders if i < folder]\n else: # find the smallest element of folders greater than folder\n rest = [i for i in folders if i > folder]\n if rest:\n parts.append(max(rest) if step < 0 else min(rest))\n return self._find_nearest_file('/'.join(parts), '', step) # shift right\n\n if len(parts) > 0:\n return self._find_nearest_file('/'.join(parts[0:-1]), parts[-1], step) # shift left\n elif step < 0:\n return self._find_nearest_file('', '', 1) # move 
to the beginning\n else:\n return self._get_live() # move to the end\n\n if not folder and folders and len(parts) < self.DEPTH:\n parts.append(folders[-1]) if step < 0 else parts.append(folders[0])\n return self._find_nearest_file('/'.join(parts), '', step) # shift right\n\n Log.print(f'find_nearest_file: not found: {parent}[/{folder}], step={step}')\n\n return '', 0\n\n def _get_next_motion(self, sensitivity: int, step: int) -> Tuple[str, int]:\n sign = 1 if step > 0 else -1\n if step >= 60 or step <= -60:\n folder = (\n datetime.strptime(self._date_time, self.DT_WEB_FORMAT) + timedelta(seconds=abs(step)) * sign\n ).strftime(self.DT_FORMAT)\n else:\n path = self._get_path_by_datetime(self._date_time)\n folder = '/'.join(path.split('/')[0:-1])\n\n last_files = {}\n prev_folder = (datetime.strptime(folder, self.DT_FORMAT) - timedelta(minutes=1) * sign).strftime(self.DT_FORMAT)\n files = self._get_files(prev_folder)\n if files:\n for file in files:\n f = file.split(' ')\n last_files[f'{prev_folder}/{f[1]}'] = int(f[0])\n\n return self._motion_detector(folder, last_files, 100 - max(0, min(90, sensitivity)), sign)\n\n def _motion_detector(self, folder: str, last_files: Dict[str, int], sensitivity: int, sign: int) -> Tuple[str, int]:\n requested_path = self._get_path_by_datetime(self._date_time)\n files = self._get_files(folder)\n if not files:\n if sign > 0 and folder >= self._get_folders()[-1]:\n return self._get_live()\n if sign < 0 and folder <= self._get_folders()[0]:\n return '', 0\n\n file = self._find_nearest_file(folder, '', sign)\n if file:\n next_folder = '/'.join(file[0][len(self._cam_path) + 1:].split('/')[0:-1])\n if (sign > 0 and next_folder <= folder) or (sign < 0 and next_folder >= folder):\n return '', 0\n return self._motion_detector(next_folder, last_files, sensitivity, sign)\n\n sens = 1 + sensitivity / 100\n if sign < 0:\n files.reverse()\n for file in files:\n f = file.split(' ')\n if float(f[0]) < self.MIN_FILE_SIZE: # exclude broken files\n continue\n average_size = sum(last_files.values()) / len(last_files) if last_files else 0\n\n last_files[f'{folder}/{f[1]}'] = int(f[0])\n if len(last_files) > self.MD_AVERAGE_LEN:\n first_key = next(iter(last_files))\n del last_files[first_key]\n\n path = f'{folder}/{f[1]}'\n\n if (sign > 0 and requested_path >= path) or (sign < 0 and requested_path <= path):\n continue # don't detect the files before last motion & last motion itself\n\n if average_size and float(f[0]) > average_size * sens:\n return f'{self._cam_path}/{folder}/{f[1]}', int(f[0])\n\n if folder >= datetime.now().strftime(self.DT_FORMAT):\n return self._get_live()\n\n next_folder = (datetime.strptime(folder, self.DT_FORMAT) + timedelta(minutes=1) * sign).strftime(self.DT_FORMAT)\n return self._motion_detector(next_folder, last_files, sensitivity, sign)\n\n def _get_folders(self, folder: str = '') -> List[str]:\n if not folder and self._root_folder:\n return self._root_folder\n cmd = f'ls {self._cam_path}/{folder}'\n res = self._exec(cmd).splitlines()\n if not folder:\n self._root_folder = res\n return res\n\n def _get_files(self, folder: str) -> List[str]:\n wd = f\"{self._cam_path}/{folder}\"\n cmd = f'ls -l {wd} | awk ' + \"'{print $5,$9}'\"\n res = self._exec(cmd)\n if not res and folder and folder < datetime.now().strftime(self.DT_FORMAT):\n self._exec(f'rmdir {self._cam_path}/{folder}') # delete empty folder\n return res.splitlines()\n\n def _get_files_by_folders(self, folders: List[str]) -> List[str]:\n paths = ''\n for folder in folders:\n paths = 
f'{paths}{self._cam_path}/{folder}/* '\n cmd = f'ls -l {paths} | awk ' + \"'{print $5,$9}'\"\n return self._exec(cmd).splitlines()\n\n def _get_file(self, folder: str, position: int = 0) -> Tuple[str, int]:\n files = self._get_files(folder)\n if not files or len(files) <= position or len(files) < abs(position):\n return '', 0\n file = files[position].split() # [size, file]\n path = f'{self._cam_path}/{folder}/{file[1]}'\n size = int(file[0])\n if size > self.MIN_FILE_SIZE:\n return path, size\n if position < 0 and len(files) > abs(position):\n return self._get_file(folder, position - 1)\n return '', 0\n\n def _get_live_file(self):\n folder = datetime.now().strftime(self.DT_FORMAT) # Regular case\n files = self._get_files(folder)\n position = -2\n if len(files) > 1:\n file = files[position].split() # [size, file]\n size = int(file[0])\n if size < self.MIN_FILE_SIZE:\n return '', 0\n\n path = f'{self._cam_path}/{folder}/{file[1]}'\n return path, size\n\n elif files:\n position = -1\n\n folder = (datetime.now() - timedelta(minutes=1)).strftime(self.DT_FORMAT) # Possible case\n return self._get_file(folder, position)\n\n @staticmethod\n def _get_path_by_datetime(dt: str) -> str:\n if not re.match(r'^\\d{14}$', dt):\n return ''\n return f'{dt[0:4]}-{dt[4:6]}-{dt[6:8]}/{dt[8:10]}/{dt[10:12]}/{dt[12:14]}.mp4'\n\n @staticmethod\n def _exec(cmd: str, default: Any = '') -> Any:\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True, shell=True)\n stdout, _stderr = p.communicate()\n return stdout.strip() or default\n","repo_name":"vladpen/cams-pwa","sub_path":"server/videos.py","file_name":"videos.py","file_ext":"py","file_size_in_byte":12560,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"16843813865","text":"from __future__ import absolute_import, division, print_function, with_statement\n\nimport optparse\nimport os\nimport sys\n\nfrom commitcheck.argutils import GIT_ARGUMENTS, git_get_type\nfrom commitcheck.checkers import Pattern\nfrom commitcheck.checkers.gitchecker import Checker\n\n\ndef main(argv=None):\n argv = sys.argv if argv is None else argv\n\n # Create parser and parse args.\n parser = optparse.OptionParser(description='Commit checker without argparse')\n for arg in GIT_ARGUMENTS:\n parser.add_option(*arg[0], **arg[1])\n (options, args) = parser.parse_args(argv[1:])\n check_type = git_get_type(options)\n if check_type is None:\n print('Unknown check type: %s' % (options.type))\n return 1\n\n # Setup to do check.\n path = os.getcwd()\n patterns = [\n Pattern(r'[ \\t]+$', 'PTWS', 'Trailing whitespace'),\n ]\n checker = Checker(path, patterns, checks=[r'.*\\.py'])\n checker.verbose = options.verbose\n return checker.check(check_type, args)\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","repo_name":"adan/commitcheck","sub_path":"demos/checker-with-optparse.py","file_name":"checker-with-optparse.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"43245728002","text":"from aiogram.types import Message, ParseMode\nfrom aiogram.dispatcher import FSMContext\nfrom main import bot\nfrom keyboards.buttons import (\n native_languages_markup,\n share_phone_markup,\n register_markup,\n accents_markup,\n genders_markup,\n leader_markup,\n start_markup,\n age_markup\n)\nfrom datetime import datetime\nimport aiohttp\nimport pandas\n\nfrom utils.uzbekvoice import db\nfrom main import UserRegistration, dp\nfrom 
utils.uzbekvoice.helpers import register_user, authorization_token\nfrom utils.helpers import send_message, IsSubscribedChannel, IsRegistered\nfrom utils.uzbekvoice.common_voice import CLIPS_LEADERBOARD_URL, VOTES_LEADERBOARD_URL, RECORDS_STAT_URL, \\\n ACTIVITY_STAT_URL\nfrom data.messages import INSTRUCTIONS, LEADERBOARD, CANCEL_MESSAGE, ABOUT_PROJECT, msg_dict, VOICE_LEADERBOARD, \\\n VOTE_LEADERBOARD, OVERALL_STATS\n\n\n@dp.message_handler(commands=['start'], state='*')\nasync def start_command(message: Message, state: FSMContext):\n await state.finish()\n if db.user_exists(message.chat.id):\n await send_message(message.chat.id, 'welcome-text', markup=start_markup)\n else:\n await send_message(message.chat.id, 'start', markup=register_markup)\n\n\n# Answer to all bot commands\n@dp.message_handler(text=\"👤 Ro'yxatdan o'tish\")\nasync def register_command(message: Message):\n if not db.user_exists(message.chat.id):\n await UserRegistration.full_name.set()\n await send_message(message.chat.id, 'ask-full-name')\n else:\n await send_message(message.chat.id, 'Siz ro\\'yxatdan o\\'tib bo\\'lgansiz!', markup=start_markup)\n\n\n@dp.message_handler(state=UserRegistration.full_name)\nasync def get_name(message: Message, state: FSMContext):\n if message.text == \"👤 Ro'yxatdan o'tish\":\n await UserRegistration.full_name.set()\n await send_message(message.chat.id, 'ask-full-name')\n else:\n async with state.proxy() as data:\n data[\"full_name\"] = message.text\n await UserRegistration.next()\n await send_message(message.chat.id, 'ask-phone', markup=share_phone_markup)\n\n\n@dp.message_handler(state=UserRegistration.phone_number, content_types=['contact', 'text'])\nasync def get_phone(message: Message, state: FSMContext):\n async with state.proxy() as data:\n data[\"phone_number\"] = str(message.contact.phone_number if message.contact else message.text)\n await UserRegistration.gender.set()\n await send_message(message.chat.id, 'ask-gender', markup=genders_markup)\n\n\n@dp.inline_handler()\n@dp.message_handler(state=UserRegistration.gender)\nasync def get_gender(message: Message, state: FSMContext):\n if message.text in ['👨 Erkak', '👩 Ayol']:\n async with state.proxy() as data:\n without_emoji = message.text.split(' ')[1]\n data[\"gender\"] = without_emoji\n await UserRegistration.native_language.set()\n await send_message(message.chat.id, 'ask-native-language', markup=native_languages_markup)\n else:\n await send_message(message.chat.id, 'ask-gender', markup=genders_markup)\n return await UserRegistration.gender.set()\n\n\n@dp.message_handler(state=UserRegistration.native_language)\nasync def native_language(message: Message, state: FSMContext):\n if message.text in [\n \"O'zbek tili\",\n \"Qoraqalpoq tili\",\n \"Rus tili\",\n \"Tojik tili\",\n \"Qozoq tili\"\n ]:\n async with state.proxy() as data:\n data[\"native_language\"] = message.text\n\n await send_message(message.chat.id, 'ask-accent', markup=accents_markup)\n await UserRegistration.accent_region.set()\n else:\n await send_message(message.chat.id, 'ask-native-language', markup=native_languages_markup)\n\n\n@dp.message_handler(state=UserRegistration.accent_region)\nasync def get_accent_region(message: Message, state: FSMContext):\n if (message.text in [\"Andijon\",\n \"Buxoro\",\n \"Farg'ona\",\n \"Jizzax\",\n \"Sirdaryo\",\n \"Xorazm\",\n \"Namangan\",\n \"Navoiy\",\n \"Qashqadaryo\",\n \"Qoraqalpog'iston\",\n \"Samarqand\",\n \"Surxondaryo\",\n \"Toshkent viloyati\",\n \"Toshkent shahri\"]):\n async with state.proxy() as data:\n 
data[\"accent_region\"] = message.text\n await send_message(message.chat.id, 'ask-birth-year', markup=age_markup)\n await UserRegistration.year_of_birth.set()\n else:\n await send_message(message.chat.id, 'ask-accent', markup=accents_markup)\n\n\n@dp.message_handler(state=UserRegistration.year_of_birth)\nasync def finish(message: Message, state: FSMContext):\n async with state.proxy() as data:\n if message.text in [\"< 19\", \"19-29\", \"30-39\", \"40-49\", \"50-59\",\n \"60-69\", \"70-79\", \"80-89\", \"> 89\"]:\n data[\"year_of_birth\"] = message.text\n await register_user(data, message.chat.id)\n await send_message(message.chat.id, 'register-success', markup=start_markup)\n await state.finish()\n else:\n await send_message(message.chat.id, 'ask-birth-year-again')\n return await UserRegistration.year_of_birth.set()\n\n\n@dp.message_handler(IsSubscribedChannel(), text=LEADERBOARD)\nasync def leaderboard(message: Message):\n await send_message(message.chat.id, 'leaderboard', markup=leader_markup)\n\n\n@dp.message_handler(IsSubscribedChannel(), text=INSTRUCTIONS)\nasync def instructions(message: Message):\n await send_message(message.chat.id, 'instructions', markup=start_markup)\n\n\n@dp.message_handler(IsSubscribedChannel(), text=ABOUT_PROJECT)\nasync def instructions(message: Message):\n await bot.send_message(message.chat.id, msg_dict['about-project'], reply_markup=start_markup, parse_mode=ParseMode.MARKDOWN)\n\n\n@dp.message_handler(text=CANCEL_MESSAGE)\nasync def go_back(message: Message):\n await bot.send_message(message.chat.id, 'Bosh menyu: ', reply_markup=start_markup)\n\n\n@dp.message_handler(IsRegistered(), IsSubscribedChannel(), text=VOICE_LEADERBOARD)\nasync def voice_leaderboard(message: Message):\n headers = {\n 'Authorization': await authorization_token(message.chat.id)\n }\n async with aiohttp.ClientSession() as session:\n async with session.get(CLIPS_LEADERBOARD_URL, headers=headers) as get_request:\n leaderboard_dict = await get_request.json()\n print(leaderboard_dict)\n data = {\n '№': [],\n '|FIO|': [],\n '|Yozilgan|': []\n }\n\n for leader in leaderboard_dict:\n data['№'].append(leader['position'] + 1)\n data['|FIO|'].append(f\"{leader['username'][:10]}...\")\n data['|Yozilgan|'].append(leader['total'])\n\n copy_data = data.copy()\n del data['№']\n leaderboard_text = pandas.DataFrame(data=data, index=copy_data['№'])\n leaderboard_text.index.name = '№'\n leaderboard_text = '```' + leaderboard_text.to_string() + '```'\n\n await bot.send_message(\n message.chat.id,\n leaderboard_text,\n reply_markup=start_markup,\n parse_mode=ParseMode.MARKDOWN\n )\n\n\n@dp.message_handler(IsRegistered(), IsSubscribedChannel(), text=VOTE_LEADERBOARD)\nasync def vote_leaderboard(message: Message):\n headers = {\n 'Authorization': await authorization_token(message.chat.id)\n }\n\n async with aiohttp.ClientSession() as session:\n async with session.get(VOTES_LEADERBOARD_URL, headers=headers) as get_request:\n leaderboard_dict = await get_request.json()\n data = {\n '№': [],\n '|FIO|': [],\n '|Tekshirilgan|': []\n }\n\n for leader in leaderboard_dict:\n data['№'].append(leader['position'] + 1)\n data['|FIO|'].append(f\"{leader['username'][:10]}...\")\n data['|Tekshirilgan|'].append(leader['total'])\n\n copy_data = data.copy()\n del data['№']\n leaderboard_text = pandas.DataFrame(data=data, index=copy_data['№'])\n leaderboard_text.index.name = '№'\n leaderboard_text = '```' + leaderboard_text.to_string() + '```'\n\n await bot.send_message(\n message.chat.id,\n leaderboard_text,\n 
reply_markup=start_markup,\n parse_mode=ParseMode.MARKDOWN\n )\n\n\n@dp.message_handler(IsRegistered(), IsSubscribedChannel(), text=OVERALL_STATS)\nasync def stats(message: Message):\n headers = {\n 'Authorization': await authorization_token(message.chat.id)\n }\n\n async with aiohttp.ClientSession() as session:\n async with session.get(RECORDS_STAT_URL, headers=headers) as get_request:\n stats_dict = await get_request.json()\n async with session.get(ACTIVITY_STAT_URL, headers=headers) as get_request:\n activity_dict = await get_request.json()\n\n latest_records = stats_dict[-1]\n latest_activity = activity_dict[-1]\n\n overall_records = int(int(latest_records['total']) / 3600) # 1 hour = 3600 seconds\n checked_records = int(int(latest_records['valid']) / 3600)\n stats_hour = latest_activity['date']\n stats_hour = datetime.strptime(stats_hour, '%Y-%m-%dT%H:%M:%S.%fZ').hour + 5\n users_count = latest_activity['value']\n\n stat_message = f\"\"\"\n🗣️ Umumiy yozilgan: {overall_records} soat\n\n✅ Tekshirilgan yozuvlar: {checked_records} soat\n\n☑️ 2-bosqich maqsadi: 2000 soat tekshirilgan yozvular\n\n⌛ Bugun {stats_hour}:00da aktivlar soni: {users_count}\n \"\"\"\n\n await bot.send_message(\n message.chat.id,\n text=stat_message,\n reply_markup=start_markup,\n parse_mode=ParseMode.MARKDOWN\n )\n","repo_name":"uzbekvoice/UzbekVoiceBot","sub_path":"src/handlers/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":9714,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"81"} +{"seq_id":"37247504124","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('my_bbs', '0003_auto_20200524_1554'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='ReadRecord',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ('create_time', models.DateTimeField(verbose_name='创建时间', auto_now_add=True)),\n ('update_time', models.DateTimeField(verbose_name='更新时间', auto_now=True)),\n ('is_delete', models.BooleanField(verbose_name='删除标志', default=False)),\n ('user_id', models.IntegerField(verbose_name='用户id')),\n ('article_id', models.IntegerField(verbose_name='文章id')),\n ],\n options={\n 'verbose_name': '浏览记录',\n 'verbose_name_plural': '浏览记录',\n 'db_table': 'read_record',\n },\n ),\n ]\n","repo_name":"shenxuexin/bbs","sub_path":"bbs/my_bbs/migrations/0004_readrecord.py","file_name":"0004_readrecord.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14546483756","text":"from flask import Flask, redirect, render_template, request, flash, session\nfrom models import db, connect_db, User, Post, Tag\n\napp = Flask(__name__)\n\napp.config['SECRET_KEY'] = \"secret\"\napp.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql:///bogly'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SQLALCHEMY_ECHO'] = True\napp.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False\n\nconnect_db(app)\ndb.create_all()\n\n\n@app.route(\"/\")\ndef homepage():\n \"\"\"Home Page - Will show list of users\"\"\"\n return render_template(\"home.html\")\n\n\n@app.route(\"/users\")\ndef all_users():\n \"\"\"Will Show List of All Users in DB\"\"\"\n users = User.query.all()\n return render_template('users.html', users=users)\n\n\n@app.route('/users/new', methods=[\"GET\"])\ndef users_new_form():\n 
\"\"\"Display Form to Add User\"\"\"\n return render_template('new.html')\n\n\n@app.route(\"/users/new\", methods=[\"POST\"])\ndef users_new():\n \"\"\"Handle Form Submission for new User\"\"\"\n # create new user from form data\n new_user = User(\n first_name=request.form['fname'],\n last_name=request.form['lname'],\n image_url=request.form['iurl'] or None)\n # add user to db and commit\n db.session.add(new_user)\n db.session.commit()\n return redirect(\"/users\")\n\n\n@app.route(\"/users/\")\ndef user_profile(user_id):\n \"\"\"Display User Information\"\"\"\n user = User.query.get_or_404(user_id)\n return render_template(\"userprofile.html\", user=user)\n\n\n@app.route(\"/users//edit\")\ndef user_edit(user_id):\n \"\"\"Display Form to Edit user\"\"\"\n user = User.query.get_or_404(user_id)\n return render_template(\"edit.html\", user=user)\n\n\n@app.route('/users//edit', methods=[\"POST\"])\ndef users_modify(user_id):\n \"\"\"Handle form submission for modifying user\"\"\"\n # get data from user input\n user = User.query.get_or_404(user_id)\n user.first_name = request.form['fname']\n user.last_name = request.form['lname']\n user.image_url = request.form['iurl']\n # update user in DB and commit\n db.session.add(user)\n db.session.commit()\n return redirect(f\"/users/{user_id}\")\n\n\n@app.route('/users//delete', methods=[\"POST\"])\ndef delete_user(user_id):\n \"\"\"Handle form submission for deleting an existing user\"\"\"\n # get userid and delete from DB\n user = User.query.get_or_404(user_id)\n db.session.delete(user)\n db.session.commit()\n return redirect(\"/users\")\n\n\n@app.route('/users//post-form')\ndef post_form(user_id):\n \"\"\"Take to Form to upload post\"\"\"\n user = User.query.get_or_404(user_id)\n tags = Tag.query.all()\n return render_template(\"post-form.html\", user=user, tags=tags)\n\n\n@app.route('/users//post-form', methods=[\"POST\"])\ndef handle_post(user_id):\n \"\"\"Handle form submission for adding a post\"\"\"\n # get form data and make post\n user = User.query.get_or_404(user_id)\n tag_ids = [int(num) for num in request.form.getlist(\"tags\")]\n tags = Tag.query.filter(Tag.id.in_(tag_ids)).all()\n\n new_post = Post(title=request.form['title'],\n content=request.form['content'], user=user, tags=tags)\n\n db.session.add(new_post)\n db.session.commit()\n flash(f\"Post '{new_post.title}' added.\")\n return redirect(f\"/users/{user_id}\")\n\n\n@app.route(\"/posts/\")\ndef display_post(post_id):\n \"\"\"Display Post Information\"\"\"\n post = Post.query.get_or_404(post_id)\n return render_template(\"post.html\", post=post)\n\n\n@app.route(\"/posts//edit\")\ndef edit_post(post_id):\n \"\"\"Display Form to edit\"\"\"\n post = Post.query.get_or_404(post_id)\n return render_template(\"edit-post.html\", post=post)\n\n\n@app.route(\"/posts//edit\", methods=[\"POST\"])\ndef modify_post(post_id):\n \"\"\"Handle Form to edit post\"\"\"\n post = Post.query.get_or_404(post_id)\n post.title = request.form['title']\n post.content = request.form['content']\n\n db.session.add(post)\n db.session.commit()\n\n flash(f\"Post '{post.title}' edited.\")\n post = Post.query.get_or_404(post_id)\n\n return redirect(f\"/posts/{post.id}\")\n\n\n@app.route('/posts//delete', methods=[\"POST\"])\ndef delete_post(post_id):\n \"\"\"Handle form submission for deleting an existing post\"\"\"\n # get post-id and delete from DB\n post = Post.query.get_or_404(post_id)\n db.session.delete(post)\n db.session.commit()\n return redirect(\"/users\")\n\n\n@app.route('/tags')\ndef all_tags():\n \"\"\"Will 
Show List of All Tags in DB\"\"\"\n    tags = Tag.query.all()\n    return render_template('tags.html', tags=tags)\n\n\n@app.route('/tags/new', methods=[\"GET\"])\ndef tags_new_form():\n    \"\"\"Display Form to Add Tag\"\"\"\n    return render_template('new-tag.html')\n\n\n@app.route(\"/tags/new\", methods=[\"POST\"])\ndef tags_new():\n    \"\"\"Handle Form Submission for new Tag\"\"\"\n    # create new tag from form data\n    new_tag = Tag(\n        title=request.form['title'])\n    # add tag to db and commit\n    db.session.add(new_tag)\n    db.session.commit()\n    return redirect(\"/tags\")\n\n\n@app.route(\"/tags/<int:tag_id>\")\ndef display_tag(tag_id):\n    \"\"\"Display Tag Information\"\"\"\n    tag = Tag.query.get_or_404(tag_id)\n    return render_template(\"tag.html\", tag=tag)\n\n\n@app.route(\"/tags/<int:tag_id>/edit\")\ndef edit_tag(tag_id):\n    \"\"\"Display Form to edit tag\"\"\"\n    tag = Tag.query.get_or_404(tag_id)\n    return render_template(\"edit-tag.html\", tag=tag)\n\n\n@app.route(\"/tags/<int:tag_id>/edit\", methods=[\"POST\"])\ndef modify_tag(tag_id):\n    \"\"\"Handle Form to edit tag\"\"\"\n    tag = Tag.query.get_or_404(tag_id)\n\n    tag.title = request.form['title']\n\n    db.session.add(tag)\n    db.session.commit()\n\n    flash(f\"Tag '{tag.title}' edited.\")\n    tag = Tag.query.get_or_404(tag_id)\n\n    return redirect(f\"/tags/{tag.id}\")\n\n\n@app.route('/tags/<int:tag_id>/delete', methods=[\"POST\"])\ndef delete_tag(tag_id):\n    \"\"\"Handle form submission for deleting an existing tag\"\"\"\n    # get tag-id and delete from DB\n    tag = Tag.query.get_or_404(tag_id)\n    db.session.delete(tag)\n    db.session.commit()\n    return redirect(\"/tags\")\n","repo_name":"hassankaz1/blogly-p3","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"20524164269","text":"import logging\nfrom time import sleep, time\nimport numpy as np\nfrom pymeasure.instruments import Instrument\nfrom pymeasure.instruments.validators import modular_range_bidirectional\nfrom pymeasure.instruments.validators import strict_discrete_set\nfrom pymeasure.instruments.validators import strict_range\n\n# =============================================================================\n# Logging\n# =============================================================================\n\nlog = logging.getLogger(__name__)\nlog.addHandler(logging.NullHandler())\n\n\n# =============================================================================\n# Instrument file\n# =============================================================================\n\n\nclass DSPBase(Instrument):\n    \"\"\"This is the base class for the Signal Recovery DSP 72XX lock-in\n    amplifiers.\n\n    Do not directly instantiate an object with this class. Use one of the\n    DSP 72XX series instrument classes that inherit from this parent\n    class. 
Floating point command mode (i.e., the inclusion of the ``.``\n character in commands) is included for usability.\n\n Untested commands are noted in docstrings.\n \"\"\"\n\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Constants\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n SENSITIVITIES = [\n np.nan, 2.0e-9, 5.0e-9, 10.0e-9, 20.0e-9, 50.0e-9, 100.0e-9,\n 200.0e-9, 500.0e-9, 1.0e-6, 2.0e-6, 5.0e-6, 10.0e-6,\n 20.0e-6, 50.0e-6, 100.0e-6, 200.0e-6, 500.0e-6, 1.0e-3,\n 2.0e-3, 5.0e-3, 10.0e-3, 20.0e-3, 50.0e-3, 100.0e-3,\n 200.0e-3, 500.0e-3, 1.0\n ]\n SEN_MULTIPLIER = [1, 1e-6, 1e-8]\n\n TIME_CONSTANTS = [\n 10.0e-6, 20.0e-6, 40.0e-6, 80.0e-6, 160.0e-6, 320.0e-6,\n 640.0e-6, 5.0e-3, 10.0e-3, 20.0e-3, 50.0e-3, 100.0e-3,\n 200.0e-3, 500.0e-3, 1.0, 2.0, 5.0, 10.0, 20.0, 50.0,\n 100.0, 200.0, 500.0, 1.0e3, 2.0e3, 5.0e3, 10.0e3,\n 20.0e3, 50.0e3\n ]\n REFERENCES = ['internal', 'external rear', 'external front']\n IMODES = ['voltage mode', 'current mode', 'low noise current mode']\n\n CURVE_BITS = ['x', 'y', 'magnitude', 'phase', 'sensitivity', 'adc1',\n 'adc2', 'dac1', 'dac2', 'noise', 'ratio', 'log ratio',\n 'event', 'frequency part 1', 'frequency part 2',\n # Dual modes\n 'x2', 'y2', 'magnitude2', 'phase2', 'sensitivity2']\n\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Initializer and important communication methods\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n def __init__(self, adapter, name=\"Signal Recovery DSP 72XX Base\", **kwargs):\n super().__init__(\n adapter,\n name,\n includeSCPI=False,\n **kwargs\n )\n\n def read(self, **kwargs):\n \"\"\"Read the response and remove extra unicode character from instrument readings.\"\"\"\n return super().read(**kwargs).replace('\\x00', '')\n\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Properties\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n id = Instrument.measurement(\n \"ID\",\n \"\"\"Measure the model number of the instrument.\n\n Returned value is an integer.\"\"\",\n cast=int\n )\n\n imode = Instrument.control(\n \"IMODE\", \"IMODE %d\",\n \"\"\"Control the lock-in amplifier to detect a voltage or current\n signal.\n\n Valid values are ``voltage mode, ``current mode``, or ``low noise current mode``.\n \"\"\",\n validator=strict_discrete_set,\n values=IMODES,\n map_values=True\n )\n\n slope = Instrument.control(\n \"SLOPE\", \"SLOPE %d\",\n \"\"\"Control the low-pass filter roll-off.\n\n Valid values are the integers 6, 12, 18, or 24, which represents the\n slope of the low-pass filter in dB/octave.\n \"\"\",\n validator=strict_discrete_set,\n values=[6, 12, 18, 24],\n map_values=True\n )\n\n time_constant = Instrument.control(\n \"TC\", \"TC %d\",\n \"\"\"Control the filter time constant.\n\n Valid values are a strict set of time constants from 10 us to 50,000 s.\n Returned values are floating point numbers in seconds.\n \"\"\",\n validator=strict_discrete_set,\n values=TIME_CONSTANTS,\n map_values=True\n )\n\n shield = Instrument.control(\n \"FLOAT\", \"FLOAT %d\",\n \"\"\"Control the input connector shield state.\n\n Valid values are 0 to have shields grounded or 1 to have the shields\n floating (i.e., connected to ground via a 1 kOhm resistor).\n \"\"\",\n validator=strict_discrete_set,\n values=[0, 1]\n )\n\n fet = Instrument.control(\n \"FET\", \"FET %d\",\n \"\"\"Control the voltage preamplifier transistor type.\n\n Valid 
values are 0 for bipolar or 1 for FET.\n \"\"\",\n validator=strict_discrete_set,\n values=[0, 1]\n )\n\n coupling = Instrument.control(\n \"CP\", \"CP %d\",\n \"\"\"Control the input coupling mode.\n\n Valid values are 0 for AC coupling mode or 1 for DC coupling mode.\n \"\"\",\n validator=strict_discrete_set,\n values=[0, 1]\n )\n\n voltage = Instrument.control(\n \"OA.\", \"OA. %g\",\n \"\"\"Control the oscillator amplitude.\n\n Valid values are floating point numbers between 0 to 5 V.\n \"\"\",\n validator=strict_range,\n values=[0, 5]\n )\n\n frequency = Instrument.control(\n \"OF.\", \"OF. %g\",\n \"\"\"Control the oscillator frequency.\n\n Valid values are floating point numbers representing the frequency in Hz.\n \"\"\",\n validator=strict_range,\n values=[0, 2.5e5],\n dynamic=True\n )\n\n reference = Instrument.control(\n \"IE\", \"IE %d\",\n \"\"\"Control the oscillator reference input mode.\n\n Valid values are ``internal``, ``external rear`` or ``external front``.\n \"\"\",\n validator=strict_discrete_set,\n values=REFERENCES,\n map_values=True\n )\n\n harmonic = Instrument.control(\n \"REFN\", \"REFN %d\",\n \"\"\"Control the reference harmonic mode.\n\n Valid values are integers.\n \"\"\",\n validator=strict_range,\n values=[1, 65535],\n dynamic=True\n )\n\n reference_phase = Instrument.control(\n \"REFP.\", \"REFP. %g\",\n \"\"\"Control the reference absolute phase angle.\n\n Valid values are floating point numbers between 0 - 360 degrees.\n \"\"\",\n validator=modular_range_bidirectional,\n values=[0, 360]\n )\n\n dac1 = Instrument.control(\n \"DAC. 1\", \"DAC. 1 %g\",\n \"\"\"Control the voltage of the DAC1 output on the rear panel.\n\n Valid values are floating point numbers between -12 to 12 V.\n \"\"\",\n validator=strict_range,\n values=[-12, 12]\n )\n\n dac2 = Instrument.control(\n \"DAC. 2\", \"DAC. 2 %g\",\n \"\"\"Control the voltage of the DAC2 output on the rear panel.\n\n Valid values are floating point numbers between -12 to 12 V.\n \"\"\",\n validator=strict_range,\n values=[-12, 12]\n )\n\n @property\n def gain(self):\n \"\"\"Control the AC gain of signal channel amplifier.\"\"\"\n return self.values(\"ACGAIN\")\n\n @gain.setter\n def gain(self, value):\n value = strict_discrete_set(int(value / 10), list(range(0, 10)))\n self.write(\"ACGAIN %d\" % value)\n\n @property\n def sensitivity(self):\n \"\"\"Control the signal's measurement sensitivity range.\n\n When in voltage measurement mode, valid values are discrete values from\n 2 nV to 1 V. 
When in current measurement mode, valid values are\n discrete values from 2 fA to 1 µA (for normal current mode) or up to\n 10 nA (for low noise current mode).\n \"\"\"\n return self.values(\"SEN.\")[0]\n\n @sensitivity.setter\n def sensitivity(self, value):\n # get the voltage/current mode:\n imode = self.IMODES.index(self.imode)\n\n # Scale the sensitivities to the correct range for voltage/current mode\n sensitivities = [s * self.SEN_MULTIPLIER[imode]\n for s in self.SENSITIVITIES]\n if imode == 2:\n sensitivities[0:7] = [np.nan] * 7\n\n # Check and map the value\n value = strict_discrete_set(value, sensitivities)\n value = sensitivities.index(value)\n\n # Set sensitivity\n self.write(\"SEN %d\" % value)\n\n @property\n def auto_gain(self):\n \"\"\"Control lock-in amplifier for automatic AC gain.\"\"\"\n return int(self.values(\"AUTOMATIC\")) == 1\n\n @auto_gain.setter\n def auto_gain(self, value):\n if value:\n self.write(\"AUTOMATIC 1\")\n else:\n self.write(\"AUTOMATIC 0\")\n\n x = Instrument.measurement(\n \"X.\",\n \"\"\"Measure the output signal's X channel.\n\n Returned value is a floating point number in volts.\n \"\"\"\n )\n\n y = Instrument.measurement(\n \"Y.\",\n \"\"\"Measure the output signal's Y channel.\n\n Returned value is a floating point number in volts.\n \"\"\"\n )\n\n xy = Instrument.measurement(\n \"XY.\",\n \"\"\"Measure both the X and Y channels.\n\n Returned values are floating point numbers in volts.\n \"\"\"\n )\n\n mag = Instrument.measurement(\n \"MAG.\",\n \"\"\"Measure the magnitude of the signal.\n\n Returned value is a floating point number in volts.\n \"\"\"\n )\n\n phase = Instrument.measurement(\n \"PHA.\",\n \"\"\"Measure the signal's absolute phase angle.\n\n Returned value is a floating point number in degrees.\n \"\"\"\n )\n\n adc1 = Instrument.measurement(\n \"ADC. 1\",\n \"\"\"Measure the voltage of the ADC1 input on the rear panel.\n\n Returned value is a floating point number in volts.\n \"\"\"\n )\n\n adc2 = Instrument.measurement(\n \"ADC. 
2\",\n \"\"\"Measure the voltage of the ADC2 input on the rear panel.\n\n Returned value is a floating point number in volts.\n \"\"\"\n )\n\n ratio = Instrument.measurement(\n \"RT.\",\n \"\"\"Measure the ratio between the X channel and ADC1.\n\n Returned value is a unitless floating point number equivalent to the\n mathematical expression X/ADC1.\n \"\"\"\n )\n\n log_ratio = Instrument.measurement(\n \"LR.\",\n \"\"\"\n Measure the log (base 10) of the ratio between the X channel and ADC1.\n\n Returned value is a unitless floating point number equivalent to the\n mathematical expression log(X/ADC1).\n \"\"\"\n )\n\n curve_buffer_bits = Instrument.control(\n \"CBD\", \"CBD %d\",\n \"\"\"Control which data outputs are stored in the curve buffer.\n\n Valid values are values are integers between 1 and 65,535 (or 2,097,151\n in dual reference mode).\n \"\"\",\n values=[1, 2097151],\n validator=strict_range,\n cast=int,\n dynamic=True\n )\n\n curve_buffer_length = Instrument.control(\n \"LEN\", \"LEN %d\",\n \"\"\"Control the length of the curve buffer.\n\n Valid values are integers between 1 and 32,768, but the actual maximum\n amount of points is determined by the amount of curves that are stored,\n as set via the curve_buffer_bits property (32,768 / n).\n \"\"\",\n values=[1, 32768],\n validator=strict_range,\n cast=int\n )\n\n curve_buffer_interval = Instrument.control(\n \"STR\", \"STR %d\",\n \"\"\"Control the time interval between the collection of successive\n points in the curve buffer.\n\n Valid values to the the time interval are integers in ms with a\n resolution of 5 ms; input values are rounded up to a multiple of 5.\n Valid values are values between 0 and 1,000,000,000 (corresponding to\n 12 days). The interval may be set to 0, which sets the rate of data\n storage to the curve buffer to 1.25 ms/point (800 Hz). However this\n only allows storage of the X and Y channel outputs. 
There is no need to\n issue a CBD 3 command to set this up since it happens automatically\n when acquisition starts.\n \"\"\",\n values=[1, 1000000000],\n validator=strict_range,\n cast=int\n )\n\n curve_buffer_status = Instrument.measurement(\n \"M\",\n \"\"\"Measure the status of the curve buffer acquisition.\n\n Command returns four values:\n **First value - Curve Acquisition Status:** Number with 5 possibilities:\n 0: no activity\n 1: acquisition via TD command running\n 2: acquisition by a TDC command running\n 5: acquisition via TD command halted\n 6: acquisition bia TDC command halted\n **Second value - Number of Sweeps Acquired**: Number of sweeps already\n acquired.\n **Third value - Status Byte:** Decimal representation of the status byte\n (the same response as the ST command\n **Fourth value - Number of Points Acquired:** Number of points acquired\n in the curve buffer.\n \"\"\",\n cast=int,\n )\n\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Methods\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n def set_voltage_mode(self):\n \"\"\"Sets lock-in amplifier to measure a voltage signal.\"\"\"\n self.write(\"IMODE 0\")\n\n def setDifferentialMode(self, lineFiltering=True):\n \"\"\"Sets lock-in amplifier to differential mode, measuring A-B.\"\"\"\n self.write(\"VMODE 3\")\n self.write(\"LF %d 0\" % (3 if lineFiltering else 0))\n\n def setChannelAMode(self):\n \"\"\"Sets lock-in amplifier to measure a voltage signal only from the A\n input connector.\n \"\"\"\n self.write(\"VMODE 1\")\n\n def auto_sensitivity(self):\n \"\"\"Adjusts the full-scale sensitivity so signal's magnitude lies\n between 30 - 90 % of full-scale.\n \"\"\"\n self.write(\"AS\")\n\n def auto_phase(self):\n \"\"\"Adjusts the reference absolute phase to maximize the X channel\n output and minimize the Y channel output signals.\n \"\"\"\n self.write(\"AQN\")\n\n def init_curve_buffer(self):\n \"\"\"Initializes the curve storage memory and status variables. All\n record of previously taken curves is removed.\n \"\"\"\n self.write(\"NC\")\n\n def set_buffer(self, points, quantities=None, interval=10.0e-3):\n \"\"\"Prepares the curve buffer for a measurement.\n\n :param int points:\n Number of points to be recorded in the curve buffer\n\n :param list quantities:\n List containing the quantities (strings) that are to be\n recorded in the curve buffer, can be any of:\n 'x', 'y', 'magnitude', 'phase', 'sensitivity', 'adc1', 'adc2',\n 'adc3', 'dac1', 'dac2',\n 'noise', 'ratio', 'log ratio', 'event', 'frequency'\n (or 'frequency part 1' and 'frequency part 2');\n for both dual modes, additional options are:\n 'x2', 'y2', 'magnitude2', 'phase2', 'sensitivity2'.\n Default is 'x' and 'y'.\n\n :param float interval:\n The interval between two subsequent points stored in the\n curve buffer in s. Default is 10 ms.\n \"\"\"\n\n if quantities is None:\n quantities = [\"x\", \"y\"]\n\n if \"frequency\" in quantities:\n quantities.remove(\"frequency\")\n quantities.extend([\n \"frequency part 1\",\n \"frequency part 2\"\n ])\n\n # remove all possible duplicates\n quantities = list({q.lower() for q in quantities})\n\n bits = 0\n for q in quantities:\n bits += 2 ** self.CURVE_BITS.index(q)\n\n self.curve_buffer_bits = bits\n self.curve_buffer_length = points\n\n self.curve_buffer_interval = int(interval * 1000)\n self.init_curve_buffer()\n\n def start_buffer(self):\n \"\"\"Initiates data acquisition. 
Acquisition starts at the current\n        position in the curve buffer and continues at the rate set by the STR\n        command until the buffer is full.\n        \"\"\"\n        self.write(\"TD\")\n\n    def wait_for_buffer(self, timeout=None, delay=0.1):\n        \"\"\"Wait until the curve buffer is filled, or until ``timeout`` seconds\n        have elapsed.\n        \"\"\"\n        start = time()\n        while self.curve_buffer_status[0] == 1:\n            sleep(delay)\n            if timeout is not None and time() > start + timeout:\n                break\n\n    def get_buffer(self, quantity=None,\n                   convert_to_float=True, wait_for_buffer=True):\n        \"\"\"Retrieves the buffer after it has been filled. The data retrieved\n        from the lock-in is in a fixed-point format, which requires translation\n        before it can be interpreted as meaningful data. When\n        `convert_to_float` is True the conversion is performed (if possible)\n        before returning the data.\n\n        :param str quantity:\n            If provided, names the quantity that is to be retrieved from the\n            curve buffer; can be any of:\n            'x', 'y', 'magnitude', 'phase', 'sensitivity', 'adc1', 'adc2',\n            'adc3', 'dac1', 'dac2', 'noise', 'ratio', 'log ratio', 'event',\n            'frequency part 1' and 'frequency part 2';\n            for both dual modes, additional options are:\n            'x2', 'y2', 'magnitude2', 'phase2', 'sensitivity2'.\n            If no quantity is provided, all available data is retrieved.\n\n        :param bool convert_to_float:\n            Bool that determines whether to convert the fixed-point buffer-data\n            to meaningful floating point values via the `buffer_to_float`\n            method. If True, this method tries to convert all the available\n            data to meaningful values; if this is not possible, an exception\n            will be raised. If False, this conversion is not performed and the\n            raw buffer-data is returned.\n\n        :param bool wait_for_buffer:\n            Bool that determines whether to wait for the data acquisition to\n            finish if this method is called before the acquisition is\n            finished. 
If True, the method waits until the buffer is filled\n before continuing; if False, the method raises an exception if the\n acquisition is not finished when the method is called.\n \"\"\"\n\n # Check if buffer is finished\n if self.curve_buffer_status[0] != 0:\n if wait_for_buffer:\n self.wait_for_buffer()\n else:\n raise RuntimeError(\"Buffer acquisition is not yet finished.\")\n\n # Check which quantities are recorded in the buffer\n bits = format(self.curve_buffer_bits, '021b')[::-1]\n quantity_enums = [e for e, b in enumerate(bits) if b == \"1\"]\n\n # Check if the provided quantity (if any) is indeed recorded\n if quantity is not None:\n if self.CURVE_BITS.index(quantity) in quantity_enums:\n quantity_enums = [self.CURVE_BITS.index(quantity)]\n else:\n raise KeyError(\"The selected quantity '%s' is not recorded;\"\n \"quantity should be one of: %s\" % (\n quantity, \", \".join(\n [self.CURVE_BITS[q] for q in quantity_enums]\n )))\n\n # Retrieve the data\n data = {}\n for enum in quantity_enums:\n self.write(\"DC %d\" % enum)\n q_data = []\n\n while True:\n stb = format(self.adapter.connection.read_stb(), '08b')[::-1]\n\n if bool(int(stb[2])):\n raise ValueError(\"Status byte reports command parameter error.\")\n\n if bool(int(stb[0])):\n break\n\n if bool(int(stb[7])):\n q_data.append(int(self.read().strip()))\n\n data[self.CURVE_BITS[enum]] = np.array(q_data)\n\n if convert_to_float:\n data = self.buffer_to_float(data)\n\n if quantity is not None:\n data = data[quantity]\n\n return data\n\n def buffer_to_float(self, buffer_data, sensitivity=None,\n sensitivity2=None, raise_error=True):\n \"\"\"Converts fixed-point buffer data to floating point data.\n\n The provided data is converted as much as possible, but there are some\n requirements to the data if all provided columns are to be converted;\n if a key in the provided data cannot be converted it will be omitted in\n the returned data or an exception will be raised, depending on the\n value of raise_error.\n\n The requirements for converting the data are as follows:\n\n - Converting X, Y, magnitude and noise requires sensitivity data, which\n can either be part of the provided data or can be provided via the\n sensitivity argument\n - The same holds for X2, Y2 and magnitude2 with sensitivity2.\n - Converting the frequency requires both 'frequency part 1' and\n 'frequency part 2'.\n\n :param dict buffer_data:\n The data to be converted. Must be in the format as returned by the\n `get_buffer` method: a dict of numpy arrays.\n\n :param sensitivity:\n If provided, the sensitivity used to convert X, Y, magnitude and\n noise. Can be provided as a float or as an array that matches the\n length of elements in `buffer_data`. If both a sensitivity is\n provided and present in the buffer_data, the provided value is used\n for the conversion, but the sensitivity in the buffer_data is\n stored in the returned dict.\n\n :param sensitivity2:\n Same as the first sensitivity argument, but for X2, Y2, magnitude2\n and noise2.\n\n :param bool raise_error:\n Determines whether an exception is raised in case not all keys\n provided in buffer_data can be converted. 
If False, the columns\n that cannot be converted are omitted in the returned dict.\n\n :return: Floating-point buffer data\n :rtype: dict\n \"\"\"\n\n data = {}\n\n def maybe_raise(message):\n if raise_error:\n raise ValueError(message)\n\n def convert_if_present(keys, multiply_by=1):\n \"\"\"Copy any available entries from buffer_data to data, scale with\n multiply_by.\n \"\"\"\n for key in keys:\n if key in buffer_data:\n data[key] = buffer_data[key] * multiply_by\n\n # Sensitivity (for both single and dual modes)\n for key in [\"sensitivity\", \"sensitivity2\"]:\n if key in buffer_data:\n data[key] = np.array([\n self.SENSITIVITIES[v % 32] * self.SEN_MULTIPLIER[v // 32]\n for v in buffer_data[key]\n ])\n # Try to set sensitivity values from arg or data\n sensitivity = sensitivity or data.get('sensitivity', None)\n sensitivity2 = sensitivity2 or data.get('sensitivity2', None)\n\n if any([\"x\" in buffer_data,\n \"y\" in buffer_data,\n \"magnitude\" in buffer_data,\n \"noise\" in buffer_data, ]):\n if sensitivity is None:\n maybe_raise(\"X, Y, magnitude and noise cannot be converted as \"\n \"no sensitivity is provided, neither as argument \"\n \"nor as part of the buffer_data. \")\n else:\n convert_if_present([\"x\", \"y\", \"magnitude\", \"noise\"], sensitivity / 10000)\n\n # phase data (for both single and dual modes)\n convert_if_present([\"phase\", \"phase2\"], 1 / 100)\n\n # frequency data from frequency part 1 and 2\n if \"frequency part 1\" in buffer_data or \"frequency part 2\" in buffer_data:\n if \"frequency part 1\" in buffer_data and \"frequency part 2\" in buffer_data:\n data[\"frequency\"] = np.array([\n int(format(v2, \"016b\") + format(v1, \"016b\"), 2) / 1000 for\n v1, v2 in zip(buffer_data[\"frequency part 1\"], buffer_data[\"frequency part 2\"])\n ])\n else:\n maybe_raise(\"Can calculate the frequency only when both\"\n \"frequency part 1 and 2 are provided.\")\n\n # conversion for, adc1, adc2, dac1, dac2, ratio, and log ratio\n convert_if_present([\"adc1\", \"adc2\", \"dac1\",\n \"dac2\", \"ratio\", \"log ratio\"], 1 / 1000)\n\n # adc3 (integrating converter); requires a call to adc3_time\n if \"adc3\" in buffer_data:\n data[\"adc3\"] = buffer_data[\"adc3\"] / (50000 * self.adc3_time)\n\n # event does not require a conversion\n convert_if_present([\"event\"])\n\n # X, Y, and magnitude data for both dual modes\n if any([\"x2\" in buffer_data,\n \"y2\" in buffer_data,\n \"magnitude2\" in buffer_data, ]):\n if sensitivity2 is None:\n maybe_raise(\"X2, Y2 and magnitude2 cannot be converted as no \"\n \"sensitivity2 is provided, neither as argument nor \"\n \"as part of the buffer_data. 
\")\n            else:\n                convert_if_present([\"x2\", \"y2\", \"magnitude2\"], sensitivity2 / 10000)\n\n        return data\n\n    def shutdown(self):\n        \"\"\"Safely shutdown the lock-in amplifier.\n\n        Sets oscillator amplitude to 0 V and AC gain to 0 dB.\n        \"\"\"\n        log.info(\"Shutting down %s.\" % self.name)\n        self.voltage = 0.\n        self.gain = 0.\n        super().shutdown()\n","repo_name":"pymeasure/pymeasure","sub_path":"pymeasure/instruments/signalrecovery/dsp_base.py","file_name":"dsp_base.py","file_ext":"py","file_size_in_byte":25859,"program_lang":"python","lang":"en","doc_type":"code","stars":514,"dataset":"github-code","pt":"81"}
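The buffer workflow in `dsp_base.py` above (set_buffer → start_buffer → wait_for_buffer → get_buffer) is easiest to see end-to-end. A minimal usage sketch, assuming a concrete subclass such as `DSP7265` and an illustrative GPIB address (both assumptions, not prescribed by this module); recording `sensitivity` alongside `x`/`y` lets `buffer_to_float` scale the fixed-point readings without extra arguments:

```python
# Hedged sketch: acquire a 1000-point X/Y curve at 10 ms/point.
from pymeasure.instruments.signalrecovery import DSP7265

lockin = DSP7265("GPIB0::12::INSTR")  # address is illustrative
# Record sensitivity too, so get_buffer() can convert x/y to volts.
lockin.set_buffer(points=1000, quantities=["x", "y", "sensitivity"], interval=10e-3)
lockin.start_buffer()               # issues TD: filling starts at the STR rate
lockin.wait_for_buffer(timeout=30)  # polls curve_buffer_status until done
data = lockin.get_buffer()          # dict of numpy arrays, e.g. data["x"] in V
```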
{"seq_id":"75136438345","text":"from collections import defaultdict\nfrom itertools import accumulate\nfrom typing import List\n\n\nclass Solution:\n    def countPairs(self, n: int, edges: List[List[int]]) -> int:\n        \"\"\"\n        use dfs to find the size of each connected component,\n        then multiply the node counts pairwise;\n        union find also works but gets TLE here\n        \"\"\"\n        graph = defaultdict(list)\n        for u, v in edges:\n            graph[u].append(v)\n            graph[v].append(u)\n\n        seen = set()\n\n        curr = 1\n        def dfs(node):\n            nonlocal curr\n            seen.add(node)\n            for nei in graph[node]:\n                if nei not in seen:\n                    dfs(nei)\n                    curr += 1\n\n        sets = list()\n\n        for i in range(n):\n            if i not in seen:\n                curr = 1\n                dfs(i)\n                sets.append(curr)\n\n        values = list(accumulate(sets))\n        m = len(values)\n        ans = 0\n        for i in range(m):\n            ans += sets[i] * (values[-1] - values[i])\n\n        return ans\n","repo_name":"bamblebam/competitive-programming","sub_path":"2022/6_June_22/25-6-22/countunreachablepairsofnodesinanundirectedgraph.py","file_name":"countunreachablepairsofnodesinanundirectedgraph.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"33038797379","text":"import requests\n\n\ndef test_api_running():\n    context = \"test\"\n    payload = {\n        \"context\": context,\n        \"token_max_length\": 2,\n        \"temperature\": 0.0,\n        \"top_p\": 0.0,\n    }\n    response = requests.post(\n        \"http://api.vicgalle.net:5000/generate\", params=payload\n    ).json()\n\n    assert len(response[\"text\"]) > 0\n","repo_name":"vicgalle/gpt-j-api","sub_path":"tests/test_api_up.py","file_name":"test_api_up.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","stars":331,"dataset":"github-code","pt":"81"}
{"seq_id":"3149440732","text":"#!/usr/bin/env python\n\nimport rospy\nfrom sensor_msgs.msg import PointCloud2\nfrom std_msgs.msg import Float64\nfrom ros_numpy import msgify, numpify\nimport tf2_ros\n\nimport numpy as np\nfrom timeit import default_timer as timer\nimport scipy.spatial\n\n\ndef reduce_rewards(rewards, eps=1e-6):\n    assert isinstance(rewards, np.ndarray)\n    assert len(rewards.shape) >= 2\n    n_pts = rewards.shape[0]  # (n, >=2)\n    rewards = rewards.reshape([n_pts, -1])\n    rewards = np.clip(rewards, eps, 1 - eps)\n    lo = np.log(1. - rewards)\n    lo = lo.sum(axis=1)\n    rewards = 1. - np.exp(lo)\n    assert rewards.shape == (n_pts,)\n    return rewards\n\n\nclass RewardsAccumulator:\n    \"\"\"\n    This ROS node subscribes to a local rewards cloud (PointCloud2 msgs)\n    and concatenates the observations into one global PointCloud2 map.\n    New points are identified by a proximity threshold against the points\n    already in the map; observations are first transformed to the robot's\n    ground-truth frame before being concatenated.\n    \"\"\"\n    def __init__(self, reward_cloud_topic='reward_cloud'):\n        # Set the device\n        self.global_map = None\n        self.local_map = None\n        self.new_map = None\n        self.map_frame = None\n\n        # any point farther than this threshold from points in the existing pcl is considered new\n        self.dist_th = rospy.get_param('~pts_proximity_th', 0.5)\n        self.max_age = rospy.get_param('~max_age', 10.0)\n\n        self.reward_cloud_rate = rospy.get_param('~reward_cloud_rate', 1.0)\n        self.rewards_cloud_pub = rospy.Publisher('~rewards_map', PointCloud2, queue_size=1)\n        self.new_cloud_pub = rospy.Publisher('~new_rewards_map', PointCloud2, queue_size=1)\n        self.reward_pub = rospy.Publisher('~total_reward', Float64, queue_size=1)\n\n        self.tf = tf2_ros.Buffer(cache_time=rospy.Duration(100))\n        self.tl = tf2_ros.TransformListener(self.tf)\n\n        self.local_map_sub = rospy.Subscriber(reward_cloud_topic, PointCloud2, self.accumulate_reward_clouds_cb)\n        rospy.loginfo('Rewards accumulator node is ready.')\n\n    def accumulate_reward_clouds_cb(self, pc_msg):\n        assert isinstance(pc_msg, PointCloud2)\n        rospy.logdebug('Point cloud is received')\n\n        # # Discard old messages.\n        # msg_stamp = rospy.Time.now()\n        # age = (msg_stamp - pc_msg.header.stamp).to_sec()\n        # if age > self.max_age:\n        #     rospy.logwarn('Rewards accumulator: Discarding points %.1f s > %.1f s old.', age, self.max_age)\n        #     return\n\n        self.map_frame = pc_msg.header.frame_id\n\n        try:\n            transform1 = self.tf.lookup_transform('X1', pc_msg.header.frame_id,\n                                                  pc_msg.header.stamp, rospy.Duration(30))\n        except (tf2_ros.LookupException, tf2_ros.ExtrapolationException):\n            rospy.logwarn('Map accumulator: No transform between estimated robot pose and its ground truth')\n            return\n\n        try:\n            transform2 = self.tf.lookup_transform(pc_msg.header.frame_id, 'X1_ground_truth',\n                                                  pc_msg.header.stamp, rospy.Duration(30))\n        except (tf2_ros.LookupException, tf2_ros.ExtrapolationException):\n            rospy.logwarn('Map accumulator: No transform between estimated robot pose and its ground truth')\n            return\n\n        t0 = timer()\n\n        # Transform local map to ground truth localization frame\n        local_map = numpify(pc_msg)\n        local_map = np.vstack([local_map[f] for f in ['x', 'y', 'z', 'reward']])\n        assert len(local_map.shape) == 2\n        assert local_map.shape[0] == 4\n\n        # transform new points to be on a ground truth mesh\n        T1 = numpify(transform1.transform)\n        T2 = numpify(transform2.transform)\n        T = np.matmul(T2, T1)\n        R, t = T[:3, :3], T[:3, 3]\n        local_map[:3, ...] = np.matmul(R, local_map[:3, ...]) + t.reshape([3, 1])\n        local_map = local_map.T\n        assert len(local_map.shape) == 2\n        assert local_map.shape[1] == 4\n        n_pts = local_map.shape[0]\n\n        if self.global_map is None:\n            self.global_map = local_map\n            self.local_map = local_map\n        assert len(self.global_map.shape) == 2\n        assert self.global_map.shape[1] == 4  # (N, 4)\n\n        tree = scipy.spatial.cKDTree(self.global_map[..., :3])\n        dists, idxs = tree.query(local_map[..., :3], k=1)\n        common_pts_mask = dists <= self.dist_th\n\n        assert len(dists) == local_map.shape[0]\n        assert len(idxs) == local_map.shape[0]\n        self.new_map = local_map[~common_pts_mask, :]\n        self.local_map = local_map\n\n        rospy.logdebug(f'Adding {self.new_map.shape[0]} new points')\n        assert len(self.new_map.shape) == 2\n        assert self.new_map.shape[1] == 4  # (n, 4)\n\n        # and accumulate new points to global map\n        self.global_map = np.concatenate([self.global_map, self.new_map], axis=0)\n        assert len(self.global_map.shape) == 2\n        assert self.global_map.shape[1] == 4  # (N, 4)\n\n        # accumulate rewards\n        rewards_prev = self.global_map[idxs, 3:4]\n        rewards = self.local_map[..., 3:4]\n        rewards = np.concatenate([rewards_prev, rewards], axis=1)\n        assert rewards.shape[1] == 2\n        assert rewards.shape[0] == n_pts  # (n x 2)\n        self.global_map[idxs, 3] = reduce_rewards(rewards)\n\n        rospy.loginfo('Point cloud accumulation took: %.3f s', timer() - t0)\n\n    def msgify_reward_cloud(self, cloud):\n        assert cloud.shape[1] >= 4\n        # publish global map with reward as intensity value\n        map = np.zeros(cloud.shape[0], dtype=[('x', np.float32),\n                                              ('y', np.float32),\n                                              ('z', np.float32),\n                                              ('reward', np.float32)])\n        map['x'] = cloud[..., 0]\n        map['y'] = cloud[..., 1]\n        map['z'] = cloud[..., 2]\n        map['reward'] = cloud[..., 3]\n        # convert point cloud to msg and publish it\n        map_msg = msgify(PointCloud2, map)\n        map_msg.header.frame_id = self.map_frame\n        map_msg.header.stamp = rospy.Time.now()\n        return map_msg\n\n    def run(self):\n        rate = rospy.Rate(self.reward_cloud_rate)\n        while not rospy.is_shutdown():\n            if self.global_map is None or self.new_map is None:\n                continue\n            # publish reward clouds\n            global_map_msg = self.msgify_reward_cloud(self.global_map)\n            new_map_msg = self.msgify_reward_cloud(self.new_map)\n            self.rewards_cloud_pub.publish(global_map_msg)\n            self.new_cloud_pub.publish(new_map_msg)\n            # publish total reward value\n            total_reward = self.global_map[:, 3].sum()\n            rospy.loginfo('Total reward: %.1f', total_reward)\n            self.reward_pub.publish(Float64(total_reward))\n            rate.sleep()\n\n\nif __name__ == '__main__':\n    rospy.init_node('rewards_accumulator', log_level=rospy.INFO)\n    proc = RewardsAccumulator(reward_cloud_topic=rospy.get_param('~reward_cloud_topic', 'reward_cloud'))\n    proc.run()\n","repo_name":"tpet/rpz_planning","sub_path":"scripts/utils/rewards_accumulator.py","file_name":"rewards_accumulator.py","file_ext":"py","file_size_in_byte":7212,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"}
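`reduce_rewards` in the entry above fuses repeated observations of the same point with a noisy-OR: r = 1 − ∏ᵢ(1 − rᵢ), computed in log space for numerical stability. A standalone check of the arithmetic (illustrative values only):

```python
# Two 0.5 observations fuse to 0.75; a 0.9 observation is barely moved by a 0.0.
import numpy as np

rewards = np.array([[0.5, 0.5],
                    [0.9, 0.0]])
clipped = np.clip(rewards, 1e-6, 1 - 1e-6)       # same clipping as reduce_rewards
fused = 1. - np.exp(np.log(1. - clipped).sum(axis=1))
print(fused.round(3))  # [0.75 0.9]
```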
{"seq_id":"71172240905","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.hub import load_state_dict_from_url\n\n\nclass STN3d(nn.Module):\n    def __init__(self):\n        super().__init__()\n        self.conv1 = nn.Conv1d(3, 64, 1)\n        self.conv2 = nn.Conv1d(64, 128, 1)\n        self.conv3 = nn.Conv1d(128, 1024, 1)\n        self.fc1 = nn.Linear(1024, 512)\n        self.fc2 = nn.Linear(512, 256)\n        self.fc3 = nn.Linear(256, 9)\n        self.bn1 = nn.BatchNorm1d(64)\n        self.bn2 = nn.BatchNorm1d(128)\n        self.bn3 = nn.BatchNorm1d(1024)\n        self.bn4 = nn.BatchNorm1d(512)\n        self.bn5 = nn.BatchNorm1d(256)\n\n    def forward(self, x):\n        x = F.relu(self.bn1(self.conv1(x)))\n        x = F.relu(self.bn2(self.conv2(x)))\n        x = F.relu(self.bn3(self.conv3(x)))\n        x = x.amax(dim=2)  # [B,1024]\n        x = F.relu(self.bn4(self.fc1(x)))\n        x = F.relu(self.bn5(self.fc2(x)))\n        x = self.fc3(x).view(-1, 3, 3)\n        x = x + torch.eye(3, device=x.device)\n        return x\n\n\nclass PointNetfeat(nn.Module):\n    def __init__(self, global_feat=True):\n        super().__init__()\n        self.stn = STN3d()\n        self.conv1 = nn.Conv1d(3, 64, 1)\n        self.conv2 = nn.Conv1d(64, 128, 1)\n        self.conv3 = nn.Conv1d(128, 1024, 1)\n        self.bn1 = nn.BatchNorm1d(64)\n        self.bn2 = nn.BatchNorm1d(128)\n        self.bn3 = nn.BatchNorm1d(1024)\n        self.global_feat = global_feat\n\n    def forward(self, x):\n        trans = self.stn(x)\n        x = x.transpose(2, 1)\n        x = torch.bmm(x, trans)\n        x = x.transpose(2, 1)\n        x = F.relu(self.bn1(self.conv1(x)))\n        pointfeat = x\n        x = F.relu(self.bn2(self.conv2(x)))\n        x = self.bn3(self.conv3(x))\n        x = x.amax(dim=2)  # [B,1024]\n        if self.global_feat:\n            return x, trans\n        else:\n            _, _, N = pointfeat.shape\n            x = x[..., None].repeat_interleave(N, dim=-1)\n            return torch.cat([x, pointfeat], 1), trans\n\n\nclass PointNet1(nn.Module):\n    def __init__(self, k=2):\n        super().__init__()\n        self.feat = PointNetfeat(global_feat=True)\n        self.fc1 = nn.Linear(1024, 512)\n        self.fc2 = nn.Linear(512, 256)\n        self.fc3 = nn.Linear(256, k)\n        self.bn1 = nn.BatchNorm1d(512)\n        self.bn2 = nn.BatchNorm1d(256)\n\n    def forward(self, x):\n        x1, trans = self.feat(x)\n        x2 = F.relu(self.bn1(self.fc1(x1)))\n        x3 = F.relu(self.bn2(self.fc2(x2)))\n        x4 = self.fc3(x3)\n        feature = torch.cat((x1, x2, x3, x4), dim=1)\n        return feature\n\n\ndef pretrained_pointnet(dataset=\"shapenet\"):\n    if dataset == \"shapenet\":\n        model = PointNet1(k=16)\n        state_dict = load_state_dict_from_url(\n            url=\"https://github.com/microsoft/SpareNet/raw/main/Frechet/cls_model_39.pth\",\n            progress=True,\n        )\n    else:\n        raise ValueError(f\"Unknown dataset: {dataset}\")\n    model.load_state_dict(state_dict)\n    model.eval().requires_grad_(False)\n    return model\n","repo_name":"kazuto1011/dusty-gan-v2","sub_path":"gans/metrics/pointnet.py","file_name":"pointnet.py","file_ext":"py","file_size_in_byte":3006,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"81"}
{"seq_id":"72356513864","text":"import random as rnd\n\n\ndef shuffle(cards: [int]):\n\n    for i in range(len(cards)):\n        random_index = rnd.randint(0, i)\n        cards[i], cards[random_index] = cards[random_index], cards[i]\n\n\ndef random_subset(input_list: [int], subset_size: int):\n    subset = input_list[:subset_size]\n\n    for i in range(subset_size, len(input_list)):\n        random_index = rnd.randint(0, i)\n        if random_index < subset_size:\n            subset[random_index] = input_list[i]\n\n    return subset\n\n\ncards = [1, 2, 3, 4, 5, 6, 7, 8]\n\nprint(random_subset(cards, 5))\n# shuffle(cards)\n# print(cards)\n","repo_name":"KasraNezamabadi/InterviewPractice","sub_path":"RandomPractice.py","file_name":"RandomPractice.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
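`random_subset` in the RandomPractice.py entry above is classic reservoir sampling: after element i is processed, every element seen so far sits in the subset with probability k/(i+1), so each of the n inputs is selected with probability k/n. A quick empirical check (standalone sketch, not part of the repo):

```python
# Each of the 8 cards should land in the 5-element subset ~5/8 (0.625) of the time.
import random as rnd
from collections import Counter

def random_subset(input_list, subset_size):
    subset = input_list[:subset_size]
    for i in range(subset_size, len(input_list)):
        j = rnd.randint(0, i)           # uniform over the i+1 items seen so far
        if j < subset_size:
            subset[j] = input_list[i]   # evict a reservoir slot uniformly
    return subset

counts = Counter()
for _ in range(20000):
    counts.update(random_subset(list(range(8)), 5))
print({c: round(n / 20000, 2) for c, n in sorted(counts.items())})  # all ~0.62
```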
self.assertRaises(WojownikError):\n Wojownik(-100)\n\n def test_repr(self):\n w = Wojownik(30)\n expected = 'Wojownik: hp=30, exp=0'\n received = w.__repr__()\n\n self.assertEqual(expected, received)\n\n def test_marsz(self):\n w = Wojownik(30)\n w.maszeruj(1000)\n self.assertEqual(20.0, w._doswiadczenie)\n\nclass TestRycerz(unittest.TestCase):\n def test_atakuj(self):\n r = Rycerz()\n r.atakuj()\n self.assertEqual(0.3, r._doswiadczenie)\n\n def test_stworzenie_udane(self):\n r = Rycerz()\n self.assertEqual(r._zycie, 60)\n self.assertEqual(r._doswiadczenie, 0)\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"ritaly/kurs_python","sub_path":"16_unittest/strategia12/test_strategia.py","file_name":"test_strategia.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18404702335","text":"import sqlite3\n\ndbPath = \"./src/data/test.db\"\n#dbPath = \"../data/test.db\"\n\ndef SQL(sql):\n\tconn = sqlite3.connect(dbPath, check_same_thread = False)\n\tvalue = []\n\tc = conn.cursor()\n\tcursor = c.execute(sql)\n\tif \"select\" not in sql.lower():\n\t\tconn.commit()\n\t\tconn.close()\n\telse:\n\t\tfor row in cursor:\n\t\t\tvalue.append(row)\n\tconn.close()\n\treturn value\t\n\nif __name__ == \"__main__\":\n\n\t#SQL(\"INSERT INTO SCANASE (ID,PRODUCTNAME,IP) \\\n # VALUES (1, 'Beoplay M3', '10.10.10.111')\")\n\t#SQL(\"INSERT INTO STATUS (ID,SCAN,STATUS) VALUES (1, 'ase_scan', 0)\")\n\t#SQL(\"delete from status\")\n\tSQL(\"update status set STATUS=0 where SCAN='ase_scan'\")\n\tprint(SQL(\"select * from scanase\"))\n\tprint(SQL(\"select * from status\"))","repo_name":"BruceZhu88/WifiSpeaker","sub_path":"src/common/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73164550985","text":"import logging\nimport json\nimport requests\nimport discord\nimport dataclasses\nimport datetime\n\nfrom discord.ext import commands\n\nfrom ovisbot.exceptions import CryptoHackApiException\nfrom ovisbot.helpers import success, escape_md\nfrom ovisbot.db_models import CryptoHackUserMapping\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\n@dataclasses.dataclass\nclass Score:\n username: str\n global_rank: int\n points: int\n total_points: int\n challs_solved: int\n total_challs: int\n num_users: int\n\n @classmethod\n def parse(cls, raw):\n # username:global_rank:points:total_points:challs_solved:total_challs:num_users\n spl = raw.split(\":\")\n assert len(spl) == 7\n username = spl.pop(0)\n return cls(*([username] + list(map(int, spl))))\n\n\nclass CryptoHack(commands.Cog):\n DISCORD_TOKEN_API_URL = \"https://cryptohack.org/discord_token/{0}/\"\n USER_SCORE_API_URL = \"https://cryptohack.org/wechall/userscore/\"\n USER_URL = \"https://cryptohack.org/user/{0}/\"\n\n def __init__(self, bot):\n self.bot = bot\n\n def _get_cryptohack_score(self, cryptohack_user):\n \"\"\"\n Returns score of given user.\n \"\"\"\n req = requests.get(\n CryptoHack.USER_SCORE_API_URL, params={\"username\": cryptohack_user}\n )\n res = req.text\n\n if \"failed\" in res:\n raise CryptoHackApiException\n\n return Score.parse(res)\n\n @commands.group()\n async def cryptohack(self, ctx):\n \"\"\"\n Collection of CryptoHack commands\n \"\"\"\n if ctx.invoked_subcommand is None:\n await ctx.send(\"Invalid command passed. 
Use `!help`.\")\n\n @cryptohack.command()\n async def connect(self, ctx, token=None):\n \"\"\"\n Link your CryptoHack account\n \"\"\"\n if token is None:\n msg = (\n \"Εν μου έδωκες token! Πάεννε στο https://cryptohack.org/user/ \"\n \"τζαι πιαστο token που εν κάτω κάτω! Μια ζωη να σας παρατζέλλω δαμέσα!\"\n )\n await ctx.send(msg)\n else:\n token = token.replace(\"/\", \"\").replace(\".\", \"\").replace(\"%\", \"\")\n req = requests.get(CryptoHack.DISCORD_TOKEN_API_URL.format(token))\n res = json.loads(req.content)\n\n if \"error\" in res:\n raise CryptoHackApiException(res[\"error\"])\n\n cryptohack_user = res[\"user\"]\n CryptoHackUserMapping(\n cryptohack_user=cryptohack_user, discord_user_id=ctx.author.id\n ).save()\n await ctx.send(f\"Linked CryptoHack as {cryptohack_user}!\")\n await success(ctx.message)\n\n @cryptohack.command()\n async def disconnect(self, ctx):\n \"\"\"\n Disconecct your CryptoHack account\n \"\"\"\n cryptohack_mapping = CryptoHackUserMapping.objects.get(\n {\"discord_user_id\": ctx.author.id}\n )\n cryptohack_mapping.delete()\n await success(ctx.message)\n\n @cryptohack.command()\n async def stats(self, ctx, user=None):\n \"\"\"\n Show your CryptoHack stats or any given user's\n \"\"\"\n if user is None:\n user_id = ctx.author.id\n else:\n user_id = next((m.id for m in ctx.message.mentions), None)\n if user_id is None:\n return\n\n cryptohack_mapping = CryptoHackUserMapping.objects.get(\n {\"discord_user_id\": user_id}\n )\n score = self._get_cryptohack_score(cryptohack_mapping.cryptohack_user)\n\n await ctx.send(\n embed=discord.Embed(\n title=score.username,\n url=CryptoHack.USER_URL.format(score.username),\n color=0xFEB32B,\n )\n .add_field(\n name=\"Rank\",\n value=f\"{score.global_rank} / {score.num_users}\",\n inline=False,\n )\n .add_field(\n name=\"Score\",\n value=f\"{score.points} / {score.total_points}\",\n inline=False,\n )\n .add_field(\n name=\"Solves\",\n value=f\"{score.challs_solved} / {score.total_challs}\",\n inline=False,\n )\n )\n\n @cryptohack.command()\n async def scoreboard(self, ctx):\n \"\"\"\n Displays internal CryptoHack scoreboard.\n \"\"\"\n limit = 10\n mappings = CryptoHackUserMapping.objects.all()\n scores = [\n (\n escape_md(self.bot.get_user(mapping.discord_user_id).name),\n self._get_cryptohack_score(mapping.cryptohack_user),\n )\n for mapping in mappings\n ][:limit]\n\n scores = sorted(scores, key=lambda s: s[1].points, reverse=True)\n scoreboard = \"\\n\".join(\n \"{0}. **{1}**\\t{2}\".format(idx + 1, s[0], s[1].points)\n for idx, s in enumerate(scores)\n )\n embed = discord.Embed(\n title=\"CryptoHack Scoreboard\",\n colour=discord.Colour(0xFEB32B),\n url=\"https://cryptohack.org/\",\n description=scoreboard,\n timestamp=datetime.datetime.now(),\n )\n\n embed.set_thumbnail(url=\"https://cryptohack.org/static/img/main.png\")\n embed.set_footer(\n text=\"CYberMouflons\",\n icon_url=\"https://i.ibb.co/yW2mYjq/cybermouflons.png\",\n )\n\n await ctx.send(embed=embed)\n\n @scoreboard.error\n @disconnect.error\n @connect.error\n @stats.error\n async def generic_error_handler(self, ctx, error):\n if isinstance(error.original, CryptoHackUserMapping.DoesNotExist):\n await ctx.channel.send(\n \"Ρε λεβεντη... εν βρίσκω συνδεδεμένο CryptoHack account! (`!cryptohack connect `)\"\n )\n elif isinstance(error.original, CryptoHackApiException):\n await ctx.channel.send(\"Ούπς... 
κατι επήε λάθος ρε τσιάκκο!\")\n\n\ndef setup(bot):\n bot.add_cog(CryptoHack(bot))\n","repo_name":"cybermouflons/ovisbot","sub_path":"ovisbot/extensions/cryptohack/cryptohack.py","file_name":"cryptohack.py","file_ext":"py","file_size_in_byte":6124,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"81"} +{"seq_id":"18402375564","text":"import os\nimport xmltodict\n\nimport serviceplatformen_cpr.settings as settings\n\nfrom serviceplatformen_cpr.helpers.soap import construct_envelope_SF1520\nfrom serviceplatformen_cpr.helpers.validation import validate_cprnr\nfrom serviceplatformen_cpr.helpers.http_requester import http_post\n\n\n__author__ = \"Heini Leander Ovason\"\n\n\ndef get_citizen(service_uuids, certificate, cprnr):\n \"\"\"The function returnes a citizen dict from the\n 'SF1520 - Udvidet person stamdata (lokal)' service.\n It serves as a facade to simplify input validation, and interaction\n with the SOAP service, parsing and filtering the response.\n :param cprnr: String of 10 digits -> r'^\\d{10}$'\n :type cpr: str\n :return: Dictionary representation of a citizen\n :rtype: dict\"\"\"\n\n is_cprnr_valid = validate_cprnr(cprnr)\n\n if is_cprnr_valid:\n\n soap_envelope_template = os.path.join(\n settings.install_path, settings.SP_SF1520_SOAP_ENVELOPE_TEMPLATE\n )\n\n soap_envelope = construct_envelope_SF1520(\n template=soap_envelope_template,\n service_uuids=service_uuids,\n cprnr=cprnr\n )\n response = call_cpr_person_lookup_request(\n soap_envelope=soap_envelope,\n certificate=certificate\n )\n if response.status_code == 200:\n citizen_dict = parse_cpr_person_lookup_xml_to_dict(\n soap_response_xml=response.text\n )\n return citizen_dict\n else:\n response.raise_for_status()\n return {'Error': 'Something went wrong'}\n\n\ndef call_cpr_person_lookup_request(soap_envelope, certificate):\n \"\"\"Performs a web service call to 'Udvidet Person Stam Data(lokal)'.\n : param soap_envelope: SOAP envelope\n : param certificate: Path to certificate\n : type soap_envelope: str\n : type soap_envelope: str\n :return: Complete serviceplatform xml representation of a citizen\n :rtype: str\"\"\"\n\n service_url = settings.SP_SERVICE_ENDPOINT_CPR_INFORMATION_1\n\n response = http_post(\n endpoint=service_url,\n soap_envelope=soap_envelope,\n certificate=certificate\n )\n\n return response\n\n\ndef parse_cpr_person_lookup_xml_to_dict(soap_response_xml):\n \"\"\"Parses string xml to a dict\n : param soap_response_xml: xml\n : type soap_response_xml: str\n : return: Dictionary representation of a citizen\n : rtype: dict\"\"\"\n\n xml_to_dict = xmltodict.parse(soap_response_xml)\n\n root = xml_to_dict['soap:Envelope']['soap:Body'][\n 'ns4:callCPRPersonLookupResponse']\n\n citizen_dict = {}\n\n person_data = root['ns4:personData']\n for k, v in person_data.items():\n key = k[4:]\n citizen_dict[key] = v\n\n name = root['ns4:navn']\n for k, v in name.items():\n key = k[4:]\n citizen_dict[key] = v\n\n address = root['ns4:adresse']['ns4:aktuelAdresse']\n if not address:\n address = {}\n for k, v in address.items():\n key = k[4:]\n citizen_dict[key] = v\n\n try:\n not_living_in_dk = root['ns4:adresse']['ns4:udrejseoplysninger']\n citizen_dict['udrejst'] = True\n except KeyError as key_error:\n citizen_dict['udrejst'] = False\n\n relations = root['ns4:relationer']\n citizen_dict['relationer'] = []\n for k, v in relations.items():\n # NOTE: v is a dict if k is:\n # 'ns4:mor', 'ns4:far', or 'ns4:aegtefaelle'.\n if isinstance(v, dict):\n 
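The CPR parsing code above strips the `ns4:` namespace prefix by hard-coding its length (`key = k[4:]`), which silently breaks if the service ever emits a different prefix. A minimal, hypothetical helper (not part of the original module) that drops any prefix recursively from the structure `xmltodict.parse()` returns:

```python
# Hypothetical helper: strip "nsX:"-style prefixes from every key of the
# nested dict/list structure produced by xmltodict, instead of relying on
# the fixed-width slice k[4:].
def strip_ns(obj):
    if isinstance(obj, dict):
        return {k.split(':', 1)[-1]: strip_ns(v) for k, v in obj.items()}
    if isinstance(obj, list):
        return [strip_ns(v) for v in obj]
    return obj

doc = {'ns4:navn': {'ns4:fornavn': 'Jens', 'ns4:efternavn': 'Hansen'}}
print(strip_ns(doc))  # {'navn': {'fornavn': 'Jens', 'efternavn': 'Hansen'}}
```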
citizen_dict['relationer'].append(\n {\n 'relation': k[4:],\n 'cprnr': v.get('ns4:PNR')\n }\n )\n # NOTE: v is a list of dicts if k is 'ns4:barn'.\n if isinstance(v, list):\n for child in v:\n citizen_dict['relationer'].append(\n {\n 'relation': k[4:],\n 'cprnr': child.get('ns4:PNR')\n }\n )\n\n return citizen_dict\n","repo_name":"magenta-aps/aak-affaldvarme","sub_path":"aak_integration/serviceplatformen_cpr/services/udvidet_person_stam_data_lokal.py","file_name":"udvidet_person_stam_data_lokal.py","file_ext":"py","file_size_in_byte":3976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22673966534","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n##############################################################################\n#\n# shopping_list.py\n# --------------------\n# A quick shopping list\n#\n# @author Shaden, 0x899319D4251502BA\n# @date 28 December 2015\n# @version 0.0.1\n##############################################################################\n\nshopping_list = [\"apples\", \"oranges\", \"batteries\"]\nallowed_commands = [\"SHOW\", \"ADD\", \"DEL\", \"HELP\", \"DONE\"]\nwelcome_message = \"Welcome to your shopping list!\\nSHOW will list your items\\nADD will add new items\\nDEL \" \\\n \"to remove items from the list\\n\" \\\n \"HELP for these instructions.\\nDONE to exit.\\n\" \\\n \">> \"\nerror_message = \"I don't understand what you are trying to do\\n\\n{}\".format(welcome_message)\nadd_message = \"Which number slot would you like to add this item? Hit ENTER to insert item at end of list.\"\n\n\ndef show_list(): # loop through list, print items with numbers to identify them.\n count = 1\n for item in shopping_list:\n print(str(count) + \": \" + item)\n count += 1\n\n\ndef insert_item(where, what): # insert new items into the list\n if where == \"\" or int(where) > len(shopping_list): # append to end if pressed\n shopping_list.append(what)\n else:\n if int(where) >= 1 and int(where) <= len(shopping_list): # insert at location if specified.\n shopping_list.insert(int(where) - 1, what)\n\n\ndef remove_item(position): # remove item from list location specified by user\n shopping_list.pop(position - 1)\n\n\nwhile True:\n wat = input(\"> \")\n if wat not in allowed_commands:\n input(error_message)\n elif wat == \"DONE\":\n print(\"Goodbye\")\n break\n elif wat == \"SHOW\":\n show_list()\n continue\n elif wat == \"DEL\":\n show_list()\n remove_item(int(input(\"Which number would you like to delete?\")))\n else:\n if wat == \"ADD\":\n insert_item(input(\"where\"), input(\"what\"))\n\n \n\n","repo_name":"slerpy/treehouse","sub_path":"py/shopping_list.py","file_name":"shopping_list.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39369306190","text":"from rest_framework import status \nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response \nfrom foodjournal.models import FoodJournal, FoodLogItem, User\nfrom foodjournal.serializers import FoodJournalSerializer, FoodLogItemSerializer, UserSerializer\nfrom datetime import date\nfrom datetime import datetime\nfrom django.utils.timezone import localdate\nfrom allauth.socialaccount.providers.google.views import GoogleOAuth2Adapter\nfrom allauth.socialaccount.providers.oauth2.client import OAuth2Client\nfrom rest_auth.registration.views import SocialLoginView\n\n\nwanted_date = localdate()\n\nclass 
GoogleLogin(SocialLoginView):\n adapter_class = GoogleOAuth2Adapter\n client_class = OAuth2Client\n\n@api_view(['GET', 'POST'])\ndef food_journal_entry_list(request):\n \"\"\"\n List all journal entries or create a new entry\n \"\"\"\n if request.method == 'GET':\n entries = FoodJournal.objects.filter(date=localdate())\n serializer = FoodJournalSerializer(entries, many=True)\n return Response(serializer.data)\n\n elif request.method == 'POST':\n # check db if date already exists, modify existing entry\n print(\"HERE\", wanted_date)\n\n serializer = FoodJournalSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) \n\n@api_view(['GET', 'PUT', 'DELETE'])\ndef food_journal_entry_detail(request, pk):\n \"\"\"\n Retrieve, update or delete a journal entry instance\n \"\"\"\n try:\n entry = FoodJournal.objects.get(pk=pk)\n except FoodJournal.DoesNotExist:\n return Response({'message': 'Sorry, that item does not exist'}, status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n serializer = FoodJournalSerializer(entry)\n return Response(serializer.data)\n elif request.method == 'PUT':\n print(\"HERE\")\n serializer = FoodJournalSerializer(entry, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n elif request.method == 'DELETE':\n entry.delete()\n return Response({'message': 'Item was deleted successfully!'}, status=status.HTTP_204_NO_CONTENT)\n\n@api_view(['GET', 'POST'])\ndef food_log_item_list(request):\n \"\"\"\n List all journal entries or create a new entry\n \"\"\"\n\n if request.method == 'GET':\n foods = FoodLogItem.objects.all()\n serializer = FoodLogItemSerializer(foods, many=True)\n return Response(serializer.data)\n elif request.method == 'POST':\n serializer = FoodLogItemSerializer(data=request.data)\n print(\"HERE!!\", serializer)\n if serializer.is_valid():\n serializer.save()\n\n journal_entry = {\n \"date\": wanted_date,\n \"food_entries\": [request.data]\n }\n \n entry_serializer = FoodJournalSerializer(data=journal_entry)\n print(\"AKEJGAJG\", entry_serializer)\n\n if entry_serializer.is_valid():\n entry_serializer.save()\n return Response(entry_serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n \n@api_view(['GET', 'PUT', 'DELETE'])\ndef food_log_item_detail(request, pk):\n \"\"\"\n Retrieve, update or delete a journal entry instance\n \"\"\"\n try:\n food = FoodLogItem.objects.get(pk=pk)\n except FoodLogItem.DoesNotExist:\n return Response({'message': 'Sorry, that item does not exist'}, status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n serializer = FoodLogItemSerializer(food)\n return Response(serializer.data)\n elif request.method == 'PUT':\n serializer = FoodLogItemSerializer(food, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n elif request.method == 'DELETE':\n food.delete()\n return Response({'message': 'Item was deleted successfully!'}, status=status.HTTP_204_NO_CONTENT)\n\n@api_view(['GET', 'POST'])\ndef user_list(request):\n \"\"\"\n List all users or create a new user\n \"\"\"\n if request.method == 'GET':\n users = User.objects.all()\n serializer = 
UserSerializer(users, many=True)\n return Response(serializer.data)\n elif request.method == 'POST':\n serializer = UserSerializer(data=request.data)\n print(\"USERREQ\", request.data)\n if serializer.is_valid():\n serializer.save()\n\n user_data = serializer.data\n #grabbing data\n m_or_f = user_data['male_or_female']\n user_weight = user_data['weight']\n user_height = user_data['height']\n user_age = user_data['age']\n user_act_level = user_data['activity_level']\n\n #calling model function\n bmr = User().calculate_bmr(m_or_f = m_or_f, user_weight = user_weight, user_height = user_height, user_age = user_age)\n rec_daily_cal = User().daily_caloric_needs(bmr, user_act_level)\n to_lose_weight = User().lose_weight(rec_daily_cal)\n\n response = {\n \"user_name\": request.data['user_name'],\n \"male_or_female\": request.data['male_or_female'],\n 'weight': request.data['weight'],\n 'height': request.data['height'],\n 'age': request.data['age'],\n 'activity_level': request.data['activity_level'],\n \"rec_bmr\": bmr,\n \"rec_calories\": rec_daily_cal,\n \"rec_cal_lose\": to_lose_weight\n }\n response_serializer = UserSerializer(data=response)\n if response_serializer.is_valid():\n response_serializer.save()\n return Response({\"rec_bmr\" : bmr, \"rec_calories\" : rec_daily_cal, \"rec_cal_lose\" : to_lose_weight}, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n@api_view(['GET', 'PUT', 'DELETE'])\ndef user_detail(request, pk):\n \"\"\"\n Retrieve, update or delete a user instance\n \"\"\"\n try:\n user = User.objects.get(pk=pk)\n except FoodLogItem.DoesNotExist:\n return Response({'message': 'Sorry, that item does not exist'}, status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n serializer = UserSerializer(user)\n return Response(serializer.data)\n elif request.method == 'PUT':\n serializer = UserSerializer(user, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n elif request.method == 'DELETE':\n user.delete()\n return Response({'message': 'Item was deleted successfully!'}, status=status.HTTP_204_NO_CONTENT)\n\n\n\n","repo_name":"emirry/mindfull","sub_path":"app/foodjournal/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41766596615","text":"import string\nfrom typing import List, Optional\n\n\nclass Token:\n def __init__(self, symbol):\n self.symbol = symbol\n\n def __str__(self):\n return self.symbol\n\n def __repr__(self):\n return str(self)\n\n\nclass Tokens:\n AND = Token('&')\n OR = Token('|')\n XOR = Token('^')\n NOT = Token('!')\n IMPL = Token('=>')\n EQ = Token('==')\n CONST_0 = Token('0')\n CONST_1 = Token('1')\n PAREN_LEFT = Token('(')\n PAREN_RIGHT = Token(')')\n\n ALL_TOKENS = [AND, OR, XOR, NOT, IMPL, EQ, CONST_0, CONST_1, PAREN_LEFT, PAREN_RIGHT]\n\n\nclass VarToken(Token):\n def __init__(self, name):\n super().__init__('$')\n self.name = name\n\n def __str__(self):\n return f'${self.name}'\n\n def __repr__(self):\n return str(self)\n\n\ndef _tokenize(expr: str) -> Optional[List[Token]]:\n results = []\n\n i = 0\n while i < len(expr):\n if expr[i] == ' ':\n i += 1\n continue\n\n matched = False\n for token in Tokens.ALL_TOKENS:\n symbol = token.symbol\n if i + len(symbol) <= len(expr) and symbol == expr[i:i + len(symbol)]:\n i += len(symbol)\n 
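The `Parser` class in this file stops at producing an RPN token list; the natural companion is an evaluator. The sketch below is illustrative rather than part of the original module: it reuses the `Tokens` and `VarToken` definitions above and assumes variable values are supplied as a dict of booleans.

```python
# Evaluate the RPN stream produced by Parser.get_rpn_expr(), given an
# environment mapping variable names to bools.
def evaluate_rpn(rpn, env):
    ops = {
        Tokens.AND: lambda a, b: a and b,
        Tokens.OR: lambda a, b: a or b,
        Tokens.XOR: lambda a, b: a != b,
        Tokens.IMPL: lambda a, b: (not a) or b,
        Tokens.EQ: lambda a, b: a == b,
    }
    stack = []
    for token in rpn:
        if isinstance(token, VarToken):
            stack.append(env[token.name])
        elif token == Tokens.CONST_0:
            stack.append(False)
        elif token == Tokens.CONST_1:
            stack.append(True)
        elif token == Tokens.NOT:
            stack.append(not stack.pop())
        else:
            b, a = stack.pop(), stack.pop()  # right operand is on top
            stack.append(ops[token](a, b))
    return stack[0]
```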
results.append(token)\n matched = True\n break\n if matched:\n continue\n\n if expr[i] in string.ascii_letters:\n begin = i\n\n while i < len(expr) and expr[i] in (string.ascii_letters + string.digits):\n i += 1\n\n text = expr[begin:i]\n results.append(VarToken(text))\n continue\n\n return None\n\n return results\n\n\ndef _validate(expr: List[Token]) -> bool:\n state = 1\n parentheses = 0\n\n for token in expr:\n if state == 1:\n if token == Tokens.PAREN_LEFT:\n parentheses += 1\n elif token == Tokens.NOT:\n pass\n elif type(token) is VarToken or token in [Tokens.CONST_0, Tokens.CONST_1]:\n state = 2\n else:\n return False\n else:\n if token == Tokens.PAREN_RIGHT:\n parentheses -= 1\n if parentheses < 0:\n return False\n elif token in [Tokens.AND, Tokens.OR, Tokens.XOR, Tokens.IMPL, Tokens.EQ]:\n state = 1\n else:\n return False\n\n return state == 2 and parentheses == 0\n\n\ndef _extract_variables(expr: List[Token]) -> List[str]:\n # noinspection PyUnresolvedReferences\n return sorted(set(token.name for token in expr if type(token) is VarToken))\n\n\ndef _infix_to_rpn(expr: List[Token]) -> List[Token]:\n priorities = {\n Tokens.NOT: 2,\n Tokens.AND: 1,\n Tokens.OR: 1,\n Tokens.XOR: 1,\n Tokens.IMPL: 0,\n Tokens.EQ: 0\n }\n\n result = []\n operators = []\n\n for token in expr:\n if token == Tokens.PAREN_LEFT:\n operators.append(token)\n elif token == Tokens.PAREN_RIGHT:\n while operators[-1] != Tokens.PAREN_LEFT:\n result.append(operators.pop())\n operators.pop()\n elif token in [Tokens.NOT, Tokens.AND, Tokens.OR, Tokens.XOR, Tokens.IMPL, Tokens.EQ]:\n while len(operators) > 0 \\\n and operators[-1] != Tokens.PAREN_LEFT \\\n and priorities[operators[-1]] >= priorities[token]:\n result.append(operators.pop())\n\n operators.append(token)\n elif type(token) is VarToken or token in [Tokens.CONST_0, Tokens.CONST_1]:\n result.append(token)\n else:\n assert False\n\n while len(operators) > 0:\n result.append(operators.pop())\n\n return result\n\n\nclass Parser:\n\n def __init__(self, expr: str, debug_log: bool = False):\n self.expr = expr\n self.variables = None\n self.rpn = None\n self.debug_log = debug_log\n\n def parse(self) -> bool:\n if self.debug_log:\n print(\"Parsing expression\", self.expr)\n\n tokens = _tokenize(self.expr)\n\n if tokens is None:\n return False\n\n if self.debug_log:\n print(\"Tokens:\", ' '.join(map(str, tokens)))\n\n if not _validate(tokens):\n return False\n\n self.variables = _extract_variables(tokens)\n\n if self.debug_log:\n print(\"Variables:\", ', '.join(self.variables))\n\n self.rpn = _infix_to_rpn(tokens)\n\n if self.debug_log:\n print(\"RPN:\", ' '.join(map(str, self.rpn)))\n\n return True\n\n def get_variables(self) -> List[str]:\n return self.variables\n\n def get_rpn_expr(self) -> List[Token]:\n return self.rpn\n","repo_name":"pjanczyk/boolean-minimization","sub_path":"Parser.py","file_name":"Parser.py","file_ext":"py","file_size_in_byte":4663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72852905226","text":"from FlightRadar24.api import FlightRadar24API\nimport pandas as pd\nfrom geopy import distance\n\n\ndef same_continent(x):\n df_airports = pd.read_parquet(\"/data/Airports.parquet\")\n dest = x[\"dest_iata\"]\n org = x[\"org_iata\"]\n try :\n if df_airports[df_airports[\"iata\"] == org][\"Continent\"].iloc[0] == df_airports[df_airports[\"iata\"] == dest][\"Continent\"].iloc[0]:\n return df_airports[df_airports[\"iata\"] == org][\"Continent\"].iloc[0]\n else :\n return None\n except 
IndexError as e:\n return None\n\ndef airport_dist(x):\n df_airports = pd.read_parquet(\"/data/Airports.parquet\")\n org = x[\"org_iata\"]\n dest = x[\"dest_iata\"]\n try :\n coord_org = (df_airports[df_airports[\"iata\"] == org][\"lat\"].iloc[0], df_airports[df_airports[\"iata\"] == org][\"lon\"].iloc[0])\n coord_dest = (df_airports[df_airports[\"iata\"] == dest][\"lat\"].iloc[0], df_airports[df_airports[\"iata\"] == dest][\"lon\"].iloc[0])\n return distance.distance(coord_org, coord_dest).km\n except IndexError as e:\n return None\n\ndef cont_org(x):\n df_airports = pd.read_parquet(\"/data/Airports.parquet\")\n try :\n return df_airports[df_airports[\"iata\"] == x[\"org_iata\"]][\"Continent\"].iloc[0]\n except IndexError:\n return None\n\ndef company_country(x):\n airline = pd.read_parquet(\"/data/AirlineCountries.parquet\", columns=[\"ICAO\",\"Country\"])\n try :\n return airline[airline[\"ICAO\"] == x[\"company\"]][\"Country\"].iloc[0]\n except IndexError:\n return None\n\n\n#Q1\n\ndef most_active_airline():\n df_airlines = pd.read_parquet(\"/data/Airlines.parquet\")\n df_flights = pd.read_parquet(\"/data/live/Flights.parquet\", columns=[\"company\"])\n\n count = df_flights.value_counts()\n if not count.empty:\n return(df_airlines[df_airlines[\"ICAO\"] == count.index[0][0]].iloc[0][\"Name\"] , count.max())\n\n#Q2\n\ndef active_by_continent():\n df= pd.read_parquet(\"/data/live/Flights.parquet\",columns=[\"dest_iata\",\"org_iata\",\"company\"])\n\n df[\"Continent\"] = df.apply(same_continent, axis=1)\n return(df)\n \n\n\n#Q3 \n\ndef longuest_journey(): \n df3 = pd.read_parquet(\"/data/live/Flights.parquet\",columns=[\"id\",\"dest_iata\",\"org_iata\"])\n\n df3[\"travel_size\"] = df3.apply(airport_dist, axis=1)\n return df3\n\n#Q4 \n\ndef average_journey():\n fr_api = FlightRadar24API()\n df4 = pd.read_parquet(\"/data/live/Flights.parquet\",columns=[\"id\",\"dest_iata\",\"org_iata\"])\n\n df4[\"travel_size\"] = df4.apply(airport_dist, axis=1)\n df4[\"org_Continent\"] = df4.apply(cont_org, axis=1)\n return(df4)\n\n\n#Q5.1\n\n\n\ndef leading_manufacturer():\n df_flights = pd.read_parquet(\"/data/live/Flights.parquet\", columns=[\"model\"])\n return df_flights\n \n\n#Q5.2\n\ndef continent_manufacturer():\n df5 = pd.read_parquet(\"/data/live/Flights.parquet\",columns=[\"id\",\"dest_iata\",\"org_iata\",\"model\"])\n \n df5[\"org_Continent\"] = df5.apply(cont_org, axis=1)\n return df5\n\n\n#Q6\n\ndef flying_models():\n # Flights data\n df6 = pd.read_parquet(\"/data/live/Flights.parquet\", columns=[\"model\",\"company\"])\n df6[\"company_country\"] = df6.apply(company_country,axis=1)\n df6.dropna(inplace=True)\n df6.reset_index(allow_duplicates=False, inplace=True)\n\n # List of countries with active companies\n countries = df6[[\"company_country\"]].drop_duplicates()\n countries.dropna(inplace=True)\n countries = countries[\"company_country\"].to_list()\n\n dic = {\"Company registration country\":[],\"model_1\":[],\"number_1\":[],\"model_2\":[],\"number_2\":[],\"model_3\":[],\"number_3\":[]}\n\n for country in countries:\n models = df6[df6[\"company_country\"] == country].value_counts(\"model\")\n \n if len(models) == 0:\n pass\n elif len(models) == 1:\n dic[\"Company registration country\"].append(country)\n dic[\"model_1\"].append(models.index[0])\n dic[\"number_1\"].append(models[0])\n dic[\"model_2\"].append(\"\")\n dic[\"number_2\"].append(\"\")\n dic[\"model_3\"].append(\"\")\n dic[\"number_3\"].append(\"\")\n\n elif len(models) == 2:\n dic[\"Company registration 
country\"].append(country)\n dic[\"model_1\"].append(models.index[0])\n dic[\"number_1\"].append(models[0])\n dic[\"model_2\"].append(models.index[1])\n dic[\"number_2\"].append(models[1])\n dic[\"model_3\"].append(\"\")\n dic[\"number_3\"].append(\"\")\n\n elif len(models) >= 3:\n dic[\"Company registration country\"].append(country)\n dic[\"model_1\"].append(models.index[0])\n dic[\"number_1\"].append(models[0])\n dic[\"model_2\"].append(models.index[1])\n dic[\"number_2\"].append(models[1])\n dic[\"model_3\"].append(models.index[2])\n dic[\"number_3\"].append(models[2])\n\n df_answer6 = pd.DataFrame(dic)\n df_answer6 = df_answer6.sort_values(by=\"Company registration country\")\n return(df_answer6)\n\n#Q7.1\n\n\ndef popular_destination():\n df_airports = pd.read_parquet(\"data/Airports.parquet\")\n df7 = pd.read_parquet(\"/data/live/Flights.parquet\",columns=[\"id\",\"dest_iata\",\"org_iata\"])\n\n df7[\"travel_size\"] = df7.apply(airport_dist, axis=1)\n df7[\"org_Continent\"] = df7.apply(cont_org, axis=1)\n return df7\n \n\n#Q7.2\n\ndef inbounds():\n df_airports = pd.read_parquet(\"data/Airports.parquet\")\n df72 = pd.read_parquet(\"/data/live/Flights.parquet\",columns=[\"org_iata\",\"dest_iata\"])\n df_inbounds = pd.read_parquet(\"data/Airports.parquet\",columns=[\"iata\",\"Continent\"])\n df_inbounds[\"inbounds\"] = [0]*len(df_inbounds.index)\n\n for i in range(len(df72.index)):\n flight = df72.iloc[i]\n try :\n df_inbounds.iat[df_inbounds[df_inbounds[\"iata\"] == flight[\"org_iata\"]][\"inbounds\"].index[0],2] -=1\n df_inbounds.iat[df_inbounds[df_inbounds[\"iata\"] == flight[\"dest_iata\"]][\"inbounds\"].index[0],2] +=1\n except :\n pass\n \n df_inbounds[\"inbounds\"] = df_inbounds[\"inbounds\"].apply(lambda x: abs(x))\n return df_inbounds\n \n\n\n#Q8\n\ndef average_speed():\n fr_api = FlightRadar24API()\n df8 = pd.read_parquet(\"/data/live/Flights.parquet\",columns=[\"org_iata\",\"speed\"])\n\n df8[\"org_Continent\"] = df8.apply(cont_org, axis=1)\n return df8\n","repo_name":"Alaiitoc/kata-python","sub_path":"flightRadar/app/kata.py","file_name":"kata.py","file_ext":"py","file_size_in_byte":6237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37971270236","text":"\r\nboard = [\r\n [0, 2, 0, 3, 0, 0, 0, 0, 0],\r\n [6, 0, 8, 0, 0, 0, 5, 1, 3],\r\n [0, 0, 5, 0, 1, 0, 2, 0, 6],\r\n [3, 8, 9, 5, 7, 1, 6, 2, 4],\r\n [4, 1, 6, 8, 9, 2, 7, 3, 5],\r\n [0, 0, 2, 6, 3, 4, 0, 8, 0],\r\n [0, 0, 0, 2, 6, 0, 0, 5, 0],\r\n [2, 0, 3, 1, 4, 0, 0, 0, 7],\r\n [0, 6, 4, 7, 8, 0, 0, 0, 2]\r\n]\r\n\r\n\r\ndef solver(board):\r\n find = find_empty_element(board)\r\n if not find:\r\n return True\r\n else:\r\n row, col = find\r\n\r\n for i in range(1, 10):\r\n if valid(board, i, (row, col)):\r\n board[row][col] = i\r\n\r\n if solver(board):\r\n return True\r\n\r\n board[row][col] = 0\r\n\r\n return False\r\n\r\n\r\ndef valid(board, num, position):\r\n # check for row\r\n for i in range(len(board[0])):\r\n if board[position[0]][i] == num and position[1] != i:\r\n return False\r\n\r\n # check for column\r\n for i in range(len(board)):\r\n if board[i][position[1]] == num and position[0] != i:\r\n return False\r\n\r\n # check box in the board\r\n box_x = position[1] // 3\r\n box_y = position[0] // 3\r\n for i in range(box_y * 3, box_y * 3 + 3):\r\n for j in range(box_x * 3, box_x * 3 + 3):\r\n if board[i][j] == num and (i, j) != position:\r\n return False\r\n\r\n return True\r\n\r\n\r\ndef print_board():\r\n for i in range(len(board)):\r\n if i % 3 == 
0 and i != 0:\r\n print(\"----------------------\")\r\n for j in range(len(board[i])):\r\n if j % 3 == 0 and j != 0:\r\n print(\"|\", end=\" \")\r\n print(board[i][j], end=\" \")\r\n print()\r\n\r\n\r\ndef find_empty_element(board):\r\n for i in range(len(board)):\r\n for j in range(len(board[i])):\r\n if board[i][j] == 0:\r\n return i, j\r\n\r\n return None\r\n\r\n\r\nif __name__ == '__main__':\r\n print_board()\r\n print()\r\n solver(board)\r\n print()\r\n print_board()\r\n","repo_name":"evyatar1/sudoku_solver","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38139892716","text":"#! /usr/bin/env python3\n'''\nTest def plot_pareto() of graphs.py\n\ntime -f '%e' ./plot_pareto.py\n./plot_pareto.py\n'''\n\nimport datasense as ds\nimport pandas as pd\n\noutput_url = 'plot_pareto.html'\nheader_title = 'plot_pareto'\nheader_id = 'plot-pareto'\n\n\ndef main():\n original_stdout = ds.html_begin(\n output_url=output_url,\n header_title=header_title,\n header_id=header_id\n )\n print(help(ds.plot_pareto))\n # Example 1\n data = pd.DataFrame(\n {\n 'ordinate': ['Mo', 'Larry', 'Curly', 'Shemp', 'Joe'],\n 'abscissa': [21, 2, 10, 4, 16]\n }\n )\n fig, ax1, ax2 = ds.plot_pareto(\n X=data['ordinate'],\n y=data['abscissa']\n )\n fig.savefig(\n fname='pareto.svg',\n format='svg'\n )\n ds.html_figure(file_name='pareto.svg')\n ds.html_end(\n original_stdout=original_stdout,\n output_url=output_url\n )\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"gillespilon/datasense","sub_path":"scripts/plot_pareto.py","file_name":"plot_pareto.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"74449189385","text":"#encoding=utf-8\nimport torch\nimport cv2\nfrom tetris import Tetris\nfrom RL_brain import DQNPrioritizedReplay\n\nTETRIS_WIDTH = 10\nTETRIS_HEIGHT = 20\nTETRIS_BLOCK_SIZE = 30\n\nN_FEATURES = 4\nACTION_SPACE = 1\n\ndef test(model, model_path):\n if torch.cuda.is_available():\n torch.cuda.manual_seed(123)\n else:\n torch.manual_seed(123)\n\n model.load_model(model_path)\n\n env = Tetris(width=TETRIS_WIDTH, height=TETRIS_HEIGHT, block_size=TETRIS_BLOCK_SIZE)\n env.reset()\n\n if torch.cuda.is_available():\n model.cuda()\n \n while True:\n next_steps = env.get_next_states()\n next_actions, next_states = zip(*next_steps.items())\n action, _ = model.choose_action(next_actions, next_states, is_random=False)\n _, done = env.step(action, render=True)\n\n if done:\n print(\"Cleared: {}\".format(env.cleared_lines))\n break\n\nif __name__ == \"__main__\":\n prio_duel_DQN = DQNPrioritizedReplay(n_actions=ACTION_SPACE,\n n_features=N_FEATURES,\n test=True)\n model_path = \"model/dqn_tetris.pkl\"\n test(prio_duel_DQN, model_path)\n\n\n","repo_name":"lihow/Reinforcement-Learning-PyTorch","sub_path":"DQN_Test/tetris/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15357615769","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 16 11:03:28 2019\n\n@author: Paul Scheibal\n\nThe program shows the fundamentals of Discrete Wavelet Transform with application of \nnoise reduction.\n\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as mtick\nimport matplotlib as mpl\nimport 
pylab as plb\nimport matplotlib.mlab as mlab\nimport math\n\nfrom IPython.core.pylabtools import figsize\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nimport seaborn as sns\nimport scipy.io\n\nimport pywt\nfrom collections import defaultdict, Counter\n\nfigsize(15,6)\n\n# function to generate a wave given amplitude (a), frequency(f), horizontal phase shift (hps) \n# and vertical phase shift (vps) for a given time t\ndef gen_wave(a, f, hps, vps, t):\n b = 2 * np.pi * f\n y = a * np.cos(b * t + hps) + vps\n fig, ax = plt.subplots()\n plt.plot(t,y)\n ttl = 'Cosine Wave: amplitude = %1.1f' % a\n ttl = ttl + ', frequency = %1.1f' % f\n ttl = ttl + ', phase shift = %1.1f' % hps\n plt.title(ttl, fontsize=16)\n plt.ylabel('Amplitude', fontsize=16)\n plt.xlabel('Time', fontsize=16)\n plt.yticks(np.arange(min(y),max(y)+1))\n plt.grid(True)\n for tick in ax.get_xticklabels():\n tick.set_fontsize(14)\n for tick in ax.get_yticklabels():\n tick.set_fontsize(14)\n plt.show()\n return y\n\ntime = 1\nsamples = 2048\nsample_interval = time / samples\nsamples_per_sec = int(round(samples/time))\nfreq_high = int(samples_per_sec/2)\n\nt = np.arange(0,time,sample_interval)\nf = np.arange(0,freq_high,1/time)\n\n# create a series of waves\n# low frequency\nphaseshift = -np.pi/2\nfrequency = 2\namplitude = 5\ny1 = gen_wave(amplitude,frequency,phaseshift,0,t)\n# low frequency\nphaseshift = .25\nfrequency = 3\namplitude = 9\ny2 = gen_wave(amplitude,frequency,phaseshift,0,t)\n# low frequency\nphaseshift = .5\nfrequency = 5\namplitude = 7\ny3 = gen_wave(amplitude,frequency,phaseshift,0,t)\n# high frequency noise\nphaseshift = 0\nfrequency = 256\namplitude = 1\ny4 = gen_wave(amplitude,frequency,phaseshift,0,t)\nphaseshift = 0\nfrequency = 100\namplitude = 2\n# high frequency noise\ny5 = gen_wave(amplitude,frequency,phaseshift,0,t)\n\n# put them all together to form one consolidated wave\ny = y1 + y2 + y3 + y4 + y5\n\n# plot combined signal\nfig, ax = plt.subplots()\nplt.plot(t,y)\nplt.title('Original Signal - With Noise', fontsize=16)\nplt.ylabel('Amplitude', fontsize=16)\nplt.xlabel('Time', fontsize=16)\nplt.grid(True)\nfor tick in ax.get_xticklabels():\n tick.set_fontsize(14)\nfor tick in ax.get_yticklabels():\n tick.set_fontsize(14)\nplt.show()\n\n# set wavelet name\nwaveletname = 'sym2' \ncoef= pywt.wavedec(y,waveletname,level=4)\n\nfig, ax = plt.subplots()\nplt.plot(coef[0],color='green')\nplt.title('Low Pass Filter', fontsize=16)\nplt.ylabel('Final', fontsize=16)\nax.set_yticklabels([])\nplt.grid(True)\nfor tick in ax.get_xticklabels():\n tick.set_fontsize(14)\nfor tick in ax.get_yticklabels():\n tick.set_fontsize(14)\nplt.show()\n\nfor i in range(1,len(coef)):\n fig, ax = plt.subplots()\n j = len(coef) - i \n plt.plot(coef[j],color='blue')\n plt.title('High Pass Filter', fontsize=16)\n plt.ylabel('Level ' + str(i), fontsize=16)\n ax.set_yticklabels([])\n plt.grid(True)\n for tick in ax.get_xticklabels():\n tick.set_fontsize(14)\n for tick in ax.get_yticklabels():\n tick.set_fontsize(14)\n plt.show()\n\n\n\n\n\n\n","repo_name":"paulscheibal/SBDataScienceCert","sub_path":"CapstoneP2/Code/DiscreteWaveletTransformSignals.py","file_name":"DiscreteWaveletTransformSignals.py","file_ext":"py","file_size_in_byte":3385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32017008867","text":"import math\nimport iid.basic as basic\nfrom iid.dist import dist\n\nclass hyper(dist):\n def __init__(self, N, K, n):\n self.N = N\n self.K = K\n self.n = n\n 
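The wavelet script above decomposes the noisy signal with `pywt.wavedec` and plots the coefficient bands, but despite its stated goal of noise reduction it never reconstructs a denoised signal. A hedged sketch of the usual next step: soft-threshold the detail (high-pass) coefficients and reconstruct with `pywt.waverec`. The universal-threshold rule used here is a common convention, an assumption rather than something the original script specifies.

```python
import numpy as np
import pywt

def wavelet_denoise(signal, wavelet='sym2', level=4):
    coeffs = pywt.wavedec(signal, wavelet, level=level)
    # Estimate the noise level from the finest detail band (MAD estimator),
    # then apply the universal threshold to all detail coefficients.
    sigma = np.median(np.abs(coeffs[-1])) / 0.6745
    thresh = sigma * np.sqrt(2 * np.log(len(signal)))
    coeffs[1:] = [pywt.threshold(c, thresh, mode='soft') for c in coeffs[1:]]
    return pywt.waverec(coeffs, wavelet)
```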
self.kMin = max(0, n + K - N)\n self.kMax = min(n, K)\n\n def mean(self):\n N = float(self.N)\n K = float(self.K)\n n = float(self.n)\n return n*K/N\n\n def median(self):\n return None\n\n def var(self):\n N = float(self.N)\n K = float(self.K)\n n = float(self.n)\n return n*(K/N)*((N-K)/N)*((N-n)/(N - 1))\n\n def pmf(self, k, **args):\n N = self.N\n K = self.K\n n = self.n\n\n assert self.kMin <= k\n assert k <= self.kMax\n\n if 'log' in args and args['log']:\n return basic.logChoose(K, k) + basic.logChoose(N - K, n - k) - basic.logChoose(N, n)\n else:\n return float(basic.choose(K, k)) * float(basic.choose(N - K, n - k)) / float(basic.choose(N, n))\n\n def cdf(self, k, **args):\n N = self.N\n K = self.K\n n = self.n\n\n assert self.kMin <= k\n assert k <= self.kMax\n\n # Find the mode, to decide which direction to do the summation\n #\n w = (n + 1.0)*(K + 1.0)/(N + 2.0)\n\n if k == self.kMax:\n lls = 0.0\n lus = None\n elif k < w:\n s = 0\n for j in range(k, self.kMin-1, -1):\n t = self.pmf(j)\n s += t\n lls = math.log(s)\n lus = None\n else:\n s = 0\n for j in range(k+1, self.kMax+1):\n t = self.pmf(j)\n s += t\n lls = None\n lus = math.log(s)\n\n if 'upper' in args and args['upper']:\n if lus is None:\n lr = basic.log1mexp(lls)\n else:\n lr = lus\n else:\n if lls is None:\n lr = basic.log1mexp(lus)\n else:\n lr = lls\n\n if 'log' in args and args['log']:\n return lr\n return math.exp(lr)\n\n\n","repo_name":"drtconway/iid","sub_path":"python/iid/hyper.py","file_name":"hyper.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70897854984","text":"# coding: utf-8\n\nimport pickle as pickle\nfrom systemtools.basics import *\nfrom systemtools.logger import *\nfrom systemtools.location import *\nfrom systemtools.file import *\nfrom databasetools.mongo import *\nfrom datatools.jsonutils import *\nfrom datastructuretools import config as dstConf\nimport os\nimport gzip\nimport sys\nimport copy\n\ndef objectSizeMo(obj):\n size = sys.getsizeof(obj)\n size /= 1024.0\n size /= 1024.0\n return size\n\nclass SerializableDict():\n def __init__ \\\n (\n self,\n name=None,\n dirPath=None,\n funct=None,\n compresslevel=0,\n logger=None,\n verbose=True,\n readAndAddOnly=False,\n useFileSize=True,\n limit=None,\n cleanMaxSizeMoReadModifiedOlder=None,\n cleanNotReadOrModifiedSinceNDays=None,\n cleanNotReadOrModifiedSinceNSeconds=None,\n cleanEachNAction=500,\n serializeEachNAction=500,\n doSerialize=None,\n cacheCheckRatio=0.0, # 0.01\n raiseBadDesignException=True,\n useMongodb=False,\n host=None, user=None, password=None,\n mongoIndex=\"hash\",\n useLocalhostIfRemoteUnreachable=True,\n mongoDbName=None,\n databaseRoot=None,\n readIsAnAction=True,\n mongoGlobalDumpStrategy=DUMP_STRATEGIES.serialize,\n loadRetry=20,\n loadSleepMin=0.5,\n loadSleepMax=None,\n ):\n \"\"\"\n Read the README\n \"\"\"\n\n # Some checks:\n if dirPath is None:\n dirPath = tmpDir(\"SerializableDicts\")\n if useMongodb:\n doSerialize = False\n if doSerialize is None:\n if name is None:\n doSerialize = False\n else:\n doSerialize = True\n if name is None and useMongodb:\n raise Exception(\"Pls set a name for mongodb usage\")\n self.name = name\n if self.name is None:\n self.name = getRandomStr()\n self.mongoGlobalDumpStrategy = mongoGlobalDumpStrategy\n self.useFileSize = useFileSize\n self.doSerialize = doSerialize\n self.mongoDbName = mongoDbName\n self.serializeEachNAction = serializeEachNAction\n if not self.doSerialize:\n 
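The `hyper` class above computes the hypergeometric pmf and cdf by direct summation over log-binomials. A quick sanity check is to compare against `scipy.stats.hypergeom`; note that scipy's parameter order differs (population size, number of success states, number of draws), so the mapping below is the point of the example.

```python
from scipy.stats import hypergeom

# scipy's hypergeom(M, n, N) corresponds to hyper(N=M, K=n, n=N) in the
# notation of the class above. The values here are arbitrary test inputs.
N, K, n = 50, 10, 12
rv = hypergeom(N, K, n)
print(rv.mean())  # n*K/N = 2.4, should match hyper(N, K, n).mean()
print(rv.pmf(3))  # should match hyper(N, K, n).pmf(3)
print(rv.cdf(3))  # should match hyper(N, K, n).cdf(3)
```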
self.serializeEachNAction = 0\n self.useMongodb = useMongodb\n self.filePath = dirPath + \"/\" + self.name + \".pickle.zip\"\n\n # Now we store all vars:\n self.useLocalhostIfRemoteUnreachable = useLocalhostIfRemoteUnreachable\n self.mongoIndex = mongoIndex\n self.host = host\n self.user = user\n self.password = password\n self.logger = logger\n self.verbose = verbose\n self.readIsAnAction = readIsAnAction\n self.loadRetry = loadRetry\n self.loadSleepMin = loadSleepMin\n self.loadSleepMax = loadSleepMax\n if self.loadSleepMax is None:\n self.loadSleepMax = self.loadSleepMin * 3\n # Config:\n self.databaseRoot = databaseRoot\n if self.databaseRoot is None:\n self.databaseRoot = dstConf.sdDatabaseRoot\n if self.user is None:\n self.user = dstConf.sdUser\n if self.password is None:\n self.password = dstConf.sdPassword\n if self.host is None:\n self.host = dstConf.sdHost\n if self.mongoDbName is None:\n self.mongoDbName = dstConf.sdDatabaseName\n self.cleanMaxSizeMoReadModifiedOlder = cleanMaxSizeMoReadModifiedOlder\n self.initData()\n self.processingFunct = funct\n self.raiseBadDesignException = raiseBadDesignException\n self.cacheCheckRatio = cacheCheckRatio\n self.readAndAddOnly = readAndAddOnly\n self.actionCount = 0\n self.cleanEachNAction = cleanEachNAction\n self.cleanMaxValueCountReadModifiedOlder = limit\n self.cleanNotReadOrModifiedSinceNSeconds = cleanNotReadOrModifiedSinceNSeconds\n if self.cleanNotReadOrModifiedSinceNSeconds is None and cleanNotReadOrModifiedSinceNDays is not None:\n self.cleanNotReadOrModifiedSinceNSeconds = cleanNotReadOrModifiedSinceNDays * 3600 * 24\n self.compresslevel = compresslevel\n self.load()\n self.clean()\n\n def getMongoIndex(self):\n return self.mongoIndex\n\n def __getitem__(self, index):\n return self.get(index)\n def __setitem__(self, index, value):\n self.add(index, value)\n def __delitem__(self, key):\n if self.hasKey(key):\n del self.data[key]\n self.gotAnAction()\n def __contains__(self, key):\n return self.hasKey(key)\n def __len__(self):\n return self.size()\n\n def __del__(self):\n \"\"\"\n Warning: this function is called at the end of the python execution\n So you we can not use it to reset the file et data\n \"\"\"\n pass\n# return self.stop()\n\n def getData(self):\n return self.data\n\n def items(self):\n for key, value in self.data.items():\n yield (key, value[\"value\"])\n\n def save(self, *argv, **kwargs):\n self.serialize(*argv, **kwargs)\n def serialize(self):\n if not self.useMongodb:\n f = None\n if self.compresslevel > 0:\n f = gzip.GzipFile(self.filePath, 'wb', compresslevel=self.compresslevel)\n else:\n f = open(self.filePath, 'wb')\n try:\n data = self.data\n # data = copy.deepcopy(self.data)\n # data = dictToMongoStorable\\\n # (\n # data,\n # logger=self.logger,\n # verbose=self.verbose,\n # normalizeKeys=False,\n # normalizeEnums=False,\n # normalizeBigInts=False,\n # convertTuples=False,\n # convertSets=False,\n # )\n pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)\n except Exception as e:\n logException(e, self, location=\"SerializableDict serialize()\")\n finally:\n f.close()\n\n\n def add(self, key, value):\n if self.hasKey(key):\n if not self.readAndAddOnly:\n self.updateField(key, \"modified\", time.time())\n self.updateField(key, \"modifiedCount\", self.getFieldValue(key, \"modifiedCount\") + 1)\n self.updateField(key, \"value\", value)\n else:\n value = {\"value\": value}\n value[\"modified\"] = time.time()\n value[\"modifiedCount\"] = 0\n value[\"created\"] = time.time()\n value[\"read\"] = time.time()\n 
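`SerializableDict` bundles caching, eviction, retries, and optional MongoDB storage, but the core persistence idea is simply a dict pickled to an (optionally gzip-compressed) file. A stripped-down standalone sketch of that idea; the file path is illustrative only.

```python
import gzip
import os
import pickle

PATH = '/tmp/cache.pickle.gz'  # illustrative location

def save(d):
    # Serialize the whole dict to a gzip'd pickle, as serialize() does above.
    with gzip.open(PATH, 'wb') as f:
        pickle.dump(d, f, pickle.HIGHEST_PROTOCOL)

def load():
    # Start empty if nothing has been persisted yet.
    if not os.path.exists(PATH):
        return {}
    with gzip.open(PATH, 'rb') as f:
        return pickle.load(f)

cache = load()
cache['answer'] = 42
save(cache)
```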
value[\"readCount\"] = 0\n if self.useMongodb:\n value[self.mongoIndex] = key\n self.data.insert(value)\n else:\n self.data[key] = value\n self.gotAnAction()\n\n def conditionalSerialize(self):\n if self.serializeEachNAction is not None and self.serializeEachNAction > 0:\n if self.actionCount % self.serializeEachNAction == 0:\n self.serialize()\n\n def gotAnAction(self):\n self.actionCount += 1\n self.conditionalSerialize()\n self.conditionalClean()\n\n def updateRow(self, key, field, value):\n \"\"\"\n This method update a field of a specific row without\n setting any timestamp in term of read or write.\n \"\"\"\n if self.useMongodb:\n field = \"value.\" + field\n value = toSerializableJson(value, mongoStorable=True, globalDumpStrategy=self.mongoGlobalDumpStrategy, logger=self.logger, verbose=self.verbose)\n self.data.updateOne({self.mongoIndex: key}, {\"$set\": {field: value}})\n else:\n self.data[key][\"value\"][field] = value\n\n def updateField(self, key, field, value):\n \"\"\"\n Private method, enable an update of the root architecture a the SerializableDict\n \"\"\"\n if self.useMongodb:\n value = toSerializableJson(value, mongoStorable=True, globalDumpStrategy=self.mongoGlobalDumpStrategy, logger=self.logger, verbose=self.verbose)\n self.data.updateOne({self.mongoIndex: key}, {\"$set\": {field: value}})\n else:\n self.data[key][field] = value\n\n def getFieldValue(self, key, field):\n if self.useMongodb:\n return self.data.findOne({self.mongoIndex: key})[field]\n else:\n return self.data[key][field]\n\n\n def get(self, key, *args, **kwargs):\n if self.hasKey(key):\n self.updateField(key, \"read\", time.time())\n self.updateField(key, \"readCount\", self.getFieldValue(key, \"readCount\") + 1)\n value = self.getFieldValue(key, \"value\")\n if self.processingFunct is not None and self.cacheCheckRatio > 0.0 and getRandomFloat() < self.cacheCheckRatio:\n valueToCheck = self.processingFunct(key, *args, **kwargs)\n if valueToCheck != value:\n message = \"Bad design of the cache mechanism. The value returned (already present in the cache) is different from the value returned by processingFunct(). WARNING extra datas *args, **kwargs are not taken into account in the indexing mecanism... 
Also you have to implement an equals function if the object is a complex object.\"\n message += \"\\n\\nAlready stored value:\\n\"\n message += listToStr(value)\n message += \"\\n\\nNew value:\\n\"\n message += listToStr(valueToCheck)\n logError(message, self)\n if isinstance(value, dict):\n for k1, v1 in value.items():\n v2 = valueToCheck[k1]\n if v1 != v2:\n logError(\"\\n\\nOne difference found in:\\n\", self)\n logError(listToStr(v1), self)\n logError(listToStr(v2), self)\n if self.raiseBadDesignException:\n raise Exception(message)\n if self.readIsAnAction:\n self.gotAnAction()\n return value\n elif self.processingFunct is not None:\n self.add(key, self.processingFunct(key, *args, **kwargs))\n if self.readIsAnAction:\n self.gotAnAction()\n return self.get(key, *args, **kwargs)\n else:\n if self.readIsAnAction:\n self.gotAnAction()\n return None\n\n def has(self, *args, **kwargs):\n return self.hasKey(*args, **kwargs)\n def hasKey(self, key):\n if self.useMongodb:\n return self.data.has(key)\n else:\n return key in self.data\n\n # def load(self):\n # if not self.useMongodb:\n # if self.doSerialize and (len(sortedGlob(self.filePath)) > 0):\n # f = None\n # data = None\n # for i in range(self.loadRetry):\n # try:\n # if self.compresslevel > 0:\n # with gzip.open(self.filePath, 'rb', compresslevel=self.compresslevel) as f:\n # data = pickle.load(f)\n # else:\n # with open(self.filePath, 'rb') as f:\n # data = pickle.load(f)\n # log(self.filePath + \" loaded from the disk\", self)\n # break\n # except Exception as e:\n # logException(e, self)\n # log(\"Failed to load \" + self.filePath + \" from the disk\", self)\n # currentSleep = getRandomInt(self.loadSleepMin, self.loadSleepMax)\n # log(\"Sleeping \" + str(currentSleep) + \" seconds.\")\n # time.sleep(currentSleep)\n # if data is None:\n # raise Exception(\"Unable to load \" + self.filePath)\n # else:\n # self.data = data\n def load(self):\n if not self.useMongodb:\n if self.doSerialize and (len(sortedGlob(self.filePath)) > 0):\n f = None\n data = None\n for i in range(self.loadRetry):\n try:\n if self.compresslevel > 0:\n f = gzip.GzipFile(self.filePath, 'rb', compresslevel=self.compresslevel)\n else:\n f = open(self.filePath, 'rb')\n data = pickle.load(f)\n f.close()\n log(self.filePath + \" loaded from the disk\", self)\n break\n except Exception as e:\n logException(e, self)\n log(\"Failed to load \" + self.filePath + \" from the disk\", self)\n try:\n f.close()\n except Exception as e:\n logException(e, self)\n currentSleep = getRandomInt(self.loadSleepMin, self.loadSleepMax)\n log(\"Sleeping \" + str(currentSleep) + \" seconds.\")\n time.sleep(currentSleep)\n if data is None:\n raise Exception(\"Unable to load \" + self.filePath)\n else:\n self.data = data\n\n def close(self):\n self.clean()\n self.serialize()\n\n def removeFile(self):\n removeIfExists(self.filePath)\n\n def initData(self, alreadyRetried=False):\n if self.useMongodb:\n try:\n self.data = MongoCollection\\\n (\n self.mongoDbName,\n self.name,\n giveTimestamp=False,\n indexOn=self.mongoIndex,\n host=self.host, user=self.user, password=self.password,\n logger=self.logger,\n verbose=self.verbose,\n databaseRoot=self.databaseRoot,\n globalDumpStrategy=self.mongoGlobalDumpStrategy,\n )\n except Exception as e:\n if self.useLocalhostIfRemoteUnreachable and not alreadyRetried:\n logException(e, self, location=\"initData hashmap\", message=\"WARNING: we can't access the remote SD, so we use the localhost...\")\n self.user = None\n self.password = None\n self.host = 
\"localhost\"\n self.initData(alreadyRetried=True)\n else:\n raise e\n else:\n self.data = dict()\n\n def reset(self, security=False):\n if self.useMongodb:\n self.data.resetCollection(security=security)\n else:\n self.removeFile()\n self.initData()\n\n def keys(self):\n return list(self.data.keys())\n\n def conditionalClean(self):\n if self.cleanEachNAction is not None and self.cleanEachNAction > 0:\n if self.actionCount % self.cleanEachNAction == 0:\n self.clean()\n\n def getToDeleteOlder(self, toDeleteCount):\n sortedIndex = []\n if self.useMongodb:\n theIterator = self.data.items(projection={\"modified\": 1, \"read\": 1})\n else:\n theIterator = self.data.items()\n for key, value in theIterator:\n currentModified = value[\"modified\"]\n currentRead = value[\"read\"]\n toTake = currentModified\n if currentRead > currentModified:\n toTake = currentRead\n sortedIndex.append((key, toTake))\n sortedIndex = sortBy(sortedIndex, desc=False, index=1)\n toDelete = set()\n for i in range(toDeleteCount):\n toDelete.add(sortedIndex[i][0])\n return toDelete\n\n def doUseFileSize(self):\n return isFile(self.filePath) and self.useFileSize and self.doSerialize\n\n def dataSizeMo(self):\n \"\"\"\n Warning getsizeof doesn't give real size of a dict because it can keep a huge size for performance issue, so we have to calculate the data size on each element:\n https://stackoverflow.com/questions/11129546/python-sys-getsizeof-reports-same-size-after-items-removed-from-list-dict\n \"\"\"\n try:\n if self.doUseFileSize():\n return getSize(self.filePath, unit='m')\n except Exception as e:\n logException(e, self)\n total = 0\n for key, value in self.data.items():\n total += objectSizeMo(key)\n total += objectSizeMo(value)\n return total\n\n def clean(self):\n cleanedCount = 0\n if self.cleanNotReadOrModifiedSinceNSeconds is not None and self.cleanNotReadOrModifiedSinceNSeconds > 0:\n toDelete = set()\n for key, current in self.data.items():\n modifiedInterval = time.time() - current[\"modified\"]\n readInterval = time.time() - current[\"read\"]\n if modifiedInterval > self.cleanNotReadOrModifiedSinceNSeconds or \\\n readInterval > self.cleanNotReadOrModifiedSinceNSeconds:\n toDelete.add(key)\n for current in toDelete:\n del self.data[current]\n cleanedCount += len(toDelete)\n if self.cleanMaxValueCountReadModifiedOlder is not None and \\\n self.cleanMaxValueCountReadModifiedOlder > 0 and \\\n self.size() > self.cleanMaxValueCountReadModifiedOlder:\n toDeleteCount = self.size() - self.cleanMaxValueCountReadModifiedOlder\n toDelete = self.getToDeleteOlder(toDeleteCount)\n cleanedCount += len(toDelete)\n for current in toDelete:\n del self.data[current]\n if not self.useMongodb:\n if self.cleanMaxSizeMoReadModifiedOlder is not None and \\\n self.cleanMaxSizeMoReadModifiedOlder > 0:\n while self.dataSizeMo() > self.cleanMaxSizeMoReadModifiedOlder:\n toDeleteCount = self.size() // 20 # We delete 5%\n toDelete = self.getToDeleteOlder(toDeleteCount)\n cleanedCount += len(toDelete)\n for current in toDelete:\n del self.data[current]\n if self.doUseFileSize():\n self.serialize()\n # Here you can other \"clean condition\"\n # ...\n if cleanedCount > 0:\n pass\n log(str(cleanedCount) + \" items were cleaned from \" + self.filePath, self)\n\n def size(self):\n return len(self.data)\n\n\n# DEPRECATED\nclass SerializableHashMap():\n # class Value(): # Todo date history, object, max number (limit)...\n\n def __init__(self, filePath, compresslevel=0):\n print(\"This class is deprecated, use SerializableDict instead\")\n 
self.data = dict()\n self.filePath = filePath\n self.compresslevel = compresslevel\n self.load()\n\n def serialize(self):\n f = None\n if self.compresslevel > 0:\n f = gzip.GzipFile(self.filePath, 'w', compresslevel=self.compresslevel)\n else:\n f = open(self.filePath, 'w')\n try:\n pickle.dump(self.data, f, pickle.HIGHEST_PROTOCOL)\n finally:\n f.close()\n\n def add(self, key, value):\n self.data[key] = value\n\n def getOne(self, *args, **kwargs):\n return self.get(*args, **kwargs)\n def get(self, key):\n if key in self:\n return self.data[key]\n else:\n return None\n\n def hasKey(self, *args, **kwargs):\n return self.has_key(*args, **kwargs)\n def has_key(self, key):\n return key in self.data\n\n def load(self):\n if (len(sortedGlob(self.filePath)) > 0):\n print(\"Loading \" + self.filePath + \" from the disk\")\n for i in range(20):\n try:\n f = None\n if self.compresslevel > 0:\n f = gzip.GzipFile(self.filePath, 'r', compresslevel=self.compresslevel)\n else:\n f = open(self.filePath, 'r')\n self.data = pickle.load(f)\n f.close()\n break\n except Exception as e:\n time.sleep(0.2)\n finally:\n try:\n f.close()\n except: pass\n\n def clean(self):\n if (len(sortedGlob(self.filePath)) > 0):\n os.remove(self.filePath)\n self.data = dict()\n\n def size(self):\n return len(self.data)\n\n\nclass SentencePair:\n def __init__(self, s1, s2):\n assert(isinstance(s1, str) or isinstance(s1, str))\n assert(isinstance(s2, str) or isinstance(s1, str))\n self.s1 = s1\n self.s2 = s2\n\n def __hash__(self):\n return (self.s1 + \"\\t\" + self.s2).__hash__()\n\n def __eq__(self, other):\n return (self.s1 == other.s1) and (self.s2 == other.s2)\n\n def __str__(self):\n if isinstance(self.s1, str):\n return str(self.s1) + \"\\t\" + str(self.s2)\n elif isinstance(self.s1, str):\n import unicodedata\n return unicodedata.normalize('NFKD', self.s1 + \"\\t\" + self.s2).encode('ascii','ignore')\n else:\n return \"Unknown object.\"\n\n\nif __name__ == '__main__':\n sd = SerializableDict()\n sd[\"a\"] = 1\n print(sd[\"a\"])\n del sd[\"a\"]\n print(sd[\"a\"])\n","repo_name":"hayj/DataStructureTools","sub_path":"datastructuretools/hashmap.py","file_name":"hashmap.py","file_ext":"py","file_size_in_byte":21561,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"12725052562","text":"class Hero: # template\n # __init__ akan di eksekusi pertama kali saat object di buat\n def __init__(self, inputName, inputHealth,inputPower, inputArmor):\n self.name = inputName\n self.health = inputHealth\n self.power = inputPower\n self.armor = inputArmor\n\n\nhero1 = Hero(\"Xiao\",100, 10, 4)\nhero2 = Hero(\"Ayaka\",100, 15, 1)\nhero3 = Hero(\"Diluc\",1000, 100, 0)\n\nprint(hero1.__dict__)\nprint(hero2.__dict__)\nprint(hero3.__dict__)","repo_name":"Yasir1st/100-Days-Of-Coding","sub_path":"constructor_init.py","file_name":"constructor_init.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11128010321","text":"import pandas as pd\nimport numpy as np\n\n\ndef main():\n\n return\n\n\ndef dropper(df):\n nan_value = float(\"NaN\")\n df.replace(0.0, nan_value, inplace=True)\n return df\n\n\ndef formatter(input_df):\n df = input_df\n del df['ID']\n df.columns = ['Origin', 'Destination', 'Year', 'Commuters', 'Distance']\n df['Origin'] = df['Origin'].map(lambda x: x.lstrip('GM'))\n df['Destination'] = df['Destination'].map(lambda x: x.lstrip('GM'))\n df = dropper(df)\n 
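`formatter()` above rewrites `Year` and `Commuters` row by row with `df.iterrows()`, which is slow on large frames; the same transformations can be expressed column-wise. A sketch, assuming the column names assigned earlier in `formatter()`:

```python
import pandas as pd

# Column-wise equivalent of the iterrows() loop in formatter(): strip the
# 'MM' period suffix from Year and rescale Commuters (reported in 1000s).
def format_fast(df: pd.DataFrame) -> pd.DataFrame:
    df = df.copy()
    df['Year'] = df['Year'].str.split('MM').str[0].astype(int)
    df['Commuters'] = df['Commuters'].astype(float) * 1000
    return df
```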
df.dropna(subset=[\"Commuters\"], inplace=True)\n df = df.reset_index()\n del df['index']\n for row in df.iterrows():\n X = row[1]['Year']\n X = X.split('MM')[0]\n Y = row[1]['Commuters']\n Y = Y * 1000\n df.at[row[0], 'Year'] = X\n df.at[row[0], 'Commuters'] = Y\n df['Origin'] = df['Origin'].astype(int)\n df['Destination'] = df['Destination'].astype(int)\n df['Distance'] = df['Distance'].str.split().str.get(-1)\n df['Distance'] = pd.to_numeric(df['Distance'], errors='coerce')\n df['Distance'] = df['Distance'].astype(float)\n return df\n\ndef commuting_matrix_creator(input_df, year):\n municipalities = input_df['Origin'].unique().tolist()\n output_df = pd.DataFrame(columns=municipalities, index=municipalities)\n for i in input_df.itertuples():\n output_df.at[i.Origin, i.Destination] = i.Commuters\n output_df = output_df.fillna(0)\n output_df = output_df.sort_index()\n output_df = output_df.sort_index(axis=1)\n return output_df\n\n\ndef population_formatter(pathway):\n pop_df = pd.read_csv(f'{pathway}', sep=';')\n del pop_df['ID']\n del pop_df['Sex']\n pop_df.columns = ['Region', 'Year', 'Population']\n pop_df.dropna(subset=[\"Population\"], inplace=True)\n pop_df['Region'] = pop_df['Region'].map(lambda x: x.lstrip('GM'))\n pop_df['Region'] = pop_df['Region'].astype(int)\n pop_df = pop_df.reset_index()\n del pop_df['index']\n for row in pop_df.iterrows():\n X = row[1]['Year']\n X = X.split('JJ')[0]\n pop_df.at[row[0], 'Year'] = int(X)\n return pop_df\n\n\ndef format_for_regression(comm_df, pop_df, year):\n df = pre_formatter(comm_df.copy())\n df2 = pop_df.loc[pop_df['Year'] == int(year)]\n for i in df.itertuples():\n for j in df2.itertuples():\n if i.Origin == j.Region:\n df.at[i.Index, 'Origin Pop'] = np.log(j.Population)\n if i.Destination == j.Region:\n df.at[i.Index, 'Destination Pop'] = np.log(j.Population)\n elif i.Destination == j.Region:\n df.at[i.Index, 'Destination Pop'] = np.log(j.Population)\n pathway = 'Databases/Regression_DataFrame_' + str(year) + '.csv'\n df.to_csv(f'{pathway}')\n return df\n\n\ndef pre_formatter(input_df):\n df = input_df.copy()\n for i in df.itertuples():\n if i.Distance != 0:\n df.at[i.Index, 'Distance'] = np.log(i.Distance)\n if i.Commuters != 0:\n df.at[i.Index, 'Commuters'] = np.log(i.Commuters)\n return df\n\n\ndef get_info(pathway, year, pop_df):\n rawdf = formatter(pd.read_csv(f'{pathway}', sep=\";\"))\n df = commuting_matrix_creator(rawdf, year)\n df_reg = format_for_regression(rawdf, pop_df, year)\n return df, df_reg\n\n\npop_general = population_formatter('Databases/Population 2014-2020.csv')\n\n\ncom_matrix_14, df_14_reg = get_info('Databases/Mobility 2014.csv', 2014, pop_general)\ncom_matrix_15, df_15_reg = get_info('Databases/Mobility 2015.csv', 2015, pop_general)\ncom_matrix_16, df_16_reg = get_info('Databases/Mobility 2016.csv', 2016, pop_general)\ncom_matrix_17, df_17_reg = get_info('Databases/Mobility 2017.csv', 2017, pop_general)\ncom_matrix_18, df_18_reg = get_info('Databases/Mobility 2018.csv', 2018, pop_general)\ncom_matrix_19, df_19_reg = get_info('Databases/Mobility 2019.csv', 2019, pop_general)\ncom_matrix_20, df_20_reg = get_info('Databases/Mobility 2020.csv', 2020, pop_general)\n\n\naggregate_df_reg = pd.concat([df_14_reg, df_15_reg, df_16_reg, df_17_reg, df_18_reg, df_19_reg, df_20_reg])\naggregate_df_reg.to_csv('Databases/Test_file.csv', index=False)\n\n\n## Changes in borders between municipalities\n\n# Some years, the Dutch government change municipality's borders by merging small ones into bigger ones for a myriad of reasons. 
We will identify these year-to-year modifications in the matrices.\n\nmunicipalities_14 = com_matrix_14.columns.values.tolist()\nmunicipalities_15 = com_matrix_15.columns.values.tolist()\nmunicipalities_16 = com_matrix_16.columns.values.tolist()\nmunicipalities_17 = com_matrix_17.columns.values.tolist()\nmunicipalities_18 = com_matrix_18.columns.values.tolist()\nmunicipalities_19 = com_matrix_19.columns.values.tolist()\nmunicipalities_20 = com_matrix_20.columns.values.tolist()\n\n\ndif_14_15 = list(set(municipalities_14).symmetric_difference(set(municipalities_15)))\nprint(dif_14_15, '\\n', 'The modifications are:', len(dif_14_15), '\\n',\n      'The actual difference should be:', len(com_matrix_14) - len(com_matrix_15))\n\n\ndif_15_16 = list(set(municipalities_15).symmetric_difference(set(municipalities_16)))\nprint(dif_15_16, '\\n', 'The modifications are:', len(dif_15_16), '\\n',\n      'The actual difference should be:', len(com_matrix_15) - len(com_matrix_16))\n\n\ndif_16_17 = list(set(municipalities_16).symmetric_difference(set(municipalities_17)))\nprint(dif_16_17, '\\n', 'The modifications are:', len(dif_16_17), '\\n',\n      'The actual difference should be:', len(com_matrix_16) - len(com_matrix_17))\n\n\ndif_17_18 = list(set(municipalities_17).symmetric_difference(set(municipalities_18)))\nprint(dif_17_18, '\\n', 'The modifications are:', len(dif_17_18), '\\n',\n      'The actual difference should be:', len(com_matrix_17) - len(com_matrix_18))\n\n\ndif_18_19 = list(set(municipalities_18).symmetric_difference(set(municipalities_19)))\nprint(dif_18_19, '\\n', 'The modifications are:', len(dif_18_19), '\\n',\n      'The actual difference should be:', len(com_matrix_18) - len(com_matrix_19))\n\n\ndif_19_20 = list(set(municipalities_19).symmetric_difference(set(municipalities_20)))\nprint(dif_19_20, '\\n', 'The modifications are:', len(dif_19_20), '\\n',\n      'The actual difference should be:', len(com_matrix_19) - len(com_matrix_20))\n\n\n## Backcasting border changes\n\n# Every year, some municipalities disappear. Sometimes they merge with other existing municipalities, and sometimes a new, bigger municipality is created by the fusion of others. In this section we will write a small function that will take a given year's commuting matrix, the disappearing municipalities and the new one. 
Then we will create another function that does the same for the other type of DataFrame, the one that also has the distance between municipalities.\n\ndef commuting_matrix_updater(commuting_matrix, disappearing_municipalities, new_municipalities):\n df = commuting_matrix.copy()\n municipalities = df.columns.values.tolist()\n for i in range(len(new_municipalities)):\n if new_municipalities[i] in municipalities:\n for j in range(len(disappearing_municipalities[i])):\n df[new_municipalities[i]] += df[disappearing_municipalities[i][j]]\n del df[disappearing_municipalities[i][j]]\n df = df.T\n df[new_municipalities[i]] += df[disappearing_municipalities[i][j]]\n del df[disappearing_municipalities[i][j]]\n else:\n for j in range(len(disappearing_municipalities[i])):\n df[new_municipalities[i]] = df[disappearing_municipalities[i][j]]\n del df[disappearing_municipalities[i][j]]\n df = df.T\n df[new_municipalities[i]] = df[disappearing_municipalities[i][j]]\n del df[disappearing_municipalities[i][j]]\n return df\n\n# def regression_dataframe_updater(regression_dataframe, disappearing_municipalities, new_municipalities):\n# df = regression_dataframe.copy()\n# for i in df.itertuples():\n# if i.Index ==\n# return\n\n\n# Here, we already have a function that can modify the commuting matrix in the way we mentioned. To avoid problems\n# with the division of municipalities and merging of many into one, we can write a separate loop.\n\n# 2014 - 2020\n\ndisappearing_municipalities = [[491, 608, 623, 643, 644], [265, 282], [365, 458], [568, 612]]\ntarget_municipality = [1931, 241, 361, 1930]\ncom_matrix_14 = commuting_matrix_updater(com_matrix_14, disappearing_municipalities, target_municipality)\n\ndel com_matrix_14[1671]\ncom_matrix_14 = com_matrix_14.T\ndel com_matrix_14[1671]\n\ndisappearing_municipalities = [[381, 424, 425], [478], [241], [1921]]\ntarget_municipality = [1942, 385, 1945, 1940]\ncom_matrix_14 = commuting_matrix_updater(com_matrix_14, disappearing_municipalities, target_municipality)\n\ndisappearing_municipalities = [[844, 846, 860]]\ntarget_municipality = [1948]\ncom_matrix_14 = commuting_matrix_updater(com_matrix_14, disappearing_municipalities, target_municipality)\n\ndisappearing_municipalities = [[7, 48], [1987, 18, 40], [81, 140], [196], [63, 70, 1908]]\ntarget_municipality = [1950, 1952, 80, 299, 1949]\ncom_matrix_14 = commuting_matrix_updater(com_matrix_14, disappearing_municipalities, target_municipality)\n\ndisappearing_municipalities = [[53, 5, 1651, 1663], [545, 707, 620], [15, 22, 25, 56], [58, 79, 1722], [689, 1927],\n [236, 304, 733], [17, 9], [393], [576], [584, 585, 588, 611, 617], [738, 870, 874],\n [962, 881, 951]]\ntarget_municipality = [1966, 1961, 1969, 1970, 1978, 1960, 14, 394, 575, 1963, 1959, 1954]\ncom_matrix_14 = commuting_matrix_updater(com_matrix_14, disappearing_municipalities, target_municipality)\n\npathway = 'Databases/Commuting_Matrix_2014.csv'\ncom_matrix_14.to_csv(f'{pathway}')\n\nprint(len(com_matrix_14))\n\n# 2015 - 2020\n\ndisappearing_municipalities = [[381, 424, 425], [478], [241], [1921]]\ntarget_municipality = [1942, 385, 1945, 1940]\ncom_matrix_15 = commuting_matrix_updater(com_matrix_15, disappearing_municipalities, target_municipality)\n\ndisappearing_municipalities = [[844, 846, 860]]\ntarget_municipality = [1948]\ncom_matrix_15 = commuting_matrix_updater(com_matrix_15, disappearing_municipalities, target_municipality)\n\ndisappearing_municipalities = [[7, 48], [1987, 18, 40], [81, 140], [196], [63, 70, 1908]]\ntarget_municipality = [1950, 
1952, 80, 299, 1949]\ncom_matrix_15 = commuting_matrix_updater(com_matrix_15, disappearing_municipalities, target_municipality)\n\ndisappearing_municipalities = [[53, 5, 1651, 1663], [545, 707, 620], [15, 22, 25, 56], [58, 79, 1722], [689, 1927],\n [236, 304, 733], [17, 9], [393], [576], [584, 585, 588, 611, 617], [738, 870, 874],\n [962, 881, 951]]\ntarget_municipality = [1966, 1961, 1969, 1970, 1978, 1960, 14, 394, 575, 1963, 1959, 1954]\ncom_matrix_15 = commuting_matrix_updater(com_matrix_15, disappearing_municipalities, target_municipality)\n\npathway = 'Databases/Commuting_Matrix_2015.csv'\ncom_matrix_15.to_csv(f'{pathway}')\n\nprint(len(com_matrix_15))\n\n# 2016 - 2020\n\ndisappearing_municipalities = [[844, 846, 860]]\ntarget_municipality = [1948]\ncom_matrix_16 = commuting_matrix_updater(com_matrix_16, disappearing_municipalities, target_municipality)\n\ndisappearing_municipalities = [[7, 48], [1987, 18, 40], [81, 140], [196], [63, 70, 1908]]\ntarget_municipality = [1950, 1952, 80, 299, 1949]\ncom_matrix_16 = commuting_matrix_updater(com_matrix_16, disappearing_municipalities, target_municipality)\n\ndisappearing_municipalities = [[53, 5, 1651, 1663], [545, 707, 620], [15, 22, 25, 56], [58, 79, 1722], [689, 1927],\n [236, 304, 733], [17, 9], [393], [576], [584, 585, 588, 611, 617], [738, 870, 874],\n [962, 881, 951]]\ntarget_municipality = [1966, 1961, 1969, 1970, 1978, 1960, 14, 394, 575, 1963, 1959, 1954]\ncom_matrix_16 = commuting_matrix_updater(com_matrix_16, disappearing_municipalities, target_municipality)\n\npathway = 'Databases/Commuting_Matrix_2016.csv'\ncom_matrix_16.to_csv(f'{pathway}')\n\nprint(len(com_matrix_16))\n\n# 2017 - 2020\n\ndisappearing_municipalities = [[7, 48], [1987, 18, 40], [81, 140], [196], [63, 70, 1908]]\ntarget_municipality = [1950, 1952, 80, 299, 1949]\ncom_matrix_17 = commuting_matrix_updater(com_matrix_17, disappearing_municipalities, target_municipality)\n\ndisappearing_municipalities = [[53, 5, 1651, 1663], [545, 707, 620], [15, 22, 25, 56], [58, 79, 1722], [689, 1927],\n [236, 304, 733], [17, 9], [393], [576], [584, 585, 588, 611, 617], [738, 870, 874],\n [962, 881, 951]]\ntarget_municipality = [1966, 1961, 1969, 1970, 1978, 1960, 14, 394, 575, 1963, 1959, 1954]\ncom_matrix_17 = commuting_matrix_updater(com_matrix_17, disappearing_municipalities, target_municipality)\n\npathway = 'Databases/Commuting_Matrix_2017.csv'\ncom_matrix_17.to_csv(f'{pathway}')\n\nprint(len(com_matrix_17))\n\n# 2018 - 2020\n\ndisappearing_municipalities = [[53, 5, 1651, 1663], [545, 707, 620], [15, 22, 25, 56], [58, 79, 1722], [689, 1927],\n [236, 304, 733], [17, 9], [393], [576], [584, 585, 588, 611, 617], [738, 870, 874],\n [962, 881, 951]]\ntarget_municipality = [1966, 1961, 1969, 1970, 1978, 1960, 14, 394, 575, 1963, 1959, 1954]\ncom_matrix_18 = commuting_matrix_updater(com_matrix_18, disappearing_municipalities, target_municipality)\n\npathway = 'Databases/Commuting_Matrix_2018.csv'\ncom_matrix_18.to_csv(f'{pathway}')\n\nprint(len(com_matrix_18))\n\n# 2019 - 2020\n\npathway = 'Databases/Commuting_Matrix_2019.csv'\ncom_matrix_19.to_csv(f'{pathway}')\n\n# 2020\n\npathway = 'Databases/Commuting_Matrix_2020.csv'\ncom_matrix_20.to_csv(f'{pathway}')\n\n\n# There's no changes in municipalities between 2019 and 2020, therefore the DataFrames stay unmodified.\n\nif __name__ == \"__main__\":\n main()","repo_name":"AGLamb/RA","sub_path":"Research Paper/Data Wrangling.py","file_name":"Data 
Wrangling.py","file_ext":"py","file_size_in_byte":13794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14806372811","text":"# https://leetcode.com/problems/find-numbers-with-even-number-of-digits/submissions/\nfrom typing import List\n\n\ndef findNumbers(self, nums: List[int]) -> int:\n    count = 0\n    for num in nums:  # O(N)\n        if len(str(num)) % 2 == 0:  # len() is O(1); str(num) costs O(digits)\n            count += 1  # O(1)\n\n    return count  # O(1)\n\n    # Overall runtime complexity O(N + S), where S is the total number of digits\n\n# Space complexity O(1)","repo_name":"ablades/coding_questions","sub_path":"findnumbers.py","file_name":"findnumbers.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71022168906","text":"adres = input(\"antre yon adress ip : \")\nip = adres.split(\".\")\nadr = \"\".join(ip)\nsum = []\nsom = 0\nfor i in adr:\n    sum.append(i)\nkalkil = [int(x) for x in sum]\nfor i in kalkil:\n    som = som + i\n#full= som * kalkil[0]\nprint(som)\n","repo_name":"Jeudy37/enonse","sub_path":"enonse1.py","file_name":"enonse1.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37751914014","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('libreosteoweb', '0028_document'),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='PatientDocument',\n            fields=[\n                ('attachment_type', models.SmallIntegerField(verbose_name='attachmentType')),\n                ('document', models.OneToOneField(primary_key=True, serialize=False, to='libreosteoweb.Document', verbose_name='document', on_delete=models.CASCADE)),\n                ('patient', models.ForeignKey(verbose_name='patient', serialize=False, to='libreosteoweb.Patient', on_delete=models.CASCADE))\n            ],\n        ),\n    ]\n","repo_name":"libreosteo/LibreOsteo","sub_path":"libreosteoweb/migrations/0029_patientdocument.py","file_name":"0029_patientdocument.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"81"} +{"seq_id":"12409095918","text":"'''\n-Problem-\nInput: An N x M matrix of a garden. Each cell contains an integer representing the number of carrots in that part of the garden.\n\nOutput: The number of carrots Bunny eats before falling asleep.\n\nConditions: Bunny starts in the center of the garden. \n1. If there is more than one center cell, Bunny starts in the cell with the largest number of carrots. \n2. There will never be a tie for the highest number of carrots in a center cell. \n3. Bunny eats all of the carrots in his cell, then looks left, right, up, and down for more carrots. \n4. Bunny always moves to the adjacent cell with the highest carrot count. \n5. If there are no adjacent cells with carrots, Bunny falls asleep.\n\n-Assumptions for this problem-\n1. The number of carrots in each cell is a positive integer\n2. All input fields are non-empty arrays\n3. 
Run Python 3.6.1 environment\n\nRun command:\npython bunny.py\n'''\n\nimport numpy as np\nfrom test_bunny import *\n\n'''\nfind_start\n<-Takes in N x M field array\n->Return Bunny's starting position depending\n on the 4 types of N x M input\n'''\ndef find_start(field):\n # Type array to an np array so I can use its functions\n field = np.array(field)\n\n # Number of rows and columns in the given array\n num_rows = field.shape[0]\n num_cols = field.shape[1]\n\n # Case: Odd Rows x Odd Columns -> [x]\n if num_rows % 2 != 0 and num_cols % 2 != 0:\n # Get center indices\n start_row = int((num_rows - 1) / 2)\n start_col = int((num_cols - 1) / 2)\n\n # Case: Even Rows x Odd Columns -> [x x]\n elif num_rows % 2 == 0 and num_cols % 2 != 0:\n # Set index of starting column\n start_col = int((num_cols - 1) / 2)\n\n # Set center row's upper and lower bounds for temp array\n row_lower = int((num_rows/2) - 1)\n row_upper = int(num_rows/2) + 1\n # temp array contains only the center values\n temp = field[row_lower:row_upper, start_col]\n # Get max center value's index from the temp array\n idx = np.unravel_index(temp.argmax(), temp.shape)\n # Set index of starting row\n start_row = idx[0] + row_lower\n \n # Case: Odd Rows x Even Columns -> [x],[x]\n elif num_rows % 2 != 0 and num_cols % 2 == 0:\n # Set index of starting row\n start_row = int((num_rows - 1) / 2)\n # Set center column's upper and lower bounds for temp array\n col_lower = int((num_cols/2) - 1)\n col_upper = int(num_cols/2) + 1\n # temp array contains only the center values\n temp = field[start_row, col_lower:col_upper]\n # Get max value's index from the temp array\n idx = np.unravel_index(temp.argmax(), temp.shape)\n # Set index of starting column\n start_col = idx[0] + col_lower\n\n # Case: Even Rows x Even Columns -> [x x], [x x]\n else:\n # Need to get max value from a 2x2 center\n col_lower = int((num_cols/2) - 1)\n col_upper = int(num_cols/2) + 1\n row_lower = int((num_rows/2) - 1)\n row_upper = int(num_rows/2) + 1\n # Get max value's index from the temp array\n temp = field[row_lower:row_upper, col_lower:col_upper]\n idx = np.unravel_index(temp.argmax(), temp.shape)\n # Set center's indices:\n start_row = idx[0] + row_lower\n start_col = idx[1] + col_lower\n\n return [start_row, start_col]\n\n'''\neat_carrots\n<-Takes the input array, carrot counter, and current position of Bunny\n->Recursively outputs the total number of carrots Bunny ate\n\nEat carrots depending on current location\n'''\ndef eat_carrots(field, counter = 0, curr_position = []):\n # First call needs to set Bunny's starting position\n if not curr_position:\n curr_position = find_start(field)\n\n # Set values for current position and lengths\n i = curr_position[0]\n j = curr_position[1]\n row_length = len(field)\n col_length = len(field[0])\n\n # Eat carrots from current position\n counter += field[i][j]\n # Set current cell's carrot count to 0\n field[i][j] = 0\n\n # Set direction variables to -1 for max value determination\n north, south, east, west = -1, -1, -1, -1\n\n # Bounds check:\n if i-1 < row_length and i-1 >= 0:\n north = field[i-1][j]\n if i+1 < row_length and i+1 >= 0:\n south = field[i+1][j]\n if j+1 < col_length and j+1 >= 0:\n east = field[i][j+1]\n if j-1 < col_length and j-1 >= 0:\n west = field[i][j-1]\n\n # At this point, if any direction vars are -1, this means that direction is out of bounds\n\n # Map the possible directions' values to their locations on the field for easy access\n routes = {\n north: [i-1, j],\n south: [i+1, j],\n east: [i, 
j+1],\n west: [i, j-1]\n }\n\n # Find next cell to eat\n eat_next = max(routes)\n\n # Base case is when there are no carrots in the max adjacent cell\n if eat_next < 1:\n return counter\n else:\n # Recurse until all adjacent cells are empty\n return eat_carrots(field, counter, routes[eat_next])\n\nif __name__ == '__main__':\n test.main()","repo_name":"hafsastorm/misc_coding","sub_path":"bunny/bunny.py","file_name":"bunny.py","file_ext":"py","file_size_in_byte":4774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28995851036","text":"#!/usr/bin/env python\n\n#\n# This script listens to changes in the \"sources\" folder,\n# and triggers the build script.\n# Keep the compiled font from the \"fonts\" folder open in\n# FontGoggles, and watch the saved changes from Glyphs.app\n# take effect.\n#\n# You will need the \"watchfiles\" package from PyPI:\n#\n# $ pip install watchfiles\n#\n\nimport subprocess\nfrom watchfiles import watch\n\n\nprint(\n \"Listening to changes in the 'sources' folder. \"\n \"Type command-period or control-C to stop.\"\n)\n\nfor changes in watch(\"sources\"):\n print(\"Rebuilding font...\")\n result = subprocess.run(\n [\"./build.sh\"],\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n encoding=\"utf-8\",\n )\n if result.returncode != 0:\n print(result.stdout)\n print(\"Done.\")\n","repo_name":"justvanrossum/nabla","sub_path":"auto_build.py","file_name":"auto_build.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","stars":153,"dataset":"github-code","pt":"81"} +{"seq_id":"567422801","text":"import pandas as pd\nimport json\nimport io\nimport numpy as np\nfrom pandas.io.json import json_normalize\nfrom vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\n\n\n\ndata_json = io.open('malalatweets.json', mode='r', encoding='utf-8').read()\ndata_python = json.loads(data_json)\n\ndf = json_normalize(data_python)\ndf.to_csv(\"outputtweets2.csv\")\n\n#read organised data into python to analyse\ntw_data = pd.read_csv('outputtweets2.csv',lineterminator='\\n')\ntw_text = tw_data['text'].astype(str)\ntw_time = tw_data['timestamp'].astype(str)\n#print(tw_data['text'])\n\nanalyzer = SentimentIntensityAnalyzer()\nneg_sentiment = []\npos_sentiment = []\nneu_sentiment = []\ncomp_sentiment = []\nfor tweet in tw_text:\n vs = analyzer.polarity_scores(tweet)\n #print(\"{:-<65} {}\".format(tweet, str(vs)))\n #print(vs['neg'])\n neg_sentiment = np.append(neg_sentiment, vs['neg'])\n pos_sentiment = np.append(pos_sentiment, vs['pos'])\n neu_sentiment = np.append(neu_sentiment, vs['neu'])\n comp_sentiment = np.append(comp_sentiment, vs['compound'])\n#print(sentiment)\n\ntw_time = tw_time.str.split(\"T\", expand=True)\n#print(tw_time)\ntw_data['date'] = tw_time[0]\ntw_data['time'] = tw_time[1]\ntw_data['pos'] = pos_sentiment\ntw_data['neg'] = neg_sentiment\ntw_data['neu'] = neu_sentiment\ntw_data['comp'] = comp_sentiment\n\ntw_data.to_csv('sent_output2.csv')\n","repo_name":"waliyah/infoviz18","sub_path":"datacollect.py","file_name":"datacollect.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8174102404","text":"from ..utils import *\nfrom .ssim import *\nimport numpy as np\nimport scipy.ndimage\n\ndef compute_msssim(frame1, frame2, method='product'):\n extend_mode = 'constant'\n avg_window = np.array(gen_gauss_window(5, 1.5))\n K_1 = 0.01\n K_2 = 0.03\n level = 5\n 
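# The five constants below are the per-scale weights reported by Wang, Simoncelli and Bovik (2003) for combining MS-SSIM terms across scales; weight2 is the normalized variant used by the additive ('sum') combination method.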
weight1 = np.array([0.0448, 0.2856, 0.3001, 0.2363, 0.1333])\n weight2 = weight1.copy()\n weight2 /= np.sum(weight2)\n\n downsample_filter = np.ones(2, dtype=np.float32)/2.0\n\n im1 = frame1.astype(np.float32)\n im2 = frame2.astype(np.float32)\n\n overall_mssim1 = []\n overall_mssim2 = []\n for i in range(level):\n mssim_array, ssim_map_array, mcs_array, cs_map_array = ssim_full(im1, im2, K_1 = K_1, K_2 = K_2, avg_window = avg_window)\n mssim_array = mssim_array[0]\n ssim_map_array = ssim_map_array[0]\n mcs_array = mcs_array[0]\n cs_map_array = cs_map_array[0]\n filtered_im1 = scipy.ndimage.correlate1d(im1, downsample_filter, 0)\n filtered_im1 = scipy.ndimage.correlate1d(filtered_im1, downsample_filter, 1)\n filtered_im1 = filtered_im1[1:, 1:]\n\n filtered_im2 = scipy.ndimage.correlate1d(im2, downsample_filter, 0)\n filtered_im2 = scipy.ndimage.correlate1d(filtered_im2, downsample_filter, 1)\n filtered_im2 = filtered_im2[1:, 1:]\n\n im1 = filtered_im1[::2, ::2]\n im2 = filtered_im2[::2, ::2]\n\n if i != level-1:\n overall_mssim1.append(mcs_array**weight1[i])\n overall_mssim2.append(mcs_array*weight2[i])\n\n if method == \"product\":\n overall_mssim = np.product(overall_mssim1) * mssim_array\n else:\n overall_mssim = np.sum(overall_mssim2) + mssim_array\n\n return overall_mssim\n\n\ndef msssim(referenceVideoData, distortedVideoData, method='product'):\n \"\"\"Computes Multiscale Structural Similarity (MS-SSIM) Index. [#f1]_\n\n Both video inputs are compared frame-by-frame to obtain T\n MS-SSIM measurements on the luminance channel.\n\n Parameters\n ----------\n referenceVideoData : ndarray\n Reference video, ndarray of dimension (T, M, N, C), (T, M, N), (M, N, C), or (M, N),\n where T is the number of frames, M is the height, N is width,\n and C is number of channels. Here C is only allowed to be 1.\n\n distortedVideoData : ndarray\n Distorted video, ndarray of dimension (T, M, N, C), (T, M, N), (M, N, C), or (M, N),\n where T is the number of frames, M is the height, N is width,\n and C is number of channels. Here C is only allowed to be 1.\n\n method : str\n Whether to use \"product\" (default) or to use \"sum\" for combing multiple scales into the single score.\n\n Returns\n -------\n msssim_array : ndarray\n The MS-SSIM results, ndarray of dimension (T,), where T\n is the number of frames\n\n References\n ----------\n\n .. [#f1] Z. Wang, E. P. Simoncelli and A. C. Bovik, \"Multi-scale structural similarity for image quality assessment,\" IEEE Asilomar Conference Signals, Systems and Computers, Nov. 2003.\n\n \"\"\"\n\n referenceVideoData = vshape(referenceVideoData)\n distortedVideoData = vshape(distortedVideoData)\n\n assert(referenceVideoData.shape == distortedVideoData.shape)\n\n T, M, N, C = referenceVideoData.shape\n\n assert C == 1, \"MS-SSIM called with videos containing %d channels. Please supply only the luminance channel\" % (C,)\n assert (M >= 176) & (N >= 176), \"You supplied a resolution of %dx%d. MS-SSIM can only be used with videos large enough having multiple scales. 
Please use only with resolutions >= 176x176.\" % (M, N)\n\n scores = np.zeros(T, dtype=np.float32)\n for t in range(T):\n referenceFrame = referenceVideoData[t, :, :, 0].astype(np.float32)\n distortedFrame = distortedVideoData[t, :, :, 0].astype(np.float32)\n \n scores[t] = compute_msssim(referenceFrame, distortedFrame, method=method)\n\n return scores\n","repo_name":"scikit-video/scikit-video","sub_path":"skvideo/measure/msssim.py","file_name":"msssim.py","file_ext":"py","file_size_in_byte":3840,"program_lang":"python","lang":"en","doc_type":"code","stars":638,"dataset":"github-code","pt":"81"} +{"seq_id":"24473312915","text":"import asyncio\nfrom time import sleep\nfrom datetime import datetime\n#async def main():\n\n # print('tim')\n # await foo('text')\n#main()\n#print(main())#co rutine object\n#asyncio.run(main())\n#async def foo(text):\n # print(text)\n # await asyncio.sleep(1)\n \n#asyncio.run(main())\n\n#sroring the time at which script start executing\nstart=datetime.now()\ndef func1():\n print(\"NASA\")\n sleep(5)\n print(\"Complete executing func1\")\ndef func2():\n print(\"ESA\")\n sleep(5)\n print(\"Complete executing func2\")\nfunc1()\nfunc2()\nprint(\"Total Execution time :\"+str((datetime.now())-start))\n\n#concurrent COmputing\n#Concurrent computing means executing several instructions concurrently or \n#@at the same time\nstart=datetime.now()\nasync def funcx():\n print(\"openSUSE\")\n await asyncio.sleep(5)\n print(\"Complete execution funcx\")\nasync def funcy():\n print(\"Manjaro\")\n await asyncio.sleep(5)\n print(\"complete executing func y\")\n \nasync def main():\n await asyncio.gather(funcx(), funcy())\nasyncio.run(main())\nprint(\"Total execution time:\"+str((datetime.now())-start))\n\n\n \n","repo_name":"RajrupDasid/py-delta","sub_path":"asc.py","file_name":"asc.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"84960362","text":"from datetime import datetime\nfrom django import forms\nfrom django.core import validators\nfrom django.core.exceptions import ValidationError\nfrom django.core.validators import validate_integer\nfrom django.utils.timezone import now\n\nfrom polls.models import Poll, Question, Comment, Choice\n\n\ndef validate_even(value):\n if value % 2 != 0:\n raise ValidationError('%(value)s ไม่ใช่เลขคู่', params={'value': value})\n\n\nclass PollForm(forms.Form):\n title = forms.CharField(label=\"ชื่อโพล\", max_length=100, required=True)\n email = forms.CharField(validators=[validators.validate_email])\n no_questions = forms.IntegerField(label=\"จำนวนคำถาม\", min_value=0, max_value=10,\n required=True, validators=[validate_even])\n start_date = forms.DateField(required=False)\n end_date = forms.DateField(required=False)\n\n # def clean_title(self):\n # data = self.cleaned_data['title']\n #\n # if \"ไอทีหมีแพนด้า\" not in data:\n # raise forms.ValidationError(\"คุณลืมชื่อคณะ\")\n #\n # return data\n #\n # def clean(self):\n # cleaned_data = super().clean()\n # start = cleaned_data.get('start_date')\n # end = cleaned_data.get('end_date')\n #\n # if start and not end:\n # # raise forms.ValidationError(\"โปรดเลือกวันสิ้นสุด\")\n # self.add_error('end_date', \"โปรดเลือกวันสิ้นสุด\")\n # elif not start and end:\n # # raise forms.ValidationError(\"โปรดเลือกวันเริ่มต้น\")\n # self.add_error('start_date', \"โปรดเลือกวันเริ่มต้น\")\n\n\nclass QuestionForm(forms.ModelForm):\n question_id = forms.IntegerField(required=False, widget=forms.HiddenInput)\n\n 
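# A plausible reading (not documented in this file): question_id is kept as an optional hidden input so the same form can round-trip the primary key on updates without exposing it as an editable field; Meta below binds the remaining fields to the Question model, excluding poll, which the view is presumably expected to attach.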
class Meta:\n model = Question\n exclude = ['poll']\n\n widgets = {\n 'text': forms.TextInput(attrs={'class': 'form-control'}),\n 'type': forms.Select(attrs={'class': 'form-control'}),\n }\n\n\nclass QuestionModelForm(forms.Form):\n question = forms.CharField(widget=forms.Textarea)\n\n\nclass PollModelForm(forms.ModelForm):\n class Meta:\n model = Poll\n exclude = ['del_flag']\n\n widgets = {\n 'title': forms.TextInput(attrs={'class': 'form-control'}),\n 'start_date': forms.DateInput(attrs={'class': 'form-control', 'type': 'date'}),\n 'end_date': forms.DateInput(attrs={'class': 'form-control', 'type': 'date'}),\n }\n\n # def clean_title(self):\n # data = self.cleaned_data['title']\n #\n # if \"ไอทีหมีแพนด้า\" not in data:\n # raise forms.ValidationError(\"คุณลืมชื่อคณะ\")\n #\n # return data\n #\n # def clean(self):\n # cleaned_data = super().clean()\n # start = cleaned_data.get('start_date')\n # end = cleaned_data.get('end_date')\n #\n # if start and not end:\n # # raise forms.ValidationError(\"โปรดเลือกวันสิ้นสุด\")\n # self.add_error('end_date', \"โปรดเลือกวันสิ้นสุด\")\n # elif not start and end:\n # # raise forms.ValidationError(\"โปรดเลือกวันเริ่มต้น\")\n # self.add_error('start_date', \"โปรดเลือกวันเริ่มต้น\")\n\n\nclass CommentForm(forms.ModelForm):\n class Meta:\n model = Comment\n fields = ['title', 'body', 'email', 'tel']\n\n # title = forms.CharField(max_length=100, required=True)\n # body = forms.CharField(max_length=500, required=True)\n # email = forms.EmailField(validators=[validators.validate_email])\n # tel = forms.IntegerField(min_value=10, max_value=10)\n\n # def clean_title(self):\n # data = self.cleaned_data['title']\n #\n # if not data:\n # raise forms.ValidationError(\"ต้องกรอก email หรือ Mobile Number\")\n #\n # return data\n\n def clean(self):\n cleaned_data = super().clean()\n email = cleaned_data.get('email')\n mobile = cleaned_data.get('tel')\n\n if not email and not mobile:\n self.add_error('tel', \"ต้องกรอก email หรือ Mobile Number\")\n # raise forms.ValidationError(\"โปรดเลือกวันสิ้นสุด\")\n try:\n int(mobile)\n except:\n self.add_error('tel', \"หมายเลขโทรศัพท์ต้องเป็นตัวเลขเท่านั้น\")\n if len(mobile) < 10:\n # raise forms.ValidationError(\"โปรดเลือกวันเริ่มต้น\")\n self.add_error('tel', \"หมายเลขโทรศัพท์ต้องมี 10 หลัก\")\n # elif not validators.validate_email(email):\n # self.add_error('email', \"Email a valid email address\")\n\n\n# class ContactForm(forms.Form):\n# subject = forms.CharField(max_length=100)\n# message = forms.CharField()\n# sender = forms.EmailField()\n# recipients =\n\nclass ChoiceModelForm(forms.ModelForm):\n class Meta:\n model = Choice\n fields = '__all__'\n\n\nclass CreateQuestion(forms.Form):\n text = forms.CharField(widget=forms.Textarea)\n value = forms.IntegerField(max_value=1)\n","repo_name":"Nuengnakhap/mySite","sub_path":"polls/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":5571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12758077775","text":"import json\nfrom os import path\nimport subprocess\nfrom typing import List\nfrom github import Repository\n\n\ndef mirror(root_dir: str, repo: Repository, depth: int = None):\n repo_dir = path.join(root_dir, repo.owner_login, repo.name)\n if path.exists(repo_dir):\n _git(f\"-C {repo_dir} fetch origin refs/heads/*:refs/heads/*\")\n else:\n d = f\"--depth {depth}\" if depth else \"\"\n _git(f\"clone --bare {d} {repo.url} {repo_dir}\")\n _store_description(repo_dir, repo.description)\n\n\ndef 
store_meta(root_dir: str, repos: List[Repository]):\n curr = {}\n try:\n with open(path.join(root_dir, \"meta.json\"), mode='r') as f:\n for r in json.load(f):\n curr[f\"{r.get('owner_login')}/{r.get('name')}\"] = r\n except Exception as e:\n print(\"error: \", e)\n pass\n with open(path.join(root_dir, \"meta.json\"), mode='w+') as f:\n for r in repos:\n curr[f\"{r.owner_login}/{r.name}\"] = r.map()\n json.dump(list(curr.values()), f, indent=2)\n\n\ndef _git(cmd: str):\n res = subprocess.run(\n f\"git {cmd}\".split(),\n stdout=subprocess.DEVNULL,\n stderr=subprocess.STDOUT,\n )\n if res.returncode != 0:\n raise Exception(\n res.stderr or res.stdout or f\"unknown error: {res.returncode}\")\n return res.stdout\n\n\ndef _store_description(repo_dir: str, desc: str):\n if not desc:\n return\n dfile = path.join(repo_dir, \"description\")\n with open(dfile, \"w+\", encoding='utf-8') as f:\n f.write(desc)\n","repo_name":"zekroTJA/stargrab","sub_path":"stargrab/repo.py","file_name":"repo.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"81"} +{"seq_id":"9734132934","text":"import copy\nimport logging\nimport os\nimport sys\nimport time\nfrom random import shuffle\nfrom urllib.parse import urlparse\n\nimport gitlab\nfrom apscheduler.schedulers.asyncio import AsyncIOScheduler\nfrom japronto import Application\nfrom joblib import Parallel, delayed\nfrom joblib.parallel import parallel_backend\n\nMRBV_TOKEN = os.getenv('MRBV_TOKEN', None)\nif MRBV_TOKEN is None:\n print(f\"Please set MRBV_TOKEN\")\n\ntry:\n o = urlparse(os.getenv('MRBV_URL', None))\n MRBV_URL = f\"{o.scheme}://{o.netloc}\"\nexcept Exception as e:\n print(f\"Error parse MRBV_URL: {e}\")\n\nMRBV_REQUIRED_VOTE_IDS = os.getenv('MRBV_REQUIRED_VOTE_IDS', None)\nif MRBV_REQUIRED_VOTE_IDS is not None:\n MRBV_REQUIRED_VOTE_IDS = [int(i) for i in os.getenv('MRBV_REQUIRED_VOTE_IDS', '').split(',')]\nelse:\n MRBV_REQUIRED_VOTE_IDS = []\n\nMRBV_SECONDS_PROJECTS = int(os.getenv('MRBV_SECONDS_PROJECTS', 300))\nMRBV_SECONDS_VARIABLES = int(os.getenv('MRBV_SECONDS_VARIABLES', 120))\nMRBV_GET_SECONDS_MERGE = int(os.getenv('MRBV_GET_SECONDS_MERGE', 60))\nMRBV_SECONDS_SUBSCRIBE = int(os.getenv('MRBV_SECONDS_SUBSCRIBE', 60))\nMRBV_CLOSE_SECONDS_MERGE = int(os.getenv('MRBV_CLOSE_SECONDS_MERGE', 30))\nMAX_APPEND_TIME = max([MRBV_SECONDS_PROJECTS,\n MRBV_SECONDS_VARIABLES,\n MRBV_GET_SECONDS_MERGE,\n MRBV_SECONDS_SUBSCRIBE,\n MRBV_CLOSE_SECONDS_MERGE])\n\nPROJECTS = []\nWITH_UPVOTE = []\nPROJECTS_REQUIRED_VOTE_IDS = {}\nMERGE_REQUESTS = []\nSUBSCRIBE = {}\nUPTIME = {}\n\n\nasync def get_projects():\n global PROJECTS\n global UPTIME\n UPTIME['MRBV_SECONDS_PROJECTS'] = time.time() + MRBV_SECONDS_PROJECTS\n PROJECTS = gl.projects.list(all=True)\n print(f\"found {len(PROJECTS)} projects\")\n\n\ndef check_project_upvote(project):\n try:\n MRBV_BOT_UPVOTE = project.variables.get('MRBV_BOT_UPVOTE')\n try:\n MRBV_REQUIRED_VOTE_IDS = [int(i) for i in project.variables.get('MRBV_REQUIRED_VOTE_IDS').value.split(',')]\n print(f\"'{project.name_with_namespace}' with {len(MRBV_REQUIRED_VOTE_IDS)} ids for required vote\")\n except gitlab.GitlabGetError as e:\n MRBV_REQUIRED_VOTE_IDS = []\n return project, int(MRBV_BOT_UPVOTE.value), MRBV_REQUIRED_VOTE_IDS\n except Exception as e:\n pass\n\n\nasync def get_variables():\n global PROJECTS\n global WITH_UPVOTE\n global UPTIME\n UPTIME['MRBV_SECONDS_VARIABLES'] = time.time() + MRBV_SECONDS_VARIABLES\n this_PROJECTS = copy.copy(PROJECTS)\n # 
issue https://github.com/scikit-learn/scikit-learn/issues/8920\n with parallel_backend('threading'):\n data = Parallel(n_jobs=4)(delayed(check_project_upvote)(project) for project in this_PROJECTS)\n WITH_UPVOTE = [i for i in data if i is not None]\n print(f\"found {len(WITH_UPVOTE)} MRBV_BOT_UPVOTE in {len(PROJECTS)} projects\")\n\n\ndef check_merge_by_award_emojis(merge, required_vote_ids):\n for award_emoji in merge.awardemojis.list(all=True):\n if award_emoji.name == 'thumbsup' and award_emoji.user['id'] in required_vote_ids:\n return True\n return False\n\n\ndef check_merge_upvote(data):\n global SUBSCRIBE\n project, upvote, required_vote_ids = data\n try:\n mergerequests = []\n for i in project.mergerequests.list(state='opened', all=True):\n # coming soon\n # if i.id not in SUBSCRIBE.keys():\n # SUBSCRIBE[i.id] = i\n if required_vote_ids:\n check_merge = check_merge_by_award_emojis(i, required_vote_ids)\n else:\n check_merge = True\n if int(i.upvotes) >= int(upvote) and \\\n int(i.downvotes) <= 0 and \\\n not i.work_in_progress and \\\n i.merge_status == 'can_be_merged' and \\\n check_merge:\n mergerequests.append(i)\n return mergerequests\n except Exception as e:\n pass\n\n\nasync def get_merges():\n global WITH_UPVOTE\n global MERGE_REQUESTS\n global PROJECTS\n global UPTIME\n UPTIME['MRBV_GET_SECONDS_MERGE'] = time.time() + MRBV_GET_SECONDS_MERGE\n this_WITH_UPVOTE = copy.copy(WITH_UPVOTE)\n with parallel_backend('threading'):\n data = Parallel(n_jobs=4)(delayed(check_merge_upvote)(data) for data in this_WITH_UPVOTE)\n buff = []\n for mergerequests in [i for i in data if i is not None]:\n buff = buff + mergerequests\n MERGE_REQUESTS = buff\n print(f\"found {len(MERGE_REQUESTS)} merges with {len(WITH_UPVOTE)} MRBV_BOT_UPVOTE in {len(PROJECTS)} projects\")\n\n\n# coming soon\nasync def subscribe():\n global SUBSCRIBE\n global UPTIME\n UPTIME['MRBV_SECONDS_SUBSCRIBE'] = time.time() + MRBV_SECONDS_SUBSCRIBE\n this_SUBSCRIBE = copy.copy(SUBSCRIBE)\n for i in this_SUBSCRIBE.keys():\n try:\n print(f\"subscribe '{this_SUBSCRIBE[i].title}'\")\n this_SUBSCRIBE[i].subscribe()\n except Exception as e:\n pass\n del SUBSCRIBE[i]\n\n\ndef accept(merge):\n name_with_namespace = gl.projects.get(merge.project_id).name_with_namespace\n try:\n merge.merge()\n print(f\"merge '{merge.title}' in {name_with_namespace}\")\n MERGE_REQUESTS.remove(merge)\n except Exception as e:\n print(f\"{e} ===> {merge.title} ({name_with_namespace})\")\n\n\nasync def try_close_merge():\n global MERGE_REQUESTS\n UPTIME['MRBV_CLOSE_SECONDS_MERGE'] = time.time() + MRBV_CLOSE_SECONDS_MERGE\n this_MERGE_REQUESTS = copy.copy(MERGE_REQUESTS)\n shuffle(this_MERGE_REQUESTS)\n if not this_MERGE_REQUESTS:\n return\n # accept first merge\n accept(this_MERGE_REQUESTS[0])\n\n\nasync def connect_scheduler():\n scheduler = AsyncIOScheduler(timezone=\"UTC\")\n scheduler.add_job(get_projects, 'interval', seconds=MRBV_SECONDS_PROJECTS, max_instances=1)\n scheduler.add_job(get_variables, 'interval', seconds=MRBV_SECONDS_VARIABLES, max_instances=1)\n scheduler.add_job(get_merges, 'interval', seconds=MRBV_GET_SECONDS_MERGE, max_instances=1)\n # scheduler.add_job(subscribe, 'interval', seconds=MRBV_SECONDS_SUBSCRIBE, max_instances=1)\n scheduler.add_job(try_close_merge, 'interval', seconds=MRBV_CLOSE_SECONDS_MERGE, max_instances=1)\n\n scheduler.start()\n\n\nasync def health_check(request):\n global MERGE_REQUESTS\n global UPTIME\n data = {}\n for i in MERGE_REQUESTS:\n data[i.id] = {\"title\": i.title, \"project_id\": i.project_id}\n code = 
200\n for key in UPTIME.keys():\n if UPTIME[key] <= time.time() - MAX_APPEND_TIME:\n print(f\"process freeze for {key}\", file=sys.stderr)\n code = 500\n return request.Response(json=data, mime_type=\"application/json\", code=code)\n\n\napp = Application()\ngl = gitlab.Gitlab(MRBV_URL, private_token=MRBV_TOKEN, api_version='4')\nlogging.getLogger('apscheduler.scheduler').propagate = False\nlogging.getLogger('apscheduler.scheduler').addHandler(logging.NullHandler())\napp.loop.run_until_complete(get_projects())\napp.loop.run_until_complete(get_variables())\napp.loop.run_until_complete(get_merges())\napp.loop.run_until_complete(try_close_merge())\n# app.loop.run_until_complete(subscribe())\napp.loop.run_until_complete(connect_scheduler())\nrouter = app.router\nrouter.add_route('/', health_check)\nrouter.add_route('/healthcheck', health_check)\napp.run(port=80)\n","repo_name":"Negashev/mrbv","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":7332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70705161544","text":"# Take simple example, plot per-layer stats over time\nimport random\nfrom typing import List\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torchvision.datasets as datasets\nfrom PIL import Image\n\n_pytorch_floating_point_types = (torch.float16, torch.float32, torch.float64)\n\n_numpy_type_map = {\n 'float64': torch.DoubleTensor,\n 'float32': torch.FloatTensor,\n 'float16': torch.HalfTensor,\n 'int64': torch.LongTensor,\n 'int32': torch.IntTensor,\n 'int16': torch.ShortTensor,\n 'int8': torch.CharTensor,\n 'uint8': torch.ByteTensor,\n}\n\n\ndef pytorch_dtype_to_floating_numpy_dtype(dtype):\n \"\"\"Converts PyTorch dtype to numpy floating point dtype, defaulting to np.float32 for non-floating point types.\"\"\"\n if dtype == torch.float64:\n dtype = np.float64\n elif dtype == torch.float32:\n dtype = np.float32\n elif dtype == torch.float16:\n dtype = np.float16\n else:\n dtype = np.float32\n return dtype\n\n\ndef to_numpy(x, dtype: np.dtype = None) -> np.ndarray:\n \"\"\"\n Convert numeric object to floating point numpy array. 
If dtype is not specified, use PyTorch default dtype.\n\n    Args:\n        x: numeric object\n        dtype: numpy dtype, must be floating point\n\n    Returns:\n        floating point numpy array\n    """\n\n    assert np.issubdtype(dtype, np.floating), \"dtype must be real-valued floating point\"\n\n    # Convert to normal_form expression from a special form (https://reference.wolfram.com/language/ref/Normal.html)\n    if hasattr(x, 'normal_form'):\n        x = x.normal_form()\n\n    if type(x) == np.ndarray:\n        assert np.issubdtype(x.dtype, np.floating), f\"numpy type promotion not implemented for {x.dtype}\"\n\n    if type(x) == torch.Tensor:\n        dtype = pytorch_dtype_to_floating_numpy_dtype(x.dtype)\n        return x.detach().cpu().numpy().astype(dtype)\n\n    # list or tuple, iterate inside to convert PyTorch arrays\n    if type(x) in [list, tuple]:\n        x = [to_numpy(r) for r in x]\n\n    # Some Python type, use numpy conversion\n    result = np.array(x, dtype=dtype)\n    assert np.issubdtype(result.dtype, np.number), f\"Provided object ({result}) is not numeric, has type {result.dtype}\"\n    if dtype is None:\n        return result.astype(pytorch_dtype_to_floating_numpy_dtype(torch.get_default_dtype()))\n    return result\n\n\ndef to_numpys(*xs, dtype=np.float32):\n    return (to_numpy(x, dtype) for x in xs)\n\n\ndef check_equal(observed, truth, rtol=1e-9, atol=1e-12, label: str = '') -> None:\n    """\n    Assert-fails if any entries of the two arrays are not close to each other at the desired tolerance. See np.allclose for the meaning of rtol, atol.\n\n    """\n\n    truth = to_numpy(truth)\n    observed = to_numpy(observed)\n\n    assert truth.shape == observed.shape, f\"Observed shape {observed.shape}, expected shape {truth.shape}\"\n    # run np.testing.assert_allclose for extra info on discrepancies\n    if not np.allclose(observed, truth, rtol=rtol, atol=atol, equal_nan=True):\n        print(f'Numerical testing failed for {label}')\n        np.testing.assert_allclose(truth, observed, rtol=rtol, atol=atol, equal_nan=True)\n\n\ndef check_close(a0, b0, rtol=1e-5, atol=1e-8, label: str = '') -> None:\n    """Convenience method for check_equal with tolerances defaulting to typical errors observed in neural network\n    ops in float32 precision."""\n    return check_equal(a0, b0, rtol=rtol, atol=atol, label=label)\n\n\n# Fork of SimpleModel that doesn't automatically register hooks, for autograd_lib.py refactoring\nclass SimpleModel2(nn.Module):\n    """Simple sequential model. 
Adds layers[] attribute, flags to turn on/off hooks, and lookup mechanism from layer to parent\n    model."""\n\n    layers: List[nn.Module]\n    all_layers: List[nn.Module]\n\n    def __init__(self, *args, **kwargs):\n        super().__init__()\n\n\ndef least_squares(data, targets=None, aggregation='mean'):\n    """Least squares loss (like MSELoss, but with an extra 1/2 factor)."""\n    assert is_matrix(data), f\"Expected matrix, got {data.shape}\"\n    assert aggregation in ('mean', 'sum')\n    if targets is None:\n        targets = torch.zeros_like(data)\n    err = data - targets.view(-1, data.shape[1])\n    normalizer = len(data) if aggregation == 'mean' else 1\n    return torch.sum(err * err) / 2 / normalizer\n\n\n# Autograd functions, from https://gist.github.com/apaszke/226abdf867c4e9d6698bd198f3b45fb7\n# noinspection PyTypeChecker\ndef jacobian(y: torch.Tensor, x: torch.Tensor, create_graph=False):\n    jac = []\n    flat_y = y.reshape(-1)\n    grad_y = torch.zeros_like(flat_y)\n    for i in range(len(flat_y)):\n        grad_y[i] = 1.\n        grad_x, = torch.autograd.grad(flat_y, x, grad_y, retain_graph=True, create_graph=create_graph)\n        jac.append(grad_x.reshape(x.shape))\n        grad_y[i] = 0.\n    return torch.stack(jac).reshape(y.shape + x.shape)\n\n\ndef hessian(y: torch.Tensor, x: torch.Tensor):\n    return jacobian(jacobian(y, x, create_graph=True), x)\n\n\ndef seed_random(seed: int) -> None:\n    """Manually set the seed for all configurable random number generators in the current process."""\n    torch.manual_seed(seed)\n    random.seed(seed)\n    np.random.seed(seed)\n    if torch.cuda.is_available():\n        torch.cuda.manual_seed_all(seed)\n\n\nclass SimpleMLP(nn.Module):\n    """Simple feedforward network that works on images."""\n\n    layers: List[nn.Module]\n    all_layers: List[nn.Module]\n\n    def __init__(self, d: List[int], nonlin=False, bias=False):\n        """\n        Feedforward network of linear layers with optional ReLU nonlinearity. Stores layers in \"layers\" attr, i.e.\n        model.layers[0] refers to the first linear layer.\n\n        Args:\n            d: list of layer dimensions, i.e. [768, 20, 10] for MNIST 10-output with hidden layer of 20\n            nonlin: whether to include ReLU nonlinearity\n        """\n        super().__init__()\n        self.layers: List[nn.Module] = []\n        self.all_layers: List[nn.Module] = []\n        self.d: List[int] = d\n        for i in range(len(d) - 1):\n            linear = nn.Linear(d[i], d[i + 1], bias=bias)\n            setattr(linear, 'name', f'{i:02d}-linear')\n            self.layers.append(linear)\n            self.all_layers.append(linear)\n            if nonlin:\n                self.all_layers.append(nn.ReLU())\n        self.predict = torch.nn.Sequential(*self.all_layers)\n\n    def forward(self, x: torch.Tensor):\n        x = x.reshape((-1, self.d[0]))\n        return self.predict(x)\n\n\ndef is_row_matrix(dd):\n    return len(dd.shape) == 2 and dd.shape[0] == 1\n\n\ndef is_col_matrix(dd):\n    return len(dd.shape) == 2 and dd.shape[1] == 1\n\n\ndef is_square_matrix(dd):\n    return len(dd.shape) == 2 and dd.shape[0] == dd.shape[1] and dd.shape[0] >= 1\n\n\ndef is_vector(dd) -> bool:\n    shape = dd.shape\n    return len(shape) == 1 and shape[0] >= 1\n\n\ndef is_matrix(dd) -> bool:\n    shape = dd.shape\n    return len(shape) == 2 and shape[0] >= 1 and shape[1] >= 1\n\n\ndef to_logits(p: torch.Tensor) -> torch.Tensor:\n    """Inverse of F.softmax"""\n    if len(p.shape) == 1:\n        batch = torch.unsqueeze(p, 0)\n    else:\n        assert len(p.shape) == 2\n        batch = p\n\n    # keep the class dimension ([:, -1:]) so the subtraction broadcasts across all classes\n    batch = torch.log(batch) - torch.log(batch[:, -1:])\n    return batch.reshape(p.shape)\n\n\nclass TinyMNIST(datasets.MNIST):\n    """Custom-size MNIST dataset for testing. Generates data/target images with reduced resolution and 0\n    channels. 
When provided with original 28, 28 resolution, generates standard 1 channel MNIST dataset.\n \"\"\"\n\n def __init__(self, dataset_root='data', data_width=4, targets_width=4, dataset_size=0,\n train=True):\n \"\"\"\n\n Args:\n data_width: dimension of input images\n targets_width: dimension of target images\n dataset_size: number of examples, use for smaller subsets and running locally\n \"\"\"\n super().__init__(dataset_root, download=True, train=train)\n\n original_targets = True\n\n if dataset_size > 0:\n self.data = self.data[:dataset_size, :, :]\n self.targets = self.targets[:dataset_size]\n\n if data_width != 28 or targets_width != 28:\n new_data = np.zeros((self.data.shape[0], data_width, data_width))\n new_targets = np.zeros((self.data.shape[0], targets_width, targets_width))\n for i in range(self.data.shape[0]):\n arr = self.data[i, :].numpy().astype(np.uint8)\n im = Image.fromarray(arr)\n im.thumbnail((data_width, data_width), Image.ANTIALIAS)\n new_data[i, :, :] = np.array(im) / 255\n im = Image.fromarray(arr)\n im.thumbnail((targets_width, targets_width), Image.ANTIALIAS)\n new_targets[i, :, :] = np.array(im) / 255\n self.data = torch.from_numpy(new_data).type(torch.get_default_dtype())\n if not original_targets:\n self.targets = torch.from_numpy(new_targets).type(torch.get_default_dtype())\n else:\n self.data = self.data.type(torch.get_default_dtype()).unsqueeze(1)\n if not original_targets:\n self.targets = self.data\n\n if torch.cuda.is_available():\n self.data, self.targets = self.data.to('cuda'), self.targets.to('cuda')\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n tuple: (image, target) where target is index of the target class.\n \"\"\"\n img, target = self.data[index], self.targets[index]\n\n return img, target\n\n","repo_name":"cybertronai/autograd-lib","sub_path":"autograd_lib/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":9474,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"81"} +{"seq_id":"25442605910","text":"#!/usr/bin/python\n#!python\n\nimport urllib.request\nfrom datetime import date\nimport pandas as pd\n\n\ndef dowload_dataset(urldescarga, pathdestino):\n today = date.today()\n d1 = today.strftime(\"%d/%m/%Y\")\n print('[', d1, ']', 'downloading dataset...')\n\n urllib.request.urlretrieve(urldescarga, pathdestino)\n\ndef datasetPreparement(pathdestino, pathpoblaciones):\n dt = pd.read_csv(pathdestino)\n\n dt['datetime'] = pd.to_datetime(dt['dateRep'], format='%d/%m/%Y')\n dt = dt.set_index('datetime')\n\n dt = dt.iloc[::-1]\n\n dt_poblaciones = pd.read_csv(pathpoblaciones)\n dt = dt.join(dt_poblaciones.set_index('Location'), on='countriesAndTerritories')\n\n #population is in thousands of habitants\n dt['deathsPop'] = dt['deaths'].div(dt['PopTotal']*10)\n dt['casesPop'] = dt['cases'].div(dt['PopTotal']*10)\n\n for i in ['cases', 'deaths', 'deathsPop', 'casesPop']:\n dt[i] = dt[i].abs()\n\n #dt.to_csv(pathdestino, index=True)\n\n return dt\n\ndef datasetCCAA(urldescarga, pathdestino):\n dowload_dataset(urldescarga, pathdestino)\n\n dt = pd.read_csv(pathdestino)\n del (dt['cod_ine'])\n\n CCAA = list(dt['CCAA'])\n fechas = list(dt.columns)[1:]\n dt_filter = pd.DataFrame(columns=['datetime'] + CCAA)\n #del (dt_filter['Total'])\n #CCAA.remove('Total')\n\n dt_filter['datetime'] = fechas\n dt_filter = dt_filter.set_index('datetime')\n\n for i in CCAA:\n datos = list(dt[dt['CCAA'] == i].values)[0][1:]\n dt_filter[i] = datos\n\n dt_poblaciones_ccaa = 
pd.read_csv(r'2915sc.csv', delimiter=';', engine='python')\n\n    for i in CCAA:\n        datos = list(dt_filter[i])\n        poblacion = int(dt_poblaciones_ccaa[dt_poblaciones_ccaa['Place'] == i]['Population'])\n        for j in range(1, len(datos)):\n            datos[-j] = datos[-j] - datos[-j-1]\n        for j in range(len(datos)):\n            datos[j] = datos[j] / poblacion\n        dt_filter[i] = datos\n\n    return dt_filter\n","repo_name":"VicentePerezSoloviev/covid","sub_path":"dataDowload.py","file_name":"dataDowload.py","file_ext":"py","file_size_in_byte":1939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42486228014","text":"# render: applies Python data to a template and converts it to HTML\n# get_object_or_404: raises a 404 error (when the requested page does not exist)\nfrom django.shortcuts import render, get_object_or_404, redirect\n# from django.http import HttpResponse\nfrom django.utils import timezone\nfrom .models import Question, Answer\nfrom .forms import QuestionForm, AnswerForm\n\n# Create your views here.\ndef index(request):\n    """\n    Output the pybo question list\n    """\n    # Question list data; '-create_date' sorts by creation date in reverse order (newest posts first)\n    question_list = Question.objects.order_by('-create_date')\n    context = {'question_list': question_list}\n\n    # 'pybo/question_list.html' -> template -> added in config/settings.py\n    return render(request, 'pybo/question_list.html', context)\n\ndef detail(request, question_id):\n    """\n    Output the pybo question detail\n    """\n    # Turn lookup errors into a 404 error\n    question = get_object_or_404(Question, pk=question_id)\n    context = {'question': question}\n    return render(request, 'pybo/question_detail.html', context)\n\ndef answer_create(request, question_id):\n    """\n    Register a pybo answer\n    """\n    question = get_object_or_404(Question, pk=question_id)\n    # # question.answer_set.create(content=request.POST.get('content'), create_date=timezone.now())\n    # answer =Answer(question=question, content=request.POST.get('content'), create_date=timezone.now())\n    # answer.save()\n    # return redirect('pybo:detail', question_id=question.id)\n    if request.method == \"POST\":\n        form = AnswerForm(request.POST)\n        if form.is_valid():\n            answer = form.save(commit=False)\n            answer.create_date = timezone.now()\n            answer.question = question\n            answer.save()\n            return redirect('pybo:detail', question_id=question.id)\n    else:\n        form = AnswerForm()\n    context = {'question': question, 'form': form}\n    return render(request, 'pybo/question_detail.html', context)\n\ndef question_create(request):\n    """\n    Register a pybo question\n    """\n    if request.method == 'POST':\n        form = QuestionForm(request.POST)\n        if form.is_valid():\n            question = form.save(commit=False)\n            question.create_date = timezone.now()\n            question.save()\n            return redirect('pybo:index')\n    else:\n        form = QuestionForm()\n    context = {'form': form}\n    return render(request, 'pybo/question_form.html', context)","repo_name":"Seungjun-bob/django_project","sub_path":"pybo_project/pybo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24260122173","text":"import csv\nimport inspect\nimport os\nimport os.path\nimport sap_date\nimport confirmation_deleter\nfrom const import CURSOR, RAPORT_CATALOG, db_commit\nfrom pyodbc import DatabaseError, OperationalError, Error\n\n\ndef upload_new_data():\n    print('Refreshing SAP DataBase.')\n    sap_insert_file = os.path.join(RAPORT_CATALOG, \"SAP_INSERT.csv\")\n    with open(sap_insert_file) as file:\n        changed_records = csv.reader(file)\n        for index, record in enumerate(changed_records):\n            if index == 0:\n                if 
''.join(record) == '':\n print(f'SAP_Insert file is empty.')\n break\n if send_record_to_db(record=record):\n print(f'Records sent to database: {index + 1}', end=\"\\r\")\n else:\n print('\\nUnexpected error occurs during updating SAP.')\n return False\n print('\\n', end='\\r')\n return True\n\n\ndef upload_new_items():\n print('Uploading new Items')\n item_insert_file = os.path.join(RAPORT_CATALOG, \"ITEM_INSERT.csv\")\n with open(item_insert_file) as file:\n changed_records = csv.reader(file)\n i = 0\n for record in changed_records:\n if send_item_to_db(record=record):\n i += 1\n print(f'Items sent to database: {i}', end=\"\\r\")\n else:\n break\n print('\\n', end='\\r')\n\n\ndef redate(date):\n if not len(date):\n return 'Null'\n\n month, day, year = date.split('/')\n month = f'{month:0>2}'\n day = f'{day:0>2}'\n new_date = f\"'{year}-{month}-{day}'\"\n return new_date\n\n\ndef send_record_to_db(record):\n table = 'SAP'\n\n if len(record) < 29:\n for i in range(0, (29 - len(record))):\n record.append('')\n\n if record[23] != '':\n so = record[0]\n obiekt = record[1]\n po = record[2]\n start_po = redate(record[3])\n finish_po = redate(record[4])\n ilosc = record[5]\n urzadzenie = record[6]\n brygada = record[7]\n nr_op = record[8]\n operacja = record[9]\n start_op = redate(record[10])\n finish_op = redate(record[11])\n czas_plan = record[12]\n czas_raport = record[13]\n opis = record[14]\n create = redate(record[15])\n planista_0 = record[16]\n ostatnia_zmiana = redate(record[17])\n planista_1 = record[18]\n release_aktualny = redate(record[19])\n release_plan = redate(record[20])\n network = record[21]\n system_status = record[22].split(' ')[-1]\n confirmation = record[23]\n start_po_aktualny = redate(record[24])\n finish_po_aktualny = redate(record[25])\n start_op_aktualny = redate(record[26])\n finish_op_aktualny = redate(record[27])\n urzadzenie_glowne = record[28]\n\n query = f\"Delete from {table} WHERE Confirmation = {confirmation}; \"\\\n f\"Insert into dbo.{table} ([S.O.],[Obiekt],[P.O.],[Start P.O.],[Finish P.O.],[Ilość],[Urządzenie],[Brygada],\" \\\n f\"[Nr Op],[Operacja],[Start Op],[Finish Op],[Czas Plan],[Czas Raport],[Opis],[Create],[Planista 0],\" \\\n f\"[Ostatnia Zmiana],[Planista 1],[Release Aktualny],[Release Plan],[Network],[System Status],\" \\\n f\"[Confirmation],[Start P.O. Aktualny],[Finish P.O. 
Aktualny],[Start Op Aktualny],[Finish Op Aktualny],\" \\\n            f\"[Urządzenie Główne]) \" \\\n            f\"VALUES('{so}','{obiekt}','{po}',{start_po},{finish_po},'{ilosc}','{urzadzenie}','{brygada}',\"\\\n            f\"'{nr_op}','{operacja}',{start_op},{finish_op},'{czas_plan}','{czas_raport}','{opis}',{create},\"\\\n            f\"'{planista_0}',{ostatnia_zmiana},'{planista_1}',{release_aktualny},\"\\\n            f\"{release_plan},'{network}','{system_status}','{confirmation}',{start_po_aktualny},\" \\\n            f\"{finish_po_aktualny},{start_op_aktualny},{finish_op_aktualny},'{urzadzenie_glowne}')\"\n\n        return db_commit(query=query, func_name=inspect.currentframe().f_code.co_name)\n    # no confirmation number in this record: skip it instead of referencing an undefined query\n    return True\n\n\ndef update_status(record):\n\n    table = 'SAP'\n    confirmation = record[0]\n    system_status = record[1].split(' ')[-1]\n\n    query = f\"UPDATE {table} SET [System Status] = '{system_status}' WHERE Confirmation = '{confirmation}';\"\n    # return the commit result so update_system_status() does not stop after the first record\n    return db_commit(query=query, func_name=inspect.currentframe().f_code.co_name)\n\n\ndef send_item_to_db(record):\n\n    table = 'Items'\n    if len(record) < 2:\n        for i in range(0, (2 - len(record))):\n            record.append('')\n\n    if record[1] != '':\n        if record[0] == '0':\n            item = 'NULL'\n        else:\n            item = record[0]\n        prod_order = int(record[1])\n        query = f\"DELETE FROM {table} WHERE Prod_Order = {prod_order}; \"\\\n                f\"INSERT INTO dbo.{table} (Prod_Order, Item) \" \\\n                f\"VALUES({prod_order},{item})\"\n        return db_commit(query=query, func_name=inspect.currentframe().f_code.co_name)\n\n\ndef uploader_checker():\n    sap_insert_path = os.path.join(RAPORT_CATALOG, 'SAP_INSERT.csv')\n    if os.path.exists(sap_insert_path):\n        sap_insert_date = os.path.getmtime(sap_insert_path)\n        query = 'SELECT SAP_Skrypt_Zmiana FROM SAP_data;'\n        if not db_commit(query=query, func_name=inspect.currentframe().f_code.co_name):\n            return False\n\n        result = CURSOR.execute(query)\n\n        for date_time in result:\n            sap_db_date = date_time[0].timestamp()\n            if sap_insert_date > sap_db_date:\n                return True\n\n    return False\n\n\ndef uploader_item_checker():\n    item_insert_path = os.path.join(RAPORT_CATALOG, 'ITEM_INSERT.csv')\n    if os.path.exists(item_insert_path):\n        item_insert_date = os.path.getmtime(item_insert_path)\n\n        query = 'SELECT Item_Data FROM SAP_data;'\n        if not db_commit(query=query, func_name=inspect.currentframe().f_code.co_name):\n            return False\n        result = CURSOR.execute(query)\n\n        for date_time in result:\n            item_db_date = date_time[0].timestamp()\n            if item_insert_date > item_db_date:\n                return True\n    return False\n\n\ndef update_system_status():\n    print('Uploading System status')\n    status_file = os.path.join(RAPORT_CATALOG, \"Status_Update.csv\")\n    with open(status_file) as file:\n        changed_records = csv.reader(file)\n        i = 0\n        for record in changed_records:\n            if update_status(record=record):\n                i += 1\n                print(f'Records sent to database: {i}', end=\"\\r\")\n            else:\n                break\n        print('\\n', end='\\r')\n\n\ndef main():\n    if uploader_checker():\n        if upload_new_data():\n            sap_date.update(column='SAP_Skrypt_zmiana')\n        else:\n            return False\n        confirmation_deleter.delete_confirmation()\n    else:\n        print('No new data uploaded.')\n\n    if uploader_item_checker():\n        upload_new_items()\n        sap_date.update(column='Item_Data')\n        print('New Items uploaded')\n\n    return True\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"ArekFrel/WMS","sub_path":"new_data_uploader.py","file_name":"new_data_uploader.py","file_ext":"py","file_size_in_byte":7031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"38052892730","text":"import sys,os,time,math\nimport 
keyboard\nimport cv2\nimport argparse\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sampleViewer\nimport config\n\n# image processing kernel\nclass MyExperimenting(sampleViewer.SampleViewer):\n\n    def __init__(self, imagesPerCol = 3):\n        super().__init__(imagesPerCol)\n\n    # overriding the abstract method kernel is mandatory\n    def kernel(self):\n        sampleViewer.SampleViewer.kernel(self) # call the parent first\n        \n        # open an image and transform it to the YCrCb color space\n        self.image_read()\n\n        # demo case\n        # put your own code here\n        self.start_watch()\n        YCRImg = cv2.cvtColor(self.imOrigin, cv2.COLOR_BGR2YCR_CB)\n        Y, CR, CB = cv2.split(YCRImg)\n        self.stop_watch(\"cvtColor to YCR_CB\")\n\n        self.append(\"Y\", Y) \n        self.append(\"CR\", CR)\n        self.append(\"CB\", CB)\n\ndef main():\n    viewer = sampleViewer.SampleViewer()\n\n    # argument handler\n    ap = argparse.ArgumentParser()\n    ap.add_argument(\"-d\", \"--data\", required=False,\n        help=\"sample images to be processed\",\n        default=config.DATA_PATH)\n    args = vars(ap.parse_args())\n    \n    # viewer to show image samples\n    viewer = MyExperimenting(imagesPerCol = 4) # replace that with your class\n    data_path = os.path.join(viewer.get_application_path(), args[\"data\"])\n    if os.path.isdir(data_path):\n        print('Data path: ', data_path)\n    else:\n        print('Could not open or find the data directory:', args[\"data\"])\n        exit(0)\n    viewer.set_sample_path(data_path)\n    viewer.print_samples()\n    viewer.show_first()\n    keyboard.on_press(viewer.menu, suppress=False) # Simple user interface\n    loop(viewer, os.path.join(viewer.get_application_path(), config.DEBUG_PATH))\n\ndef loop(viewer, debuggPath = None):\n    \n    # main loop\n    # WARNING: The viewer MUST run in the main(). \n    # Do NOT change anything. It might result in asynchronous program flow, with loss of images in the results list\n    while(viewer.run):\n        #if viewer.changed and viewer.results.len() > 0:\n        if viewer.changed:\n            viewer.changed = False\n            if debuggPath != None:\n                viewer.results.delete_images(debuggPath)\n            viewer.kernel()\n            viewer.results.show(viewer.get_current_info()) # this must be called outside of the main thread\n            viewer.print_watch()\n            if debuggPath != None:\n                viewer.results.save(debuggPath)\n        plt.pause(0.1) # Must be called every loop, otherwise the plots hang !!!\n    \n    # terminate application\n    cv2.destroyAllWindows()\n    plt.close('all')\n    print('bye bye')\n    sys.exit()\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"amuenger/imageViewer","sub_path":"Application.py","file_name":"Application.py","file_ext":"py","file_size_in_byte":2738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"9400903807","text":"import collections\nstudent_course_pairs_1 = [\n    [\"58\", \"Software Design\"],\n    [\"58\", \"Linear Algebra\"],\n    [\"94\", \"Art History\"],\n    [\"94\", \"Operating Systems\"],\n    [\"17\", \"Software Design\"],\n    [\"58\", \"Mechanics\"],\n    [\"58\", \"Economics\"],\n    [\"17\", \"Linear Algebra\"],\n    [\"17\", \"Political Science\"],\n    [\"94\", \"Economics\"],\n    [\"25\", \"Economics\"],\n]\ndef courseOverlaps(studentCoursePairs):\n    if not studentCoursePairs: return\n    student_courses = collections.defaultdict(set)\n    for student, course in studentCoursePairs:\n        student_courses[student].add(course)\n    seen = set()\n    for s1 in student_courses.keys():\n        for s2 in student_courses.keys():\n            if s1 != s2 and (s1, s2) not in seen and (s2, s1) not in seen:\n                seen.add((s1, s2))\n                print('[{}, {}]: {}'.format(s1, s2, list(student_courses[s1] 
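                    # the set intersection below yields the courses this pair of students shares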
& student_courses[s2])))\n\ncourseOverlaps(student_course_pairs_1)\n\n\ndef courseSchedule1(courseRelation):\n allCourses = set()\n pre = collections.defaultdict(set)\n is_pre = collections.defaultdict(set)\n for i, j in courseRelation:\n allCourses.add(i)\n allCourses.add(j)\n is_pre[j].add(i)\n pre[i].add(j)\n res = []\n seen = set()\n dq = collections.deque([i for i in allCourses if not pre[i]])\n while dq:\n cur = dq.popleft()\n if cur in seen: continue\n seen.add(cur)\n res.append(cur)\n for nei in is_pre[cur]:\n pre[nei].remove(cur)\n if not pre[nei]:\n dq.append(nei)\n return res if len(seen) == len(allCourses) else []\n\nprint(courseSchedule1([('A', 'B'), ('B', 'C'), ('C', 'D'), ('D', 'E'), ('Z', 'Y'), ('Z', 'X'), ('Z', 'G')]))\nprint(courseSchedule1([('A', 'B'), ('B', 'A')]))\n\ndef courseHelper(coursesRelation):\n # time: O(V! + E) space: O(V + E)\n allCourses = set()\n pre = collections.defaultdict(set)\n is_pre = collections.defaultdict(set)\n for i, j in coursesRelation:\n allCourses.add(i)\n allCourses.add(j)\n is_pre[j].add(i)\n pre[i].add(j)\n\n res = set()\n\n def dfs(path, seen, course):\n if len(path) == len(allCourses):\n res.add(tuple(path))\n return\n if course in seen or pre[course]:\n return\n mark = set()\n for i in is_pre[course]:\n pre[i].discard(course)\n mark.add(i)\n for i in allCourses - seen:\n dfs(path + [course], seen | {course}, i)\n for i in mark:\n pre[i].add(course)\n\n for i in allCourses:\n dfs([], set(), i)\n\n return res\n\nall_courses = [\n [\"Logic\",\"COBOL\"],\n [\"Data Structures\",\"Algorithms\"],\n [\"Creative Writing\",\"Data Structures\"],\n [\"Algorithms\",\"COBOL\"],\n [\"Intro to Computer Science\",\"Data Structures\"],\n [\"Logic\",\"Compilers\"],\n [\"Data Structures\",\"Logic\"],\n [\"Creative Writing\",\"System Administration\"],\n [\"Databases\",\"System Administration\"],\n [\"Creative Writing\",\"Databases\"]\n]\nall_courses2 = []\nfor c in all_courses:\n all_courses2.append(c[::-1])\nfor x in courseHelper(all_courses2):\n print(len(x))\n\nprint({x[len(x) // 2] for x in courseHelper(all_courses2)})\n\n\n\n","repo_name":"Jason003/interview","sub_path":"intuit/courses.py","file_name":"courses.py","file_ext":"py","file_size_in_byte":3166,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"4138653350","text":"import argparse\nfrom socket import TCP_NODELAY\nimport sys\nfrom tkinter import messagebox\nimport pandas as pd\nfrom MyWidget import MyWidget\nfrom PySide6.QtCore import QDateTime, QTimeZone\n\n#JC\nfrom PySide6.QtWidgets import QMessageBox\n\nfrom PySide6.QtWidgets import QApplication \nfrom datetime import datetime\nfrom dateutil.parser import parse\nfrom dateutil.tz import gettz\n\nclass getResults(object):\n \"\"\"description of class\"\"\"\n def __init__(self):\n pass\n def date(self, utc):\n d=utc[0:10]\n utc_fmt = \"yyyy-MM-dd\"\n\n new_date = QDateTime().fromString(d, utc_fmt)\n tes=new_date.toString()\n return new_date \n\n #JC\n def showdialog(self, text):\n msg = QMessageBox()\n msg.setIcon(QMessageBox.Warning)\n\n msg.setText(text)\n msg.setWindowTitle(\"Warning\")\n msg.setStandardButtons(QMessageBox.Ok)\n retval = msg.exec_()\n print(\"value of pressed message box button:\", retval)\n\n \n def read_data(self, fname):\n # Read the CSV content\n df = pd.read_csv(fname)\n \n #JC\n if df.empty :\n print('Warning! 
DataFrame is Empty\\n')\n self.showdialog('DataFrame Empty!')\n return ([],[])\n\n\n # Remove wrong magnitudes\n df['date'] = df['date'].str[:10]\n df= df.groupby('date', as_index=False).mean()\n dates = df[\"date\"].apply(lambda x: self.date(x))\n sentiment=df[\"scale\"].apply(lambda x: round(x,3))\n # My local timezone\n\n # Get timestamp transformed to our timezone\n\n return dates, sentiment","repo_name":"JeffChanSZ/SEPA---Group-16","sub_path":"SentimentAnalysisPrototype/getResults.py","file_name":"getResults.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18226040693","text":"import sys\nsys.stdin=open(\"input.txt\",\"r\")\n\ndef bi():\n for i in range(1,(1<n+1:\n total-=save\n result.append(total)\n else:\n result.append(total)\n\nn=int(input())\n\ninp=[[] for _ in range(n)]\nfor j in range(n):\n inp[j]+=list(map(int,input().split()))\nresult=[]\nbi()\nprint(result)\nprint(max(result))","repo_name":"HorangApple/TIL","sub_path":"Algorithm/Baekjoon/푸는 중/14501_퇴사.py","file_name":"14501_퇴사.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7750244657","text":"import numpy as np\nfrom keras.models import Sequential, load_model\nfrom keras.layers.core import Dense, Activation, Dropout\nfrom keras.layers.recurrent import LSTM\nfrom keras.callbacks import History, ModelCheckpoint\nfrom keras.optimizers import RMSprop\n\nfrom corpus import build_corpus\n\n\n\ndef train_model(midi_files, save_path, model_path=None, step_size=3, phrase_len=20, layer_size=128, batch_size=128, nb_epoch=1):\n\n melody_corpus, melody_set, notes_indices, indices_notes = build_corpus(midi_files)\n\n corpus_size = len(melody_set)\n\n # cut the corpus into semi-redundant sequences of max_len values\n # step_size = 3\n # phrase_len = 20\n phrases = []\n next_notes = []\n for i in range(0, len(melody_corpus) - phrase_len, step_size):\n phrases.append(melody_corpus[i: i + phrase_len])\n next_notes.append(melody_corpus[i + phrase_len])\n print('nb sequences:', len(phrases))\n\n # transform data into binary matrices\n X = np.zeros((len(phrases), phrase_len, corpus_size), dtype=np.bool)\n y = np.zeros((len(phrases), corpus_size), dtype=np.bool)\n for i, phrase in enumerate(phrases):\n for j, note in enumerate(phrase):\n X[i, j, notes_indices[note]] = 1\n y[i, notes_indices[next_notes[i]]] = 1\n if model_path is None:\n model = Sequential()\n model.add(LSTM(layer_size, return_sequences=True, input_shape=(phrase_len, corpus_size)))\n model.add(Dropout(0.2))\n model.add(LSTM(layer_size, return_sequences=False))\n model.add(Dropout(0.2))\n model.add(Dense(corpus_size))\n model.add(Activation('softmax'))\n\n model.compile(loss='categorical_crossentropy', optimizer=RMSprop())\n\n else:\n model = load_model(model_path)\n\n checkpoint = ModelCheckpoint(filepath=save_path,\n verbose=1, save_best_only=False)\n history = History()\n model.fit(X, y, batch_size=batch_size, nb_epoch=nb_epoch, callbacks=[checkpoint, history])\n\n return model, melody_corpus, melody_set, notes_indices, indices_notes\n","repo_name":"naoyak/JohaNN","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"} +{"seq_id":"41309802167","text":"\"\"\"Test Data attributes.\"\"\"\n\nimport pytest # noqa: F401\n\nfrom .data_attributes 
import DataAttributes\nfrom .attribute_value import AttributeValue\n\n\ndef test_data_attributes_constructor():\n \"\"\"Test the basic constructor.\"\"\"\n actual = DataAttributes()\n assert isinstance(actual, DataAttributes)\n\n\ndef test_data_attributes_load_raw():\n \"\"\"Test data_attributes_load_raw.\"\"\"\n raw_list = [\n {\n \"attribute\": \"https://example.com/attr/Classification/value/TS\",\n \"displayName\": \"does not matter\",\n }\n ]\n da = DataAttributes()\n da.load_raw(raw_list)\n actual = da.get(\"https://example.com/attr/Classification/value/TS\")\n assert isinstance(actual, AttributeValue)\n assert actual.namespace == \"https://example.com/attr/Classification\"\n assert actual.value == \"TS\"\n\n\ndef test_data_attributes_export_raw():\n \"\"\"Test data_attributes_export_raw.\"\"\"\n expected = [{\"attribute\": \"https://example.com/attr/Classification/value/TS\"}]\n da = DataAttributes()\n da.load_raw(expected)\n # Note - order is not preserved, so comparing actual to expected is\n # a little difficult for multiple attributes. Hence one item.\n actual = da.export_raw()\n assert actual == expected\n","repo_name":"opentdf/backend","sub_path":"containers/kas/kas_core/tdf3_kas_core/models/attributes/data_attributes_test.py","file_name":"data_attributes_test.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"81"} +{"seq_id":"41261392927","text":"# -*- coding: utf-8 -*-\n# Created on Mon Mar 11 2019 22:17:22\n# Author: WuLC\n# EMail: liangchaowu5@gmail.com\n\n# greedy, O(nlogn) time, O(1) space\n# sort and turn negative numbers to positive numbers as many as possible\n\nclass Solution(object):\n def largestSumAfterKNegations(self, A, K):\n \"\"\"\n :type A: List[int]\n :type K: int\n :rtype: int\n \"\"\"\n A.sort()\n idx = -1\n for i in xrange(len(A)):\n if A[i] >= 0:\n idx = i\n break\n result = 0\n if idx < 0 or idx >= K:\n for num in A:\n if K > 0:\n num *= -1\n K -= 1\n result += num\n else:\n left = (K - idx)&1\n for i in xrange(len(A)):\n if i == idx and left > 0:\n if i > 0 and A[i-1]*-1 < A[i]:\n result += 2*A[i-1]\n else:\n result += -2*A[i]\n left -= 1\n result += -1*A[i] if A[i]<0 else A[i]\n return result\n\n","repo_name":"WuLC/LeetCode","sub_path":"Algorithm/Python/1000+/1005. Maximize Sum Of Array After K Negations.py","file_name":"1005. 
Maximize Sum Of Array After K Negations.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"81"}
+{"seq_id":"2376969659","text":"# Python 2 stdlib modules used below; these imports were missing from the original snippet\nimport cookielib\nimport mimetools\nimport mimetypes\nimport urllib2\n\ncj = cookielib.CookieJar()\nopener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))\nurllib2.install_opener(opener)\n\ndef post_multipart(host, selector, fields, files):\n    \"\"\"\n    Post fields and files to an http host as multipart/form-data.\n    fields is a sequence of (name, value) elements for regular form fields.\n    files is a sequence of (name, filename, value) elements for data to be uploaded as files\n    Return the server's response page.\n    \"\"\"\n    content_type, body = encode_multipart_formdata(fields, files)\n    headers = {'Content-Type': content_type,\n               'Content-Length': str(len(body))}\n    r = urllib2.Request(\"http://%s%s\" % (host, selector), body, headers)\n    return urllib2.urlopen(r).read()\n\ndef encode_multipart_formdata(fields, files):\n    \"\"\"\n    fields is a sequence of (name, value) elements for regular form fields.\n    files is a sequence of (name, filename, value) elements for data to be uploaded as files\n    Return (content_type, body) ready for httplib.HTTP instance\n    \"\"\"\n    BOUNDARY = mimetools.choose_boundary()\n    CRLF = '\\r\\n'\n    L = []\n    for (key, value) in fields:\n        L.append('--' + BOUNDARY)\n        L.append('Content-Disposition: form-data; name=\"%s\"' % key)\n        L.append('')\n        L.append(value)\n    for (key, filename, value) in files:\n        L.append('--' + BOUNDARY)\n        L.append('Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' % (key, filename))\n        L.append('Content-Type: %s' % get_content_type(filename))\n        L.append('')\n        L.append(value)\n    L.append('--' + BOUNDARY + '--')\n    L.append('')\n    body = CRLF.join(L)\n    content_type = 'multipart/form-data; boundary=%s' % BOUNDARY\n    return content_type, body\n\ndef get_content_type(filename):\n    return mimetypes.guess_type(filename)[0] or 'application/octet-stream'\n","repo_name":"sethc23/BD_Scripts","sub_path":"html/HTMLPost_cookies.py","file_name":"HTMLPost_cookies.py","file_ext":"py","file_size_in_byte":1842,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"17691852508","text":"import os\nfrom pathlib import Path\n\nimport bokeh\nfrom bokeh.models import ColumnDataSource, LinearColorMapper, ColorBar\nfrom bokeh.layouts import column\n\nimport PIL\n\nimport numpy as np\nimport pandas as pd\nfrom pybaselines import Baseline\nfrom rsciio.renishaw import file_reader\nfrom scipy.signal import medfilt\n\nfrom pydatalab.blocks.base import DataBlock\nfrom pydatalab.bokeh_plots import mytheme, selectable_axes_plot\nfrom pydatalab.file_utils import get_file_info_by_id\n\n\nclass RamanMapBlock(DataBlock):\n    blocktype = \"raman_map\"\n    description = \"Raman spectroscopy map\"\n    accepted_file_extensions = (\".wdf\",)  # trailing comma: a 1-tuple, not a bare string\n\n    @property\n    def plot_functions(self):\n        return (self.generate_raman_map_plot,)\n\n\n    @classmethod\n    def get_map_data(cls, location: Path | str):\n        \"\"\"Read the .wdf file with RosettaSciIO and extract relevant \n        data.\n\n        Parameters:\n            location: The location of the file to read.\n\n        Returns:\n            A tuple of (raman_shift, intensity data, metadata).\n        \"\"\"\n\n        raman_data = file_reader(location)\n\n        if len(raman_data[0][\"axes\"]) == 3:\n            pass\n        elif len(raman_data[0][\"axes\"]) == 1:\n            raise RuntimeError(\"This block is for 2D Raman data, not 1D\")\n        else:\n            raise RuntimeError(\"Data is not compatible with 1D or 2D Raman data.\")\n\n        for dictionary in 
raman_data[0]['axes']:\n if dictionary[\"name\"] == \"Raman Shift\":\n raman_shift = []\n for i in range(int(dictionary[\"size\"])):\n raman_shift.append(float(dictionary['offset']) + float(dictionary[\"scale\"])*i)\n return np.array(raman_shift), raman_data[0]['data'], raman_data[0]['metadata']\n\n def plot_raman_map(self, location: str | Path):\n data = file_reader(location)\n raman_shift, intensity_data, metadata = self.get_map_data(location)\n x_coordinates = []\n # gets the size, point spacing and original offset of x-axis\n # check for origin\n size_x = data[0]['original_metadata']['WMAP_0']['size_xyz'][0]\n scale_x = data[0]['original_metadata']['WMAP_0']['scale_xyz'][0]\n offset_x = data[0]['original_metadata']['WMAP_0']['offset_xyz'][0]\n # generates x-coordinates\n for i in range(size_x):\n x_coordinates.append(i*scale_x+offset_x)\n y_coordinates = []\n # gets the size, point spacing and original offset of x-axis\n size_y = data[0]['original_metadata']['WMAP_0']['size_xyz'][1]\n scale_y = data[0]['original_metadata']['WMAP_0']['scale_xyz'][1]\n offset_y = data[0]['original_metadata']['WMAP_0']['offset_xyz'][1]\n # generates y-coordinates\n for i in range(size_y):\n y_coordinates.append(i*scale_y+offset_y)\n \n coordinate_pairs = []\n for y in y_coordinates:\n for x in x_coordinates:\n coordinate_pairs.append((x,y))\n \n # extracts image and gets relevant data\n image_data = data[0][\"original_metadata\"][\"WHTL_0\"][\"image\"]\n image = PIL.Image.open(image_data)\n origin = data[0][\"original_metadata\"][\"WHTL_0\"][\"FocalPlaneXYOrigins\"]\n origin = [float(origin[0]), float(origin[1])]\n x_span = float(data[0][\"original_metadata\"][\"WHTL_0\"][\"FocalPlaneXResolution\"])\n y_span = float(data[0][\"original_metadata\"][\"WHTL_0\"][\"FocalPlaneYResolution\"])\n \n # converts image to vector compatible with bokeh\n image_data = np.array(image, dtype=np.uint8)\n image_data = np.dstack((image_data, 255 * np.ones_like(image_data[:, :, 0])))\n img_vector = image_data.view(dtype=np.uint32).reshape((image_data.shape[0], image_data.shape[1]))\n \n # generates numbers for colours for points in linear gradient\n col = [i/(len(x_coordinates)*len(y_coordinates)) for i in range(len(x_coordinates)*len(y_coordinates))]\n\n # links x- and y-coordinates with colour numbers\n source = ColumnDataSource(data={'x': [pair[0] for pair in coordinate_pairs], 'y': [pair[1] for pair in coordinate_pairs],\n 'col': col})\n # gemerates colormap for coloured scatter poitns\n exp_cmap = LinearColorMapper(palette=\"Turbo256\", \n low = min(col), \n high = max(col))\n\n # generates image figure and plots image\n from pathlib import Path\n bokeh.plotting.output_file(Path(__file__).parent/\"plot.html\")\n p = bokeh.plotting.figure(width=image_data.shape[1], height=image_data.shape[0], x_range=(origin[0], origin[0] + x_span), y_range=(origin[1]+y_span,origin[1]))\n p.image_rgba(image=[img_vector], x=origin[0], y=origin[1], dw=x_span, dh=y_span)\n # plot scatter points and colorbar\n p.circle('x','y', size=10, source = source,\n color={\"field\":\"col\", \"transform\":exp_cmap})\n color_bar = ColorBar(color_mapper=exp_cmap, label_standoff=12, border_line_color=None, location=(0,0))\n p.add_layout(color_bar, 'right')\n bokeh.plotting.save(p)\n # return color numbers to use in spectra plotting and returns figure object\n \n bokeh.plotting.output_file(Path(__file__).parent/\"plot.html\")\n return col, p, metadata\n \n def plot_raman_spectra(self, location: str | Path, col):\n p = bokeh.plotting.figure(width=800, 
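            # a single figure holds every spectrum in the map (drawn below with multi_line)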
height=400, x_axis_label='Raman Shift (cm-1)', y_axis_label='Intensity (a.u.)')\n raman_shift, intensity_data, metadata = self.get_map_data(location)\n print(intensity_data)\n intensity_list = []\n def generate_baseline(x_data, y_data):\n baseline_fitter = Baseline(x_data = x_data)\n baseline = baseline_fitter.mor(y_data, half_window=30)[0]\n return baseline\n for i in range(intensity_data.shape[0]):\n for j in range(intensity_data.shape[1]):\n intensity_spectrum = intensity_data[i, j, :]\n # want to make optional but will leave for now\n baseline = generate_baseline(raman_shift, intensity_spectrum)\n intensity_spectrum = intensity_spectrum - baseline\n # if normalised:\n # intensity_data = intensity_data/np.max(intensity_data)\n intensity_list.append(intensity_spectrum)\n \n # generates colorbar\n source = ColumnDataSource(data={'x': [raman_shift]*len(intensity_list), 'y': intensity_list,\n 'col' : col})\n exp_cmap = LinearColorMapper(palette=\"Turbo256\", \n low = min(col), \n high = max(col))\n # plots spectra\n p.multi_line('x','y', line_width=0.5, source = source,color={\"field\":\"col\", \"transform\":exp_cmap})\n return p\n\n def generate_raman_map_plot(self):\n file_info = None\n pattern_dfs = None\n\n if \"file_id\" not in self.data:\n return None\n\n else:\n file_info = get_file_info_by_id(self.data[\"file_id\"], update_if_live=True)\n ext = os.path.splitext(file_info[\"location\"].split(\"/\")[-1])[-1].lower()\n if ext not in self.accepted_file_extensions:\n raise RuntimeError(\n \"RamanBlock.generate_raman_plot(): Unsupported file extension (must be one of %s), not %s\",\n self.accepted_file_extensions,\n ext, \n )\n col, p1, metadata = self.plot_raman_map(file_info[\"location\"])\n p2 = self.plot_raman_spectra(file_info[\"location\"],col = col)\n self.data[\"bokeh_plot_data\"] = bokeh.embed.json_item(column(p1,p2))\n \n","repo_name":"elbee99/datalab","sub_path":"pydatalab/pydatalab/apps/raman_map/blocks.py","file_name":"blocks.py","file_ext":"py","file_size_in_byte":7739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"34383014505","text":"from threading import Timer\nfrom pythonosc import udp_client, osc_server, dispatcher\nimport math, asyncio, threading\nfrom config import osc_config, config_struct\nimport traceback\nimport logging\nfrom tinyoscquery.queryservice import OSCQueryService\nfrom tinyoscquery.utility import get_open_tcp_port, get_open_udp_port, check_if_tcp_port_open, check_if_udp_port_open\n\nlog = logging.getLogger(__name__)\n\n\nclass OscHandler:\n\tdef __init__(self, conf: config_struct, config_osc: osc_config):\n\t\tself.config: config_struct = conf\n\t\tself.config_osc: osc_config = config_osc\n\t\tself.isactive = False\n\n\t\tself.osc_enable_server = self.config_osc.server_port >= 0 # Used to improve sync with in-game avatar and autodetect sync parameter count used for the avatar.\n\t\tself.osc_server_ip = self.config_osc.ip # OSC server IP to listen too\n\t\tself.default_osc_server_port = self.config_osc.server_port\n\t\tself.osc_server_port = self.config_osc.server_port # OSC network port for recieving messages\n\t\tself.http_port = self.config_osc.http_port # HTTP port for OSCQuery\n\t\tself.osc_ip = self.config_osc.ip # OSC server IP to send too\n\t\tself.osc_port = self.config_osc.client_port # OSC network port for sending messages\n\n\t\tself.osc_delay: float = 0.25 # Delay between network updates in seconds. 
Setting this too low will cause issues.\n\t\tself.osc_chatbox_delay: float = 1.25 # Delay between chatbox updates in seconds. Setting this too low will cause issues.\n\t\tself.sync_params: int = 16 # Default sync parameters. This is automatically updated if the OSC server is enabled.\n\n\t\tself.line_length: int = 32 # Characters per line of text\n\t\tself.line_count: int = 4 # Maximum lines of text\n\n\t\tself.kat_charlimit: int = 128 # Maximum length of text for KAT\n\t\tself.textbox_charlimit: int = 144 # Maximum length of text for textbox\n\t\tself.sync_params_max: int = 16 # Maximum sync parameters\n\t\tself.sync_params_last: int = self.sync_params # Last detected sync parameters\n\n\t\tself.pointer_count: int = int(self.kat_charlimit / self.sync_params)\n\t\tself.pointer_clear: int = 255\n\t\tself.pointer_index_resync: int = 0\n\n\t\tself.sync_params_test_char_value: int = 97 # Character value to use when testing sync parameters\n\n\t\tself.param_visible: str = \"KAT_Visible\"\n\t\tself.param_pointer: str = \"KAT_Pointer\"\n\t\tself.param_sync: str = \"KAT_CharSync\"\n\n\t\t\n\t\tself.osc_parameter_prefix: str = \"/avatar/parameters/\"\n\t\tself.osc_use_kat_path: str = self.osc_parameter_prefix + \"use_kat\"\n\t\tself.osc_use_textbox_path: str = self.osc_parameter_prefix + \"use_textbox\"\n\t\tself.osc_use_both_path: str = self.osc_parameter_prefix + \"use_both\"\n\t\tself.osc_stt_mode_path: str = self.osc_parameter_prefix + \"stt_mode\"\n\t\tself.osc_avatar_change_path: str = \"/avatar/change\"\n\t\tself.osc_chatbox_path: str = \"/chatbox/input\"\n\t\tself.osc_chatbox_typing_path = \"/chatbox/typing\"\n\t\tself.osc_parameter_listening = self.osc_parameter_prefix + \"stt_listening\"\n\t\tself.last_chatbox_text: str = \"\"\n\t\tself.osc_text: str = \"\"\n\t\tself.kat_target_text: str = \"\"\n\t\tself.textbox_target_text: str = \"\"\n\n\t\tself.invalid_char: str = \"?\" # character used to replace invalid characters\n\n\t\tself.keys = {\n\t\t\t\" \": 0,\n\t\t\t\"!\": 1,\n\t\t\t\"\\\"\": 2,\n\t\t\t\"#\": 3,\n\t\t\t\"$\": 4,\n\t\t\t\"%\": 5,\n\t\t\t\"&\": 6,\n\t\t\t\"'\": 7,\n\t\t\t\"(\": 8,\n\t\t\t\")\": 9,\n\t\t\t\"*\": 10,\n\t\t\t\"+\": 11,\n\t\t\t\",\": 12,\n\t\t\t\"-\": 13,\n\t\t\t\".\": 14,\n\t\t\t\"/\": 15,\n\t\t\t\"0\": 16,\n\t\t\t\"1\": 17,\n\t\t\t\"2\": 18,\n\t\t\t\"3\": 19,\n\t\t\t\"4\": 20,\n\t\t\t\"5\": 21,\n\t\t\t\"6\": 22,\n\t\t\t\"7\": 23,\n\t\t\t\"8\": 24,\n\t\t\t\"9\": 25,\n\t\t\t\":\": 26,\n\t\t\t\";\": 27,\n\t\t\t\"<\": 28,\n\t\t\t\"=\": 29,\n\t\t\t\">\": 30,\n\t\t\t\"?\": 31,\n\t\t\t\"@\": 32,\n\t\t\t\"A\": 33,\n\t\t\t\"B\": 34,\n\t\t\t\"C\": 35,\n\t\t\t\"D\": 36,\n\t\t\t\"E\": 37,\n\t\t\t\"F\": 38,\n\t\t\t\"G\": 39,\n\t\t\t\"H\": 40,\n\t\t\t\"I\": 41,\n\t\t\t\"J\": 42,\n\t\t\t\"K\": 43,\n\t\t\t\"L\": 44,\n\t\t\t\"M\": 45,\n\t\t\t\"N\": 46,\n\t\t\t\"O\": 47,\n\t\t\t\"P\": 48,\n\t\t\t\"Q\": 49,\n\t\t\t\"R\": 50,\n\t\t\t\"S\": 51,\n\t\t\t\"T\": 52,\n\t\t\t\"U\": 53,\n\t\t\t\"V\": 54,\n\t\t\t\"W\": 55,\n\t\t\t\"X\": 56,\n\t\t\t\"Y\": 57,\n\t\t\t\"Z\": 58,\n\t\t\t\"[\": 59,\n\t\t\t\"\\\\\": 60,\n\t\t\t\"]\": 61,\n\t\t\t\"^\": 62,\n\t\t\t\"_\": 63,\n\t\t\t\"`\": 64,\n\t\t\t\"a\": 65,\n\t\t\t\"b\": 66,\n\t\t\t\"c\": 67,\n\t\t\t\"d\": 68,\n\t\t\t\"e\": 69,\n\t\t\t\"f\": 70,\n\t\t\t\"g\": 71,\n\t\t\t\"h\": 72,\n\t\t\t\"i\": 73,\n\t\t\t\"j\": 74,\n\t\t\t\"k\": 75,\n\t\t\t\"l\": 76,\n\t\t\t\"m\": 77,\n\t\t\t\"n\": 78,\n\t\t\t\"o\": 79,\n\t\t\t\"p\": 80,\n\t\t\t\"q\": 81,\n\t\t\t\"r\": 82,\n\t\t\t\"s\": 83,\n\t\t\t\"t\": 84,\n\t\t\t\"u\": 85,\n\t\t\t\"v\": 86,\n\t\t\t\"w\": 
87,\n\t\t\t\"x\": 88,\n\t\t\t\"y\": 89,\n\t\t\t\"z\": 90,\n\t\t\t\"{\": 91,\n\t\t\t\"|\": 92,\n\t\t\t\"}\": 93,\n\t\t\t\"~\": 94,\n\t\t\t\"€\": 95,\n\t\t\t\"À\": 96,\n\t\t\t\"Á\": 97,\n\t\t\t\"Â\": 98,\n\t\t\t\"Ã\": 99,\n\t\t\t\"Ä\": 100,\n\t\t\t\"Å\": 101,\n\t\t\t\"Æ\": 102,\n\t\t\t\"Ç\": 103,\n\t\t\t\"È\": 104,\n\t\t\t\"É\": 105,\n\t\t\t\"Ê\": 106,\n\t\t\t\"Ë\": 107,\n\t\t\t\"Ì\": 108,\n\t\t\t\"Í\": 109,\n\t\t\t\"Î\": 110,\n\t\t\t\"Ï\": 111,\n\t\t\t\"Ð\": 112,\n\t\t\t\"Ñ\": 113,\n\t\t\t\"Ò\": 114,\n\t\t\t\"Ó\": 115,\n\t\t\t\"Ô\": 116,\n\t\t\t\"Õ\": 117,\n\t\t\t\"Ö\": 118,\n\t\t\t\"×\": 119,\n\t\t\t\"Ø\": 120,\n\t\t\t\"Ù\": 121,\n\t\t\t\"Ú\": 122,\n\t\t\t\"Û\": 123,\n\t\t\t\"Ü\": 124,\n\t\t\t\"Ý\": 125,\n\t\t\t\"Þ\": 126,\n\t\t\t\"ぬ\": 127,\n\t\t\t\"ふ\": 129,\n\t\t\t\"あ\": 130,\n\t\t\t\"う\": 131,\n\t\t\t\"え\": 132,\n\t\t\t\"お\": 133,\n\t\t\t\"や\": 134,\n\t\t\t\"ゆ\": 135,\n\t\t\t\"よ\": 136,\n\t\t\t\"わ\": 137,\n\t\t\t\"を\": 138,\n\t\t\t\"ほ\": 139,\n\t\t\t\"へ\": 140,\n\t\t\t\"た\": 141,\n\t\t\t\"て\": 142,\n\t\t\t\"い\": 143,\n\t\t\t\"す\": 144,\n\t\t\t\"か\": 145,\n\t\t\t\"ん\": 146,\n\t\t\t\"な\": 147,\n\t\t\t\"に\": 148,\n\t\t\t\"ら\": 149,\n\t\t\t\"せ\": 150,\n\t\t\t\"ち\": 151,\n\t\t\t\"と\": 152,\n\t\t\t\"し\": 153,\n\t\t\t\"は\": 154,\n\t\t\t\"き\": 155,\n\t\t\t\"く\": 156,\n\t\t\t\"ま\": 157,\n\t\t\t\"の\": 158,\n\t\t\t\"り\": 159,\n\t\t\t\"れ\": 160,\n\t\t\t\"け\": 161,\n\t\t\t\"む\": 162,\n\t\t\t\"つ\": 163,\n\t\t\t\"さ\": 164,\n\t\t\t\"そ\": 165,\n\t\t\t\"ひ\": 166,\n\t\t\t\"こ\": 167,\n\t\t\t\"み\": 168,\n\t\t\t\"も\": 169,\n\t\t\t\"ね\": 170,\n\t\t\t\"る\": 171,\n\t\t\t\"め\": 172,\n\t\t\t\"ろ\": 173,\n\t\t\t\"。\": 174,\n\t\t\t\"ぶ\": 175,\n\t\t\t\"ぷ\": 176,\n\t\t\t\"ぼ\": 177,\n\t\t\t\"ぽ\": 178,\n\t\t\t\"べ\": 179,\n\t\t\t\"ぺ\": 180,\n\t\t\t\"だ\": 181,\n\t\t\t\"で\": 182,\n\t\t\t\"ず\": 183,\n\t\t\t\"が\": 184,\n\t\t\t\"ぜ\": 185,\n\t\t\t\"ぢ\": 186,\n\t\t\t\"ど\": 187,\n\t\t\t\"じ\": 188,\n\t\t\t\"ば\": 189,\n\t\t\t\"ぱ\": 190,\n\t\t\t\"ぎ\": 191,\n\t\t\t\"ぐ\": 192,\n\t\t\t\"げ\": 193,\n\t\t\t\"づ\": 194,\n\t\t\t\"ざ\": 195,\n\t\t\t\"ぞ\": 196,\n\t\t\t\"び\": 197,\n\t\t\t\"ぴ\": 198,\n\t\t\t\"ご\": 199,\n\t\t\t\"ぁ\": 200,\n\t\t\t\"ぃ\": 201,\n\t\t\t\"ぅ\": 202,\n\t\t\t\"ぇ\": 203,\n\t\t\t\"ぉ\": 204,\n\t\t\t\"ゃ\": 205,\n\t\t\t\"ゅ\": 206,\n\t\t\t\"ょ\": 207,\n\t\t\t\"ヌ\": 208,\n\t\t\t\"フ\": 209,\n\t\t\t\"ア\": 210,\n\t\t\t\"ウ\": 211,\n\t\t\t\"エ\": 212,\n\t\t\t\"オ\": 213,\n\t\t\t\"ヤ\": 214,\n\t\t\t\"ユ\": 215,\n\t\t\t\"ヨ\": 216,\n\t\t\t\"ワ\": 217,\n\t\t\t\"ヲ\": 218,\n\t\t\t\"ホ\": 219,\n\t\t\t\"ヘ\": 220,\n\t\t\t\"タ\": 221,\n\t\t\t\"テ\": 222,\n\t\t\t\"イ\": 223,\n\t\t\t\"ス\": 224,\n\t\t\t\"カ\": 225,\n\t\t\t\"ン\": 226,\n\t\t\t\"ナ\": 227,\n\t\t\t\"ニ\": 228,\n\t\t\t\"ラ\": 229,\n\t\t\t\"セ\": 230,\n\t\t\t\"チ\": 231,\n\t\t\t\"ト\": 232,\n\t\t\t\"シ\": 233,\n\t\t\t\"ハ\": 234,\n\t\t\t\"キ\": 235,\n\t\t\t\"ク\": 236,\n\t\t\t\"マ\": 237,\n\t\t\t\"ノ\": 238,\n\t\t\t\"リ\": 239,\n\t\t\t\"レ\": 240,\n\t\t\t\"ケ\": 241,\n\t\t\t\"ム\": 242,\n\t\t\t\"ツ\": 243,\n\t\t\t\"サ\": 244,\n\t\t\t\"ソ\": 245,\n\t\t\t\"ヒ\": 246,\n\t\t\t\"コ\": 247,\n\t\t\t\"ミ\": 248,\n\t\t\t\"モ\": 249,\n\t\t\t\"ネ\": 250,\n\t\t\t\"ル\": 251,\n\t\t\t\"メ\": 252,\n\t\t\t\"ロ\": 253,\n\t\t\t\"〝\": 254,\n\t\t\t\"°\": 255\n\t\t}\n\n\t\tself.emote_keys = dict()\n\t\ti = 0\n\t\ttmp = \"\"\n\t\tfor key, value in self.keys.items():\n\t\t\tif value >= 96:\n\t\t\t\tif value % 2 == 0:\n\t\t\t\t\ttmp = str(key)\n\t\t\t\telse:\n\t\t\t\t\ttmp = tmp + str(key)\n\t\t\t\t\tself.emote_keys[i] = tmp\n\t\t\t\t\ttmp = \"\"\n\t\t\t\t\ti = i + 1\n\t\t\t\t\n\t\t# Character to use in place of unknown 
characters\n\t\tself.invalid_char_value: int = self.keys.get(self.invalid_char, 0)\n\n\t\t# --------------\n\t\t# OSC Setup\n\t\t# --------------\n\n\t\t# Setup OSC Chatbox\n\t\tself.osc_chatbox_timer = RepeatedTimer(self.osc_chatbox_delay, self.osc_chatbox_loop)\n\n\t\t# Setup OSC Client\n\t\tself.osc_client = udp_client.SimpleUDPClient(self.osc_ip, self.osc_port)\n\t\tself.osc_timer = RepeatedTimer(self.osc_delay, self.osc_timer_loop)\n\n\t\tif config_osc.use_kat:\n\t\t\tself.osc_client.send_message(self.osc_parameter_prefix + self.param_pointer, 255) # Clear KAT text\n\t\t\tfor value in range(self.sync_params):\n\t\t\t\tself.osc_client.send_message(self.osc_parameter_prefix + self.param_sync + str(value), 0.0) # Reset KAT characters sync\n\n\t\t# Setup OSC Server\n\t\tself.oscqs: OSCQueryService = None\n\t\tself.osc_server: osc_server.ThreadingOSCUDPServer = None\n\t\tself.osc_server_test_step: int = 0\n\t\tself.osc_dispatcher: dispatcher.Dispatcher = None\n\n\t\tif self.osc_enable_server:\n\t\t\tlog.info(\"OSC Server enabled, autodetecting sync parameters\")\n\t\t\tself.osc_start_server()\n\t\telse:\n\t\t\tlog.info(\"OSC Server disabled.\")\n\n\t\t# Start timer loop\n\t\tself.osc_chatbox_timer.start()\n\t\tself.osc_timer.start()\n\n\t# Starts the OSC Server\n\tdef osc_start_server(self):\n\t\tif self.osc_server == None:\n\t\t\ttry:\n\t\t\t\tself.osc_server_test_step = 1\n\n\t\t\t\tif self.osc_server_port != 9001:\n\t\t\t\t\tlog.info(\"OSC Server port is not default, testing port availability and advertising OSCQuery endpoints\")\n\t\t\t\t\tif self.osc_server_port <= 0 or not check_if_udp_port_open(self.osc_server_port):\n\t\t\t\t\t\tself.osc_server_port = get_open_udp_port()\n\t\t\t\t\tif self.http_port <= 0 or not check_if_tcp_port_open(self.http_port):\n\t\t\t\t\t\tself.http_port = self.osc_server_port if check_if_tcp_port_open(self.osc_server_port) else get_open_tcp_port()\n\t\t\t\telse:\n\t\t\t\t\tlog.info(\"OSC Server port is default.\")\n\n\t\t\t\tself.osc_dispatcher = dispatcher.Dispatcher()\n\t\t\t\tself.osc_dispatcher.map(self.osc_parameter_prefix + self.param_sync + \"*\", self.osc_server_handler_char)\n\t\t\t\tself.osc_dispatcher.map(self.osc_avatar_change_path + \"*\", self.osc_server_handler_avatar)\n\t\t\t\tself.osc_dispatcher.map(self.osc_use_kat_path, self.osc_server_handler_kat)\n\t\t\t\tself.osc_dispatcher.map(self.osc_use_textbox_path, self.osc_server_handler_textbox)\n\t\t\t\tself.osc_dispatcher.map(self.osc_use_both_path, self.osc_server_handler_both)\n\t\t\t\tself.osc_dispatcher.map(self.osc_stt_mode_path, self.osc_server_handler_stt_mode)\n\n\t\t\t\tself.osc_server = osc_server.ThreadingOSCUDPServer((self.osc_server_ip, self.osc_server_port), self.osc_dispatcher, asyncio.get_event_loop())\n\t\t\t\tthreading.Thread(target = self.osc_server_serve, daemon = True).start()\n\n\t\t\t\tif self.osc_server_port != 9001:\n\t\t\t\t\tself.oscqs = OSCQueryService(\"TextboxSTT\", self.http_port, self.osc_server_port)\n\t\t\t\t\tfor i in range(self.sync_params_max):\n\t\t\t\t\t\tself.oscqs.advertise_endpoint(self.osc_parameter_prefix + self.param_sync + str(i), access=\"readwrite\")\n\t\t\t\t\tself.oscqs.advertise_endpoint(self.osc_avatar_change_path, access=\"readwrite\")\n\t\t\t\t\tself.oscqs.advertise_endpoint(self.osc_use_kat_path, access=\"readwrite\")\n\t\t\t\t\tself.oscqs.advertise_endpoint(self.osc_use_textbox_path, access=\"readwrite\")\n\t\t\t\t\tself.oscqs.advertise_endpoint(self.osc_use_both_path, 
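            # advertised read/write so OSCQuery clients can discover and toggle this path too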
access=\"readwrite\")\n\t\t\t\t\tself.oscqs.advertise_endpoint(self.osc_stt_mode_path, access=\"readwrite\")\n\t\t\texcept:\n\t\t\t\tself.osc_enable_server = False\n\t\t\t\tself.osc_server_test_step = 0\n\n\t# Stops the OSC Server\n\tdef osc_stop_server(self):\n\t\tif self.osc_server:\n\t\t\tself.osc_server.shutdown()\n\t\t\tself.osc_server = False\n\t\t\tself.osc_enable_server = False\n\t\tif self.oscqs:\n\t\t\tself.oscqs.stop()\n\n\t# Set the text to any value\n\tdef set_textbox_text(self, text: str, cutoff: bool = False, instant: bool = False):\n\t\tif cutoff:\n\t\t\tself.textbox_target_text = text[-self.textbox_charlimit:]\n\t\telse:\n\t\t\tself.textbox_target_text = text[:self.textbox_charlimit]\n\n\t\tif instant:\n\t\t\tself.osc_chatbox_loop()\n\n\n\t# Set the text to any value\n\tdef set_kat_text(self, text: str, cutoff: bool = False):\n\t\tif cutoff:\n\t\t\tself.kat_target_text = text[-self.kat_charlimit:]\n\t\telse:\n\t\t\tself.kat_target_text = text[:self.kat_charlimit]\n\n\n\t# Sets the sync parameter count\n\tdef set_sync_params(self, sync_params: int):\n\t\tif sync_params == 0:\n\t\t\t# Automatic sync parameters\n\t\t\tself.osc_start_server()\n\t\telse:\n\t\t\t# Manual sync parameter setting\n\t\t\tself.sync_params = sync_params\n\t\t\tself.sync_params_last = self.sync_params\n\t\t\tself.pointer_count = int(self.kat_charlimit / self.sync_params)\n\n\t\t\tself.osc_server_test_step = -1 # Reset parameters and clear text\n\t\t\tself.osc_stop_server()\n\n\n\t# Chatbox loop\n\tdef osc_chatbox_loop(self):\n\t\t_text = self.textbox_target_text.replace(\"\\n\", \" \")\n\n\t\tif self.last_chatbox_text == _text:\n\t\t\treturn\n\t\t\n\t\tself.last_chatbox_text = _text\n\t\tself.osc_client.send_message(self.osc_chatbox_path, [_text, True, True if self.textbox_target_text == \"\" else False])\n\n\tdef set_kat_typing_indicator(self, state: bool):\n\t\tself.osc_client.send_message(self.osc_parameter_listening, state)\n\n\tdef set_textbox_typing_indicator(self, state: bool):\n\t\tself.osc_client.send_message(self.osc_chatbox_typing_path, state)\n\n\t# Syncronisation loop\n\tdef osc_timer_loop(self):\n\t\tgui_text = self.kat_target_text\n\n\t\t# Test parameter count if an update is requried\n\t\tif type(self.osc_server) == osc_server.ThreadingOSCUDPServer:\n\t\t\tif self.osc_server_test_step > 0:\n\t\t\t\t# Keep text cleared during test\n\t\t\t\tself.osc_client.send_message(self.osc_parameter_prefix + self.param_pointer, self.pointer_clear)\n\t\t\t\tself.osc_client.send_message(self.osc_use_kat_path, self.config.osc.use_kat)\n\t\t\t\tself.osc_client.send_message(self.osc_use_textbox_path, self.config.osc.use_textbox)\n\t\t\t\tself.osc_client.send_message(self.osc_use_both_path, self.config.osc.use_both)\n\t\t\t\tself.osc_client.send_message(self.osc_stt_mode_path, self.config.mode)\n\n\t\t\t\tif self.osc_server_test_step == 1:\n\t\t\t\t\t# Reset sync parameters count\n\t\t\t\t\tself.sync_params = 0\n\n\t\t\t\t\t# Reset character text values\n\t\t\t\t\tfor char_index in range(self.sync_params_max):\n\t\t\t\t\t\tself.osc_client.send_message(self.osc_parameter_prefix + self.param_sync + str(char_index), 0.0)\n\t\t\t\t\tself.osc_server_test_step = 2\n\t\t\t\t\treturn\n\n\t\t\t\telif self.osc_server_test_step == 2:\n\t\t\t\t\t# Set characters to test value\n\t\t\t\t\tfor char_index in range(self.sync_params_max):\n\t\t\t\t\t\tself.osc_client.send_message(self.osc_parameter_prefix + self.param_sync + str(char_index), self.sync_params_test_char_value / 127.0)\n\t\t\t\t\tself.osc_server_test_step = 
3\n\t\t\t\t\treturn\n\n\t\t\t\telif self.osc_server_test_step == 3:\n\t\t\t\t\t# Set characters back to 0\n\t\t\t\t\tfor char_index in range(self.sync_params_max):\n\t\t\t\t\t\tself.osc_client.send_message(self.osc_parameter_prefix + self.param_sync + str(char_index), 0.0)\n\t\t\t\t\tself.osc_server_test_step = 4\n\t\t\t\t\treturn\n\n\t\t\t\telif self.osc_server_test_step == 4:\n\t\t\t\t\t# Finish the parameter sync test\n\t\t\t\t\tif self.sync_params == 0:\n\t\t\t\t\t\tself.sync_params = self.sync_params_last # Test failed, reuse last detected param count\n\t\t\t\t\t\tself.isactive = False\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.sync_params_last = self.sync_params\n\t\t\t\t\t\tself.isactive = True\n\t\t\t\t\tself.osc_server_test_step = 0\n\t\t\t\t\tself.pointer_count = int(self.kat_charlimit / self.sync_params)\n\t\t\t\t\tself.osc_text = \" \".ljust(self.kat_charlimit) # Resync letters\n\n\t\t# Do not process anything if sync parameters are not setup\n\t\tif self.sync_params == 0:\n\t\t\treturn\n\n\t\t# Sends clear text message if all text is empty\n\t\tif gui_text.strip(\"\\n\").strip(\" \") == \"\" or self.osc_server_test_step == -1:\n\n\t\t\t# Reset parameters\n\t\t\tif self.osc_server_test_step == -1:\n\t\t\t\tfor char_index in range(self.sync_params_max):\n\t\t\t\t\tself.osc_client.send_message(self.osc_parameter_prefix + self.param_sync + str(char_index), 0.0)\n\t\t\t\t\tself.osc_server_test_step = 0\n\n\t\t\t# Clear text\n\t\t\tself.osc_client.send_message(self.osc_parameter_prefix + self.param_pointer, self.pointer_clear)\n\t\t\tself.osc_text = \" \".ljust(self.kat_charlimit)\n\t\t\treturn\n\n\t\t# Make sure KAT is visible even after avatar change\n\t\tself.osc_client.send_message(self.osc_parameter_prefix + self.param_visible, True)\n\n\t\t# Pad line feeds with spaces for OSC\n\t\ttext_lines = gui_text.split(\"\\n\")\n\t\tfor index, text in enumerate(text_lines):\n\t\t\ttext_lines[index] = self._pad_line(text)\n\t\tgui_text = self._list_to_string(text_lines)\n\n\t\t# Pad text with spaces up to the text limit\n\t\tgui_text = gui_text.ljust(self.kat_charlimit)\n\t\tosc_text = self.osc_text.ljust(self.kat_charlimit)\n\n\t\t# Text syncing\n\t\tosc_chars = list(osc_text)\n\t\tif gui_text != self.osc_text: # GUI text is different, needs sync\n\n\t\t\tfor pointer_index in range(self.pointer_count):\n\t\t\t\t# Check if characters within this pointer are different\n\t\t\t\tequal = True\n\t\t\t\tfor char_index in range(self.sync_params):\n\t\t\t\t\tindex = (pointer_index * self.sync_params) + char_index\n\n\t\t\t\t\tif gui_text[index] != osc_text[index]:\n\t\t\t\t\t\tequal = False\n\t\t\t\t\t\tbreak\n\n\t\t\t\tif equal == False: # Characters not equal, need to sync this pointer position\n\t\t\t\t\tself.osc_update_pointer(pointer_index, gui_text, osc_chars)\n\t\t\t\t\treturn\n\n\t\t# No updates required, use time to resync text\n\t\tself.pointer_index_resync = self.pointer_index_resync + 1\n\t\tif self.pointer_index_resync >= self.pointer_count:\n\t\t\tself.pointer_index_resync = 0\n\n\t\tpointer_index = self.pointer_index_resync\n\t\tself.osc_update_pointer(pointer_index, gui_text, osc_chars)\n\n\t# Starts the OSC server serve\n\tdef osc_server_serve(self):\n\t\tself.osc_server.serve_forever(2)\n\n\n\t# Handle OSC server to detect the correct sync parameters to use\n\tdef osc_server_handler_char(self, address: tuple[str, int], value: str, *args: list[dispatcher.Any]):\n\t\tif self.osc_server_test_step > 0:\n\t\t\tlength = len(self.osc_parameter_prefix + self.param_sync)\n\t\t\tself.sync_params 
= max(self.sync_params, int(address[length:]) + 1)\n\n\n\t# Handle OSC server to retest sync on avatar change\n\tdef osc_server_handler_avatar(self, address: tuple[str, int], value: str, *args: list[dispatcher.Any]):\n\t\tlog.info(\"Avatar change detected, retesting sync parameters\")\n\t\tself.osc_server_test_step = 1\n\t\tself.isactive = False\n\t\n\tdef osc_server_handler_kat(self, address: tuple[str, int], value: str, *args: list[dispatcher.Any]):\n\t\tif self.osc_server_test_step == 0:\n\t\t\tself.config.osc.use_kat = bool(value)\n\n\tdef osc_server_handler_textbox(self, address: tuple[str, int], value: str, *args: list[dispatcher.Any]):\n\t\tif self.osc_server_test_step == 0:\n\t\t\tself.config.osc.use_textbox = bool(value)\n\t\n\tdef osc_server_handler_both(self, address: tuple[str, int], value: str, *args: list[dispatcher.Any]):\n\t\tif self.osc_server_test_step == 0:\n\t\t\tself.config.osc.use_both = bool(value)\n\n\tdef osc_server_handler_stt_mode(self, address: tuple[str, int], value: str, *args: list[dispatcher.Any]):\n\t\tif self.osc_server_test_step == 0:\n\t\t\tself.config.mode = int(value)\n\t\t\tif self.config.mode != 0:\n\t\t\t\tif self.config.listener.pause_threshold < 3.0:\n\t\t\t\t\tself.config.listener.pause_threshold = 3.0\n\t\t\t\tif self.config.listener.timeout_time < 5.0:\n\t\t\t\t\tself.config.listener.timeout_time = 5.0\n\n\t# Updates the characters within a pointer\n\tdef osc_update_pointer(self, pointer_index: int, gui_text: str, osc_chars: list[int]):\n\t\tif not self.config_osc.use_kat:\n\t\t\treturn\n\t\tself.osc_client.send_message(self.osc_parameter_prefix + self.param_pointer, pointer_index + 1) # Set pointer position\n\n\t\t# Loop through characters within this pointer and set them\n\t\tfor char_index in range(self.sync_params):\n\t\t\tindex = (pointer_index * self.sync_params) + char_index\n\t\t\tgui_char = gui_text[index]\n\n\t\t\t# Convert character to the key value, replace invalid characters\n\t\t\tkey = self.keys.get(gui_char, self.invalid_char_value)\n\n\t\t\t# Calculate character float value for OSC\n\t\t\tvalue = float(key)\n\t\t\tif value > 127.5:\n\t\t\t\tvalue = value - 256.0\n\t\t\tvalue = value / 127.0\n\n\t\t\tself.osc_client.send_message(self.osc_parameter_prefix + self.param_sync + str(char_index), value)\n\t\t\tosc_chars[index] = gui_char # Apply changes to the networked value\n\n\t\tself.osc_text = self._list_to_string(osc_chars)\n\n\n\t# Combines an array of strings into a single string\n\tdef _list_to_string(self, string: str):\n\t\treturn \"\".join(string)\n\n\n\t# Pads the text line to its effective length\n\tdef _pad_line(self, text: str):\n\t\treturn text.ljust(self._get_padded_length(text))\n\n\n\t# Gets the effective padded length of a line\n\tdef _get_padded_length(self, text: str):\n\t\tlines = max(math.ceil(len(text) / self.line_length), 1)\n\t\treturn self.line_length * lines\n\n\n\t# Stop the timer and hide the text overlay\n\tdef stop(self):\n\t\ttry:\n\t\t\tself.osc_timer.stop()\n\t\texcept Exception as e:\n\t\t\tlog.error(traceback.format_exc())\n\t\ttry:\n\t\t\tself.osc_chatbox_timer.stop()\n\t\texcept Exception as e:\n\t\t\tlog.error(traceback.format_exc())\n\t\ttry:\n\t\t\tself.osc_stop_server()\n\t\texcept Exception as e:\n\t\t\tlog.error(traceback.format_exc())\n\t\tself.hide()\n\t\tself.clear_kat()\n\t\tself.clear_chatbox()\n\t\t\n\n\n\t# Restart the timer for syncing texts and show the overlay\n\tdef start(self):\n\t\ttry:\n\t\t\tself.osc_timer.start()\n\t\texcept Exception as 
e:\n\t\t\tlog.error(traceback.format_exc())\n\t\ttry:\n\t\t\tself.osc_chatbox_timer.start()\n\t\texcept Exception as e:\n\t\t\tlog.error(traceback.format_exc())\n\t\ttry:\n\t\t\tself.osc_start_server()\n\t\texcept Exception as e:\n\t\t\tlog.error(traceback.format_exc())\n\t\tself.osc_timer.start()\n\t\tself.osc_chatbox_timer.start()\n\t\tself.hide()\n\t\tself.clear_kat()\n\t\tself.clear_chatbox()\n\n\n\t# show overlay\n\tdef show(self):\n\t\tself.osc_client.send_message(self.osc_parameter_prefix + self.param_visible, True) # Hide KAT\n\n\n\t# hide overlay\n\tdef hide(self):\n\t\tself.osc_client.send_message(self.osc_parameter_prefix + self.param_visible, False) # Hide KAT\n\n\n\t# clear text\n\tdef clear_kat(self):\n\t\tself.osc_text = \"\"\n\t\tself.kat_target_text = \"\"\n\t\tself.osc_client.send_message(self.osc_parameter_prefix + self.param_pointer, 255) # Clear KAT text\n\t\tself.hide()\n\n\tdef clear_chatbox(self, instant: bool = False):\n\t\tself.textbox_target_text = \"\"\n\n\t\tif instant:\n\t\t\tself.osc_chatbox_loop()\n\n\nclass RepeatedTimer(object):\n\tdef __init__(self, interval: float, function, *args, **kwargs):\n\t\tself._timer: Timer = None\n\t\tself.interval = interval\n\t\tself.function = function\n\t\tself.args = args\n\t\tself.kwargs = kwargs\n\t\tself.is_running: bool = False\n\t\tself.start()\n\n\tdef _run(self):\n\t\tself.is_running = False\n\t\tself.start()\n\t\tself.function(*self.args, **self.kwargs)\n\n\tdef start(self):\n\t\tif not self.is_running:\n\t\t\tself._timer = Timer(self.interval, self._run)\n\t\t\tself._timer.start()\n\t\t\tself.is_running = True\n\n\tdef stop(self):\n\t\tself._timer.cancel()\n\t\tself.is_running = False\n","repo_name":"I5UCC/VRCTextboxSTT","sub_path":"src/osc.py","file_name":"osc.py","file_ext":"py","file_size_in_byte":21328,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"81"} +{"seq_id":"41005197495","text":"\"\"\"\nFaça um Programa para uma loja de tintas. O programa deverá pedir o tamanho em metros \nquadrados da área a ser pintada. Considere que a cobertura da tinta é de 1 litro para cada \n6 metros quadrados e que a tinta é vendida em latas de 18 litros, que custam R$ 80,00 ou em \ngalões de 3,6 litros, que custam R$ 25,00.\nInforme ao usuário as quantidades de tinta a serem compradas e os respectivos preços em 3 situações:\ncomprar apenas latas de 18 litros;\ncomprar apenas galões de 3,6 litros;\nmisturar latas e galões, de forma que o desperdício de tinta seja menor. 
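[EN] Paint-shop exercise: given an area in m² (1 litre covers 6 m²), report quantities and prices for 18 L cans at R$ 80.00 only, 3.6 L gallons at R$ 25.00 only, and a can/gallon mix that minimises waste.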
\nAcrescente 10% de folga e sempre arredonde os valores para cima, isto é, considere latas cheias.\n\"\"\"\n\nimport math\n\nmetros = float(input('Insira o valor em m² da área a ser pintada: '))\n\nmetros_com_dez = (metros * 1.1)  # apply the 10% slack required by the statement\n\nlatas = math.ceil(metros_com_dez / 108)  # one 18 L can covers 6 * 18 = 108 m²\ngaloes = math.ceil(metros_com_dez / 21.6)  # one 3.6 L gallon covers 6 * 3.6 = 21.6 m²\n\nvalor_latas = (latas * 80)\nvalor_galoes = (galoes * 25)\n\nlatas_2 = math.floor(metros_com_dez / 108)\ngaloes_2 = math.ceil((metros_com_dez % 108) / (21.6))\n\nvalor_latas_galoes = (latas_2 * 80) + (galoes_2 * 25)\n\nprint('Caso você compre somente latas de 18 litros, você terá que pegar',\n      latas, 'latas e pagará um total de', valor_latas, 'reais.')\nprint('Caso você compre somente galões de 3,6 litros, você terá que pegar',\n      galoes, 'galões e pagará um total de', valor_galoes, 'reais.')\nprint('Caso você queira comprar latas e galões, você terá que pegar', latas_2,\n      'latas e', galoes_2, 'galões e pagará um total de', valor_latas_galoes, 'reais.')","repo_name":"iscodand/python-programming-logic","sub_path":"Tasks - LP/Estrutura Sequencial/ex017.py","file_name":"ex017.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"}
+{"seq_id":"73380567946","text":"#!/usr/bin/env python3\n#\n# Description:\n#\n# Find the maximum profit to be had given an array of a ticker's daily prices\n# if you can only buy and sell the stock once.\n\ndef find_max_profit(prices):\n    lowest, profitist = float('inf'), 0.0\n    for p in prices:\n        if (p - lowest) > profitist: profitist = p - lowest\n        if p < lowest: lowest = p\n    return profitist\n","repo_name":"ijnji/sandbox","sub_path":"python/epi/buy_and_sell_stock.py","file_name":"buy_and_sell_stock.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"8303841277","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nfrom __future__ import unicode_literals, print_function, absolute_import\n\nimport logging\nfrom collections import OrderedDict\n\nfrom django.db import models\nfrom django.db.models.deletion import CASCADE, DO_NOTHING\nfrom django.conf import global_settings\nfrom django.utils.translation import get_language\n\nfrom compositefk.fields import (\n    CompositeForeignKey,\n    RawFieldValue,\n    LocalFieldValue,\n    CompositeOneToOneField,\n    FunctionBasedFieldValue,\n)\n\nlogger = logging.getLogger(__name__)\n__author__ = 'darius.bernard'\n\n\nclass Address(models.Model):\n    company = models.IntegerField()\n    tiers_id = models.IntegerField()\n    type_tiers = models.CharField(max_length=1, choices=[(\"C\", \"Customer\"), (\"S\", \"supplier\")])\n    city = models.CharField(max_length=255)\n    postcode = models.CharField(max_length=32)\n\n    class Meta(object):\n        unique_together = [\n            (\"company\", \"tiers_id\", \"type_tiers\"),\n        ]\n\n\nclass Representant(models.Model):\n    company = models.IntegerField()\n    cod_rep = models.CharField(max_length=2)\n\n\ndef get_local_type_tiers():\n    return 'C'\n\n\nclass Customer(models.Model):\n    company = models.IntegerField()\n    customer_id = models.IntegerField()\n    name = models.CharField(max_length=255)\n    cod_rep = models.CharField(max_length=2, null=True)\n\n    # for problem in trim_join, we must try to give the fields in a consistent order with others models...\n    # see #26515 at https://code.djangoproject.com/ticket/26515\n    # so we always give company first and tiers_id after\n    address = CompositeForeignKey(Address, on_delete=CASCADE, null=True, 
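        # composite join to Address: (company, tiers_id, type_tiers) built from local fields plus the constant 'C'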
to_fields=OrderedDict([\n (\"company\", LocalFieldValue(\"company\")),\n (\"tiers_id\", \"customer_id\"),\n (\"type_tiers\", RawFieldValue(\"C\"))\n ]), null_if_equal=[ # if either of the fields company or customer is -1, ther can't have address\n (\"company\", -1),\n (\"customer_id\", -1)\n ])\n\n local_address = CompositeForeignKey(\n Address,\n on_delete=CASCADE,\n null=True,\n to_fields=OrderedDict([\n (\"company\", LocalFieldValue(\"company\")),\n (\"tiers_id\", \"customer_id\"),\n (\"type_tiers\", FunctionBasedFieldValue(get_local_type_tiers))\n ]),\n null_if_equal=[ # if either of the fields company or customer is -1, ther can't have address\n (\"company\", -1),\n (\"customer_id\", -1)],\n related_name='customer_local',\n )\n\n representant = CompositeForeignKey(Representant, on_delete=CASCADE, null=True, to_fields=[\n \"company\",\n \"cod_rep\",\n ], nullable_fields={\"cod_rep\": \"\"},\n null_if_equal=[ # if either of the fields company or customer is -1, ther can't have address\n (\"cod_rep\", \"\"),\n ]\n )\n\n class Meta(object):\n unique_together = [\n (\"company\", \"customer_id\"),\n ]\n\n\nclass Supplier(models.Model):\n company = models.IntegerField()\n supplier_id = models.IntegerField()\n name = models.CharField(max_length=255)\n address = CompositeForeignKey(Address, on_delete=CASCADE, to_fields=OrderedDict([\n (\"company\", LocalFieldValue(\"company\")),\n (\"tiers_id\", \"supplier_id\"),\n (\"type_tiers\", RawFieldValue(\"S\"))\n ]))\n\n class Meta(object):\n unique_together = [\n (\"company\", \"supplier_id\"),\n ]\n\n\nclass Contact(models.Model):\n company_code = models.IntegerField()\n customer_code = models.IntegerField()\n surname = models.CharField(max_length=255)\n # virtual field\n customer = CompositeForeignKey(Customer, on_delete=CASCADE, related_name='contacts', to_fields=OrderedDict([\n (\"company\", \"company_code\"),\n (\"customer_id\", \"customer_code\"),\n ]))\n\n\nclass PhoneNumber(models.Model):\n num = models.CharField(max_length=32)\n type_number = models.IntegerField()\n contact = models.ForeignKey(Contact, on_delete=CASCADE, related_name='phonenumbers')\n\n\nclass Extra(models.Model):\n \"\"\"\n some wrongly analysed table that add extra column to existing one (not a django way of life)\n\n \"\"\"\n company = models.IntegerField()\n customer_id = models.IntegerField()\n sales_revenue = models.FloatField()\n customer = CompositeOneToOneField(\n Customer,\n on_delete=CASCADE,\n related_name='extra',\n to_fields=[\"company\", \"customer_id\"])\n\n\nclass AModel(models.Model):\n n = models.CharField(max_length=32)\n\n\nclass BModel(models.Model):\n a = models.ForeignKey(AModel, null=True, on_delete=CASCADE)\n\n\nclass MultiLangSupplier(models.Model):\n company = models.IntegerField()\n supplier_id = models.IntegerField()\n\n\nclass SupplierTranslations(models.Model):\n master = models.ForeignKey(\n MultiLangSupplier,\n on_delete=CASCADE,\n related_name='translations',\n null=True,\n )\n language_code = models.CharField(\n max_length=255,\n choices=global_settings.LANGUAGES,\n )\n name = models.CharField(max_length=255)\n title = models.CharField(max_length=255)\n\n class Meta:\n unique_together = ('language_code', 'master')\n\n\nactive_translations = CompositeForeignKey(\n SupplierTranslations,\n on_delete=DO_NOTHING,\n to_fields={\n 'master_id': 'id',\n 'language_code': FunctionBasedFieldValue(get_language)\n })\n\n\nactive_translations.contribute_to_class(MultiLangSupplier, 
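    # manual contribute_to_class: attaches the composite FK to an existing model after its class definition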
'active_translations')\n","repo_name":"onysos/django-composite-foreignkey","sub_path":"testapp/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5440,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"81"} +{"seq_id":"8620569861","text":"import numpy as np\nimport math\nimport random\nimport mutation_mod as mp\nfrom crossover_mod import cross_over\nfrom crossover_mod import is_path\n'''class Path_col:\n def __init__(self,L):\n self.list=L\n def fspread(self,a):\n return ((a**2)/(a-1))\n def stupidity(self,l):\n non_unique=list(set([i for i in l if l.count(i)!=1]))\n no_nonun=len(non_unique)\n spread_reps=list(map(self.fspread,list(map(l.count,non_unique))))\n return sum(spread_reps)+no_nonun\n def tot_mass(self):\n stu_inds=list(map(self.stupidity, self.list))\n return sum(stu_inds)\n def flex(self,l):\n flexi=self.stupidity(l)/self.tot_mass()\n return flexi\n def good_enough(self,part):\n prog=[]\n for i in self.list:\n if self.flex(i)<=part:\n prog.append(i)\n return prog'''\nclass Fit:\n def __init__(self,l):\n self.path=l\n def dist(self,mat,pos1,pos2):\n x1,y1=np.where(mat==pos1)\n x2,y2=np.where(mat==pos2)\n delx= x2[0]-x1[0]\n dely= y2[0]-y1[0]\n dis = (delx**2 + dely**2)**0.5\n return dis\n def span(self,mat):\n pos=self.path[0]\n test=[i for i in set(self.path) if i!=pos]\n x= lambda posi : self.dist(mat,pos,posi)\n res=list(map(x,test))\n return sum(res)\n def leng(self,mat):\n temp=[]\n for i in range(len(self.path)-1):\n pos1=self.path[i]\n pos2=self.path[i+1]\n temp.append(self.dist(mat, pos1, pos2))\n return sum(temp)\n def fspread(self,a):\n return ((a**2)/(a-1))\n def stupidity(self):\n non_unique=list(set([i for i in self.path if self.path.count(i)!=1]))\n no_nonun=len(non_unique)\n if no_nonun==0:\n return 0\n spread_reps=list(map(self.fspread,list(map(self.path.count,non_unique))))\n return (sum(spread_reps))/(no_nonun)\n def completeness(self,mat):\n tot= list(range(1,np.size(mat)+1))\n scr=0\n for i in tot:\n if i in self.path:\n scr+=1\n if scr==np.size(mat):\n return scr+int(scr/3)\n else:\n return scr\n def fit_cof(self,mat):\n fun=lambda x : x/(x+1)\n fit_coeff=((fun(self.span(mat)/self.leng(mat))-fun(self.stupidity())))\n return fit_coeff+(np.size(mat)*is_path(self.path, mat))+self.completeness(mat)\nclass Batch:\n def __init__(self,l):\n self.list=l\n def fit_enough(self,mat):\n lst=[i for i in self.list if len(i)>=np.size(mat)]\n tops=[]\n maxim=Fit(lst[0]).fit_cof(mat)\n maxl=Fit(lst[0])\n for i in lst:\n i=Fit(i)\n if i.fit_cof(mat)>maxim:\n maxim=i.fit_cof(mat)\n maxl=i\n return(maxl.path)\n def parents(self,mat,c_prob):\n com=self.fit_enough(mat)\n fun= lambda l: (com,l)\n lis=[i for i in random.sample(self.list, math.floor(c_prob*len(self.list))) if i!=com]\n parted_l=[i for i in self.list if i not in lis]\n par=list(map(fun,lis))\n return parted_l,par\n def generation(self,mat,net,c_prob,m_prob):\n net=Batch(net)\n rem,pars=net.parents(mat, c_prob)\n dauts1=list(map(cross_over,pars))\n dauts2=[]\n if len(rem)%2 ==0:\n for i in range(int(len(rem)/2)):\n dauts2.append((rem[i],rem[i+int(len(rem)/2)]))\n else:\n for i in range(int((len(rem)-1)/2)):\n dauts2.append((rem[i],rem[i+int((len(rem)-1)/2)]))\n dauts2.append((rem[len(rem)-1],net.fit_enough(mat)))\n dauts=dauts1+dauts2\n to_mut=math.floor(len(dauts)*m_prob)\n rand_range=random.sample(list(range(len(dauts))), to_mut)\n for i in rand_range:\n x,y=dauts[i]\n dauts[i]=(mp.mutate(x, mat),mp.mutate(y, mat))\n return dauts\n\n \n 
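# Hypothetical usage sketch (illustrative only, not from the original repo): assumes\n    # 'mat' is a square numpy label matrix (values 1..N) and paths are lists of labels.\n    #   mat = np.arange(1, 10).reshape((3, 3))\n    #   pop = [random.sample(range(1, 10), 9) for _ in range(10)]\n    #   pairs = Batch(pop).generation(mat, pop, 0.4, 0.2)  # list of child-path pairs\n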
","repo_name":"darkwebber/AI-group-project","sub_path":"selection_mod.py","file_name":"selection_mod.py","file_ext":"py","file_size_in_byte":3900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12303585969","text":"from .. import graph, using\nfrom ..util import shorten_strings\nfrom flask_babel import gettext as _l\nfrom collections import Counter\nimport re\nimport pandas as pd\n\n\n@graph(_l('Most common words your friends use (above 5 letters)'))\n@using('messages', 'username')\ndef words_count_in_message(data):\n all_messages = data['messages']\n messages = all_messages[(all_messages.thread_type == 'Regular') & (all_messages.sender != data['username'])].dropna()\n\n users_list = []\n words_list = []\n\n for conv, msgs in messages.groupby(messages.conversation):\n\n counter = Counter()\n\n if msgs.content is not None and len(msgs.content) >= 1:\n for msg in msgs.content:\n words = [word.lower() for word in re.split(r'[\\s,.]', re.sub(r'[^\\w ]+', '', msg)) if 5 < len(word) < 20]\n counter.update(words)\n\n if len(counter) == 0:\n continue\n\n common_words = counter.most_common(3)\n text = ''\n for word, count in common_words:\n text += '%s(%d), ' % (word, count)\n text = text[0:-2]\n\n users_list.append(conv)\n words_list.append(text)\n\n table = pd.DataFrame({'User': users_list, 'Most common words': words_list})\n table['User'] = shorten_strings(table['User'])\n\n return table.head(10), table\n","repo_name":"klima7/Social-Insight","sub_path":"analytics/messages/most_common_words_friends.py","file_name":"most_common_words_friends.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"69807923785","text":"from logging import RootLogger\nimport tkinter as tk\n\nfrom tkinter import *\nfrom tkinter import messagebox\n\n# Set the directory path\ndirectory_path = \"/Users/logan/Documents/Weather For Sailing\"\nlogged_in_username = \"\"\n\n\n# Function to save the selected choices\ndef save_choices():\n # Get the selected choices from the choice variables\n selected_choices = []\n for choice_var in choice_vars:\n if choice_var.get() != 0:\n selected_choices.append(choices[choice_vars.index(choice_var)])\n\n # Check if exactly three choices are selected\n if len(selected_choices) == 3:\n # Open the choices.txt file in write mode\n logged_in_username = \"\"\n\n user_file = open(\"last_user.txt\", \"r+\")\n lines = user_file.readlines()\n\n for username in lines:\n if username.strip():\n logged_in_username = username.strip()\n break\n\n if logged_in_username == \"\":\n messagebox.showinfo(\"Error\", \"User log-in invalid.\")\n return\n\n user_file.close()\n\n user_found = BooleanVar()\n user_found.set(False)\n\n user_data = open(\"db/choices.txt\", \"r+\")\n lines = user_data.readlines()\n\n for index, line in enumerate(lines):\n if logged_in_username.upper().strip() in line:\n user_found.set(True)\n new_line = \"{},{},{},{}\\n\".format(logged_in_username.strip(), selected_choices[0], selected_choices[1], selected_choices[2])\n\n lines[index] = new_line\n user_data.seek(0)\n\n user_data.writelines(lines)\n user_data.truncate()\n break\n\n if not user_found.get():\n user_data.write(\"{}={}={}={}\\n\".format(logged_in_username.strip(), selected_choices[0], selected_choices[1], selected_choices[2]))\n user_data.close()\n\n # Show a success message box\n messagebox.showinfo(\"Success\", \"Choices saved successfully!\")\n\n # Close the 
tkinter window\n root.destroy()\n\n # Import the home module (assuming it exists)\n import home\n else:\n # Show a warning message box for invalid selection\n messagebox.showwarning(\"Invalid Selection\", \"Please select exactly three choices.\")\n\n\n# Create the root tkinter window\nroot = tk.Tk()\nroot.title(\"Choice Selection\")\nroot.geometry(\"300x250\")\nroot.resizable(width=False, height=False)\n\n# Function to center the window on the screen\ndef center_window(window):\n window.update_idletasks()\n width = window.winfo_width()\n height = window.winfo_height()\n screen_width = window.winfo_screenwidth()\n screen_height = window.winfo_screenheight()\n x = (screen_width - width) // 2\n y = (screen_height - height) // 2\n \n window.geometry(\"{}x{}+{}+{}\".format(width, height, x, y))\n\n# Call the center_window function to center the window\ncenter_window(root)\n\n# Available choices\nchoices = [\"Windspeed\", \"Tide\", \"Windgusts\", \"Current\", \"Weather\", \"Temperature\"]\n\n# Create choice variables\nchoice_vars = []\nfor i, choice in enumerate(choices):\n choice_var = tk.IntVar(value=0) # Initialize as 0 (off)\n choice_vars.append(choice_var)\n\n # Create a check button for each choice\n checkbox = tk.Checkbutton(root, text=choice, variable=choice_var)\n checkbox.pack(anchor=tk.W)\n\n# Create a button to save the choices\nsave_button = tk.Button(root, text=\"Save\", command=save_choices)\nsave_button.pack(pady=10)\n\n# Start the tkinter event loop\nroot.mainloop()","repo_name":"loganisbadatPY/weather-for-sailors-v2","sub_path":"Weather For Sailing/choice_selector.py","file_name":"choice_selector.py","file_ext":"py","file_size_in_byte":3513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"2251583285","text":"import torch, torchvision\nfrom torchvision.transforms import transforms\n\ntransform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\nclass Cifar10ImageLoader:\n def __init__(self, data_dir='./data', batch_size=128, train_shuffle=True, val_shuffle=False, num_workers=4):\n self.trainset = torchvision.datasets.CIFAR10(root=data_dir, train=True, download=True, transform=transform)\n self.trainLoader = torch.utils.data.DataLoader(self.trainset, batch_size=batch_size, shuffle=train_shuffle, num_workers=num_workers)\n\n self.testset = torchvision.datasets.CIFAR10(root=data_dir, train=False, download=True, transform=transform)\n self.testLoader = torch.utils.data.DataLoader(self.testset, batch_size=batch_size, shuffle=val_shuffle, num_workers=num_workers)\n","repo_name":"cheesama/machine-learning","sub_path":"autorch/example/loader/cifar10_image_loader.py","file_name":"cifar10_image_loader.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"37168614501","text":"def isBadVersion(version):\n return False if version<2 else True\n\nclass Solution(object):\n def firstBadVersion(self, n):\n l,r=0,n-1\n pivot=1\n while l<=r:\n mid=(l+r)//2\n pivot=mid+1\n if isBadVersion(pivot):\n if not isBadVersion(pivot-1):\n break\n r=mid-1\n else:\n l=mid+1\n return pivot\n\ns=Solution()\na=s.firstBadVersion(1)\nprint(a)\n ","repo_name":"eligah/codingPractice","sub_path":"leetcode/python/278. 
First Bad Version.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38791814214","text":"\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom torch.autograd import Variable\n\nfrom functools import reduce # Valid in Python 2.6+, required in Python 3\nimport operator\n\ndef fanin_init(size, fanin=None):\n fanin = fanin or size[0]\n v = 1. / np.sqrt(fanin)\n return torch.Tensor(size).uniform_(-v, v)\n\nclass BaseModel(nn.Module):\n def __init__(self, nb_states, nb_actions, init_w):\n super(BaseModel, self).__init__()\n self.nb_states = nb_states\n self.nb_actions = nb_actions\n self.init_w = init_w\n\n self.relu = nn.ReLU()\n self.tanh = nn.Tanh()\n\n in_dims = self.nb_states[0]\n self.conv1 = nn.Conv2d(in_dims, 32, 5, 1)\n self.pool1 = nn.MaxPool2d(2, 2)\n self.conv2 = nn.Conv2d(32, 32, 5, 1)\n self.pool2 = nn.MaxPool2d(2, 2)\n self.conv3 = nn.Conv2d(32, 32, 4, 1)\n self.cnn_out_dims = self._get_cnn_out_dims()\n\n def init_weights(self, init_w):\n self.conv1.weight.data = fanin_init(self.conv1.weight.data.size())\n self.conv2.weight.data = fanin_init(self.conv2.weight.data.size())\n self.conv3.weight.data = fanin_init(self.conv3.weight.data.size())\n\n def forward(self, x):\n batch_size = x.data.size()[0]\n out = self._forward_cnn(x)\n out = out.view(batch_size, -1)\n return out\n \n def _forward_cnn(self, x):\n out = self.conv1(x)\n out = self.relu(out)\n out = self.pool1(out)\n \n out = self.conv2(out)\n out = self.relu(out)\n out = self.pool2(out)\n \n out = self.conv3(out)\n out = self.relu(out)\n \n return out\n\n def _get_cnn_out_dims(self):\n x = Variable(torch.rand(self.nb_states).view((1,) + self.nb_states))\n out = self._forward_cnn(x).data.numpy()\n return int(np.prod(out.shape[1:]))\n\n def _sample_output(self, batch_size):\n # For debugging\n x = Variable(torch.rand((batch_size,) + self.nb_states))\n out = self.forward(x).data.numpy()\n return out \n\nclass Actor(BaseModel):\n def __init__(self, nb_states, nb_actions, init_w=3e-5):\n super(Actor, self).__init__(nb_states, nb_actions, init_w)\n\n self.fc1 = nn.Linear(self.cnn_out_dims, 512)\n self.fc2 = nn.Linear(512, nb_actions)\n\n self.init_weights(init_w)\n \n def init_weights(self, init_w):\n super(Actor, self).init_weights(init_w)\n self.fc1.weight.data = fanin_init(self.fc1.weight.data.size())\n self.fc2.weight.data.uniform_(-init_w, init_w)\n self.fc2.bias.data.uniform_(-init_w, init_w)\n \n def forward(self, x):\n cnn_out = super(Actor, self).forward(x)\n out = self.fc1(cnn_out)\n out = self.relu(out)\n out = self.fc2(out)\n out = self.tanh(out)\n return out\n \nclass Critic(BaseModel):\n def __init__(self, nb_states, nb_actions, init_w=3e-5):\n super(Critic, self).__init__(nb_states, nb_actions, init_w)\n\n self.fc1 = nn.Linear(self.cnn_out_dims + nb_actions, 512)\n self.fc2 = nn.Linear(512, 1)\n\n self.init_weights(init_w)\n \n def init_weights(self, init_w):\n super(Critic, self).init_weights(init_w)\n self.fc1.weight.data = fanin_init(self.fc1.weight.data.size())\n self.fc2.weight.data.uniform_(-init_w, init_w)\n self.fc2.bias.data.uniform_(-init_w, init_w)\n \n def forward(self, xs):\n x, a = xs\n cnn_out = super(Critic, self).forward(x)\n merge_out = torch.cat([cnn_out, a], 1)\n out = self.fc1(merge_out)\n out = self.relu(out)\n out = self.fc2(out)\n return out\n\n def _sample_output(self, batch_size):\n # For debugging\n x = Variable(torch.rand((batch_size,) + 
self.nb_states))\n a = Variable(torch.rand((batch_size, self.nb_actions)))\n out = self.forward((x, a)).data.numpy()\n return out \n","repo_name":"williamd4112/pytorch-rl","sub_path":"ddpg/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"35597213423","text":"import os\nimport shutil\ndownloadFolder = \"./downloads\"\ntry:\n os.makedirs(downloadFolder, exist_ok=True)\nexcept OSError:\n pass\npath = input(\"youtube url or txt file name? (don't include .txt): \")\nif(len(path)>5 and \".\" in path ):\n print(\"detected url\")\n urls = [path]\nelse:\n urls = []\n #read txt file\n with open(path+\".txt\") as f:\n contents = f.readlines()\n for line in contents:\n urls.append(line)\n #remove duplicates\n res = []\n [res.append(x) for x in urls if x not in res]\n urls = res\nprint(urls)\nprint(\"Found %i urls\" %len(urls))\n\ncurrentDir = os.getcwd()\nos.chdir(downloadFolder)\nfor url in urls:\n\n\tcommand = 'yt-dlp -o \"%(id)s.%(ext)s\"' +' ' + url\n\tprint(command)\n\tprint(\"\\nDownloading \", url)\n\tos.system(command)\n\tprint(\"=============\")\nos.chdir(currentDir)\nshutil.move(downloadFolder, \"inputFolder\")\nos.system(\"python main.py\")\n","repo_name":"UoA-CARES/multimodal-data-pipeline","sub_path":"download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"16120660997","text":"from db_models.modelsv2 import ProjectAttachmentTypes\nfrom flask_restful import fields\n\n# PARAMS\nNAME = \"ProjectAttachmentTypeResource\"\nNAME_LIST = \"ProjectAttachmentTypeListResource\"\nENTITY_NAME = \"Project Attachment Types\"\nMODEL = ProjectAttachmentTypes\nROUTE = \"/v2/projectAttachmentTypes\"\nEND_POINT = \"v2-project-atttachment-types\"\nROUTE_LIST = ROUTE\nEND_POINT_LIST = END_POINT + \"-list\"\n# NESTED SCHEMA FIELDS\n\n# OUTPUT SCHEMA\nOUTPUT_FIELDS = {\n 'id': fields.Integer,\n 'name': fields.String,\n 'system_name': fields.String,\n 'filename_extensions': fields.List(fields.String),\n 'icon': fields.String\n}","repo_name":"vyadzmak/Landau.X.Api","sub_path":"resv2/project_attachment_types_resources.py","file_name":"project_attachment_types_resources.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"27838599580","text":"def char_num(char):\t\t\t\t\t\t\t\t\t# O(1)\n\treturn ord(char.lower())\n\n\nprint(char_num(\"z\"))\n\n# check if string 'a' can be converted into string 'b' by only\n# \t\tdeleting letters\n#\t\t changing letter cases\n\ndef abbreviation(a, b) -> str:\n\tdelete_count = 0\n\n\ti = 0 ; j = 0\n\twhile i < len(a) and j < len(b):\n\n\t\tif ord(a[i].lower()) == ord(b[j].lower()):\n\t\t\tprint(\"EQUAL LETTER:\", a[i], b[j], end=\" ----->\")\n\n\t\t\tif ord(a[i]) > ord(b[j]): print(\"change\", a[i], \"to uppercase!\")\n\t\t\telif ord(a[i]) < ord(b[j]): print(\"change\", a[i], \"to lowercase!\")\n\n\t\t\ti += 1 ; j += 1\n\t\telif len(a) == len(b):\n\t\t\t# with equal lengths nothing can be deleted, so a mismatch is fatal\n\t\t\tprint(\"INVALID CHARACTER DETECTED\")\n\t\t\treturn \"NO\"\n\t\telse:\n\t\t\t# letters differ and a is longer, so delete this letter from a\n\t\t\tdelete_count += 1\n\t\t\ti += 1\n\n\t# convertible iff every letter of b was matched\n\treturn \"YES\" if j == len(b) else \"NO\"\n\na = \"AbcDE\"\nb = \"ABDE\"\n\nprint( abbreviation(a, b) 
)","repo_name":"LahiruHW/portfolio","sub_path":"Practice Problems/Hackerrank/X_abbreviation.py","file_name":"X_abbreviation.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74464387786","text":"\"\"\"import xml.etree.ElementTree\ne = xml.etree.ElementTree.parse('mcm_subset.xml').getroot()\nspecies = [s for s in e[0] if 'species_number' in s.keys() and len(s) > 0]\n\nsmiles = dict()\nfor s in species:\n smiles[s.get('species_name')] = [i for i in s[0].itertext()][0]\n \n \nNspecies = {k: v for k, v in smiles.items() if v.count('N') > 0}\n\nPANspecies = {k: v for k, v in Nspecies.items() if 'O=N(=O)OO' in v or 'OON(=O)=O' in v or 'OO[N+](=O)[O-]' in v}\nNTRspecies = {k: v for k, v in Nspecies.items() if ('O[N+](=O)[O-]' in v or 'O=N(=O)O' in v or 'ON(=O)=O' in v or 'O[N+](=O)[O-])' in v) and k not in PANspecies}\n\nprint({k: v for k, v in Nspecies.items() if not k in PANspecies and not k in NTRspecies})\nprint('N', len(Nspecies))\nprint('PAN', len(PANspecies))\nprint('PAN = ' + ' + '.join([k for k in PANspecies.keys()]))\nprint('NTR', len(NTRspecies))\nprint('NTR = ' + ' + '.join([k for k in NTRspecies.keys()]))\n\"\"\"\n\nfrom matplotlib import use; use('Agg')\nimport matplotlib.pyplot as plt\nimport pandas as pd\nrate = pd.read_csv('Rate_timeseries_1.dat', delimiter = '!')\nrate.rename(columns = lambda x: x.strip(), inplace = True)\nconc = pd.read_csv('Spec_timeseries_1.dat', delimiter = '!')\nconc.rename(columns = lambda x: x.strip(), inplace = True)\nrate.eval('JDAY_LST = JDAY_GMT + (LON_degE / 15.)/24 - 2006000', inplace = True)\nfor key in conc.columns:\n if key not in 'TIME JDAY_GMT LAT_degN LON_degE PRESS_hPa TEMP_K THETA H2O CFACTOR M N2 O2 JNO2FACT JO1DFACT'.split() and not 'Unnamed' in key:\n conc.eval('%s = %s / CFACTOR * 1e9' % (key, key), inplace = True)\nconc.eval('JDAY_LST = JDAY_GMT + (LON_degE / 15.)/24 - 2006000', inplace = True)\nconc.eval('NOx = NO2 + NO', inplace = True)\nconc.eval('RN = NOx / HNO3', inplace = True)\n\nplt.close()\nax = rate.plot(x = 'JDAY_LST', y = ['NO2 --> O + NO', 'NO3 --> NO', 'NO3 --> O + NO2', 'H2O2 --> 2 OH'])\nrate.plot(x = 'JDAY_LST', y = ['THETA'], secondary_y = True, ax = ax)\nplt.savefig('rates.png')\n\nplt.close()\nconc.plot(x = 'JDAY_LST', y = 'NO NO2 NO3 HNO3'.split(), stacked = True)\nplt.ylabel('cumulative ppbv')\nplt.savefig('concs.png')\n","repo_name":"barronh/DSMACC","sub_path":"working/check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"81"} +{"seq_id":"29939368621","text":"import re\nimport time as t\nimport datetime\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\n\n\ndef get_data(pages, last_retrieved_item):\n browser = webdriver.Chrome()\n # browser = webdriver.Firefox()\n\n cars_info_list = []\n cars_info_dict = dict()\n browser.get(f\"https://bama.ir/car?priced=1\")\n\n body = browser.find_element(By.TAG_NAME, 'body')\n\n no_pages = pages\n while no_pages:\n body.send_keys(Keys.END)\n t.sleep(0.5)\n no_pages -= 1\n\n card_content = browser.find_elements(By.XPATH, \"//a[@class = 'bama-ad']\")\n for car in card_content:\n\n make = car.find_element(By.XPATH, \".//p[contains(@class, 'bama-ad__title')]\")\n cars_info_dict['make'] = make.text\n # print(title.text)\n\n time = car.find_element(By.XPATH, \".//div[@class = 
'bama-ad__title-row']\")\n cars_info_dict['time'] = time.text.split('\\n')[1]\n\n year = car.find_element(By.XPATH, \".//div[@class = 'bama-ad__detail-row']\")\n cars_info_dict['year'] = year.text.split('\\n')[0]\n\n mileage = car.find_element(By.XPATH, \".//div[@class = 'bama-ad__detail-row']\")\n cars_info_dict['mileage'] = mileage.text.split('\\n')[1]\n\n condition = car.find_element(By.XPATH, \".//div[@class = 'bama-ad__detail-row']\")\n cars_info_dict['condition'] = condition.text.split('\\n')[2]\n\n location = car.find_element(By.XPATH, \".//div[@class = 'bama-ad__address']\")\n cars_info_dict['location'] = location.text\n\n price = car.find_element(By.XPATH, \".//div[@class = 'bama-ad__price-holder']\")\n cars_info_dict['price'] = price.text\n\n # print(cars_info_dict)\n # print(\"///////////////////////////////////////////////////////////////////\")\n if cars_info_dict != last_retrieved_item:\n cars_info_list.append(dict(cars_info_dict))\n else:\n break\n # print(len(cars_info_list))\n\n return cars_info_list, len(cars_info_list)\n\n\nlast_item = {'make': 'پورشه، کاین',\n 'time': 'لحظاتی پیش',\n 'year': '2013',\n 'mileage': '72,000 کیلومتر',\n 'condition': '6 سیلندر',\n 'location': 'تهران / شهرک غرب',\n 'price': '9,000,000,000'}\n\nprint(get_data(100, last_item))\n\n'''\nclass ScrapingReport(models.Model):\n report_date = models.DateTimeField()\n counts = models.IntegerField(default=0)\n last_retrieve_car = models.IntegerField()\n\n\nclass Cars(models.Model):\n scraping_report = models.ForeignKey(ScrapingReport, on_delete=models.CASCADE)\n make = models.CharField(max_length=100)\n mileage = models.CharField(max_length=100)\n condition = models.CharField(max_length=100)\n location = models.CharField(max_length=250)\n price = models.CharField(max_length=100)\n \n \nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.mysql',\n 'OPTIONS': {\n \"read_default_file\": BASE_DIR / \"db_config.cnf\",\n \"init_command\": \"SET default_storage_engine = INNODB\"\n }\n }\n}\n\n\n[client]\ndatabase = carfinder\nuser = root\npassword = Baroon@5067082\ndefault_character_set = utf8\n'''\n","repo_name":"fmadadi69/AdvancePython","sub_path":"S6_Ex1__bama.py","file_name":"S6_Ex1__bama.py","file_ext":"py","file_size_in_byte":3242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"13900265051","text":"import re\nimport os\n\nclass ParamParser:\n \"\"\"Class for parsing param files from picongpu (simplified C++ header parser)\"\"\"\n def __init__(self, verbose = False):\n self.verbose = verbose\n self.defines = {}\n \n def log(self, text, level = 0):\n \"\"\"Log to stdout if verbose is turned on. Level is the indentation level\"\"\"\n if self.verbose:\n print(\" \" * level + text)\n \n def throwError(self, text):\n \"\"\"Raise an exception with the given text. 
Current filePath is automatically added\"\"\"\n raise Exception(text + \" (\" + self.filePath + \")\")\n\n def cleanLine(self, line, curScopeInput, level):\n \"\"\"Return a cleaned line.\n \n Put multiline comments on 1 line\n Remove #pragma/#include\n Replace double whitespaces and whitespaces on either end of the line\n \"\"\"\n if \"/*\" in line and not \"*/\" in line:\n self.log(\"Multiline comment start: \" + line, level)\n line = self.parseMultilineComment(line, curScopeInput)\n # Skip those\n if \"#pragma\" in line or \"#include\" in line:\n return \"\"\n line = line.strip()\n # Collapse multiple whitespaces\n line = re.sub(r\"\\t|(\\s\\s+)\", \" \", line)\n return line\n \n def parseMultilineComment(self, line, curScopeInput):\n \"\"\"Return the full multiline comment\"\"\"\n for newLine in curScopeInput:\n newLine = newLine.strip()\n if newLine.startswith('*') and not newLine.startswith('*/'):\n newLine = newLine[1:].lstrip()\n self.log(\"Adding to multiline comment: \" + newLine)\n line = line + \"\\n\" + newLine\n if \"*/\" in newLine:\n break\n return line\n \n def parseMultilineAssignment(self, line, curScopeInput, level):\n \"\"\"Return the full assignment when it is split among multiple lines\n \n Search till the semicolon\n Detect descriptions in between\n Return (line, descriptions[])\n \"\"\"\n descriptions = []\n for newLine in curScopeInput:\n newLine = newLine.strip()\n newLine = self.cleanLine(newLine, curScopeInput, level)\n if not newLine:\n continue\n if self.matchDescription(newLine, level):\n descriptions.append(self.curDescription)\n continue\n line += newLine\n if newLine.endswith(\";\"):\n return line, descriptions\n if ';' in newLine:\n self.throwError(\"Unexpected ';' in line: \" + newLine)\n self.throwError(\"Did not find the end of the assignment\")\n \n def parseFunction(self, line, curScopeInput, level):\n \"\"\"Parse something that looks like a function. Adds everything till a closing brace or semicolon\"\"\"\n if line.endswith(';'):\n return line\n elif line.endswith('{'):\n openBraces = 1\n else:\n openBraces = 0\n for newLine in curScopeInput:\n newLine = newLine.strip()\n newLine = self.cleanLine(newLine, curScopeInput, level)\n if not newLine:\n continue\n line += newLine\n if newLine.endswith(\"{\"):\n openBraces += 1\n elif newLine == \"}\":\n openBraces -= 1\n if openBraces == 0:\n return line\n elif openBraces < 0:\n self.throwError(\"Unexpected '}'\")\n elif newLine.endswith(\";\") and openBraces == 0:\n return line\n \n @staticmethod\n def addDescription(node, mainDescr, subDescriptions = None):\n \"\"\"Add description meta data by combining the main description\n and possible sub descriptions (e.g. from assignments)\"\"\"\n if mainDescr:\n descr = [mainDescr]\n else:\n descr = []\n if subDescriptions:\n descr.extend(subDescriptions)\n descr = '\\n'.join(descr)\n if descr:\n node['__description'] = descr + node.get('__description', '')\n \n def parseAssignment(self, line, data, curScopeInput, level):\n \"\"\"Parse a line with an assignment (value or type). 
Return the name of the target\"\"\"\n description = self.curDescription\n self.curDescription = None\n if not line.endswith(\";\"):\n if ';' in line:\n self.throwError(\"Unexpected ';' in line: \" + line)\n line, descriptions = self.parseMultilineAssignment(line, curScopeInput, level)\n else:\n descriptions = None\n # Apply defines\n for define, value in self.defines.items():\n line = line.replace(define, value)\n using = re.match(r\"(using|namespace) (\\w+) ?= ?([^;]+) ?;$\", line)\n if using:\n typedefType = using.group(1)\n name = using.group(2)\n value = using.group(3)\n self.log(\"Found \" + typedefType + \": \" + name + \" = \" + value, level)\n if typedefType == \"using\":\n typedefType = \"Type\"\n else:\n typedefType = \"Namespace\"\n data[name] = {\"__value\": value, \"__type\": typedefType}\n self.addDescription(data[name], description, descriptions)\n return name\n variable = re.match(r\"((static )?(constexpr |BOOST_STATIC_CONSTEXPR |BOOST_CONSTEXPR_OR_CONST )?)(\\w+) (\\w+) ?=(.*);\", line)\n if variable:\n varType = variable.group(4)\n name = variable.group(5)\n value = variable.group(6).strip()\n self.log(\"Found variable of type \" + varType + \": \" + name + \" = \" + value, level)\n data[name] = {\"__value\": value, \"__type\": varType}\n self.addDescription(data[name], description, descriptions)\n return name\n externVar = re.match(r\"(\\w+) (\\w+)::(\\w+) * =(.*?);\", line)\n if externVar:\n varType = externVar.group(1)\n className = externVar.group(2)\n name = externVar.group(3)\n value = externVar.group(4).strip()\n self.log(\"Found extern variable of type \" + varType + \": \" + name + \" = \" + value, level)\n if not className in data:\n self.throwError(\"Invalid class: \" + className + \"; \" + line)\n data = data[className]\n if not name in data:\n self.throwError(\"Unknown variable. Not declared in class? \" + line)\n data = data[name]\n if data[\"__type\"] != varType:\n self.throwError(\"Type mismatch of variable: \" + varType + \"!=\" + data[\"__type\"] + \". 
\" + line)\n data[\"__value\"] = value;\n self.addDescription(data, description, descriptions)\n return name\n self.throwError(\"Unknown assignment: \" + line)\n\n def matchDescription(self, line, level):\n \"\"\"Try to match a description on the line and add set self.curDescription\n Return True if matched\"\"\"\n # Doc comment\n description = re.match(r\"/\\*(\\*|!)(.*)\\*/$\", line, re.DOTALL)\n if description:\n self.curDescription = description.group(2).strip()\n self.log(\"New description: \" + self.curDescription, level)\n return True\n else:\n return False\n \n def parseScope(self, data, curScopeInput, level = 0):\n \"\"\"Parse a given scope till its end\"\"\"\n self.curDescription = None\n ignoreLines = False\n ifStack = []\n for line in curScopeInput:\n line = self.cleanLine(line, curScopeInput, level)\n if not line:\n continue\n self.log(\"Line: \" + line, level)\n if line == \"#endif\":\n ignoreLines = ifStack.pop()\n self.log(\"Found #endif -> Ignoring = \" + str(ignoreLines))\n continue\n if ignoreLines:\n if line.startswith(\"#if\"):\n self.log(\"Found #if in ignored block\")\n ifStack.append(True)\n elif line == \"#else\":\n ignoreLines = ifStack[-1]\n self.log(\"Found #else -> Ignoring = \" + str(ignoreLines))\n ignoreLines = False\n else:\n self.log(\"Ignored...\")\n continue\n if line == \"#else\":\n self.log(\"Found #else\")\n ignoreLines = True\n continue\n if line == \"};\" or line == \"}\":\n self.log(\"End of scope\", level)\n return\n ifndef = re.match(r\"#ifndef (\\w+)$\", line)\n if ifndef:\n name = ifndef.group(1)\n self.log(\"Found #ifndef: \" + name, level)\n ifStack.append(False)\n ignoreLines = name in self.defines\n continue\n \n ppIf = re.match(r\"#if (!)? *(\\w+)$\", line)\n if ppIf:\n name = ppIf.group(2)\n self.log(\"Found #if: \" + name, level)\n ifStack.append(False)\n ignoreLines = not name in self.defines or ((self.defines[name] == \"1\") == (ppIf.group(1) != \"!\"))\n continue\n \n define = re.match(r\"#define (\\w+) (.*[^\\\\])$\", line)\n if define:\n name = define.group(1)\n value = define.group(2)\n self.log(\"Found define: \" + name + \" = \" + value, level)\n self.AddDefine(name, value)\n continue\n defineFunc = re.match(r\"#define (\\w+)\\(.*[^\\\\]$\", line)\n if defineFunc:\n name = defineFunc.group(1)\n self.log(\"Removed preprocessor function: \" + name, level)\n continue\n scopeStart = re.match(r\"(namespace|struct|class) (\\w+) ?(\\{|;)$\", line)\n if scopeStart:\n newScope = scopeStart.group(2)\n self.log(\"New scope: \" + newScope, level)\n if not newScope in data:\n data[newScope] = {\"__type\": \"Scope\"}\n self.addDescription(data[newScope], self.curDescription)\n self.curDescription = None\n if scopeStart.group(3) == '{':\n self.parseScope(data[newScope], curScopeInput, level + 1)\n continue\n if self.matchDescription(line, level):\n continue\n if \"=\" in line:\n asgnName = self.parseAssignment(line, data, curScopeInput, level)\n continue\n staticVarDecl = re.match(r\"static (\\w+) (\\w+);$\", line)\n if staticVarDecl:\n varType = staticVarDecl.group(1)\n name = staticVarDecl.group(2)\n self.log(\"Found static variable declaration: \" + name + \" (\" + varType+ \")\", level)\n data[name] = {\"__value\": None, \"__type\": varType}\n self.addDescription(data[name], self.curDescription)\n self.curDescription = None\n continue \n valueId = re.match(r\"value_identifier\\((\\w+), ?(\\w+), ?([^,]+)\\);$\", line)\n if valueId:\n valType = valueId.group(1)\n name = valueId.group(2)\n value = valueId.group(3)\n self.log(\"Found 
value_identifier: \" + name + \" = \" + value + \" (\" + valType+ \")\", level)\n if not \"__valID\" in data:\n data[\"__valID\"] = {}\n data[\"__valID\"][name] = {\"__value\": value, \"__type\": valType}\n self.addDescription(data[\"__valID\"][name], self.curDescription)\n self.curDescription = None\n continue\n using = re.match(r\"using ([^\\;]+);$\", line)\n if using:\n self.log(\"Ignored using \" + using.group(1), level)\n continue\n if re.match(r\"template ?<[^>]+>$\", line):\n self.log(\"Ignored template\", level)\n continue\n alias = re.match(r\"alias\\((\\w+)\\);$\", line)\n if alias:\n name = alias.group(1)\n self.log(\"Found alias \" + name, level)\n if not \"__alias\" in data:\n data[\"__alias\"] = {}\n data[\"__alias\"][name] = {}\n self.addDescription(data[\"__alias\"][name], self.curDescription)\n self.curDescription = None\n continue\n functor = re.match(r\"(\\w+) operator\\(\\)\\(\\) ?\\{ ?return ([^;]+); ?}\", line)\n if functor:\n fType = functor.group(1)\n value = functor.group(2)\n self.log(\"Found functor: \" + value + \"(\" + fType + \")\", level)\n data['__functorType'] = fType\n data['__value'] = value\n continue\n if line.startswith(\"DINLINE\") or line.startswith(\"HINLINE\") or line.startswith(\"HDINLINE\"):\n self.parseFunction(line, curScopeInput, level)\n self.log(\"Ignored function\", level)\n continue\n if line == 'private:' or line == 'protected:' or line == 'public:':\n continue\n if line.startswith('PMACC_ALIGN') or re.match(\"(const )?\\w+ \\w+(,\\w+)*;\", line):\n # Ignore variable declaration\n continue\n\n self.throwError(\"Unknown statement: \" + line)\n\n def parse(self, filePath, data):\n self.data = data\n self.curNamespaceName = \"\"\n data['__type'] = 'Scope'\n self.filePath = filePath\n with open(self.filePath, 'r') as paramFile:\n content = paramFile.read()\n # Replace \"escaped newline\" by nothing\n content = re.sub(r\"\\\\\\n\", \"\", content)\n # Remove comments\n content = re.sub(\"//.*\", \"\", content)\n content = re.sub(r\"/\\*[^\\*!].*?\\*/\", \"\", content, flags = re.DOTALL)\n # Docstrings should end the line -> Place rest on new line\n content = re.sub(r\"\\*/(.)\", r\"*/\\n\\1\", content)\n # Remove empty lines\n content = re.sub(r\"^\\s*\\n\", \"\", content)\n # Move opening braces that are on their own line onto the previous line\n content = re.sub(r\"\\n\\s*\\{\", \"{\", content)\n # Place closing braces on a new line\n content = re.sub(r\"([^\\s\\n])\\}\", r\"\\1\\n}\", content)\n # Typedefs -> using\n content = re.sub(r\"typedef ([^;]+) (\\w+);\", r\"using \\2 = \\1;\", content, flags = re.DOTALL)\n # \"# ifdef\" -> \"#ifdef\"\n content = re.sub(r\"#\\s+\", \"#\", content)\n content = re.sub(r\"(\\w+)\\s+operator\\(\\)\\(\\)\\s*\\{\\s*([^\\}\\n]+)\\s*\\}\", r\"\\1 operator()(){ \\2 }\", content, flags = re.DOTALL)\n contentArray = content.split('\\n')\n self.parseScope(data, iter(contentArray))\n \n def AddDefine(self, name, value):\n \"\"\"Add a preprocessor definition\"\"\"\n self.defines[name] = value\n \n def ParseFolder(self, paramDir):\n \"\"\"Parse all param files of a folder into one dictionary\"\"\"\n paramFiles = [os.path.join(paramDir, fileName) for fileName in os.listdir(paramDir) if fileName.endswith(\".param\")]\n data = {}\n for filePath in paramFiles:\n self.parse(filePath, data)\n return data\n \n ######################\n # Evaluation functions\n ######################\n \n def SetCurNamespace(self, namespaceName):\n \"\"\"Set the namespace name in which all future Get* calls will search\"\"\"\n if namespaceName:\n 
self.curNamespaceName = namespaceName + \"::\"\n else:\n self.curNamespaceName = ''\n \n def GetNode(self, name):\n if not name.startswith(\"::\"):\n name = self.curNamespaceName + name\n return GetNode(name, self.data)\n \n def GetValue(self, name):\n node = self.GetNode(name)\n return node['__value']\n \n def GetNumber(self, name, getFromValueIdentifier = False):\n if not name.startswith(\"::\"):\n name = self.curNamespaceName + name\n return GetNumber(name, self.data, getFromValueIdentifier = getFromValueIdentifier)\n \n def GetVector(self, name):\n if not name.startswith(\"::\"):\n name = self.curNamespaceName + name\n return GetVector(name, self.data)\n \ndef GetNode(name, mainScope):\n \"\"\"Return the node in the global scope by its name (Foo::bar)\"\"\"\n if name.startswith(\"::\"):\n name = name[2:]\n names = name.split(\"::\")\n curScope = mainScope\n for curName in names:\n if curName in curScope:\n curScope = curScope[curName]\n else:\n return None\n return curScope\n\ndef GetNodeInScope(name, scopeName, mainScope):\n \"\"\"Return (nodeName, node) in a named scope by its name\n \n Example: Bar::foobar in Foo::Foo2 might be\n Foo::Foo2::Bar::foobar, Foo::Bar::foobar or ::Bar::foobar\n \"\"\"\n if name.startswith(\"::\"):\n return GetNode(name[2:], mainScope)\n value = GetNode(scopeName + \"::\" + name, mainScope)\n while value == None:\n if not \"::\" in scopeName:\n return (None, None)\n scopeName = scopeName.rsplit(\"::\", 1)[0]\n value = GetNode(scopeName + \"::\" + name, mainScope)\n return (scopeName + \"::\" + name, value)\n\ndef GetNodeFromMemberScope(name, scopeMemberName, mainScope):\n \"\"\"Return (nodeName, node) starting the search in the scope of another member\"\"\"\n scopeName = scopeMemberName.rsplit(\"::\", 1)[0] if \"::\" in scopeMemberName else \"\"\n return GetNodeInScope(name, scopeName, mainScope)\n\ndef GetNumber(name, mainScope, node = None, lvl = 0, getFromValueIdentifier = False):\n \"\"\"Get numeric value from a node and/or name. Resolve variable references and simple arithmetics\"\"\"\n # Get node if not given\n if not node:\n if getFromValueIdentifier:\n names = name.split(\"::\")\n entryName = names[-1]\n names[-1] = \"__valID\"\n node = GetNode(\"::\".join(names), mainScope)\n if node and entryName in node:\n node = node[entryName]\n else:\n return None\n else:\n node = GetNode(name, mainScope)\n if not node:\n return None\n # Detect type\n valType = node['__type']\n if valType.startswith(\"float\"):\n isFloat = True\n elif valType.startswith(\"int\") or valType.startswith(\"uint\") or valType.startswith(\"unsigned\") or valType == \"size_t\":\n isFloat = False\n else:\n return None\n value = node[\"__value\"]\n # Check if we already have a number\n try:\n return float(value) if isFloat else int(value)\n except ValueError:\n pass\n # Limit recursion level\n if lvl > 3:\n return None\n # Support simple arithmetics\n # Regexp for floats or ints\n numRegExp = r\"(\\+|-)?\\d+(\\.\\d+)?(e(\\+|-)\\d+)?\" if isFloat else r\"(\\+|-)?\\d+\"\n # Detect 'x + y' style\n expr = re.match(\"(?P<Op1>\" + numRegExp + r\"|[\\w:]+) ?(?P<Op>[\\+\\-\\*/]) ? 
(?P<Op2>\" + numRegExp + r\"|[\\w:]+)\", value)\n if expr:\n op1 = expr.group(\"Op1\")\n op = expr.group(\"Op\")\n op2 = expr.group(\"Op2\")\n try:\n op1 = float(op1) if isFloat else int(op1)\n except ValueError:\n subName, value = GetNodeFromMemberScope(op1, name, mainScope)\n op1 = GetNumber(subName, mainScope, value, lvl + 1)\n try:\n op2 = float(op2) if isFloat else int(op2)\n except ValueError:\n subName, value = GetNodeFromMemberScope(op2, name, mainScope)\n op2 = GetNumber(subName, mainScope, value, lvl + 1)\n if op == '+':\n return op1 + op2\n if op == '-':\n return op1 - op2\n if op == '*':\n return op1 * op2\n if op == '/':\n return op1 / op2\n return None\n subName, value = GetNodeFromMemberScope(value, name, mainScope)\n return GetNumber(subName, mainScope, value, lvl + 1)\n\ndef GetVector(name, mainScope):\n \"\"\"Get a PMacc compiletime vector type as a python array\"\"\"\n value = GetNode(name, mainScope)\n if value['__type'] != \"Type\":\n return None\n vec = re.match(r\"(PMacc::)?math::CT::(\\w+)< ?([^,]+)(, ?([^,]+)(, ?([^,]+))) ?>$\", value[\"__value\"])\n if not vec:\n return None\n result = [vec.group(3)]\n if vec.group(5):\n result.append(vec.group(5))\n if vec.group(7):\n result.append(vec.group(7))\n if \"Int\" in vec.group(2):\n return [int(x) for x in result]\n else:\n return [float(x) for x in result]\n \ndef ParseParamFolder(paramDir):\n \"\"\"Parse all param files of a folder into one dictionary\"\"\"\n parser = ParamParser()\n return parser.ParseFolder(paramDir)\n\n","repo_name":"ComputationalRadiationPhysics/parataxis","sub_path":"buildSystem/ParamParser.py","file_name":"ParamParser.py","file_ext":"py","file_size_in_byte":20850,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"}{"seq_id":"40936613254","text":"import os\n\nfrom metric_providers.base import BaseMetricProvider\n\nclass CpuUtilizationProcfsSystemProvider(BaseMetricProvider):\n def __init__(self, resolution):\n super().__init__(\n metric_name='cpu_utilization_procfs_system',\n metrics={'time': int, 'value': int},\n resolution=resolution,\n unit='Ratio',\n current_dir=os.path.dirname(os.path.abspath(__file__)),\n )\n","repo_name":"green-coding-berlin/green-metrics-tool","sub_path":"metric_providers/cpu/utilization/procfs/system/provider.py","file_name":"provider.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","stars":89,"dataset":"github-code","pt":"81"}{"seq_id":"40078048557","text":"import hashlib\nimport os.path\nfrom unittest import mock\nimport uuid\n\nimport pytest\n\nfrom zigpy.config import CONF_OTA_DIR, CONF_OTA_IKEA\nimport zigpy.ota\nimport zigpy.ota.image\nimport zigpy.ota.provider as ota_p\n\nfrom .async_mock import AsyncMock, patch\nfrom .test_ota_image import image # noqa: F401\n\nMANUFACTURER_ID = 4476\nIMAGE_TYPE = mock.sentinel.image_type\n\n\n@pytest.fixture\ndef file_image_name(tmp_path, image): # noqa: F811\n def ota_img_filename(name=\"ota-image\"):\n image_file = tmp_path / (name + \"-\" + str(uuid.uuid4()))\n image_file.write_bytes(image.serialize())\n\n return str(image_file)\n\n return ota_img_filename\n\n\n@pytest.fixture\ndef file_image(file_image_name):\n img = ota_p.FileImage()\n img.file_name = file_image_name()\n img.manufacturer_id = MANUFACTURER_ID\n img.image_type = IMAGE_TYPE\n return img\n\n\n@pytest.fixture\ndef file_prov():\n p = ota_p.FileStore()\n p.enable()\n return p\n\n\n@pytest.fixture\ndef file_image_with_version(file_image_name):\n def img(version=100, 
image_type=IMAGE_TYPE):\n img = ota_p.FileImage()\n img.file_name = file_image_name()\n img.header.file_version = version\n img.header.manufacturer_id = MANUFACTURER_ID\n img.header.image_type = image_type\n return img\n\n return img\n\n\n@pytest.fixture\ndef ikea_image_with_version():\n def img(image_type=IMAGE_TYPE):\n img = zigpy.ota.provider.IKEAImage(\n image_type=image_type,\n binary_url=mock.sentinel.url,\n sha3_256_sum=mock.sentinel.sha3_256_sum,\n )\n return img\n\n return img\n\n\n@pytest.fixture\ndef ikea_image(ikea_image_with_version):\n return ikea_image_with_version()\n\n\n@pytest.fixture\ndef basic_prov():\n class Prov(ota_p.Basic):\n async def initialize_provider(self, ota_config):\n return None\n\n async def refresh_firmware_list(self):\n return None\n\n p = Prov()\n p.enable()\n return p\n\n\n@pytest.fixture\ndef ikea_prov():\n p = ota_p.Trådfri()\n p.enable()\n return p\n\n\n@pytest.fixture\ndef key():\n return zigpy.ota.image.ImageKey(MANUFACTURER_ID, IMAGE_TYPE)\n\n\ndef test_expiration(ikea_prov):\n # if we never refreshed firmware list then we should be expired\n assert ikea_prov.expired\n\n\nasync def test_initialize_provider(basic_prov):\n await basic_prov.initialize_provider(mock.sentinel.ota_dir)\n\n\nasync def test_basic_get_image(basic_prov, key):\n image = mock.MagicMock() # noqa: F811\n image.fetch_image = AsyncMock(return_value=mock.sentinel.image)\n basic_prov._cache = mock.MagicMock()\n basic_prov._cache.__getitem__.return_value = image\n basic_prov.refresh_firmware_list = AsyncMock()\n\n # check when disabled\n basic_prov.disable()\n r = await basic_prov.get_image(key)\n assert r is None\n assert basic_prov.refresh_firmware_list.call_count == 0\n assert basic_prov._cache.__getitem__.call_count == 0\n assert image.fetch_image.call_count == 0\n\n # check with locked image\n basic_prov.enable()\n await basic_prov._locks[key].acquire()\n\n r = await basic_prov.get_image(key)\n assert r is None\n assert basic_prov.refresh_firmware_list.call_count == 0\n assert basic_prov._cache.__getitem__.call_count == 0\n assert image.fetch_image.call_count == 0\n\n # unlocked image\n basic_prov._locks.pop(key)\n\n r = await basic_prov.get_image(key)\n assert r is mock.sentinel.image\n assert basic_prov.refresh_firmware_list.call_count == 1\n assert basic_prov._cache.__getitem__.call_count == 1\n assert basic_prov._cache.__getitem__.call_args[0][0] == key\n assert image.fetch_image.call_count == 1\n\n\ndef test_basic_enable_provider(key, basic_prov):\n assert basic_prov.is_enabled is True\n\n basic_prov.disable()\n assert basic_prov.is_enabled is False\n\n basic_prov.enable()\n assert basic_prov.is_enabled is True\n\n\nasync def test_basic_get_image_filtered(basic_prov, key):\n image = mock.MagicMock() # noqa: F811\n image.fetch_image = AsyncMock(return_value=mock.sentinel.image)\n basic_prov._cache = mock.MagicMock()\n basic_prov._cache.__getitem__.return_value = image\n basic_prov.refresh_firmware_list = AsyncMock()\n basic_prov.filter_get_image = AsyncMock(return_value=True)\n\n r = await basic_prov.get_image(key)\n assert r is None\n assert basic_prov.filter_get_image.call_count == 1\n assert basic_prov.filter_get_image.call_args[0][0] == key\n assert basic_prov.refresh_firmware_list.call_count == 0\n assert basic_prov._cache.__getitem__.call_count == 0\n assert image.fetch_image.call_count == 0\n\n\nasync def test_ikea_init_ota_dir(ikea_prov):\n ikea_prov.enable = mock.MagicMock()\n ikea_prov.refresh_firmware_list = AsyncMock()\n\n r = await 
ikea_prov.initialize_provider({CONF_OTA_IKEA: True})\n assert r is None\n assert ikea_prov.enable.call_count == 1\n assert ikea_prov.refresh_firmware_list.call_count == 1\n\n\nasync def test_ikea_get_image_no_cache(ikea_prov, ikea_image):\n ikea_image.fetch_image = AsyncMock(return_value=mock.sentinel.image)\n ikea_prov._cache = mock.MagicMock()\n ikea_prov._cache.__getitem__.side_effect = KeyError()\n ikea_prov.refresh_firmware_list = AsyncMock()\n\n non_ikea = zigpy.ota.image.ImageKey(\n ota_p.Trådfri.MANUFACTURER_ID + 1,\n IMAGE_TYPE,\n )\n\n # Non IKEA manufacturer_id, don't bother doing anything at all\n r = await ikea_prov.get_image(non_ikea)\n assert r is None\n assert ikea_prov._cache.__getitem__.call_count == 0\n assert ikea_prov.refresh_firmware_list.call_count == 0\n assert non_ikea not in ikea_prov._cache\n\n # IKEA manufacturer_id, but not in cache\n assert ikea_image.key not in ikea_prov._cache\n r = await ikea_prov.get_image(ikea_image.key)\n assert r is None\n assert ikea_prov.refresh_firmware_list.call_count == 1\n assert ikea_prov._cache.__getitem__.call_count == 1\n assert ikea_image.fetch_image.call_count == 0\n\n\nasync def test_ikea_get_image(ikea_prov, key, ikea_image):\n ikea_image.fetch_image = AsyncMock(return_value=mock.sentinel.image)\n ikea_prov._cache = mock.MagicMock()\n ikea_prov._cache.__getitem__.return_value = ikea_image\n ikea_prov.refresh_firmware_list = AsyncMock()\n\n r = await ikea_prov.get_image(key)\n assert r is mock.sentinel.image\n assert ikea_prov._cache.__getitem__.call_count == 1\n assert ikea_prov._cache.__getitem__.call_args[0][0] == ikea_image.key\n assert ikea_image.fetch_image.call_count == 1\n\n\n@patch(\"aiohttp.ClientSession.get\")\nasync def test_ikea_refresh_list(mock_get, ikea_prov):\n mock_get.return_value.__aenter__.return_value.json = AsyncMock(\n side_effect=[\n [\n {\n \"fw_image_type\": 4557,\n \"fw_type\": 2,\n \"fw_sha3_256\": \"896edfb0a9d8314fb49d44fb11dc91fb5bb55e2ee1f793d53189cb13f884e13c\",\n \"fw_binary_url\": \"https://fw.ota.homesmart.ikea.com/files/rodret-dimmer-soc_release_prod_v16777287_9812b73c-b02e-4678-b737-d21251a34fd2.ota\",\n },\n {\n \"fw_update_prio\": 5,\n \"fw_filesize\": 242071587,\n \"fw_type\": 3,\n \"fw_hotfix_version\": 1,\n \"fw_major_version\": 2,\n \"fw_binary_checksum\": \"8c17b203bede63ea53e36d345b628cc7f2faecc18d4406458a12f8f25e54718a24495d30a03fe3244799bfaa50de72d99e6c0d2f7553a8465e37c10c22ba75fc\",\n \"fw_minor_version\": 453,\n \"fw_sha3_256\": \"657ed8fd0f6e5e6700acdc6afd64829cebacb1dd03b3f5453258b4bd77b674ed\",\n \"fw_binary_url\": \"https://fw.ota.homesmart.ikea.com/files/DIRIGERA_release_prod_v2.453.1_348f0dce-3c34-49a2-b64c-a1caa202104c.raucb\",\n },\n {\n \"fw_image_type\": 4552,\n \"fw_type\": 2,\n \"fw_sha3_256\": \"1b5fbea79c5b41864352a938a90ad25d9a0118054bf1cdc0314ef9636a60143a\",\n \"fw_binary_url\": \"https://fw.ota.homesmart.ikea.com/files/tradfri-motion-sensor2_release_prod_v604241925_8afa2f7c-19c3-4ddf-a96c-233714179022.ota\",\n },\n ]\n ]\n )\n mock_get.return_value.__aenter__.return_value.status = 200\n mock_get.return_value.__aenter__.return_value.reason = \"OK\"\n\n await ikea_prov.refresh_firmware_list()\n assert mock_get.call_count == 1\n assert len(ikea_prov._cache) == 2\n\n image1 = ikea_prov._cache[\n zigpy.ota.image.ImageKey(ota_p.Trådfri.MANUFACTURER_ID, 4557)\n ]\n image2 = ikea_prov._cache[\n zigpy.ota.image.ImageKey(ota_p.Trådfri.MANUFACTURER_ID, 4552)\n ]\n\n assert image1 == ota_p.IKEAImage(\n image_type=4557,\n 
binary_url=\"https://fw.ota.homesmart.ikea.com/files/rodret-dimmer-soc_release_prod_v16777287_9812b73c-b02e-4678-b737-d21251a34fd2.ota\",\n sha3_256_sum=\"896edfb0a9d8314fb49d44fb11dc91fb5bb55e2ee1f793d53189cb13f884e13c\",\n )\n\n assert image1.version == 16777287\n\n assert image2 == ota_p.IKEAImage(\n image_type=4552,\n binary_url=\"https://fw.ota.homesmart.ikea.com/files/tradfri-motion-sensor2_release_prod_v604241925_8afa2f7c-19c3-4ddf-a96c-233714179022.ota\",\n sha3_256_sum=\"1b5fbea79c5b41864352a938a90ad25d9a0118054bf1cdc0314ef9636a60143a\",\n )\n assert image2.version == 604241925\n\n assert not ikea_prov.expired\n\n\ndef test_ikea_bad_version():\n image = ota_p.IKEAImage(\n image_type=4552,\n binary_url=\"https://fw.ota.homesmart.ikea.com/files/DIRIGERA_release_prod_v2.453.1_348f0dce-3c34-49a2-b64c-a1caa202104c.raucb\",\n sha3_256_sum=\"1b5fbea79c5b41864352a938a90ad25d9a0118054bf1cdc0314ef9636a60143a\",\n )\n\n with pytest.raises(ValueError):\n image.version\n\n\n@patch(\"aiohttp.ClientSession.get\")\nasync def test_ikea_refresh_list_locked(mock_get, ikea_prov):\n await ikea_prov._locks[ota_p.LOCK_REFRESH].acquire()\n\n mock_get.return_value.__aenter__.return_value.json = AsyncMock(side_effect=[[]])\n mock_get.return_value.__aenter__.return_value.status = 434\n mock_get.return_value.__aenter__.return_value.reason = \"UNK\"\n\n await ikea_prov.refresh_firmware_list()\n assert mock_get.call_count == 0\n\n\n@patch(\"aiohttp.ClientSession.get\")\nasync def test_ikea_refresh_list_failed(mock_get, ikea_prov):\n mock_get.return_value.__aenter__.return_value.json = AsyncMock(side_effect=[[]])\n\n mock_get.return_value.__aenter__.return_value.status = 434\n mock_get.return_value.__aenter__.return_value.reason = \"UNK\"\n\n with patch.object(ikea_prov, \"update_expiration\") as update_exp:\n await ikea_prov.refresh_firmware_list()\n assert mock_get.call_count == 1\n assert update_exp.call_count == 0\n\n\n@patch(\"aiohttp.ClientSession.get\")\nasync def test_ikea_fetch_image(mock_get, ikea_image_with_version):\n data = bytes.fromhex(\n \"1ef1ee0b0001380000007c11012178563412020054657374204f544120496d61\"\n \"676500000000000000000000000000000000000042000000\"\n )\n sub_el = b\"\\x00\\x00\\x04\\x00\\x00\\x00abcd\"\n\n container = bytearray(b\"\\x00This is extra data\\x00\\x55\\xaa\" * 100)\n container[0:4] = b\"NGIS\"\n container[16:20] = (512).to_bytes(4, \"little\") # offset\n container[20:24] = len(data + sub_el).to_bytes(4, \"little\") # size\n container[512 : 512 + len(data) + len(sub_el)] = data + sub_el\n\n img = ikea_image_with_version(image_type=0x2101)\n img.url = mock.sentinel.url\n img.sha3_256_sum = hashlib.sha3_256(container).hexdigest()\n\n mock_get.return_value.__aenter__.return_value.read = AsyncMock(\n side_effect=[container]\n )\n\n r = await img.fetch_image()\n assert isinstance(r, zigpy.ota.image.OTAImage)\n assert mock_get.call_count == 1\n assert mock_get.call_args[0][0] == mock.sentinel.url\n assert r.serialize() == data + sub_el\n\n\ndef test_file_image_key(key):\n fimg = ota_p.FileImage()\n fimg.header.manufacturer_id = MANUFACTURER_ID\n fimg.header.image_type = IMAGE_TYPE\n fimg.header.file_version = mock.sentinel.version\n\n assert fimg.key == key\n assert fimg.version == mock.sentinel.version\n\n\ndef test_filestore_scan(file_image_name):\n file_name = file_image_name()\n r = ota_p.FileImage.scan_image(file_name)\n\n assert isinstance(r, ota_p.FileImage)\n assert r.file_name == file_name\n\n\ndef test_filestore_scan_exc(file_image_name):\n ota_file = 
file_image_name()\n with patch(\"builtins.open\", mock.mock_open()) as mock_file:\n mock_file.side_effect = OSError()\n\n r = ota_p.FileImage.scan_image(ota_file)\n assert r is None\n assert mock_file.call_count == 1\n assert mock_file.call_args[0][0] == ota_file\n\n with patch(\"builtins.open\", mock.mock_open()) as mock_file:\n mock_file.side_effect = ValueError()\n\n r = ota_p.FileImage.scan_image(ota_file)\n assert r is None\n assert mock_file.call_count == 1\n assert mock_file.call_args[0][0] == ota_file\n\n\ndef test_filestore_scan_uncaught_exc(file_image_name):\n ota_file = file_image_name()\n with pytest.raises(RuntimeError):\n with patch(\"builtins.open\", mock.mock_open()) as mock_file:\n mock_file.side_effect = RuntimeError()\n\n ota_p.FileImage.scan_image(ota_file)\n assert mock_file.call_count == 1\n assert mock_file.call_args[0][0] == ota_file\n\n\nasync def test_filestore_fetch_image(file_image):\n r = await ota_p.FileImage.fetch_image(file_image)\n\n assert isinstance(r, zigpy.ota.image.OTAImage)\n\n\nasync def test_filestore_fetch_image_exc(file_image):\n with mock.patch(\"builtins.open\", mock.mock_open()) as mock_file:\n mock_file.side_effect = OSError()\n\n r = await ota_p.FileImage.fetch_image(file_image)\n assert r is None\n assert mock_file.call_count == 1\n assert mock_file.call_args[0][0] == file_image.file_name\n\n with mock.patch(\"builtins.open\", mock.mock_open()) as mock_file:\n mock_file.side_effect = ValueError()\n\n r = await ota_p.FileImage.fetch_image(file_image)\n assert r is None\n assert mock_file.call_count == 1\n assert mock_file.call_args[0][0] == file_image.file_name\n\n\nasync def test_filestore_fetch_uncaught_exc(file_image):\n with pytest.raises(RuntimeError):\n with mock.patch(\"builtins.open\", mock.mock_open()) as mock_file:\n mock_file.side_effect = RuntimeError()\n\n await ota_p.FileImage.fetch_image(file_image)\n assert mock_file.call_count == 1\n assert mock_file.call_args[0][0] == file_image.file_name\n\n\ndef test_filestore_validate_ota_dir(tmp_path):\n file_prov = ota_p.FileStore()\n\n assert file_prov.validate_ota_dir(None) is None\n assert file_prov.validate_ota_dir(str(tmp_path)) == str(tmp_path)\n\n # non existing dir\n non_existing = tmp_path / \"non_existing\"\n assert file_prov.validate_ota_dir(str(non_existing)) is None\n\n # file instead of dir\n file_path = tmp_path / \"file\"\n file_path.touch()\n\n assert file_prov.validate_ota_dir(str(file_path)) is None\n\n\nasync def test_filestore_init_provider_success(file_prov):\n file_prov.enable = mock.MagicMock()\n file_prov.refresh_firmware_list = AsyncMock()\n file_prov.validate_ota_dir = mock.MagicMock(return_value=mock.sentinel.ota_dir)\n\n r = await file_prov.initialize_provider({CONF_OTA_DIR: mock.sentinel.ota_dir})\n assert r is None\n assert file_prov.validate_ota_dir.call_count == 1\n assert file_prov.validate_ota_dir.call_args[0][0] == mock.sentinel.ota_dir\n assert file_prov.enable.call_count == 1\n assert file_prov.refresh_firmware_list.call_count == 1\n\n\nasync def test_filestore_init_provider_failure(file_prov):\n file_prov.enable = mock.MagicMock()\n file_prov.refresh_firmware_list = AsyncMock()\n file_prov.validate_ota_dir = mock.MagicMock(return_value=None)\n\n r = await file_prov.initialize_provider({CONF_OTA_DIR: mock.sentinel.ota_dir})\n assert r is None\n assert file_prov.validate_ota_dir.call_count == 1\n assert file_prov.validate_ota_dir.call_args[0][0] == mock.sentinel.ota_dir\n assert file_prov.enable.call_count == 0\n assert 
file_prov.refresh_firmware_list.call_count == 0\n\n\nasync def test_filestore_refresh_firmware_list(\n file_prov, file_image_with_version, monkeypatch\n):\n image_1 = file_image_with_version(image_type=mock.sentinel.image_1)\n image_2 = file_image_with_version(image_type=mock.sentinel.image_2)\n _ = file_image_with_version(image_type=mock.sentinel.image_3)\n images = (image_1, None, image_2)\n ota_dir = os.path.dirname(image_1.file_name)\n\n file_image_mock = mock.MagicMock()\n file_image_mock.scan_image.side_effect = images\n monkeypatch.setattr(ota_p, \"FileImage\", file_image_mock)\n file_prov.update_expiration = mock.MagicMock()\n\n r = await file_prov.refresh_firmware_list()\n assert r is None\n assert file_image_mock.scan_image.call_count == 0\n assert file_prov.update_expiration.call_count == 0\n assert len(file_prov._cache) == 0\n\n # check with an ota_dir this time\n file_prov._ota_dir = ota_dir\n for file in ota_p.SKIP_OTA_FILES:\n with open(os.path.join(ota_dir, file), mode=\"w+\"):\n pass\n r = await file_prov.refresh_firmware_list()\n assert r is None\n assert file_image_mock.scan_image.call_count == len(images)\n assert file_prov.update_expiration.call_count == 1\n assert len(file_prov._cache) == len([img for img in images if img])\n\n\nasync def test_filestore_refresh_firmware_list_2(\n file_prov, file_image_with_version, monkeypatch\n):\n \"\"\"Test two files with same key and the same version.\"\"\"\n ver = 100\n image_1 = file_image_with_version(version=ver)\n image_2 = file_image_with_version(version=ver)\n\n ota_dir = os.path.dirname(image_1.file_name)\n\n file_image_mock = mock.MagicMock()\n file_image_mock.scan_image.side_effect = [image_1, image_2]\n monkeypatch.setattr(ota_p, \"FileImage\", file_image_mock)\n file_prov.update_expiration = mock.MagicMock()\n\n file_prov._ota_dir = ota_dir\n r = await file_prov.refresh_firmware_list()\n assert r is None\n assert file_image_mock.scan_image.call_count == 2\n assert file_prov.update_expiration.call_count == 1\n assert len(file_prov._cache) == 1\n assert file_prov._cache[image_1.key].version == ver\n\n\nasync def test_filestore_refresh_firmware_list_3(\n file_prov, file_image_with_version, monkeypatch\n):\n \"\"\"Test two files with the same key, older, then newer versions.\"\"\"\n ver = 100\n image_1 = file_image_with_version(version=(ver - 1))\n image_2 = file_image_with_version(version=ver)\n\n ota_dir = os.path.dirname(image_1.file_name)\n\n file_image_mock = mock.MagicMock()\n file_image_mock.scan_image.side_effect = [image_1, image_2]\n monkeypatch.setattr(ota_p, \"FileImage\", file_image_mock)\n file_prov.update_expiration = mock.MagicMock()\n\n file_prov._ota_dir = ota_dir\n r = await file_prov.refresh_firmware_list()\n assert r is None\n assert file_image_mock.scan_image.call_count == 2\n assert file_prov.update_expiration.call_count == 1\n assert len(file_prov._cache) == 1\n assert file_prov._cache[image_1.key].version == ver\n\n\nasync def test_filestore_refresh_firmware_list_4(\n file_prov, file_image_with_version, monkeypatch\n):\n \"\"\"Test two files with the same key, newer, then older versions.\"\"\"\n ver = 100\n image_1 = file_image_with_version(version=ver)\n image_2 = file_image_with_version(version=(ver - 1))\n\n ota_dir = os.path.dirname(image_1.file_name)\n\n file_image_mock = mock.MagicMock()\n file_image_mock.scan_image.side_effect = [image_1, image_2]\n monkeypatch.setattr(ota_p, \"FileImage\", file_image_mock)\n file_prov.update_expiration = mock.MagicMock()\n\n file_prov._ota_dir = ota_dir\n r 
= await file_prov.refresh_firmware_list()\n    assert r is None\n    assert file_image_mock.scan_image.call_count == 2\n    assert file_prov.update_expiration.call_count == 1\n    assert len(file_prov._cache) == 1\n    assert file_prov._cache[image_1.key].version == ver\n","repo_name":"zigpy/zigpy","sub_path":"tests/test_ota_provider.py","file_name":"test_ota_provider.py","file_ext":"py","file_size_in_byte":20015,"program_lang":"python","lang":"en","doc_type":"code","stars":691,"dataset":"github-code","pt":"81"}{"seq_id":"74780744265","text":"\nfrom time import sleep\nfrom time import perf_counter\nimport numpy as np\n\ndef time_R():\n    port = [50080, 50075, 50090, 50085, 50070]\n    np.random.choice(port)\n    x = np.random.choice( port, size=3, replace=False )\n    print(x)\n\n    delay = {\n        50070 : 0.026,\n        50075 : 0.240,\n        50080 : 0.040,\n        50085 : 0.047,\n        50090 : 0.075\n    }\n    r = []\n    for y in x :\n        z = delay.get(y)\n        r.append(z)\n    r.sort()\n    print(\"delay times for this run:\", r)\n    p_wait_sec = 0\n    for sec in r[:]:\n        sec = r[0]\n        r.remove(sec)\n        wait_sec = sec - p_wait_sec\n        print(r)\n        print(\"%4f ping-delay\" % wait_sec)\n        t = perf_counter()\n        until = perf_counter() + wait_sec\n        while perf_counter() < until:\n            pass\n        keys = [k for k, v in delay.items() if v == sec]\n        print(keys)\n        end_time = perf_counter() - t\n        print(end_time)\n        p_wait_sec = sec\n\n\"\"\"\ndef transfer_port(self, sec):\n    people_dict.keys()[people_dict.values().index(search_age)]\n    self.delay.keys()[self.delay.valus().index(sec)]\n\"\"\"\n\ntime_R()","repo_name":"cit-fujihalab/Cross_reference","sub_path":"CSN_ServerNode/delay_test_v3.py","file_name":"delay_test_v3.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"23890650760","text":"\"\"\"\r\nLab 1, exercise 2: Statistics extraction\r\nBioinformatics @ Politecnico di Torino\r\nAuthor: Silvia Giammarinaro\r\n\r\n\"\"\"\r\n\r\n# python ex2.py input_file output_file GC_threshold\r\n# The input file is a fasta file\r\n\r\n# Example\r\n# python ex2.py file1.fa output_ex2.txt 1\r\n\r\nimport sys \r\n\r\nif __name__ == \"__main__\":\r\n    \r\n    input_file = sys.argv[1]\r\n    output_file = sys.argv[2]\r\n    GC_threshold = int(sys.argv[3])\r\n    counterA = 0\r\n    counterT = 0\r\n    counterC = 0\r\n    counterG = 0\r\n    readsLowComplexitySeq = 0\r\n    dictReadsGCCouples = {}\r\n\r\n    with open(input_file, 'r') as lines:\r\n        for i, line in enumerate(lines):\r\n            if i % 2 == 1: #sequence line\r\n                counterA += line.count('A')\r\n                counterT += line.count('T')\r\n                counterC += line.count('C')\r\n                counterG += line.count('G')\r\n                if 'AAAAA' in line or 'TTTTT' in line or 'CCCCC' in line or 'GGGGG' in line: \r\n                    #one low complexity sequence found, update counter\r\n                    readsLowComplexitySeq +=1\r\n                GCcouples = line.count('GC')\r\n                if GCcouples > GC_threshold:\r\n                    dictReadsGCCouples[readID] = GCcouples\r\n            else:\r\n                readID = line[1:-1]\r\n\r\n    # print statistics on output file\r\n    with open(output_file, 'w') as f:\r\n        f.write(\"STATISTICS RESULTS \\n\")\r\n        f.write(\"Number of A bases: \" + str(counterA) + \"\\n\")\r\n        f.write(\"Number of T bases: \" + str(counterT) + \"\\n\")\r\n        f.write(\"Number of C bases: \" + str(counterC) + \"\\n\")\r\n        f.write(\"Number of G bases: \" + str(counterG) + \"\\n\")\r\n        f.write(\"Number of reads with at least one low complexity sequence: \" + str(readsLowComplexitySeq) + \"\\n\")\r\n        f.write(\"Number of reads with GC couples higher than the threshold \" + str(GC_threshold) + \": \" + 
str(len(dictReadsGCCouples)) + \"\\n\")\r\n for key, value in dictReadsGCCouples.items():\r\n f.write(\"Read ID: \" + key + \", number of GC couples: \" + str(value) + \"\\n\")\r\n\r\n\r\n \r\n\r\n\r\n \r\n\r\n","repo_name":"sigeek/bioinformatics-labs","sub_path":"lab1/ex2/ex2.py","file_name":"ex2.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21220640144","text":"import sys, os, re, math\r\nfrom collections import OrderedDict\r\nimport scalmc_driver\r\nimport time\r\nfrom experiments import Result\r\nimport matplotlib.pyplot as pyplot\r\nimport numpy\r\n\r\ntry:\r\n import IPython # for debugging\r\nexcept:\r\n pass\r\n\r\noptions = {}\r\nlogfile = sys.stderr\r\n\r\noptions[\"use_py33_subprocess\"] = True\r\ntry:\r\n sys.path.append(\"util/subprocess32-py3.3-backport\")\r\nexcept ImportError:\r\n options[\"use_py33_subprocess\"] = False\r\noptions[\"use_py33_subprocess\"] = False # TODO remove this once fixing _posixsubprocess import issues\r\n\r\nif options[\"use_py33_subprocess\"]:\r\n import subprocess32 as subprocess\r\nelse:\r\n import subprocess\r\n\r\nDEFAULT_DELTA = 0.2 # 1-0.25 = 0.75 is default tolerance in ApproxMC source code (it's inverted there, listed as delta when really it means 1-delta)\r\nDELTA = \"delta\"\r\nEPSILON = \"epsilon\"\r\n\r\nresults_dict = OrderedDict() # this will be populated by the script, it will map experiment name (filename) to a dictionary, and each of those dictionaries will map tool name to a list of measurements\r\n\r\n# Recursively traverse directory structure, adding files to the list\r\n# Or looking for files with a .c extension in each directory\r\n# and subdirectory\r\n# \r\n# FIXME: doesn't currently handle infinite symlink indirection\r\ndef get_files_from_path(arg, acc=OrderedDict(), depth=0):\r\n if os.path.isdir(arg):\r\n for path in os.listdir(arg):\r\n full_path = os.path.join(arg, path)\r\n if options[\"depth\"] < 0 or (options[\"depth\"] > 0 and depth < options[\"depth\"]):\r\n if os.path.isdir(full_path):\r\n get_files_from_path(full_path, acc, depth=depth+1)\r\n elif os.path.isfile(full_path):\r\n ext = \".c\"\r\n if options[\"cnf\"]:\r\n ext = \".cnf\"\r\n split = os.path.basename(path).split(ext)\r\n if len(split) is 2 and not split[1].strip() and not path[0] is \".\":\r\n assert(full_path not in acc)\r\n acc[full_path] = True\r\n elif os.path.isfile(arg):\r\n assert(arg not in acc)\r\n acc[arg] = True\r\n return acc\r\n\r\ndef run_scalmc(filename, epsilon, delta):\r\n #(time, out, err) = shell_script(SCALMC, [filename])\r\n cur_time = time.time()\r\n (out, err) = scalmc_driver.run_scalmc([filename], epsilon, delta)\r\n total_time = time.time() - cur_time\r\n # print(\"=========\")\r\n # print(out)\r\n # print(\"=========\")\r\n # print(err)\r\n # print(\"=========\")\r\n \r\n \r\n \r\n timeout = False\r\n try:\r\n [multiplier, power] = out.split(\"Number of solutions is:\")[1].split(\" x \")\r\n [base, exponent] = power.split(\"^\")\r\n multiplier = int(multiplier)\r\n base = int(base)\r\n exponent = int(exponent)\r\n solutions = multiplier * base ** exponent\r\n info_flow = math.log(solutions, 2)\r\n except IndexError: # this means there was a timeout or it's unsat\r\n if \"The input formula is unsatisfiable.\" in out:\r\n info_flow = \"UNSAT\"\r\n else: # timeout, and sadly scalmc doesn't give a partial count\r\n info_flow = None\r\n timeout = True\r\n return (total_time, info_flow, timeout)\r\n\r\ndef 
run_exp(exp_filename, epsilon, delta):\r\n logfile.write(\"Now running experiment \" + exp_filename + \" epsilon=\" + str(epsilon) + \" delta=\" + str(delta) + \"\\n\")\r\n # if options.cnf isn't set, then run CBMC first to generate CNFs then do this\r\n if exp_filename not in results_dict:\r\n results_dict[exp_filename] = {}\r\n if (epsilon, delta) not in results_dict[exp_filename]:\r\n results_dict[exp_filename][(epsilon, delta)] = []\r\n if options[\"cnf\"]:\r\n logfile.write(\" Running scalmc... \")\r\n # run scalmc\r\n (time, info_flow, timeout) = run_scalmc(exp_filename, epsilon, delta)\r\n scalmc_result = Result(time, info_flow, exp_filename, timeout)\r\n logfile.write(\"done: \" + str(scalmc_result) + \"\\n\")\r\n results_dict[exp_filename][(epsilon, delta)].append(scalmc_result)\r\n \r\n # if all the observations are empty, then this experiment failed for some reason, so remove it? or put a special value here\r\n empty = True\r\n for (e, d) in results_dict[exp_filename]:\r\n if len(results_dict[exp_filename][(e, d)]) > 0:\r\n empty = False\r\n break\r\n if empty:\r\n del results_dict[exp_filename] # or put a special value instead?\r\n\r\n\r\ndef main(argv):\r\n # Deal with program arguments\r\n def usage():\r\n print(\"Usage: \" + argv[0] + \" experiment-list [depth=n] [cnf=true/false]\")\r\n print(\" experiment: a mixed list of files and directories, looks for .c files inside each directory specified\")\r\n print(\" depth: maximum directory depth to recurse, \\\"no limit\\\" if negative value, \\\"max\\\", or \\\"inf\\\" is passed, no recursion if 0 is passed (default: 8)\" )\r\n print(\" cnf: CNF mode -- will perform only model counting (default: true), valid modes: (false, f, 0)\" )\r\n print(\" runs: The integer number of times to run the tools on each experiment (default: 1)\")\r\n print(\" timeout: The timeout for each experiment, in an integral number of seconds -- no timeout if omitted (default: no timeout)\")\r\n \r\n if len(argv) < 2:\r\n usage()\r\n exit(1)\r\n \r\n global options\r\n # Maps each keyword argument to a function that converts the argument to a python value of the appropriate type\r\n kwarg_type_converters = \\\r\n {\"depth\":\r\n lambda v: int(v) if v.lower() not in [\"inf\", \"max\"] and int(v) > 0 else -1,\r\n \"cnf\":\r\n lambda v: v.lower() not in [\"f\", \"false\", \"0\"],\r\n \"timeout\":\r\n lambda v: int(float(v)) if round(float(v)) == int(float(v)) and int(float(v)) > 0 else None,\r\n \"runs\":\r\n lambda v: int(v) if int(v) > 1 else 1}\r\n kwargs={\"depth\": 8, \"cnf\": True} # default values\r\n posargs = []\r\n for arg in argv:\r\n is_kwarg = False\r\n for kwarg in kwarg_type_converters:\r\n if arg.lower().startswith(kwarg + \"=\"):\r\n is_kwarg = True\r\n if not is_kwarg:\r\n posargs.append(arg)\r\n else:\r\n split = arg.lower().split(\"=\")\r\n kwargs[split[0]] = kwarg_type_converters[split[0]]((split[1]))\r\n \r\n if len(posargs) < 2:\r\n usage()\r\n exit(1)\r\n \r\n options = kwargs\r\n # Resolve the list of experiments\r\n \r\n ext = \".c\"\r\n if options[\"cnf\"]:\r\n ext = \"cnf\"\r\n files = get_files_from_path(posargs[1])\r\n for arg in posargs[2:]:\r\n files.update(get_files_from_path(arg))\r\n \r\n if \"timeout\" not in options:\r\n options[\"timeout\"] = None\r\n \r\n if \"runs\" not in options:\r\n options[\"runs\"] = 1\r\n #print(files)\r\n #print(len(files))\r\n \r\n # Now the main program, run these experiments\r\n step = 0.05\r\n epsilon_range = reversed([j*step for j in range(1, int(1.0/step)+1, 1)])\r\n #delta_range = 
[j*step for j in range(1, int(1.0/step)+1, 1)]\r\n #DEFAULT_DELTA = None\r\n delta_range = [DEFAULT_DELTA]\r\n \r\n for exp in files:\r\n for i in xrange(options[\"runs\"]):\r\n for epsilon in epsilon_range:\r\n for delta in delta_range:\r\n run_exp(exp, epsilon, delta)\r\n \r\n # Now output the results\r\n print(\"+++++++++++++++++++++++++\")\r\n print(\"+++++++++++++++++++++++++\")\r\n print(\"+++++++++++++++++++++++++\")\r\n print(\"+++++ RESULTS +++++\")\r\n print(\"+++++++++++++++++++++++++\")\r\n print(\"+++++++++++++++++++++++++\")\r\n print(\"+++++++++++++++++++++++++\")\r\n print(\"\")\r\n \r\n for exp in results_dict:\r\n print(\"\\nExperiment \" + exp + \":\")\r\n for (epsilon, delta) in results_dict[exp]:\r\n print(\" Results for epsilon=\" + str(epsilon) + \" delta=\" + str(delta) + \":\")\r\n for result in results_dict[exp][(epsilon, delta)]:\r\n print(\" \" + str(result))\r\n sys.stdout.flush()\r\n \r\n \r\n # TODO: assumes single entry, not a big deal to loop through each exp though...\r\n # TODO: assumes single measurement for each -- if more needed in the future, do the statistics BEFORE, and put it in the variable called results (i.e. if want median replace list of >1 Result obj with a list containing a single Result obj that represents the median)\r\n \r\n # generate new results after taking the time-average\r\n \r\n results = results_dict[list(results_dict)[0]]\r\n x = [i for (i, j) in results if j == DEFAULT_DELTA]\r\n \r\n #y = [results[(i,j)][0].time for (i, j) in results if j == DEFAULT_DELTA] # assumes only a single observation\r\n y = [numpy.mean([result.time for result in results[(i,j)]]) for (i,j) in results if j == DEFAULT_DELTA]\r\n \r\n pyplot.plot(x,y)\r\n pyplot.savefig(\"tmp/plot.png\")\r\n #pyplot.show()\r\n \r\n for (epsilon, delta) in sorted(results):\r\n if delta == DEFAULT_DELTA:\r\n print(\"e=%.2f d=%.2f: %.4f\" % (float(epsilon), float(delta), float(results[(epsilon, delta)][0].time)))\r\n \r\n \r\n\r\nif __name__ == \"__main__\":\r\n main(sys.argv)","repo_name":"approxflow/approxflow","sub_path":"experiment-tools/experiments_precision_time.py","file_name":"experiments_precision_time.py","file_ext":"py","file_size_in_byte":8581,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"74661663304","text":"# -*- coding: utf-8 -*-\n\"\"\"\n# @Time : 5/18/21 \n\n# @Author : Zhaopu Teng\n\"\"\"\nimport collections\nimport heapq\nfrom typing import List\n\n\ndef topKFrequent(nums: List[int], k: int) -> List[int]:\n count = {}\n for i in nums:\n count.setdefault(i, 0)\n count[i] += 1\n heap = [(val, key) for key, val in count.items()]\n return [x[1] for x in heapq.nlargest(k, heap)]\n\n\nnums = [4, 4, 4, 5, 5, 6]\ntopKFrequent(nums, 3)\n","repo_name":"Zhaoput1/Python","sub_path":"Leetcode/highfre/58_347topKFrequent.py","file_name":"58_347topKFrequent.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74204586504","text":"import tensorflow as tf\n\nmap_fn = tf.map_fn\n\ndef build_LSTM_simple_model(n_features, num_noise_params, max_sequence_length, initial_state=0, batchsize = 32, seed = 42, train = True):\n\t\n\tTINY = 1e-6 \n\t\n\t# Model architecture params\n\tLEARNING_RATE = 0.01\n\tdropout_rate_input = 0.2\n\tdropout_rate_hidden = 0.5\n\tn_hidden_RNN = 81\n\tn_out_RNN = n_features\n\t#hidden_fcl_sizes = [512, 512]\n\tactiv = tf.nn.sigmoid\n\tregularization_param = 1.0\n\t\n\tprint(\"Network 
Parameters:\")\n\tprint(\"\\tLearning Rate: \" + str(LEARNING_RATE))\n\t#print(\"\\tHidden layer sizes: \" + str(hidden_fcl_sizes))\n\tprint(\"\\tDropout probability for input: \" + str(dropout_rate_input))\n\tprint(\"\\tDropout probability for hidden layers: \" + str(dropout_rate_hidden))\n\tprint(\"\")\n\t\n\t########################################################################\n\t# Define the Model\n\n\t# Inputs Paceholders\n\tfeats_inpt = tf.placeholder(tf.float32, shape = [max_sequence_length, batchsize, n_features], name = \"input_features\")\n\t\n\tlabels = tf.placeholder(tf.float32, shape = [max_sequence_length, batchsize, num_noise_params], name = \"input_labels\")\n\t\n\tdrop_prob_input = dropout_rate_input\n\tdrop_prob_hidden = dropout_rate_hidden\n\n\t# Refularization and Initialization\n\ttf.set_random_seed(seed)\n\tregularizer = tf.contrib.layers.l2_regularizer(scale = regularization_param)\n\tnorm_initializer = tf.contrib.layers.xavier_initializer(uniform=False, dtype=tf.float32)\n\n\n\t# The LSTM with additional dropout\n\trnn_cell = tf.contrib.rnn.BasicLSTMCell(n_hidden_RNN)\n\tinitial_state = tf.zeros([batchsize, rnn_cell.state_size])\n\n\t#if initial_state == 0: #not initialized\n\t#\tinitial_state = rnn_cell.zero_state(batchsize, tf.float32)\n\tfinal_state=initial_state\n\trnn_outputs, state = tf.nn.dynamic_rnn(rnn_cell, feats_inpt, initial_state=initial_state, dtype=tf.float32, time_major=True) #output is rnn_outputs, rnn_states\n\n\t\n\tfinal_projection = lambda x: tf.contrib.layers.linear(x, num_outputs=num_noise_params, activation_fn=activ)\n\t\n\t# apply projection to every timestep.\n\tpredicted_outputs = map_fn(final_projection, rnn_outputs)\n\n\n\t#with tf.name_scope(\"cross_entropy_ns\"):\n\t# compute elementwise cross entropy.\n\tcross_entropy = -(labels * tf.log(predicted_outputs + TINY) + (1.0 - labels) * tf.log(1.0 - predicted_outputs + TINY))\n\terror = tf.reduce_mean(cross_entropy, name=\"error\")\n\ttf.summary.scalar(\"error\", error)\n\n\tfinal_state=state\n\t\n\t# Define loss and optimizer\n\ttrain_step = tf.train.RMSPropOptimizer(LEARNING_RATE).minimize(error)\n\n\t# Return training step, loss, predictions, accuracy and input placeholders\n\treturn [train_step, cross_entropy, error, feats_inpt, labels, state]\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"lvhualong/classical-SLAM","sub_path":"asl_msf/msf_updates/Python/noise_estimation/lstm_simple.py","file_name":"lstm_simple.py","file_ext":"py","file_size_in_byte":2633,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"81"} +{"seq_id":"16571055853","text":"import math\nimport pyspark\nfrom pyspark.sql.functions import *\nfrom pyspark.sql import SparkSession\n\nsc = pyspark.SparkContext(\"local\", \"Pearson Correlation\")\nspark = SparkSession(sc)\n\n\"\"\"\nlength(object)\nComputes the character length of string data or number of bytes of binary data.\nThe length of character data includes the trailing spaces. 
The length of binary data\nincludes binary zeros.\n\"\"\"\n\n# Reviews => ['asin', 'review_text']\nreviews = spark.read.csv(\"hdfs://$NAME_NODE_IP:9000/input/reviews.csv\", header=True, sep=\",\")\nasin_and_avg_review_length = reviews.withColumn(\"review_text\", length(reviews.review_text)) \\\n    .groupBy(\"asin\")\\\n    .agg(mean(\"review_text\").alias(\"review_length\"))\n\n# Filter null values\nasin_and_avg_review_length = asin_and_avg_review_length.filter(asin_and_avg_review_length.review_length.isNotNull())\n\n# Book meta => {'asin':'', 'price':''}\nbook_meta = spark.read.json(\"hdfs://$NAME_NODE_IP:9000/input/book_meta.json\")\nasin_and_prices = book_meta.select(\"asin\", \"price\")\n# Filter null values\nasin_and_prices = asin_and_prices.filter(asin_and_prices.price.isNotNull())\n\n# Merge data by 'asin'\ndata = asin_and_avg_review_length.join(asin_and_prices, \"asin\")\n\ndata = data.select(\"price\", \"review_length\")\nn = data.count()\n\n# data format => [['1.23', 151.0], ..., ['2.99', 235.5]] ([price, average_length])\ndata = data.rdd.map(list)\n\nflatdata = data.flatMap(lambda row: (\n    (\"x\", float(row[0])),\n    (\"y\", float(row[1])),\n    (\"x2\", float(row[0]) * float(row[0])),\n    (\"y2\", float(row[1]) * float(row[1])),\n    (\"xy\", float(row[0]) * float(row[1]))))\n\nreduced_data = flatdata.reduceByKey(lambda x, y: x + y).sortByKey().collect()\n\n# Order after sorting [x, x2, xy, y, y2]\nx = reduced_data[0][1]\nx2 = reduced_data[1][1]\nxy = reduced_data[2][1]\ny = reduced_data[3][1]\ny2 = reduced_data[4][1]\n\nnumerator = n * xy - (x * y)\ndenominator = math.sqrt(n * x2 - (x * x)) * math.sqrt(n * y2 - (y * y))\ncorrelation = numerator / denominator\n\nresult = sc.parallelize([correlation])\nresult.saveAsTextFile(\"hdfs://$NAME_NODE_IP:9000/output/correlation\")\n\nprint(\"The Pearson Correlation is: %s \" % correlation)","repo_name":"Pomatoo/db_project","sub_path":"automation_scripts/analytics_scripts/correlation.py","file_name":"correlation.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}{"seq_id":"32401501494","text":"\"\"\"Experiment on Xgboost: http://xgboost.readthedocs.io/en/latest/model.html.\n\"\"\"\nimport xgboost as xgb\nimport pandas as pd\nimport numpy as np\n\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.preprocessing import StandardScaler\n\nimport argparse\nimport time\n\nimport DataUtil\nimport Parameters\nimport PreprocessingUtil\n\nnp.random.seed(1)\n\ndef train_and_evaluate(trainFile, propertiesFile):\n    \"\"\"Train and evaluate a model.\n\n    trainFile: path to train_2016_v2.csv\n    propertiesFile: path to properties_2016.csv\n    \"\"\"\n    x_train, x_test, y_train, y_test = _read_and_process_data(\n        trainFile, propertiesFile)\n\n    train = xgb.DMatrix(x_train, label=y_train)\n    test = xgb.DMatrix(x_test, label=y_test)\n\n    params = {}\n    # eta is the 'learning rate in xgboost'\n    params['eta'] = 0.02\n    params['objective'] = 'reg:linear'\n    params['eval_metric'] = 'mae'\n    params['max_depth'] = 4\n    params['silent'] = 1\n\n    watchlist = [(train, 'train'), (test, 'test')]\n    start_time = time.time()\n    clf = xgb.train(params, train, 10000, watchlist, early_stopping_rounds=100,\n                    verbose_eval=10)\n    print(\"Training takes {} seconds\".format(time.time() - start_time))\n    print(\"test set mean_absolute_error is: {}\".format(\n        mean_absolute_error(y_true = y_test.values, y_pred =clf.predict(test))))\n    print(\"training set mean_absolute_error is: {}\".format(
mean_absolute_error(y_true = y_train.values, y_pred =clf.predict(train))))\n\ndef _read_and_process_data(trainFile, propertiesFile):\n \"\"\"Read and preprocess data with LabelEncoder, StandardScaler\n and IsolationForest. Return x_train, x_test, y_train and y_test.\n \"\"\"\n print(\"Start reading and processing data\")\n df_train = DataUtil.readTrainFile(trainFile)\n df_properties = DataUtil.readPropertiesFile(propertiesFile)\n\n df_properties = PreprocessingUtil.applyLabelEncoder(df_properties)\n\n # find the intersection of training data and property information for\n # all data\n inter = pd.merge(df_properties, df_train, how=\"inner\", on=[\"parcelid\"])\n # decompose transaction date information\n inter['transactiondate'] = pd.to_datetime(df_train[\"transactiondate\"])\n inter['transaction_year'] = inter['transactiondate'].dt.year\n inter['transaction_month'] = inter['transactiondate'].dt.month\n inter['transaction_day'] = inter['transactiondate'].dt.day\n\n y = inter['logerror'].astype(float)\n x_train, x_test, y_train, y_test = DataUtil.getTrainAndTestData(\n inter, y, Parameters.getTestSetRatio()\n )\n x_train = x_train.drop(Parameters.getColumnsToDrop(), axis = 1)\n x_test = x_test.drop(Parameters.getColumnsToDrop(), axis = 1)\n standardScaler = StandardScaler()\n x_train.iloc[::] = standardScaler.fit_transform(x_train.iloc[::])\n x_test.iloc[::] = standardScaler.transform(x_test.iloc[::])\n\n y_train_inoutliners = PreprocessingUtil.applyIsolationForest(\n y_train.values.reshape(-1, 1))\n index = y_train_inoutliners == 1\n x_train = x_train.iloc[index]\n x_train.reset_index(drop = True, inplace = True)\n y_train = y_train.iloc[index]\n y_train.reset_index(drop = True, inplace = True)\n print(\"Finished reading and processing data\")\n return x_train, x_test, y_train, y_test\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('-tf', '--trainFile',\n help='path to train_2016_v2.csv file', required=True)\n parser.add_argument('-pf', '--propertiesFile',\n help='path to properties_2016.csv.csv file', required=True)\n args = parser.parse_args()\n train_and_evaluate(args.trainFile, args.propertiesFile)\n","repo_name":"neil-lhm/cs8803FinalProject","sub_path":"Xgboost.py","file_name":"Xgboost.py","file_ext":"py","file_size_in_byte":3665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3450693915","text":"import logging as log\n\nREGTAB = {\n 'v0': 2, 'v1': 3,\n 'a0': 4, 'a1': 5, 'a2': 6, 'a3': 7,\n 't0': 8, 't1': 9, 't2': 10, 't3': 11, 't4': 12, 't5': 13, 't6': 14, 't7': 15,\n 's0': 16, 's1': 17, 's2': 18, 's3': 19, 's4': 20, 's5': 21, 's6': 22, 's7': 23,\n 't8': 24, 't9': 25,\n 'k0': 26, 'k1': 27,\n 'gp': 28,\n 'sp': 29,\n 'fp': 30,\n 'ra': 31,\n 'zero': 0\n}\n\n\ndef regmap(strreg):\n \"\"\"Map string register name, either using shorthand (\"t1\") or\n absolute (\"1\") to the corresponding int value.\n \"\"\"\n\n return int(strreg) if strreg[0].isdigit() else REGTAB[strreg]\n\n\nclass Registers(object):\n def __init__(self, dmem_size):\n self.gpr = [0] * 32\n self.gpr[29] = dmem_size\n self.pc = 0\n self.hi = 0\n self.lo = 0\n\n def read(self, reg):\n return self.gpr[regmap(reg)]\n\n def write(self, reg, contents):\n ind = regmap(reg)\n if ind == 0:\n raise Exception('can\\'t write to $zero')\n self.gpr[ind] = contents\n\n def dump(self):\n for reg in sorted(REGTAB):\n log.info('${}/{} : {}\\t\\t'.format(reg, regmap(reg),\n self.gpr[regmap(reg)]))\n log.info('pc : {}'.format(self.pc))\n 
log.info('hi : {}'.format(self.hi))\n log.info('lo : {}'.format(self.lo))\n","repo_name":"offlinemark/spym","sub_path":"emu/registers.py","file_name":"registers.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"81"} +{"seq_id":"9891223728","text":"from dronekit import connect, VehicleMode\nfrom pymavlink import mavutil\nimport time\n\n\nimport os\nimport datetime\nimport sys\nimport signal\n\n# Get the parent directory path\nparent_directory = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))\n# Add the parent directory to sys.path\nsys.path.append(parent_directory)\nfrom drone_ardupilot import *\n\ndef interrupt(signal_num, frame):\n print(\"Interrupted!\")\n global vehicle\n if vehicle is not None:\n vehicle.remove_attribute_listener('velocity', on_velocity)\n vehicle.mode = VehicleMode (\"LAND\")\n time.sleep(3) \n vehicle.close()\n sys.exit()\n\ncreate_log_file() \nglobal vehicle\n#vehicle = connect(parse_connect(), wait_ready=False)\nvehicle = connect(\"/dev/serial0\", baud= 921600, wait_ready=False,rate=10) # for raspberry p\n#vehicle = connect(\"/dev/ttyUSB0\", baud= 57600, wait_ready=False, rate=10) #for telemetry\nvehicle.wait_ready(True, raise_exception=False) # for raspberry pi & telemetry only \nsignal.signal(signal.SIGINT, interrupt)\n\ndrone_hight=2 \narm_and_takeoff(vehicle,drone_hight)\n\nprint( \"Takeoff and wait 2 sec\")\n\nvehicle.mode = VehicleMode(\"LOITER\") \ntime.sleep(2)\nvehicle.mode = VehicleMode(\"GUIDED\")\n\nhex_side=1\nangl_dir=[0,120, 180,240,300,360,60,180]\n#angl_dir=[0]\n\nfor i in range(len(angl_dir)):\n\n print( \"************************* Moving to\",angl_dir[i], \"*************************\" ) \n move_body_PID(vehicle, angl_dir[i], hex_side)\n \n vehicle.mode = VehicleMode(\"LOITER\") \n time.sleep(2)\n vehicle.mode = VehicleMode(\"GUIDED\")\n\ntime.sleep(2)\n\n\nwrite_log_message(f\" Coming Home\")\nvehicle.mode = VehicleMode (\"LAND\")\ntime.sleep(2) \n\n# Close connection\nvehicle.close()\n","repo_name":"SulaimanMohammad/Drone_VESPA","sub_path":"src/drone/unit_tests/hexagon_move.py","file_name":"hexagon_move.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74516360904","text":"from django.conf import settings\nfrom django.db import models\n\nfrom applications.app_domicilio.models import Domicilio\nfrom applications.app_personas.models import Sucursal\n\n\nclass Poliza(models.Model):\n GENDER_TIPO = (\n (\"E\", \"Egreso\"),\n (\"I\", \"Ingreso\"),\n (\"D\", \"Diario\"),\n )\n GENDER_APLICACION = (\n (\"N\", \"Normal\"),\n (\"C\", \"Condonacion\"),\n (\"O\", \"ChequeOrden\"),\n (\"D\", \"CierreDiario\"),\n (\"M\", \"CierreMensual\"),\n (\"A\", \"CierreAnual\"),\n )\n GENDER_FUENTE = (\n (\"O\", \"Operacion\"),\n (\"A\", \"Activos\"),\n (\"N\", \"Nomina\"),\n (\"G\", \"Gastos\"),\n (\"P\", \"Pasiva\"),\n (\"T\", \"Traslados\"),\n (\"R\", \"Traspasos\"),\n )\n tipo = models.CharField(\n max_length=1,\n choices=GENDER_TIPO,\n verbose_name=\"Tipo de poliza\",\n default=\"E\",\n )\n numero = models.IntegerField(verbose_name=\"Número de poliza\")\n sucursal = models.ForeignKey(\n Sucursal,\n on_delete=models.PROTECT,\n verbose_name=\"Sucursal a la que pertenece:\",\n )\n fecha = models.DateTimeField(verbose_name=\"Fecha de poliza\")\n fecharegistro = models.DateTimeField(auto_now=True)\n concepto = models.CharField(max_length=200, 
verbose_name=\"Concepto de la poliza\")\n usuarioAutoriza = models.IntegerField(verbose_name=\"Usuario Elabora:\")\n usuarioElabora = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n on_delete=models.PROTECT,\n verbose_name=\"Usuario Autoriza:\",\n )\n aplicacion = models.CharField(\n max_length=1,\n choices=GENDER_APLICACION,\n default=\"N\",\n verbose_name=\"Aplicacion de poliza\",\n )\n fuente = models.CharField(\n max_length=1,\n choices=GENDER_FUENTE,\n default=\"O\",\n verbose_name=\"Fuente de poliza\",\n )\n automatica = models.BooleanField(verbose_name=\"Poliza automatica\")\n\n class Meta:\n verbose_name = \"Poliza\"\n verbose_name_plural = \"Polizas\"\n\n def __str__(self):\n return self.tipo + \" \" + str(self.numero) + \" \" + self.concepto\n\n\nclass Banco(models.Model):\n nombre = models.CharField(max_length=50, verbose_name=\"Nombre del banco\")\n\n class Meta:\n verbose_name = \"Banco\"\n verbose_name_plural = \"Bancos\"\n\n def __str__(self):\n return self.nombre\n\n\nclass PolizaEgreso(models.Model):\n poliza = models.ForeignKey(Poliza, on_delete=models.CASCADE)\n beneficiario = models.CharField(\n max_length=70, verbose_name=\"Nombre del beneficiario\"\n )\n banco = models.ForeignKey(Banco, on_delete=models.PROTECT)\n cheque = models.CharField(max_length=10, verbose_name=\"Numero de cheque\")\n\n class Meta:\n verbose_name = \"Poliza Egreso\"\n verbose_name_plural = \"Polizas Egreso\"\n\n def __str__(self):\n return self.beneficiario\n\n\nclass Proveedor(models.Model):\n GENDER_TIPOPROV = (\n (\"N\", \"04 Nacional\"),\n (\"E\", \"05 Extranjera\"),\n (\"G\", \"15 Glonbal\"),\n )\n\n GENDER_OPERACIONPROV = (\n (\"P\", \"03 Por servicios profesionales\"),\n (\"A\", \"06 Arrendamiento\"),\n (\"O\", \"85 Otros\"),\n )\n\n nombre = models.CharField(max_length=100, verbose_name=\"Nombre del proveedor\")\n domicilio = models.ForeignKey(Domicilio, on_delete=models.PROTECT)\n rfc = models.CharField(max_length=15, verbose_name=\"RFC\", unique=True)\n curp = models.CharField(max_length=18, verbose_name=\"CURP\")\n telefono = models.CharField(max_length=10, verbose_name=\"Número de teléfono\")\n tipo = models.CharField(\n max_length=1,\n choices=GENDER_TIPOPROV,\n default=\"N\",\n verbose_name=\"Tipo de proveedor\",\n )\n operacion = models.CharField(\n max_length=1,\n choices=GENDER_OPERACIONPROV,\n default=\"P\",\n verbose_name=\"Operacion de proveedor\",\n )\n regimen = models.CharField(max_length=100, verbose_name=\"Regimen Fiscal\")\n nombreExtranjero = models.CharField(max_length=50)\n paisResidencia = models.CharField(max_length=50)\n nacionalidad = models.CharField(max_length=50)\n banco = models.ForeignKey(Banco, on_delete=models.PROTECT)\n cuentaClabe = models.CharField(max_length=25)\n\n class Meta:\n verbose_name = \"Proveedor\"\n verbose_name_plural = \"Proveedores\"\n\n def __str__(self):\n return self.nombre\n\n\nclass PolizaSoporte(models.Model):\n poliza = models.ForeignKey(Poliza, on_delete=models.CASCADE)\n gastoacomprobar = models.IntegerField(default=0)\n archivo = models.CharField(max_length=300, blank=True)\n chavecfdi = models.CharField(max_length=100, blank=True)\n subtotal = models.DecimalField(max_digits=14, decimal_places=2, default=0)\n monto = models.DecimalField(max_digits=14, decimal_places=2, default=0)\n descuento = models.DecimalField(max_digits=14, decimal_places=2, default=0)\n impuesto = models.DecimalField(max_digits=14, decimal_places=2, default=0)\n rfc = models.CharField(max_length=15, blank=True)\n polizaSoporteRelacion = 
models.IntegerField(default=0)\n\n    class Meta:\n        verbose_name = \"PolizaSoporte\"\n        verbose_name_plural = \"PolizasSoporte\"\n\n    def __str__(self):\n        return str(self.poliza) + \" \" + str(self.polizaSoporteRelacion)\n\n\nclass ClaveUnidad(models.Model):\n    clave = models.CharField(max_length=8)\n    descripcion = models.CharField(max_length=100)\n\n    class Meta:\n        verbose_name = \"Clave Unidad\"\n        verbose_name_plural = \"Claves Unidad\"\n\n    def __str__(self):\n        return self.clave + \" \" + self.descripcion\n\n\nclass DetallePolizaSoporte(models.Model):\n    polizaSoporte = models.ForeignKey(PolizaSoporte, on_delete=models.CASCADE)\n    claveProdServ = models.CharField(max_length=8)\n    cantidad = models.DecimalField(max_digits=12, decimal_places=2)\n    claveUnidad = models.ForeignKey(ClaveUnidad, on_delete=models.PROTECT)\n    descripcion = models.CharField(max_length=100)\n    unitario = models.DecimalField(max_digits=12, decimal_places=2)\n    objetoImp = models.CharField(max_length=2)\n    base = models.DecimalField(max_digits=12, decimal_places=2)\n    impuesto = models.CharField(max_length=3)\n    tipoFactor = models.CharField(max_length=20)\n    tasaoCuota = models.DecimalField(max_digits=8, decimal_places=6)\n    importe = models.DecimalField(max_digits=12, decimal_places=2)\n\n    class Meta:\n        verbose_name = \"Detalle Poliza Soporte\"\n        verbose_name_plural = \"Detalle Polizas Soporte\"\n\n    def __str__(self):\n        return self.descripcion\n\n\nclass CatalogoCuenta(models.Model):\n    GENDER_CLASIFICACION = (\n        (\"C\", \"Capitulo\"),\n        (\"S\", \"Sub Capitulo\"),\n        (\"M\", \"Mayor\"),\n        (\"E\", \"Menor\"),\n    )\n    GENDER_GRUPO = (\n        (\"A\", \"Activo\"),\n        (\"P\", \"Pasivo\"),\n        (\"C\", \"Capital\"),\n        (\"R\", \"Resultado Acreedor\"),\n        (\"D\", \"Resultado Deudor\"),\n        (\"O\", \"Orden\"),\n        (\"U\", \"Puente\"),\n    )\n    GENDER_FINALIDAD = (\n        (\"C\", \"Caja\"),\n        (\"B\", \"Banco\"),\n        (\"O\", \"Otros\"),\n    )\n    GENDER_NATURALEZA = (\n        (\"A\", \"Deudora\"),\n        (\"D\", \"Acreedora\"),\n    )\n\n    cuenta = models.CharField(max_length=30, verbose_name=\"Cuenta contable\")\n    cuentaSiti = models.CharField(max_length=15, verbose_name=\"Cuenta Siti\")\n    nombre = models.CharField(max_length=50, verbose_name=\"Nombre cuenta\")\n    clasificacion = models.CharField(\n        max_length=1,\n        choices=GENDER_CLASIFICACION,\n        default=\"C\",\n        verbose_name=\"Clasificación contable\",\n    )\n    grupo = models.CharField(\n        max_length=1, choices=GENDER_GRUPO, default=\"A\", verbose_name=\"Grupo contable\"\n    )\n    finalidad = models.CharField(\n        max_length=1,\n        choices=GENDER_FINALIDAD,\n        default=\"O\",\n        verbose_name=\"Finalidad contable\",\n    )\n    naturaleza = models.CharField(\n        max_length=1,\n        choices=GENDER_NATURALEZA,\n        default=\"A\",\n        verbose_name=\"Naturaleza contable\",\n    )\n    afectable = models.BooleanField(default=0, verbose_name=\"Afectable\")\n    padre = models.CharField(\n        max_length=30, verbose_name=\"Padre o nivel superior de la cuenta\"\n    )\n    nivel = models.IntegerField()\n    balance = models.BooleanField(\n        default=0, verbose_name=\"La cuenta se desplegara en el balance\"\n    )\n    catalogoMinimo = models.BooleanField(\n        default=0, verbose_name=\"Desplegar en el catalogo Minimo\"\n    )\n    nombreBalance = models.CharField(\n        max_length=100, verbose_name=\"Nombre que desplegara en el balance\"\n    )\n    nombreSiti = models.CharField(\n        max_length=70, verbose_name=\"Nombre de la cuenta en el SITI\"\n    )\n    cuentaPadreSiti = models.CharField(\n        max_length=15, verbose_name=\"Padre o nivel superior cuenta SITI\"\n    )\n    cuentaAgrupar = models.CharField(\n        max_length=30, verbose_name=\"Cuenta a la 
que agrupará\"\n )\n ordenSiti = models.IntegerField(\n verbose_name=\"Orden en que desplegara en la base del SITI\"\n )\n subCuentaSiti = models.BooleanField(verbose_name=\"SubCuenta de catalogo SITI\")\n prorrateo = models.BooleanField(default=0)\n\n class Meta:\n verbose_name = \"Catalogo de cuenta\"\n verbose_name_plural = \"Catalogo de cuentas\"\n\n def __str__(self):\n return self.nombre\n\n\nclass DetallePoliza(models.Model):\n GENDER_TIPOIVA = (\n (\"NA\", \"No aplica\"),\n (\"16\", \"16%\"),\n (\"8\", \"8%\"),\n (\"11\", \"11%\"),\n (\"0\", \"0%\"),\n (\"E\", \"Excento\"),\n (\"R\", \"Retenido\"),\n )\n\n poliza = models.ForeignKey(Poliza, on_delete=models.CASCADE)\n cuenta = models.ForeignKey(CatalogoCuenta, on_delete=models.PROTECT)\n sucursal = models.ForeignKey(Sucursal, on_delete=models.PROTECT)\n cargo = models.DecimalField(max_digits=14, decimal_places=2)\n abono = models.DecimalField(max_digits=14, decimal_places=2)\n proveedor = models.ForeignKey(Proveedor, on_delete=models.PROTECT)\n concepto = models.CharField(max_length=100)\n iva = models.CharField(\n max_length=2, choices=GENDER_TIPOIVA, default=\"NA\", verbose_name=\"Tasa de IVA\"\n )\n\n class Meta:\n verbose_name = \"Detalle Poliza\"\n verbose_name_plural = \"Detalle Polizas\"\n\n def __str__(self):\n return str(self.cuenta) + \" \" + str(self.concepto)\n","repo_name":"dgox16/proyecto1_practicas","sub_path":"applications/app_polizas/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":10243,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2132115010","text":"import networkx as nx\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom pyveplot import Hiveplot, Axis, Node\nimport random\n\n# load data\npd_data = pd.read_csv('./kg_final_with_temporal_data_and_validated_inconsistencies.txt', sep='\\t')\npd_data = pd_data[['Subject', 'Predicate', 'Object']]\n\n# filter positives only\nneg_predicates = [\n 'confers no resistance to antibiotic',\n 'not upregulated by antibiotic',\n 'no represses',\n 'no activates',]\n\ndef _check_match(x, predicates):\n flag = False\n for predicate in predicates:\n if predicate in x:\n flag = True\n return flag\n\n# remove the negatives\npd_data = pd_data[~pd_data['Predicate'].apply(lambda x: _check_match(x, neg_predicates))]\n\n# select data by relation type\npd_cras = pd_data[pd_data['Predicate'].str.contains('resistance to antibiotic')]\npd_ubas = pd_data[pd_data['Predicate'].str.contains('upregulated by antibiotic')]\npd_represses = pd_data[pd_data['Predicate'].str.contains('represses')]\npd_activates = pd_data[pd_data['Predicate'].str.contains('activates')]\npd_has = pd_data[pd_data['Predicate'].str.contains('has')]\npd_iii = pd_data[pd_data['Predicate'].str.contains('is involved in')]\npd_ipo = pd_data[pd_data['Predicate'].str.contains('is part of')]\npd_tb = pd_data[pd_data['Predicate'].str.contains('targeted by')]\n\n# get genes\ngenes = 
[]\ngenes.extend(pd_cras['Subject'].to_numpy().tolist())\ngenes.extend(pd_ubas['Subject'].to_numpy().tolist())\ngenes.extend(pd_represses['Subject'].to_numpy().tolist())\ngenes.extend(pd_represses['Object'].to_numpy().tolist())\ngenes.extend(pd_activates['Subject'].to_numpy().tolist())\ngenes.extend(pd_activates['Object'].to_numpy().tolist())\ngenes.extend(pd_has['Subject'].to_numpy().tolist())\ngenes.extend(pd_iii['Subject'].to_numpy().tolist())\ngenes.extend(pd_ipo['Subject'].to_numpy().tolist())\ngenes.extend(pd_tb['Subject'].to_numpy().tolist())\ngenes = list(set(genes))\n\npd_genes = pd.DataFrame(genes, columns=['Label'])\npd_genes['Category'] = 'gene'\nprint('gene:', pd_genes.shape)\n\n# get antibiotics\nantibiotics = []\nantibiotics.extend(pd_cras['Object'].to_numpy().tolist())\nantibiotics.extend(pd_ubas['Object'].to_numpy().tolist())\nantibiotics.extend(pd_tb['Object'].to_numpy().tolist())\nantibiotics = list(set(antibiotics))\n\npd_antibiotics = pd.DataFrame(antibiotics, columns=['Label'])\npd_antibiotics['Category'] = 'antibiotic'\nprint('antibiotic:', pd_antibiotics.shape)\n\n# get molecular_function\nmolecular_functions = pd_has['Object'].to_numpy().tolist()\nmolecular_functions = list(set(molecular_functions))\n\npd_molecular_functions = pd.DataFrame(molecular_functions, columns=['Label'])\npd_molecular_functions['Category'] = 'molecular_function'\nprint('molecular_function:', pd_molecular_functions.shape)\n\n# get biological_process\nbiological_processes = pd_iii['Object'].to_numpy().tolist()\nbiological_processes = list(set(biological_processes))\n\npd_biological_processes = pd.DataFrame(biological_processes, columns=['Label'])\npd_biological_processes['Category'] = 'biological_process'\nprint('biological_process:', pd_biological_processes.shape)\n\n# get cellular_component\ncellular_components = pd_ipo['Object'].to_numpy().tolist()\ncellular_components = list(set(cellular_components))\n\npd_cellular_components = pd.DataFrame(cellular_components, columns=['Label'])\npd_cellular_components['Category'] = 'cellular_component'\nprint('cellular_component:', pd_cellular_components.shape)\n\n#########\n# nodes #\n#########\npd_nodes = pd.concat(\n [pd_genes,\n pd_antibiotics,\n pd_molecular_functions,\n pd_biological_processes,\n pd_cellular_components])\n\npd_nodes = pd_nodes.reset_index(drop=True)\n\n#########\n# edges #\n#########\npd_edges = pd_data.copy()\n\ndef map_func(label):\n if label.startswith('confers resistance to antibiotic'):\n return 'confers resistance to antibiotic'\n elif label.startswith('confers no resistance to antibiotic'):\n return 'confers no resistance to antibiotic'\n elif label.startswith('upregulated by antibiotic'):\n return 'upregulated by antibiotic'\n elif label.startswith('not upregulated by antibiotic'):\n return 'not upregulated by antibiotic'\n elif 'represses' == label:\n return 'represses'\n elif 'no represses' == label:\n return 'no represses'\n elif 'activates' == label:\n return 'activates'\n elif 'no activates' == label:\n return 'no activates'\n elif 'has' == label:\n return 'has'\n elif 'is involved in' == label:\n return 'is involved in'\n elif 'is part of' == label:\n return 'is part of'\n elif 'targeted by' == label:\n return 'targeted by'\n else:\n raise ValueError('Invalid label: {}'.format(label))\n\npd_edges['Category'] = pd_edges['Predicate'].apply(lambda x: map_func(x))\n\n# build graph\nG = nx.MultiDiGraph()\n\nfor _, row in pd_nodes.iterrows():\n\tG.add_node(row['Label'], category=row['Category'])\n\nfor _, row in 
pd_edges.iterrows():\n G.add_edge(row['Subject'], row['Object'], category=row['Category'])\n\nfor n, d in G.nodes(data=True):\n\tprint(n)\n\tprint(d)\n\tbreak\n\nfor u, v, c in G.edges.data():\n\tprint(u)\n\tprint(v)\n\tprint(c)\n\tbreak\n\n# hive plot\n# create hiveplot object\nh = Hiveplot()\n\n# create three axes, spaced at 120 degrees from each other\nh.axes = [\n Axis(start=20, angle=270), # gene\n Axis(start=20, angle=200), # antibiotic\n Axis(start=20, angle=350), # molecular_functions\n Axis(start=20, angle=40), # biological_processes\n Axis(start=20, angle=120), # cellular_components\n]\n\n# sort nodes by degree\nk = list(nx.degree(G))\nk.sort(key=lambda tup: tup[1])\n\n# categorize them as high, medium and low degree\ngenes_sorted = [v[0] for v in k if v[0] in genes]\nantibiotics_sorted = [v[0] for v in k if v[0] in antibiotics]\nmolecular_functions_sorted = [v[0] for v in k if v[0] in molecular_functions]\nbiological_processes_sorted = [v[0] for v in k if v[0] in biological_processes]\ncellular_components_sorted = [v[0] for v in k if v[0] in cellular_components]\n\ngenes_sorted = genes_sorted[int(-0.05*len(genes_sorted)):]\nantibiotics_sorted = antibiotics_sorted[int(-0.05*len(antibiotics_sorted)):]\nmolecular_functions_sorted = molecular_functions_sorted[int(-0.05*len(molecular_functions_sorted)):]\nbiological_processes_sorted = biological_processes_sorted[int(-0.05*len(biological_processes_sorted)):]\ncellular_components_sorted = cellular_components_sorted[int(-0.05*len(cellular_components_sorted)):]\n\nzipped = zip(\n h.axes,\n [genes_sorted,\n antibiotics_sorted,\n molecular_functions_sorted,\n biological_processes_sorted,\n cellular_components_sorted],\n ['gene',\n 'antibiotic',\n 'molecular_function',\n 'biological_process',\n 'cellular_components',])\n\n# place these nodes into our three axes\nfor axis, nodes, node_type in zipped:\n if node_type == 'gene':\n circle_color = '#5ab3e5'\n elif node_type == 'antibiotic':\n circle_color = '#bd3886'\n elif node_type == 'molecular_function':\n circle_color = '#55d400'\n elif node_type == 'biological_process':\n circle_color = '#e7a025'\n elif node_type == 'cellular_components':\n circle_color = '#ff7045'\n else:\n raise ValueError('Invalid node type!')\n\n for idx, v in enumerate(nodes):\n # create node object\n node = Node(\n radius=G.degree(v),\n label=\"node %s degree=%s\" % (v, G.degree(v)))\n\n # add it to axis\n axis.add_node(v, node)\n\n # once it has x, y coordinates, add a circle\n node.add_circle(\n fill=circle_color,\n # stroke_width=0.1,\n fill_opacity=0.7)\n\n if idx >= (len(nodes)-3):\n # also add a label\n if axis.angle < 180:\n orientation = -1\n scale = 0.6\n else:\n orientation = 1\n scale = 0.35\n\n if node_type == 'cellular_components':\n scale*=0.7\n\n node.add_label(\n \"%s\" % (v),\n angle=axis.angle + 90 * orientation,\n scale=0.15*G.degree(v)*scale)\n\n# extract subgraphs\nG_cra = nx.MultiDiGraph(\n ((u, v, e) for u, v, e in G.edges(data=True) if e['category'] == 'confers resistance to antibiotic'))\n\nG_uba = nx.MultiDiGraph(\n ((u, v, e) for u, v, e in G.edges(data=True) if e['category'] == 'upregulated by antibiotic'))\n\nG_represses = nx.MultiDiGraph(\n ((u, v, e) for u, v, e in G.edges(data=True) if e['category'] == 'represses'))\n\nG_activates = nx.MultiDiGraph(\n ((u, v, e) for u, v, e in G.edges(data=True) if e['category'] == 'activates'))\n\nG_has = nx.MultiDiGraph(\n ((u, v, e) for u, v, e in G.edges(data=True) if e['category'] == 'has'))\n\nG_iii = nx.MultiDiGraph(\n ((u, v, e) for u, v, e in 
G.edges(data=True) if e['category'] == 'is involved in'))\n\nG_ipo = nx.MultiDiGraph(\n ((u, v, e) for u, v, e in G.edges(data=True) if e['category'] == 'is part of'))\n\nG_tb = nx.MultiDiGraph(\n ((u, v, e) for u, v, e in G.edges(data=True) if e['category'] == 'targeted by'))\n\n# draw curves between nodes connected by edges in network\n\n# gene,cra,antibiotic\nh.connect_axes(h.axes[0],\n h.axes[1],\n G_cra.edges,\n stroke_width=1.5,\n stroke_opacity=0.05,\n stroke='#607d8b')\n\n# gene,uba,antibiotic\nh.connect_axes(h.axes[0],\n h.axes[1],\n G_uba.edges,\n stroke_width=1.5,\n stroke_opacity=0.05,\n stroke='#9c5d99')\n\n# gene,represses,gene\nh.connect_axes(h.axes[0],\n h.axes[0],\n G_represses.edges,\n stroke_width=1.5,\n stroke_opacity=0.05,\n stroke='#00a014')\n\n# gene,activates,gene\nh.connect_axes(h.axes[0],\n h.axes[0],\n G_activates.edges,\n stroke_width=1.5,\n stroke_opacity=0.05,\n stroke='#b26c00')\n\n# gene,has,molecular_function\nh.connect_axes(h.axes[0],\n h.axes[2],\n G_has.edges,\n stroke_width=1.5,\n stroke_opacity=0.05,\n stroke='#e04d1c')\n\n# gene,iii,biological_processes\nh.connect_axes(h.axes[0],\n h.axes[3],\n G_iii.edges,\n stroke_width=1.5,\n stroke_opacity=0.05,\n stroke='#007a6b')\n\n# gene,ipo,cellular_components\nh.connect_axes(h.axes[4],\n h.axes[0],\n G_ipo.edges,\n stroke_width=1.5,\n stroke_opacity=0.05,\n stroke='#009d8b')\n\n# gene,tb,antibiotic\nh.connect_axes(h.axes[0],\n h.axes[1],\n G_tb.edges,\n stroke_width=1.5,\n stroke_opacity=0.05,\n stroke='#0081a5')\n\n# save output\nh.save('ba_hiveplot.svg')\n","repo_name":"IBPA/KIDS","sub_path":"manuscript_preparation/kg_visualization/hive_plots2.py","file_name":"hive_plots2.py","file_ext":"py","file_size_in_byte":10842,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"} +{"seq_id":"37232539654","text":"\n\nfrom langchain.memory.chat_message_histories import RedisChatMessageHistory\nfrom langchain.memory import ConversationBufferWindowMemory\n \nfrom langchain.schema import (\n HumanMessage,\n SystemMessage\n)\n\ndef normalChat(command_arg,memory_key,chat):\n message_db = RedisChatMessageHistory(url='redis://localhost:6379/0', ttl=3600, session_id=memory_key)\n memory = ConversationBufferWindowMemory(memory_key=memory_key, \n k=3, chat_memory=message_db, \n return_messages=True)\n memory_buffer = memory.load_memory_variables({})\n chat_history = []\n if memory_key in memory_buffer:\n chat_history = memory_buffer[memory_key]\n system_message_prompt = SystemMessage(content=\"You are a helpful assistant to me.\")\n human_message_prompt=HumanMessage(content=command_arg)\n chat_prompt_message = [system_message_prompt] + chat_history + [human_message_prompt]\n \n reply = chat(chat_prompt_message)\n \n message_db.add_user_message(command_arg)\n message_db.add_ai_message(reply.content)\n answer = reply.content\n return answer","repo_name":"shenxl/spark","sub_path":"commands/chat_normal.py","file_name":"chat_normal.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"69967340105","text":"import pandas as pd\nimport requests\nimport json\n\ndf = pd.read_csv('species.csv')\n\nobservations_data = {}\nfor plant_name in df['plant_name']:\n # Set the URL and parameters for the iNaturalist API request\n url = 'https://api.inaturalist.org/v1/observations'\n params = {'q': plant_name}\n \n # Make the API request and get the JSON data\n response = requests.get(url, 
params=params)\n    data = json.loads(response.text)\n    \n    # Store the observations data in a dictionary with the plant name as the key\n    observations_data[plant_name] = data['results']\n\nwith open('observations.json', 'w') as f:\n    json.dump(observations_data, f)","repo_name":"lehigh-hack-club-penguin/weedle","sub_path":"data/get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"22565161421","text":"import json\nfrom logger import logger\n\n\ndef load_data_from_json_file() -> list[dict]:\n    \"\"\"\n    Load data from posts.json\n    :return: list[dict]\n    Contents of posts.json\n    \"\"\"\n    try:\n        with open('posts.json', 'r', encoding='utf-8') as f:\n            return json.load(f)\n    except FileNotFoundError:\n        logger.critical(\"File not found\")\n    except json.JSONDecodeError:\n        logger.error(\"The file could not be parsed\")\n\n\ndef get_search_result(key: str) -> list[dict]:\n    \"\"\"\n    Search the posts.json data\n    :param key: str\n    Search keyword\n    :return: list[dict]\n    List of posts matching the search parameters\n    \"\"\"\n    data_list = load_data_from_json_file()\n    search_result = []\n    for data in data_list:\n        if key.lower() in data['content'].lower():\n            search_result.append(data)\n    return search_result\n\n\n\n","repo_name":"mvladlena85/homework_12","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"27135406765","text":"from pca import score\nimport pandas as pd\nimport numpy as np\nfrom sklearn.linear_model import LinearRegression\n\n#total assets and perundp\nscore = score\ncode = score.index\ncode = pd.DataFrame(code)\n\nbasic = pd.read_csv('basic_data.csv')\n\ntA = pd.DataFrame()\nfor i in code['code']:\n    i = int(i)\n    tA = tA.append(basic.ix[basic['code']==i])\n\ntA.index = tA['code']\ntA = tA[['code', 'totalAssets', 'perundp']]\n\n#current assets turnover\ncato = pd.read_csv('operation_data_2017.csv')\ncolumns = cato.columns\ncolumns = columns.drop('Unnamed: 0')\ncato = cato[columns]\ncO = pd.DataFrame()\nfor i in code['code']:\n    i = int(i)\n    cO = cO.append(cato.ix[cato['code']==i])\ncO.index = cO['code']\ncO = cO[['code', 'currentasset_turnover']]\n\n#mbrg\nmbrg = pd.read_csv('growth_data_2017.csv')\ncolumns = mbrg.columns\ncolumns = columns.drop('Unnamed: 0')\nmbrg = mbrg[columns]\nmB = pd.DataFrame()\nfor i in code['code']:\n    i = int(i)\n    mB = mB.append(mbrg.ix[mbrg['code']==i])\nmB.index = mB['code']\nmB = mB[['code', 'mbrg']]\n\n#cashflow ratio\ncsfl = pd.read_csv('cashflow_data_2017.csv')\ncolumns = csfl.columns\ncolumns = columns.drop('Unnamed: 0')\ncsfl = csfl[columns]\ncR = pd.DataFrame()\nfor i in code['code']:\n    i = int(i)\n    cR = cR.append(csfl.ix[csfl['code']==i])\ncR.index = cR['code']\ncR = cR[['code', 'cashflowratio']]\n\n#current ratio\ncrrt = pd.read_csv('debtpaying_data_2017.csv')\ncolumns = crrt.columns\ncolumns = columns.drop('Unnamed: 0')\ncrrt = crrt[columns]\ncT = pd.DataFrame()\nfor i in code['code']:\n    i = int(i)\n    cT = cT.append(crrt.ix[crrt['code']==i])\ncT.index = cT['code']\ncT = cT[['code', 'currentratio']]\n\n#merged data\ndata = pd.merge(tA, cO)\ndata = pd.merge(data, mB)\ndata = pd.merge(data, cR)\ndata = pd.merge(data, cT)\n\n#data preprocessing\ndata.index = data['code']\ndata['totalAssets'] = np.log(data['totalAssets'])\n\n#regression\ncolumns = 
data.columns\ncolumns = columns.drop('code')\nX = data[columns]\ny = score['score']\nreg = LinearRegression()\nreg.fit(X, y)\nintercept = reg.intercept_\ncoef = reg.coef_\nRsquare = reg.score(X, y)\n","repo_name":"Optimusbo/GraduatePaper","sub_path":"regression.py","file_name":"regression.py","file_ext":"py","file_size_in_byte":2044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"33272418583","text":"#@@ -0,0 +1,49 @@\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 3 14:52:54 2019\nUpdated on Fri April 23 2021\n\n@author: michelle\n\nDescription: Created to retrieve gifs of state court structures and save as png. \n\nNotes: I believe this was written in Mousepad, updated years later in RStudio. \n\"\"\"\n \nimport sys\nimport requests\nimport pandas as pd\nimport textract\nimport csv\nfrom PIL import Image, ImageDraw, ImageFont #dynamic import\n\n\nstates = ['wyoming', 'wisconsin', 'westvirginia', 'washington', 'virginia',\n          'vermont', 'utah', 'texas', 'tennessee', 'southdakota', \n          'southcarolina', 'rhodeisland', 'pennsylvania', 'oregon', \n          'oklahoma', 'ohio', 'northdakota', 'northcarolina',\n          'newyork', 'newmexico', 'newjersey', 'newhampshire', \n          'nevada', 'nebraska', 'montana', 'missouri', 'mississippi', \n          'minnesota', 'michigan', 'massachusetts', 'maryland', 'maine', \n          'louisiana', 'kentucky', 'kansas', 'iowa', 'indiana', 'illinois', \n          'idaho', 'hawaii', 'georgia', 'florida', 'district of columbia',\n          'delaware', 'connecticut', 'colorado', 'california', 'arkansas',\n          'arizona', 'alaska', 'alabama']\n\npath = \"/home/michelle/Dropbox/Data and Projects/Data/State Courts/\"\n\nfor s in range(0,len(states)):\n    while s < len(states): \n        url = \"https://www.ncsc.org/~/media/Microsites/Images/CSP/State_Structure_Charts/\" + states[s] + \".gif\"\n        response = requests.get(url)\n        if response.status_code==200:\n            print(states[s])\n            file = path+\"GIF/\"+states[s]+\".gif\" \n            with open(file, 'wb') as f:\n                f.write(requests.get(url).content)\n            img = Image.open(path+\"GIF/\"+states[s]+'.gif')\n            img.save(path+states[s]+\".png\",'png', optimize=True, quality=70)\n            break \n        if response.status_code!=200:\n            print(\"BAD URL \", states[s]) \n            break \n\n\n\n \n","repo_name":"mlwier99/dissertation_courts","sub_path":"State Courts/GIF/StateCourtStructurePNG.py","file_name":"StateCourtStructurePNG.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"25745779671","text":"from tkinter import *\r\nfrom tkinter import ttk, filedialog\r\nimport numpy\r\nimport pandas as pd\r\n\r\nroot = Tk()\r\nroot.configure(bg=\"grey\")\r\nroot.title(\"EXCEL SHEETS\")\r\nroot.geometry(\"500x800\")\r\n\r\n\r\n# function for opening the file1\r\ndef open_file1():\r\n    filename = filedialog.askopenfilename(title=\"Open the first file\", filetypes=((\"xlsx files\", \"*.xlsx\"),\r\n                                                                                 (\"All Files\", \"*.*\")))\r\n\r\n    if filename:\r\n        try:\r\n            filename = r\"{}\".format(filename)\r\n            global df1\r\n            df1 = pd.read_excel(filename)\r\n        except ValueError:\r\n            label.config(text=\"File could not be opened\")\r\n        except FileNotFoundError:\r\n            label.config(text=\"File Not Found\")\r\n    # Clear all the previous data in tree\r\n    clear_treeview()\r\n\r\n\r\n# function for opening the file2\r\n\r\n\r\ndef open_file2():\r\n    filename = filedialog.askopenfilename(title=\"Open the second file\", filetypes=((\"xlsx files\", \"*.xlsx\"),\r\n                                                                                  (\"All Files\", 
\"*.*\")))\r\n\r\n if filename:\r\n try:\r\n filename = r\"{}\".format(filename)\r\n global df2\r\n df2 = pd.read_excel(filename)\r\n except ValueError:\r\n label.config(text=\"File could not be opened\")\r\n except FileNotFoundError:\r\n label.config(text=\"File Not Found\")\r\n # Clear all the previous data in tree\r\n clear_treeview()\r\n\r\n\r\ndef merge_files():\r\n dataframe = [df1, df2]\r\n\r\n # Remove duplicates from the merged pandas dataframes.\r\n merge = pd.concat(dataframe).drop_duplicates()\r\n\r\n merge.to_excel(\"Merged.xlsx\")\r\n\r\n\r\n# Clear the Treeview Widget\r\n\r\n\r\ndef clear_treeview():\r\n tree.delete(*tree.get_children())\r\n\r\n\r\n# Create a Treeview widget\r\ntree = ttk.Treeview(root)\r\n\r\nlabel = Label(root, text=\"MERGING TWO EXCEL SHEETS\", width=28, height=1, font=(\"times\", 65, \"bold\"),\r\n bg=\"dark grey\", fg=\"black\")\r\nlabel.pack()\r\n\r\nb1 = Button(root, text=\"Get Sheet A\", width=10, height=1, font=(\"normal\", 15, \"bold\"),\r\n bg=\"aqua\", bd=10, foreground=\"blue\", command=open_file1)\r\nb1.pack(pady=20)\r\n\r\nb2 = Button(root, text=\"Get Sheet B\", width=10, height=1, font=(\"time\", 15, \"bold\"),\r\n bg=\"aqua\", bd=10, foreground=\"blue\", command=open_file2)\r\nb2.pack(pady=20)\r\n\r\nb3 = Button(root, text=\"Get Merged Sheet\", width=15, height=1, font=(\"normal\", 15, \"bold\"),\r\n bg=\"aqua\", bd=10, foreground=\"black\", command=merge_files)\r\nb3.pack(pady=20)\r\n\r\nroot.mainloop()\r\n","repo_name":"Beeblue2/Excel-project-","sub_path":"mypro.py","file_name":"mypro.py","file_ext":"py","file_size_in_byte":2599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5963167281","text":"import logging\n\nfrom zipline.api import order, order_target_percent\nfrom zipline.api import set_benchmark\nfrom zipline.finance import commission, slippage\n\nfrom .strategy_base import StrategyBase\n\n\ndef create_strategy(args):\n return Turtle()\n\n\nclass Turtle(StrategyBase):\n def __init__(self):\n super().__init__('Turtle')\n\n def initialize(self, context, stock_ids):\n super().initialize(context, stock_ids)\n\n context.portfolio_highest = {}\n context.price_highest = {}\n context.stock_shares = {}\n context.stock_n = {}\n\n context.set_commission(commission.PerShare(cost=.0075, min_trade_cost=1.0))\n context.set_slippage(slippage.VolumeShareSlippage())\n set_benchmark(context.assets[0])\n\n for asset in context.assets:\n context.price_highest[asset] = 0.0\n context.portfolio_highest[asset] = 0.0\n context.stock_shares[asset] = 0\n context.stock_n[asset] = []\n\n def handle_single_asset_data(self, context, asset, data):\n donchian_days = self.current_parameter_\n\n trailing_window = data.history(asset, 'price', donchian_days + 1, '1d')\n high_trailing_window = data.history(asset, 'high', donchian_days + 1, '1d')\n low_trailing_window = data.history(asset, 'low', donchian_days + 1, '1d')\n\n if trailing_window.isnull().values.any():\n return\n\n donchian_up = high_trailing_window.max()\n donchian_down = low_trailing_window.min()\n donchian_mid = (donchian_up + donchian_down) / 2\n\n # calculate N\n n = 0.0\n\n for i in range(donchian_days):\n true_range = max(high_trailing_window[-1 - i] - low_trailing_window[-1 - i],\n abs(high_trailing_window[-1 - i] - trailing_window[-2 - i]),\n abs(trailing_window[-2 - i] - low_trailing_window[-1 - i]))\n\n n += true_range\n\n n = n / donchian_days\n\n 
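# n is the average true range (ATR) over the window; a 'unit' below is sized\n # so that a price move of n shifts the position value by about 1% of this\n # asset's cash slice.\n pct_per_stock = 1.0 / len(context.assets)\n cash = context.portfolio.cash * 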
pct_per_stock\n price = data.current(asset, 'price')\n\n unit = cash * .01 / n\n\n logging.debug('stock:{}, n:{}, unit:{}, amount:{}'.format(asset, n, unit, unit / price))\n\n def parameter_set(self):\n yield (20)\n\n def __repr__(self):\n donchian_days = self.current_parameter_\n return 'Turtle with donachian days:{}'.format(donchian_days)\n","repo_name":"stonewell/learn-curve","sub_path":"src/strategy/turtle.py","file_name":"turtle.py","file_ext":"py","file_size_in_byte":2435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3268021225","text":"from functools import lru_cache\n\nclass Solution:\n def longestRepeatingSubstring(self, s: str) -> int:\n\n @lru_cache\n def has_repeating_str(length):\n log = set()\n print(len(s) - length)\n for i in range(len(s) - length + 1):\n slice = s[i:i+length]\n if slice in log:\n return True\n log.add(slice)\n return False\n\n l, r = 0, len(s) - 1\n while l + 1 < r:\n m = (l + r) // 2\n if has_repeating_str(m):\n l = m\n else:\n r = m\n\n if has_repeating_str(r):\n return r\n return l\n\n'''\nbinary search\n'''\n","repo_name":"Dillettant/leetcode","sub_path":"solutions/1062.py","file_name":"1062.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18627754615","text":"# -*- coding: utf-8 -*-\nfrom . import unittest\nimport icalendar\nimport datetime\nimport os\nimport pytz\n\n\nclass TestIssues(unittest.TestCase):\n\n\n def test_issue_53(self):\n \"\"\"Issue #53 - Parsing failure on some descriptions?\n https://github.com/collective/icalendar/issues/53\n \"\"\"\n\n directory = os.path.dirname(__file__)\n ics = open(os.path.join(directory, 'case_meetup.ics'), 'rb')\n cal = icalendar.Calendar.from_ical(ics.read())\n ics.close()\n\n event = cal.walk('VEVENT')[0]\n desc = event.get('DESCRIPTION')\n self.assertTrue('July 12 at 6:30 PM' in desc.to_ical())\n\n timezones = cal.walk('VTIMEZONE')\n self.assertEqual(len(timezones), 1)\n tz = timezones[0]\n self.assertEqual(tz['tzid'].to_ical(), \"America/New_York\")\n\n\n def test_issue_55(self):\n \"\"\"Issue #55 - Parse error on utc-offset with seconds value\n https://github.com/collective/icalendar/issues/55\n \"\"\"\n ical_str = \"\"\"BEGIN:VTIMEZONE\nTZID:America/Los Angeles\nBEGIN:STANDARD\nDTSTART:18831118T120702\nRDATE:18831118T120702\nTZNAME:PST\nTZOFFSETFROM:-075258\nTZOFFSETTO:-0800\nEND:STANDARD\nEND:VTIMEZONE\"\"\"\n\n tz = icalendar.Timezone.from_ical(ical_str)\n self.assertEqual(tz.to_ical(),\n 'BEGIN:VTIMEZONE\\r\\nTZID:America/Los Angeles\\r\\nBEGIN:STANDARD\\r\\n'\n 'DTSTART:18831118T120702\\r\\nRDATE:18831118T120702\\r\\nTZNAME:PST'\n '\\r\\nTZOFFSETFROM:-075258\\r\\nTZOFFSETTO:-0800\\r\\nEND:STANDARD\\r\\n'\n 'END:VTIMEZONE\\r\\n')\n\n\n def test_issue_58(self):\n \"\"\"Issue #58 - TZID on UTC DATE-TIMEs\n https://github.com/collective/icalendar/issues/58\n \"\"\"\n\n # According to RFC 2445: \"The TZID property parameter MUST NOT be\n # applied to DATE-TIME or TIME properties whose time values are\n # specified in UTC.\"\n\n event = icalendar.Event()\n dt = pytz.utc.localize(datetime.datetime(2012, 7, 16, 0, 0, 0))\n event.add('dtstart', dt)\n self.assertEqual(event.to_ical(),\n \"BEGIN:VEVENT\\r\\n\"\n \"DTSTART;VALUE=DATE-TIME:20120716T000000Z\\r\\n\"\n \"END:VEVENT\\r\\n\")\n\n\n def test_issue_64(self):\n \"\"\"Issue #64 - Event.to_ical() fails for unicode strings\n https://github.com/collective/icalendar/issues/64\n \"\"\"\n\n 
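# Both cases check that the SUMMARY survives to_ical() serialization:\n 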
# Non-unicode characters\n event = icalendar.Event()\n event.add(\"dtstart\", datetime.datetime(2012, 9, 3, 0, 0, 0))\n event.add(\"summary\", u\"abcdef\")\n self.assertEqual(event.to_ical(),\n \"BEGIN:VEVENT\\r\\nSUMMARY:abcdef\\r\\nDTSTART;VALUE=DATE-TIME:\"\n \"20120903T000000\\r\\nEND:VEVENT\\r\\n\")\n\n # Unicode characters\n event = icalendar.Event()\n event.add(\"dtstart\", datetime.datetime(2012, 9, 3, 0, 0, 0))\n event.add(\"summary\", u\"åäö\")\n self.assertEqual(event.to_ical(),\n \"BEGIN:VEVENT\\r\\nSUMMARY:\\xc3\\xa5\\xc3\\xa4\\xc3\\xb6\\r\\n\"\n \"DTSTART;VALUE=DATE-TIME:20120903T000000\\r\\nEND:VEVENT\\r\\n\")\n\n def test_issue_70(self):\n \"\"\"Issue #70 - e.decode(\"RRULE\") causes Attribute Error\n https://github.com/collective/icalendar/issues/70\n \"\"\"\n\n ical_str = \"\"\"BEGIN:VEVENT\nCREATED:20081114T072804Z\nUID:D449CA84-00A3-4E55-83E1-34B58268853B\nDTEND:20070220T180000\nRRULE:FREQ=WEEKLY;INTERVAL=1;UNTIL=20070619T225959\nTRANSP:OPAQUE\nSUMMARY:Esb mellon phone conf\nDTSTART:20070220T170000\nDTSTAMP:20070221T095412Z\nSEQUENCE:0\nEND:VEVENT\"\"\"\n\n cal = icalendar.Calendar.from_ical(ical_str)\n recur = cal.decoded(\"RRULE\")\n self.assertIsInstance(recur, icalendar.vRecur)\n self.assertEqual(recur.to_ical(),\n u'FREQ=WEEKLY;UNTIL=20070619T225959;INTERVAL=1')\n\n\n\n def test_issue_82(self):\n \"\"\"Issue #82 - vBinary __repr__ called rather than to_ical from\n container types\n https://github.com/collective/icalendar/issues/82\n \"\"\"\n\n b = icalendar.vBinary('text')\n b.params['FMTTYPE'] = 'text/plain'\n self.assertEqual(b.to_ical(), 'dGV4dA==')\n e = icalendar.Event()\n e.add('ATTACH', b)\n self.assertEqual(e.to_ical(),\n \"BEGIN:VEVENT\\r\\nATTACH;ENCODING=BASE64;FMTTYPE=text/plain;\"\n \"VALUE=BINARY:dGV4dA==\\r\\nEND:VEVENT\\r\\n\"\n )\n\n\n def test_issue_100(self):\n \"\"\"Issue #100 - Transformed doctests into unittests, Test fixes and\n cleanup.\n https://github.com/collective/icalendar/pull/100\n \"\"\"\n\n ical_content = \"BEGIN:VEVENT\\r\\nSUMMARY;LANGUAGE=ru:te\\r\\nEND:VEVENT\"\n icalendar.Event.from_ical(ical_content).to_ical()\n\n\n def test_issue_101(self):\n \"\"\"Issue #101 - icalender is choking on umlauts in ORGANIZER\n\n https://github.com/collective/icalendar/issues/101\n \"\"\"\n ical_str = \"\"\"BEGIN:VCALENDAR\nVERSION:2.0\nX-WR-CALNAME:Kalender von acme\\, admin\nPRODID:-//The Horde Project//Horde_iCalendar Library\\, Horde 3.3.5//EN\nMETHOD:PUBLISH\nBEGIN:VEVENT\nDTSTART:20130416T100000Z\nDTEND:20130416T110000Z\nDTSTAMP:20130416T092616Z\nUID:20130416112341.10064jz0k4j7uem8@acmenet.de\nCREATED:20130416T092341Z\nLAST-MODIFIED:20130416T092341Z\nSUMMARY:wichtiger termin 1\nORGANIZER;CN=\"acme, ädmin\":mailto:adm-acme@mydomain.de\nLOCATION:im büro\nCLASS:PUBLIC\nSTATUS:CONFIRMED\nTRANSP:OPAQUE\nEND:VEVENT\nEND:VCALENDAR\"\"\"\n\n cal = icalendar.Calendar.from_ical(ical_str)\n org_cn = cal.walk('VEVENT')[0]['ORGANIZER'].params['CN']\n self.assertEqual(org_cn, u'acme, ädmin')\n","repo_name":"blech/snaptrip","sub_path":"icalendar/tests/test_fixed_issues.py","file_name":"test_fixed_issues.py","file_ext":"py","file_size_in_byte":5546,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"15423230251","text":"\"\"\"Screen module.\"\"\"\nimport logging\nimport pygame\nimport random\nimport functools\nimport pyscroll\nfrom pyscroll.util import PyscrollGroup\n\nfrom .locals import * # noqa\nfrom .input import InputModule\nfrom .events import Event, TickEvent, EventManager\nfrom .entities 
import LayerComponent, EntityManager, MapLayerComponent\nfrom .modules import Module\nfrom .utils import ContainerAware\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass FrameRenderCompletedEvent(Event):\n\n \"\"\"Frame render completion event.\"\"\"\n\n\nclass DisplayLayer(ContainerAware):\n\n \"\"\"Display layer.\"\"\"\n\n def __init__(self, size=None, flags=0, position=[0, 0], z_index=None):\n \"\"\"Constructor.\"\"\"\n if not z_index:\n z_index = 100\n\n if not size:\n info = pygame.display.Info()\n size = info.current_w, info.current_h\n\n self._z_index = z_index\n self.display = None\n\n self.size = size\n self.position = position\n self.flags = flags\n\n self.surface = pygame.Surface(self.size, flags=self.flags)\n\n @property\n def z_index(self):\n \"\"\"Return zIndex.\"\"\"\n return self._z_index\n\n @z_index.setter\n def z_index(self, value):\n \"\"\"Set zIndex.\"\"\"\n # If we're attached to the displaymanager, make sure our z_index\n # binding is correct\n display = self.display if self.display else None\n\n if display:\n display.remove(self)\n\n self._z_index = value\n\n if display:\n display.add(self)\n\n def update(self, delta_time):\n \"\"\"\n Compute an update to the layer's state.\n\n Generally, this will update the state of all objects inside of the layer.\n\n :param delta_time: Time delta to compute the state update for, in s.\n\n \"\"\"\n pass\n\n def draw(self, surface):\n \"\"\"Draw the layer onto a surface.\"\"\"\n surface.blit(self.surface, self.position)\n\n def resize(self, size):\n \"\"\"\n Resize the layer.\n\n :param list size: New layer size, in pixels.\n\n \"\"\"\n self.size = size\n self.surface = pygame.transform.scale(self.surface, self.size)\n\n\nclass EntityDisplayLayer(DisplayLayer):\n\n \"\"\"\n Entity display layer.\n\n A layer for rendering and displaying entities.\n\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Constructor.\"\"\"\n super().__init__(**kwargs)\n self.entities = {}\n\n def add_entity(self, entity):\n \"\"\"Add an entity to the layer.\"\"\"\n entity.add_component(LayerComponent(layer=self))\n self.entities[entity.id] = entity\n\n def remove_entity(self, entity):\n \"\"\"Remove an entity from the layer.\"\"\"\n entity.remove_component(LayerComponent)\n self.entities.pop(entity.id, None)\n\n def draw(self, surface):\n \"\"\"Draw the layer onto a surface.\"\"\"\n self.surface.fill([0, 0, 0, 0])\n\n for entity_id in self.entities:\n self.surface.blit(self.entities[entity_id].components['sprite'].image,\n self.entities[entity_id].components['position'].primary_position)\n\n super().draw(surface)\n\n\nclass ScrollingMapEntityDisplayLayer(EntityDisplayLayer):\n\n \"\"\"\n Scrollable map display layer.\n\n A display layer for rendering entities on a scrollable map.\n\n \"\"\"\n\n def __init__(self, tmx_data, default_layer=0, **kwargs):\n \"\"\"Constructor.\"\"\"\n super().__init__(**kwargs)\n\n self.em = self.container.get(EntityManager)\n self.events = self.container.get(EventManager)\n\n # Create data source\n self.map_data = pyscroll.data.TiledMapData(tmx_data)\n self.map_layer = pyscroll.BufferedRenderer(self.map_data, self.size, clamp_camera=True)\n\n self.surface = self.map_layer.buffer\n self.group = PyscrollGroup(map_layer=self.map_layer, default_layer=default_layer)\n\n self.build_collision_map()\n self.build_mana_map()\n self.spawn_entities()\n\n self.center = None\n\n def build_collision_map(self):\n \"\"\"Build a collision map based on map data.\"\"\"\n logger.debug('Building collision map [map=%s]', 
self.map_data.tmx.filename)\n self.collision_map = []\n\n for o in self.map_data.tmx.objects:\n if o.properties.get('collision', 'false') == 'true':\n self.collision_map.append(pygame.Rect(o.x, o.y, o.width, o.height))\n\n def build_mana_map(self):\n \"\"\"Build a mana map based on map data.\"\"\"\n logger.debug('Building mana map [map=%s]', self.map_data.tmx.filename)\n self.mana_map = {}\n self.mana_replenishment_map = {}\n\n # Loop through all vibible layers\n for i, l in enumerate(self.map_data.tmx.visible_layers):\n # Only continue for terrain layers\n if l.properties.get('terrain', 'false') == 'true':\n # Iterate over all tiles\n for x in range(0, l.width):\n for y in range(0, l.height):\n tile = self.map_data.tmx.get_tile_properties(x, y, i)\n\n # Only continue if the current tile has mana types\n if tile and tile.get('mana_types'):\n # Initialize dicts if needed\n if not self.mana_map.get(i):\n self.mana_map[i] = {}\n\n if not self.mana_map[i].get(x):\n self.mana_map[i][x] = {}\n\n if not self.mana_map[i][x].get(y):\n self.mana_map[i][x][y] = {}\n\n # Parse mana type data\n mana_types = tile['mana_types'].split(';')\n mana_types = [x.split(':') for x in mana_types]\n\n # Index mana types by type, and use defaults\n for mana in mana_types:\n # .. = [, ]\n self.mana_map[i][x][y][mana[0]] = [\n float(mana[1]) if len(mana) > 1 else 1,\n float(mana[2]) if len(mana) > 2 else (float(mana[1] if len(mana) > 1 else 1))\n ]\n\n def spawn_entities(self):\n \"\"\"Spawn entities on the map based on map data.\"\"\"\n logger.debug('Spawning entities [map=%s]', self.map_data.tmx.filename)\n\n for o in self.map_data.tmx.objects:\n if o.properties.get('spawn', 'false') == 'true':\n templates = o.properties['spawn_templates'].split(';')\n\n for i in range(0, o.properties.get('spawn_count', 1)):\n template = random.choice(templates)\n entity = self.em.create_entity_from_template(template)\n\n if 'position' in entity.components:\n target = pygame.Rect(o.x, o.y, o.width, o.height).center\n\n entity.components['position'].layer_position = target\n # entity.components['position'].map_position = layer_point_to_map(self.map_layer, target)\n # entity.components['position'].screen_position = \\\n # map_point_to_screen(self.map_layer, entity.components['position'].map_position)\n\n self.add_entity(entity)\n\n def update(self, delta_time):\n \"\"\"Compute an update to the layer's state.\"\"\"\n super().update(delta_time)\n self.group.update(delta_time)\n\n def add_entity(self, entity):\n \"\"\"Add an entity to the layer.\"\"\"\n super().add_entity(entity)\n entity.add_component(MapLayerComponent())\n self.group.add(entity)\n\n # If this entity supports collision detection, add its collision core to our collision map\n if 'physics' in entity.components:\n self.collision_map.append(entity.components['physics'].collision_core)\n\n def remove_entity(self, entity):\n \"\"\"Remove an entity from the layer.\"\"\"\n super().remove_entity(entity)\n entity.remove_component(MapLayerComponent)\n self.group.remove(entity)\n\n # If this entity supports collision detection, remove its collision core from our collision map\n if 'physics' in entity.components:\n self.collision_map.remove(entity.components['physics'].collision_core)\n\n def draw(self, surface):\n \"\"\"Draw the layer onto a surface.\"\"\"\n # Center the map/screen if needed\n if self.center:\n self.group.center(self.center.rect.center)\n\n # Draw the map and all sprites\n self.group.draw(surface)\n\n def resize(self, size):\n \"\"\"Handle a resize.\"\"\"\n 
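# Let the renderer rebuild its internal buffer at the new size, then\n # re-point this layer's surface at that new buffer.\n 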
self.map_layer.set_size(size)\n self.surface = self.map_layer.buffer\n\n super().resize(size)\n\n\nclass DisplayModule(Module):\n\n \"\"\"Display module.\"\"\"\n\n dependencies = [\n 'input'\n ]\n\n def __init__(self):\n \"\"\"Constructor.\"\"\"\n self.configuration = self.container.get(Configuration)\n self.events = self.container.get(EventManager)\n self.input = self.container.get(InputModule)\n\n self.resolution = self.configuration.get('akurra.display.resolution', [0, 0])\n self.caption = self.configuration.get('akurra.display.caption', 'Akurra DEV')\n\n self.flags = self.configuration.get('akurra.display.flags', ['DOUBLEBUF', 'HWSURFACE', 'RESIZABLE'])\n self.flags = functools.reduce(lambda x, y: x | y, [getattr(pygame, x) for x in self.flags])\n\n self.screen = self.create_screen()\n\n self.layers = {}\n self.layer_z_indexes = []\n\n def start(self):\n \"\"\"Start the module.\"\"\"\n self.events.register(TickEvent, self.on_tick)\n self.events.register(pygame.VIDEORESIZE, self.on_video_resize)\n\n self.input.add_action_listener('fullscreen_toggle', self.toggle_fullscreen)\n\n def stop(self):\n \"\"\"Stop the module.\"\"\"\n self.input.remove_action_listener(self.toggle_fullscreen)\n\n self.events.unregister(self.on_video_resize)\n self.events.unregister(self.on_tick)\n\n def add_layer(self, layer):\n \"\"\"Add a layer to the display.\"\"\"\n if layer.z_index not in self.layers:\n self.layers[layer.z_index] = {}\n self.layer_z_indexes = sorted(self.layers.keys(), key=int)\n\n self.layers[layer.z_index][layer] = 1\n\n # Set display within layer\n layer.display = self\n\n logger.debug('Added layer \"%s\" to display [z_index=%s]', id(layer), layer.z_index)\n\n def remove_layer(self, layer):\n \"\"\"Remove a layer from the display.\"\"\"\n to_remove = None\n\n # If we're able to remove a layer and there are no other layers for this z_index,\n # queue the key for removal\n if self.layers[layer.z_index].pop(layer, None) and not self.layers[layer.z_index]:\n to_remove = layer.z_index\n\n if to_remove:\n self.layers.pop(to_remove, None)\n self.layer_z_indexes.remove(to_remove)\n\n # Remove display from layer\n layer.display = None\n\n logger.debug('Removed layer \"%s\" from display [z_index=%s]', id(layer), layer.z_index)\n\n def on_tick(self, event):\n \"\"\"\n Perform a frame render, updating the contents of the entire display.\n\n Internally, this will render all layers and perform a screen update.\n See also http://www.pygame.org/docs/ref/display.html#pygame.display.flip\n\n :param event: TickEvent\n\n \"\"\"\n for z_index in self.layer_z_indexes:\n for layer in self.layers[z_index]:\n layer.update(event.delta_time)\n layer.draw(self.screen)\n\n pygame.display.flip()\n self.events.dispatch(FrameRenderCompletedEvent())\n\n def on_video_resize(self, event):\n \"\"\"Handle resizing of the display.\"\"\"\n old_size = self.screen.get_size()\n self.resolution = event.size\n self.screen = self.create_screen()\n\n for z_index in self.layers:\n for layer in self.layers[z_index]:\n if layer.size == old_size:\n layer.resize(event.size)\n\n def create_screen(self):\n \"\"\"Create and return a screen with a few options.\"\"\"\n screen = pygame.display.set_mode(self.resolution, self.flags)\n pygame.display.set_caption(self.caption)\n logger.debug('Display created [resolution=%s, flags=%s]', self.resolution, self.flags)\n\n return screen\n\n def toggle_fullscreen(self, event):\n \"\"\"Toggle fullscreen mode.\"\"\"\n if not event.state:\n return\n\n if self.flags & pygame.FULLSCREEN:\n self.flags ^= 
pygame.FULLSCREEN\n else:\n self.flags |= pygame.FULLSCREEN\n\n self.screen = self.create_screen()\n","repo_name":"multatronic/akurra","sub_path":"akurra/display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":12852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27445690285","text":"# -*-coding:utf-8 -*-\n# __author__ = 'xiaoxi'\n# @time:2019/4/8 10:46\nimport os,time\nfrom public.publicLogs import PublicLogs\nlog=PublicLogs()\nimport warnings\nwarnings.simplefilter(\"ignore\", ResourceWarning)\n\nclass PublicOperate:\n def __init__(self,driver):\n self.driver=driver\n\n def get_back(self):\n '''\n :return: press the Back key\n '''\n os.popen('adb shell input keyevent 4')\n time.sleep(2)\n log.info(u\"Pressed Back on the native page\")\n\n def get_search(self):\n \"\"\"\n Search key\n :return:\n \"\"\"\n return self.driver.press_keycode(84)\n\n def get_window_size(self):\n '''\n :return: the screen size\n '''\n x=self.driver.get_window_size()['width']\n y=self.driver.get_window_size()['height']\n return(x,y)\n def swipe_up(self):\n '''\n :return: swipe up\n '''\n try:\n l=self.get_window_size()\n x1=int(l[0]*0.5)\n y1=int(l[1]*0.8) # fixed: vertical coordinates use the screen height l[1], not the width\n y2=int(l[1]*0.2)\n self.driver.swipe(x1,y1,x1,y2,1000)\n log.info(u\"Swiped up\")\n time.sleep(1)\n except:\n log.error(u\"Swipe up failed\")\n\n def swipe_down(self):\n '''\n :return: swipe down\n '''\n try:\n l=self.get_window_size()\n x1=int(l[0]*0.5)\n y1=int(l[1]*0.3) # fixed: use the screen height l[1]\n y2=int(l[1]*0.8)\n self.driver.swipe(x1,y1,x1,y2,1000)\n log.info(u\"Swiped down\")\n except:\n log.error(u\"Swipe down failed\")\n\n def swipe_right(self):\n '''\n :return: swipe right\n '''\n try:\n l=self.get_window_size()\n x1=int(l[0]*0.2)\n x2=int(l[0]*0.8)\n y1=int(l[1]*0.5)\n self.driver.swipe(x1,y1,x2,y1,1000)\n log.info(u\"Swiped right\")\n except:\n log.error(u\"Swipe right failed\")\n\n def swipe_left(self):\n '''\n :return: swipe left\n '''\n try:\n l=self.get_window_size()\n x1=int(l[0]*0.8)\n x2=int(l[0]*0.2)\n y1=int(l[1]*0.5)\n self.driver.swipe(x1,y1,x2,y1,1000)\n log.info(u\"Swiped left\")\n except:\n log.error(u\"Swipe left failed\")\n\n def swipe_click(self):\n '''\n :return: tap at coordinates\n '''\n try:\n l=self.get_window_size()\n x1=int(l[0]*0.5)\n y1=int(l[1]*0.3)\n self.driver.swipe(x1,y1,x1,y1,1000)\n log.info(u\"Tapped at coordinates\")\n except:\n log.error(u\"Tap failed\")\n\n def screenshot(self):\n '''\n :return: take a screenshot\n '''\n nowtime=time.strftime('%Y-%m-%d %H-%M-%S',time.localtime())\n filename=\"../img/%s.png\"%nowtime\n self.driver.get_screenshot_as_file(filename)","repo_name":"labixiaoxi/testJustbon","sub_path":"testJustbon/public/publicOperate.py","file_name":"publicOperate.py","file_ext":"py","file_size_in_byte":2854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21322042392","text":"import os #to get information from the operating system\nos.remove(\"novel.txt\")#removes the file novel.txt\nos.rename(\"first_draft.txt\",\"finished.txt\")#renames the file\nos.path.exists(\"finished.txt\")#tells us whether the file exists, True or False\n\nos.path.getsize(\"speed.txt\")#tells us the size of the file\nos.path.getmtime(\"speed.txt\")#time in the commonly used unix format\n\nimport datetime\ntimestamp=os.path.getmtime(\"speed.txt\")\ndatetime.datetime.fromtimestamp(timestamp)#a much easier date to read\n\nos.path.abspath(\"speed.txt\")#tells us the absolute path of the 
file\n","repo_name":"scidox/esempi_python","sub_path":"manipolazione_file.py","file_name":"manipolazione_file.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32338762452","text":"\"\"\"\n Functions for filtering the amplitude component of the signal\n\"\"\"\n\n\nimport numpy as np\nfrom cv2 import boxFilter, BORDER_REPLICATE\n\n\ndef kuan_argument_checker(input_matrix: np.ndarray, window_step: int, enl=1, rli_type='amplitude') -> None:\n \"\"\"\n Validate the Kuan filter inputs\n input_matrix - amplitude matrix of the radar image\n window_step - filter window radius\n enl - equivalent number of looks (incoherent accumulations)\n rli_type - radar image type ('amplitude', 'power')\n \"\"\"\n if rli_type not in {'amplitude', 'power'}:\n raise ValueError('rli_type')\n\n if not isinstance(window_step, int):\n raise TypeError('window_step')\n if window_step < 1:\n raise ValueError('window_step')\n\n if not isinstance(enl, int):\n raise TypeError('enl', int, enl)\n if enl < 1:\n raise ValueError('enl')\n\n if not isinstance(input_matrix, np.ndarray):\n raise TypeError('input_matrix')\n if input_matrix.shape[0] < 2 * window_step + 1 or input_matrix.shape[1] < 2 * window_step + 1: # fixed: the second check uses shape[1]\n raise ValueError('input_matrix too small')\n\n\ndef kuan_filter(input_matrix: np.ndarray, window_step: int, enl=1, rli_type='amplitude') -> np.ndarray:\n \"\"\"\n Kuan speckle-noise filtering algorithm\n input_matrix - radar image matrix\n window_step - filter window radius\n enl - equivalent number of looks (incoherent accumulations)\n rli_type - radar image type ('amplitude', 'power')\n\n \"\"\"\n\n rvmn = 0.273 / enl if rli_type == 'amplitude' else 1 / enl # relative variance of the multiplicative noise\n dvdr = 1 + rvmn # divisor used to compute the weighting coefficient\n\n ksize = (2 * window_step + 1, 2 * window_step + 1) # filter kernel size tuple\n\n # matrix whose cell (i, j) holds the mean of the window centred at (i + window_step, j + window_step)\n mean_matrix = boxFilter(input_matrix, -1, ksize, normalize=True, borderType=BORDER_REPLICATE)\n\n # matrix whose cell (i, j) holds the mean of the squared window\n # centred at (i + window_step, j + window_step)\n mean_sqr_matrix = boxFilter(input_matrix ** 2, -1, ksize, normalize=True, borderType=BORDER_REPLICATE)\n\n # weighting coefficient matrix\n k = (1 - rvmn * mean_matrix ** 2 / (mean_sqr_matrix - mean_matrix ** 2))\n # np.clip equivalent written with a function that numba supports\n k = np.where(k < 0, 0, k)\n k /= dvdr\n result = mean_matrix + k * (input_matrix - mean_matrix)\n return result\n","repo_name":"siprok/o_insar","sub_path":"insar/filtering/amplitude.py","file_name":"amplitude.py","file_ext":"py","file_size_in_byte":3146,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"8631709064","text":"# -*- coding: utf-8 -*-\n\nfrom django import forms\nfrom storage.models import StorageUnit\n\nclass StorageUnitForm(forms.Form):\n \"\"\"Storage unit creation form.\"\"\"\n alias = forms.CharField(max_length=200, required=False)\n name = forms.CharField(max_length=200, required=False)\n description = forms.CharField(max_length=200, required=False)\n description_file = forms.FileField(required=True)\n ingest_file = forms.FileField(required=True)\n metadata_generation_script = forms.FileField(required=True)\n\n\nclass 
StorageUnitUpdateForm(forms.ModelForm):\n \"\"\"Storage unit update form.\"\"\"\n alias = forms.CharField(\n \tlabel='Alias de la Unidad',\n \tmax_length=200, required=False,\n \thelp_text=''\n )\n name = forms.CharField(\n \tlabel='Nombre de Unidad',\n \twidget=forms.TextInput(\n \t\tattrs={\n \t\t'readonly':'readonly',\n \t\t'placeholder': (\n 'Escriba el alias de la unidad de almacenamiento.' \n )\n \t\t}\n \t)\n )\n\n class Meta:\n model = StorageUnit\n fields = ['alias','name']\n\n\n\n","repo_name":"OpenDatacubeIDEAM/cdcol-web","sub_path":"storage/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"73008680904","text":"import tensorflow as tf\nimport glob\nimport numpy as np\nimport os\nfrom tensorflow.keras import layers, models\nimport time\nimport cv2\nimport model\n\nbatch_size=256\nepochs=500\nnoise_dim=100\ntab_size=5\nnum_examples_to_generate=tab_size*tab_size\ndir_images='images_gan'\ncheckpoint_dir='./training_checkpoints_gan'\ncheckpoint_prefix=os.path.join(checkpoint_dir, \"ckpt\")\n\nif not os.path.isdir(dir_images):\n os.mkdir(dir_images)\n\n(train_images, train_labels), (test_images, test_labels)=tf.keras.datasets.mnist.load_data()\n\ntrain_images=train_images.reshape(-1, 28, 28, 1).astype('float32')\ntrain_images=(train_images-127.5)/127.5\n\ntrain_dataset=tf.data.Dataset.from_tensor_slices(train_images).shuffle(len(train_images)).batch(batch_size)\n\ndef discriminator_loss(real_output, fake_output):\n real_loss=cross_entropy(tf.ones_like(real_output), real_output)\n fake_loss=cross_entropy(tf.zeros_like(fake_output), fake_output)\n total_loss=real_loss+fake_loss\n return total_loss\n\ndef generator_loss(fake_output):\n return cross_entropy(tf.ones_like(fake_output), fake_output)\n\ngenerator=model.generator_model()\ndiscriminator=model.discriminator_model()\n\ncross_entropy=tf.keras.losses.BinaryCrossentropy(from_logits=True)\n\ntrain_generator_loss=tf.keras.metrics.Mean()\ntrain_discriminator_loss=tf.keras.metrics.Mean()\n\ngenerator_optimizer=tf.keras.optimizers.Adam(1e-4)\ndiscriminator_optimizer=tf.keras.optimizers.Adam(1e-4)\n\ncheckpoint=tf.train.Checkpoint(generator_optimizer=generator_optimizer,\n discriminator_optimizer=discriminator_optimizer,\n generator=generator,\n discriminator=discriminator)\n\nseed=tf.random.normal([num_examples_to_generate, noise_dim])\n\n@tf.function\ndef train_step(images):\n noise=tf.random.normal([batch_size, noise_dim])\n\n with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:\n generated_images=generator(noise, training=True)\n\n real_output=discriminator(images, training=True)\n fake_output=discriminator(generated_images, training=True)\n\n gen_loss=generator_loss(fake_output)\n disc_loss=discriminator_loss(real_output, fake_output)\n\n train_generator_loss(gen_loss)\n train_discriminator_loss(disc_loss)\n \n gradients_of_generator=gen_tape.gradient(gen_loss, generator.trainable_variables)\n gradients_of_discriminator=disc_tape.gradient(disc_loss, discriminator.trainable_variables)\n\n generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))\n discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))\n\ndef train(dataset, epochs):\n for epoch in range(epochs):\n start=time.time()\n for image_batch in dataset:\n train_step(image_batch)\n generate_and_save_images(generator, epoch+1, seed)\n if 
(epoch+1)%15==0:\n checkpoint.save(file_prefix=checkpoint_prefix)\n print ('Epoch {}: loss generator: {:.4f} loss discriminator: {:.4f} {:.4f} sec'.format(epoch+1,\n train_generator_loss.result(),\n train_discriminator_loss.result(),\n time.time()-start))\n train_generator_loss.reset_states()\n train_discriminator_loss.reset_states()\n \ndef generate_and_save_images(model, epoch, test_input):\n labels=tf.one_hot(tf.range(0, num_examples_to_generate, 1)%10, 10)\n predictions=model([test_input, labels], training=False)\n img=np.empty(shape=(tab_size*28, tab_size*28), dtype=np.float32)\n for i in range(tab_size):\n for j in range(tab_size):\n img[j*28:(j+1)*28, i*28:(i+1)*28]=predictions[j*tab_size+i, :, :, 0]*127.5+127.5\n cv2.imwrite('{}/image_{:04d}.png'.format(dir_images, epoch), img)\n \ntrain(train_dataset, epochs)\n","repo_name":"L42Project/Tutoriels","sub_path":"Tensorflow/tutoriel35/gan.py","file_name":"gan.py","file_ext":"py","file_size_in_byte":4006,"program_lang":"python","lang":"en","doc_type":"code","stars":70,"dataset":"github-code","pt":"81"} +{"seq_id":"28352066959","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Dec 30 12:07:19 2017\r\n\r\n@author: 羅際禎\r\n\"\"\"\r\n\r\ndef kinect2global(a, b): # a is vertical pixel, b is horizontal\r\n import numpy as np\r\n a = a - 292\r\n b = b - 351\r\n paraX = np.load(\"paraX.npy\")\r\n #print(paraX, paraX.shape)\r\n paraY = np.load(\"paraY.npy\")\r\n #print(paraY, paraY.shape)\r\n X = np.matrix([a,b,a**2,b**2,a*b])*paraX\r\n Y = np.matrix([a,b,a**2,b**2,a*b])*paraY\r\n X = float(X)\r\n Y = float(Y)\r\n X += 120\r\n #print(X,Y)\r\n return (X,Y)","repo_name":"B04902039/TennisColletor","sub_path":"kinect2global.py","file_name":"kinect2global.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10282454463","text":"\"\"\" Module for the labeling session \nTable: session\n\"\"\"\nfrom dataclasses import dataclass, field\nfrom typing import List, Dict\nfrom datetime import datetime\nfrom pandas import DataFrame\n\n\n@dataclass\nclass LabelingSession:\n \"\"\"Represent a summary of the labeling updates in BI-LAVA before iterating\"\"\"\n\n # pylint: disable=C0103:invalid-name\n id: int = field(init=False)\n end_date: datetime\n number: int\n num_updates: int\n num_errors: int\n num_classifiers: int\n\n def to_tuple(self):\n \"\"\"to insert to db\"\"\"\n return (\n self.end_date,\n self.number,\n self.num_updates,\n self.num_errors,\n self.num_classifiers,\n )\n\n\ndef create_session(\n end_date: datetime,\n subfigures: List[Dict],\n classifiers: List[str],\n session_number: int,\n) -> LabelingSession:\n df_subfigures = DataFrame.from_dict(subfigures)\n df_subfigures[\"error\"] = df_subfigures.apply(\n lambda x: 1 if \"error\" in x[\"upt_label\"] else 0, axis=1\n )\n num_errors = df_subfigures.error.sum()\n num_updates = df_subfigures.shape[0] - num_errors\n num_classifiers = len(classifiers)\n\n return LabelingSession(\n end_date=end_date,\n number=session_number,\n num_updates=num_updates,\n num_errors=num_errors,\n num_classifiers=num_classifiers,\n )\n","repo_name":"uic-evl/bio-search","sub_path":"content-onboarding/biosearch_core/bilava/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74805588423","text":"import requests\nimport PySimpleGUI\nfrom bs4 import BeautifulSoup\n\ndef 
get_champion_info(champion_name):\n url = 'https://leagueoflegends.fandom.com/wiki/'+champion_name+'/LoL'\n response = requests.get(url)\n soup = BeautifulSoup(response.content, 'html.parser')\n champion_info = {\n 'Health': soup.find(id='Health_'+champion_name),\n 'Mana': soup.find(id='ResourceBar_'+champion_name),\n 'Armor': soup.find(id='Armor_'+champion_name),\n 'Magic_Resist': soup.find(id='MagicResist_'+champion_name),\n 'Attack_Damage': soup.find(id='AttackDamage_'+champion_name)\n }\n return champion_info\n\nlayout = [[PySimpleGUI.Text('Champion:'), PySimpleGUI.InputText()],\n [PySimpleGUI.Button('Ok'), PySimpleGUI.Button('Cancel')],\n [PySimpleGUI.Text('Health: '),PySimpleGUI.Text('', key='Health'), \n PySimpleGUI.Text('Mana: '),PySimpleGUI.Text('', key='Mana')],\n [PySimpleGUI.Text('Armor: '),PySimpleGUI.Text('', key='Armor'), \n PySimpleGUI.Text('Magic Resist: '),PySimpleGUI.Text('', key='magic_Resist')],\n [PySimpleGUI.Text('Attack Damage: '),PySimpleGUI.Text('', key='attack_Damage')],\n [PySimpleGUI.Text('', key='status')]]\n\nwindow = PySimpleGUI.Window('LoL-Tool', layout)\n\nwhile True:\n event, values = window.read()\n window['status'].update('')\n if event == 'Ok':\n try:\n champion_Info = get_champion_info(values[0])\n window['Health'].update(champion_Info[\"Health\"].text.strip())\n window['Mana'].update(champion_Info[\"Mana\"].text.strip())\n window['Armor'].update(champion_Info[\"Armor\"].text.strip())\n window['magic_Resist'].update(champion_Info[\"Magic_Resist\"].text.strip())\n window['attack_Damage'].update(champion_Info[\"Attack_Damage\"].text.strip())\n except:\n window['status'].update(\"Make sure you typed the champion's name correctly\")\n continue\n elif event == \"Cancel\" or event == PySimpleGUI.WIN_CLOSED:\n window.close()\n break\n","repo_name":"Maikeruy/LoL-Wiki-Scrape","sub_path":"Champion_Info_Tool.py","file_name":"Champion_Info_Tool.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"41950915327","text":"import sys\ninput = sys.stdin.readline\n\nN = int(input())\n\ndp = [float('inf') for _ in range(N + 1)]\ndp[0] = 0\ndp[1] = 1\n\nfor n in range(2, N + 1):\n for root in range(1, int(n ** 0.5) + 1):\n dp[n] = min(dp[n], dp[n - root ** 2] + 1)\n\nprint(dp[N])\n","repo_name":"nerdroid-labs/problem-solving","sub_path":"BOJ/17626.py","file_name":"17626.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74152890504","text":"import re\nimport time\n\nfrom app.MyModule import Telnet5680T\nfrom .. import logger, db\nfrom ..models import Device, ONTDetail # fixed: time and ONTDetail are used below but were never imported; ONTDetail is assumed to live alongside Device\n\n\ndef AnalysisONT(id):\n device_id = id\n device_info = Device.query.filter_by(id=device_id).first()\n tlnt = Telnet5680T.TelnetDevice(mac='', host=device_info.ip, username=device_info.login_name,\n password=device_info.login_password)\n if not device_info.status:\n logger.info('status %s. 
This device is not available' % device_info.status)\n return False\n\n # get the number of boards\n board_id_list = [line.strip().split()[0] for line in tlnt.check_board_info()]\n\n # Get the ONT info and insert or update to DB\n for bid in board_id_list:\n ont_list, port_list = tlnt.check_board_info(slot=bid)\n print(port_list)\n x = 0\n for i in ont_list:\n i = i.replace('b\\'', '')\n if re.search(r'total of ONTs', str(i)):\n x += 1\n continue\n try:\n print(i.strip().split()[:9])\n f, s_p, ont_id, mac, control_flag, run_state, config_state, match_state, protect_side = i.strip().split()[:9]\n f = f.replace('/', '')\n s, p = s_p.split('/')\n except Exception as e:\n try:\n print(i.split()[:8])\n f_s_p, ont_id, mac, control_flag, run_state, config_state, match_state, protect_side = i.strip().split()[:8]\n f, s, p = f_s_p.split('/')\n except Exception as e:\n print(i.split()[:7])\n f = '0'\n s = bid\n ont_id, mac, control_flag, run_state, config_state, match_state, protect_side = i.strip().split()[:7]\n p = port_list[x]\n print(p)\n\n ont_info = ONTDetail.query.filter_by(mac=mac).first()\n if ont_info:\n print('mac exist {} {}/{}/{}, ont_id:{}'.format(mac, ont_info.f, ont_info.s, ont_info.p, ont_info.ont_id))\n if ont_info.f == f and ont_info.s == s and ont_info.p == p and str(ont_info.ont_id) == ont_id:\n ont_info.update_time = time.localtime()\n ont_info.control_flag = control_flag\n ont_info.run_state = run_state\n ont_info.match_state = match_state\n else:\n print('mac exist, but changed port or ont_id {}'.format(mac))\n ont_info.ont_status = 2\n ont_new = ONTDetail(device_id=id, f=f, s=s, p=p, ont_id=ont_id,\n mac=mac, control_flag=control_flag,\n run_state=run_state, config_state=config_state,\n match_state=match_state, protect_side=protect_side,\n pre_id=ont_info.id,\n create_time=time.localtime(), update_time=time.localtime())\n db.session.add(ont_new)\n else:\n print(mac)\n if re.search(r'^[0-9a-fA-F]{4}\\-[0-9a-fA-F]{4}\\-[0-9a-fA-F]{4}', mac):\n ont_info = ONTDetail(device_id=id, f=f, s=s, p=p, ont_id=ont_id,\n mac=mac, control_flag=control_flag,\n run_state=run_state, config_state=config_state,\n match_state=match_state, protect_side=protect_side,\n create_time=time.localtime(), update_time=time.localtime())\n else:\n continue\n db.session.add(ont_info)\n try:\n db.session.commit()\n except Exception as e:\n print(e)\n db.session.rollback()\n\n tlnt.telnet_close()","repo_name":"KoiosChen/gamefast","sub_path":"app/MyModule/OltOperator.py","file_name":"OltOperator.py","file_ext":"py","file_size_in_byte":3897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71512775625","text":"\"\"\"\nCS230: Section HB3\nName: Juliana Spitzner\nData: Boston Uber and Lyft Rideshare Data\nDescription:\nThis program allows the user to visualize the data in various ways such as charts, pivot tables, and maps.\nThe sidebar in the Streamlit app gives 6 views for the user to click through.\nThere are many options to sort, map, filter, graph, and calculate the data to see the information they desire.\n\nI pledge that I have completed the programming assignment independently.\nI have not copied the code from a student or any source.\nI have not given my code to any student. 
\n\"\"\"\n\n#streamlit run finalProject.py\n\nimport streamlit as st\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\nimport csv\nimport pandas as pd\nimport numpy as np\nimport statistics\n\n#title & header\nst.title('Uber/Lyft Data Analysis Project')\nst.header('Juliana Spitzner CS230-HB3')\nst.write(' ')\ndf = st.cache(pd.read_csv)(\"ridesharesample.csv\")\n\n#Function 1: pass in parameters and return mean or median\ndef function1(m, para1, para2):\n thisPrice = []\n for index, row1 in df.iterrows():\n if row1['destination'] == para2:\n if not np.isnan(row1[para1]):\n thisPrice.append(row1[para1])\n if m == 'mean':\n return statistics.mean(thisPrice)\n if m == 'median':\n return statistics.median(thisPrice)\n\n#Function 2: create list of hours, return nothing\ndef hourLists(zipped):\n hour1, hour2, hour3, hour4, hour5, hour6, hour7, hour8, hour9, hour10, hour11, hour12 = [], [], [], [], [], [], [], [], [], [], [], []\n hour13, hour14, hour15, hour16, hour17, hour18, hour19, hour20, hour21, hour22, hour23, hour0 = [], [], [], [], [], [], [], [], [], [], [], []\n for x in zipped:\n if x[1] == 1: hour1.append(x[0])\n if x[1] == 2: hour2.append(x[0])\n if x[1] == 3: hour3.append(x[0])\n if x[1] == 4: hour4.append(x[0])\n if x[1] == 5: hour5.append(x[0])\n if x[1] == 6: hour6.append(x[0])\n if x[1] == 7: hour7.append(x[0])\n if x[1] == 8: hour8.append(x[0])\n if x[1] == 9: hour9.append(x[0])\n if x[1] == 10: hour10.append(x[0])\n if x[1] == 11: hour11.append(x[0])\n if x[1] == 12: hour12.append(x[0])\n if x[1] == 13: hour13.append(x[0])\n if x[1] == 14: hour14.append(x[0])\n if x[1] == 15: hour15.append(x[0])\n if x[1] == 16: hour16.append(x[0])\n if x[1] == 17: hour17.append(x[0])\n if x[1] == 18: hour18.append(x[0])\n if x[1] == 19: hour19.append(x[0])\n if x[1] == 20: hour20.append(x[0])\n if x[1] == 21: hour21.append(x[0])\n if x[1] == 22: hour22.append(x[0])\n if x[1] == 23: hour23.append(x[0])\n if x[1] == 0: hour0.append(x[0])\n listofhours = hour0, hour1, hour2, hour3, hour4, hour5, hour6, hour7, hour8, hour9, hour10, hour11, hour12, hour13, hour14, hour15, hour16, hour17, hour18, hour19, hour20, hour21, hour22, hour23\n return listofhours\n\n#Function 3: Pivot Table\n#User chooses whether they would like to view a pivot table of price or destination, showing Uber vs. 
Lyft rides for each source/destination combo\ndef pivot():\n values = st.radio(\"View Price or Distance Data:\", ('price', 'distance'))\n pivTable = pd.pivot_table(df, values=values, index=['source', 'destination'], columns='cab_type', aggfunc=np.sum)\n st.write(pivTable)\n pass\n\n#Simplified Dataframe\ndf_one = pd.DataFrame({'Hour': df.hour, 'Day': df.day, 'Month': df.month, 'Datetime': df.datetime, 'Source': df.source, 'Destination': df.destination, 'Cab Type': df.cab_type, 'Name': df.name, 'Price': df.price, 'Distance': df.distance, 'Temperature': df.temperature, 'Weather': df.short_summary})\n#Variables to be used later\nweatherList = ('Clear', 'Drizzle', 'Foggy', 'Light Rain', 'Mostly Cloudy', 'Overcast', 'Partly Cloudy', 'Possible Drizzle', 'Rain')\nmeanormed = 'mean'\nselectedColumn = 'price'\nselectedDest = 'Back Bay'\n\n#Sorted Dataframe\n#User is able to pick which columns of the dataframe to view, and which columns to sort by\ndataset = st.sidebar.checkbox(\"Sort Original Dataframe\")\nif dataset:\n options = st.multiselect('What columns would you like to display?', ['Hour', 'Day', 'Month', 'Datetime', 'Source', 'Destination', 'Cab Type', 'Name', 'Price', 'Distance', 'Temperature', 'Weather'], ['Day', 'Cab Type'])\n df_m = pd.DataFrame({})\n for option in options:\n df_m[option] = df_one[option]\n sortColumns = st.multiselect('Choose columns to sort by:', options, 'Day')\n df_s = df_m.sort_values(sortColumns)\n st.write(df_s)\n\n#Map\n#User can view a map of all the locations in the data, which a list on the right of all of the locations\nmapData = pd.DataFrame({\n 'Source' : df_one['Source'],\n 'lat' : df['latitude'],\n 'lon' : df['longitude'] })\nsourceList = []\nfor source in mapData['Source']:\n if source not in sourceList:\n sourceList.append(source)\nview_map = st.sidebar.checkbox(\"View Map\")\nif view_map:\n col1, col2 = st.beta_columns([3, 1])\n col1.subheader(\"Map\")\n col1.map(mapData)\n col2.subheader(\"Locations\")\n col2.table(sourceList)\n\n#Filtered Dataframe\n#User can filter out ride data from a particular day, and can also filter on temperature and destination if they would like to\nview_dayMonth = st.sidebar.checkbox(\"Filter Table\")\nif view_dayMonth:\n columns = ['Hour', 'Day', 'Month', 'Datetime', 'Source', 'Destination', 'Cab Type', 'Name', 'Price', 'Distance', 'Temperature', 'Weather']\n month = st.radio(\"Select month:\", ('November', 'December'))\n if month == 'December':\n day = st.select_slider('Select day', options=[1, 2, 3, 4, 9, 10, 13, 14, 15, 16, 17, 18])\n mon = 12\n if month == 'November':\n day = st.select_slider('Select day', options=[26, 27, 28, 29, 30])\n mon = 11\n monthDay = pd.DataFrame({})\n monthDay = df_one[(df_one.Month == mon) & (df_one.Day == day)]\n tempanddest = st.checkbox(\"Filter by temperature and destination\")\n if tempanddest:\n temp = st.selectbox(\"View all entries with temperatures greater than:\", (range(40,56)))\n sourceList.insert(0, 'None')\n dest = st.selectbox(\"View all entries with a destination of:\", (sourceList))\n if dest == 'None':\n monthDay = df_one[(df_one.Month == mon) & (df_one.Day == day) & (df_one.Temperature > temp)]\n else: monthDay = df_one[(df_one.Month == mon) & (df_one.Day == day) & (df_one.Temperature > temp) & (df_one.Destination == dest)]\n st.write(monthDay)\n else:\n st.write(monthDay)\n\n#Pivot Table\nview_pivTable = st.sidebar.checkbox(\"View Pivot Table\")\nif view_pivTable:\n pivot()\n\n#Line Chart\n#User chooses to view count, mean, median, or both mean & median in the line chart 
and also chooses the color of the line\n\n#create price & hour lists\npriceList = []\nhourList = []\nfor index, row2 in df.iterrows():\n if not np.isnan(row2['price']):\n price = float(row2['price'])\n hour = int(row2['hour'])\n priceList.append(price)\n hourList.append(hour)\n#zip price & hour lists into a tuple\nz = zip(priceList, hourList)\n\n#calculate count, mean, median lists\ncountList = []\nmeanList = []\nmedianList = []\nlistofhours = hourLists(z)\nfor hour in listofhours:\n countList.append(len(hour))\n meanList.append(statistics.mean(hour))\n medianList.append(statistics.median(hour))\n\n#Line Chart\nview_linechart = st.sidebar.checkbox(\"View Line Chart\")\nif view_linechart:\n countmeanmed = st.sidebar.radio(\"Plot the line based on:\", ('Count', 'Mean', 'Median', 'Mean & Median'))\n color = st.sidebar.color_picker('Pick A Line Color', '#B9159D')\n fig, ax = plt.subplots()\n if countmeanmed == 'Count':\n plt.plot(countList, label='Trip Count', color=color)\n plt.ylabel('Number of trips')\n plt.title('Number of Trips per Hour')\n if countmeanmed == 'Mean':\n plt.plot(meanList, label='Mean Price', color=color)\n plt.ylabel('Price (USD)')\n plt.title('Mean Trip Price per Hour')\n if countmeanmed == 'Median':\n plt.plot(medianList, label='Median Price', color=color)\n plt.ylabel('Price (USD)')\n plt.title('Median Trip Price per Hour')\n if countmeanmed == 'Mean & Median':\n plt.plot(meanList, label='Mean Price', color=color)\n plt.ylabel('Price (USD)')\n plt.title('Mean Trip Price per Hour')\n color2 = st.sidebar.color_picker('Pick A Line Color', '#4C7EDA')\n plt.plot(medianList, label='Median Price', color=color2)\n plt.legend()\n plt.xlabel('Hour of Day')\n plt.xticks(hourList)\n plt.show()\n st.pyplot(fig)\n\n#Calculate mean or median\nmeanFunction = st.sidebar.checkbox(\"Mean & Median Function\")\nif meanFunction:\n meanormed = st.selectbox(\"Mean or Median:\", ('mean', 'median'))\n selectedColumn = st.selectbox(\"Choose a column to get the mean of:\", ('price', 'distance'))\n selectedDest = st.selectbox(\"Choose a destination: \", (sourceList))\n st.write('')\n st.write(meanormed, selectedColumn, 'for rides to', selectedDest, 'is', function1(meanormed, selectedColumn, selectedDest))\n\ndef main():\n function1(meanormed, selectedColumn, selectedDest)\n\nmain()\n\n","repo_name":"JSpitzner/final","sub_path":"finalProject.py","file_name":"finalProject.py","file_ext":"py","file_size_in_byte":9163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71433377866","text":"import os\nimport dns\nfrom pymongo import MongoClient\nfrom aws_tools import aws_upload\nfrom dotenv import load_dotenv\nfrom datetime import datetime\n\ndef make_violator_entry_in_db(\n\tconfidence_pred,\n\timg_buffer\n\t):\n\t\"\"\"\n\tInserts entry into MongoDB collection of link to image of violator, confidence scores and timestamp.\n\n\tReturns: None\n\t\"\"\"\n\n\t## Getting mongo string from .env\n\tload_dotenv()\n\tmongo_string = os.getenv('MONGODB_AUTH_URI')\n\n\t## Initilizing Mongo client\n\tclient = MongoClient(mongo_string)\n\tprint(client[\"Maks-Cluster\"]) # Debugging convenience; to be removed in production code\n\n\t## Uploading image to AWS and getting link of uploaded image\n\tuploaded_img_link = aws_upload.upload_image(img_buffer=img_buffer)\n\n\t## Preparing timestamp of time of violation\n\tnow = datetime.now()\n\tdate_string = now.strftime(\"%d/%m/%Y %H:%M:%S\")\n\n\t## Pulling DB variables\n\tdatabase = client['User-Credentials'] # 
Picking database from our Mongo client\n\tcoll = database['violators'] # Picking collection from the database\n\n\trecord = {\n\t\t\"timestamp\": date_string,\n\t\t\"confidence-of-prediction\": confidence_pred,\n\t\t\"link-to-image\": uploaded_img_link,\n\t\t\"email\": os.getenv('EMAIL_ADDRESS')\n\t}\n\n\tcoll.insert_one(record)\n\nif __name__ == \"__main__\":\n\tpass","repo_name":"adityashukzy/Maks","sub_path":"mongodb_tools/mongo_upload.py","file_name":"mongo_upload.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27097830076","text":"\"\"\"\r\nCreate a program where the user can type in seven numeric values and store them\r\nin a single list that keeps the even and odd values separated. At the end,\r\nshow the even and odd values in ascending order.\r\n\"\"\"\r\nlista = [[], []]\r\n\r\nfor v in range(1, 8):\r\n valor = int(input(f\"Enter value {v}: \"))\r\n if valor % 2 == 0:\r\n lista[0].append(valor)\r\n else:\r\n lista[1].append(valor)\r\n\r\nprint(\"- \"*30)\r\nlista[0].sort()\r\nlista[1].sort()\r\nprint(f\"All values: {lista}\")\r\nprint(f\"The even values are: {lista[0]}\")\r\nprint(f\"The odd values are: {lista[1]}\")\r\n","repo_name":"goudardroberto/Python","sub_path":"k6_listas_com_pares_e_Impares.py","file_name":"k6_listas_com_pares_e_Impares.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70598860426","text":"from Components.MenuList import MenuList\n\nfrom Plugins.Extensions.ShareMyBox import boxwrapper, dreamclass\nfrom Plugins.Extensions.ShareMyBox.ShareMyBoxRequets import ShareMyBoxApi as Request \n\nfrom BaseScreens import Smb_BaseScreen, Smb_BaseListScreen\n\n\nfrom Tools.Directories import resolveFilename, fileExists, SCOPE_PLUGINS, SCOPE_SKIN_IMAGE, SCOPE_CURRENT_PLUGIN\nfrom Components.Label import Label\n\nfrom Screens.InputBox import InputBox\nfrom Components.Input import Input\n\nfrom Screens.MessageBox import MessageBox\n\nfrom Components.MultiContent import MultiContentEntryText, MultiContentEntryPixmapAlphaTest\nfrom enigma import eListboxPythonMultiContent, gFont\nfrom Tools.LoadPixmap import LoadPixmap\n\nfrom Plugins.Extensions.ShareMyBox.__init__ import _\n\nimport time\n \nclass MainMenu(Smb_BaseListScreen):\n\n title = _('Friends')\n\n def build(self):\n \n self[\"myMenu\"] = MenuList(self.buildlist(), False, eListboxPythonMultiContent)\n self[\"myMenu\"].onSelectionChanged = [self.Changed]\n self[\"myMenu\"].l.setFont(0, gFont(\"Regular\", 20))\n self[\"myMenu\"].l.setFont(1, gFont(\"Regular\", 14))\n self[\"myMenu\"].l.setItemHeight(40) \n \n self[\"Description\"] = Label(\"\")\n \n self[\"red\"] = Label(\"Add\")\n self[\"green\"] = Label(\"Delete\")\n self[\"yellow\"] = Label(\"\")\n self[\"blue\"] = Label(\"\") \n #self[\"padd_label\"] = Label(\"\")\n \n \n self.actions['red'] = self.ActionHelperAdd\n self.actions['green'] = self.ActionHelperDelete\n #self.actions['yellow'] = lambda: self.action\n #self.actions['blue'] = lambda: self.action('delete')\n self.context = [\"WizardActions\", \"ColorActions\"]\n \n def Changed(self):\n item = self[\"myMenu\"].l.getCurrentSelection()\n if item is None: return\n \n obj = item[-1]\n self[\"Description\"].setText(str(obj['boxkey'])) \n \n def buildlist(self):\n \n list = []\n \n icon_friend = 
boxwrapper.Icon('friend')\n icon_disable = boxwrapper.Icon('friend_disable')\n \n try:\n api = boxwrapper.SendRequestAuth('FriendList').GetList()\n except Exception as e:\n self.ErrorException(e)\n return \n \n\n for x in api:\n \n icon = icon_friend\n if not x['approved'] == \"1\":\n icon = icon_disable\n \n list.append([\n str(x['uid']),\n MultiContentEntryText(pos=(60, 0), size=(320, 25), font=0, text=str(x['name'])),\n MultiContentEntryPixmapAlphaTest(pos=(5, 0), size=(50, 40), png = icon),\n x\n ]) \n \n return list \n \n def onMenuChanged(self, item):\n obj = item[-1]\n items = {\n #'Update': dreamclass.format_date(obj['updated_on']),\n _('Description'): obj['description'],\n _('Approved'): str(obj['approved']),\n }\n\n self.DescriptionToText(items) \n \n def ActionHelperAdd(self):\n self.session.openWithCallback(self.AddFriendCallback, InputBox, title=_(\"Please enter Username or Mail of friend account\"), text=\" \" * 55, maxSize=55, type=Input.TEXT) \n \n def ActionHelperDelete(self):\n if self.is_selected() is False: return\n self.session.openWithCallback(self.ActionDelete, MessageBox, _(\"Do you want delete this item?\"), MessageBox.TYPE_YESNO) \n\n def AddFriendCallback(self, word = None):\n if word is None: return\n \n try: \n word = str(word).strip()\n if len(word) == 0: return\n \n ret = Request().FriendAdd(word).GetResponse()\n self.SetMessage(ret)\n \n except Exception as e:\n self.SetMessage(str(e)) \n\n def ActionDelete(self, result):\n try:\n if result is False or self.Id() is None: return\n \n msg = Request().FriendDelete(self.Id()).GetResponse()\n self.SetMessage(msg)\n self.rebuild()\n except Exception as e:\n self.SetMessage(str(e))\n\n def rebuild(self):\n self[\"myMenu\"].setList(self.buildlist())\n \n def delete(self, result):\n if result is False: return \n \n try: \n oid = self[\"myMenu\"].l.getCurrentSelection()[0]\n if oid is not None:\n msg = Request().BouquetDelete(oid).GetResponse()\n self.SetMessage(msg)\n self.rebuild()\n \n except Exception as e:\n self.SetMessage(\"Error:\" + str(e)) \n \n\n","repo_name":"Haehnchen/sharemybox.net-enigma2-client","sub_path":"src/ScreenLib/MasterBox.py","file_name":"MasterBox.py","file_ext":"py","file_size_in_byte":4332,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"23932270434","text":"import numpy as np\n#import matplotlib as mpl\n#mpl.use('Agg')\nimport matplotlib.pyplot as plt\nimport pickle\n\n\nrestDir = ''\nthres = 600\nsim_data = pickle.load(open(restDir+'simres_'+str(thres)+'.p', \"rb\"))\n\nlw_val = 3\nls_val = ['-', '--', ':', '-', '--', ':']\n\nconsumptionList = sim_data['consumptionList']\nper5List = sim_data['per5List']\ncontrolList = sim_data['controlList']\npolicies = sim_data['policies']\ntrafficPattern = sim_data['trafficPattern']\nthres = sim_data['thres']\nnHourPerDay = 24\n\nxAxis = np.arange(0, nHourPerDay)\n\nplt.figure(1)\nfor i in range(len(policies)):\n plt.plot(xAxis, consumptionList[i], ls=ls_val[i], lw=lw_val, label=policies[i])\n# plt.axis([0, 23, 1455, 1500])\n_, _, y1, y2 = plt.axis()\nplt.axis((0, 23, y1, y2))\nplt.ylabel('Consumption (W)')\nplt.xlabel('Hour')\nplt.grid(True)\nplt.legend(loc=\"best\")\nplt.savefig(restDir+'consumption.eps', bbox_inches='tight')\n\n\nplt.figure(2)\nfor i in range(len(policies)):\n plt.plot(xAxis, per5List[i]/1e3, ls=ls_val[i], lw=lw_val, label=policies[i])\nplt.plot(xAxis, np.ones(nHourPerDay)*thres/1e3, ls=':', lw=lw_val, label='$Q_{min}$')\n# plt.axis([0, 23, 1455, 1500])\n_, _, y1, y2 = 
plt.axis()\nplt.axis((0, 23, y1, y2))\nplt.ylabel('Ratio of UEs satisfying QoS')\nplt.xlabel('Hour')\nplt.grid(True)\nplt.legend(loc=\"upper left\")\nplt.savefig(restDir+'QoS.eps', bbox_inches='tight')\n\n\n\ncontrolVectorList = list()\n\nfor i in range(len(policies)):\n    controlArray = np.zeros((3, nHourPerDay))\n    c = controlList[i]\n    for j in range(nHourPerDay):\n        controlArray[0, j] = c[j][0]  # nPicos\n        controlArray[1, j] = c[j][1]  # ABS\n        controlArray[2, j] = c[j][2]  # CRE\n    controlVectorList.append(controlArray)\n\n\nfig = plt.figure(3)\nsub1 = fig.add_subplot(311)\nfor i in range(len(policies)):\n    sub1.plot(xAxis, controlVectorList[i][0, :], ls=ls_val[i], lw=lw_val, label=policies[i])\n# sub1.legend(loc=\"upper left\")\nplt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=3, mode=\"expand\", borderaxespad=0.)\n_, _, y1, y2 = plt.axis()\nplt.axis((0, 23, y1-1, y2+1))\nplt.ylabel('$p\\'$')\nplt.xlabel('Hour')\nplt.grid(True)\n\nsub1 = fig.add_subplot(312)\nfor i in range(len(policies)):\n    sub1.plot(xAxis, controlVectorList[i][1, :], ls=ls_val[i], lw=lw_val, label=policies[i])\n# plt.legend(bbox_to_anchor=(0., 1.02, 1., .102),loc=3, ncol=3, mode=\"expand\", borderaxespad=0.)\n_, _, y1, y2 = plt.axis()\nplt.axis((0, 23, y1-.1, y2+.1))\nplt.ylabel('ABS ratio ($\\\gamma$)')\nplt.xlabel('Hour')\nplt.grid(True)\n\nsub1 = fig.add_subplot(313)\nfor i in range(len(policies)):\n    sub1.plot(xAxis, controlVectorList[i][2, :], ls=ls_val[i], lw=lw_val, label=policies[i])\n# plt.legend(bbox_to_anchor=(0., 1.02, 1., .102),loc=3, ncol=3, mode=\"expand\", borderaxespad=0.)\n_, _, y1, y2 = plt.axis()\nplt.axis((0, 23, y1-1, y2+1))\nplt.ylabel('CRE bias ($\\\phi$)')\nplt.xlabel('Hour')\nplt.grid(True)\nplt.savefig(restDir+'paramConf.eps', bbox_inches='tight')\n\n\nplt.figure(4)\ntrafficPatternAxis = np.linspace(0, 24, 1440)\nplt.plot(trafficPatternAxis, np.random.poisson(trafficPattern*10000-150), lw=1, label='$\\\lambda$')\nplt.plot(trafficPatternAxis, trafficPattern*10000-150, lw=1, c='k', label=r\"$\\\bar{\\\lambda}$\")\nplt.axis([0, 23, 0, 1500])\nplt.ylabel('Traffic Intensity (UEs per sector)')\nplt.xlabel('Hour')\n# plt.legend(loc=\"best\")\nplt.grid(True)\nplt.savefig(restDir+'trafficPattern.eps', bbox_inches='tight')\n\nplt.show()\n\n# for i in range(len(policies)):\n#     print(policies[i])\n#     print(np.sum(consumptionList[i] * 3600))\n#     print(np.sum(per5List[i] > thres) / 24)\n#\n# print(policies[0])\n# print(1-np.sum(consumptionList[0] * 3600) / np.sum(consumptionList[2] * 3600))\n# print(policies[1])\n# print(1-np.sum(consumptionList[1] * 3600) / np.sum(consumptionList[2] * 3600))\n\n","repo_name":"jaayala/EnergyInterference-DynamicProgramming-CEC","sub_path":"results/getFigures.py","file_name":"getFigures.py","file_ext":"py","file_size_in_byte":3729,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"18808955532","text":"# -*- coding: utf-8 -*-\n__author__ = 'cedric'\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nfrom argparse import ArgumentParser\nimport configparser\nimport os\nimport matplotlib.font_manager\n# print matplotlib.font_manager.findSystemFonts(fontpaths=None, fontext='ttf')\n# list = matplotlib.font_manager.get_fontconfig_fonts()\n# names = [matplotlib.font_manager.FontProperties(fname=fname).get_name() for fname in list]\n# print names\n# print matplotlib.font_manager.findSystemFonts(fontpaths=None, fontext='ttf').find(\"FreeSerif\")\n\nparser = 
ArgumentParser(description='Oszi plotter')\nparser.add_argument('-c','--filename', help='This program makes plots from oszi data!',\\\n                    default='D:/data/NitroStrip/NitroStrip/IVLocations.txt')\nargs = parser.parse_args()\n\nwith open(args.filename) as f:\n    content = f.readlines()\n\ncontent = [x.strip() for x in content]\n\n#seperator = args.filename.rfind(\"/Mini\")\n#dataString = args.filename[0:seperator+1]\n\nplt.rcParams['font.family']='Linux Libertine'\n# plt.rcParams['font.family']='Liberation Serif'\nplt.rcParams['font.size']=16\n# plt.rcParams['font.libertine'] =['Linux Libertine']#['Times New Roman']\n# plt.rc('font',family='Linux Libertine')\nfig, ax = plt.subplots(figsize=(8,6))\nfor file in content:\n    startBin = 5000\n    endBin = 6730\n    timeOffset = 0.5\n    seperator = file.rfind(\"/Mini\")\n    seperator2 = file.rfind(\"-run\")\n    nameString = file[seperator + 1:seperator2]\n    print(file)\n    # column names must match the fields indexed below: each CSV holds time and current\n    data = np.genfromtxt(file, delimiter=',', skip_header=0,\n                         skip_footer=0, names=['time', 'current'])\n\n\n\n    # fig, ax = plt.subplots(figsize=(16,12))\n\n    #plt.rc('text', usetex=True)\n\n    # ax.plot(data[\"time\"][startBin:endBin]*1000000,data['Ch3'][startBin:endBin], label='Clear',linewidth=3)\n    # ax.plot(data[\"time\"][startBin:endBin]*1000000,data['Ch4'][startBin:endBin], label=\"Gate\",linewidth=3)\n\n    ax.plot(data[\"time\"]/60,data['current']*1E6, label=nameString,linewidth=3,color='r')\n\n\nax.set_ylim(-0.1,-0.7)\n#ax.set_xlim(0,-600.0)\n\nplt.xlabel(u\"Time in minutes\",fontsize=18)\nplt.ylabel(u\"Current in µA\",fontsize=18)\n# ax.plot(data['Ch2'], label=\"Gate\",linewidth=3)\nax.grid(True)\n# ticklines = ax.get_xticklines() + ax.get_yticklines()\n# gridlines = ax.get_xgridlines() + ax.get_ygridlines()\n# for line in gridlines:\n#     line.set_linestyle('--')\n# ax.legend(bbox_to_anchor=(0.6, 1.1), loc=2)\nax.legend()\n# plt.show()\ndataString = 'D:/data/NitroStrip/NitroStrip'\nfig.savefig(dataString+\"/\" + \"IVCurves\"+\".png\")\nfig.savefig(dataString+\"/\" + \"IVCurves\"+\".pdf\")\n# plt.show()\n","repo_name":"janhoenig/Plotting-Software","sub_path":"CSVPicMakerLong.py","file_name":"CSVPicMakerLong.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33617187816","text":"\"\"\"A task where the agent must type a given string one letter at a time.\n\nIn this variation of the task, the agent is given a single function\nthat takes a letter as an argument.\n\"\"\"\nimport dataclasses\nfrom typing import Any, Callable, List, cast\n\nfrom langchain.tools import BaseTool, tool\n\nfrom langchain_benchmarks.schema import ToolUsageEnvironment, ToolUsageTask\n\n\n@dataclasses.dataclass\nclass Paper:\n    \"\"\"A piece of paper that the agent can write on.\"\"\"\n\n    content: str\n\n\ndef create_typer(paper: Paper) -> Callable[[str], str]:\n    \"\"\"Create a function that types the given letter.\"\"\"\n\n    def type_letter(letter: str) -> str:\n        \"\"\"Print the given letter on the paper.\"\"\"\n        if len(letter) != 1:\n            return \"ERROR: The letter must be a single character.\"\n        paper.content += letter\n        return \"OK\"\n\n    return type_letter\n\n\n# PUBLIC API\n\n\ndef get_environment() -> ToolUsageEnvironment:\n    \"\"\"Create tools and state reader.\n\n    Attention: this is a factory function, so it will create a new environment\n    every time it is called. 
The paper contains state.\n\n    Returns:\n        A tuple of (tools, state_reader).\n    \"\"\"\n    paper = Paper(content=\"\")  # Start with an empty piece of paper\n\n    def _read_state() -> Any:\n        \"\"\"Read the state of the environment.\"\"\"\n        return paper.content\n\n    tools = cast(List[BaseTool], [tool(create_typer(paper))])\n\n    return ToolUsageEnvironment(\n        tools=tools,\n        read_state=_read_state,\n    )\n\n\nTYPE_WRITER_TASK = ToolUsageTask(\n    name=\"Tool Usage - Typewriter (1 tool)\",\n    dataset_id=\"https://smith.langchain.com/public/59577193-8938-4ccf-92a7-e8a96bcf4f86/d\",\n    create_environment=get_environment,\n    instructions=(\n        \"Repeat the given string using the provided tools. \"\n        \"Do not write anything else or provide any explanations. \"\n        \"For example, if the string is 'abc', you must print the letters \"\n        \"'a', 'b', and 'c' one at a time and in that order. \"\n    ),\n    description=(\n        \"\"\"\\\nEnvironment with a single tool that accepts a single letter as input, and \\\nprints it on a piece of virtual paper.\n\nThe objective of this task is to evaluate the ability of the model to use the provided \\\ntool to repeat a given input string.\n\nFor example, if the string is 'abc', the tool must be invoked with 'a', 'b', and 'c' \\\nin that order.\n\nThe dataset includes examples of varying difficulty. The difficulty is measured \\\nby the length of the string.\n\"\"\"\n    ),\n    eval_params={\n        # For this task, the agent's output is irrelevant\n        # what we care about is the final state of the environment\n        # (i.e., what's written on the virtual paper)\n        \"output_evaluation\": \"none\",\n    },\n)\n\n\nSTRINGS_TO_TYPE = [\n    # letter repetition\n    \"a\",\n    \"aa\",\n    \"aaa\",\n    \"aaaa\",\n    # 3-letter words\n    \"dog\",\n    \"cat\",\n    # 4-letter words\n    \"hand\",\n    \"head\",\n    # 5-letter words\n    \"house\",\n    \"horse\",\n    # 6-letter words\n    \"school\",\n    \"church\",\n    # 7-letter words\n    \"teacher\",\n    \"student\",\n    # 8-letter words\n    \"computer\",\n    \"keyboard\",\n    # 10-letter words\n    \"university\",\n    \"dictionary\",\n    # 11- and 13-letter words\n    \"information\",\n    \"communication\",\n]\n\n\ndef _create_dataset(strings: List[str]) -> List[dict]:\n    \"\"\"Create the dataset.\"\"\"\n    dataset = []\n    for string in strings:\n        dataset.append(\n            {\n                \"question\": string,\n                \"expected_steps\": [\"type_letter\"] * len(string),\n                \"state\": string,\n            }\n        )\n    return dataset\n\n\nDATASET = _create_dataset(STRINGS_TO_TYPE)\n\n\ndef _register_dataset() -> None:\n    \"\"\"Create a dataset with the langsmith client.\"\"\"\n    from langsmith.client import Client\n\n    client = Client()\n    dataset = client.create_dataset(\n        dataset_name=TYPE_WRITER_TASK.name,\n        description=TYPE_WRITER_TASK.description,\n    )\n\n    for example in DATASET:\n        client.create_example(\n            inputs={\n                \"question\": example[\"question\"],\n            },\n            outputs={\n                \"reference\": example[\"state\"],\n                \"expected_steps\": example[\"expected_steps\"],\n                \"state\": example[\"state\"],\n            },\n            dataset_id=dataset.id,\n        )\n","repo_name":"langchain-ai/langchain-benchmarks","sub_path":"langchain_benchmarks/tool_usage/tasks/type_writer.py","file_name":"type_writer.py","file_ext":"py","file_size_in_byte":4301,"program_lang":"python","lang":"en","doc_type":"code","stars":88,"dataset":"github-code","pt":"81"} +{"seq_id":"74697675145","text":"\nimport os\nos.environ['CUDA_VISIBLE_DEVICES'] = '0'\nfrom tensorflow.keras.optimizers import Adam, RMSprop\nfrom tensorflow.python.compiler.mlcompute import mlcompute\nmlcompute.set_mlc_device(device_name='gpu')\nimport pandas as pd\n\n# Custom Imports:\nfrom 
engine.train import train\nfrom engine.test import test\n#from engine.random import random_games\nfrom agents.agent import Agent\nfrom envs.stock_env import StockEnvironment\nfrom utils.pipelines import preprocess_df, preprocess_price\nfrom utils.split import time_split\n\n\nif __name__ == \"__main__\":\n    df = pd.read_csv('./data/BTCUSDT-1m-data_historical.csv')\n    df = preprocess_df(df)\n    #df = pd.read_csv('./data/pricedata.csv').sort_values('Date')\n    #df = AddIndicators(df)\n\n    lookback_window_size = 200  # 6 hours\n    test_window = 120  # Since 2021\n    train_df, test_df = time_split(df, lookback_window_size, test_window)\n\n    agent = Agent(lookback_window_size=lookback_window_size, lr=0.00001, epochs=5,\n                  optimizer=Adam, batch_size = 32, model=\"LSTM\")\n\n    train_env = StockEnvironment(df = train_df, lookback_window_size = lookback_window_size, \n                                 normalize_obs=False, initial_balance = 10000, render_range = 300,\n                                 reward_strategy='incremental', name = \"Custom Trading Environment v1\")\n\n    train(train_env, agent, train_episodes=2, training_batch_size=500)\n\n\n    test_env = False\n\n    if test_env:\n        test_env = StockEnvironment(test_df, lookback_window_size=lookback_window_size, initial_balance = 10000,\n                                    normalize_obs=False, render_range = lookback_window_size, show_reward=True, show_indicators=True)\n\n\n        folder, name = '2021_01_18_22_18_Crypto_trader', '1933.71_Crypto_trader'\n        folder_3000, name3000 = '2021-05-06 18: 01_trader', '3135.59_Crypto_trader'\n        folderCNN, nameCNN = '2021-05-08 14: 32_trader', '1464.28_Crypto_trader'\n        folderLSTM, nameLSTM = '2021-05-09 15: 17_trader', '87.48_Crypto_trader'\n    \n        test(test_env, agent, visualize=True, test_episodes=1,\n             folder=folderCNN, name=nameCNN, comment=\"\")\n    \n\n        #random_games(test_env, visualize=True, test_episodes=1)\n\n        # only meaningful when a test environment was actually built above\n        print(test_env.shapes)\n","repo_name":"cankocagil/CryptoRL","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38433576153","text":"class Studant:\n    def __init__(self,name,grade1, grade2):\n        self.name = name\n        self.grade1 = grade1\n        self.grade2 = grade2\n        self.mean = 0\n\n    def mean_calculate(self):\n        self.mean = (self.grade1 + self.grade2) / 2\n        return self.mean\n    \n    def data_show(self):\n        print('Name: ',self.name)\n        print('Grade 1: ',self.grade1)\n        print('Grade 2: ',self.grade2)\n        print('Mean: ', self.mean)\n\n    def result(self):\n        if self.mean >= 6.0:\n            print('passed')\n        else:\n            print('failed')\n\n\nst = Studant('José', 10, 4)\nst.mean_calculate()\nst.data_show()\nst.result()\nst1 = Studant('Pedro', 2.5, 6.7)\nst1.mean_calculate()\nst1.data_show()\nst1.result()","repo_name":"diegodila/datastructure","sub_path":"POO/exercise.py","file_name":"exercise.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41594700237","text":"from collections import defaultdict\n\nINF = float('inf')\n\ndef solve(A, B, num):\n    a = 0\n    b = 0\n    cant = 0\n    for i in range(len(A)):\n        if A[i] == num:\n            if B[i] != num:\n                b += 1\n        elif B[i] == num:\n            a += 1\n        else:\n            cant = 1\n            break\n    if cant:\n        return INF\n    return min(a, 
b)\n","repo_name":"morriship/misc","sub_path":"Leetcode/127/1007.py","file_name":"1007.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4155263927","text":"from pprint import pprint\n\nfrom great_expectations import get_context\nfrom great_expectations.core import ExpectationConfiguration\nfrom great_expectations.core.batch import BatchRequest\n\ncontext = get_context()\nsuite = context.create_expectation_suite(\"flavors_test\", overwrite_existing=True)\n\nsuite.add_expectation(\n ExpectationConfiguration(\n expectation_type=\"expect_column_to_exist\",\n kwargs={\n \"column\": \"flavor_name\",\n },\n )\n)\n\nbatch_request = BatchRequest(\n datasource_name=\"my_postgres_db\",\n data_asset_name=\"std.flavors\",\n data_connector_name=\"whole_table\",\n)\n\nvalidator = context.get_validator(\n batch_request=batch_request,\n expectation_suite=suite,\n)\npprint(validator.validate())\n","repo_name":"Kaiohenriqueps/great-expectations-lab","sub_path":"great_expectations/great_expectations_test.py","file_name":"great_expectations_test.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30614878727","text":"class Solution:\n def subsetsWithDup(self, nums: List[int]) -> List[List[int]]:\n if not nums:\n return []\n res = []\n len_nums = len(nums)\n nums.sort()\n def dfs(re,index):\n if re not in res:\n res.append(re)\n for i in range(index,len_nums):\n dfs(re+[nums[i]],i+1)\n dfs([],0)\n return res","repo_name":"uorcz/leetcode_practice","sub_path":"090.py","file_name":"090.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19398452979","text":"\"\"\"\nModèle de similarité :\nOn entraine un embedding personalisé qui permet de plonger les BI et les Sirus dans un même espaces de représentation.\nAfin de matcher un bi, on regarde les sirus les plus simulaires (cosine similarity).\n\nAfin d'entrainer les modèles, il est nécéssaire d'encoder les données, c'est ce que fait ce fichier\n\nAuthor : bsanchez@starclay.fr\ndate : 23/09/2020\n\"\"\"\nimport os\nimport sys\nimport pickle\nimport shutil\nimport logging\n\nimport numpy as np\nimport pandas as pd\n\nfrom tensorflow.keras.preprocessing.text import Tokenizer\n\nfrom gensim.corpora import Dictionary\nimport fasttext\nimport fasttext.util\nfrom nltk import ngrams\n\nfrom .process import Process\n\n\nclass ProcessMLPSiamese(Process):\n\n def __init__(self, local_path_run, list_cols, \n fasttext=False, \n fasttext_size=296,\n ngram: str = \"3\"):\n \"\"\"\n Organise le preprocess du modèle de similarité.\n \n Input: DataFrame nettoyé\n \n On calcule le vocabulaire et le tokeniser en passant sur tout les mots du jeu de donnée.\n Puis on encode via le tokeniser chaque partie du dataset : la partie BI et la partie Sirus\n \n Les résultats sont stocké dans une matrice à 2 entrée : une matrice bi et une matrice sirus\n \n Pour des soucis de taille de mémoire, les fichiers output sont dumpé via pickle.\n \n Output : siamese_corpus Matrice de donnée encodée\n dict_info: un dictionnaire avec des information sur le dataset. 
Le dictionnaire sera utilisé par le modèle de similarité.\n tokeniser: tokeniser keras dumper dans un fichier pickle\n \n Ex: print(siamese_corpus[0])\n >> [[0],[68,741,38], [99,75]\n >> ,[0,0],[741,38], []\n >> ...]\n \n Chaque ligne de la matrice est une ligne du dataframe. Toutes les lignes ont un \n nombre d'élément fixe (colonne du dataframe d'entrée). Les données dans les colonnes ne sont pas pas paddé et sont donc de taille variable.\n Deux matrices sont produite, BI et sirus, et sont stocké dans une liste (a deux entrée donc).\n \n :param local_path_run: chemin local du training en cours\n :param list_cols: dict de listes\n - type_de_données: [colonnes à traiter]\n :param fasttext: bool (utilisation de fasttext ou non) (uniquement si ngram='word')\n :param fasttext_size: taille des embeddings\n :param ngram: str. Si entier ('3'), taille des ngrams. Si 'word', on tokenise par mot\n \"\"\"\n self.dir_path = local_path_run\n self.tokenizer_path = os.path.join(self.dir_path, \"tokenizer.p\")\n self.dict_info_path = os.path.join(self.dir_path, 'dict_info.pickle')\n self.fasttext_emb_path = os.path.join(self.dir_path, 'embedding_fasttext.npy')\n self.fasttext = fasttext\n self.fasttext_size = fasttext_size\n self.list_cols = list_cols\n self.ngram = ngram\n if self.fasttext and self.ngram != 'word':\n raise ValueError('Fasttext is True, ngram should be \"word\"')\n\n def train(self, input_df_path): \n \"\"\"\n Entraine le tokenizer, et (si besoin) extrait les embeddings fasttext\n\n :param input_df_path: chemin du fichier à traiter\n \"\"\"\n ####\n # INIT\n ###\n len_full_largest_token : int = -1 # plus grande liste de token présent dans le jeu de donnée (sirus + bi) \n vocab_size_full : int = -1 # nombre de token différent\n chunksize : int = 60000 # longeur du block chargé, à augmenter si beaucoup de mémoire\n \n if self.ngram.isdigit(): \n split_method = lambda x: list(ngrams(x, int(self.ngram)))\n elif self.ngram == \"word\":\n split_method = lambda x: x.split()\n else:\n# print(\"Erreur sur l'argument ngram\")\n sys.exit(2)\n\n #########################\n # Preparation donnée\n # On update les métriques (longeur token vocabulaire ...)\n #########################\n tokenizer = Tokenizer(lower=True, char_level=False, oov_token='oov_token')\n # TODO tokenizer looses tokens...\n for X in pd.read_csv(input_df_path, sep=';', dtype=str, chunksize = chunksize):\n for col in X:\n X[col] = X[col].astype(str) \n \n # On \"fusionne\" toutes les sources de données dans un même champs pour aller plus vite\n X['fusion_bi'] = X[self.list_cols['fusion_bi']].agg(' '.join, axis=1) \n X['fusion_si'] = X[self.list_cols['fusion_si']].agg(' '.join, axis=1)\n train_corpus = []\n for _, doc in enumerate(X['fusion_bi'].values.tolist() + X['fusion_si'].values.tolist()):\n tokens = split_method(doc)\n tokens = [''.join(t) for t in tokens]\n if len_full_largest_token < len(tokens):\n len_full_largest_token = len(tokens)\n train_corpus.append(tokens)\n tokenizer.fit_on_texts(train_corpus)\n\n vocab_size_full = len(tokenizer.word_index) + 2 # +1 pour marquer le 0 et +1 par sécurité\n tokenizer.num_words = len(tokenizer.word_index) # pour garder tous les mots\n \n with open(self.tokenizer_path, \"wb\") as f:\n pickle.dump(tokenizer, f)\n # Nécéssaire pour le modèle de similarité\n dict_info = {\n \"n_features\" : self.fasttext_size,\n \"len_full_largest_token\" : len_full_largest_token,\n \"vocab_size_full\" : vocab_size_full,\n \"ngram\" : self.ngram\n }\n with open(self.dict_info_path, 'wb') as file:\n 
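# saved next to the tokenizer so the similarity model can rebuild its embedding\n            # and padding sizes (vocab size, longest token sequence) without rescanning the corpus\n            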
pickle.dump(dict_info, file, protocol = pickle.HIGHEST_PROTOCOL)\n\n if self.fasttext:\n # load fasttext\n logger = logging.getLogger('fasttext')\n mydir = os.path.dirname(os.path.realpath(__file__))\n fasttext_path = os.path.join(os.path.dirname(mydir), 'fasttext', f'cc.fr.{self.fasttext_size}.bin')\n ft_model=None\n if not os.path.exists(fasttext_path):\n logger.info('Downloading fasttext from source...')\n os.makedirs(os.path.dirname(fasttext_path), exist_ok=True)\n fasttext.util.download_model('fr', 'strict')\n shutil.move('cc.fr.300.bin', os.path.join(os.path.dirname(fasttext_path), 'cc.fr.300.bin'))\n if self.fasttext_size != 300:\n logger.info(f'Resizing fasttext to {self.fasttext_size}...')\n ft_model = fasttext.load_model(os.path.join(os.path.dirname(fasttext_path), 'cc.fr.300.bin'))\n fasttext.util.reduce_model(ft_model, self.fasttext_size)\n ft_model.save_model(fasttext_path)\n if ft_model is None:\n logger.info(f'loading fasttext, size {self.fasttext_size}...')\n ft_model = fasttext.load_model(fasttext_path)\n # get embeddings\n words_not_found = []\n embed_dim = self.fasttext_size\n embedding_matrix = np.zeros((vocab_size_full + 1, embed_dim))\n for word, i in tokenizer.word_index.items():\n embedding_vector = ft_model[word]\n if (embedding_vector is not None) and len(embedding_vector) > 0:\n # words not found in embedding index will be all-zeros.\n embedding_matrix[i] = embedding_vector\n else:\n words_not_found.append(word)\n # Montre le nb de mots non trouvé, au minimun 1 à cause du padding '0'\n# print(f'number of null word embeddings: {np.sum(np.sum(embedding_matrix, axis=1) == 0)}')\n np.save(self.fasttext_emb_path, embedding_matrix)\n \n###############################################################################################################################\n \n def run(self, input_df_path, output_file):\n \"\"\"\n Encode les données (doit être entrainé avant)\n\n :param input_df_path: chemin du fichier d'entrée\n :param output_file: chemin du fichier de sortie\n \n \"\"\"\n chunksize = 60000\n \n # Load pre-trained encoder\n with open(self.tokenizer_path, \"rb\") as input_file:\n tokenizer = pickle.load(input_file)\n \n # with open(path_tokeniser, \"rb\") as input_file:\n # dict_info = pickle.load(input_file)\n \n if self.ngram.isdigit(): \n split_method = lambda x: list(ngrams(x, int(self.ngram)))\n elif self.ngram == \"word\":\n split_method = lambda x: x.split()\n else:\n print(\"Erreur sur l'argument ngram\")\n sys.exit(2)\n \n siamese_corpus = []\n for X in pd.read_csv(input_df_path, sep=';', dtype=str, chunksize = chunksize):\n# print(f\"Chunksize is {X.shape}\")\n for col in X:\n X[col] = X[col].astype(str)\n \n df_corpus = []\n for index, siam in enumerate(self.list_cols):\n# print(f\"\\n** {siam} **\")\n list_encoded_cols = []\n for col in self.list_cols[siam]:\n# print(f\"__{col}__\")\n corpus = X[col].values.tolist()\n# print(corpus[0:2])\n for i, row in enumerate(corpus):\n tokens = split_method(row)\n while not len(tokens) and len(row):\n # input is too short to make a token => we pad it with ' '\n row = ' ' + row\n tokens = split_method(row)\n tokens = [''.join(t) for t in tokens]\n corpus[i] = tokens\n# print(corpus[0:2])\n word_seq_train = tokenizer.texts_to_sequences(corpus)\n# print(word_seq_train[0:2])\n list_encoded_cols.append(word_seq_train)\n encoded_anchor = []\n for i in range(len(list_encoded_cols[0])):\n encoded_anchor.append([col[i] for col in list_encoded_cols])\n# print(\"______\")\n# print(encoded_anchor[0:2])\n# print(\"______\")\n 
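# one entry per dataframe row: a fixed number of columns, each holding a\n                # variable-length list of token ids (nothing is padded at this stage)\n                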
df_corpus.append(encoded_anchor)\n siamese_corpus.append(df_corpus)\n\n # reformat into [X, Y]\n siamese_corpus = [sum([d[i] for d in siamese_corpus], []) for i in range(len(self.list_cols))]\n if output_file[-4:] == '.npy':\n np.save(output_file, siamese_corpus)\n elif output_file[-2:] == '.p':\n pickle.dump(siamese_corpus, open(output_file, \"wb\" ))\n else:\n raise ValueError(f'Format not recognized: {output_file}')\n \n","repo_name":"etalab-ia/ami-ia-insee-aiee2","sub_path":"pipeline_siret_bi/training_classes/ProcessMLPSiamese.py","file_name":"ProcessMLPSiamese.py","file_ext":"py","file_size_in_byte":10984,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35171534377","text":"import os\nfrom typing import Callable\nfrom imgui_bundle import immapp, imgui, imgui_md, hello_imgui\nfrom imgui_bundle.demos_python.demo_utils.functional_utils import memoize\n\n\nGuiFunction = Callable[[], None]\n\n\ndef main_python_package_folder() -> str:\n this_dir = os.path.dirname(__file__)\n this_dir = this_dir.replace(\"\\\\\", \"/\")\n\n items = this_dir.split(\"/\")\n\n for n in reversed(range(len(items))):\n parent_folder = \"/\".join(items[:n])\n if os.path.isfile(parent_folder + \"/hello_imgui.pyi\"):\n return parent_folder\n\n raise Exception(\"Cannot find main python package!\")\n\n\ndef demos_assets_folder() -> str:\n r = main_python_package_folder() + \"/demos_assets\"\n return r\n\n\ndef demos_cpp_folder() -> str:\n return main_python_package_folder() + \"/demos_cpp\"\n\n\ndef demos_python_folder() -> str:\n return main_python_package_folder() + \"/demos_python\"\n\n\ndef markdown_doc_folder() -> str:\n return main_python_package_folder() + \"/doc/imgui_bundle_demo_parts\"\n\n\ndef set_hello_imgui_demo_assets_folder():\n hello_imgui.set_assets_folder(demos_assets_folder())\n\n\ndef show_python_vs_cpp_code(python_code: str, cpp_code: str, nb_lines: int = 0):\n imgui.push_id(python_code)\n\n snippet_cpp: immapp.snippets.SnippetData = immapp.snippets.SnippetData() # type: ignore\n snippet_cpp.code = cpp_code\n snippet_cpp.displayed_filename = \"C++ code\"\n snippet_cpp.height_in_lines = nb_lines\n\n snippet_python: immapp.snippets.SnippetData = immapp.snippets.SnippetData() # type: ignore\n snippet_python.code = python_code\n snippet_python.displayed_filename = \"Python code\"\n snippet_python.height_in_lines = nb_lines\n\n immapp.snippets.show_side_by_side_snippets(snippet_python, snippet_cpp)\n\n imgui.pop_id()\n\n\ndef show_python_vs_cpp_and_run(\n python_gui_function, cpp_code: str, nb_lines: int = 0\n) -> None:\n import inspect\n\n python_code = inspect.getsource(python_gui_function)\n show_python_vs_cpp_code(python_code, cpp_code, nb_lines)\n python_gui_function()\n\n\ndef show_python_vs_cpp_file(demo_file_path: str, nb_lines: int = 0) -> None:\n cpp_code = read_cpp_code(demo_file_path)\n python_code = read_python_code(demo_file_path)\n show_python_vs_cpp_code(python_code, cpp_code, nb_lines)\n\n\ndef show_markdown_file(doc_filename: str) -> None:\n code = read_markdown_code(doc_filename)\n imgui_md.render_unindented(code)\n\n\n@memoize # type: ignore\ndef read_code(filename: str) -> str:\n if not os.path.isfile(filename):\n return \"\"\n with open(filename, encoding=\"utf8\") as f:\n r = f.read()\n return r\n\n\ndef read_cpp_code(demo_file_path: str) -> str:\n file_abs = demos_cpp_folder() + \"/\" + demo_file_path + \".cpp\"\n code: str = read_code(file_abs) # type: ignore\n return code\n\n\ndef read_python_code(demo_file_path: 
str) -> str:\n    file_abs = demos_python_folder() + \"/\" + demo_file_path + \".py\"\n    code: str = read_code(file_abs)  # type: ignore\n    return code\n\n\ndef read_markdown_code(doc_filename: str) -> str:\n    doc_file = markdown_doc_folder() + \"/\" + doc_filename + \".adoc.md\"\n    r: str = read_code(doc_file)  # type: ignore\n    return r\n","repo_name":"pthom/imgui_bundle","sub_path":"bindings/imgui_bundle/demos_python/demo_utils/api_demos.py","file_name":"api_demos.py","file_ext":"py","file_size_in_byte":3165,"program_lang":"python","lang":"en","doc_type":"code","stars":394,"dataset":"github-code","pt":"81"} +{"seq_id":"8673248048","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('gitorial', '0006_tutorial_repo_name'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='step',\n            name='index',\n            field=models.IntegerField(default=0),\n            preserve_default=False,\n        ),\n    ]\n","repo_name":"bezi/gitorial","sub_path":"gitorial/migrations/0007_step_index.py","file_name":"0007_step_index.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"81"} +{"seq_id":"5925942974","text":"import base64\r\nimport io\r\nfrom typing import List\r\nfrom typing import Optional\r\nfrom typing import Union\r\n\r\nfrom telegram import CallbackQuery\r\nfrom telegram import InlineKeyboardButton\r\nfrom telegram import InlineKeyboardMarkup\r\nfrom telegram import ReplyKeyboardMarkup\r\nfrom telegram import Update\r\nfrom telegram.ext import CallbackContext\r\nfrom telegram.ext import ConversationHandler\r\nfrom telegram.ext import MessageFilter\r\n\r\nimport rating\r\nimport reviews\r\nimport students\r\n\r\n\r\ndef send_message(update: Update, context: CallbackContext, text: str, photo: Optional[bytes] = None,\r\n                 reply_markup: Optional[Union[InlineKeyboardMarkup, ReplyKeyboardMarkup]] = None) -> None:\r\n    \"\"\"Sends a message with an optional photo and inline keyboard\"\"\"\r\n    if photo:\r\n        context.bot.send_photo(chat_id=update.effective_chat.id, photo=photo, caption=text, reply_markup=reply_markup)\r\n    else:\r\n        context.bot.send_message(chat_id=update.effective_chat.id, text=text, reply_markup=reply_markup)\r\n\r\n\r\ndef add_student(update: Update, context: CallbackContext) -> None:\r\n    \"\"\"Stores the user's information in the database\"\"\"\r\n    data = context.user_data['user']\r\n\r\n    photo_stream = io.BytesIO()\r\n    update.message.photo[-1].get_file().download(out=photo_stream)\r\n    photo = base64.b64encode(photo_stream.getvalue())\r\n\r\n    students.add_student(\r\n        student_id=data['id'],\r\n        name=data['name'],\r\n        surname=data['surname'],\r\n        u_group=data['u_group'],\r\n        phone_number=data['phone_number'],\r\n        photo=photo.decode()\r\n    )\r\n    del context.user_data['user']\r\n\r\n\r\ndef send_student_info(update: Update, context: CallbackContext, student: students.Student) -> None:\r\n    \"\"\"Sends a student's profile card\"\"\"\r\n    student_info = f\"{student.name} {student.surname}\\n{student.u_group}\"\r\n    student_photo = base64.b64decode(student.photo) if student.photo else None\r\n    reply_markup = get_student_markup(student_id=student.id)\r\n    send_message(update=update, context=context, text=student_info, photo=student_photo, reply_markup=reply_markup)\r\n\r\n\r\ndef get_student_markup(student_id: int) -> InlineKeyboardMarkup:\r\n    \"\"\"Builds the inline keyboard for a student's profile card\"\"\"\r\n    
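# each rating row carries one voter's likes/dislikes flags; the sums below\r\n    # become the counters rendered on the two vote buttons\r\n    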
student_rating = rating.get_student_rating(student_id)\r\n\r\n    likes = sum([int(rate.likes) for rate in student_rating])\r\n    dislikes = sum([int(rate.dislikes) for rate in student_rating])\r\n\r\n    keyboard = [\r\n        [\r\n            InlineKeyboardButton(f\"👍 {likes}\", callback_data=f'rate {student_id} likes'),\r\n            InlineKeyboardButton(f\"👎 {dislikes}\", callback_data=f'rate {student_id} dislikes'),\r\n        ],\r\n        [InlineKeyboardButton(\"Оставить отзыв\", callback_data=f'review {student_id}')],\r\n        [InlineKeyboardButton(\"Посмотреть отзывы\", callback_data=f'get_reviews {student_id}')]\r\n    ]\r\n    return InlineKeyboardMarkup(keyboard)\r\n\r\n\r\ndef update_rating(update: Update, query: CallbackQuery, message: List[str], **_) -> None:\r\n    \"\"\"Updates the student's rating and refreshes the card's keyboard\"\"\"\r\n    student_id = int(message[1])\r\n    rating.update_rating(student_id=student_id, from_id=update.effective_chat.id, rate=message[2] == \"likes\")\r\n    reply_markup = get_student_markup(student_id=student_id)\r\n    query.edit_message_reply_markup(reply_markup)\r\n\r\n\r\ndef request_review(update: Update, context: CallbackContext, message: List[str], **_) -> str:\r\n    \"\"\"Asks the user to write a review about the student\"\"\"\r\n    context.user_data['student_id'] = message[1]\r\n    keyboard = [[InlineKeyboardButton(\"Отмена\", callback_data='cancel')]]\r\n    reply_markup = InlineKeyboardMarkup(keyboard)\r\n    send_message(update=update, context=context, text=\"Напишите отзыв, который хотите оставить\",\r\n                 reply_markup=reply_markup)\r\n    return 'SUBMIT'\r\n\r\n\r\ndef get_reviews(update: Update, context: CallbackContext, message: List[str], **_) -> None:\r\n    \"\"\"Sends every review left about the student\"\"\"\r\n    student_reviews = reviews.get_reviews(student_id=int(message[1]))\r\n    if not student_reviews:\r\n        send_message(update=update, context=context, text=\"Нет отзывов об этом студенте\")\r\n        return\r\n\r\n    for review in student_reviews:\r\n        reply_markup = get_review_markup(update=update, review=review)\r\n        send_message(update=update, context=context, text=review.message, reply_markup=reply_markup)\r\n\r\n\r\ndef delete_review(query: CallbackQuery, message: List[str], **_) -> None:\r\n    \"\"\"Deletes a review and the message that displayed it\"\"\"\r\n    reviews.delete_review(int(message[1]))\r\n    query.delete_message()\r\n\r\n\r\ndef cancel_review(context: CallbackContext, query: CallbackQuery, **_) -> str:\r\n    \"\"\"Cancels the pending review request and deletes the prompt message\"\"\"\r\n    query.delete_message()\r\n    del context.user_data['student_id']\r\n    return ConversationHandler.END\r\n\r\n\r\ndef get_review_markup(update: Update, review: reviews.Review) -> Union[InlineKeyboardMarkup, None]:\r\n    \"\"\"Builds the inline keyboard for a review message\"\"\"\r\n    reply_markup = None\r\n    if update.effective_chat.id == review.from_id:\r\n        keyboard = [[InlineKeyboardButton(\"Удалить\", callback_data=f'delete {review.id}')]]\r\n        reply_markup = InlineKeyboardMarkup(keyboard)\r\n    return reply_markup\r\n\r\n\r\nclass FilterNotAuthorized(MessageFilter):\r\n    \"\"\"Filter that matches users who are not registered yet\"\"\"\r\n    def filter(self, message):\r\n        return not students.get_student(message.chat_id)\r\n","repo_name":"aseven1/practice_1","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3137284622","text":"\r\n\r\n__author__ = 'Clement'\r\nfrom enOcean.telegram.BaseTelegram import BaseTelegram\r\n\r\nclass Rcm250:\r\n    \"\"\" This class lets you use an Rcm250.\r\n\r\n    Learning:\r\n        
- Pull LRN button on Rcm250\r\n - Set the sender id you want with setSenderId method\r\n - Use the switchOn method.\r\n\r\n \"\"\"\r\n def __init__(self, serial):\r\n self.id = [0xFF, 0xFF, 0xFF, 0xFF]\r\n self.senderId = [0x00, 0x00, 0x00, 0x00]\r\n\r\n self.action = [0x10]\r\n self.serial = serial\r\n\r\n self.tel = BaseTelegram()\r\n self.tel.setType(BaseTelegram.RADIO)\r\n\r\n def setId(self,id):\r\n self.id = id\r\n\r\n def setSenderId(self,id):\r\n self.senderId = id\r\n\r\n\r\n def switchOn(self):\r\n data = [0xF6, 0x10] + self.senderId + [0x30]\r\n self.tel.setData(data)\r\n opData = [0x01] + self.id + [0x2D,0x00]\r\n self.tel.setOptionalData(opData)\r\n self.serial.write(self.tel.toByte())\r\n\r\n\r\n def switchOff(self):\r\n data = [0xF6, 0x30] + self.senderId + [0x30]\r\n self.tel.setData(data)\r\n opData = [0x01] + self.id + [0x2D,0x00]\r\n self.tel.setOptionalData(opData)\r\n self.serial.write(self.tel.toByte())\r\n\r\n\r\n\r\n","repo_name":"cgarnier/domolife","sub_path":"python/enOcean/Modules/Rcm250.py","file_name":"Rcm250.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"34544167195","text":"from app.modules.emails.models import EmailAddress\n\n\nclass EmailService:\n @classmethod\n def get_email(cls, session, email_address: str):\n email = session.query(EmailAddress).filter_by(address=email_address).first()\n if not email:\n raise Exception(\"No email found!\")\n return email\n","repo_name":"gbilton/Sampa","sub_path":"app/modules/emails/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37907051868","text":"# ordenar los valores de una lista\r\nimport os\r\nos.system(\"cls\")\r\n \r\ndef listaOrganizar(lista):\r\n for i in range(1,len(lista)):\r\n elemento = lista[i]\r\n indice = i\r\n # print(lista[i])\r\n \r\n while indice > 0 and lista[indice - 1] > elemento: \r\n lista[indice] = lista[indice - 1]\r\n indice = indice - 1\r\n \r\n lista[indice] = elemento\r\n \r\nlista = [2000, 300, 25, 40, 31, 19, 50, 45]\r\nprint(f\"Valores desorganizados : {lista}\")\r\nlistaOrganizar(lista)\r\n \r\nprint(f\"Valores organizada : {lista}\")\r\n","repo_name":"Edwardb11/ejercicios-de-python","sub_path":"Array/ordenar.py","file_name":"ordenar.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"22163851076","text":"from typing import Any, Dict, List, Mapping, Optional\n\nfrom grpclib.server import Stream\n\nfrom viam.media.video import CameraMimeType\nfrom viam.proto.common import DoCommandRequest, DoCommandResponse, PointCloudObject, Pose, PoseInFrame, ResourceName\nfrom viam.proto.service.motion import (\n Constraints,\n GetPoseRequest,\n GetPoseResponse,\n MotionServiceBase,\n MoveOnMapRequest,\n MoveOnMapResponse,\n MoveRequest,\n MoveResponse,\n MoveSingleComponentRequest,\n MoveSingleComponentResponse,\n MoveOnGlobeRequest,\n MoveOnGlobeResponse,\n)\nfrom viam.proto.service.sensors import (\n GetReadingsRequest,\n GetReadingsResponse,\n GetSensorsRequest,\n GetSensorsResponse,\n Readings,\n SensorsServiceBase,\n)\nfrom viam.proto.service.vision import (\n Classification,\n Detection,\n GetClassificationsFromCameraRequest,\n GetClassificationsFromCameraResponse,\n GetClassificationsRequest,\n GetClassificationsResponse,\n 
GetDetectionsFromCameraRequest,\n GetDetectionsFromCameraResponse,\n GetDetectionsRequest,\n GetDetectionsResponse,\n GetObjectPointCloudsRequest,\n GetObjectPointCloudsResponse,\n VisionServiceBase,\n)\n\nfrom viam.services.mlmodel import File, LabelType, Metadata, MLModel, TensorInfo\nfrom viam.services.slam import SLAM\nfrom viam.utils import ValueTypes, struct_to_dict\n\n\nclass MockMLModel(MLModel):\n INPUT_DATA: Dict = {\"image\": [10, 10, 255, 0, 0, 255, 255, 0, 100]}\n OUTPUT_DATA = {\n \"n_detections\": [3],\n \"confidence_scores\": [[0.9084375, 0.7359375, 0.33984375]],\n \"labels\": [[0, 0, 4]],\n \"locations\": [[[0.1, 0.4, 0.22, 0.4], [0.02, 0.22, 0.77, 0.90], [0.40, 0.50, 0.40, 0.50]]],\n }\n META_INPUTS = [TensorInfo(name=\"image\", description=\"i0\", data_type=\"uint8\", shape=[300, 200])]\n META_OUTPUTS = [\n TensorInfo(name=\"n_detections\", description=\"o0\", data_type=\"int32\", shape=[1]),\n TensorInfo(name=\"confidence_scores\", description=\"o1\", data_type=\"float32\", shape=[3, 1]),\n TensorInfo(\n name=\"labels\",\n description=\"o2\",\n data_type=\"int32\",\n shape=[3, 1],\n associated_files=[\n File(\n name=\"category_labels.txt\",\n description=\"these labels represent types of plants\",\n label_type=LabelType.LABEL_TYPE_TENSOR_VALUE,\n )\n ],\n ),\n TensorInfo(name=\"locations\", description=\"o3\", data_type=\"float32\", shape=[4, 3, 1]),\n ]\n META = Metadata(name=\"fake_detector\", type=\"object_detector\", description=\"desc\", input_info=META_INPUTS, output_info=META_OUTPUTS)\n\n def __init__(self, name: str):\n self.timeout: Optional[float] = None\n\n super().__init__(name)\n\n async def infer(self, input_data: Dict[str, ValueTypes], *, timeout: Optional[float] = None) -> Dict[str, ValueTypes]:\n self.timeout = timeout\n return self.OUTPUT_DATA\n\n async def metadata(self, *, timeout: Optional[float] = None) -> Metadata:\n self.timeout = timeout\n return self.META\n\n\nclass MockMotion(MotionServiceBase):\n def __init__(\n self,\n move_responses: Dict[str, bool],\n move_single_component_responses: Dict[str, bool],\n get_pose_responses: Dict[str, PoseInFrame],\n ):\n self.move_responses = move_responses\n self.move_single_component_responses = move_single_component_responses\n self.get_pose_responses = get_pose_responses\n self.constraints: Optional[Constraints] = None\n self.extra: Optional[Mapping[str, Any]] = None\n self.timeout: Optional[float] = None\n\n async def Move(self, stream: Stream[MoveRequest, MoveResponse]) -> None:\n request = await stream.recv_message()\n assert request is not None\n name: ResourceName = request.component_name\n self.constraints = request.constraints\n self.extra = struct_to_dict(request.extra)\n self.timeout = stream.deadline.time_remaining() if stream.deadline else None\n success = self.move_responses[name.name]\n response = MoveResponse(success=success)\n await stream.send_message(response)\n\n async def MoveOnMap(self, stream: Stream[MoveOnMapRequest, MoveOnMapResponse]) -> None:\n request = await stream.recv_message()\n assert request is not None\n self.component_name = request.component_name\n self.destination = request.destination\n self.slam_service = request.slam_service_name\n self.extra = struct_to_dict(request.extra)\n self.timeout = stream.deadline.time_remaining() if stream.deadline else None\n await stream.send_message(MoveOnMapResponse(success=True))\n\n async def MoveOnGlobe(self, stream: Stream[MoveOnGlobeRequest, MoveOnGlobeResponse]) -> None:\n request = await stream.recv_message()\n assert request is 
not None\n self.component_name = request.component_name\n self.destination = request.destination\n self.movement_sensor = request.movement_sensor_name\n self.obstacles = request.obstacles\n self.heading = request.heading\n self.linear_meters_per_sec = request.linear_meters_per_sec\n self.angular_deg_per_sec = request.angular_deg_per_sec\n self.extra = struct_to_dict(request.extra)\n self.timeout = stream.deadline.time_remaining() if stream.deadline else None\n await stream.send_message(MoveOnGlobeResponse(success=True))\n\n async def MoveSingleComponent(self, stream: Stream[MoveSingleComponentRequest, MoveSingleComponentResponse]) -> None:\n request = await stream.recv_message()\n assert request is not None\n name: ResourceName = request.component_name\n self.extra = struct_to_dict(request.extra)\n self.timeout = stream.deadline.time_remaining() if stream.deadline else None\n success = self.move_single_component_responses[name.name]\n response = MoveSingleComponentResponse(success=success)\n await stream.send_message(response)\n\n async def GetPose(self, stream: Stream[GetPoseRequest, GetPoseResponse]) -> None:\n request = await stream.recv_message()\n assert request is not None\n name: ResourceName = request.component_name\n self.extra = struct_to_dict(request.extra)\n self.timeout = stream.deadline.time_remaining() if stream.deadline else None\n pose = self.get_pose_responses[name.name]\n response = GetPoseResponse(pose=pose)\n await stream.send_message(response)\n\n async def DoCommand(self, stream: Stream[DoCommandRequest, DoCommandResponse]) -> None:\n request = await stream.recv_message()\n assert request is not None\n self.timeout = stream.deadline.time_remaining() if stream.deadline else None\n await stream.send_message(DoCommandResponse(result=request.command))\n\n\nclass MockSensors(SensorsServiceBase):\n def __init__(self, sensors: List[ResourceName], readings: List[Readings]):\n self.sensors = sensors\n self.readings = readings\n self.extra: Optional[Mapping[str, Any]] = None\n self.timeout: Optional[float] = None\n\n async def GetSensors(self, stream: Stream[GetSensorsRequest, GetSensorsResponse]) -> None:\n request = await stream.recv_message()\n assert request is not None\n self.extra = struct_to_dict(request.extra)\n self.timeout = stream.deadline.time_remaining() if stream.deadline else None\n response = GetSensorsResponse(sensor_names=self.sensors)\n await stream.send_message(response)\n\n async def GetReadings(self, stream: Stream[GetReadingsRequest, GetReadingsResponse]) -> None:\n request = await stream.recv_message()\n assert request is not None\n self.extra = struct_to_dict(request.extra)\n self.timeout = stream.deadline.time_remaining() if stream.deadline else None\n self.sensors_for_readings: List[ResourceName] = list(request.sensor_names)\n response = GetReadingsResponse(readings=self.readings)\n await stream.send_message(response)\n\n async def DoCommand(self, stream: Stream[DoCommandRequest, DoCommandResponse]) -> None:\n request = await stream.recv_message()\n assert request is not None\n self.timeout = stream.deadline.time_remaining() if stream.deadline else None\n await stream.send_message(DoCommandResponse(result=request.command))\n\n\nclass MockSLAM(SLAM):\n INTERNAL_STATE_CHUNKS = [bytes(5), bytes(2)]\n POINT_CLOUD_PCD_CHUNKS = [bytes(3), bytes(2)]\n POSITION = Pose(x=1, y=2, z=3, o_x=2, o_y=3, o_z=4, theta=20)\n\n def __init__(self, name: str):\n self.name = name\n self.timeout: Optional[float] = None\n super().__init__(name)\n\n async def 
get_internal_state(self, *, timeout: Optional[float] = None) -> List[bytes]:\n self.timeout = timeout\n return self.INTERNAL_STATE_CHUNKS\n\n async def get_point_cloud_map(self, *, timeout: Optional[float] = None) -> List[bytes]:\n self.timeout = timeout\n return self.POINT_CLOUD_PCD_CHUNKS\n\n async def get_position(self, *, timeout: Optional[float] = None) -> Pose:\n self.timeout = timeout\n return self.POSITION\n\n async def do_command(self, command: Mapping[str, ValueTypes], *, timeout: Optional[float] = None, **kwargs) -> Mapping[str, ValueTypes]:\n return {\"command\": command}\n\n\nclass MockVision(VisionServiceBase):\n def __init__(\n self,\n detectors: List[str],\n detections: List[Detection],\n classifiers: List[str],\n classifications: List[Classification],\n segmenters: List[str],\n point_clouds: List[PointCloudObject],\n ):\n self.detectors = detectors\n self.detections = detections\n self.classifiers = classifiers\n self.classifications = classifications\n self.segmenters = segmenters\n self.point_clouds = point_clouds\n self.extra: Optional[Mapping[str, Any]] = None\n self.timeout: Optional[float] = None\n\n async def GetDetectionsFromCamera(self, stream: Stream[GetDetectionsFromCameraRequest, GetDetectionsFromCameraResponse]) -> None:\n request = await stream.recv_message()\n assert request is not None\n self.extra = struct_to_dict(request.extra)\n self.timeout = stream.deadline.time_remaining() if stream.deadline else None\n response = GetDetectionsFromCameraResponse(detections=self.detections)\n await stream.send_message(response)\n\n async def GetDetections(self, stream: Stream[GetDetectionsRequest, GetDetectionsResponse]) -> None:\n request = await stream.recv_message()\n assert request is not None\n self.extra = struct_to_dict(request.extra)\n self.timeout = stream.deadline.time_remaining() if stream.deadline else None\n response = GetDetectionsResponse(detections=self.detections)\n await stream.send_message(response)\n\n async def GetClassificationsFromCamera(\n self, stream: Stream[GetClassificationsFromCameraRequest, GetClassificationsFromCameraResponse]\n ) -> None:\n request = await stream.recv_message()\n assert request is not None\n self.extra = struct_to_dict(request.extra)\n self.timeout = stream.deadline.time_remaining() if stream.deadline else None\n response = GetClassificationsFromCameraResponse(classifications=self.classifications)\n await stream.send_message(response)\n\n async def GetClassifications(self, stream: Stream[GetClassificationsRequest, GetClassificationsResponse]) -> None:\n request = await stream.recv_message()\n assert request is not None\n self.extra = struct_to_dict(request.extra)\n self.timeout = stream.deadline.time_remaining() if stream.deadline else None\n response = GetClassificationsResponse(classifications=self.classifications)\n await stream.send_message(response)\n\n async def GetObjectPointClouds(self, stream: Stream[GetObjectPointCloudsRequest, GetObjectPointCloudsResponse]) -> None:\n request = await stream.recv_message()\n assert request is not None\n self.extra = struct_to_dict(request.extra)\n self.timeout = stream.deadline.time_remaining() if stream.deadline else None\n response = GetObjectPointCloudsResponse(mime_type=CameraMimeType.PCD.value, objects=self.point_clouds)\n await stream.send_message(response)\n\n async def DoCommand(self, stream: Stream[DoCommandRequest, DoCommandResponse]) -> None:\n request = await stream.recv_message()\n assert request is not None\n self.timeout = stream.deadline.time_remaining() if 
stream.deadline else None\n await stream.send_message(DoCommandResponse(result=request.command))\n","repo_name":"michaellee1019/viam-python-sdk","sub_path":"tests/mocks/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":12777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"4262091982","text":"from rest_framework import serializers\n\nfrom datahub.company.serializers import NestedAdviserField\nfrom datahub.investment.project.proposition.models import Proposition, PropositionDocument\nfrom datahub.investment.project.serializers import NestedInvestmentProjectField\n\n\nclass CreatePropositionSerializer(serializers.ModelSerializer):\n \"\"\"Proposition serialiser for create endpoint.\"\"\"\n\n class Meta:\n model = Proposition\n fields = (\n 'adviser',\n 'deadline',\n 'name',\n 'scope',\n )\n\n\nclass CompletePropositionSerializer(serializers.ModelSerializer):\n \"\"\"Proposition serialiser for complete endpoint.\"\"\"\n\n class Meta:\n model = Proposition\n fields = (\n 'details',\n )\n extra_kwargs = {\n 'details': {'default': ''},\n }\n\n def complete(self):\n \"\"\"Complete a proposition.\"\"\"\n self.instance.complete(\n by=self.context['current_user'],\n details=self.validated_data['details'],\n )\n return self.instance\n\n\nclass AbandonPropositionSerializer(serializers.ModelSerializer):\n \"\"\"Proposition serialiser for abandon endpoint.\"\"\"\n\n class Meta:\n model = Proposition\n fields = (\n 'details',\n )\n extra_kwargs = {\n 'details': {'allow_blank': False},\n }\n\n def abandon(self):\n \"\"\"Abandon a proposition.\"\"\"\n self.instance.abandon(\n by=self.context['current_user'],\n details=self.validated_data['details'],\n )\n return self.instance\n\n\nclass PropositionDocumentSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for Investment Project Proposition Documents.\"\"\"\n\n av_clean = serializers.BooleanField(source='document.av_clean', read_only=True)\n created_by = NestedAdviserField(read_only=True)\n status = serializers.CharField(source='document.status', read_only=True)\n uploaded_on = serializers.DateTimeField(source='document.uploaded_on', read_only=True)\n\n class Meta:\n model = PropositionDocument\n fields = (\n 'id',\n 'av_clean',\n 'created_by',\n 'created_on',\n 'uploaded_on',\n 'original_filename',\n 'url',\n 'status',\n )\n read_only_fields = ('url', 'created_on')\n\n def create(self, validated_data):\n \"\"\"Create proposition document.\"\"\"\n return PropositionDocument.objects.create(\n proposition_id=self.context['request'].parser_context['kwargs']['proposition_pk'],\n original_filename=validated_data['original_filename'],\n created_by=self.context['request'].user,\n )\n\n\nclass PropositionSerializer(serializers.ModelSerializer):\n \"\"\"Proposition serialiser for view only endpoints.\"\"\"\n\n investment_project = NestedInvestmentProjectField()\n adviser = NestedAdviserField()\n created_by = NestedAdviserField()\n modified_by = NestedAdviserField()\n\n class Meta:\n model = Proposition\n fields = (\n 'id',\n 'investment_project',\n 'adviser',\n 'deadline',\n 'status',\n 'name',\n 'scope',\n 'details',\n 'created_on',\n 'created_by',\n 'modified_on',\n 'modified_by',\n )\n read_only_fields = 
fields\n","repo_name":"uktrade/data-hub-api","sub_path":"datahub/investment/project/proposition/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":3418,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"81"} +{"seq_id":"71172243465","text":"import math\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.modules.utils import _pair, _quadruple\n\n\nclass Pad(nn.Module):\n def __init__(self, padding, ring=False, mode=\"replicate\"):\n super().__init__()\n self.padding = _quadruple(padding)\n self.horizontal = \"circular\" if ring else mode\n self.vertical = mode\n\n def forward(self, h):\n left, right, top, bottom = self.padding\n h = F.pad(h, (left, right, 0, 0), mode=self.horizontal)\n h = F.pad(h, (0, 0, top, bottom), mode=self.vertical)\n return h\n\n def extra_repr(self):\n return f\"padding={self.padding}, horizontal={self.horizontal}, vertical={self.vertical}\"\n\n\ndef filter2d(x, kernel, gain=1):\n assert kernel.ndim == 1\n kernel = kernel / kernel.sum()\n kernel = kernel * (gain ** (kernel.ndim / 2))\n fh = fw = len(kernel)\n pw0 = fw // 2\n pw1 = (fw - 1) // 2\n ph0 = fh // 2\n ph1 = (fh - 1) // 2\n x = F.pad(x, (pw0, pw1, 0, 0), mode=\"circular\")\n x = F.pad(x, (0, 0, ph0, ph1), mode=\"replicate\")\n B, C, H, W = x.shape\n kernel = kernel[None, None].repeat(C, 1, 1).to(dtype=x.dtype)\n x = F.conv2d(x, kernel[..., None, :], groups=C)\n x = F.conv2d(x, kernel[..., :, None], groups=C)\n return x\n\n\nclass Resample(nn.Module):\n def __init__(\n self,\n up=1,\n down=1,\n window=[1, 3, 3, 1], # bilinear\n ring=True,\n normalize=True,\n direction=\"hw\",\n ):\n super().__init__()\n self.up = np.asarray(_pair(up))\n self.down = np.asarray(_pair(down))\n self.window = window\n self.n_taps = len(window)\n self.ring = ring\n self.pad_mode_w = \"circular\" if ring else \"replicate\"\n self.pad_mode_h = \"replicate\"\n self.normalize = normalize\n self.direction = direction\n assert self.direction in (\"h\", \"w\", \"hw\")\n\n # setup sizes\n if \"h\" in self.direction:\n self.k_h = self.n_taps\n self.up_h = self.up[0]\n self.down_h = self.down[0]\n else:\n self.k_h = self.up_h = self.down_h = 1\n\n if \"w\" in self.direction:\n self.k_w = self.n_taps\n self.up_w = self.up[1]\n self.down_w = self.down[1]\n else:\n self.k_w = self.up_w = self.down_w = 1\n\n # setup filter\n kernel = torch.tensor(self.window, dtype=torch.float32)\n if self.normalize:\n kernel /= kernel.sum()\n kernel *= (self.up_h * self.up_w) ** (kernel.ndim / 2)\n self.register_buffer(\"kernel\", kernel)\n\n # setup padding\n if self.up[0] > 1:\n self.ph0 = (self.k_h - self.up_h + 1) // 2 + self.up_h - 1\n self.ph1 = (self.k_h - self.up_h) // 2\n elif self.down[0] >= 1:\n self.ph0 = (self.k_h - self.down_h + 1) // 2\n self.ph1 = (self.k_h - self.down_h) // 2\n if self.up[1] > 1:\n self.pw0 = (self.k_w - self.up_w + 1) // 2 + self.up_w - 1\n self.pw1 = (self.k_w - self.up_w) // 2\n elif self.down[1] >= 1:\n self.pw0 = (self.k_w - self.down_w + 1) // 2\n self.pw1 = (self.k_w - self.down_w) // 2\n\n self.margin = max(self.ph0, self.ph1, self.pw0, self.pw1)\n\n def forward(self, h):\n # margin\n h = F.pad(h, (self.margin, self.margin, 0, 0), mode=self.pad_mode_w)\n h = F.pad(h, (0, 0, self.margin, self.margin), mode=self.pad_mode_h)\n # up by zero-insertion\n B, C, H, W = h.shape\n h = h.view(B, C, H, 1, W, 1)\n h = F.pad(h, [0, self.up_w - 1, 0, 0, 0, self.up_h - 1])\n h = h.view(B, C, H * self.up_h, 
W * self.up_w)\n # crop\n h = h[\n ...,\n self.margin * self.up_h\n - self.ph0 : (H - self.margin) * self.up_h\n + self.ph1,\n self.margin * self.up_w\n - self.pw0 : (W - self.margin) * self.up_w\n + self.pw1,\n ]\n # fir\n kernel = self.kernel[None, None].repeat(C, 1, 1).to(dtype=h.dtype)\n if self.direction == \"hw\":\n h = F.conv2d(h, kernel[..., None, :], groups=C)\n h = F.conv2d(h, kernel[..., :, None], groups=C)\n elif self.direction == \"h\":\n h = F.conv2d(h, kernel[..., :, None], groups=C)\n elif self.direction == \"w\":\n h = F.conv2d(h, kernel[..., None, :], groups=C)\n # down\n h = h[:, :, :: self.down_h, :: self.down_w]\n return h\n\n def extra_repr(self):\n return f'filter_type={self.window}, up={self.up}, down={self.down}, direction=\"{self.direction}\"'\n\n\nclass BlurVH(nn.Module):\n \"\"\"\n vertical/horizontal antialiasing from NR-GAN:\n https://arxiv.org/abs/1911.11776\n \"\"\"\n\n def __init__(self, window=[1, 2, 1], ring=True):\n super().__init__()\n self.blur_v = Resample(window=window, ring=ring, direction=\"h\")\n self.blur_h = Resample(window=window, ring=ring, direction=\"w\")\n\n def forward(self, x):\n h_v = self.blur_v(x)\n h_h = self.blur_h(x)\n return torch.cat([h_v, h_h], dim=1)\n\n\nclass EqualLR(nn.Module):\n \"\"\"\n A wrapper for runtime weight scaling (equalized learning rate).\n https://arxiv.org/abs/1710.10196\n \"\"\"\n\n def __init__(self, module, gain: float = 1.0, lr_mul=1.0):\n super().__init__()\n self.module = module\n self.gain = gain\n self.lr_mul = lr_mul\n\n # Runtime scale factor\n self.gain_ = gain * lr_mul\n fan_in = self.module.weight[0].numel()\n self.scale = 1.0 / math.sqrt(fan_in)\n\n # Weights are initialized with N(0, 1)\n nn.init.normal_(self.module.weight, 0.0, 1.0 / lr_mul)\n if hasattr(self.module, \"bias\") and self.module.bias is not None:\n nn.init.constant_(self.module.bias, 0.0)\n\n def forward(self, x):\n return self.module(x * self.scale) * self.gain_\n\n def extra_repr(self):\n return f\"gain={self.gain}, lr_mul={self.lr_mul}\"\n\n\nclass Conv2d(nn.Sequential):\n \"\"\"\n Custom padding + Conv2d + Equal LR\n \"\"\"\n\n def __init__(\n self,\n in_ch,\n out_ch,\n kernel_size,\n stride,\n padding,\n bias=True,\n ring=False,\n equal_lr=False,\n gain=1.0,\n lr_mul=1.0,\n ):\n layers = []\n if padding != 0:\n layers += [Pad(padding=padding, ring=ring)]\n conv = nn.Conv2d(in_ch, out_ch, kernel_size, stride, 0, bias=bias)\n layers += [EqualLR(conv, gain, lr_mul) if equal_lr else conv]\n super().__init__(*layers)\n\n\nclass PixelNorm(nn.Module):\n \"\"\"\n Pixel-wise normalization.\n https://arxiv.org/abs/1710.10196\n Hypersphare projection in 1D case.\n https://arxiv.org/abs/1912.04958\n \"\"\"\n\n def forward(self, x, alpha: float = 1e-8):\n y = x.pow(2.0).mean(dim=1, keepdim=True).add(alpha).sqrt()\n return x / y\n\n\nclass MinibatchStdDev(nn.Module):\n \"\"\"\n Minibatch discrimination.\n https://arxiv.org/abs/1710.10196\n \"\"\"\n\n def __init__(self, group=4, features=1):\n super().__init__()\n self.group = group\n self.features = features\n\n def forward(self, x, alpha: float = 1e-8):\n B, C, H, W = x.shape\n group = min(B, self.group)\n # Split minibatch into groups\n y = x.view(group, -1, self.features, C // self.features, H, W)\n # Calculate stddev over group\n y = torch.sqrt(y.var(0, unbiased=False) + alpha)\n # Take average over fmaps and pixels\n y = y.mean([2, 3, 4], keepdim=True).squeeze(2)\n # [B // group, features, 1, 1]\n y = y.repeat(group, 1, H, W)\n # [B, features, H, W]\n y = torch.cat([x, y], dim=1)\n 
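# the concatenation keeps the original feature maps plus the stddev channels,\n        # so the output shape is (B, C + features, H, W)\n        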
return y\n\n    def extra_repr(self):\n        return f\"group={self.group}, features={self.features}\"\n\n\nclass Dilation(nn.Module):\n    def __init__(self, dilation=1, value=0):\n        super().__init__()\n        self.dilation = dilation\n        self.value = value\n        self.stride = self.dilation + 1\n        kernel = F.pad(torch.ones(1, 1, 1, 1), (self.dilation,) * 4, value=self.value)\n        self.register_buffer(\"kernel\", kernel)\n\n    def forward(self, x):\n        _, C, _, _ = x.shape\n        kernel = self.kernel.repeat(C, 1, 1, 1)\n        return F.conv_transpose2d(x, kernel, stride=self.stride, padding=1, groups=C)\n\n    def extra_repr(self):\n        return f\"dilation={self.dilation}, value={self.value}\"\n\n\ndef init_weights(layer, mode, gain=1.0):\n    for name, module in layer.named_modules():\n        if isinstance(module, nn.Conv2d) or isinstance(module, nn.Linear):\n            if mode == \"ortho\":\n                nn.init.orthogonal_(module.weight, gain)\n            elif mode == \"N02\":\n                nn.init.normal_(module.weight, 0, 0.02)\n            elif mode in [\"glorot\", \"xavier\"]:\n                nn.init.xavier_uniform_(module.weight, gain)\n            else:\n                raise NotImplementedError(mode)\n            if module.bias is not None:\n                nn.init.constant_(module.bias, 0.0)\n        elif \"BatchNorm\" in module.__class__.__name__:\n            if mode == \"N02\":\n                torch.nn.init.normal_(module.weight, 1.0, 0.02)\n                torch.nn.init.zeros_(module.bias)\n","repo_name":"kazuto1011/dusty-gan-v2","sub_path":"gans/models/ops/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":9255,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"81"} +{"seq_id":"17718136498","text":"import pytest\nfrom flask import session\nfrom app import app\n\n@pytest.fixture\ndef client():\n    app.config['TESTING'] = True\n    with app.test_client() as client:\n        with app.app_context():\n            yield client\n\n\ndef test_home_route_authenticated(client):\n    # Simulate an authenticated user\n    with client.session_transaction() as sess:\n        sess['username'] = 'test_user'\n        sess['id'] = 1\n\n    # Make a GET request to the home route\n    response = client.get('/')\n\n    assert response.status_code == 200\n    assert b'Welcome, test_user' in response.data\n\n\ndef test_home_route_unauthenticated(client):\n    # Simulate an unauthenticated user\n    with client.session_transaction() as sess:\n        sess.clear()\n\n    # Make a GET request to the home route\n    response = client.get('/')\n\n    assert response.status_code == 302\n    assert b'Redirecting' in response.data\n\n\ndef test_register_route(client):\n    # Make a GET request to the register route\n    response = client.get('/register')\n\n    assert response.status_code == 200\n    assert b'register' in response.data\n\ndef test_login_route(client):\n    # Make a GET request to the login route\n    response = client.get('/login')\n\n    assert response.status_code == 200\n    assert b'Login' in response.data\n\n\ndef test_login_route_post(client):\n    # Make a POST request to the login route with valid data\n    response = client.post('/login', data={\n        'username': 'test',\n        'password': 'test'\n    })\n\n    assert response.status_code == 302\n    assert response.location == '/'\n\n\ndef test_logout_route(client):\n    # Simulate an authenticated user\n    with client.session_transaction() as sess:\n        sess['username'] = 'test_user'\n        sess['id'] = 1\n\n    # Make a GET request to the logout route\n    response = client.get('/logout')\n\n    assert response.status_code == 302\n    assert response.location == '/login'\n    assert session.get('username') is None\n    assert session.get('id') is None\n\n\ndef test_home_route_authenticated_post(client):\n    # Simulate an authenticated user\n    with client.session_transaction() as sess:\n        
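The init_weights helper in the dusty-gan-v2 record above walks named_modules() and re-initializes conv/linear (and batch-norm) weights in place. A hedged usage sketch, not taken from either repository; the network here is purely illustrative:

```python
import torch.nn as nn

# A small illustrative network; init_weights is the helper defined above.
net = nn.Sequential(
    nn.Conv2d(3, 64, 3, padding=1),
    nn.BatchNorm2d(64),
    nn.ReLU(),
    nn.Conv2d(64, 3, 3, padding=1),
)
init_weights(net, mode="ortho", gain=1.0)  # orthogonal conv/linear weights, zero biases
init_weights(net, mode="N02")              # or DCGAN-style N(0, 0.02), incl. batch norm
```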
sess['username'] = 'test'\n        sess['id'] = 1\n\n    # Make a POST request to the home route\n    response = client.post('/', data={\n        'start_location': 'Start Location',\n        'end_location': 'End Location',\n    })\n    \n    assert response.status_code == 302\n\ndef test_generate_ticket_route_unauthenticated(client):\n    # Simulate an unauthenticated user\n    with client.session_transaction() as sess:\n        sess.clear()\n\n    # Make a GET request to the generate_ticket route\n    response = client.get('/generate_ticket')\n\n    assert response.status_code == 302\n    assert response.location == '/login'\n    # Add more assertions based on the behavior of the generate_ticket route with an unauthenticated user\n\ndef test_process_qr_code_route(client):\n    # Make a POST request to the process_qr_code route\n    response = client.post('/process_qr_code', data={\n        'qr_code_data': 'QR Code Data',\n    })\n\n    assert response.status_code == 200\n\nimport time\nimport os\nfrom ticket import Ticket, generate_qr_code\n\n@pytest.fixture\ndef ticket_data():\n    return {\n        'id': 'random_hash',\n        'creation_time': time.time(),\n        'start_location': 'Start',\n        'destination': 'Destination',\n        'qr_code_file': 'static/tickets/random_hash.png'\n    }\n\ndef test_ticket_is_valid(ticket_data):\n    ticket = Ticket(**ticket_data)\n\n    # Ticket should be valid immediately after creation\n    assert ticket.is_valid()\n\ndef test_ticket_invalid_after_validity_period(ticket_data):\n    ticket_data['creation_time'] = time.time() - 3700 # Set creation time to just over 1 hour ago\n    ticket = Ticket(**ticket_data)\n\n    # Ticket should not be valid after 1 hour has passed\n    assert not ticket.is_valid()\n\ndef test_valid_upto(ticket_data):\n    ticket = Ticket(**ticket_data)\n\n    # Ticket's validity should be 1 hour\n    assert ticket.valid_upto() == 3600\n\ndef test_generate_qr_code(tmpdir):\n    start_location = 'Start'\n    destination = 'Destination'\n    qr_code_file = tmpdir.join('test_qr_code.png').strpath\n\n    ticket = generate_qr_code(start_location, destination, qr_code_file=qr_code_file)\n\n    # Check if the QR code file was generated and exists\n    assert os.path.exists(qr_code_file)\n\n    # Check if the Ticket object was returned with correct attributes\n    assert ticket.id\n    assert ticket.creation_time\n    assert ticket.start_location == start_location\n    assert ticket.destination == destination\n    assert ticket.qr_code_file == qr_code_file\n\ndef test_ticket_remove_qr_code(ticket_data, tmpdir):\n    ticket_data['qr_code_file'] = tmpdir.join('test_qr_code.png').strpath\n    ticket = Ticket(**ticket_data)\n\n    # Create a dummy QR code file\n    with open(ticket.qr_code_file, 'w') as file:\n        file.write('Dummy QR code data')\n\n    # Check if the dummy QR code file exists before removing\n    assert os.path.exists(ticket.qr_code_file)\n\n    # Remove the QR code file using the remove method\n    ticket.remove()\n\n    # Check if the QR code file was removed\n    assert not os.path.exists(ticket.qr_code_file)\n","repo_name":"sameer693/qr_ticketing-sys","sub_path":"test_project.py","file_name":"test_project.py","file_ext":"py","file_size_in_byte":5167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31801728161","text":"numeros = []\nprint(\"Enter a grade: / To stop entering, type 0 when asked: \")\nsair = 1\nwhile sair != 0:\n    numeros.append(int(input(\"Enter a number: \")))\n    sair = int(input(\"Do you want to quit? (0 = quit): \"))\nsoma = sum(numeros)\nqtd_numeros = len(numeros)\nmedia = soma / qtd_numeros\nprint(\"The average of the entered numbers is: %.1f\" % 
media)","repo_name":"mferoc/UniProjecao-Topicos-Avancados-em-Analise-e-Desenvolvimento-de-Sistemas","sub_path":"exercicios/Repeticao/ex12.py","file_name":"ex12.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"17771002896","text":"from PySide6 import QtCore, QtGui, QtWidgets\n\nimport util\n\nSTOPPED = 1\nPAUSED = 2\nRUNNING = 3\n\nclass TimerWidget(QtWidgets.QWidget):\n def __init__(self, parent):\n super(TimerWidget, self).__init__()\n self.parent = parent\n\n self.elapsed_ms = 0\n self.state = STOPPED\n\n self.timer = QtCore.QTimer(self)\n self.timer.setInterval(10)\n self.timer.timeout.connect(self.update_timer)\n self.elapsed_timer = QtCore.QElapsedTimer()\n\n frame = QtWidgets.QFrame(self)\n grid_layout = QtWidgets.QGridLayout(self)\n\n self.timer_label = QtWidgets.QLabel(\"0.00s\")\n font = self.timer_label.font()\n font.setPointSize(32)\n self.timer_label.setFont(font)\n self.timer_label.setAlignment(QtCore.Qt.AlignRight)\n\n self.add_second_button = QtWidgets.QPushButton(\"+1s\")\n self.sub_second_button = QtWidgets.QPushButton(\"-1s\")\n self.start_button = QtWidgets.QPushButton(\"Start\")\n self.lap_button = QtWidgets.QPushButton(\"Add Call\")\n self.reset_button = QtWidgets.QPushButton(\"Reset\")\n\n self.add_second_button.clicked.connect(self.add_second_button_clicked)\n self.sub_second_button.clicked.connect(self.sub_second_button_clicked)\n self.start_button.clicked.connect(self.start_button_clicked)\n self.lap_button.clicked.connect(self.lap_button_clicked)\n self.reset_button.clicked.connect(self.reset_button_clicked)\n\n grid_layout.addWidget(self.timer_label, 0, 0, 2, 3)\n grid_layout.addWidget(self.sub_second_button, 2, 0, 1, 1)\n grid_layout.addWidget(self.add_second_button, 2, 2, 1, 1)\n\n grid_layout.addWidget(self.start_button, 0, 3, 1, 1)\n grid_layout.addWidget(self.lap_button, 1, 3, 1, 1)\n grid_layout.addWidget(self.reset_button, 2, 3, 1, 1)\n frame.setLayout(grid_layout)\n self.setLayout(grid_layout)\n\n self.playback_row = 0\n\n def add_second_button_clicked(self):\n self.set_elapsed_ms(self.elapsed_ms + 1000)\n\n def sub_second_button_clicked(self):\n self.set_elapsed_ms(self.elapsed_ms - 1000)\n\n def set_elapsed_ms(self, elapsed_ms):\n self.elapsed_ms = elapsed_ms\n self.update_timer_label()\n\n self.playback_row = 0\n\n while self.playback_row < self.parent.table_model.rowCount() and\\\n self.elapsed_ms > self.parent.table_model.callouts[self.playback_row].timestamp:\n self.playback_row += 1\n\n self.parent.select_row(self.playback_row)\n\n def update_timer_label(self):\n self.timer_label.setText(util.format_ms(self.elapsed_ms)+\"s\")\n\n def update_timer(self):\n self.elapsed_ms += self.elapsed_timer.restart()\n self.update_timer_label()\n\n row_updated = False\n\n while self.playback_row < self.parent.table_model.rowCount() and\\\n self.elapsed_ms > self.parent.table_model.callouts[self.playback_row].timestamp:\n self.playback_row += 1\n row_updated = True\n\n if row_updated:\n self.parent.select_row(self.playback_row)\n\n def start_button_clicked(self, event):\n if self.state == STOPPED:\n self.timer.start()\n self.elapsed_timer.start()\n self.start_button.setText(\"Pause\")\n self.state = RUNNING\n\n self.parent.select_row(0)\n elif self.state == RUNNING:\n self.timer.stop()\n self.start_button.setText(\"Resume\")\n self.state = PAUSED\n elif self.state == PAUSED:\n self.timer.start()\n self.elapsed_timer.restart()\n 
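The pause/resume logic in the TimerWidget above leans on QElapsedTimer.restart(): elapsed milliseconds are accumulated only while the timer is running, so paused intervals never enter the total. The same accumulate-on-restart pattern in plain Python (a sketch using time.monotonic, not Qt):

```python
import time

class Stopwatch:
    """Accumulates elapsed ms across pause/resume, like TimerWidget above."""

    def __init__(self):
        self.elapsed_ms = 0
        self._last = None

    def resume(self):
        self._last = time.monotonic()  # analogous to QElapsedTimer.start()/restart()

    def tick(self):
        # analogous to `elapsed_ms += elapsed_timer.restart()`
        now = time.monotonic()
        self.elapsed_ms += int((now - self._last) * 1000)
        self._last = now

sw = Stopwatch()
sw.resume()
time.sleep(0.05)
sw.tick()
print(sw.elapsed_ms)  # roughly 50
```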
self.start_button.setText(\"Pause\")\n self.state = RUNNING\n\n def lap_button_clicked(self, event):\n if self.state == RUNNING:\n self.parent.lap_button_clicked(self.elapsed_ms)\n\n def reset(self):\n self.timer.stop()\n self.timer_label.setText(\"0.00s\")\n self.start_button.setText(\"Start\")\n self.state = STOPPED\n self.elapsed_ms = 0\n self.playback_row = 0\n\n def reset_button_clicked(self, event):\n self.reset()\n\n\nif __name__ == '__main__':\n import sys\n app = QtWidgets.QApplication(sys.argv)\n\n box = QtWidgets.QGroupBox()\n layout = QtWidgets.QVBoxLayout()\n widget = TimerWidget(box)\n layout.addWidget(widget)\n box.setLayout(layout)\n box.resize(300, 200)\n box.show()\n\n sys.exit(app.exec())\n","repo_name":"schultzaur/fftimer","sub_path":"UI/timer_widget.py","file_name":"timer_widget.py","file_ext":"py","file_size_in_byte":4417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73859977546","text":"import requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\n# List of prefixes\nprefixes = [\"ACCT\", \"AFAM\", \"AGNG\", \"AMST\", \"ANTH\", \"ARAB\", \"ART\", \"ARTE\", \"ASAM\", \"ASTR\", \"BIOL\", \"BUAD\", \"CAS\",\n \"CEDU\", \"CHEM\", \"CHIC\", \"CHIN\", \"CNSM\", \"COMD\", \"COMM\", \"COUN\", \"CPLT\", \"CPSC\", \"CRJU\", \"CTVA\", \"DANC\",\n \"ECON\", \"EDAD\", \"EDD\", \"EDEL\", \"EDSC\", \"EGCE\", \"EGEC\", \"EGGN\", \"EGME\", \"EGMT\", \"ENED\", \"ENGL\", \"ENST\",\n \"ESE\", \"ESM\", \"ETHN\", \"FIN\", \"FREN\", \"GEOG\", \"GEOL\", \"GRMN\", \"HCOM\", \"HIST\", \"HONR\", \"HSS\", \"HUSR\", \"IDT\",\n \"ISDS\", \"ITAL\", \"JAPN\", \"KNES\", \"KORE\", \"LBST\", \"LING\", \"LTAM\", \"MAED\", \"MATH\", \"MGMT\", \"MKTG\", \"MLNG\",\n \"MLSC\", \"MSW\", \"MUS\", \"MUSE\", \"NURS\", \"PERS\", \"PHIL\", \"PHYS\", \"PORT\", \"POSC\", \"PSYC\", \"PUBH\", \"READ\",\n \"RLST\", \"SCED\", \"SOCI\", \"SPAN\", \"SPED\", \"TESL\", \"THTR\", \"VIET\", \"WGST\"]\n\n# Create an empty list to store the coid numbers\ncoid_numbers = []\n\n# The URL of the website you want to scrape\nbase_url = \"https://catalog.fullerton.edu/content.php?filter%5B27%5D={}&filter%5B29%5D=&filter%5Bkeyword%5D=&filter%5B32%5D=1&filter%5Bcpage%5D=1&cur_cat_oid=80&expand=&navoid=11056&search_database=Filter&filter%5Bexact_match%5D=1#acalog_template_course_filter\"\n\n# Loop through the prefixes\nfor prefix in prefixes:\n # Construct the URL for the specific prefix\n url = base_url.format(prefix)\n\n # Send a GET request to the website\n response = requests.get(url)\n\n # Parse the HTML content of the page using BeautifulSoup\n soup = BeautifulSoup(response.content, 'html.parser')\n\n # Find all the 'a' tags that contain 'coid' in their 'href' attribute\n coid_links = soup.find_all('a', href=lambda href: href and 'coid' in href)\n\n # Extract the 'coid' numbers from the 'href' attributes of the links and add them to the list\n coid_numbers.extend(link.get('href').split('=')[-1] for link in coid_links)\n\n# Create a DataFrame with the coid numbers\ndf = pd.DataFrame({'coid Numbers': coid_numbers})\n\n# Save the DataFrame to an Excel file\ndf.to_excel('coid_numbers1.xlsx', index=False)\n","repo_name":"ChinmayiSreeChitra/AIElectiveAdvisor","sub_path":"webscraping.py","file_name":"webscraping.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8151393600","text":"from matplotlib.pyplot import axis\nimport torch\nimport torch.nn as nn\nfrom 
torchsummary import summary\nfrom torchinfo import summary\nimport sys\nimport numpy as np\nfrom models import net_modules\nimport os\n\nif __name__ == \"__main__\":\n import Generater\nelse:\n from models import Generater\n\nfrom external.poseAware.models.enc_and_dec import Encoder , Decoder , StaticEncoder\nfrom external.poseAware import option_parser\nfrom external.poseAware.models.skeleton import build_edge_topology\n\nclass pose2texnet(nn.Module):\n def __init__(self, device , type , args , topology , separate_flg, texture_resolution , multi_topology_flg , prediction_texture_mask):\n super(pose2texnet,self).__init__()\n\n self.device = device\n self.type = type\n self.args = args\n self.separate_flg = separate_flg\n self.multi_topology_flg = multi_topology_flg\n self.prediction_texture_mask = prediction_texture_mask\n\n \"\"\"\n self.fc_seq = nn.Sequential(\n #fc1\n nn.Linear(24*3, 128) ,\n nn.ReLU(inplace=False),\n #fc2\n nn.Linear(128, 256),\n nn.ReLU(inplace=False),\n #fc3\n nn.Linear(256, 512),\n nn.ReLU(inplace=False),\n #fc4 \n nn.Linear(512, 1024),\n nn.ReLU(inplace=False),\n #fc5\n nn.Linear(1024, 2048),\n )\n \n self.fc_seq_multi = nn.Sequential(\n #fc1\n nn.Linear(24*3*3, 256),\n nn.ReLU(inplace=False),\n #fc2\n nn.Linear(256, 512),\n nn.ReLU(inplace=False),\n #fc3 \n nn.Linear(512, 1024),\n nn.ReLU(inplace=False),\n #fc4\n nn.Linear(1024, 2048),\n )\n \"\"\"\n\n self.fc_poseAware = self.fc_layer(feature_in = 84, feature_out = 2048)\n\n self.fc_poseAware_cond = self.fc_layer(feature_in = 84, feature_out = 1024)\n\n self.dropout = nn.Dropout(0.3) \n #layer = 2\n \"\"\"\n self.fc_poseAware_multi0 = nn.Sequential(\n #fc1\n nn.Linear(48, 512),\n nn.ReLU(inplace=False),\n #fc2\n nn.Linear(512, 2048),\n )\n \"\"\"\n\n self.fc_poseAware_concateneted = self.fc_layer_concateneted(2048*6,2048) \n\n #layer = 1\n self.fc_poseAware_multi0 = self.fc_layer_multi1(48,2048)\n self.fc_poseAware_multi1 = self.fc_layer_multi1(24,2048)\n self.fc_poseAware_multi2 = self.fc_layer_multi1(24,2048)\n self.fc_poseAware_multi3 = self.fc_layer_multi1(72,2048)\n self.fc_poseAware_multi4 = self.fc_layer_multi1(24,2048)\n self.fc_poseAware_multi5 = self.fc_layer_multi1(24,2048)\n #layer = 2\n \"\"\"\n self.fc_poseAware_multi3 = nn.Sequential(\n nn.Linear(72, 512),\n nn.ReLU(inplace=False),\n #fc2\n nn.Linear(512, 2048),\n )\n \"\"\"\n self.fc_poseAware_multi_cond0 = self.fc_layer_multi1(48,1024)\n self.fc_poseAware_multi_cond1 = self.fc_layer_multi1(24,1024)\n self.fc_poseAware_multi_cond2 = self.fc_layer_multi1(24,1024)\n self.fc_poseAware_multi_cond3 = self.fc_layer_multi1(72,1024)\n self.fc_poseAware_multi_cond4 = self.fc_layer_multi1(24,1024)\n self.fc_poseAware_multi_cond5 = self.fc_layer_multi1(24,1024)\n\n self.poseAware_multi_conv = nn.Sequential(\n #conv1_1\n nn.Conv2d(18, 18,\n kernel_size=3, stride=1, padding=1, bias=False),\n nn.BatchNorm2d(18),\n nn.ReLU(inplace=True),\n\n #conv2_1\n nn.Conv2d(18, 6,\n kernel_size=3, stride=1, padding=1, bias=False),\n nn.BatchNorm2d(6),\n nn.ReLU(inplace=True),\n\n #conv3_1\n nn.Conv2d(6, 3,\n kernel_size=3, stride=1, padding=1, bias=False),\n nn.BatchNorm2d(3),\n nn.ReLU(inplace=True),\n\n #conv3_1\n nn.Conv2d(3, 3,\n kernel_size=3, stride=1, padding=1, bias=False),\n nn.BatchNorm2d(3),\n nn.ReLU(inplace=True),\n\n #conv4_1\n nn.Conv2d(3, 3,\n kernel_size=3, stride=1, padding=1, bias=False),\n #nn.ReLU(inplace=True)\n nn.Sigmoid(),\n )\n \n #Encoder layer : in :(256,256,3)\n self.enc1 = nn.Sequential( \n nn.Conv2d(3, 64, 4, stride=2 ,padding=1)).to(device) 
#(128,128,64) \n\n self.enc2 = self.enc_mid(64, 128).to(device) #(64,64,128)\n self.enc3 = self.enc_mid(128, 256).to(device) #(32,32,256)\n self.enc4 = self.enc_mid(256, 512).to(device) #(16,16,512)\n self.enc5 = self.enc_mid(512, 512).to(device) #(8,8,512)\n self.enc6 = self.enc_mid(512, 512).to(device) #(4,4,512)\n self.enc7 = self.enc_mid(512, 1024).to(device) #(2,2,1024)\n\n self.enc8 = nn.Sequential( \n nn.LeakyReLU(inplace=False),\n nn.Conv2d(1024, 1024, 4, stride=2 ,padding=1)).to(device) #(1,1,1024)) \n\n self.enc_all = nn.Sequential(self.enc1,self.enc2,self.enc3,self.enc4,self.enc5,self.enc6,self.enc7,self.enc8)\n\n self.args.kernel_size = 3\n #print(\"args.skeleton_info:\",self.args.skeleton_info)\n self.args.skeleton_info = \"\" #should comment out if use offsets\n self.args.rotation = \"euler_angle\"\n #args.extra_conv = 0\n #self.static_encoder = StaticEncoder(self.args, topology)\n self.args.num_layers = 2\n if self.multi_topology_flg:\n self.SkeletonAwareEncoder_list = []\n self.SkeletonAwareGenerater_list = []\n topology , joints_group_list = net_modules.split_edges(topology)\n self.joints_group_list = joints_group_list\n \n #self.args.num_layers = 2\n self.args.num_layers = 1\n self.SkeletonAwareEncoder_list.append(Encoder(self.args, topology[0]).to(device)) #.to(device) \n self.args.num_layers = 1\n self.SkeletonAwareEncoder_list.append(Encoder(self.args, topology[1]).to(device)) #.to(device)\n self.args.num_layers = 1\n self.SkeletonAwareEncoder_list.append(Encoder(self.args, topology[2]).to(device)) #.to(device)\n #self.args.num_layers = 2\n self.args.num_layers = 1\n self.SkeletonAwareEncoder_list.append(Encoder(self.args, topology[3]).to(device)) #.to(device)\n self.args.num_layers = 1\n self.SkeletonAwareEncoder_list.append(Encoder(self.args, topology[4]).to(device)) #.to(device)\n self.args.num_layers = 1\n self.SkeletonAwareEncoder_list.append(Encoder(self.args, topology[5]).to(device)) #.to(device) \n for i in range(6): \n self.SkeletonAwareGenerater_list.append(Generater.Generater(2048,128,output_dim = 3 ,texture_resolution=texture_resolution).to(device)) #.to(device)\n else:\n self.SkeletonAwareEncoder = Encoder(self.args, topology) #.to(device)\n \n if self.separate_flg == False:\n if self.type == \"clothes\" or self.type == \"naked\": \n self.Generater = Generater.Generater(2048,128,output_dim = 3+0+0 ,texture_resolution=texture_resolution) #.to(device)\n elif self.type == \"color\": \n self.Generater = Generater.Generater(2048,128,output_dim = 0+3+0 ,texture_resolution=texture_resolution) #.to(device)\n elif self.type == \"clothes_and_color\": \n self.Generater = Generater.Generater(2048,128,output_dim = 3+3+0 ,texture_resolution=texture_resolution) #.to(device)\n else:\n if self.type == \"clothes\" or self.type == \"naked\": \n self.Generater_disp = Generater.Generater(2048,128,output_dim = 3 ,texture_resolution=texture_resolution , prediction_texture_mask = self.prediction_texture_mask) #.to(device)\n elif self.type == \"color\": \n self.Generater_color = Generater.Generater(2048,128,output_dim = 3 ,texture_resolution=texture_resolution , prediction_texture_mask = self.prediction_texture_mask) #.to(device)\n elif self.type == \"clothes_and_color\": \n self.Generater_disp = Generater.Generater(2048,128,output_dim = 3 ,texture_resolution=texture_resolution , prediction_texture_mask = self.prediction_texture_mask) #.to(device)\n self.Generater_color = Generater.Generater(2048,128,output_dim = 3 ,texture_resolution=texture_resolution , prediction_texture_mask = 
self.prediction_texture_mask) #.to(device)\n \n\n def enc_mid(self , feature_in , feature_out):\n layers = []\n layers.append(nn.LeakyReLU(inplace=False))\n layers.append(nn.Conv2d(feature_in, feature_out, 4, stride=2 ,padding=1)) \n layers.append(nn.BatchNorm2d(feature_out)) \n return nn.Sequential(*layers)\n\n def fc_layer(self , feature_in , feature_out):\n out_fc = nn.Sequential(\n #fc1\n #fc1\n nn.Linear(feature_in, 512),\n nn.ReLU(inplace=False),\n #fc2\n nn.Linear(512, feature_out),\n )\n return out_fc\n \n def fc_layer_multi1(self , feature_in , feature_out):\n out_fc = nn.Sequential(\n nn.Flatten(),\n #fc1\n nn.Linear(feature_in, 512),\n nn.ReLU(inplace=False),\n #fc2\n nn.Linear(512, feature_out),\n )\n return out_fc\n\n def fc_layer_concateneted(self , feature_in , feature_out):\n out_fc = nn.Sequential(\n nn.Flatten(),\n #fc1\n nn.Linear(feature_in, 2048*3),\n nn.ReLU(inplace=False),\n #fc2\n nn.Linear(2048*3, feature_out),\n )\n return out_fc\n\n \"\"\"\n def fc_layer_multi1(self , feature_in , feature_out):\n out_fc = nn.Sequential(\n nn.Flatten(),\n #fc1\n nn.Linear(feature_in, 256),\n nn.ReLU(inplace=False),\n #fc1\n nn.Linear(256, 512),\n nn.ReLU(inplace=False),\n #fc1\n nn.Linear(512, 1024),\n nn.ReLU(inplace=False),\n #fc2\n nn.Linear(1024, feature_out),\n )\n return out_fc\n \"\"\"\n\n def forward(self,smpl , condition_mu_texture = None):\n \n #kt_path = r\"D:\\Data\\Human\\Template-star-0.015\\kintree.bin\"\n #kintree = net_modules.load_ktfile(kt_path)\n\n #t_pose_path = r\"D:\\Data\\Human\\HUAWEI\\Iwamoto\\data\\smplparams_centered_new\\T_joints.bin\"\n #t_pose ,t_joint_length = net_modules.LoadStarSkeleton(t_pose_path,kintree)\n\n #t_pose = np.array([t_pose])\n #print(t_pose.shape)\n #t_pose = torch.from_numpy(t_pose.astype(np.float32)).to(device)\n\n \"\"\"\n offset_idx = 0\n self.offset_repr = []\n self.offset_repr.append(self.static_encoder(t_pose.to(device)))\n self.offset_repr = [self.static_encoder(t_pose)]\n offsets = [self.offset_repr[0][p][offset_idx] for p in range(self.args.num_layers + 1)] #####\n \"\"\"\n offsets = None #comment out if use offset\n \n if self.multi_topology_flg:\n x_list1 = []\n for i in range(6):\n joints_group = self.joints_group_list[i]\n joints_group_for_smpl = []\n for j in joints_group:\n joints_group_for_smpl.append(j)\n joints_group_for_smpl.append(j+1)\n joints_group_for_smpl.append(j+2)\n x_list1.append(self.SkeletonAwareEncoder_list[i](smpl[:,joints_group_for_smpl],offsets)) #[1,84,1]\n \n x_list2 = []\n #x_list1[0] = torch.squeeze(x_list1[0],-1)\n if condition_mu_texture != None:\n cMu = self.enc_all(condition_mu_texture)\n cMu = torch.squeeze(cMu , -1)\n cMu = torch.squeeze(cMu , -1)\n x_tmp0 = self.fc_poseAware_multi_cond0(x_list1[0])\n x_list2.append(torch.cat([cMu , x_tmp0] , dim = 1)) \n\n x_tmp1 = self.fc_poseAware_multi_cond1(x_list1[1])\n x_list2.append(torch.cat([cMu , x_tmp1] , dim = 1)) \n\n x_tmp2 = self.fc_poseAware_multi_cond2(x_list1[2])\n x_list2.append(torch.cat([cMu , x_tmp2] , dim = 1))\n\n x_tmp3 = self.fc_poseAware_multi_cond3(x_list1[3])\n x_list2.append(torch.cat([cMu , x_tmp3] , dim = 1)) \n\n x_tmp4 = self.fc_poseAware_multi_cond4(x_list1[4])\n x_list2.append(torch.cat([cMu , x_tmp4] , dim = 1)) \n\n x_tmp5 = self.fc_poseAware_multi_cond5(x_list1[5])\n x_list2.append(torch.cat([cMu , x_tmp5] , dim = 1))\n else:\n x_list2.append(self.fc_poseAware_multi0(x_list1[0]))\n x_list2.append(self.fc_poseAware_multi1(x_list1[1]))\n x_list2.append(self.fc_poseAware_multi2(x_list1[2]))\n\n 
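In the multi-topology branch, each body-part group produces its own embedding; the forward pass then concatenates the six vectors and compresses them back to a single 2048-d code for the generator via fc_poseAware_concateneted. A shape-only sketch of that fusion step (editor's illustration with random weights; the names here are not from the repository):

```python
import torch
import torch.nn as nn

batch = 2
parts = [torch.randn(batch, 2048) for _ in range(6)]  # one embedding per joint group

# Mirrors fc_layer_concateneted(2048 * 6, 2048) defined above.
fuse = nn.Sequential(
    nn.Flatten(),
    nn.Linear(2048 * 6, 2048 * 3),
    nn.ReLU(),
    nn.Linear(2048 * 3, 2048),
)

x = fuse(torch.cat(parts, dim=1))  # (2, 2048)
x = x.unsqueeze(-1).unsqueeze(-1)  # (2, 2048, 1, 1): a 1x1 feature map for the generator
print(x.shape)
```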
x_list2.append(self.fc_poseAware_multi3(x_list1[3]))\n x_list2.append(self.fc_poseAware_multi4(x_list1[4]))\n x_list2.append(self.fc_poseAware_multi5(x_list1[5]))\n \n #concatenate on 1d feature\n x = torch.cat(x_list2,axis = 1)\n x = self.fc_poseAware_concateneted(x)\n x = torch.unsqueeze(x,-1)\n x = torch.unsqueeze(x,-1)\n output_disp = self.Generater_disp(x) \n return output_disp \n else:\n x = self.SkeletonAwareEncoder(smpl,offsets) #[1,84,1]\n\n x = torch.squeeze(x,-1)\n\n if condition_mu_texture != None:\n x = self.fc_poseAware_cond(x)\n cMu = self.enc_all(condition_mu_texture)\n cMu = torch.squeeze(cMu , -1)\n cMu = torch.squeeze(cMu , -1)\n x = torch.cat([cMu , x] , dim = 1)\n else:\n x = self.fc_poseAware(x)\n \n x = torch.unsqueeze(x,-1)\n x = torch.unsqueeze(x,-1)\n\n if self.separate_flg == False:\n output = self.Generater(x) \n \n if self.type == \"clothes\" or self.type == \"naked\": \n return output\n elif self.type == \"color\" : \n return output\n elif self.type == \"clothes_and_color\":\n out_disp = output[:,0:3,:,:] \n out_color = output[:,3:3+3,:,:] \n return out_disp ,out_color\n else:\n raise AssertionError(\"not defined pose2texnet output\")\n else:\n if self.type == \"clothes\" or self.type == \"naked\": \n if self.prediction_texture_mask:\n output_disp , output_disp_mask = self.Generater_disp(x) \n return output_disp , output_disp_mask\n else:\n output_disp = self.Generater_disp(x) \n return output_disp\n elif self.type == \"color\": \n output_color = self.Generater_color(x) \n return output_color\n elif self.type == \"clothes_and_color\": \n output_disp = self.Generater_disp(x) \n output_color = self.Generater_color(x) \n return output_disp ,output_color\n else:\n raise AssertionError(\"not defined pose2texnet output\")\n\nclass discriminater(nn.Module):\n def __init__(self,device,part):\n super(discriminater,self).__init__()\n self.device = device\n\n self.conv_discriminater = nn.Sequential(\n #cnn1\n nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3), # in:(3,224*224)、out:(64,112*112)\n nn.BatchNorm2d(64),\n nn.ReLU()\n )\n\n n_features = 64\n self.dcgan_discriminater = nn.Sequential(\n nn.Conv2d(3, n_features,\n kernel_size=4, stride=2, padding=1, bias=False),\n nn.LeakyReLU(0.2, inplace=True),\n # conv2\n nn.Conv2d(n_features, n_features * 2,\n kernel_size=4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(n_features * 2),\n nn.LeakyReLU(0.2, inplace=True),\n # conv3\n nn.Conv2d(n_features * 2, n_features * 4,\n kernel_size=4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(n_features * 4),\n nn.LeakyReLU(0.2, inplace=True),\n # conv4\n nn.Conv2d(n_features * 4, n_features * 8,\n kernel_size=4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(n_features * 8),\n nn.LeakyReLU(0.2, inplace=True),\n # conv5\n nn.Conv2d(n_features * 8, 1,\n kernel_size=4, stride=2, padding=0, bias=False),\n nn.Sigmoid(),\n nn.Flatten(),\n # fmt: on\n )\n\n def forward(self,body_part):\n body_part = self.dcgan_discriminater(body_part)\n return body_part\n\nif __name__ == \"__main__\":\n args = option_parser.get_args()\n kt_path = r\"D:\\Data\\Human\\Template-star-0.015\\kintree.bin\"\n kintree = net_modules.load_ktfile(kt_path)\n print(\"kintree:\",kintree)\n joint_topology = net_modules.GetParentFromKintree(kintree)\n print(\"joint_topology\")\n print(type(joint_topology))\n print(joint_topology)\n\n edges = build_edge_topology(joint_topology, torch.zeros((len(joint_topology), 3)))\n print(\"edges\")\n print(len(edges))\n print(edges)\n \"\"\"\n device = 
torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n model_gen = pose2texnet(device,True,1,args,edges , \"True\")\n summary(model_gen)\n print(\"aaa\")\n summary(model_gen,[(1,72,3)])\n\n #x = torch.rand(1,72,3).to(device)\n #y = model_gen(x)\n #print(y.shape)\n\n #model_disc = discriminater(device,\"face\")\n #summary(model_disc,[(1,3,410,410)])\n #summary(model_disc,[(1,3,264,264)])\n #print(x.shape)\n\n \"\"\"\n x = torch.rand(1,24*3*3)\n\n crierion = nn.BCELoss()\n txr = model_gen(x)\n face = model_disc(txr)\n print(face.shape)\n y_real = torch.full_like(face, 1)\n loss_real = crierion(face, y_real)\n print(loss_real)","repo_name":"diegothomas/Interactive-Animatable-Avatar","sub_path":"Pose2Texture/models/pose2texnetWithPoseAware.py","file_name":"pose2texnetWithPoseAware.py","file_ext":"py","file_size_in_byte":18460,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"39719482404","text":"\"\"\"\nGiven a binary tree, return the preorder traversal of its nodes values.\nNOTE: Using recursion is not allowed.\n\"\"\"\nclass Solution:\n\t# @param A : root node of tree\n\t# @return a list of integers\n\tdef preorderTraversal(self, A):\n\t stack = []\n\t root = A\n\t ans = []\n\t while len(stack) > 0 or root != None:\n\t if root != None:\n\t ans.append(root.val)\n\t stack.append(root)\n\t root = root.left\n\t else:\n\t node = stack.pop()\n\t root = node.right\n\t return ans","repo_name":"anurag5398/DSA-Problems","sub_path":"Trees/PreOrder.py","file_name":"PreOrder.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26354333080","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport os\nimport time\nfrom six.moves import xrange\nfrom pprint import pprint\n\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nfrom models.util import log\n\n\nclass Trainer(object):\n\n @staticmethod\n def get_model_class(model_name):\n if model_name == 'synthesis_baseline':\n from models.baselines.model_synthesis import Model\n elif model_name == 'induction_baseline':\n from models.baselines.model_induction import Model\n elif model_name == 'summarizer':\n from models.baselines.model_summarizer import Model\n elif model_name == 'full':\n from models.model_full import Model\n else:\n raise ValueError(model_name)\n return Model\n\n def __init__(self,\n config,\n dataset,\n dataset_test):\n self.config = config\n hyper_parameter_str = 'bs_{}_lr_{}_{}_cell_{}'.format(\n config.batch_size, config.learning_rate,\n config.encoder_rnn_type,\n config.num_lstm_cell_units)\n if config.scheduled_sampling:\n hyper_parameter_str += '_sd_{}'.format(\n config.scheduled_sampling_decay_steps)\n hyper_parameter_str += '_k_{}'.format(self.config.num_k)\n\n self.train_dir = './train_dir/%s-%s-%s-%s-%s-%s' % (\n config.dataset_type,\n '_'.join(config.dataset_path.split('/')),\n config.model,\n config.prefix,\n hyper_parameter_str,\n time.strftime(\"%Y%m%d-%H%M%S\")\n )\n\n if not os.path.exists(self.train_dir): os.makedirs(self.train_dir)\n log.infov(\"Train Dir: %s\", self.train_dir)\n\n # --- input ops ---\n self.batch_size = config.batch_size\n\n if config.dataset_type == 'karel':\n from karel_env.input_ops_karel import create_input_ops\n elif config.dataset_type == 'vizdoom':\n from vizdoom_env.input_ops_vizdoom import create_input_ops\n else:\n raise 
ValueError(config.dataset_type)\n\n        _, self.batch_train = create_input_ops(dataset, self.batch_size,\n                                               is_training=True)\n        _, self.batch_test = create_input_ops(dataset_test, self.batch_size,\n                                              is_training=False)\n        # --- optimizer ---\n        self.global_step = tf.contrib.framework.get_or_create_global_step(\n            graph=None)\n\n        # --- create model ---\n        Model = self.get_model_class(config.model)\n        log.infov(\"Using Model class: %s\", Model)\n        self.model = Model(config, debug_information=config.debug,\n                           global_step=self.global_step)\n\n        if config.lr_weight_decay:\n            self.init_learning_rate = config.learning_rate\n            self.learning_rate = tf.train.exponential_decay(\n                self.init_learning_rate,\n                global_step=self.global_step,\n                decay_steps=10000,\n                decay_rate=0.5,\n                staircase=True,\n                name='decaying_learning_rate'\n            )\n        else:\n            self.learning_rate = config.learning_rate\n\n        self.check_op = tf.no_op()\n\n        # --- checkpoint and monitoring ---\n        all_vars = tf.trainable_variables()\n        log.warn(\"********* var ********** \")\n        slim.model_analyzer.analyze_vars(all_vars, print_info=True)\n\n        self.optimizer = tf.contrib.layers.optimize_loss(\n            loss=self.model.loss,\n            global_step=self.global_step,\n            learning_rate=self.learning_rate,\n            optimizer=tf.train.AdamOptimizer,\n            clip_gradients=20.0,\n            name='optimizer_pixel_loss'\n        )\n\n        self.train_summary_op = tf.summary.merge_all(key='train')\n        self.test_summary_op = tf.summary.merge_all(key='test')\n\n        self.saver = tf.train.Saver(max_to_keep=100)\n        self.pretrain_saver = tf.train.Saver(var_list=all_vars, max_to_keep=1)\n        self.summary_writer = tf.summary.FileWriter(self.train_dir)\n        self.log_step = self.config.log_step\n        self.test_sample_step = self.config.test_sample_step\n        self.write_summary_step = self.config.write_summary_step\n\n        self.checkpoint_secs = 600 # 10 min\n\n        self.supervisor = tf.train.Supervisor(\n            logdir=self.train_dir,\n            is_chief=True,\n            saver=None,\n            summary_op=None,\n            summary_writer=self.summary_writer,\n            save_summaries_secs=300,\n            save_model_secs=self.checkpoint_secs,\n            global_step=self.global_step,\n        )\n\n        session_config = tf.ConfigProto(\n            allow_soft_placement=True,\n            gpu_options=tf.GPUOptions(allow_growth=True),\n            device_count={'GPU': 1},\n        )\n        self.session = self.supervisor.prepare_or_wait_for_session(\n            config=session_config)\n\n        self.ckpt_path = config.checkpoint\n        if self.ckpt_path is not None:\n            log.info(\"Checkpoint path: %s\", self.ckpt_path)\n            self.pretrain_saver.restore(self.session, self.ckpt_path)\n            log.info(\"Loaded the pretrain parameters from the provided \" +\n                     \"checkpoint path\")\n\n    def train(self):\n        log.infov(\"Training Starts!\")\n        pprint(self.batch_train)\n\n        max_steps = 1000000\n\n        ckpt_save_step = 1000\n        log_step = self.log_step\n        test_sample_step = self.test_sample_step\n        write_summary_step = self.write_summary_step\n\n        for s in xrange(max_steps):\n            # train a single step\n            step, train_summary, loss, output, step_time = \\\n                self.run_single_step(\n                    self.batch_train, step=s, is_train=True)\n            if s % log_step == 0:\n                self.log_step_message(step, loss, step_time)\n\n            # periodic inference\n            if s % test_sample_step == 0:\n                test_step, test_summary, test_loss, output, test_step_time = \\\n                    self.run_test(self.batch_test)\n                self.summary_writer.add_summary(test_summary,\n                                                global_step=test_step)\n                self.log_step_message(step, test_loss, test_step_time, is_train=False)\n\n            if s % write_summary_step == 0:\n                self.summary_writer.add_summary(train_summary,\n                                                global_step=step)\n\n            if s % ckpt_save_step == 0:\n                log.infov(\"Saved checkpoint at %d\", s)\n                
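With --lr_weight_decay, the schedule built above is staircase exponential decay: the learning rate halves every 10,000 steps. Written out as plain Python (an illustrative check of the same formula, not code from the repository):

```python
def decayed_lr(init_lr, step, decay_steps=10000, decay_rate=0.5):
    # staircase=True means integer division, i.e. a stepwise schedule
    return init_lr * decay_rate ** (step // decay_steps)

print(decayed_lr(0.001, 0))      # 0.001
print(decayed_lr(0.001, 25000))  # 0.00025 (halved twice)
```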
self.saver.save(\n                    self.session, os.path.join(self.train_dir, 'model'),\n                    global_step=step)\n\n    def run_single_step(self, batch, step=None, is_train=True):\n        _start_time = time.time()\n\n        batch_chunk = self.session.run(batch)\n\n        fetch = [self.global_step, self.train_summary_op, self.model.output,\n                 self.model.loss, self.check_op, self.optimizer]\n\n        feed_dict = self.model.get_feed_dict(\n            batch_chunk, step=step,\n            is_training=is_train,\n        )\n\n        fetch_values = self.session.run(fetch, feed_dict=feed_dict)\n\n        [step, summary, output, loss] = fetch_values[:4]\n\n        _end_time = time.time()\n\n        return step, summary, loss, output, (_end_time - _start_time)\n\n    def run_test(self, batch):\n        _start_time = time.time()\n\n        batch_chunk = self.session.run(batch)\n\n        feed_dict = self.model.get_feed_dict(\n            batch_chunk,\n            is_training=False,\n        )\n\n        step, summary, loss, output = self.session.run(\n            [self.global_step, self.test_summary_op, self.model.loss,\n             self.model.output],\n            feed_dict=feed_dict\n        )\n\n        _end_time = time.time()\n\n        return step, summary, loss, output, (_end_time - _start_time)\n\n    def log_step_message(self, step, loss, step_time, is_train=True):\n        if step_time == 0: step_time = 0.001\n        log_fn = (is_train and log.info or log.infov)\n        log_fn((\" [{split_mode:5s} step {step:4d}] \" +\n                \"Loss: {loss:.5f} \" +\n                \"({sec_per_batch:.3f} sec/batch, {instance_per_sec:.3f} \" +\n                \"instances/sec) \"\n                ).format(split_mode=(is_train and 'train' or 'val'),\n                         step=step,\n                         loss=loss,\n                         sec_per_batch=step_time,\n                         instance_per_sec=self.batch_size / step_time\n                         )\n               )\n\n\ndef main():\n    import argparse\n    parser = argparse.ArgumentParser(\n        formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n    parser.add_argument('--debug', action='store_true', default=False,\n                        help='set to True to see debugging visualization')\n    parser.add_argument('--prefix', type=str, default='default',\n                        help='a nickname for the training')\n    parser.add_argument('--model', type=str, default='full',\n                        choices=['synthesis_baseline', 'induction_baseline',\n                                 'summarizer', 'full'],\n                        help='specify which type of models to train')\n    parser.add_argument('--dataset_type', type=str, default='karel',\n                        choices=['karel', 'vizdoom'])\n    parser.add_argument('--dataset_path', type=str,\n                        default='datasets/karel_dataset',\n                        help='the path to your dataset')\n    parser.add_argument('--checkpoint', type=str, default=None,\n                        help='specify the path to a pre-trained checkpoint')\n    # log\n    parser.add_argument('--log_step', type=int, default=10,\n                        help='the frequency of outputting log info')\n    parser.add_argument('--write_summary_step', type=int, default=100,\n                        help='the frequency of writing TensorBoard summaries')\n    parser.add_argument('--test_sample_step', type=int, default=100,\n                        help='the frequency of performing '\n                        'testing inference during training')\n    # hyperparameters\n    parser.add_argument('--num_k', type=int, default=10,\n                        help='the number of seen demonstrations')\n    parser.add_argument('--batch_size', type=int, default=32)\n    parser.add_argument('--learning_rate', type=float, default=0.001)\n    parser.add_argument('--lr_weight_decay', action='store_true', default=False,\n                        help='set to `True` to perform exponential weight '\n                        'decay on the learning rate')\n    parser.add_argument('--scheduled_sampling', action='store_true', default=False,\n                        help='set to True to train models with scheduled sampling')\n    parser.add_argument('--scheduled_sampling_decay_steps', type=int, default=20000,\n                        help='the number of training steps required to decay '\n                        'scheduled sampling 
probability to minimum.')\n    # model hyperparameters\n    parser.add_argument('--encoder_rnn_type', default='lstm',\n                        choices=['lstm', 'rnn', 'gru'])\n    parser.add_argument('--num_lstm_cell_units', type=int, default=512)\n    parser.add_argument('--demo_aggregation', type=str, default='avgpool',\n                        choices=['concat', 'avgpool', 'maxpool'],\n                        help='how to aggregate the demo features')\n\n    config = parser.parse_args()\n\n    if config.dataset_type == 'karel':\n        import karel_env.dataset_karel as dataset\n        dataset_train, dataset_test, dataset_val \\\n            = dataset.create_default_splits(config.dataset_path, num_k=config.num_k)\n    elif config.dataset_type == 'vizdoom':\n        import vizdoom_env.dataset_vizdoom as dataset\n        dataset_train, dataset_test, dataset_val \\\n            = dataset.create_default_splits(config.dataset_path, num_k=config.num_k)\n    else:\n        raise ValueError(config.dataset_type)\n\n    # Set data dimension in configuration\n    data_tuple = dataset_train.get_data(dataset_train.ids[0])\n    # s_h: state history, demonstrations\n    # a_h: action history, sequence of actions\n    # per: sequence of perception primitives\n    program, _, s_h, test_s_h, a_h, _, _, _, program_len, demo_len, test_demo_len, \\\n        per, test_per = data_tuple[:13]\n\n    config.dim_program_token = np.asarray(program.shape)[0]\n    config.max_program_len = np.asarray(program.shape)[1]\n    config.k = np.asarray(s_h.shape)[0]\n    config.test_k = np.asarray(test_s_h.shape)[0]\n    config.max_demo_len = np.asarray(s_h.shape)[1]\n    config.h = np.asarray(s_h.shape)[2]\n    config.w = np.asarray(s_h.shape)[3]\n    config.depth = np.asarray(s_h.shape)[4]\n    config.action_space = np.asarray(a_h.shape)[2]\n    config.per_dim = np.asarray(per.shape)[2]\n    if config.dataset_type == 'karel':\n        config.dsl_type = dataset_train.dsl_type\n        config.env_type = dataset_train.env_type\n        config.vizdoom_pos_keys = []\n        config.vizdoom_max_init_pos_len = -1\n        config.perception_type = ''\n        config.level = None\n    elif config.dataset_type == 'vizdoom':\n        config.dsl_type = 'vizdoom_default' # vizdoom has 1 dsl type for now\n        config.env_type = 'vizdoom_default' # vizdoom has 1 env type\n        config.vizdoom_pos_keys = dataset_train.vizdoom_pos_keys\n        config.vizdoom_max_init_pos_len = dataset_train.vizdoom_max_init_pos_len\n        config.perception_type = dataset_train.perception_type\n        config.level = dataset_train.level\n\n    trainer = Trainer(config, dataset_train, dataset_test)\n\n    log.warning(\"dataset: %s, learning_rate: %f\",\n                config.dataset_path, config.learning_rate)\n    trainer.train()\n\nif __name__ == '__main__':\n    main()\n","repo_name":"shaohua0116/demo2program","sub_path":"trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":13939,"program_lang":"python","lang":"en","doc_type":"code","stars":100,"dataset":"github-code","pt":"81"} +{"seq_id":"33441240125","text":"from azure.core.exceptions import HttpResponseError\nimport msrest.serialization\n\n\nclass CollectionOfBookingAppointment(msrest.serialization.Model):\n    \"\"\"Collection of bookingAppointment.\n\n    :param additional_properties: Unmatched properties from the message are deserialized to this\n    collection.\n    :type additional_properties: dict[str, object]\n    :param value:\n    :type value: list[~bookings.models.MicrosoftGraphBookingAppointment]\n    :param odata_next_link:\n    :type odata_next_link: str\n    \"\"\"\n\n    _attribute_map = {\n        'additional_properties': {'key': '', 'type': '{object}'},\n        'value': {'key': 'value', 'type': '[MicrosoftGraphBookingAppointment]'},\n        'odata_next_link': {'key': '@odata\\\\.nextLink', 'type': 'str'},\n    }\n\n    def 
__init__(\n self,\n **kwargs\n ):\n super(CollectionOfBookingAppointment, self).__init__(**kwargs)\n self.additional_properties = kwargs.get('additional_properties', None)\n self.value = kwargs.get('value', None)\n self.odata_next_link = kwargs.get('odata_next_link', None)\n\n\nclass CollectionOfBookingAppointment0(msrest.serialization.Model):\n \"\"\"Collection of bookingAppointment.\n\n :param additional_properties: Unmatched properties from the message are deserialized to this\n collection.\n :type additional_properties: dict[str, object]\n :param value:\n :type value: list[~bookings.models.MicrosoftGraphBookingAppointment]\n :param odata_next_link:\n :type odata_next_link: str\n \"\"\"\n\n _attribute_map = {\n 'additional_properties': {'key': '', 'type': '{object}'},\n 'value': {'key': 'value', 'type': '[MicrosoftGraphBookingAppointment]'},\n 'odata_next_link': {'key': '@odata\\\\.nextLink', 'type': 'str'},\n }\n\n def __init__(\n self,\n **kwargs\n ):\n super(CollectionOfBookingAppointment0, self).__init__(**kwargs)\n self.additional_properties = kwargs.get('additional_properties', None)\n self.value = kwargs.get('value', None)\n self.odata_next_link = kwargs.get('odata_next_link', None)\n\n\nclass CollectionOfBookingBusiness(msrest.serialization.Model):\n \"\"\"Collection of bookingBusiness.\n\n :param additional_properties: Unmatched properties from the message are deserialized to this\n collection.\n :type additional_properties: dict[str, object]\n :param value:\n :type value: list[~bookings.models.MicrosoftGraphBookingBusiness]\n :param odata_next_link:\n :type odata_next_link: str\n \"\"\"\n\n _attribute_map = {\n 'additional_properties': {'key': '', 'type': '{object}'},\n 'value': {'key': 'value', 'type': '[MicrosoftGraphBookingBusiness]'},\n 'odata_next_link': {'key': '@odata\\\\.nextLink', 'type': 'str'},\n }\n\n def __init__(\n self,\n **kwargs\n ):\n super(CollectionOfBookingBusiness, self).__init__(**kwargs)\n self.additional_properties = kwargs.get('additional_properties', None)\n self.value = kwargs.get('value', None)\n self.odata_next_link = kwargs.get('odata_next_link', None)\n\n\nclass CollectionOfBookingCurrency(msrest.serialization.Model):\n \"\"\"Collection of bookingCurrency.\n\n :param additional_properties: Unmatched properties from the message are deserialized to this\n collection.\n :type additional_properties: dict[str, object]\n :param value:\n :type value: list[~bookings.models.MicrosoftGraphBookingCurrency]\n :param odata_next_link:\n :type odata_next_link: str\n \"\"\"\n\n _attribute_map = {\n 'additional_properties': {'key': '', 'type': '{object}'},\n 'value': {'key': 'value', 'type': '[MicrosoftGraphBookingCurrency]'},\n 'odata_next_link': {'key': '@odata\\\\.nextLink', 'type': 'str'},\n }\n\n def __init__(\n self,\n **kwargs\n ):\n super(CollectionOfBookingCurrency, self).__init__(**kwargs)\n self.additional_properties = kwargs.get('additional_properties', None)\n self.value = kwargs.get('value', None)\n self.odata_next_link = kwargs.get('odata_next_link', None)\n\n\nclass CollectionOfBookingCustomer(msrest.serialization.Model):\n \"\"\"Collection of bookingCustomer.\n\n :param additional_properties: Unmatched properties from the message are deserialized to this\n collection.\n :type additional_properties: dict[str, object]\n :param value:\n :type value: list[~bookings.models.MicrosoftGraphBookingCustomer]\n :param odata_next_link:\n :type odata_next_link: str\n \"\"\"\n\n _attribute_map = {\n 'additional_properties': {'key': '', 'type': '{object}'},\n 
'value': {'key': 'value', 'type': '[MicrosoftGraphBookingCustomer]'},\n 'odata_next_link': {'key': '@odata\\\\.nextLink', 'type': 'str'},\n }\n\n def __init__(\n self,\n **kwargs\n ):\n super(CollectionOfBookingCustomer, self).__init__(**kwargs)\n self.additional_properties = kwargs.get('additional_properties', None)\n self.value = kwargs.get('value', None)\n self.odata_next_link = kwargs.get('odata_next_link', None)\n\n\nclass CollectionOfBookingService(msrest.serialization.Model):\n \"\"\"Collection of bookingService.\n\n :param additional_properties: Unmatched properties from the message are deserialized to this\n collection.\n :type additional_properties: dict[str, object]\n :param value:\n :type value: list[~bookings.models.MicrosoftGraphBookingService]\n :param odata_next_link:\n :type odata_next_link: str\n \"\"\"\n\n _attribute_map = {\n 'additional_properties': {'key': '', 'type': '{object}'},\n 'value': {'key': 'value', 'type': '[MicrosoftGraphBookingService]'},\n 'odata_next_link': {'key': '@odata\\\\.nextLink', 'type': 'str'},\n }\n\n def __init__(\n self,\n **kwargs\n ):\n super(CollectionOfBookingService, self).__init__(**kwargs)\n self.additional_properties = kwargs.get('additional_properties', None)\n self.value = kwargs.get('value', None)\n self.odata_next_link = kwargs.get('odata_next_link', None)\n\n\nclass CollectionOfBookingStaffMember(msrest.serialization.Model):\n \"\"\"Collection of bookingStaffMember.\n\n :param additional_properties: Unmatched properties from the message are deserialized to this\n collection.\n :type additional_properties: dict[str, object]\n :param value:\n :type value: list[~bookings.models.MicrosoftGraphBookingStaffMember]\n :param odata_next_link:\n :type odata_next_link: str\n \"\"\"\n\n _attribute_map = {\n 'additional_properties': {'key': '', 'type': '{object}'},\n 'value': {'key': 'value', 'type': '[MicrosoftGraphBookingStaffMember]'},\n 'odata_next_link': {'key': '@odata\\\\.nextLink', 'type': 'str'},\n }\n\n def __init__(\n self,\n **kwargs\n ):\n super(CollectionOfBookingStaffMember, self).__init__(**kwargs)\n self.additional_properties = kwargs.get('additional_properties', None)\n self.value = kwargs.get('value', None)\n self.odata_next_link = kwargs.get('odata_next_link', None)\n\n\nclass MicrosoftGraphEntity(msrest.serialization.Model):\n \"\"\"entity.\n\n :param additional_properties: Unmatched properties from the message are deserialized to this\n collection.\n :type additional_properties: dict[str, object]\n :param id: Read-only.\n :type id: str\n \"\"\"\n\n _attribute_map = {\n 'additional_properties': {'key': '', 'type': '{object}'},\n 'id': {'key': 'id', 'type': 'str'},\n }\n\n def __init__(\n self,\n **kwargs\n ):\n super(MicrosoftGraphEntity, self).__init__(**kwargs)\n self.additional_properties = kwargs.get('additional_properties', None)\n self.id = kwargs.get('id', None)\n\n\nclass MicrosoftGraphBookingAppointment(MicrosoftGraphEntity):\n \"\"\"Represents a booked appointment of a service by a customer in a business.\n\n :param id: Read-only.\n :type id: str\n :param additional_properties: Unmatched properties from the message are deserialized to this\n collection.\n :type additional_properties: dict[str, object]\n :param additional_information:\n :type additional_information: str\n :param customer_email_address:\n :type customer_email_address: str\n :param customer_id: The id of the booking customer associated with this appointment.\n :type customer_id: str\n :param customer_location: location.\n :type customer_location: 
~bookings.models.MicrosoftGraphLocation\n :param customer_name:\n :type customer_name: str\n :param customer_notes: Notes from the customer associated with this appointment.\n :type customer_notes: str\n :param customer_phone:\n :type customer_phone: str\n :param duration:\n :type duration: ~datetime.timedelta\n :param end: dateTimeTimeZone.\n :type end: ~bookings.models.MicrosoftGraphDateTimeZone\n :param invoice_amount:\n :type invoice_amount: float\n :param invoice_date: dateTimeTimeZone.\n :type invoice_date: ~bookings.models.MicrosoftGraphDateTimeZone\n :param invoice_id:\n :type invoice_id: str\n :param invoice_status: Possible values include: \"draft\", \"reviewing\", \"open\", \"canceled\",\n \"paid\", \"corrective\".\n :type invoice_status: str or ~bookings.models.MicrosoftGraphBookingInvoiceStatus\n :param invoice_url:\n :type invoice_url: str\n :param is_location_online:\n :type is_location_online: bool\n :param online_meeting_url:\n :type online_meeting_url: str\n :param opt_out_of_customer_email:\n :type opt_out_of_customer_email: bool\n :param post_buffer:\n :type post_buffer: ~datetime.timedelta\n :param pre_buffer:\n :type pre_buffer: ~datetime.timedelta\n :param price:\n :type price: float\n :param price_type: Possible values include: \"undefined\", \"fixedPrice\", \"startingAt\", \"hourly\",\n \"free\", \"priceVaries\", \"callUs\", \"notSet\".\n :type price_type: str or ~bookings.models.MicrosoftGraphBookingPriceType\n :param reminders:\n :type reminders: list[~bookings.models.MicrosoftGraphBookingReminder]\n :param self_service_appointment_id:\n :type self_service_appointment_id: str\n :param service_id: The id of the booking service associated with this appointment.\n :type service_id: str\n :param service_location: location.\n :type service_location: ~bookings.models.MicrosoftGraphLocation\n :param service_name: The name of the booking service associated with this appointment.\n :type service_name: str\n :param service_notes:\n :type service_notes: str\n :param staff_member_ids:\n :type staff_member_ids: list[str]\n :param start: dateTimeTimeZone.\n :type start: ~bookings.models.MicrosoftGraphDateTimeZone\n \"\"\"\n\n _attribute_map = {\n 'id': {'key': 'id', 'type': 'str'},\n 'additional_properties': {'key': '', 'type': '{object}'},\n 'additional_information': {'key': 'additionalInformation', 'type': 'str'},\n 'customer_email_address': {'key': 'customerEmailAddress', 'type': 'str'},\n 'customer_id': {'key': 'customerId', 'type': 'str'},\n 'customer_location': {'key': 'customerLocation', 'type': 'MicrosoftGraphLocation'},\n 'customer_name': {'key': 'customerName', 'type': 'str'},\n 'customer_notes': {'key': 'customerNotes', 'type': 'str'},\n 'customer_phone': {'key': 'customerPhone', 'type': 'str'},\n 'duration': {'key': 'duration', 'type': 'duration'},\n 'end': {'key': 'end', 'type': 'MicrosoftGraphDateTimeZone'},\n 'invoice_amount': {'key': 'invoiceAmount', 'type': 'float'},\n 'invoice_date': {'key': 'invoiceDate', 'type': 'MicrosoftGraphDateTimeZone'},\n 'invoice_id': {'key': 'invoiceId', 'type': 'str'},\n 'invoice_status': {'key': 'invoiceStatus', 'type': 'str'},\n 'invoice_url': {'key': 'invoiceUrl', 'type': 'str'},\n 'is_location_online': {'key': 'isLocationOnline', 'type': 'bool'},\n 'online_meeting_url': {'key': 'onlineMeetingUrl', 'type': 'str'},\n 'opt_out_of_customer_email': {'key': 'optOutOfCustomerEmail', 'type': 'bool'},\n 'post_buffer': {'key': 'postBuffer', 'type': 'duration'},\n 'pre_buffer': {'key': 'preBuffer', 'type': 'duration'},\n 'price': {'key': 
'price', 'type': 'float'},\n 'price_type': {'key': 'priceType', 'type': 'str'},\n 'reminders': {'key': 'reminders', 'type': '[MicrosoftGraphBookingReminder]'},\n 'self_service_appointment_id': {'key': 'selfServiceAppointmentId', 'type': 'str'},\n 'service_id': {'key': 'serviceId', 'type': 'str'},\n 'service_location': {'key': 'serviceLocation', 'type': 'MicrosoftGraphLocation'},\n 'service_name': {'key': 'serviceName', 'type': 'str'},\n 'service_notes': {'key': 'serviceNotes', 'type': 'str'},\n 'staff_member_ids': {'key': 'staffMemberIds', 'type': '[str]'},\n 'start': {'key': 'start', 'type': 'MicrosoftGraphDateTimeZone'},\n }\n\n def __init__(\n self,\n **kwargs\n ):\n super(MicrosoftGraphBookingAppointment, self).__init__(**kwargs)\n self.additional_properties = kwargs.get('additional_properties', None)\n self.additional_information = kwargs.get('additional_information', None)\n self.customer_email_address = kwargs.get('customer_email_address', None)\n self.customer_id = kwargs.get('customer_id', None)\n self.customer_location = kwargs.get('customer_location', None)\n self.customer_name = kwargs.get('customer_name', None)\n self.customer_notes = kwargs.get('customer_notes', None)\n self.customer_phone = kwargs.get('customer_phone', None)\n self.duration = kwargs.get('duration', None)\n self.end = kwargs.get('end', None)\n self.invoice_amount = kwargs.get('invoice_amount', None)\n self.invoice_date = kwargs.get('invoice_date', None)\n self.invoice_id = kwargs.get('invoice_id', None)\n self.invoice_status = kwargs.get('invoice_status', None)\n self.invoice_url = kwargs.get('invoice_url', None)\n self.is_location_online = kwargs.get('is_location_online', None)\n self.online_meeting_url = kwargs.get('online_meeting_url', None)\n self.opt_out_of_customer_email = kwargs.get('opt_out_of_customer_email', None)\n self.post_buffer = kwargs.get('post_buffer', None)\n self.pre_buffer = kwargs.get('pre_buffer', None)\n self.price = kwargs.get('price', None)\n self.price_type = kwargs.get('price_type', None)\n self.reminders = kwargs.get('reminders', None)\n self.self_service_appointment_id = kwargs.get('self_service_appointment_id', None)\n self.service_id = kwargs.get('service_id', None)\n self.service_location = kwargs.get('service_location', None)\n self.service_name = kwargs.get('service_name', None)\n self.service_notes = kwargs.get('service_notes', None)\n self.staff_member_ids = kwargs.get('staff_member_ids', None)\n self.start = kwargs.get('start', None)\n\n\nclass MicrosoftGraphBookingNamedEntity(MicrosoftGraphEntity):\n \"\"\"Booking entities that provide a display name.\n\n :param id: Read-only.\n :type id: str\n :param additional_properties: Unmatched properties from the message are deserialized to this\n collection.\n :type additional_properties: dict[str, object]\n :param display_name: Display name of this entity.\n :type display_name: str\n \"\"\"\n\n _attribute_map = {\n 'id': {'key': 'id', 'type': 'str'},\n 'additional_properties': {'key': '', 'type': '{object}'},\n 'display_name': {'key': 'displayName', 'type': 'str'},\n }\n\n def __init__(\n self,\n **kwargs\n ):\n super(MicrosoftGraphBookingNamedEntity, self).__init__(**kwargs)\n self.additional_properties = kwargs.get('additional_properties', None)\n self.display_name = kwargs.get('display_name', None)\n\n\nclass MicrosoftGraphBookingBusiness(MicrosoftGraphBookingNamedEntity):\n \"\"\"Represents a Microsot Bookings Business.\n\n :param id: Read-only.\n :type id: str\n :param display_name: Display name of this entity.\n :type 
display_name: str\n :param additional_properties: Unmatched properties from the message are deserialized to this\n collection.\n :type additional_properties: dict[str, object]\n :param address: physicalAddress.\n :type address: ~bookings.models.MicrosoftGraphPhysicalAddress\n :param business_hours:\n :type business_hours: list[~bookings.models.MicrosoftGraphBookingWorkHours]\n :param business_type:\n :type business_type: str\n :param default_currency_iso:\n :type default_currency_iso: str\n :param email:\n :type email: str\n :param is_published:\n :type is_published: bool\n :param phone:\n :type phone: str\n :param public_url:\n :type public_url: str\n :param scheduling_policy: This type represents the set of policies that dictate how bookings\n can be created in a Booking Calendar.\n :type scheduling_policy: ~bookings.models.MicrosoftGraphBookingSchedulingPolicy\n :param web_site_url: The URL of the business web site.\n :type web_site_url: str\n :param appointments: All appointments in this business.\n :type appointments: list[~bookings.models.MicrosoftGraphBookingAppointment]\n :param calendar_view: A calendar view of appointments in this business.\n :type calendar_view: list[~bookings.models.MicrosoftGraphBookingAppointment]\n :param customers: All customers of this business.\n :type customers: list[~bookings.models.MicrosoftGraphBookingCustomer]\n :param services: All services offered by this business.\n :type services: list[~bookings.models.MicrosoftGraphBookingService]\n :param staff_members: All staff members that provides services in this business.\n :type staff_members: list[~bookings.models.MicrosoftGraphBookingStaffMember]\n \"\"\"\n\n _attribute_map = {\n 'id': {'key': 'id', 'type': 'str'},\n 'display_name': {'key': 'displayName', 'type': 'str'},\n 'additional_properties': {'key': '', 'type': '{object}'},\n 'address': {'key': 'address', 'type': 'MicrosoftGraphPhysicalAddress'},\n 'business_hours': {'key': 'businessHours', 'type': '[MicrosoftGraphBookingWorkHours]'},\n 'business_type': {'key': 'businessType', 'type': 'str'},\n 'default_currency_iso': {'key': 'defaultCurrencyIso', 'type': 'str'},\n 'email': {'key': 'email', 'type': 'str'},\n 'is_published': {'key': 'isPublished', 'type': 'bool'},\n 'phone': {'key': 'phone', 'type': 'str'},\n 'public_url': {'key': 'publicUrl', 'type': 'str'},\n 'scheduling_policy': {'key': 'schedulingPolicy', 'type': 'MicrosoftGraphBookingSchedulingPolicy'},\n 'web_site_url': {'key': 'webSiteUrl', 'type': 'str'},\n 'appointments': {'key': 'appointments', 'type': '[MicrosoftGraphBookingAppointment]'},\n 'calendar_view': {'key': 'calendarView', 'type': '[MicrosoftGraphBookingAppointment]'},\n 'customers': {'key': 'customers', 'type': '[MicrosoftGraphBookingCustomer]'},\n 'services': {'key': 'services', 'type': '[MicrosoftGraphBookingService]'},\n 'staff_members': {'key': 'staffMembers', 'type': '[MicrosoftGraphBookingStaffMember]'},\n }\n\n def __init__(\n self,\n **kwargs\n ):\n super(MicrosoftGraphBookingBusiness, self).__init__(**kwargs)\n self.additional_properties = kwargs.get('additional_properties', None)\n self.address = kwargs.get('address', None)\n self.business_hours = kwargs.get('business_hours', None)\n self.business_type = kwargs.get('business_type', None)\n self.default_currency_iso = kwargs.get('default_currency_iso', None)\n self.email = kwargs.get('email', None)\n self.is_published = kwargs.get('is_published', None)\n self.phone = kwargs.get('phone', None)\n self.public_url = kwargs.get('public_url', None)\n self.scheduling_policy 
= kwargs.get('scheduling_policy', None)\n self.web_site_url = kwargs.get('web_site_url', None)\n self.appointments = kwargs.get('appointments', None)\n self.calendar_view = kwargs.get('calendar_view', None)\n self.customers = kwargs.get('customers', None)\n self.services = kwargs.get('services', None)\n self.staff_members = kwargs.get('staff_members', None)\n\n\nclass MicrosoftGraphBookingCurrency(MicrosoftGraphEntity):\n \"\"\"bookingCurrency.\n\n :param id: Read-only.\n :type id: str\n :param additional_properties: Unmatched properties from the message are deserialized to this\n collection.\n :type additional_properties: dict[str, object]\n :param symbol:\n :type symbol: str\n \"\"\"\n\n _attribute_map = {\n 'id': {'key': 'id', 'type': 'str'},\n 'additional_properties': {'key': '', 'type': '{object}'},\n 'symbol': {'key': 'symbol', 'type': 'str'},\n }\n\n def __init__(\n self,\n **kwargs\n ):\n super(MicrosoftGraphBookingCurrency, self).__init__(**kwargs)\n self.additional_properties = kwargs.get('additional_properties', None)\n self.symbol = kwargs.get('symbol', None)\n\n\nclass MicrosoftGraphBookingPerson(MicrosoftGraphBookingNamedEntity):\n \"\"\"Represents a booking customer or staff member.\n\n :param id: Read-only.\n :type id: str\n :param display_name: Display name of this entity.\n :type display_name: str\n :param additional_properties: Unmatched properties from the message are deserialized to this\n collection.\n :type additional_properties: dict[str, object]\n :param email_address: The e-mail address of this person.\n :type email_address: str\n \"\"\"\n\n _attribute_map = {\n 'id': {'key': 'id', 'type': 'str'},\n 'display_name': {'key': 'displayName', 'type': 'str'},\n 'additional_properties': {'key': '', 'type': '{object}'},\n 'email_address': {'key': 'emailAddress', 'type': 'str'},\n }\n\n def __init__(\n self,\n **kwargs\n ):\n super(MicrosoftGraphBookingPerson, self).__init__(**kwargs)\n self.additional_properties = kwargs.get('additional_properties', None)\n self.email_address = kwargs.get('email_address', None)\n\n\nclass MicrosoftGraphBookingCustomer(MicrosoftGraphBookingPerson):\n \"\"\"Represents a customer of the business.\n\n :param id: Read-only.\n :type id: str\n :param display_name: Display name of this entity.\n :type display_name: str\n :param email_address: The e-mail address of this person.\n :type email_address: str\n :param additional_properties: Unmatched properties from the message are deserialized to this\n collection.\n :type additional_properties: dict[str, object]\n \"\"\"\n\n _attribute_map = {\n 'id': {'key': 'id', 'type': 'str'},\n 'display_name': {'key': 'displayName', 'type': 'str'},\n 'email_address': {'key': 'emailAddress', 'type': 'str'},\n 'additional_properties': {'key': '', 'type': '{object}'},\n }\n\n def __init__(\n self,\n **kwargs\n ):\n super(MicrosoftGraphBookingCustomer, self).__init__(**kwargs)\n self.additional_properties = kwargs.get('additional_properties', None)\n\n\nclass MicrosoftGraphBookingReminder(msrest.serialization.Model):\n \"\"\"This type represents when and to whom to send an e-mail reminder.\n\n :param additional_properties: Unmatched properties from the message are deserialized to this\n collection.\n :type additional_properties: dict[str, object]\n :param message: Message to send.\n :type message: str\n :param offset: How much time before an appointment the reminder should be sent.\n :type offset: ~datetime.timedelta\n :param recipients: Possible values include: \"allAttendees\", \"staff\", \"customer\".\n :type 
recipients: str or ~bookings.models.MicrosoftGraphBookingReminderRecipients\n \"\"\"\n\n _attribute_map = {\n 'additional_properties': {'key': '', 'type': '{object}'},\n 'message': {'key': 'message', 'type': 'str'},\n 'offset': {'key': 'offset', 'type': 'duration'},\n 'recipients': {'key': 'recipients', 'type': 'str'},\n }\n\n def __init__(\n self,\n **kwargs\n ):\n super(MicrosoftGraphBookingReminder, self).__init__(**kwargs)\n self.additional_properties = kwargs.get('additional_properties', None)\n self.message = kwargs.get('message', None)\n self.offset = kwargs.get('offset', None)\n self.recipients = kwargs.get('recipients', None)\n\n\nclass MicrosoftGraphBookingSchedulingPolicy(msrest.serialization.Model):\n \"\"\"This type represents the set of policies that dictate how bookings can be created in a Booking Calendar.\n\n :param additional_properties: Unmatched properties from the message are deserialized to this\n collection.\n :type additional_properties: dict[str, object]\n :param allow_staff_selection: Allow customers to choose a specific person for the booking.\n :type allow_staff_selection: bool\n :param maximum_advance: Maximum number of days in advance that a booking can be made.\n :type maximum_advance: ~datetime.timedelta\n :param minimum_lead_time: Minimum lead time for bookings and cancellations.\n :type minimum_lead_time: ~datetime.timedelta\n :param send_confirmations_to_owner: Notify the business via email when a booking is created or\n changed.\n :type send_confirmations_to_owner: bool\n :param time_slot_interval: Duration of each time slot.\n :type time_slot_interval: ~datetime.timedelta\n \"\"\"\n\n _attribute_map = {\n 'additional_properties': {'key': '', 'type': '{object}'},\n 'allow_staff_selection': {'key': 'allowStaffSelection', 'type': 'bool'},\n 'maximum_advance': {'key': 'maximumAdvance', 'type': 'duration'},\n 'minimum_lead_time': {'key': 'minimumLeadTime', 'type': 'duration'},\n 'send_confirmations_to_owner': {'key': 'sendConfirmationsToOwner', 'type': 'bool'},\n 'time_slot_interval': {'key': 'timeSlotInterval', 'type': 'duration'},\n }\n\n def __init__(\n self,\n **kwargs\n ):\n super(MicrosoftGraphBookingSchedulingPolicy, self).__init__(**kwargs)\n self.additional_properties = kwargs.get('additional_properties', None)\n self.allow_staff_selection = kwargs.get('allow_staff_selection', None)\n self.maximum_advance = kwargs.get('maximum_advance', None)\n self.minimum_lead_time = kwargs.get('minimum_lead_time', None)\n self.send_confirmations_to_owner = kwargs.get('send_confirmations_to_owner', None)\n self.time_slot_interval = kwargs.get('time_slot_interval', None)\n\n\nclass MicrosoftGraphBookingService(MicrosoftGraphBookingNamedEntity):\n \"\"\"Represents a particular service offered by a booking business.\n\n :param id: Read-only.\n :type id: str\n :param display_name: Display name of this entity.\n :type display_name: str\n :param additional_properties: Unmatched properties from the message are deserialized to this\n collection.\n :type additional_properties: dict[str, object]\n :param additional_information:\n :type additional_information: str\n :param default_duration:\n :type default_duration: ~datetime.timedelta\n :param default_location: location.\n :type default_location: ~bookings.models.MicrosoftGraphLocation\n :param default_price:\n :type default_price: float\n :param default_price_type: Possible values include: \"undefined\", \"fixedPrice\", \"startingAt\",\n \"hourly\", \"free\", \"priceVaries\", \"callUs\", \"notSet\".\n :type 
default_price_type: str or ~bookings.models.MicrosoftGraphBookingPriceType\n :param default_reminders: The default reminders set in an appointment of this service.\n :type default_reminders: list[~bookings.models.MicrosoftGraphBookingReminder]\n :param description:\n :type description: str\n :param is_hidden_from_customers:\n :type is_hidden_from_customers: bool\n :param is_location_online:\n :type is_location_online: bool\n :param notes:\n :type notes: str\n :param post_buffer:\n :type post_buffer: ~datetime.timedelta\n :param pre_buffer:\n :type pre_buffer: ~datetime.timedelta\n :param scheduling_policy: This type represents the set of policies that dictate how bookings\n can be created in a Booking Calendar.\n :type scheduling_policy: ~bookings.models.MicrosoftGraphBookingSchedulingPolicy\n :param staff_member_ids:\n :type staff_member_ids: list[str]\n \"\"\"\n\n _attribute_map = {\n 'id': {'key': 'id', 'type': 'str'},\n 'display_name': {'key': 'displayName', 'type': 'str'},\n 'additional_properties': {'key': '', 'type': '{object}'},\n 'additional_information': {'key': 'additionalInformation', 'type': 'str'},\n 'default_duration': {'key': 'defaultDuration', 'type': 'duration'},\n 'default_location': {'key': 'defaultLocation', 'type': 'MicrosoftGraphLocation'},\n 'default_price': {'key': 'defaultPrice', 'type': 'float'},\n 'default_price_type': {'key': 'defaultPriceType', 'type': 'str'},\n 'default_reminders': {'key': 'defaultReminders', 'type': '[MicrosoftGraphBookingReminder]'},\n 'description': {'key': 'description', 'type': 'str'},\n 'is_hidden_from_customers': {'key': 'isHiddenFromCustomers', 'type': 'bool'},\n 'is_location_online': {'key': 'isLocationOnline', 'type': 'bool'},\n 'notes': {'key': 'notes', 'type': 'str'},\n 'post_buffer': {'key': 'postBuffer', 'type': 'duration'},\n 'pre_buffer': {'key': 'preBuffer', 'type': 'duration'},\n 'scheduling_policy': {'key': 'schedulingPolicy', 'type': 'MicrosoftGraphBookingSchedulingPolicy'},\n 'staff_member_ids': {'key': 'staffMemberIds', 'type': '[str]'},\n }\n\n def __init__(\n self,\n **kwargs\n ):\n super(MicrosoftGraphBookingService, self).__init__(**kwargs)\n self.additional_properties = kwargs.get('additional_properties', None)\n self.additional_information = kwargs.get('additional_information', None)\n self.default_duration = kwargs.get('default_duration', None)\n self.default_location = kwargs.get('default_location', None)\n self.default_price = kwargs.get('default_price', None)\n self.default_price_type = kwargs.get('default_price_type', None)\n self.default_reminders = kwargs.get('default_reminders', None)\n self.description = kwargs.get('description', None)\n self.is_hidden_from_customers = kwargs.get('is_hidden_from_customers', None)\n self.is_location_online = kwargs.get('is_location_online', None)\n self.notes = kwargs.get('notes', None)\n self.post_buffer = kwargs.get('post_buffer', None)\n self.pre_buffer = kwargs.get('pre_buffer', None)\n self.scheduling_policy = kwargs.get('scheduling_policy', None)\n self.staff_member_ids = kwargs.get('staff_member_ids', None)\n\n\nclass MicrosoftGraphBookingStaffMember(MicrosoftGraphBookingPerson):\n \"\"\"Represents a staff member who provides services in a business.\n\n :param id: Read-only.\n :type id: str\n :param display_name: Display name of this entity.\n :type display_name: str\n :param email_address: The e-mail address of this person.\n :type email_address: str\n :param additional_properties: Unmatched properties from the message are deserialized to this\n collection.\n :type 
additional_properties: dict[str, object]\n :param availability_is_affected_by_personal_calendar:\n :type availability_is_affected_by_personal_calendar: bool\n :param color_index:\n :type color_index: int\n :param role: Possible values include: \"guest\", \"administrator\", \"viewer\", \"externalGuest\".\n :type role: str or ~bookings.models.MicrosoftGraphBookingStaffRole\n :param use_business_hours:\n :type use_business_hours: bool\n :param working_hours:\n :type working_hours: list[~bookings.models.MicrosoftGraphBookingWorkHours]\n \"\"\"\n\n _validation = {\n 'color_index': {'maximum': 2147483647, 'minimum': -2147483648},\n }\n\n _attribute_map = {\n 'id': {'key': 'id', 'type': 'str'},\n 'display_name': {'key': 'displayName', 'type': 'str'},\n 'email_address': {'key': 'emailAddress', 'type': 'str'},\n 'additional_properties': {'key': '', 'type': '{object}'},\n 'availability_is_affected_by_personal_calendar': {'key': 'availabilityIsAffectedByPersonalCalendar', 'type': 'bool'},\n 'color_index': {'key': 'colorIndex', 'type': 'int'},\n 'role': {'key': 'role', 'type': 'str'},\n 'use_business_hours': {'key': 'useBusinessHours', 'type': 'bool'},\n 'working_hours': {'key': 'workingHours', 'type': '[MicrosoftGraphBookingWorkHours]'},\n }\n\n def __init__(\n self,\n **kwargs\n ):\n super(MicrosoftGraphBookingStaffMember, self).__init__(**kwargs)\n self.additional_properties = kwargs.get('additional_properties', None)\n self.availability_is_affected_by_personal_calendar = kwargs.get('availability_is_affected_by_personal_calendar', None)\n self.color_index = kwargs.get('color_index', None)\n self.role = kwargs.get('role', None)\n self.use_business_hours = kwargs.get('use_business_hours', None)\n self.working_hours = kwargs.get('working_hours', None)\n\n\nclass MicrosoftGraphBookingWorkHours(msrest.serialization.Model):\n \"\"\"This type represents the set of working hours in a single day of the week.\n\n :param additional_properties: Unmatched properties from the message are deserialized to this\n collection.\n :type additional_properties: dict[str, object]\n :param day: Possible values include: \"sunday\", \"monday\", \"tuesday\", \"wednesday\", \"thursday\",\n \"friday\", \"saturday\".\n :type day: str or ~bookings.models.MicrosoftGraphDayOfWeek\n :param time_slots: A list of start/end times during a day.\n :type time_slots: list[~bookings.models.MicrosoftGraphBookingWorkTimeSlot]\n \"\"\"\n\n _attribute_map = {\n 'additional_properties': {'key': '', 'type': '{object}'},\n 'day': {'key': 'day', 'type': 'str'},\n 'time_slots': {'key': 'timeSlots', 'type': '[MicrosoftGraphBookingWorkTimeSlot]'},\n }\n\n def __init__(\n self,\n **kwargs\n ):\n super(MicrosoftGraphBookingWorkHours, self).__init__(**kwargs)\n self.additional_properties = kwargs.get('additional_properties', None)\n self.day = kwargs.get('day', None)\n self.time_slots = kwargs.get('time_slots', None)\n\n\nclass MicrosoftGraphBookingWorkTimeSlot(msrest.serialization.Model):\n \"\"\"bookingWorkTimeSlot.\n\n :param additional_properties: Unmatched properties from the message are deserialized to this\n collection.\n :type additional_properties: dict[str, object]\n :param end:\n :type end: ~datetime.time\n :param start:\n :type start: ~datetime.time\n \"\"\"\n\n _attribute_map = {\n 'additional_properties': {'key': '', 'type': '{object}'},\n 'end': {'key': 'end', 'type': 'time'},\n 'start': {'key': 'start', 'type': 'time'},\n }\n\n def __init__(\n self,\n **kwargs\n ):\n super(MicrosoftGraphBookingWorkTimeSlot, self).__init__(**kwargs)\n 
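# note: msrest routes any JSON properties without an explicit _attribute_map entry into this dict (the '' key above)\n        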
self.additional_properties = kwargs.get('additional_properties', None)\n self.end = kwargs.get('end', None)\n self.start = kwargs.get('start', None)\n\n\nclass MicrosoftGraphDateTimeZone(msrest.serialization.Model):\n \"\"\"dateTimeTimeZone.\n\n :param additional_properties: Unmatched properties from the message are deserialized to this\n collection.\n :type additional_properties: dict[str, object]\n :param date_time: A single point of time in a combined date and time representation\n ({date}T{time}; for example, 2017-08-29T04:00:00.0000000).\n :type date_time: str\n :param time_zone: Represents a time zone, for example, 'Pacific Standard Time'. See below for\n more possible values.\n :type time_zone: str\n \"\"\"\n\n _attribute_map = {\n 'additional_properties': {'key': '', 'type': '{object}'},\n 'date_time': {'key': 'dateTime', 'type': 'str'},\n 'time_zone': {'key': 'timeZone', 'type': 'str'},\n }\n\n def __init__(\n self,\n **kwargs\n ):\n super(MicrosoftGraphDateTimeZone, self).__init__(**kwargs)\n self.additional_properties = kwargs.get('additional_properties', None)\n self.date_time = kwargs.get('date_time', None)\n self.time_zone = kwargs.get('time_zone', None)\n\n\nclass MicrosoftGraphLocation(msrest.serialization.Model):\n \"\"\"location.\n\n :param additional_properties: Unmatched properties from the message are deserialized to this\n collection.\n :type additional_properties: dict[str, object]\n :param address: physicalAddress.\n :type address: ~bookings.models.MicrosoftGraphPhysicalAddress\n :param coordinates: outlookGeoCoordinates.\n :type coordinates: ~bookings.models.MicrosoftGraphOutlookGeoCoordinates\n :param display_name: The name associated with the location.\n :type display_name: str\n :param location_email_address: Optional email address of the location.\n :type location_email_address: str\n :param location_type: Possible values include: \"default\", \"conferenceRoom\", \"homeAddress\",\n \"businessAddress\", \"geoCoordinates\", \"streetAddress\", \"hotel\", \"restaurant\", \"localBusiness\",\n \"postalAddress\".\n :type location_type: str or ~bookings.models.MicrosoftGraphLocationType\n :param location_uri: Optional URI representing the location.\n :type location_uri: str\n :param unique_id: For internal use only.\n :type unique_id: str\n :param unique_id_type: Possible values include: \"unknown\", \"locationStore\", \"directory\",\n \"private\", \"bing\".\n :type unique_id_type: str or ~bookings.models.MicrosoftGraphLocationUniqueIdType\n \"\"\"\n\n _attribute_map = {\n 'additional_properties': {'key': '', 'type': '{object}'},\n 'address': {'key': 'address', 'type': 'MicrosoftGraphPhysicalAddress'},\n 'coordinates': {'key': 'coordinates', 'type': 'MicrosoftGraphOutlookGeoCoordinates'},\n 'display_name': {'key': 'displayName', 'type': 'str'},\n 'location_email_address': {'key': 'locationEmailAddress', 'type': 'str'},\n 'location_type': {'key': 'locationType', 'type': 'str'},\n 'location_uri': {'key': 'locationUri', 'type': 'str'},\n 'unique_id': {'key': 'uniqueId', 'type': 'str'},\n 'unique_id_type': {'key': 'uniqueIdType', 'type': 'str'},\n }\n\n def __init__(\n self,\n **kwargs\n ):\n super(MicrosoftGraphLocation, self).__init__(**kwargs)\n self.additional_properties = kwargs.get('additional_properties', None)\n self.address = kwargs.get('address', None)\n self.coordinates = kwargs.get('coordinates', None)\n self.display_name = kwargs.get('display_name', None)\n self.location_email_address = kwargs.get('location_email_address', None)\n self.location_type = 
kwargs.get('location_type', None)\n self.location_uri = kwargs.get('location_uri', None)\n self.unique_id = kwargs.get('unique_id', None)\n self.unique_id_type = kwargs.get('unique_id_type', None)\n\n\nclass MicrosoftGraphOutlookGeoCoordinates(msrest.serialization.Model):\n \"\"\"outlookGeoCoordinates.\n\n :param additional_properties: Unmatched properties from the message are deserialized to this\n collection.\n :type additional_properties: dict[str, object]\n :param accuracy: The accuracy of the latitude and longitude. As an example, the accuracy can be\n measured in meters, such as the latitude and longitude are accurate to within 50 meters.\n :type accuracy: float\n :param altitude: The altitude of the location.\n :type altitude: float\n :param altitude_accuracy: The accuracy of the altitude.\n :type altitude_accuracy: float\n :param latitude: The latitude of the location.\n :type latitude: float\n :param longitude: The longitude of the location.\n :type longitude: float\n \"\"\"\n\n _attribute_map = {\n 'additional_properties': {'key': '', 'type': '{object}'},\n 'accuracy': {'key': 'accuracy', 'type': 'float'},\n 'altitude': {'key': 'altitude', 'type': 'float'},\n 'altitude_accuracy': {'key': 'altitudeAccuracy', 'type': 'float'},\n 'latitude': {'key': 'latitude', 'type': 'float'},\n 'longitude': {'key': 'longitude', 'type': 'float'},\n }\n\n def __init__(\n self,\n **kwargs\n ):\n super(MicrosoftGraphOutlookGeoCoordinates, self).__init__(**kwargs)\n self.additional_properties = kwargs.get('additional_properties', None)\n self.accuracy = kwargs.get('accuracy', None)\n self.altitude = kwargs.get('altitude', None)\n self.altitude_accuracy = kwargs.get('altitude_accuracy', None)\n self.latitude = kwargs.get('latitude', None)\n self.longitude = kwargs.get('longitude', None)\n\n\nclass MicrosoftGraphPhysicalAddress(msrest.serialization.Model):\n \"\"\"physicalAddress.\n\n :param additional_properties: Unmatched properties from the message are deserialized to this\n collection.\n :type additional_properties: dict[str, object]\n :param city: The city.\n :type city: str\n :param country_or_region: The country or region. 
It's a free-format string value, for example,\n 'United States'.\n :type country_or_region: str\n :param postal_code: The postal code.\n :type postal_code: str\n :param post_office_box:\n :type post_office_box: str\n :param state: The state.\n :type state: str\n :param street: The street.\n :type street: str\n :param type: Possible values include: \"unknown\", \"home\", \"business\", \"other\".\n :type type: str or ~bookings.models.MicrosoftGraphPhysicalAddressType\n \"\"\"\n\n _attribute_map = {\n 'additional_properties': {'key': '', 'type': '{object}'},\n 'city': {'key': 'city', 'type': 'str'},\n 'country_or_region': {'key': 'countryOrRegion', 'type': 'str'},\n 'postal_code': {'key': 'postalCode', 'type': 'str'},\n 'post_office_box': {'key': 'postOfficeBox', 'type': 'str'},\n 'state': {'key': 'state', 'type': 'str'},\n 'street': {'key': 'street', 'type': 'str'},\n 'type': {'key': 'type', 'type': 'str'},\n }\n\n def __init__(\n self,\n **kwargs\n ):\n super(MicrosoftGraphPhysicalAddress, self).__init__(**kwargs)\n self.additional_properties = kwargs.get('additional_properties', None)\n self.city = kwargs.get('city', None)\n self.country_or_region = kwargs.get('country_or_region', None)\n self.postal_code = kwargs.get('postal_code', None)\n self.post_office_box = kwargs.get('post_office_box', None)\n self.state = kwargs.get('state', None)\n self.street = kwargs.get('street', None)\n self.type = kwargs.get('type', None)\n\n\nclass OdataError(msrest.serialization.Model):\n \"\"\"OdataError.\n\n All required parameters must be populated in order to send to Azure.\n\n :param additional_properties: Unmatched properties from the message are deserialized to this\n collection.\n :type additional_properties: dict[str, object]\n :param error: Required.\n :type error: ~bookings.models.OdataErrorMain\n \"\"\"\n\n _validation = {\n 'error': {'required': True},\n }\n\n _attribute_map = {\n 'additional_properties': {'key': '', 'type': '{object}'},\n 'error': {'key': 'error', 'type': 'OdataErrorMain'},\n }\n\n def __init__(\n self,\n **kwargs\n ):\n super(OdataError, self).__init__(**kwargs)\n self.additional_properties = kwargs.get('additional_properties', None)\n self.error = kwargs['error']\n\n\nclass OdataErrorDetail(msrest.serialization.Model):\n \"\"\"OdataErrorDetail.\n\n All required parameters must be populated in order to send to Azure.\n\n :param additional_properties: Unmatched properties from the message are deserialized to this\n collection.\n :type additional_properties: dict[str, object]\n :param code: Required.\n :type code: str\n :param message: Required.\n :type message: str\n :param target:\n :type target: str\n \"\"\"\n\n _validation = {\n 'code': {'required': True},\n 'message': {'required': True},\n }\n\n _attribute_map = {\n 'additional_properties': {'key': '', 'type': '{object}'},\n 'code': {'key': 'code', 'type': 'str'},\n 'message': {'key': 'message', 'type': 'str'},\n 'target': {'key': 'target', 'type': 'str'},\n }\n\n def __init__(\n self,\n **kwargs\n ):\n super(OdataErrorDetail, self).__init__(**kwargs)\n self.additional_properties = kwargs.get('additional_properties', None)\n self.code = kwargs['code']\n self.message = kwargs['message']\n self.target = kwargs.get('target', None)\n\n\nclass OdataErrorMain(msrest.serialization.Model):\n \"\"\"OdataErrorMain.\n\n All required parameters must be populated in order to send to Azure.\n\n :param additional_properties: Unmatched properties from the message are deserialized to this\n collection.\n :type additional_properties: 
dict[str, object]\n :param code: Required.\n :type code: str\n :param message: Required.\n :type message: str\n :param target:\n :type target: str\n :param details:\n :type details: list[~bookings.models.OdataErrorDetail]\n :param innererror: The structure of this object is service-specific.\n :type innererror: dict[str, object]\n \"\"\"\n\n _validation = {\n 'code': {'required': True},\n 'message': {'required': True},\n }\n\n _attribute_map = {\n 'additional_properties': {'key': '', 'type': '{object}'},\n 'code': {'key': 'code', 'type': 'str'},\n 'message': {'key': 'message', 'type': 'str'},\n 'target': {'key': 'target', 'type': 'str'},\n 'details': {'key': 'details', 'type': '[OdataErrorDetail]'},\n 'innererror': {'key': 'innererror', 'type': '{object}'},\n }\n\n def __init__(\n self,\n **kwargs\n ):\n super(OdataErrorMain, self).__init__(**kwargs)\n self.additional_properties = kwargs.get('additional_properties', None)\n self.code = kwargs['code']\n self.message = kwargs['message']\n self.target = kwargs.get('target', None)\n self.details = kwargs.get('details', None)\n self.innererror = kwargs.get('innererror', None)\n\n\nclass Paths1Bomg32BookingbusinessesBookingbusinessIdCalendarviewBookingappointmentIdMicrosoftGraphCancelPostRequestbodyContentApplicationJsonSchema(msrest.serialization.Model):\n \"\"\"Paths1Bomg32BookingbusinessesBookingbusinessIdCalendarviewBookingappointmentIdMicrosoftGraphCancelPostRequestbodyContentApplicationJsonSchema.\n\n :param additional_properties: Unmatched properties from the message are deserialized to this\n collection.\n :type additional_properties: dict[str, object]\n :param cancellation_message:\n :type cancellation_message: str\n \"\"\"\n\n _attribute_map = {\n 'additional_properties': {'key': '', 'type': '{object}'},\n 'cancellation_message': {'key': 'cancellationMessage', 'type': 'str'},\n }\n\n def __init__(\n self,\n **kwargs\n ):\n super(Paths1Bomg32BookingbusinessesBookingbusinessIdCalendarviewBookingappointmentIdMicrosoftGraphCancelPostRequestbodyContentApplicationJsonSchema, self).__init__(**kwargs)\n self.additional_properties = kwargs.get('additional_properties', None)\n self.cancellation_message = kwargs.get('cancellation_message', None)\n\n\nclass Paths1K88Cl0BookingbusinessesBookingbusinessIdAppointmentsBookingappointmentIdMicrosoftGraphCancelPostRequestbodyContentApplicationJsonSchema(msrest.serialization.Model):\n \"\"\"Paths1K88Cl0BookingbusinessesBookingbusinessIdAppointmentsBookingappointmentIdMicrosoftGraphCancelPostRequestbodyContentApplicationJsonSchema.\n\n :param additional_properties: Unmatched properties from the message are deserialized to this\n collection.\n :type additional_properties: dict[str, object]\n :param cancellation_message:\n :type cancellation_message: str\n \"\"\"\n\n _attribute_map = {\n 'additional_properties': {'key': '', 'type': '{object}'},\n 'cancellation_message': {'key': 'cancellationMessage', 'type': 'str'},\n }\n\n def __init__(\n self,\n **kwargs\n ):\n super(Paths1K88Cl0BookingbusinessesBookingbusinessIdAppointmentsBookingappointmentIdMicrosoftGraphCancelPostRequestbodyContentApplicationJsonSchema, self).__init__(**kwargs)\n self.additional_properties = kwargs.get('additional_properties', None)\n self.cancellation_message = kwargs.get('cancellation_message', 
None)\n","repo_name":"BrianTJackett/msgraph-cli","sub_path":"msgraph-cli-extensions/beta/bookings_beta/azext_bookings_beta/vendored_sdks/bookings/models/_models.py","file_name":"_models.py","file_ext":"py","file_size_in_byte":48483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"39882297262","text":"import os\nimport sys\nimport pygame\nimport random\n\n\nclass Monster:\n def __init__(self, x, y, m_type, par):\n if x == 0 or x == n - 1:\n self.x = int(bool(x)) * n * cell_width\n self.y = cell_width // 2 + y * cell_width\n else:\n self.x = cell_width // 2 + x * cell_width\n self.y = int(bool(y)) * n * cell_width\n self.board_state = board_state\n self.m_type, self.n = m_type, n\n self.width = cell_width\n self.par_x, self.par_y = par, par\n self.coord = [x, y]\n self.left, self.top = 0, 0\n self.nx, self.ny = nx, ny\n self.rotate = False\n self.watch_right = True\n self.hp = random.choice([3, 4])\n if self.m_type == 'flying':\n monster_image = load_image(\"eye.png\")\n else:\n monster_image = load_image(\"skell.png\")\n self.monster = pygame.sprite.Sprite(moving_sprites)\n self.monster.image = monster_image\n self.monster.rect = self.monster.image.get_rect()\n board_moving.board[y][x] = self.monster\n self.lvl = lvl\n\n def move(self):\n if nx == -1 and self.watch_right or nx == 1 and not self.watch_right:\n self.monster.image = pygame.transform.flip(self.monster.image, True, False)\n self.watch_right = bool(nx + 1)\n if self.left + self.coord[0] * self.width <= self.x <= self.left + (self.coord[0] + 1) * self.width and \\\n self.top + self.coord[1] * self.width <= self.y <= self.top + (self.coord[1] + 1) * self.width:\n self.x += self.par_x * self.nx\n self.y += self.par_y * self.ny\n if self.rotate and self.x % self.width > self.width // 2 and self.nx == 1:\n self.ny = self.nx - (2 * int(self.rotate == 'left') * self.nx)\n self.nx = 0\n self.rotate = False\n elif self.rotate and self.y % self.width > self.width // 2 and self.ny == 1:\n self.nx = -self.ny - (2 * int(self.rotate == 'left') * -self.ny)\n self.ny = 0\n self.rotate = False\n elif self.rotate and self.x % self.width < self.width // 2 and self.nx == -1:\n self.ny = self.nx - (2 * int(self.rotate == 'left') * self.nx)\n self.nx = 0\n self.rotate = False\n elif self.rotate and self.y % self.width < self.width // 2 and self.ny == -1:\n self.nx = -self.ny - (2 * int(self.rotate == 'left') * -self.ny)\n self.ny = 0\n self.rotate = False\n else:\n board_moving.move(self.coord[0], self.coord[1], self.nx, self.ny, self.monster)\n self.coord[0] += self.nx\n self.coord[1] += self.ny\n if self.coord[0] == self.n - 1 and self.nx == 1 or self.coord[1] == self.n - 1 and self.ny == 1 or\\\n self.coord[0] == 0 and self.nx == -1 or self.coord[1] == 0 and self.ny == -1:\n self.rotate = True\n elif self.lvl[self.coord[1] + self.ny][self.coord[0] + self.nx] not in ('r1', 'r2', 'r3', 'r4',\n 'r5', 'r6', 'ca', 'xx'):\n self.rotate = True\n if self.rotate:\n if self.coord[0] == 0 and self.ny == -1 or self.coord[0] == self.n - 1 and self.ny == 1 or\\\n self.coord[1] == 0 and self.nx == 1 or self.coord[1] == self.n - 1 and self.nx == -1:\n self.rotate = 'right'\n elif self.coord[0] == 0 and self.ny == 1 or self.coord[0] == self.n - 1 and self.ny == -1 or \\\n self.coord[1] == 0 and self.nx == -1 or self.coord[1] == self.n - 1 and self.nx == 1:\n self.rotate = 'left'\n elif (self.lvl[self.coord[1] + self.nx * ((self.ny - self.nx) ** 2)]\n [self.coord[0] + self.ny * (-(self.nx - self.ny) ** 2)] not in 
('r1', 'r2', 'r3', 'r4', 'br', 'r5', 'r6', 'ca', 'xx')):\n                self.rotate = 'left'\n            else:\n                self.rotate = 'right'\n\n\nclass Tower:\n    def __init__(self, x, y, t_type):\n        self.x, self.y = int(x), int(y)\n        self.t_type = t_type\n        if self.t_type == 'flying':\n            tower_image = load_image(\"archer.png\")\n        else:\n            tower_image = load_image(\"wizard.png\")\n        self.tower = pygame.sprite.Sprite(moving_sprites)\n        self.tower.image = tower_image\n        self.tower.rect = self.tower.image.get_rect()\n        board_moving.board[self.y][self.x] = self.tower\n\n    def damage_monsters_near(self):\n        for i in range(-1, 2):\n            for j in range(-1, 2):\n                if 0 <= i + self.y < n and 0 <= j + self.x < n:\n                    for monster_to_kill in monsters:\n                        if monster_to_kill.coord == [j + self.x, i + self.y]:\n                            if monster_to_kill.m_type == self.t_type:\n                                monster_to_kill.hp -= 1\n                                score[0] += int(5 * kf)\n                                if self.t_type == 'flying':\n                                    sound = pygame.mixer.Sound('data/arrow.wav')\n                                    sound.set_volume(0.4)\n                                    sound.play()\n                                else:\n                                    sound = pygame.mixer.Sound('data/wizard.wav')\n                                    sound.set_volume(0.4)\n                                    sound.play()\n                                return\n\n\nclass Board:\n    def __init__(self):\n        self.board = [[no_sprite] * n for _ in range(n)]\n        self.left = 10\n        self.top = 10\n        self.cell_size = 16\n\n    def set_view(self, left, top, cell_size):\n        self.left = left\n        self.top = top\n        self.cell_size = cell_size\n\n    def render(self, sprites_group, screen_to_draw):\n        for i in range(n):\n            for j in range(n):\n                cell = self.board[i][j]\n                coord = (self.left + j * self.cell_size, self.top + i * self.cell_size, self.cell_size, self.cell_size)\n                try:\n                    if cell:\n                        cell.rect.x, cell.rect.y = coord[0], coord[1]\n                except AttributeError:\n                    pass\n        sprites_group.draw(screen_to_draw)\n\n    def move(self, x, y, move_x, move_y, sprite):\n        self.board[y][x] = no_sprite\n        self.board[y + move_y][x + move_x] = sprite\n\n    def fill(self, text_lvl):\n        for i in range(n):\n            for j in range(n):\n                if text_lvl[i][j] == '00' or text_lvl[i][j] == 'xx':\n                    continue\n                elem = pygame.sprite.Sprite(back_sprites)\n                if text_lvl[i][j] == '..':\n                    elem.image = pygame.transform.scale(load_image(\"background.png\"), (self.cell_size, self.cell_size))\n                elif text_lvl[i][j] == ',,':\n                    elem.image = pygame.transform.scale(load_image(\"background_scary.png\"),\n                                                        (self.cell_size, self.cell_size))\n                elif text_lvl[i][j] == 'st':\n                    elem.image = pygame.transform.scale(load_image(\"scary_tree.png\"), (self.cell_size, self.cell_size))\n                elif text_lvl[i][j] == 'sr':\n                    elem.image = pygame.transform.rotate(\n                        pygame.transform.scale(load_image(\"scary_river.png\"), (self.cell_size, self.cell_size)), 90)\n                elif text_lvl[i][j] == 'br':\n                    elem.image = pygame.transform.rotate(\n                        pygame.transform.scale(load_image(\"bridge.png\"), (self.cell_size, self.cell_size)), 90)\n                elif text_lvl[i][j] == \"r1\":\n                    elem.image = pygame.transform.scale(load_image(\"road.png\"), (self.cell_size, self.cell_size))\n                elif text_lvl[i][j] == \"r2\":\n                    elem.image = pygame.transform.rotate(\n                        pygame.transform.scale(load_image(\"road.png\"), (self.cell_size, self.cell_size)), 90)\n                elif text_lvl[i][j] == \"r3\":\n                    elem.image = pygame.transform.rotate(\n                        pygame.transform.scale(load_image(\"road1.png\"), (self.cell_size, self.cell_size)), 90)\n                elif text_lvl[i][j] == \"r4\":\n                    elem.image = pygame.transform.rotate(\n                        pygame.transform.scale(load_image(\"road1.png\"), (self.cell_size, self.cell_size)), 180)\n                elif text_lvl[i][j] == \"r5\":\n                    elem.image = pygame.transform.rotate(\n                        pygame.transform.scale(load_image(\"road1.png\"), (self.cell_size, self.cell_size)), 270)\n                elif text_lvl[i][j] 
== \"r6\":\n                    elem.image = pygame.transform.scale(load_image(\"road1.png\"), (self.cell_size, self.cell_size))\n                elif text_lvl[i][j] == \"b0\":\n                    elem.image = pygame.transform.scale(load_image(\"bush.png\"), (self.cell_size, self.cell_size))\n                elif text_lvl[i][j] == \"b1\":\n                    elem.image = pygame.transform.scale(load_image(\"bush1.png\"), (self.cell_size, self.cell_size))\n                elif text_lvl[i][j] == \"ca\":\n                    elem.image = pygame.transform.scale(load_image(\"house big.png\"),\n                                                        (2 * self.cell_size, 2 * self.cell_size))\n                elif text_lvl[i][j] == \"h0\":\n                    elem.image = pygame.transform.scale(load_image(\"house.png\"), (self.cell_size, self.cell_size))\n                elif text_lvl[i][j] == \"h1\":\n                    elem.image = pygame.transform.scale(load_image(\"house1.png\"), (self.cell_size, self.cell_size))\n                elif text_lvl[i][j] == \"h2\":\n                    elem.image = pygame.transform.scale(load_image(\"house2.png\"), (self.cell_size, self.cell_size))\n                elif text_lvl[i][j] == \"h3\":\n                    elem.image = pygame.transform.scale(load_image(\"house3.png\"), (self.cell_size, self.cell_size))\n                elif text_lvl[i][j] == \"t0\":\n                    elem.image = pygame.transform.scale(load_image(\"tree.png\"), (self.cell_size, self.cell_size))\n                elif text_lvl[i][j] == \"t1\":\n                    elem.image = pygame.transform.scale(load_image(\"tree1.png\"), (self.cell_size, self.cell_size))\n                elif text_lvl[i][j] == \"wh\":\n                    elem.image = pygame.transform.scale(load_image(\"wheat.png\"), (self.cell_size, self.cell_size))\n                elif text_lvl[i][j] == \"w1\":\n                    elem.image = pygame.transform.scale(load_image(\"river.png\"), (self.cell_size, self.cell_size))\n                elif text_lvl[i][j] == \"w2\":\n                    elem.image = pygame.transform.rotate(\n                        pygame.transform.scale(load_image(\"river.png\"), (self.cell_size, self.cell_size)), 90)\n                elif text_lvl[i][j] == \"w3\":\n                    elem.image = pygame.transform.rotate(\n                        pygame.transform.scale(load_image(\"river1.png\"), (self.cell_size, self.cell_size)), 90)\n                elif text_lvl[i][j] == \"w4\":\n                    elem.image = pygame.transform.rotate(\n                        pygame.transform.scale(load_image(\"river1.png\"), (self.cell_size, self.cell_size)), 180)\n                elif text_lvl[i][j] == \"w5\":\n                    elem.image = pygame.transform.rotate(\n                        pygame.transform.scale(load_image(\"river1.png\"), (self.cell_size, self.cell_size)), 270)\n                elif text_lvl[i][j] == \"w6\":\n                    elem.image = pygame.transform.scale(load_image(\"river1.png\"), (self.cell_size, self.cell_size))\n                elif text_lvl[i][j] == \"gr\":\n                    elem.image = pygame.transform.scale(load_image(\"grass.png\"), (self.cell_size, self.cell_size))\n                elem.rect = elem.image.get_rect()\n                self.board[i][j] = elem\n\n\nclass Player:\n    def __init__(self, x, y):\n        self.player = pygame.sprite.Sprite(hero_sprite)\n        self.player.image = load_image(\"hero.png\")\n        self.player.rect = self.player.image.get_rect()\n        self.coord = [x, y]\n        self.board_player = board_player\n        self.board_player.board[y][x] = self.player\n        self.n = n\n\n    def update(self, dx, dy):\n        if 0 <= self.coord[0] + dx < self.n and dx:\n            self.board_player.move(self.coord[0], self.coord[1], dx, dy, self.player)\n            self.coord[0] += dx\n        elif 0 <= self.coord[1] + dy < self.n and dy:\n            self.board_player.move(self.coord[0], self.coord[1], dx, dy, self.player)\n            self.coord[1] += dy\n\n\ndef load_image(name, color_key=None):\n    fullname = os.path.join('data', name)\n    # exit if the image file does not exist\n    if not os.path.isfile(fullname):\n        print(f\"Image file '{fullname}' not found\")\n        sys.exit()\n    image = pygame.image.load(fullname)\n    if color_key is not None:\n        image = image.convert()\n        if color_key == -1:\n            color_key = 
image.get_at((0, 0))\n        image.set_colorkey(color_key)\n    else:\n        image = image.convert_alpha()\n    return image\n\n\ndef load_level(filename):\n    filename = \"data/\" + filename\n    with open(filename, 'r') as mapFile:\n        level = [line.split() for line in mapFile]\n    return level\n\n\ndef check_monster(x, y):\n    if (pygame.image.tostring(board_moving.board[y][x].image, \"RGB\") ==\n            pygame.image.tostring(board_moving.board[n - 1][n - 1].image, \"RGB\")):\n        return True\n    return False\n\n\ndef check_tower(x, y):\n    if (pygame.image.tostring(board_moving.board[y][x].image, \"RGB\") ==\n            pygame.image.tostring(board_moving.board[n - 1][n - 1].image, \"RGB\")):\n        return lvl[y][x] in ('00', 'b0', 'b1', 'gr')\n    return False\n\n\ndef check_monster_kill(x, y):\n    if lvl[y][x] == 'ca':\n        pygame.mixer.Sound('data/no_hp.wav').play()\n        return True\n    return False\n\n\ndef terminate():\n    pygame.quit()\n    sys.exit()\n\n\ndef start_screen():\n    screen.fill((255, 255, 255))\n    start_end_group.empty()\n    start_sprite = pygame.sprite.Sprite(start_end_group)\n    start_sprite.image = load_image('start.png')\n    start_sprite.rect = start_sprite.image.get_rect()\n    start_sprite.rect.x, start_sprite.rect.y = 0, 0\n    note_surface = pygame.Surface((700, 120))\n    note_surface.fill((255, 255, 255))\n    note_font = pygame.font.Font(None, 50)\n    note_text = note_font.render(\"Press 1, 2 or 3 to choose a level\", True, pygame.color.Color('black'))\n    note_text_x = 0\n    note_text_y = 0\n    note_surface.blit(note_text, (note_text_x, note_text_y))\n\n    note_text2 = note_font.render(\"Press Esc to quit\", True, pygame.color.Color('black'))\n    note_text_x2 = 0\n    note_text_y2 = 50\n    note_surface.blit(note_text2, (note_text_x2, note_text_y2))\n\n    screen.blit(note_surface, (15, 590))\n    while True:\n        for event_start in pygame.event.get():\n            if event_start.type == pygame.QUIT:\n                terminate()\n            elif event_start.type == pygame.KEYDOWN:\n                if event_start.key == pygame.K_1:\n                    if current_music[0] != 'normal':\n                        pygame.mixer.music.stop()\n                        pygame.mixer.music.load('data/music.mp3')\n                        pygame.mixer.music.set_volume(0.7)\n                        pygame.mixer.music.play(-1)\n                        current_music[0] = 'normal'\n                    return levels['first']\n                elif event_start.key == pygame.K_2:\n                    if current_music[0] != 'normal':\n                        pygame.mixer.music.stop()\n                        pygame.mixer.music.load('data/music.mp3')\n                        pygame.mixer.music.set_volume(0.7)\n                        pygame.mixer.music.play(-1)\n                        current_music[0] = 'normal'\n                    return levels['second']\n                elif event_start.key == pygame.K_3:\n                    if current_music[0] != 'normal':\n                        pygame.mixer.music.stop()\n                        pygame.mixer.music.load('data/music.mp3')\n                        pygame.mixer.music.set_volume(0.7)\n                        pygame.mixer.music.play(-1)\n                        current_music[0] = 'normal'\n                    return levels['third']\n                elif event_start.key == pygame.K_4:\n                    if current_music[0] == 'normal':\n                        pygame.mixer.music.stop()\n                        pygame.mixer.music.load('data/secret_music.mp3')\n                        pygame.mixer.music.set_volume(0.7)\n                        pygame.mixer.music.play(-1)\n                        current_music[0] = 'secret'\n                    return levels['fourth']\n                elif event_start.key == pygame.K_ESCAPE:\n                    thx_for_game_surface = pygame.Surface((700, 700))\n                    thx_for_game_surface.fill((255, 255, 255))\n                    thx_for_game_font = pygame.font.Font(None, 64)\n                    thx_for_game_text = thx_for_game_font.render(\"Thanks for playing!\", True, pygame.color.Color('black'))\n                    thx_for_game_text_x = 160\n                    thx_for_game_text_y = 250\n                    thx_for_game_surface.blit(thx_for_game_text, (thx_for_game_text_x, thx_for_game_text_y))\n                    screen.blit(thx_for_game_surface, (0, 0))\n                    pygame.display.flip()\n                    pygame.time.wait(3000)\n                    sys.exit()\n        
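# redraw the title screen every frame until the player picks a level or quits\n        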
start_end_group.draw(screen)\n        pygame.display.flip()\n        clock.tick(fps)\n\n\ndef end_screen():\n    screen.fill((255, 255, 255))\n    start_end_group.empty()\n    end_sprite = pygame.sprite.Sprite(start_end_group)\n    if won:\n        end_sprite.image = load_image('win.png')\n        pygame.mixer.music.pause()\n        pygame.mixer.Sound(\"data/winSound.wav\").play()\n    else:\n        end_sprite.image = load_image('lose.png')\n        pygame.mixer.music.pause()\n        pygame.mixer.Sound('data/loseSound.wav').play()\n    end_sprite.rect = end_sprite.image.get_rect()\n    end_sprite.rect.x, end_sprite.rect.y = 0, 0\n    while True:\n        for event_end in pygame.event.get():\n            if event_end.type == pygame.QUIT:\n                terminate()\n            elif event_end.type == pygame.KEYDOWN or event_end.type == pygame.MOUSEBUTTONDOWN:\n                pygame.mixer.music.unpause()\n                if not won:\n                    score[0] = 0\n                return\n        start_end_group.draw(screen)\n        if not won:\n            final_score_surface = pygame.Surface((700, 128))\n            final_score_surface.fill((255, 255, 255))\n            final_score_font = pygame.font.Font(None, 64)\n            final_score_text = final_score_font.render(f\"You scored {score[0]} points\", True,\n                                                       pygame.color.Color('black'))\n            final_score_text_x = 15\n            final_score_text_y = 0\n            final_score_surface.blit(final_score_text, (final_score_text_x, final_score_text_y))\n            screen.blit(final_score_surface, (0, 500))\n        pygame.display.flip()\n        clock.tick(fps)\n\n\nfps = 60\nscore = [0]\npygame.init()\npygame.display.set_caption(\"Йопики у ворот\")\nscreen = pygame.display.set_mode((704, 700))\nclock = pygame.time.Clock()\nkf = monsters_num = start_money = health = player_x = player_y = monster_x = monster_y = monster_speed = nx = ny =\\\n    spawn_par = lvl = lvl_back = 0\nwon = False\npygame.mixer.music.load('data/music.mp3')\npygame.mixer.music.set_volume(0.7)\npygame.mixer.music.play(-1)\ncurrent_music = ['normal']\nlevels = {'first': [1, 10, 20, 4, 6, 6, 0, 0, 1, 1, 0, 1.5, 'lvl1.txt', 'lvl_back.txt'],\n          'second': [1.5, 15, 30, 4, 6, 1, 2, 7, 1.7, 0, -1, 1.2, 'lvl2.txt', 'lvl_back.txt'],\n          'third': [2, 20, 40, 4, 5, 5, 7, 3, 2.2, -1, 0, 1, 'lvl3.txt', 'lvl_back.txt'],\n          'fourth': [5, 50, 50, 6, 5, 5, 0, 0, 5, 1, 0, 1, 'lvl4.txt', 'lvl_scary_back.txt']}\nstart_end_group = pygame.sprite.Group()\nwhile True:\n    won = False\n    start_lvl = start_screen()\n    screen = pygame.display.set_mode((704, 576))\n    kf, monsters_num, money, health, player_x, player_y, monster_x, monster_y, monster_v, nx, ny, spawn_par,\\\n        level_text, level_back_text = start_lvl\n    n = 8\n    counter = 0\n    end_counter = 0\n    cell_width = 64\n    left_top = 0\n\n    no_sprite = pygame.sprite.Sprite()\n    no_sprite.image = load_image('no_image.png')\n    board_moving = Board()\n    board_state = Board()\n    board_player = Board()\n    board_background = Board()\n    board_moving.set_view(left_top, left_top, cell_width)\n    board_state.set_view(left_top, left_top, cell_width)\n    board_player.set_view(left_top, left_top, cell_width)\n    board_background.set_view(left_top, left_top, cell_width)\n    back_sprites = pygame.sprite.Group()\n    moving_sprites = pygame.sprite.Group()\n    state_sprites = pygame.sprite.Group()\n    hero_sprite = pygame.sprite.Group()\n\n    board_background.fill(load_level(level_back_text))\n    lvl = load_level(level_text)\n    board_state.fill(lvl)\n\n    hp_image = load_image(\"hp.png\")\n    money_image = load_image(\"coin.png\")\n    score_image = pygame.transform.scale(load_image(\"xp.png\", -1), (64, 64))  # TODO: replace this icon\n    buttons_image = pygame.transform.scale(load_image(\"buttons.png\"), (cell_width * 3, cell_width * 2))\n    image_1 = pygame.transform.scale(load_image(\"1.png\"), (64, 64))\n    
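# sidebar icons: key 1 builds the mage (hits ground monsters), key 2 the archer (hits flying ones)\n    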
image_2 = pygame.transform.scale(load_image(\"2.png\"), (64, 64))\n\n    monsters = []\n    towers = []\n    par_monsters = monster_v * cell_width / fps\n    player = Player(player_x, player_y)\n    hero_right = True\n    running = True\n    start = False\n    screen.fill((45, 160, 95))\n\n    hp_surface = pygame.Surface((n * cell_width // 3, cell_width))\n    hp_surface.fill((45, 160, 95))\n    hp_surface.blit(hp_image, (0, 0))\n    hp_font = pygame.font.Font(None, 64)\n    hp_text = hp_font.render(f\"{health}\", True, pygame.color.Color('red'))\n    hp_text_x = 64\n    hp_text_y = cell_width // 2 - hp_text.get_height() // 2\n    hp_surface.blit(hp_text, (hp_text_x, hp_text_y))\n    screen.blit(hp_surface, (0, n * cell_width))\n\n    money_surface = pygame.Surface((n * cell_width // 3, cell_width))\n    money_surface.fill((45, 160, 95))\n    money_surface.blit(money_image, (0, 0))\n    money_font = pygame.font.Font(None, 64)\n    money_text = money_font.render(f\"{money}\", True, pygame.color.Color('yellow'))\n    money_text_x = 64\n    money_text_y = cell_width // 2 - money_text.get_height() // 2\n    money_surface.blit(money_text, (money_text_x, money_text_y))\n    screen.blit(money_surface, (n * cell_width // 3, n * cell_width))\n\n    score_surface = pygame.Surface((n * cell_width // 3, cell_width))\n    score_surface.fill((45, 160, 95))\n    score_surface.blit(score_image, (0, 0))\n    score_font = pygame.font.Font(None, 64)\n    score_text = score_font.render(f\"{score[0]}\", True, pygame.color.Color('green'))\n    score_text_x = 64\n    score_text_y = cell_width // 2 - score_text.get_height() // 2\n    score_surface.blit(score_text, (score_text_x, score_text_y))\n    screen.blit(score_surface, (n * cell_width // 3 * 2, n * cell_width))\n\n    navigation_surface = pygame.Surface((cell_width * 3, cell_width * 2.5))\n    navigation_surface.fill((45, 160, 95))\n    navigation_surface.blit(buttons_image, (0, cell_width * 0.5))\n    navigation_font = pygame.font.Font(None, 45)\n    navigation_text = navigation_font.render(\"Controls\", True, pygame.color.Color('white'))\n    navigation_text_x = 0\n    navigation_text_y = 0\n    navigation_surface.blit(navigation_text, (navigation_text_x, navigation_text_y))\n    screen.blit(navigation_surface, (n * cell_width, 0))\n\n    surface_1 = pygame.Surface((cell_width * 3, cell_width * 2))\n    surface_1.fill((45, 160, 95))\n    surface_1.blit(image_1, (0, cell_width * 1))\n    surface_1_font = pygame.font.Font(None, 64)\n    surface_1_text = surface_1_font.render(\"Mage\", True, pygame.color.Color('white'))\n    surface_1_text_x = 0\n    surface_1_text_y = 0\n    surface_1.blit(surface_1_text, (surface_1_text_x, surface_1_text_y))\n    surface_1_text_2 = surface_1_font.render(\"(ground)\", True, pygame.color.Color('white'))\n    surface_1_text_2_x = 0\n    surface_1_text_2_y = cell_width * 0.5\n    surface_1.blit(surface_1_text_2, (surface_1_text_2_x, surface_1_text_2_y))\n    screen.blit(surface_1, (n * cell_width, cell_width * 2.5))\n\n    surface_2 = pygame.Surface((cell_width * 3, cell_width * 2))\n    surface_2.fill((45, 160, 95))\n    surface_2.blit(image_2, (0, cell_width * 1))\n    surface_2_font = pygame.font.Font(None, 64)\n    surface_2_text = surface_2_font.render(\"Archer\", True, pygame.color.Color('white'))\n    surface_2_text_x = 0\n    surface_2_text_y = 0\n    surface_2.blit(surface_2_text, (surface_2_text_x, surface_2_text_y))\n    surface_2_text_2 = surface_2_font.render(\"(air)\", True, pygame.color.Color('white'))\n    surface_2_text_2_x = 0\n    surface_2_text_2_y = cell_width * 0.5\n    surface_2.blit(surface_2_text_2, (surface_2_text_2_x, surface_2_text_2_y))\n    screen.blit(surface_2, (n * cell_width, cell_width * 4.5))\n\n    
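# everything above is drawn once per level; the loop below only refreshes the boards and the HP/money/score counters\n    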
pygame.display.flip()\n    while running:\n        for event in pygame.event.get():\n            if event.type == pygame.QUIT:\n                terminate()\n            elif event.type == pygame.KEYDOWN:\n                if event.key == pygame.K_RIGHT:\n                    if not hero_right:\n                        board_player.board[player.coord[1]][player.coord[0]].image = pygame.transform.flip(\n                            board_player.board[player.coord[1]][player.coord[0]].image, True, False)\n                        hero_right = True\n                    player.update(1, 0)\n                elif event.key == pygame.K_LEFT:\n                    if hero_right:\n                        board_player.board[player.coord[1]][player.coord[0]].image = pygame.transform.flip(\n                            board_player.board[player.coord[1]][player.coord[0]].image, True, False)\n                        hero_right = False\n                    player.update(-1, 0)\n                elif event.key == pygame.K_UP:\n                    player.update(0, -1)\n                elif event.key == pygame.K_DOWN:\n                    player.update(0, 1)\n                elif event.key == pygame.K_1:\n                    if check_tower(player.coord[0], player.coord[1]) and money >= 10:\n                        towers.append(Tower(player.coord[0], player.coord[1], 'ground'))\n                        money -= 10\n                elif event.key == pygame.K_2:\n                    if check_tower(player.coord[0], player.coord[1]) and money >= 10:\n                        towers.append(Tower(player.coord[0], player.coord[1], 'flying'))\n                        money -= 10\n                if event.key == pygame.K_ESCAPE:\n                    running = False\n        if monsters_num != 0 and check_monster(monster_x, monster_y):\n            start = True\n            monster_type = random.choice(['ground', 'flying'])\n            spawn_chance = [0] * int(spawn_par * fps) + [1]\n            if random.choice(spawn_chance):\n                obj_monster = Monster(monster_x, monster_y, monster_type, par_monsters)\n                monsters.append(obj_monster)\n                monsters_num -= 1\n        if start:\n            for monster in monsters[:]:  # iterate over a copy so removing a dead monster does not skip the next one\n                monster.move()\n                reached_castle = check_monster_kill(monster.coord[0], monster.coord[1])\n                if reached_castle or monster.hp <= 0:\n                    health -= int(reached_castle)\n                    if monster.m_type == 'ground' and monster.hp <= 0:\n                        pygame.mixer.Sound('data/skell.wav').play()\n                    elif monster.m_type == 'flying' and monster.hp <= 0:\n                        pygame.mixer.Sound('data/eye.wav').play()\n                    monster.monster.kill()\n                    monsters.remove(monster)\n                    money += 5\n                    score[0] += int(5 * kf)  # score coefficient\n            if counter >= fps:\n                for tower in towers:\n                    tower.damage_monsters_near()\n                counter = 0\n            if health <= 0:\n                won = False\n                break\n        if not monsters_num and not len(monsters):\n            end_counter += 1\n\n        board_background.render(back_sprites, screen)\n        board_state.render(state_sprites, screen)\n        board_moving.render(moving_sprites, screen)\n        board_player.render(hero_sprite, screen)\n\n        hp_surface.fill((45, 160, 95))\n        hp_surface.blit(hp_image, (0, 0))\n        hp_text = hp_font.render(f\"{health}\", True, pygame.color.Color('red'))\n        hp_text_x = 64\n        hp_text_y = cell_width // 2 - hp_text.get_height() // 2\n        hp_surface.blit(hp_text, (hp_text_x, hp_text_y))\n        screen.blit(hp_surface, (0, n * cell_width))\n\n        money_surface.fill((45, 160, 95))\n        money_surface.blit(money_image, (0, 0))\n        money_text = money_font.render(f\"{money}\", True, pygame.color.Color('yellow'))\n        money_text_x = 64\n        money_text_y = cell_width // 2 - money_text.get_height() // 2\n        money_surface.blit(money_text, (money_text_x, money_text_y))\n        screen.blit(money_surface, (n * cell_width // 3, n * cell_width))\n\n        score_surface.fill((45, 160, 95))\n        score_surface.blit(score_image, (0, 0))\n        score_text = score_font.render(f\"{score[0]}\", True, pygame.color.Color('green'))\n        score_text_x = 64\n        score_text_y = cell_width // 2 - score_text.get_height() // 2\n        score_surface.blit(score_text, (score_text_x, score_text_y))\n        screen.blit(score_surface, (n * cell_width // 3 * 2, n * cell_width))\n\n        clock.tick(fps)\n        pygame.display.flip()\n        
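# towers fire roughly one volley per second: the frame counter below is reset in the damage block once it reaches fps\n        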
counter += 1\n        if end_counter >= 120:\n            won = True\n            break\n    end_screen()\n","repo_name":"ShipilinVlad/pygame_game","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":30287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72177759625","text":"#!/usr/bin/env python3\n\nimport json\n\nfrom lti import OutcomeRequest\n\n\ndef json_err(msg):\n    return json.dumps({\"Ok\": False, \"Error\": msg})\n\n\ndef main():\n    consumer = \"0c676cd363e7fdb18c3a855eec8ab180213af6c201733bc14ab50c4f37c74b29\"\n    secret = \"0f41e00085b3f411ea5ebb4093c37b56f2bce67bd00bf4a69dffbef2c09bf244\"\n    service_url = \"http://localhost:18000/courses/course-v1:remox+c1+t1/xblock/block-v1:remox+c1+t1+type@lti_consumer+block@13c61a21043e435698ec1303f884a69a/handler_noauth/outcome_service_handler\"\n    sourcedid = \"course-v1%3Aremox%2Bc1%2Bt1:localhost%3A18000-13c61a21043e435698ec1303f884a69a:55fc413721b3acac97cdd98c831b70c4\"\n    grade = \".4242424242\"\n\n    outcome_request = OutcomeRequest({\n        'consumer_key': consumer,\n        'consumer_secret': secret,\n        'lis_outcome_service_url': service_url,\n        'lis_result_sourcedid': sourcedid,\n    })\n\n    outcome_response = outcome_request.post_replace_result(grade)\n    return outcome_response\n\n\nif __name__ == \"__main__\":\n    print(main())\n","repo_name":"jupyterhub-openedx-integration/lti-grader","sub_path":"test_send_grade.py","file_name":"test_send_grade.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10855628986","text":"import numpy\nnumpy.random.seed(123)\nfrom sklearn import linear_model\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.svm import SVR\nfrom sklearn.preprocessing import StandardScaler\nimport xgboost as xgb\nfrom sklearn import neighbors\nfrom sklearn.preprocessing import Normalizer\n\nfrom keras.models import Sequential\nfrom keras.models import Model as KerasModel\nfrom keras.layers import Input, Dense, Activation, Reshape\nfrom keras.layers import Concatenate\nfrom keras.layers.embeddings import Embedding\nfrom keras.callbacks import ModelCheckpoint\n\nimport pickle\n\n\ndef embed_features(X, saved_embeddings_fname):\n    # f_embeddings = open(\"embeddings_shuffled.pickle\", \"rb\")\n    f_embeddings = open(saved_embeddings_fname, \"rb\")\n    embeddings = pickle.load(f_embeddings)\n\n    index_embedding_mapping = {1: 0, 2: 1, 4: 2, 5: 3, 6: 4, 7: 5}\n    X_embedded = []\n\n    (num_records, num_features) = X.shape\n    for record in X:\n        embedded_features = []\n        for i, feat in enumerate(record):\n            feat = int(feat)\n            if i not in index_embedding_mapping.keys():\n                embedded_features += [feat]\n            else:\n                embedding_index = index_embedding_mapping[i]\n                embedded_features += embeddings[embedding_index][feat].tolist()\n\n        X_embedded.append(embedded_features)\n\n    return numpy.array(X_embedded)\n\n\ndef split_features(X):\n    X_list = []\n\n    store_index = X[..., [1]]\n    X_list.append(store_index)\n\n    day_of_week = X[..., [2]]\n    X_list.append(day_of_week)\n\n    promo = X[..., [3]]\n    X_list.append(promo)\n\n    year = X[..., [4]]\n    X_list.append(year)\n\n    month = X[..., [5]]\n    X_list.append(month)\n\n    day = X[..., [6]]\n    X_list.append(day)\n\n    State = X[..., [7]]\n    X_list.append(State)\n\n    return X_list\n\n\nclass Model(object):\n\n    def evaluate(self, X_val, y_val):\n        assert(min(y_val) > 0)\n        guessed_sales = self.guess(X_val)\n        relative_err = numpy.absolute((y_val - 
guessed_sales) / y_val)\n result = numpy.sum(relative_err) / len(y_val)\n return result\n\n\nclass LinearModel(Model):\n\n def __init__(self, X_train, y_train, X_val, y_val):\n super().__init__()\n self.clf = linear_model.LinearRegression()\n self.clf.fit(X_train, numpy.log(y_train))\n print(\"Result on validation data: \", self.evaluate(X_val, y_val))\n\n def guess(self, feature):\n return numpy.exp(self.clf.predict(feature))\n\n\nclass RF(Model):\n\n def __init__(self, X_train, y_train, X_val, y_val):\n super().__init__()\n self.clf = RandomForestRegressor(n_estimators=200, verbose=True, max_depth=35, min_samples_split=2,\n min_samples_leaf=1)\n self.clf.fit(X_train, numpy.log(y_train))\n print(\"Result on validation data: \", self.evaluate(X_val, y_val))\n\n def guess(self, feature):\n return numpy.exp(self.clf.predict(feature))\n\n\nclass SVM(Model):\n\n def __init__(self, X_train, y_train, X_val, y_val):\n super().__init__()\n self.X_train = X_train\n self.y_train = y_train\n self.__normalize_data()\n self.clf = SVR(kernel='linear', degree=3, gamma='auto', coef0=0.0, tol=0.001,\n C=1.0, epsilon=0.1, shrinking=True, cache_size=200, verbose=False, max_iter=-1)\n\n self.clf.fit(self.X_train, numpy.log(self.y_train))\n print(\"Result on validation data: \", self.evaluate(X_val, y_val))\n\n def __normalize_data(self):\n self.scaler = StandardScaler()\n self.X_train = self.scaler.fit_transform(self.X_train)\n\n def guess(self, feature):\n return numpy.exp(self.clf.predict(feature))\n\n\nclass XGBoost(Model):\n\n def __init__(self, X_train, y_train, X_val, y_val):\n super().__init__()\n dtrain = xgb.DMatrix(X_train, label=numpy.log(y_train))\n evallist = [(dtrain, 'train')]\n param = {'nthread': -1,\n 'max_depth': 7,\n 'eta': 0.02,\n 'silent': 1,\n 'objective': 'reg:linear',\n 'colsample_bytree': 0.7,\n 'subsample': 0.7}\n num_round = 3000\n self.bst = xgb.train(param, dtrain, num_round, evallist)\n print(\"Result on validation data: \", self.evaluate(X_val, y_val))\n\n def guess(self, feature):\n dtest = xgb.DMatrix(feature)\n return numpy.exp(self.bst.predict(dtest))\n\n\nclass HistricalMedian(Model):\n\n def __init__(self, X_train, y_train, X_val, y_val):\n super().__init__()\n self.history = {}\n self.feature_index = [1, 2, 3, 4]\n for x, y in zip(X_train, y_train):\n key = tuple(x[self.feature_index])\n self.history.setdefault(key, []).append(y)\n print(\"Result on validation data: \", self.evaluate(X_val, y_val))\n\n def guess(self, features):\n features = numpy.array(features)\n features = features[:, self.feature_index]\n guessed_sales = [numpy.median(self.history[tuple(feature)]) for feature in features]\n return numpy.array(guessed_sales)\n\n\nclass KNN(Model):\n\n def __init__(self, X_train, y_train, X_val, y_val):\n super().__init__()\n self.normalizer = Normalizer()\n self.normalizer.fit(X_train)\n self.clf = neighbors.KNeighborsRegressor(n_neighbors=10, weights='distance', p=1)\n self.clf.fit(self.normalizer.transform(X_train), numpy.log(y_train))\n print(\"Result on validation data: \", self.evaluate(self.normalizer.transform(X_val), y_val))\n\n def guess(self, feature):\n return numpy.exp(self.clf.predict(self.normalizer.transform(feature)))\n\n\nclass NN_with_EntityEmbedding(Model):\n\n def __init__(self, X_train, y_train, X_val, y_val):\n super().__init__()\n self.epochs = 10\n self.checkpointer = ModelCheckpoint(filepath=\"best_model_weights.hdf5\", verbose=1, save_best_only=True)\n self.max_log_y = max(numpy.max(numpy.log(y_train)), numpy.max(numpy.log(y_val)))\n 
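# targets are fitted as log(sales) / max_log_y (see _val_for_fit) so the sigmoid output layer spans the whole range; predictions are inverted in _val_for_pred\n        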
self.__build_keras_model()\n self.fit(X_train, y_train, X_val, y_val)\n\n def preprocessing(self, X):\n X_list = split_features(X)\n return X_list\n\n def __build_keras_model(self):\n input_store = Input(shape=(1,))\n output_store = Embedding(1115, 10, name='store_embedding')(input_store)\n output_store = Reshape(target_shape=(10,))(output_store)\n\n input_dow = Input(shape=(1,))\n output_dow = Embedding(7, 6, name='dow_embedding')(input_dow)\n output_dow = Reshape(target_shape=(6,))(output_dow)\n\n input_promo = Input(shape=(1,))\n output_promo = Dense(1)(input_promo)\n\n input_year = Input(shape=(1,))\n output_year = Embedding(3, 2, name='year_embedding')(input_year)\n output_year = Reshape(target_shape=(2,))(output_year)\n\n input_month = Input(shape=(1,))\n output_month = Embedding(12, 6, name='month_embedding')(input_month)\n output_month = Reshape(target_shape=(6,))(output_month)\n\n input_day = Input(shape=(1,))\n output_day = Embedding(31, 10, name='day_embedding')(input_day)\n output_day = Reshape(target_shape=(10,))(output_day)\n\n input_germanstate = Input(shape=(1,))\n output_germanstate = Embedding(12, 6, name='state_embedding')(input_germanstate)\n output_germanstate = Reshape(target_shape=(6,))(output_germanstate)\n\n input_model = [input_store, input_dow, input_promo,\n input_year, input_month, input_day, input_germanstate]\n\n output_embeddings = [output_store, output_dow, output_promo,\n output_year, output_month, output_day, output_germanstate]\n\n output_model = Concatenate()(output_embeddings)\n output_model = Dense(1000, kernel_initializer=\"uniform\")(output_model)\n output_model = Activation('relu')(output_model)\n output_model = Dense(500, kernel_initializer=\"uniform\")(output_model)\n output_model = Activation('relu')(output_model)\n output_model = Dense(1)(output_model)\n output_model = Activation('sigmoid')(output_model)\n\n self.model = KerasModel(inputs=input_model, outputs=output_model)\n\n self.model.compile(loss='mean_absolute_error', optimizer='adam')\n\n def _val_for_fit(self, val):\n val = numpy.log(val) / self.max_log_y\n return val\n\n def _val_for_pred(self, val):\n return numpy.exp(val * self.max_log_y)\n\n def fit(self, X_train, y_train, X_val, y_val):\n self.model.fit(self.preprocessing(X_train), self._val_for_fit(y_train),\n validation_data=(self.preprocessing(X_val), self._val_for_fit(y_val)),\n epochs=self.epochs, batch_size=128,\n # callbacks=[self.checkpointer],\n )\n # self.model.load_weights('best_model_weights.hdf5')\n print(\"Result on validation data: \", self.evaluate(X_val, y_val))\n\n def guess(self, features):\n features = self.preprocessing(features)\n result = self.model.predict(features).flatten()\n return self._val_for_pred(result)\n\n\nclass NN(Model):\n\n def __init__(self, X_train, y_train, X_val, y_val):\n super().__init__()\n self.epochs = 10\n self.checkpointer = ModelCheckpoint(filepath=\"best_model_weights.hdf5\", verbose=1, save_best_only=True)\n self.max_log_y = max(numpy.max(numpy.log(y_train)), numpy.max(numpy.log(y_val)))\n self.__build_keras_model()\n self.fit(X_train, y_train, X_val, y_val)\n\n def __build_keras_model(self):\n self.model = Sequential()\n self.model.add(Dense(1000, kernel_initializer=\"uniform\", input_dim=1183))\n self.model.add(Activation('relu'))\n self.model.add(Dense(500, kernel_initializer=\"uniform\"))\n self.model.add(Activation('relu'))\n self.model.add(Dense(1))\n self.model.add(Activation('sigmoid'))\n\n self.model.compile(loss='mean_absolute_error', optimizer='adam')\n\n def 
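# Hedged sketch (the helper name is illustrative, not from the repo): once
# NN_with_EntityEmbedding has trained, the learned entity embeddings can be
# read back from the named Embedding layers, e.g. to build the pickle that
# embed_features() above consumes; layer names follow __build_keras_model().
def extract_embeddings(nn):
    layer_names = ['store_embedding', 'dow_embedding', 'year_embedding',
                   'month_embedding', 'day_embedding', 'state_embedding']
    # Keras get_weights() returns a one-element list holding the lookup matrix
    return [nn.model.get_layer(name).get_weights()[0] for name in layer_names]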
_val_for_fit(self, val):\n val = numpy.log(val) / self.max_log_y\n return val\n\n def _val_for_pred(self, val):\n return numpy.exp(val * self.max_log_y)\n\n def fit(self, X_train, y_train, X_val, y_val):\n self.model.fit(X_train, self._val_for_fit(y_train),\n validation_data=(X_val, self._val_for_fit(y_val)),\n epochs=self.epochs, batch_size=128,\n # callbacks=[self.checkpointer],\n )\n # self.model.load_weights('best_model_weights.hdf5')\n print(\"Result on validation data: \", self.evaluate(X_val, y_val))\n\n def guess(self, features):\n result = self.model.predict(features).flatten()\n return self._val_for_pred(result)\n","repo_name":"entron/entity-embedding-rossmann","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":10757,"program_lang":"python","lang":"en","doc_type":"code","stars":860,"dataset":"github-code","pt":"81"} +{"seq_id":"3106121061","text":"\"\"\"\nhhpy.modelling.py\n~~~~~~~~~~~~~~~~~\n\nContains a model class that is based on pandas DataFrames and wraps around sklearn and other frameworks\nto provide convenient train test functions.\n\n\"\"\"\n\n# ---- imports\n# --- standard imports\nimport itertools\nimport warnings\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# --- third party imports\nfrom typing import Sequence, Mapping, Union, Callable, Optional, Any, Tuple, List\n\nfrom sklearn.exceptions import DataConversionWarning\n\n# ---- optional imports\ntry:\n from IPython.core.display import display\nexcept ImportError:\n display = print\n\n# --- local imports\nfrom hhpy.main import GROUPBY_DUMMY, export, BaseClass, is_list_like, assert_list, tprint, DocstringProcessor, \\\n SequenceOrScalar, DFOrArray, list_exclude, list_merge, list_intersection, silentcopy\nfrom hhpy.ds import docstr as docstr_ds, assert_df, k_split, df_score, drop_duplicate_cols, top_n, concat_cols\nfrom hhpy.plotting import ax_as_list, legend_outside, rcParams as hpt_rcParams, docstr as docstr_hpt\nfrom hhpy.ipython import display_df\n\n# ---- variables\n# --- validations\nvalidations = {\n 'Model__predict__return_type': ['y', 'df', 'DataFrame'],\n 'Models__predict__return_type': ['y', 'df', 'DataFrame', 'self'],\n 'Models__score__return_type': ['self', 'df', 'DataFrame'],\n 'Models__score__scores': ['r2', 'rmse', 'mae', 'stdae', 'medae', 'pae'],\n 'fit__fit_type': ['train_test', 'k_cross', 'final'],\n 'setattr__target': ['all', 'self', 'children', 'a', 's', 'c']\n}\n# --- docstr\ndocstr = DocstringProcessor(\n df='Pandas DataFrame containing the training and testing data. 
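# Hedged sketch (a self-contained analogue, not hhpy's actual implementation):
# the DocstringProcessor used here substitutes shared parameter descriptions
# into docstrings at definition time via %-style placeholders.
def docstr_sub(**params):
    def deco(func):
        if func.__doc__:
            func.__doc__ = func.__doc__ % params  # fills %(name)s placeholders
        return func
    return deco

@docstr_sub(df='Pandas DataFrame containing the training and testing data')
def demo(df):
    """:param df: %(df)s"""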
'\n 'Can be saved to the Model object or supplied on an as-needed basis.',\n X_ref='List of features (predictors) used for training the model',\n y_ref='List of labels (targets) to be predicted',\n X='The feature (predictor) data used for training as DataFrame, np.array or column names',\n y='The label (target) data used for training as DataFrame, np.array or column names',\n X_test='The feature (predictor) data used for testing as DataFrame, np.array or column names',\n y_test='The label (target) data used for testing as DataFrame, np.array or column names',\n df_train='Pandas DataFrame containing the training data, optional if array like data is passed for X/y',\n df_test='Pandas DataFrame containing the testing data, optional if array like data is passed for X/y test',\n X_predict='The feature (predictor) data used for predicting as DataFrame, np.array or column names',\n y_predict='The label (target) data used for predicting as DataFrame, np.array or column names. Specifying y '\n 'is only necessary for convolutional or time-series type models [optional]',\n df_predict='Pandas DataFrame containing the predict data, optional if array like data is passed for X_predict',\n model_in='Any model object that implements .fit and .predict',\n name='Name of the model, used for naming columns [optional]',\n dropna='Whether to drop rows containing NA in the training data [optional]',\n scaler_X='Scaler object that implements .transform and .inverse_transform, applied to the features (predictors) '\n 'before training and inversely after predicting [optional]',\n scaler_y='Scaler object that implements .transform and .inverse_transform, applied to the labels (targets) '\n 'before training and inversely after predicting [optional]',\n do_print='Whether to print the steps to console [optional]',\n display_score='Whether to display the score DataFrame [optional]',\n ensemble='if True also predict with Ensemble like combinations of models. If True or mean calculate '\n 'mean of individual predictions. If median calculate median of individual predictions. 
[optional]',\n printf='print function to use for logging [optional]',\n groupby=docstr_ds.params['groupby'],\n split_groupby='Whether to train one model for each groupby level [optional]',\n groupby_levels='The levels to use for split groupby, if None will be inferred [optional]',\n key='key of attribute to set',\n value='value of attribute to set',\n multi='postfixes to use for multi output [deprecated]',\n **validations\n)\n\n\n# ---- classes\n# noinspection PyPep8Naming\n@docstr\n@export\nclass Model(BaseClass):\n \"\"\"\n A unified modeling class that is extended from sklearn, accepts any model that implements .fit and .predict\n\n :param model: %(model_in)s\n :param name: %(name)s\n :param X_ref: %(X_ref)s\n :param y_ref: %(y_ref)s\n :param groupby: %(groupby)s\n :param split_groupby: %(split_groupby)s\n :param groupby_levels: %(groupby_levels)s\n \"\"\"\n\n # --- globals\n __name__ = 'Model'\n __attributes__ = ['name', 'base_model', 'model', 'X_ref', 'y_ref', 'groupby', 'split_groupby', 'groupby_levels',\n 'is_fit']\n\n # --- functions\n def __init__(self, model: Any = None, name: str = 'pred', X_ref: SequenceOrScalar = None,\n y_ref: SequenceOrScalar = None, groupby: SequenceOrScalar = None, split_groupby: bool = None,\n groupby_levels: SequenceOrScalar = None) -> None:\n\n # -- assert\n if X_ref is not None:\n X_ref = assert_list(X_ref)\n if y_ref is not None:\n y_ref = assert_list(y_ref)\n if groupby is not None:\n groupby = assert_list(groupby)\n if groupby_levels is not None:\n groupby_levels = assert_list(groupby_levels)\n\n # -- assign\n self.name = name\n # set values\n if isinstance(model, str):\n model = eval(model)\n self.base_model = silentcopy(model)\n self.model = {}\n self.X_ref = X_ref\n self.y_ref = y_ref\n self.groupby = groupby\n self.split_groupby = split_groupby\n self.groupby_levels = groupby_levels\n\n # non set values\n self.is_fit = False\n self.reset()\n\n def reset(self) -> None:\n \"\"\"\n reset self to unfit state and delete copy of model\n\n :return: None\n \"\"\"\n\n self.is_fit = False\n self.model = {}\n\n @docstr\n def fit(self, X: Union[DFOrArray, SequenceOrScalar] = None, y: Union[DFOrArray, SequenceOrScalar] = None,\n df: pd.DataFrame = None, dropna: bool = True, X_test: Union[DFOrArray, SequenceOrScalar] = None,\n y_test: Union[DFOrArray, SequenceOrScalar] = None, df_test: pd.DataFrame = None,\n groupby: SequenceOrScalar = None, split_groupby: bool = None, k: int = 0, **kwargs) -> None:\n \"\"\"\n generalized fit method extending on model.fit\n\n :param X: %(X)s\n :param y: %(y)s\n :param df: %(df_train)s\n :param dropna: %(dropna)s\n :param X_test: %(X_test)s\n :param y_test: %(y_test)s\n :param df_test: %(df_test)s\n :param groupby: %(groupby)s\n :param split_groupby: %(split_groupby)s\n :param k: index of the model to fit [optional]\n :param kwargs: additional keyword arguments to pass to model.fit [optional]\n :return: None\n \"\"\"\n\n # -- assert\n # - groupby\n if groupby is None:\n groupby = self.groupby\n groupby = assert_list(groupby)\n self.groupby = groupby\n # - split groupby\n if split_groupby is None:\n split_groupby = self.split_groupby\n else:\n self.split_groupby = split_groupby\n # - k\n k = assert_list(k)\n if split_groupby and k != [0]:\n raise ValueError(\n \"k split is not supported for split_groupby==True\")\n # - check for groupby in kwargs\n\n # if X and y are passed separately: concat them to a DataFrame\n if df is None:\n\n _df_X = assert_df(X)\n _df_y = assert_df(y)\n # if both X and y are DataFrames we can use their 
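# Hedged usage sketch (column names and DataFrames are illustrative): the
# Model wrapper above accepts any estimator exposing .fit/.predict plus the
# feature / target column references.
from sklearn.linear_model import LinearRegression
m = Model(LinearRegression(), name='lin', X_ref=['x1', 'x2'], y_ref=['y'])
# m.fit(df=df_train); m.predict(df=df_train, return_type='df') adds a 'y_lin' column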
column names\n if isinstance(X, pd.DataFrame) and isinstance(y, (pd.DataFrame, pd.Series)):\n df = pd.concat([_df_X, _df_y], axis=1)\n # if not then we override the default column names when concatting to avoid duplicates\n else:\n df = pd.concat(\n [_df_X, _df_y], ignore_index=True, sort=False, axis=1)\n # save column names to self\n self.X_ref = _df_X.columns\n self.y_ref = _df_y.columns\n # in this case groupby is not supported\n groupby = []\n # get X_ref and y_ref\n else:\n if self.X_ref is None:\n self.X_ref = assert_list(X)\n if self.y_ref is None:\n self.y_ref = assert_list(y)\n\n df = assert_df(df)\n if df_test is not None:\n df_test = assert_df(df_test)\n\n # handle dropna\n if dropna:\n _dropna_cols = list_intersection(\n df.columns, list_merge(self.X_ref, self.y_ref, groupby))\n df = df.dropna(subset=_dropna_cols)\n if df_test is not None:\n df_test = df_test.dropna(subset=_dropna_cols)\n\n # -- init\n if split_groupby:\n df['_groupby'] = concat_cols(df, groupby)\n if df_test is not None:\n df_test['_groupby'] = concat_cols(df_test, groupby)\n if self.groupby_levels is None:\n self.groupby_levels = top_n(df['_groupby'])\n _indices = self.groupby_levels + []\n else:\n _indices = k\n\n # -- fit\n # noinspection PyTypeChecker\n warnings.simplefilter('ignore', (FutureWarning, DataConversionWarning))\n # get model by index\n for _index in _indices:\n _model = silentcopy(self.base_model)\n _varnames = _model.fit.__code__.co_varnames\n _kwargs = kwargs.copy()\n\n # handle split groupby\n if split_groupby:\n # add _groupby column\n _df = df[lambda _: _['_groupby'] == _index]\n if df_test is None:\n _df_test = None\n else:\n _df_test = df_test[lambda _: _['_groupby'] == _index]\n # check if groupby variables in X_ref and clean\n for _X_ref in self.X_ref:\n for _groupby in groupby:\n if _groupby in _X_ref:\n self.X_ref = list_exclude(self.X_ref, _X_ref)\n else:\n _df = df\n _df_test = df_test\n\n # - X / y train\n _X = _df[self.X_ref]\n _y = _df[self.y_ref]\n\n # - X / y test\n if _df_test is None:\n _X_test = X_test\n _y_test = y_test\n else:\n _X_test = _df_test[self.X_ref]\n _y_test = _df_test[self.y_ref]\n\n # pass X / y test only if fit can handle it\n if 'groupby' not in _kwargs.keys() and 'groupby' in _varnames and groupby not in [None, []]:\n _X = pd.concat([_X, _df[groupby]], axis=1)\n if _X_test is not None:\n _X_test = pd.concat([_X_test, _df_test[groupby]], axis=1)\n _kwargs['groupby'] = groupby\n if 'X_test' not in _kwargs.keys() and 'X_test' in _varnames:\n _kwargs['X_test'] = _X_test\n if 'y_test' not in _kwargs.keys() and 'y_test' in _varnames:\n _kwargs['y_test'] = _y_test\n\n # use the prepared _kwargs so groupby / X_test / y_test actually reach the child model\n _model.fit(X=_X, y=_y, **_kwargs)\n self.model[_index] = _model\n del _model\n\n # noinspection PyTypeChecker\n warnings.simplefilter(\n 'default', (FutureWarning, DataConversionWarning))\n self.is_fit = True\n\n @docstr\n def predict(self, X: Union[DFOrArray, SequenceOrScalar] = None, y: Union[DFOrArray, SequenceOrScalar] = None,\n df: pd.DataFrame = None, return_type: str = 'y', k_index: pd.Series = None,\n groupby: SequenceOrScalar = None, split_groupby: bool = None,\n handle_na: bool = True, multi: SequenceOrScalar = None, **kwargs\n ) -> Union[pd.Series, pd.DataFrame]:\n \"\"\"\n Generalized predict method based on model.predict\n\n :param X: %(X)s\n :param y: %(y)s\n :param df: %(df)s\n :param return_type: one of %(Model__predict__return_type)s, if 'y' returns a pandas Series / DataFrame\n with only the predictions, if one of 'df','DataFrame' returns the full DataFrame with predictions added\n 
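# Hedged sketch (toy data, illustrative column names): with split_groupby=True
# the fit() above trains one fresh copy of base_model per level of the
# concatenated groupby key and stores them keyed by level in self.model.
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
toy = pd.DataFrame({'region': ['a'] * 20 + ['b'] * 20,
                    'x': np.random.rand(40), 'y': np.random.rand(40)})
by_level = {}
for level, part in toy.groupby('region'):
    est = LinearRegression()  # fresh estimator per level, mirroring silentcopy(self.base_model)
    est.fit(part[['x']], part['y'])
    by_level[level] = est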
:param k_index: If specified and model is k_cross split: return only the predictions for each test subset\n :param groupby: %(groupby)s\n :param split_groupby: %(split_groupby)s\n :param handle_na: Whether to handle NaN values (prediction will be NaN) [optional]\n :param multi: %(multi)s\n :param kwargs: additional keyword arguments passed to child model's predict [optional]\n :return: see return_type\n \"\"\"\n\n # -- assert\n # - model is fit\n if not self.is_fit:\n raise ValueError('Model is not fit yet')\n # - valid return type\n if return_type not in validations['Model__predict__return_type']:\n raise ValueError(\n f\"return type must be one of {validations['Model__predict__return_type']}\")\n # - df\n if df is not None:\n df = assert_df(df)\n else:\n df = pd.concat([assert_df(X), assert_df(y)], axis=1)\n # - groupby\n if groupby in [None, []]:\n groupby = self.groupby\n groupby = assert_list(groupby)\n # - split_groupby\n if split_groupby is None:\n split_groupby = self.split_groupby\n if split_groupby is not None:\n df['_groupby'] = concat_cols(df, groupby)\n # backwards compatibility\n _ = k_index\n _ = multi\n\n _y_ref_pred = [f\"{_}_{self.name}\" for _ in self.y_ref]\n\n # - predict using sklearn api\n _df_out = []\n\n # loop model dictionary\n for _k, (_key, _model) in enumerate(self.model.items()):\n\n # init\n _kwargs = kwargs.copy()\n\n # get _df and handle split_groupby\n if split_groupby:\n _df: pd.DataFrame = df[lambda _: _['_groupby'] == _key]\n else:\n _df = df.copy()\n _df['_k_model'] = _k\n\n # - handle kwargs\n _varnames = _model.predict.__code__.co_varnames\n # groupby\n if 'groupby' not in _kwargs.keys() and 'groupby' in _varnames:\n _X = _df[self.X_ref + groupby] # .dropna()\n _kwargs['groupby'] = groupby\n else:\n _X = _df[self.X_ref]\n # noinspection PyUnresolvedReferences\n _na_indices = _X[_X.isna().any(axis=1)].index.tolist()\n # y\n if 'y' not in _kwargs.keys() and 'y' in _varnames:\n _y = _df[self.y_ref]\n # noinspection PyUnresolvedReferences\n _na_indices = list_merge(\n _na_indices, _y[_y.isna().any(axis=1)].index.tolist()) # collect NaN rows of y as well (was recomputed from _X)\n if handle_na: # drop na\n _y = _y.drop(_na_indices)\n _kwargs['y'] = _y\n if handle_na: # drop na (has to be here so y na indices have been handled)\n _X = _X.drop(_na_indices)\n # - predict\n try:\n _y_pred = _model.predict(_X, **_kwargs)\n except TypeError:\n # sometimes the model accepts y as keyword argument but cannot actually handle it\n # I'm looking at you sklearn.multioutput\n _kwargs.pop('y') # pop from _kwargs, the dict actually passed to predict\n _y_pred = _model.predict(_X, **_kwargs)\n\n # cast to pandas\n if len(_y_pred) != len(_X):\n raise ValueError(\n f\"The lengths of y_pred ({len(_y_pred)}) and X ({len(_X)}) do not match\")\n _y_pred = pd.DataFrame(_y_pred, index=_X.index)\n _y_pred.columns = _y_ref_pred\n # bring back dropped nans\n if handle_na:\n _y_pred = pd.concat([_y_pred, pd.DataFrame(np.nan, index=_na_indices, columns=_y_pred.columns)])\\\n .sort_index()\n\n # feed back to df\n for _y_pred_i in _y_pred.columns:\n _df[_y_pred_i] = _y_pred[_y_pred_i]\n _df_out.append(_df)\n\n # # for k stuff (potentially unneeded as of 2020-07-28, to be verified)\n # # get y lab\n # if _ks == 1:\n # _y_lab = _y_ref_pred\n # _y_labs = [_y_ref_pred]\n # else:\n # if k_index is None:\n # _y_lab = [f\"{_}_{_k}\" for _ in assert_list(_y_ref_pred)]\n # _y_labs += _y_lab\n # else:\n # _y_labs = [_y_ref_pred]\n # if len(_y_labs) < _y_pred.shape[1]: # some regressors (TimeSeriesRegressors)\n # # might return multi output\n # _y_labs_new = []\n # if multi is None:\n # multi = 
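# Hedged sketch (stand-in arithmetic instead of a real model): predict()
# above drops rows with NaN features before calling the child model and then
# re-inserts NaN predictions at those positions, so the output index always
# matches the input.
import numpy as np
import pandas as pd
X = pd.DataFrame({'x': [1.0, np.nan, 3.0]})
ok = X.dropna()
y_hat = pd.Series(ok['x'] * 2, index=ok.index)  # stand-in for model.predict
y_full = y_hat.reindex(X.index)  # NaN wherever the features were NaN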
_y_pred.shape[1] // len(_y_labs)\n # for _y_lab in _y_labs:\n # for _it in range(multi):\n # _y_labs_new.append(f\"{_y_lab}_{_it}\")\n # _y_labs = _y_labs_new + []\n #\n # if k_index is None: # all\n # for _it, _y_lab in enumerate(_y_labs):\n # _df[_y_lab] = _y_pred[_y_pred.columns[_it]]\n # else:\n # # during first iteration: assert output columns are available\n # if _k == 0:\n # for __y_ref_pred in assert_list(_y_ref_pred):\n # if __y_ref_pred not in _df.columns:\n # _df[__y_ref_pred] = np.nan\n # # write output\n # # we loop the columns because _y is possible to be multi output or single output while _y_pred is a DF\n # # and np where does weird stuff if you pass a single column on the left and a DF of the right of ==\n # for _col, __y_ref_pred in enumerate(assert_list(_y_ref_pred)):\n # _df[__y_ref_pred] = np.where(k_index == _k, _y_pred[_y_pred.columns[_col]], _df[__y_ref_pred])\n # concat\n _df_out = pd.concat(_df_out)\n\n # special case if there is only one target (most algorithms support only one)\n if len(self.y_ref) == 1:\n _y_ref_pred = _y_ref_pred[0]\n\n if return_type == 'y':\n return _df_out[_y_ref_pred]\n else:\n return _df_out\n\n @docstr\n def setattr(self, key: str, value: Any, target: str = 'all') -> None:\n \"\"\"\n Set attribute on self and/or all k instances of one's model\n\n :param key: %(key)s\n :param value: %(value)s\n :param target: One of %(setattr__target)s, for 'self' sets only on self, for 'children' sets for children and\n for 'all' tries to set on both\n :return: None\n \"\"\"\n\n # -- assert\n if target not in validations['setattr__target']:\n raise ValueError(\n f\"target must be one of {validations['setattr__target']}\")\n\n # -- main\n # - self\n if target in ['all', 'a', 'self', 's'] and hasattr(self, key):\n setattr(self, key, value)\n\n # - children\n if target in ['all', 'a', 'children', 'c']:\n for _model in self.model.values(): # self.model is a dict keyed by k / groupby level\n if hasattr(_model, key):\n setattr(_model, key, value)\n\n\n# noinspection PyPep8Naming\n@docstr\n@export\nclass Models(BaseClass):\n \"\"\"\n Collection of Models that allow for fitting and predicting with multiple Models at once,\n comparing accuracy and creating Ensembles\n\n :param args: multiple Model objects that will form a Models Collection\n :param name: name of the collection\n :param df: %(df)s\n :param X_ref: %(X_ref)s\n :param y_ref: %(y_ref)s\n :param groupby: %(groupby)s\n :param split_groupby: %(split_groupby)s\n :param groupby_levels: %(groupby_levels)s\n :param scaler_X: %(scaler_X)s\n :param scaler_y: %(scaler_y)s\n :param printf: %(printf)s\n \"\"\"\n\n # --- globals\n __name__ = 'Models'\n __attributes__ = ['models', 'fit_type', 'df', 'X_ref', 'y_ref', 'y_pred', 'groupby', 'split_groupby',\n 'groupby_levels', 'scaler_X', 'scaler_y', 'model_names', 'df_score', 'k_tests', 'printf']\n __dependent_classes__ = [Model]\n\n # --- functions\n def __init__(self, *args: Any, df: pd.DataFrame = None, X_ref: SequenceOrScalar = None,\n y_ref: SequenceOrScalar = None, groupby: SequenceOrScalar = None, split_groupby: bool = False,\n groupby_levels: SequenceOrScalar = None, scaler_X: Any = None, scaler_y: Any = None,\n printf: Callable = tprint) -> None:\n\n # -- assert\n if isinstance(scaler_X, type):\n scaler_X = scaler_X()\n if isinstance(scaler_y, type):\n scaler_y = scaler_y()\n if X_ref is not None:\n X_ref = assert_list(X_ref)\n if y_ref is not None:\n y_ref = assert_list(y_ref)\n if groupby is not None:\n groupby = assert_list(groupby)\n if groupby_levels is not None:\n groupby_levels = 
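# Hedged usage sketch (estimator choice, scalers and column names are
# illustrative): the Models collection bundles several estimators so they
# can be split, fit, predicted and scored together.
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import StandardScaler
df_toy = pd.DataFrame(np.random.rand(50, 3), columns=['x1', 'x2', 'y'])
ms = Models(LinearRegression(), RandomForestRegressor(),
            df=df_toy, X_ref=['x1', 'x2'], y_ref=['y'],
            scaler_X=StandardScaler, scaler_y=StandardScaler)  # classes get instantiated internally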
assert_list(groupby_levels)\n\n # -- init\n _models = []\n _model_names = []\n # ensure hhpy.modelling.Model\n _it = -1\n if len(args) == 0:\n warnings.warn('No Models passed')\n for _arg in args:\n for _model in assert_list(_arg):\n\n _it += 1\n\n if not isinstance(_model, Model):\n if hasattr(_model, 'name'):\n _name = _model.name\n elif hasattr(_model, '__name__'):\n _name = f\"{_model.__name__}_{_it}\"\n else:\n _name = f\"model_{_it}\"\n _model = Model(_model, name=_name, X_ref=X_ref, y_ref=y_ref, groupby=groupby,\n split_groupby=split_groupby, groupby_levels=groupby_levels)\n _models.append(silentcopy(_model))\n if _model.name not in _model_names:\n _model_names.append(_model.name)\n\n # -- assign\n self.models = _models\n self.fit_type = None\n if df is not None:\n self.df = df.copy()\n else:\n self.df = None\n self.y_ref = y_ref\n if X_ref is not None:\n self.X_ref = X_ref\n elif df is not None:\n # default to all columns not in y_ref / groupby\n self.X_ref = [_ for _ in df.columns if _ not in y_ref + groupby]\n else:\n self.X_ref = []\n self.groupby = groupby\n self.split_groupby = split_groupby\n self.groupby_levels = groupby_levels\n self.scaler_X = scaler_X\n self.scaler_y = scaler_y\n self.model_names = _model_names\n self.printf = printf\n\n # - not given attributes\n self.df_score = None\n self.k_tests = None\n self.y_pred = []\n self.multi = None\n\n @property\n def df_test(self) -> Union[None, pd.DataFrame, List[pd.DataFrame]]:\n # None case\n if self.df is None or self.fit_type is None:\n return None\n # train-test case\n if self.fit_type == 'train_test':\n return self.df[lambda _: _['_k_index'] == self.k_tests[0]]\n # k-cross case\n _df_tests = []\n for _k_test in self.k_tests:\n _df_tests.append(self.df[lambda _: _['_k_index'] == _k_test])\n return _df_tests\n\n def reset(self) -> None:\n \"\"\"\n reset all child models to an unfit state\n\n :return: None\n \"\"\"\n\n # - loop child models and call reset\n for _model in self.models:\n _model.reset()\n # - reset df score\n self.df_score = None\n # - reset k tests\n self.k_tests = None\n # - drop predictions from df\n for _y_pred in list_intersection(self.y_pred, assert_list(self.df.columns)):\n self.df = self.df.drop(_y_pred, axis=1)\n\n @docstr\n def k_split(self, **kwargs):\n \"\"\"\n apply hhpy.ds.k_split to self to create train-test or k-cross ready data\n\n :param kwargs: keyword arguments passed to :func:`~hhpy.ds.k_split`\n :return: None\n \"\"\"\n\n # -- init\n if self.df is None:\n raise ValueError('You must specify a df')\n\n # -- split\n _k_index = k_split(df=self.df, return_type='s', **kwargs)\n self.df['_k_index'] = _k_index\n\n @docstr\n def model_by_name(self, name: Union[list, str]) -> Union[Model, list]:\n \"\"\"\n extract a list of Models from the collection by their names\n\n :param name: name of the Model\n :return: list of Models\n \"\"\"\n\n _models = []\n for _model in self.models:\n if _model.name in assert_list(name):\n if not is_list_like(name):\n return _model\n else:\n _models.append(_model)\n return _models\n\n @docstr\n def fit(self, fit_type: str = 'train_test', k_test: Optional[int] = 0, groupby: SequenceOrScalar = None,\n do_print: bool = True, **kwargs):\n \"\"\"\n fit all Model objects in collection\n\n :param fit_type: one of %(fit__fit_type)s\n :param k_test: which k_index to use as test data\n :param groupby: %(groupby)s\n :param do_print: %(do_print)s\n :param kwargs: Other keyword arguments passed to :func:`~Model.fit`\n :return: None\n \"\"\"\n\n # -- assert\n if fit_type not in 
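# Hedged sketch continuing the illustrative ms object from above: k_split()
# assigns a '_k_index' fold column, fit(fit_type='train_test', k_test=0)
# holds fold 0 out as test data, and the df_test property returns those rows.
ms.k_split(k=5)  # adds '_k_index' with folds 0..4
ms.fit(fit_type='train_test', k_test=0)
held_out = ms.df_test  # rows where _k_index == 0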
validations['fit__fit_type']:\n raise ValueError(\n f\"fit type must be one of {validations['fit__fit_type']}\")\n\n # -- init\n if groupby is None:\n groupby = self.groupby\n # get df\n _df = self.df.copy()\n # groupby and k index\n # -- apply scaler to X and y separately\n warnings.simplefilter('ignore', DataConversionWarning)\n # 0 column features cannot be scaled\n if self.scaler_X is not None and _df[self.X_ref].shape[1] > 0:\n self.scaler_X = self.scaler_X.fit(_df[self.X_ref])\n _df[self.X_ref] = self.scaler_X.transform(_df[self.X_ref])\n else:\n self.scaler_X = None\n if self.scaler_y is not None:\n self.scaler_y = self.scaler_y.fit(_df[self.y_ref])\n _df[self.y_ref] = self.scaler_y.transform(_df[self.y_ref])\n warnings.simplefilter('default', DataConversionWarning)\n # split\n if fit_type == 'train_test':\n _k_tests = [k_test]\n # self.df['_test'] = self.df['_k_index'] == k_test\n elif fit_type == 'k_cross':\n _k_tests = sorted(_df['_k_index'].unique())\n else: # fit_type == 'final'\n _k_tests = [-1]\n # get df train and df test\n for _k_test in _k_tests:\n\n _df_train = _df[lambda _: _['_k_index'] != _k_test]\n _df_test = _df[lambda _: _['_k_index'] == _k_test]\n\n for _model in self.models:\n\n if do_print:\n self.printf('fitting model {}...'.format(_model.name))\n\n _model.fit(X=self.X_ref, y=self.y_ref, df=_df_train, df_test=_df_test, groupby=groupby,\n k=_k_test, **kwargs)\n\n self.fit_type = fit_type\n self.k_tests = _k_tests\n\n @docstr\n def predict(\n self, X: Union[DFOrArray, SequenceOrScalar] = None, y: Union[DFOrArray, SequenceOrScalar] = None,\n df: pd.DataFrame = None, return_type: str = 'self', ensemble: bool = False, k_predict_type: str = 'test',\n groupby: SequenceOrScalar = None, multi: SequenceOrScalar = None, do_print: bool = True, **kwargs\n ) -> Optional[Union[pd.Series, pd.DataFrame]]:\n \"\"\"\n predict with all models in collection\n\n :param X: %(X_predict)s\n :param y: %(y_predict)s\n :param df: %(df_predict)s\n :param return_type: one of %(Models__predict__return_type)s\n :param ensemble: %(ensemble)s\n :param k_predict_type: 'test' or 'all'\n :param groupby: %(groupby)s\n :param multi: %(multi)s\n :param do_print: %(do_print)s\n :param kwargs: other keyword arguments passed to Model.predict [optional]\n :return: if return_type is self: None, else see Model.predict\n \"\"\"\n\n # -- assert\n # backwards compatibility\n _ = multi\n # return_type\n _valid_return_types = ['self', 'df', 'df_full', 'DataFrame']\n assert (\n return_type in _valid_return_types), f\"return_type must be one of {_valid_return_types}\"\n # fit_type\n if not self.fit_type:\n raise ValueError('Model is not fit yet')\n # X / y\n if df is None:\n if isinstance(X, pd.DataFrame):\n if isinstance(y, pd.DataFrame):\n df = pd.concat([X, y], axis=1)\n else:\n df = X\n else:\n df = self.df.copy()\n\n # - handle scaler transformation\n warnings.simplefilter('ignore', DataConversionWarning)\n if self.scaler_X is not None:\n df[self.X_ref] = self.scaler_X.transform(df[self.X_ref])\n if self.scaler_y is not None:\n df[self.y_ref] = self.scaler_y.transform(df[self.y_ref])\n\n _y_preds = []\n _y_ref_preds = []\n _model_names = []\n\n for _model in self.models:\n\n if do_print:\n self.printf('predicting model {}...'.format(_model.name))\n\n _model_names.append(_model.name)\n\n # get config for k_cross\n _k_index = None\n if self.fit_type == 'k_cross':\n # k_cross_type test: return only test prediction for each k\n if k_predict_type == 'test':\n _k_index = self.df['_k_index']\n _y_ref_preds += 
[f\"{_}_{_model.name}\" for _ in _model.y_ref]\n # k_cross_type all: return all predictions\n else:\n for _k in range(len(_model.model)):\n _y_ref_preds += [f\"{_}_{_model.name}_{_k}\" for _ in _model.y_ref]\n else:\n _y_ref_preds += [f\"{_}_{_model.name}\" for _ in _model.y_ref]\n\n _y_pred_scaled = _model.predict(\n df=df, groupby=groupby, k_index=_k_index, **kwargs)\n\n # - handle scaler inverse transformation\n if self.scaler_y is None:\n _y_pred = silentcopy(_y_pred_scaled)\n else:\n _df_y_pred_scaled = pd.DataFrame(_y_pred_scaled)\n _df_y_pred_scaled.columns = _model.y_ref\n # if the model is predicting only a subset of ys we need to reshape before we scale\n if _model.y_ref != self.y_ref:\n for _col in self.y_ref:\n if _col not in _model.y_ref:\n _df_y_pred_scaled[_col] = np.nan\n # ensure correct order\n _df_y_pred_scaled = _df_y_pred_scaled[self.y_ref]\n\n _df_y_pred = self.scaler_y.inverse_transform(_df_y_pred_scaled)\n _df_y_pred = pd.DataFrame(_df_y_pred)\n _df_y_pred.columns = self.y_ref\n _y_pred = _df_y_pred[_model.y_ref]\n _y_preds.append(_y_pred)\n warnings.simplefilter('default', DataConversionWarning)\n\n _df = pd.concat(_y_preds, axis=1, sort=False)\n\n # potentially unneeded as of 2017-07-28\n # # feed back to df\n # if len(_y_ref_preds) < _df.shape[1]: # some regressors (TimeSeriesRegressors)\n # # might return multi output\n # _y_ref_preds_new = []\n # if multi is None:\n # multi = range(_df.shape[1] // len(_y_ref_preds))\n # for _y_ref_pred in _y_ref_preds:\n # for _multi in multi:\n # _y_ref_preds_new.append(f\"{_y_ref_pred}_{_multi}\")\n # _y_ref_preds = _y_ref_preds_new + []\n # else:\n # multi = None\n #\n # # save\n # self.multi = multi\n\n # adjust column names\n _df.columns = _y_ref_preds\n\n # adjust index\n _df.index = df.index\n\n # supports up to 3 model ensembles (does not support multi regression)\n if ensemble:\n # drop duplicates\n _model_names = list(set(_model_names))\n # get all combinations\n for _comb in list(itertools.combinations(_model_names, 2)) + list(itertools.combinations(_model_names, 3)):\n for _y_ref in self.y_ref:\n _comb_name = '_'.join(_comb)\n _y_comb_name = '{}_{}'.format(_y_ref, _comb_name)\n _cols = []\n for _element in _comb:\n _cols.append('{}_{}'.format(_y_ref, _element))\n _df[_y_comb_name] = _df[_cols].mean(axis=1)\n if _comb_name not in self.model_names:\n self.model_names.append(_comb_name)\n\n if return_type == 'self':\n for _col in _df.columns:\n self.df[_col] = _df[_col]\n if _col not in self.y_pred:\n self.y_pred.append(_col)\n elif return_type in ['df', 'DataFrame']:\n return _df\n elif return_type in ['df_full']:\n for _col in list_intersection(df.columns, _df.columns):\n df = df.drop(_col, axis=1)\n return pd.concat([df, _df], axis=1)\n\n @docstr\n def score(\n self, return_type: str = 'self', pivot: bool = False, groupby: SequenceOrScalar = None,\n do_print: bool = True, display_score: bool = True, display_format: str = ',.3f', **kwargs\n ) -> Optional[pd.DataFrame]:\n \"\"\"\n calculate score of the Model predictions\n\n :param return_type: one of %(Models__score__return_type)s\n :param pivot: whether to pivot the DataFrame for easier readability [optional]\n :param do_print: %(do_print)s\n :param display_score: %(display_score)s\n :param display_format: Format to use when displaying the score DataFrame [optional]\n :param groupby: %(groupby)s\n :param kwargs: other keyword arguments passed to :func:`~hhpy.ds.df_score`\n :return: if return_type is 'self': None, else: pandas DataFrame containing the scores\n 
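# Hedged sketch (model names are illustrative): the ensemble branch above
# averages the per-model prediction columns for every 2- and 3-model
# combination, e.g. 'y_lin' and 'y_rf' yield a new 'y_lin_rf' column.
import itertools
names = ['lin', 'rf', 'xgb']
for comb in list(itertools.combinations(names, 2)) + list(itertools.combinations(names, 3)):
    col = 'y_' + '_'.join(comb)
    print(col)  # the averaged column would be df[['y_' + n for n in comb]].mean(axis=1)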
\"\"\"\n\n # -- assert\n _valid_return_types = ['self', 'df', 'df_full', 'DataFrame']\n assert (return_type in _valid_return_types),\\\n f\"return_type must be one of {_valid_return_types}\"\n groupby = assert_list(groupby)\n _df = self.df.copy()\n\n # -- init\n if do_print:\n self.printf('scoring...')\n\n if self.fit_type is None:\n raise ValueError('Model is not fit yet')\n elif self.fit_type == 'k_cross':\n groupby.append('_k_index')\n elif self.fit_type == 'train_test':\n _df = _df[lambda _: _['_k_index'].isin(self.k_tests)]\n\n _df_score = df_score(df=_df, y_true=self.y_ref, pred_suffix=self.model_names, pivot=pivot, groupby=groupby,\n multi=self.multi, **kwargs)\n\n if display_score:\n if pivot:\n _display_score = _df_score\n else:\n _display_score = _df_score.pivot_table(\n index=['y_true', 'y_pred'], columns='score', values='value')\n display_df(_display_score, float_format=display_format)\n\n if return_type == 'self':\n self.df_score = _df_score\n else:\n return _df_score\n\n @docstr\n def train(\n self, df: pd.DataFrame = None, k: int = 5, groupby: Union[Sequence, str] = None,\n sortby: Union[Sequence, str] = None, random_state: int = None, fit_type: str = 'train_test',\n k_test: Optional[int] = 0, ensemble: bool = False, scores: Union[Sequence, str, Callable] = None,\n multi: SequenceOrScalar = None,\n scale: float = None, do_predict: bool = True, do_score: bool = True, do_split: bool = True,\n do_fit: bool = True, do_print: bool = True, display_score: bool = True, kws_fit: dict = None,\n kws_pred: dict = None, kws_score: dict = None\n ) -> None:\n \"\"\"\n wrapper method that combines k_split, fit, predict and score\n\n :param df: %(df)s\n :param k: see hhpy.ds.k_split\n :param groupby: %(groupby)s\n :param sortby: see hhpy.ds.k_split\n :param random_state: see hhpy.ds.k_split\n :param fit_type: see .fit\n :param k_test: see .fit\n :param ensemble: %(ensemble)s\n :param scores: see .score [optional]\n :param multi: %(multi)s\n :param scale: see .score\n :param do_print: %(do_print)s\n :param display_score: %(display_score)s\n :param do_split: whether to apply k_split [optional]\n :param do_fit: whether to fit the Models [optional]\n :param do_predict: whether to add predictions to DataFrame [optional]\n :param do_score: whether to create self.df_score [optional]\n :param kws_fit: additional keyword arguments passed to fit [optional]\n :param kws_pred: additional keyword arguments passed to predict [optional]\n :param kws_score: additional keyword arguments passed to score [optional]\n :return: None\n \"\"\"\n # -- defaults\n if df is not None:\n self.df = df\n if kws_fit is None:\n kws_fit = {}\n if kws_pred is None:\n kws_pred = {}\n if kws_score is None:\n kws_score = {}\n\n # -- main\n if do_split:\n self.k_split(k=k, groupby=groupby, sortby=sortby,\n random_state=random_state, do_print=do_print)\n if do_fit:\n self.fit(fit_type=fit_type, k_test=k_test,\n do_print=do_print, groupby=groupby, **kws_fit)\n if do_predict:\n self.predict(ensemble=ensemble, do_print=do_print,\n groupby=groupby, multi=multi, **kws_pred)\n if do_score:\n self.score(scores=scores, scale=scale, do_print=do_print,\n display_score=display_score, **kws_score)\n if do_print:\n self.printf('training done')\n\n @docstr_hpt\n def scoreplot(\n self, x='y_ref', y='value', hue='model', hue_order=None, row='score', row_order=None,\n palette=None, width=16, height=9 / 2, scale=None, query=None, return_fig_ax=False,\n **kwargs\n ) -> Optional[tuple]:\n \"\"\"\n plot the score(s) using 
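# Hedged sketch continuing the illustrative ms object: train() above chains
# k_split -> fit -> predict -> score, collapsing the separate calls into one.
ms.train(k=5, fit_type='train_test', k_test=0, ensemble=True)
# afterwards ms.df holds the prediction columns and ms.df_score the score table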
sns.barplot\n\n :param x: %(x)s\n :param y: %(y)s\n :param hue: %(hue)s\n :param hue_order: %(order)s\n :param row: the variable to wrap around the rows [optional]\n :param row_order: %(order)s\n :param palette: %(palette)s\n :param width: %(subplot_width)s\n :param height: %(subplot_height)s\n :param scale: scale the values [optional]\n :param query: query to be passed to pd.DataFrame.query before plotting [optional]\n :param return_fig_ax: %(return_fig_ax)s\n :param kwargs: other keyword arguments passed to sns.barplot\n :return: see return_fig_ax\n \"\"\"\n\n # -- init\n if row_order is None:\n row_order = ['r2', 'rmse', 'mae', 'stdae', 'medae', 'pae']\n if hue_order is None:\n hue_order = self.model_names\n if palette is None:\n palette = hpt_rcParams['palette']\n _row_order = assert_list(row_order)\n\n fig, ax = plt.subplots(nrows=len(_row_order), figsize=(\n width, height * len(_row_order)))\n _ax_list = ax_as_list(ax)\n\n _df_score = self.score(return_type='df', scale=scale,\n do_print=False, display_score=False)\n if query is not None:\n _df_score = _df_score.query(query)\n\n _row = -1\n for _row_value in _row_order:\n _row += 1\n _ax = _ax_list[_row]\n sns.barplot(x=x, y=y, hue=hue, hue_order=hue_order, data=_df_score.query('{}==@_row_value'.format(row)),\n ax=_ax, palette=palette, **kwargs)\n _y_label = _row_value\n if scale:\n _y_label += '_scale={}'.format(scale)\n _ax.set_ylabel(_y_label)\n _ax.set_xlabel('')\n\n legend_outside(ax)\n\n if return_fig_ax:\n return fig, ax\n else:\n plt.show()\n\n @docstr\n def setattr(self, key: str, value: Any, target: str = 'all', child_target: str = 'all') -> None:\n \"\"\"\n Set attribute on self and/or all child models\n\n :param key: %(key)s\n :param value: %(value)s\n :param target: One of %(setattr__target)s, for 'self' sets only on self, for 'children' sets for children and\n for 'all' tries to set on both\n :param child_target: Same as target but passed to :func:`~Model.setattr`\n :return: None\n \"\"\"\n\n # -- assert\n if target not in validations['setattr__target']:\n raise ValueError(\n f\"target must be one of {validations['setattr__target']}\")\n\n # -- main\n # - self\n if target in ['all', 'a', 'self', 's'] and hasattr(self, key):\n setattr(self, key, value)\n\n # - children\n if target in ['all', 'a', 'children', 'c']:\n for _model in self.models:\n _model.setattr(key, value, target=child_target)\n\n\n# ---- functions\n@export\ndef assert_array(a: Any, return_name: bool = False, name_default: str = 'name') -> Union[Tuple[np.ndarray, str],\n np.ndarray]:\n \"\"\"\n Take any python object and turn it into a 2d numpy array (if possible). 
Useful for training neural networks.\n\n :param a: any python object\n :param return_name: Whether the name should be returned\n :param name_default: The name to fall back to if the object has no name attribute.\n :return: numpy array, if return_name: Tuple [numpy.array, name]\n \"\"\"\n _name = name_default\n if return_name:\n if isinstance(a, pd.DataFrame):\n _name = ';'.join(a.columns)\n elif hasattr(a, 'name'):\n _name = a.name\n # assert array\n a = np.array(a).copy()\n if len(a.shape) == 1:\n a = a.reshape(-1, 1)\n if return_name:\n return a, _name\n else:\n return a\n\n\n@export\ndef dict_to_model(dic: Mapping) -> Model:\n \"\"\"\n restore a Model object from a dictionary\n\n :param dic: dictionary containing the model definition\n :return: Model\n \"\"\"\n # init empty model\n if dic['__name__'] == 'Model':\n _model = Model()\n else:\n raise ValueError('__name__ not recognized')\n # reconstruct from dict\n _model.from_dict(dic)\n\n return _model\n\n\n@export\ndef assert_model(model: Any) -> Model:\n \"\"\"\n takes any Model, model object or dictionary and converts to Model\n\n :param model: Mapping or object containing a model\n :return: Model\n \"\"\"\n\n if not isinstance(model, Mapping):\n if not isinstance(model, Model):\n return Model(model)\n return model\n\n if '__name__' in model.keys():\n if model['__name__'] in ['Model']:\n _model = dict_to_model(model)\n else:\n raise ValueError('__name__ not recognized')\n else:\n raise KeyError('no element named __name__')\n\n return _model\n\n\ndef force_model(*args, **kwargs):\n warnings.warn(\n 'force_model is deprecated, please use assert_model instead', DeprecationWarning)\n return assert_model(*args, **kwargs)\n\n\n@export\ndef get_coefs(model: Any, y: SequenceOrScalar):\n \"\"\"\n get coefficients of a linear regression in a sorted data frame\n\n :param model: model object\n :param y: name of the coefficients\n :return: pandas DataFrame containing the coefficient names and values\n \"\"\"\n\n if isinstance(model, Model):\n _model = model.model\n else:\n _model = model\n\n assert (hasattr(_model, 'coef_')\n ), 'Attribute coef_ not available, did you specify a linear model?'\n\n _coef = _model.coef_.tolist()\n _coef.append(_model.intercept_) # use the resolved estimator, not the (possibly wrapped) input\n\n _df = pd.DataFrame()\n _df['feature'] = assert_list(y) + ['intercept']\n _df['coef'] = _coef\n _df = _df.sort_values(['coef'], ascending=False).reset_index(drop=True)\n\n return _df\n\n\n@export\ndef get_feature_importance(\n model: object, predictors: Union[Sequence, str], features_to_sum: Mapping = None\n) -> pd.DataFrame:\n \"\"\"\n get feature importance of a decision tree like model in a sorted data frame\n\n :param model: model object\n :param predictors: names of the predictors properly sorted\n :param features_to_sum: if you want to sum features please provide name mappings\n :return: pandas DataFrame containing the feature importances\n \"\"\"\n\n def _get_feature_importance_rf(__model, __predictors, __features_to_sum: Mapping = None):\n\n # init df for feature importances\n ___df = pd.DataFrame({\n 'feature': __predictors,\n 'importance': __model.feature_importances_\n })\n\n # if applicable: sum features\n if __features_to_sum is not None:\n for _key in list(__features_to_sum.keys()):\n ___df['feature'] = np.where(___df['feature'].isin(\n __features_to_sum[_key]), _key, ___df['feature'])\n ___df = ___df.groupby('feature').sum().reset_index()\n\n ___df = ___df.sort_values(\n ['importance'], ascending=False).reset_index(drop=True)\n ___df['importance'] = 
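# Hedged round trip for get_coefs() above (toy data, illustrative feature
# names; the module's own helpers are assumed importable):
import numpy as np
from sklearn.linear_model import LinearRegression
Xt = np.random.rand(100, 2)
yt = 3 * Xt[:, 0] - 2 * Xt[:, 1] + 1
reg = LinearRegression().fit(Xt, yt)
print(get_coefs(reg, ['x1', 'x2']))  # one row per coefficient plus the intercept, sorted by value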
np.round(___df['importance'], 5)\n\n return ___df\n\n # get feature importance of a decision tree like model in a sorted data frame\n def _get_feature_importance_xgb(_f_model, _f_predictors, _f_features_to_sum=None):\n # get f_score\n _f_score = _f_model.get_booster().get_fscore()\n\n # init df to return\n __df = pd.DataFrame()\n\n # get predictor code\n __df['feature_code'] = list(_f_score.keys())\n __df['feature_code'] = __df['feature_code'].str[1:].astype(int)\n\n # code importance\n __df['importance_abs'] = list(_f_score.values())\n __df['importance'] = __df['importance_abs'] / \\\n __df['importance_abs'].sum()\n\n __df = __df.sort_values(['feature_code']).reset_index(drop=True)\n\n __df['feature'] = [_f_predictors[x]\n for x in range(len(_f_predictors)) if x in __df['feature_code'].values] # .values: 'in' on a Series tests the index, not the values\n\n if _f_features_to_sum is not None:\n\n for _key in list(_f_features_to_sum.keys()):\n __df['feature'] = np.where(__df['feature'].isin(\n _f_features_to_sum[_key]), _key, __df['feature'])\n __df = __df.groupby('feature').sum().reset_index()\n\n __df = __df.sort_values(\n ['importance'], ascending=False).reset_index(drop=True)\n\n __df['importance'] = np.round(__df['importance'], 5)\n\n __df = __df[['feature', 'importance']]\n\n return __df\n\n # -- main\n try:\n # noinspection PyTypeChecker\n _df = _get_feature_importance_rf(model, predictors, features_to_sum)\n # this is supposed to also work for XGBoost but it was broken in a recent release\n # so below serves as fallback\n except ValueError:\n # noinspection PyTypeChecker\n _df = _get_feature_importance_xgb(model, predictors, features_to_sum)\n\n return _df\n\n\n@export\ndef to_keras_3d(x: DFOrArray, window: int, y: DFOrArray = None, groupby: SequenceOrScalar = None,\n groupby_to_dummy: bool = False, dropna: bool = True, reshape: bool = True)\\\n -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:\n \"\"\"\n reformat a DataFrame / 2D array to become a keras compatible 3D array. If dropna is True the first window\n observations get dropped since they will contain NaN values in the required shifted elements.\n\n :param x: numpy array or DataFrame\n :param window: series-window, how many iterations to convolve\n :param y: accompanying target / label DataFrame or numpy 2d array. If specified a modified version of y will be\n returned to match x's shape where the first window elements have been dropped. 
[optional]\n :param groupby: column to group by (shift observations in each group) [optional]\n :param groupby_to_dummy: Whether to include the groupby value as pandas Dummy [optional]\n :param dropna: Whether to drop na rows [optional]\n :param reshape: Whether to reshape to keras format observations - timestamps - features [optional]\n :return: if y is None: x as 3d array, else: Tuple[x, y]\n \"\"\"\n\n # -- assert\n # if x input is already a 3d-array: return it as is\n if hasattr(x, 'shape') and len(x.shape) == 3:\n if y is None:\n return x\n else:\n return x, y\n\n # -- init\n # - convert x and y to DataFrames\n x = assert_df(x)\n # - groupby\n if groupby in [None, []]:\n groupby = GROUPBY_DUMMY\n x[GROUPBY_DUMMY] = 1\n groupby = assert_list(groupby)\n # handle groupby to dummy (needs to be here)\n if groupby_to_dummy:\n x = pd.concat([x, pd.get_dummies(x[groupby])], axis=1)\n _x_cols = list_exclude(x.columns, groupby)\n # handle y\n if y is None:\n _df = x\n _y_cols = None\n else:\n y = assert_df(y)\n _y_cols = list_exclude(y.columns, groupby)\n _df = pd.concat([x, y], axis=1)\n _df = drop_duplicate_cols(_df)\n\n # -- main\n # - init new x as list of window lists\n _x = [[] for _ in range(window)]\n # - init new y as list\n _y = []\n\n # - groupby loop\n for _index, _df_i in _df.groupby(groupby):\n # shift loop\n for _step in range(window):\n # shift by _step+1 since shifting by 0 would mean using the target (label) value as predictor (feature)\n _x_shift_i = _df_i[_x_cols].shift(_step + 1)\n if dropna:\n # drop the first window elements (since they are na in at least 1 cast)\n _x_shift_i = _x_shift_i.iloc[window:]\n _x[_step].append(_x_shift_i)\n # drop the first window elements of y\n if y is not None:\n _y_i = _df_i[_y_cols]\n if dropna:\n _y_i = _y_i.iloc[window:]\n _y.append(_y_i)\n # - use pd concat to make lists of dfs into dfs\n _x = [pd.concat(_) for _ in _x]\n if y is not None:\n _y = pd.concat(_y).values\n # - turn x into keras 3d array using numpy.dstack\n _x = np.dstack(_x)\n # - reshape into the keras format observations - timestamps - features\n if reshape:\n _x_copy = _x.copy()\n _x = np.zeros((_x_copy.shape[0], _x_copy.shape[2], _x_copy.shape[1]))\n for _row in range(_x_copy.shape[0]):\n _x[_row] = _x_copy[_row].T\n # -- return\n if y is None:\n return _x\n else:\n return _x, _y\n\n# TODO - make these members of Models\n# def grid_search(model, grid, target, n_random=100, n_random_state=0, goal='acc_test', f=cla, opt_class=None,\n# opt_score_type=None, ascending=None, do_print=True, display_score=False, float_format=',.4f',\n# **kwargs):\n# # -- defaults --\n#\n# if opt_score_type is None:\n# if opt_class is not None:\n# opt_score_type = 'f1_pr'\n# else:\n# opt_score_type = 'score'\n#\n# if ascending is None:\n# # goal can be one of scores keys\n# if goal in ['r2_train', 'r2_test', 'acc_train', 'acc_test', 'true_pos', 'true_neg']:\n# _ascending = False\n# else:\n# _ascending = True\n# else:\n# _ascending = ascending\n#\n# # -- init --\n#\n# if is_list_like(target):\n# target = target[0]\n# warnings.warn('target cannot be a list for grid_search, using {}'.format(target))\n#\n# # you can pass a DataFrame or a dictionary as grid\n# if isinstance(grid, pd.DataFrame):\n# _grid = grid.copy()\n# del grid\n# elif isinstance(grid, Mapping):\n#\n# # expand gridf\n#\n# _grid = expand_dict_to_df(grid)\n# del grid\n#\n# else:\n# raise 'grid should be a dictionary or DataFrame'\n#\n# # if a number of random combinations to try was passed, use it to sample grid\n# if n_random is not 
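# Hedged shape check for to_keras_3d() above (assumes the module is
# importable): with window=2 the first two rows are dropped (their lags are
# NaN) and the two previous observations are stacked along the timestamp axis.
import numpy as np
import pandas as pd
df_w = pd.DataFrame({'a': np.arange(10.0), 'b': np.arange(10.0) * 2})
x3d = to_keras_3d(df_w, window=2)
print(x3d.shape)  # (8, 2, 2): observations x timestamps x features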
None:\n# if n_random < _grid.shape[0]:\n# _grid = _grid.sample(n=n_random, random_state=n_random_state).reset_index(drop=True)\n#\n# # -- main --\n#\n# # iter _grid rows\n# _df_out = []\n#\n# _i = 0\n# for _index, _row in _grid.iterrows():\n#\n# _it_df = pd.DataFrame({'it': [_i]})\n# _i += 1\n#\n# _model = silentcopy(model)\n# _string = ''\n#\n# for _key in _row.keys():\n#\n# _value = _row[_key]\n# if isinstance(_value, float):\n# if _value.is_integer(): _value = int(_value)\n#\n# if len(_string) > 0: _string += ', '\n# _string += '{} = {}'.format(_key, _value)\n#\n# _model.set_params(**{_key: _value})\n# _it_df[_key] = _value\n#\n# if do_print: tprint('{} / {} - {}'.format(_i, _grid.shape[0], _string))\n#\n# # execute function\n# _dict = f(model=_model, target=target, do_print=False, display_score=display_score, **kwargs)\n#\n# # parse score df\n# if opt_score_type == 'score':\n# _score = _dict['score']\n# elif opt_score_type == 'f1_pr':\n# # switch case for k_cross\n# if 'f1_pr' in _dict['cm'][target].keys():\n# _score = _dict['cm'][target]['f1_pr']\n# else:\n# _score = _dict['cm'][target]['f1_pr_test']\n# elif opt_score_type == 'cm':\n# # switch case for k_cross\n# if 'cm' in _dict['cm'][target].keys(): _score = _dict['cm'][target]['f1_pr']\n# _score = _dict['cm'][target]['cm_test']\n# else:\n# raise ValueError('Unknown opt_score_type {}'.format(opt_score_type))\n#\n# if opt_class is not None: _score = _score[_score.index == opt_class]\n#\n# for _col in _score.columns: _it_df[_col] = _score[_col].mean()\n#\n# _df_out.append(_it_df)\n#\n# _df_out = pd.concat(_df_out, ignore_index=True, sort=False).sort_values(by=goal, ascending=_ascending)\n#\n# # find best combination\n# if do_print:\n# tprint('')\n# print('best combination:')\n# display_df(_df_out.head(1), float_format=float_format)\n#\n# return _df_out\n#\n#\n# # wrapper for grid_search for regression\n# def grid_reg(*args, goal='rmse_test_scaled', f=reg, **kwargs):\n# return grid_search(*args, goal=goal, f=f, **kwargs)\n#\n#\n# # wrapper for grid_search for regression\n# def grid_cla(*args, goal='acc_test', display_cm=False, display_f1_pr=False, display_score=False, f=cla, **kwargs):\n# return (\n# grid_search(*args, goal=goal, display_cm=display_cm, display_f1_pr=display_f1_pr, display_score=display_score,\n# f=f, **kwargs))\n#\n#\n# # just a wrapper for grid_reg that uses k_cross validation by default\n# def grid_reg_k(f=k_cross_reg, goal='rmse', **kwargs):\n# return grid_search(goal=goal, f=f, **kwargs)\n#\n#\n# def multi_modelling(f, goal, ascending, df_train, df_test, target, predictors_it, predictors_fix=None,\n# fixed_only=True,\n# do_print=True, print_prefix='', **kwargs):\n# # init\n# if predictors_fix is None:\n# predictors_fix = []\n# if not is_list_like(target): target = [target]\n# if not is_list_like(predictors_it): predictors_it = [predictors_it]\n# if not is_list_like(predictors_fix): predictors_fix = [predictors_fix]\n#\n# # a predictor cannot be both fix and it\n# if len(predictors_fix) > 0:\n# _predictors_it = [_ for _ in predictors_it if _ not in predictors_fix]\n# if fixed_only: _predictors_it += [{'fixed_only': []}]\n# else:\n# _predictors_it = predictors_it\n#\n# ## empty dict so that it can become a DtaFrame\n# _df_score = []\n#\n# _i = 0\n# _i_max = len(_predictors_it)\n#\n# for _predictor in _predictors_it:\n#\n# _i += 1\n#\n# if isinstance(_predictor, Mapping):\n# _predictor_name = list(_predictor.keys())[0]\n# _predictor_value = _predictor[_predictor_name]\n# else:\n# _predictor_name = _predictor\n# 
_predictor_value = _predictor\n#\n# if not is_list_like(_predictor_value):\n# _predictors_score = [_predictor_value]\n# else:\n# _predictors_score = [] + _predictor_value\n#\n# if predictors_fix is not None: _predictors_score = _predictors_score + predictors_fix\n#\n# _print_prefix = '{}iteration {} / {} : {} - '.format(print_prefix, _i, _i_max, _predictor_name)\n#\n# _rmse_test = 0\n# _rmse_train = 0\n#\n# _score = \\\n# f(df_train=df_train, df_test=df_test, target=target, predictors=_predictors_score,\n# print_prefix=_print_prefix,\n# do_print=do_print, display_score=False, **kwargs)['score']\n# _score = pd.DataFrame(_score[_score.index.isin(target)].mean()).T\n# _score['predictor'] = _predictor_name\n# _score['predictor_value'] = _predictor_value\n#\n# _df_score.append(_score)\n#\n# _df_score = pd.concat(_df_score, ignore_index=True, sort=False).sort_values(by=goal,\n# ascending=ascending).reset_index(\n# drop=True)\n#\n# if do_print:\n# tprint('done')\n#\n# return _df_score\n\n\n# # wrapper\n# def multi_cla(f=cla, goal='acc_test', ascending=False, **kwargs):\n# return multi_modelling(f=f, goal=goal, ascending=ascending, **kwargs)\n#\n#\n# # wrapper\n# def multi_reg(f=reg, goal='rmse_test_scaled', ascending=True, **kwargs):\n# return multi_modelling(f=f, goal=goal, ascending=ascending, **kwargs)\n#\n#\n# # iteratively build a model by trying all predictors one by one, finding the best one and then trying\n# # all again (repeat)\n# def iter_modelling(f, goal, df_train, df_test, target, predictors_it, predictors_fix=None, max_depth=10, cutoff=0,\n# do_print=True, **kwargs):\n# if predictors_fix is None:\n# predictors_fix = []\n# if cutoff is None: cutoff = - np.inf\n#\n# # init\n# _predictors_fix = silentcopy(predictors_fix)\n# _predictors_it = silentcopy(predictors_it)\n# if not is_list_like(_predictors_fix): _predictors_fix = [_predictors_fix]\n# _predictors_fix = list(_predictors_fix)\n# if max_depth > len(predictors_it): max_depth = len(predictors_it)\n#\n# # a predictor cannot be both fix and it\n# _predictors_it = [_ for _ in _predictors_it if _ not in _predictors_fix]\n#\n# _prev_rmse = 9 ** 9\n#\n# for _depth in range(max_depth):\n#\n# _print_prefix = 'depth {} / {} - '.format(_depth + 1, max_depth)\n#\n# _df_score = f(df_train=df_train, df_test=df_test, target=target, predictors_it=_predictors_it,\n# predictors_fix=_predictors_fix, fixed_only=False, do_print=do_print, print_prefix=_print_prefix,\n# **kwargs)\n# _best = _df_score.iloc[0]\n# _best_goal = _best[goal]\n# _best_predictor = _best['predictor_value']\n#\n# print('{}best result: {}'.format(_print_prefix, format_iloc(_best)))\n#\n# if _prev_rmse - _best_goal > cutoff:\n#\n# _prev_rmse = _best_goal\n#\n# # update predictors\n# if not is_list_like(_best_predictor):\n#\n# _predictors_it = [_ for _ in _predictors_it if _ != _best_predictor]\n# _predictors_fix += [_best_predictor]\n#\n# else:\n#\n# _predictors_it_new = []\n#\n# for _predictor in _predictors_it:\n#\n# if not isinstance(_predictor, Mapping):\n# _predictors_it_new.append(_predictor)\n# else:\n# if not _best['predictor'] in _predictor.keys(): _predictors_it_new.append(_predictor)\n#\n# _predictors_it = silentcopy(_predictors_it_new)\n# _predictors_fix += _best_predictor\n#\n# else:\n#\n# print('{}cutoff {} not reached, stopping'.format(_print_prefix, cutoff))\n# break\n#\n# return None\n#\n#\n# # wrapper\n# def iter_cla(f=multi_cla, goal='acc_test', **kwargs):\n# return iter_modelling(f=f, goal=goal, **kwargs)\n#\n#\n# # wrapper\n# def iter_reg(f=multi_reg, 
goal='rmse_test_scaled', **kwargs):\n# return iter_modelling(f=f, goal=goal, **kwargs)\n#\n#\n# # iteratively build a model by trying all predictors one by one and accepting those who improve more than cutoff\n# # (faster than iter_reg)\n# def forward_reg(df_train, df_test, predictors_it, predictors_fix, max_depth=10, cutoff=.001, do_print=True, f=reg,\n# **kwargs):\n# _predictors_fix = silentcopy(predictors_fix)\n# _predictors_it = silentcopy(predictors_it)\n# if not is_list_like(_predictors_fix): _predictors_fix = [_predictors_fix]\n# _predictors_fix = list(_predictors_fix)\n# _predictors_final = [] + _predictors_fix\n#\n# # a predictor cannot be both fix and it\n# _predictors_it = [_ for _ in _predictors_it if _ not in _predictors_fix]\n#\n# if do_print: tprint('training base model...')\n#\n# _dict = f(\n# df_train=df_train,\n# df_test=df_test,\n# predictors=predictors_fix,\n# do_print=False,\n# **kwargs\n# )\n#\n# _base = _dict['score'].iloc[0]\n#\n# _prev_rmse = _base['rmse_test_scaled']\n#\n# print('base score: rmse_scaled train / test : {:.4f} / {:.4f} - rmse train / test : {:.4f} / {:.4f}'.format(\n# _base['rmse_train_scaled'], _base['rmse_test_scaled'], _base['rmse_train'], _base['rmse_test']))\n#\n# for _depth in range(max_depth):\n#\n# _print_prefix = 'depth {} / {} - '.format(_depth + 1, max_depth)\n#\n# # do a multi_reg\n# _df_score = multi_reg(df_train=df_train, df_test=df_test, predictors_it=_predictors_it,\n# predictors_fix=_predictors_fix, fixed_only=False, do_print=do_print,\n# print_prefix=_print_prefix, f=reg, **kwargs)\n#\n# # find base model\n# _df_score_base = _df_score.query('@_prev_rmse-rmse_test_scaled>@cutoff')\n#\n# # if the len is zero no improvements are possible -> quit\n# if _df_score_base.shape[0] == 0:\n# if do_print: print('{}no improvements possible, quitting'.format(_print_prefix))\n# break\n#\n# # get all predictors that made improvements:\n# _predictors_pos_name = _df_score_base['predictor'].tolist()\n# _predictors_pos = _df_score_base['predictor_value'].tolist()\n# # print(_predictors_pos)\n# _predictors_fix_new = _predictors_fix + flatten(_predictors_pos)\n#\n# if do_print: print('{}accepted predictors: {}'.format(_print_prefix, _predictors_pos_name))\n#\n# if do_print: tprint('{}training new base model...'.format(_print_prefix))\n#\n# # get new base\n# _dict_new = f(\n# df_train=df_train,\n# df_test=df_test,\n# predictors=_predictors_fix_new,\n# do_print=False,\n# **kwargs\n# )\n#\n# _base_new = _dict_new['score'].iloc[0]\n#\n# print('{}new score: rmse_scaled train / test : {:.4f} / {:.4f} - rmse train / test : {:.4f} / {:.4f}'.format(\n# _print_prefix, _base_new['rmse_train_scaled'], _base_new['rmse_test_scaled'], _base_new['rmse_train'],\n# _base_new['rmse_test']))\n#\n# # new rmse\n# if _prev_rmse - _base_new['rmse_test_scaled'] > cutoff:\n#\n# _prev_rmse = silentcopy(_base_new['rmse_test_scaled'])\n# _base = _base_new.copy()\n# _dict = silentcopy(_dict_new)\n# _predictors_fix = silentcopy(_predictors_fix_new)\n# _predictors_final += _predictors_pos_name\n#\n# # update predictors\n# for _new_predictor, _new_predictor_name in zip(_predictors_pos, _predictors_pos_name):\n#\n# if not is_list_like(_new_predictor):\n#\n# _predictors_it = [_ for _ in _predictors_it if _ != _new_predictor]\n#\n# else:\n#\n# _predictors_it_new = []\n#\n# for _predictor in _predictors_it:\n#\n# if not isinstance(_predictor, Mapping):\n# _predictors_it_new.append(_predictor)\n# else:\n# if not _new_predictor_name in _predictor.keys(): 
_predictors_it_new.append(_predictor)\n#\n# _predictors_it = silentcopy(_predictors_it_new)\n# else:\n# print('{}overall score improvement below cutoff {}, quitting'.format(_print_prefix, cutoff))\n# break\n#\n# if len(_predictors_it) == 0:\n# print('{}accepted all predictors, quitting'.format(_print_prefix))\n# break\n#\n# print('')\n# print('final predictors: {}'.format(_predictors_final))\n# print('discarded predictors: {}'.format(_predictors_it))\n# print('final score: rmse_scaled train / test : {:.4f} / {:.4f} - rmse train / test : {:.4f} / {:.4f}'.format(\n# _base['rmse_train_scaled'], _base['rmse_test_scaled'], _base['rmse_train'], _base['rmse_test']))\n#\n# return _dict\n#\n#\n# # iteratively build a model by trying all predictors and removing one by one and discarding those whose gain\n# # was less than cutoff (faster than iter_reg)\n# def backward_modelling(f, goal, ascending, cutoff, df_train, df_test, target, predictors_it, predictors_fix=None,\n# max_depth=3, do_print=True, float_format=',.4f', **kwargs):\n# # -- init --\n# if predictors_fix is None:\n# predictors_fix = []\n# _predictors_fix = silentcopy(predictors_fix)\n# _predictors_it = silentcopy(predictors_it)\n# if not is_list_like(_predictors_fix): _predictors_fix = [_predictors_fix]\n# _predictors_fix = list(_predictors_fix)\n# _predictors_final = [] + _predictors_fix\n#\n# # a predictor cannot be both fix and it\n# _predictors_it = [_ for _ in _predictors_it if _ not in _predictors_fix]\n# _predictors_all = predictors_fix + _predictors_it\n#\n# # -- base --\n# if do_print: tprint('training base model...')\n#\n# _dict = f(\n# df_train=df_train,\n# df_test=df_test,\n# target=target,\n# predictors=_predictors_all,\n# do_print=False,\n# display_score=False,\n# **kwargs\n# )f\n#\n# _score = _dict['score']\n# _score = _score[_score.index.isin(target)]\n# _base = _score.mean()\n#\n# _best_goal = _base[goal]\n#\n# print('base score: {}'.format(format_iloc(_base, float_format=float_format)))\n#\n# # -- main loop --\n# for _depth in range(max_depth):\n#\n# _print_prefix = 'depth {} / {} - '.format(_depth + 1, max_depth)\n#\n# tprint('{}init...'.format(_print_prefix))\n#\n# _i = 0\n# _i_max = len(_predictors_it)\n# _predictors_it_new = silentcopy(_predictors_it)\n# _removed = 0\n#\n# # loop all remaining predictors\n# for _predictor in _predictors_it:\n#\n# _i += 1\n#\n# _predictors = [_ for _ in _predictors_all if _ != _predictor]\n#\n# # try:\n# if True:\n#\n# _dict_new = f(\n# df_train=df_train,\n# df_test=df_test,\n# target=target,\n# predictors=_predictors,\n# do_print=False,\n# display_score=False,\n# **kwargs\n# )\n#\n# _score_new = _dict_new['score']\n# _score_new = _score_new[_score_new.index.isin(target)]\n# _base_new = _score_new.mean()\n#\n# _goal = _base_new[goal]\n# _full_score = format_iloc(_base_new, float_format=float_format)\n# _new_score = _base_new[goal]\n# _new_score_formatted = format_iloc(_base_new[[goal]], float_format=float_format)\n#\n# if ascending:\n# _goal_diff = _best_goal - _new_score\n# else:\n# _goal_diff = _new_score - _best_goal\n#\n# tprint(\n# '{}predictor {} / {} : {} ; {} - best: {} - diff: {}'.format(_print_prefix, _i, _i_max,\n# _predictor,\n# _new_score_formatted,\n# format(_best_goal, float_format),\n# format(_goal_diff, float_format)))\n#\n# if _goal_diff <= cutoff:\n#\n# _predictors_all = silentcopy(_predictors)\n# _predictors_it_new.remove(_predictor)\n# _removed += 1\n# _dict = silentcopy(_dict_new)\n#\n# # if the diff is below 0 a new best score was reached\n# if _goal_diff < 0: 
_best_goal = silentcopy(_new_score)\n#\n# # except:\n# # print('')\n# # print('predictor {} failed'.format(_predictor))\n# # pass\n#\n# if _removed == 0:\n# tprint('')\n# print('no improvements possible, quitting')\n# break\n#\n# _predictors_it = silentcopy(_predictors_it_new)\n#\n# tprint('')\n# print('{}predictors {} ; {}'.format(_print_prefix, _predictors_all, _full_score))\n#\n# if len(_predictors_all) == 1:\n# print('only 1 predictor left, quitting')\n# break\n#\n# return _dict\n#\n#\n# def backward_cla(f=cla, goal='acc_test', ascending=True, cutoff=0, float_format=',.3f', **kwargs):\n# return backward_modelling(f=f, goal=goal, ascending=ascending, cutoff=cutoff, float_format=float_format,\n# **kwargs)\n#\n#\n# def backward_reg(f=reg, goal='rmse_test_scaled', ascending=False, cutoff=0, **kwargs):\n# return backward_modelling(f=f, goal=goal, ascending=ascending, cutoff=cutoff, **kwargs)\n","repo_name":"rhedak/hhpy","sub_path":"hhpy/modelling.py","file_name":"modelling.py","file_ext":"py","file_size_in_byte":70940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19185910696","text":"from uuid import UUID\n\nfrom pydantic import EmailStr\nfrom sqlalchemy.ext.asyncio import AsyncSession\n\nfrom backend.crud import CRUDUser\nfrom backend.models import UserDB\nfrom backend.schemas import UserRead, UserCreate\n\n\nclass UserService:\n @staticmethod\n async def create_user(\n username: str,\n email: EmailStr,\n picture: str,\n session: AsyncSession,\n ) -> UserRead:\n user = UserCreate(username=username, email=email, picture=picture)\n crud_user = CRUDUser(UserDB, session)\n user = await crud_user.create(user)\n return UserRead.model_validate(user)\n\n @staticmethod\n async def get_user(user_id: UUID, session: AsyncSession) -> UserRead | None:\n crud_user = CRUDUser(UserDB, session)\n user = await crud_user.get(user_id)\n if user:\n return UserRead.model_validate(user)\n\n @staticmethod\n async def get_user_by_email(email: EmailStr, session: AsyncSession) -> UserRead | None:\n crud_user = CRUDUser(UserDB, session)\n user = await crud_user.get_by_email(email)\n if user:\n return UserRead.model_validate(user)\n\n @staticmethod\n async def delete_user(user_id: UUID, session: AsyncSession) -> UserRead | None:\n crud_user = CRUDUser(UserDB, session)\n user = await crud_user.delete(user_id)\n if user:\n return UserRead.model_validate(user)\n","repo_name":"baltikaa9/ZXCTube","sub_path":"backend/services/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8574589604","text":"import csv\nimport glob\nimport multiprocessing\nimport os\nimport warnings\nfrom queue import Queue\nfrom typing import Tuple\n\nimport gym\nimport numpy as np\nimport stable_baselines3\nimport tensorflow as tf\nimport yaml\nfrom stable_baselines3.common.utils import get_linear_fn, set_random_seed\nfrom tensorflow.python.platform import tf_logging as tf_log\n\ntf.get_logger().setLevel(tf_log.ERROR)\ngym.logger.set_level(gym.logger.ERROR)\n\nfrom stable_baselines import DQN, PPO2, SAC\nfrom stable_baselines.common import set_global_seeds\nfrom stable_baselines.common.callbacks import (CheckpointCallback,\n EvalCallback,\n StopTrainingOnRewardThreshold)\nfrom stable_baselines.common.noise import (AdaptiveParamNoiseSpec,\n NormalActionNoise,\n OrnsteinUhlenbeckActionNoise)\nfrom stable_baselines.common.schedules import constfn\nfrom 
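The commented-out multi_/iter_/forward_/backward_ helpers in the hhpy record above all implement wrapper-style greedy feature selection around a scoring function (note the stray `)f` token after the `f(...)` call in backward_modelling, which would be a syntax error if uncommented). A minimal, self-contained sketch of the same forward-selection pattern, using ordinary least squares and RMSE as stand-in model and scorer (both are assumptions for illustration, not hhpy's API):

import numpy as np

def rmse(y_true, y_pred):
    return float(np.sqrt(np.mean((y_true - y_pred) ** 2)))

def fit_predict(X, y):
    # ordinary least squares as a stand-in model
    coef, *_ = np.linalg.lstsq(X, y, rcond=None)
    return X @ coef

def forward_select(X, y, cutoff=1e-3):
    # Greedy loop mirroring forward_reg above: repeatedly add the predictor
    # that improves RMSE the most; stop once no candidate beats the cutoff.
    remaining = list(range(X.shape[1]))
    chosen = []
    best = rmse(y, np.full_like(y, y.mean()))  # baseline: predict the mean
    while remaining:
        scores = {j: rmse(y, fit_predict(X[:, chosen + [j]], y)) for j in remaining}
        j = min(scores, key=scores.get)
        if best - scores[j] <= cutoff:
            break
        best = scores[j]
        chosen.append(j)
        remaining.remove(j)
    return chosen, best

Backward elimination is the mirror image: start from all columns and repeatedly drop the one whose removal hurts the score least, as long as the damage stays under the cutoff.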
stable_baselines.common.vec_env import DummyVecEnv\nfrom stable_baselines.logger import configure\n\nfrom abstract_agent import AbstractAgent\nfrom algo.env_predicate_pair import EnvPredicatePair\nfrom custom_callbacks import (LoggingTrainingMetricsCallback,\n SaveVecNormalizeCallback)\nfrom custom_callbacks3 import \\\n LoggingTrainingMetricsCallback as LoggingTrainingMetricsCallbackSb3\nfrom custom_callbacks3 import \\\n SaveVecNormalizeCallback as SaveVecNormalizeCallbackSb3\nfrom env_utils import (get_n_actions, get_reward_threshold, make_custom_env,\n make_env_parallel, normalize_env)\nfrom envs.env_eval_callback import EnvEvalCallback\nfrom envs.env_variables import EnvVariables\nfrom evaluation import custom_evaluate_policy\nfrom log import Log\nfrom progress_bar_manager import ProgressBarManager\nfrom training.custom_dqn import CustomDQN\nfrom training.custom_sac import CustomSAC\nfrom utilities import (HOME, PREFIX_DIR_MODELS_SAVE, LinearNormalActionNoise,\n linear_schedule)\n\nif multiprocessing.cpu_count() <= 4:\n n_cpu_tf_sess = multiprocessing.cpu_count() // 2\nelse:\n n_cpu_tf_sess = multiprocessing.cpu_count()\n\n\ndef filter_tf_version_warnings():\n # https://stackoverflow.com/questions/40426502/is-there-a-way-to-suppress-the-messages-tensorflow-prints/40426709\n os.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\" # or any {'0', '1', '2'}\n # https://stackoverflow.com/questions/15777951/how-to-suppress-pandas-future-warning\n warnings.simplefilter(action=\"ignore\", category=FutureWarning)\n warnings.simplefilter(action=\"ignore\", category=Warning)\n tf.autograph.set_verbosity(0)\n\n\ndef get_value_given_key(filename, key) -> str:\n with open(filename, newline=\"\") as csvfile:\n csv_reader = csv.DictReader(csvfile)\n for row in csv_reader:\n last_row = row\n if key in last_row:\n return last_row[key]\n return None\n\n\ndef load_hyperparams(algo_name=None, env_name=None, model_suffix=None):\n # Load hyperparameters from yaml file\n abs_hyperparams_dir = os.path.abspath(HOME + \"/hyperparams\")\n filename = (\n abs_hyperparams_dir + \"/{}.yml\".format(algo_name)\n if not model_suffix\n else abs_hyperparams_dir + \"/{}_{}.yml\".format(algo_name, model_suffix)\n )\n with open(filename, \"r\") as f:\n hyperparams_dict = yaml.safe_load(f)\n if env_name in list(hyperparams_dict.keys()):\n return hyperparams_dict[env_name]\n else:\n if model_suffix:\n raise ValueError(\"Hyperparameters not found for {}_{}-{}\".format(algo_name, model_suffix, env_name))\n else:\n raise ValueError(\"Hyperparameters not found for {}-{}\".format(algo_name, env_name))\n\n\ndef _parse_normalize(dictionary):\n normalize_kwargs = {}\n if \"normalize\" in dictionary.keys():\n normalize = dictionary[\"normalize\"]\n if isinstance(normalize, str):\n normalize_kwargs = eval(normalize)\n del dictionary[\"normalize\"]\n\n return normalize_kwargs\n\n\nDEFAULT_N_EVAL_EPISODES = 0\n\n\nclass Agent(AbstractAgent):\n def __init__(\n self,\n algo_name: str = \"ppo2\",\n env_name: str = \"CartPole-v1\",\n log_to_tensorboard: bool = False,\n tb_log_name: str = \"ppo2\",\n train_total_timesteps: int = None,\n n_eval_episodes: int = DEFAULT_N_EVAL_EPISODES,\n render: bool = False,\n num_envs: int = 1,\n model_to_load: str = None,\n continue_learning: bool = False,\n continue_learning_suffix: str = \"continue_learning\",\n discrete_action_space: bool = False,\n eval_callback: bool = False,\n env_variables: EnvVariables = None,\n env_eval_callback: EnvEvalCallback = None,\n show_progress_bar: bool = False,\n log_every: int = 
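get_value_given_key() in the agent.py record above raises NameError on a CSV with no data rows, because last_row is only bound inside the loop. A single-pass variant that tolerates that case (a hypothetical helper sketched here, not part of the repository):

import csv

def last_value(filename, key, default=None):
    # Remember the most recent value of `key` while scanning; an empty CSV
    # or a missing column simply yields the default.
    value = default
    with open(filename, newline="") as f:
        for row in csv.DictReader(f):
            value = row.get(key, value)
    return value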
1000,\n save_replay_buffer: bool = True,\n save_model: bool = True,\n algo_hyperparams: str = None,\n sb_version: str = \"sb2\",\n model_suffix: str = None,\n ):\n\n self.algo_name = algo_name\n self.env_name = env_name\n self.log_to_tensorboard = log_to_tensorboard\n self.tb_log_name = tb_log_name\n self.train_total_timesteps = train_total_timesteps\n self.n_eval_episodes = n_eval_episodes\n self.render = render\n self.num_envs = num_envs\n self.model_to_load = model_to_load\n self.continue_learning = continue_learning\n self.continue_learning_suffix = continue_learning_suffix\n self.discrete_action_space = discrete_action_space\n self.eval_callback = eval_callback\n self.env_kwargs = env_variables\n self.env_eval_callback = env_eval_callback\n self.show_progress_bar = show_progress_bar\n self.log_every = log_every\n self.save_replay_buffer = save_replay_buffer\n self.save_model = save_model\n self.algo_hyperparams = algo_hyperparams\n self.model_suffix = model_suffix\n self.logger = Log(\"Agent\")\n assert sb_version == \"sb2\" or sb_version == \"sb3\", \"sb_version == sb2 or sb3: {}\".format(sb_version)\n self.sb_version = sb_version\n\n filter_tf_version_warnings()\n self.logger.debug(\"Instantiating agent\")\n\n if algo_name == \"sac\":\n assert not discrete_action_space, \"discrete_action_space not supported in sac\"\n elif algo_name == \"dqn\":\n assert discrete_action_space, \"continues_action_space not supported in dqn\"\n elif algo_name == \"ppo2\":\n self.logger.warn(\"PPO with {} action space\".format(\"continuous\" if not discrete_action_space else \"discrete\"))\n\n def _preprocess_hyperparams(self, _hyperparams):\n # Convert to python object if needed\n if \"policy_kwargs\" in _hyperparams.keys() and isinstance(_hyperparams[\"policy_kwargs\"], str):\n _hyperparams[\"policy_kwargs\"] = eval(_hyperparams[\"policy_kwargs\"])\n\n n_timesteps = _hyperparams.pop(\"n_timesteps\", None)\n n_envs = _hyperparams.pop(\"n_envs\", None)\n log_every = _hyperparams.pop(\"log_every\", None)\n if not self.continue_learning:\n if not log_every:\n self.logger.debug(\"log_every not defined in yml file: using command line log_every {}\".format(self.log_every))\n log_every = self.log_every\n else:\n self.logger.debug(\"using log_every as defined in yml file: {}\".format(log_every))\n else:\n self.logger.debug(\"priority to command line log_every {}\".format(self.log_every))\n log_every = self.log_every\n\n # Parse noise string\n if self.algo_name in [\"ddpg\", \"sac\", \"td3\"] and _hyperparams.get(\"noise_type\") is not None:\n noise_type = _hyperparams[\"noise_type\"].strip()\n noise_std = _hyperparams[\"noise_std\"]\n n_actions = get_n_actions(env_name=self.env_name, env_variables=self.env_kwargs)\n self.logger.debug(\"n_actions: {}\".format(n_actions))\n if \"adaptive-param\" in noise_type:\n assert self.algo_name == \"ddpg\", \"Parameter is not supported by SAC\"\n _hyperparams[\"param_noise\"] = AdaptiveParamNoiseSpec(initial_stddev=noise_std, desired_action_stddev=noise_std)\n elif \"normal\" in noise_type:\n if \"lin\" in noise_type:\n _hyperparams[\"action_noise\"] = LinearNormalActionNoise(\n mean=np.zeros(n_actions),\n sigma=noise_std * np.ones(n_actions),\n final_sigma=_hyperparams.get(\"noise_std_final\", 0.0) * np.ones(n_actions),\n max_steps=n_timesteps,\n )\n else:\n _hyperparams[\"action_noise\"] = NormalActionNoise(\n mean=np.zeros(n_actions), sigma=noise_std * np.ones(n_actions)\n )\n elif \"ornstein-uhlenbeck\" in noise_type:\n _hyperparams[\"action_noise\"] = 
OrnsteinUhlenbeckActionNoise(\n mean=np.zeros(n_actions), sigma=noise_std * np.ones(n_actions)\n )\n else:\n raise RuntimeError('Unknown noise type \"{}\"'.format(noise_type))\n self.logger.debug(\"Applying {} noise with std {}\".format(noise_type, noise_std))\n del _hyperparams[\"noise_type\"]\n del _hyperparams[\"noise_std\"]\n if \"noise_std_final\" in _hyperparams:\n del _hyperparams[\"noise_std_final\"]\n\n normalize_kwargs = _parse_normalize(dictionary=_hyperparams)\n\n if n_envs is None:\n self.logger.debug(\"n_envs not defined in yml file: using command line n_envs {}\".format(self.num_envs))\n n_envs = self.num_envs\n else:\n self.logger.debug(\"using n_envs as num of envs defined in yml file:\".format(n_envs))\n\n if not self.continue_learning:\n # priority to yml defined n_timesteps\n if n_timesteps is None:\n self.logger.debug(\n \"n_timesteps not defined in yml file: using command line n_timesteps {}\".format(self.train_total_timesteps)\n )\n n_timesteps = self.train_total_timesteps\n else:\n self.logger.debug(\"using n_timesteps as total timesteps defined in yml file: {}\".format(n_timesteps))\n n_timesteps = int(n_timesteps)\n else:\n if self.train_total_timesteps and self.train_total_timesteps != -1:\n assert self.train_total_timesteps <= int(n_timesteps), \"train_total_timesteps <= n_timesteps: {}, {}\".format(\n self.train_total_timesteps, n_timesteps\n )\n # priority to command line n_timesteps\n self.logger.debug(\"priority to command line n_timesteps {}\".format(self.train_total_timesteps))\n n_timesteps = self.train_total_timesteps\n elif self.train_total_timesteps == -1:\n assert n_timesteps, \"n_timesteps should have a value: {}\".format(n_timesteps)\n n_timesteps = int(n_timesteps)\n self.logger.info(\"training in continual learning = training from scratch. 
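LinearNormalActionNoise is imported from the project's utilities module and its source is not shown here; judging from its arguments (mean, sigma, final_sigma, max_steps) it is Gaussian action noise whose scale anneals linearly over training. A guess at its behaviour, for illustration only:

import numpy as np

class LinearlyAnnealedNoise:
    """Gaussian action noise whose sigma decays linearly from `sigma` to
    `final_sigma` over `max_steps` samples (an assumption about the imported
    LinearNormalActionNoise helper, not its actual source)."""

    def __init__(self, mean, sigma, final_sigma, max_steps):
        self.mean, self.sigma0, self.sigma1 = mean, sigma, final_sigma
        self.max_steps, self.t = max_steps, 0

    def __call__(self):
        frac = min(self.t / self.max_steps, 1.0)
        self.t += 1
        return np.random.normal(self.mean, (1 - frac) * self.sigma0 + frac * self.sigma1)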
n_timesteps {}\".format(n_timesteps))\n else:\n assert n_timesteps, \"n_timesteps should have a value: {}\".format(n_timesteps)\n n_timesteps = int(n_timesteps // 2)\n self.logger.debug(\n \"train_total_timesteps not specified in continue_learning: \"\n \"taking half of original n_timesteps defined in yml file {}\".format(n_timesteps)\n )\n\n assert n_timesteps % log_every == 0, \"it should be possible to divide n_timesteps for log_every: {}, {}\".format(\n n_timesteps, log_every\n )\n return normalize_kwargs, n_envs, n_timesteps, log_every, _hyperparams\n\n def _preprocess_storage_dirs(self):\n if self.model_suffix:\n best_model_save_path = (\n PREFIX_DIR_MODELS_SAVE + \"/\" + self.algo_name + \"/logs_\" + self.tb_log_name + \"_\" + self.model_suffix\n )\n else:\n best_model_save_path = PREFIX_DIR_MODELS_SAVE + \"/\" + self.algo_name + \"/logs_\" + self.tb_log_name\n if self.log_to_tensorboard:\n tensorboard_log_dir = PREFIX_DIR_MODELS_SAVE + \"/\" + self.algo_name + \"/logs_\" + self.tb_log_name\n else:\n tensorboard_log_dir = None\n return best_model_save_path, tensorboard_log_dir\n\n def _set_global_seed(self, seed):\n if self.sb_version == \"sb3\":\n set_random_seed(seed)\n else:\n set_global_seeds(seed)\n\n # TODO: could be optimized when used in search (some variables can be passed instead of being created from\n # scratch)\n def train(\n self,\n seed: int,\n communication_queue: Queue = None,\n current_iteration: int = -1,\n search_suffix: str = \"1\",\n env_variables: EnvVariables = None,\n random_search: bool = False,\n ):\n\n self._set_global_seed(seed=seed)\n\n env_kwargs_to_set = env_variables if env_variables else self.env_kwargs\n self.logger.debug(\"env_variables: {}\".format(env_kwargs_to_set.get_params_string()))\n\n reward_threshold = get_reward_threshold(env_name=self.env_name)\n\n best_model_save_path, tensorboard_log_dir = self._preprocess_storage_dirs()\n\n if current_iteration != -1 and not self.continue_learning:\n best_model_save_path = best_model_save_path + \"_\" + str(current_iteration)\n\n self.logger.debug(\"best_model_save_path: {}\".format(best_model_save_path))\n\n if communication_queue or search_suffix != \"1\":\n continue_learning_suffix = self.continue_learning_suffix + \"_\" + search_suffix\n else:\n continue_learning_suffix = self.continue_learning_suffix\n\n os.environ[\"OPENAI_LOG_FORMAT\"] = \"log,csv\"\n if self.continue_learning:\n os.environ[\"OPENAI_LOGDIR\"] = best_model_save_path + \"_\" + continue_learning_suffix\n else:\n os.environ[\"OPENAI_LOGDIR\"] = best_model_save_path\n configure()\n\n if self.algo_hyperparams:\n self.logger.debug(\"Overriding file specified hyperparams with {}\".format(eval(self.algo_hyperparams)))\n hyperparams = eval(self.algo_hyperparams)\n else:\n hyperparams = load_hyperparams(algo_name=self.algo_name, env_name=self.env_name, model_suffix=self.model_suffix)\n\n (normalize_kwargs, n_envs, n_timesteps, log_every, hyperparams,) = self._preprocess_hyperparams(\n _hyperparams=hyperparams\n )\n\n if n_envs > 1 and self.algo_name == \"ppo2\":\n # On most env, SubprocVecEnv does not help and is quite memory hungry\n env = DummyVecEnv(\n [\n make_env_parallel(\n sb_version=self.sb_version,\n seed=seed,\n rank=i,\n env_name=self.env_name,\n continue_learning=self.continue_learning,\n log_dir=best_model_save_path,\n env_kwargs=env_kwargs_to_set,\n algo_name=self.algo_name,\n continue_learning_suffix=continue_learning_suffix,\n )\n for i in range(n_envs)\n ]\n )\n if len(normalize_kwargs) > 0:\n env = normalize_env(\n 
env=env,\n vectorize=False,\n orig_log_dir=best_model_save_path,\n continue_learning=self.continue_learning,\n sb_version=self.sb_version,\n normalize_kwargs=normalize_kwargs,\n )\n else:\n env = make_custom_env(\n seed=seed,\n sb_version=self.sb_version,\n env_kwargs=env_kwargs_to_set,\n normalize_kwargs=normalize_kwargs,\n continue_learning=self.continue_learning,\n log_dir=best_model_save_path,\n env_name=self.env_name,\n algo_name=self.algo_name,\n continue_learning_suffix=continue_learning_suffix,\n )\n\n if self.n_eval_episodes > DEFAULT_N_EVAL_EPISODES:\n analysis_callback = self.build_callback(\n algo_name=self.algo_name,\n continue_learning=self.continue_learning,\n call_every=log_every,\n eval_callback=self.eval_callback,\n _reward_threshold=reward_threshold,\n eval_episodes=self.n_eval_episodes,\n _eval_env=make_custom_env(\n seed=seed,\n continue_learning=self.continue_learning,\n sb_version=self.sb_version,\n env_kwargs=env_kwargs_to_set,\n env_name=self.env_name,\n log_dir=best_model_save_path,\n algo_name=self.algo_name,\n normalize_kwargs=normalize_kwargs,\n evaluate=True,\n evaluate_during_learning=True,\n continue_learning_suffix=continue_learning_suffix,\n ),\n original_env=make_custom_env(\n seed=seed,\n continue_learning=self.continue_learning,\n sb_version=self.sb_version,\n env_kwargs=self.env_kwargs,\n env_name=self.env_name,\n log_dir=best_model_save_path,\n algo_name=self.algo_name,\n normalize_kwargs=normalize_kwargs,\n evaluate=True,\n evaluate_during_learning=True,\n ),\n env_name=self.env_name,\n _best_model_save_path=best_model_save_path,\n num_envs=n_envs,\n total_timesteps=n_timesteps,\n continue_learning_suffix=continue_learning_suffix,\n communication_queue=communication_queue,\n env_eval_callback=self.env_eval_callback,\n save_replay_buffer=self.save_replay_buffer,\n save_model=self.save_model,\n random_search=random_search,\n )\n else:\n analysis_callback = self.build_callback(\n algo_name=self.algo_name,\n continue_learning=self.continue_learning,\n call_every=log_every,\n eval_callback=self.eval_callback,\n _reward_threshold=reward_threshold,\n eval_episodes=self.n_eval_episodes,\n env_name=self.env_name,\n _best_model_save_path=best_model_save_path,\n num_envs=n_envs,\n continue_learning_suffix=continue_learning_suffix,\n save_replay_buffer=self.save_replay_buffer,\n save_model=self.save_model,\n random_search=random_search,\n )\n\n if self.continue_learning:\n model = self.create_model(\n seed=seed,\n algo_name=self.algo_name,\n env=env,\n tensorboard_log_dir=tensorboard_log_dir,\n hyperparams=hyperparams,\n best_model_save_path=best_model_save_path,\n n_timesteps=n_timesteps,\n continue_learning=True,\n env_name=self.env_name,\n model_to_load=self.model_to_load,\n save_replay_buffer=self.save_replay_buffer,\n )\n else:\n model = self.create_model(\n seed=seed,\n algo_name=self.algo_name,\n env=env,\n tensorboard_log_dir=tensorboard_log_dir,\n hyperparams=hyperparams,\n env_name=self.env_name,\n n_timesteps=n_timesteps,\n model_to_load=self.model_to_load,\n save_replay_buffer=self.save_replay_buffer,\n )\n\n try:\n callback_list = [analysis_callback]\n\n # if len(normalize_kwargs) > 0 and not self.continue_learning:\n # callback_list = [self._build_vec_normalize_callback(save_path=best_model_save_path,\n # log_every=log_every), analysis_callback]\n\n if self.show_progress_bar:\n with ProgressBarManager(total_timesteps=n_timesteps, sb_version=self.sb_version) as progress_callback:\n callback_list.append(progress_callback)\n if self.continue_learning 
and self.log_to_tensorboard:\n model.learn(\n total_timesteps=n_timesteps,\n callback=callback_list,\n tb_log_name=self.tb_log_name + \"_\" + continue_learning_suffix,\n )\n else:\n model.learn(\n total_timesteps=n_timesteps, callback=callback_list, tb_log_name=self.tb_log_name,\n )\n\n else:\n if self.continue_learning and self.log_to_tensorboard:\n model.learn(\n total_timesteps=n_timesteps,\n callback=callback_list,\n tb_log_name=self.tb_log_name + \"_\" + continue_learning_suffix,\n )\n else:\n self.logger.debug(\"Model learn start...\")\n model.learn(\n total_timesteps=n_timesteps, callback=callback_list, tb_log_name=self.tb_log_name,\n )\n self.logger.debug(\"Model learn end\")\n except KeyboardInterrupt:\n pass\n finally:\n if len(normalize_kwargs) > 0 and not self.continue_learning:\n # Important: save the running average, for testing the agent we need that normalization\n model.get_vec_normalize_env().save(os.path.join(best_model_save_path, \"vecnormalize.pkl\"))\n\n # Release resources\n env.close()\n\n def _build_vec_normalize_callback(self, save_path: str, log_every: int):\n\n if self.sb_version == \"sb2\":\n return SaveVecNormalizeCallback(log_every=log_every, save_path=save_path)\n return SaveVecNormalizeCallbackSb3(log_every=log_every, save_path=save_path)\n\n def test_with_callback(self, seed, env_variables: EnvVariables, n_eval_episodes: int = None) -> EnvPredicatePair:\n\n assert self.env_eval_callback, \"env_eval_callback should be instantiated\"\n\n self._set_global_seed(seed=seed)\n\n self.logger.debug(\"env_variables: {}\".format(env_variables.get_params_string()))\n\n best_model_save_path, tensorboard_log_dir = self._preprocess_storage_dirs()\n\n if self.algo_hyperparams:\n self.logger.debug(\"Overriding file specified hyperparams with {}\".format(eval(self.algo_hyperparams)))\n hyperparams = eval(self.algo_hyperparams)\n else:\n hyperparams = load_hyperparams(algo_name=self.algo_name, env_name=self.env_name)\n\n normalize_kwargs = _parse_normalize(dictionary=hyperparams)\n\n eval_env = make_custom_env(\n seed=seed,\n sb_version=self.sb_version,\n env_kwargs=env_variables,\n algo_name=self.algo_name,\n env_name=self.env_name,\n normalize_kwargs=normalize_kwargs,\n log_dir=best_model_save_path,\n evaluate=True,\n continue_learning_suffix=self.continue_learning_suffix,\n )\n model = self.create_model(\n seed=seed,\n algo_name=self.algo_name,\n env=eval_env,\n tensorboard_log_dir=tensorboard_log_dir,\n hyperparams=hyperparams,\n best_model_save_path=best_model_save_path,\n model_to_load=self.model_to_load,\n env_name=self.env_name,\n )\n\n n_eval_episodes_to_run = n_eval_episodes if n_eval_episodes else self.n_eval_episodes\n adequate_performance, info = self.env_eval_callback.evaluate_env(\n model=model, env=eval_env, n_eval_episodes=n_eval_episodes_to_run, sb_version=self.sb_version,\n )\n return EnvPredicatePair(env_variables=env_variables, predicate=adequate_performance, execution_info=info,)\n\n def test_without_callback(self, seed, n_eval_episodes: int = 0, model_path: str = None) -> Tuple[float, float]:\n assert n_eval_episodes > 0 or self.n_eval_episodes > 0, \"n_eval_episodes > 0: {}, {}\".format(\n n_eval_episodes, self.n_eval_episodes\n )\n\n self._set_global_seed(seed=seed)\n\n if n_eval_episodes == 0:\n n_eval_episodes = self.n_eval_episodes\n\n best_model_save_path, tensorboard_log_dir = self._preprocess_storage_dirs()\n if model_path:\n best_model_save_path = model_path\n\n if self.algo_hyperparams:\n self.logger.debug(\"Overriding file specified 
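The finally block above persists the VecNormalize running averages precisely so that later evaluation can replay them: a policy trained on normalized observations is useless against raw ones. A sketch of the reload side using stable-baselines3's API (the sb2 path differs slightly; make_env is a hypothetical factory returning a gym.Env):

from stable_baselines3.common.vec_env import DummyVecEnv, VecNormalize

venv = DummyVecEnv([make_env])  # make_env: hypothetical env factory
venv = VecNormalize.load("logs/vecnormalize.pkl", venv)
venv.training = False      # freeze the running mean/std
venv.norm_reward = False   # report raw rewards while evaluating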
hyperparams with {}\".format(eval(self.algo_hyperparams)))\n hyperparams = eval(self.algo_hyperparams)\n else:\n hyperparams = load_hyperparams(algo_name=self.algo_name, env_name=self.env_name)\n\n normalize_kwargs = _parse_normalize(dictionary=hyperparams)\n\n eval_env = make_custom_env(\n seed=seed,\n sb_version=self.sb_version,\n env_kwargs=self.env_kwargs,\n algo_name=self.algo_name,\n env_name=self.env_name,\n log_dir=best_model_save_path,\n normalize_kwargs=normalize_kwargs,\n evaluate=True,\n continue_learning_suffix=self.continue_learning_suffix,\n )\n model = self.create_model(\n seed=seed,\n algo_name=self.algo_name,\n env=eval_env,\n tensorboard_log_dir=tensorboard_log_dir,\n hyperparams=hyperparams,\n best_model_save_path=best_model_save_path,\n model_to_load=self.model_to_load,\n env_name=self.env_name,\n )\n\n mean_reward, std_reward = custom_evaluate_policy(\n model, eval_env, n_eval_episodes=n_eval_episodes, render=self.render, deterministic=True,\n )\n\n # release resources\n eval_env.close()\n\n return mean_reward, std_reward\n\n def test(self, seed, continue_learning_suffix: str = None, env_variables: EnvVariables = None):\n\n assert self.n_eval_episodes > 0, \"n_eval_episodes > 0: {}\".format(self.n_eval_episodes)\n\n self._set_global_seed(seed=seed)\n\n env_kwargs_to_set = env_variables if env_variables else self.env_kwargs\n self.logger.debug(\"env_variables: {}\".format(env_kwargs_to_set.get_params_string()))\n\n best_model_save_path, tensorboard_log_dir = self._preprocess_storage_dirs()\n\n if self.algo_hyperparams:\n self.logger.debug(\"Overriding file specified hyperparams with {}\".format(eval(self.algo_hyperparams)))\n hyperparams = eval(self.algo_hyperparams)\n else:\n hyperparams = load_hyperparams(algo_name=self.algo_name, env_name=self.env_name)\n\n normalize_kwargs = _parse_normalize(dictionary=hyperparams)\n\n if self.continue_learning and not continue_learning_suffix:\n best_model_save_path = best_model_save_path + \"_\" + self.continue_learning_suffix + \"/\"\n elif self.continue_learning and continue_learning_suffix:\n best_model_save_path = best_model_save_path + \"_\" + continue_learning_suffix + \"/\"\n\n eval_env = make_custom_env(\n seed=seed,\n sb_version=self.sb_version,\n env_kwargs=env_kwargs_to_set,\n algo_name=self.algo_name,\n env_name=self.env_name,\n log_dir=best_model_save_path,\n normalize_kwargs=normalize_kwargs,\n evaluate=True,\n continue_learning_suffix=self.continue_learning_suffix,\n )\n model = self.create_model(\n seed=seed,\n algo_name=self.algo_name,\n env=eval_env,\n tensorboard_log_dir=tensorboard_log_dir,\n hyperparams=hyperparams,\n best_model_save_path=best_model_save_path,\n model_to_load=self.model_to_load,\n env_name=self.env_name,\n )\n\n if self.show_progress_bar:\n with ProgressBarManager(total_timesteps=self.n_eval_episodes, sb_version=self.sb_version) as progress_callback:\n mean_reward, std_reward = custom_evaluate_policy(\n model,\n eval_env,\n n_eval_episodes=self.n_eval_episodes,\n render=self.render,\n callback=progress_callback,\n deterministic=True,\n )\n else:\n mean_reward, std_reward = custom_evaluate_policy(\n model, eval_env, n_eval_episodes=self.n_eval_episodes, render=self.render, deterministic=True,\n )\n\n self.logger.debug(f\"mean_reward:{mean_reward:.2f} +/- {std_reward:.2f}\")\n\n # release resources\n eval_env.close()\n\n def load_model(self, directory, model_name=None):\n abs_dir_models = os.path.abspath(directory)\n if model_name:\n model_to_load = os.path.join(abs_dir_models, model_name + 
\".zip\")\n self.logger.debug(\"Searching model file {}\".format(model_to_load))\n else:\n self.logger.debug(\"Searching model file in directory {}\".format(abs_dir_models))\n model_files = glob.glob(abs_dir_models + \"/*.zip\")\n model_to_load = max(model_files, key=os.path.getmtime)\n if not os.path.exists(model_to_load):\n raise FileNotFoundError(\"File \" + model_to_load + \" not found\")\n self.logger.debug(\"Loading model file {}\".format(model_to_load))\n return model_to_load\n\n def create_model(\n self,\n seed,\n algo_name,\n env,\n tensorboard_log_dir,\n hyperparams,\n best_model_save_path=None,\n model_to_load=None,\n continue_learning=False,\n env_name=\"CartPole-v1\",\n n_timesteps=-1,\n save_replay_buffer: bool = True,\n ):\n\n old_hyperparams = dict()\n\n # Create learning rate schedules for ppo2 and sac\n if algo_name in [\"ppo2\", \"sac\", \"td3\"]:\n for key in [\"learning_rate\", \"cliprange\", \"cliprange_vf\"]:\n if key not in hyperparams:\n continue\n if isinstance(hyperparams[key], str):\n self.logger.debug(\"Key {}, value {}\".format(key, hyperparams[key]))\n old_hyperparams[key] = hyperparams[key]\n schedule, initial_value = hyperparams[key].split(\"_\")\n initial_value = float(initial_value)\n hyperparams[key] = linear_schedule(initial_value)\n elif isinstance(hyperparams[key], (float, int)):\n # Negative value: ignore (ex: for clipping)\n if hyperparams[key] < 0:\n continue\n old_hyperparams[key] = float(hyperparams[key])\n hyperparams[key] = constfn(float(hyperparams[key]))\n else:\n raise ValueError(\"Invalid value for {}: {}\".format(key, hyperparams[key]))\n\n if algo_name == \"ppo2\":\n\n if self.sb_version == \"sb3\":\n raise NotImplementedError(\"PPO still in sb2\")\n\n if best_model_save_path and continue_learning:\n model = PPO2.load(\n self.load_model(best_model_save_path, model_to_load),\n env=env,\n tensorboard_log=tensorboard_log_dir,\n verbose=1,\n )\n key = \"cliprange\"\n cl_cliprange_value = 0.08 # new policy can be a bit different than the old one\n if key in old_hyperparams:\n if isinstance(old_hyperparams[key], str):\n self.logger.debug(\"Setting cliprange to lin_{}\".format(cl_cliprange_value))\n model.cliprange = linear_schedule(cl_cliprange_value)\n elif isinstance(old_hyperparams[key], (float, int)):\n self.logger.debug(\"Setting cliprange to value {}\".format(cl_cliprange_value))\n model.cliprange = constfn(cl_cliprange_value)\n else:\n # default value is too high for continual learning (0.2)\n self.logger.debug(\"Setting cliprange to value {}\".format(cl_cliprange_value))\n model.cliprange = cl_cliprange_value\n\n return model\n elif best_model_save_path:\n return PPO2.load(\n self.load_model(best_model_save_path, model_to_load),\n env=env,\n tensorboard_log=tensorboard_log_dir,\n verbose=1,\n n_cpu_tf_sess=n_cpu_tf_sess,\n )\n return PPO2(env=env, verbose=1, tensorboard_log=tensorboard_log_dir, **hyperparams, n_cpu_tf_sess=n_cpu_tf_sess,)\n\n elif algo_name == \"sac\":\n if self.sb_version == \"sb3\":\n if best_model_save_path and continue_learning:\n model = stable_baselines3.SAC.load(\n self.load_model(best_model_save_path, model_to_load),\n env=env,\n seed=seed,\n tensorboard_log=tensorboard_log_dir,\n verbose=1,\n )\n model.load_replay_buffer(path=best_model_save_path + \"/replay_buffer\")\n self.logger.debug(\"Model replay buffer size: {}\".format(model.replay_buffer.size()))\n self.logger.debug(\"Setting learning_starts to 0\")\n model.learning_starts = 0\n\n value = get_value_given_key(best_model_save_path + \"/progress.csv\", 
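create_model() above converts string-typed hyperparameters such as "lin_0.001" into schedule callables. The two helpers it leans on follow the usual stable-baselines convention, in which a progress fraction anneals from 1.0 to 0.0 over training; sketched here for reference (the repo's own utilities.linear_schedule may differ in detail):

def linear_schedule(initial_value):
    # `progress` anneals from 1.0 to 0.0 over training, so scaling it gives
    # a linearly decaying rate that reaches zero at the end.
    def schedule(progress):
        return progress * initial_value
    return schedule

def constfn(value):
    # Constant schedule with the same call signature.
    def schedule(_progress):
        return value
    return schedule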
\"ent_coef\")\n if value:\n ent_coef = float(value)\n self.logger.debug(\"Restore model old ent_coef: {}\".format(\"auto_\" + str(ent_coef)))\n model.ent_coef = \"auto_\" + str(ent_coef)\n model.target_entropy = str(ent_coef)\n\n return model\n elif best_model_save_path:\n return stable_baselines3.SAC.load(\n self.load_model(best_model_save_path, model_to_load),\n env=env,\n seed=seed,\n tensorboard_log=tensorboard_log_dir,\n verbose=1,\n n_cpu_tf_sess=n_cpu_tf_sess,\n )\n assert n_timesteps > 0, \"n_timesteps > 0: {}\".format(n_timesteps)\n return stable_baselines3.SAC(env=env, verbose=0, seed=seed, tensorboard_log=tensorboard_log_dir, **hyperparams)\n\n else:\n if best_model_save_path and continue_learning:\n model = CustomSAC.load(\n self.load_model(best_model_save_path, model_to_load),\n env=env,\n tensorboard_log=tensorboard_log_dir,\n verbose=1,\n )\n self.logger.debug(\"Model replay buffer size: {}\".format(len(model.replay_buffer)))\n self.logger.debug(\"Setting learning_starts to 0\")\n model.learning_starts = 0\n if not save_replay_buffer:\n self.logger.debug(\"Setting save_replay_buffer to False\")\n model.save_replay_buffer = False\n\n value = get_value_given_key(best_model_save_path + \"/progress.csv\", \"ent_coef\")\n if value:\n ent_coef = float(value)\n self.logger.debug(\"Restore model old ent_coef: {}\".format(\"auto_\" + str(ent_coef)))\n model.ent_coef = \"auto_\" + str(ent_coef)\n model.target_entropy = str(ent_coef)\n\n return model\n\n elif best_model_save_path:\n # do not load replay buffer since we are in testing mode (no continue_learning)\n return SAC.load(\n self.load_model(best_model_save_path, model_to_load),\n env=env,\n tensorboard_log=tensorboard_log_dir,\n verbose=1,\n n_cpu_tf_sess=n_cpu_tf_sess,\n )\n return CustomSAC(\n total_timesteps=n_timesteps,\n env=env,\n verbose=1,\n tensorboard_log=tensorboard_log_dir,\n **hyperparams,\n n_cpu_tf_sess=n_cpu_tf_sess,\n save_replay_buffer=save_replay_buffer,\n )\n\n elif algo_name == \"dqn\":\n\n if self.sb_version == \"sb3\":\n\n if best_model_save_path:\n if continue_learning:\n model = stable_baselines3.DQN.load(\n self.load_model(best_model_save_path, model_to_load),\n env=env,\n seed=seed,\n tensorboard_log=tensorboard_log_dir,\n verbose=0,\n )\n model.load_replay_buffer(path=best_model_save_path + \"/replay_buffer\")\n model.learning_starts = 0\n model.exploration_fraction = 0.0005\n model.exploration_initial_eps = model.exploration_final_eps\n model.exploration_schedule = get_linear_fn(\n model.exploration_initial_eps, model.exploration_final_eps, model.exploration_fraction\n )\n self.logger.debug(\"Model replay buffer size: {}\".format(model.replay_buffer.size()))\n self.logger.debug(\"Setting learning_starts to {}\".format(model.learning_starts))\n self.logger.debug(\"Setting exploration_fraction to {}\".format(model.exploration_fraction))\n self.logger.debug(\"Setting exploration_initial_eps to {}\".format(model.exploration_initial_eps))\n return model\n return stable_baselines3.DQN.load(\n self.load_model(best_model_save_path, model_to_load),\n env=env,\n seed=seed,\n tensorboard_log=tensorboard_log_dir,\n verbose=1,\n )\n return stable_baselines3.DQN(env=env, verbose=0, seed=seed, tensorboard_log=tensorboard_log_dir, **hyperparams)\n else:\n if best_model_save_path:\n if continue_learning:\n model = CustomDQN.load(\n self.load_model(best_model_save_path, model_to_load),\n env=env,\n tensorboard_log=tensorboard_log_dir,\n verbose=1,\n )\n self.logger.debug(\"Model replay buffer size: 
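The continue-learning branch for sb3 DQN below rebuilds the exploration schedule after shrinking exploration_fraction. get_linear_fn is stable-baselines3's own helper; its shape is roughly the following (paraphrased from memory, consult sb3 for the authoritative version):

def get_linear_fn(start, end, end_fraction):
    # progress_remaining anneals 1.0 -> 0.0; the value moves linearly from
    # `start` to `end` over the first `end_fraction` of training, then holds.
    def func(progress_remaining):
        if (1 - progress_remaining) > end_fraction:
            return end
        return start + (1 - progress_remaining) * (end - start) / end_fraction
    return func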
{}\".format(len(model.replay_buffer)))\n self.logger.debug(\n \"Setting exploration initial eps to exploration final eps {}\".format(model.exploration_final_eps)\n )\n self.logger.debug(\"Setting learning_starts to 0\")\n if not save_replay_buffer:\n self.logger.debug(\"Setting save_replay_buffer to False\")\n model.save_replay_buffer = False\n model.learning_starts = 0\n model.exploration_fraction = 0.005\n model.exploration_initial_eps = model.exploration_final_eps\n return model\n return DQN.load(\n self.load_model(best_model_save_path, model_to_load),\n env=env,\n tensorboard_log=tensorboard_log_dir,\n verbose=1,\n n_cpu_tf_sess=n_cpu_tf_sess,\n )\n return CustomDQN(\n env=env,\n save_replay_buffer=save_replay_buffer,\n verbose=1,\n tensorboard_log=tensorboard_log_dir,\n **hyperparams,\n n_cpu_tf_sess=n_cpu_tf_sess,\n )\n raise NotImplementedError(\"algo_name {} not supported yet\".format(algo_name))\n\n def build_checkpoint_callback(self, save_freq=10000, save_path=None):\n self.logger.debug(\"Checkpoint callback called every {} timesteps\".format(save_freq))\n return CheckpointCallback(save_freq=save_freq, save_path=save_path)\n\n def build_logging_training_metrics_callback(\n self,\n algo_name=\"ppo2\",\n env_name=None,\n log_every=1000,\n save_path=None,\n num_envs=1,\n _eval_env=None,\n original_env=None,\n total_timesteps=0,\n n_eval_episodes=10,\n communication_queue=None,\n env_eval_callback=None,\n continue_learning=False,\n save_replay_buffer=True,\n save_model=True,\n random_search=False,\n ):\n self.logger.debug(\"Logging training metrics callback called every {}\".format(log_every))\n if self.sb_version == \"sb3\":\n return LoggingTrainingMetricsCallbackSb3(\n log_every=log_every,\n log_dir=save_path,\n num_envs=num_envs,\n env_name=env_name,\n total_timesteps=total_timesteps,\n eval_env=_eval_env,\n original_env=original_env,\n n_eval_episodes=n_eval_episodes,\n communication_queue=communication_queue,\n env_eval_callback=env_eval_callback,\n continue_learning=continue_learning,\n save_replay_buffer=save_replay_buffer,\n save_model=save_model,\n random_search=random_search,\n )\n return LoggingTrainingMetricsCallback(\n log_every=log_every,\n log_dir=save_path,\n num_envs=num_envs,\n env_name=env_name,\n total_timesteps=total_timesteps,\n eval_env=_eval_env,\n original_env=original_env,\n n_eval_episodes=n_eval_episodes,\n communication_queue=communication_queue,\n env_eval_callback=env_eval_callback,\n continue_learning=continue_learning,\n save_model=save_model,\n random_search=random_search,\n )\n\n def build_eval_callback(\n self, eval_freq=10000, reward_threshold=900, log_path=None, eval_episodes=10, eval_env=None,\n ):\n callback_on_best = StopTrainingOnRewardThreshold(reward_threshold=reward_threshold, verbose=1)\n eval_callback = EvalCallback(\n eval_env=eval_env,\n best_model_save_path=log_path,\n log_path=log_path,\n eval_freq=eval_freq,\n deterministic=True,\n render=False,\n n_eval_episodes=eval_episodes,\n callback_on_new_best=callback_on_best,\n verbose=1,\n )\n self.logger.debug(\n \"Eval callback called every {} timesteps: stop training when mean reward is above {} in {} episodes\".format(\n eval_freq, reward_threshold, eval_episodes\n )\n )\n return eval_callback\n\n def build_callback(\n self,\n algo_name=\"ppo2\",\n continue_learning=False,\n call_every=1000,\n eval_callback=False,\n _reward_threshold=900,\n eval_episodes=10,\n _eval_env=None,\n original_env=None,\n _best_model_save_path=None,\n num_envs=1,\n env_name=None,\n 
continue_learning_suffix=\"continue_learning\",\n communication_queue=None,\n env_eval_callback=None,\n total_timesteps=0,\n save_replay_buffer=True,\n save_model=True,\n random_search=False,\n ):\n if continue_learning:\n save_path = _best_model_save_path + \"_\" + continue_learning_suffix + \"/\"\n else:\n save_path = _best_model_save_path\n\n if eval_callback:\n return self.build_eval_callback(\n eval_env=_eval_env,\n eval_freq=call_every,\n reward_threshold=_reward_threshold,\n log_path=save_path,\n eval_episodes=eval_episodes,\n )\n else:\n if _eval_env:\n return self.build_logging_training_metrics_callback(\n algo_name=algo_name,\n env_name=env_name,\n log_every=call_every,\n save_path=save_path,\n num_envs=num_envs,\n _eval_env=_eval_env,\n original_env=original_env,\n n_eval_episodes=eval_episodes,\n communication_queue=communication_queue,\n env_eval_callback=env_eval_callback,\n total_timesteps=total_timesteps,\n continue_learning=continue_learning,\n save_replay_buffer=save_replay_buffer,\n save_model=save_model,\n random_search=random_search,\n )\n return self.build_logging_training_metrics_callback(\n algo_name=algo_name,\n env_name=env_name,\n log_every=call_every,\n save_path=save_path,\n num_envs=num_envs,\n save_replay_buffer=save_replay_buffer,\n save_model=save_model,\n )\n","repo_name":"testingautomated-usi/rl-plasticity-experiments","sub_path":"src/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":46360,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"34451176416","text":"import operator\n\nops = {\n '+' : operator.add,\n '-' : operator.sub,\n '*' : operator.mul,\n '/' : operator.truediv,\n '%' : operator.mod,\n '^' : operator.pow,\n}\n\nfirst_number = float(input())\nsecond_number = float(input())\nsign = input()\n\nif sign in ops:\n answer = ops[sign](first_number, second_number)\nprint(answer)\n\n\n","repo_name":"everybees/parsel_tongue","sub_path":"itunu/calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"} +{"seq_id":"42389963596","text":"\r\n#Basice calculator\r\n'''num_1=int(input(\"Enter the first number:\"))\r\nop=input(\"Enter a operator:\")\r\nnum_2=int(input(\"Enter the second number:\"))\r\n\r\nif op=='+':\r\n print(num_1+num_2)\r\nelif op=='-':\r\n print(num_1-num_2)\r\nelif op=='*':\r\n print(num_1*num_2)\r\nelif op=='/':\r\n print(num_1/num_2)\r\nelse:\r\n print(\"Invalid input number..\")'''\r\n\r\n#Gassing game\r\n\r\n'''password='25iwritecode'\r\nGuess=input(\"Guess a password:\")\r\n\r\nif Guess== password:\r\n print(\"Logging in...\")\r\nelse:\r\n print(\"incorrect password\")'''\r\n\r\n#Guess\r\n'''staff_name='Ragav'\r\nguess=\" \"\r\nwhile guess != staff_name:\r\n guess=input(\"What is the name of the staff:\")\r\n if guess != staff_name:\r\n guess\r\n else:\r\n\r\n print(\"Oo i see he is working well...\")'''\r\n\r\n#Buliding a guess game:\r\n\r\n'''password='default code'\r\nguess=\" \"\r\nguess_limit=3\r\nstart_count=1\r\nend_of_guess=False\r\n\r\n\r\nwhile guess != password and not end_of_guess:\r\n if start_count <=guess_limit:\r\n guess=input(\"Please Enter your password:\")\r\n start_count+=1\r\n else:\r\n end_of_guess=True\r\nif end_of_guess:\r\n print(\"You have insert maximum time incorrect password\\n\"\r\n \"please wait for 5 min we are fetching out.\")\r\nelse:\r\n print(\"Logging you in...\")'''\r\n\r\n'''\r\n#A dictornay of 
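In the calculator record just above (itunu/calculator.py), answer is only assigned when the sign is found in the dispatch table, so an unknown operator makes the final print raise NameError. The same table-dispatch idea with that failure mode handled, as a small sketch:

import operator

OPS = {
    '+': operator.add, '-': operator.sub, '*': operator.mul,
    '/': operator.truediv, '%': operator.mod, '^': operator.pow,
}

def calculate(a, b, sign):
    # Dispatch through the table, failing loudly on an unknown operator
    # instead of leaving the result unbound.
    if sign not in OPS:
        raise ValueError(f"unsupported operator: {sign!r}")
    return OPS[sign](a, b)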
year\r\ndef main():\r\n var=''\r\n start_count=1\r\n\r\n month_year={'jan':'janauary',\r\n 'feb':'febuary',\r\n 'mar':'march',\r\n 'apr':'april',\r\n 'may':'may',\r\n 'jun':'june',\r\n 'jul':'july',\r\n 'agu':'aguest',\r\n 'sept':'september',\r\n 'oct':'october',\r\n 'nov':'november',\r\n 'dec':'december',\r\n }\r\n try:\r\n print(month_year[input(\"Enter a any year:\" )])\r\n while var != month_year :\r\n if start_count<7:\r\n print(month_year[input(\"Enter a any year:\"\"\")])\r\n start_count += 1\r\n else:\r\n print(\"You can't able to type in..\")\r\n except:\r\n print(\"invalid input\")\r\n var=input(\"Do you want to restart the game:\")\r\n if var=='yes'.lower():\r\n print(\"restaring\")\r\n main()\r\n elif var=='no'.lower():\r\n print(\"player don't want to restart it..\")\r\n else:\r\n print(\"Invalid input\")\r\nmain()\r\n'''\r\n\r\n\r\n#print(month_year[\"jan\"])\r\n#An example\r\n\r\n'''C1='Jamil'\r\nC2=\"Ranger\"\r\nJamil= input(str(C1)+ \" Enter your strength:\")\r\nprint(C1,\"Enter your strength:\",Jamil)\r\nFalli=input(str(C2)+\" Enter your skill:\")\r\nprint(C2,\" Enter your skill:\",Falli)'''\r\n\r\n\r\n\r\n\r\n\r\n'''def main():\r\n while True:\r\n var=input(\"Enter anything:\").lower()\r\n if var=='yes':\r\n print(\"restarting\")\r\nmain()'''\r\n\r\n\r\nvar=input(\"Up_to how many number of table you want type here:\")\r\nvar=int(var)\r\nfor j in range(1,var+1):\r\n for i in range(10+1):\r\n print(end=\" \")\r\n print(j,\"x\",i,\"=\",i*j)\r\n\r\n\r\n#Or we can write like this -\r\nnum=int(input(\"Enter a number:\"))\r\nfor i in range(1,num+1):\r\n for j in range(1,11):\r\n print(end=\" \")\r\n print(f\"{i} * {j} = {i * j}\")\r\n\r\n\r\n# num=int(input(\"Enter over here:\"))\r\n# for i in range(1,11):\r\n# print(num,\"x\",i,\"=\",num*i)\r\n\r\n","repo_name":"Deependrakumarrout/Small-Assignment","sub_path":"Simple project/pro.py","file_name":"pro.py","file_ext":"py","file_size_in_byte":3314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2155512715","text":"import signal\nimport datetime\nimport netifaces as ni\nfrom RPi import GPIO\nfrom PIL import ImageFont\nfrom luma.core.interface.serial import i2c\nfrom luma.oled.device import ssd1306\nfrom luma.core.render import canvas\nfrom sensors.bmp280 import BMP280\nfrom sensors.ds18b20 import Thermometer\n\n\nclass Display:\n\n def __init__(self):\n fontsize = 11\n font = ImageFont.truetype('DejaVuSans-Bold.ttf', fontsize)\n\n self.style = dict(fill='white', font=font)\n self.line = [(5, 2 * fontsize * i) for i in range(3)]\n self.disp_on = True\n self.a_screen = ssd1306(i2c(port=1, address=0x3C))\n self.b_screen = ssd1306(i2c(port=1, address=0x3D))\n\n def control_setup(self, disp_btn, measure_btn):\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(disp_btn, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n GPIO.setup(measure_btn, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n GPIO.add_event_detect(disp_btn, GPIO.FALLING,\n callback=self.turn_onoff,\n bouncetime=300)\n GPIO.add_event_detect(measure_btn, GPIO.FALLING,\n callback=self.refresh,\n bouncetime=200)\n\n def turn_onoff(self, channel):\n if self.disp_on:\n self.a_screen.hide()\n self.b_screen.hide()\n else:\n self.a_screen.show()\n self.b_screen.show()\n self.disp_on = not self.disp_on\n\n def weather(self, t, p):\n time = datetime.datetime.now().strftime('%H:%M:%S')\n with canvas(self.a_screen) as draw:\n draw.text(self.line[0], 'WEATHER {}'.format(time), **self.style)\n draw.text(self.line[1], 't = {:.2f} °C'.format(t), **self.style)\n 
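The month-lookup game in the record above hides KeyError behind a bare except and misspells several names ('janauary', 'aguest'). A tightened lookup, illustrative only:

MONTHS = {
    'jan': 'January', 'feb': 'February', 'mar': 'March', 'apr': 'April',
    'may': 'May', 'jun': 'June', 'jul': 'July', 'aug': 'August',
    'sep': 'September', 'oct': 'October', 'nov': 'November', 'dec': 'December',
}

def month_name(abbrev):
    # Normalise the key; fail with a clear message rather than a bare except.
    try:
        return MONTHS[abbrev.strip().lower()]
    except KeyError:
        raise ValueError(f"unknown month abbreviation: {abbrev!r}")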
draw.text(self.line[2], 'p = {:.2f} hPa'.format(p), **self.style)\n\n def localnet(self, ifnet, ip, mac):\n with canvas(self.b_screen) as draw:\n draw.text(self.line[0], 'Ethernet - {}'.format(ifnet),\n **self.style)\n draw.text(self.line[1], ip, **self.style)\n draw.text(self.line[2], mac, **self.style)\n\n def refresh(self, event=None):\n t = Thermometer()\n p = BMP280()\n p.measure()\n\n h = 150\n interface = 'eth0'\n mac = ni.ifaddresses(interface)[ni.AF_LINK][0]['addr']\n try:\n ip = ni.ifaddresses(interface)[ni.AF_INET][0]['addr']\n except KeyError:\n ip = 'Connection closed!'\n\n self.weather(t.temperature, p.mslp_pressure(h))\n self.localnet(interface, ip, mac)\n\ndisplay = Display()\ndisplay.control_setup(17, 27)\ndisplay.refresh()\nsignal.pause()\n","repo_name":"etakerim/MeteoIoT","sub_path":"flask_logger_old/infodisplay.py","file_name":"infodisplay.py","file_ext":"py","file_size_in_byte":2654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5574255068","text":"from clients import Client\nfrom cards import DebitCard, CreditCard\n\nCATEGORY_GOLD = \"GOLD\"\nCATEGORY_PLATINUM = \"PLATINUM\"\nCATEGORY_BLACK = \"BLACK\"\n\n# Creamos las instancias de TC, TD y CLI\ndc = DebitCard(\"2343 2343 9804 9084 90845\", CATEGORY_PLATINUM, \"05/22\", \"05/26\", \"822\")\ncc = CreditCard(\"2343 2343 9804 9084 90847\", CATEGORY_PLATINUM, \"05/22\", \"05/26\", \"123\", 50000)\ncli1 = Client(\"Martin\", \"Coronado\", \"+54 11 23123232\", \"2932932\", \"Belgrano 123\", CATEGORY_PLATINUM, dc, cc)\n\nprint(cli1)\n\n# Agregar tarjeta de credito o debito (esto pisara la tarjeta actual del cliente)\ncli1.add_credit_card(cc)\ncli1.add_debit_card(dc)\n\nprint(cli1)\n\n# Cambiar la categoria del cliente\ncli1.upgrade_category(CATEGORY_BLACK)\n\nprint(cli1)\n\n# Cancelar una tarjeta de credito\ncli1.cancel_credit_card()\n\nprint(cli1)\n\n# Suspender un cliente\ncli1.suspend_client()\n\nprint(cli1.client_state())\n\n\n\n\n\n","repo_name":"mszubillag24ucema/Segundo_Repositorio","sub_path":"clases_POO/mian.py","file_name":"mian.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74794970825","text":"class Solution:\n def networkDelayTime(self, times: List[List[int]], n: int, k: int) -> int:\n # build graph\n graph = collections.defaultdict(list)\n for i, j, w in times:\n graph[i].append((w, j))\n \n # build table\n table = [0]\n for i in range(1, n+1):\n table.append(0 if i == k else float('inf'))\n \n # bellman ford\n old_table = []\n while not self.same_list(old_table, table):\n old_table = table.copy()\n for node in range(1, n+1):\n for w_nei, nei in graph.get(node, []):\n if table[nei] > table[node]+w_nei:\n table[nei] = table[node]+w_nei\n \n _max = max(table)\n return _max if _max != float('inf') else -1\n \n def same_list(self, list1, list2):\n if len(list1) != len(list2):\n return False\n for v1, v2 in zip(list1, list2):\n if v1 != v2:\n return False\n return True\n","repo_name":"novayo/LeetCode","sub_path":"0743_Network_Delay_Time/try_3.py","file_name":"try_3.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"29483647738","text":"#!/bin/env python\n\nfor _ in range(int(input())):\n max_weight, n = map(int, input().split())\n\n stops = []\n for _ in range(n):\n distance, weight = map(int, input().split())\n stops.append((distance, 
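The networkDelayTime record above runs Bellman-Ford to a fixed point, copying the whole distance table on every sweep. Edge weights in this problem are positive, so Dijkstra with a binary heap reaches the same answer in O(E log V); a self-contained alternative sketch:

import collections
import heapq

def network_delay_time(times, n, k):
    graph = collections.defaultdict(list)
    for u, v, w in times:
        graph[u].append((w, v))
    dist = {}
    heap = [(0, k)]
    while heap:
        d, node = heapq.heappop(heap)
        if node in dist:
            continue  # already settled with a shorter distance
        dist[node] = d
        for w, nei in graph[node]:
            if nei not in dist:
                heapq.heappush(heap, (d + w, nei))
    # Every node must be reachable for the signal to cover the network.
    return max(dist.values()) if len(dist) == n else -1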
weight))\n\n total_distance = 0\n curr_weight = 0\n last_d = 0\n\n while (len(stops) > 0):\n d,w = stops[0]\n total_distance += d - last_d\n last_d = d\n\n if (curr_weight+w <= max_weight):\n #Pickup the garbage\n curr_weight += w \n\n #If we are full, drive back to the dump\n if (curr_weight == max_weight):\n total_distance += d\n curr_weight = 0\n last_d = 0\n\n stops.pop(0)\n else:\n #Drive back to the dump\n total_distance += d*2\n curr_weight = 0\n\n print(total_distance + last_d)\n\n\n","repo_name":"AeroX2/uva-solutions","sub_path":"solved/garbage-collection.py","file_name":"garbage-collection.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19297736691","text":"import logging\nimport datetime\nfrom lxml import etree\nimport lofar.triggerservices.trigger_service\nimport time\nimport dateutil.parser\n\nlogger = logging.getLogger(__name__)\n\nclass DeciderInterface:\n \"\"\"\n A simple interface that should be implemented by a decider class to accept or reject events.\n Note: This is not the place to perform permission or system state checks, but rather things like e.g. whether an\n event is visible or interesting enough to be observed.\n\n Example:\n ---\n class AcceptAllDecider(DeciderInterface):\n def is_acceptable(self, voevent_root):\n logger.info('Accepting all events.')\n return True\n ---\n \"\"\"\n\n def is_acceptable(self, voevent_root):\n \"\"\"\n This should be overwritten by project-specific logic that decide whether the provided VO event should be\n accepted or not. This logic should usually be provided by the PI.\n\n :param voevent_root: the root node of the event as an lxml etree\n :return: True to accept the event, False to reject/drop it\n \"\"\"\n raise NotImplementedError\n\n\n\nclass ALERTDecider(DeciderInterface):\n \"\"\"\n Alert event acceptance based on input from Sander\n \"\"\"\n def is_acceptable(self, voevent_root):\n\n # check if this event is marked as 'test'\n role = voevent_root.attrib['role']\n logger.info('Role: %s' % role)\n if role.lower() == 'test':\n logger.info('Role is test! Everyone, this is just an exercise! Rejecting the event.')\n return False\n\n # check that event toa is between now and in half an hour...\n # ...read from event\n isotime = voevent_root.find('./WhereWhen/ObsDataLocation/ObservationLocation/AstroCoords/Time/TimeInstant/ISOTime').text\n reference_frequency = float(voevent_root.find(\"./What/Group[@name='observatory parameters']/Param[@name='centre_frequency']\").attrib['value'])\n dm = float(voevent_root.find(\"./What/Group[@name='event parameters']/Param[@name='dm']\").attrib['value'])\n # ...determine time to arrival\n reference_time = time.mktime(dateutil.parser.parse(isotime).timetuple()) # convert time string to seconds\n now = time.mktime(datetime.datetime.utcnow().timetuple())\n centertime = lofar.triggerservices.trigger_service.translate_arrival_time_to_frequency(reference_time, reference_frequency, dm, target_frequency=200)\n diff = centertime - now\n # ...check if out of bounds:\n if diff < 0:\n msg = 'Event whooshed already past! (%s seconds ago)' % -diff\n logger.error(msg)\n raise ValueError(msg)\n if diff > 1800:\n msg = 'Event will arrive too far in the future! 
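The greedy truck loop above mutates stops and tracks three counters in-line; packaged as a pure function, the two branches (detour to the dump when the next pickup would overflow, unload when exactly full) become easier to test. Equivalent logic, assuming every individual pickup fits in an empty truck:

def total_drive_distance(stops, max_weight):
    # stops: list of (distance_from_dump, weight), sorted by distance.
    total, load, pos = 0, 0, 0
    for d, w in stops:
        total += d - pos              # drive forward to the next stop
        pos = d
        if load + w > max_weight:     # would overflow: dump first, come back
            total += 2 * d
            load = 0
        load += w                     # pick up (assumes w <= max_weight)
        if load == max_weight:        # exactly full: unload immediately
            total += d
            load, pos = 0, 0
    return total + pos                # final return trip to the dump

print(total_drive_distance([(3, 5), (7, 4)], max_weight=5))  # -> 20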
(%s seconds from now)' % diff\n logger.error(msg)\n raise ValueError(msg)\n\n # accept the request is nothing was wrong\n return True\n\n\n","repo_name":"kernsuite-debian/lofar","sub_path":"SAS/TriggerServices/lib/voevent_decider.py","file_name":"voevent_decider.py","file_ext":"py","file_size_in_byte":2907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23747715011","text":"import re\n\n\"\"\"\nJohn Harrington\nSoftware Reliability and Testing, Spring, 2022\nAssignment Three: Dependency Injection\n\"\"\"\n\n\nclass Helper:\n \"\"\"\n This class contains helper methods that can be injected into the Game class\n \"\"\"\n\n def __init__(self):\n # Store configs for winner_check() method\n self.configs = [\n [(0, 0), (0, 1), (0, 2)],\n [(1, 0), (1, 1), (1, 2)],\n [(2, 0), (2, 1), (2, 2)],\n [(0, 0), (1, 0), (2, 0)],\n [(0, 1), (1, 1), (2, 1)],\n [(0, 2), (1, 2), (2, 2)],\n [(0, 0), (1, 1), (2, 2)],\n [(2, 0), (1, 1), (0, 2)],\n ]\n\n def take_turn(self, curr_player):\n \"\"\"\n Helper function to allow the program to take turns between two users:\n An 'X' user, and an 'O' user.\n >>> Helper.take_turn(Helper(), 'X')\n 'O'\n >>> Helper.take_turn(Helper(), 'O')\n 'X'\n >>> Helper.take_turn(Helper(), '0')\n Traceback (most recent call last):\n ...\n RuntimeError: Current player was not defined as either 'X' or 'O'\n\n :param curr_player:\n :return:\n \"\"\"\n if curr_player == 'X':\n return 'O'\n elif curr_player == 'O':\n return 'X'\n else:\n raise RuntimeError(\"Current player was not defined as either 'X' or 'O'\")\n\n def print_board(self, board):\n \"\"\"\n Helper method to print the game board.\n See tests.py for unit testing using a mock object.\n :param board:\n :return:\n \"\"\"\n print(\" 1 2 3\")\n for i in range(3):\n print(f\"{i + 1} \" + \"|\".join(board[i]))\n\n def next_move(self, curr_player, board):\n \"\"\"\n Accept the current player's next move, validate it,and update the board's state.\n See tests.py for unit testing using a mock object.\n :param curr_player:\n :param board:\n :return:\n \"\"\"\n # Get next move from user\n move = input(f\"{curr_player}'s move, enter row and column: \")\n row, col = re.split(r\"[ \\t,]+\", move)\n row_index, col_index = int(row) - 1, int(col) - 1\n\n # Validate the move\n while board[row_index][col_index] != \" \":\n print(\"Choose an empty space!\")\n self.print_board(board)\n move = input(f\"{curr_player}'s move, enter row and column: \")\n row, col = re.split(r\"[ \\t,]+\", move)\n row_index, col_index = int(row) - 1, int(col) - 1\n continue\n\n # Update the board object\n board[row_index][col_index] = curr_player\n\n def winner_check(self, curr_player, board):\n \"\"\"\n Inspects most recent update to game board (last player's turn)\n to determine if either player has won.\n See tests.py for unit testing using a mock object.\n :param curr_player:\n :param board:\n :return:\n \"\"\"\n winner = False\n\n for config in self.configs:\n config_wins = True\n for row_index, col_index in config:\n if board[row_index][col_index] != curr_player:\n config_wins = False\n break\n if config_wins:\n winner = True\n break\n if winner:\n print(f\"Congratulations {curr_player}, you won!\")\n exit(0)\n\n\nclass Game:\n def __init__(self, helper_methods):\n self.helper = helper_methods\n self.board = [\n [\" \", \" \", \" \"],\n [\" \", \" \", \" \"],\n [\" \", \" \", \" \"],\n ]\n self.turn = \"X\"\n self.row_index = -1\n self.col_index = -1\n\n def play(self):\n while True:\n # Print the 
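is_acceptable() above converts a burst's reported arrival time to the expected arrival time at the target observing frequency before checking the 0 to 1800 s window. The physics behind it is the cold-plasma dispersion delay, delta_t = k_DM * DM * nu**-2, so lower frequencies arrive later. A sketch of what translate_arrival_time_to_frequency presumably computes; the constant and the MHz units are assumptions, and the real helper lives in lofar.triggerservices:

K_DM = 4.149e3  # dispersion constant, s * MHz^2 * cm^3 / pc (approximate)

def arrival_time_at(reference_time, reference_frequency, dm, target_frequency=200):
    # Shift a burst's reference arrival time (seconds since the epoch) from
    # the frequency it was reported at to the target frequency, both in MHz,
    # given the dispersion measure in pc/cm^3.
    delay = K_DM * dm * (target_frequency ** -2 - reference_frequency ** -2)
    return reference_time + delay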
current board state\n self.helper.print_board(self.board)\n\n # Process next move\n self.helper.next_move(self.turn, self.board)\n\n # Check for a winner\n self.helper.winner_check(self.turn, self.board)\n\n # There was no winner, so go to the next player\n self.turn = self.helper.take_turn(self.turn)\n\n\ndef main():\n helper = Helper()\n game = Game(helper)\n game.play()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"harr1424/software-testing","sub_path":"3_DependencyInjection/tictactoe/tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":4227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"735426744","text":"import raynest\nimport raynest.model\nimport numpy as np\nimport matplotlib.pyplot as plt \n#print('raynest')\nfrom corner import corner\n\n\nclass Line(raynest.model.Model):\n\n def __init__(self,x,y,sig_y, sig_x):\n super(Line,self).__init__()\n self.x=x\n self.y=y\n self.errorx=sig_x\n self.errory= sig_y\n \n self.names= ['a', 'b']\n self.bounds =[ [0,3], [0,3]]\n\n for i, xi in enumerate(self.x):\n self.names.append('x{0}'.format(i))\n self.bounds.append([xi-5*self.errorx,xi+5*self.errorx])\n \n def log_prior(self,x):\n logp=super(Line,self).log_prior(x)\n if np.isfinite(logp):\n #some prior\n return 0.\n else:\n return -np.inf\n\n def log_likelihood(self,x):\n logl1=-len(self.y)*np.log((np.sqrt(2*np.pi)*self.errory))\n logl2=-len(self.x)*np.log((np.sqrt(2*np.pi)*self.errorx))\n for i, xi in enumerate(self.x):\n\n x_s = x[\"x{0}\".format(i)]\n logl1 += -1/2 * ((self.y[i] - (x[\"a\"] * x_s + x[\"b\"]))**2) / (self.errory**2) #+ np.log(1/(np.sqrt(2*np.pi)*self.errory)) \n logl2 += -1/2 * ((xi - x_s )**2) / (self.errorx**2) #+ np.log(1/(np.sqrt(2*np.pi)*self.errorx)) \n \n logl = logl1 + logl2\n return logl\n\n\nx_true=np.linspace(0,5,10)\ny=np.zeros(len(x_true))\nx=np.zeros(len(x_true))\na_val=2\nb_val=1\nsig_y=0.05\nsig_x=0.1\nfrom numpy import random\nfor i in range(len(x)):\n mean=a_val*x_true[i]+b_val\n s = np.random.normal(mean, sig_y, 1)\n y[i]=s\n x[i]=np.random.normal(x_true[i],sig_x)\n \n#print(x,y)\n#y= b_val + a_val * x +sig_y * np.random.rand(0 ,1, x.size)\n\nmymodel= Line(x,y,sig_y, sig_x)\nnest = raynest.raynest(mymodel, verbose=2, nnest=1, nensemble=1, nlive=1000, maxmcmc=5000)\nnest.run(corner = True)\npost = nest.posterior_samples.ravel()\n\nsamples = np.column_stack([post[lab] for lab in mymodel.names])\nfig = corner(samples, labels = ['$a$','$b$'], truths=[a_val,b_val])\nfig.savefig('joint_posterior.pdf', bbox_inches = 'tight')\n\n\n","repo_name":"phiamorton/BayesExercises","sub_path":"CPnestLine/nested_errorx.py","file_name":"nested_errorx.py","file_ext":"py","file_size_in_byte":2053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71919179785","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n'''\n@Project :Awesome-Uplift-Model \n@File :example_6.py\n@Author :JackHCC\n@Date :2022/12/3 17:36 \n@Desc :\n\n'''\nimport pandas as pd\nimport numpy as np\nfrom itertools import product\n\nnp.random.seed(1000)\n\n\n# N for number of individuals, T for time periods\ndef create_clus_data(N=200, T=10):\n # We're going to create errors clustered at the\n # ID level. 
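# --- Editor's illustrative sketch for the dependency-injected tictactoe above, in
# the spirit of the "see tests.py for unit testing using a mock object" notes in its
# docstrings. _Game is a minimal stand-in mirroring one loop iteration of Game.play().
from unittest import mock

class _Game:
    def __init__(self, helper):
        self.helper = helper
        self.board = [[" "] * 3 for _ in range(3)]
        self.turn = "X"

    def play_one_round(self):
        self.helper.print_board(self.board)
        self.helper.next_move(self.turn, self.board)
        self.helper.winner_check(self.turn, self.board)
        self.turn = self.helper.take_turn(self.turn)

helper = mock.Mock()
helper.take_turn.return_value = "O"
game = _Game(helper)
game.play_one_round()
helper.next_move.assert_called_once_with("X", mock.ANY)
assert game.turn == "O"  # the injected helper drove the turn change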
So we can follow our steps from making panel data\n    p = pd.DataFrame(\n        product(range(0, N), range(0, T)))\n    p.columns = ['ID', 't']\n\n    # Individual- and time-varying variable\n    p['W'] = np.random.normal(size=N * T)\n\n    # Now an individual-specific error cluster\n    indiv_data = pd.DataFrame({\n        'ID': range(0, N),\n        'C': np.random.normal(size=N)})\n\n    # Bring them together\n    p = p.merge(indiv_data, on='ID')\n\n    # Create X\n    p['X'] = 2 * p['W'] + np.random.normal(size=N * T)\n\n    # And create Y. The error term has two components: the individual\n    # cluster C, and the individual-and-time-varying element\n    p['Y'] = 3 * p['X'] + (p['C'] + np.random.normal(size=N * T))\n    return p\n\n\ncreate_clus_data(100, 5)\n","repo_name":"JackHCC/Awesome-Uplift-Model","sub_path":"Example/The_Effect/Simulation/example_6.py","file_name":"example_6.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"81"}
+{"seq_id":"25035130092","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.http import JsonResponse\nfrom sys import stderr\nfrom .processServices import UserProcessService, CommentProcessService \n\n# Create your views here.\n\ndef index(request):\n#    return HttpResponse(\"Tengo Hambre\")\n    userProcessService = UserProcessService()\n    users = userProcessService.getUsers()\n    jsonUsers = []\n    for user in users:\n        jsonUser = {\"facebook_id\":user.facebook_profile_id, \"first_name\":user.first_name, \"last_name\":user.last_name}\n        jsonUsers.append(jsonUser)\n#    return JsonResponse({'cedula': 'nombres'})\n#    return JsonResponse(userService.all_users())\n    return HttpResponse(jsonUsers)\n\ndef userData(request, facebook_profile_id):\n    userProcessService = UserProcessService()\n    user = userProcessService.getUser(facebook_profile_id)\n    jsonUsers = []\n    jsonUser = {\"facebook_id\":user.facebook_profile_id, \"first_name\":user.first_name, \"last_name\":user.last_name}\n    jsonUsers.append(jsonUser)\n    return HttpResponse(jsonUsers)\n    \n'''\n    users = userProcessService.getUserProfile(user_profile)\n    return users\n    jsonUsers = []\n    for user in users:\n        jsonUser = {\"facebook_id\":user.facebook_profile_id, \"first_name\":user.first_name, \"last_name\":user.last_name}\n        jsonUsers.append(jsonUser)\n#    return JsonResponse({'cedula': 'nombres'})\n#    return JsonResponse(userService.all_users())\n    return HttpResponse(jsonUsers)\n'''\n\ndef createUser(self, facebook_profile_id, first_name, middle_name, last_name, full_name):\n    userProcessService = UserProcessService()\n    result = userProcessService.createUser(facebook_profile_id, first_name, middle_name, last_name, full_name)\n    return HttpResponse([{\"result\": result}])\n\ndef createComment(self, user_id, application, description):\n    commentProcessService = CommentProcessService()\n    result = commentProcessService.createComment(user_id, application, description)\n    return HttpResponse([{\"result\": result}])\n\ndef saveUserSession(self, user_id, session_id, feedback_level, feedback):\n    userProcessService = UserProcessService()\n    userProcessService.saveUserSession(user_id, session_id, feedback_level, feedback)\n    ","repo_name":"yezgar/core_data","sub_path":"tyquy/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"8507850787","text":"\n## import all required 
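# --- Editor's illustrative sketch for create_clus_data() above: one way to consume
# the clustered panel it builds. The statsmodels cluster-robust fit is an assumption
# on my part; it is not used anywhere in the original example_6.py, and the data
# below is regenerated with a seeded Generator rather than the record's np.random.seed.
import numpy as np
import pandas as pd
import statsmodels.formula.api as smf
from itertools import product

rng = np.random.default_rng(0)
p = pd.DataFrame(product(range(50), range(5)), columns=["ID", "t"])
p["C"] = p.groupby("ID")["ID"].transform(lambda s: rng.normal())  # one draw per cluster
p["X"] = rng.normal(size=len(p))
p["Y"] = 3 * p["X"] + p["C"] + rng.normal(size=len(p))

# Cluster the standard errors at the ID level, matching the error structure.
fit = smf.ols("Y ~ X", data=p).fit(cov_type="cluster", cov_kwds={"groups": p["ID"]})
print(fit.bse)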
files here \nfrom pdf2image import convert_from_path\nimport pdf2image\nimport cv2\nimport PIL\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport pytesseract\nimport numpy as np\nimport boto3\nimport os \nimport re\nimport json\nimport argparse\n\n## define boto3 resource \ns3 = boto3.resource('s3')\ns3client = boto3.client('s3')\n\n\ndef download_s3Objs(s3bucket, s3prefix):\n ## read s3 objs from the args passed\n response = s3client.list_objects_v2(\n Bucket = s3bucket,\n Prefix = s3prefix,\n MaxKeys = 10 )\n\n s3Objs = [k['Key'] for k in response['Contents'] if k['Key'].find('.pdf') != -1]\n print(s3Objs)\n\n # for objs in s3Objs:\n for objs in s3Objs:\n path = os.path.abspath(os.getcwd())\n filename= objs.split('/')[-1]\n downloadfile_directory = os.path.join(os.getcwd(), r'pdfs')\n if not os.path.exists(downloadfile_directory):\n os.makedirs(downloadfile_directory) \n downloadfilepath = f'{downloadfile_directory}/{filename}'\n #print(filename)\n #print(downloadfilepath)\n s3.Bucket(s3bucket).download_file(Key=objs,Filename=downloadfilepath)\n createJsonData(downloadfilepath, filename)\n\n\n\ndef createJsonData(filepath, filename):\n pdfs = filepath\n pages = convert_from_path(pdfs, 350)\n jsonfiledata = filename.split('.')[0]\n print('\\n')\n print('----------------------------')\n print(f'Processing PDF - {jsonfiledata}')\n print('----------------------------')\n \n i = 1\n ## PDF Image Json Content Extract ## \n pdfData = {}\n pdfData['pdf_file_id'] = jsonfiledata\n pdfData['content'] = {}\n\n output_publish_file = f'{jsonfiledata.replace(\" \", \"_\")}_extracted_data.txt'\n current_directory = os.getcwd()\n publish_directory = os.path.join(current_directory, r'extracted_data')\n if not os.path.exists(publish_directory):\n os.makedirs(publish_directory) \n output_publish_file = publish_directory + '/' + output_publish_file \n with open(output_publish_file, 'w') as outfile:\n outfile.write('\\n')\n outfile.close()\n\n global pdf_imgs\n pdf_imgs = []\n for page in pages:\n image_name = \"Page_\" + str(i) + \".jpg\" \n page.save(image_name, \"JPEG\")\n pdf_imgs.append(image_name)\n i = i+1 \n #print(pdf_imgs)\n\n pdfData['content'] = []\n \n for imgs in pdf_imgs: \n createdict = {}\n createdict[imgs.split('.')[0]] = {}\n pdfData['content'].append(createdict)\n #pdfData['content']['page'][imgs.split('.')[0]] = {} \n #pdfData[jsonfiledata]['page'][imgs] = {}\n ImageProcessor = ImageProcessingModel(imgs)\n print('Call data extract fns now')\n sorteddict = ImageProcessor.model_dataextract()\n #print(sorteddict)\n ## print the extracted content to a text file for now \n print(f'publishing content to file : {output_publish_file}')\n publish_processed_output(output_publish_file, sorteddict, imgs.split('.')[0])\n print('\\n')\n \t#print(pdfData)\n\n \n\n#############################################\n#### main class for Image processing \nclass ImageProcessingModel():\n \n def __init__(self, OrgImg):\n #print('\\n')\n print('Image processing initiated')\n print(f'Pdf page being processed ===> {OrgImg}')\n self.img_cv = cv2.imread(OrgImg)\n self.img_processed = self.processPDFImages(OrgImg)\n cv2.imwrite('inv_img.png',self.img_processed) ## save the processed img with openCV\n self.PIL_img_org = Image.open(OrgImg)\n self.PIL_img_processed = Image.open('inv_img.png')\n \n ## fix image orientation \n self.fixImageOrientation(OrgImg)\n self.model_confidence = 20\n self.box_hor_orientation_priority = 1 ## setting left to right for horizontal priority for extracting 
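# --- Editor's illustrative sketch for download_s3Objs() above: list_objects_v2 with
# MaxKeys=10 returns at most 10 keys per call, so a paginator is the safer way to
# walk a whole prefix. Bucket/prefix values here are placeholders, not values from
# the original script.
import boto3

def iter_pdf_keys(bucket, prefix):
    s3client = boto3.client("s3")
    paginator = s3client.get_paginator("list_objects_v2")
    for page in paginator.paginate(Bucket=bucket, Prefix=prefix):
        for obj in page.get("Contents", []):  # "Contents" is absent on empty pages
            if obj["Key"].endswith(".pdf"):
                yield obj["Key"]

# for key in iter_pdf_keys("my-bucket", "incoming/"):
#     print(key)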
data in boxes\n self.box_ver_orientation_priority = 1 ## setting top to bottom for vertical priority for extracting data in boxes \n self.OCR_textDetection_model(OrgImg)\n ## pass image to OCR Model for text detection\n \n \n ##check all different Images now\n # plt.figure(figsize=(10,10))\n # plt.imshow(self.img_cv,cmap='gray')\n # plt.show()\n\n # plt.imshow(self.img_processed,cmap='gray')\n # plt.show()\n\n # plt.imshow(self.PIL_img_processed,cmap='gray')\n # plt.show()\n \n # get grayscale image\n def get_grayscale(self,image):\n return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n # noise removal\n def remove_noise(self,image):\n return cv2.medianBlur(image,5)\n\n def get_imgblurred(self,image):\n return cv2.GaussianBlur(image, (3,3), 0)\n\n #thresholding\n def thresholding(self,image):\n return cv2.threshold(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]\n # cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]\n\n #dilation\n def dilate(self,image):\n kernel = np.ones((5,5),np.uint8)\n return cv2.dilate(image, kernel, iterations = 1)\n \n #erosion\n def erode(self,image):\n kernel = np.ones((5,5),np.uint8)\n return cv2.erode(image, kernel, iterations = 1)\n\n #opening - erosion followed by dilation\n def opening(self,image):\n kernel = np.ones((5,5),np.uint8)\n return cv2.morphologyEx(image, cv2.MORPH_OPEN, kernel)\n\n #canny edge detection\n def canny(self,image):\n return cv2.Canny(image, 100, 200)\n\n #skew correction\n def deskew(self,image):\n coords = np.column_stack(np.where(image > 0))\n angle = cv2.minAreaRect(coords)[-1]\n if angle < -45:\n angle = -(90 + angle)\n else:\n angle = -angle\n (h, w) = image.shape[:2]\n center = (w // 2, h // 2)\n M = cv2.getRotationMatrix2D(center, angle, 1.0)\n rotated = cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)\n return rotated\n\n #template matching\n def match_template(self,image, template):\n return cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)\n \n ## preprocess image, before feeding to OCR Model \n def processPDFImages(self,img):\n im = self.img_cv\n gray = self.get_grayscale(im)\n #rmnoise = remove_noise(gray)\n thresh = self.thresholding(gray)\n # blur = cv2.GaussianBlur(gray, (3,3), 0)\n\n # Morph open to remove noise and invert image\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (10,10))\n opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=1)\n invert = 255 - opening\n return thresh\n \n \n def fixImageOrientation(self,img): \n # cv2.imwrite('inv_img.png',img)\n # inverted_image = Image.open('inv_img.png')\n #print(pytesseract.image_to_osd(self.PIL_img_processed))\n\n Img_orientation = int(re.search('(?<=Rotate: )\\d+', pytesseract.image_to_osd(self.PIL_img_processed)).group(0)) \n print(f'Image orientation is : {Img_orientation}')\n rotated_img = self.PIL_img_processed\n org_PIL_rotated_img = self.PIL_img_org \n if Img_orientation > 0:\n print('Fixing Image orientation ')\n angle_to_rotate_Img = -int(re.search('(?<=Rotate: )\\d+', pytesseract.image_to_osd(self.PIL_img_processed)).group(0)) \n rotated_img = self.PIL_img_processed.rotate(angle_to_rotate_Img)\n org_PIL_rotated_img = self.PIL_img_org.rotate(angle_to_rotate_Img) \n \n self.org_PIL_rotated_img = org_PIL_rotated_img ## PIL Image original fixed\n self.rotated_img = rotated_img ## PIL Image orientation fixed\n self.cv_rotated_img = cv2.cvtColor(np.array(self.org_PIL_rotated_img), cv2.COLOR_RGB2BGR) ## cv image orientation fixed \n \n # print('image before orientation 
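# --- Editor's illustrative sketch of the grayscale + Otsu pipeline used by
# processPDFImages() above, run on a small synthetic image so it is self-contained.
import cv2
import numpy as np

img = np.full((64, 64, 3), 255, dtype=np.uint8)  # white canvas with one dark glyph
cv2.putText(img, "A", (8, 48), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (0, 0, 0), 2)

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# THRESH_OTSU picks the threshold automatically; the explicit 0 is ignored.
_, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
print(thresh.min(), thresh.max())  # 0 255 -> a clean binary image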
fix')\n # plt.figure(figsize=(10,10))\n # plt.imshow(self.PIL_img_processed)\n # plt.show()\n # \n # print('image after orientation fix')\n # plt.imshow(self.cv_rotated_img)\n # plt.show()\n # \n # print('image after orientation fix')\n # plt.imshow(self.org_PIL_rotated_img)\n # plt.show()\n \n def OCR_textDetection_model(self, img):\n ## config defined for tesseract params \n custom_config = r'--oem 3 --psm 6' \n ## invoke model with configs, for langs [eng,ara] included \n d = pytesseract.image_to_data(self.org_PIL_rotated_img, output_type=pytesseract.Output.DICT, config=custom_config, lang='eng+ara')\n boxes = len(d['level'])\n for i in range(boxes):\n if int(d['conf'][i]) > 20:\n (x, y, w, h) = (d['left'][i], d['top'][i], d['width'][i], d['height'][i])\n cv2.rectangle(self.cv_rotated_img, (x, y), (x + w, y + h), (0, 255, 0), 2)\n\n self.img_data = d ## setting img_data for extracting in further steps\n self.boxes = boxes\n \n ### Uncomment this section to see the bounding boxes on original image, check and fine \n ### tune the model accordingly\n \n #plt.figure(figsize=(20,20))\n #plt.imshow(self.cv_rotated_img)\n #plt.show()\n \n ## call dataprocessing fns now ! commented and taken out to main fn caller\n # self.model_dataextract() \n \n \n def model_dataextract(self):\n ## fetch set for lines in doc \n lines = [l for l in self.img_data['line_num']]\n num_lines = set(lines)\n #print(num_lines)\n\n data_process_dict = {}\n for ln in num_lines: \n #print('line :',ln)\n data_process_dict[f'line_{ln}'] = []\n for i in range(self.boxes): \n if self.img_data['line_num'][i] == ln:\n #print('-------------------------')\n for kys in self.img_data.keys():\n if self.box_hor_orientation_priority and self.box_ver_orientation_priority:\n if kys=='left' and int(self.img_data['conf'][i]) > self.model_confidence and len(self.img_data['text'][i]) > 1: \n newDict = dict()\n newDict['left'] = self.img_data[kys][i]\n newDict['text'] = self.img_data['text'][i] \n newDict['top'] = self.img_data['top'][i] \n data_process_dict[f'line_{ln}'].append(newDict)\n #print('extracted data :')\n #print(data_process_dict) \n \n ## now process each lines to get the texts sorted from left and get min,max values for top, to identify the \n ## block of works in the documents, to understand how far words differ vertically\n \n clear_sorted_dict_arr = []\n clear_sorted_dict = dict()\n for line in data_process_dict: \n #print(line)\n if len(data_process_dict[line]) > 0:\n left_arr,text_arr,y_val_arr = [],[],[]\n for dct in data_process_dict[line]: \n left_arr.append(dct['left'])\n text_arr.append(dct['text'])\n y_val_arr.append(dct['top'])\n #print(left_arr)\n #print(text_arr)\n #print(y_val_arr)\n #print(f'y_min:{min(y_val_arr)}',f'y_max:{max(y_val_arr)}')\n #print(f'x_min:{min(left_arr)}',f'x_max:{max(left_arr)}') \n text_sorted_arry, combined_srted_arr = self.sort_fromleft_arry(text_arr,left_arr)\n text_sorted_arry1 = [re.sub('[^a-zA-Z0-9ا-ي\\n\\.]','', z) for z in text_sorted_arry] \n #print(text_sorted_arry1)\n clear_sorted_dict[line] = dict()\n #clear_sorted_dict['line']['content'] = ' | '.join(text_sorted_arry1)\n clear_sorted_dict[line]['content'] = ' | '.join(text_sorted_arry1)\n clear_sorted_dict[line]['y_min'] = min(y_val_arr)\n clear_sorted_dict[line]['y_max'] = max(y_val_arr)\n clear_sorted_dict[line]['x_min'] = min(left_arr)\n clear_sorted_dict[line]['x_max'] = max(left_arr) \n #print('-------')\n #print('<======= processed data =======> ') \n return clear_sorted_dict\n \n ## fn for sorting the lists based on 
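# --- Editor's illustrative sketch of the core move in model_dataextract() above:
# filter pytesseract word boxes by confidence and group them by line. `data` is a
# made-up stand-in for the dict that pytesseract.image_to_data(..., Output.DICT)
# returns, so this runs without Tesseract installed.
from collections import defaultdict

data = {
    "line_num": [1, 1, 2],
    "conf": ["91", "88", "-1"],
    "text": ["Hello", "world", ""],
    "left": [120, 10, 0],
}

lines = defaultdict(list)
for i, ln in enumerate(data["line_num"]):
    if int(data["conf"][i]) > 20 and data["text"][i].strip():
        lines[ln].append((data["left"][i], data["text"][i]))

for ln, words in sorted(lines.items()):
    print(ln, " ".join(w for _, w in sorted(words)))  # left-to-right reading order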
distace from left margin\n ## required for ordering the word blocks read by model \n def sort_fromleft_arry(self,X,Y):\n keydict = dict(zip(X, Y))\n X.sort(key=keydict.get)\n return X, keydict\n\n\n\ndef publish_processed_output(outfilename, dct, img):\n with open(outfilename, 'a') as outfile:\n outfile.write('\\n')\n outfile.write(img+ '\\n')\n outfile.write('--------------------- \\n')\n outfile.write('\\n')\n for hostDict in dct:\n json.dump(hostDict, outfile, ensure_ascii=False)\n outfile.write(' : ')\n json.dump(dct[hostDict], outfile, ensure_ascii=False)\n outfile.write('\\n')\n\n\n\ndef main():\n args = parse_args()\n\n s3bucket = args.s3bucket\n s3prefix = args.s3prefix\n #print(s3bucket, s3prefix)\n\n download_s3Objs(s3bucket, s3prefix)\n print('\\n')\n print('====================================================================================================\\n')\n print(u'Done extracting data ! \\U0001F609 \\U0001F680')\n print('''NOTE : this is demo data extract, in production use case instead of publishing data ''' \\\n \t '''to file, data extract can be pushed to NoSQL DB like Dynamodb/MongoDB or even Elastic search '''\n \t '''for exposing data to APIs''')\n\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='data extractor')\n parser.add_argument('--s3bucket', dest='s3bucket', required=True, help='provide S3 bucket name')\n parser.add_argument('--s3prefix', dest='s3prefix', required=True, help='provide S3 folder path')\n\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"shahin43/pdf_DataExtractproject","sub_path":"pdfDataextract.py","file_name":"pdfDataextract.py","file_ext":"py","file_size_in_byte":14112,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"4629726120","text":"import numpy as np\n\nfrom isaac_gym import gymapi\nfrom isaac_gym_utils.ctrl_utils import ForcePositionController\n\n\nclass BoxWorldHighLevelController:\n\n def __init__(self, low_level_controllers, target_update_freq): \n self.low_level_controllers = low_level_controllers\n self.target_update_freq = target_update_freq\n \n def reset(self):\n pass\n\n def _compute_low_level_controller_targets(self, controller_idxs):\n\n if len(controller_idxs) == 1:\n c_idx = controller_idxs[0]\n controller_0 = self.low_level_controllers[c_idx]\n target = controller_0.get_target()\n return target[:7]\n\n c0_idx, c1_idx, c2_idx = controller_idxs\n final_targets = np.zeros(10)\n args = ()\n\n controller_0 = self.low_level_controllers[c0_idx]\n c0_target = controller_0.get_target(*args)\n has_orientation_controller = False\n\n if controller_0.is_position_controller:\n assert np.sum(np.abs(c0_target[4:])) < 1e-6, \"Invalid controller output\"\n else:\n assert np.sum(np.abs(c0_target[:3])) < 1e-6, \"Invalid controller output\"\n has_orientation_controller = True\n final_targets += c0_target\n \n controller_1 = self.low_level_controllers[c1_idx]\n if controller_1.is_position_controller:\n c1_target = controller_1.get_target(*args)\n assert np.sum(np.abs(c1_target[4:])) < 1e-6, \"Invalid controller output\"\n c0_null = controller_0.null_space\n c1_target[:3] = c0_null @ c1_target[:3]\n final_targets += c1_target\n else:\n if not has_orientation_controller:\n c1_target = controller_1.get_target(*args)\n assert np.sum(np.abs(c1_target[:3])) < 1e-6, \"Invalid controller output\"\n final_targets += c1_target\n has_orientation_controller = True\n\n controller_2 = self.low_level_controllers[c2_idx]\n if 
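# --- Editor's note on sort_fromleft_arry() above: keying a dict on the word strings
# means a word that appears twice on one line collapses to a single entry. Sorting
# (left, word) pairs avoids that; this is a suggested variant, not the original code.
def sort_from_left(words, lefts):
    pairs = sorted(zip(lefts, words))  # stable; duplicate words are preserved
    return [w for _, w in pairs]

print(sort_from_left(["total", "10", "total"], [300, 150, 20]))
# ['total', '10', 'total']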
controller_2.is_position_controller:\n c1_null = controller_1.null_space\n c1_null = c0_null @ c1_null\n c2_target = controller_2.get_target(*args)\n assert np.sum(np.abs(c2_target[4:])) < 1e-6, \"Invalid controller output\"\n c2_target[:3] = c1_null @ c2_target[:3]\n final_targets += c2_target\n else:\n if not has_orientation_controller:\n c2_target = controller_2.get_target(*args)\n assert np.sum(np.abs(c2_target[:3])) < 1e-6, \"Invalid controller output\"\n final_targets += c2_target\n has_orientation_controller = True\n\n return final_targets[:7] \n\n def compute_controls(self, current_tra, current_R, block_force, controller_idxs):\n '''Compute delta transform controls\n \n controller_idxs: List of (max len 3) controllers to choose from\n '''\n delta_targets = self._compute_low_level_controller_targets(controller_idxs)\n pos, quat = delta_targets[:3], delta_targets[3:7]\n\n delta_translation = gymapi.Transform(gymapi.Vec3(pos[0], pos[1], pos[2]))\n # Quat(x, y, z, w) while numpy-quaternion represents it as (w, x, y, z)\n delta_rotation = gymapi.Transform(r=gymapi.Quat(quat[3], quat[0], quat[1], quat[2]))\n \n return delta_translation, delta_rotation\n","repo_name":"iamlab-cmu/hierarchical-object-controllers","sub_path":"object_axes_ctrlrs/controllers/planar_box_world_high_level_controller.py","file_name":"planar_box_world_high_level_controller.py","file_ext":"py","file_size_in_byte":3374,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"38356534557","text":"from tkinter import *\ni=0\ndef listfromfile():\n global fr\n fr = open(\"movie_details.txt\", \"r\")\n data= fr.readlines()\n innerl=[]\n for i in data:\n innerl.append(i.split(\"\\t\"))\n return innerl\n\ndef end():\n view.destroy() \n\ndef first():\n print(\"first record\")\n l1=listfromfile()\n s1=l1[0][0]\n s2=l1[0][1]\n s3=l1[0][2]\n s4=l1[0][3]\n s5=l1[0][4]\n t1=Label(view,text=s1)\n t1.grid(row=2,column=3)\n t2=Label(view,text=s2)\n t2.grid(row=3,column=3)\n t3=Label(view,text=s3)\n t3.grid(row=4,column=3)\n t4=Label(view,text=s4)\n t4.grid(row=5,column=3)\n t5=Label(view,text=s5)\n t5.grid(row=6,column=3)\n fr.close()\n\ndef nextr():\n print(\"next record\")\n l1=listfromfile()\n print(l1)\n n=len(l1)\n global i\n if(i<=n-1):\n s1=(l1[i][0])\n s2=(l1[i][1])\n s3=(l1[i][2])\n s4=(l1[i][3])\n s5=(l1[i][4])\n t1=Label(view,text=s1)\n t1.grid(row=2,column=3)\n t2=Label(view,text=s2)\n t2.grid(row=3,column=3)\n t3=Label(view,text=s3)\n t3.grid(row=4,column=3)\n t4=Label(view,text=s4)\n t4.grid(row=5,column=3)\n t5=Label(view,text=s5)\n t5.grid(row=6,column=3)\n i=i+1\n fr.close()\n else:\n i=0\n nextr()\n \ndef last():\n print(\"last record\")\n l1=listfromfile()\n n=len(l1)\n s1=l1[n-1][0]\n s2=l1[n-1][1]\n s3=l1[n-1][2]\n s4=l1[n-1][3]\n s5=l1[n-1][4]\n t1=Label(view,text=s1)\n t1.grid(row=2,column=3)\n t2=Label(view,text=s2)\n t2.grid(row=3,column=3)\n t3=Label(view,text=s3)\n t3.grid(row=4,column=3)\n t4=Label(view,text=s4)\n t4.grid(row=5,column=3)\n t5=Label(view,text=s5)\n t5.grid(row=6,column=3)\n fr.close()\n\n###########################################################################\ndef VIEW():\n global view\n view=Tk()\n view.title(\"View a record\")\n global s1, s2, s3, s4, s5\n s1=StringVar()\n s2=StringVar()\n s3=StringVar()\n s4=StringVar()\n s5=StringVar()\n l=Label(view,text=\"View All Records Here: \")\n l.grid(row=1,column=1,columnspan=3)\n l1=Label(view, text=\"Movie Name: \")\n l1.grid(row=2,column=2)\n l2=Label(view,text=\"Director's name: 
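# --- Editor's illustrative sketch of the priority scheme in
# _compute_low_level_controller_targets() above: each lower-priority position target
# is projected through the null space of the one before it. The vectors and the
# projector construction are toy values, not the controller's actual null_space.
import numpy as np

def null_space_of(direction):
    """Projector onto the plane orthogonal to a direction vector."""
    d = direction / np.linalg.norm(direction)
    return np.eye(3) - np.outer(d, d)

t0 = np.array([1.0, 0.0, 0.0])           # highest-priority position target
t1 = np.array([0.5, 0.5, 0.0])           # lower-priority target
combined = t0 + null_space_of(t0) @ t1   # t1 only acts orthogonally to t0
print(combined)                           # [1.  0.5 0. ]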
\")\n l2.grid(row=3,column=2)\n l3=Label(view,text=\"Genre: \")\n l3.grid(row=4,column=2)\n l4=Label(view,text=\"Duration: \")\n l4.grid(row=5,column=2)\n l5=Label(view,text=\"Release date: \")\n l5.grid(row=6,column=2)\n firstr=Button(view, text=\"View First record\", command=first)\n firstr.grid(row=7,column=1)\n lastr=Button(view, text=\"View Last record\", command=last)\n lastr.grid(row=7,column=3)\n nextrec=Button(view, text=\"View Next record\", command=nextr)\n nextrec.grid(row=7,column=2)\n exit=Button(view,text=\"Exit\",command=end)\n exit.grid(row=1,column=4)\n view.geometry(\"350x200\")\n view.mainloop()\n","repo_name":"PragyaJain18/Movie-Details-and-Booking-System","sub_path":"viewrec.py","file_name":"viewrec.py","file_ext":"py","file_size_in_byte":2870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1245996727","text":"from model import SingleReactionSolution\nimport numpy as np\nimport pints\nfrom data import ECTimeData\nimport pickle\n\n\ndef choose_filename(error_model_type, ending):\n filename = \"mcmc_results_\" + error_model_type + \".\" + ending\n return filename\n\ndef fetch_model_results(error_model_type):\n \n filename = choose_filename(error_model_type, \"pickle\")\n results = pickle.load(open(filename, \"rb\"))\n chains = results[3]\n \n return chains\n\ndef export_mcmc(error_model_type):\n \n chains = fetch_model_results(error_model_type)\n chains_stacked = np.vstack(chains)\n filename = \"mcmc_draws_\" + error_model_type + \".csv\"\n np.savetxt(filename, chains_stacked, delimiter=\",\")\n\ndef model_posterior_simulation(error_model_type, n_remove):\n \n chains = fetch_model_results(error_model_type)\n chains_stacked = np.vstack(chains)\n posterior_means = np.median(chains_stacked, axis=0)\n posterior_means = posterior_means[:(len(posterior_means) - n_remove)]\n \n model = SingleReactionSolution()\n data = ECTimeData('GC01_FeIII-1mM_1M-KCl_02_009Hz.txt', model,\n ignore_begin_samples=5, ignore_end_samples=0, samples_per_period=200)\n model_simulation = model.simulate(posterior_means, data.times)\n filename = choose_filename(error_model_type, \"csv\")\n np.savetxt(filename, model_simulation, delimiter=\",\")\n \ndef export_data():\n model = SingleReactionSolution()\n data = ECTimeData('GC01_FeIII-1mM_1M-KCl_02_009Hz.txt', model,\n ignore_begin_samples=5, ignore_end_samples=0, samples_per_period=200)\n x = np.zeros((len(data.times), 2))\n x[:, 0] = data.times\n x[:, 1] = data.current\n np.savetxt(\"data.csv\", x, delimiter=\",\")\n \nif __name__ == '__main__':\n \n error_models = ['independent', 'ar1', 'arma11', 'student_t']\n n_noise_parameters = [1, 2, 3, 2]\n k = 0\n for em in error_models:\n print(k)\n model_posterior_simulation(em, n_noise_parameters[k])\n export_mcmc(em)\n k += 1\n export_data()","repo_name":"pints-team/electrochem_pde_model","sub_path":"process_results.py","file_name":"process_results.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"31448372535","text":"import sys\nimport collections\n\nd = collections.deque()\n\nn = int(sys.stdin.readline().rstrip())\n\nfor _ in range(n):\n command = sys.stdin.readline().rstrip()\n cmd = command.split(\" \")\n if cmd[0] == \"push_front\":\n d.appendleft(cmd[1])\n elif cmd[0] == \"push_back\":\n d.append(cmd[1])\n elif cmd[0] == \"pop_front\":\n if len(d) == 0:\n print(-1)\n else:\n erased = d.popleft()\n print(erased)\n elif cmd[0] == 
\"pop_back\":\n if len(d) == 0:\n print(-1)\n else:\n erased = d.pop()\n print(erased)\n elif cmd[0] == \"back\":\n if len(d) == 0:\n print(-1)\n else:\n erased = d.pop()\n print(erased)\n d.append(erased)\n elif cmd[0] == \"front\":\n if len(d) == 0:\n print(-1)\n else:\n erased = d.popleft()\n print(erased)\n d.appendleft(erased)\n elif cmd[0] == \"empty\":\n if len(d) == 0:\n print(1)\n else:\n print(0)\n else:\n print(len(d))\n","repo_name":"ContiNew/problem_solving","sub_path":"baekjoon-in-python/deque_q.py","file_name":"deque_q.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35647169246","text":"#-*- coding:utf-8 -*-\n'''\n写一个类实现由阿拉伯数字到罗马数字的转换\n'''\n\n\nclass num_change:\n def __init__(self, n):\n self.arab = n\n self.rome = ''\n\n def exc(self):\n rom = '〇ⅠⅡⅢⅣⅤⅥⅦⅧⅨ'\n for i in self.arab:\n self.rome += rom[eval(i)]\n return self.rome\n\n\nnum = input(\"请输入所需转换的阿拉伯数字:\")\nNum_Change = num_change(num)\nprint(f'转换后的罗马数字为:{Num_Change.exc()}')\n","repo_name":"maojingke/xiaomao","sub_path":"Arab-Rome.py","file_name":"Arab-Rome.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2253253780","text":"import math\n\n\nclass Triangle:\n def __init__(self, a, b, c):\n self.a = a\n self.b = b\n self.c = c\n\n def side_length(point1, point2):\n return math.hypot((point2[0] - point1[0]), (point2[1] - point1[1]))\n\n self.ab = side_length(self.a, self.b)\n self.bc = side_length(self.b, self.c)\n self.ca = side_length(self.c, self.a)\n\n def existence(self):\n if (self.a == self.b or self.a == self.c or self.b == self.c) or \\\n (self.ab >= self.bc + self.ca or self.bc >= self.ab + self.ca or self.ca >= self.ab + self.bc):\n return False\n else:\n return True\n\n def perimeter(self):\n return self.ab + self.bc + self.ca\n\n def area(self):\n p = self.perimeter() / 2\n return math.sqrt(p * (p - self.ab) * (p - self.bc) * (p - self.ca))\n\n\nif __name__ == '__main__':\n a_point = [int(x) for x in input(\"Enter the coordinates x,y of the A point: \").split(',')]\n b_point = [int(x) for x in input(\"Enter the coordinates x,y of the B point: \").split(',')]\n c_point = [int(x) for x in input(\"Enter the coordinates x,y of the C point: \").split(',')]\n\n triangle = Triangle(a_point, b_point, c_point)\n\n if triangle.existence():\n print('Triangle perimeter is', triangle.perimeter())\n print('Triangle area is', triangle.area())\n else:\n print(\"Triangle doesn't exist\")\n","repo_name":"yatvoiotec/FBSTriangle","sub_path":"triangle.py","file_name":"triangle.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13980959763","text":"\"\"\"Multiprocessing functions for training language models\"\"\"\nfrom __future__ import annotations\n\nimport os\nimport subprocess\nimport typing\nfrom pathlib import Path\n\nimport sqlalchemy\nfrom _kalpy.fstext import VectorFst\nfrom kalpy.decoder.decode_graph import DecodeGraphCompiler\nfrom kalpy.fstext.lexicon import LexiconCompiler\nfrom sqlalchemy.orm import joinedload, subqueryload\n\nfrom montreal_forced_aligner import config\nfrom montreal_forced_aligner.abc import KaldiFunction\nfrom montreal_forced_aligner.data import MfaArguments, WordType\nfrom montreal_forced_aligner.db import Job, Phone, PhoneInterval, Speaker, Utterance, Word\nfrom 
montreal_forced_aligner.helper import mfa_open\nfrom montreal_forced_aligner.utils import thirdparty_binary\n\nif typing.TYPE_CHECKING:\n from dataclasses import dataclass\n\n from montreal_forced_aligner.abc import MetaDict\nelse:\n from dataclassy import dataclass\n\n\n@dataclass\nclass TrainSpeakerLmArguments(MfaArguments):\n \"\"\"\n Arguments for :class:`~montreal_forced_aligner.language_modeling.multiprocessing.TrainSpeakerLmFunction`\n\n Parameters\n ----------\n job_name: int\n Integer ID of the job\n db_string: str\n String for database connections\n log_path: :class:`~pathlib.Path`\n Path to save logging information during the run\n model_path: :class:`~pathlib.Path`\n Path to model\n order: int\n Ngram order of the language models\n method: str\n Ngram smoothing method\n target_num_ngrams: int\n Target number of ngrams\n hclg_options: dict[str, Any]\n HCLG creation options\n \"\"\"\n\n model_path: Path\n tree_path: Path\n lexicon_compilers: typing.Dict[int, LexiconCompiler]\n order: int\n method: str\n target_num_ngrams: int\n hclg_options: MetaDict\n\n\n@dataclass\nclass TrainLmArguments(MfaArguments):\n \"\"\"\n Arguments for :class:`~montreal_forced_aligner.language_modeling.multiprocessing.TrainSpeakerLmFunction`\n\n Parameters\n ----------\n job_name: int\n Integer ID of the job\n db_string: str\n String for database connections\n log_path: :class:`~pathlib.Path`\n Path to save logging information during the run\n symbols_path: :class:`~pathlib.Path`\n Words symbol table paths\n oov_word: str\n OOV word\n order: int\n Ngram order of the language models\n \"\"\"\n\n working_directory: Path\n symbols_path: Path\n order: int\n oov_word: str\n\n\nclass TrainLmFunction(KaldiFunction):\n \"\"\"\n Multiprocessing function to training small language models for each speaker\n\n See Also\n --------\n :openfst_src:`farcompilestrings`\n Relevant OpenFst binary\n :ngram_src:`ngramcount`\n Relevant OpenGrm-Ngram binary\n :ngram_src:`ngrammake`\n Relevant OpenGrm-Ngram binary\n :ngram_src:`ngramshrink`\n Relevant OpenGrm-Ngram binary\n\n Parameters\n ----------\n args: :class:`~montreal_forced_aligner.language_modeling.multiprocessing.TrainSpeakerLmArguments`\n Arguments for the function\n \"\"\"\n\n def __init__(self, args: TrainLmArguments):\n super().__init__(args)\n self.working_directory = args.working_directory\n self.symbols_path = args.symbols_path\n self.order = args.order\n self.oov_word = args.oov_word\n\n def _run(self) -> typing.Generator[bool]:\n \"\"\"Run the function\"\"\"\n with self.session() as session, mfa_open(self.log_path, \"w\") as log_file:\n word_query = session.query(Word.word).filter(\n Word.word_type.in_(WordType.speech_types())\n )\n included_words = set(x[0] for x in word_query)\n utterance_query = session.query(Utterance.normalized_text, Utterance.text).filter(\n Utterance.job_id == self.job_name\n )\n\n farcompile_proc = subprocess.Popen(\n [\n thirdparty_binary(\"farcompilestrings\"),\n \"--token_type=symbol\",\n \"--generate_keys=16\",\n \"--keep_symbols\",\n f\"--symbols={self.symbols_path}\",\n ],\n stderr=log_file,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n env=os.environ,\n )\n ngramcount_proc = subprocess.Popen(\n [\n thirdparty_binary(\"ngramcount\"),\n \"--round_to_int\",\n f\"--order={self.order}\",\n \"-\",\n self.working_directory.joinpath(f\"{self.job_name}.cnts\"),\n ],\n stderr=log_file,\n stdin=farcompile_proc.stdout,\n env=os.environ,\n )\n for (normalized_text, text) in utterance_query:\n if not normalized_text:\n 
normalized_text = text\n text = \" \".join(\n x if x in included_words else self.oov_word for x in normalized_text.split()\n )\n farcompile_proc.stdin.write(f\"{text}\\n\".encode(\"utf8\"))\n farcompile_proc.stdin.flush()\n self.callback(1)\n farcompile_proc.stdin.close()\n self.check_call(ngramcount_proc)\n\n\nclass TrainPhoneLmFunction(KaldiFunction):\n \"\"\"\n Multiprocessing function to training small language models for each speaker\n\n See Also\n --------\n :openfst_src:`farcompilestrings`\n Relevant OpenFst binary\n :ngram_src:`ngramcount`\n Relevant OpenGrm-Ngram binary\n :ngram_src:`ngrammake`\n Relevant OpenGrm-Ngram binary\n :ngram_src:`ngramshrink`\n Relevant OpenGrm-Ngram binary\n\n Parameters\n ----------\n args: :class:`~montreal_forced_aligner.language_modeling.multiprocessing.TrainSpeakerLmArguments`\n Arguments for the function\n \"\"\"\n\n def __init__(self, args: TrainLmArguments):\n super().__init__(args)\n self.working_directory = args.working_directory\n self.symbols_path = args.symbols_path\n self.order = args.order\n\n def _run(self) -> typing.Generator[bool]:\n \"\"\"Run the function\"\"\"\n with self.session() as session, mfa_open(self.log_path, \"w\") as log_file:\n if config.USE_POSTGRES:\n string_agg_function = sqlalchemy.func.string_agg\n else:\n string_agg_function = sqlalchemy.func.group_concat\n pronunciation_query = (\n sqlalchemy.select(Utterance.id, string_agg_function(Phone.kaldi_label, \" \"))\n .select_from(Utterance)\n .join(Utterance.phone_intervals)\n .join(PhoneInterval.phone)\n .where(Utterance.job_id == self.job_name)\n .group_by(Utterance.id)\n )\n farcompile_proc = subprocess.Popen(\n [\n thirdparty_binary(\"farcompilestrings\"),\n \"--token_type=symbol\",\n \"--generate_keys=16\",\n f\"--symbols={self.symbols_path}\",\n ],\n stderr=log_file,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n env=os.environ,\n )\n ngramcount_proc = subprocess.Popen(\n [\n thirdparty_binary(\"ngramcount\"),\n \"--require_symbols=false\",\n \"--round_to_int\",\n f\"--order={self.order}\",\n \"-\",\n self.working_directory.joinpath(f\"{self.job_name}.cnts\"),\n ],\n stderr=log_file,\n stdin=farcompile_proc.stdout,\n env=os.environ,\n )\n for utt_id, phones in session.execute(pronunciation_query):\n farcompile_proc.stdin.write(f\"{phones}\\n\".encode(\"utf8\"))\n farcompile_proc.stdin.flush()\n self.callback((utt_id, phones))\n farcompile_proc.stdin.close()\n self.check_call(ngramcount_proc)\n\n\nclass TrainSpeakerLmFunction(KaldiFunction):\n \"\"\"\n Multiprocessing function to training small language models for each speaker\n\n See Also\n --------\n :openfst_src:`farcompilestrings`\n Relevant OpenFst binary\n :ngram_src:`ngramcount`\n Relevant OpenGrm-Ngram binary\n :ngram_src:`ngrammake`\n Relevant OpenGrm-Ngram binary\n :ngram_src:`ngramshrink`\n Relevant OpenGrm-Ngram binary\n\n Parameters\n ----------\n args: :class:`~montreal_forced_aligner.language_modeling.multiprocessing.TrainSpeakerLmArguments`\n Arguments for the function\n \"\"\"\n\n def __init__(self, args: TrainSpeakerLmArguments):\n super().__init__(args)\n self.model_path = args.model_path\n self.tree_path = args.tree_path\n self.lexicon_compilers = args.lexicon_compilers\n self.order = args.order\n self.method = args.method\n self.target_num_ngrams = args.target_num_ngrams\n self.hclg_options = args.hclg_options\n\n def _run(self) -> typing.Generator[bool]:\n \"\"\"Run the function\"\"\"\n with self.session() as session, mfa_open(self.log_path, \"w\") as log_file:\n\n job: Job = (\n 
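# --- Editor's illustrative sketch of the Popen-to-Popen piping pattern the ngram
# functions above rely on (farcompilestrings | ngramcount), shown with POSIX `cat`
# and `wc -l` instead of the OpenFst/OpenGrm binaries so it runs without Kaldi.
import subprocess

producer = subprocess.Popen(["cat"], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
consumer = subprocess.Popen(["wc", "-l"], stdin=producer.stdout, stdout=subprocess.PIPE)
producer.stdout.close()  # let the consumer see EOF when the producer exits

for text in ["one", "two", "three"]:
    producer.stdin.write(f"{text}\n".encode("utf8"))
producer.stdin.close()
print(consumer.communicate()[0].decode().strip())  # 3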
session.query(Job)\n                .options(joinedload(Job.corpus, innerjoin=True), subqueryload(Job.dictionaries))\n                .filter(Job.id == self.job_name)\n                .first()\n            )\n\n            for d in job.dictionaries:\n                dict_id = d.id\n                word_symbols_path = d.words_symbol_path\n                speakers = (\n                    session.query(Speaker.id)\n                    .join(Utterance.speaker)\n                    .filter(Utterance.job_id == job.id)\n                    .filter(Speaker.dictionary_id == dict_id)\n                    .distinct()\n                )\n                for (speaker_id,) in speakers:\n                    print(speaker_id)\n                    hclg_path = d.temp_directory.joinpath(f\"{speaker_id}.fst\")\n                    if os.path.exists(hclg_path):\n                        continue\n                    utterances = (\n                        session.query(Utterance.normalized_text)\n                        .filter(Utterance.speaker_id == speaker_id)\n                        .order_by(Utterance.kaldi_id)\n                    )\n                    mod_path = d.temp_directory.joinpath(f\"g.{speaker_id}.fst\")\n                    farcompile_proc = subprocess.Popen(\n                        [\n                            thirdparty_binary(\"farcompilestrings\"),\n                            \"--fst_type=compact\",\n                            f\"--unknown_symbol={d.oov_word}\",\n                            f\"--symbols={word_symbols_path}\",\n                            \"--keep_symbols\",\n                            \"--generate_keys=16\",\n                        ],\n                        stdin=subprocess.PIPE,\n                        stdout=subprocess.PIPE,\n                        stderr=log_file,\n                        env=os.environ,\n                    )\n                    count_proc = subprocess.Popen(\n                        [thirdparty_binary(\"ngramcount\"), f\"--order={self.order}\"],\n                        stdin=farcompile_proc.stdout,\n                        stdout=subprocess.PIPE,\n                        stderr=log_file,\n                    )\n                    make_proc = subprocess.Popen(\n                        [thirdparty_binary(\"ngrammake\"), \"--method=kneser_ney\"],\n                        stdin=count_proc.stdout,\n                        stdout=subprocess.PIPE,\n                        stderr=log_file,\n                        env=os.environ,\n                    )\n                    shrink_proc = subprocess.Popen(\n                        [\n                            thirdparty_binary(\"ngramshrink\"),\n                            \"--method=relative_entropy\",\n                            f\"--target_number_of_ngrams={self.target_num_ngrams}\",\n                            \"--shrink_opt=2\",\n                            \"--theta=0.001\",\n                            \"-\",\n                            mod_path,\n                        ],\n                        stdin=make_proc.stdout,\n                        stderr=log_file,\n                        env=os.environ,\n                    )\n                    for (text,) in utterances:\n                        farcompile_proc.stdin.write(f\"{text}\\n\".encode(\"utf8\"))\n                        farcompile_proc.stdin.flush()\n                    farcompile_proc.stdin.close()\n                    shrink_proc.wait()\n                    compiler = DecodeGraphCompiler(\n                        self.model_path,\n                        self.tree_path,\n                        self.lexicon_compilers[dict_id],\n                        **self.hclg_options,\n                    )\n                    compiler.g_fst = VectorFst.Read(str(mod_path))\n                    compiler.export_hclg(\"\", hclg_path)\n                    self.callback(os.path.exists(hclg_path))\n","repo_name":"MontrealCorpusTools/Montreal-Forced-Aligner","sub_path":"montreal_forced_aligner/language_modeling/multiprocessing.py","file_name":"multiprocessing.py","file_ext":"py","file_size_in_byte":13067,"program_lang":"python","lang":"en","doc_type":"code","stars":1099,"dataset":"github-code","pt":"81"}
+{"seq_id":"30315774853","text":"#define \"Class Game\"\n#__init__ --> initializer method (initializes \"attributes\")\n#Methods are special functions defined in a class: METHODS REQUIRE self argument\n\n#Game has a pygame context\nimport pygame\n\nclass Game:\n    def __init__(self, caption=\"my first game\", screen_width=640, screen_height=400):\n        print(\"initializing game attributes\")\n\n\n        pygame.init()\n        #create the game window (screen); set_mode expects a (width, height) tuple\n        pygame.display.set_caption(caption)\n        self.screen = pygame.display.set_mode((screen_width, screen_height))\n        self.keep_screen_open = True\n\n    #this method has the \"game loop = while true\"\n    def run(self):\n        print(\"This is the game run method\")\n        while self.keep_screen_open:\n            print(\"The game is running\")\n        else:\n            print(\"quit game 
because\",self.keep_screen_open)","repo_name":"FanolaAndreVillarroel/projectdinopy","sub_path":"demo_program/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32400546454","text":"import libs.spotipy as spotipy\nimport logging\nimport xml.etree.ElementTree as ET\nimport os.path\n\n\nclass StatifyCache:\n def __init__(self):\n self.spotipy = spotipy.Spotify()\n logging.basicConfig(filename=\"debug.log\", filemode='w', level=logging.DEBUG, format='%(asctime)s %(levelname)s > %(message)s', datefmt='%Y/%m/%d %H:%M:%S')\n \n try:\n self.tree = ET.parse(\"data/cache.xml\")\n except:\n logging.warning(\"Could not parse cache file... Recreating.\")\n data = ET.Element('root')\n songs = ET.SubElement(data, 'songs')\n artists = ET.SubElement(data, 'artists')\n albums = ET.SubElement(data, 'albums')\n ET.ElementTree(data).write(\"data/cache.xml\")\n self.tree = ET.parse(\"data/cache.xml\")\n \n \n self.root = self.tree.getroot()\n self.allSongs = self.root.find('songs')\n self.allArtists = self.root.find('artists')\n self.allAlbums = self.root.find('albums')\n \n def add(self, id, type):\n \"\"\"Add a song to the cache file.\n id is type spotify ID.\n type should be song, artist or album.\n \"\"\"\n if id == None or self.existsID(id, type):\n return None\n if(type == \"song\"):\n return self._addSong(id)\n if(type == \"artist\"):\n return self._addArtist(id)\n if(type == \"album\"):\n return self._addAlbum(id)\n \n def _addSong(self, id):\n try:\n result = self.spotipy.track(id)\n except:\n logging.error(\"Could not connect to Spotify API\")\n return \n name = result['name']\n songurl = result['external_urls']['spotify']\n artist = result['artists'][0]['name']\n artistid = result['artists'][0]['id']\n album = result['album']['name']\n albumid = result['album']['id']\n explicit = result['explicit']\n discno = result['disc_number']\n trackno = result['track_number']\n duration = result['duration_ms']\n return Song(self.tree, id, name, songurl, artist, artistid, album, albumid, explicit, discno, trackno, duration)\n \n def _addArtist(self, id):\n try:\n result = self.spotipy.artist(id)\n except:\n logging.error(\"Could not connect to Spotify API\")\n return \n name = result['name']\n artisturl = result['external_urls']['spotify']\n imageurl = result['images'][0]['url']\n return Artist(self.tree, id, name, artisturl, imageurl)\n \n def _addAlbum(self, id):\n try:\n result = self.spotipy.album(id)\n except:\n logging.error(\"Could not connect to Spotify API\")\n return \n name = result['name']\n artist = result['artists'][0]['name']\n artistid = result['artists'][0]['id']\n albumurl = result['external_urls']['spotify']\n imageurl = result['images'][0]['url']\n tracks = []\n duration = 0\n for track in result['tracks']['items']:\n tracks.append(track['name'])\n duration += track['duration_ms']\n totaltracks = result['tracks']['total']\n return Album(self.tree, id, name, artist, artistid, albumurl, imageurl, tracks, totaltracks, duration)\n \n def existsID(self, id, type):\n \"\"\"Checks an id to see if it exists in the cache file\n type should be song, artist or album or any.\n \"\"\"\n found = False\n if type == \"song\" or type == \"any\":\n for track in self.allSongs.findall('song'):\n if track.find('id').text == id:\n found = True \n if type == \"artist\" or type == \"any\":\n for artist in self.allArtists.findall('artist'):\n if artist.find('id').text == id:\n found = 
True\n if type == \"album\" or type == \"any\":\n for album in self.allAlbums.findall('album'):\n if album.find('id').text == id:\n found = True\n return found\n \n def existsSong(self, song, artist):\n \"\"\"Checks a song and artist name to see if it exists in the cache file\n \"\"\"\n found = False\n for track in self.allSongs.findall('song'):\n if track.find('name').text == song and track.find('artist').text == artist:\n found = True \n return found\n \n def getID(self, id):\n \"\"\"Return the Song, Artist or Album object based on id, if it exists\n \"\"\"\n if self.existsID(id, \"any\"):\n for track in self.allSongs.findall('song'):\n if track.find('id').text == id:\n return Song.read(track)\n for artist in self.allArtists.findall('artist'):\n if artist.find('id').text == id:\n return Artist.read(artist)\n for album in self.allAlbums.findall('album'):\n if album.find('id').text == id:\n return Album.read(album)\n else:\n return None\n \n def getName(self, name, type):\n \"\"\"Return the Song, Artist or Album object based on name, if it exists\n type is either song, artist or album.\n \"\"\"\n if type == \"song\":\n for track in self.allSongs.findall('song'):\n if track.find('name').text == name:\n return Song.read(track)\n if type == \"artist\":\n for artist in self.allArtists.findall('artist'):\n if artist.find('name').text == name:\n return Artist.read(artist)\n if type == \"album\":\n for album in self.allAlbums.findall('album'):\n if album.find('name').text == name:\n return Album.read(album)\n return None\n \n def getSong(self, name, artist):\n \"\"\"Similar to getName but only for songs, with the artist field included.\n Will search and create a song if not \n Returns Song object\n \"\"\"\n for track in self.allSongs.findall('song'):\n if track.find('name').text == name and track.find('artist').text == artist:\n return Song.read(track)\n # Song not cached. Possibly un-cacheable. 
Try again or log error.\n uncacheable = open(\"data/uncacheable.txt\",'r')\n uncached = uncacheable.read().splitlines()\n uncacheable.close()\n if (name + \" - \" + artist) in uncached:\n return Song(None, None, name, None, artist, None, None, None, None, None, None, None)\n else: \n id = self.search(name, artist)\n if id != None:\n song = self.add(id, \"song\")\n if song != None:\n return song\n logging.warning(\"Uncacheable song: \" + name + \" - \" + artist)\n file = open(\"data/uncacheable.txt\",'a')\n file.write(name + \" - \" + artist + \"\\n\")\n file.close()\n return Song(None, None, name, None, artist, None, None, None, None, None, None, None)\n \n def search(self, song, artist):\n \"\"\"Sends a search string to Spotify's servers to return an ID\n Artist must not be None, song can be None\n \"\"\"\n if song != None and artist != None:\n try:\n result = self.spotipy.search(q=\"artist:\" + artist + \" track:\" + song, limit=1, type='track')\n except:\n logging.error(\"Could not connect to Spotify API\")\n return \n if len(result['tracks']['items']) > 0:\n return result['tracks']['items'][0]['id']\n else:\n return None\n elif song == None and artist != None:\n try:\n result = self.spotipy.search(q=\"artist:\" + artist, limit=1, type='artist')\n except:\n logging.error(\"Could not connect to Spotify API\")\n return \n if len(result['artists']['items']) > 0:\n return result['artists']['items'][0]['id']\n else:\n return None\n \nclass Song: \n def __init__(self, tree, id, name, songurl, artist, artistid, album, albumid, explicit, discno, trackno, duration):\n self.id = id\n self.name = name\n self.songurl = songurl\n self.artist = artist\n self.artistid = artistid\n self.album = album\n self.albumid = albumid\n self.explicit = explicit\n self.discno = discno\n self.trackno = trackno\n self.duration = duration\n \n if tree != None:\n self.tree = tree\n self.write()\n #print (\"Song:\", id, name, songurl, artist, artistid, album, albumid, explicit, discno, trackno, duration)\n \n def __str__(self):\n return str(self.id) + \" (\" + self.name + \" - \" + self.artist + \")\"\n def __repr__(self):\n return self.__str__()\n \n def write(self):\n \"\"\"Write this object to the cache file as XML\n \"\"\"\n new = ET.Element('song')\n \n newid = ET.SubElement(new, 'id')\n newname = ET.SubElement(new, 'name')\n newsongurl = ET.SubElement(new, 'songurl')\n newartist = ET.SubElement(new, 'artist')\n newartistid = ET.SubElement(new, 'artistid')\n newalbum = ET.SubElement(new, 'album')\n newalbumid = ET.SubElement(new, 'albumid')\n newexplicit = ET.SubElement(new, 'explicit')\n newdiscno = ET.SubElement(new, 'discno')\n newtrackno = ET.SubElement(new, 'trackno')\n newduration = ET.SubElement(new, 'duration')\n \n \n newid.text = self.id \n newname.text = self.name \n newsongurl.text = self.songurl \n newartist.text = self.artist \n newartistid.text = self.artistid \n newalbum.text = self.album\n newalbumid.text = self.albumid\n newexplicit.text = str(self.explicit)\n newdiscno.text = str(self.discno)\n newtrackno.text = str(self.trackno)\n newduration.text = str(self.duration)\n \n self.tree.getroot()[0].append(new)\n self.tree.write(\"data/cache.xml\")\n \n def read(element):\n \"\"\"Read this object from an Element object\n \"\"\"\n id = element.find('id').text\n name = element.find('name').text\n songurl = element.find('songurl').text\n artist = element.find('artist').text\n artistid = element.find('artistid').text\n album = element.find('album').text\n albumid = element.find('albumid').text\n 
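# --- Editor's illustrative sketch of the ElementTree build/read round trip that
# Song.write()/Song.read() above implement, reduced to two fields and kept in
# memory instead of data/cache.xml.
import xml.etree.ElementTree as ET

song = ET.Element("song")
ET.SubElement(song, "id").text = "abc123"           # placeholder Spotify ID
ET.SubElement(song, "name").text = "Example Track"  # placeholder title

xml_bytes = ET.tostring(song)
parsed = ET.fromstring(xml_bytes)
print(parsed.find("id").text, parsed.find("name").text)  # abc123 Example Track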
explicit = element.find('explicit').text\n discno = element.find('discno').text\n trackno = element.find('trackno').text\n duration = element.find('duration').text\n return Song(None, id, name, songurl, artist, artistid, album, albumid, explicit, discno, trackno, duration)\n \nclass Artist: \n def __init__(self, tree, id, name, artisturl, imageurl):\n self.id = id\n self.name = name\n self.artisturl = artisturl\n self.imageurl = imageurl\n \n if tree != None:\n self.tree = tree\n self.write()\n #print (\"Artist:\", id, name, artisturl, imageurl)\n \n def __str__(self):\n return self.id + \" (\" + self.name + \")\"\n def __repr__(self):\n return self.__str__()\n \n def write(self):\n \"\"\"Write this object to the cache file as XML\n \"\"\"\n new = ET.Element('artist')\n \n newid = ET.SubElement(new, 'id')\n newname = ET.SubElement(new, 'name')\n newartisturl = ET.SubElement(new, 'artisturl')\n newimageurl = ET.SubElement(new, 'imageurl')\n \n newid.text = self.id\n newname.text = self.name\n newartisturl.text = self.artisturl\n newimageurl.text = self.imageurl\n \n self.tree.getroot()[1].append(new)\n self.tree.write(\"data/cache.xml\")\n \n def read(element):\n \"\"\"Read this object from an Element object\n \"\"\"\n id = element.find('id').text\n name = element.find('name').text\n artisturl = element.find('artisturl').text\n imageurl = element.find('imageurl').text\n return Artist(None, id, name, artisturl, imageurl)\n \nclass Album: \n def __init__(self, tree, id, name, artist, artistid, albumurl, imageurl, tracks, totaltracks, duration):\n self.id = id\n self.name = name\n self.artist = artist\n self.artistid = artistid\n self.albumurl = albumurl\n self.imageurl = imageurl\n self.tracks = tracks\n self.totaltracks = totaltracks\n self.duration = duration\n \n if tree != None:\n self.tree = tree\n self.write()\n #print (\"Album:\", id, name, artist, artistid, albumurl, imageurl, tracks, totaltracks, duration)\n \n def __str__(self):\n return self.id + \" (\" + self.name + \" - \" + self.artist + \")\"\n def __repr__(self):\n return self.__str__()\n \n def write(self):\n \"\"\"Write this object to the cache file as XML\n \"\"\"\n new = ET.Element('album')\n \n newid = ET.SubElement(new, 'id')\n newname = ET.SubElement(new, 'name')\n newartist = ET.SubElement(new, 'artist')\n newartistid = ET.SubElement(new, 'artistid')\n newalbumurl = ET.SubElement(new, 'albumurl')\n newimageurl = ET.SubElement(new, 'imageurl')\n newtracks = ET.SubElement(new, 'tracks')\n newtotaltracks = ET.SubElement(new, 'totaltracks')\n newduration = ET.SubElement(new, 'duration')\n \n \n newid.text = self.id \n newname.text = self.name \n newartist.text = self.artist \n newartistid.text = self.artistid \n newalbumurl.text = self.albumurl\n newimageurl.text = self.imageurl\n newtracks.text = str(self.tracks)\n newtotaltracks.text = str(self.totaltracks)\n newduration.text = str(self.duration)\n \n self.tree.getroot()[2].append(new)\n self.tree.write(\"data/cache.xml\")\n\n def read(element):\n \"\"\"Read this object from an element object\n \"\"\"\n id = element.find('id').text\n name = element.find('name').text\n artist = element.find('artist').text\n artistid = element.find('artistid').text\n albumurl = element.find('albumurl').text\n imageurl = element.find('imageurl').text\n tracks = element.find('tracks').text\n totaltracks = element.find('totaltracks').text\n duration = element.find('duration').text\n return Album(None, id, name, artist, artistid, albumurl, imageurl, tracks, totaltracks, duration)\n \ndef 
test():\n    \"\"\"Used to test cache changes.\n    \"\"\"\n    pass\n    #sc = StatifyCache()\n    #print(sc.existsID(\"2ELcuwXrtMA8ect9cGTYnQ\", \"song\"))\n    #print(sc.existsID(\"2Dr744zaEbNqmW9jxw4gfq\", \"any\"))\n    #print(sc.existsID(\"2Dr744zaEbNqsjxw4gfq\", \"any\"))\n    #print(sc.get(\"2PqYzY7wdgq0ydlC3nR4ei\").find('name').text)\n    #result = sc.add(sc.search(\"Lunar Liftoff\", \"Adam Young\"), \"song\")\n    #result = sc.get(sc.search(\"Lunar Liftoff\", \"Adam Young\"))\n    #result2 = sc.add(result.find('albumid').text, \"album\")\n    #print(result2)\n    #print(sc.existsSong(\"Lunar Liftoff\", \"Adam Young\"))\n    #print(sc.existsID(\"3dRfiJ2650SZu6GbydcHNb\", \"artist\"))\n    #print(sc.getName(\"Adam Young\",\"artist\").artisturl)\n    \n    \n    \ntest()","repo_name":"beng92/Statify","sub_path":"src/StatifyCache.py","file_name":"StatifyCache.py","file_ext":"py","file_size_in_byte":15847,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"21357706409","text":"\"\"\"PPI Dataset.\n\nA third-party implementation of citation graph dataset from dgl.4.0.1 post. This\nfile is needed to retain compatibility.\n\"\"\"\nimport json\nimport numpy as np\nimport networkx as nx\nfrom networkx.readwrite import json_graph\n\nfrom dgl.data.utils import download, extract_archive, get_download_dir, _get_dgl_url\nfrom dgl import DGLGraph\nfrom functools import wraps\n\n_url = 'dataset/ppi.zip'\n\n\ndef retry_method_with_fix(fix_method):\n    \"\"\"Decorator that executes a fix method before retrying again when the decorated method\n\n    fails once with any exception.\n\n    If the decorated method fails again, the execution fails with that\n    exception.\n\n    Notes\n    -----\n    This decorator only works on class methods, and the fix function must also\n    be a class method.\n    It would not work on functions.\n\n    Parameters\n    ----------\n    fix_method : TYPE\n        Description\n\n    Deleted Parameters\n    ------------------\n    fix_func : callable\n        The fix method to execute. It should not accept any arguments. Its\n        return values are\n        ignored.\n\n    Returns\n    -------\n    TYPE\n        Description\n    \"\"\"\n\n    def _creator(func):\n        \"\"\"Summary\n\n        Parameters\n        ----------\n        func : TYPE\n            Description\n\n        Returns\n        -------\n        TYPE\n            Description\n        \"\"\"\n\n        @wraps(func)\n        def wrapper(self, *args, **kwargs):\n            \"\"\"Summary\n\n            Parameters\n            ----------\n            *args\n                Description\n            **kwargs\n                Description\n\n            Returns\n            -------\n            TYPE\n                Description\n            \"\"\"\n            # pylint: disable=W0703,bare-except\n            try:\n                return func(self, *args, **kwargs)\n            except:\n                fix_method(self)\n                return func(self, *args, **kwargs)\n\n        return wrapper\n\n    return _creator\n\n\nclass PPIDataset(object):\n    \"\"\"A toy Protein-Protein Interaction network dataset.\n\n    Adapted from\n    https://github.com/williamleif/GraphSAGE/tree/master/example_data.\n\n    The dataset contains 24 graphs. The average number of nodes per graph\n    is 2372. 
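# --- Editor's illustrative sketch of the retry-with-fix pattern implemented by
# retry_method_with_fix() above, shown with a toy Loader whose first call fails
# until a fix method runs. The class and its methods are made up for illustration.
from functools import wraps

def retry_with_fix(fix):
    def deco(func):
        @wraps(func)
        def wrapper(self, *args, **kwargs):
            try:
                return func(self, *args, **kwargs)
            except Exception:
                fix(self)  # run the repair step, then retry exactly once
                return func(self, *args, **kwargs)
        return wrapper
    return deco

class Loader:
    def __init__(self):
        self.downloaded = False

    def _download(self):
        self.downloaded = True

    @retry_with_fix(_download)
    def load(self):
        if not self.downloaded:
            raise FileNotFoundError("data missing")
        return "loaded"

print(Loader().load())  # loaded (after one transparent retry)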
Each node has 50 features and 121 labels.\n\n We use 20 graphs for training, 2 for validation and 2 for testing.\n\n Attributes\n ----------\n features : TYPE\n Description\n graph : TYPE\n Description\n graph_id : TYPE\n Description\n labels : TYPE\n Description\n mode : TYPE\n Description\n test_graphs : list\n Description\n test_labels : list\n Description\n test_mask_list : list\n Description\n train_graphs : list\n Description\n train_labels : list\n Description\n train_mask_list : list\n Description\n valid_graphs : list\n Description\n valid_labels : list\n Description\n valid_mask_list : list\n Description\n \"\"\"\n\n def __init__(self, mode):\n \"\"\"Initialize the dataset.\n\n Parameters\n ---------\n mode : str\n ('train', 'valid', 'test').\n\n Parameters\n ----------\n mode : TYPE\n Description\n \"\"\"\n assert mode in ['train', 'valid', 'test']\n self.mode = mode\n self._name = 'ppi'\n self._dir = get_download_dir()\n self._zip_file_path = '{}/{}.zip'.format(self._dir, self._name)\n self._load()\n self._preprocess()\n\n def _download(self):\n \"\"\"Summary\"\"\"\n download(_get_dgl_url(_url), path=self._zip_file_path)\n extract_archive(self._zip_file_path, '{}/{}'.format(self._dir, self._name))\n\n @retry_method_with_fix(_download)\n def _load(self):\n \"\"\"Loads input data.\n\n train/test/valid_graph.json => the graph data used for training,\n test and validation as json format;\n train/test/valid_feats.npy => the feature vectors of nodes as\n numpy.ndarray object, its shape is [n, v],\n n is the number of nodes, v is the feature's dimension;\n train/test/valid_labels.npy=> the labels of the input nodes, it\n is a numpy ndarray, it's like [[0, 0, 1, ... 0],\n [0, 1, 1, 0 ...1]], shape of it is n*h, n is the number of nodes,\n h is the label's dimension;\n train/test/valid/_graph_id.npy => the element in it indicates which\n graph the nodes belong to, it is a one dimensional numpy.ndarray\n object and the length of it is equal to the number of nodes,\n it's like [1, 1, 2, 1...20].\n \"\"\"\n print('Loading G...')\n if self.mode == 'train':\n with open('{}/ppi/train_graph.json'.format(self._dir)) as jsonfile:\n g_data = json.load(jsonfile)\n self.labels = np.load('{}/ppi/train_labels.npy'.format(self._dir))\n self.features = np.load('{}/ppi/train_feats.npy'.format(self._dir))\n self.graph = DGLGraph(nx.DiGraph(json_graph.node_link_graph(g_data)))\n self.graph_id = np.load('{}/ppi/train_graph_id.npy'.format(self._dir))\n if self.mode == 'valid':\n with open('{}/ppi/valid_graph.json'.format(self._dir)) as jsonfile:\n g_data = json.load(jsonfile)\n self.labels = np.load('{}/ppi/valid_labels.npy'.format(self._dir))\n self.features = np.load('{}/ppi/valid_feats.npy'.format(self._dir))\n self.graph = DGLGraph(nx.DiGraph(json_graph.node_link_graph(g_data)))\n self.graph_id = np.load('{}/ppi/valid_graph_id.npy'.format(self._dir))\n if self.mode == 'test':\n with open('{}/ppi/test_graph.json'.format(self._dir)) as jsonfile:\n g_data = json.load(jsonfile)\n self.labels = np.load('{}/ppi/test_labels.npy'.format(self._dir))\n self.features = np.load('{}/ppi/test_feats.npy'.format(self._dir))\n self.graph = DGLGraph(nx.DiGraph(json_graph.node_link_graph(g_data)))\n self.graph_id = np.load('{}/ppi/test_graph_id.npy'.format(self._dir))\n\n def _preprocess(self):\n \"\"\"Summary\"\"\"\n if self.mode == 'train':\n self.train_mask_list = []\n self.train_graphs = []\n self.train_labels = []\n for train_graph_id in range(1, 21):\n train_graph_mask = np.where(self.graph_id == train_graph_id)[0]\n 
self.train_mask_list.append(train_graph_mask)\n self.train_graphs.append(self.graph.subgraph(train_graph_mask))\n self.train_labels.append(self.labels[train_graph_mask])\n if self.mode == 'valid':\n self.valid_mask_list = []\n self.valid_graphs = []\n self.valid_labels = []\n for valid_graph_id in range(21, 23):\n valid_graph_mask = np.where(self.graph_id == valid_graph_id)[0]\n self.valid_mask_list.append(valid_graph_mask)\n self.valid_graphs.append(self.graph.subgraph(valid_graph_mask))\n self.valid_labels.append(self.labels[valid_graph_mask])\n if self.mode == 'test':\n self.test_mask_list = []\n self.test_graphs = []\n self.test_labels = []\n for test_graph_id in range(23, 25):\n test_graph_mask = np.where(self.graph_id == test_graph_id)[0]\n self.test_mask_list.append(test_graph_mask)\n self.test_graphs.append(self.graph.subgraph(test_graph_mask))\n self.test_labels.append(self.labels[test_graph_mask])\n\n def __len__(self):\n \"\"\"Return number of samples in this dataset.\n\n Returns\n -------\n TYPE\n Description\n \"\"\"\n if self.mode == 'train':\n return len(self.train_mask_list)\n if self.mode == 'valid':\n return len(self.valid_mask_list)\n if self.mode == 'test':\n return len(self.test_mask_list)\n\n def __getitem__(self, item):\n \"\"\"Get the i^th sample.\n\n Parameters\n ---------\n idx : int\n The sample index.\n\n Returns\n -------\n (dgl.DGLGraph, ndarray)\n The graph, and its label.\n\n Parameters\n ----------\n item : TYPE\n Description\n \"\"\"\n if self.mode == 'train':\n g = self.train_graphs[item]\n g.ndata['feat'] = self.features[self.train_mask_list[item]]\n label = self.train_labels[item]\n elif self.mode == 'valid':\n g = self.valid_graphs[item]\n g.ndata['feat'] = self.features[self.valid_mask_list[item]]\n label = self.valid_labels[item]\n elif self.mode == 'test':\n g = self.test_graphs[item]\n g.ndata['feat'] = self.features[self.test_mask_list[item]]\n label = self.test_labels[item]\n return g, label\n\n\nclass LegacyPPIDataset(PPIDataset):\n \"\"\"Legacy version of PPI Dataset\"\"\"\n\n def __getitem__(self, item):\n \"\"\"Get the i^th sample.\n\n Parameters\n ---------\n idx : int\n The sample index.\n\n Returns\n -------\n (dgl.DGLGraph, ndarray, ndarray)\n The graph, features and its label.\n\n Parameters\n ----------\n item : TYPE\n Description\n \"\"\"\n if self.mode == 'train':\n return self.train_graphs[item], self.features[\n self.train_mask_list[item]], self.train_labels[item]\n if self.mode == 'valid':\n return self.valid_graphs[item], self.features[\n self.valid_mask_list[item]], self.valid_labels[item]\n if self.mode == 'test':\n return self.test_graphs[item], self.features[\n self.test_mask_list[item]], self.test_labels[item]\n","repo_name":"googleinterns/sparsegraphssl","sub_path":"data_reader/legacy_ppi.py","file_name":"legacy_ppi.py","file_ext":"py","file_size_in_byte":8757,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"}
{"seq_id":"10912831925","text":"#! 
/usr/bin/env python3\n\nimport numpy as np\nimport argparse\nimport os\n\nparser = argparse.ArgumentParser(description=\"list npy file shape and other attributes\")\nparser.add_argument(\"npys\", type=str, nargs='*', help='paths to npy file, leave empty will list all npy file in the directory')\n\nargs = parser.parse_args()\n\nformat_str = \"\"\"\n=========================================\nfilename: %s\n-----------------------------------------\nshape: %s\ndtype: %s\nmax: %f\nmin: %f\nmean: %f\nvar: %f\n=========================================\n\"\"\"\n\nnpys = args.npys\nif npys == []:\n npys = [i for i in os.listdir('.') if i.endswith('.npy')]\nfor npy in npys:\n array = np.load(npy)\n shape = array.shape\n dtype = array.dtype\n arr_max = np.max(array)\n arr_min = np.min(array)\n arr_mean = np.average(array)\n arr_var = np.var(array)\n print(format_str % (npy, shape, dtype, arr_max, arr_min, arr_mean, arr_var))\n\n\n\n","repo_name":"bravomikekilo/BMKLittleToolBox","sub_path":"lnpy.py","file_name":"lnpy.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"42551966238","text":"#!/usr/bin/env python3\nimport fileinput\nimport inspect\nimport os\nimport re\nimport subprocess\nfrom datetime import datetime\nfrom multiprocessing import Process\nfrom subprocess import PIPE\n\nenv = dict(os.environ)\n\n\ndef _print_line_number(number_of_outer_frame=1):\n cf = inspect.currentframe()\n frame = cf\n for ii in range(number_of_outer_frame):\n frame = frame.f_back\n\n timestamp = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')\n print('\\n'.join(['#' * 40, '[%s] LINE NUMBER: %d' % (timestamp, frame.f_lineno), '#' * 40]))\n\n\ndef _run(cmd, file_path_name=None, cwd=None, file_mode='a', run_env=env):\n def _f():\n if not file_path_name:\n _p = subprocess.Popen(cmd, cwd=cwd, env=run_env)\n _p.communicate()\n if _p.returncode != 0:\n raise Exception()\n else:\n with open(file_path_name, file_mode) as ff:\n _p = subprocess.Popen(cmd, stdout=ff, cwd=cwd, env=run_env)\n _p.communicate()\n if _p.returncode != 0:\n raise Exception()\n\n _print_line_number(2)\n cmd_string = ' '.join(cmd)\n timestamp = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')\n print('\\n'.join(['#' * 40, '[%s] COMMAND: %s' % (timestamp, cmd_string), '#' * 40]))\n\n pp = Process(target=_f)\n pp.start()\n pp.join()\n if pp.exitcode != 0:\n raise Exception()\n\n\ndef _file_line_replace(file_path_name, str_old, str_new, backup='.bak'):\n with fileinput.FileInput(file_path_name, inplace=True, backup=backup) as ff:\n for line in ff:\n new_line = re.sub(str_old, str_new, line)\n print(new_line, end='')\n\n\ndef main():\n provisioning_path = '/vagrant'\n hostname = 'dv-sixshop-my-local-1a-012345'\n _file_line_replace('/etc/sysconfig/network', '^HOSTNAME=localhost.localdomain$', 'HOSTNAME=%s' % hostname)\n with open('/etc/hosts', 'a') as ff:\n ff.write('127.0.0.1 %s\\n' % hostname)\n _run(['hostname', hostname])\n _run(['/etc/init.d/network', 'restart'])\n\n _print_line_number()\n\n _print_line_number()\n\n _run(['fallocate', '-l', '2G', '/swapfile'])\n _run(['chmod', '600', '/swapfile'])\n _run(['mkswap', '/swapfile'])\n _run(['swapon', '/swapfile'])\n with open('/etc/fstab', 'a') as ff:\n ff.write('/swapfile\tswap\tswap\tsw\t0\t0\\n')\n\n _print_line_number()\n\n with open('/etc/server_info', 'w') as ff:\n ff.write('AWS_EC2_INSTANCE_ID=i-01234567\\n')\n ff.write('AWS_EC2_AVAILABILITY_ZONE=my-local-1a\\n')\n\n _print_line_number()\n\n 
subprocess.Popen(['chpasswd'], stdin=PIPE).communicate(b'root:1234qwer')\n _file_line_replace('/etc/ssh/sshd_config', '^#PermitRootLogin yes$', 'PermitRootLogin yes')\n _file_line_replace('/etc/ssh/sshd_config', '^PasswordAuthentication no$', 'PasswordAuthentication yes')\n _run(['/sbin/service', 'sshd', 'restart'])\n\n _print_line_number()\n\n _run(['yum', '-y', 'update'])\n file_path_name = '%s/requirements_rpm.txt' % provisioning_path\n if os.path.exists(file_path_name):\n with open(file_path_name, 'r') as ff:\n lines = ff.readlines()\n for ll in lines:\n _run(['yum', '-y', 'install', ll.strip()])\n _run(['yum', '-y', 'erase', 'java-1.7.0-openjdk'])\n\n _print_line_number()\n\n _run(['yum', '-y', 'erase', 'ntp*'])\n _run(['yum', '-y', 'install', 'chrony'])\n _run(['/sbin/service', 'chronyd', 'start'])\n _run(['/sbin/chkconfig', 'chronyd', 'on'])\n\n _print_line_number()\n\n cmd_common = ['cp', '--backup']\n file_list = list()\n for ff in file_list:\n cmd = cmd_common + ['%s/configuration' % provisioning_path + ff, ff]\n _run(cmd)\n\n ff_source = '/etc/tomcat8/tomcat8_vagrant.conf'\n ff_target = '/etc/tomcat8/tomcat8.conf'\n cmd = cmd_common + ['%s/configuration' % provisioning_path + ff_source, ff_target]\n _run(cmd)\n\n _print_line_number()\n\n _run(['/sbin/chkconfig', 'tomcat8', 'on'])\n _run(['reboot'])\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"HardBoiledSmith/SpringMultidatabaseExample","sub_path":"_provisioning/provisioning.py","file_name":"provisioning.py","file_ext":"py","file_size_in_byte":4021,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"43362385146","text":"import cv2 as cv\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n# Enter the path of the picture to get 8 bitmaps\ndef get_bitmaps(image):\n bit_extraction = []\n bit_images = []\n for i in range(8):\n bit_extraction.append(np.ones(image.shape, dtype=np.uint8) * pow(2, i))\n for i in range(8):\n bit_images.append(cv.bitwise_and(image, bit_extraction[i]))\n return bit_images\n\n\ndef decomposite():\n lenna = cv.imread('../images/pele_sem_pelos.jpg', cv.IMREAD_COLOR)\n lenna = lenna[:, :, [2, 1, 0]]\n gray_lenna = cv.cvtColor(lenna, cv.COLOR_BGR2GRAY)\n gray_bit_images = get_bitmaps(gray_lenna)\n color_bit_images = get_bitmaps(lenna)\n\n plt.figure(figsize=(12, 12))\n plt.suptitle('gray image bitmap', fontsize=16)\n plt.subplot(3, 3, 1), plt.axis('off'), plt.title('melanoma'), plt.imshow(gray_lenna, cmap='gray')\n for i in range(8):\n plt.subplot(3, 3, i + 2), plt.axis('off'), plt.title('bit_img_' + str(i)), plt.imshow(gray_bit_images[i],\n cmap='gray')\n plt.show()\n\n plt.figure(figsize=(12, 12))\n plt.suptitle('color image bitmap', fontsize=16)\n plt.subplot(3, 3, 1), plt.axis('off'), plt.title('melanoma'), plt.imshow(lenna)\n for i in range(8):\n plt.subplot(3, 3, i + 2), plt.axis('off'), plt.title('bit_img_' + str(i)), plt.imshow(color_bit_images[i])\n plt.show()\n\n\nif __name__ == \"__main__\":\n decomposite()\n","repo_name":"jozecarlos/mestrado","sub_path":"PROCESSAMENTO_DE_IMAGEM/filters/bit_plane_decomposition.py","file_name":"bit_plane_decomposition.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15496147903","text":"import os; os.system('cls' if os.name == 'nt' else 'clear')\nimport random; import math\n\nclass Node:\n def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None\n\nclass 
Troubleshooter:\n def __init__(self, headValue):\n self.head = Node(headValue)\n self.length = 1\n\n def maxValue(self):\n temp = self.head\n highestValue = temp.value\n\n while temp:\n if(temp.value > highestValue):\n highestValue = temp.value\n temp = temp.right\n return highestValue\n\n def append(self, value):\n new_node = Node(value)\n temp = self.head; pre = temp\n\n while temp.right:\n pre = temp; temp = temp.right\n temp.right = new_node\n self.length += 1; return True\n\n def calculateAverage(self):\n listAverage = 0\n temp = self.head\n\n while temp:\n listAverage += temp.value\n temp = temp.right\n listAverage /= self.length\n return listAverage\n\n def calculateStandardDeviation(self):\n averageNumber = self.calculateAverage()\n temp = self.head; totalNumber = 0\n\n while temp:\n totalNumber += ((temp.value - averageNumber) ** 2)\n temp = temp.right\n totalNumber /= self.length\n totalNumber = round(math.sqrt(totalNumber), 2)\n return totalNumber\n\n def highestErrorCheck(self, binaryMaxValue):\n if(binaryMaxValue == self.maxValue()): return True\n else: return False\n\n def removeElement(self, value):\n if(self.length <= 0): return None\n temp = self.head; pre = temp\n\n while temp.value != value and temp.right != None:\n pre = temp; temp = temp.right\n pre.right = pre.right.right\n self.length -= 1; return True\n\n def print_elements(self):\n if(self.length <= 0): return None\n temp = self.head; counter = 0\n while temp: \n print(temp.value, end=' - ' if counter < 7 else '\\n')\n temp = temp.right; counter += 1\n if(counter == 10): counter = 0\n\nclass BinarySearchTree:\n def __init__(self):\n self.root = None\n self.troubleShooter = None\n\n def insert(self, value):\n new_node = Node(value)\n temp = self.root\n\n # new root logic\n if(self.root == None):\n self.troubleShooter = Troubleshooter(value)\n self.root = new_node; return True\n\n while True:\n # avoid error and replacement bug\n if(temp.value == new_node.value): return None\n \n # left node logic\n if(new_node.value < temp.value):\n if(temp.left == None):\n temp.left = new_node\n self.troubleShooter.append(new_node.value)\n return True\n temp = temp.left\n\n # right node logic\n else:\n if(temp.right == None):\n temp.right = new_node\n self.troubleShooter.append(new_node.value)\n return True\n temp = temp.right\n\n def contains(self, value):\n temp = self.root\n while temp:\n if(value < temp.value):\n temp = temp.left\n elif(value > temp.value):\n temp = temp.right\n else: return True\n # executed when it's equal \n return False\n\n def min_value_node(self, current_node):\n while current_node.left:\n current_node = current_node.left\n return current_node\n\n def max_value_node(self, current_node):\n while current_node.right:\n current_node = current_node.right\n return current_node\n\n def MaxValue(self):\n if(self.root == None): return None\n return self.max_value_node(self.root).value\n\n def MinValue(self):\n if(self.root == None): return None\n return self.min_value_node(self.root).value\n\n def AverageValue(self):\n if(self.root == None): return None\n return self.troubleShooter.calculateAverage()\n\n def StdDevValue(self):\n if(self.root == None): return None\n return self.troubleShooter.calculateStandardDeviation()\n\n def executeTroubleCheck(self):\n if(self.troubleShooter != None):\n if(self.troubleShooter.highestErrorCheck(self.max_value_node(self.root).value)): print('Note: No Error Detected!'); return True\n else: print(f'Note: Error Detected!, {self.troubleShooter.maxValue()} != 
{self.max_value_node(self.root).value}'); return False\n # make sure that our binary search tree is a good enough tree\n\n def reconstruct(self, value):\n if(self.troubleShooter.head == None or self.contains(value) != True): return None \n elif(self.root == None): return None\n temp = self.root; pre = temp; position = None\n self.troubleShooter.removeElement(value)\n\n while temp:\n if(temp.value == value): \n nodeStatuses = 'Element Has Been Found'\n nodeBreakAvailability = True; break\n \n if(value < temp.value):\n pre = temp; temp = temp.left\n position = 'left'\n\n elif(value > temp.value):\n pre = temp; temp = temp.right\n position = 'right'\n\n # case 1 - no child node\n if(temp.right == None and temp.left == None):\n if(position == 'left'): pre.left = None\n elif(position == 'right'): pre.right = None\n print('Case 1 Executed')\n\n # case 2 - one child node\n elif(temp.right == None or temp.left == None):\n if(position == 'left' and pre.left.left != None): \n pre.left = pre.left.left\n print('Case 2 - Condition 1 Executed')\n \n elif(position == 'left' and pre.left.right != None):\n pre.left = pre.left.right\n print('Case 2 - Condition 2 Executed')\n \n elif(position == 'right' and pre.right.left != None):\n pre.right = pre.right.left\n print('Case 2 - Condition 3 Executed')\n \n elif(position == 'right' and pre.right.right != None):\n pre.right = pre.right.right\n print('Case 2 - Condition 4 Executed')\n else: pass\n\n # case 3 - two child node\n elif(temp.right != None and temp.left != None):\n print('Case 3 - Starting Logic'); # starting grid\n def leftMax(): return self.max_value_node(temp.left)\n def rightMin(): return self.min_value_node(temp.right)\n \n listNextNode = [leftMax, rightMin]; nextNode = None\n nextNode = random.choice(listNextNode); nextNode = nextNode()\n \n self.reconstruct(nextNode.value)\n pivot = temp.value; temp.value = nextNode.value\n print(f'Case 3 - Finished and Removed {pivot}')\n\n # synchronization with the troubleshooter\n self.troubleShooter.removeElement(value)\n \n def showAllDataFromTroubleShooter(self):\n if(self.troubleShooter != None):\n self.troubleShooter.print_elements()\n # logic by davis_arrizqi\n \n\nif __name__ == \"__main__\":\n myTree = BinarySearchTree()\n print('===== Final Result =====')\n \n # declaration and data insertion\n listNumber = [12, 5, 3, 1, 7, 9, 8, 11, 15, 13, 14, 17, 20, 18]\n for data in listNumber: myTree.insert(data)\n \n # method to check for trouble\n myTree.executeTroubleCheck()\n\n # show max value\n maxValue = myTree.MaxValue()\n print(f'Max Value: {maxValue}')\n\n # show average value\n averageValue = myTree.AverageValue()\n print(f'Average Value: {averageValue}')\n\n # show standard deviation\n stdDeviationValue = myTree.StdDevValue()\n print(f'Standard Deviation: {stdDeviationValue}')\n print('========================')\n\n # reconstruct function\n print('\\n===== Reconstruct Debug Log =====')\n myTree.reconstruct(5); myTree.reconstruct(15)\n print('=================================')\n\n # show all data = debug\n myTree.showAllDataFromTroubleShooter()\n print('\\n')\n","repo_name":"davisarrizqi/rekap-semester-2","sub_path":"modul/Tugas 9.5/flytigera.py","file_name":"flytigera.py","file_ext":"py","file_size_in_byte":8281,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"}
{"seq_id":"16501137283","text":"import 
json\n\nemp1={\"name\":\"neelam\",\"Designation\":\"programer\",\"Age\":\"24\",\"Salary\":\"2400\"}\nemp2={\"name\":\"komal\",\"Designation\":\"trainer\",\"Age\":\"24\",\"salary\":\"20000\"}\nemp3={\"name\":\"anuradha\",\"Designation\":\"HR\",\"Age\":\"25\",\"salary\":\"40000\"}\nemp4={\"name\":\"Abhishek\",\"Designation\":\"manager\",\"Age\":\"29\",\"salary\":\"63000\"}\n\nd=[]\nd.append(emp1)\nd.append(emp2)\nd.append(emp3)\nd.append(emp4)\n\nwith open(\"file2_txt.json\",\"w\") as file:\n json.dump(d,file,indent=4)\n\n\n\n\n\n\n\n","repo_name":"jyotirmayeemohanta/Json","sub_path":"Question8.py","file_name":"Question8.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37431763835","text":"# iterates over each file in input directory\r\n# and moves to output_directory under folders by file type\r\nfrom os import system, listdir, path, mkdir, rename, getlogin, startfile\r\nimport ctypes, random, shutil\r\nfrom . import jenny\r\n\r\ndef sort_directory():\r\n input_default = 'C:/Users/' + getlogin() + '/Downloads'\r\n output_default = 'G:/Downloads'\r\n\r\n input_directory = jenny.ask(f'Input directory or press enter to use default {input_default}') or input_default\r\n output_directory = jenny.ask(f'Output directory (enter to use default {output_default}') or output_default\r\n\r\n if not path.exists(input_directory):\r\n jenny.say(f'Input directory not found @ {input_directory}')\r\n return\r\n\r\n if not path.exists(output_directory):\r\n jenny.say(f'Output directory not found @ {output_directory}')\r\n return\r\n\r\n mapping = {\r\n 'video': ['mp4', 'mpeg', 'wmv', 'mov', 'flv'],\r\n 'audio': ['wav', 'mp3'],\r\n 'images': ['jpg', 'jpeg', 'png', 'ico'],\r\n 'torrents': ['torrent'],\r\n 'subtitles': ['srt'],\r\n 'zips': ['zip'],\r\n 'software': ['exe', 'msi'],\r\n 'files': ['html', 'php', 'css', 'js', 'py', 'csv', 'json', 'pdf', 'docx', 'txt'],\r\n 'sql': ['sql']\r\n }\r\n\r\n for file in listdir(input_directory):\r\n full_file_path = input_directory + '/' + file\r\n extension = path.splitext(file)[1].replace('.', '')\r\n for folder, extensions in mapping.items():\r\n if extension in extensions:\r\n destination = output_directory + '/' + folder\r\n if not path.exists(destination):\r\n mkdir(destination)\r\n\r\n try:\r\n status = shutil.move(full_file_path, destination)\r\n if status:\r\n jenny.say(f'Moved {full_file_path} to {destination}')\r\n except Exception as e:\r\n print(f'____ FAILED TO MOVE {full_file_path} to {destination}')\r\n print(e)\r\n\r\n return jenny.say('Finished sorting..')\r\n\r\ndef play_random_music():\r\n input_default = 'C:/Users/' + getlogin() + '/Music/favs'\r\n input_directory = jenny.ask(f'Input directory or press enter to use default {input_default}') or input_default\r\n system(\"vlc.exe.lnk \" + input_directory + \" --qt-start-minimized\")\r\n\r\ndef set_random_wallpaper():\r\n default_path = 'C:/Users/' + getlogin() + '/Pictures/Wallpapers'\r\n path = jenny.ask(f'Input directory or press enter to use default {default_path}') or default_path\r\n image = default_path + '/' + random.choice(listdir(default_path))\r\n\r\n ctypes.windll.user32.SystemParametersInfoW(20, 0, image , 0)\r\n\r\ndef start_work_place():\r\n startfile('powershell')\r\n startfile('chrome')\r\n startfile('evernote')\r\n startfile('filezilla')\r\n startfile('D:\\Clients')\r\n\r\ndef goto(query=False):\r\n destination = query or jenny.ask('Where do you want to go')\r\n mapped_directories = 
jenny.config('directories')\r\n if destination in mapped_directories:\r\n fpath = mapped_directories[destination]\r\n fullPath = path.abspath(fpath)\r\n if path.exists(fullPath):\r\n startfile(fullPath)\r\n return\r\n\r\n jenny.output('Directory not found')","repo_name":"sakydev/jenny","sub_path":"linux/modules/files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":3021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6844586217","text":"import util\r\nimport sys\r\nimport time\r\nimport datetime\r\n\r\n\r\n\r\n\r\nsearchList = {\"local flower delivery\", \r\n \"local flowers\", \r\n \"local flowers near me\",\r\n \"flowers same day delivery\", \r\n \"local flower shop\",\r\n \"local florist\",\r\n \"local florist shop\",\r\n \"local florist near me\",\r\n \"local flower delivery washington dc\",\r\n \"local flower delivery kensington md\",\r\n \"local flower delivery olney md\",\r\n\r\n } \r\n\r\n\r\n\r\nrankList = list()\r\n\r\nd = datetime.datetime.now().strftime('%m/%d/%Y')\r\n\r\n\r\nfor searchText in searchList: \r\n try:\r\n rank = util.googleSearch(searchText, \"localflowersonline\", 100)\r\n print(\"Rank: {0} : {1}\".format(rank,searchText))\r\n rankList.append(rank)\r\n time.sleep(30)\r\n except Exception as ex:\r\n print(\"Error: {0}\".format(ex))\r\n time.sleep(300)\r\n pass\r\n\r\nrank = sum(rankList)/len(rankList)\r\nprint(\"National Rank: {0} : {1}\".format(rank,\"Average\"))\r\nlog = \"echo {0},{1},{2},,{3},{4},{5},{6},{7} >> google-daily-national.csv\".format(d,rank,rank,rankList[0],rankList[1],rankList[2],rankList[3],rankList[4])\r\n\r\nprint(\"CMD: {0}\".format(log))\r\n\r\nutil.runCmd(log)\r\n\r\n\r\nprint(\"Rank: {0} : {1}\".format(sum(rankList)/len(rankList),\"Average\"))","repo_name":"tjohnson-github/python-projects","sub_path":"google-search-national.py","file_name":"google-search-national.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7683564845","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\nimport matplotlib.cm as cm\nfrom matplotlib.colors import from_levels_and_colors\nfrom netCDF4 import Dataset, num2date\nfrom mpl_toolkits.basemap import Basemap\nfrom mpl_toolkits.basemap import shiftgrid\n\n#--------------------------------\n# Constants\n#--------------------------------\n\n# gravitational acceleration (m/s^2)\ng = 9.81\n# specific heat of seawater (J/kg/degC)\ncp = 3985.0\n# Von Karman constant\nkappa = 0.4\n# reference density of seawater (kg/m^3)\nrho_0 = 1027.0\n# reference salinity (psu)\nS_0 = 35.0\n# reference temperature (degC)\nT_0 = 10.0\n# constant thermal expansion coefficient (1/degC)\nalpha_0 = 1.65531e-4\n# constant saline contraction coefficient (1/psu)\nbeta_0 = 7.59494e-4\n\n#--------------------------------\n# GOTMProfile\n#--------------------------------\nclass GOTMProfile(object):\n\n \"\"\"GOTMProfile object\"\"\"\n\n def __init__(self, time=None, z=None, data=None, name=None):\n \"\"\"Initialize GOTMProfile\n\n :time: (1D numpy array/datetime object) time\n :z: (1D numpy array) vertical coordinate\n :data: (2D numpy array) data at each time and z\n :name: (str) name of variable\n\n \"\"\"\n self.time = time\n self.z = z\n self.data = data\n self.name = name\n try:\n self.data_mean = np.mean(data, axis=0)\n except:\n self.data_mean = None\n\n def plot(self, axis=None, xlim=None, ylim=None,\n xlabel=None, 
ylabel=None, title=None,\n ptype='contourf', **kwargs):\n \"\"\"Plot the Hovmoller diagram (time - z)\n\n :axis: (matplotlib.axes, optional) axis to plot figure on\n :xlim: ([float, float], optional) upper and lower limits of the x-axis\n :ylim: ([float, float], optional) upper and lower limits of the y-axis\n :xlabel: (str, optional) x-label, 'Time' by default, 'off' to turn it off\n :ylabel: (str, optional) y-label, 'Depth (m)' by default, 'off' to turn it off\n :title: (str, optional) title\n :ptype: (str, optional) plot type, valid values: contourf (default), pcolor\n :**kwargs: (keyword arguments) to be passed to matplotlib.pyplot.contourf() or\n matplotlib.pyplot.pcolor() depending on ptype\n :returns: (matplotlib figure object) figure\n\n \"\"\"\n # use current axis if not specified\n if axis is None:\n axis = plt.gca()\n # plot type\n if ptype == 'contourf':\n fig = axis.contourf(self.time, self.z, np.transpose(self.data), **kwargs)\n elif ptype == 'pcolor':\n fig = axis.pcolor(self.time, self.z, np.transpose(self.data), **kwargs)\n else:\n raise ValueError('Plot type (ptype) should be \\'contourf\\' or \\'pcolor\\', got {}.'.format(ptype))\n # x- and y-label, turn off by passing in 'off'\n if xlabel is None:\n axis.set_xlabel('Time')\n else:\n if xlabel != 'off':\n axis.set_xlabel(xlabel)\n if ylabel is None:\n axis.set_ylabel('Depth (m)')\n else:\n if ylabel != 'off':\n axis.set_ylabel(ylabel)\n # x- and y-limits\n if xlim is not None:\n axis.set_xlim(xlim)\n if ylim is not None:\n axis.set_ylim(ylim)\n # return figure\n return fig\n\n def plot_mean(self, axis=None, xlim=None, ylim=None,\n xlabel=None, ylabel=None, title=None, **kwargs):\n \"\"\"Plot the mean profile\n\n :axis: (matplotlib.axes, optional) axis to plot figure on\n :xlim: ([float, float], optional) upper and lower limits of the x-axis\n :ylim: ([float, float], optional) upper and lower limits of the y-axis\n :xlabel: (str, optional) x-label, 'self.name' by default, 'off' to turn it off\n :ylabel: (str, optional) y-label, 'Depth (m)' by default, 'off' to turn it off\n :title: (str, optional) title\n :**kwargs: (keyword arguments) to be passed to matplotlib.pyplot.plot()\n :returns: (matplotlib figure object) figure\n\n \"\"\"\n # use current axis if not specified\n if axis is None:\n axis = plt.gca()\n # plot figure\n fig = plt.plot(self.data_mean, self.z, **kwargs)\n # x- and y-label, turn off by passing in 'off'\n if xlabel is None:\n axis.set_xlabel(self.name)\n else:\n if xlabel != 'off':\n axis.set_xlabel(xlabel)\n if ylabel is None:\n axis.set_ylabel('Depth (m)')\n else:\n if ylabel != 'off':\n axis.set_ylabel(ylabel)\n # x- and y-limits\n if xlim is not None:\n axis.set_xlim(xlim)\n if ylim is not None:\n axis.set_ylim(ylim)\n # return figure\n return fig\n\n\n#--------------------------------\n# GOTMTimeseries\n#--------------------------------\nclass GOTMTimeseries(object):\n\n \"\"\"GOTMTimeseries object\"\"\"\n\n def __init__(self, time=None, data=None, name=None):\n \"\"\"Initialize GOTMTimeseries\n\n :time: (1D numpy array/datetime object) time\n :data: (1D numpy array) data at each location\n :name: (str) name of variable\n\n \"\"\"\n\n self.time = time\n self.data = data\n self.name = name\n try:\n self.data_mean = np.mean(data, axis=0)\n except TypeError:\n self.data_mean = None\n\n def plot(self, axis=None, xlim=None, ylim=None,\n xlabel=None, ylabel=None, title=None, **kwargs):\n \"\"\"Plot timeseries\n\n :axis: (matplotlib.axes, optional) axis to plot figure on\n :xlim: ([float, float], optional) 
upper and lower limits of the x-axis\n :ylim: ([float, float], optional) upper and lower limits of the y-axis\n :xlabel: (str, optional) x-label, 'Time' by default, 'off' to turn it off\n :ylabel: (str, optional) y-label, 'self.name' by default, 'off' to turn it off\n :title: (str, optional) title\n :**kwargs: (keyword arguments) to be passed to matplotlib.pyplot.plot()\n :returns: (matplotlib figure object) figure\n\n \"\"\"\n # use current axis if not specified\n if axis is None:\n axis = plt.gca()\n # plot figure\n fig = axis.plot(self.time, self.data, **kwargs)\n # x- and y-label, turn off by passing in 'off'\n if xlabel is None:\n axis.set_xlabel('Time')\n else:\n if xlabel != 'off':\n axis.set_xlabel(xlabel)\n if ylabel is None:\n axis.set_ylabel(self.name)\n else:\n if ylabel != 'off':\n axis.set_ylabel(ylabel)\n # x- and y-limits\n if xlim is not None:\n axis.set_xlim(xlim)\n if ylim is not None:\n axis.set_ylim(ylim)\n # return figure\n return fig\n\n\n#--------------------------------\n# GOTMMap\n#--------------------------------\n\nclass GOTMMap(object):\n\n \"\"\"GOTMMap object\"\"\"\n\n def __init__(self, data=None, lon=None, lat=None, name=None, units=None):\n \"\"\"Initialize GOTMMap\n\n :data: (1D numpy array) data at each location\n :lon: (1D numpy array) longitude\n :lat: (1D numpy array) latitude\n :name: (str) name of variable\n :units: (str) units of variable\n\n \"\"\"\n self.data = data\n self.lon = lon\n self.lat = lat\n self.name = name\n self.units = units\n\n def __neg__(self):\n \"\"\"Return the negated GOTMMap object\n\n \"\"\"\n out = GOTMMap(data=-self.data, lon=self.lon, lat=self.lat, name=self.name, units=self.units)\n return out\n\n def __add__(self, other):\n \"\"\"Add 'other' to a GOTMMap object\n\n :other: (float, int or GOTMMap object) object to be added\n\n \"\"\"\n if isinstance(other, float) or isinstance(other, int):\n out = GOTMMap(data=self.data+other, lon=self.lon, lat=self.lat, name=self.name, units=self.units)\n elif isinstance(other, GOTMMap):\n assert self.name == other.name, \"GOTMMap object to be added has a different name.\"\n assert self.units == other.units, \"GOTMMap object to be added has a different unit.\"\n data = np.zeros(self.data.size)\n loc_self = list(zip(self.lon, self.lat))\n loc_other = list(zip(other.lon, other.lat))\n for idx, val in enumerate(loc_self):\n if val in loc_other:\n idx_other = loc_other.index(val)\n data[idx] = self.data[idx] + other.data[idx_other]\n else:\n data[idx] = np.nan\n out = GOTMMap(data=data, lon=self.lon, lat=self.lat, name=self.name, units=self.units)\n else:\n raise TypeError('Addition is not defined between a GOTMMap object and a {} object'.format(type(other)))\n return out\n\n def __sub__(self, other):\n \"\"\"Subtract 'other' from a GOTMMap object\n\n :other: (float, int, or GOTMMap object) object to be subtracted\n\n \"\"\"\n if isinstance(other, float) or isinstance(other, int):\n out = GOTMMap(data=self.data-other, lon=self.lon, lat=self.lat, name=self.name, units=self.units)\n elif isinstance(other, GOTMMap):\n assert self.name == other.name, \"GOTMMap object to be subtracted has a different name.\"\n assert self.units == other.units, \"GOTMMap object to be subtracted has a different unit.\"\n data = np.zeros(self.data.size)\n loc_self = list(zip(self.lon, self.lat))\n loc_other = list(zip(other.lon, other.lat))\n for idx, val in enumerate(loc_self):\n if val in loc_other:\n idx_other = loc_other.index(val)\n data[idx] = self.data[idx] - other.data[idx_other]\n else:\n data[idx] = 
np.nan\n out = GOTMMap(data=data, lon=self.lon, lat=self.lat, name=self.name, units=self.units)\n else:\n raise TypeError('Subtraction is not defined between a GOTMMap object and a {} object'.format(type(other)))\n return out\n\n def __mul__(self, other):\n \"\"\"Multiply a GOTMMap object by 'other'\n\n :other: (float, int, or GOTMMap object) object to be multiplied\n\n \"\"\"\n if isinstance(other, float) or isinstance(other, int):\n out = GOTMMap(data=self.data*other, lon=self.lon, lat=self.lat, name=self.name, units=self.units)\n elif isinstance(other, GOTMMap):\n data = np.zeros(self.data.size)\n loc_self = list(zip(self.lon, self.lat))\n loc_other = list(zip(other.lon, other.lat))\n for idx, val in enumerate(loc_self):\n if val in loc_other:\n idx_other = loc_other.index(val)\n data[idx] = self.data[idx] * other.data[idx_other]\n else:\n data[idx] = np.nan\n out = GOTMMap(data=data, lon=self.lon, lat=self.lat, name=self.name+' * '+other.name, units=self.units+' * '+other.units)\n else:\n raise TypeError('Multiplication is not defined between a GOTMMap object and a {} object'.format(type(other)))\n return out\n\n def __truediv__(self, other):\n \"\"\"Divide a GOTMMap object by 'other'\n\n :other: (float, int, or GOTMMap object) object to be divided\n\n \"\"\"\n if isinstance(other, float) or isinstance(other, int):\n out = GOTMMap(data=self.data/other, lon=self.lon, lat=self.lat, name=self.name, units=self.units)\n elif isinstance(other, GOTMMap):\n data = np.zeros(self.data.size)\n loc_self = list(zip(self.lon, self.lat))\n loc_other = list(zip(other.lon, other.lat))\n for idx, val in enumerate(loc_self):\n if val in loc_other:\n idx_other = loc_other.index(val)\n data[idx] = self.data[idx] / other.data[idx_other]\n else:\n data[idx] = np.nan\n if other.name == self.name:\n name = 'Ratio of '+self.name\n units = 'None'\n else:\n name = self.name+' / '+other.name\n units = self.units+' / '+other.units\n out = GOTMMap(data=data, lon=self.lon, lat=self.lat, name=name, units=units)\n else:\n raise TypeError('Division is not defined between a GOTMMap object and a {} object'.format(type(other)))\n return out\n\n def save(self, path):\n \"\"\"Save GOTMMap object\n\n :path: (str) path of file to save\n :returns: none\n\n \"\"\"\n np.savez(path, data=self.data, lon=self.lon, lat=self.lat, name=self.name, units=self.units)\n\n def load(self, path):\n \"\"\"Load data to GOTMMap object\n\n :path: (str) path of file to load\n :returns: (GOTMMap object)\n\n \"\"\"\n dat = np.load(path, allow_pickle=True)\n self.__init__(data=dat['data'], lon=dat['lon'], lat=dat['lat'],\n name=str(dat['name']), units=str(dat['units']))\n return self\n\n def masked(self, mask, mask_data=np.nan):\n \"\"\"Apply mask to GOTMMap object. 
The mask should also be a GOTMMap object,\n with 1 for valid and 0 for invalid.\n\n :mask: (GOTMMap object) mask, 1 for valid, 0 for invalid\n :mask_data: (optional) values to be filled in masked points\n :return: (GOTMMap object) masked GOTMMap\n\n \"\"\"\n if mask.data.size != self.data.size:\n raise ValueError('The dimension of mask does not match.')\n dat = self.data\n self.data = np.where(mask.data==0, mask_data, dat)\n\n def plot(self, axis=None, levels=None, add_colorbar=True, cmap='rainbow', **kwargs):\n \"\"\"Plot scatters on a map\n\n :axis: (matplotlib.axes, optional) axis to plot figure on\n :levels: (list, optional) list of levels\n :add_colorbar: (bool) do not add colorbar if False\n :cmap: (str, optional) colormap\n :**kwargs: (keyword arguments) to be passed to mpl_toolkits.basemap.scatter()\n :return: (matplotlib figure object) figure\n\n \"\"\"\n # use current axis if not specified\n if axis is None:\n axis = plt.gca()\n # plot map\n m = Basemap(projection='cyl', llcrnrlat=-72, urcrnrlat=72, llcrnrlon=20, urcrnrlon=380, ax=axis)\n # plot coastlines, draw label meridians and parallels.\n m.drawcoastlines()\n m.drawmapboundary(fill_color='lightgray')\n m.fillcontinents(color='gray',lake_color='lightgray')\n m.drawparallels(np.arange(-90.,91.,30.), labels=[1,0,0,1])\n m.drawmeridians(np.arange(-180.,181.,60.), labels=[1,0,0,1])\n data = self.data\n lat = self.lat\n lon = self.lon\n # shift longitude\n lon = np.where(lon < 20., lon+360., lon)\n x, y = m(lon, lat)\n # manually mapping levels to the colormap if levels is passed in,\n # otherwise linear mapping\n if levels is not None:\n bounds = np.array(levels)\n norm = colors.BoundaryNorm(boundaries=bounds, ncolors=256)\n fig = m.scatter(x, y, marker='.', s=32, c=data, norm=norm, cmap=plt.cm.get_cmap(cmap), **kwargs)\n else:\n fig = m.scatter(x, y, marker='.', s=32, c=data, cmap=plt.cm.get_cmap(cmap), **kwargs)\n # add colorbar\n if add_colorbar:\n cb = m.colorbar(fig, ax=axis)\n cb.formatter.set_powerlimits((-2, 2))\n cb.update_ticks()\n return fig\n\n def zonal_mean(self):\n \"\"\"Calculate the zonal mean.\n\n :returns: (numpy array) array of latitude and zonal mean data\n\n \"\"\"\n lat_all = self.lat\n lat, counts = np.unique(lat_all, return_counts=True)\n nlat = lat.size\n val = np.zeros(nlat)\n for i in np.arange(nlat):\n if counts[i] >=5:\n tmp_arr = self.data[lat_all==lat[i]]\n val[i] = np.nanmean(tmp_arr)\n else:\n val[i] = None\n return lat, val\n\n#--------------------------------\n# GOTMOutputData\n#--------------------------------\n\nclass GOTMOutputData(object):\n\n \"\"\"GOTM Output data object\"\"\"\n\n def __init__(self, path, init_time_location=True):\n \"\"\"Initialize the data\n\n :path: (str) path to the GOTM output netCDF file\n :init_time_location: (bool) do not initialize time and lon etc. 
if\n False, which is used when constructing large\n dataset to reduce initialization time.\n\n \"\"\"\n # path of data\n self._path = path\n # open the dataset\n self.open()\n # time and location if requested\n if init_time_location:\n # latitude\n self.lat = self.dataset.variables['lat'][:]\n # Coriolis parameter\n self.f = 4.*np.pi/86400*np.sin(self.lat*np.pi/180.)\n # longitude\n self.lon = self.dataset.variables['lon'][:]\n # time\n time = self.dataset.variables['time']\n self.time = time[:]\n self.time_units = time.units\n self.time_calendar = time.calendar\n # list of dimensions\n self.list_dimensions = list(self.dataset.dimensions.keys())\n # list of variables\n list_variables = list(self.dataset.variables.keys())\n self.list_variables = [x for x in list_variables if x not in self.list_dimensions]\n # list of time series and profiles\n self.list_timeseries = []\n self.list_profile = []\n for var in self.list_variables:\n ndim = self.dataset.variables[var].ndim\n if ndim == 4:\n self.list_profile.append(var)\n elif ndim == 3:\n self.list_timeseries.append(var)\n else:\n pass\n # update list of variables to include the derived ones\n self.list_profile = self.list_profile + self._get_derived_profile(list_keys=True)\n self.list_timeseries = self.list_timeseries + self._get_derived_timeseries(list_keys=True)\n # close dataset\n self.close()\n\n def open(self):\n \"\"\"Open netCDF dataset\n\n \"\"\"\n # netCDF4 Dataset\n self.dataset = Dataset(self._path, 'r')\n\n def close(self):\n \"\"\"Close netCDF dataset\n\n \"\"\"\n if self.dataset.isopen():\n self.dataset.close()\n\n def read_profile(self, var, tidx_start=None, tidx_end=None, ignore_time=False, **kwargs):\n \"\"\"Return profile variable and z (fixed in time)\n\n :var: (str) variable name\n :tidx_start: (int, optional) starting index\n :tidx_end: (int, optional) ending index\n :ignore_time: (bool) ignore time dimension if True\n :**kwargs: (keyword arguments) to be passed to _get_derived_profile()\n :returns: (GOTMProfile object) profile\n\n \"\"\"\n # open dataset\n self.open()\n # preprocess variable name, change sign if variable name start with '-'\n if var[0] == '-':\n var = var[1:]\n change_sign = True\n else:\n change_sign = False\n # read variable\n if var in self.list_variables:\n dat = self.dataset.variables[var][tidx_start:tidx_end,:,0,0]\n coord = self.dataset.variables[var].coordinates\n if 'zi' in coord:\n z = self.dataset.variables['zi'][0,:,0,0]\n elif 'z' in coord:\n z = self.dataset.variables['z'][0,:,0,0]\n else:\n raise AttributeError('z coordinate not found.')\n else:\n dat, z = self._get_derived_profile(var)(tidx_start=tidx_start, tidx_end=tidx_end, **kwargs)\n # create GOTMProfile object\n if not ignore_time:\n time = num2date(self.time[tidx_start:tidx_end], units=self.time_units, calendar=self.time_calendar)\n else:\n time = None\n # change sign if variable name start with '-'\n if change_sign:\n dat = -dat\n out = GOTMProfile(time=time, z=z, data=dat, name=var)\n # close data\n self.close()\n return out\n\n def read_timeseries(self, var, tidx_start=None, tidx_end=None, ignore_time=False, **kwargs):\n \"\"\"Return timeseries of variable [var]\n\n :var: (str) variable name\n :tidx_start: (int, optional) starting index\n :tidx_end: (int, optional) ending index\n :ignore_time: (bool) ignore time dimension if True\n :**kwargs: (keyword arguments) to be passed to _get_derived_timeseries()\n :returns: (GOTMTimeseries object) time series\n\n \"\"\"\n # open dataset\n self.open()\n # preprocess variable name, change 
sign if variable name start with '-'\n if var[0] == '-':\n var = var[1:]\n change_sign = True\n else:\n change_sign = False\n # read variable\n if var in self.list_variables:\n dat = self.dataset.variables[var][tidx_start:tidx_end,0,0]\n else:\n dat = self._get_derived_timeseries(var)(tidx_start=tidx_start, tidx_end=tidx_end, **kwargs)\n # create GOTMTimeseries object\n if not ignore_time:\n time = num2date(self.time[tidx_start:tidx_end], units=self.time_units, calendar=self.time_calendar)\n else:\n time = None\n # change sign if variable name start with '-'\n if change_sign:\n dat = -dat\n out = GOTMTimeseries(time=time, data=dat, name=var)\n # close data\n self.close()\n return out\n\n def diag_forcing_regime_BG12(self, tidx_start=None, tidx_end=None, cfrac=0.25):\n \"\"\"Diagnose the forcing regime according to the dissipation based\n definition in Belcher et al., 2012\n\n :tidx_start: (int, optional) starting index\n :tidx_end: (int, optional) ending index\n :cfrac: (float, optional) critical fraction, 0 0:\n unstable = -1\n else:\n unstable = 1\n # forcing regime in unstable condition\n mask1 = ts_bflux < 0\n mask2 = ts_ustar > 1e-10\n mask3 = ts_laturb > 1e-10\n bmask = [all(imask) for imask in zip(mask1, mask2, mask3)]\n if sum(bmask) < ts_bflux.size/3:\n # not enough data\n forcing_regime = 8\n else:\n comp_ST = 2.0*(1.0-np.exp(-0.5*ts_laturb[bmask]))\n comp_LT = 0.22/ts_laturb[bmask]**2\n comp_CT = -0.3*ts_bflux[bmask]*ts_hbl[bmask]/ts_ustar[bmask]**3\n comp_total = comp_ST + comp_LT + comp_CT\n frac_ST = comp_ST/comp_total\n frac_LT = comp_LT/comp_total\n frac_CT = comp_CT/comp_total\n mfrac_ST = np.mean(frac_ST)\n mfrac_LT = np.mean(frac_LT)\n mfrac_CT = np.mean(frac_CT)\n if mfrac_LT < cfrac and mfrac_CT < cfrac:\n # ST dominant\n forcing_regime = 1\n elif mfrac_ST < cfrac and mfrac_CT < cfrac:\n # LT dominant\n forcing_regime = 2\n elif mfrac_ST < cfrac and mfrac_LT < cfrac:\n # CT dominant\n forcing_regime = 3\n elif mfrac_ST >= cfrac and mfrac_LT >= cfrac and mfrac_CT < cfrac:\n # combined ST and LT\n forcing_regime = 4\n elif mfrac_ST >= cfrac and mfrac_CT >= cfrac and mfrac_LT < cfrac:\n # combined ST and CT\n forcing_regime = 5\n elif mfrac_LT >= cfrac and mfrac_CT >= cfrac and mfrac_ST < cfrac:\n # combined LT and CT\n forcing_regime = 6\n else:\n # combined ST, LT and CT\n forcing_regime = 7\n return forcing_regime*unstable\n\n def diag_forcing_regime_LF17(self, tidx_start=None, tidx_end=None, cfrac=0.25):\n \"\"\"Diagnose the forcing regime according to the entrainment buoyancy\n flux based definition in Li and Fox-Kemper, 2017\n\n :tidx_start: (int, optional) starting index\n :tidx_end: (int, optional) ending index\n :cfrac: (float, optional) critical fraction, 0 0:\n unstable = -1\n else:\n unstable = 1\n # forcing regime in unstable condition\n mask1 = ts_bflux < 0\n mask2 = ts_ustar > 1e-10\n mask3 = ts_sl > 1e-10\n bmask = [all(imask) for imask in zip(mask1, mask2, mask3)]\n if sum(bmask) < ts_bflux.size/3:\n # not enough data\n forcing_regime = 8\n else:\n comp_ST = 0.17\n comp_LT = 0.083/ts_sl[bmask]**2\n comp_CT = -0.15*ts_bflux[bmask]*ts_hbl[bmask]/ts_ustar[bmask]**3\n comp_total = comp_ST + comp_LT + comp_CT\n frac_ST = comp_ST/comp_total\n frac_LT = comp_LT/comp_total\n frac_CT = comp_CT/comp_total\n mfrac_ST = np.mean(frac_ST)\n mfrac_LT = np.mean(frac_LT)\n mfrac_CT = np.mean(frac_CT)\n if mfrac_LT < cfrac and mfrac_CT < cfrac:\n # ST dominant\n forcing_regime = 1\n elif mfrac_ST < cfrac and mfrac_CT < cfrac:\n # LT dominant\n forcing_regime = 2\n elif 
mfrac_ST < cfrac and mfrac_LT < cfrac:\n # CT dominant\n forcing_regime = 3\n elif mfrac_ST >= cfrac and mfrac_LT >= cfrac and mfrac_CT < cfrac:\n # combined ST and LT\n forcing_regime = 4\n elif mfrac_ST >= cfrac and mfrac_CT >= cfrac and mfrac_LT < cfrac:\n # combined ST and CT\n forcing_regime = 5\n elif mfrac_LT >= cfrac and mfrac_CT >= cfrac and mfrac_ST < cfrac:\n # combined LT and CT\n forcing_regime = 6\n else:\n # combined ST, LT and CT\n forcing_regime = 7\n return forcing_regime*unstable\n\n def _get_derived_profile(self, name=None, list_keys=False):\n \"\"\"Find the derived profile variable\n\n :name: (str) variable name\n :list_keys: (bool) simply return the list of keys if True\n :returns: (function/str) corresponding function or list of keys\n\n \"\"\"\n switcher = {\n 'buoy': self._get_buoyancy,\n 'spice': self._get_spice,\n 'wt': self._get_wt,\n 'ws': self._get_ws\n }\n if list_keys:\n return list(switcher.keys())\n elif name in switcher.keys():\n return switcher.get(name)\n else:\n raise ValueError('Variable \\'{}\\' not found.'.format(name))\n\n def _get_buoyancy(self, tidx_start=None, tidx_end=None):\n \"\"\"Calculate the buoyancy from temperature and salinity\n assuming linear equation of state\n\n :tidx_start: (int, optional) starting index\n :tidx_end: (int, optional) ending index\n :returns: (numpy array) buoyancy\n\n \"\"\"\n # temperature and salinity\n temp = self.dataset.variables['temp'][tidx_start:tidx_end,:,0,0]\n salt = self.dataset.variables['salt'][tidx_start:tidx_end,:,0,0]\n # buoyancy\n buoy = g*alpha_0*(temp-T_0)-g*beta_0*(salt-S_0)\n # z\n z = self.dataset.variables['z'][0,:,0,0]\n return buoy, z\n\n def _get_spice(self, tidx_start=None, tidx_end=None):\n \"\"\"Calculate the spice from temperature and salinity\n assuming linear equation of state\n\n :tidx_start: (int, optional) starting index\n :tidx_end: (int, optional) ending index\n :returns: (numpy array) spice\n\n \"\"\"\n # temperature and salinity\n temp = self.dataset.variables['temp'][tidx_start:tidx_end,:,0,0]\n salt = self.dataset.variables['salt'][tidx_start:tidx_end,:,0,0]\n # spice\n spice = g*alpha_0*(temp-T_0)+g*beta_0*(salt-S_0)\n # z\n z = self.dataset.variables['z'][0,:,0,0]\n return spice, z\n\n def _get_wt(self, tidx_start=None, tidx_end=None):\n \"\"\"Find the vertical temperature flux (K m/s)\n\n :tidx_start: (int, optional) starting index\n :tidx_end: (int, optional) ending index\n :returns: (numpy array) surface buoyancy flux\n\n \"\"\"\n # temperature profiles\n temp = self.dataset.variables['temp'][tidx_start:tidx_end,:,0,0]\n # turbulent diffusivity of heat\n nuh = self.dataset.variables['nuh'][tidx_start:tidx_end,:,0,0]\n # nonlocal temperature flux\n gamh = self.dataset.variables['gamh'][tidx_start:tidx_end,:,0,0]\n # layer thickness\n h = self.dataset.variables['h'][tidx_start:tidx_end,:,0,0]\n # depth\n zi = self.dataset.variables['zi'][0,1:,0,0]\n # surface temperature flux\n wt0 = -self.dataset.variables['heat'][tidx_start:tidx_end,0,0]/cp/rho_0\n # vertical temperature flux\n wt = np.zeros(temp.shape)\n wt[:,-1] = wt0\n wt[:,0:-1] = -2.0*nuh[:,1:-1]*(temp[:,1:]-temp[:,0:-1])/(h[:,1:]+h[:,0:-1]) + gamh[:,1:-1]\n return wt, zi\n\n def _get_ws(self, tidx_start=None, tidx_end=None):\n \"\"\"Find the vertical salinity flux (g/kg m/s)\n\n :tidx_start: (int, optional) starting index\n :tidx_end: (int, optional) ending index\n :returns: (numpy array) surface buoyancy flux\n\n \"\"\"\n # salinity profiles\n salt = self.dataset.variables['salt'][tidx_start:tidx_end,:,0,0]\n # 
surface salinity\n salt0 = salt[:,-1]\n # turbulent diffusivity of salinity\n nus = self.dataset.variables['nus'][tidx_start:tidx_end,:,0,0]\n # nonlocal salinity flux\n gams = self.dataset.variables['gams'][tidx_start:tidx_end,:,0,0]\n # layer thickness\n h = self.dataset.variables['h'][tidx_start:tidx_end,:,0,0]\n # depth\n zi = self.dataset.variables['zi'][0,1:,0,0]\n # surface salinity flux\n ws0 = (self.dataset.variables['precip'][tidx_start:tidx_end,0,0]\n - self.dataset.variables['evap'][tidx_start:tidx_end,0,0])*salt0\n # vertical temperature flux\n ws = np.zeros(salt.shape)\n ws[:,-1] = ws0\n ws[:,0:-1] = -2.0*nus[:,1:-1]*(salt[:,1:]-salt[:,0:-1])/(h[:,1:]+h[:,0:-1]) + gams[:,1:-1]\n return ws, zi\n\n def _get_derived_timeseries(self, name=None, list_keys=False):\n \"\"\"Find the derived variable\n\n :name: (str) variable name\n :list_keys: (bool) simply return the list of keys if True\n :returns: (function/str) corresponding function or list of keys\n\n \"\"\"\n switcher = {\n 'LaTurb': self._get_la_turb,\n 'LaSL': self._get_la_sl,\n 'hoLmo': self._get_h_over_lmo,\n 'bflux': self._get_bflux,\n 'dPEdt': self._get_dpedt,\n 'mixEf1': self._get_dpedt_over_ustar3,\n 'mld_maxNsqr': self._get_mld_maxNsqr,\n 'mld_deltaT': self._get_mld_deltaT,\n 'mld_deltaR': self._get_mld_deltaR,\n 'bld_nuh': self._get_bld_nuh,\n 'bld_tke': self._get_bld_tke,\n 'bld_kpp': self._get_bld_kpp,\n 'bld_epbl': self._get_bld_epbl,\n 'Nsqr_mld': self._get_Nsqr_mld,\n 'PE': self._get_pez\n }\n if list_keys:\n return list(switcher.keys())\n elif name in switcher.keys():\n return switcher.get(name)\n else:\n raise ValueError('Variable \\'{}\\' not found.'.format(name))\n\n def _get_la_turb(self, tidx_start=None, tidx_end=None):\n \"\"\"Find the turbulent Langmuir number defined as\n La_t = sqrt{u^*/u^S}\n\n :tidx_start: (int, optional) starting index\n :tidx_end: (int, optional) ending index\n :returns: (numpy array) Langmuir number\n\n \"\"\"\n # friction velocity\n ustar = self.dataset.variables['u_taus'][tidx_start:tidx_end,0,0]\n # surface Stokes drift\n usx = self.dataset.variables['u0_stokes'][tidx_start:tidx_end,0,0]\n usy = self.dataset.variables['v0_stokes'][tidx_start:tidx_end,0,0]\n us = np.sqrt(usx**2.+usy**2.)\n # calculate Langmuir number\n la = np.sqrt(ustar/us)\n return la\n\n def _get_la_sl(self, tidx_start=None, tidx_end=None):\n \"\"\"Find the surface layer averaged Langmuir number defined as\n La_{SL} = sqrt{u^*/_{SL}}, where\n _{SL} = int_{-0.2h_b}^0 u^S(z) dz\n\n :tidx_start: (int, optional) starting index\n :tidx_end: (int, optional) ending index\n :returns: (numpy array) Langmuir number\n\n \"\"\"\n # friction velocity\n ustar = self.dataset.variables['u_taus'][tidx_start:tidx_end,0,0]\n # Stokes drift profiles\n ustokes = self.dataset.variables['u_stokes'][tidx_start:tidx_end,:,0,0]\n vstokes = self.dataset.variables['v_stokes'][tidx_start:tidx_end,:,0,0]\n zi = self.dataset.variables['zi'][tidx_start:tidx_end,:,0,0]\n h = self.dataset.variables['h'][tidx_start:tidx_end,:,0,0]\n # boundary layer depth\n hbl = self._get_derived_timeseries('bld_nuh')(tidx_start=tidx_start, tidx_end=tidx_end)\n # surface layer: upper 20% of the boundary layer\n hsl = -0.2*hbl\n # loop over time to calculate the surface layer averaged Stokes drift\n # note that zi has indices 0:nlev whereas z has indices 0:nlev-1, this is\n # different from the indices in GOTM\n nt = ustar.shape[0]\n ussl = np.zeros(nt)\n vssl = np.zeros(nt)\n for i in np.arange(nt):\n ihsl = np.argmin(zi[i,:]0:\n idx_min = np.max(idxlist)\n 
mld[i] = z[i,idx_min]\n else:\n mld[i] = np.min(z[i,:])\n return np.abs(mld)\n\n def _get_mld_deltaR(self, tidx_start=None, tidx_end=None, deltaR=0.03, zRef=-10):\n \"\"\"Find the mixed layer depth defined as the depth where\n the potential density difference from the reference level first exceed\n a threshold value\n\n :tidx_start: (int, optional) starting index\n :tidx_end: (int, optional) ending index\n :deltaR: (float, optional) potential density threshold in kg/m^3\n :zRef: (float, optional) depth of the reference level\n :returns: (numpy array) mixed layer depth\n\n \"\"\"\n Rho = self.dataset.variables['rho'][tidx_start:tidx_end,:,0,0]\n z = self.dataset.variables['z'][tidx_start:tidx_end,:,0,0]\n nt = Rho.shape[0]\n mld = np.zeros(nt)\n for i in np.arange(nt):\n idx_zref = np.argmin(np.abs(z[i,:]-zRef))\n dRho = Rho[i,:]-Rho[i,idx_zref]\n # ignore the points above the reference level\n dRho[idx_zref:] = -99.\n # ignore nan\n dRho[np.isnan(dRho)] = -99.\n # find the maximum index (closest to the surface) where the density\n # difference is greater than the threshold value\n idxlist = np.where(dRho>=deltaR)\n if idxlist[0].size>0:\n idx_min = np.max(idxlist)\n mld[i] = z[i,idx_min]\n else:\n mld[i] = np.min(z[i,:])\n return np.abs(mld)\n\n def _get_bld_nuh(self, tidx_start=None, tidx_end=None, nuh_bg=1e-5):\n \"\"\"Find the boundary layer depth defined as the depth where\n the turbulent diffusivity first drops to a background value\n\n :tidx_start: (int, optional) starting index\n :tidx_end: (int, optional) ending index\n :nuh_bg: (float, optional) background diffusivity\n :returns: (numpy array) mixed layer depth\n\n \"\"\"\n nuh = self.dataset.variables['nuh'][tidx_start:tidx_end,:,0,0]\n z = self.dataset.variables['zi'][tidx_start:tidx_end,:,0,0]\n nt = nuh.shape[0]\n nz = nuh.shape[1]\n bld = np.zeros(nt)\n for i in np.arange(nt):\n idxlist = np.where(nuh[i,:]=cfrac) & (rz_LT>=cfrac) & (rz_CT=cfrac) & (rz_CT>=cfrac) & (rz_LT=cfrac) & (rz_CT>=cfrac) & (rz_ST>> LISTENING ON local:8000 >>>>> DO >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\nimport string\t\t\t\t\t\t\t\t\t\t#*\nimport secrets\t\t\t\t\t\t\t\t\t\t#*\nimport hashlib\t\t\t\t\t\t\t\t\t\t# **\nimport sys, os\n\nos.getcwd()\n'/Users/thomasperez/.ssh'\t\t\t\t\t\t\t# ok\nprint (os.getcwd())\t\t\t\t\t\t\t\t\t# non-op\nos.system('ssh root@allesrebel.com')\t\t\t\t# ok - in ' root@localhost:~# ' \nos.chdir(\"root/5540-final\")\t\t\t\t\t\t\t# non-op\nprint (os.getcwd())\t\t\t\t\t\t\t\t\t# non-op\n\nos.system('python3 server.py')\t#@ OK. Results in: \"Welcome to Ubuntu 22.04 LTS (GNU/Linux 5.15.0-41-generic x86_64) . . .\"\n\t\t\t\t\t\t\t\t\t\t\t\t\t# BUT ONLY is visible after user does ^D in console, manually.s\n\n# RESULTS@ eg:\n#About to listen on http://localhost:8000\n'''\n{'for': '67.150.3.111', 'c_csuites': 'TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM -SHA256:ECDHE-ECDSA-AES256-SHA38 \t\tetc etc etc\n'''\n\nprint(\"\\n\\nProcessing fingerprinting of client app comms over TLS\")\t# os.system usage?\n\n# Extract elements/features of the server-side (from a dict). 
\n\t# from eg., root@localhost:~# cd 5540-final / server.py / [listening localhost:1000] / [my IP]\n\t# \tfilter or not : \t\t\n\t\t# \t\tc_csuites (cipher)\n\t\t# \t\tc_curves (characteristic math algo)\n\t\t# \t\ts_proto\n\t\t# \t\ts_session (a hex str ~ 60 chars), \t \n\t\t# \t\tc_port (client) \t\t\n\t\t# \t\ts_ip\n\t\t# \t\ttcp_rtt\n\t\t\n# ** Condensed-usage:\t\t\nserverdict = {'c_csuites' : '_' , \t 'c_curves' : '_' ,\t 's_proto' :\t '_' ,\t 's_session' : '_' ,\t 'c_port' : '_' ,\t 's_ip' : '_' ,\t 'tcp_rtt' : '_'\t }\t\t# init\nprint(\"\\n\\nOriginal svr dict: \" + str(serverdict))\t\t\t\t\t# test\n\n# https://pythonguides.com/get-all-values-from-a-dictionary-python/\nresult = list(serverdict.values())\t\t\t\t\t\t\t\t\t# Extract values using py BI values().\nprint(\" \\n\\nResult : \" + str(result))\t\t\t\t\t\t\t\t# test\n\n# hash :\n# https://docs.python.org/3/library/hashlib.html\n# hashlib.sha224(result).hexdigest()\t\t\t\t\t\t\t\t# Non-op. \n# m = hashlib.sha224()\t\t\t\t\t\t\t\t\t\t\t\t# ?\n# m.block_size\t\t\t\t\t\t\t\t\t\t\t\t\t\t# ?\n\n\n# Share hash ------->\t \n\nos.system('exit')\n\n\n","repo_name":"TomEphraimPerez/CS5540-final","sub_path":"Wed-7-20-Google2fa.py","file_name":"Wed-7-20-Google2fa.py","file_ext":"py","file_size_in_byte":2679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28036298618","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\n\nfrom structgen.encoder import MPNEncoder\nfrom structgen.data import alphabet\nfrom structgen.utils import *\nfrom structgen.protein_features import ProteinFeatures\n\n\nclass HierarchicalEncoder(nn.Module):\n \n def __init__(self, args, node_in, edge_in):\n super(HierarchicalEncoder, self).__init__()\n self.node_in, self.edge_in = node_in, edge_in\n self.W_v = nn.Sequential(\n nn.Linear(self.node_in, args.hidden_size, bias=True),\n Normalize(args.hidden_size)\n )\n self.W_e = nn.Sequential(\n nn.Linear(self.edge_in, args.hidden_size, bias=True),\n Normalize(args.hidden_size)\n )\n self.layers = nn.ModuleList([\n MPNNLayer(args.hidden_size, args.hidden_size * 3, dropout=args.dropout)\n for _ in range(args.depth)\n ])\n for param in self.parameters():\n if param.dim() > 1:\n nn.init.xavier_uniform_(param)\n\n def forward(self, V, E, hS, E_idx, mask):\n h_v = self.W_v(V) # [B, N, H] \n h_e = self.W_e(E) # [B, N, K, H] \n nei_s = gather_nodes(hS, E_idx) # [B, N, K, H]\n\n # [B, N, 1] -> [B, N, K, 1] -> [B, N, K]\n vmask = gather_nodes(mask.unsqueeze(-1), E_idx).squeeze(-1)\n h = h_v\n for layer in self.layers:\n nei_v = gather_nodes(h, E_idx) # [B, N, K, H]\n nei_h = torch.cat([nei_v, nei_s, h_e], dim=-1)\n h = layer(h, nei_h, mask_attend=vmask) # [B, N, H]\n h = h * mask.unsqueeze(-1) # [B, N, H]\n return h\n\n\nclass HierarchicalDecoder(nn.Module):\n\n def __init__(self, args):\n super(HierarchicalDecoder, self).__init__()\n self.cdr_type = args.cdr_type\n self.k_neighbors = args.k_neighbors\n self.block_size = args.block_size\n self.update_freq = args.update_freq\n self.hidden_size = args.hidden_size\n self.pos_embedding = PosEmbedding(16)\n \n self.features = ProteinFeatures(\n top_k=args.k_neighbors, num_rbf=args.num_rbf,\n features_type='full',\n direction='bidirectional'\n )\n self.node_in, self.edge_in = self.features.feature_dimensions['full']\n self.O_d0 = nn.Linear(args.hidden_size, 12)\n self.O_d = nn.Linear(args.hidden_size, 12)\n self.O_s = nn.Linear(args.hidden_size, args.vocab_size)\n self.W_s = 
nn.Embedding(args.vocab_size, args.hidden_size)\n\n self.struct_mpn = HierarchicalEncoder(args, self.node_in, self.edge_in)\n self.seq_mpn = HierarchicalEncoder(args, self.node_in, self.edge_in)\n self.init_mpn = HierarchicalEncoder(args, 16, 32)\n self.rnn = nn.GRU(\n args.hidden_size, args.hidden_size, batch_first=True, \n num_layers=1, bidirectional=True\n ) \n self.W_stc = nn.Sequential(\n nn.Linear(args.hidden_size * 2, args.hidden_size),\n nn.ReLU(),\n )\n self.W_seq = nn.Sequential(\n nn.Linear(args.hidden_size * 2, args.hidden_size),\n nn.ReLU(),\n )\n\n self.ce_loss = nn.CrossEntropyLoss(reduction='none')\n self.huber_loss = nn.SmoothL1Loss(reduction='none')\n self.mse_loss = nn.MSELoss(reduction='none')\n\n for param in self.parameters():\n if param.dim() > 1:\n nn.init.xavier_uniform_(param)\n\n def init_struct(self, B, N, K):\n # initial V\n pos = torch.arange(N).cuda()\n V = self.pos_embedding(pos.view(1, N, 1)) # [1, N, 1, 16]\n V = V.squeeze(2).expand(B, -1, -1) # [B, N, 6]\n # initial E_idx\n pos = pos.unsqueeze(0) - pos.unsqueeze(1) # [N, N]\n D_idx, E_idx = pos.abs().topk(k=K, dim=-1, largest=False) # [N, K]\n E_idx = E_idx.unsqueeze(0).expand(B, -1, -1) # [B, N, K]\n D_idx = D_idx.unsqueeze(0).expand(B, -1, -1) # [B, N, K]\n # initial E\n E_rbf = self.features._rbf(3 * D_idx)\n E_pos = self.features.embeddings(E_idx)\n E = torch.cat((E_pos, E_rbf), dim=-1)\n return V, E, E_idx\n\n def init_coords(self, S, mask):\n B, N = S.size(0), S.size(1)\n K = min(self.k_neighbors, N)\n V, E, E_idx = self.init_struct(B, N, K)\n h = self.init_mpn(V, E, S, E_idx, mask)\n return self.predict_dist(self.O_d0(h))\n\n # Q: [B, N, H], K, V: [B, M, H]\n def attention(self, Q, context, cmask, W):\n att = torch.bmm(Q, context.transpose(1, 2)) # [B, N, M]\n att = att - 1e6 * (1 - cmask.unsqueeze(1))\n att = F.softmax(att, dim=-1)\n out = torch.bmm(att, context) # [B, N, M] * [B, M, H]\n out = torch.cat([Q, out], dim=-1)\n return W(out)\n\n def predict_dist(self, X):\n X = X.view(X.size(0), X.size(1), 4, 3)\n X_ca = X[:, :, 1, :]\n dX = X_ca[:, None, :, :] - X_ca[:, :, None, :]\n D = torch.sum(dX ** 2, dim=-1)\n V = self.features._dihedrals(X)\n AD = self.features._AD_features(X[:,:,1,:])\n return X.detach().clone(), D, V, AD\n\n def mask_mean(self, X, mask, i):\n # [B, N, 4, 3] -> [B, 1, 4, 3] / [B, 1, 1, 1]\n X = X[:, i:i+self.block_size]\n if X.dim() == 4:\n mask = mask[:, i:i+self.block_size].unsqueeze(-1).unsqueeze(-1)\n else:\n mask = mask[:, i:i+self.block_size].unsqueeze(-1)\n return torch.sum(X * mask, dim=1, keepdims=True) / (mask.sum(dim=1, keepdims=True) + 1e-8)\n\n def make_X_blocks(self, X, l, r, mask):\n N = X.size(1)\n lblocks = [self.mask_mean(X, mask, i) for i in range(0, l, self.block_size)]\n rblocks = [self.mask_mean(X, mask, i) for i in range(r + 1, N, self.block_size)]\n bX = torch.cat(lblocks + [X[:, l:r+1]] + rblocks, dim=1)\n return bX.detach()\n\n def make_S_blocks(self, LS, S, RS, l, r, mask):\n N = S.size(1)\n hS = self.W_s(S)\n LS = [self.mask_mean(hS, mask, i) for i in range(0, l, self.block_size)]\n RS = [self.mask_mean(hS, mask, i) for i in range(r + 1, N, self.block_size)]\n bS = torch.cat(LS + [hS[:, l:r+1]] + RS, dim=1)\n lmask = [mask[:, i:i+self.block_size].amax(dim=1, keepdims=True) for i in range(0, l, self.block_size)]\n rmask = [mask[:, i:i+self.block_size].amax(dim=1, keepdims=True) for i in range(r + 1, N, self.block_size)]\n bmask = torch.cat(lmask + [mask[:, l:r+1]] + rmask, dim=1)\n return bS, bmask, len(LS), len(RS)\n\n def get_completion_mask(self, 
B, N, cdr_range):\n cmask = torch.zeros(B, N).cuda()\n for i, (l,r) in enumerate(cdr_range):\n cmask[i, l:r+1] = 1\n return cmask\n\n def remove_cdr_coords(self, X, cdr_range):\n X = X.clone()\n for i, (l,r) in enumerate(cdr_range):\n X[i, l:r+1, :, :] = 0\n return X.clone()\n\n def forward(self, true_X, true_S, true_cdr, mask):\n B, N = mask.size(0), mask.size(1)\n K = min(self.k_neighbors, N)\n\n cdr_range = [(cdr.index(self.cdr_type), cdr.rindex(self.cdr_type)) for cdr in true_cdr]\n T_min = min([l for l,r in cdr_range])\n T_max = max([r for l,r in cdr_range])\n cmask = self.get_completion_mask(B, N, cdr_range)\n smask = mask.clone()\n\n # make blocks and encode framework\n S = true_S.clone() * (1 - cmask.long())\n hS, _ = self.rnn(self.W_s(S))\n LS, RS = hS[:, :, :self.hidden_size], hS[:, :, self.hidden_size:]\n hS, mask, offset, suffix = self.make_S_blocks(LS, S, RS, T_min, T_max, mask)\n cmask = torch.cat([cmask.new_zeros(B, offset), cmask[:, T_min:T_max+1], cmask.new_zeros(B, suffix)], dim=1)\n\n # Ground truth \n true_X = self.make_X_blocks(true_X, T_min, T_max, smask)\n true_V = self.features._dihedrals(true_X)\n true_AD = self.features._AD_features(true_X[:,:,1,:])\n true_D, mask_2D = pairwise_distance(true_X, mask)\n true_D = true_D ** 2\n\n # initial loss\n sloss = 0.\n X, D, V, AD = self.init_coords(hS, mask)\n X = X.detach().clone()\n dloss = self.huber_loss(D, true_D)\n vloss = self.mse_loss(V, true_V)\n aloss = self.mse_loss(AD, true_AD)\n\n for t in range(T_min, T_max + 1):\n # Prepare input\n V, E, E_idx = self.features(X, mask)\n hS = self.make_S_blocks(LS, S, RS, T_min, T_max, smask)[0]\n\n # Predict residue t\n h = self.seq_mpn(V, E, hS, E_idx, mask)\n h = self.attention(h, LS, smask, self.W_seq)\n logits = self.O_s(h[:, offset + t - T_min])\n snll = self.ce_loss(logits, true_S[:, t])\n sloss = sloss + torch.sum(snll * cmask[:, offset + t - T_min])\n\n # Teacher forcing on S\n S = S.clone()\n S[:, t] = true_S[:, t]\n S = S.clone()\n\n # Iterative refinement\n if t % self.update_freq == 0:\n h = self.struct_mpn(V, E, hS, E_idx, mask)\n h = self.attention(h, LS, smask, self.W_stc)\n X, D, V, AD = self.predict_dist(self.O_d(h))\n X = X.detach().clone()\n dloss = dloss + self.huber_loss(D, true_D)\n vloss = vloss + self.mse_loss(V, true_V)\n aloss = aloss + self.mse_loss(AD, true_AD)\n\n dloss = torch.sum(dloss * mask_2D) / mask_2D.sum()\n vloss = torch.sum(vloss * mask.unsqueeze(-1)) / mask.sum()\n aloss = torch.sum(aloss * mask.unsqueeze(-1)) / mask.sum()\n sloss = sloss.sum() / cmask.sum()\n loss = sloss + dloss + vloss + aloss\n return loss, sloss\n\n def log_prob(self, true_S, true_cdr, mask):\n B, N = mask.size(0), mask.size(1)\n K = min(self.k_neighbors, N)\n\n cdr_range = [(cdr.index(self.cdr_type), cdr.rindex(self.cdr_type)) for cdr in true_cdr]\n T_min = min([l for l,r in cdr_range])\n T_max = max([r for l,r in cdr_range])\n cmask = self.get_completion_mask(B, N, cdr_range)\n smask = mask.clone()\n\n # initialize\n S = true_S.clone() * (1 - cmask.long())\n hS, _ = self.rnn(self.W_s(S))\n LS, RS = hS[:, :, :self.hidden_size], hS[:, :, self.hidden_size:]\n hS, mask, offset, suffix = self.make_S_blocks(LS, S, RS, T_min, T_max, mask)\n cmask = torch.cat([cmask.new_zeros(B, offset), cmask[:, T_min:T_max+1], cmask.new_zeros(B, suffix)], dim=1)\n\n sloss = 0.\n X = self.init_coords(hS, mask)[0]\n X = X.detach().clone()\n\n for t in range(T_min, T_max + 1):\n # Prepare input\n V, E, E_idx = self.features(X, mask)\n hS = self.make_S_blocks(LS, S, RS, T_min, T_max, 
smask)[0]\n\n # Predict residue t\n h = self.seq_mpn(V, E, hS, E_idx, mask)\n h = self.attention(h, LS, smask, self.W_seq)\n logits = self.O_s(h[:, offset + t - T_min])\n snll = self.ce_loss(logits, true_S[:, t])\n sloss = sloss + snll * cmask[:, offset + t - T_min]\n\n # Teacher forcing on S\n S = S.clone()\n S[:, t] = true_S[:, t]\n S = S.clone()\n\n # Iterative refinement\n if t % self.update_freq == 0:\n h = self.struct_mpn(V, E, hS, E_idx, mask)\n h = self.attention(h, LS, smask, self.W_stc)\n X = self.predict_dist(self.O_d(h))[0]\n X = X.detach().clone()\n\n ppl = sloss / cmask.sum(dim=-1)\n sloss = sloss.sum() / cmask.sum()\n return ReturnType(nll=sloss, ppl=ppl, X=X, X_cdr=X[:, offset:offset+T_max-T_min+1])\n\n def generate(self, true_S, true_cdr, mask, return_ppl=False):\n B, N = mask.size(0), mask.size(1)\n K = min(self.k_neighbors, N)\n\n cdr_range = [(cdr.index(self.cdr_type), cdr.rindex(self.cdr_type)) for cdr in true_cdr]\n T_min = min([l for l,r in cdr_range])\n T_max = max([r for l,r in cdr_range])\n cmask = self.get_completion_mask(B, N, cdr_range)\n smask = mask.clone()\n\n # initialize\n S = true_S.clone() * (1 - cmask.long())\n hS, _ = self.rnn(self.W_s(S))\n LS, RS = hS[:, :, :self.hidden_size], hS[:, :, self.hidden_size:]\n hS, mask, offset, suffix = self.make_S_blocks(LS, S, RS, T_min, T_max, mask)\n cmask = torch.cat([cmask.new_zeros(B, offset), cmask[:, T_min:T_max+1], cmask.new_zeros(B, suffix)], dim=1)\n\n X = self.init_coords(hS, mask)[0]\n X = X.detach().clone()\n sloss = 0\n\n for t in range(T_min, T_max + 1):\n # Prepare input\n V, E, E_idx = self.features(X, mask)\n hS = self.make_S_blocks(LS, S, RS, T_min, T_max, smask)[0]\n\n # Predict residue t\n h = self.seq_mpn(V, E, hS, E_idx, mask)\n h = self.attention(h, LS, smask, self.W_seq)\n logits = self.O_s(h[:, offset + t - T_min])\n prob = F.softmax(logits, dim=-1) # [B, 20]\n S[:, t] = torch.multinomial(prob, num_samples=1).squeeze(-1) # [B, 1]\n sloss = sloss + self.ce_loss(logits, S[:, t]) * cmask[:, offset + t - T_min]\n\n # Iterative refinement\n h = self.struct_mpn(V, E, hS, E_idx, mask)\n h = self.attention(h, LS, smask, self.W_stc)\n X = self.predict_dist(self.O_d(h))[0]\n X = X.detach().clone()\n\n S = S.tolist()\n S = [''.join([alphabet[S[i][j]] for j in range(cdr_range[i][0], cdr_range[i][1] + 1)]) for i in range(B)]\n ppl = torch.exp(sloss / cmask.sum(dim=-1))\n return (S, ppl, X[:, offset:offset+T_max-T_min+1]) if return_ppl else S\n","repo_name":"wengong-jin/RefineGNN","sub_path":"structgen/hierarchical.py","file_name":"hierarchical.py","file_ext":"py","file_size_in_byte":13704,"program_lang":"python","lang":"en","doc_type":"code","stars":114,"dataset":"github-code","pt":"81"} +{"seq_id":"1735568744","text":"# Finding the optimal number of components to fit \r\n# the glow curve for sample J1000\r\nfrom scipy import optimize\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom prettytable import PrettyTable \r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\")\r\n#Deconvolution of prompt TL with N=5-6-7 components \r\n#using original KP-TL\r\ndata = np.loadtxt('J1000prompt.txt')\r\nx_data,y_data = data[:, 0], data[:, 1]\r\nz, kB =1.8, 8.617E-5\r\ndef TL(T, B,En ,rho, s): \r\n return abs(B)* np.exp(-rho*( (np.log(1+z*s*kB*(((T+\\\r\n\t\t\t 273)**2.0)/np.abs(En))*np.exp(-En/(kB*(T+273)))*\\\r\n\t\t\t (1-2*kB*(T+273)/En)))**3.0))*(En**2.0-6*(kB**2.0)*\\\r\n \t((T+273)**2.0))*((np.log(1+z*s*kB*(((T+273)**2.0)/\\\r\n\t\t\t abs(En))*np.exp(-En/(kB*(T+273)))*(1-2*kB*(T+273)/\\\r\n\t\t\t 
En)))**2.0)/(En*kB*s*((T+273)**2)*z-2*(kB**2.0)*\\\r\n\t\t\t s*z*((T+273)**3.0)+np.exp(En/(kB*(T+273)))*En)\r\ndef total_TL(T, *inis): \r\n    u=np.array([0 for i in range(len(x_data))])\r\n    Bs, Ens,rho,s= inis[0:nPks], inis[nPks:2*nPks],\\\r\n    inis[-2],inis[-1]\r\n    for i in range(nPks):\r\n        u=u+TL(T,Bs[i],Ens[i],rho,s)\r\n    return u\r\ndef main(nPks):\r\n    Bs=[2e17]*7\r\n    B=Bs[0:nPks] \r\n    lowB, highB=[0.01*x for x in B], [50*x for x in B]\r\n    lowrho, highrho, rho= [0.003,.015,.008]\r\n    lows, highs, s =[1e11, 1e14, 1e12]\r\n    Ens=[.8, .9,1.06,1.19,1.4,1.6,1.7,1.8]\r\n    En=Ens[0:nPks] \r\n    lowEn,highEn =[0.9*x for x in En],[1.1*x for x in En]\r\n    inis=B+En+[rho]+[s]\r\n    lowbnds=lowB+lowEn+[lowrho]+[lows]\r\n    highbnds=highB+highEn+[highrho]+[highs]\r\n    params, params_covariance = optimize.curve_fit(total_TL,\\\r\n    x_data,y_data,p0=inis,bounds=(lowbnds,highbnds),maxfev=10000)\r\n    plt.subplot(2,2, nPks-4)\r\n    plt.scatter(x_data, y_data,c='r')\r\n    plt.plot(x_data, total_TL(x_data, \r\n    *params),c='black',label='N='+str(nPks),linewidth=1) \r\n    for i in range(0,nPks): \r\n        plt.plot(x_data, TL(x_data,\r\n        params[i],params[nPks+i],params[-2],params[-1]))\r\n    leg = plt.legend()\r\n    leg.get_frame().set_linewidth(0.0) \r\n    plt.ylabel('TL [a.u.]')\r\n    plt.xlabel(r'Temperature T [$^{o}$C]')\r\n    res=total_TL(x_data, *params)-y_data\r\n    FOM=100*np.sum(abs(res))/np.sum(y_data) \r\n    FOMS[nPks-5]=FOM \r\nFOMS=[0]*3\r\nfor nPks in range(7,4,-1): \r\n    main(nPks)\r\nplt.subplot(2,2, 4)\r\nplt.plot(range(5,8),FOMS,'o-')\r\nplt.ylabel('FOM [%]')\r\nplt.xlabel(r'# of components N')\r\nplt.ylim([0, 5])\r\nmy_xticks = [5,6,7]\r\nplt.xticks(range(5,8), my_xticks)\r\nplt.tight_layout()\r\nplt.show()","repo_name":"vpagonis/Python-Codes","sub_path":"Ch5PagonisGitHub/Code 5.5.py","file_name":"Code 5.5.py","file_ext":"py","file_size_in_byte":2488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34798608824","text":"#!/usr/bin/python\n# full assembly of the sub-parts to form the complete net\nfrom .submodule import *\n\n\nclass FeatureExtractor(nn.Module):\n    def __init__(self):\n        super(FeatureExtractor, self).__init__()\n        # self.reductionFactor = 3\n        self.conv1 = nn.Sequential(convbn(1, 64, 3, 2, 1, 1), nn.ReLU(inplace=True),\n                                   convbn(64, 128, 3, 1, 1, 1), nn.ReLU(inplace=True),\n                                   convbn(128, 256, 3, 1, 1, 1), nn.ReLU(inplace=True),\n                                   convbn(256, 512, 3, 1, 1, 1), nn.ReLU(inplace=True))\n        self.inplanes = 512  # channel width produced by conv1; read by _make_layer below\n        self.layer1 = self._make_layer(BasicBlock, 32, 3, 1, 1, 1)\n        self.layer2 = self._make_layer(BasicBlock, 64, 16, 2, 1, 1)\n        self.layer3 = self._make_layer(BasicBlock, 128, 3, 1, 1, 1)\n        self.layer4 = self._make_layer(BasicBlock, 128, 3, 1, 1, 2)\n\n    def _make_layer(self, block, planes, blocks, stride, pad, dilation):\n        downsample = None\n        if stride != 1 or self.inplanes != planes * block.expansion:\n            downsample = nn.Sequential(\n                nn.Conv2d(self.inplanes, planes * block.expansion,\n                          kernel_size=1, stride=stride, bias=False),\n                nn.BatchNorm2d(planes * block.expansion), )\n\n        layers = []\n        layers.append(block(self.inplanes, planes, stride, downsample, pad, dilation))\n        self.inplanes = planes * block.expansion\n        for i in range(1, blocks):\n            layers.append(block(self.inplanes, planes, 1, None, pad, dilation))\n\n        return nn.Sequential(*layers)\n\n    def forward(self, x):\n        x = self.conv1(x)\n        x = self.layer1(x)\n        l2 = self.layer2(x)\n        l3 = self.layer3(l2)\n        l4 = self.layer4(l3)\n        gwc_feature = torch.cat((l2, l3, l4), dim=1)\n        return {\"gwc_feature\": 
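\n# (Added note.) gwc_feature concatenates the layer2-4 outputs channel-wise: 64 + 128 + 128 = 320 channels in total.\n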
gwc_feature}\n","repo_name":"Azorgz/LYNRED","sub_path":"My_NN/net_part.py","file_name":"net_part.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30629940127","text":"'''\nThe code is tested on Keras 2.0.0 using Tensorflow backend, and Python 3.5\n'''\n#from string import punctuation\nimport csv\nimport codecs\nimport pandas as pd\nimport numpy as np\nfrom tqdm import tqdm\n#import spacy\n#nlp = spacy.load('en')\nnp.set_printoptions(threshold=400000)\n\n#import itertools as it\nfrom os.path import isfile\nfrom collections import Counter\nfrom pprint import pprint\nfrom itertools import islice\n\n#from gensim.models import KeyedVectors\n#from gensim.models import Phrases\n#from gensim.models.word2vec import LineSentence\n\nfrom keras.models import Sequential\nfrom keras.utils import np_utils\nfrom keras import backend as K\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.layers import Dense, Input, Activation, PReLU\nfrom keras.layers import LSTM, Embedding, Bidirectional, GRU\nfrom keras.layers import Dropout, Merge, BatchNormalization\nfrom keras.layers import TimeDistributed, Lambda\nfrom keras.layers import Convolution1D, GlobalMaxPooling1D\nfrom keras.models import Model\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\n\nimport time\nimport datetime\nts = time.time()\nst = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')\n\nMAX_NB_WORDS = 200000\nEMBEDDING_DIM = 300\nVALIDATION_SPLIT = 0.15\nMAX_SEQUENCE_LENGTH = 40\nMAX_NB_WORDS = 200000\n\nGLOVE840 = 'F:/DS-main/BigFiles/glove.840B.300d/glove.840B.300d.txt'\nTRAIN_DATA_FILE = 'F:/DS-main/Kaggle-main/Quora Question Pairs - inputs/data/finalTrain.csv'\nTEST_DATA_FILE = 'F:/DS-main/Kaggle-main/Quora Question Pairs - inputs/data/finalTest.csv'\n\nSTAMP = 'Quora - '+ st\nprint(STAMP)\ndel(ts)\n\n##############################\n###First Steps\n##############################\n#calculate eng features\n#calculate scapy features\n#not done yet: preprocess sentences for nlp (gensim)\n\n\n##############################\n###Getting data\n##############################\n#train data\nprint('\\nGetting training data')\ndataALL = pd.read_csv(TRAIN_DATA_FILE, sep='\\t', encoding=\"utf-8\")\ny = dataALL['0'].values\ntexts_1 = dataALL['1'].values.astype(str)\ntexts_2 = dataALL['2'].values.astype(str)\ndataDense = dataALL.drop(['0', '1', '2'], axis=1).values\nprint(len(y),' training \"lines\"')\ndel(dataALL)\n\n#test data\nprint('\\nGetting testing data')\ndataALL = pd.read_csv(TEST_DATA_FILE, sep='\\t', encoding=\"utf-8\")\nprint('test_ids')\ntest_ids = dataALL['0'].values\nprint('test_texts_1')\ntest_texts_1 = dataALL['1'].values.astype(str)\nprint('test_texts_2')\ntest_texts_2 = dataALL['2'].values.astype(str)\ntest_dataDense = dataALL.drop(['0', '1', '2'], axis=1)\nprint(len(test_ids),' testing \"lines\"')\ndel(dataALL)\n\n##############################\n###Tokenizing\n##############################\nprint('\\nTokenizing Started')\ntokenizer = Tokenizer(num_words=MAX_NB_WORDS)\ntokenizer.fit_on_texts(list(texts_1) + list(texts_2) + list(test_texts_1) + list(test_texts_2))\n\nprint('x1')\nx1 = tokenizer.texts_to_sequences(texts_1)\nx1 = pad_sequences(x1, maxlen=MAX_SEQUENCE_LENGTH)\ndel(texts_1)\n\nprint('x2')\nx2 = tokenizer.texts_to_sequences(texts_2)\nx2 = pad_sequences(x2, maxlen=MAX_SEQUENCE_LENGTH)\ndel(texts_2)\n\nprint('test_x1')\ntest_x1 = 
tokenizer.texts_to_sequences(test_texts_1)\ntest_x1 = pad_sequences(test_x1, maxlen=MAX_SEQUENCE_LENGTH)\ndel(test_texts_1)\n\nprint('test_x2')\ntest_x2 = tokenizer.texts_to_sequences(test_texts_2)\ntest_x2 = pad_sequences(test_x2, maxlen=MAX_SEQUENCE_LENGTH)\ndel(test_texts_2)\n\nword_index = tokenizer.word_index\nprint('Found %s unique tokens' % len(word_index))\n\n#ytrain_enc = np_utils.to_categorical(y)\n#y = np.array(y)\n#test_ids = np.array(test_ids)\n\nprint('Shape of data tensor x1:', x1.shape)\nprint('Shape of data tensor x2:', x2.shape)\nprint('Shape of data tensor test_x1:', test_x1.shape)\nprint('Shape of data tensor test_x2:', test_x2.shape)\nprint('Shape of label tensor:', y.shape)\nprint('Tokenizing Done')\n\n\n##############################\n###prepare embeddings glove840\n##############################\nprint('Preparing embedding matrix')\nnb_words = min(MAX_NB_WORDS, len(word_index))+1\n\nembeddings_index = {}\nf = open(GLOVE840, encoding='utf8')\nfor line in tqdm(f):\n values = line.split()\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\nf.close()\nprint('Found %s word vectors.' % len(embeddings_index))\n\nembedding_matrix = np.zeros((nb_words, EMBEDDING_DIM))\nfor word, i in tqdm(word_index.items()):\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector\n\nprint('Null word embeddings: %d' % np.sum(np.sum(embedding_matrix, axis=1) == 0))\n\n\n\n\n#ValueError: could not convert string to float: '.'\n\n\n\n\n\n","repo_name":"charlesjansen/Kaggle_Quora-Question-Pair","sub_path":"Archives/quora.py","file_name":"quora.py","file_ext":"py","file_size_in_byte":4702,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"13347706861","text":"import os\nimport sys\n\nscript_path = os.path.dirname(os.path.realpath(__file__))\nthird_party_modules = os.path.join(script_path, 'blender_pip_modules')\n\nimport logging\nimport bpy\nfrom bpy.app.handlers import persistent\n\nlogging.basicConfig(\n level=logging.DEBUG,\n format=f'%(levelname)s:{__name__}:\"%(pathname)s:%(lineno)d\":%(message)s'\n)\n\nADDONS = ['bl_nengo_3d']\n\n\n@persistent\ndef load_handler_for_preferences(_):\n logging.info('Changing Preference Defaults!')\n from bpy import context\n\n prefs = context.preferences\n # prefs.use_preferences_save = False\n\n # kc = context.window_manager.keyconfigs[\"blender\"]\n\n\n@persistent\ndef load_handler_for_startup(_):\n # logging.info('Changing Startup Defaults!')\n for addon in ADDONS:\n bpy.ops.preferences.addon_enable(module=addon)\n\n # Use smooth faces.\n # for mesh in bpy.data.meshes:\n # for poly in mesh.polygons:\n # poly.use_smooth = True\n\n # Use material preview shading.\n # for screen in bpy.data.screens:\n # for area in screen.areas:\n # for space in area.spaces:\n # if space.type == 'FILE_BROWSER':\n # space.params.directory = os.path.dirname(__file__)\n # space.shading.type = 'MATERIAL'\n # space.shading.use_scene_lights = True\n\n\ndef register():\n if third_party_modules not in sys.path:\n sys.path.append(third_party_modules)\n\n # print(sorted(sys.modules.keys()))\n def create_link(directory: str, link_path: str) -> None:\n if os.path.exists(link_path):\n os.remove(link_path)\n\n if sys.platform == 'win32':\n import _winapi\n _winapi.CreateJunction(str(directory), str(link_path))\n else:\n os.symlink(str(directory), str(link_path), target_is_directory=True)\n\n addons_dir = 
bpy.utils.user_resource('SCRIPTS', 'addons')\n os.makedirs(addons_dir, exist_ok=True)\n for addon in ADDONS:\n source = os.path.join(script_path, addon)\n assert os.path.exists(source)\n create_link(directory=source, link_path=os.path.join(addons_dir, addon))\n\n bpy.app.handlers.load_factory_preferences_post.append(load_handler_for_preferences)\n bpy.app.handlers.load_factory_startup_post.append(load_handler_for_startup)\n\n\ndef unregister():\n if third_party_modules in sys.path:\n sys.path.remove(third_party_modules)\n\n bpy.app.handlers.load_factory_preferences_post.remove(load_handler_for_preferences)\n bpy.app.handlers.load_factory_startup_post.remove(load_handler_for_startup)\n","repo_name":"Mateusz-Grzelinski/nengo-3d","sub_path":"nengo_3d/nengo_app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73148516746","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def swapPairs(self, head: Optional[ListNode]) -> Optional[ListNode]:\n \n start = ListNode(None, None)\n cursor = start\n \n while(head and head.next):\n \n first = head\n second = head.next\n nextNode = head.next.next\n \n cursor.next = second\n second.next = first\n first.next = None\n head = nextNode\n cursor = first\n \n if(head):\n cursor.next = head\n \n return start.next\n","repo_name":"lodado/PS","sub_path":"leetcode/링크드리스트/24.SwapNodesinPairs.py","file_name":"24.SwapNodesinPairs.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23662530684","text":"# coding:utf-8\n\nfrom __future__ import print_function\n\nimport json\nimport os\n\nfrom ttvcloud.ApiInfo import ApiInfo\nfrom ttvcloud.Credentials import Credentials\nfrom ttvcloud.ServiceInfo import ServiceInfo\nfrom ttvcloud.base.Service import Service\nfrom ttvcloud.const.Const import *\nfrom ttvcloud.util.Util import *\nfrom ttvcloud.Policy import *\n\nIMAGEX_HOST_CN = \"imagex.bytedanceapi.com\"\nIMAGEX_HOST_VA = \"imagex.us-east-1.bytedanceapi.com\"\nIMAGEX_HOST_SG = \"imagex.ap-singapore-1.bytedanceapi.com\"\nIMAGEX_HOST_GCP = \"imagex-us-east-2.bytevcloudapi.com\"\n\nIMAGEX_SERVICE_NAME = \"ImageX\"\nIMAGEX_API_VERSION = \"2018-08-01\"\n\nResourceServiceIdTRN = \"trn:ImageX:*:*:ServiceId/%s\"\n\nservice_info_map = {\n REGION_CN_NORTH1: ServiceInfo(\n IMAGEX_HOST_CN,\n {'Accept': 'application/json'},\n Credentials('', '', IMAGEX_SERVICE_NAME, REGION_CN_NORTH1),\n 10, 10, \"https\"),\n REGION_AP_SINGAPORE1: ServiceInfo(\n IMAGEX_HOST_SG,\n {'Accept': 'application/json'},\n Credentials('', '', IMAGEX_SERVICE_NAME, REGION_AP_SINGAPORE1),\n 10, 10, \"https\"),\n REGION_US_EAST1: ServiceInfo(\n IMAGEX_HOST_VA,\n {'Accept': 'application/json'},\n Credentials('', '', IMAGEX_SERVICE_NAME, REGION_US_EAST1),\n 10, 10, \"https\"),\n REGION_US_EAST2: ServiceInfo(\n IMAGEX_HOST_GCP,\n {'Accept': 'application/json'},\n Credentials('', '', IMAGEX_SERVICE_NAME, REGION_US_EAST2),\n 10, 10, \"https\"),\n}\n\napi_info = {\n # 模板管理\n \"CreateImageTemplate\":\n ApiInfo(\"POST\", \"/\", {\"Action\": \"CreateImageTemplate\",\n \"Version\": IMAGEX_API_VERSION}, {}, {}),\n \"DeleteImageTemplate\":\n ApiInfo(\"POST\", \"/\", {\"Action\": \"DeleteImageTemplate\",\n \"Version\": IMAGEX_API_VERSION}, {}, {}),\n \"PreviewImageTemplate\":\n 
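# (Added note.) Each ApiInfo entry below binds one HTTP verb to the Action/Version query-string pair that the ImageX endpoint expects for that call.\n        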
ApiInfo(\"POST\", \"/\", {\"Action\": \"PreviewImageTemplate\",\n \"Version\": IMAGEX_API_VERSION}, {}, {}),\n \"GetImageTemplate\":\n ApiInfo(\"GET\", \"/\", {\"Action\": \"GetImageTemplate\",\n \"Version\": IMAGEX_API_VERSION}, {}, {}),\n \"GetAllImageTemplates\":\n ApiInfo(\"GET\", \"/\", {\"Action\": \"GetAllImageTemplates\",\n \"Version\": IMAGEX_API_VERSION}, {}, {}),\n # 资源管理相关\n \"ApplyImageUpload\":\n ApiInfo(\"GET\", \"/\", {\"Action\": \"ApplyImageUpload\",\n \"Version\": IMAGEX_API_VERSION}, {}, {}),\n \"CommitImageUpload\":\n ApiInfo(\"POST\", \"/\", {\"Action\": \"CommitImageUpload\",\n \"Version\": IMAGEX_API_VERSION}, {}, {}),\n \"DeleteImageUploadFiles\":\n ApiInfo(\"POST\", \"/\", {\"Action\": \"DeleteImageUploadFiles\",\n \"Version\": IMAGEX_API_VERSION}, {}, {}),\n \"UpdateImageUploadFiles\":\n ApiInfo(\"POST\", \"/\", {\"Action\": \"UpdateImageUploadFiles\",\n \"Version\": IMAGEX_API_VERSION}, {}, {}),\n \"PreviewImageUploadFile\":\n ApiInfo(\"GET\", \"/\", {\"Action\": \"PreviewImageUploadFile\",\n \"Version\": IMAGEX_API_VERSION}, {}, {}),\n \"GetImageUploadFile\":\n ApiInfo(\"GET\", \"/\", {\"Action\": \"GetImageUploadFile\",\n \"Version\": IMAGEX_API_VERSION}, {}, {}),\n \"GetImageUploadFiles\":\n ApiInfo(\"GET\", \"/\", {\"Action\": \"GetImageUploadFiles\",\n \"Version\": IMAGEX_API_VERSION}, {}, {}),\n \"GetImageUpdateFiles\":\n ApiInfo(\"GET\", \"/\", {\"Action\": \"GetImageUpdateFiles\",\n \"Version\": IMAGEX_API_VERSION}, {}, {})\n}\n\n\nclass ImageXService(Service):\n def __init__(self, region='cn-north-1'):\n self.service_info = ImageXService.get_service_info(region)\n self.api_info = ImageXService.get_api_info()\n super(ImageXService, self).__init__(self.service_info, self.api_info)\n\n @staticmethod\n def get_service_info(region):\n service_info = service_info_map.get(region, None)\n if not service_info:\n raise Exception(\n 'Cant find the region %s, please check it carefully' % region)\n return service_info\n\n @staticmethod\n def get_api_info():\n return api_info\n\n # upload\n def apply_upload(self, params):\n res = self.get('ApplyImageUpload', params, doseq=1)\n if res == '':\n raise Exception(\"empty response\")\n res_json = json.loads(res)\n return res_json\n\n def commit_upload(self, params, body):\n res = self.json('CommitImageUpload', params, body)\n if res == '':\n raise Exception(\"empty response\")\n res_json = json.loads(res)\n return res_json\n\n # 上传本地图片文件\n def upload_image(self, service_id, file_paths, keys=[], space_name=\"\", functions=[], skip_meta=False):\n img_datas = []\n for p in file_paths:\n if not os.path.isfile(p):\n raise Exception(\"no such file on file path %s\" % p)\n in_file = open(p, \"rb\")\n img_datas.append(in_file.read())\n in_file.close()\n return self.upload_image_data(service_id, img_datas, keys, space_name, functions, skip_meta)\n\n # 上传图片二进制数据\n def upload_image_data(self, service_id, img_datas, keys=[], space_name=\"\", functions=[], skip_meta=False):\n for data in img_datas:\n if not isinstance(data, bytes):\n raise Exception(\"upload of non-bytes not supported\")\n\n apply_upload_request = {\n 'ServiceId': service_id,\n 'UploadNum': len(img_datas),\n 'StoreKeys': keys,\n 'SpaceName': space_name\n }\n resp = self.apply_upload(apply_upload_request)\n if 'Error' in resp['ResponseMetadata']:\n raise Exception(resp['ResponseMetadata'])\n\n result = resp['Result']\n reqid = result['RequestId']\n addr = result['UploadAddress']\n if len(addr['UploadHosts']) == 0:\n raise Exception(\"no upload host found, reqid %s\" % 
reqid)\n elif len(addr['StoreInfos']) != len(img_datas):\n raise Exception(\n \"store info len %d != upload num %d, reqid %s\" % (\n len(result['StoreInfos']), len(img_datas), reqid))\n\n session_key = addr['SessionKey']\n host = addr['UploadHosts'][0]\n self.do_upload(img_datas, host, addr['StoreInfos'])\n\n commit_upload_request = {\n 'ServiceId': service_id,\n 'SpaceName': space_name,\n 'SkipMeta': str(skip_meta)\n }\n commit_upload_body = {\n 'SessionKey': session_key,\n 'Functions': functions,\n }\n resp = self.commit_upload(\n commit_upload_request, json.dumps(commit_upload_body))\n if 'Error' in resp['ResponseMetadata']:\n raise Exception(resp['ResponseMetadata'])\n return resp['Result']\n\n def do_upload(self, img_datas, host, store_infos):\n idx = 0\n for d in img_datas:\n oid = store_infos[idx]['StoreUri']\n auth = store_infos[idx]['Auth']\n url = 'http://{}/{}'.format(host, oid)\n check_sum = crc32(d) & 0xFFFFFFFF\n check_sum = \"%08x\" % check_sum\n headers = {'Content-CRC32': check_sum, 'Authorization': auth}\n upload_status, resp = self.put_data(url, d, headers)\n if not upload_status:\n raise Exception(\"upload %s error %s\" % (url, resp))\n idx += 1\n\n # 获取临时上传凭证\n def get_upload_auth_token(self, params):\n apply_token = self.get_sign_url('ApplyImageUpload', params)\n commit_token = self.get_sign_url('CommitImageUpload', params)\n\n ret = {'Version': 'v1', 'ApplyUploadToken': apply_token,\n 'CommitUploadToken': commit_token}\n data = json.dumps(ret)\n if sys.version_info[0] == 3:\n return base64.b64encode(data.encode('utf-8')).decode('utf-8')\n else:\n return base64.b64encode(data.decode('utf-8'))\n\n # 获取上传临时密钥\n def get_upload_auth(self, service_ids, expire=60*60):\n actions = ['ImageX:ApplyImageUpload', 'ImageX:CommitImageUpload']\n resources = []\n if len(service_ids) == 0:\n resources.append(ResourceServiceIdTRN % '*')\n else:\n for sid in service_ids:\n resources.append(ResourceServiceIdTRN % sid)\n\n statement = Statement.new_allow_statement(actions, resources)\n inline_policy = Policy([statement])\n return self.sign_sts2(inline_policy, expire)\n\n # 更新图片URL:action为0表示刷新,为1表示禁用,为2表示解禁,为4表示预热,为5表示目录刷新,为6表示样式刷新\n def update_image_urls(self, service_id, urls, action=0):\n query = {\n 'ServiceId': service_id\n }\n body = {\n 'Action': action,\n 'ImageUrls': urls\n }\n res = self.json(\"UpdateImageUploadFiles\", query, json.dumps(body))\n if res == '':\n raise Exception(\"empty response\")\n res_json = json.loads(res)\n if 'Error' in res_json['ResponseMetadata']:\n raise Exception(res_json['ResponseMetadata'])\n return res_json['Result']\n\n # 获取图片信息\n def get_image_info(self, service_id, store_uri):\n query = {\n 'ServiceId': service_id,\n 'StoreUri': store_uri,\n }\n res = self.get('PreviewImageUploadFile', query)\n if res == '':\n raise Exception(\"empty response\")\n res_json = json.loads(res)\n if 'Error' in res_json['ResponseMetadata']:\n raise Exception(res_json['ResponseMetadata'])\n return res_json['Result']\n\n def delete_images(self, service_id, uris):\n query = {\n 'ServiceId': service_id\n }\n body = {\n 'StoreUris': uris\n }\n res = self.json('DeleteImageUploadFiles', query, json.dumps(body))\n if res == '':\n raise Exception(\"DeleteImageUploadFiles: empty response\")\n res_json = json.loads(res)\n if 'Error' in res_json['ResponseMetadata']:\n raise Exception(res_json['ResponseMetadata'])\n return res_json['Result']\n\n def imagex_get(self, action, params, doseq=0):\n res = self.get(action, params, doseq)\n if res == '':\n raise Exception(\"%s: empty 
response\" % action)\n res_json = json.loads(res)\n return res_json\n\n def imagex_post(self, action, params, body):\n res = self.json(action, params, body)\n if res == '':\n raise Exception(\"%s: empty response\" % action)\n res_json = json.loads(res)\n return res_json\n","repo_name":"TTvcloud/vcloud-sdk-python","sub_path":"ttvcloud/imagex/ImageXService.py","file_name":"ImageXService.py","file_ext":"py","file_size_in_byte":10617,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"81"} +{"seq_id":"5722893707","text":"from stable_baselines import DQN\nfrom stable_baselines.common.evaluation import evaluate_policy\nimport gym\nimport time\n\n# env = gym.make('CartPole-v0') # 倒立摆\nenv = gym.make('GridWorld-v0') # 机器人找金币\nTRAIN = 0\n\nif TRAIN:\n model = DQN('MlpPolicy', env, learning_rate=1e-3, prioritized_replay=True, verbose=1)\n model.learn(total_timesteps=int(1e5))\n # model.save(\"dqn_cartpole\")\n model.save(\"dqn_gridworld\")\n del model\n\nelse:\n # model = DQN.load(\"dqn_cartpole\", env)\n model = DQN.load(\"dqn_gridworld\", env)\n mean_reward, std_reward = evaluate_policy(model, model.get_env(), n_eval_episodes=10)\n # print(mean_reward, std_reward)\n obs = env.reset()\n # print(obs)\n for i in range(1000):\n action, _states = model.predict(obs)\n obs, rewards, done, info = env.step(action)\n env.render()\n time.sleep(2) # for showing render()","repo_name":"guyuehome/guyueclass","sub_path":"machine_learning/MuJoCo_reinforcement_learning/9. gym实例讲解/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":534,"dataset":"github-code","pt":"81"} +{"seq_id":"1794409718","text":"import pytest\n\nfrom array_analyzer.extract.metadata import MetaData\nimport array_analyzer.extract.constants as constants\n\n\"\"\"\nTests to add:\nMETADATA:\nnormal operation:\n*requires:\n - input folder with good xlsx, good \n - dummy output folder\n1) extension .xml, .xlsx are found\n\n2) no extension is found (not implemented error)\n\n3) c.params are populated correctly for each of\n 3a) .xml, .xlsx\n \n4) arrays are populated correctly for each of \n 4a) .xml, .xlsx\n \n5) fiducials are calculated correctly for each of\n 5a) 6x6 array metadata, .xml and .xlsx\n 5b) 6x8 array metadata, .xml and .xlsx\n \nbad operation:\n* requires:\n - input folder with bad xml:\n - multiple .xml\n - .xml with correct dictionaries\n1) catch when multiple .xml\n2) catch when incorrectly labeled .xlsx\n3) txtparser xlsx: catch when incorrect sheets\n2) c.params has rows, columns, pitch, size defined\n3) \n\"\"\"\n\n\ndef test_one_xml(create_good_xml):\n input_dir, output_dir = create_good_xml\n constants.METADATA_FILE = 'temp.xml'\n constants.RUN_PATH = output_dir\n MetaData(input_dir, output_dir)\n\n\ndef test_wrong_xml_name(create_good_xml):\n input_dir, output_dir = create_good_xml\n constants.METADATA_FILE = 'no_xml.xml'\n constants.RUN_PATH = output_dir\n with pytest.raises(IOError):\n MetaData(input_dir, output_dir)\n\n\ndef test_xlsx(create_good_xlsx):\n input_dir, output_dir = create_good_xlsx\n constants.METADATA_FILE = 'multisero_output_data_metadata.xlsx'\n constants.RUN_PATH = output_dir\n MetaData(input_dir, output_dir)\n assert constants.params['rows'] == 6\n assert constants.params['columns'] == 6\n assert constants.params['v_pitch'] == 0.4\n assert constants.params['h_pitch'] == 0.45\n assert constants.params['spot_width'] == 0.2\n assert constants.params['pixel_size'] == 0.0049\n\n\ndef 
test_xlsx_rerun(create_good_xlsx):\n input_dir, output_dir = create_good_xlsx\n constants.METADATA_FILE = 'multisero_output_data_metadata.xlsx'\n constants.RUN_PATH = output_dir\n constants.RERUN = True\n MetaData(input_dir, output_dir)\n assert constants.RERUN_WELLS == ['A3', 'B7']\n assert constants.params['rows'] == 6\n assert constants.params['columns'] == 6\n assert constants.params['v_pitch'] == 0.4\n assert constants.params['h_pitch'] == 0.45\n assert constants.params['spot_width'] == 0.2\n assert constants.params['pixel_size'] == 0.0049\n","repo_name":"mehta-lab/multiSero","sub_path":"tests/extract/test_metadata.py","file_name":"test_metadata.py","file_ext":"py","file_size_in_byte":2436,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"72929286666","text":"from typing import List\nfrom sys import setrecursionlimit\nsetrecursionlimit(10 ** 9)\n\n\nclass Solution:\n def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:\n result = []\n def combinations(index, path, target):\n if target < 0:\n return\n if target == 0:\n result.append(path)\n\n for i in range(index, len(candidates)):\n combinations(i, path + [candidates[i]], target - candidates[i])\n\n combinations(0, [], target)\n return result\n\n\n# print(Solution().combinationSum(\n# [2,3,5],\n# 8))","repo_name":"boorooksus/Algorithm-Study","sub_path":"LeetCode/3회차/B36_Combination Sum.py","file_name":"B36_Combination Sum.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33371144107","text":"import json\n\nfrom CharlotteKnn import CharlotteKnn\n\n\ndef main():\n msC = CharlotteKnn()\n\n test_data = {}\n\n with open('results/test.json', 'r') as file_data:\n test_data = json.loads(file_data.read())\n\n predictions=[]\n\n # Typically k = N^(1/2) where N is the number of attributes\n # Attributes being each word found\n k = 200\n\n correctClassification = 0\n\n # The number of pages processed\n pageCount = 0\n\n for key in test_data:\n expected_classification = test_data[key][\"category\"]\n # Returns K number of nearest neighbors\n neighbors = msC.getNeighbors(test_data[key]['stemmed_words'], k)\n \n # Gets the classification of the the surrounding neighbors with the most members present\n classification = msC.getNeighborsVotes(neighbors)\n\n if classification == expected_classification:\n correctClassification += 1\n\n pageCount += 1\n\n print(\"Accuracy: \" + str(correctClassification / pageCount))\n\n \n\n\nmain()","repo_name":"brooksbecton/charlotte","sub_path":"driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"73569550984","text":"import os, bs4, html, json, codecs\nfrom bs4 import BeautifulSoup\n\nclass Muaban:\n path = \"/home/nga/Documents/20193/Project/request/Datas/muaban.net\"\n def __init__(self):\n for _,_, filenames in os.walk(self.path):\n for filename in filenames:\n print(filename)\n self.extract(BeautifulSoup(open(self.path+'/'+filename,'r').read(),'html.parser'), filename)\n return\n\n def saveFile(self, filename, info):\n json.dump(info, codecs.open('./'+filename.split('.')[0]+'.txt','w'),ensure_ascii=False)\n\n def extract(self, bs, filename):\n info = dict()\n\n # title\n info['title'] = bs.find(\"h1\", {\"class\":\"title\"}).text.strip()\n # price\n try:\n info['price'] = bs.find(\"div\", 
{\"class\":\"price-container__value\"}).text.strip()\n except:\n pass\n\n # Dia diem, Ngay dang\n info['address'] = bs.find(\"span\", {\"class\":\"location-clock__location\"}).text.strip()\n info['startDate'] = bs.find(\"span\", {\"class\":\"location-clock__clock\"}).text.strip()\n info['name'] = bs.find(\"div\", {\"class\":\"user-info__fullname\"}).text.strip()\n info['phone'] = bs.find(\"div\", {\"class\":\"mobile-container__value\"}).find(\"span\")['mobile']\n # description\n info['description'] = \"\"\n element = bs.find(\"div\",{\"class\":\"body-container\"})\n for chil in element.find_all(text= lambda text: (not isinstance(text, bs4.element.Comment)) and (text.strip() != ''), recursive=True):\n info['description'] += chil.strip()+\"\\n\"\n # dia chi, dien tich, phap ly,..\n items = bs.find(\"div\",{\"class\":\"tect-content-block\"}).find_all(text= lambda text: (not isinstance(text, bs4.element.Comment)) and (text.strip() != ''), recursive=True)\n key = None\n for item in items:\n if key is None:\n key = item.strip()\n info[key] = None\n else:\n info[key] = item.strip()\n key = None\n return self.saveFile(filename, info)\n\nbds = Muaban()\n","repo_name":"Duongkieunga/Real-Estate","sub_path":"Extract/muaban_net.py","file_name":"muaban_net.py","file_ext":"py","file_size_in_byte":2083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33253889869","text":"from xml.dom import minidom\r\nfrom nameparser import HumanName\r\nimport web.models\r\n\r\nRFACTOR2NODEMAP = {\r\n \"Position\": \"Position\",\r\n \"GridPos\": \"GridPosition\",\r\n \"CarNumber\": \"CarNumber\",\r\n \"Points\": \"Points\",\r\n \"Pitstops\": \"Stops\"\r\n}\r\n\r\ndef importResultFile(raceResult):\r\n \"\"\"\r\n Parses the given rFacotr 2 result file and returns a list of (unsaved) driver results\r\n \"\"\"\r\n resultList = list()\r\n xml = minidom.parse(raceResult.File.path)\r\n web.models.DriverResult.objects.filter(RaceResult__Race=raceResult.Race).delete()\r\n trackLength = float(xml.getElementsByTagName('TrackLength')[0].childNodes[0].nodeValue)\r\n driverResults = xml.getElementsByTagName('Driver')\r\n for driverResult in driverResults:\r\n resultToSave: web.models.DriverResult = web.models.DriverResult()\r\n resultToSave.RaceResult = raceResult # yes, it's redundant, but it makes querying simpler\r\n resultToSave.AverageSpeed = 0.0\r\n resultToSave.ArePointsScoring = True\r\n resultToSave.ArePointsScoringForTeam = True\r\n\r\n # Find referenced fields\r\n laps = list()\r\n teamName = \"\"\r\n driverFirstName = \"\"\r\n driverLastName = \"\"\r\n finishStatus = \"\"\r\n for node in driverResult.childNodes:\r\n if \"Element\" in str(type(node)) and node.tagName in RFACTOR2NODEMAP:\r\n setattr(resultToSave,RFACTOR2NODEMAP[node.tagName],node.childNodes[0].nodeValue)\r\n elif \"Element\" in str(type(node)):\r\n if node.tagName == \"Lap\":\r\n # parse the laps\r\n lap = web.models.Lap()\r\n position = node.attributes[\"p\"].value\r\n lapNumber = node.attributes[\"num\"].value\r\n duration = node.childNodes[0].nodeValue\r\n lap.Position = position\r\n lap.Number = lapNumber\r\n lap.Duration = duration\r\n laps.append(lap)\r\n elif node.tagName ==\"TeamName\":\r\n teamName = node.childNodes[0].nodeValue\r\n elif node.tagName ==\"Name\":\r\n rawDriverName = node.childNodes[0].nodeValue\r\n name = HumanName(rawDriverName)\r\n driverFirstName = name.first\r\n if name.middle != \"\":\r\n driverLastName = name.middle + \" \" + name.last\r\n else:\r\n driverLastName = 
name.last\r\n elif node.tagName == \"FinishStatus\":\r\n resultToSave.FinishStatus = web.models.FinishStatus.objects.get(Name=node.childNodes[0].nodeValue)\r\n elif node.tagName == \"CarType\":\r\n resultToSave.VehicleClass = web.models.VehicleClass.objects.get(Name=node.childNodes[0].nodeValue)\r\n # find the team entry\r\n teamQuery = web.models.TeamEntry.objects.filter(Name=teamName,Season=raceResult.Race.Season)\r\n if teamQuery.count() == 1:\r\n # team is signed up, continue\r\n team = teamQuery.first()\r\n driverQuery = team.Drivers.filter(FirstName=driverFirstName, LastName=driverLastName)\r\n driver = driverQuery.first()\r\n if team is not None and driver is not None:\r\n # insert result\r\n resultToSave.Driver = driver\r\n resultToSave.save()\r\n overallTimeNeeded = 0.0\r\n completedDistance = trackLength * len(laps) / 1000\r\n for lap in laps:\r\n lap.save()\r\n resultToSave.Laps.add(lap)\r\n overallTimeNeeded = overallTimeNeeded + float(lap.Duration)\r\n resultToSave.AverageSpeed = round(completedDistance/ (overallTimeNeeded/60/60),2)\r\n resultToSave.save()\r\n raceResult.DriverResults.add(resultToSave) \r\n resultList.append(resultToSave)\r\n return resultList","repo_name":"simpaddock/pitlane2","sub_path":"web/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38832226257","text":"from rest_framework import permissions\nfrom drf_yasg.views import get_schema_view\nfrom drf_yasg import openapi\n\ndocs_view = get_schema_view(\n\topenapi.Info(\n\t\ttitle='Paranoid Social Network API',\n\t\tdefault_version='dev',\n\t\tdescription='An opensource social network',\n\t\tlicense=openapi.License(name='MIT License')\n\t),\n\tpublic=True,\n\tpermission_classes=[permissions.AllowAny]\n)\n","repo_name":"RubenRubens/paranoid_backend","sub_path":"src/config/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24512471154","text":"import pandas\nimport pandas\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import Lasso, LassoCV, LinearRegression\nfrom sklearn import model_selection\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\nfrom coeficients import r2, calculate_aic\nimport statsmodels.api as sm\nfrom sklearn.metrics import mean_squared_error,r2_score\nfrom sklearn.model_selection import cross_val_score, cross_val_predict\nfrom sklearn.preprocessing import StandardScaler, OrdinalEncoder\nimport seaborn as sns\n\ndef predict_feature(path_clinical_data, names):\n predict = []\n clinical_data = pandas.read_excel(path_clinical_data)\n for i in range (0,len(clinical_data)):\n if clinical_data.loc[i,\"TCIA_ID\"] in names:\n predict.append(clinical_data.loc[i,[\"TCIA_ID\",\"OS\"]])\n predict_data = pandas.DataFrame(predict).set_index(\"TCIA_ID\")\n return predict_data\n#A aquest document es realitzarán regressions amb característiques clíniques que destaquen mèdicamtn per intentar millorar el model fet\nclinical_data_path =\"C:/Users/miacr/OneDrive/Documentos/TFG/HCC-TACE-Seg_clinical_data-V2.xlsx\"\nclinical_data_X_train = pandas.read_csv(\"C:/Users/miacr/TFG-radiomica/clinical_data_Xtrain_processed.csv\").set_index(\"TCIA_ID\")\nclinical_data_X_test = pandas.read_csv(\"C:/Users/miacr/TFG-radiomica/clinical_data_Xtest_processed.csv\").set_index(\"TCIA_ID\")\nclinical_data_y_train = 
pandas.read_csv(\"C:/Users/miacr/TFG-radiomica/clinical_data_Ytrain_processed.csv\").set_index(\"TCIA_ID\")\nclinical_data_y_test = pandas.read_csv(\"C:/Users/miacr/TFG-radiomica/clinical_data_Ytest_processed.csv\").set_index(\"TCIA_ID\")\nclinical_data = pandas.read_excel(clinical_data_path)\npredict_data = clinical_data.loc[:,[\"OS\", \"TCIA_ID\"]].set_index(\"TCIA_ID\")\n\n# Funció que reprodueix el mateix procés que a la regressió original només amb un grup de variables donades\ndef seleccio_columnes(columnes, clinical_data_X_train,clinical_data_X_test): \n taula = []\n dades_columna = pandas.DataFrame()\n for nom in columnes:\n dades = []\n for var in clinical_data_X_train.columns:\n if nom in var:\n dades.append(var)\n dades_columna_X_train = clinical_data_X_train.loc[:,dades]\n dades_columna_X_test = clinical_data_X_test.loc[:,dades]\n names_Xtrain= dades_columna_X_train.index.to_list()\n names_Xtest= dades_columna_X_test.index.to_list() #Llista amb l'ID del pacients que tenen informacio necessaria\n y_train = predict_feature(clinical_data_path, names_Xtrain) #OS dels mateixos pacients\n y_test = predict_feature(clinical_data_path, names_Xtest)\n r2_valid, r2_score , Aic , nom_columna = fer_regressio_una_columna(nom,dades_columna_X_train,dades_columna_X_test,y_train, y_test)\n taula.append({\n 'Columna': nom_columna,\n 'r2_valid': r2_valid,\n 'r2_test': r2_score,\n 'Aic': Aic\n })\n df = pandas.DataFrame(taula)\n return df.set_index(\"Columna\")\n\n\ndef fer_regressio_una_columna(nom_columna,dades_columna_X_train, dades_columna_X_test,predict_data_y_train,predict_data_y_test):\n for col in dades_columna_X_train.columns:\n unique_vals = dades_columna_X_train[col].unique()\n if len(unique_vals) <= 7:\n dades_columna_X_train.loc[:,col] = dades_columna_X_train.loc[:,col].astype(\"category\")\n X_train_num = dades_columna_X_train.select_dtypes(include=['float64', 'int'])\n col_num = X_train_num.columns\n if len(col_num)!=0:\n scaler = StandardScaler()\n dades_columna_X_train.loc[:,col_num] = scaler.fit_transform(dades_columna_X_train.loc[:,col_num])\n dades_columna_X_test.loc[:,col_num] = scaler.fit_transform(dades_columna_X_test.loc[:,col_num])\n \n regressio= LinearRegression()\n regressio.fit(dades_columna_X_train,predict_data_y_train)\n scoring = \"r2\"\n results = cross_val_score(regressio, dades_columna_X_train, predict_data_y_train, cv=4, scoring=scoring)\n print(results)\n print(\"R squared val \", nom_columna, results.mean())\n r_valid = results.mean()\n\n cv_predicciones = cross_val_predict(\n estimator = regressio,\n X = dades_columna_X_train,\n y = predict_data_y_train,\n cv = 4\n )\n cv_predicciones_list= []\n for lista in cv_predicciones:\n cv_predicciones_list.append(lista[0])\n cv_predicciones_list = np.asarray(cv_predicciones_list)\n\n #Grafics de residus\n # fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(9, 5))\n\n # axes[0, 0].scatter(predict_data_y_train, cv_predicciones_list, edgecolors=(0, 0, 0), alpha = 0.4)\n # axes[0, 0].plot(\n # [predict_data_y_train.min(), predict_data_y_train.max()],\n # [predict_data_y_train.min(), predict_data_y_train.max()],\n # 'k--', color = 'black', lw=2\n # )\n # axes[0, 0].set_title('Valor predit vs valor real', fontsize = 10, fontweight = \"bold\")\n # axes[0, 0].set_xlabel('Real')\n # axes[0, 0].set_ylabel('Predicció')\n # axes[0, 0].tick_params(labelsize = 7)\n\n # axes[0, 1].scatter(list(range(len(predict_data_y_train))), predict_data_y_train.loc[:,\"OS\"].tolist() - cv_predicciones_list,\n # edgecolors=(0, 0, 0), alpha = 
0.4)\n # axes[0, 1].axhline(y = 0, linestyle = '--', color = 'black', lw=2)\n # axes[0, 1].set_title('Residus del model', fontsize = 10, fontweight = \"bold\")\n # axes[0, 1].set_xlabel('id')\n # axes[0, 1].set_ylabel('Residu')\n # axes[0, 1].tick_params(labelsize = 7)\n\n # sns.histplot(\n # data = predict_data_y_train.loc[:,\"OS\"].tolist() - cv_predicciones_list,\n # stat = \"density\",\n # kde = True,\n # line_kws= {'linewidth': 1},\n # color = \"firebrick\",\n # alpha = 0.3,\n # ax = axes[1, 0]\n # )\n\n # axes[1, 0].set_title('Distribució residus del model', fontsize = 10,\n # fontweight = \"bold\")\n\n # axes[1, 0].set_xlabel(\"Residu\")\n # axes[1, 0].set_ylabel(\"densitat\")\n # axes[1, 0].tick_params(labelsize = 7)\n\n\n # sm.qqplot(\n # predict_data_y_train.loc[:,\"OS\"].tolist() - cv_predicciones_list,\n # fit = True,\n # line = 'q',\n # ax = axes[1, 1], \n # color = 'firebrick',\n # alpha = 0.4,\n # lw = 2\n # )\n # axes[1, 1].set_title('Q-Q residus del model', fontsize = 10, fontweight = \"bold\")\n # axes[1, 1].set_xlabel(\"Quantils teòrics\")\n # axes[1, 1].set_ylabel(\"Quantils mostrals\")\n # axes[1, 1].tick_params(labelsize = 7)\n\n # fig.tight_layout()\n # plt.subplots_adjust(top=0.9)\n # fig.suptitle('Diagnòstic de residus', fontsize = 12, fontweight = \"bold\")\n # plt.show()\n\n #Dades test\n prediccions = regressio.predict(dades_columna_X_test)\n prediccions_list= []\n for lista in prediccions:\n prediccions_list.append(lista[0])\n prediccions_list = np.asarray(prediccions_list)\n df_predicciones = pandas.DataFrame({'OS' : predict_data_y_test.loc[:,\"OS\"], 'predicció' : prediccions_list})\n\n r2_coef = r2_score(y_true=predict_data_y_test, y_pred=prediccions)\n print(r2_coef)\n\n Aic = calculate_aic(modelo = regressio, X = dades_columna_X_test,y = predict_data_y_test)\n return r_valid, r2_coef, Aic, nom_columna\n \n\n\nprint(seleccio_columnes([\"CPS\",\"TNM\",\"Okuda\",\"CLIP_score\",\"BCLC\"],clinical_data_X_train, clinical_data_X_test))\n\ndef regressió_columnes(columnes,clinical_data_X_train,clinical_data_X_test):\n dades_columnes = pandas.DataFrame()\n dades = []\n for nom in columnes:\n for var in clinical_data_X_train.columns:\n if nom in var:\n dades.append(var)\n dades_columna_X_train = clinical_data_X_train.loc[:,dades]\n dades_columna_X_test = clinical_data_X_test.loc[:,dades]\n names_Xtrain= dades_columna_X_train.index.to_list()\n names_Xtest= dades_columna_X_test.index.to_list() #Llista amb l'ID del pacients que tenen informacio necessaria\n y_train = predict_feature(clinical_data_path, names_Xtrain) #OS dels mateixos pacients\n y_test = predict_feature(clinical_data_path, names_Xtest)\n return fer_regressio_una_columna(\"Escales\", dades_columna_X_train, dades_columna_X_test,y_train,y_test)\n\nprint(regressió_columnes([\"CPS\",\"TNM\",\"Okuda\",\"CLIP_score\",\"BCLC\"],clinical_data_X_train, clinical_data_X_test))","repo_name":"Mia3400/TFG-radiomica","sub_path":"Regressions/Exp_clínic.py","file_name":"Exp_clínic.py","file_ext":"py","file_size_in_byte":8493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29196352876","text":"from Bio import SeqIO\nimport itertools as it\nimport numpy as np\nimport dill\nfrom PIL import Image\nimport math\n\n\nperm12 = it.product(\"ACTG\", repeat=12)\nperm12_l = list(perm12)\nbase12 = [''.join(item) for item in perm12_l]\nrgb = [(x,y,z) for x in range(256) for y in range(256) for z in range(256)]\nd = {c: kmer for c, kmer in zip(rgb, base12)}\nd_rev = {kmer: c 
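\n# (Added note.) The sizes match exactly: 4**12 == 256**3 == 16,777,216, so every 12-mer over {A,C,T,G} maps to a unique (r, g, b) triple, which is what makes this image encoding lossless.\n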
for c, kmer in zip(rgb, base12)}\nrecord, = SeqIO.parse(\"NC_022116.fna\", 'fasta')\ns = str(record.seq)\nn = len(s)\ncomp = math.floor(math.sqrt(n)) + 1\ntarget = comp * comp\ndiff = target - n\niternum = n - (n % 12)\nfinal_diff = diff + (n % 12)\ns_coded = []\nfor i in range(0, iternum, 12):\n s_coded.append(d_rev[s[i:i + 12]])\ns_coded2 = [item for tup in s_coded for item in tup]\ns_coded_b = bytes(s_coded2)\n# coded_final = s_coded_b + bytes(final_diff)\n# dim = int(math.sqrt(len(coded_final)))\n## from divisors its 1131x2265\nprint(len(s_coded_b))\n# s_arr = np.array(s_coded)\n# img = Image.frombytes('P', (1131, 2265), s_coded_b)\n# img_conv = img.convert('RGB')\n# img_arr = np.array(img_conv)\n# img2 = Image.fromarray(s_arr)\n# with open(\"NC_022116.sqz\", 'wb') as f:\n# dill.dump(s_coded_b, f, protocol=dill.HIGHEST_PROTOCOL)\nimg3 = Image.new('RGB', (1131, 755))\nimg3.putdata(s_coded)\nimg3.save(f\"NC_022116_sqz_c.png\")\n","repo_name":"phenolophthaleinum/fasta2png","sub_path":"squeezer.py","file_name":"squeezer.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"611778035","text":"import pandas as pd\nimport numpy as np\nimport datetime\nimport re\n\ndf = pd.read_excel('./PFE/liste globale des PFE.xls')\ndf['link'] = df['Titre'].str.extract(r'(?Phttp[s]?://[^\\s]+)')\n\n\ndf = df.rename(columns={\n 'Titre': 'title',\n 'Auteur': 'publisher',\n 'Sujet': 'description',\n 'Date': 'date',\n 'Organisme': 'contributor',\n 'Encadrants': 'contributosr',\n})\n\n\nrecords = df.to_dict(orient='records')\n\n\nfor record in records:\n\n xml_string = '\\n'\n xml_string += '\\n'\n\n title = record['title']\n url = record['link']\n print(url)\n\n xml_string += f'https://sbn.inpt.ac.ma/handle/123456789/{title}\\n'\n for key, value in record.items():\n xml_string += f'{value}\\n'\n\n xml_string += ''\n\n with open(f'./csv/{title}.xml', 'w', encoding='utf-8') as f:\n f.write(xml_string)\n","repo_name":"kuo-hm/dspace","sub_path":"pandastest.py","file_name":"pandastest.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37195764671","text":"import threading\n\nclass Foo:\n def __init__(self):\n self.lock1 = threading.Lock()\n self.lock2 = threading.Lock()\n\n self.lock1.acquire(blocking=True)\n self.lock2.acquire(blocking=True)\n\n def first(self, printFirst: 'Callable[[], None]') -> None:\n printFirst()\n self.lock1.release()\n\n def second(self, printSecond: 'Callable[[], None]') -> None:\n self.lock1.acquire()\n printSecond()\n self.lock2.release()\n\n def third(self, printThird: 'Callable[[], None]') -> None:\n self.lock2.acquire()\n printThird()\n\n\n\n# --------------------\ndef p1():\n print(\"first\", end=\"\")\n\ndef p2():\n print(\"second\", end=\"\")\n\ndef p3():\n print(\"third\")\n\n# [1, 2, 3]\nfoo1 = Foo()\nt11 = threading.Thread(target=foo1.second, args=(p2,))\nt12 = threading.Thread(target=foo1.third, args=(p3,))\nt13 = threading.Thread(target=foo1.first, args=(p1,))\nt11.start()\nt13.start()\nt12.start()\n\n# [1, 3, 2]\nfoo2 = Foo()\nt21 = threading.Thread(target=foo2.second, args=(p2,))\nt22 = threading.Thread(target=foo2.third, args=(p3,))\nt23 = threading.Thread(target=foo2.first, args=(p1,))\nt21.start()\nt23.start()\nt22.start()\n","repo_name":"LianSheng197/My-LeetCode-Solutions","sub_path":"Easy/1114. 
Print in Order/solution1.py","file_name":"solution1.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4023556146","text":"from __future__ import annotations\n\nfrom pathlib import Path\nfrom typing import Any, Generator\n\nfrom cv2 import CAP_PROP_BUFFERSIZE, VideoCapture\nfrom numpy.typing import NDArray\n\nfrom zoloto.marker_type import MarkerType\n\nfrom .base import BaseCamera\nfrom .mixins import IterableCameraMixin, VideoCaptureMixin, ViewableCameraMixin\nfrom .utils import (\n get_video_capture_resolution,\n set_video_capture_resolution,\n validate_calibrated_video_capture_resolution,\n)\n\n\ndef find_camera_ids() -> Generator[int, None, None]:\n \"\"\"\n Find and return ids of connected cameras.\n\n Works the same as VideoCapture(-1).\n \"\"\"\n for camera_id in range(8):\n capture = VideoCapture(camera_id)\n opened = capture.isOpened()\n capture.release()\n if opened:\n yield camera_id\n\n\nclass Camera(VideoCaptureMixin, IterableCameraMixin, BaseCamera, ViewableCameraMixin):\n def __init__(\n self,\n camera_id: int,\n *,\n marker_size: int | None = None,\n marker_type: MarkerType,\n calibration_file: Path | None = None,\n resolution: tuple[int, int] | None = None,\n ) -> None:\n super().__init__(\n marker_size=marker_size,\n marker_type=marker_type,\n calibration_file=calibration_file,\n )\n self.camera_id = camera_id\n self.video_capture = self.get_video_capture(self.camera_id)\n\n if resolution is not None:\n self._set_resolution(resolution)\n\n if self.calibration_params is not None:\n validate_calibrated_video_capture_resolution(\n self.video_capture,\n self.calibration_params,\n override=resolution is not None,\n )\n\n def __repr__(self) -> str:\n return f\"<{self.__class__.__name__}: {self.camera_id}>\"\n\n def get_video_capture(self, camera_id: int) -> VideoCapture:\n cap = VideoCapture(camera_id)\n cap.set(CAP_PROP_BUFFERSIZE, 1)\n return cap\n\n def _set_resolution(self, resolution: tuple[int, int]) -> None:\n set_video_capture_resolution(self.video_capture, resolution)\n\n def get_resolution(self) -> tuple[int, int]:\n return get_video_capture_resolution(self.video_capture)\n\n def capture_frame(self) -> NDArray:\n # Hack: Double capture frames to fill buffer.\n self.video_capture.read()\n return super().capture_frame()\n\n def close(self) -> None:\n super().close()\n self.video_capture.release()\n\n @classmethod\n def discover(cls, **kwargs: Any) -> Generator[Camera, None, None]:\n for camera_id in find_camera_ids():\n yield cls(camera_id, **kwargs)\n\n\nclass SnapshotCamera(VideoCaptureMixin, BaseCamera):\n \"\"\"\n A modified version of Camera optimised for single use.\n\n - Doesn't keep the camera open between captures\n \"\"\"\n\n def __init__(\n self,\n camera_id: int,\n *,\n marker_size: int | None = None,\n marker_type: MarkerType,\n calibration_file: Path | None = None,\n resolution: tuple[int, int] | None = None,\n ) -> None:\n super().__init__(\n marker_size=marker_size,\n marker_type=marker_type,\n calibration_file=calibration_file,\n )\n self.camera_id = camera_id\n self._resolution = resolution\n\n def __repr__(self) -> str:\n return f\"<{self.__class__.__name__}: {self.camera_id}>\"\n\n def get_video_capture(self, camera_id: int) -> VideoCapture:\n video_capture = VideoCapture(camera_id)\n if self._resolution is not None:\n set_video_capture_resolution(video_capture, self._resolution)\n else:\n self._resolution = 
get_video_capture_resolution(video_capture)\n\n if self.calibration_params is not None:\n validate_calibrated_video_capture_resolution(\n video_capture, self.calibration_params, override=False\n )\n return video_capture\n\n def get_resolution(self) -> tuple[int, int]:\n if self._resolution is None:\n raise ValueError(\n \"Cannot find resolution of camera until at least 1 frame has been captured.\"\n )\n return self._resolution\n\n def capture_frame(self) -> NDArray:\n self.video_capture = self.get_video_capture(self.camera_id)\n frame = super().capture_frame()\n self.video_capture.release()\n return frame\n\n @classmethod\n def discover(cls, **kwargs: Any) -> Generator[SnapshotCamera, None, None]:\n for camera_id in find_camera_ids():\n yield cls(camera_id, **kwargs)\n","repo_name":"RealOrangeOne/zoloto","sub_path":"zoloto/cameras/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":4636,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"81"} +{"seq_id":"70751482826","text":"import logging\nimport time\nfrom secrets import token_hex\nfrom typing import Any, Dict\n\nimport logging_loki\nfrom chaoslib import __version__, experiment_hash\nfrom chaoslib.run import EventHandlerRegistry, RunEventHandler\nfrom chaoslib.types import Activity, Experiment, Journal, Run, Secrets\nfrom logzero import logger as ctk_logger\n\n__all__ = [\"configure_control\"]\nDEFAULT_LOKI_URL = \"http://localhost:3100\"\nloki_logger = logging.getLogger(\"chaostoolkit-loki\")\n\n\ndef configure_control(\n experiment: Experiment,\n secrets: Secrets = None,\n event_registry: EventHandlerRegistry = None,\n loki_endpoint: str = DEFAULT_LOKI_URL,\n tags: Dict[str, str] = None,\n experiment_ref: str = None,\n trace_id: str = None,\n) -> None:\n \"\"\"\n Configure a Python logger that sends its messages to a Loki endpoint.\n\n * `loki_endpoint` is the base url of your Loki service\n * `tags` a mapping of strings injected in all logs\n * `experiment_ref` a unique string identifying this experiment, if none\n is provided, a hash of the experiment is created\n * `trace_id` a unique string for a particular run of the experiment, if\n none is provided, a random string is generated\n\n This sends logs about the run events (started, finished, failed, etc.)\n \"\"\"\n ctk_logger.debug(\"Add Loki handler to logger\")\n\n if event_registry is None:\n ctk_logger.debug(\n \"You may be using an older version of chaostoolkit-lib, make sure \"\n \"you run at least 1.26.0. 
The Grafana extension will not \"\n \"be enabled\"\n )\n return\n\n url = f\"{loki_endpoint}/loki/api/v1/push\"\n auth = secrets.get(\"auth\")\n experiment_ref = experiment_ref or experiment_hash(experiment)\n trace_id = trace_id or token_hex(16)\n tags = tags or {}\n tags.update(\n {\n \"source\": \"chaostoolkit\",\n \"chaostoolkit_lib_version\": __version__,\n \"chaostoolkit_run_trace_id\": trace_id,\n \"chaostoolkit_experiment_ref\": experiment_ref,\n }\n )\n\n experiment_tags = experiment.get(\"tags\")\n if experiment_tags:\n tags.update(experiment_tags)\n\n logging_loki.emitter.LokiEmitter.level_tag = \"level\"\n handler = logging_loki.LokiHandler(\n url=url, tags=tags, auth=auth, version=\"1\"\n )\n handler.emitter = FixedLokiEmitterV1(url, tags, auth)\n handler.setLevel(logging.INFO)\n loki_logger.addHandler(handler)\n loki_logger.setLevel(logging.INFO)\n\n event_registry.register(LokiRunEventHandler())\n\n loki_logger.info(\n \"Experiment started\",\n extra={\n \"tags\": {\n \"type\": \"experiment-started\",\n \"title\": experiment.get(\"title\"),\n }\n },\n )\n\n\ndef before_activity_control(context: Activity, *args, **kwargs) -> None:\n a = context\n loki_logger.info(\n f\"Activity '{a['name']}' started\",\n extra={\n \"tags\": {\"type\": \"experiment-activity-started\", \"name\": a[\"name\"]},\n },\n )\n\n\ndef after_activity_control(\n context: Activity, state: Run, *args, **kwargs\n) -> None:\n a = context\n loki_logger.info(\n f\"Activity '{a['name']}' finished\",\n extra={\n \"tags\": {\n \"type\": \"experiment-activity-finished\",\n \"name\": a[\"name\"],\n \"status\": state[\"status\"],\n \"start\": state[\"start\"],\n \"end\": state[\"end\"],\n \"duration\": state[\"duration\"],\n \"output\": state[\"output\"],\n \"exception\": state.get(\"exception\"),\n },\n },\n )\n\n\n###############################################################################\n# Private functions\n###############################################################################\nclass FixedLokiEmitterV1(logging_loki.emitter.LokiEmitter):\n def build_payload(self, record: logging.LogRecord, line) -> dict:\n \"\"\"Build JSON payload with a log entry.\"\"\"\n labels = self.build_tags(record)\n ts = str(time.time_ns())\n stream = {\n \"stream\": labels,\n \"values\": [[ts, line]],\n }\n return {\"streams\": [stream]}\n\n\nclass LokiRunEventHandler(RunEventHandler):\n def finish(self, journal: Journal) -> None:\n loki_logger.info(\n \"Experiment finished\",\n extra={\n \"tags\": {\n \"type\": \"experiment-finished\",\n \"deviated\": journal.get(\"deviated\"),\n \"status\": journal.get(\"status\"),\n \"start\": journal.get(\"start\"),\n \"end\": journal.get(\"end\"),\n \"duration\": journal.get(\"duration\"),\n },\n },\n )\n\n def interrupted(self, experiment: Experiment, journal: Journal) -> None:\n loki_logger.info(\n \"Experiment interrupted\",\n extra={\"tags\": {\"type\": \"experiment-interrupted\"}},\n )\n\n def signal_exit(self) -> None:\n loki_logger.info(\n \"Experiment exit signal received\",\n extra={\"tags\": {\"type\": \"experiment-exit-signal\"}},\n )\n\n def start_continuous_hypothesis(self, frequency: int) -> None:\n loki_logger.info(\n \"Experiment steady state started running continuously\",\n extra={\"tags\": {\"type\": \"experiment-continuous-ssh-started\"}},\n )\n\n def continuous_hypothesis_iteration(\n self, iteration_index: int, state: Any\n ) -> None:\n loki_logger.info(\n f\"Experiment steady state iteration {iteration_index}\",\n extra={\n \"tags\": {\n \"type\": 
\"experiment-continuous-ssh-iteration\",\n \"iteration\": iteration_index,\n }\n },\n )\n\n def continuous_hypothesis_completed(\n self,\n experiment: Experiment,\n journal: Journal,\n exception: Exception = None,\n ) -> None:\n loki_logger.info(\n \"Experiment continuous steady state completed\",\n extra={\"tags\": {\"type\": \"experiment-continuous-ssh-completed\"}},\n )\n\n def start_hypothesis_before(self, experiment: Experiment) -> None:\n loki_logger.info(\n \"Experiment before steady state started\",\n extra={\"tags\": {\"type\": \"experiment-before-ssh-started\"}},\n )\n\n def hypothesis_before_completed(\n self, experiment: Experiment, state: Dict[str, Any], journal: Journal\n ) -> None:\n extra = {\n \"tags\": {\n \"type\": \"experiment-before-ssh-completed\",\n \"state_met\": state.get(\"steady_state_met\"),\n },\n }\n\n if state.get(\"steady_state_met\") is False:\n for probe in state.get(\"probes\", []):\n if probe[\"tolerance_met\"] is False:\n extra[\"tags\"][\"failed_probe\"] = probe\n break\n\n loki_logger.info(\n \"Experiment before steady state completed\", extra=extra\n )\n\n def start_hypothesis_after(self, experiment: Experiment) -> None:\n loki_logger.info(\n \"Experiment after steady state started\",\n extra={\"tags\": {\"type\": \"experiment-after-ssh-started\"}},\n )\n\n def hypothesis_after_completed(\n self, experiment: Experiment, state: Dict[str, Any], journal: Journal\n ) -> None:\n extra = {\n \"tags\": {\n \"type\": \"experiment-after-ssh-completed\",\n \"state_met\": state.get(\"steady_state_met\"),\n },\n }\n\n if state.get(\"steady_state_met\") is False:\n for probe in state.get(\"probes\", []):\n if probe[\"tolerance_met\"] is False:\n extra[\"tags\"][\"failed_probe\"] = probe\n break\n\n loki_logger.info(\"Experiment after steady state completed\", extra=extra)\n\n def start_method(self, experiment: Experiment) -> None:\n loki_logger.info(\n \"Experiment method started\",\n extra={\"tags\": {\"type\": \"experiment-method-started\"}},\n )\n\n def method_completed(self, experiment: Experiment, state: Any) -> None:\n loki_logger.info(\n \"Experiment method completed\",\n extra={\"tags\": {\"type\": \"experiment-method-completed\"}},\n )\n\n def start_rollbacks(self, experiment: Experiment) -> None:\n loki_logger.info(\n \"Experiment rollbacks started\",\n extra={\"tags\": {\"type\": \"experiment-rollbacks-started\"}},\n )\n\n def rollbacks_completed(\n self, experiment: Experiment, journal: Journal\n ) -> None:\n loki_logger.info(\n \"Experiment rollbacks completed\",\n extra={\"tags\": {\"type\": \"experiment-rollbacks-completed\"}},\n )\n\n def start_cooldown(self, duration: int) -> None:\n loki_logger.info(\n f\"Experiment cooldown period started for {duration}s\",\n extra={\n \"tags\": {\n \"type\": \"experiment-cooldown-started\",\n \"duration\": duration,\n }\n },\n )\n\n def cooldown_completed(self) -> None:\n loki_logger.info(\n \"Experiment cooldown period completed\",\n extra={\"tags\": {\"type\": \"experiment-cooldown-completed\"}},\n )\n","repo_name":"chaostoolkit-incubator/chaostoolkit-grafana","sub_path":"chaosgrafana/controls/loki.py","file_name":"loki.py","file_ext":"py","file_size_in_byte":9317,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"23758978041","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom __future__ import print_function, division, absolute_import\n\n\nfrom PyQt4 import QtGui, QtCore, QtOpenGL\nfrom . 
import viewer, menu, tabber, stats\nfrom .resources import images # noqa unused import, but needed for logo\n\n\nclass GAUDInspectView(QtGui.QMainWindow):\n\n fileDropped = QtCore.pyqtSignal(str)\n\n def __init__(self, app=None):\n super(GAUDInspectView, self).__init__()\n self.app = app\n self.setWindowIcon(QtGui.QIcon(':/logo.png'))\n self.glcontext = QtOpenGL.QGLContext(QtOpenGL.QGLFormat(), None)\n self.setAcceptDrops(True)\n\n self.initUI()\n self.show()\n\n def initUI(self):\n # Set up main window\n self.canvas = QtGui.QWidget(self)\n self.canvas.setObjectName(\"MainCanvas\")\n self.setCentralWidget(self.canvas)\n self.layout = QtGui.QGridLayout(self.canvas)\n\n self.setWindowTitle('GAUDInspect')\n self.setGeometry(0, 0, 900, 600)\n self._center()\n\n # Main layout\n self.left = QtGui.QWidget()\n self.viewer = viewer.get(self.glcontext, self)\n self.stats = stats.get()\n self.tabber = tabber.get()\n self.statusbar = self.statusBar()\n self.splitter = QtGui.QSplitter(QtCore.Qt.Horizontal)\n self.menu = menu.get(self)\n\n # Organize widgets\n self.layout.addWidget(self.splitter, 0, 0)\n self.left_layout = QtGui.QGridLayout(self.left)\n self.left_layout.addWidget(self.viewer, 0, 0)\n self.left_layout.addWidget(self.stats, 0, 0)\n\n self.splitter.addWidget(self.left)\n self.splitter.addWidget(self.tabber)\n self.left_layout.setContentsMargins(0, 0, 0, 0)\n\n def _center(self):\n # get geometry of this frame (a rectangle)\n current_geometry = self.frameGeometry()\n # get center of screen\n desktop_center = QtGui.QDesktopWidget().availableGeometry().center()\n # move rectangle center to desktop center to guess top left location\n current_geometry.moveCenter(desktop_center)\n # move widget to the guessed top left corner\n self.move(current_geometry.topLeft())\n\n # Implement drag&drop\n def dragEnterEvent(self, e):\n if e.mimeData().hasUrls():\n filename = next(str(url.toLocalFile()) for url in e.mimeData().urls())\n if filename.endswith('.gaudi-input') or filename.endswith('.gaudi-output'):\n e.accept()\n self.setWindowOpacity(0.8)\n else:\n e.ignore()\n\n def dragLeaveEvent(self, e):\n self.setWindowOpacity(1.0)\n\n def dropEvent(self, e):\n if e.mimeData().hasUrls():\n e.accept()\n p = next(str(url.toLocalFile()) for url in e.mimeData().urls())\n self.fileDropped.emit(p)\n self.setWindowOpacity(1.0)\n else:\n e.ignore()\n\n # Status bar handler\n def status(self, txt):\n self.statusbar.showMessage(txt, 3000)\n","repo_name":"insilichem/gaudinspect","sub_path":"gaudinspect/view/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26581008442","text":"from flask_restful import Resource, reqparse\nfrom flask_jwt import jwt_required\nfrom models.item import ItemModel\n\n\nclass Item(Resource):\n parse = reqparse.RequestParser()\n parse.add_argument('price',\n type=float,\n required=True,\n help=\"price can't be left blank !!\"\n )\n parse.add_argument('store_id',\n type=int,\n required=True,\n help=\"item must have a store !!\"\n )\n\n @jwt_required()\n def get(self, name):\n item = ItemModel.find_by_name(name)\n if item:\n return item.json(), 200\n\n return {\"message\": \"item doesn't exist\"}, 404 # ERROR CODE FOR NOT FOUND , 200 CODE FOR OK\n\n def post(self, name):\n if ItemModel.find_by_name(name):\n return {\"message\": \"item {} already exist\".format(name)}, 400 # ERROR CODE\n\n data = Item.parse.parse_args()\n item = ItemModel(name, **data)\n\n try:\n 
item.save_to_db()\n except:\n return {\"message\": \"An error occurred inserting the item\"}, 500\n\n return item.json(), 201\n\n def delete(self, name):\n item = ItemModel.find_by_name(name)\n if not item:\n return {\"message\": \"item {} not found\".format(name)}, 400 # ERROR CODE\n\n item.delete()\n return {\"message\": \"item {} was deleted\".format(name)}, 200\n\n def put(self, name):\n data = Item.parse.parse_args()\n item = ItemModel.find_by_name(name)\n\n if item is None:\n item = ItemModel(name, **data)\n else:\n item.price = data[\"price\"]\n item.store_id = data[\"store_id\"]\n\n item.save_to_db()\n\n return item.json(), 200\n\n\nclass ItemList(Resource):\n\n def get(self):\n return {\"items\": [item.json() for item in ItemModel.query.all()]}\n","repo_name":"Psy112/flask_restfull_SQLAlchemy_Heroku","sub_path":"resources/item.py","file_name":"item.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5423321850","text":"# Uses python3\nimport sys\n\ndef get_optimal_value(capacity, weights, values):\n value = 0\n if capacity == 0:\n return 0\n while capacity > 0:\n maxValueIndex = get_max_value_per_unit(weights, values)\n if maxValueIndex >= 0:\n weightsUsed = min(capacity, weights[maxValueIndex])\n value += (weightsUsed*(values[maxValueIndex]/weights[maxValueIndex]))\n weights[maxValueIndex] -= weightsUsed\n capacity -= weightsUsed\n else:\n return value\n return value\ndef get_max_value_per_unit(weights, values):\n max = 0\n maxIndex = -1\n for i in range(len(weights)):\n if weights[i] > 0:\n if (max < (values[i]/weights[i])):\n maxIndex = i\n max = (values[i]/weights[i])\n return maxIndex\n\nif __name__ == \"__main__\":\n data = list(map(int, sys.stdin.read().split()))\n n, capacity = data[0:2]\n values = data[2:(2 * n + 2):2]\n weights = data[3:(2 * n + 2):2]\n opt_value = get_optimal_value(capacity, weights, values)\n print(\"{:.10f}\".format(opt_value))\n","repo_name":"Russellkusuma/GoogleCSSICoursera_AlgorithmicToolbox","sub_path":"week3_greedy_algorithms/2_maximum_value_of_the_loot/fractional_knapsack.py","file_name":"fractional_knapsack.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3711642571","text":"import pygame\n\npygame.init()\nscreen = pygame.display.set_mode((640, 480), 0, 32)\nsurface = pygame.Surface(screen.get_size())\n\nwhile True:\n screen.fill((255, 255, 255))\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n exit()\n\n # print('drawing')\n # time.sleep(1)\n pygame.draw.rect(screen, (128, 0, 0), (0, 0, 128, 128))\n # screen.blit(surface, (0, 0))\n pygame.display.update()\n","repo_name":"noobxiaohai/shotball_demo","sub_path":"surface.py","file_name":"surface.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35370231057","text":"# gcn_model.py\r\n\r\nimport torch\r\nimport torch.nn.functional as F\r\nfrom torch_geometric.nn import GCNConv, GENConv\r\nfrom gcn import add_features_, dataloader\r\nfrom torch_geometric.data import InMemoryDataset\r\nfrom torch_geometric.data import ClusterData, ClusterLoader\r\nfrom sklearn.metrics import mean_squared_error, r2_score\r\n\r\n\r\nnum_epochs = 20\r\ndata_, G = dataloader()\r\ndata_ = add_features_(data_, G)\r\ndataset = data_\r\nprint(dataset)\r\n# dataset 
= InMemoryDataset.collate(data)\r\ncluster_data = ClusterData(data_, num_parts=50, recursive=False)\r\ntest_mask = cluster_data\r\ntrain_loader = ClusterLoader(cluster_data, batch_size=5, shuffle=True,\r\n\t\t\t\t\t\t\t num_workers=12)\r\n\r\nclass Net(torch.nn.Module):\r\n\tdef __init__(self):\r\n\t\tsuper(Net, self).__init__()\r\n\t\tself.conv1 = GCNConv(dataset.num_node_features, 32)\r\n\t\t# self.conv2 = GCNConv(16, dataset.num_classes)\r\n\t\tself.conv3 = GCNConv(32, 16)\r\n\t\tself.conv2 = GCNConv(16, dataset.y.shape[1])\r\n\r\n\tdef forward(self, data):\r\n\t\tx, edge_index = data.x, data.edge_index\r\n\t\tx = self.conv1(x, edge_index)\r\n\t\tx = F.relu(x)\r\n\t\tx = self.conv3(x, edge_index)\r\n\t\tx = F.relu(x)\r\n\t\tx = F.dropout(x, training=self.training)\r\n\t\tx = self.conv2(x, edge_index)\r\n\r\n\t\treturn F.relu(x)\r\n\t\t# return x\r\n\r\n\r\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\r\nmodel = Net().to(device)\r\ndata = data_.to(device)\r\n\r\n\r\ntest_mask = data_\r\noptimizer = torch.optim.Adam(model.parameters(), lr=0.1, weight_decay=5e-1)\r\n\r\ndef train():\r\n\tmodel.train()\r\n\tloss_all=0\r\n\tfor batch in train_loader:\r\n\t\tbatch = batch.to(device)\r\n\t\toptimizer.zero_grad()\r\n\t\t# train on the sampled cluster mini-batch rather than the full graph\r\n\t\tout = model(batch)\r\n\t\tloss = F.mse_loss(out, batch.y)\r\n\t\tloss.backward()\r\n\t\tloss_all += loss.item()\r\n\t\toptimizer.step()\r\n\tprint(loss_all)\r\n\r\n\r\nfor epoch in range(num_epochs):\r\n\ttrain()\r\n\r\n\r\n\r\nmodel.eval()\r\npred = model(data)\r\n# print(data.y.shape)\r\n# print(pred.shape)\r\nmse = mean_squared_error(data.y.detach().cpu().numpy(), pred.detach().cpu().numpy(), multioutput='uniform_average')\r\nr_square = r2_score(data.y.detach().cpu().numpy(), pred.detach().cpu().numpy(), multioutput= 'uniform_average')\r\nprint(mse)\r\nprint(r_square)\r\n","repo_name":"Climate-Crisis-AI-Team-Vel-Ice/sea-ice-challenge","sub_path":"modules/ml_pipeline/gcn_model.py","file_name":"gcn_model.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"39743296974","text":"from typing import List\n\n\nclass Solution:\n \"\"\"\n You are keeping the scores for a baseball game with strange rules. 
At the beginning of the game, you start with an empty record.\n You are given a list of strings operations, where operations[i] is the ith operation you must apply to the record and\n is one of the following:\n An integer x.\n Record a new score of x.\n '+'.\n Record a new score that is the sum of the previous two scores.\n 'D'.\n Record a new score that is the double of the previous score.\n 'C'.\n Invalidate the previous score, removing it from the record.\n Return the sum of all the scores on the record after applying all the operations.\n\n The test cases are generated such that the answer and all intermediate calculations fit in a 32-bit integer and\n that all operations are valid.\n\n Best Case -\n Time complexity is O(n) since we have to traverse through all the elements of the array atleast once\n Space complexity is O(n) as we are creating a stack of size of the array\n\n Soln 1 -\n Use Stacks to append and pop elements based on the operations in the array\n \"\"\"\n\n def calPoints(self, operations: List[str]) -> int:\n stack = []\n\n for operation in operations:\n if operation == \"+\":\n stack.append(stack[-1] + stack[-2])\n elif operation == \"D\":\n stack.append(2 * stack[-1])\n elif operation == \"C\":\n stack.pop()\n else:\n stack.append(int(operation))\n return sum(stack)\n","repo_name":"anurag3/python-scratch-pad","sub_path":"682-Baseball-Game.py","file_name":"682-Baseball-Game.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20392526428","text":"# -*- encoding: utf-8 -*-\n# author:Shane Yao(future0906@gmail.com)\n# create@:2017/06/25\n\nfrom ..factory import CommandFactory\n\nfrom snowflake.core.models.entries.text_entry import TextEntry\n\n\n@CommandFactory.executor\nclass CreateTextEntry(object):\n def __init__(self, queue, cmd_obj):\n self.command = cmd_obj\n self.command_queue = queue\n\n def exec(self, guid, content_format, title, content):\n new_article = TextEntry()\n new_article.title = title\n new_article.guid = guid\n new_article.content_format = content_format\n new_article.content = content\n","repo_name":"SnowflakeCMS/SnowflakePortal","sub_path":"snowflake/core/commands/executors/create_text_entry.py","file_name":"create_text_entry.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39838665456","text":"import numpy as np\nimport torch\nimport torch.nn as nn\n\n\nclass RadialBasisFunctionExpansion(nn.Module):\n\tdef __init__(self, n_bin, low=0, high=20):\n\t\tsuper().__init__()\n\t\tassert high > low\n\n\t\tself.n_center = n_bin\n\t\tself.gap = (high - low) / n_bin\n\t\tself.centers = nn.Parameter(\n\t\t\ttorch.FloatTensor(np.linspace(low, high, n_bin)).view(1, -1))\n\n\tdef forward(self, dist):\n\t\t'''\n\t\tinput: dist: [(b*n_v*n_v) x 1]\n\t\toutput: rbf: [(b*n_v*n_v) x n_bin]\n\t\t'''\n\n\t\trbf = dist.unsqueeze(-1) - self.centers # [(b*n_v*n_v) x n_centers]\n\t\trbf = torch.exp(-(rbf.pow(2)) / self.gap)\n\t\treturn rbf\n\n\ndef segment_max(logit, n_seg, seg_i, idx_j):\n\tmax_seg_numel = idx_j.max().item() + 1\n\tseg_max = logit.new_full((n_seg, max_seg_numel), -np.inf)\n\tseg_max = seg_max.index_put_((seg_i, idx_j), logit).max(dim=1)[0]\n\treturn seg_max[seg_i]\n\n\ndef segment_sum(logit, n_seg, seg_i):\n\tnorm = logit.new_zeros(n_seg).index_add(0, seg_i, logit)\n\treturn norm[seg_i]\n\n\ndef segment_softmax(logit, n_seg, seg_i, idx_j, 
temperature):\n\tlogit_max = segment_max(logit, n_seg, seg_i, idx_j).detach()\n\tlogit = torch.exp((logit - logit_max) / temperature)\n\tlogit_norm = segment_sum(logit, n_seg, seg_i)\n\tprob = logit / (logit_norm + 1e-8)\n\treturn prob\n\n\ndef segment_multihead_expand(seg_i, n_seg, n_head):\n\ti_head_shift = n_seg * seg_i.new_tensor(torch.arange(n_head))\n\tseg_i = (seg_i.view(-1, 1) + i_head_shift.view(1, -1)).view(-1)\n\treturn seg_i\n\n\nclass CoAttention(nn.Module):\n\tdef __init__(self, d_in, d_out, n_head=1, dropout=0.1):\n\t\tsuper().__init__()\n\t\tself.temperature = np.sqrt(d_in)\n\n\t\tself.n_head = n_head\n\t\tself.multi_head = self.n_head > 1\n\n\t\tself.key_proj = nn.Linear(d_in, d_in * n_head, bias=False)\n\t\tself.val_proj = nn.Linear(d_in, d_in * n_head, bias=False)\n\t\tnn.init.xavier_normal_(self.key_proj.weight)\n\t\tnn.init.xavier_normal_(self.val_proj.weight)\n\n\t\tself.attn_drop = nn.Dropout(p=dropout)\n\t\tself.out_proj = nn.Sequential(\n\t\t\tnn.Linear(d_in * n_head, d_out),\n\t\t\tnn.LeakyReLU(), nn.Dropout(p=dropout))\n\n\tdef forward(self, node1, seg_i1, idx_j1, node2, seg_i2, idx_j2, entropy=[]):\n\n\t\tprint(\"node1.shape \", node1.shape)\n\t\tprint(\"node2.shape \", node2.shape)\n\n\t\td_h = node1.size(1)\n\n\t\tn_seg1 = node1.size(0)\n\t\tn_seg2 = node2.size(0)\n\n\t\t# Copy center for attention key\n\t\tnode1_ctr = self.key_proj(node1).index_select(0, seg_i1)\n\t\tnode2_ctr = self.key_proj(node2).index_select(0, seg_i2)\n\n\t\t# Copy neighbor for attention value\n\t\tnode1_nbr = self.val_proj(node2).index_select(0, seg_i2) # idx_j1 == seg_i2\n\t\tnode2_nbr = self.val_proj(node1).index_select(0, seg_i1) # idx_j2 == seg_i1\n\n\t\targ_i1 = None\n\t\targ_i2 = None\n\n\t\tif self.multi_head:\n\t\t\t# prepare copied and shifted index tensors\n\t\t\tseg_i1 = segment_multihead_expand(seg_i1, n_seg1, self.n_head)\n\t\t\tseg_i2 = segment_multihead_expand(seg_i2, n_seg2, self.n_head)\n\n\t\t\tidx_j1 = idx_j1.unsqueeze(1).expand(-1, self.n_head).contiguous().view(-1)\n\t\t\tidx_j2 = idx_j2.unsqueeze(1).expand(-1, self.n_head).contiguous().view(-1)\n\n\t\t\t# prepare for the final multi-head concatenation\n\t\t\targ_i1 = segment_multihead_expand(\n\t\t\t\tseg_i1.new_tensor(np.arange(n_seg1)), n_seg1, self.n_head)\n\t\t\targ_i2 = segment_multihead_expand(\n\t\t\t\tseg_i2.new_tensor(np.arange(n_seg2)), n_seg2, self.n_head)\n\n\t\t\t# pile up as regular input\n\t\t\tnode1_ctr = node1_ctr.view(-1, d_h)\n\t\t\tnode2_ctr = node2_ctr.view(-1, d_h)\n\n\t\t\tnode1_nbr = node1_nbr.view(-1, d_h)\n\t\t\tnode2_nbr = node2_nbr.view(-1, d_h)\n\n\t\t\t# new numbers of segments\n\t\t\tn_seg1 = n_seg1 * self.n_head\n\t\t\tn_seg2 = n_seg2 * self.n_head\n\n\t\ttranslation = (node1_ctr * node2_ctr).sum(1)\n\n\t\t# TODO!! 
Remove this while training, this is just for entropy.\n\t\t#translation = torch.ones_like(translation)\n\n\t\t# Calculate attention weight as edges between two graphs\n\t\tnode1_edge = self.attn_drop(segment_softmax(\n\t\t\ttranslation, n_seg1, seg_i1, idx_j1, self.temperature))\n\t\tnode2_edge = self.attn_drop(segment_softmax(\n\t\t\ttranslation, n_seg2, seg_i2, idx_j2, self.temperature))\n\n\t\tprint(\"before node1 shape\", node1_edge.shape)\n\n\t\tnode1_edge = node1_edge.view(-1, 1)\n\t\tnode2_edge = node2_edge.view(-1, 1)\n\n\t\tprint(\"after node1 shape\", node1_edge.shape)\n\n\n\t\t# Weighted sum\n\t\tmsg1 = node1.new_zeros((n_seg1, d_h)).index_add(0, seg_i1, node1_edge * node1_nbr)\n\t\tmsg2 = node2.new_zeros((n_seg2, d_h)).index_add(0, seg_i2, node2_edge * node2_nbr)\n\n\t\t# Entropy computation\n\t\t#ent1 = node1.new_zeros((n_seg1, 1)).index_add(0, seg_i1, torch.sum(node1_edge * torch.log(node1_edge + 1e-7), -1))\n\t\t#ent2 = node2.new_zeros((n_seg2, 1)).index_add(0, seg_i2, torch.sum(node2_edge * torch.log(node2_edge + 1e-7), -1))\n\t\t#entropy.append(ent1)\n\t\t#entropy.append(ent2)\n\n\t\tif self.multi_head:\n\t\t\tmsg1 = msg1[arg_i1].view(-1, d_h * self.n_head)\n\t\t\tmsg2 = msg2[arg_i2].view(-1, d_h * self.n_head)\n\n\t\tmsg1 = self.out_proj(msg1)\n\t\tmsg2 = self.out_proj(msg2)\n\n\t\treturn msg1, msg2, node1_edge, node2_edge\n\n\nclass MessagePassing(nn.Module):\n\tdef __init__(self, d_node, d_edge, d_hid, dropout=0.1):\n\t\tsuper().__init__()\n\n\t\tdropout = nn.Dropout(p=dropout)\n\n\t\tself.node_proj = nn.Sequential(\n\t\t\tnn.Linear(d_node, d_hid, bias=False), dropout)\n\n\t\tself.edge_proj = nn.Sequential(\n\t\t\tnn.Linear(d_edge, d_hid), nn.LeakyReLU(), dropout,\n\t\t\tnn.Linear(d_hid, d_hid), nn.LeakyReLU(), dropout)\n\n\t\tself.msg_proj = nn.Sequential(\n\t\t\tnn.Linear(d_hid, d_hid), nn.LeakyReLU(), dropout,\n\t\t\tnn.Linear(d_hid, d_hid), dropout)\n\n\tdef forward(self, node, edge, seg_i, idx_j):\n\t\tedge = self.edge_proj(edge)\n\t\tmsg = self.node_proj(node)\n\t\tmsg = self.message_composing(msg, edge, idx_j)\n\t\tmsg = self.message_aggregation(node, msg, seg_i)\n\t\treturn msg\n\n\tdef message_composing(self, msg, edge, idx_j):\n\t\tmsg = msg.index_select(0, idx_j) # neighbors\n\t\tmsg = msg * edge # element-wise multiplication composition\n\t\treturn msg\n\n\tdef message_aggregation(self, node, msg, seg_i):\n\t\tmsg = torch.zeros_like(node).index_add(0, seg_i, msg) # sum over messages\n\t\treturn msg\n\n\nclass CoAttentionMessagePassingNetwork(nn.Module):\n\tdef __init__(self, d_hid, d_readout, n_prop_step, n_head=1, dropout=0.1, update_method='res'):\n\n\t\tsuper().__init__()\n\n\t\tself.n_prop_step = n_prop_step\n\n\t\tif update_method == 'res':\n\t\t\tx_d_node = lambda step_i: 1\n\t\t\tself.update_fn = lambda x, y, z: x + y + z\n\t\telif update_method == 'den':\n\t\t\tx_d_node = lambda step_i: 1 + 2 * step_i\n\t\t\tself.update_fn = lambda x, y, z: torch.cat([x, y, z], -1)\n\t\telse:\n\t\t\traise NotImplementedError\n\n\t\tself.mps = nn.ModuleList([\n\t\t\tMessagePassing(\n\t\t\t\td_node=d_hid * x_d_node(step_i),\n\t\t\t\td_edge=d_hid, d_hid=d_hid, dropout=dropout)\n\t\t\tfor step_i in range(n_prop_step)])\n\n\t\tself.coats = nn.ModuleList([\n\t\t\tCoAttention(\n\t\t\t\td_in=d_hid * x_d_node(step_i),\n\t\t\t\td_out=d_hid, n_head=n_head, dropout=dropout)\n\t\t\tfor step_i in range(n_prop_step)])\n\n\t\tself.lns = nn.ModuleList([\n\t\t\tnn.LayerNorm(d_hid * x_d_node(step_i))\n\t\t\tfor step_i in range(n_prop_step)])\n\n\t\tself.pre_readout_proj = 
nn.Sequential(\n\t\t\tnn.Linear(d_hid * x_d_node(n_prop_step), d_readout),\n\t\t\tnn.LeakyReLU())\n\n\tdef forward(\n\t\t\tself,\n\t\t\tseg_g1, node1, edge1, inn_seg_i1, inn_idx_j1, out_seg_i1, out_idx_j1,\n\t\t\tseg_g2, node2, edge2, inn_seg_i2, inn_idx_j2, out_seg_i2, out_idx_j2,\n\t\t\t\t\t\tentropies=[]):\n\n\t\tfor step_i in range(self.n_prop_step):\n\t\t\t#if step_i >= len(entropies):\n\t\t\t#\tentropies.append([])\n\t\t\tinner_msg1 = self.mps[step_i](node1, edge1, inn_seg_i1, inn_idx_j1)\n\t\t\tinner_msg2 = self.mps[step_i](node2, edge2, inn_seg_i2, inn_idx_j2)\n\t\t\touter_msg1, outer_msg2, attn1, attn2 = self.coats[step_i](\n\t\t\t\tnode1, out_seg_i1, out_idx_j1,\n\t\t\t\tnode2, out_seg_i2, out_idx_j2, [])\n\t\t\t\t# node2, out_seg_i2, out_idx_j2, entropies[step_i])\n\n\t\t\tnode1 = self.lns[step_i](self.update_fn(node1, inner_msg1, outer_msg1))\n\t\t\tnode2 = self.lns[step_i](self.update_fn(node2, inner_msg2, outer_msg2))\n\n\t\tg1_vec = self.readout(node1, seg_g1)\n\t\tg2_vec = self.readout(node2, seg_g2)\n\n\t\treturn g1_vec, g2_vec, attn1, attn2\n\n\tdef readout(self, node, seg_g):\n\t\tsz_b = seg_g.max() + 1\n\n\t\tnode = self.pre_readout_proj(node)\n\t\td_h = node.size(1)\n\n\t\tencv = node.new_zeros((sz_b, d_h)).index_add(0, seg_g, node)\n\t\treturn encv\n","repo_name":"andreeadeac22/graph_coattention","sub_path":"modules.py","file_name":"modules.py","file_ext":"py","file_size_in_byte":8005,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"81"} +{"seq_id":"11771329110","text":"import requests\nfrom time import sleep\n\n\nclass TelegramBot:\n def __init__(self, token):\n self.token = token\n self.api_url = \"https://api.telegram.org/bot{}/\".format(token)\n\n def get_updates(self, offset=None, timeout=30):\n method = 'getUpdates'\n params = {'timeout': timeout, 'offset': offset}\n resp = requests.get(self.api_url + method, params)\n result_json = resp.json()['result']\n return result_json\n\n def get_chat_id(self, update):\n chat_id = update['message']['chat']['id']\n return chat_id\n\n def send_message(self, chat_id, text):\n params = {'chat_id': chat_id, 'text': text}\n method = 'sendMessage'\n resp = requests.post(self.api_url + method, params)\n return resp\n\n def get_last_update(self):\n get_result = self.get_updates()\n if len(get_result) > 0:\n last_update = get_result[-1]\n else:\n last_update = get_result[len(get_result)]\n return last_update\n\n\ndef main():\n token = \"1325110987:AAHkykK4bYufy1gCg-0kUwTa3Wqlq8qmDmg\"\n bot = TelegramBot(token)\n update_id = bot.get_last_update()['update_id']\n while True:\n if update_id == bot.get_last_update()['update_id']:\n chat_id = bot.get_chat_id(bot.get_last_update())\n bot.send_message(chat_id, \"Hello\")\n update_id += 1\n sleep(1)\n\n\nif __name__ == '__main__':\n main()","repo_name":"Foxxxxxy/gegebot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19033403924","text":"#https://www.codewars.com/kata/56676e8fabd2d1ff3000000c/train/python\n\n'''\nCan you find the needle in the haystack?\n\nWrite a function findNeedle() that takes an array full of junk but containing one \"needle\"\n\nAfter your function finds the needle it should return a message (as a string) that says:\n\n\"found the needle at position \" plus the index it found the needle, so:\n\nfind_needle(['hay', 'junk', 'hay', 'hay', 'moreJunk', 'needle', 
'randomJunk'])\nshould return \"found the needle at position 5\" (in COBOL \"found the needle at position 6\")\n'''\ndef find_needle(haystack):\n for each in haystack:\n if each == \"needle\":\n inde = haystack.index(each)\n return \"found the needle at position {}\" .format(inde)\n \n\nfind_needle(['3', '123124234', None, 'needle', 'world', 'hay', 2, '3', True, False])\n\n'''\nTop Rated answer:\n\ndef find_needle(haystack): return 'found the needle at position %d' % haystack.index('needle')\n\nPersonal note answer:\n\n\n'''\n","repo_name":"OwlNet-C/CodeWars-Answers","sub_path":"Python/8-kyu/Needle_in_A_Haystack.py","file_name":"Needle_in_A_Haystack.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42593281867","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport time\nimport os\nimport csv\nimport sys\n\ndef open_csv(dirname, msgname, msg_dict):\n\n filename = dirname + '/' + msgname + '.csv'\n\n # open file for saving\n if sys.version_info >= (3,0,0):\n f = open(filename, 'w', newline='')\n else:\n f = open(filename, 'wb')\n\n fieldnames = []\n for name in msg_dict:\n fieldnames.append(name)\n\n writer = csv.DictWriter(f, fieldnames, delimiter=';')\n writer.writeheader()\n return writer\n\n\nclass CsvWorker(object):\n \"\"\" Description here \"\"\"\n\n def __init__(self, dirname, msgname):\n self.msgname = msgname\n self.dirname = dirname\n self.writer = None\n\n def write(self, mav_msg, time_boot_ms, time_usec):\n d = mav_msg.to_dict()\n\n del d[\"mavpackettype\"]\n\n if time_boot_ms != None:\n d[\"time_boot_ms\"] = time_boot_ms\n if time_usec != None:\n d[\"time_usec\"] = time_usec\n\n if self.writer is None :\n self.writer = open_csv(self.dirname, self.msgname, d)\n print(self.msgname)\n\n self.writer.writerow(d)\n\n\nclass CsvPool(object):\n \"\"\" Description here \"\"\"\n\n def __init__(self, dirname=None):\n self.time_boot_ms = 0\n self.time_usec = 0\n\n if dirname is None:\n self.dirname = time.strftime('%Y-%m-%d~%H.%M.%S')\n else:\n self.dirname = dirname\n\n if not os.path.exists(self.dirname):\n os.makedirs(self.dirname)\n self.wdict = {}\n\n def post(self, mavmsg):\n posted = False\n boot = None\n utc = None\n\n if (mavmsg.get_type() == 'GRIFFON_MEASUREMENT'):\n self.time_boot_ms = mavmsg.time_boot_ms\n self.time_usec = mavmsg.time_usec\n\n if not hasattr(mavmsg, 'time_boot_ms'):\n boot = self.time_boot_ms\n else:\n boot = None\n\n if not hasattr(mavmsg, 'time_usec'):\n utc = self.time_usec\n else:\n utc = None\n\n name = mavmsg.get_type()\n try:\n self.wdict[name].write(mavmsg, boot, utc)\n except KeyError:\n self.wdict[name] = CsvWorker(self.dirname, name)\n self.wdict[name].write(mavmsg, boot, utc)\n\n\n","repo_name":"barthess/u2","sub_path":"tools/data_acquisition/csvpool.py","file_name":"csvpool.py","file_ext":"py","file_size_in_byte":2262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36139651928","text":"from AccessControl import ClassSecurityInfo\nfrom Products.ATExtensions.widget import RecordWidget\nfrom Products.Archetypes.Registry import registerWidget\nimport datetime\n\nclass CoordinateWidget(RecordWidget):\n security = ClassSecurityInfo()\n _properties = RecordWidget._properties.copy()\n _properties.update({\n 'macro': \"bika_widgets/coordinatewidget\",\n })\n\nregisterWidget(CoordinateWidget,\n title = 'CoordinateWidget',\n description = '',\n 
)\n","repo_name":"hocinebendou/bika.gsoc","sub_path":"bika/lims/browser/widgets/coordinatewidget.py","file_name":"coordinatewidget.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3887114052","text":"import sys, fileinput\ninstructions = [list(line.strip()) for line in fileinput.input(files=sys.argv[1])]\n\nship_dir = 'E'\ncoord_east = 0\ncoord_north = 0\n\ndef read_instruction(action, value):\n if action == \"F\":\n move(ship_dir, value)\n elif action in [\"N\",\"E\",\"S\",\"W\"]:\n move(action, value)\n elif action in [\"L\",\"R\"]:\n turn(action, value)\n\ndef move(action, value):\n global coord_east, coord_north\n if action == \"N\":\n coord_north += value\n elif action == \"E\":\n coord_east += value\n elif action == \"S\":\n coord_north -= value\n elif action == \"W\":\n coord_east -= value\n\ndef turn(action, value):\n global ship_dir\n \n turn_dir = 0\n if action == \"R\":\n turn_dir = 1\n elif action == \"L\":\n turn_dir = -1\n \n # counteract out of bounds by repeating direction string\n # 3 times (NESWNESWNESW) and then locate 2nd occurence\n nesw = \"NESW\"*3\n # index of second occurence of \"ship_dir\"\n index = nesw.index(ship_dir, nesw.index(ship_dir)+ 1)\n \n ship_dir = nesw[index + turn_dir * int((value/90))]\n \n \nfor instruction in instructions:\n action = instruction[0]\n value = int(\"\".join(instruction[1:]))\n \n read_instruction(action,value)\n # print(action,value,\"--\",ship_dir, coord_north, coord_east)\n\nmh_dist = abs(coord_north)+abs(coord_east)\nprint(coord_east,\"east,\",coord_north,\"north: MH dist =\",mh_dist)\n\n","repo_name":"VincentVelthuis/advent2020","sub_path":"12/day12a.py","file_name":"day12a.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39719025020","text":"import yt_dlp\n\nfrom downloader.Downloader import Downloader\n\n\nclass YouTubeDownloader(Downloader):\n def __init__(self, video_id, proxy=None):\n super().__init__('YouTube', video_id, proxy)\n self.url = 'https://www.youtube.com/watch?v=' + video_id\n\n def video_info(self):\n params = {'proxy': self.proxy} if self.proxy else {}\n return yt_dlp.YoutubeDL(params).extract_info(self.url, download=False)\n\n def download(self):\n ydl_opts = {\n 'format': 'bestvideo+bestaudio/best',\n 'outtmpl': 'videos/%(id)s.%(ext)s'\n }\n if self.proxy:\n ydl_opts['proxy'] = self.proxy\n with yt_dlp.YoutubeDL(ydl_opts) as ydl:\n ydl.download([self.url])\n\n info = self.video_info()\n return {'title': info['title'], 'file': f\"{info['id']}.{info['ext']}\"}\n\n\nif __name__ == \"__main__\":\n d = YouTubeDownloader('bWM633v77-I', 'socks5://127.0.0.1:1080/')\n rs = d.download()\n print(rs)\n","repo_name":"kingviewer/video-carrier","sub_path":"downloader/YouTubeDownloader.py","file_name":"YouTubeDownloader.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"81"} +{"seq_id":"16955459624","text":"\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Intel nGraph library documentation build configuration file, created by\n# sphinx-quickstart on Mon Dec 25 13:04:12 2017.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are 
commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\nimport os\nimport sys\n\n# Add path to nGraph Python API.\n\nsys.path.insert(0, os.path.abspath('../../../python'))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\nneeds_sphinx = '1.7.9'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.ifconfig',\n 'sphinx.ext.viewcode',\n 'breathe',\n ]\n\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\nstatic_path = ['static']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'nGraph Compiler Stack'\ncopyright = '2017-2020, Intel Corporation'\nauthor = 'Intel Corporation'\n\n# License specifics see LICENSE of component\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '0.29'\n\n# The Documentation full version, including alpha/beta/rc tags. Some features\n# available in the latest code will not necessarily be documented first\nrelease = '0.29.0'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = 'en'\n\n# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'\n# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# These patterns also affect html_static_path and html_extra_path\nexclude_patterns = []\n\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n\n\n# -- Options for HTML output ----------------------------------------------\n\nhtml_title = \"Documentation for the nGraph Library and Compiler Stack\"\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'ngraph_theme'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n# html_theme_options = {}\nhtml_logo = '../ngraph_theme/static/logo.png'\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. 
This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\nhtml_favicon = '../ngraph_theme/static/favicon.ico'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['../ngraph_theme/static']\n\n# Add any paths that contain custom themes here, relative to this directory.\nhtml_theme_path = [\"../\"]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# This is required for the alabaster theme\n# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars\nhtml_sidebars = {\n '**': [\n 'relations.html', # needs 'show_related': True theme option to display\n 'searchbox.html',\n ]\n}\n\nhtml_last_updated_fmt= ''\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'IntelnGraphlibrarydoc'\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'nGraphCompilerStack.tex', u'nGraph Compiler Stack Documentation',\n u'Intel Corporation', 'manual'),\n]\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'ngraphcompiler', 'nGraph Compiler stack',\n [author], 1)\n]\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'IntelnGraphlibrary', 'Intel nGraph Library',\n author, 'IntelnGraphlibrary', 'Documentation for Intel nGraph Library code base',\n 'Miscellaneous'),\n]\n\nbreathe_projects = {\n \"ngraph\": \"../../doxygen/xml\",\n}\n\nrst_epilog = u\"\"\"\n.. 
include:: /replacements.txt\n\"\"\"\n\n# -- autodoc Extension configuration --------------------------------------\n\nautodoc_mock_imports = ['ngraph.impl', 'ngraph.utils']\n","repo_name":"NervanaSystems/ngraph","sub_path":"doc/sphinx/source/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":6437,"program_lang":"python","lang":"en","doc_type":"code","stars":1358,"dataset":"github-code","pt":"81"} +{"seq_id":"39575801264","text":"from abc import ABC, abstractmethod\n\n\nclass Booth(ABC):\n def __init__(self, booth_number, capacity):\n self.booth_number = booth_number\n self.capacity = capacity\n self.delicacy_orders = []\n self.price_for_reservation = 0\n self.is_reserved = False\n\n @property\n @abstractmethod\n def price_per_person(self):\n pass\n\n @property\n def capacity(self):\n return self.__capacity\n\n @capacity.setter\n def capacity(self, value):\n if value < 0:\n raise ValueError(\"Capacity cannot be a negative number!\")\n self.__capacity = value\n\n def reserve(self, number_of_people):\n price = self.price_per_person * number_of_people\n self.price_for_reservation = price\n self.is_reserved = True\n\n def calculate_bill(self):\n bill = 0\n for delicacy_ord in self.delicacy_orders:\n bill += delicacy_ord.price\n\n bill += self.price_for_reservation\n\n return bill\n\n\n\n\n\n\n\n","repo_name":"vessln/Softuni_OOP_exams","sub_path":"Exam - 10 December 2022/project/booths/booth.py","file_name":"booth.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17573742593","text":"import discord, os, asyncio\nfrom discord.ext import commands\nfrom colorama import Fore\n\n\ndef nukbot(prefix,token):\n\tclass Nukr(commands.Bot):\n\t\tdef __init__(self,pre):\n\t\t\tintents = discord.Intents.all()\n\t\t\tintents.message_content = True\n\t\t\tsuper().__init__(command_prefix=pre, \n\t\t\t\tintents=intents, help_command=None)\n\t\n\t\n\tclient = Nukr(prefix)\n\t\n\tasync def load():\t\t\n\t\tfor folder in os.listdir(\"./cogs\"):\n\t\t\tif folder.endswith(\".py\"):\n\t\t\t\tawait client.load_extension(f\"cogs.commands.{folder[:-3]}\")\n\t\t\t\tprint(f\"{Fore.LIGHTGREEN_EX}[+] Loaded {folder[:-3]}\")\n\t\t\telse:\n\t\t\t\tfor file in os.listdir(f\"./cogs/{folder}\"):\n\t\t\t\t\tif file.endswith(\".py\"):\n\t\t\t\t\t\tawait client.load_extension(f\"cogs.{folder}.{file[:-3]}\")\n\t\t\t\t\t\tprint(f\"{Fore.LIGHTGREEN_EX}[+] Loaded {folder} > {file}\")\n\t\t\t\t\n\t\t\t\t\n\t\n\t\n\tprint(f\"{Fore.LIGHTMAGENTA_EX}[=] Loading Commands\")\n\tasyncio.run(load())\n\tprint(f\"{Fore.LIGHTMAGENTA_EX}[=] Cogs Added\")\n\t\n\tclient.run(token)\n","repo_name":"Hack-Scout/Nobody-Nuker","sub_path":"discordBot.py","file_name":"discordBot.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"43365704605","text":"from django.core.management.base import BaseCommand\nfrom main.models import Contract\nfrom datetime import datetime, timedelta\n\nnow = datetime.now()\nfuture = now + timedelta(days=730)\n\nclass Command(BaseCommand):\n\n def add_arguments(self, parser):\n parser.add_argument('count', type=int, help='Total number of Contracts to create')\n parser.add_argument('-p', '--prefix', type=str, help='Prefix for Contracts to create')\n\n def handle(self, *args, **options):\n count = options.get('count')\n prefix = options.get('prefix')\n\n if prefix is None:\n prefix = 'Contract'\n\n for i in 
range(count):\n Contract.objects.create(signing_date=now, expiration_date=future, salary=100000)\n self.stdout.write(f'{prefix} {i} created')\n","repo_name":"methodDen/EPL-Manager","sub_path":"ManagerEPL/main/management/commands/contracts_seed.py","file_name":"contracts_seed.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18062475157","text":"from django.urls import path\nfrom . import views\n\n\nurlpatterns = [\n path('', views.homepage, name='home-homepage'),\n path('characters/', views.characters, name='characters'),\n path('characters/details//', views.characters_details, name='char-detail'),\n path('summary/', views.summary_view, name='summary-view'),\n path('play/', views.game_view, name='game-view'),\n path('play/game', views.game_play_view, name='real-game')\n]\n","repo_name":"BeGeos/DARK-the_series","sub_path":"characters/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35202835130","text":"import pandas as pd\nfrom pandas_datareader import data as pdr\nimport yfinance as yf\nimport numpy as np\nimport datetime as dt\nfrom scipy.stats import norm\nfrom matplotlib import pyplot as plt\n\n#question4b\n#Assumption that returns are normally distributed\n#Assumption that initial investment is 100, can change it to 1 to find VaR in %\n\ntickers = ['AAPL','IBM', 'GOOG', 'BP','XOM','COST','GS']\nweights = np.array([.15, .20, .20, .15,.10,.15,.05])\ninitial_investment = 100\n\n#pulling close prices from yahoo\ndata = pdr.get_data_yahoo(tickers, start=\"2016-01-01\", end=\"2016-12-31\")['Close']\n\n#finding daily change of returns\nreturns = data.pct_change()\n# print(returns.tail())\n\n#finding cov matrix\ncov_matrix = returns.cov()\n# print(cov_matrix)\n\n#finding mean of returns of the distribution\navg_rets = returns.mean()\n#finding mean in % of portfolio returns\nport_mean = avg_rets.dot(weights)\n#finding std deviation of portfolio\nport_stdev = np.sqrt(weights.T.dot(cov_matrix).dot(weights))\n#finding absolute mean of portfolio w 100USD invested\nmean_investment = (1+port_mean) * initial_investment\n#finding std deviation of portfolio w 100USD invested\nstdev_investment = initial_investment * port_stdev\n#using alpha as 5%\nconf_level1 = 0.05\n#finding portfolio value at 5% left tail\ncutoff1 = norm.ppf(conf_level1, mean_investment, stdev_investment)\n#finding Value at Risk daily\nvar_1d1 = initial_investment - cutoff1\nprint(f'The value at risk for 1-day period is {np.round(var_1d1,2)} at 95% confidence interval')\n#output \n#The value at risk for 1-day period is 2.39 at 95% confidence interval\n\n#plotting Value at Risk for 365days\nvar_array = []\nnum_days = int(365)\nfor x in range(1, num_days+1): \n var_array.append(np.round(var_1d1 * np.sqrt(x),2))\n # print(str(x) + \" day VaR @ 95% confidence: \" + str(np.round(var_1d1 * np.sqrt(x),2)))\n#Value at Risk for 1 year period\nprint(f'The value at risk for 1 year period is {np.round(var_1d1 * np.sqrt(365),2)} at 95% confidence interval')\n#output\n#The value at risk for 1 year period is 45.64 at 95% confidence interval\n\n# Build plot, graph is in question4Charts.md\nplt.xlabel(\"Day #\")\nplt.ylabel(\"Max portfolio loss (USD)\")\nplt.title(\"Max portfolio loss (VaR) over 365-day period\")\nplt.plot(var_array, \"r\")\n\n\n#finding conditional Value at Risk (CVaR): the expected loss given that the 
alpha tail event occurs\n#the truncated mean norm.expect(x, ub=cutoff1) divided by conf_level1 gives E[portfolio value given value <= cutoff1]\ncVaR = initial_investment - 1/conf_level1 * norm.expect(lambda x: x, loc=mean_investment, scale=stdev_investment, ub=cutoff1)\nprint(cVaR)\n\n\n#qns4c Portfolio Optimisation, generating efficient frontier and maximising Sharpe Ratio\nnum_portfolios = 30000\n\n#generating random portfolios with different weights\ndef portfolio_annualised_performance(weights, mean_returns, cov_matrix):\n returns = np.sum(mean_returns*weights ) *365\n std = np.sqrt(np.dot(weights.T, np.dot(cov_matrix, weights))) * np.sqrt(365)\n return std, returns\n\ndef random_portfolios(num_portfolios, avg_rets, cov_matrix):\n results = np.zeros((3,num_portfolios))\n weights_record = []\n for i in range(num_portfolios):\n #Generating random weights\n weights = np.random.random(7)\n #normalising weights\n weights /= np.sum(weights)\n weights_record.append(weights)\n portfolio_std_dev, portfolio_return = portfolio_annualised_performance(weights, avg_rets, cov_matrix)\n results[0,i] = portfolio_std_dev\n results[1,i] = portfolio_return\n results[2,i] = portfolio_return / portfolio_std_dev\n return results, weights_record\n\ndef display_simulated_ef_with_random(mean_returns, cov_matrix, num_portfolios):\n #Generating 30000 random weights, saving their returns, std dev, sharpe ratio\n results, weights = random_portfolios(num_portfolios,mean_returns, cov_matrix) \n\n #finding index of max sharpe ratio\n max_sharpe_idx = np.argmax(results[2])\n #getting the returns and std dev of this particular portfolio\n sdp, rp = results[0,max_sharpe_idx], results[1,max_sharpe_idx]\n #allocation of weights with max sharpe ratio\n max_sharpe_allocation = weights[max_sharpe_idx]\n\n #finding index of min volatility/standard deviation\n min_vol_idx = np.argmin(results[0])\n #getting the returns and std dev of this particular portfolio\n sdp_min, rp_min = results[0,min_vol_idx], results[1,min_vol_idx]\n #allocation of weights with min volatility/standard deviation\n min_vol_allocation = weights[min_vol_idx] \n \n #finding index of max returns\n max_returns_idx = np.argmax(results[1])\n #getting the returns and std dev of this particular portfolio\n sdp_max, rp_max = results[0,max_returns_idx], results[1,max_returns_idx]\n #allocation of weights with max returns\n max_returns_allocation = weights[max_returns_idx] \n \n print(f'''Max sharpe ratio portfolio return: {round(rp,2)}\nVolatility: {round(sdp,2)}\n{max_sharpe_allocation}''')\n print('-'*18)\n print(f'''Min volatility portfolio return: {round(rp_min,2)}\nVolatility: {round(sdp_min,2)}\n{min_vol_allocation}''')\n print('-'*18)\n print(f'''Max returns: {round(rp_max,2)}\nVolatility: {round(sdp_max,2)}\n{max_returns_allocation}''')\n\ndisplay_simulated_ef_with_random(avg_rets, cov_matrix, num_portfolios)\n# output\n# Max sharpe ratio portfolio return: 0.32\n# Volatility: 0.2\n# [0.04270409 0.34324545 0.00436848 0.01489951 0.28455692 0.02515885\n# 0.2850667 ]\n# ------------------\n# Min volatility portfolio return: 0.14\n# Volatility: 0.15\n# [0.03511302 0.1982911 0.15794914 0.02835736 0.2054005 0.37117518\n# 0.00371369]\n# ------------------\n# Max returns: 0.37\n# Volatility: 0.25\n# [0.04935525 0.06067908 0.08015331 0.05189512 0.09388982 0.02189946\n# 0.64212796]\n# Note: weights are ordered according to ['AAPL','IBM', 'GOOG', 'BP','XOM','COST','GS']\n# Not sure how to incorporate shorting and rebalancing monthly. 
I would think shorting will be able to reduce volatility further\n# and being able to rebalance monthly allows changing the portfolio weights as each new month's data arrives, using the new data to find\n# the updated efficient frontier for max sharpe/returns/min risk.\n\n#Answer: I would choose my optimal portfolio weights to be the ones that maximise the sharpe ratio, max returns per unit risk\n# However, if an investor wants to maximise returns or minimise risk, the investor can choose either of the other two weights. \n","repo_name":"jinxuannnn/Treehouse-Finance-Quant-Interview","sub_path":"question4.py","file_name":"question4.py","file_ext":"py","file_size_in_byte":6369,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"70082214025","text":"import bisect\n\n\nclass Solution:\n\n    def maxConsecutiveAnswers(self, answerKey: str, k: int) -> int:\n # Wrong approach: changing every answer in the current window to the window's leftmost answer is not guaranteed to produce the longest run of identical answers\n # n = len(answerKey)\n # ans = 0\n # l = 0\n # while True:\n # r = l\n # while r < n and answerKey[r] == answerKey[l]:\n # r += 1\n # firstR = r\n # change = 0\n # while r < n and change < k:\n # if answerKey[r] != answerKey[l]:\n # change += 1\n # r += 1\n # while r < n and answerKey[r] == answerKey[l]:\n # r += 1\n # ans = max(ans, r - l)\n # print(l, r)\n # if r >= n:\n # break\n # l += 1\n # return ans\n\n # This ends up using every tag on the problem: binary search, prefix sums and a sliding window\n # Running a plain sliding window once for 'T' and once for 'F' would already be enough,\n # but binary-searching the prefix sums does make the window slide faster\n n = len(answerKey)\n t_prefix = [0] * (n + 1)\n f_prefix = [0] * (n + 1)\n for i in range(n):\n t_prefix[i + 1] = t_prefix[i]\n f_prefix[i + 1] = f_prefix[i]\n if answerKey[i] == 'T':\n t_prefix[i + 1] += 1\n else:\n f_prefix[i + 1] += 1\n\n ans = 0\n l = r = 0\n while l <= n:\n r = bisect.bisect_right(t_prefix, t_prefix[l] + k, lo=r)\n ans = max(ans, r - l - 1)\n l += 1\n\n l = r = 0\n while l <= n:\n r = bisect.bisect_right(f_prefix, f_prefix[l] + k, lo=r)\n ans = max(ans, r - l - 1)\n l += 1\n\n return ans\n\n\nl = \"FFFTTFTTFT\"\nk = 3\nprint(Solution().maxConsecutiveAnswers(l, k))\n","repo_name":"tiandiyijian/myLeetcode","sub_path":"2024.py","file_name":"2024.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"22110470629","text":"# coding=utf-8\nfrom typing import List, Tuple, Any, Iterable\n\nimport numpy as np\nfrom collections import namedtuple as struct\nfrom dataclasses import dataclass\n\nfrom blocAndTools.buildingbloc import ExperimentSpec, GymPlayground\n\nfrom blocAndTools.container.samplecontainer import TrajectoryContainer, TrajectoryCollector\nfrom blocAndTools.container.samplecontainer import UniformeBatchContainer, UniformBatchCollector\nfrom blocAndTools.temporal_difference_computation import computhe_the_Advantage, compute_TD_target\n\n@dataclass()\nclass MiniBatch:\n __slots__ = ['obs_t', 'act_t', 'obs_tPrime', 'rew_t', 'q_values_t']\n\n obs_t: list\n act_t: list\n obs_tPrime: list\n rew_t: list\n q_values_t: list\n\n\nclass TrajectoryContainerMiniBatchOnlineOAnORV(TrajectoryContainer):\n \"\"\"\n Container for storage & retrieval of events collected at every timestep of a trajectory\n for the Batch Actor-Critic algorithm\n \"\"\"\n __slots__ = ['obs_t',\n 'actions',\n 'rewards',\n 'Q_values',\n 'trajectory_return',\n '_trajectory_lenght',\n 'trajectory_id',\n 'V_estimates',\n 'actor_losses',\n 'critic_losses']\n\n def __init__(self, obs_t: list, actions: list, rewards: list, Q_values: list, trajectory_return: float,\n trajectory_id, V_estimates: 
list = None, actor_losses: list = None, critic_losses: list = None) -> None:\n\n self.V_estimates = V_estimates\n self.actor_losses = actor_losses\n self.critic_losses = critic_losses\n super().__init__(obs_t, actions, rewards, Q_values, trajectory_return, trajectory_id)\n\n def cut(self, max_lenght):\n \"\"\"Downsize the number of timesteps stored in the container\"\"\"\n raise UserWarning(\"This method is not supposed to be used with this TrajectoryContainer subclass\")\n # self.V_estimates = self.V_estimates[:max_lenght]\n # super().cut(max_lenght)\n\n def unpack(self) -> Tuple[list, list, list, list, float, int, list, list, list]:\n \"\"\"\n Unpack the full trajectory as a tuple of numpy arrays\n\n :return: (obs_t, actions, rewards, Q_values, trajectory_return, _trajectory_lenght, V_estimate, actor_losses, critic_losses)\n :rtype: (list, list, list, list, float, int, list, list, list)\n \"\"\"\n # (nice to have) todo:refactor --> as a namedtuple\n unpacked_super = super().unpack()\n\n observations, actions, rewards, Q_values, trajectory_return, _trajectory_lenght = unpacked_super\n\n return (observations, actions, rewards, Q_values, trajectory_return, _trajectory_lenght,\n self.V_estimates, self.actor_losses, self.critic_losses)\n\n def __repr__(self):\n myRep = super().__repr__()\n myRep += \".V_estimates=\\n{}\\n\\n\".format(self.V_estimates)\n myRep += \".actor_losses=\\n{}\\n\\n\".format(self.actor_losses)\n myRep += \".critic_losses=\\n{}\\n\\n\".format(self.critic_losses)\n return myRep\n\n\nclass TrajectoryCollectorMiniBatchOnlineOAnORV(TrajectoryCollector):\n \"\"\"\n Collect the timestep events of a single trajectory for the Batch Actor-Critic algorithm\n\n 1. Collect sampled timestep events of a single trajectory,\n 2. Output on-demand minibatches of collected timesteps for learning, with computed bootstrap estimate targets\n 3. On trajectory end:\n a. Output a TrajectoryContainerMiniBatchOnlineOAnORV fed with the collected samples\n b. Reset, ready for the next trajectory\n \"\"\"\n def __init__(self, experiment_spec: ExperimentSpec, playground: GymPlayground, discounted: bool = True,\n mini_batch_capacity: int = 10):\n self.mini_batch_capacity = mini_batch_capacity\n self.obs_tPrime = []\n self.V_estimates = []\n\n self._trjCollector_minibatch_runing_idx = 0\n self._current_minibatch_size = 0\n super().__init__(experiment_spec, playground, discounted)\n self.q_values = []\n self.actor_losses = []\n self.critic_losses = []\n\n def minibatch_is_full(self) -> bool:\n return self._current_minibatch_size >= self.mini_batch_capacity\n\n def collect_OAnORV(self, obs_t: np.ndarray, act_t, obs_tPrime: np.ndarray, rew_t: float, V_estimate: float) -> None:\n \"\"\" Collect obs_t, act_t, obs_tPrime, rew_t and V estimate for one timestep\n \"\"\"\n\n assert not self.minibatch_is_full(), (\"The minibatch is full: {} timesteps collected! \"\n \"Execute compute_Qvalues_as_BootstrapEstimate() \"\n \"then get_minibatch()\").format(self._current_minibatch_size)\n\n self.obs_tPrime.append(obs_tPrime)\n self.V_estimates.append(V_estimate)\n super().collect_OAR(obs_t, act_t, rew_t)\n self._current_minibatch_size += 1\n return None\n\n def set_Qvalues(self, Qvalues: list) -> None:\n \"\"\"\n Qvalues must be computed explicitly, before the minibatch is popped, using either method:\n - set_Qvalues,\n - or compute_Qvalues_as_rewardToGo\n \"\"\"\n assert not self._q_values_computed\n assert isinstance(Qvalues, list)\n\n assert len(Qvalues) == len(self.actions) - len(self.q_values)\n self.q_values += Qvalues\n\n self._q_values_computed = True\n return None\n\n def get_minibatch(self):\n # (!) Don't assert that the minibatch is full. The last minibatch of the trajectory will be smaller than the others\n\n assert self._q_values_computed, (\"The Q-values are not computed yet!!! \"\n \"Call the method set_Qvalues() or compute_Qvalues_as_BootstrapEstimate()\"\n \" before get_minibatch()\")\n\n mb_idx = self._trjCollector_minibatch_runing_idx\n\n mini_batch = MiniBatch(obs_t=self.observations[mb_idx:], act_t=self.actions[mb_idx:],\n obs_tPrime=self.obs_tPrime[mb_idx:], rew_t=self.rewards[mb_idx:],\n q_values_t=self.q_values[mb_idx:])\n\n self._reset_minibatch_internal_state()\n return mini_batch\n\n def _reset_minibatch_internal_state(self):\n self._q_values_computed = False\n self._current_minibatch_size = 0\n self._trjCollector_minibatch_runing_idx = len(self.actions)\n return None\n\n def compute_Qvalues_as_BootstrapEstimate(self) -> None:\n \"\"\"\n Qvalues must be computed explicitly, before the minibatch is popped, using either method:\n - set_Qvalues,\n - or compute_Qvalues_as_BootstrapEstimate\n \"\"\"\n mb_idx = self._trjCollector_minibatch_runing_idx\n # bootstrap estimate targets computed from the collected rewards and the critic V estimates\n TD_target: list = compute_TD_target(self.rewards[mb_idx:], self.V_estimates[mb_idx:],\n self._exp_spec.discout_factor).tolist()\n self.set_Qvalues(TD_target)\n return None\n\n def collect_loss(self, actor_loss, critic_loss):\n self.actor_losses.append(actor_loss)\n self.critic_losses.append(critic_loss)\n return None\n\n def pop_trajectory_and_reset(self) -> TrajectoryContainerMiniBatchOnlineOAnORV:\n \"\"\"\n 1. Return the last sampled trajectory in a TrajectoryContainerMiniBatchOnlineOAnORV\n 2. Reset the container ready for the next trajectory sampling.\n\n :return: A TrajectoryContainerMiniBatchOnlineOAnORV with a full trajectory\n :rtype: TrajectoryContainerMiniBatchOnlineOAnORV\n \"\"\"\n assert not self._q_values_computed, (\"Q-values were computed for a minibatch that was never \"\n \"popped! Call get_minibatch() before pop_trajectory_and_reset()\")\n trajectory_containerBatchAC = TrajectoryContainerMiniBatchOnlineOAnORV(obs_t=self.observations.copy(),\n actions=self.actions.copy(),\n rewards=self.rewards.copy(),\n Q_values=self.q_values.copy(),\n trajectory_return=self.theReturn,\n trajectory_id=self._trj_collected,\n V_estimates=self.V_estimates.copy(),\n actor_losses=self.actor_losses.copy(),\n critic_losses=self.critic_losses.copy())\n\n self._reset()\n return trajectory_containerBatchAC\n\n def _reset(self):\n super()._reset()\n\n self.obs_tPrime.clear()\n self.V_estimates.clear()\n\n self._trjCollector_minibatch_runing_idx = 0\n self._current_minibatch_size = 0\n\n self.q_values = [] # required since q_values are not handled the same way as in the parent class\n self.actor_losses.clear()\n self.critic_losses.clear()\n return None\n\n\nclass UnconstrainedExperimentStageContainerOnlineAAC(UniformeBatchContainer):\n def __init__(self, trj_container_batch: List[TrajectoryContainerMiniBatchOnlineOAnORV], batch_constraint: int, batch_id: int):\n \"\"\"\n Container for storage & retrieval of sampled trajectory history & statistics\n (!) Be advised: this container does not enforce a uniformity constraint over the collected timesteps.\n Containers will be of uneven timestep length & they keep every collected trajectory complete (uncut)\n Used with the Online Actor-Critic algorithm\n\n Purpose: statistic book-keeping.\n\n It's a component of the ExperimentStageCollectorOnlineAAC\n\n :param batch_constraint: max capacity measured in timesteps\n :type batch_constraint: int\n :param trj_container_batch: a list of TrajectoryContainer instances filled with collected timestep events.\n :type trj_container_batch: List[TrajectoryContainer]\n :param batch_id: the batch idx number\n :type batch_id: int\n \"\"\"\n self.stage_Values_estimate = []\n self.actor_losses = []\n self.critic_losses = []\n super().__init__(trj_container_batch, batch_constraint, batch_id)\n\n def _container_feed_on_init_hook(self, aTrjContainer: TrajectoryContainerMiniBatchOnlineOAnORV):\n aTrj_obss, aTrj_acts, aTrj_rews, aTrj_Qs, aTrj_return, aTrj_lenght, aTrj_Values, aTrj_actor_losses, aTrj_critic_losses = aTrjContainer.unpack()\n\n self.stage_Values_estimate += aTrj_Values\n self.actor_losses += aTrj_actor_losses\n self.critic_losses += aTrj_critic_losses\n\n return aTrj_obss, aTrj_acts, aTrj_rews, aTrj_Qs, aTrj_return, aTrj_lenght\n\n def _check_uniformity_constraint(self, batch_constraint: Any) -> None:\n \"\"\"\n Muted on purpose: this subclass does not enforce uniformity across stages\n \"\"\"\n pass\n\n def unpack_all(self) -> Tuple[Any, list, list, list]:\n \"\"\"\n Unpack the full epoch batch of collected trajectories into lists of numpy ndarrays\n\n :return: (batch_observations, batch_actions, batch_Qvalues,\n batch_returns, batch_trjs_lenghts, total_timestep_collected, nb_of_collected_trjs,\n stage_Values_estimate, actor_losses, critic_losses)\n :rtype: (list, list, list, list, list, int, int, list, list, list)\n \"\"\"\n unpack_super = super().unpack_all()\n return (*unpack_super, self.stage_Values_estimate.copy(), self.actor_losses.copy(), self.critic_losses.copy())\n\n def get_stage_mean_loss(self) -> (float, float):\n \"\"\"\n Compute the mean loss over the stage for actor and critic\n\n :return: (actor_mean_loss, critic_mean_loss)\n :rtype: (float, float)\n \"\"\"\n actor_mean_loss = float(np.array(self.actor_losses).mean())\n critic_mean_loss = float(np.array(self.critic_losses).mean())\n 
return actor_mean_loss, critic_mean_loss\n\nclass ExperimentStageCollectorOnlineAAC(UniformBatchCollector):\n \"\"\"\n Collect sampled trajectories and aggregate them in multiple stage containers of an even number of trajectories\n for the online Actor-Critic algorithm\n\n Purpose: statistic book-keeping.\n capacity is on a trajectory count scale\n\n (!) Keep in mind that the size of containers produced on a timestep scale will be UNEVEN\n\n note: Optimization consideration --> why collect numpy ndarrays in a python list?\n | It's an order of magnitude faster to collect ndarrays in a list and then convert the list\n | to one long ndarray than it is to append ndarrays to each other\n\n \"\"\"\n\n def is_not_full(self) -> bool:\n return self._trajectory_count < self.CAPACITY\n\n def internal_state(self) -> namedtuple:\n \"\"\"Testing utility\"\"\"\n ExperimentStatsAndHistoryCollectorInternalState = namedtuple('ExperimentStatsAndHistoryCollectorInternalState',\n ['trajectories_list', 'timestep_count',\n 'trajectory_count', 'remaining_stage_space'])\n\n return ExperimentStatsAndHistoryCollectorInternalState(self.trajectories_list, self._timestep_count,\n self._trajectory_count, self.remaining_stage_space)\n\n def __call__(self, trajectory: TrajectoryContainerMiniBatchOnlineOAnORV, *args, **kwargs) -> None:\n assert self.is_not_full(), (\"The batch is full: {} timesteps collected! \"\n \"Execute pop_batch_and_reset()\").format(self._timestep_count)\n\n self.trajectories_list.append(trajectory)\n self._trajectory_count += 1\n self._timestep_count += trajectory.__len__()\n self.remaining_stage_space -= 1\n return None\n\n def pop_batch_and_reset(self) -> UnconstrainedExperimentStageContainerOnlineAAC:\n \"\"\"\n :return: A batch of concatenated trajectory components\n :rtype: UnconstrainedExperimentStageContainerOnlineAAC\n \"\"\"\n container = UnconstrainedExperimentStageContainerOnlineAAC(self.trajectories_list, self.CAPACITY, self.batch_idx)\n\n # reset\n self._reset()\n return container\n\n def _reset(self):\n self.trajectories_list = []\n self._timestep_count = 0\n self._trajectory_count = 0\n self.remaining_stage_space = self.CAPACITY\n\n\n","repo_name":"RedLeader962/LectureDirigeDRLimplementation","sub_path":"DRLimplementation/blocAndTools/container/samplecontainer_online_mini_batch_OAnORV.py","file_name":"samplecontainer_online_mini_batch_OAnORV.py","file_ext":"py","file_size_in_byte":14695,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"}
{"seq_id":"23434631117","text":"from math import sin, cos, sqrt\r\nimport scipy.integrate as integrate\r\n\r\n\r\ndef func(x):\r\n return cos(x)*sin(x)\r\n\r\ndef middle_rectangle(f):\r\n a = 0\r\n b = 1\r\n m = 100000\r\n h = (b-a)/m\r\n Im = 0\r\n for i in range(0, m): # composite midpoint rule over all m subintervals\r\n Im += h * f(a + (i + 1 / 2) * h)\r\n return Im\r\n\r\n\r\nif __name__ == '__main__':\r\n print('Computing the integral with a Gauss-type quadrature formula with 2 nodes')\r\n N = 2\r\n moments = []\r\n moments.append(middle_rectangle(lambda x: cos(x)))\r\n moments.append(middle_rectangle(lambda x: cos(x)*x))\r\n moments.append(middle_rectangle(lambda x: cos(x)*x**2))\r\n moments.append(middle_rectangle(lambda x: cos(x)*x**3))\r\n\r\n print(\"Moments of the weight function cos(x):\")\r\n for i in range(4):\r\n print('moment', i, ': ', moments[i])\r\n\r\n a1 = (moments[0]*moments[3] - moments[2]*moments[1])/(moments[1]**2 - moments[0]*moments[2])\r\n a2 = (moments[2]**2 - moments[3]*moments[1])/(moments[1]**2 - moments[0]*moments[2])\r\n\r\n 
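# Editor's sketch (hedged addition): a1 and a2 above come from requiring x^2 + a1*x + a2 to be orthogonal to 1 and to x under the weight cos(x) on [0, 1], i.e. solving mu2 + a1*mu1 + a2*mu0 = 0 and mu3 + a1*mu2 + a2*mu1 = 0 for the moments mu_k computed above; Cramer's rule yields exactly the two formulas used.\r\n print('Orthogonal 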
polynomial: x^2 + ' + str(a1) + '*x + ' + str(a2))\r\n\r\n x1 = (-a1 + sqrt(a1**2 - 4*a2))/2\r\n x2 = (-a1 - sqrt(a1**2 - 4*a2))/2\r\n\r\n A1 = (moments[1] - x2*moments[0])/(x1-x2)\r\n A2 = (moments[1] - x1*moments[0])/(x2-x1)\r\n\r\n print('Nodes and coefficients of the quadrature formula:')\r\n print('x1 = ', x1, ' A1 = ', A1)\r\n print('x2 = ', x2, ' A2 = ', A2)\r\n\r\n print('Sum of the coefficients:', A1 + A2)\r\n\r\n print('Check for f = x^3')\r\n print('Integral via the quadrature formula:', A1*(x1**3) + A2*(x2**3))\r\n print('Integral via the math package:', integrate.quad(lambda x: cos(x)*x**3, 0, 1))\r\n\r\n print('Integral for f = sin(x):', A1*sin(x1) + A2*sin(x2))\r\n\r\n\r\n\r\n\r\n","repo_name":"ka1mar/code","sub_path":"lab6.2.py","file_name":"lab6.2.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"2184322527","text":"import swisseph as swe\n#from __future__ import division\nfrom math import floor, ceil\nfrom collections import namedtuple as struct\nfrom timezonefinder import TimezoneFinder\nimport datetime\nimport pytz\nimport json\n\nPLANETS = {\n \"Sun\": swe.SUN,\n \"Moon\": swe.MOON,\n \"Mercury\": swe.MERCURY,\n \"Venus\": swe.VENUS,\n \"Mars\": swe.MARS,\n \"Jupiter\": swe.JUPITER,\n \"Saturn\": swe.SATURN,\n \"Rahu\": swe.MEAN_NODE\n}\nASPECT_SYMBOLS = {\n \"Conjunction\": \"\\u260C\",\n \"Semi-Sextile\": \"\\u26BA\",\n \"Semi-Square\": \"\\u2220\",\n \"Sextile\": \"\\u26B9\",\n \"Quintile\": \"Q\",\n \"Square\": \"\\u25A1\",\n \"Trine\": \"\\u25B3\",\n \"Sesquiquadrate\": \"\\u26BC\",\n \"BiQuintile\": \"bQ\",\n \"Quincunx\": \"\\u26BB\",\n \"Opposite\": \"\\u260D\",\n}\ndef convert_angle(angle):\n if 0 <= angle < 30:\n return angle, \"Aries\"\n elif 30 <= angle < 60:\n return angle - 30, \"Taurus\"\n elif 60 <= angle < 90:\n return angle - 60, \"Gemini\"\n elif 90 <= angle < 120:\n return angle - 90, \"Cancer\"\n elif 120 <= angle < 150:\n return angle - 120, \"Leo\"\n elif 150 <= angle < 180:\n return angle - 150, \"Virgo\"\n elif 180 <= angle < 210:\n return angle - 180, \"Libra\"\n elif 210 <= angle < 240:\n return angle - 210, \"Scorpio\"\n elif 240 <= angle < 270:\n return angle - 240, \"Sagittarius\"\n elif 270 <= angle < 300:\n return angle - 270, \"Capricorn\"\n elif 300 <= angle < 330:\n return angle - 300, \"Aquarius\"\n elif 330 <= angle < 360:\n return angle - 330, \"Pisces\"\ndef decdeg2dms(deg):\n d = int(deg)\n mins = (deg - d) * 60\n m = int(mins)\n s = int(round((mins - m) * 60))\n return [d, m, s]\nDate = struct('Date', ['year', 'month', 'day', 'hour','greg'])\nPlace = struct('Location', ['latitude', 'longitude'])\n\ndef getTZ_old(Lat,Lon):\n from tzwhere import tzwhere\n tzwhere = tzwhere.tzwhere()\n timezone_str = tzwhere.tzNameAt(float(Lat), float(Lon)) # Seville coordinates\n return timezone_str\n\ndef getTZ(Lat,Lon):\n tf = TimezoneFinder()\n latitude, longitude = float(Lat), float(Lon)\n return tf.timezone_at(lng=longitude, lat=latitude) # returns 'Europe/Berlin'\n\nout = {}\nAscendant_output = {}\nout_maker = {}\n\ndef process(Json_payload):\n #return Json_payload\n Lat,Lon = Json_payload[\"Lat\"],Json_payload[\"Lon\"]\n DY,DMon,DD,DH,DMin,DS = Json_payload[\"Year\"],Json_payload[\"Month\"],Json_payload[\"Day\"],Json_payload[\"Hour\"],Json_payload[\"Min\"],\"00\"\n naive = datetime.datetime.strptime (DY+\"-\"+DMon+\"-\"+DD+\" \"+DH+\":\"+DMin+\":\"+DS, \"%Y-%m-%d %H:%M:%S\")\n try:\n local = pytz.timezone(getTZ(float(Lat),float(Lon)))\n except Exception as e:\n print(e)\n local_dt = 
local.localize(naive, is_dst=None)\n utc_dt = local_dt.astimezone(pytz.utc)\n utc_dt = utc_dt.strftime (\"%Y:%m:%d:%H:%M:%S\")\n DY,DMon,DD,DH,DMin,DS = utc_dt.split(\":\")\n kkd = Place(float(Lat),float(Lon))\n dtR = swe.utc_to_jd(int(DY),int(DMon),int(DD),int(DH),int(DMin),int(DS),1)\n swe.set_ephe_path('files')\n #swe.set_topo(12.9667, 77.5667, 0);\n swe.set_sid_mode(swe.SIDM_LAHIRI, 0, 0)\n for key in PLANETS:\n \n output = swe.calc_ut(dtR[0], PLANETS[key], flag = swe.FLG_SWIEPH | swe.FLG_SIDEREAL | swe.FLG_SPEED)\n ActualDegrees = decdeg2dms((output[0]))\n out_maker = {}\n out_maker[\"ActualDegrees\"] = str(ActualDegrees[0])\n out_maker[\"ConvertedDegrees\"] = str(convert_angle(ActualDegrees[0])[0])\n out_maker[\"ConvertedMin\"] = str(ActualDegrees[1])\n out_maker[\"ConvertedSec\"] = str(ActualDegrees[2])\n out_maker[\"Zodiac\"] = str(convert_angle(ActualDegrees[0])[1])\n #out_maker = json.dumps(out_maker)\n out[key] = out_maker\n\n ascDeg = swe.houses_ex(dtR[0], kkd[0], kkd[1], str.encode('A'), flag = swe.FLG_SWIEPH | swe.FLG_SIDEREAL | swe.FLG_SPEED)\n DeG = decdeg2dms(convert_angle(ascDeg[0][0])[0])\n out[\"Ascendant\"] = str(ascDeg[0][0]) + \" \" +str(DeG[0])+ \" \" + str(DeG[1]) + \" \" + str(DeG[2]) + \" \" + str(convert_angle(ascDeg[0][0])[1])\n return out\n \n\n","repo_name":"RaviKarrii/Vedic-Chart-API","sub_path":"src/codebase.py","file_name":"codebase.py","file_ext":"py","file_size_in_byte":4046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3986738110","text":"from subprocess import call\nimport os\nfrom osgeo import ogr\nfrom datetime import datetime\nimport pandas as pd\nimport re\nfrom multiprocessing import Pool,Manager,cpu_count\n#gsutil cp -r gs://gcp-public-data-sentinel-2/tiles/20/L/PQ/S2A_MSIL1C_20151230T142546_N0201_R010_T20LPQ_20151230T235521.SAFE ./\n#https://krstn.eu/landsat-batch-download-from-google/\n'''\ngrid_sentinel = r'B:\\monitoramento_kfw\\base_raster\\download\\PA\\grid_pa_sentinel.shp'\noutfolder = r\".\\preview\"\nstartdate = '2018-08-01'\nenddate = '2019-03-25'\ntile_limit = 3\ncloud_limit = 10\n#sat = '8'\n'''\n\nstDate = re.compile(r'201[6-9][01]\\d[0-3]\\dT[01]\\d\\d\\d\\d\\d')\nstTile = re.compile(r'(?i)[12][0-589][H-N][B-HK-NP-RT-VW-Z][A-HJ-NP-V]')\n\n#stDate = re.compile(r'201[6789][01]\\d[0123]\\d')\n\ndef downloadSentinel(data_folder,tileid,cloud_limit,tile_limit,startdate,enddate,outfolder,preview,pr):\n\tsortColumns = ['CLOUD_COVER']\n\ttilefuso = tileid[0:2]\n\tsingle_letter = tileid[2:3]\n\tdouble_letter = tileid[3:]\n\tfile_name = os.path.join(\"index\",\"sentinel_filtered_dataframev3.pickle\")\n\tdf = pd.read_pickle(file_name)\n\ttile_id = '{}_{}_{}'.format(tilefuso,single_letter,double_letter)\n\tstart = datetime.strptime(startdate, '%Y-%m-%d')\n\tend = datetime.strptime(enddate, '%Y-%m-%d')\n\tselect = df[(df['SENSING_TIME'] >= start) & (df['SENSING_TIME'] <= end)]\n\tselect = select[(select['TILE_ID'] == tile_id)]\n\tselect = select[select['CLOUD_COVER'] <= cloud_limit]\n\tselect = select.sort_values(sortColumns)\n\ttd_numb = 0\n\tout_tile_folder = os.path.join(outfolder,tile_id)\n\tcall('mkdir {}'.format(out_tile_folder),shell=True)\n\timage_list = []\n\tdownload_scenes = []\n\tif len(select) != 0:\n\t\t#print select\n\t\t#print products Columns: [TILE_ID, PRODUCT_ID, GRANULE_ID, SENSING_TIME, CLOUD_COVER, GEOMETRIC_QUALITY, NORTH_LAT, SOUTH_LAT, WEST_LON, EAST_LON, TOTAL_SIZE, BASE_URL]\n\t\tfor i in select.index.values:\n\t\t\tif td_numb >= 
tile_limit:\n\t\t\t\tbreak\n\t\t\tprodId = select.at[i,'PRODUCT_ID']\n\t\t\tprint (prodId)\n\t\t\tgranuleId = select.at[i,'GRANULE_ID']\n\n\t\t\t_st_date = stDate.findall(prodId)[0]\n\t\t\t_st_tile = stTile.findall(prodId)\n\t\t\tif not len(_st_tile) == 0:\n\t\t\t\t_st_tile = _st_tile[0]\n\t\t\t\tpreview_file_name = 'T{}_{}_PVI.jp2'.format(_st_tile,_st_date)\n\t\t\telse:\n\t\t\t\tprname = granuleId.replace('MSI','PVI').split('.')[0][:-4]\n\t\t\t\tpreview_file_name = '{}.jp2'.format(prname)\n\t\t\ttd_numb +=1\n\t\t\t#print ('downloading scene number {} for this tile id'.format(td_numb))\n\t\t\tprint (prodId , _st_tile, _st_date)\n\t\t\turl = select.at[i,'BASE_URL'].strip()\n\t\t\tpreview_url = '/'.join([url,'GRANULE',granuleId,'QI_DATA',preview_file_name])\n\t\t\t#print preview_url\n\t\t\tif preview == True:\n\t\t\t\tpreview_path = os.path.join(out_tile_folder,preview_file_name)\n\t\t\t\timage_list.append(preview_path)\n\t\t\t\tif os.path.isfile('{}.png'.format(preview_path[:-4])):\n\t\t\t\t\tprint('already downloaded')\n\t\t\t\t\tcontinue\n\t\t\t\turl = preview_url\n\n\t\t\tdownload_scenes.append((url,out_tile_folder))\n\t\tif len(download_scenes)> 0:\n\t\t\tproc = len(download_scenes)\n\t\t\tif proc > pr:\n\t\t\t\tproc = pr\n\t\t\tparallelMother(download_scenes,proc)\n\t\t\tprint('download complete')\n\t\timage_list.reverse()\n\t\treturn [out_tile_folder,image_list]\n\telse:\n\t\tprint('could not find any scene with that criteria')\n\t\treturn [out_tile_folder,image_list]\n\n\n\t#scenes = stack_sentinel(tilefuso,bands,outfolder)\n\t#print '\\n scenes dictionary'\n\t#print scenes\n\t#createFP(scenes,outfolder)\n\t#return scenes\ndef parallelMother(scenes,proc):\n\tpool = Pool(processes=proc, maxtasksperchild=10)\n\tjobs= {}\n\tfor item in scenes:\n\t\tjobs[item[0]] = pool.apply_async(download_parallel, [item])\n\tfor item,result in jobs.items():\n\t\ttry:\n\t\t\tresult = result.get()\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\tpool.close()\n\tpool.join()\n\ndef download_parallel(data):\n\turl = data[0]\n\tout_tile_folder =data[1]\n\tcall('python \"./gsutil/gsutil.py\" -q cp -r {} {}'.format(url,out_tile_folder),shell=True)\n\ndef main():\n\tdriver = ogr.GetDriverByName('ESRI Shapefile')\n\tdataSource = driver.Open(grid_sentinel, 0) # 0 means read-only. 
 1 means writeable\n\tlayer = dataSource.GetLayer()\n\tfor feature in layer:\n\t\ttile = feature.GetField('tile_id')\n\t\ttile_fuso = tile[0:2]\n\t\tsingle_letter = tile[2:3]\n\t\tdouble_letter = tile[3:]\n\t\tprint (tile_fuso,single_letter,double_letter)\n\t\tdownloadSentinel(data_folder,tile,cloud_limit,tile_limit,startdate,enddate,outfolder,preview,pr)\n\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"rupestre-campos/sat_sel","sub_path":"download_sentinel.py","file_name":"download_sentinel.py","file_ext":"py","file_size_in_byte":4360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"35698749996","text":"import torch\nimport copy\nimport gurobipy as grb\nimport numpy as np\nimport contextlib\nimport multiprocessing\nimport torch.nn as nn\nimport random\nimport time\nimport re\nimport os\n\nfrom dnn_solver.symbolic_network import SymbolicNetwork\nfrom utils.data_collector import collector\nimport settings\n\nclass ReLUDomain:\n\n    def __init__(self, net, input_lower, input_upper, assignment=None, bounds_mapping=None, optimize_input_flag=True):\n\n self.assignment = assignment\n self.valid = True \n self.unsat = False \n self.input_lower = input_lower\n self.input_upper = input_upper\n\n self.net = net\n self.layers_mapping = net.layers_mapping\n self.transformer = SymbolicNetwork(net)\n self.reversed_layers_mapping = {n: k for k, v in self.layers_mapping.items() for n in v}\n\n self.bounds_mapping = bounds_mapping\n \n self.output_lower = None\n self.output_upper = None\n # self.unassigned_nodes = self._find_unassigned_nodes(assignment)\n # self.full_assignment = True if self.unassigned_nodes is None else False\n # if self.full_assignment:\n # self.valid = False\n\n # self.layer_idx = self.reversed_layers_mapping[list(self.unassigned_nodes)[0]]\n # self.layer_nodes = list(self.layers_mapping[self.layer_idx])\n\n # print(self.assignment, self.layer_idx, self.unassigned_nodes)\n self.backsub_dict = None\n # self.full_imply_assignment = {}\n self.optimize_input_flag = optimize_input_flag\n\n def update_output_bounds(self, lower, upper):\n if self.output_lower is None:\n self.output_lower = lower\n else:\n idx = lower > self.output_lower\n self.output_lower[idx] = lower[idx]\n\n if self.output_upper is None:\n self.output_upper = upper\n else:\n idx = upper < self.output_upper\n self.output_upper[idx] = upper[idx]\n\n if (self.output_lower > self.output_upper).any():\n collector.add(unsat_by_abs=True)\n self.unsat = True\n\n def init_optimizer(self):\n # with contextlib.redirect_stdout(open(os.devnull, 'w')):\n if 1:\n self.model = grb.Model()\n self.model.setParam('OutputFlag', False)\n \n [self.model.addVar(name=f'x{i}', lb=self.input_lower[i], ub=self.input_upper[i]) for i in range(self.net.n_input)]\n self.model.update()\n\n def update_bounds_mapping(self, new_bounds_mapping):\n # pass\n for node in self.bounds_mapping:\n old_lb, old_ub = self.bounds_mapping[node]\n new_lb, new_ub = new_bounds_mapping[node]\n self.bounds_mapping[node] = (max(old_lb, new_lb), min(old_ub, new_ub))\n\n def clone(self, node, status, assignment={}):\n new_assignment = copy.deepcopy(self.assignment)\n new_assignment[node] = status\n\n # print('\\t + cloned:', new_assignment)\n\n # print(node)\n # print(full_assignment)\n # print((backsub_dict.keys()))\n new_bounds_mapping = {}\n new_bounds_mapping.update(self.bounds_mapping)\n l, u = self.bounds_mapping[node]\n # print(node, l, u, 
l.clamp(min=0), u.clamp(min=0))\n if status:\n new_bounds_mapping[node] = (max(l, 0), u)\n else:\n new_bounds_mapping[node] = (l, min(u, 0))\n\n\n\n full_assignment = {}\n full_assignment.update(new_assignment)\n for n in new_bounds_mapping:\n if n in full_assignment:\n continue\n l, u = new_bounds_mapping[n]\n if l >= -1e-6:\n full_assignment[n] = True\n elif u <= 1e-6:\n full_assignment[n] = False\n flag_unsat = False\n for node, status in assignment.items():\n if node in full_assignment:\n if full_assignment[node] != status:\n flag_unsat = True\n else:\n full_assignment[node] = status\n\n # print('full:', full_assignment.keys())\n output_mat, backsub_dict = self.transformer(full_assignment)\n\n # print(backsub_dict.keys())\n\n old_vars = self.model.getVars()\n new_domain = ReLUDomain(self.net, [_.lb for _ in old_vars], [_.ub for _ in old_vars], new_assignment, new_bounds_mapping, optimize_input_flag=self.optimize_input_flag)\n new_domain.output_upper = self.output_upper.clone() if self.output_upper is not None else None\n new_domain.output_lower = self.output_lower.clone() if self.output_lower is not None else None\n new_domain.unsat = flag_unsat\n # new_domain.backsub_dict = backsub_dict\n # output_mat, backsub_dict = self.transformer(new_assignment)\n new_domain.model = self.model.copy()\n # assert id(new_domain.model) == id(self.model)\n new_vars = new_domain.model.getVars()\n if node in backsub_dict:\n coeffs = backsub_dict[node]\n cstr = grb.LinExpr(coeffs[:-1], new_vars) + coeffs[-1]\n if status:\n new_domain.model.addLConstr(cstr >= 1e-6)\n else:\n new_domain.model.addLConstr(cstr <= 0)\n new_domain.model.update()\n return new_domain\n\n def get_layer_bounds(self, lid):\n lb, ub = [], []\n for node in self.layers_mapping[lid]:\n l, u = self.bounds_mapping[node]\n lb.append(l)\n ub.append(u)\n return torch.stack([torch.tensor(lb, dtype=settings.DTYPE, device=self.net.device), torch.tensor(ub, dtype=settings.DTYPE, device=self.net.device)])\n\n def get_input_bounds(self):\n # lb, ub = [], []\n # for x in self.model.getVars():\n # lb.append(x.lb)\n # ub.append(x.ub)\n # return torch.stack([torch.tensor(lb, dtype=settings.DTYPE, device=self.net.device), torch.tensor(ub, dtype=settings.DTYPE, device=self.net.device)])\n return torch.stack([torch.tensor(self.input_lower, dtype=settings.DTYPE, device=self.net.device), torch.tensor(self.input_upper, dtype=settings.DTYPE, device=self.net.device)])\n\n\n def optimize_input_bounds(self):\n # print([v.lb for v in self.model.getVars()])\n if len(self.model.getVars()) > 100:\n return False\n if not self.optimize_input_flag:\n return False\n \n for i, v in enumerate(self.model.getVars()):\n # lower bound\n v.lb = self.input_lower[i]\n v.ub = self.input_upper[i]\n self.model.update()\n \n for i, v in enumerate(self.model.getVars()):\n self.model.setObjective(v, grb.GRB.MINIMIZE)\n self.model.optimize()\n if self.model.status == grb.GRB.INFEASIBLE:\n collector.add(unsat_by_lp=True)\n self.unsat = True\n return False\n if self.model.status != grb.GRB.OPTIMAL:\n continue\n \n v.lb = self.model.objval\n # upper bound\n self.model.setObjective(v, grb.GRB.MAXIMIZE)\n self.model.optimize()\n v.ub = self.model.objval\n \n if self.input_lower[i] < v.lb:\n self.input_lower[i] = v.lb\n if self.input_upper[i] > v.ub:\n self.input_upper[i] = v.ub\n self.model.update()\n return True\n # Timers.toc('Gurobi functions')\n # print([v.lb for v in self.model.getVars()])\n # print()\n\n def optimize_bounds(self, assignment=None):\n\n # 
self.optimize_input_bounds()\n\n # print(len(self.assignment), len(self.model.getConstrs()))\n # self.model.write(f'gurobi/{hash(frozenset(self.assignment.items()))}.lp')\n if self.unsat:\n return -1\n \n if not self.optimize_input_flag:\n return 0\n \n # if full_assignment is None:\n full_assignment = {}\n full_assignment.update(self.assignment)\n for n in self.bounds_mapping:\n if n in full_assignment:\n continue\n l, u = self.bounds_mapping[n]\n if l >= -1e-6:\n full_assignment[n] = True\n elif u <= 1e-6:\n full_assignment[n] = False\n\n for node, status in assignment.items():\n if node in full_assignment:\n if full_assignment[node] != status:\n collector.add(unsat_by_abs=True)\n self.unsat = True\n break\n else:\n full_assignment[node] = status\n \n if self.unsat:\n return -1\n # print('full:', full_assignment.keys())\n output_mat, backsub_dict = self.transformer(full_assignment)\n\n # output_mat, backsub_dict = self.transformer(self.assignment)\n lid, lnodes = self.get_layer_nodes()\n # print(lid, lnodes)\n if lnodes is None:\n # print('full assignment')\n return -1\n unassigned_nodes = [n for n in lnodes if self.bounds_mapping[n][0] < 0 < self.bounds_mapping[n][1]]\n # print('unassigned_nodes:', unassigned_nodes)\n # print(len(self.backsub_dict.keys()), self.backsub_dict.keys())\n variables = self.model.getVars()\n\n if len(unassigned_nodes) > 50:\n return -1\n\n for node in unassigned_nodes:\n if node not in backsub_dict:\n continue\n coeffs = backsub_dict[node]\n obj = grb.LinExpr(coeffs[:-1], variables) + coeffs[-1]\n \n self.model.setObjective(obj, grb.GRB.MINIMIZE)\n # self.model.update()\n # model.write(f'gurobi/{hash(frozenset(self.assignment.items()))}_{node}.lp')\n # self.model.reset()\n self.model.optimize()\n if self.model.status == grb.GRB.INFEASIBLE:\n collector.add(unsat_by_lp=True)\n self.unsat = True\n break\n lb = self.model.objval\n\n self.model.setObjective(obj, grb.GRB.MAXIMIZE)\n # self.model.update()\n # self.model.reset()\n self.model.optimize()\n ub = self.model.objval\n\n old_lb, old_ub = self.bounds_mapping[node]\n # if lb > old_lb or ub < old_ub:\n # print(f'[{node}] from ({old_lb:.02f}, {old_ub:.02f}) to ({lb:.02f}, {ub:.02f})')\n # self.model.write('gurobi/cac.lp')\n self.bounds_mapping[node] = (max(lb, old_lb), min(ub, old_ub))\n\n # lid, lnodes = self.get_layer_nodes()\n # unassigned_nodes = [n for n in lnodes if self.bounds_mapping[n][0] < 0 < self.bounds_mapping[n][1]]\n # print('unassigned_nodes:', unassigned_nodes)\n return lid\n\n\n def deserialize(self):\n full_assignment = {}\n full_assignment.update(self.assignment)\n for node in self.bounds_mapping:\n if node in full_assignment:\n continue\n l, u = self.bounds_mapping[node]\n if l >= -1e-6:\n full_assignment[node] = True\n elif u <= 1e-6:\n full_assignment[node] = False\n output_mat, backsub_dict = self.transformer(full_assignment)\n lid, lnodes = self.get_layer_nodes()\n return self.assignment, backsub_dict, (lid, lnodes), (self.input_lower, self.input_upper)\n\n\n\n def get_next_variable(self):\n # if self.full_assignment:\n # return None\n lid, lnodes = self.get_layer_nodes()\n if lnodes is None: \n return None\n unassigned_nodes = [n for n in lnodes if self.bounds_mapping[n][0] < 0 < self.bounds_mapping[n][1]]\n # print(self.assignment)\n # print(unassigned_nodes)\n if not len(unassigned_nodes): \n return None\n scores = [(n, self.get_score(n)) for n in unassigned_nodes]\n scores = sorted(scores, key=lambda tup: tup[1], reverse=True)\n return scores[0][0]\n \n\n\n def get_score(self, 
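node):\n # Hedged editor's note: this branching score favours the unstable neuron whose bounds straddle zero most widely and evenly; min(u, -l) is largest over the candidates when the interval around 0 is both large and balanced.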
\n l, u = self.bounds_mapping[node]\n # score = (u - l)\n score = min(u, -l)\n # score = (u + l) / (u - l)\n # print(score, u, l)\n # exit()\n return score#.abs()\n\n\n \n def get_layer_nodes(self):\n for lidx, nodes in self.layers_mapping.items():\n for node in nodes:\n if node in self.assignment:\n continue\n lb, ub = self.bounds_mapping[node]\n if lb < 0 < ub:\n # print(node, lb, ub)\n return lidx, nodes\n return None, None\n\n\n ","repo_name":"dynaroars/neuralsat-solver","sub_path":"src/batch_processing/domain.py","file_name":"domain.py","file_ext":"py","file_size_in_byte":12593,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"39292673554","text":"import discord\r\nfrom discord.ext import commands, menus\r\nimport json\r\nfrom Utils.utils import Utils, RawMessage\r\nimport unicodedata\r\nfrom bottom import to_bottom, from_bottom\r\nimport base64\r\nimport random\r\nimport humanize\r\nimport datetime\r\nimport time\r\nimport asyncio\r\nimport udpy\r\nfrom discord.ext.commands.cooldowns import BucketType\r\nfrom jishaku.paginators import WrappedPaginator, PaginatorEmbedInterface\r\nimport string\r\n\r\nclass EmbedUrbanSource(menus.ListPageSource):\r\n async def format_page(self, menu, item):\r\n embed = discord.Embed(title=item.word, description=item.definition if not len(item.definition) > 2048 else item.definition[:(2048-15)] + '[More chars...]')\r\n return embed\r\n\r\nclass EmbedMSGSource(menus.ListPageSource):\r\n async def format_page(self, menu, item):\r\n embed = discord.Embed(description=item.jump_url).set_author(name=item.author.name, icon_url=item.author.avatar_url)\r\n return embed\r\n\r\ndef to_string(c):\r\n digit = f'{ord(c):x}'\r\n name = unicodedata.name(c, 'Can not find')\r\n return f'`\\\\U{digit:>08}`= {name}'\r\n\r\nsquares = ['⬛', '⬜', '🟫', '🟥', '🟦', '🟩', '🟨', '🟧']\r\nrule = {str(i): squares[i] for i in range(8)}\r\nrulen = {f\"{chr(o+ord('A'))}\": o+10 for o in range(26)} | {f\"{chr(o+ord('a'))}\": o+36 for o in range(26)}\r\nrulena = {o+10 : f\"{chr(o+ord('A'))}\" for o in range(26)} | {o+36 : f\"{chr(o+ord('a'))}\" for o in range(26)}\r\ndef toEmoji(e):\r\n for k in rule.keys():\r\n e = e.replace(k, rule[k])\r\n for x in string.printable.replace(' ', '').replace('\\n', ''):\r\n e = e.replace(x, \"\")\r\n return e\r\n\r\ndef fromEmoji(e):\r\n for k in rule.keys():\r\n e = e.replace(rule[k], k)\r\n return e\r\n\r\ndef toNum(e):\r\n for k in rulen.keys():\r\n e = e.replace(k, str(rulen[k]))\r\n return e\r\n\r\n#def fromNum(e):\r\n# x = 0\r\n# while x < len(e):\r\n# e = e.replace(e[x] + e[x+1], rulena[int(e[x] + e[x+1])])\r\n# x+=2\r\n# return e\r\n\r\nclass API(commands.Cog):\r\n def __init__(self, bot):\r\n self.bot = bot\r\n self.utils = Utils(self.bot)\r\n self.urban = udpy.UrbanClient()\r\n \r\n async def do_rtfs(self, query, library):\r\n async with self.bot.session.get('https://idevision.net/api/public/rtfs', params={'query': query, 'library': library}) as resp:\r\n if not resp.status == 400:\r\n response = await resp.json()\r\n nodes = response['nodes']\r\n time = response['query_time']\r\n else:\r\n nodes = time = None\r\n return nodes, time\r\n \r\n async def do_rtfm(self, query, library):\r\n if library in ('discord.py', 'd.py', 'dpy'):\r\n library = \"https://discordpy.readthedocs.io/en/stable\"\r\n elif library in ('python', 'py'):\r\n library = \"https://docs.python.org/3/\"\r\n async with self.bot.session.get('https://idevision.net/api/public/rtfm', params={'query': query, 'location': 
library}) as resp:\r\n if not resp.status == 400:\r\n response = await resp.json()\r\n nodes = response['nodes']\r\n time = response['query_time']\r\n else:\r\n nodes = time = None\r\n return nodes, time\r\n \r\n @commands.command(brief='Shows the raw data of a message')\r\n async def msgraw(self, ctx, id):\r\n await ctx.send(self.utils.codeblock(json.dumps(await self.bot.http.get_message(ctx.channel.id, id), indent=4), language='json'))\r\n \r\n @commands.command(brief='Shows the raw data of a user')\r\n async def userraw(self, ctx, id):\r\n await ctx.send(self.utils.codeblock(json.dumps(await self.bot.http.get_user(id), indent=4), language='json'))\r\n \r\n @commands.command(brief='Shows the raw data of a server')\r\n async def guildraw(self, ctx, id):\r\n try:\r\n await ctx.send(self.utils.codeblock(json.dumps(await self.bot.http.get_guild(id), indent=4), language='json'))\r\n except discord.HTTPException:\r\n await ctx.send('Output too long, but the code for uploading to mystbin is currently broken.')\r\n \r\n @commands.command(brief='Shows unicode info on a character')\r\n async def charinfo(self, ctx, *, char:str):\r\n values = []\r\n for value in char:\r\n values.append(to_string(value))\r\n await ctx.send('\\n'.join(values))\r\n \r\n @commands.command(brief='Maps numbers to emojis')\r\n async def grid(self, ctx, *, content:commands.clean_content):\r\n x = toEmoji(content)\r\n x = await ctx.send(x.strip(\"\")) if x else None\r\n \r\n @commands.group(brief='Encodes and decodes bottom')\r\n async def bottom(self, ctx):\r\n pass\r\n \r\n @bottom.command(brief='Encodes into bottom')\r\n async def encode(self, ctx, *, content:str):\r\n await ctx.send('```\\n' + to_bottom(content) + '\\n```')\r\n \r\n @bottom.command(brief='Decodes out of bottom')\r\n async def decode(self, ctx, *, content:str):\r\n await ctx.send('```\\n' + from_bottom(content) + '\\n```')\r\n \r\n @commands.command(aliases=['pt'], brief='Shows the id belonging to the token')\r\n async def parsetoken(self, ctx, token:str):\r\n # the first dot-separated token segment is the base64-encoded user id\r\n id = base64.b64decode(token.split('.')[0])\r\n await ctx.send(f'The id belonging to this token is {id.decode()}')\r\n \r\n @commands.command(aliases=['rm'], brief='Shows a random message from the current channel')\r\n async def randommessage(self, ctx, limit:int=100):\r\n start = time.perf_counter()\r\n messages = await ctx.channel.history(limit=limit).flatten()\r\n messages.reverse()\r\n message = random.choice(messages)\r\n end = time.perf_counter()\r\n e = discord.Embed(description=message.content, color=0x2F3136)\r\n e.set_footer(text='ID: '+str(message.id) + ', Took ' + str(round((end - start)*1000, 6)) + 'ms to find message')\r\n e.add_field(name='jump url', value=f'[jump!]({message.jump_url})')\r\n e.set_author(icon_url=message.author.avatar.url, name=message.author.name)\r\n await ctx.send(embed=e)\r\n \r\n @commands.command(aliases=['rma'], brief='Shows a random message sent by you in the current channel')\r\n async def randmessageauth(self, ctx, limit:int=100):\r\n start = time.perf_counter()\r\n messages = [message for message in (await ctx.channel.history(limit=limit).flatten()) if message.author.name == ctx.author.name]\r\n message = random.choice(messages)\r\n end = time.perf_counter()\r\n e = discord.Embed(description=message.content, color=0x2F3136)\r\n e.set_footer(text='ID: '+str(message.id) + ', Took ' + str(round((end - start)*1000, 6)) + 'ms to find message')\r\n e.add_field(name='jump url', value=f'[jump!]({message.jump_url})')\r\n 
e.set_author(icon_url=message.author.avatar.url, name=message.author.name)\r\n await ctx.send(embed=e)\r\n \r\n @commands.command(aliases=['e'], brief='Sends an embed')\r\n async def embed(self, ctx, *options):\r\n await ctx.send(embed=discord.Embed.from_dict(json.loads(\"\".join(options).replace(\"'\", \"\\\"\"))))\r\n \r\n @commands.command(aliases=['re'], brief='Shows your edit history')\r\n async def recent_edits(self, ctx):\r\n formats = []\r\n messages = {}\r\n for id in self.bot.edit_history:\r\n if self.bot.edit_history[id].author.id == ctx.author.id:\r\n messages[id] = self.bot.edit_history[id]\r\n \r\n for k, v in messages.items():\r\n formats.append(f'Message {list(messages).index(k)+1}:')\r\n m = v\r\n formats.append(f' No. 1 at {m.created_at}: {m.content}')\r\n formats.append('\\n')\r\n \r\n embed = discord.Embed(title='Recent Edits', description='\\n'.join(formats), color=0x2F3136)\r\n await ctx.send(embed=embed)\r\n \r\n @commands.command(brief='Shows you the urban definition of a word')\r\n async def urban(self, ctx, *, word:str):\r\n entries = self.urban.get_definition(word)\r\n if entries == []:\r\n return await ctx.send(embed=discord.Embed(title=\"No results found\", description=f\"No entries were found for query '{word}'\"))\r\n menu = menus.MenuPages(EmbedUrbanSource(entries, per_page=1))\r\n await menu.start(ctx)\r\n \r\n @commands.command(brief='Looks up a message in the channel specified')\r\n async def msglookup(self, ctx, channel:discord.TextChannel, *, content:str):\r\n entries = await self.utils.find(content, await channel.history().flatten(), key=lambda m: m.content)\r\n if entries == []:\r\n return await ctx.send(embed=discord.Embed(title=\"No results found\", description=f\"No entries were found for query '{content}'\"))\r\n menu = menus.MenuPages(EmbedMSGSource(entries, per_page=1))\r\n await menu.start(ctx)\r\n \r\n @commands.command(brief='Reports a bug to the dev team')\r\n async def reportbug(self, ctx, bug):\r\n await ctx.send('Bug reported.')\r\n self.bot.dispatch('bug', ctx, ctx.author, bug)\r\n \r\n @commands.Cog.listener()\r\n async def on_bug(self, ctx, author, bug):\r\n self.bot.db.execute('INSERT INTO bugs(author, bug) VALUES (?, ?)', (author.id,bug))\r\n channel = self.bot.get_channel(826610452653146152)\r\n emb = discord.Embed(description=bug).set_author(name='Bug Report', icon_url=author.avatar_url)\r\n await channel.send(embed=emb)\r\n await self.bot.get_user(376129806313455616).send(embed=emb)\r\n \r\n @commands.command(brief='Searches for the source of modules')\r\n @commands.cooldown(1,5,BucketType.user)\r\n async def rtfs(self, ctx, library = 'discord.py', *, query):\r\n library = library if not library == 'dpy' else 'discord.py'\r\n nodes, time = await self.do_rtfs(query, library)\r\n if nodes == {}:\r\n return await ctx.send('No results found.')\r\n if nodes is None and time is None:\r\n return await ctx.send('Library is not available.')\r\n emb = discord.Embed(title=f'Results for \"{query}\" in {library}', description='\\n'.join([f'[{node_name}]({node_source})' for node_name, node_source in nodes.items()]), color=0x2F3136)\r\n emb.set_footer(text=f'Query Time: {round(time * 1000, 6)}ms')\r\n await ctx.send(embed=emb)\r\n \r\n# @commands.command(brief='Encodes the alphabet into a decimal format.')\r\n# async def encode(self, ctx, *, content):\r\n# await ctx.send(toNum(content))\r\n# \r\n# @commands.command(brief='Decodes decimal format into the alphabet.')\r\n# async def decode(self, ctx, *, content):\r\n# await 
ctx.send(fromNum(content))\r\n \r\n @commands.command(brief='Searches for the documentation of modules')\r\n @commands.cooldown(1,5,BucketType.user)\r\n async def rtfm(self, ctx, library = 'discord.py', *, query):\r\n nodes, time = await self.do_rtfm(query, library)\r\n if nodes == {}:\r\n return await ctx.send('No results found.')\r\n if nodes is None and time is None:\r\n return await ctx.send('Library is not available.')\r\n emb = discord.Embed(title=f'Results for \"{query}\" in {library}', description='\\n'.join([f'[{node_name}]({node_source})' for node_name, node_source in nodes.items()]), color=0x2F3136)\r\n emb.set_footer(text=f'Query Time: {round(float(time) * 1000, 6)}ms')\r\n await ctx.send(embed=emb)\r\n\r\nasync def async_setup(bot):\r\n await bot.add_cog(API(bot))\r\n\r\ndef setup(bot):\r\n return async_setup(bot)\r\n","repo_name":"ConnorTippets/Discord-Mega-Bot","sub_path":"Commands/Api/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":11460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"11498477120","text":"import unittest\nimport os\n\n\ndef last_line(file_name: str) -> str:\n with open(file_name, 'r') as f:\n for line in f:\n pass\n\n return line\n\n\ndef first_line(file_name: str) -> str:\n with open(file_name, 'r') as f:\n return f.readline()\n\n\ndef digit_word_count(line: str) -> int:\n result = 0\n for word in line.split(' '):\n if word.isdigit():\n result += int(word)\n\n return result\n\n\ndef summing_file_content(file_name: str) -> int:\n with open(file_name, 'r') as f:\n return sum(map(digit_word_count, f))\n\n\ndef vowel_count(string: str, result: dict = None) -> dict:\n if result is None:\n result = {}\n for char in string:\n c = char.lower()\n if c in 'aeiou':\n result.update({c: result.get(c, 0) + 1})\n\n return result\n\n\ndef vowel_count_file_content(file_name: str) -> dict:\n result = {}\n with open(file_name, 'r') as f:\n for line in f:\n result = vowel_count(line, result)\n\n return result\n\n\nclass MyFileSystemTestCase(unittest.TestCase):\n FILE_NAME = 'test_file'\n\n def tearDown(self) -> None:\n os.remove(self.FILE_NAME)\n\n def test_read_last_line(self) -> None:\n self.__write_file(\"\"\"\n First line\n Second line\n Last line\n \"\"\")\n\n self.assertEqual('Last line \\n', last_line(self.FILE_NAME))\n\n def test_read_first_line(self) -> None:\n self.__write_file(\"\"\"\n First line\n Second line\n Last line\n \"\"\")\n\n self.assertEqual('First line \\n', first_line(self.FILE_NAME))\n\n def test_it_sums_words_that_only_contain_digits(self) -> None:\n self.__write_file(\"\"\"\n The standard chunk of Lorem Ipsum used since the 1500s \n is reproduced below for those interested. Sections 1.10.32 \n and 1.10.33 from \"de Finibus Bonorum et Malorum\" by Cicero are also \n reproduced in their exact original form, accompanied by English versions from the \n 1914 translation by H. 
Rackham.\n \"\"\")\n\n self.assertEqual(1914, summing_file_content(self.FILE_NAME))\n\n def test_it_counts_vowels(self) -> None:\n self.__write_file(\"\"\"\n Apple\n Banana\n Orange\n \"\"\")\n\n self.assertEqual({'a': 5, 'e': 2, 'o': 1}, vowel_count_file_content(self.FILE_NAME))\n\n @staticmethod\n def __write_file(text: str):\n with open(MyFileSystemTestCase.FILE_NAME, 'w') as file:\n for line in text.split('\\n'):\n if line.strip():\n file.write(f'{line.strip()} \\n')\n","repo_name":"wbits/python-workout","sub_path":"last_line.py","file_name":"last_line.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"43721387167","text":"#!/usr/bin/env python\n\nimport math\nfrom copy import deepcopy\nimport rospy\nimport tf2_ros\nimport PyKDL\nimport moveit_commander\nfrom geometry_msgs.msg import Pose, Quaternion, Vector3, TransformStamped\nfrom tf.transformations import euler_from_quaternion\nfrom tf_conversions import posemath\nfrom std_msgs.msg import String\nfrom std_srvs.srv import Trigger\nfrom nist_gear.msg import LogicalCameraImage, VacuumGripperState\nfrom nist_gear.srv import VacuumGripperControl, VacuumGripperControlRequest\n\n\nclass AssemblyPart:\n def __init__(self, part_type, color, pose):\n self.part_type = part_type\n self.color = color\n self.pose = pose\n\n\nclass AssemblyCommander:\n ee_transform = PyKDL.Frame(PyKDL.Rotation.RPY(\n 0, math.pi/2, math.pi), PyKDL.Vector(0, 0, 0))\n gripper_offset = 0.013\n\n grip_offsets = {'sensor': PyKDL.Frame(PyKDL.Rotation.RPY(0, 0, 0), PyKDL.Vector(0.01, 0, 0.035 + gripper_offset)),\n 'pump': PyKDL.Frame(PyKDL.Rotation.RPY(0, 0, 0), PyKDL.Vector(0, 0, 0.058 + gripper_offset)),\n 'regulator': PyKDL.Frame(PyKDL.Rotation.RPY(0, 0, 0), PyKDL.Vector(0, 0, 0.034 + gripper_offset)),\n 'battery': PyKDL.Frame(PyKDL.Rotation.RPY(0, 0, 0), PyKDL.Vector(0, 0, 0.030 + gripper_offset))}\n\n def __init__(self):\n self.camera_pose = {}\n self.models = {}\n self.as1_kit_tray_sub = rospy.Subscriber('ariac/as1_kit_tray', LogicalCameraImage,\n callback=self.as1_kit_tray_callback)\n\n self.as2_kit_tray_sub = rospy.Subscriber('ariac/as2_kit_tray', LogicalCameraImage,\n callback=self.as2_kit_tray_callback)\n\n self.as3_kit_tray_sub = rospy.Subscriber('ariac/as3_kit_tray', LogicalCameraImage,\n callback=self.as3_kit_tray_callback)\n\n self.as4_kit_tray_sub = rospy.Subscriber('ariac/as4_kit_tray', LogicalCameraImage,\n callback=self.as4_kit_tray_callback)\n\n self.part_attached = False\n self.gripper_state_sub = rospy.Subscriber('/ariac/gantry/arm/gripper/state', VacuumGripperState,\n callback=self.gripper_state_callback)\n\n # Create TF Listener\n self.tfBuffer = tf2_ros.Buffer()\n self.tf_listener = tf2_ros.TransformListener(self.tfBuffer)\n self.tf_broadcaster = tf2_ros.StaticTransformBroadcaster()\n\n ns = \"ariac/gantry\"\n robot_description = ns + \"/robot_description\"\n\n self.torso_move_group = moveit_commander.MoveGroupCommander('gantry_torso',\n robot_description=robot_description,\n ns=ns)\n self.torso_move_group.set_max_acceleration_scaling_factor(0.2)\n\n self.arm_move_group = moveit_commander.MoveGroupCommander('gantry_arm',\n robot_description=robot_description,\n ns=ns)\n\n # Create gripper service proxy\n self.actuate_gripper = rospy.ServiceProxy(\n '/ariac/gantry/arm/gripper/control', VacuumGripperControl)\n self.actuate_req = VacuumGripperControlRequest()\n\n # Set up callbacks to know where the agvs are\n self.agv1_location = None\n 
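# Hedged editor's note: each AGV advertises its current station on the ariac/agvN/station topics; the callbacks below cache the latest value so move_gantry_to_agv() can check that the cart has actually reached the requested assembly station.\n 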
self.agv1_location_sub = rospy.Subscriber(\n 'ariac/agv1/station', String, callback=self.agv1_callback)\n\n self.agv2_location = None\n self.agv2_location_sub = rospy.Subscriber(\n 'ariac/agv2/station', String, callback=self.agv2_callback)\n\n self.agv3_location = None\n self.agv3_location_sub = rospy.Subscriber(\n 'ariac/agv3/station', String, callback=self.agv3_callback)\n\n self.agv4_location = None\n self.agv4_location_sub = rospy.Subscriber(\n 'ariac/agv4/station', String, callback=self.agv4_callback)\n \n ###########################################\n ############# OUR FUNCTIONS ###############\n ###########################################\n def get_gantry_position(self):\n return self.torso_move_group.get_current_pose().pose.position\n\n def orient_gantry(self, rotation):\n\n joints = self.torso_move_group.get_current_joint_values()\n\n joints[2] = rotation\n\n self.torso_move_group.set_joint_value_target(joints)\n self.torso_move_group.go()\n ###########################################\n ###########################################\n\n def get_pose_on_kit_tray(self, station_id, part):\n \"\"\"Use the logical camera to determine information about the part. Return the part pose in world coordinates. \n Returns False if the specified part is not on the desired kit_tray\"\"\"\n\n rate = rospy.Rate(10)\n while not self.models[station_id]:\n rate.sleep()\n\n for model in self.models[station_id]:\n _, part_type, color = model.type.split(\"_\")\n if part_type == part.part_type and color == part.color:\n # Convert to world coordinates\n f1 = posemath.fromMsg(self.camera_pose[station_id])\n f2 = posemath.fromMsg(model.pose)\n\n return posemath.toMsg(f1*f2)\n\n return False\n\n def move_arm_to_home_position(self):\n \"\"\"Commands the arm to move to a specified home position where it won't hit anything while the gantry moves\"\"\"\n\n self.arm_move_group.set_joint_value_target(\n [0, -math.pi, math.pi-0.6, math.pi+0.6, -math.pi/2, 0])\n self.arm_move_group.go()\n rospy.sleep(0.5)\n\n def move_gantry_to_station(self, station_id):\n \"\"\"Commands the gantry to move in front of the briefcase for the current assembly\"\"\"\n\n # Move gantry to center aisle\n try:\n t = self.tfBuffer.lookup_transform(\n 'world', 'torso_base', rospy.Time(), rospy.Duration(1)).transform\n except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):\n rospy.logerr(\"Unable to lookup transform\")\n return False\n self.move_gantry(t.translation.x, 0, 0)\n rospy.sleep(0.5)\n\n # Move gantry to correct x position\n try:\n t = self.tfBuffer.lookup_transform(\n 'world', \"briefcase_\" + station_id[-1], rospy.Time(), rospy.Duration(1)).transform\n except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):\n rospy.logerr(\"Unable to lookup transform\")\n return False\n\n self.move_gantry(t.translation.x + 2.5, 0, 0)\n rospy.sleep(0.5)\n\n # Move gantry to correct y position and rotation\n if station_id == 'as1' or station_id == 'as3':\n self.move_gantry(t.translation.x + 2, t.translation.y, 0)\n elif station_id == 'as2' or station_id == 'as4':\n self.move_gantry(t.translation.x + 2, t.translation.y, math.pi)\n\n def move_gantry_to_agv(self, station_id):\n \"\"\"Commands the gantry to move beside the agv\"\"\"\n\n # Check that the agv is at the correct location\n correct_location = True\n if station_id == 'as1':\n if not self.agv1_location == 'as1':\n correct_location = False\n if station_id == 'as2':\n if not self.agv2_location == 'as2':\n correct_location = False\n 
if station_id == 'as3':\n if not self.agv3_location == 'as3':\n correct_location = False\n if station_id == 'as4':\n if not self.agv4_location == 'as4':\n correct_location = False\n\n if not correct_location:\n rospy.logerr(\"agv is not in the correct location\")\n return False\n\n tray = \"kit_tray_\" + station_id[-1]\n\n try:\n agv_transform = self.tfBuffer.lookup_transform(\n 'world', tray, rospy.Time(), rospy.Duration(1)).transform\n except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):\n rospy.logerr(\"Unable to lookup transform\")\n return False\n\n # Move gantry to correct position\n if station_id == 'as1' or station_id == 'as3':\n self.move_gantry(agv_transform.translation.x,\n agv_transform.translation.y - 1.5, 0)\n elif station_id == 'as2' or station_id == 'as4':\n self.move_gantry(agv_transform.translation.x,\n agv_transform.translation.y + 1.5, math.pi)\n\n rospy.sleep(0.5)\n\n return True\n\n def pick_part(self, station_id, tray_pose, part):\n \"\"\"Commands the gantry arm to pickup the requested part\"\"\"\n\n #rospy.loginfo('Picking up a ' + part.color + '_' + part.part_type)\n\n # Define the pick pose\n q = [tray_pose.orientation.x, tray_pose.orientation.y,\n tray_pose.orientation.z, tray_pose.orientation.w]\n rotation = euler_from_quaternion(q)[2]\n\n tray_frame = PyKDL.Frame(PyKDL.Rotation.RPY(0, 0, rotation),\n PyKDL.Vector(tray_pose.position.x, tray_pose.position.y, tray_pose.position.z))\n\n pick_pose = posemath.toMsg(\n tray_frame*self.grip_offsets[part.part_type]*self.ee_transform)\n\n # Move gantry so that end effector is above part\n ee_pose = self.arm_move_group.get_current_pose().pose\n gantry_joints = self.torso_move_group.get_current_joint_values()\n gantry_joints[0] += pick_pose.position.x - ee_pose.position.x\n self.torso_move_group.set_joint_value_target(gantry_joints)\n self.torso_move_group.go()\n rospy.sleep(0.5)\n\n ee_pose = self.arm_move_group.get_current_pose().pose\n gantry_joints = self.torso_move_group.get_current_joint_values()\n\n if station_id == 'as1' or station_id == 'as3':\n gantry_joints[1] -= pick_pose.position.y - ee_pose.position.y - 0.1\n elif station_id == 'as2' or station_id == 'as4':\n gantry_joints[1] -= pick_pose.position.y - ee_pose.position.y + 0.1\n\n self.torso_move_group.set_joint_value_target(gantry_joints)\n self.torso_move_group.go()\n rospy.sleep(0.5)\n\n # Define pick_approach poses\n ee_pose = self.arm_move_group.get_current_pose().pose\n\n delta_y = pick_pose.position.y - ee_pose.position.y\n delta_z = pick_pose.position.z - ee_pose.position.z\n\n approach_1 = deepcopy(ee_pose)\n approach_1.position.y += delta_y\n approach_1.position.z += delta_z + 0.8\n\n approach_2 = deepcopy(approach_1)\n approach_2.position.z -= 0.6\n\n approach_3 = deepcopy(pick_pose)\n approach_3.position.z += 0.2\n\n self.publish_transform(approach_1, \"approach_1\", \"world\")\n self.publish_transform(approach_2, \"approach_2\", \"world\")\n self.publish_transform(approach_3, \"approach_3\", \"world\")\n self.publish_transform(pick_pose, \"pick\", \"world\")\n\n # Move through approach poses\n self.arm_move_group.set_pose_target(approach_1)\n plan = self.arm_move_group.plan()\n self.arm_move_group.execute(plan, wait=True)\n\n self.arm_move_group.set_pose_target(approach_2)\n plan = self.arm_move_group.plan()\n self.arm_move_group.execute(plan, wait=True)\n\n self.arm_move_group.set_pose_target(approach_3)\n plan = self.arm_move_group.plan()\n self.arm_move_group.execute(plan, wait=True)\n\n # Move 
straight down to pick pose\n plan, _ = self.arm_move_group.compute_cartesian_path(\n [pick_pose], 0.01, 0.0)\n self.arm_move_group.execute(plan, wait=True)\n\n # Turn on gripper\n self.gripper_on()\n\n rospy.sleep(0.5)\n\n # Keep moving down by a mm until the part is successfully picked up\n while not self.part_attached:\n pick_pose.position.z -= .001\n plan, _ = self.arm_move_group.compute_cartesian_path(\n [pick_pose], 0.001, 0.0)\n self.arm_move_group.execute(plan, wait=True)\n rospy.sleep(0.5)\n\n # Move through waypoints in reverse\n plan, _ = self.arm_move_group.compute_cartesian_path(\n [approach_3], 0.01, 0.0)\n self.arm_move_group.execute(plan, wait=True)\n\n def move_gantry_to_assembly_position(self, briefcase, part):\n \"\"\"Commands the gantry to move beside the requested briefcase, in the correct position to assembly the \n requested part\"\"\"\n try:\n t_briefcase = self.tfBuffer.lookup_transform(\n 'world', briefcase, rospy.Time(), rospy.Duration(2)).transform\n except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):\n rospy.logerr(\"Unable to lookup transform\")\n return False\n\n if part.part_type == 'sensor':\n gantry_x = t_briefcase.translation.x + 1.8\n gantry_y = t_briefcase.translation.y - 0.3\n gantry_rot = math.pi/2\n elif part.part_type == 'regulator':\n gantry_x = t_briefcase.translation.x + 1.6\n gantry_y = t_briefcase.translation.y + 0.2\n gantry_rot = math.pi/2\n elif part.part_type == 'battery':\n gantry_x = t_briefcase.translation.x + 1.2\n gantry_y = t_briefcase.translation.y - 0.8\n gantry_rot = math.pi/4\n elif part.part_type == 'pump':\n gantry_x = t_briefcase.translation.x + 1.2\n gantry_y = t_briefcase.translation.y - 1\n gantry_rot = math.pi/4\n\n self.move_gantry(gantry_x, gantry_y, gantry_rot)\n\n def assemble_part(self, briefcase, part):\n \"\"\"Command the gantry arm to orient and assemble the part\"\"\"\n\n rospy.loginfo('Assembling the ' + part.color + '_' + part.part_type)\n\n try:\n t = self.tfBuffer.lookup_transform(\n 'world', briefcase, rospy.Time(), rospy.Duration(1)).transform\n except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):\n rospy.logerr(\"Unable to lookup transform\")\n return False\n\n # Convert transform to kdl frame\n briefcase_pose = Pose()\n briefcase_pose.position = Vector3(\n t.translation.x, t.translation.y, t.translation.z)\n briefcase_pose.orientation = Quaternion(\n t.rotation.x, t.rotation.y, t.rotation.z, t.rotation.w)\n\n f1 = posemath.fromMsg(briefcase_pose)\n f2 = posemath.fromMsg(part.pose)\n\n assembly_pose = posemath.toMsg(f1*f2)\n\n self.publish_transform(assembly_pose, 'assembly_pose', 'world')\n\n if part.part_type == \"sensor\":\n assembly_approach = deepcopy(assembly_pose)\n assembly_approach.position.x += 0.2\n else:\n assembly_approach = deepcopy(assembly_pose)\n assembly_approach.position.z += 0.2\n\n # Create pose for the robot end effector\n ee_assembly_pose = posemath.toMsg(posemath.fromMsg(\n assembly_pose)*self.grip_offsets[part.part_type]*self.ee_transform)\n ee_assembly_approach = posemath.toMsg(posemath.fromMsg(\n assembly_approach)*self.grip_offsets[part.part_type]*self.ee_transform)\n\n self.publish_transform(ee_assembly_pose, 'ee_assembly_pose', 'world')\n self.publish_transform(ee_assembly_approach,\n 'ee_assembly_approach', 'world')\n\n if part.part_type == \"sensor\":\n # Move gantry so that end effector is in-line with part\n ee_pose = self.arm_move_group.get_current_pose().pose\n gantry_joints = 
self.torso_move_group.get_current_joint_values()\n gantry_joints[1] -= ee_assembly_approach.position.y - \\\n ee_pose.position.y\n self.torso_move_group.set_joint_value_target(gantry_joints)\n self.torso_move_group.go()\n rospy.sleep(1)\n\n # Move to initial assembly approach\n ee_assembly_approach2 = deepcopy(ee_assembly_approach)\n ee_assembly_approach2.position.z += 0.35\n self.arm_move_group.set_pose_target(ee_assembly_approach2)\n self.arm_move_group.go()\n rospy.sleep(1)\n\n # Move to assembly approach\n self.arm_move_group.set_pose_target(ee_assembly_approach)\n plan = self.arm_move_group.plan()\n\n if plan.joint_trajectory.points:\n self.arm_move_group.execute(plan)\n else:\n rospy.logerr(\n \"Unable to generate trajectory to reach assembly approach.\")\n return False\n\n rospy.sleep(0.5)\n\n # Move to assembly pose\n plan, _ = self.arm_move_group.compute_cartesian_path(\n [ee_assembly_pose], 0.01, 0.0)\n\n plan = self.arm_move_group.retime_trajectory(\n self.arm_move_group.get_current_state(), plan, 0.05, 0.1)\n\n self.arm_move_group.execute(plan, wait=True)\n\n # Turn off the gripper\n self.gripper_off()\n rospy.sleep(1)\n\n # Move robot arm away\n after_assembly = deepcopy(ee_assembly_pose)\n\n if part.part_type == \"sensor\":\n after_assembly.position.x += 0.2\n after_assembly.position.y -= 0.2\n elif part.part_type == \"regulator\":\n after_assembly.position.x += 0.2\n else:\n after_assembly.position.z += 0.2\n\n self.arm_move_group.set_pose_target(after_assembly)\n self.arm_move_group.go()\n\n return True\n\n def move_gantry(self, x, y, rotation):\n try:\n t = self.tfBuffer.lookup_transform(\n 'world', 'torso_base', rospy.Time(), rospy.Duration(1)).transform\n except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):\n rospy.logerr(\"Unable to lookup transform\")\n return False\n\n joints = self.torso_move_group.get_current_joint_values()\n\n joints[0] += x - t.translation.x\n joints[1] -= y - t.translation.y\n joints[2] = rotation\n\n self.torso_move_group.set_joint_value_target(joints)\n self.torso_move_group.go()\n\n def gripper_on(self):\n self.actuate_req.enable = True\n self.actuate_gripper(self.actuate_req)\n\n def gripper_off(self):\n self.actuate_req.enable = False\n self.actuate_gripper(self.actuate_req)\n\n def as1_kit_tray_callback(self, msg):\n self.camera_pose['as1'] = msg.pose\n self.models['as1'] = msg.models\n\n def as2_kit_tray_callback(self, msg):\n self.camera_pose['as2'] = msg.pose\n self.models['as2'] = msg.models\n\n def as3_kit_tray_callback(self, msg):\n self.camera_pose['as3'] = msg.pose\n self.models['as3'] = msg.models\n\n def as4_kit_tray_callback(self, msg):\n self.camera_pose['as4'] = msg.pose\n self.models['as4'] = msg.models\n\n def agv1_callback(self, msg):\n self.agv1_location = msg.data\n\n def agv2_callback(self, msg):\n self.agv2_location = msg.data\n\n def agv3_callback(self, msg):\n self.agv3_location = msg.data\n\n def agv4_callback(self, msg):\n self.agv4_location = msg.data\n\n def gripper_state_callback(self, msg):\n self.part_attached = msg.attached\n\n def publish_transform(self, pose, child, parent):\n t = TransformStamped()\n t.header.stamp = rospy.Time.now()\n t.header.frame_id = parent\n t.child_frame_id = child\n t.transform.translation.x = pose.position.x\n t.transform.translation.y = pose.position.y\n t.transform.translation.z = pose.position.z\n t.transform.rotation.x = pose.orientation.x\n t.transform.rotation.y = pose.orientation.y\n t.transform.rotation.z = pose.orientation.z\n 
t.transform.rotation.w = pose.orientation.w\n\n self.tf_broadcaster.sendTransform(t)","repo_name":"karajfilip/ariac22","sub_path":"project/src/nist_assembly.py","file_name":"nist_assembly.py","file_ext":"py","file_size_in_byte":20059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37257790692","text":"#!/usr/bin/python3\n\"\"\"Python script that gather data from an API to export data in the JSON\"\"\"\n\nimport json\nimport requests\n\n\nif __name__ == \"__main__\":\n dict_tasks = {}\n empl_d = requests.get('https://jsonplaceholder.typicode.com/users/').json()\n todos_d = requests.get(\"https://jsonplaceholder.typicode.com/todos\").json()\n\n dict_tasks = {item.get(\"id\"):\n [{\"username\": item.get(\"username\"),\n \"task\": j.get(\"title\"),\n \"completed\": j.get(\"completed\")}\n for j in todos_d\n if j.get(\"userId\") == item.get(\"id\")]\n for item in empl_d}\n\n with open(\"todo_all_employees.json\", 'w') as f:\n json.dump(dict_tasks, f)\n","repo_name":"rodrigoandresd/holbertonschool-back-end","sub_path":"api/3-dictionary_of_list_of_dictionaries.py","file_name":"3-dictionary_of_list_of_dictionaries.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39204914820","text":"import meep as mp\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Polygon, Patch\nfrom matplotlib.ticker import FormatStrFormatter\nfrom solvers import triangular\n\nnum_bands = 2\nresolution = 32\n\nms = triangular()\nms.num_bands = num_bands\nms.resolution = resolution\n\nnum_x = 30\nresults = []\nradii = np.linspace(0, 0.5, num=num_x)\nfor r in radii:\n geometry = [mp.Cylinder(r, material=mp.Medium(epsilon=12.96))]\n ms.geometry = geometry\n ms.run_tm()\n if len(ms.gap_list) > 0:\n results.append(ms.gap_list[0])\n else:\n results.append((0, 0, 0))\n\npolypoints = [(r, b[2]) for r, b in zip(radii, results) if b != (0, 0, 0)]\npolypoints.extend([(r, b[1]) for r, b in zip(radii, results) if b != (0, 0, 0)][::-1])\n\nmax_idx = np.argmax([b[0] for b in results])\nmax_gap_centre = np.mean(results[max_idx][1:])\nmax_gap_half = results[max_idx][2] - max_gap_centre\n\nmatplotlib.rcParams['axes.labelsize'] = 18\nmatplotlib.rcParams['xtick.labelsize'] = 14\nmatplotlib.rcParams['ytick.labelsize'] = 14\nmatplotlib.rcParams['legend.fontsize'] = 14\n\nfig, ax = plt.subplots(constrained_layout=True)\npoly = Polygon(polypoints)\nax.errorbar(radii[max_idx], max_gap_centre, yerr=max_gap_half, color='black')\nax.plot([radii[max_idx], 0.5], [results[max_idx][2]]*2, color='black', ls=':', alpha=0.5)\nax.plot([radii[max_idx], 0.5], [results[max_idx][1]]*2, color='black', ls=':', alpha=0.5)\nax.axvline(radii[max_idx], ls='--', color='black', alpha=0.5, zorder=3)\nax.add_patch(poly)\nax.legend(handles=[Patch(facecolor='C0', label='first photonic band gap')], loc='upper right')\nax.set_xlim([0, 0.5])\nax.set_ylim([0.15, 0.4])\naxy = ax.twinx()\naxy.set_xlim([0, 0.5])\naxy.set_ylim([0.15, 0.4])\naxy.set_yticks([results[max_idx][2], results[max_idx][1]])\naxy.yaxis.set_major_formatter(FormatStrFormatter('%.3f'))\nax.set_xlabel(r'pillar radius / $r/a$')\nax.set_ylabel(r'frequency / $\\omega a/2\\pi 
c$')\nfig.savefig('bandgap_vs_ratio.pdf')\n\nplt.show()","repo_name":"alkamid/lab-scripts","sub_path":"mpb/tear_plot.py","file_name":"tear_plot.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31148208979","text":"#https://www.codechef.com/SNCKQL19/problems/QUALPREL\n\n#reference https://www.geeksforgeeks.org/kth-smallestlargest-element-unsorted-array-set-3-worst-case-linear-time/\n\n#https://stackoverflow.com/questions/10806303/python-implementation-of-median-of-medians-algorithm\n\ndef qSelect(A, i):\n    t = len(A)\n\n    if(t <= 15):\n        return sorted(A)[i]\n    else:\n        # medians of the groups of 15\n        B = [qSelect(k, (len(k) - 1)//2) for k in [A[j:(j + 15)] for j in range(0, len(A), 15)]]\n\n        # median of medians\n        M = qSelect(B, (len(B) - 1)//2)\n\n        # partition around M and recurse into the right part\n        P1 = [j for j in A if j < M]\n        if(i < len(P1)):\n            return qSelect(P1, i)\n        P3 = [j for j in A if j > M]\n        L3 = len(P3)\n        if(i < (t - L3)):\n            return M\n        return qSelect(P3, i - (t - L3))\n\n\n\nt=int(input())\nwhile t!=0:\n    t-=1\n    n,k=[int(x) for x in input().split()]\n    array=[int(x) for x in input().split()]\n    number=qSelect(array,n-k)\n    count=0\n    for a in array:\n        if(a>=number):\n            count+=1\n    print(count)\n\n","repo_name":"thecodearrow/100-Days-Of-Code","sub_path":"Qualifying to Pre-elimination.py","file_name":"Qualifying to Pre-elimination.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"5999441931","text":"import sys\n\nfile_path = sys.argv[1]\nprint(sys.argv[0])\n\nwith open(file_path, 'r') as f:\n    text = f.read()\n    # print(text)\n    # print(text.replace('_', ' '))\n    with open(\"res.txt\", 'w') as wf:\n        wf.write(text.replace('_', ' '))\n","repo_name":"Starxyz/Utilities","sub_path":"replaceSpace.py","file_name":"replaceSpace.py","file_ext":"py","file_size_in_byte":239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71758347786","text":"import tkinter as tk\nfrom typing import Text\nfrom tkinter import *\nfrom PIL import Image, ImageTk\nfrom program import randomlist\nfrom program1 import sum1\nfrom program2 import sum2\nfrom program3 import sum3\nfrom program4 import sum4\nimport random\n\n\nwindow = tk.Tk()\nwindow.geometry(\"600x800\") # size of app\nwindow.minsize(600, 800)\nwindow.maxsize(600, 800)\nwindow.title(\"วันนี้กินอะไรดี?\")\n# window.configure(bg =\"#54A2A4\") # set background color\n\nbg = Image.open(\"draf_p1_cf.jpg\")\nbg = bg.resize((600,800), Image.ANTIALIAS)\nlabel_bg = ImageTk.PhotoImage(bg)\nlabel = tk.Label(image=label_bg)\nlabel.place( x=-2, y=0)\n\n#explain\nframe1 = tk.Frame(master = window, width = 40 ,height = 300 ,bg=\"white\")\n\n\nlabel1 = tk.Label(master=frame1, text=\"1.โปรแกรมนี้เป็นโปรแกรมที่อยู่ในช่วงทดลอง อาจจะเกิดปัญหาในการใช้งาน\",bg=\"white\",font=\"Ayuthaya 14\")\nlabel2 = tk.Label(master=frame1, text=\"2.วิธีการใช้งาน\",bg=\"white\",font=\"Ayuthaya 14\")\nlabel3 = tk.Label(master=frame1, text=\" 2.1 หลังจากกด เริ่ม โปรแกรมจะขึ้นหน้าต่างให้ผู้ใช้เลือกว่าต้องการจะสุ่มอาหารประเภทไหน\",bg=\"white\",font=\"Ayuthaya 14\")\nlabel4 = tk.Label(master=frame1, text=\" 2.2 หลังจากเลือกประเภทของอาหารแล้ว ถ้าต้องการสุ่มใหม่สามารถกดที่ปุ่มสุ่มอีกครั้งหรือปิดหน้าต่างได้เลย\",bg=\"white\",font=\"Ayuthaya 14\")\nlabel4_1 = tk.Label(master=frame1, text=\" โปรแกรมจะกลับไปที่หน้าเลือกประเภทอาหาร\",bg=\"white\",font=\"Ayuthaya 14\")\nlabel5 = tk.Label(master=frame1, text=\" 2.3 
ถ้าต้องการทราบวิธีทำ คลิ๊กที่ วิธีทำ โปรแกรมจะลิ้งค์ขึ้นหน้าเว็บไซต์ให้อัตโนมัติ\",bg=\"white\",font=\"Ayuthaya 14\")\nlabel6 = tk.Label(master=frame1, text=\"3.ขอบคุณที่ร่วมทดลองใช้\",bg=\"white\",font=\"Ayuthaya 14\")\n\nframe1.place(x = 47 , y = 300)\nlabel1.pack(anchor='w')\nlabel2.pack(anchor='w')\nlabel3.pack(anchor='w')\nlabel4.pack(anchor='w')\nlabel4_1.pack(anchor='w')\nlabel5.pack(anchor='w')\nlabel6.pack(anchor='w')\n\n#create button that click to next page\nbt = tk.Button(master = window, text=\"เริ่ม\",width=10,height=1,font=\"Ayuthaya 20\",bg=\"#FCAF38\",command=randomlist)\nbt.place(x = 220, y = 520)\n\n\nwindow.mainloop()\n\n","repo_name":"ncqxm/305272-Advanced-Computer-Programming","sub_path":"Project/วันนี้กินไรดี.py","file_name":"วันนี้กินไรดี.py","file_ext":"py","file_size_in_byte":2877,"program_lang":"python","lang":"th","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19226898147","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jul 12 20:12:43 2022\r\n\r\n@author: User\r\n\"\"\"\r\n\r\nfrom datetime import date\r\n\r\ndate_1=date(1992, 1, 16)\r\ndate_2=date(1991, 2, 5)\r\nprint(date_1-date_2) ","repo_name":"KEVIN32-23/Cuaderno","sub_path":"untitled141.py","file_name":"untitled141.py","file_ext":"py","file_size_in_byte":192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4398811145","text":"from ray.rllib.models.torch.torch_modelv2 import TorchModelV2\nfrom ray.rllib.models import ModelCatalog\nfrom ray.rllib.utils.annotations import override\nfrom ray.rllib.utils import try_import_torch\nimport numpy as np\n\ntorch, nn = try_import_torch()\n\n\nclass ResidualBlock(nn.Module):\n def __init__(self, channels, init_normed=False):\n super().__init__()\n self.conv0 = nn.Conv2d(in_channels=channels, out_channels=channels, kernel_size=3, padding=1)\n self.conv1 = nn.Conv2d(in_channels=channels, out_channels=channels, kernel_size=3, padding=1)\n if init_normed:\n scale = (1/3**0.5 * 1/2**0.5)**0.5 \n # Understand logic for this scale -> \n # https://github.com/openai/phasic-policy-gradient/blob/master/phasic_policy_gradient/impala_cnn.py\n self.conv0.weight.data *= scale / self.conv0.weight.norm(dim=(1, 2, 3), p=2, keepdim=True)\n nn.init.zeros_(self.conv0.bias)\n self.conv1.weight.data *= scale / self.conv1.weight.norm(dim=(1, 2, 3), p=2, keepdim=True)\n nn.init.zeros_(self.conv1.bias)\n\n \n def forward(self, x):\n inputs = x\n x = nn.functional.relu(x)\n x = self.conv0(x)\n x = nn.functional.relu(x)\n x = self.conv1(x)\n return x + inputs\n\n\nclass ConvSequence(nn.Module):\n def __init__(self, input_shape, out_channels, init_normed=False):\n super().__init__()\n self._input_shape = input_shape\n self._out_channels = out_channels\n self.conv = nn.Conv2d(in_channels=self._input_shape[0], out_channels=self._out_channels, kernel_size=3, padding=1)\n self.res_block0 = ResidualBlock(self._out_channels, init_normed)\n self.res_block1 = ResidualBlock(self._out_channels, init_normed)\n if init_normed:\n self.conv.weight.data *= 1. 
/ self.conv.weight.norm(dim=(1, 2, 3), p=2, keepdim=True)\n nn.init.zeros_(self.conv.bias)\n\n def forward(self, x, pool=True):\n x = self.conv(x)\n if pool:\n x = nn.functional.max_pool2d(x, kernel_size=3, stride=2, padding=1)\n x = self.res_block0(x)\n x = self.res_block1(x)\n assert x.shape[1:] == self.get_output_shape()\n return x\n\n def get_output_shape(self):\n _c, h, w = self._input_shape\n return (self._out_channels, (h + 1) // 2, (w + 1) // 2)\n\n\nclass ImpalaCNN(TorchModelV2, nn.Module):\n \"\"\"\n Network from IMPALA paper implemented in ModelV2 API.\n\n Based on https://github.com/ray-project/ray/blob/master/rllib/models/tf/visionnet_v2.py\n and https://github.com/openai/baselines/blob/9ee399f5b20cd70ac0a871927a6cf043b478193f/baselines/common/models.py#L28\n \"\"\"\n\n def __init__(self, obs_space, action_space, num_outputs, model_config,\n name, device):\n TorchModelV2.__init__(self, obs_space, action_space, num_outputs,\n model_config, name)\n nn.Module.__init__(self)\n self.device = device\n depths = model_config['custom_model_config'].get('depths')\n nlatents = model_config['custom_model_config'].get('nlatents')\n init_normed = model_config['custom_model_config'].get('init_normed')\n self.use_layernorm = model_config['custom_model_config'].get('use_layernorm')\n self.diff_framestack = model_config['custom_model_config'].get('diff_framestack')\n \n h, w, c = obs_space.shape\n if self.diff_framestack:\n assert c == 6, \"diff_framestack is only for frame_stack = 2\"\n c = 9\n shape = (c, h, w)\n\n conv_seqs = []\n for out_channels in depths:\n conv_seq = ConvSequence(shape, out_channels, init_normed)\n shape = conv_seq.get_output_shape()\n conv_seqs.append(conv_seq)\n self.conv_seqs = nn.ModuleList(conv_seqs)\n self.hidden_fc = nn.Linear(in_features=shape[0] * shape[1] * shape[2], out_features=nlatents)\n if init_normed:\n self.hidden_fc.weight.data *= 1.4 / self.hidden_fc.weight.norm(dim=1, p=2, keepdim=True)\n nn.init.zeros_(self.hidden_fc.bias)\n self.pi_fc = nn.Linear(in_features=nlatents, out_features=num_outputs)\n self.value_fc = nn.Linear(in_features=nlatents, out_features=1)\n self.aux_vf = nn.Linear(in_features=nlatents, out_features=1)\n if init_normed:\n self.pi_fc.weight.data *= 0.1 / self.pi_fc.weight.norm(dim=1, p=2, keepdim=True)\n self.value_fc.weight.data *= 0.1 / self.value_fc.weight.norm(dim=1, p=2, keepdim=True)\n self.aux_vf.weight.data *= 0.1 / self.aux_vf.weight.norm(dim=1, p=2, keepdim=True)\n else:\n nn.init.orthogonal_(self.pi_fc.weight, gain=0.01)\n nn.init.orthogonal_(self.value_fc.weight, gain=1)\n nn.init.orthogonal_(self.aux_vf.weight, gain=1)\n\n nn.init.zeros_(self.pi_fc.bias)\n nn.init.zeros_(self.value_fc.bias)\n nn.init.zeros_(self.aux_vf.bias)\n if self.use_layernorm:\n self.layernorm = nn.LayerNorm(nlatents)\n\n \n @override(TorchModelV2)\n def forward(self, input_dict, state, seq_lens):\n x = input_dict[\"obs\"].float()\n if self.diff_framestack:\n x = torch.cat([x, x[...,:-3] - x[...,-3:]], dim=3) # only works for framestack 2 for now\n x = x / 255.0 # scale to 0-1\n x = x.permute(0, 3, 1, 2) # NHWC => NCHW\n x = self.conv_seqs[0](x)\n x = self.conv_seqs[1](x)\n x = self.conv_seqs[2](x)\n x = torch.flatten(x, start_dim=1)\n x = nn.functional.relu(x)\n x = self.hidden_fc(x)\n if self.use_layernorm:\n x = self.layernorm(x)\n x = torch.tanh(x)\n else:\n x = nn.functional.relu(x)\n logits = self.pi_fc(x)\n value = self.value_fc(x.detach())\n self._value = value.squeeze(1)\n self._aux_value = self.aux_vf(x).squeeze(1)\n return logits, 
state\n\n @override(TorchModelV2)\n def value_function(self):\n assert self._value is not None, \"must call forward() first\"\n return self._value\n \n def aux_value_function(self):\n return self._aux_value\n \n \n def vf_pi(self, obs, no_grad=False, ret_numpy=False, to_torch=False):\n if to_torch:\n obs = torch.tensor(obs).to(self.device)\n \n def v_pi(obs):\n pi, _ = self.forward({\"obs\": obs}, None, None)\n v = self.value_function()\n return v, pi\n \n if no_grad:\n with torch.no_grad():\n v, pi = v_pi(obs)\n else:\n v, pi = v_pi(obs)\n \n if ret_numpy:\n return v.cpu().numpy(), pi.cpu().numpy()\n else:\n return v, pi\n \nModelCatalog.register_custom_model(\"impala_torch_ppg\", ImpalaCNN)\n","repo_name":"dipamc/procgen-competition","sub_path":"models/impala_ppg.py","file_name":"impala_ppg.py","file_ext":"py","file_size_in_byte":6821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73285289224","text":"from CamObject import CamObject\r\nfrom consts import *\r\nfrom HeeksConfig import HeeksConfig\r\nimport cad\r\nimport wx\r\nfrom Object import PyProperty\r\nfrom Object import PyPropertyLength\r\nfrom Object import PyChoiceProperty\r\nimport Tool\r\nimport Operations\r\n\r\nclass PyToolProperty(PyChoiceProperty):\r\n def __init__(self, op):\r\n # a choice property which lets you choose a drop-down of tool names and updates the op's tool_number attribute\r\n choices = ['None']\r\n alternative_values = [0]\r\n obj_list = wx.GetApp().program.tools\r\n if obj_list != None:\r\n obj = obj_list.GetFirstChild()\r\n while obj:\r\n if obj.GetIDGroupType() == Tool.type:\r\n choices.append(obj.GetTitle())\r\n alternative_values.append(obj.tool_number)\r\n obj = obj_list.GetNextChild()\r\n \r\n PyChoiceProperty.__init__(self, 'Tool', 'tool_number', choices, op, alternative_values)\r\n\r\n\r\nclass Operation(CamObject):\r\n def __init__(self):\r\n CamObject.__init__(self, id_named = True)\r\n self.active = True\r\n self.comment = ''\r\n self.tool_number = 0\r\n self.pattern = 1\r\n self.surface = 0\r\n \r\n def TypeName(self):\r\n return \"Operation\"\r\n \r\n def icon(self):\r\n # the name of the PNG file in the HeeksCNC icons folder\r\n if self.active:\r\n return self.op_icon()\r\n else:\r\n return \"noentry\"\r\n \r\n def CanBeDeleted(self):\r\n return True\r\n \r\n def UsesTool(self): # some operations don't use the tool number\r\n return True\r\n \r\n def ReadDefaultValues(self):\r\n config = HeeksConfig()\r\n self.tool_number = config.ReadInt(\"Tool\", 0)\r\n self.pattern = config.ReadInt(\"Pattern\", 0)\r\n self.surface = config.ReadInt(\"Surface\", 0)\r\n\r\n def WriteDefaultValues(self):\r\n config = HeeksConfig()\r\n config.WriteInt(\"Tool\", self.tool_number)\r\n config.WriteInt(\"Pattern\", self.pattern)\r\n config.WriteInt(\"Surface\", self.surface)\r\n\r\n def SetDefaultTool(self, tool_type): \r\n if self.tool_number == 0:\r\n t = wx.GetApp().program.tools.FindFirstTool(tool_type)\r\n if t != None:\r\n self.tool_number = t.tool_number \r\n \r\n def DoGCodeCalls(self):\r\n if len(self.comment) > 0:\r\n comment(self.comment) \r\n\r\n if self.UsesTool():\r\n wx.GetApp().SetTool(self.tool_number) # Select the correct tool.\r\n \r\n def CopyFrom(self, object):\r\n CamObject.CopyFrom(self, object)\r\n self.active = object.active\r\n self.comment = object.comment\r\n self.tool_number = object.tool_number\r\n self.pattern = object.pattern\r\n self.surface = object.surface\r\n \r\n def CanAddTo(self, owner):\r\n return owner.GetType() 
== Operations.type\r\n    \r\n    def PreferredPasteTarget(self):\r\n        return wx.GetApp().program.operations\r\n    \r\n    def WriteXml(self):\r\n        if len(self.comment) > 0: cad.SetXmlValue('comment', self.comment)\r\n        cad.SetXmlValue('active', self.active)\r\n        cad.SetXmlValue('tool_number', self.tool_number)\r\n        cad.SetXmlValue('pattern', self.pattern)\r\n        cad.SetXmlValue('surface', self.surface)\r\n        CamObject.WriteXml(self)\r\n    \r\n    def ReadXml(self):\r\n        self.comment = cad.GetXmlValue('comment', self.comment)\r\n        self.active = cad.GetXmlBool('active', self.active)\r\n        self.tool_number = cad.GetXmlInt('tool_number', self.tool_number)\r\n        self.pattern = cad.GetXmlInt('pattern', self.pattern)\r\n        self.surface = cad.GetXmlInt('surface', self.surface)\r\n        CamObject.ReadXml(self)\r\n    \r\n    def GetProperties(self):\r\n        properties = []\r\n\r\n        properties.append(PyProperty(\"Comment\", 'comment', self))\r\n        properties.append(PyProperty(\"Active\", 'active', self))\r\n        properties.append(PyToolProperty(self))\r\n        properties.append(PyProperty(\"Pattern\", 'pattern', self))\r\n        properties.append(PyProperty(\"Surface\", 'surface', self))\r\n        \r\n        properties += CamObject.GetProperties(self)\r\n\r\n        return properties\r\n    \r\n    def CheckToolExists(self):\r\n        # returns failure message or None for success\r\n        if self.tool_number == 0:\r\n            return 'No tool defined for operation!'\r\n        tool = Tool.FindTool(self.tool_number)\r\n        if tool is None:\r\n            return 'Tool number ' + str(self.tool_number) + ' does not exist'\r\n","repo_name":"danheeks/PyCAM","sub_path":"Operation.py","file_name":"Operation.py","file_ext":"py","file_size_in_byte":4586,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"81"} +{"seq_id":"14907960573","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[39]:\n\n\nimport pandas as pd\nimport numpy as np\n\n\n# In[40]:\n\n\nurl = \"https://media.geeksforgeeks.org/wp-content/uploads/nba.csv\"\n\ndf = pd.read_csv(url)\n\n\n# In[46]:\n\n\n#df[\"Number\"] = df[\"Number\"] - 1\ndf.rename(columns={\"Name\": \"Full name\"},inplace=True)\n\n\n# In[58]:\n\n\nbins = np.linspace(min(df[\"Age\"]),max(df[\"Age\"]),3) \n# the last parameter is len(group_names)+1, i.e. the number of bin edges\n\n\n# In[59]:\n\n\nbins\n\n\n# In[60]:\n\n\ngroup_names = [\"Low\",\"High\"]\n\n\n# In[61]:\n\n\ndf[\"Age-binded\"] = pd.cut(df[\"Age\"],bins, labels = group_names, include_lowest = True)\n\n\n# In[63]:\n\n\ndf\n\n\n# In[65]:\n\n\npd.get_dummies(df[\"Position\"])\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"javi040898/DATA-ANALYSIS-WITH-PYTHON","sub_path":"Leccion2/ipynb.py","file_name":"ipynb.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9379321930","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 1 15:57:08 2022\n\n@author: masaokafumiya\n\"\"\"\n## created on a Mac environment\n# In[1]:\n# load libraries\nimport numpy as np\nimport numpy.random as random\nimport scipy as sp\nimport pandas as pd\nfrom pandas import Series, DataFrame\n\n# visualization libraries\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport seaborn as sns\nsns.set()\n\nfrom sklearn import linear_model\n\n# In[2]:\nimport os\n# set the working directory\nos.chdir('/Users/masaokafumiya/Library/')\n\n# In[3]:\n# load the Excel file\ntest_data = pd.ExcelFile('data.xlsx')\nprint(test_data.sheet_names)\ntest_data_df = test_data.parse()\n\n# In[4]:\n# dump the basic summary statistics\n## show all variables\nprint(test_data_df['コンビニ店舗数(1万人あたり)'].describe())\nprint(test_data_df['たばこ支出金額'].describe())\nprint(test_data_df['喫煙率'].describe())\nprint(test_data_df['人口密度'].describe())\n\n# In[5]:\n# plot on the map (preparation)\nfrom matplotlib import rcParams\n\nrcParams[\"font.family\"] = \"sans-serif\"\nrcParams[\"font.sans-serif\"] = \"Hiragino Maru Gothic Pro\"\n\nimport cv2\nfrom PIL import Image\nimport matplotlib.colors\nfrom japanmap import *\n\ntest_data_df = test_data_df.iloc[:53,:13]\n\ndef plot_prefecture_map(column):\n    # sum the column per prefecture\n    totals = {}\n    for k, n in zip(test_data_df[\"都道府県\"], test_data_df[column]):\n        name = pref_names[pref_code(k)]\n        totals[name] = totals.get(name, 0) + n\n\n    cmap = plt.cm.rainbow\n    norm = matplotlib.colors.Normalize(vmin=min(totals.values()), vmax=max(totals.values()))\n\n    # map each value to an RGB triple in the 0-255 range\n    colors = {k: tuple(255 * c for c in cmap(norm(v))[:3]) for k, v in totals.items()}\n\n    plt.figure(figsize=(10,8))\n    plt.imshow(picture(colors))\n    plt.grid(False)\n    plt.title(column)\n\n    sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)\n    plt.colorbar(sm)\n    plt.show()\n\n# In[6]:\n# plot on the map (convenience stores per 10,000 people)\nplot_prefecture_map(\"コンビニ店舗数(1万人あたり)\")\n\n# In[7]:\n# plot on the map (tobacco spending)\nplot_prefecture_map(\"たばこ支出金額\")\n\n# In[8]:\n# plot on the map (smoking rate)\nplot_prefecture_map(\"喫煙率\")\n\n# In[9]:\n# plot on the map (population density)\nplot_prefecture_map(\"人口密度\")\n\n# In[10]:\n# histograms and pairwise scatter plots of several variables\nsns.pairplot(test_data_df[['コンビニ店舗数(1万人あたり)','たばこ支出金額','喫煙率','人口密度']])\nplt.grid(True)\n\n# In[11]:\n# visualize the correlations (heatmap)\nimport seaborn as sns\ncor = test_data_df[[ 'コンビニ店舗数(1万人あたり)','たばこ支出金額','喫煙率','人口密度']].corr()\n\nsns.heatmap(cor, cmap= sns.color_palette('coolwarm', 10), \n            annot=True,\n            fmt='.2f', \n            vmin = -1, \n            vmax = 1\n           )\nplt.title(\"相関関係を可視化する(ヒートマップ)\")\n\n# In[12]:\n# simple regression of the smoking rate on convenience stores per 10,000 people\n# build the plot\nreg = linear_model.LinearRegression()\nprint(reg)\n\nX = test_data_df.loc[:,['コンビニ店舗数(1万人あたり)']].values\nY = test_data_df['喫煙率'].values\n\nreg.fit(X,Y)\n\nplt.plot(test_data_df['コンビニ店舗数(1万人あたり)'],test_data_df['喫煙率'],\"o\",)\n\nplt.xlabel('コンビニ店舗数(1万人あたり)')\nplt.ylabel('喫煙率')\nplt.title(\"コンビニ店舗数(1万人あたり)と喫煙率の単回帰分析\")\nplt.grid(True)\n\nplt.plot(X, reg.predict(X))\nplt.grid(True)\n\n# regression coefficient\nprint('回帰係数', reg.coef_)\n# intercept\nprint('切片', reg.intercept_)\n# coefficient of determination\nprint('決定係数', reg.score(X,Y))\n","repo_name":"Fumi02043/regression-analysis","sub_path":"コンビニ店舗数・人口密度と喫煙率に関する.py","file_name":"コンビニ店舗数・人口密度と喫煙率に関する.py","file_ext":"py","file_size_in_byte":5841,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72689206665","text":"import schedule\nimport time\nimport RPi.GPIO as GPIO\n\ndef setupAsOutput(pinNumber):\n    GPIO.setmode(GPIO.BOARD)\n    GPIO.setup(pinNumber, GPIO.OUT)\n    GPIO.output(pinNumber, GPIO.LOW)\n\ndef destroyOutput(pinNumber):\n    GPIO.output(pinNumber, GPIO.LOW)\n    GPIO.cleanup()\n\nif __name__ == '__main__': # Program start from here\n    setupAsOutput(12)\n\n    schedule.every(6).minutes.do(lambda: (\n        GPIO.output(12, GPIO.LOW)\n    ))\n    \n    schedule.every(4).minutes.do(lambda: (\n        GPIO.output(12, GPIO.HIGH)\n    ))\n\n    while 1:\n        schedule.run_pending()\n        time.sleep(1)","repo_name":"barend-erasmus/raspberry-pi-home-automation","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8169491914","text":"from datetime import date\nanoa = date.today().year\ntotmaior = 0\ntotmenor = 0\nfor pessoas in range(1, 8):\n    ano = int(input('Qual ano a {}ª pessoa nasceu? '.format(pessoas)))\n    idade = anoa - ano\n    if idade >= 18:\n        totmaior += 1\n    else:\n        totmenor += 1\nprint('Tivemos \\033[4m{}\\033[m pessoas maiores de idade'.format(totmaior))\nprint('Tivemos \\033[4m{}\\033[m pessoas menores de idade'.format(totmenor))","repo_name":"GuilhermeUrben/python","sub_path":"curso_em_video/Exercicios/ex054.py","file_name":"ex054.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38885431677","text":"import discord\r\nimport discord.ext.commands\r\nfrom better_profanity import profanity\r\nimport os\r\nimport json\r\n\r\nwarns = {}\r\n\r\nif os.path.exists(\"./warns.json\"):\r\n    with open(\"./warns.json\", \"r\") as file:\r\n        warns = json.load(file)\r\n\r\nbot = discord.ext.commands.Bot(\"c!\")\r\n\r\n@bot.listen()\r\nasync def on_message(message):\r\n    if profanity.contains_profanity(message.content):\r\n        hook = await message.channel.create_webhook(name=message.author.display_name, avatar=message.author.avatar)\r\n        await hook.send(profanity.censor(message.content, \"#\"))\r\n        await message.delete() #message.edit(content=profanity.censor(message.content))\r\n        await message.channel.send(\"Don't swear, \" + message.author.mention + \"! 
You have been warned.\")\r\n        await _warn(message.author, \"Swearing\")\r\n        await hook.delete()\r\n\r\n@bot.command()\r\n@discord.ext.commands.has_permissions(kick_members=True)\r\nasync def warn(ctx, uid: discord.Member, reason=\"None\"):\r\n    await ctx.send(\"Warned \" + uid.display_name + \" for reason \" + reason)\r\n    await _warn(uid, str(reason))\r\n\r\n@bot.command()\r\n@discord.ext.commands.has_permissions(kick_members=True)\r\nasync def kick(ctx, uid: discord.Member, reason=\"None\"):\r\n    await ctx.send(\"Kicked \" + uid.display_name + \" for reason \" + str(reason))\r\n    await uid.send(\"You have been kicked from \" + ctx.guild.name + \".\\nReason: \" + reason)\r\n    await uid.kick()\r\n\r\n@bot.command()\r\n@discord.ext.commands.has_permissions(ban_members=True)\r\nasync def ban(ctx, uid: discord.Member, reason=\"None\"):\r\n    await ctx.send(\"Banned \" + uid.display_name + \" for reason \" + str(reason))\r\n    await uid.send(\"You have been banned from \" + ctx.guild.name + \".\\nReason: \" + reason)\r\n    await uid.ban()\r\n\r\n@warn.error\r\n@kick.error\r\n@ban.error\r\nasync def error(ctx, error):\r\n    # command error handlers receive the context first, then the error\r\n    if isinstance(error, discord.ext.commands.MissingPermissions):\r\n        await ctx.send(\"You don't have the permissions to do that!\")\r\n\r\nasync def _warn(uid: discord.Member, reason):\r\n    global warns\r\n    key = str(uid.id)  # JSON object keys are strings, so track warns by user id\r\n    if key in warns:\r\n        warns[key] += 1\r\n    else:\r\n        warns[key] = 1\r\n    await uid.send(\"You have been warned in \" + uid.guild.name + \"\\nReason: \" + reason + \"\\nYour warns: \" + str(warns[key]))\r\n    if warns[key] == 7:\r\n        await uid.send(\"You have been banned from \" + uid.guild.name + \".\\nReason: Too many warns.\")\r\n        warns[key] = 0\r\n        await uid.ban()\r\n\r\n@bot.event\r\nasync def on_disconnect():\r\n    global warns\r\n    with open(\"./warns.json\", \"w\") as file:\r\n        json.dump(warns, file)\r\n\r\nbot.run(os.getenv(\"DISCORD_TOKEN\"))\r\n","repo_name":"Unity123/chocolate-bot","sub_path":"chocolatebot.py","file_name":"chocolatebot.py","file_ext":"py","file_size_in_byte":2589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10380981094","text":"from matplotlib import font_manager\nfrom matplotlib import pyplot as plt\n\nfont = font_manager.FontProperties(fname=r'C:\Windows\Fonts\simsun.ttc')\n\ny_1 = [1, 0, 1, 1, 2, 4, 3, 2, 3, 4, 4, 5, 6, 5, 4, 3, 3, 1, 1, 1]\ny_2 = [1, 0, 3, 1, 2, 2, 3, 3, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n\nx = range(11, 31)\n\nplt.figure(figsize=(20, 8), dpi=80)\n\nplt.plot(x, y_1, label='小明', color='#F08080')\nplt.plot(x, y_2, label=\"小红\", color=\"#DB7093\", linestyle=\"--\")\n\n_xtick_labels = [\"{}岁\".format(i) for i in x]\nplt.xticks(x, _xtick_labels, fontproperties=font)\n\nplt.grid(alpha=0.4, linestyle=':')\n\nplt.legend(prop=font, loc='upper left')\n\nplt.show()\n","repo_name":"strawsyz/straw","sub_path":"study/matplotlib_study/demo03.py","file_name":"demo03.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"28461762293","text":"\"\"\"Extract Event Selection Labels from the QUESO event selection.\"\"\"\n\nfrom typing import TYPE_CHECKING, Dict\n\nfrom graphnet.data.extractors.i3extractor import I3Extractor\n\nif TYPE_CHECKING:\n    from icecube import icetray  # pyright: reportMissingImports=false\n\n\nclass I3QUESOExtractor(I3Extractor):\n    \"\"\"Class for extracting labels from the QUESO event selection.\"\"\"\n\n    def __init__(\n        self,\n        name: str = \"queso\",\n        padding_value: 
int = -1,\n ):\n \"\"\"Construct I3QUESOExtractor.\"\"\"\n # Base class constructor\n super().__init__(name)\n self._padding_value = padding_value\n\n def __call__(self, frame: \"icetray.I3Frame\") -> Dict[str, float]:\n \"\"\"Extract Event Selection Labels from the QUESO event selection.\"\"\"\n keys = [\n \"QuesoL3_Bool\",\n \"QuesoL3_Vars_cleaned_length\",\n \"QuesoL3_Vars_cleaned_num_hit_modules\",\n \"QuesoL3_Vars_cleaned_num_hits_fid_vol\",\n \"QuesoL3_Vars_cleaned_vertexZ\",\n \"QuesoL3_Vars_uncleaned_length\",\n ]\n output = {}\n for key in keys:\n try:\n value = frame[key].value\n except KeyError:\n value = self._padding_value\n output.update(\n {\n key: value,\n }\n )\n\n return output\n","repo_name":"Peterandresen12/graphnet","sub_path":"src/graphnet/data/extractors/i3quesoextractor.py","file_name":"i3quesoextractor.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"14850612463","text":"from typing import Any, Dict, Union\n\nfrom torchmetrics.classification.accuracy import MulticlassAccuracy\nfrom torchmetrics.classification.f_beta import MulticlassF1Score\nfrom transformers.configuration_utils import PretrainedConfig\nfrom transformers.tokenization_utils_base import PreTrainedTokenizerBase\n\nfrom transformers_framework.architectures.modeling_outputs import MaskedLMAndTokenClassOutput\nfrom transformers_framework.interfaces.adaptation import masked_lm_and_token_detection_adaptation\nfrom transformers_framework.interfaces.logging import (\n LOSS,\n MASKED_LM_ACCURACY,\n MASKED_LM_LOSS,\n MASKED_LM_PERPLEXITY,\n TOKEN_CLASS_ACCURACY,\n TOKEN_CLASS_F1,\n TOKEN_CLASS_LOSS,\n)\nfrom transformers_framework.interfaces.step import MaskedLMAndTokenClassStepOutput\nfrom transformers_framework.metrics.perplexity import Perplexity\nfrom transformers_framework.pipelines.pipeline import Pipeline\nfrom transformers_framework.processing.postprocessors import masked_lm_and_token_class_processor\nfrom transformers_framework.utilities import IGNORE_IDX\nfrom transformers_framework.utilities.arguments import (\n FlexibleArgumentParser,\n add_masked_lm_arguments,\n add_token_class_arguments,\n)\nfrom transformers_framework.utilities.torch import combine_losses\n\n\nclass MaskedLMAndTokenClass(Pipeline):\n\n POST_FORWARD_ADAPTER = masked_lm_and_token_detection_adaptation\n MODEL_INPUT_NAMES_TO_REDUCE = [\n ('input_ids', 'attention_mask', 'token_type_ids', 'masked_lm_labels', 'token_class_labels')\n ]\n\n def __init__(self, hyperparameters):\n super().__init__(hyperparameters)\n\n # masked lm metrics\n metrics_args = (self.tokenizer.vocab_size, )\n metrics_kwargs = dict(average='micro', ignore_index=IGNORE_IDX)\n\n self.train_mlm_acc = MulticlassAccuracy(*metrics_args, **metrics_kwargs)\n self.valid_mlm_acc = MulticlassAccuracy(*metrics_args, **metrics_kwargs)\n self.test_mlm_acc = MulticlassAccuracy(*metrics_args, **metrics_kwargs)\n\n metrics_kwargs = dict(ignore_index=IGNORE_IDX)\n self.train_mlm_ppl = Perplexity(**metrics_kwargs)\n self.valid_mlm_ppl = Perplexity(**metrics_kwargs)\n self.test_mlm_ppl = Perplexity(**metrics_kwargs)\n\n # classification metrics\n metrics_kwargs = dict(average=None, ignore_index=IGNORE_IDX, num_classes=self.config.num_labels)\n\n # train metrics\n self.train_acc = MulticlassAccuracy(**metrics_kwargs)\n self.train_f1 = MulticlassF1Score(**metrics_kwargs)\n\n # validation metrics\n self.valid_acc = MulticlassAccuracy(**metrics_kwargs)\n self.valid_f1 = 
MulticlassF1Score(**metrics_kwargs)\n\n # test metrics\n self.test_acc = MulticlassAccuracy(**metrics_kwargs)\n self.test_f1 = MulticlassF1Score(**metrics_kwargs)\n\n def setup_config(self, **kwargs) -> Union[PretrainedConfig, Dict[str, PretrainedConfig]]:\n kwargs['num_labels'] = self.hyperparameters.num_labels\n return super().setup_config(**kwargs)\n\n def setup_tokenizer(self) -> PreTrainedTokenizerBase:\n return super().setup_tokenizer(add_prefix_space=True)\n\n def step(self, batch: Dict) -> MaskedLMAndTokenClassStepOutput:\n r\"\"\" Forward step is shared between all train/val/test steps. \"\"\"\n\n results: MaskedLMAndTokenClassOutput = self.forward(**batch)\n\n # classification\n classification_predictions = results.token_class_logits.argmax(dim=-1)\n\n loss = combine_losses(\n losses=[results.masked_lm_loss, results.token_class_loss],\n weigths=[self.hyperparameters.masked_lm_weight, self.hyperparameters.token_class_weight],\n )\n\n return MaskedLMAndTokenClassStepOutput(\n loss=loss,\n masked_lm_loss=results.masked_lm_loss,\n masked_lm_predictions=results.masked_lm_logits.argmax(dim=-1),\n masked_lm_logits=results.masked_lm_logits,\n masked_lm_labels=batch['masked_lm_labels'],\n token_class_loss=results.token_class_loss,\n token_class_predictions=classification_predictions,\n token_class_logits=results.token_class_logits,\n token_class_labels=batch['token_class_labels'],\n )\n\n def training_step(self, batch, *args):\n r\"\"\" Here you compute and return the training loss and some additional metrics for e.g.\n the progress bar or logger.\n \"\"\"\n step_output = self.step(batch)\n\n train_mlm_acc = self.train_mlm_acc(step_output.masked_lm_predictions, step_output.masked_lm_labels)\n train_mlm_ppl = self.train_mlm_ppl(step_output.masked_lm_logits.float(), step_output.masked_lm_labels)\n\n train_acc = self.train_acc(\n step_output.token_class_predictions, step_output.token_class_labels\n )\n train_f1 = self.train_f1(step_output.token_class_predictions, step_output.token_class_labels)\n\n self.log(LOSS, step_output.loss)\n self.log(MASKED_LM_LOSS, step_output.masked_lm_loss)\n self.log(MASKED_LM_ACCURACY, train_mlm_acc)\n self.log(MASKED_LM_PERPLEXITY, train_mlm_ppl)\n self.log(TOKEN_CLASS_LOSS, step_output.token_class_loss)\n self.log(TOKEN_CLASS_ACCURACY, train_acc)\n self.log(TOKEN_CLASS_F1, train_f1)\n\n return step_output.loss\n\n def validation_step(self, batch, *args):\n r\"\"\" Operates on a single batch of data from the validation set.\n In this step you'd might generate examples or calculate anything of interest like accuracy.\n \"\"\"\n step_output = self.step(batch)\n\n valid_acc = self.valid_acc(\n step_output.token_class_predictions, step_output.token_class_labels\n )\n valid_f1 = self.valid_f1(step_output.token_class_predictions, step_output.token_class_labels)\n\n self.log(LOSS, step_output.loss)\n self.log(TOKEN_CLASS_LOSS, step_output.token_class_loss)\n self.log(TOKEN_CLASS_ACCURACY, valid_acc)\n self.log(TOKEN_CLASS_F1, valid_f1)\n\n def test_step(self, batch, *args):\n r\"\"\"\n Operates on a single batch of data from the test set.\n In this step you'd normally generate examples or calculate anything of interest such as accuracy.\n \"\"\"\n step_output = self.step(batch)\n\n test_acc = self.test_acc(\n step_output.token_class_predictions, step_output.token_class_labels\n )\n test_f1 = self.test_f1(step_output.token_class_predictions, step_output.token_class_labels)\n\n self.log(LOSS, step_output.loss)\n self.log(TOKEN_CLASS_LOSS, step_output.token_class_loss)\n 
self.log(TOKEN_CLASS_ACCURACY, test_acc)\n        self.log(TOKEN_CLASS_F1, test_f1)\n\n    def postprocess(self, sample: Dict[str, Any]) -> Dict[str, Any]:\n        r\"\"\" Process single samples to add denoising objective. \"\"\"\n        return masked_lm_and_token_class_processor(\n            sample=sample,\n            input_column=self.hyperparameters.input_column,\n            label_column=self.hyperparameters.label_column,\n            probability=self.hyperparameters.probability,\n            probability_masked=self.hyperparameters.probability_masked,\n            probability_replaced=self.hyperparameters.probability_replaced,\n            probability_unchanged=self.hyperparameters.probability_unchanged,\n            tokenizer=self.tokenizer,\n            max_sequence_length=self.hyperparameters.max_sequence_length,\n            whole_word_masking=self.hyperparameters.whole_word_masking,\n        )\n\n    @classmethod\n    def add_argparse_args(cls, parser: FlexibleArgumentParser):\n        super().add_argparse_args(parser)\n        parser.add_argument('--input_column', type=str, required=True)\n        parser.add_argument('--label_column', type=str, required=True)\n        parser.add_argument('--token_class_weight', type=float, default=1.0)\n        parser.add_argument('--masked_lm_weight', type=float, default=1.0)\n        add_masked_lm_arguments(parser)\n        add_token_class_arguments(parser)\n","repo_name":"lucadiliello/transformers-framework","sub_path":"transformers_framework/pipelines/masked_lm_and_token_class/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":8025,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"74039093386","text":"'''\n    The following code asks the user to enter a number and uses it to index a list, but two things can happen:\n    1- The user enters a value that cannot be cast to an integer.\n    2- The user enters a number that is not a valid index for the list.\n\n    Use a try-except block so the program's execution does not end, and tell the user which of the two errors occurred.\n'''\n\ndef main():\n    lista = [1, 2, 3, 4, 5]\n    try:\n        ingreso = int(input(\"Ingrese un número: \"))\n        print(\"En la posicion\", ingreso, \"se encuentra el elemento\", lista[ingreso])\n    except ValueError:\n        print(\"Error: el ingreso no se puede convertir a entero.\")\n    except IndexError:\n        print(\"Error: el número no es un índice válido para la lista.\")\n\nmain()","repo_name":"NicoMarianetti/Manejo-de-excepciones","sub_path":"ejercicio4.py","file_name":"ejercicio4.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20721319661","text":"from gurobipy import *\r\n\r\nnb_scena=2\r\nnb_arcs=12\r\n\r\nw = [2,1]\r\nw_prim = [w[i]-w[i+1] for i in range(len(w)-1)]+[w[-1]]\r\nk_w_prim = [w_prim[i]*(i+1) for i in range(len(w))]\r\n\r\nt = [[5,10,2,1,4,4,1,3,3,1,1,1], # scenario 1\r\n     [3,4,6,3,6,2,4,1,5,2,1,1]] # scenario 2\r\n\r\nnoms_arcs = ['ab', 'ac', 'ad', 'bd', 'be', 'bc', 'dc', 'ce', 'df', 'cf', 'eg', 'fg']\r\n\r\n# objective function coefficients\r\n#c = k_w_prim + [-w_prim[0] for i in range(nb_scena)] + [-w_prim[1] for i in range(nb_scena)] + [0 for i in range(nb_arcs)]\r\n    \r\nm = Model(\"question_4_2\")\r\n\r\n# decision variable declarations\r\nr = []\r\nfor i in range(nb_scena):\r\n    r.append(m.addVar(vtype=GRB.CONTINUOUS, lb=-GRB.INFINITY,name=\"r%d\" % (i+1)))\r\n\r\nb = []\r\nfor i in range(nb_scena):\r\n    bi=[]\r\n    for j in range(nb_scena):\r\n        bi.append(m.addVar(vtype=GRB.CONTINUOUS, lb=0, name=\"b%d%d\" % ((i+1),(j+1))))\r\n    b.append(bi)\r\n    \r\nx = [] \r\nfor i in range(nb_arcs):\r\n    x.append(m.addVar(vtype=GRB.BINARY, lb=0, name=\"x%s\" % noms_arcs[i]))\r\n    \r\n# update the model to integrate the new variables\r\nm.update()\r\n\r\nobj = LinExpr()\r\nobj = 0\r\nfor k in range(nb_scena):\r\n    obj += k_w_prim[k] * r[k]\r\n    \r\nfor k in range(nb_scena):\r\n    obj += - w_prim[k] * quicksum(b[i][k] for i in range(nb_scena))\r\n\r\n\r\n\r\n\r\n# set the objective\r\nm.setObjective(obj,GRB.MAXIMIZE)\r\n\r\n# constraint definitions\r\nfor k in range(nb_scena):\r\n    for i in range(nb_scena):\r\n        m.addConstr(r[k]-b[i][k] <= -quicksum(t[i][j]*x[j] for j in range(nb_arcs)), \"Contrainte%d\" % (i*nb_arcs+k))\r\n\r\nm.addConstr(x[0] + x[1] + x[2] == 1, \"Contrainte_a\") # out of a\r\nm.addConstr(-x[10] - x[11] == -1, \"Contrainte_g\") # into g\r\n# outgoing arcs minus incoming arcs = 0 (for every node other than a and g)\r\nm.addConstr(x[3] + x[4] + x[5] - x[0] == 0, \"Contrainte_b\") # b\r\nm.addConstr(x[7] + x[9] - x[1] - x[5] - x[6] == 0, \"Contrainte_c\") # c\r\nm.addConstr(x[6] + x[8] - x[2] - x[3] == 0, \"Contrainte_d\") # d\r\nm.addConstr(x[10] - x[4] - x[7] == 0, \"Contrainte_e\") # e\r\nm.addConstr(x[11] - x[8] - x[9] == 0, \"Contrainte_f\") # f\r\n\r\n\r\nm.optimize()\r\n\r\nprint('\\nSolution optimale pour %d scenarios ' % nb_scena)\r\nprint(\"\\nTemps pour parcourir le chemin de a à g :\")\r\nfor i in range(nb_scena):\r\n    print('t',(i+1),' = ', sum(t[i][j]*x[j].x for j in range(nb_arcs)))\r\nprint(\"\\nArcs empruntés :\")\r\nfor i in range(nb_arcs):\r\n    print(noms_arcs[i],' = ', x[i].x)\r\n","repo_name":"BenKabongo25/m1-mogpl-optim-equitable","sub_path":"groupe3_Bouchouchi_Kabongo/src/question_4_2.py","file_name":"question_4_2.py","file_ext":"py","file_size_in_byte":2442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39978548067","text":"import os.path as osp\nfrom unittest.mock import MagicMock, patch\n\nfrom mmcompression.datasets import CustomDataset, ConcatDataset, RepeatDataset\n\n\n@patch('mmcompression.datasets.CustomDataset.load_annotations', MagicMock)\n@patch('mmcompression.datasets.CustomDataset.__getitem__',\n       MagicMock(side_effect=lambda idx: idx))\ndef test_dataset_wrapper():\n    # CustomDataset.load_annotations = MagicMock()\n    # CustomDataset.__getitem__ = MagicMock(side_effect=lambda idx: idx)\n    dataset_a = CustomDataset(img_dir=MagicMock(), pipeline=[])\n    len_a = 10\n    dataset_a.img_infos = MagicMock()\n    dataset_a.img_infos.__len__.return_value = len_a\n    dataset_b = CustomDataset(img_dir=MagicMock(), pipeline=[])\n    len_b = 20\n    dataset_b.img_infos = MagicMock()\n    dataset_b.img_infos.__len__.return_value = len_b\n\n    concat_dataset = ConcatDataset([dataset_a, dataset_b])\n    assert concat_dataset[5] == 5\n    assert concat_dataset[25] == 15\n    assert len(concat_dataset) == len(dataset_a) + len(dataset_b)\n\n    repeat_dataset = RepeatDataset(dataset_a, 10)\n    assert repeat_dataset[5] == 5\n    assert repeat_dataset[15] == 5\n    assert repeat_dataset[27] == 7\n    assert len(repeat_dataset) == 10 * len(dataset_a)\n\n\ndef test_custom_dataset():\n    img_norm_cfg = dict(\n        mean=[123.675, 116.28, 103.53],\n        std=[58.395, 57.12, 57.375],\n        to_rgb=True)\n    crop_size = (512, 1024)\n    train_pipeline = [\n        dict(type='LoadImageFromFile'),\n        dict(type='Resize', img_scale=(128, 256), ratio_range=(0.5, 2.0)),\n        dict(type='RandomCrop', crop_size=crop_size),\n        dict(type='RandomFlip', flip_ratio=0.5),\n        dict(type='Normalize', **img_norm_cfg),\n        dict(type='Pad', size=crop_size, pad_val=0),\n        dict(type='DefaultFormatBundle'),\n        dict(type='Collect', keys=['img']),\n    ]\n    test_pipeline = [\n        dict(type='LoadImageFromFile'),\n        
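# NOTE: unlike train_pipeline above, the test pipeline applies no random resize/crop/flip augmentation\n        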
dict(type='Normalize', **img_norm_cfg),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img']),\n ]\n\n # with img_dir\n train_dataset = CustomDataset(\n train_pipeline,\n data_root=osp.join(osp.dirname(__file__), '../data/pseudo_dataset'),\n img_dir='imgs/',\n img_suffix='img.jpg')\n assert len(train_dataset) == 5\n\n # with img_dir, split\n train_dataset = CustomDataset(\n train_pipeline,\n data_root=osp.join(osp.dirname(__file__), '../data/pseudo_dataset'),\n img_dir='imgs/',\n img_suffix='img.jpg',\n split='splits/train.txt')\n assert len(train_dataset) == 4\n\n # no data_root\n train_dataset = CustomDataset(\n train_pipeline,\n img_dir=osp.join(osp.dirname(__file__), '../data/pseudo_dataset/imgs'),\n img_suffix='img.jpg')\n assert len(train_dataset) == 5\n\n # with data_root but img_dir are abs path\n train_dataset = CustomDataset(\n train_pipeline,\n data_root=osp.join(osp.dirname(__file__), '../data/pseudo_dataset'),\n img_dir=osp.abspath(\n osp.join(osp.dirname(__file__), '../data/pseudo_dataset/imgs')),\n img_suffix='img.jpg')\n assert len(train_dataset) == 5\n\n test_dataset = CustomDataset(\n test_pipeline,\n img_dir=osp.join(osp.dirname(__file__), '../data/pseudo_dataset/imgs'),\n img_suffix='img.jpg')\n assert len(test_dataset) == 5\n\n # training data get\n train_data = train_dataset[0]\n assert isinstance(train_data, dict)\n\n # test data get\n test_data = test_dataset[0]\n assert isinstance(test_data, dict)\n\n # evaluation\n pseudo_results = [{'bpp': 0.2, 'psnr': 25}, {'bpp': 0.1, 'psnr': 30}]\n eval_results = train_dataset.evaluate(pseudo_results)\n assert isinstance(eval_results, dict)\n\n\n@patch('mmcompression.datasets.CustomDataset.load_annotations', MagicMock)\n@patch('mmcompression.datasets.CustomDataset.__getitem__',\n MagicMock(side_effect=lambda idx: idx))\ndef test_custom_dataset_random_palette_is_generated():\n dataset = CustomDataset(\n pipeline=[],\n img_dir=MagicMock(),\n split=MagicMock())\n\n\n@patch('mmcompression.datasets.CustomDataset.load_annotations', MagicMock)\n@patch('mmcompression.datasets.CustomDataset.__getitem__',\n MagicMock(side_effect=lambda idx: idx))\ndef test_custom_dataset_custom_palette():\n dataset = CustomDataset(\n pipeline=[],\n img_dir=MagicMock(),\n split=MagicMock())\n","repo_name":"lizhihao6/MMCompression_open","sub_path":"tests/test_data/test_dataset.py","file_name":"test_dataset.py","file_ext":"py","file_size_in_byte":4420,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"18273933444","text":"import numpy as np\nimport sympy\nfrom sympy import symbols, hessian\nfrom sympy.vector import gradient\nfrom sympy import cos, sin\nfrom Transformation import R01, R12, R23, R34, P01, P12, P23, P34, I1, I2, I3\nfrom sympy import Matrix, Function, diff\nfrom sympy import simplify, latex\nfrom sympy.physics.mechanics import mprint, mlatex\nfrom lagrangian_helper import obtain_links, Inertia, mass, theta, theta_dot, theta_ddot, functions\n\nL1, L2, L3 = symbols('L1 L2 L3')\nm1, m2, m3, I1xx, I1yy, I1zz, I2xx, I2yy, I2zz, I3xx, I3yy, I3zz = symbols('m1 m2 m3 I1xx I1yy I1zz I2xx I2yy I2zz I3xx I3yy I3zz')\ng = symbols('g')\n\nt = symbols('t')\ntheta1 = Function('theta1')(t); theta2 = Function('theta2')(t); theta3 = Function('theta3')(t)\ntheta_d1 = diff(theta1, t); theta_d2 = diff(theta2, t); theta_d3 = diff(theta3, t)\ntheta_dd1 = diff(theta_d1, t); theta_dd2 = diff(theta_d2, t); theta_dd3 = diff(theta_d3, t)\n\n\nlinks = obtain_links()\n\ndef T(R, P):\n 
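\"\"\"Build the 4x4 homogeneous transform from a 3x3 rotation matrix R and a position vector P.\"\"\"\n    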
matr = Matrix([\n [R[0, 0], R[0, 1], R[0, 2], P[0]],\n [R[1, 0], R[1, 1], R[1, 2], P[1]],\n [R[2, 0], R[2, 1], R[2, 2], P[2]],\n [0, 0, 0, 1]\n ])\n\n return matr\n\ndef kinetic_energy(link):\n\n ke = 0.5 * mass['mass_'+str(link['link'])] * link['v_c'].T * link['v_c'] + 0.5 * link['w'].T * mass['I'+ str(link['link'])] * link['w']\n # print(ke)\n return ke\n\ndef sum_of_ke(link1, link2, link3):\n return kinetic_energy(link2) + kinetic_energy(link3)\n return kinetic_energy(link1) + kinetic_energy(link2) + kinetic_energy(link3)\n\ndef potential_energy(link, T):\n name = link['link'] - 1\n PC = functions['PC'+str(name)]\n\n PC = Matrix([PC[0], PC[1], PC[2], 1])\n R = functions['R'+str(name)+str(name+1)].T\n P = functions['P'+str(name)+str(name+1)]\n\n P0 = T * PC\n grav = Matrix([0, 0, -g, 0])\n\n u = -mass['mass_' + str(name + 1)] * grav.T * P0\n # print(u)\n\n return u\n\ndef sum_of_pe(link1, link2, link3):\n\n T01 = T(R01(theta), P01(L1))\n T12 = T(R12(theta), P12())\n T23 = T(R23(theta), P23(L2))\n\n return potential_energy(link2, T01 * T12) + potential_energy(link3, T01 * T12 * T23)\n return potential_energy(link1, T01) + potential_energy(link2, T01 * T12) + potential_energy(link3, T01 * T12 * T23)\n\ndef gradient(f, params):\n diff1 = diff(f, params[0])\n diff2 = diff(f, params[1])\n diff3 = diff(f, params[2])\n \n return [diff1, diff2, diff3]\n\ndef main():\n\n links = obtain_links()\n link1 = links['link1']; link2 = links['link2']; link3 = links['link3']\n\n print(link1['v_c'])\n print(link2['v_c'])\n print(link3['v_c'])\n\n K = sum_of_ke(link1, link2, link3)\n P = sum_of_pe(link1, link2, link3)\n\n print(\"Total Potential: \", P)\n print(\"Total Kinetic\", K)\n # print(sum_of_pe(link2, link3))\n\n L = K - P\n \n M = hessian(K, [theta_d1, theta_d2, theta_d3])\n # print(\"\\n Mass Matrix: \", M)\n G = Matrix(gradient(P, [theta1, theta2, theta3]))\n # print(\"\\n Gravity vector is: \", G)\n H = Matrix(gradient(K, [theta_d1, theta_d2, theta_d3])).jacobian(Matrix([theta1, theta2, theta3])) * Matrix([theta_d1, theta_d2, theta_d3]) - Matrix(gradient(K, [theta1, theta2, theta3]))\n # H = jacobian(gradient(K, [theta_d1, theta_d2, theta_d3]), [theta1, theta2, theta3]) * Matrix([theta_d1, theta_d2, theta_d3]) - gradient(K, [theta1, theta2, theta3])\n # print(\"\\n NonLinear Term is: \", H)\n\n #For theta1:\n term1 = diff(L, theta_d1)\n term11 = diff(term1, t)\n term2 = diff(L, theta1)\n tau_1 = simplify(term11 - term2) \n # print(\"Tau_1 is: \", tau_1)\n\n #For theta2:\n term1 = diff(L, theta_d2)\n term11 = diff(term1, t)\n term2 = diff(L, theta2)\n tau_2 = simplify(term11 - term2) \n # print(\"\\nTau_2 is: \", tau_2)\n\n #For theta3:\n term1 = diff(L, theta_d3)\n term11 = diff(term1, t)\n term2 = diff(L, theta3)\n tau_3 = simplify(term11 - term2) \n tau_3 = simplify(tau_3)\n # print(\"\\nTau_3 is: \", tau_3)\n\n final = Matrix([tau_1, tau_2, tau_3])\n\n f = open('Matrices.txt', 'w')\n f.write(mlatex(M)+'\\n')\n f.write(mlatex(G)+'\\n')\n f.write(mlatex(H)+'\\n')\n f.write(mlatex(K)+'\\n')\n f.write(mlatex(P)+'\\n')\n \n \n\n # print(\"\\nDerivative wrt to theta_d1\", term1)\n # print()\n final2 = M*Matrix([theta_dd1, theta_dd2, theta_dd3]) + G + H\n\n # print(\"Equality?? 
: \", simplify(final - final2) == 0)\n # For theta 1:\n # tau_11 = -1 * diff(K, theta1)\n # tau_12 = diff(P, theta1)\n\n\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"Stav42/3RGaussian","sub_path":"man_controller/eom/src/lagrangian_eom.py","file_name":"lagrangian_eom.py","file_ext":"py","file_size_in_byte":4475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74946913863","text":"from heapq import heappush, heappop\nimport logging\nfrom weakref import WeakValueDictionary\n\nfrom odoo.addons.odoo_magento2_ept.models.backend.exception import ChannelNotFound\nfrom ..logs.job import PENDING, ENQUEUED, STARTED, FAILED, DONE ,CANCELED ,HOLD\nNOT_DONE = (PENDING, ENQUEUED, STARTED)\n\n_logger = logging.getLogger(__name__)\n\n\nclass PriorityQueue(object):\n \"\"\"A priority queue that supports removing arbitrary objects.\n Adding an object already in the queue is a no op.\n Popping an empty queue returns None.\n >>> q = PriorityQueue()\n >>> q.add(2)\n >>> q.add(3)\n >>> q.add(3)\n >>> q.add(1)\n >>> q[0]\n 1\n >>> len(q)\n 3\n >>> q.pop()\n 1\n >>> q.remove(2)\n >>> len(q)\n 1\n >>> q[0]\n 3\n >>> q.pop()\n 3\n >>> q.pop()\n >>> q.add(2)\n >>> q.remove(2)\n >>> q.add(2)\n >>> q.pop()\n 2\n \"\"\"\n\n def __init__(self):\n self._heap = []\n self._known = set() # all objects in the heap (including removed)\n self._removed = set() # all objects that have been removed\n\n def __len__(self):\n return len(self._known) - len(self._removed)\n\n def __getitem__(self, i):\n if i != 0:\n raise IndexError()\n while True:\n if not self._heap:\n raise IndexError()\n o = self._heap[0]\n if o in self._removed:\n o2 = self._heap.pop() # Changes for remove heappop\n #assert o2 == o\n self._removed.remove(o)\n self._known.remove(o)\n else:\n return o\n\n def __contains__(self, o):\n return o in self._known and o not in self._removed\n\n def add(self, o):\n if o is None:\n raise ValueError()\n if o in self._removed:\n self._removed.remove(o)\n if o in self._known:\n return\n self._known.add(o)\n# heappush(self._heap, o)\n self._heap.append(o) # Changes for remove heappop\n\n def remove(self, o):\n if o is None:\n raise ValueError()\n if o not in self._known:\n return\n if o not in self._removed:\n self._removed.add(o)\n\n def pop(self):\n while True:\n try:\n o = self._heap.pop() # Changes for remove heappop\n except IndexError:\n # queue is empty\n return None\n self._known.remove(o)\n if o in self._removed:\n self._removed.remove(o)\n else:\n return o\n\n\nclass SafeSet(set):\n \"\"\"A set that does not raise KeyError when removing non-existent items.\n >>> s = SafeSet()\n >>> s.remove(1)\n >>> len(s)\n 0\n >>> s.remove(1)\n \"\"\"\n def remove(self, o):\n try:\n super(SafeSet, self).remove(o)\n except KeyError:\n pass\n\n\nclass ChannelJob(object):\n \"\"\"A channel job is attached to a channel and holds the properties of a\n job that are necessary to prioritise them.\n Channel jobs are comparable according to the following rules:\n * jobs with an eta come before all other jobs\n * then jobs with a smaller eta come first\n * then jobs with smaller priority come first\n * then jobs with a smaller creation time come first\n * then jobs with a smaller sequence come first\n Here are some examples.\n j1 comes before j2 before it has a smaller date_created\n >>> j1 = ChannelJob(None, None, 1,\n ... seq=0, date_created=1, priority=9, eta=None)\n >>> j1\n \n >>> j2 = ChannelJob(None, None, 2,\n ... 
seq=0, date_created=2, priority=9, eta=None)\n >>> j1 < j2\n True\n j3 comes first because it has lower priority,\n despite having a creation date after j1 and j2\n >>> j3 = ChannelJob(None, None, 3,\n ... seq=0, date_created=3, priority=2, eta=None)\n >>> j3 < j1\n True\n j4 and j5 comes even before j3, because they have an eta\n >>> j4 = ChannelJob(None, None, 4,\n ... seq=0, date_created=4, priority=9, eta=9)\n >>> j5 = ChannelJob(None, None, 5,\n ... seq=0, date_created=5, priority=9, eta=9)\n >>> j4 < j5 < j3\n True\n j6 has same date_created and priority as j5 but a smaller eta\n >>> j6 = ChannelJob(None, None, 6,\n ... seq=0, date_created=5, priority=9, eta=2)\n >>> j6 < j4 < j5\n True\n Here is the complete suite:\n >>> j6 < j4 < j5 < j3 < j1 < j2\n True\n j0 has the same properties as j1 but they are not considered\n equal as they are different instances\n >>> j0 = ChannelJob(None, None, 1,\n ... seq=0, date_created=1, priority=9, eta=None)\n >>> j0 == j1\n False\n >>> j0 == j0\n True\n \"\"\"\n\n def __init__(self, db_name, channel, uuid,\n seq, date_created, priority, eta):\n self.db_name = db_name\n self.channel = channel\n self.uuid = uuid\n self.seq = seq\n self.date_created = date_created\n self.priority = priority\n self.eta = eta\n\n def __repr__(self):\n return \"\" % self.uuid\n\n def __eq__(self, other):\n return id(self) == id(other)\n\n def __hash__(self):\n return id(self)\n\n def __cmp__(self, other):\n if self.eta and not other.eta:\n return -1\n elif not self.eta and other.eta:\n return 1\n else:\n return (cmp(self.eta, other.eta) or\n cmp(self.priority, other.priority) or\n cmp(self.date_created, other.date_created) or\n cmp(self.seq, other.seq))\n\n\nclass ChannelQueue(object):\n \"\"\"A channel queue is a priority queue for jobs that returns\n jobs with a past ETA first.\n >>> q = ChannelQueue()\n >>> j1 = ChannelJob(None, None, 1,\n ... seq=0, date_created=1, priority=1, eta=10)\n >>> j2 = ChannelJob(None, None, 2,\n ... seq=0, date_created=2, priority=1, eta=None)\n >>> j3 = ChannelJob(None, None, 3,\n ... seq=0, date_created=3, priority=1, eta=None)\n >>> q.add(j1)\n >>> q.add(j2)\n >>> q.add(j3)\n >>> q.pop(now=1)\n \n >>> q.pop(now=11)\n \n >>> q.pop(now=12)\n \n \"\"\"\n\n def __init__(self):\n self._queue = PriorityQueue()\n self._eta_queue = PriorityQueue()\n\n def __len__(self):\n return len(self._eta_queue) + len(self._queue)\n\n def __contains__(self, o):\n return o in self._eta_queue or o in self._queue\n\n def add(self, job):\n if job.eta:\n self._eta_queue.add(job)\n else:\n self._queue.add(job)\n\n def remove(self, job):\n self._eta_queue.remove(job)\n self._queue.remove(job)\n\n def pop(self, now):\n if len(self._eta_queue) and self._eta_queue[0].eta <= now:\n return self._eta_queue.pop()\n else:\n return self._queue.pop()\n\n\nclass Channel(object):\n \"\"\"A channel for jobs, with a maximum capacity.\n When jobs are created by connector modules, they may be associated\n to a job channel. Jobs with no channel are inserted into the root channel.\n Job channels are joined in a hierarchy down to the root channel.\n When a job channel has available capacity, jobs are dequeued, marked\n as running in the channel and are inserted into the queue of the\n parent channel where they wait for available capacity and so on.\n Job channels can be visualized as water channels with a given flow\n limit (= capacity). 
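Note that ChannelJob.__cmp__ above relies on the Python 2 built-in cmp(), which no longer exists in Python 3; there, the ordering rules from the ChannelJob docstring would have to be expressed through rich comparisons. A possible sketch (hypothetical helper, not part of the record; identity-based __eq__ is kept, as in ChannelJob):

import functools

@functools.total_ordering
class OrderedJob:
    """Carries the same ordering fields as ChannelJob."""

    def __init__(self, seq, date_created, priority, eta=None):
        self.seq = seq
        self.date_created = date_created
        self.priority = priority
        self.eta = eta

    def _key(self):
        # jobs with an eta sort before jobs without one (False < True),
        # then by eta, priority, date_created and seq
        return (self.eta is None, self.eta or 0,
                self.priority, self.date_created, self.seq)

    def __lt__(self, other):
        return self._key() < other._key()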
Channels are joined together in a downstream channel\n and the flow limit of the downstream channel limits upstream channels.::\n ---------------------+\n |\n |\n Ch. A C:4,Q:12,R:4 +-----------------------\n ---------------------+ Ch. root C:5,Q:0,R:4\n |\n ---------------------+\n Ch. B C:1,Q:0,R:0\n ---------------------+-----------------------\n The above diagram illustrates two channels joining in the root channel.\n The root channel has a capacity of 5, and 4 running jobs coming from\n Channel A. Channel A has a capacity of 4, all in use (passed down to the\n root channel), and 12 jobs enqueued. Channel B has a capacity of 1,\n none in use. This means that whenever a new job comes in channel B,\n there will be available room for it to run in the root channel.\n Note that from the point of view of a channel, 'running' means enqueued\n in the downstream channel. Only jobs marked running in the root channel\n are actually sent to Odoo for execution.\n Should a downstream channel have less capacity than its upstream channels,\n jobs going downstream will be enqueued in the downstream channel,\n and compete normally according to their properties (priority, etc).\n Using this technique, it is possible to enforce sequence in a channel\n with a capacity of 1. It is also possible to dedicate a channel with a\n limited capacity for application-autocreated subchannels\n without risking to overflow the system.\n \"\"\"\n\n def __init__(self, name, parent, capacity=None, sequential=False):\n self.name = name\n self.parent = parent\n if self.parent:\n self.parent.children[name] = self\n self.children = {}\n self.capacity = capacity\n self.sequential = sequential\n self._queue = ChannelQueue()\n self._running = SafeSet()\n self._failed = SafeSet()\n\n def configure(self, config):\n \"\"\" Configure a channel from a dictionary.\n Supported keys are:\n * capacity\n * sequential\n \"\"\"\n assert self.fullname.endswith(config['name'])\n self.capacity = config.get('capacity', None)\n self.sequential = bool(config.get('sequential', False))\n if self.sequential and self.capacity != 1:\n raise ValueError(\"A sequential channel must have a capacity of 1\")\n\n @property\n def fullname(self):\n \"\"\" The full name of the channel, in dot separated notation. \"\"\"\n if self.parent:\n return self.parent.fullname + '.' + self.name\n else:\n return self.name\n\n def get_subchannel_by_name(self, subchannel_name):\n return self.children.get(subchannel_name)\n\n def __str__(self):\n capacity = u'∞' if self.capacity is None else str(self.capacity)\n return \"%s(C:%s,Q:%d,R:%d,F:%d)\" % (self.fullname,\n capacity,\n len(self._queue),\n len(self._running),\n len(self._failed))\n\n def remove(self, job):\n \"\"\" Remove a job from the channel. 
\"\"\"\n self._queue.remove(job)\n self._running.remove(job)\n self._failed.remove(job)\n if self.parent:\n self.parent.remove(job)\n\n def set_done(self, job):\n \"\"\" Mark a job as done.\n This removes it from the channel queue.\n \"\"\"\n self.remove(job)\n _logger.debug(\"job %s marked done in channel %s\",\n job.uuid, self)\n \n def set_canceled(self, job):\n \"\"\" Mark a job as done.\n This removes it from the channel queue.\n \"\"\"\n self.remove(job)\n _logger.debug(\"job %s marked Cancel in channel %s\",\n job.uuid, self)\n \n def set_pending(self, job):\n \"\"\" Mark a job as pending.\n This puts the job in the channel queue and remove it\n from parent channels queues.\n \"\"\"\n if job not in self._queue:\n self._queue.add(job)\n self._running.remove(job)\n self._failed.remove(job)\n if self.parent:\n self.parent.remove(job)\n _logger.debug(\"job %s marked pending in channel %s\",\n job.uuid, self)\n\n def set_running(self, job):\n \"\"\" Mark a job as running.\n This also marks the job as running in parent channels.\n \"\"\"\n if job not in self._running:\n self._queue.remove(job)\n self._running.add(job)\n self._failed.remove(job)\n if self.parent:\n self.parent.set_running(job)\n _logger.debug(\"job %s marked running in channel %s\",\n job.uuid, self)\n\n def set_failed(self, job):\n \"\"\" Mark the job as failed. \"\"\"\n if job not in self._failed:\n self._queue.remove(job)\n self._running.remove(job)\n self._failed.add(job)\n if self.parent:\n self.parent.remove(job)\n _logger.debug(\"job %s marked failed in channel %s\",\n job.uuid, self)\n\n def get_jobs_to_run(self, now):\n \"\"\" Get jobs that are ready to run in channel.\n This works by enqueuing jobs that are ready to run in children\n channels, then yielding jobs from the channel queue until\n ``capacity`` jobs are marked running in the channel.\n :param now: the current datetime using a type that is comparable to\n jobs eta attribute\n :return: iterator of :py:class:`connector.jobrunner.ChannelJob`\n \"\"\"\n # enqueue jobs of children channels\n for child in self.children.values():\n for job in child.get_jobs_to_run(now):\n self._queue.add(job)\n # sequential channels block when there are failed jobs\n # TODO: this is probably not sufficient to ensure\n # sequentiality because of the behaviour in presence\n # of jobs with eta; plus: check if there are no\n # race conditions.\n if self.sequential and len(self._failed):\n return\n # yield jobs that are ready to run\n while not self.capacity or len(self._running) < self.capacity:\n job = self._queue.pop(now)\n if not job:\n return\n self._running.add(job)\n _logger.debug(\"job %s marked running in channel %s\",\n job.uuid, self)\n yield job\n\n\nclass ChannelManager(object):\n \"\"\" High level interface for channels\n This class handles:\n * configuration of channels\n * high level api to create and remove jobs (notify, remove_job, remove_db)\n * get jobs to run\n Here is how the runner will use it.\n Let's create a channel manager and configure it.\n >>> from pprint import pprint as pp\n >>> cm = ChannelManager()\n >>> cm.simple_configure('root:4,A:4,B:1')\n >>> db = 'db'\n Add a few jobs in channel A with priority 10\n >>> cm.notify(db, 'A', 'A1', 1, 0, 10, None, 'pending')\n >>> cm.notify(db, 'A', 'A2', 2, 0, 10, None, 'pending')\n >>> cm.notify(db, 'A', 'A3', 3, 0, 10, None, 'pending')\n >>> cm.notify(db, 'A', 'A4', 4, 0, 10, None, 'pending')\n >>> cm.notify(db, 'A', 'A5', 5, 0, 10, None, 'pending')\n >>> cm.notify(db, 'A', 'A6', 6, 0, 10, None, 'pending')\n 
Add a few jobs in channel B with priority 5\n >>> cm.notify(db, 'B', 'B1', 1, 0, 5, None, 'pending')\n >>> cm.notify(db, 'B', 'B2', 2, 0, 5, None, 'pending')\n We must now run one job from queue B which has a capacity of 1\n and 3 jobs from queue A so the root channel capacity of 4 is filled.\n >>> pp(list(cm.get_jobs_to_run(now=100)))\n [, , , ]\n Job A2 is done. Next job to run is A5, even if we have\n higher priority job in channel B, because channel B has a capacity of 1.\n >>> cm.notify(db, 'A', 'A2', 2, 0, 10, None, 'done')\n >>> pp(list(cm.get_jobs_to_run(now=100)))\n []\n Job B1 is done. Next job to run is B2 because it has higher priority.\n >>> cm.notify(db, 'B', 'B1', 1, 0, 5, None, 'done')\n >>> pp(list(cm.get_jobs_to_run(now=100)))\n []\n Let's say A1 is done and A6 gets a higher priority. A6 will run next.\n >>> cm.notify(db, 'A', 'A1', 1, 0, 10, None, 'done')\n >>> cm.notify(db, 'A', 'A6', 6, 0, 5, None, 'pending')\n >>> pp(list(cm.get_jobs_to_run(now=100)))\n []\n \"\"\"\n\n def __init__(self):\n self._jobs_by_uuid = WeakValueDictionary()\n self._root_channel = Channel(name='root', parent=None, capacity=1)\n self._channels_by_name = WeakValueDictionary(root=self._root_channel)\n\n @staticmethod\n def split_strip(s, sep, maxsplit=-1):\n \"\"\"Split string and strip each component.\n >>> ChannelManager.split_strip(\"foo: bar baz\\\\n: fred:\", \":\")\n ['foo', 'bar baz', 'fred', '']\n \"\"\"\n return [x.strip() for x in s.split(sep, maxsplit)]\n\n @classmethod\n def parse_simple_config(cls, config_string):\n \"\"\"Parse a simple channels configuration string.\n The general form is as follow:\n channel(.subchannel)*(:capacity(:key(=value)?)*)? [, ...]\n If capacity is absent, it defaults to 1.\n If a key is present without value, it gets True as value.\n When declaring subchannels, the root channel may be omitted\n (ie sub:4 is the same as root.sub:4).\n Returns a list of channel configuration dictionaries.\n >>> from pprint import pprint as pp\n >>> pp(ChannelManager.parse_simple_config('root:4'))\n [{'capacity': 4, 'name': 'root'}]\n >>> pp(ChannelManager.parse_simple_config('root:4,root.sub:2'))\n [{'capacity': 4, 'name': 'root'}, {'capacity': 2, 'name': 'root.sub'}]\n >>> pp(ChannelManager.parse_simple_config('root:4,root.sub:2:'\n ... 'sequential:k=v'))\n [{'capacity': 4, 'name': 'root'},\n {'capacity': 2, 'k': 'v', 'name': 'root.sub', 'sequential': True}]\n >>> pp(ChannelManager.parse_simple_config('root'))\n [{'capacity': 1, 'name': 'root'}]\n >>> pp(ChannelManager.parse_simple_config('sub:2'))\n [{'capacity': 2, 'name': 'sub'}]\n It ignores whitespace around values, and drops empty entries which\n would be generated by trailing commas, or commented lines on the Odoo\n config file.\n >>> pp(ChannelManager.parse_simple_config('''\n ... root : 4,\n ... ,\n ... foo bar:1: k=va lue,\n ... '''))\n [{'capacity': 4, 'name': 'root'},\n {'capacity': 1, 'k': 'va lue', 'name': 'foo bar'}]\n It's also possible to replace commas with line breaks, which is more\n readable if you're taking the channel configuration from a ConfigParser\n file.\n >>> pp(ChannelManager.parse_simple_config('''\n ... root : 4\n ... foo bar:1: k=va lue\n ... baz\n ... 
'''))\n [{'capacity': 4, 'name': 'root'},\n {'capacity': 1, 'k': 'va lue', 'name': 'foo bar'},\n {'capacity': 1, 'name': 'baz'}]\n \"\"\"\n res = []\n config_string = config_string.replace(\"\\n\", \",\")\n for channel_config_string in cls.split_strip(config_string, ','):\n if not channel_config_string:\n # ignore empty entries (commented lines, trailing commas)\n continue\n config = {}\n config_items = cls.split_strip(channel_config_string, ':')\n name = config_items[0]\n if not name:\n raise ValueError('Invalid channel config %s: '\n 'missing channel name' % config_string)\n config['name'] = name\n if len(config_items) > 1:\n capacity = config_items[1]\n try:\n config['capacity'] = int(capacity)\n except ValueError:\n raise ValueError('Invalid channel config %s: '\n 'invalid capacity %s' %\n (config_string, capacity))\n for config_item in config_items[2:]:\n kv = cls.split_strip(config_item, '=')\n if len(kv) == 1:\n k, v = kv[0], True\n elif len(kv) == 2:\n k, v = kv\n else:\n raise ValueError('Invalid channel config %s: '\n 'incorrect config item %s' %\n (config_string, config_item))\n if k in config:\n raise ValueError('Invalid channel config %s: '\n 'duplicate key %s' %\n (config_string, k))\n config[k] = v\n else:\n config['capacity'] = 1\n res.append(config)\n return res\n\n def simple_configure(self, config_string):\n \"\"\"Configure the channel manager from a simple configuration string\n >>> cm = ChannelManager()\n >>> c = cm.get_channel_by_name('root')\n >>> c.capacity\n 1\n >>> cm.simple_configure('root:4,autosub.sub:2')\n >>> cm.get_channel_by_name('root').capacity\n 4\n >>> cm.get_channel_by_name('root.autosub').capacity\n >>> cm.get_channel_by_name('root.autosub.sub').capacity\n 2\n >>> cm.get_channel_by_name('autosub.sub').capacity\n 2\n \"\"\"\n for config in ChannelManager.parse_simple_config(config_string):\n self.get_channel_from_config(config)\n\n def get_channel_from_config(self, config):\n \"\"\"Return a Channel object from a parsed configuration.\n If the channel does not exist it is created.\n The configuration is applied on the channel before returning it.\n If some of the parent channels are missing when creating a subchannel,\n the parent channels are auto created with an infinite capacity\n (except for the root channel, which defaults to a capacity of 1\n when not configured explicitly).\n \"\"\"\n channel = self.get_channel_by_name(config['name'], autocreate=True)\n channel.configure(config)\n return channel\n\n def get_channel_by_name(self, channel_name, autocreate=False):\n \"\"\"Return a Channel object by its name.\n If it does not exist and autocreate is True, it is created\n with a default configuration and inserted in the Channels structure.\n If autocreate is False and the channel does not exist, an exception\n is raised.\n >>> cm = ChannelManager()\n >>> c = cm.get_channel_by_name('root', autocreate=False)\n >>> c.name\n 'root'\n >>> c.fullname\n 'root'\n >>> c = cm.get_channel_by_name('root.sub', autocreate=True)\n >>> c.name\n 'sub'\n >>> c.fullname\n 'root.sub'\n >>> c = cm.get_channel_by_name('sub', autocreate=True)\n >>> c.name\n 'sub'\n >>> c.fullname\n 'root.sub'\n >>> c = cm.get_channel_by_name('autosub.sub', autocreate=True)\n >>> c.name\n 'sub'\n >>> c.fullname\n 'root.autosub.sub'\n >>> c = cm.get_channel_by_name(None)\n >>> c.fullname\n 'root'\n >>> c = cm.get_channel_by_name('root.sub')\n >>> c.fullname\n 'root.sub'\n >>> c = cm.get_channel_by_name('sub')\n >>> c.fullname\n 'root.sub'\n \"\"\"\n if not channel_name or channel_name == 
self._root_channel.name:\n return self._root_channel\n if not channel_name.startswith(self._root_channel.name + '.'):\n channel_name = self._root_channel.name + '.' + channel_name\n if channel_name in self._channels_by_name:\n return self._channels_by_name[channel_name]\n if not autocreate:\n raise ChannelNotFound('Channel %s not found' % channel_name)\n parent = self._root_channel\n for subchannel_name in channel_name.split('.')[1:]:\n subchannel = parent.get_subchannel_by_name(subchannel_name)\n if not subchannel:\n subchannel = Channel(subchannel_name, parent, capacity=None)\n self._channels_by_name[subchannel.fullname] = subchannel\n parent = subchannel\n return parent\n\n def notify(self, db_name, channel_name, uuid,\n seq, date_created, priority, eta, state):\n try:\n channel = self.get_channel_by_name(channel_name)\n except ChannelNotFound:\n _logger.warning('unknown channel %s, '\n 'using root channel for job %s',\n channel_name, uuid)\n channel = self._root_channel\n job = self._jobs_by_uuid.get(uuid)\n if job:\n # db_name is invariant\n assert job.db_name == db_name\n # date_created is invariant\n assert job.date_created == date_created\n # if one of the job properties that influence\n # scheduling order has changed, we remove the job\n # from the queues and create a new job object\n if (seq != job.seq or\n priority != job.priority or\n eta != job.eta or\n channel != job.channel):\n _logger.debug(\"job %s properties changed, rescheduling it\",\n uuid)\n self.remove_job(uuid)\n job = None\n if not job:\n job = ChannelJob(db_name, channel, uuid,\n seq, date_created, priority, eta)\n self._jobs_by_uuid[uuid] = job\n # state transitions\n if not state or state == DONE:\n job.channel.set_done(job)\n elif state in (PENDING, HOLD):\n job.channel.set_pending(job)\n elif state in (ENQUEUED, STARTED):\n job.channel.set_running(job)\n elif state == FAILED:\n job.channel.set_failed(job)\n elif state == CANCELED:\n job.channel.set_canceled(job)\n else:\n _logger.error(\"unexpected state %s for job %s\", state, job)\n\n def remove_job(self, uuid):\n job = self._jobs_by_uuid.get(uuid)\n if job:\n job.channel.remove(job)\n del self._jobs_by_uuid[job.uuid]\n\n def remove_db(self, db_name):\n for job in self._jobs_by_uuid.values():\n if job.db_name == db_name:\n job.channel.remove(job)\n #del self._jobs_by_uuid[job.uuid]\n\n def get_jobs_to_run(self, now):\n return self._root_channel.get_jobs_to_run(now)","repo_name":"kg6zjm/odoo-magento2","sub_path":"models/jobrunner/channels.py","file_name":"channels.py","file_ext":"py","file_size_in_byte":26636,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"}
+{"seq_id":"38750999016","text":"#-------------------------------------------------------------------------------\r\n# Name: Films.py\r\n# Purpose:\r\n#\r\n# Author: NPL\r\n#\r\n# Created: 01.11.2021\r\n# Copyright: (c) 2021\r\n# Licence: \r\n#-------------------------------------------------------------------------------\r\nimport sys # sys is needed to pass argv to QApplication\r\nfrom PyQt5 import QtCore\r\nfrom PyQt5.QtWidgets import QApplication, QWidget, QLabel\r\nfrom PyQt5.QtGui import QIcon, QPixmap\r\nfrom PyQt5 import QtWidgets\r\nimport FormUI\r\nfrom PyQt5 import QtSql\r\nfrom PyQt5.QtSql import QSqlQuery\r\nfrom PyQt5.QtSql import QSqlDatabase\r\nfrom PyQt5.QtWidgets import QTableWidget,QTableWidgetItem\r\n\r\nclass UIForm(QtWidgets.QMainWindow, FormUI.Ui_MainWindow):\r\n \r\n def __init__(self, path):\r\n # This is needed here for access to 
the variables, methods\r\n # etc. in the design.py file\r\n super().__init__()\r\n self.setupUi(self) # This is needed to initialize our UI design\r\n\r\n self.tabWidget.setCurrentIndex(0)\r\n self.tabWidget.currentChanged.connect(self.changeTab2) # tab change\r\n self.addBtn.clicked.connect(self.add_row) # OK BUTTON\r\n self.tableWidget.clicked.connect(self.select_row) # SELECT ROW in TABLE\r\n self.tableWidget.doubleClicked.connect(self.edit_mode) # edit a row (double click)\r\n self.Ed_find.editingFinished.connect(self.search_title_orig) # search by original title\r\n self.Ed_find2.editingFinished.connect(self.search_title) # search by title\r\n\r\n self.comboBox_2.addItems([\"Боевик\", \"Детектив\", \"Триллер\", \"Катастрофа\", \"Комедия\", \"Ужасы\", \"Фантастика\"])\r\n\r\n self.path = path\r\n self.con1 = self.create_connection(path)\r\n self.read_table(self.con1, \"SELECT id, num, title_orig, title, year, genre, country, director, actors, desc, prim, tags, img FROM films\")\r\n self.create_films_table = \"\"\"\r\n CREATE TABLE IF NOT EXISTS films (\r\n id INTEGER PRIMARY KEY AUTOINCREMENT,\r\n num TEXT,\r\n title_orig TEXT NOT NULL,\r\n title TEXT,\r\n year INTEGER,\r\n genre TEXT,\r\n country TEXT,\r\n director TEXT,\r\n actors TEXT,\r\n desc TEXT,\r\n prim TEXT,\r\n tags TEXT,\r\n img TEXT\r\n );\r\n \"\"\"\r\n self.current_row = 0\r\n self.edit_flag = False\r\n \r\n # i m a g e -----------------------------------------\r\n pixmap = QPixmap('data/img0.jpg')\r\n pixmap_small = pixmap.scaled(400,400, QtCore.Qt.KeepAspectRatio)\r\n self.label_pix.setScaledContents(True)\r\n self.label_pix.setPixmap(pixmap_small)\r\n\r\n # ====== S E A R C H ======================================================\r\n def search(self, key_string, query_string):\r\n query = QSqlQuery()\r\n query.prepare(query_string)\r\n query.addBindValue('%'+key_string.title()+'%')\r\n query.exec_()\r\n \r\n self.tableWidget.setRowCount(0)\r\n while query.next():\r\n rows = self.tableWidget.rowCount()\r\n self.tableWidget.setRowCount(rows + 1)\r\n self.tableWidget.setItem(rows, 0, QTableWidgetItem(str(query.value(0))))\r\n self.tableWidget.setItem(rows, 1, QTableWidgetItem(str(query.value(1))))\r\n self.tableWidget.setItem(rows, 2, QTableWidgetItem(str(query.value(2))))\r\n self.tableWidget.setItem(rows, 3, QTableWidgetItem(str(query.value(3))))\r\n self.tableWidget.setItem(rows, 4, QTableWidgetItem(str(query.value(4))))\r\n self.tableWidget.setItem(rows, 5, QTableWidgetItem(str(query.value(5))))\r\n self.tableWidget.setItem(rows, 6, QTableWidgetItem(str(query.value(6))))\r\n self.tableWidget.setItem(rows, 7, QTableWidgetItem(str(query.value(7))))\r\n self.tableWidget.setItem(rows, 8, QTableWidgetItem(str(query.value(8))))\r\n self.tableWidget.setItem(rows, 9, QTableWidgetItem(str(query.value(9))))\r\n self.tableWidget.setItem(rows, 10, QTableWidgetItem(str(query.value(10))))\r\n self.tableWidget.resizeColumnsToContents()\r\n self.tableWidget.resizeRowsToContents()\r\n \r\n \r\n def search_title_orig(self):\r\n self.search(self.Ed_find.text(), \"SELECT id, num, title_orig, title, year, genre, country, director, actors, desc, prim, tags, img FROM films WHERE title_orig LIKE ?\")\r\n \r\n def search_title(self):\r\n self.search(self.Ed_find2.text(), \"SELECT id, num, title_orig, title, year, genre, country, director, actors, desc, prim, tags, img FROM films WHERE title LIKE ?\")\r\n\r\n # - - - - - - T A B L E s e c - - - - - -\r\n\r\n def create_connection(self, 
path):\r\n connection = None\r\n try:\r\n connection = QtSql.QSqlDatabase.addDatabase('QSQLITE')\r\n connection.setDatabaseName(path)\r\n connection.open()\r\n print(\"Connection to SQLite DB successful.\")\r\n except Exception as e:\r\n print(\"The error occurred:\")\r\n print(e)\r\n return connection\r\n\r\n def create_table(self, connection, query):\r\n createTableQuery = QSqlQuery()\r\n try:\r\n createTableQuery.exec(query)\r\n print(\"Query executed successfully.\")\r\n except Exception as e:\r\n print(\"The error occurred:\")\r\n print(e)\r\n\r\n def close_connection(self):\r\n self.con1.close()\r\n if not self.con1.isOpen():\r\n print(\"Connection closed.\")\r\n \r\n def read_table(self, connection, query): # output into the QTableWidget\r\n self.tableWidget.setRowCount(0)\r\n data = QSqlQuery(query)\r\n self.tableWidget.setColumnCount(11) # columns 0-10 are filled below\r\n while data.next():\r\n rows = self.tableWidget.rowCount()\r\n self.tableWidget.setEditTriggers(QtWidgets.QTableWidget.NoEditTriggers) # set read only\r\n self.tableWidget.setRowCount(rows + 1)\r\n self.tableWidget.setItem(rows, 0, QTableWidgetItem(str(data.value(0))))\r\n self.tableWidget.setItem(rows, 1, QTableWidgetItem(str(data.value(1))))\r\n self.tableWidget.setItem(rows, 2, QTableWidgetItem(str(data.value(2))))\r\n self.tableWidget.setItem(rows, 3, QTableWidgetItem(str(data.value(3))))\r\n self.tableWidget.setItem(rows, 4, QTableWidgetItem(str(data.value(4))))\r\n self.tableWidget.setItem(rows, 5, QTableWidgetItem(str(data.value(5))))\r\n self.tableWidget.setItem(rows, 6, QTableWidgetItem(str(data.value(6))))\r\n self.tableWidget.setItem(rows, 7, QTableWidgetItem(str(data.value(7))))\r\n self.tableWidget.setItem(rows, 8, QTableWidgetItem(str(data.value(8))))\r\n self.tableWidget.setItem(rows, 9, QTableWidgetItem(str(data.value(9))))\r\n self.tableWidget.setItem(rows, 10, QTableWidgetItem(str(data.value(10))))\r\n\r\n self.tableWidget.resizeColumnsToContents()\r\n\r\n def add_row(self, connection): # BUTTON \"OK\" (add row), WORKS WITH DATABASE\r\n insertDataQuery = QSqlQuery()\r\n # EDIT existing row ============================================================\r\n if self.edit_flag:\r\n query = QSqlQuery()\r\n # Year change\r\n year_ed = self.spinBox_age.value()\r\n query.prepare('UPDATE films SET year=:year WHERE id=:var')\r\n query.bindValue(':year', year_ed)\r\n query.bindValue(':var', self.current_row+1)\r\n query.exec()\r\n\r\n # Title original change\r\n title_orig = self.Ed_name.text()\r\n query.prepare('UPDATE films SET title_orig=:title_orig WHERE id=:var')\r\n query.bindValue(':title_orig', title_orig)\r\n query.bindValue(':var', self.current_row+1)\r\n query.exec()\r\n\r\n # Title change\r\n title = self.Ed_title.text()\r\n query.prepare('UPDATE films SET title=:title WHERE id=:var')\r\n query.bindValue(':title', title)\r\n query.bindValue(':var', self.current_row+1)\r\n query.exec()\r\n\r\n # Country change\r\n country = self.lineEdit_country.text()\r\n query.prepare('UPDATE films SET country=:country WHERE id=:var')\r\n query.bindValue(':country', country)\r\n query.bindValue(':var', self.current_row+1)\r\n query.exec()\r\n\r\n # Genre change\r\n genre = self.comboBox_2.currentText()\r\n query.prepare('UPDATE films SET genre=:genre WHERE id=:var')\r\n query.bindValue(':genre', genre)\r\n query.bindValue(':var', self.current_row+1)\r\n query.exec()\r\n \r\n # Director change\r\n director = self.Ed_director.text()\r\n query.prepare('UPDATE films SET director=:director WHERE id=:var')\r\n query.bindValue(':director', 
director)\r\n query.bindValue(':var', self.current_row+1)\r\n query.exec()\r\n \r\n # Desc change\r\n desc = self.Te_desc.toPlainText()\r\n query.prepare('UPDATE films SET desc=:desc WHERE id=:var')\r\n query.bindValue(':desc', desc)\r\n query.bindValue(':var', self.current_row+1)\r\n query.exec()\r\n \r\n # Img change\r\n img = self.Ed_img.text()\r\n query.prepare('UPDATE films SET img=:img WHERE id=:var')\r\n query.bindValue(':img', img)\r\n query.bindValue(':var', self.current_row+1)\r\n query.exec()\r\n \r\n # Actors change\r\n actors = self.Ed_actors.text()\r\n query.prepare('UPDATE films SET actors=:actors WHERE id=:var')\r\n query.bindValue(':actors', actors)\r\n query.bindValue(':var', self.current_row+1)\r\n query.exec()\r\n \r\n # ending editing mode and return to tab1\r\n print(\"edit flag\")\r\n self.edit_flag = False\r\n self.read_table(self.con1, \"SELECT id, num, title_orig, title, year, genre, country, director, actors, desc, prim, tags, img FROM films\")\r\n \r\n # i m a g e -----------------------------------------\r\n pixmap = QPixmap('data/' + img)\r\n pixmap_small = pixmap.scaled(400,400, QtCore.Qt.KeepAspectRatio)\r\n self.label_pix.setScaledContents(True)\r\n self.label_pix.setPixmap(pixmap_small)\r\n self.tabWidget.setTabText(1,'Добавить')\r\n self.tabWidget.setCurrentIndex(0)\r\n # IF ADDING NEW ROW ========================================================= \r\n else:\r\n insertDataQuery.prepare(\"INSERT INTO films (num, title_orig, title, year, genre, country, director, actors, desc, prim, tags, img) VALUES (?,?,?,?,?,?,?,?,?,?,?,?)\")\r\n num = self.Ed_num.text()\r\n title_orig = self.Ed_name.text()\r\n title = self.Ed_title.text()\r\n year = self.spinBox_age.value()\r\n genre = self.comboBox_2.currentText()\r\n country = self.lineEdit_country.text()\r\n director = self.Ed_director.text()\r\n actors = self.Ed_actors.text()\r\n desc = self.Te_desc.toPlainText()\r\n prim = self.Ed_prim.text()\r\n tags = self.Ed_tags.text()\r\n img = self.Ed_img.text()\r\n\r\n data = [(num, title_orig, title, year, genre, country, director, actors, desc, prim, tags, img)]\r\n for num, title_orig, title, year, genre, country, director, actors, desc, prim, tags, img in data:\r\n insertDataQuery.addBindValue(num)\r\n insertDataQuery.addBindValue(title_orig)\r\n insertDataQuery.addBindValue(title)\r\n insertDataQuery.addBindValue(year)\r\n insertDataQuery.addBindValue(genre)\r\n insertDataQuery.addBindValue(country)\r\n insertDataQuery.addBindValue(director)\r\n insertDataQuery.addBindValue(actors)\r\n insertDataQuery.addBindValue(desc)\r\n insertDataQuery.addBindValue(prim)\r\n insertDataQuery.addBindValue(tags)\r\n insertDataQuery.addBindValue(img)\r\n insertDataQuery.exec()\r\n self.close_connection()\r\n # Clearing editable widgets for new row\r\n self.Ed_name.setText('')\r\n self.Ed_title.setText('')\r\n self.Ed_num.setText('')\r\n self.Ed_director.setText('')\r\n self.Ed_actors.setText('')\r\n self.Te_desc.setText('')\r\n self.tabWidget.setCurrentIndex(0)\r\n \r\n\r\n def changeTab2(self): # handle switching of the current tab\r\n if self.tabWidget.currentIndex() == 1:\r\n if not self.edit_flag:\r\n self.spinBox_id.setValue(self.tableWidget.rowCount()+1)\r\n self.spinBox_id.setReadOnly(True)\r\n if self.tableWidget.rowCount() == 0:\r\n self.create_table(self.con1, self.create_films_table)\r\n if self.tabWidget.currentIndex() == 0:\r\n if not self.con1.isOpen():\r\n self.con1 = self.create_connection(self.path) # keep the reopened connection\r\n self.read_table(self.con1, \"SELECT id, num, title_orig, title, year, genre, 
country, director, actors, desc, prim, tags, img FROM films\")\r\n if self.edit_flag:\r\n print(\"Mode switch to edit_off\")\r\n self.edit_flag = False\r\n self.tabWidget.setTabText(1,'Добавить')\r\n # clear the input fields\r\n self.Ed_name.setText('')\r\n self.Ed_title.setText('')\r\n self.Ed_num.setText('')\r\n self.Ed_director.setText('')\r\n self.Ed_actors.setText('')\r\n self.Te_desc.setText('')\r\n\r\n\r\n def select_row(self): # cell (row) selection\r\n row = self.tableWidget.currentRow() # Index of Row\r\n self.current_row = row\r\n # get info on the active row\r\n key = self.tableWidget.item(row, 2).text() # title_orig is the key for the search\r\n query = QSqlQuery()\r\n query.prepare(\"SELECT id, title_orig, title, year, genre, country, director, actors, desc, prim, tags, img FROM films WHERE title_orig LIKE ?\")\r\n query.addBindValue(key)\r\n query.exec()\r\n # show info from the database in the widgets\r\n while query.next():\r\n print(\"id: \", str(query.value(0)), str(query.value(1)), str(query.value(2))) # <<-- GET ID OF ACTIVE ELEMENT!!!\r\n Title = str(query.value(1)) + \" - (\" + str(query.value(2)) + \")\" + \", \" + str(query.value(3))\r\n Desc = str(query.value(8))\r\n Image = str(query.value(11))\r\n\r\n self.label_title.setText(Title)\r\n self.label_title.setStyleSheet(\"\"\"\r\n font: bold italic;\r\n background-color: none;\r\n \"\"\")\r\n\r\n self.textEdit.setText(Desc)\r\n \r\n pixmap = QPixmap('data/' + Image)\r\n pixmap_small = pixmap.scaled(400,400, QtCore.Qt.KeepAspectRatio)\r\n self.label_pix.setScaledContents(True)\r\n self.label_pix.setPixmap(pixmap_small)\r\n self.Ed_img.setText(Image)\r\n\r\n\r\n # ==========================================================================\r\n # E D I T T A B\r\n # ==========================================================================\r\n def edit_mode(self): # after a double click, switch to edit mode\r\n \r\n print(\"Edit mode\")\r\n self.edit_flag = True\r\n\r\n row = self.tableWidget.currentRow() # Index of Row\r\n self.current_row = row\r\n\r\n print(\"Row: \", row)\r\n self.tabWidget.setCurrentIndex(1)\r\n self.tabWidget.setTabText(1,'Редактирование')\r\n\r\n # year\r\n query = \"SELECT year FROM films\"\r\n data = QSqlQuery(query)\r\n for i in range(0, self.tableWidget.rowCount()):\r\n data.next()\r\n if i == row:\r\n Year = data.value(0)\r\n self.spinBox_age.setValue(Year)\r\n # id\r\n query = \"SELECT id FROM films\"\r\n data = QSqlQuery(query)\r\n for i in range(0, self.tableWidget.rowCount()):\r\n data.next()\r\n if i == row:\r\n id_num = data.value(0)\r\n self.spinBox_id.setValue(id_num)\r\n # title original (title_orig)\r\n query = \"SELECT title_orig FROM films\"\r\n data = QSqlQuery(query)\r\n for i in range(0, self.tableWidget.rowCount()):\r\n data.next()\r\n if i == row:\r\n title_orig = data.value(0)\r\n self.Ed_name.setText(title_orig)\r\n # title (title)\r\n query = \"SELECT title FROM films\"\r\n data = QSqlQuery(query)\r\n for i in range(0, self.tableWidget.rowCount()):\r\n data.next()\r\n if i == row:\r\n title = data.value(0)\r\n self.Ed_title.setText(title)\r\n # country\r\n query = \"SELECT country FROM films\"\r\n data = QSqlQuery(query)\r\n for i in range(0, self.tableWidget.rowCount()):\r\n data.next()\r\n if i == row:\r\n country = data.value(0)\r\n self.lineEdit_country.setText(country)\r\n # director\r\n query = \"SELECT director FROM films\"\r\n data = QSqlQuery(query)\r\n for 
i in range(0, self.tableWidget.rowCount()):\r\n data.next()\r\n if i == row:\r\n director = data.value(0)\r\n self.Ed_director.setText(director)\r\n # actors\r\n query = \"SELECT actors FROM films\"\r\n data = QSqlQuery(query)\r\n for i in range(0, self.tableWidget.rowCount()):\r\n data.next()\r\n if i == row:\r\n actors = data.value(0)\r\n self.Ed_actors.setText(actors)\r\n # desc\r\n query = \"SELECT desc FROM films\"\r\n data = QSqlQuery(query)\r\n for i in range(0, self.tableWidget.rowCount()):\r\n data.next()\r\n if i == row:\r\n desc = data.value(0)\r\n self.Te_desc.setText(desc)\r\n # genre\r\n query = \"SELECT genre FROM films\"\r\n data = QSqlQuery(query)\r\n for i in range(0, self.tableWidget.rowCount()):\r\n data.next()\r\n if i == row:\r\n genre = data.value(0)\r\n self.comboBox_2.setCurrentIndex(self.comboBox_2.findText(genre))\r\n\r\n\r\ndef main():\r\n #con1 = create_connection('test.db')\r\n #create_table(con1, create_users_table)\r\n #create_row_table(con1, create_user, data)\r\n #execute_read_query(con1, read_all) \r\n \r\n app = QtWidgets.QApplication(sys.argv) # new QApplication instance\r\n app.setStyle('Fusion')\r\n\r\n window = UIForm('movies.db') # create the UIForm object\r\n\r\n #window.read_table(con1, read_all) # show in the table\r\n\r\n window.show() # show the window\r\n app.exec_() # and run the application\r\n \r\n window.close_connection() \r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"NPL-121/Filmscat","sub_path":"Films.py","file_name":"Films.py","file_ext":"py","file_size_in_byte":19831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
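For reference, the jq expressions configured for JsonInverter in the next record resolve against the fetched response like this; the payload shape and paths below are invented for illustration:

import jq

response = {"pv": {"power_w": 4200.0, "exported_wh": 123456.0}}

power = float(jq.compile(".pv.power_w").input(response).first())
exported = float(jq.compile(".pv.exported_wh").input(response).first())

# the component stores PV power as a negative value
if power >= 0:
    power = -power

print(power, exported)  # -4200.0 123456.0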
+{"seq_id":"29083781838","text":"from pynetsnmp import netsnmp, twistedsnmp\nfrom twisted.internet import reactor, defer\nfrom collections import defaultdict\n\nimport logging\nlogging.basicConfig()\n\nclass Agent(netsnmp.Session):\n\n def __init__(self, ip, community='public', version=netsnmp.SNMP_VERSION_2c, **kw):\n super(Agent, self).__init__(peername=ip, version=version,\n community=community, community_len = len(community), **kw\n )\n self.results = defaultdict(dict) # the factory takes no arguments; lambda key: {} would raise TypeError on first missing-key access\n self.max_bulk = 40\n self._currentTarget = None\n self._currentName = None\n self._stopped = False\n self.fetch_defer = None\n\n def fetch(self, targets):\n self.fetch_defer = defer.Deferred()\n self.targets = targets\n self.next_target()\n self._stopped = False\n twistedsnmp.updateReactor()\n return self.fetch_defer\n\n def next_target(self):\n if self._stopped:\n return\n if self._currentTarget:\n self.results[self._currentName] = self._currentTarget.results\n\n try:\n target = self.targets.pop(0)\n self._currentName, self._currentTarget = target\n self.getbulk(0, self.max_bulk, [self._currentTarget.root])\n except IndexError:\n self.fetch_defer.callback(self.results)\n\n def callback(self, pdu):\n results = netsnmp.getResult(pdu)\n for oid, result in results:\n if oid in self._currentTarget:\n self._currentTarget.insert(oid, result)\n else:\n self.next_target()\n return\n\n if not results:\n self.next_target()\n else:\n self.getbulk(0, self.max_bulk, [results[-1][0]])\n\n def timeout(self, error):\n if self.fetch_defer:\n self._stopped = True\n self.fetch_defer.callback(False)","repo_name":"wroniasty/macbrush","sub_path":"macbrush/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"10380951504","text":"import dgl\n\n\n# ===========Create a graph in DGL============\ndef build_karate_club_graph():\n g = dgl.DGLGraph()\n g.add_nodes(34)\n\n edge_list = [(1, 0), (2, 0), (2, 1), (3, 0), (3, 1), (3, 2),\n (4, 0), (5, 0), (6, 0), (6, 4), (6, 5), (7, 0), (7, 1),\n (7, 2), (7, 3), (8, 0), (8, 2), (9, 2), (10, 0), (10, 4),\n (10, 5), (11, 0), (12, 0), (12, 3), (13, 0), (13, 1), (13, 2),\n (13, 3), (16, 5), (16, 6), (17, 0), (17, 1), (19, 0), (19, 1),\n (21, 0), (21, 1), (25, 23), (25, 24), (27, 2), (27, 23),\n (27, 24), (28, 2), (29, 23), (29, 26), (30, 1), (30, 8),\n (31, 0), (31, 24), (31, 25), (31, 28), (32, 2), (32, 8),\n (32, 14), (32, 15), (32, 18), (32, 20), (32, 22), (32, 23),\n (32, 29), (32, 30), (32, 31), (33, 8), (33, 9), (33, 13),\n (33, 14), (33, 15), (33, 18), (33, 19), (33, 20), (33, 22),\n (33, 23), (33, 26), (33, 27), (33, 28), (33, 29), (33, 30),\n (33, 31), (33, 32)]\n src, dst = tuple(zip(*edge_list))\n g.add_edges(src, dst)\n # edges are directional in DGL; make them bi-directional\n g.add_edges(dst, src)\n\n return g\n\n\npart_1 = False\n# part_1 = True\nif part_1:\n G = build_karate_club_graph()\n print('We have %d nodes.' % G.number_of_nodes())\n print('We have %d edges.' 
% G.number_of_edges())\n\npart_2 = False\n# part_2 = True\nimport networkx as nx\n\n# Since the actual graph is undirected, we convert it for visualization\n# purpose.\nif part_2:\n G = build_karate_club_graph()\n nx_G = G.to_networkx().to_undirected()\n print(G)\n print(nx_G)\n # Kamada-Kawaii layout usually looks pretty for arbitrary graphs\n pos = nx.kamada_kawai_layout(nx_G)\n nx.draw(nx_G, pos, with_labels=True, node_color=[[.7, .7, .7]])\n\n# =================Assgin features tp nodes or edges=========================\nimport torch\n\nG = build_karate_club_graph()\n# assgin features to nodes\nG.ndata['feat'] = torch.eye(34)\nprint(G.nodes[2].data['feat'])\nprint(G.nodes[10, 11].data['feat'])\n\n# ======================Define a Graph Convolutional Network(GCN)============================\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\n# Define the message and reduce function\n# NOTE: We ignore the GCN's normalization constant c_ij for this tutorial.\ndef gcn_message(edges):\n # The argument is a batch of edges.\n # This computes a (batch of) message called 'msg' using the source node's feature 'h'.\n return {'msg': edges.src['h']}\n\n\ndef gcn_reduce(nodes):\n # The argument is a batch of nodes.\n # This computes the new 'h' features by summing received 'msg' in each node's mailbox.\n return {'h': torch.sum(nodes.mailbox['msg'], dim=1)}\n\n\n# Define the GCNLayer module\nclass GCNLayer(nn.Module):\n def __init__(self, in_feats, out_feats):\n super(GCNLayer, self).__init__()\n self.linear = nn.Linear(in_feats, out_feats)\n\n def forward(self, g, inputs):\n # g is the graph and the inputs is the input node features\n # first set the node features\n g.ndata['h'] = inputs\n # trigger message passing on all edges\n g.send(g.edges(), gcn_message)\n # trigger aggregation at all nodes\n g.recv(g.nodes(), gcn_reduce)\n # get the result node features\n h = g.ndata.pop('h')\n # perform linear transformation\n return self.linear(h)\n\n\n# Define a 2-layer GCN model\nclass GCN(nn.Module):\n def __init__(self, in_feats, hidden_size, num_classes):\n super(GCN, self).__init__()\n self.gcn1 = GCNLayer(in_feats, hidden_size)\n self.gcn2 = GCNLayer(hidden_size, num_classes)\n\n def forward(self, g, inputs):\n h = self.gcn1(g, inputs)\n h = torch.relu(h)\n h = self.gcn2(g, h)\n return h\n\n\n# The first layer transforms input features of size of 34 to a hidden size of 5.\n# The second layer transforms the hidden layer and produces output features of\n# size 2, corresponding to the two groups of the karate club.\nnet = GCN(34, 5, 2)\n\n# ==========================Data preparation and initialization================================\n# use one-hot vector to initialize the node features\ninputs = torch.eye(34)\n# in a semi-supervised setting.\n# only the instuctor(node 0) and the club president are assigned labels\nlabeled_nodes = torch.tensor([0, 33])\n# the label of instructor anc the president node are labeled\nlabels = torch.tensor([0, 1])\n\n# ==============================Train then visualize==================================\noptimizer = torch.optim.Adam(net.parameters(), lr=0.01)\nall_logits = []\nfor epoch in range(300):\n logits = net(G, inputs)\n all_logits.append(logits.detach())\n logp = F.log_softmax(logits, 1)\n loss = F.nll_loss(logp[labeled_nodes], labels)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if epoch % 20 == 0:\n print('Epoch %d | Loss: %.16f' % (epoch, loss.item()))\n\nimport matplotlib.animation as animation\nimport matplotlib.pyplot as plt\n\n\n# 
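The send/recv pair used in GCNLayer above was removed from DGL in 0.5; current DGL expresses the same sum-of-neighbour-features step with update_all and built-in message functions. A sketch of the equivalent layer, assuming a recent DGL:

import dgl.function as fn
import torch.nn as nn

class GCNLayerUpdateAll(nn.Module):
    """Same aggregation as GCNLayer above, written with g.update_all."""

    def __init__(self, in_feats, out_feats):
        super().__init__()
        self.linear = nn.Linear(in_feats, out_feats)

    def forward(self, g, inputs):
        with g.local_scope():
            g.ndata['h'] = inputs
            # copy_u: message = source node's 'h'; sum: aggregate into 'h'
            g.update_all(fn.copy_u('h', 'msg'), fn.sum('msg', 'h'))
            return self.linear(g.ndata['h'])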
import matplotlib.axes as ax\ndef draw(i):\n cls1color = '#00FFFF'\n cls2color = '#FF00FF'\n pos = {}\n colors = []\n for v in range(34):\n pos[v] = all_logits[i][v].numpy()\n cls = pos[v].argmax()\n colors.append(cls1color if cls else cls2color)\n\n ax.cla()\n ax.axis('off')\n ax.set_title('EPOCH: %d' % i)\n nx.draw_networkx(nx_G.to_undirected(), pos, node_color=colors,\n with_labels=True, node_size=300, ax=ax)\n\nnx_G = G.to_networkx().to_undirected()\nfig = plt.figure(dpi=150)\nfig.clf()\nax = fig.subplots()\ndraw(0) # draw the prediction of the first epoch\n# import pylab\n# pylab.title('Self_Define Net',fontsize=15)\n# pylab.show()\nani = animation.FuncAnimation(fig, draw, frames=len(all_logits), interval=200)\nplt.show()\nplt.close()\n","repo_name":"strawsyz/straw","sub_path":"study/dgl_study/01.py","file_name":"01.py","file_ext":"py","file_size_in_byte":5889,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"12165782642","text":"import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nimport pdb\r\nimport numpy as np\r\n\r\nclass A3C(nn.Module):\r\n def __init__(self):\r\n super(A3C, self).__init__()\r\n\r\n self.conv1 = nn.Conv2d(1, 4, kernel_size=4, stride=2)\r\n self.conv2 = nn.Conv2d(4, 8, kernel_size=4, stride=2)\r\n self.conv3 = nn.Conv2d(8, 8, kernel_size=4, stride=2) #drop out or batchnorm\r\n\r\n self.fc = nn.Linear(512, 128)\r\n\r\n self.critic = nn.Linear(64, 1)\r\n self.actor = nn.Linear(64, 5)\r\n\r\n def forward(self, state, hx, cx):\r\n \"\"\"\r\n :param state: [batch size, channel, width, height]\r\n :return:\r\n \"\"\"\r\n x = F.relu(self.conv1(state))\r\n x = F.relu(self.conv2(x))\r\n x = F.relu(self.conv3(x))\r\n\r\n img_feat = x.view(x.size(0), -1)\r\n img_feat = F.relu(self.fc(img_feat))\r\n\r\n act, critic = torch.chunk(img_feat, 2, dim=1)\r\n\r\n return self.critic(critic), self.actor(act), hx, cx\r\n\r\n\r\nclass A3C_LSTM(nn.Module):\r\n def __init__(self):\r\n super(A3C_LSTM, self).__init__()\r\n\r\n self.conv1 = nn.Conv2d(4, 8, kernel_size=3, stride=1)\r\n # self.batchnorm1 = nn.BatchNorm2d(4, track_running_stats=False)\r\n self.conv2 = nn.Conv2d(8, 12, kernel_size=3, stride=1)\r\n # self.batchnorm2 = nn.BatchNorm2d(6, track_running_stats=False)\r\n self.conv3 = nn.Conv2d(12, 12, kernel_size=3, stride=1)\r\n # self.batchnorm3 = nn.BatchNorm2d(6, track_running_stats=False)\r\n\r\n self.conv_col = nn.Conv2d(4, 8, kernel_size=(1, 8), stride=1) # kernel=(height,width)\r\n self.conv_row = nn.Conv2d(4, 8, kernel_size=(20, 1), stride=1)\r\n\r\n self.fc = nn.Linear(560, 512)\r\n\r\n self.lstm = nn.LSTMCell(512, 256)\r\n\r\n self.mlp = nn.Linear(256, 128)\r\n\r\n self.critic = nn.Linear(64, 1)\r\n self.actor = nn.Linear(64, 6)\r\n\r\n def forward(self, state, hx, cx):\r\n \"\"\"\r\n :param state: [batch size, channel, width, height]\r\n :return:\r\n \"\"\"\r\n # pdb.set_trace()\r\n x = F.relu(self.conv1(state))\r\n x = F.relu(self.conv2(x))\r\n x = F.relu(self.conv3(x))\r\n # pdb.set_trace()\r\n img_feat = x.view(x.size(0), -1) # [1, 336]\r\n\r\n col = F.relu(self.conv_col(state))\r\n col = col.view(col.size(0), -1) # [1, 160]\r\n row = F.relu(self.conv_row(state))\r\n row = row.view(row.size(0), -1) # [1, 64]\r\n\r\n feat = torch.cat([img_feat, col, row], dim=1)\r\n\r\n img_feat = F.leaky_relu(self.fc(feat))\r\n\r\n _hx, _cx = self.lstm(img_feat, (hx, cx))\r\n\r\n mlp_output = self.mlp(_hx)\r\n\r\n act, critic = torch.chunk(mlp_output, 2, dim=1)\r\n\r\n return self.critic(critic), 
self.actor(act), _hx, _cx","repo_name":"kibeomKim/tetris","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17914350480","text":"from preproc_NIR import devise_bande, msc, pow_trans, prep_log\nfrom sklearn.model_selection import KFold,cross_val_predict, LeaveOneOut\nfrom sklearn.metrics import mean_squared_error, mean_squared_log_error, max_error, r2_score, mean_absolute_error\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split, cross_val_score\nfrom sklearn.preprocessing import normalize, StandardScaler\nimport seaborn as sns\nfrom pandas import read_csv, read_excel\nfrom matplotlib.pyplot import plot, show, xlabel, ylabel, title\nfrom pandas import DataFrame, concat\nimport numpy as np\nfrom scipy.stats import f_oneway, shapiro\nfrom scipy.signal import savgol_filter, detrend\nfrom preproc_NIR import osc, msc, snv, simple_moving_average, centring, var_sel, pow_trans\nfrom statsmodels.multivariate.pca import PCA\nfile_name=\"data-oil-2miroirs\"\ndb=read_excel(file_name+'.xlsx')\nX=db.drop([db.columns[0],'Y'],axis=1)\nX=DataFrame(savgol_filter(msc(X.to_numpy()),3,1,1))\nY=db['Y']\nY=[np.sqrt(i) for i in Y]\nrescols=[\"r2c\",\"r2cv\",\"r2t\",\"rmsec\",\"rmsecv\",\"rmset\"]\nr2c,r2cv,r2t,rmsec,rmsecv,rmset=[],[],[],[],[],[]\nr2=[]\nRMSE=[]\nj=0\nwhile True:\n for i in range(1,30,1):\n pc = PCA(X, ncomp=i, method='nipals')\n x_train, x_test, y_train, y_test = train_test_split(DataFrame(pc.factors),Y,test_size=0.2,random_state=j)\n model=LinearRegression()\n model.fit(x_train, y_train)\n r2.append(r2_score(model.predict(x_test),y_test))\n RMSE.append(mean_squared_error(model.predict(x_test),y_test))\n pc = PCA(X, ncomp=29, method='nipals')#1+RMSE.index(min(RMSE))\n x_train, x_test, y_train, y_test = train_test_split(DataFrame(pc.factors),Y,test_size=0.2,random_state=j)\n model=LinearRegression()\n model.fit(x_train, y_train)\n RMSECV=abs(np.mean(cross_val_score(LinearRegression(), x_train, y_train, scoring='neg_root_mean_squared_error', cv=LeaveOneOut())))\n R2CV=100*np.mean(cross_val_score(LinearRegression(), x_train, y_train, scoring='r2'))\n R2train=100*r2_score(y_train,model.predict(x_train))\n R2test=100*r2_score(y_test,model.predict(x_test))\n RMSEtrain=mean_squared_error(y_train,model.predict(x_train))\n RMSEtest=mean_squared_error(y_test,model.predict(x_test))\n if R2test>0 and R2CV>0:\n r2c.append(R2train)\n r2cv.append(R2CV)\n r2t.append(R2test)\n rmsec.append(RMSEtrain)\n rmsecv.append(RMSECV)\n rmset.append(RMSEtest)\n break\n else:\n j+=1\nres=DataFrame({rescols[0]:r2c,rescols[1]:r2cv,rescols[2]:r2t,rescols[3]:rmsec,rescols[4]:rmsecv,rescols[5]:rmset})\nprint(res)\nw,p = shapiro([i-j for i,j in zip(y_test,model.predict(x_test))])\nif p>0.05:\n desicion=\"Normal\"\nelif p<0.05:\n desicion=\"Not Normal\"\nprint('Quantile shapiro : {}\\npropability shapiro : {}\\ndesicion : {}'.format(w,p,desicion))","repo_name":"Ayoubzinini/oil-project","sub_path":"pcr-final.py","file_name":"pcr-final.py","file_ext":"py","file_size_in_byte":2756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7106164507","text":"import _init_path\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\nfrom torch.utils.data import DataLoader\n\nimport dets \nfrom dets.detector import YOLOV3Detector\nfrom dets.data.dataset import 
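The NIPALS-scores-plus-LinearRegression combination in the oil-data script above is principal component regression. scikit-learn can express the same model as one pipeline, which also keeps the projection from being fitted on test rows (the script runs PCA on all of X before splitting). A sketch, with the component count mirroring the hard-coded 29; note sklearn's PCA is SVD-based rather than NIPALS, so scores match only up to sign:

from sklearn.decomposition import PCA
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline

# PCA scores feed a linear model: principal component regression
pcr = make_pipeline(PCA(n_components=29), LinearRegression())
# pcr.fit(x_train, y_train); y_hat = pcr.predict(x_test)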
DatasetYolov5\n\n\nbbox = [0, 45, 0.479492, 0.688771, 0.955609, 0.5955,\n 0, 45, 0.736516, 0.247188, 0.498875, 0.476417,\n 0, 50, 0.637063, 0.732938, 0.494125, 0.510583,\n 0, 45, 0.339438, 0.418896, 0.678875, 0.7815,\n 0, 49, 0.646836, 0.132552, 0.118047, 0.096937,\n 0, 49, 0.773148, 0.129802, 0.090734, 0.097229,\n 0, 49, 0.668297, 0.226906, 0.131281, 0.146896,\n 0, 49, 0.642859, 0.079219, 0.148063, 0.148062\n ]\ntargets = torch.tensor(bbox).view(-1, 6)\n\ndata = torch.rand(1, 3, 640, 640)\ntargets[:, 3:] = targets[:, 3:]\n\n\nyolov3 = YOLOV3Detector()\noutput = yolov3(data, targets)\ntry:\n print(output.shape, output.sum())\nexcept:\n print(output['loss'].item())\n\n# print(yolov3)\n\n\n\ndevice = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')\n\ndataset = DatasetYolov5('../../dataset/coco128/images/train2017/')\ndataloader = DataLoader(dataset, batch_size=8, collate_fn=dataset.collate_fn)\n\nyolov3 = YOLOV3Detector().to(device=device)\nyolov3.train()\n\n# optimizer = optim.Adam(yolov3.parameters(), lr=0.001)\n\n# yolov3.model[0].eval()\n# for p in yolov3.model[0].parameters():\n# p.requires_grad = False\n\noptimizer = optim.SGD(yolov3.parameters(), lr=0.01, momentum=0.9)\nscheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[10, 30], gamma=0.5)\n\n\nfor _ in range(3):\n\n for data, label in dataloader:\n data = data.to(device=device)\n label = label.to(device=device)\n\n losses = yolov3(data, label)\n \n optimizer.zero_grad()\n losses['loss'].backward()\n optimizer.step()\n \n print(losses['loss'].item(), losses['lbox'], losses['lobj'], losses['lcls'], )\n\n\nyolov3 = yolov3.to(torch.device('cpu'))\ntorch.save(yolov3.state_dict(), './yolov3.pt')\n\n","repo_name":"lyuwenyu/DetectionL","sub_path":"tests/test_yolov3.py","file_name":"test_yolov3.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"39678072548","text":"from utils.database import Database\nfrom discord.ext import commands\nimport discord\nimport log\nfrom discord_slash import SlashCommand as SlashClient\nimport json\n\nwith open('config.json', 'r') as f:\n config = json.load(f)\n\nGUILDS = config['commands']['guilds']\nATLAS = config['bot']['guild']\n\nclass Context(commands.Context):\n def send(self, *args, **kwargs):\n kwargs['allowed_mentions'] = discord.AllowedMentions(replied_user=False)\n return self.reply(*args, **kwargs)\n\nclass Bot(commands.Bot):\n def __init__(self, **options):\n options['intents'] = discord.Intents.default()\n options['intents'].members = True\n options['command_prefix'] = '/'\n options['help_command'] = None\n options['status'] = discord.Status.online\n options['activity'] = discord.Game(name='on mc.the-atlas.net')\n super().__init__(**options)\n self.logger = log.Logger()\n self.database = None\n with open('config.json', 'r') as f:\n config = json.load(f)\n self.config = config\n # config loaded from config.json\n self.limbo_role = config['moderation']['limbo']\n self.mute_role = config['moderation']['mute']\n self.application_channel = config['applications']['channel']\n self.suggestion_channel = config['suggestions']['channel']\n self.sugg_deny_channel = config['suggestions']['deny_channel']\n self.sugg_accept_channel = config['suggestions']['accept_channel']\n self.guild_id = config['bot']['guild']\n self.db = Database(self)\n self.slash = SlashClient(self, delete_from_unused_guilds=True, sync_commands=False)\n self.owner_id = 830344767027675166\n\n 
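# For readability: each row of the flat bbox list in the detector test
# above encodes one ground-truth box as
#   (image_index, class_id, cx, cy, w, h)
# with the centre and size normalised to [0, 1]. The first row,
#   0, 45, 0.479492, 0.688771, 0.955609, 0.5955,
# is image 0 of the batch, class 45, centred near (0.48, 0.69) and
# covering roughly 0.96 x 0.60 of the image;
# torch.tensor(bbox).view(-1, 6) simply restores the per-box rows.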
@property\n def _config(self):\n return self.config\n\n async def get_or_fetch_user(self, user):\n try:\n return self.get_user(user) or await self.fetch_user(user)\n except discord.HTTPException:\n return None\n \n async def sync_all_commands(self) -> None:\n self.logger.info('Syncing commands...')\n await self.slash.sync_all_commands()\n self.logger.success('Commands synced!')\n\n async def sync_commands(self) -> None:\n await self.sync_all_commands()\n\n async def on_connect(self):\n self.logger.info('Connected to Discord.')\n\n async def on_disconnect(self):\n self.logger.warn('Disconnected from Discord.')\n\n async def on_resumed(self):\n self.logger.info('Connection resumed.')\n\n async def on_ready(self):\n self.logger.success(f'Bot is ready!')\n self.logger.success(f'Logged in as {self.user} with ID {self.user.id}')\n\n async def on_message(self, message):\n if not await self.is_owner(message.author):\n return\n await self.process_commands(message)\n\n async def close(self) -> None:\n self.logger.warn('Closing the bot...')\n for ext in dict(self.extensions).keys():\n self.unload_extension(ext)\n await super().close()\n\n async def get_context(self, message, *, cls=None):\n return await super().get_context(message, cls=cls or Context)\n","repo_name":"plun1331/bentinel","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":3131,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"23914761232","text":"from typing import List\n\nfrom retroui.terminal.color import Color, Black, White\nfrom retroui.terminal.size import Size\nfrom retroui.terminal.tixel import Tixel\nfrom retroui.terminal.view import View\n\n\nclass Scroller(View):\n \"\"\"\n A `Scroller` is a view used to indicate where scrollable content is\n positioned, and what fraction of that content is currently visible.\n\n Slots:\n\n `is_vertical`\n Whether or not the scroller is vertical, or horizontal. True by\n default.\n\n `scroll_position`\n How far into the content the visible portion of the content is.\n Ranges from 0.0, for when the content is scrolled to the\n beginning, to 1.0 for when the content has scrolled to the end.\n\n `visible_fraction`\n The fraction of the content that is currently visible in its\n containing view. 
Ranges from 0.0 to 1.0.\n \"\"\"\n\n __slots__ = ['is_vertical', 'position', 'visible_fraction']\n\n def __init__(self):\n # type: () -> None\n\n super().__init__()\n\n self.is_vertical = True # type: bool\n self.scroll_position = 0.0 # type: float\n self.visible_fraction = 1.0 # type: float\n\n def set_is_vertical(self, yn):\n # type: (bool) -> None\n \"\"\"\n Set whether the scroller is vertical or horizontal.\n \"\"\"\n\n self.is_vertical = yn\n\n def set_scroll_position(self, pos):\n # type: (float) -> None\n \"\"\"\n Set the scroll position.\n \"\"\"\n\n self.scroll_position = max(0.0, min(1.0, float(pos)))\n\n def set_visible_fraction(self, frac):\n # type: (float) -> None\n \"\"\"\n Set the visible fraction.\n \"\"\"\n\n self.visible_fraction = min(1.0, max(0.0, float(frac)))\n\n def constrain_size(self, new_size):\n # type: (Size) -> Size\n \"\"\"\n The size of a scroller is constrained to be at most 1 wide if the\n scroller is vertical, and 1 tall if the scroller is horizontal.\n \"\"\"\n\n if self.is_vertical:\n return Size(1, new_size.height)\n else:\n return Size(new_size.width, 1)\n\n def draw(self):\n # type: () -> List[List[Tixel]]\n blanksym = '' # type: str\n barsym = '' # type: str\n line = [] # type: List[List[str]]\n if self.is_vertical:\n scrollbar_height = max(\n 1, int(self.visible_fraction * self.size.height))\n available_scrollbar_positions = self.size.height - scrollbar_height\n current_scrollbar_position = int(\n self.scroll_position * available_scrollbar_positions)\n\n blanksym = '│'\n barsym = '█'\n preblanks = current_scrollbar_position * \\\n [blanksym] # type: List[str]\n bars = scrollbar_height * [barsym] # type: List[str]\n postblanks = (available_scrollbar_positions -\n current_scrollbar_position) * [blanksym] # type: List[str]\n lines = preblanks + bars + postblanks\n else:\n scrollbar_width = max(\n 1, int(self.visible_fraction * self.size.width))\n available_scrollbar_positions = self.size.width - scrollbar_width\n current_scrollbar_position = int(\n self.scroll_position * available_scrollbar_positions)\n\n blanksym = '─'\n barsym = '█'\n preblank = current_scrollbar_position * blanksym\n bar = scrollbar_width * barsym\n postblank = (available_scrollbar_positions -\n current_scrollbar_position) * blanksym\n lines = [preblank + bar + postblank]\n\n tixel_lines = [[Tixel(c, White, Black)\n for c in line] for line in lines]\n\n return tixel_lines\n","repo_name":"BekaValentine/RetroUI","sub_path":"src/retroui/terminal/scroller.py","file_name":"scroller.py","file_ext":"py","file_size_in_byte":3846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10051493842","text":"import torch\nfrom torchvision.ops import boxes as box_ops\nfrom torchvision.ops import nms # BC-compat\n\n\ndef batched_nms(boxes, scores, idxs, iou_threshold):\n \"\"\"\n Same as torchvision.ops.boxes.batched_nms, but safer.\n \"\"\"\n assert boxes.shape[-1] == 4\n if len(boxes) < 40000:\n return box_ops.batched_nms(boxes, scores, idxs, iou_threshold)\n\n result_mask = scores.new_zeros(scores.size(), dtype=torch.bool)\n for id in torch.unique(idxs).cpu().tolist():\n mask = (idxs == id).nonzero(as_tuple=False).view(-1)\n keep = nms(boxes[mask], scores[mask], iou_threshold)\n result_mask[mask[keep]] = True\n keep = result_mask.nonzero(as_tuple=False).view(-1)\n keep = keep[scores[keep].argsort(descending=True)]\n return keep\n\n\ndef generalized_batched_nms(\n boxes, scores, idxs, iou_threshold, score_threshold=0.001, 
nms_type=\"normal\"\n):\n assert boxes.shape[-1] == 4\n\n if nms_type == \"normal\":\n keep = batched_nms(boxes, scores, idxs, iou_threshold)\n else:\n raise NotImplementedError('NMS type not implemented: \"{}\"'.format(nms_type))\n\n return keep\n\n\ndef bboxes_iou(bboxes_a, bboxes_b, xyxy=True):\n \"\"\"Calculate the Intersection of Unions (IoUs) between bounding boxes.\n IoU is calculated as a ratio of area of the intersection\n and area of the union.\n\n Args:\n bbox_a (array): An array whose shape is :math:`(N, 4)`.\n :math:`N` is the number of bounding boxes.\n The dtype should be :obj:`numpy.float32`.\n bbox_b (array): An array similar to :obj:`bbox_a`,\n whose shape is :math:`(K, 4)`.\n The dtype should be :obj:`numpy.float32`.\n Returns:\n array:\n An array whose shape is :math:`(N, K)`. \\\n An element at index :math:`(n, k)` contains IoUs between \\\n :math:`n` th bounding box in :obj:`bbox_a` and :math:`k` th bounding \\\n box in :obj:`bbox_b`.\n\n from: https://github.com/chainer/chainercv\n \"\"\"\n if bboxes_a.shape[1] != 4 or bboxes_b.shape[1] != 4:\n raise IndexError\n\n if xyxy:\n tl = torch.max(bboxes_a[:, None, :2], bboxes_b[:, :2])\n br = torch.min(bboxes_a[:, None, 2:], bboxes_b[:, 2:])\n area_a = torch.prod(bboxes_a[:, 2:] - bboxes_a[:, :2], 1)\n area_b = torch.prod(bboxes_b[:, 2:] - bboxes_b[:, :2], 1)\n else:\n tl = torch.max(\n (bboxes_a[:, None, :2] - bboxes_a[:, None, 2:] / 2),\n (bboxes_b[:, :2] - bboxes_b[:, 2:] / 2),\n )\n br = torch.min(\n (bboxes_a[:, None, :2] + bboxes_a[:, None, 2:] / 2),\n (bboxes_b[:, :2] + bboxes_b[:, 2:] / 2),\n )\n\n area_a = torch.prod(bboxes_a[:, 2:], 1)\n area_b = torch.prod(bboxes_b[:, 2:], 1)\n en = (tl < br).type(tl.type()).prod(dim=2)\n area_i = torch.prod(br - tl, 2) * en # * ((tl < br).all())\n\n return area_i / (area_a[:, None] + area_b - area_i)\n\n\ndef decode_predictions(\n input, anchors, image_size, num_classes, num_anchors, is_train=False\n):\n bs = input.size(0) # batch_size\n in_h = input.size(2) # input_height\n in_w = input.size(3) # input_weight\n stride_h = image_size[1] / in_h\n stride_w = image_size[0] / in_w\n bbox_attrs = 1 + 4 + num_classes\n prediction = (\n input.view(bs, num_anchors, bbox_attrs, in_h, in_w)\n .permute(0, 1, 3, 4, 2)\n .contiguous()\n )\n\n # Get outputs\n scaled_anchors = [(a_w, a_h) for a_w, a_h in anchors]\n x = torch.sigmoid(prediction[..., 0]) # Center x\n y = torch.sigmoid(prediction[..., 1]) # Center y\n w = prediction[..., 2] # Width\n h = prediction[..., 3] # Height\n # conf = torch.sigmoid(prediction[..., 4]) # Conf\n # pred_cls = torch.sigmoid(prediction[..., 5:]) # Cls pred.\n pred_confs = prediction[..., 4] # Conf\n pred_cls = prediction[..., 5:] # Cls pred.\n\n FloatTensor = torch.cuda.FloatTensor if x.is_cuda else torch.FloatTensor\n LongTensor = torch.cuda.LongTensor if x.is_cuda else torch.LongTensor\n # Calculate offsets for each grid\n grid_x = (\n torch.linspace(0, in_w - 1, in_w)\n .repeat(in_h, 1)\n .repeat(bs * num_anchors, 1, 1)\n .view(x.shape)\n .type(FloatTensor)\n )\n grid_y = (\n torch.linspace(0, in_h - 1, in_h)\n .repeat(in_w, 1)\n .t()\n .repeat(bs * num_anchors, 1, 1)\n .view(y.shape)\n .type(FloatTensor)\n )\n # Calculate anchor w, h\n anchor_w = FloatTensor(scaled_anchors).index_select(1, LongTensor([0]))\n anchor_h = FloatTensor(scaled_anchors).index_select(1, LongTensor([1]))\n anchor_w = anchor_w.repeat(bs, 1).repeat(1, 1, in_h * in_w).view(w.shape)\n anchor_h = anchor_h.repeat(bs, 1).repeat(1, 1, in_h * in_w).view(h.shape)\n # Add offset and scale 
with anchors\n pred_boxes = prediction[..., :4].clone()\n pred_boxes[..., 0] = x.data + grid_x\n pred_boxes[..., 1] = y.data + grid_y\n pred_boxes[..., 2] = torch.exp(w.data) * anchor_w\n pred_boxes[..., 3] = torch.exp(h.data) * anchor_h\n pred_boxes[..., 0] *= stride_w\n pred_boxes[..., 1] *= stride_h\n pred_boxes = pred_boxes.data\n if is_train:\n return pred_confs, x, y, w, h, pred_cls, pred_boxes\n else:\n pred_confs = torch.sigmoid(pred_confs)\n pred_cls = torch.sigmoid(pred_cls)\n\n pred_boxes = pred_boxes.detach()\n pred_confs = pred_confs.detach()\n pred_cls = pred_cls.detach()\n\n output = torch.cat(\n (\n pred_boxes.view(bs, -1, 4),\n pred_confs.view(bs, -1, 1),\n pred_cls.view(bs, -1, num_classes),\n ),\n -1,\n )\n return output\n\n\ndef postprocess(\n prediction, num_classes, conf_thre=0.7, nms_thre=0.5, nms_type=\"normal\"\n):\n \"\"\"\n Postprocess for the output of YOLO model\n perform box transformation, specify the class for each detection,\n and perform class-wise non-maximum suppression.\n Args:\n prediction (torch tensor): The shape is :math:`(N, B, 4)`.\n :math:`N` is the number of predictions,\n :math:`B` the number of boxes. The last axis consists of\n :math:`xc, yc, w, h` where `xc` and `yc` represent a center\n of a bounding box.\n num_classes (int):\n number of dataset classes.\n conf_thre (float):\n confidence threshold ranging from 0 to 1,\n which is defined in the config file.\n nms_thre (float):\n IoU threshold of non-max suppression ranging from 0 to 1.\n\n Returns:\n output (list of torch tensor):\n\n \"\"\"\n box_corner = prediction.new(prediction.shape)\n box_corner[:, :, 0] = prediction[:, :, 0] - prediction[:, :, 2] / 2\n box_corner[:, :, 1] = prediction[:, :, 1] - prediction[:, :, 3] / 2\n box_corner[:, :, 2] = prediction[:, :, 0] + prediction[:, :, 2] / 2\n box_corner[:, :, 3] = prediction[:, :, 1] + prediction[:, :, 3] / 2\n prediction[:, :, :4] = box_corner[:, :, :4]\n\n output = [None for _ in range(len(prediction))]\n for i, image_pred in enumerate(prediction):\n\n # If none are remaining => process next image\n if not image_pred.size(0):\n continue\n # Get score and class with highest confidence\n class_conf, class_pred = torch.max(\n image_pred[:, 5 : 5 + num_classes], 1, keepdim=True\n )\n\n conf_mask = (image_pred[:, 4] * class_conf.squeeze() >= conf_thre).squeeze()\n # Detections ordered as (x1, y1, x2, y2, obj_conf, class_conf, class_pred)\n detections = torch.cat((image_pred[:, :5], class_conf, class_pred.float()), 1)\n detections = detections[conf_mask]\n if not detections.size(0):\n continue\n\n confidence = detections[:, 4] * detections[:, 5]\n nms_out_index = generalized_batched_nms(\n detections[:, :4],\n confidence,\n detections[:, -1],\n nms_thre,\n nms_type=nms_type,\n )\n detections[:, 4] = confidence / detections[:, 5]\n\n detections = detections[nms_out_index]\n\n # Iterate through all predicted classes\n unique_labels = detections[:, -1].unique()\n\n for c in unique_labels:\n # Get the detections with the particular class\n detections_class = detections[detections[:, -1] == c]\n if output[i] is None:\n output[i] = detections_class\n else:\n output[i] = torch.cat((output[i], detections_class))\n return output\n\n\ndef decode_outputs(\n outputs,\n arch,\n image_size,\n num_classes,\n conf_thre=0.7,\n nms_thre=0.5,\n):\n anchors_dict = {\n \"yolov3\": [\n [[116, 90], [156, 198], [373, 326]],\n [[30, 61], [62, 45], [42, 119]],\n [[10, 13], [16, 30], [33, 23]],\n ],\n \"yolov4\": [\n [[142, 110], [192, 243], [459, 401]],\n [[36, 75], 
[76, 55], [72, 146]],\n [[12, 16], [19, 36], [40, 28]],\n ],\n }\n anchors = anchors_dict[arch]\n predictions = [\n decode_predictions(\n out,\n a,\n image_size,\n num_classes,\n len(a),\n is_train=False,\n )\n for out, a in zip(outputs, anchors)\n ]\n predictions = torch.cat(predictions, 1)\n detections = postprocess(predictions, num_classes, conf_thre, nms_thre)\n return detections\n","repo_name":"megvii-research/Sparsebit","sub_path":"examples/post_training_quantization/coco2017/yolo_series/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9422,"program_lang":"python","lang":"en","doc_type":"code","stars":308,"dataset":"github-code","pt":"81"} +{"seq_id":"35365733546","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport requests\n\nproxies = {\"http\": \"http://127.0.0.1:8888\", \"https\": \"http://127.0.0.1:8888\", }\n\nr = requests.get('http://www.baidu.com', proxies=proxies)\n\nprint(r.text)\n","repo_name":"magicnian/python_image_spider","sub_path":"flasklearn/fiddlertest.py","file_name":"fiddlertest.py","file_ext":"py","file_size_in_byte":218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35523483040","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# This Python file adds a POS tag to each token in the tokenized NYTimes documents\n# It also adds stop-word dummies and stems to the NYTimes vocabulary\n\nimport pandas as pd\nimport numpy as np\nfrom nltk import pos_tag\nfrom nltk.corpus import stopwords\nfrom nltk.stem.porter import PorterStemmer\n\n# Import NYTimes tokens and vocabulary\ntokens = pd.read_csv(\"tokenized_nyt.csv\", index_col=[0,1,2])\nvocab = pd.read_csv(\"vocab_nyt.csv\", index_col=0)\n\n# Adjust data types of tokens and terms\ntokens[\"token_str\"] = tokens[\"token_str\"].astype(\"str\")\ntokens[\"term_str\"] = tokens[\"term_str\"].astype(\"str\")\nvocab[\"term_str\"] = vocab[\"term_str\"].astype(\"str\")\n\n# Add stop-word dummies to vocabulary\nsw = pd.DataFrame(stopwords.words('english'), columns=['term_str'])\nsw = sw.reset_index().set_index('term_str')\nsw.columns = ['dummy']\nsw.dummy = 1\nvocab[\"stop\"] = vocab[\"term_str\"].map(sw[\"dummy\"])\nvocab[\"stop\"] = vocab[\"stop\"].fillna(0).astype(\"int\")\n\n# Add stems to vocabulary\nstemmer = PorterStemmer()\nvocab[\"stem\"] = vocab[\"term_str\"].astype(\"str\").apply(stemmer.stem)\n\n# Generate part-of-speech tags for each token in a sentence\ndef get_pos(sent):\n try:\n pos_list = pos_tag(sent)\n except:\n pos_list = [(\"~\",\"~\")]\n pos = pd.Series(pos_list).to_frame()\n return pos\n\n# Generate the part-of-speeches tags for the tokenized NYTimes documents\nPOS = tokens.groupby([\"doc_num\", \"sent_num\"]).token_str.apply(get_pos)\nPOS.index.names = [\"doc_num\", \"sent_num\", \"token_num\"]\nPOS.columns = [\"pos\"]\n\n# Add POS tags to the tokens table\ntokens.loc[tokens[\"punc\"]==0, \"pos_tuple\"] = POS[\"pos\"]\ntokens.loc[tokens[\"punc\"]==0, \"pos\"] = POS[\"pos\"].apply(lambda x: x[1])\n\n# Export the new vocab and tokens data frames\ntokens.to_csv(\"tokens_nltk.csv\", index=True)\nvocab.to_csv(\"vocab_nltk.csv\", index=True)\n","repo_name":"yangwudi398/DS5999-Final-Project","sub_path":"Text Representation/02_nltk_annotation.py","file_name":"02_nltk_annotation.py","file_ext":"py","file_size_in_byte":1842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19584009266","text":"#!/usr/bin/env python\n\n\n#--------Include 
modules---------------\nimport rospy\nfrom visualization_msgs.msg import Marker, MarkerArray\nfrom nav_msgs.msg import Odometry, OccupancyGrid\nfrom geometry_msgs.msg import PointStamped\nfrom getfrontier import getfrontier\nfrom tf.transformations import euler_from_quaternion\nimport numpy as np\n\n#-----------------------------------------------------\n# Subscribers' callbacks------------------------------\nmapData=OccupancyGrid()\n\nrobot_states = np.zeros((2,3))\ngoal_point = np.zeros((2,2))\norigem_map = [0.0,0.0]\nwidth = 0\nheight = 0\nresol = 0\n\n\ndef mapCallBack(data):\n\tglobal mapData, width,height,resol,origem_map\n\tresol = data.info.resolution\n\twidth = data.info.width\n\theight = data.info.height\n\torigem_map = [data.info.origin.position.x,data.info.origin.position.y]\n\tmapData=data\n\t\n\n\t\n\n# Node----------------------------------------------\ndef node():\n\t\tglobal mapData, robot_states, width,height,resol,origem_map\n\t\texploration_goal=PointStamped()\n\t\tgoal = PointStamped()\n\t\trospy.init_node('FrontierDetector', anonymous=False)\n\t\t# map_topic= rospy.get_param('~map_topic','/robot_1/map')\n\t\tmap_topic = '/map'\n\t\trospy.Subscriber(map_topic, OccupancyGrid, mapCallBack)\n\n\t\ttargetspub = rospy.Publisher('/frontier_points', PointStamped, queue_size=10)\n\t\t# pub = rospy.Publisher('shapes', Marker, queue_size=10)\n\t\tpub = rospy.Publisher('/frontier_markers', MarkerArray, queue_size=1)\n\n\t\t# wait until map is received, when a map is received, mapData.header.seq will not be < 1\n\t\twhile mapData.header.seq<1 or len(mapData.data)<1:\n\t\t\tpass\n\t\t \t\n\t\trate = rospy.Rate(20)\t\t\t\n\t\t\n#-------------------------------OpenCV frontier detection------------------------------------------\n\t\twhile not rospy.is_shutdown():\n\t\t\tfrontiers=getfrontier(mapData)\n\t\t\tpointArray=MarkerArray()\n\t\t\tfor i in range(len(frontiers)):\n\t\t\t\tpoints=Marker()\n\t\t\t\t#Set the frame ID and timestamp. See the TF tutorials for information on these.\n\t\t\t\tpoints.header.frame_id=mapData.header.frame_id\n\t\t\t\tpoints.header.stamp=rospy.Time.now()\n\n\t\t\t\t# points.ns= \"markers\"\n\t\t\t\tpoints.id = i\n\n\t\t\t\tpoints.type = Marker.SPHERE\n\t\t\t\tpoints.scale.x = 0.1\n\t\t\t\tpoints.scale.y = 0.1\n\t\t\t\tpoints.scale.z = 0.1\n\t\t\t\t#Set the marker action. 
Options are ADD, DELETE, and new in ROS Indigo: 3 (DELETEALL)\n\t\t\t\tpoints.action = Marker.ADD;\n\n\t\t\t\tpoints.pose.orientation.w = 1.0;\n\t\t\t\tpoints.scale.x=points.scale.y=0.1;\n\t\t\t\tpoints.color.r = 255.0/255.0\n\t\t\t\tpoints.color.g = 0.0/255.0\n\t\t\t\tpoints.color.b = 0.0/255.0\n\t\t\t\tpoints.color.a=1;\n\t\t\t\tpoints.lifetime = rospy.Duration();\n\n\n\t\t\t\tx=frontiers[i]\n\n\t\t\t\tpoints.pose.position.x = x[0]\n\t\t\t\tpoints.pose.position.y = x[1]\n\t\t\t\tpoints.pose.position.z = 0\n\n\t\t\t\texploration_goal.header.frame_id= mapData.header.frame_id\n\t\t\t\texploration_goal.header.stamp=rospy.Time(0)\n\t\t\t\texploration_goal.point.x=x[0]\n\t\t\t\texploration_goal.point.y=x[1]\n\t\t\t\texploration_goal.point.z=0\t\n\n\t\t\t\ttargetspub.publish(exploration_goal)\n\t\t\t\t# points.points=[exploration_goal.point]\n\n\t\t\t\tpointArray.markers.append(points)\n\t\t\t\t\n\n\n\t\t\tpub.publish(pointArray) \n\t\t\trate.sleep()\n\t\t \t\n\t\t\n\n\t \t#rate.sleep()\n\n\n\n#_____________________________________________________________________________\n\nif __name__ == '__main__':\n\ttry:\n\t\tnode()\n\texcept rospy.ROSInterruptException:\n\t\tpass\n \n \n \n \n","repo_name":"victorRFmiranda/autonomous_exploration","sub_path":"autonomous_exploration/scripts/frontier/frontier_opencv.py","file_name":"frontier_opencv.py","file_ext":"py","file_size_in_byte":3262,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"23122168954","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\n\ncmd = 'mvn clean tomcat7:run-war'\n\npid = os.popen(\"ps -ef | grep 'Tickets-Server' | grep -v grep \"\n               \"| awk '{ print $2 }'\").read()\n\nif len(pid) > 0:\n    os.system(\"sudo kill %s\" % pid)\n\nos.system(\"nohup %s &\" % cmd)\n","repo_name":"saltedFish62/Tickets-Server","sub_path":"deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"70524639945","text":"import sys\nimport logging\n\nimport cPickle\nimport os\nimport unittest\n\nfrom itertools import izip\nimport numpy as np\n\nfrom core.flixdata import FlixData\nfrom core.flixdata_subsampler import FlixDataSubsampler\n\nfrom core.ratings import Ratings\n\nfrom solvers.kFoldsCrossValidator import KFolds\nfrom solvers.svd.svd_solver import SvdSolver\nfrom solvers.svd.svd_solver import GradientDescentMethod\n\nflix_data_root = \"../data/arrays/\"\nsaved_data = \"cross_valid_svd.pkl\"\n\n\nclass TestSvd(object):\n\n    def __init__(self, k, learning_rate, gamma, epsilon=1e-7,\n                 solver=GradientDescentMethod.stochastic, maxiters=10000, include_bias=True):\n        self.k = k\n        self.learning_rate = learning_rate\n        self.gamma = gamma\n        self.epsilon = epsilon\n        self.solver = solver\n        self.maxiters = maxiters\n        self.include_bias = include_bias\n\n    def setUp(self):\n        if os.path.isfile(saved_data):\n            logging.info(\"Trying to read existing data from \" + saved_data)\n            with open(saved_data) as f:\n                self.ratings, self.kfolds = cPickle.load(f)\n            print (\"Done!\")\n        else:\n            logging.info(\"Couldn't find \" + saved_data + \", regenerating\")\n            fd = FlixData(flix_data_root)\n            logging.info(\"total ratings: %d\" % fd.numratings)\n            #self.ratings = FlixDataSubsampler.get_all(fd)\n            self.ratings = FlixDataSubsampler.random_sample_movies(fd, seed=12345, N=int(2e4), M=int(1e3))\n            self.kfolds = KFolds(self.ratings.size, 5, 12345)\n\n            with open(saved_data, \"wb\") as f:\n                
cPickle.dump((self.ratings, self.kfolds), f, cPickle.HIGHEST_PROTOCOL)\n logging.info(\"Using dataset with %d users, %d movies\" % (self.ratings.shape[0], self.ratings.shape[1]))\n\n\n def test_perform_cv(self):\n logging.info(\"######################################\")\n logging.info(\"Starting cross validation with params:\")\n logging.info(\" k: %d\" % self.k)\n logging.info(\" learn: %f\" % self.learning_rate)\n logging.info(\" gamma: %f\" % self.gamma)\n logging.info(\" epsilon: %e\" % self.epsilon)\n logging.info(\" solver: %s\" % str(self.solver))\n logging.info(\" maxiters: %d\" % self.maxiters)\n logging.info(\" bias: %s\" % str(self.include_bias))\n\n errs = list()\n for fold in range(self.kfolds.k):\n logging.info(\"Starting fold %d \" % fold)\n rmse = self.perform_one_fold(fold)\n errs.append(rmse)\n logging.info(\"Finished fold %d with rmse: %e\" % (fold, rmse))\n\n logging.info(\"Finished Cross Validation\")\n logging.info(\"RMS: %s\" % str(errs))\n logging.info(\"NaNs: %d\" % np.isnan(rmse).sum())\n err_summary = (np.nanmean(errs), np.nanmax(errs), np.nanmin(errs), np.nanstd(errs))\n\n logging.info(\"mean: %e, max: %e, min: %e, sd: %e\" % err_summary)\n\n return err_summary\n\n def perform_one_fold(self, k):\n logging.info(\"Splitting data into train, test\")\n test, train = self.kfolds.get(k)\n test_set = self.ratings.get_index_split(test)\n train_set = self.ratings.get_index_split(train)\n logging.info(\"Training SvdNeighbourSolver...\")\n svd = SvdSolver(self.k,\n learning_rate=self.learning_rate,\n epsilon=self.epsilon,\n solver=self.solver,\n maxiters=self.maxiters,\n gamma=self.gamma,\n include_bias=self.include_bias)\n\n svd.train(train_set)\n\n logging.info(\"predicting %d testratings: \" % test_set.get_coo_matrix().nnz)\n pred = svd.predict(test_set)\n logging.info( \"Done!\")\n logging.info(\"len(pred): \" + str(len(pred)))\n\n y = test_set.get_coo_matrix().data\n\n logging.info(\"%d / %d are None (%f)\" % ((pred==None).sum(), pred.shape[0], (pred==None).mean()))\n logging.info(\"%d / %d are NaN (%f)\" % ((np.isnan(pred)).sum(), pred.shape[0], (np.isnan(pred)).mean()))\n logging.info([(_, __) for _, __ in izip(y, pred)])\n\n logging.info(\"Test SSE: %f\" % np.nansum((y-pred)**2))\n logging.info(\"Test MSE: %f\" % np.nanmean((y-pred)**2))\n rmse = np.sqrt(np.nanmean((y-pred)**2))\n logging.info(\"Test RMS: %f\" % rmse)\n logging.info(\"mean of error: %f\" % np.nanmean(y-pred))\n logging.info(\"SD of error: %f\" % np.nanstd(y-pred))\n\n return rmse\n\n\nif __name__ == '__main__':\n logging.basicConfig(filename=\"svd_cross_validate_grid.log\",\n format='%(asctime)s %(levelname)s %(message)s',\n level=logging.DEBUG)\n logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))\n\n logging.info(\"*******************************************\")\n logging.info(\"Starting run of Cross Validation\")\n\n ks = [20, 25, 30, 40, 50]\n gammas = [0.01, 0.03, 0.1, 0.2]\n\n err_summaries = []\n\n for k in ks:\n for gamma in gammas:\n lrn = 0.005\n\n test_svd = TestSvd(k, lrn, gamma)\n test_svd.setUp()\n es = test_svd.test_perform_cv()\n err_summaries.append(es)\n\n logging.info(\"Finished run of Cross Validation\")\n\n i = 0\n for k in ks:\n for gamma in gammas:\n logging.info(\" error summaries:\")\n logging.info(\" k: %d, gamma: %d -- \" % (k, gamma))\n logging.info(\" mean: %e, max: %e, min: %e, sd: %e\" % err_summaries[i])\n i += 1\n\n 
logging.info(\"*******************************************\")\n\n","repo_name":"andrewkho/netflix-play","sub_path":"bin/svd_cross_validate.py","file_name":"svd_cross_validate.py","file_ext":"py","file_size_in_byte":5627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21531157700","text":"#!/usr/bin/env python3\nfrom typing import Dict, Union\n\nfrom dataclass_utils import dataclass_from_dict\nfrom modules.common.component_state import BatState\nfrom modules.common.component_type import ComponentDescriptor\nfrom modules.common.fault_state import ComponentInfo\nfrom modules.common.modbus import ModbusTcpClient_, ModbusDataType\nfrom modules.common.simcount import SimCounter\nfrom modules.common.store import get_bat_value_store\nfrom modules.devices.sma_sunny_boy.config import SmaSunnyBoySmartEnergyBatSetup\n\n\nclass SunnyBoySmartEnergyBat:\n SMA_INT32_NAN = 0xFFFFFFFF # SMA uses this value to represent NaN\n\n def __init__(self,\n device_id: int,\n component_config: Union[Dict, SmaSunnyBoySmartEnergyBatSetup],\n tcp_client: ModbusTcpClient_) -> None:\n self.__device_id = device_id\n self.component_config = dataclass_from_dict(SmaSunnyBoySmartEnergyBatSetup, component_config)\n self.__tcp_client = tcp_client\n self.sim_counter = SimCounter(self.__device_id, self.component_config.id, prefix=\"speicher\")\n self.store = get_bat_value_store(self.component_config.id)\n self.component_info = ComponentInfo.from_component_config(self.component_config)\n\n def update(self) -> None:\n self.store.set(self.read())\n\n def read(self) -> BatState:\n unit = 3\n soc = self.__tcp_client.read_holding_registers(30845, ModbusDataType.UINT_32, unit=unit)\n current = self.__tcp_client.read_holding_registers(30843, ModbusDataType.INT_32, unit=unit)/-1000\n voltage = self.__tcp_client.read_holding_registers(30851, ModbusDataType.INT_32, unit=unit)/100\n\n if soc == self.SMA_INT32_NAN:\n # If the storage is empty and nothing is produced on the DC side, the inverter does not supply any values.\n soc = 0\n power = 0\n else:\n power = current*voltage\n imported, exported = self.sim_counter.sim_count(power)\n\n return BatState(\n power=power,\n soc=soc,\n imported=imported,\n exported=exported\n )\n\n\ncomponent_descriptor = ComponentDescriptor(configuration_factory=SmaSunnyBoySmartEnergyBatSetup)\n","repo_name":"snaptec/openWB","sub_path":"packages/modules/devices/sma_sunny_boy/bat_smart_energy.py","file_name":"bat_smart_energy.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","stars":322,"dataset":"github-code","pt":"81"} +{"seq_id":"41262982717","text":"#\n# @lc app=leetcode id=78 lang=python\n#\n# [78] Subsets\n#\n\n# @lc code=start\nclass Solution(object):\n def subsets(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n result, candidate = [], []\n\n def dfs(idx):\n result.append(candidate[:])\n if idx == len(nums):\n return\n for i in range(idx, len(nums)):\n candidate.append(nums[i])\n dfs(i+1)\n candidate.pop()\n\n dfs(0)\n return result\n \n# @lc code=end\n\n","repo_name":"WuLC/LeetCode","sub_path":"Algorithm/Python/78.subsets.py","file_name":"78.subsets.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"81"} +{"seq_id":"40449674744","text":"import json, warnings, os\nfrom typing import List, Dict, Union, Callable, Any\nfrom .chattool import Chat\nimport tqdm\n\ndef load_chats( 
checkpoint:str):\n \"\"\"Load chats from a checkpoint file\n \n Args:\n checkpoint (str): path to the checkpoint file\n\n Returns:\n list: chats\n \"\"\"\n # if the checkpoint file does not exist, return empty list\n if not os.path.exists(checkpoint):\n # warnings.warn(f\"checkpoint file {checkpoint} does not exist\")\n return []\n # load chats from the checkpoint file\n with open(checkpoint, 'r', encoding='utf-8') as f:\n txts = f.read().strip().split('\\n')\n ## empty file\n if len(txts) == 1 and txts[0] == '': return []\n # get the chatlogs\n logs = [json.loads(txt) for txt in txts]\n chat_size, chatlogs = 1, [None]\n for log in logs:\n idx = log['index']\n if idx >= chat_size: # extend chatlogs\n chatlogs.extend([None] * (idx - chat_size + 1))\n chat_size = idx + 1\n chatlogs[idx] = log['chat_log']\n # check if there are missing chatlogs\n if None in chatlogs:\n warnings.warn(f\"checkpoint file {checkpoint} has unfinished chats\")\n # return Chat class\n return [Chat(chat_log) if chat_log is not None else None for chat_log in chatlogs]\n\ndef process_chats( data:List[Any]\n , data2chat:Callable[[Any], Chat]\n , checkpoint:str\n , clearfile:bool=False\n , isjupyter:bool=False):\n \"\"\"Process chats and save to a checkpoint file(non-asyncio version)\n \n Args:\n data (List[Any]): data to be processed\n data2chat (Callable[[Any], Chat]): function to convert data to Chat\n checkpoint (str): path to the checkpoint file\n clearfile (bool, optional): whether to clear the checkpoint file. Defaults to False.\n isjupyter (bool, optional): whether to use tqdm in Jupiter Notebook. Defaults to False.\n\n Returns:\n list: chats or last messages of chats\n \"\"\"\n if clearfile and os.path.exists(checkpoint):\n # Warning: You are about to delete the checkpoint file\n os.system(f\"rm {checkpoint}\")\n ## load chats from the checkpoint file\n chats = load_chats(checkpoint)\n if len(chats) > len(data):\n warnings.warn(f\"checkpoint file {checkpoint} has more chats than the data to be processed\")\n return chats[:len(data)]\n chats.extend([None] * (len(data) - len(chats)))\n ## process chats\n tq = tqdm.tqdm if not isjupyter else tqdm.notebook.tqdm\n for i in tq(range(len(data))):\n if chats[i] is not None: continue\n chat = data2chat(data[i])\n chat.save(checkpoint, mode='a', index=i)\n chats[i] = chat\n return chats","repo_name":"cubenlp/ChatTool","sub_path":"chattool/checkpoint.py","file_name":"checkpoint.py","file_ext":"py","file_size_in_byte":2738,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"81"} +{"seq_id":"7613993378","text":"#implement a function to check if a linked list is a palindrome.\n\n#slow pointer and fast pointer\n#go to the middle of a list - fast traverses twice as fast, so by the end of linked list slow pointer will be in the middle\n#reverse the second half\n#compare the first and second half nodes\n\n\n\nimport unittest\n\ndef is_palindrome(head):\n\n #start at head\n fast = slow = head\n\n #get the mid node, fast traverses twice as fast\n while fast and fast.next:\n fast = fast.next.next\n slow = slow.next\n\n #reverse the second half\n #nodes are in place, the pointers reverse\n prev = None\n while slow:\n #nxt is the next node after slow\n nxt = slow.next\n #slow.next pointer reverses to prev\n slow.next = prev\n #prev node moves to the place or orig slow\n prev = slow\n #slow node moves to the place of nxt\n slow = nxt\n\n #check if reversed part is same as first part\n while prev:\n if prev.data != head.data:\n return False\n 
prev = prev.next\n head = head.next\n return True\n\n\n#O(n) time O(1) extra space\n\nclass Node():\n def __init__(self, data, next=None):\n self.data, self.next = data, next\n\n def __str__(self):\n string = str(self.data)\n if self.next:\n string += ',' + str(self.next)\n return string\n\nclass Test(unittest.TestCase):\n def test_palindrome(self):\n list1 = Node(10)\n self.assertTrue(is_palindrome(list1))\n list2 = Node(10,Node(10))\n self.assertTrue(is_palindrome(list2))\n list3 = Node(10,Node(20))\n self.assertFalse(is_palindrome(list3))\n list4 = Node(10,Node(70,Node(30,Node(70,Node(10)))))\n self.assertTrue(is_palindrome(list4))\n \n \n\nunittest.main()","repo_name":"hhchong/linked-lists-practice","sub_path":"palindrome.py","file_name":"palindrome.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7315992402","text":"from django.urls import path\nfrom django.conf.urls import url\nfrom . import views\n\napp_name = 'shop'\n\nurlpatterns = [\n path('', views.product_list, name='product_list'),\n path('services/', views.get_services, name='get_services'),\n path('send_message/', views.send_message, name='send_message'),\n path('send_service/', views.send_service, name='send_service'),\n path('/', views.product_detail, name='product_detail'),\n url(r'^(?P[-\\w]+)/$',\n views.product_list,\n name='product_list_by_category'),\n url(r'^(?P\\d+)/(?P[-\\w]+)/$',\n views.product_detail,\n name='product_detail'),\n]\n","repo_name":"zhanpeissov3/DiplomaFinal","sub_path":"diplom/apps/shop/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14493816325","text":"#!/usr/bin/env python3\nimport sys, os\n\n\ndef calc_error(original_file, svd_file):\n original_numbers = list()\n svd_numbers = list()\n with open(original_file, \"r\") as fp:\n count = 0\n for line in fp:\n if count < 3: \n if \"#\" not in line:\n count += 1\n continue\n else:\n for number in line.strip().split():\n original_numbers.append(int(number))\n with open(svd_file, \"r\") as fp:\n count = 0\n for line in fp:\n if count < 3: \n if \"#\" not in line:\n count += 1\n continue\n else:\n for number in line.strip().split():\n svd_numbers.append(int(number))\n \n differences = list()\n\n for orig, svd in zip(original_numbers, svd_numbers):\n differences.append(abs(orig-svd))\n\n total_diff = 0\n\n for diff in differences:\n total_diff += diff\n\n average_diff = total_diff / len(differences)\n\n print(str(average_diff))\n\n\ndef main(argv):\n original_file = argv[0]\n\n svd_file = argv[1]\n\n calc_error(original_file, svd_file)\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","repo_name":"esarver/svd-pca","sub_path":"tools/error_calc.py","file_name":"error_calc.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35672796955","text":"import sys\nsys.stdout = open(\"output.txt\",\"w\")\n\nimport csv\n\nfield_names = [\"Name\",\"Author\",\"Publisher\",\"Price\",\"Category\"]\n\nbook1 = {\"Name\": \"Computer Programming, Part 1\",\"Author\": \"Tamim Shahriar Subeen\",\"Publisher\": \"Onnorokom Prokashoni\",\"Price\": \"240.00\",\"Category\": \"Programming\"}\n\nbook2 = {\"Name\": \"Computer Programming, Part 2\",\"Author\": \"Tamim Shahriar Subeen\",\"Publisher\": \"Onnorokom Prokashoni\",\"Price\": 
\"250.00\",\"Category\": \"Programming\"}\n\nbook3 = {\"Name\": \"Learn Programming with Python\",\"Author\": \"Tamim Shahriar Subeen\",\"Publisher\": \"Dimik Prokashoni\",\"Price\": \"200.00\",\"Category\": \"Programming\"}\n\nbook_list = [book1,book2,book3]\n\nwith open(\"DictBooks.csv\",\"w\") as csvf:\n\tcsv_writer = csv.DictWriter(csvf, fieldnames = field_names)\n\tcsv_writer.writeheader()\n\tcsv_writer.writerows(book_list)\n\n# with open(\"DictBooks.csv\", newline='') as csvfile:\n# \tcsv_reader = csv.reader(csvfile)\n# \t#print(csv_reader)\n# \tfor row in csv_reader:\n# \t\tprint(row)","repo_name":"FahimCC/Python","sub_path":"Web Crawling/CSVfile using dict.py","file_name":"CSVfile using dict.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72094615304","text":"opposite_directions = {'North': 'South', 'South': 'North', 'East': 'West', 'West': 'East'}\r\n\r\n\r\ndef remove_opposites(direction):\r\n result = []\r\n list_length = len(direction)\r\n i = 0\r\n while i != list_length:\r\n item = direction[i]\r\n if i == list_length-1:\r\n result.append(item)\r\n break\r\n else:\r\n next_item = direction[i+1]\r\n if next_item == opposite_directions[item]:\r\n i += 2\r\n else:\r\n result.append(item)\r\n i = i+1\r\n return result\r\n\r\n\r\ndef check_opposites(list):\r\n for item in range(len(list)-1):\r\n first_item = list[item]\r\n next_item = list[item+1]\r\n if next_item == opposite_directions[first_item]:\r\n return True\r\n return False\r\n\r\n\r\ndirections = ['East', 'West', 'North', 'South', 'West', 'North', 'East', 'West']\r\nresult = directions\r\nwhile check_opposites(result):\r\n result = remove_opposites(result)\r\n\r\nprint(result)\r\n\r\n\r\n'''for item in range(list_length):\r\n if i == list_length-1:\r\n break\r\n else:\r\nwhile i != list_length:\r\n zero_item = directions[i]\r\n if i == list_length-1:\r\n #final_directions.append(zero_item)\r\n break\r\n else:\r\n first_item = directions[i + 1]\r\n if first_item != opposite_directions[zero_item]:\r\n final_directions.append(zero_item)\r\n final_directions.append(first_item)\r\n i += 1\r\n else:\r\n i += 2\r\n\r\nprint(final_directions)'''\r\n'''i = 0\r\nj = 0\r\nlist_length = len(directions)\r\nwhile j <= list_length:\r\n zero_item = directions[i]\r\n if zero_item == directions[list_length-1]:\r\n break\r\n\r\n else:\r\n first_item = directions[i+1]\r\n if first_item == opposite_directions[zero_item]:\r\n directions.remove(zero_item)\r\n directions.remove(first_item)\r\n i = 0\r\n else:\r\n i += 1\r\n j += 1\r\nprint(directions)'''\r\n\r\n","repo_name":"TanishquaBhute/PythonCodePractice","sub_path":"PythonCourse/direction.py","file_name":"direction.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11074398539","text":"número_primo = int(input(\"insira um número: \"))\r\nx = 2\r\ndivisores = []\r\nwhile x < número_primo:\r\n determinação = número_primo % x\r\n if determinação == 0:\r\n divisores.append(x)\r\n x = x + 1\r\n\r\nif len(divisores) > 0:\r\n print(f\"Não é primo, e seus divisores são {divisores}\")\r\nelse:\r\n print(\"É primo!\")\r\n","repo_name":"Eduardo-Kaluf/exercicios-python-brasil","sub_path":"estrutura_de_repeticao/ex_22.py","file_name":"ex_22.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} 
+{"seq_id":"34450592686","text":"import random\nimport datetime\n\nprint(\"Lets figure how to make it work out in our furniture business\")\n\n\nfurniture = {\n \"staffs\" : [\"Secretary\", \"GateMen\", \"Cleaners\", \"Manager\", \"Director\", \"Labourer\"],\n \"source\" : [\"Forest\", \"Raw Materials\", \"Tools\"],\n \"salary\" : [\"Monthly\", \"Weekly\", \"Daily\"],\n \"age\" : [\"from 18years\", \"To your strength\"],\n \"duration\" : [\"Contract\", \"Full Staff\"]\n }\nwhile True:\n question = input().split(\"_\")\n for fun in question:\n if fun in furniture.keys():\n print(random.choice(furniture[fun]))\n break\n else:\n print(\"More ideas are welcome\")\n","repo_name":"everybees/parsel_tongue","sub_path":"adesuwa/chat_box_dict.py","file_name":"chat_box_dict.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"} +{"seq_id":"42131602541","text":"from tkinter import W\n\n\nclass Solution:\n def search(self, nums: List[int], target: int) -> int:\n # [4,5,6,7,0,1,2] tar = 0\n # [1,2,3,4,5,6]\n l, r = 0, len(nums)-1\n\n while l <= r:\n mid = (l+r) // 2\n\n if nums[mid] == target:\n return mid\n\n # Determine which side of the array is sorted then move accordingly\n if nums[l] <= nums[mid]:\n # NOW WE chech whether or not does our target value falls bettwen the boundaris of l<= targ <= mid\n if nums[l] <= target <= nums[mid]:\n # in other words our target falls into the left sorted portion\n r = mid - 1\n else:\n l = mid + 1\n else:\n # if targ is mid >= targ >= right\n if nums[mid] <= target <= nums[r]:\n l = mid + 1\n else:\n r = mid - 1\n return -1\n","repo_name":"mdiallo98/python-dataStructures-Algos","sub_path":"LeetcodeQuestions/Binary_Search/search_rotated_sorted_arrat.py","file_name":"search_rotated_sorted_arrat.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"12138586915","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 2 15:54:13 2021\n\n@author: donguk\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import proj3d\nfrom itertools import product, combinations\nfrom draw_obj_3D import *\n\nfig = plt.figure()\nax = plt.subplot(projection='3d')\n# ax.set_aspect(\"auto\")\nax.set_box_aspect((1, 1, 1))\nax.view_init(azim=15, elev=30)\n\n\ndraw_frame(ax, \"gray\")\n\n\n# draw a vector\nO = [0,0,0]\nU = [1,0,0]\nV = [0,1,0]\nW = [0,0,1]\nC = [1,1,1]\nL = [-1,0,0]\nM = [0,-1,0]\nN = [0,0,-1]\ndraw_vector_3D(ax, O, U, \"black\")\ndraw_vector_3D(ax, O, V, \"black\")\ndraw_vector_3D(ax, O, W, \"black\")\n\n# phi1 = 45\n# PHI = 54.1\n# phi2 = 0\n#Rz1=rot_mat_z(phi1)\n# RU = np.matmul(Rz1, U)\n# RV = np.matmul(Rz1, V)\n#R_euler = Euler_rot_bunge(phi1, PHI, phi2)\n#RU = np.matmul(R_euler,U)\n#RV = np.matmul(R_euler,V)\n# RU = Rz1*U\n# RV = Rz1*V\n#draw_vector_3D(ax, O, RU, \"blue\")\n#draw_vector_3D(ax, O, RV, \"cyan\")\nF = Deform_gradient()\nFU = np.matmul(F,U)\nFV = np.matmul(F,V)\nFW = np.matmul(F,W)\n\n\ndraw_vector_3D(ax, O, FU, \"red\")\ndraw_vector_3D(ax, O, FV, \"red\")\ndraw_vector_3D(ax, O, FW, \"red\")\n\n\nplt.show()","repo_name":"eastglow-zz/mypython","sub_path":"vector_plot_3D.py","file_name":"vector_plot_3D.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24167059108","text":"# This Python file uses the 
following encoding: utf-8\n\nfrom PySide6.QtCore import QObject, Property, Signal, Slot\nfrom AppData import AppData\nfrom MqttCommunicator import MqttCommunicator\n\nclient_name = \"mqtt-qml-paho-test\"\n\nclass AppController(QObject):\n messageReceived = Signal(str, str)\n\n def __init__(self, appData):\n QObject.__init__(self)\n\n self.__appData = appData\n self.__client = MqttCommunicator(client_name)\n self.__client.messageReceived.connect(self.onMessageReceived)\n self.__client.disconnected.connect(self.onDisconnected)\n\n @Slot(result = bool)\n def connectToBroker(self):\n if (self.__appData.isConnected):\n return False\n\n self.__client.connectToBroker(self.__appData.hostname, self.__appData.port)\n ret = True\n #ret = m_mqttCommunicator.connect(m_appData.hostname(), m_appData.port());\n\n self.__appData.isConnected = ret\n return ret\n\n @Slot()\n def disconnect(self):\n self.__client.disconnect()\n\n @Slot(str, result = bool)\n def subscribe_sync(self, topic):\n if (not self.__appData.isConnected):\n return False\n\n ret = self.__client.subscribe(topic)\n #ret = True\n return ret\n\n @Slot(str, str, result = bool)\n def publish_string(self, topic, message):\n if (not self.__appData.isConnected):\n return False\n\n ret = self.__client.publish_string(topic, message)\n return ret\n\n @Slot(str, str)\n def onMessageReceived(self, topic, message):\n print(\"onMessageReceived: topic: \" + topic + \" message: \" + message)\n self.messageReceived.emit(topic, message)\n\n @Slot(int)\n def onDisconnected(self, rc):\n self.__appData.isConnected = False\n","repo_name":"alex-poniz/mqtt_qml_py","sub_path":"AppController.py","file_name":"AppController.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3071976716","text":"import pandas as pd\r\nimport pickle\r\nimport numpy as np\r\nimport sklearn\r\nfrom flask import Flask,render_template , request,jsonify\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route(\"/\")\r\n\r\ndef Home():\r\n return render_template('index.html')\r\n\r\n@app.route('/predict' ,methods = ['POST','GET'])\r\ndef results():\r\n gender = request.form['gender']\r\n age = int(request.form['age'])\r\n hypertension = int(request.form['hypertension'])\r\n heart_disease = int(request.form['heart_disease'])\r\n ever_married = request.form['ever_married']\r\n work_type = request.form['work_type']\r\n Residence_type = request.form['Residence_type']\r\n avg_glucose_level = float(request.form['avg_glucose_level'])\r\n bmi = float(request.form['bmi'])\r\n smoking_status = request.form['smoking_status']\r\n\r\n # gender\r\n if \"Male\" == gender:\r\n gender_Male = 1\r\n gender_Other = 0\r\n\r\n\r\n elif gender == \"Other\":\r\n gender_Male = 0\r\n gender_Other = 1\r\n else:\r\n gender_Male=0\r\n gender_Other=0\r\n\r\n # married\r\n if ever_married==\"Yes\":\r\n ever_married_Yes = 1\r\n else:\r\n ever_married_Yes= 0\r\n\r\n # work_type\r\n if work_type=='Self-employed':\r\n work_type_Never_worked= 0\r\n work_type_Private= 0\r\n work_type_Self_employed= 1\r\n work_type_children=0\r\n elif work_type == 'Private':\r\n work_type_Never_worked = 0\r\n work_type_Private = 1\r\n work_type_Self_employed = 0\r\n work_type_children=0\r\n elif work_type ==\"children\":\r\n work_type_Never_worked = 0\r\n work_type_Private = 0\r\n work_type_Self_employed = 0\r\n work_type_children=1\r\n elif work_type==\"Never_worked\":\r\n work_type_Never_worked = 1\r\n work_type_Private = 0\r\n work_type_Self_employed 
= 0\r\n work_type_children=0\r\n else:\r\n work_type_Never_worked = 0\r\n work_type_Private = 0\r\n work_type_Self_employed = 0\r\n work_type_children=0\r\n # residence type\r\n if Residence_type ==\"Urban\":\r\n Residence_type_Urban=1\r\n else:\r\n Residence_type_Urban=0\r\n # smoking status\r\n if smoking_status == 'formerly smoked':\r\n smoking_status_formerly_smoked = 1\r\n smoking_status_never_smoked = 0\r\n smoking_status_smokes = 0\r\n elif smoking_status == 'smokes':\r\n smoking_status_formerly_smoked = 0\r\n smoking_status_never_smoked = 0\r\n smoking_status_smokes = 1\r\n elif smoking_status == \"never smoked\":\r\n smoking_status_formerly_smoked = 0\r\n smoking_status_never_smoked = 1\r\n smoking_status_smokes = 0\r\n else:\r\n smoking_status_formerly_smoked = 0\r\n smoking_status_never_smoked = 0\r\n smoking_status_smokes = 0\r\n\r\n X = np.array([[age, hypertension, heart_disease\t,avg_glucose_level,bmi ,gender_Male , gender_Other, ever_married_Yes,work_type_Never_worked,work_type_Private,work_type_Self_employed,work_type_children,Residence_type_Urban, smoking_status_formerly_smoked, smoking_status_never_smoked, smoking_status_smokes]])\r\n\r\n\r\n sc = pickle.load(open('sc.pkl','rb'))\r\n\r\n X_std = sc.transform(X)\r\n\r\n \r\n model = pickle.load(open('model_stroke_predictor.pkl','rb'))\r\n \r\n Y_prediction = model.predict(X_std)\r\n if Y_prediction==0:\r\n Y_prediction = \"NO Stroke\" \r\n else:\r\n Y_prediction = \"Stroke\"\r\n\r\n return jsonify({'Model Prediction': (Y_prediction)})\r\n\r\n \r\nif __name__ == \"__main__\":\r\n app.run(debug=True)\r\n\r\n\r\n\r\n\r\n","repo_name":"saadhash7860/Stroke-Predictor-End-to-End-Project","sub_path":"Stroke-Predictor/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19879753248","text":"import os\n\nimport requests\nfrom celery import shared_task\nfrom dotenv import load_dotenv\n\nfrom celery_app import app\n\n# from mailproj.views import send_email_view\nload_dotenv()\nurl = \"https://weatherapi-com.p.rapidapi.com/astronomy.json\"\n\n# querystring = {\"q\": \"London\"}\n\nheaders = {\n \"X-RapidAPI-Key\": os.getenv('XRAPIDAPIKEY'),\n \"X-RapidAPI-Host\": \"weatherapi-com.p.rapidapi.com\"\n}\n\n\nfrom django.core.mail import send_mail\n\n\n@shared_task\ndef get_weather_task(city_name, email_user):\n querystring = {\"q\": f\"{city_name}\"}\n response = requests.get(url, headers=headers, params=querystring)\n if response.status_code == 200:\n data = response.json()\n send_email_view_task.delay(message=data, email=email_user)\n return data\n else:\n return f\"Failed to fetch weather data for {city_name}\"\n\n\n@shared_task\ndef send_email_view_task(message, email: str):\n subject = 'Greetings from Django'\n messages = str(message)\n from_email = os.getenv('EMAIL')\n recipient_list = [email]\n send_mail(subject, messages, from_email, recipient_list, fail_silently=False)\n","repo_name":"SaskerSours/Celery_rabbitmq","sub_path":"wheather/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31037668805","text":"#name\nimport re\nfrom API_0419.common.config import config\nimport configparser\nclass Context:\n loan_id = None\n\ndef replace(data):\n p = \"#(.*?)#\"\n while re.search(p, data):\n # print(data)\n m = re.search(p, data)\n g = m.group(1)\n try:\n v = config.get('data', 
g)\n except configparser.NoOptionError as e:\n if hasattr(Context,g):\n # print('参数化的值')\n v = getattr(Context,g)\n # print(type(v),v)\n else:\n print('找不到参数化的值')\n raise e\n\n data = re.sub(p, v, data, count=1)\n # print('标ID',type(Context.loan_id),Context.loan_id)\n return data\n# replace(\"{'id':'#loan_id#','status':'4'}\")\n# replace(\"{'mobilephone':'#admin_user#','pwd':'#admin_pwd#'}\")\n","repo_name":"zenglip/python15-api-tes","sub_path":"common/context.py","file_name":"context.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14899002875","text":"import time\n\nfrom selenium.webdriver import ActionChains\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n\nfrom browser import Browser\n\nclass shopping_cart_page(Browser):\n remove_button = (By.XPATH, '//table[@id=\"shopping-cart-table\"]/tbody[1]/tr[2]/td/div/a[2]')\n counter = (By.XPATH, '//span[@class=\"counter-number\"]')\n edit_button = (By.XPATH, '//table[@id=\"shopping-cart-table\"]/tbody[1]/tr[2]/td/div/a[1]')\n item_size = (By.XPATH,'//table[@id=\"shopping-cart-table\"]/tbody[1]/tr/td/div/dl/dd[1]')\n item_colour = (By.XPATH,'//table[@id=\"shopping-cart-table\"]/tbody[1]/tr/td/div/dl/dd[2]')\n quantity = (By.XPATH,'//table[@id=\"shopping-cart-table\"]/tbody[1]/tr/td[3]/div/div/label/input')\n cart_items = (By.XPATH,'//tbody[@class=\"cart item\"]')\n new_size = (By.XPATH, '//div[@id=\"option-label-size-143-item-166\"]')\n new_colour = (By.XPATH,'//div[@id=\"option-label-color-93-item-49\"]')\n new_quantity = (By.XPATH,'//div[@class=\"control\"]/input[@type=\"number\"]')\n update_cart = (By.XPATH,'//div[@class=\"actions\"]/button[@title=\"Update Cart\"]')\n message = (By.XPATH,'//div[@data-bind=\"html: $parent.prepareMessageForHtml(message.text)\"]')\n check_out = (By.XPATH,'//button[@data-role=\"proceed-to-checkout\"]')\n subtotal = (By.XPATH,'//td[@class=\"amount\"]/span[@class=\"price\"]')\n order_discount = (By.XPATH,'//td[@class=\"amount\"]/span/span[@class=\"price\"]')\n order_total = (By.XPATH,'//strong/span[@class=\"price\"]')\n update_shopping_cart = (By.XPATH,'//button[@title=\"Update Shopping Cart\"]')\n\n\n def navigate_to_shopping_cart(self):\n time.sleep(2)\n self.chrome.get('https://magento.softwaretestingboard.com/checkout/cart/')\n\n\n def check_cart_has_one_less_item(self):\n counter_w = WebDriverWait(self.chrome, 5).until(\n EC.presence_of_element_located((By.XPATH, '//span[@class=\"counter-number\"]')))\n time.sleep(2)\n initial = self.chrome.find_element(*self.counter).text\n self.chrome.find_element(*self.remove_button).click()\n counter_w = WebDriverWait(self.chrome, 5).until(\n EC.presence_of_element_located((By.XPATH, '//span[@class=\"counter-number\"]')))\n time.sleep(2)\n number = self.chrome.find_element(*self.counter).text\n assert number < initial, f\"We got {number} expected {self.initial}\"\n\n def clear_all_items(self):\n items = self.chrome.find_elements(*self.cart_items)\n for item in items:\n self.chrome.find_element(*self.remove_button).click()\n\n\n def edit_item_details(self):\n old_size = self.chrome.find_element(*self.item_size).text\n old_colour = self.chrome.find_element(*self.item_colour).text\n qty = self.chrome.find_element(*self.quantity)\n old_quantity = 
qty.get_attribute('value')\n self.chrome.find_element(*self.edit_button).click()\n self.chrome.find_element(*self.new_size).click()\n self.chrome.find_element(*self.new_colour).click()\n self.chrome.find_element(*self.new_quantity).clear()\n self.chrome.find_element(*self.new_quantity).send_keys(2)\n self.chrome.find_element(*self.update_cart).click()\n time.sleep(2)\n new_size = self.chrome.find_element(*self.item_size).text\n new_colour = self.chrome.find_element(*self.item_colour).text\n qty_n = self.chrome.find_element(*self.quantity)\n new_quantity = qty_n.get_attribute('value')\n assert old_size != new_size, f\"Size didn't change, expected {new_size} but got {old_size}\"\n assert old_colour != new_colour, f\"Colour didn't change, expected {new_colour} but got {old_colour}\"\n assert old_quantity != new_quantity, f\"Quantity didn't change, expected {new_quantity} but got {old_quantity}\"\n\n def update_quantity(self):\n self.chrome.find_element(*self.quantity).clear()\n self.chrome.find_element(*self.quantity).send_keys(3)\n self.chrome.find_element(*self.update_shopping_cart).click()\n\n def discount_is_calculated_corectly(self):\n time.sleep(2)\n order_subtotal = self.chrome.find_element(*self.subtotal).text\n if \",\" in order_subtotal:\n order_subtotal = order_subtotal.replace(\",\",\"\")\n subtotal = order_subtotal[1:]\n order_discount = self.chrome.find_element(*self.order_discount).text\n if \",\" in order_discount:\n order_discount = order_discount.replace(\",\",\"\")\n discount = order_discount[2:]\n order_total = self.chrome.find_element(*self.order_total).text\n if \",\" in order_total:\n order_total = order_total.replace(\",\",\"\")\n order = order_total[1:]\n expected_discount = round(float(subtotal)*0.20,2)\n assert float(discount) == expected_discount, f\"Expected discount {expected_discount}, received {discount}\"\n expected_order = float(subtotal) - float(discount)\n assert expected_order == float(order), f'Order total expected {expected_order}, received {order}'\n\n\n\n\n\n\n\n\n\n","repo_name":"MaximDigitalKL/Python_BDD_ecomerce","sub_path":"pages/shopping_cart_page.py","file_name":"shopping_cart_page.py","file_ext":"py","file_size_in_byte":5190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10209125152","text":"from django.urls import path\n\nfrom pages.views import ContactCreateView, AboutTemplateView, HomeTemplateView, WishlistListView, add_to_wishlist\n\napp_name = 'pages'\n\nurlpatterns = [\n path('contact/', ContactCreateView.as_view(), name='contact'),\n path('about/', AboutTemplateView.as_view(), name='about'),\n path('wishlist/', WishlistListView.as_view(), name='wishlist'),\n path('wishlist//', add_to_wishlist, name='add-wishlist'),\n path('', HomeTemplateView.as_view(), name='home'),\n]\n","repo_name":"elyor27/test2","sub_path":"shop1/pages/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74600222664","text":"from . 
{"seq_id":"74600222664","text":"from . import SimStage\nfrom .tsne import TSNEStage\nfrom .tsne import TSNESaveEmbedding\n\nimport inspect\nimport logging\nimport multiprocessing\nfrom collections.abc import Iterable\nfrom types import SimpleNamespace\nfrom time import time\n\nimport numpy as np\nfrom sklearn.base import BaseEstimator\n\nimport openTSNE\nfrom openTSNE import _tsne\nfrom openTSNE import initialization as initialization_scheme\nfrom openTSNE.affinity import Affinities, PerplexityBasedNN\nfrom openTSNE.quad_tree import QuadTree\nfrom openTSNE import utils\n\nfrom .bhumap import estimate_negative_gradient_bh, estimate_negative_gradient_elastic\n\nEPSILON = np.finfo(np.float64).eps\n\nlog = logging.getLogger(__name__)\n\n\ndef _handle_nice_params(embedding: np.ndarray, optim_params: dict) -> None:\n \"\"\"Convert the user-friendly params into something the optimizer can\n understand.\"\"\"\n # Handle callbacks\n optim_params[\"callbacks\"] = openTSNE.tsne._check_callbacks(\n optim_params.get(\"callbacks\")\n )\n optim_params[\"use_callbacks\"] = optim_params[\"callbacks\"] is not None\n\n # Handle negative gradient method\n negative_gradient_method = optim_params.pop(\"negative_gradient_method\")\n if callable(negative_gradient_method):\n negative_gradient_method = negative_gradient_method\n elif negative_gradient_method in {\"umap\", \"UMAP\", \"bhumap\"}:\n negative_gradient_method = bh_umap\n elif negative_gradient_method in {\"elastic\"}:\n negative_gradient_method = bh_elastic\n elif negative_gradient_method in {\"bh\", \"BH\", \"barnes-hut\"}:\n negative_gradient_method = openTSNE.kl_divergence_bh\n elif negative_gradient_method in {\"fft\", \"FFT\", \"interpolation\"}:\n negative_gradient_method = openTSNE.kl_divergence_fft\n else:\n raise ValueError(\n \"Unrecognized gradient method. Please choose one of \"\n \"the supported methods or provide a valid callback.\"\n )\n # `gradient_descent` uses the more informative name `objective_function`\n optim_params[\"objective_function\"] = negative_gradient_method\n\n # Handle number of jobs\n n_jobs = optim_params.get(\"n_jobs\", 1)\n if n_jobs < 0:\n n_cores = multiprocessing.cpu_count()\n # Add negative number of n_jobs to the number of cores, but increment by\n # one because -1 indicates using all cores, -2 all except one, and so on\n n_jobs = n_cores + n_jobs + 1\n\n # If the number of jobs, after this correction is still <= 0, then the user\n # probably thought they had more cores, so we'll default to 1\n if n_jobs <= 0:\n log.warning(\n \"`n_jobs` received value %d but only %d cores are available. \"\n \"Defaulting to single job.\" % (optim_params[\"n_jobs\"], n_cores)\n )\n n_jobs = 1\n\n optim_params[\"n_jobs\"] = n_jobs\n\n # Determine learning rate if requested\n if optim_params.get(\"learning_rate\", \"auto\") == \"auto\":\n optim_params[\"learning_rate\"] = max(200, embedding.shape[0] / 12)\n\n
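The `n_jobs` handling in `_handle_nice_params` follows the scikit-learn convention for negative values. Pulled out as a standalone function (a sketch, not part of openTSNE's public API):

```python
import multiprocessing

def resolve_n_jobs(n_jobs):
    """-1 means all cores, -2 all but one, and so on; anything that still
    resolves to <= 0 falls back to a single job."""
    if n_jobs < 0:
        n_cores = multiprocessing.cpu_count()
        n_jobs = n_cores + n_jobs + 1  # on 8 cores: -1 -> 8, -2 -> 7
    return max(n_jobs, 1)
```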
\ndef bh_umap(\n embedding,\n P,\n dof,\n bh_params,\n reference_embedding=None,\n eps=0.001,\n should_eval_error=False,\n n_jobs=1,\n **_,\n):\n gradient = np.zeros_like(embedding, dtype=np.float64, order=\"C\")\n\n # In the event that we wish to embed new points into an existing embedding\n # using simple optimization, we optimize the new embedding points\n # w.r.t. the existing embedding. Otherwise, we want to optimize the\n # embedding w.r.t. itself. We've also got to make sure that the points'\n # interactions don't interfere with each other\n pairwise_normalization = reference_embedding is None\n if reference_embedding is None:\n reference_embedding = embedding\n\n # Compute negative gradient\n tree = QuadTree(reference_embedding)\n sum_Q = estimate_negative_gradient_bh(\n tree,\n embedding,\n gradient,\n **bh_params,\n eps=eps,\n num_threads=n_jobs,\n pairwise_normalization=pairwise_normalization,\n )\n del tree\n\n # Compute positive gradient\n sum_P, kl_divergence_ = _tsne.estimate_positive_gradient_nn(\n P.indices,\n P.indptr,\n P.data,\n embedding,\n reference_embedding,\n gradient,\n dof,\n num_threads=n_jobs,\n should_eval_error=should_eval_error,\n )\n\n # Computing positive gradients summed up only unnormalized q_ijs, so we\n # have to include the normalization term separately\n if should_eval_error:\n kl_divergence_ += sum_P * np.log(sum_Q + EPSILON)\n\n return kl_divergence_, gradient\n\n\ndef bh_elastic(\n embedding,\n P,\n dof,\n bh_params,\n reference_embedding=None,\n elastic_const=10000,\n eps=1.0,\n should_eval_error=False,\n n_jobs=1,\n **_,\n):\n gradient = np.zeros_like(embedding, dtype=np.float64, order=\"C\")\n\n # In the event that we wish to embed new points into an existing embedding\n # using simple optimization, we optimize the new embedding points\n # w.r.t. the existing embedding. Otherwise, we want to optimize the\n # embedding w.r.t. itself. We've also got to make sure that the points'\n # interactions don't interfere with each other\n pairwise_normalization = reference_embedding is None\n if reference_embedding is None:\n reference_embedding = embedding\n\n # Compute negative gradient\n tree = QuadTree(reference_embedding)\n sum_Q = estimate_negative_gradient_elastic(\n tree,\n embedding,\n gradient,\n **bh_params,\n eps=eps,\n elastic_const=elastic_const,\n num_threads=n_jobs,\n pairwise_normalization=pairwise_normalization,\n )\n del tree\n\n # Compute positive gradient\n sum_P, kl_divergence_ = _tsne.estimate_positive_gradient_nn(\n P.indices,\n P.indptr,\n P.data,\n embedding,\n reference_embedding,\n gradient,\n dof,\n num_threads=n_jobs,\n should_eval_error=should_eval_error,\n )\n\n # Computing positive gradients summed up only unnormalized q_ijs, so we\n # have to include the normalization term separately\n if should_eval_error:\n kl_divergence_ += sum_P * np.log(sum_Q + EPSILON)\n\n return kl_divergence_, gradient\n\n
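Both objective functions split the KL divergence the same way: the tree pass returns the normalization constant `sum_Q`, the sparse positive pass accumulates the error against *unnormalized* kernel values, and `sum_P * log(sum_Q)` is added afterwards to correct for the missing normalization. A dense O(N^2) NumPy reference of that identity (a sketch, assuming `P` is a dense, normalized affinity matrix with a zero diagonal):

```python
import numpy as np

def exact_kl(P, Y, dof=1.0, eps=1e-16):
    """KL(P || Q) computed with unnormalized kernel values W plus the
    sum_P * log(sum_Q) correction used above."""
    D2 = np.sum((Y[:, None, :] - Y[None, :, :]) ** 2, axis=-1)
    W = (1 + D2 / dof) ** (-(dof + 1) / 2)  # unnormalized q_ij
    np.fill_diagonal(W, 0)
    sum_Q = W.sum()
    kl = np.sum(P * np.log((P + eps) / (W + eps)))  # uses W, not W / sum_Q
    return kl + P.sum() * np.log(sum_Q + eps)       # normalization term
```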
\nclass UMAPEmbedding(openTSNE.TSNEEmbedding):\n def __new__(\n cls,\n embedding,\n affinities,\n reference_embedding=None,\n dof=1,\n n_interpolation_points=3,\n min_num_intervals=50,\n ints_in_interval=1,\n negative_gradient_method=\"umap\",\n random_state=None,\n optimizer=None,\n **gradient_descent_params,\n ):\n # init_checks.num_samples(embedding.shape[0], affinities.P.shape[0])\n\n obj = np.asarray(embedding, dtype=np.float64, order=\"C\").view(UMAPEmbedding)\n\n obj.reference_embedding = reference_embedding\n obj.P = affinities.P\n obj.affinities = affinities # type: Affinities\n obj.gradient_descent_params = gradient_descent_params # type: dict\n obj.gradient_descent_params.update(\n {\n \"negative_gradient_method\": negative_gradient_method,\n \"n_interpolation_points\": n_interpolation_points,\n \"min_num_intervals\": min_num_intervals,\n \"ints_in_interval\": ints_in_interval,\n \"dof\": dof,\n }\n )\n obj.random_state = random_state\n\n if optimizer is None:\n optimizer = gradient_descent()\n elif not isinstance(optimizer, openTSNE.tsne.gradient_descent):\n raise TypeError(\n \"`optimizer` must be an instance of `%s`, but got `%s`.\"\n % (openTSNE.tsne.gradient_descent.__name__, type(optimizer))\n )\n obj.optimizer = optimizer\n\n obj.kl_divergence = None\n\n # Interpolation grid variables\n obj.interp_coeffs = None\n obj.box_x_lower_bounds = None\n obj.box_y_lower_bounds = None\n\n return obj\n\n def optimize(\n self,\n n_iter,\n inplace=False,\n propagate_exception=False,\n **gradient_descent_params,\n ):\n \"\"\"Run optimization on the embedding for a given number of steps.\n\n Parameters\n ----------\n n_iter: int\n The number of optimization iterations.\n\n learning_rate: Union[str, float]\n The learning rate for t-SNE optimization. When\n ``learning_rate=\"auto\"`` the appropriate learning rate is selected\n according to max(200, N / 12), as determined in Belkina et al.\n \"Automated optimized parameters for t-distributed stochastic\n neighbor embedding improve visualization and analysis of large\n datasets\", 2019. Note that this should *not* be used when adding\n samples into existing embeddings, where the learning rate often\n needs to be much lower to obtain convergence.\n\n exaggeration: float\n The exaggeration factor is used to increase the attractive forces of\n nearby points, producing more compact clusters.\n\n momentum: float\n Momentum accounts for gradient directions from previous iterations,\n resulting in faster convergence.\n\n negative_gradient_method: str\n Specifies the negative gradient approximation method to use. For\n smaller data sets, the Barnes-Hut approximation is appropriate and\n can be set using one of the following aliases: ``bh``, ``BH`` or\n ``barnes-hut``. For larger data sets, the FFT accelerated\n interpolation method is more appropriate and can be set using one of\n the following aliases: ``fft``, ``FFT`` or ``interpolation``.\n\n theta: float\n This is the trade-off parameter between speed and accuracy of the\n tree approximation method. Typical values range from 0.2 to 0.8. The\n value 0 indicates that no approximation is to be made and produces\n exact results also producing longer runtime.\n\n n_interpolation_points: int\n Only used when ``negative_gradient_method=\"fft\"`` or its other\n aliases. The number of interpolation points to use within each grid\n cell for interpolation based t-SNE. It is highly recommended leaving\n this value at the default 3.\n\n min_num_intervals: int\n Only used when ``negative_gradient_method=\"fft\"`` or its other\n aliases. The minimum number of grid cells to use, regardless of the\n ``ints_in_interval`` parameter. Higher values provide more accurate\n gradient estimations.\n\n inplace: bool\n Whether or not to create a copy of the embedding or to perform\n updates inplace.\n\n propagate_exception: bool\n The optimization process can be interrupted using callbacks. This\n flag indicates whether we should propagate that exception or to\n simply stop optimization and return the resulting embedding.\n\n random_state: Union[int, RandomState]\n The random state parameter follows the convention used in\n scikit-learn. If the value is an int, random_state is the seed used\n by the random number generator. If the value is a RandomState\n instance, then it will be used as the random number generator. If\n the value is None, the random number generator is the RandomState\n instance used by `np.random`.\n\n n_jobs: int\n The number of threads to use while running t-SNE. 
This follows the\n scikit-learn convention, ``-1`` meaning all processors, ``-2``\n meaning all but one, etc.\n\n callbacks: Callable[[int, float, np.ndarray] -> bool]\n Callbacks, which will be run every ``callbacks_every_iters``\n iterations.\n\n callbacks_every_iters: int\n How many iterations should pass between each time the callbacks are\n invoked.\n\n Returns\n -------\n PartialTSNEEmbedding\n An optimized partial t-SNE embedding.\n\n Raises\n ------\n OptimizationInterrupt\n If a callback stops the optimization and the ``propagate_exception``\n flag is set, then an exception is raised.\n\n \"\"\"\n # Typically we want to return a new embedding and keep the old one intact\n if inplace:\n embedding = self\n else:\n embedding = UMAPEmbedding(\n np.copy(self),\n self.affinities,\n reference_embedding=self.reference_embedding,\n optimizer=self.optimizer.copy(),\n **self.gradient_descent_params,\n )\n\n # If optimization parameters were passed to this function, prefer those\n # over the defaults specified in the TSNE object\n optim_params = dict(self.gradient_descent_params)\n optim_params.update(gradient_descent_params)\n optim_params[\"n_iter\"] = n_iter\n # this calls the function I patched in this file, not the one\n # from openTSNE\n _handle_nice_params(embedding, optim_params)\n\n try:\n # Run gradient descent with the embedding optimizer so gains are\n # properly updated and kept\n error, embedding = embedding.optimizer(\n embedding=embedding,\n reference_embedding=self.reference_embedding,\n P=self.P,\n **optim_params,\n )\n\n except openTSNE.OptimizationInterrupt as ex:\n log.info(\"Optimization was interrupted with callback.\")\n if propagate_exception:\n raise ex\n error, embedding = ex.error, ex.final_embedding\n\n embedding.kl_divergence = error\n\n return embedding\n\n\nclass UMAPBH(SimStage):\n def __init__(\n self,\n path,\n dataname=\"nns.mtx\",\n initname=\"data.npy\",\n n_components=2,\n random_state=None,\n learning_rate=\"umap\",\n n_iter_early=250,\n n_iter=500,\n exaggeration=1,\n early_exaggeration=1,\n eps=0.001,\n early_momentum=0.5,\n momentum=0.8,\n n_jobs=-1,\n save_iter_freq=250,\n ):\n super().__init__(\n path,\n dataname=dataname,\n initname=initname,\n n_components=n_components,\n random_state=random_state,\n )\n self.learning_rate = learning_rate\n self.n_iter = n_iter\n self.n_iter_early = n_iter_early\n self.exaggeration = exaggeration\n self.early_exaggeration = early_exaggeration\n self.eps = eps\n self.early_momentum = early_momentum\n self.momentum = momentum\n\n self.n_jobs = n_jobs\n self.saver = TSNESaveEmbedding(self.outdir)\n self.save_iter_freq = save_iter_freq\n\n def transform(self):\n affinities = Affinities()\n affinities.P = self.data.tocsr()\n\n if self.learning_rate == \"auto\":\n n = self.init.shape[0]\n self.learning_rate = n / self.exaggeration\n elif self.learning_rate == \"umap\":\n self.learning_rate = 1 / self.exaggeration\n\n umap = UMAPEmbedding(\n embedding=self.init,\n affinities=affinities,\n negative_gradient_method=bh_umap,\n learning_rate=self.learning_rate,\n n_jobs=self.n_jobs,\n random_state=self.random_state,\n callbacks_every_iters=self.save_iter_freq,\n callbacks=self.saver,\n )\n\n self.data_ = umap.optimize(\n n_iter=self.n_iter_early,\n exaggeration=self.early_exaggeration,\n eps=self.eps,\n momentum=self.early_momentum,\n inplace=True,\n n_jobs=self.n_jobs,\n propagate_exception=True,\n )\n\n self.data_ = umap.optimize(\n n_iter=self.n_iter,\n exaggeration=self.exaggeration,\n eps=self.eps,\n momentum=self.momentum,\n inplace=True,\n 
n_jobs=self.n_jobs,\n propagate_exception=True,\n )\n\n return self.data_\n\n\nclass TSNEElastic(TSNEStage):\n def __init__(\n self,\n path,\n dataname=\"nns.mtx\",\n initname=\"data.npy\",\n n_components=2,\n random_state=None,\n learning_rate=\"auto\",\n n_iter_early=250,\n n_iter=500,\n exaggeration=1,\n early_exaggeration=12,\n elastic_const=10000,\n eps=1,\n early_momentum=0.5,\n momentum=0.8,\n negative_gradient_method=bh_elastic,\n n_jobs=-1,\n save_iter_freq=250,\n ):\n super().__init__(\n path,\n dataname=dataname,\n initname=initname,\n n_components=n_components,\n random_state=random_state,\n learning_rate=learning_rate,\n n_iter=n_iter,\n exaggeration=exaggeration,\n momentum=momentum,\n negative_gradient_method=negative_gradient_method,\n n_jobs=n_jobs,\n save_iter_freq=save_iter_freq,\n )\n self.early_exaggeration = early_exaggeration\n self.n_iter_early = n_iter_early\n self.early_momentum = early_momentum\n self.elastic_const = elastic_const\n self.eps = eps\n\n def transform(self):\n affinities = Affinities()\n affinities.P = self.data.tocsr()\n\n if self.learning_rate == \"auto\":\n n = self.init.shape[0]\n self.learning_rate = n / self.early_exaggeration\n\n tsne = UMAPEmbedding(\n embedding=self.init,\n affinities=affinities,\n negative_gradient_method=self.negative_gradient_method,\n learning_rate=self.learning_rate,\n n_jobs=self.n_jobs,\n random_state=self.random_state,\n callbacks_every_iters=self.save_iter_freq,\n callbacks=self.saver,\n )\n\n self.data_ = tsne.optimize(\n n_iter=self.n_iter_early,\n exaggeration=self.early_exaggeration,\n momentum=self.early_momentum,\n elastic_const=self.elastic_const,\n eps=self.eps,\n inplace=True,\n n_jobs=self.n_jobs,\n propagate_exception=True,\n )\n\n self.data_ = tsne.optimize(\n n_iter=self.n_iter,\n exaggeration=self.exaggeration,\n momentum=self.momentum,\n elastic_const=self.elastic_const,\n eps=self.eps,\n inplace=True,\n n_jobs=self.n_jobs,\n propagate_exception=True,\n )\n\n return self.data_\n\n\nclass gradient_descent(openTSNE.tsne.gradient_descent):\n def __call__(\n self,\n embedding,\n P,\n n_iter,\n objective_function,\n learning_rate=200,\n momentum=0.5,\n exaggeration=None,\n dof=1,\n min_gain=0.01,\n max_grad_norm=None,\n max_step_norm=5,\n eps=0.001,\n elastic_const=10000,\n theta=0.5,\n n_interpolation_points=3,\n min_num_intervals=50,\n ints_in_interval=1,\n reference_embedding=None,\n n_jobs=1,\n use_callbacks=False,\n callbacks=None,\n callbacks_every_iters=50,\n verbose=False,\n **kwargs,\n ):\n \"\"\"Perform batch gradient descent with momentum and gains.\n\n Parameters\n ----------\n embedding: np.ndarray\n The embedding :math:`Y`.\n\n P: array_like\n Joint probability matrix :math:`P`.\n\n n_iter: int\n The number of iterations to run for.\n\n objective_function: Callable[..., Tuple[float, np.ndarray]]\n A callable that evaluates the error and gradient for the current\n embedding.\n\n learning_rate: Union[str, float]\n The learning rate for t-SNE optimization. 
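Both `transform()` methods above run the same two-phase schedule: a short early phase with its own exaggeration and momentum, then a longer final phase on the same embedding. Factored out (a sketch; the defaults mirror `TSNEElastic`, they are not library defaults):

```python
def two_phase_optimize(embedding, n_iter_early=250, n_iter=500,
                       early_exaggeration=12, exaggeration=1,
                       early_momentum=0.5, momentum=0.8, **kwargs):
    """Early phase pulls clusters together, final phase relaxes them."""
    embedding = embedding.optimize(n_iter=n_iter_early,
                                   exaggeration=early_exaggeration,
                                   momentum=early_momentum,
                                   inplace=True, **kwargs)
    return embedding.optimize(n_iter=n_iter, exaggeration=exaggeration,
                              momentum=momentum, inplace=True, **kwargs)
```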
When\n ``learning_rate=\"auto\"`` the appropriate learning rate is selected\n according to max(200, N / 12), as determined in Belkina et al.\n \"Automated optimized parameters for t-distributed stochastic\n neighbor embedding improve visualization and analysis of large\n datasets\", 2019.\n\n momentum: float\n Momentum accounts for gradient directions from previous iterations,\n resulting in faster convergence.\n\n exaggeration: float\n The exaggeration factor is used to increase the attractive forces of\n nearby points, producing more compact clusters.\n\n dof: float\n Degrees of freedom of the Student's t-distribution.\n\n min_gain: float\n Minimum individual gain for each parameter.\n\n max_grad_norm: float\n Maximum gradient norm. If the norm exceeds this value, it will be\n clipped. This is most beneficial when adding points into an existing\n embedding and the new points overlap with the reference points,\n leading to large gradients. This can make points \"shoot off\" from\n the embedding, causing the interpolation method to compute a very\n large grid, and leads to worse results.\n\n max_step_norm: float\n Maximum update norm. If the norm exceeds this value, it will be\n clipped. This prevents points from \"shooting off\" from\n the embedding.\n\n theta: float\n This is the trade-off parameter between speed and accuracy of the\n tree approximation method. Typical values range from 0.2 to 0.8. The\n value 0 indicates that no approximation is to be made and produces\n exact results also producing longer runtime.\n\n n_interpolation_points: int\n Only used when ``negative_gradient_method=\"fft\"`` or its other\n aliases. The number of interpolation points to use within each grid\n cell for interpolation based t-SNE. It is highly recommended leaving\n this value at the default 3.\n\n min_num_intervals: int\n Only used when ``negative_gradient_method=\"fft\"`` or its other\n aliases. The minimum number of grid cells to use, regardless of the\n ``ints_in_interval`` parameter. Higher values provide more accurate\n gradient estimations.\n\n ints_in_interval: float\n Only used when ``negative_gradient_method=\"fft\"`` or its other\n aliases. Indicates how large a grid cell should be e.g. a value of 3\n indicates a grid side length of 3. Lower values provide more\n accurate gradient estimations.\n\n reference_embedding: np.ndarray\n If we are adding points to an existing embedding, we have to compute\n the gradients and errors w.r.t. the existing embedding.\n\n n_jobs: int\n The number of threads to use while running t-SNE. This follows the\n scikit-learn convention, ``-1`` meaning all processors, ``-2``\n meaning all but one, etc.\n\n use_callbacks: bool\n\n callbacks: Callable[[int, float, np.ndarray] -> bool]\n Callbacks, which will be run every ``callbacks_every_iters``\n iterations.\n\n callbacks_every_iters: int\n How many iterations should pass between each time the callbacks are\n invoked.\n\n Returns\n -------\n float\n The KL divergence of the optimized embedding.\n np.ndarray\n The optimized embedding Y.\n\n Raises\n ------\n OptimizationInterrupt\n If the provided callback interrupts the optimization, this is raised.\n\n \"\"\"\n assert isinstance(embedding, np.ndarray), (\n \"`embedding` must be an instance of `np.ndarray`. Got `%s` instead\"\n % type(embedding)\n )\n\n if reference_embedding is not None:\n assert isinstance(reference_embedding, np.ndarray), (\n \"`reference_embedding` must be an instance of `np.ndarray`. 
Got \"\n \"`%s` instead\" % type(reference_embedding)\n )\n\n # If we're running transform and using the interpolation scheme, then we\n # should limit the range where new points can go to\n should_limit_range = False\n if reference_embedding is not None:\n if reference_embedding.box_x_lower_bounds is not None:\n should_limit_range = True\n lower_limit = reference_embedding.box_x_lower_bounds[0]\n upper_limit = reference_embedding.box_x_lower_bounds[-1]\n\n update = np.zeros_like(embedding)\n if self.gains is None:\n self.gains = np.ones_like(embedding)\n\n bh_params = {\"theta\": theta}\n fft_params = {\n \"n_interpolation_points\": n_interpolation_points,\n \"min_num_intervals\": min_num_intervals,\n \"ints_in_interval\": ints_in_interval,\n }\n\n # Lie about the P values for bigger attraction forces\n if exaggeration is None:\n exaggeration = 1\n\n if exaggeration != 1:\n P *= exaggeration\n\n # Notify the callbacks that the optimization is about to start\n if isinstance(callbacks, Iterable):\n for callback in callbacks:\n # Only call function if present on object\n getattr(callback, \"optimization_about_to_start\", lambda: ...)()\n\n timer = utils.Timer(\n \"Running optimization with exaggeration=%.2f, lr=%.2f for %d iterations...\"\n % (exaggeration, learning_rate, n_iter),\n verbose=verbose,\n )\n timer.__enter__()\n\n if verbose:\n start_time = time()\n\n for iteration in range(n_iter):\n should_call_callback = (\n use_callbacks and (iteration + 1) % callbacks_every_iters == 0\n )\n # Evaluate error on 50 iterations for logging, or when callbacks\n should_eval_error = should_call_callback or (\n verbose and (iteration + 1) % 50 == 0\n )\n\n error, gradient = objective_function(\n embedding,\n P,\n dof=dof,\n bh_params=bh_params,\n fft_params=fft_params,\n eps=eps,\n elastic_const=elastic_const,\n reference_embedding=reference_embedding,\n n_jobs=n_jobs,\n should_eval_error=should_eval_error,\n **kwargs,\n )\n\n # Clip gradients to avoid points shooting off. This can be an issue\n # when applying transform and points are initialized so that the new\n # points overlap with the reference points, leading to large\n # gradients\n if max_grad_norm is not None:\n norm = np.linalg.norm(gradient, axis=1)\n coeff = max_grad_norm / (norm + 1e-6)\n mask = coeff < 1\n gradient[mask] *= coeff[mask, None]\n\n # Correct the KL divergence w.r.t. 
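The `max_grad_norm` handling just shown rescales only the rows whose norm exceeds the threshold and leaves every other point untouched. As a standalone NumPy function (a sketch of the same row-wise clipping):

```python
import numpy as np

def clip_row_norms(gradient, max_norm, eps=1e-6):
    """Rescale rows whose L2 norm exceeds max_norm onto the max_norm sphere;
    rows already inside the ball are left unchanged."""
    norm = np.linalg.norm(gradient, axis=1)
    coeff = max_norm / (norm + eps)
    mask = coeff < 1
    gradient[mask] *= coeff[mask, None]
    return gradient
```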
the exaggeration if needed\n if should_eval_error and exaggeration != 1:\n error = error / exaggeration - np.log(exaggeration)\n\n if should_call_callback:\n # Continue only if all the callbacks say so\n should_stop = any(\n (bool(c(iteration + 1, error, embedding)) for c in callbacks)\n )\n if should_stop:\n # Make sure to un-exaggerate P so it's not corrupted in future runs\n if exaggeration != 1:\n P /= exaggeration\n raise openTSNE.tsne.OptimizationInterrupt(\n error=error, final_embedding=embedding\n )\n\n # Update the embedding using the gradient\n grad_direction_flipped = np.sign(update) != np.sign(gradient)\n grad_direction_same = np.invert(grad_direction_flipped)\n self.gains[grad_direction_flipped] += 0.2\n self.gains[grad_direction_same] = (\n self.gains[grad_direction_same] * 0.8 + min_gain\n )\n update = momentum * update - learning_rate * self.gains * gradient\n\n # Clip the update sizes\n if max_step_norm is not None:\n update_norms = np.linalg.norm(update, axis=1, keepdims=True)\n mask = update_norms.squeeze() > max_step_norm\n update[mask] /= update_norms[mask]\n update[mask] *= max_step_norm\n\n embedding += update\n\n # Zero-mean the embedding only if we're not adding new data points,\n # otherwise this will reset point positions\n if reference_embedding is None:\n embedding -= np.mean(embedding, axis=0)\n\n # Limit any new points within the circle defined by the interpolation grid\n if should_limit_range:\n if embedding.shape[1] == 1:\n mask = (lower_limit < embedding) & (embedding < upper_limit)\n np.clip(embedding, lower_limit, upper_limit, out=embedding)\n elif embedding.shape[1] == 2:\n r = np.linalg.norm(embedding, axis=1)\n phi = np.arctan2(embedding[:, 0], embedding[:, 1])\n mask = (lower_limit < embedding) & (embedding < upper_limit)\n mask = np.any(mask, axis=1)\n np.clip(r, lower_limit, upper_limit, out=r)\n embedding[:, 0] = r * np.cos(phi)\n embedding[:, 1] = r * np.sin(phi)\n # Zero out the momentum terms for the points that hit the boundary\n self.gains[~mask] = 0\n\n if verbose and (iteration + 1) % 50 == 0:\n stop_time = time()\n print(\n \"Iteration %4d, KL divergence %6.4f, 50 iterations in %.4f sec\"\n % (iteration + 1, error, stop_time - start_time)\n )\n start_time = time()\n\n timer.__exit__()\n\n # Make sure to un-exaggerate P so it's not corrupted in future runs\n if exaggeration != 1:\n P /= exaggeration\n\n # The error from the loop is the one for the previous, non-updated\n # embedding. 
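The gains logic in the loop above is the classic delta-bar-delta rule: a parameter whose gradient sign differs from the running update (steady descent in one direction) gets its gain increased additively, while a sign agreement decays the gain multiplicatively toward `min_gain`. Isolated as one function (a sketch):

```python
import numpy as np

def momentum_step(update, gradient, gains, learning_rate, momentum,
                  min_gain=0.01):
    """One update of the gains + momentum scheme used above: gains grow by
    0.2 where gradient and running update have opposite signs, and decay
    by 0.8 toward min_gain where they agree."""
    flipped = np.sign(update) != np.sign(gradient)
    gains[flipped] += 0.2
    gains[~flipped] = gains[~flipped] * 0.8 + min_gain
    return momentum * update - learning_rate * gains * gradient
```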
We need to return the error for the actual final embedding, so\n # compute that at the end before returning\n error, _ = objective_function(\n embedding,\n P,\n dof=dof,\n bh_params=bh_params,\n fft_params=fft_params,\n reference_embedding=reference_embedding,\n n_jobs=n_jobs,\n should_eval_error=True,\n )\n\n return error, embedding\n","repo_name":"berenslab/ne-spectrum","sub_path":"jnb_msc/transformer/umap_bh.py","file_name":"umap_bh.py","file_ext":"py","file_size_in_byte":30943,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"81"}{"seq_id":"12251494200","text":"filename = 'learning_python.txt'\n\n# Chose the `for line` approach since it seems the most concise\nwith open(filename) as file_object:\n for line in file_object:\n print(line.strip().replace('Python', 'C').replace('.', '?'))\n # I'm only 95% positive that all three of my things learned\n # are also true in C, so decided to try a multiple replace.\n # Chaining `replace()` was what my gut said to try, and Stack\n # Overflow seems to confirm it is the fastest.\n","repo_name":"smithmts/Homework-NoStarchPress-PythonCrashCourse","sub_path":"10-02_learning_c/learning_python.py","file_name":"learning_python.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
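The chained `replace()` calls in the record above run sequentially, so a later pair can rewrite the output of an earlier one. A regex alternation performs all substitutions in a single pass (a sketch):

```python
import re

def multi_replace(line, mapping):
    """Replace every key of mapping in one pass, so earlier substitutions
    can never feed later ones."""
    pattern = re.compile("|".join(re.escape(k) for k in mapping))
    return pattern.sub(lambda m: mapping[m.group(0)], line)

print(multi_replace("I love Python.", {"Python": "C", ".": "?"}))  # I love C?
```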
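In the `blocks.py` record that follows, `Block.get_path` rescans the whole workflow list once per ancestor, which is quadratic in deep hierarchies. Indexing the blocks by guid first makes each parent lookup O(1); a sketch of the equivalent logic:

```python
def build_path(block, workflow):
    """Same output as Block.get_path below, but with a guid index instead
    of a linear scan per ancestor."""
    by_guid = {b.guid: b for b in workflow}
    titles = [block.get_title()]
    parent_guid = block.parent
    while parent_guid is not None:
        parent = by_guid[parent_guid]
        titles.append(parent.get_title())
        parent_guid = parent.parent
    return " / ".join(reversed(titles))
```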
{"seq_id":"37052059402","text":"from abc import ABC\nfrom dataclasses import dataclass, field\nfrom port import Port\nfrom context import protoblocks\n\n\n@dataclass(slots=True, frozen=True)\nclass BlockSettings:\n \"\"\"Class of settings for Block.\"\"\"\n\n memory: int\n cpu: int\n timeout: int\n\n def get_title(self) -> str:\n params = []\n if self.memory:\n params.append(f\"Memory: {self.memory}\")\n\n if self.cpu:\n params.append(f\"CPU: {self.cpu}\")\n\n if self.timeout:\n params.append(f\"Timeout: {self.timeout}\")\n\n return f'({\", \".join(params)})'\n\n\n@dataclass(slots=True, frozen=True)\nclass Block(ABC):\n \"\"\"Class of block from a workflow.\"\"\"\n\n guid: str = field(compare=True)\n type: str = field(compare=False)\n name: str = field(compare=False)\n parent: str = field(compare=False)\n position: tuple[int] = field(compare=False)\n description: str = field(compare=False)\n settings: BlockSettings = field(compare=False)\n ports: tuple[Port] = field(compare=False)\n\n @classmethod\n def from_dict(cls, dictionary: dict):\n \"\"\"Creates a Block object from the dictionary.\"\"\"\n\n ports = cls._get_ports(dictionary['ports'])\n description = cls._get_description(dictionary['description'], dictionary['description_default'])\n settings = cls._get_settings(dictionary)\n\n return cls(\n guid=dictionary['guid'],\n type=dictionary['type'],\n name=dictionary['name'],\n parent=dictionary['parent'],\n position=tuple(dictionary['ui']['position'].values()),\n description=description,\n settings=settings,\n ports=ports\n )\n\n @staticmethod\n def _get_settings(dictionary: dict) -> BlockSettings:\n \"\"\"Extracts the memory, CPU and timeout settings from the dictionary.\"\"\"\n\n if dictionary.get('resources_customized') is not None:\n memory = dictionary['resources_customized']['run']['requests']['memory']\n cpu = dictionary['resources_customized']['run']['requests']['cpu']\n else:\n memory, cpu = None, None\n\n timeout = dictionary.get('idle_timeout_user')\n\n return BlockSettings(memory, cpu, timeout)\n\n @staticmethod\n def _get_description(description: dict, description_default: dict) -> str | None:\n \"\"\"Returns the block description, falling back to the default one.\"\"\"\n\n if description is not None:\n return description['']\n elif description_default is not None:\n return description_default['']\n\n return description\n\n @staticmethod\n def _get_ports(ports: list[dict]) -> tuple[Port]:\n \"\"\"Returns a tuple of Ports which are included in the Block.\"\"\"\n\n return tuple(Port.from_dict(port) for port in ports)\n\n def get_title(self) -> str:\n \"\"\"Returns a brief block description.\"\"\"\n\n return self.name\n\n def get_path(self, workflow: list) -> str:\n \"\"\"Returns the path of the Block's parents.\"\"\"\n\n parent_guid = self.parent\n list_of_parents = [self.get_title()]\n\n while parent_guid is not None:\n for block in workflow:\n if block.guid == parent_guid:\n list_of_parents.append(block.get_title())\n parent_guid = block.parent\n\n break\n\n block_path = ''\n for parent in list(reversed(list_of_parents))[:-1]:\n block_path += parent + ' / '\n block_path += list_of_parents[0]\n\n return block_path\n\n\n@dataclass(slots=True, frozen=True)\nclass Composite(Block):\n \"\"\"Class of composite block from workflow.\"\"\"\n\n pass\n\n\n@dataclass(slots=True, frozen=True)\nclass Protoblock(Block):\n \"\"\"Class of protoblock block from workflow.\"\"\"\n\n protoblock_id: str = field(compare=False)\n protoblock_version: int = field(compare=False)\n protoblock_name: str = field(compare=False)\n\n @classmethod\n def from_dict(cls, dictionary: dict):\n ports = cls._get_ports(dictionary['ports'])\n description = cls._get_description(dictionary['description'], dictionary['description_default'])\n settings = cls._get_settings(dictionary)\n protoblock_id = dictionary['protoblock']['id']\n protoblock_version = dictionary['protoblock']['version']\n\n if protoblocks:\n protoblock_name = cls._get_protoblock_name(protoblock_id, protoblock_version)\n else:\n protoblock_name = None\n\n return cls(\n guid=dictionary['guid'],\n type=dictionary['type'],\n name=dictionary['name'],\n parent=dictionary['parent'],\n position=tuple(dictionary['ui']['position'].values()),\n description=description,\n settings=settings,\n ports=ports,\n protoblock_id=protoblock_id,\n protoblock_version=protoblock_version,\n protoblock_name=protoblock_name\n )\n\n @staticmethod\n def _get_protoblock_name(pb_id: str, pb_version: str) -> str:\n \"\"\"Returns the name of a protoblock.\"\"\"\n\n id_version = f'{pb_id}-{pb_version}'\n\n if id_version in protoblocks[0].keys():\n name = protoblocks[0][id_version]\n elif id_version in protoblocks[1].keys():\n name = protoblocks[1][id_version]\n else:\n name = None\n\n return name\n\n def get_title(self) -> str:\n\n if self.protoblock_name is not None and self.protoblock_name != self.name:\n return f\"{self.name} ({self.protoblock_name})\"\n else:\n return f\"{self.name}\"\n\n\ndef create_block(dictionary: dict) -> Block:\n \"\"\"A block factory which returns a Composite or Protoblock object.\"\"\"\n\n factory_dict = {\n 'COMPOSITE': Composite,\n 'BLOCK': Protoblock\n }\n return factory_dict[dictionary['type']].from_dict(dictionary)\n","repo_name":"MediaFoxxx/workflow-diff","sub_path":"workflow_diff/blocks.py","file_name":"blocks.py","file_ext":"py","file_size_in_byte":5806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"5390358682","text":"import subprocess,os\nimport gzip,shutil\n\n\ndef minify(t):\n def removeComments(s):\n nest = 0\n print(s)\n c = 0\n while c < len(s):\n if s[c] == '\"':\n if nest == 0 and not(c > 0 and s[c-1]==\"\\\\\"):\n nest+=1\n print (\"entering nest\",nest,s[0:c])\n else:\n nest-=1\n print (\"leaving nest\",nest,s[0:c])\n elif c>0 and s[c-1:c+1] == 
'//' and nest ==0:\n if c==1:\n return \"\"\n else:\n print (\"stripped\",s[0:c-1])\n return s[0:c-1]\n elif (s[c-1:c+1] in [ '\\\\r','\\\\n','\\\\d','\\\\.','\\\\+']):\n print('nested special',s)\n s = s[:c]+\"\\\\\"+s[c:]\n c+=1\n \n c+=1\n\n\n return s\n\n res = []\n\n for a in t:\n a= a.strip();\n a = removeComments(a);\n d = \"\"\n for c in range(len(a)):\n if a[c] == \"\\\"\" and (c==0 or a[c-1]!=\"\\\\\"):\n d+=\"\\\\\\\"\"\n else:\n d+=a[c]\n\n if(d):\n res+=[str(d).strip()]\n\n\n\n return res\n\ndef minifyDict(ls):\n depth = 0;\n res = []\n tmp =\"\"\n for l in ls:\n for c in l:\n if c == '{':\n depth+=1\n elif c=='}':\n depth-=1\n if depth==0:\n tmp+=l\n res+=[tmp]\n tmp = \"\"\n else:\n tmp+=l\n return res\n\n\ndef getDeps(f):\n from html.parser import HTMLParser\n\n class MyHTMLParser(HTMLParser):\n def __init__(self):\n HTMLParser.__init__(self)\n self.localSources = []\n self.localSourceStart = None\n def handle_starttag(self, tag, attrs):\n if(tag==\"script\"):\n for attr in attrs:\n if attr[0]=='src' and not attr[1].startswith(\"http\"):\n self.localSources+=[attr[1]]\n self.localSourceStart = True\n def handle_endtag(self,tag):\n \n if(tag==\"script\") and (self.localSourceStart==True):\n self.localSourceStart = False\n\n parser = MyHTMLParser()\n depLines = {}\n num = 0;\n with open(f,'r') as fp:\n for l in fp.readlines():\n parser.feed(l)\n st = parser.localSourceStart\n if st!=None :\n if not parser.localSources[-1] in depLines:\n depLines[parser.localSources[-1]] ={'lines':[num,-1]}\n depLines[parser.localSources[-1]]['lines'][1] = num\n if(st==False):parser.localSourceStart = None\n num+=1\n return depLines;\n\ndef addNPMPath():\n if not 'npm-packages' in os.environ['PATH']:\n os.environ['PATH']+=':'+os.path.expanduser('~/.npm-packages/bin')\n\n\ndef uglify(s):\n addNPMPath();\n sex = os.path.splitext(s);\n sout = sex[0]+'.min'+sex[1]\n out = subprocess.check_output(['uglifyjs','-cm','-o',sout,'--warn','--',s])\n with open(sout,'r') as fp:\n data = ''.join(fp.readlines());\n print(out)\n return {'name':sout,'data':data}\n\ndef htmlmin(s,sout):\n addNPMPath();\n\n ls = ['html-minifier','--minify-css','true','--minify-js','true','--remove-comments','--html5','--remove-tag-whitespace ','--collapse-whitespace ','--remove-redundant-attributes','-o',sout,'--',s]\n out = subprocess.check_output(ls)\n print(out)\n return sout\n\ndef replace_deps(inHtml,deps,sout):\n \n with open(inHtml,'r') as fp:\n l = fp.readlines();\n for k,v in deps.items():\n start = v['lines'][0]\n end = max(start,v['lines'][1])\n for k in range(start,end+1):\n del l[k]\n\n l = l[:start] +[str('')]+l[start:]\n with open(sout,'w') as outf:\n outf.writelines(l)\n\ndef removeEmptyLines(inf,outf):\n lines = []\n with open(inf,'r') as fp:\n for l in fp.readlines():\n n = l.strip()\n if n :\n lines+=[n]\n else:\n print('removing')\n with open(outf,'w') as fp:\n fp.write('\\n'.join(lines))\n\n\n\n\nif __name__ == \"__main__\":\n pack = True\n file = \"index.html\"\n deps = getDeps(file)\n \n \n for d in deps.keys():\n deps[d]['min']=uglify(d)\n \n sex = os.path.splitext(file);\n htmlMin = sex[0]+'.min'+sex[1]\n if pack:\n shutil.copyfile(file,htmlMin)\n replace_deps(htmlMin,deps,htmlMin)\n htmlmin(htmlMin,htmlMin)\n else:\n htmlmin(file,htmlMin)\n\n removeEmptyLines(htmlMin,htmlMin)\n\n \n filesToCompress = [htmlMin]\n if not pack:\n filesToCompress+=[v['min']['name'] for v in deps.values()]\n\n for f in filesToCompress:\n localgz = 'FS/'+f+'.gz'\n \n print (f,localgz)\n with open(f, 'rb') as f_in, 
gzip.open(localgz, 'wb',compresslevel=9) as f_out:\n shutil.copyfileobj(f_in, f_out)\n\n # exit()\n \n# oneline = True\n# with open(file,'r') as fp:\n# lines = fp.readlines();\n# minl = minify(lines)\n\n# minl = minifyDict(minl)\n# # for l in minl:\n# # print(l)\n\n# stringT = \"\".join(minl)\n# # print( eval(\"\\\"%s\\\"%(stringT)\"))\n\n\n# import os,shutil\n# sex = os.path.splitext(file)\n# minPath = sex[0]+\".min\"+sex[1]\n# with open(minPath,'w') as fp:\n# minlH = []\n# for i in range(len(minl)):\n# x = minl[i]\n# comp = x.replace(\"\\\\\\\"\",\"\\\"\")\n# comp=comp.replace(\"\\\\\\\\r\",\"\\\\r\")\n# comp=comp.replace(\"\\\\\\\\n\",\"\\\\n\")\n# comp=comp.replace(\"\\\\\\\\d\",\"\\\\d\")\n# comp=comp.replace(\"\\\\\\\\.\",\"\\\\.\")\n# comp=comp.replace(\"\\\\\\\\+\",\"\\\\+\")\n# if not oneline :\n# comp+='\\n'\n# else:\n# l = comp[-1]\n# n = ''\n# if i < len(minl)-1:\n# n = minl[i+1][0]\n# if not l in ['{','}','>',','] and (l != ';') and n not in ['{','}']:\n# comp+=';'\n# minlH+=[comp]\n# fp.writelines(minlH)\n\n# localgz = 'FS/'+minPath+'.gz'\n# import gzip\n# print (minPath,localgz)\n# with open(minPath, 'rb') as f_in, gzip.open(localgz, 'wb',compresslevel=9) as f_out:\n# shutil.copyfileobj(f_in, f_out)\n\n\n\n from ftplib import FTP\n import socket\n print('getting ip');\n ip = socket.gethostbyname('smartpotier.local')\n print(ip)\n print('logging in');\n ftp = FTP(ip,user='esp32',passwd='esp32')\n print('logged in');\n def advance(v):\n print(\"block done : \",len(v))\n\n\n with open(localgz,'rb') as fp:\n ftp.storbinary(\"STOR \"+os.path.basename(localgz),fp,callback=advance)\n # with open('httpoterie.h','w') as fp:\n # fp.writelines(['const char * http_page = '])\n # minlH = ['\"'+x+'\\\\n\"\\n' for x in minl]\n # fp.writelines(minlH+[\";\"])\n\n\n","repo_name":"MartinHN/Smartpotier","sub_path":"minify.py","file_name":"minify.py","file_ext":"py","file_size_in_byte":6094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30543184933","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom django import forms\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms import layout, bootstrap\n\nfrom models import UploadedImage\nfrom ajax_upload.widgets import AjaxClearableFileInput\n\n\n## changed from Charfiled for file_path\nclass UploadedImageForm(forms.ModelForm):\n class Meta:\n model = UploadedImage\n widgets = {\n 'image_field': AjaxClearableFileInput\n }\n fields = [\"title\"]\n\n file_path = forms.CharField(\n max_length=255,\n widget=AjaxClearableFileInput(),\n required=False,\n )\n delete_image = forms.BooleanField(\n widget=forms.HiddenInput(),\n required=False,\n )\n\n\n def __init__(self, *args, **kwargs):\n super(UploadedImageForm, self).__init__(*args, **kwargs)\n\n self.helper = FormHelper()\n self.helper.form_action = \"\"\n self.helper.form_method = \"POST\"\n #self.helper.inputs =\n self.helper.form_tag = False\n self.helper.layout = layout.Layout(\n layout.Fieldset(\n _(\"UploadedImage form\"),\n \"title\",\n layout.HTML(u\"\"\"{% load i18n %}\n
{% if instance.image %}\n <img src=\"{{ instance.image.url }}\" alt=\"\">\n {% endif %}\n {% trans \"Available formats are JPG, GIF, and PNG.\" %}
\n \"\"\"),\n \"file_path\",\n \"delete_image\",\n ),\n bootstrap.FormActions(\n layout.Submit('submit', _('Save'), css_class=\"btn btn-warning\"),\n )\n )\n\n\nfrom django.forms.models import formset_factory\nUploadedImageFormset = formset_factory(UploadedImageForm, extra = 3)\nuploadedimage_formset = UploadedImageFormset()\n\n\n#### dj ajax upload widget ver\nfrom django import forms\n\nfrom ajax_upload.widgets import AjaxClearableFileInput\n\nfrom models import UploadedImage\n\n\nclass UploadedImageFormAlt(forms.ModelForm):\n class Meta:\n model = UploadedImage\n widgets = {\n 'image': AjaxClearableFileInput\n }\n\n\n####################\n#from django.db.models import FileField\n#from django.forms import forms\n#from django.template.defaultfilters import filesizeformat\n#from django.utils.translation import ugettext_lazy as _\n#\n#class ContentTypeRestrictedFileField(FileField):\n# \"\"\"\n# Same as FileField, but you can specify:\n# * content_types - list containing allowed content_types. Example: ['application/pdf', 'image/jpeg']\n# * max_upload_size - a number indicating the maximum file size allowed for upload.\n# 2.5MB - 2621440\n# 5MB - 5242880\n# 10MB - 10485760\n# 20MB - 20971520\n# 50MB - 5242880\n# 100MB 104857600\n# 250MB - 214958080\n# 500MB - 429916160\n# \"\"\"\n# def __init__(self, content_types=None,max_upload_size=104857600, **kwargs):\n# self.content_types = kwargs.pop('video/avi', 'video/mp4', 'video/3gp', 'video/wmp', 'video/flv', 'video/mov')\n# self.max_upload_size = max_upload_size\n#\n# super(ContentTypeRestrictedFileField, self).__init__(**kwargs)\n#\n#\n# def clean(self, *args, **kwargs):\n# data = super(ContentTypeRestrictedFileField, self).clean(*args, **kwargs)\n#\n# file = data.file\n# try:\n# content_type = file.content_type\n# if content_type in self.content_types:\n# if file._size > self.max_upload_size:\n# raise forms.ValidationError(_('Please keep filesize under %s. 
Current filesize %s') % (filesizeformat(self.max_upload_size), filesizeformat(file._size)))\n# else:\n# raise forms.ValidationError(_('Filetype not supported.'))\n# except AttributeError:\n# pass\n#\n# return data\n#\n# #from south.modelsinspector import add_introspection_rules\n# #add_introspection_rules([], [\"^app\\.extra\\.ContentTypeRestrictedFileField\"])","repo_name":"connectthefuture/djdam165","sub_path":"uploader/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":4640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16216215256","text":"import torch\n\nfrom utils.utils import would_user_further_response, set_random_seed\n\ndef trans_index(origin_dict):\n new_dict = {}\n for key in origin_dict:\n new_dict[int(key)] = origin_dict[key]\n return new_dict\n\n\nclass DialogueManager:\n def __init__(self, config, rec, agent, user, convhis):\n self.config = config\n self.attribute_tree = config.config.att_tree_dict\n self.attribute_tree = trans_index(self.attribute_tree)\n\n self.ask_action_index = config.ask_action_index\n self.rec_action_index = config.rec_action_index\n self.rec_with_att_feedback_action_index = config.rec_with_att_feedback_action_index\n self.rec_success_reward = config.rec_success_reward\n self.pos_attribute_reward = config.pos_attribute_reward\n self.neg_attribute_reward = config.neg_attribute_reward\n self.neg_feedback_attribute_reward = config.neg_feedback_attribute_reward\n self.rec_with_att_feedback_action_index = config.rec_with_att_feedback_action_index\n self.user_quit_reward = config.user_quit_reward\n self.every_turn_reward = config.every_turn_reward\n self.turn_limit = config.turn_limit\n\n self.rec = rec\n self.agent = agent\n self.user = user\n self.convhis = convhis\n\n self.target_user = None\n self.target_item = None\n self.silence = True\n self.turn_num = None\n self.current_agent_action = None\n self.current_neg_item_set = set()\n set_random_seed(1)\n\n def initialize_dialogue(self, target_user, target_item, silence, given_init_parent_attribute=None):\n self.target_user = target_user\n self.target_item = target_item\n self.silence = silence\n self.turn_num = 1\n self.current_agent_action = None\n\n self.agent.init_episode()\n init_pos_attribute_set, init_neg_attribute_set, init_parent_attribute \\\n = self.user.init_episode(target_user, target_item, given_init_parent_attribute)\n self.convhis.init_conv(target_user, target_item, init_pos_attribute_set, init_neg_attribute_set,\n init_parent_attribute)\n\n return list(init_pos_attribute_set)\n\n def user_turn(self, ask_attribute_list, ask_item_list):\n if ask_attribute_list != None:\n attribute_list = self.user.next_turn(ask_attribute_list)\n return attribute_list\n if ask_item_list != None:\n item_list = []\n for item in ask_item_list:\n if item == self.target_item:\n item_list.append(item)\n break\n return item_list\n\n def user_turn_for_rec_att_feedback(self, ask_attribute_list):\n if ask_attribute_list != None:\n attribute_list = self.user.next_turn(ask_attribute_list)\n return attribute_list\n\n def user_turn_for_rec_feedback(self, ask_item_list):\n target_item_info = self.config.config.item_info[self.target_item]\n accept_attribute = set()\n for item in ask_item_list:\n item_info = self.config.config.item_info[item]\n if item_info.issubset(target_item_info):\n accept_attribute = accept_attribute.union(item_info)\n return accept_attribute\n\n def get_current_agent_action(self):\n return self.current_agent_action\n\n def 
initialize_episode(self, target_user, target_item, given_init_parent_attribute=None, silence=True):\n self.target_user = target_user\n self.target_item = target_item\n self.turn_num = 1\n self.current_agent_action = None\n self.silence = silence\n\n init_pos_attribute_set, init_neg_attribute_set, init_parent_attribute \\\n = self.user.init_episode(target_user, target_item, given_init_parent_attribute)\n self.convhis.init_conv(target_user, target_item, init_pos_attribute_set, init_neg_attribute_set, init_parent_attribute)\n\n return self.get_state()\n\n def step(self, action_index):\n is_ask = action_index == self.ask_action_index\n is_rec_feed = action_index == self.rec_with_att_feedback_action_index\n\n candidate_list = self.convhis.get_candidate_list()\n\n conv_neg_item_list = set(self.convhis.get_conv_neg_item_list())\n neg_item_list = conv_neg_item_list - self.current_neg_item_set\n if neg_item_list != None:\n self.current_neg_item_set = conv_neg_item_list\n ask_item_list, cand_score_list = self.rec.get_recommend_item_list(self.target_item, list(neg_item_list), candidate_list)\n action_index = self.convhis.get_max_attribute_entropy_index()\n feedback_rec_item_list, feedback_att_list = self.get_feedback_recommend(ask_item_list)\n if len(self.convhis.get_pos_attribute()) == 2 and len(feedback_att_list) > 0:\n print(\"pos: \", self.convhis.get_pos_attribute(), \" item: \", feedback_rec_item_list[-len(feedback_att_list):], \" att: \", feedback_att_list, \" ; u: \", self.target_user, \" ; i: \", self.target_item)\n\n feedback_attribute_list = self.user_turn_for_rec_att_feedback(feedback_att_list)\n feedback_pos_attribute_set = set(feedback_attribute_list)\n feedback_neg_attribute_set = set(feedback_att_list) - feedback_pos_attribute_set\n\n be_len, be_rank = self.convhis.get_candidate_len_and_target_rank_base_list(cand_score_list)\n\n feedback_rec_len = 0\n feedback_rec_rank = 0\n feedback_rec_list = []\n feedback_rec_success = False\n feedback_item_list = self.user_turn(None, feedback_rec_item_list)\n if len(feedback_item_list) == 0:\n feedback_rec_len, feedback_rec_rank, feedback_rec_list = self.convhis.get_candidate_len_and_target_rank_for_feedback_rec(feedback_rec_item_list, feedback_pos_attribute_set, feedback_neg_attribute_set)\n else:\n feedback_rec_success = True\n\n ask_attribute_list = self.attribute_tree[action_index]\n attribute_list = self.user_turn(ask_attribute_list, None)\n pos_attribute_set = set(attribute_list)\n neg_attribute_set = set(ask_attribute_list) - pos_attribute_set\n ask_len, ask_rank, ask_list = self.convhis.get_candidate_len_and_target_rank_for_ask(pos_attribute_set,\n neg_attribute_set)\n\n success = False\n step_state = -1\n\n if is_ask:\n self.convhis.set_candidate_len_and_target_rank_and_can_list(ask_len, ask_rank, ask_list)\n ask_reward = self.agent.get_reward(self.get_reward_state(self.ask_action_index)) - torch.tensor(0.1).cuda()\n feedback_rec_reward = self.agent.get_reward(self.get_reward_state(self.rec_with_att_feedback_action_index)) - torch.tensor(0.1).cuda()\n reward = ask_reward\n\n ask_attribute_list = self.attribute_tree[action_index]\n attribute_list = self.user_turn(ask_attribute_list, None)\n pos_attribute_set = set(attribute_list)\n neg_attribute_set = set(ask_attribute_list) - pos_attribute_set\n\n if len(pos_attribute_set) == 0:\n step_state = \"0\" + str(list(neg_attribute_set)[0])\n else:\n step_state = \"1\" + str(list(pos_attribute_set)[0])\n\n self.convhis.add_new_attribute(pos_attribute_set, action_index)\n 
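The `step()` method above scores candidate actions by appending an action index to the same state vector that `get_state()` builds from the user, conversation-history, and length features. The construction, isolated (a sketch over plain Python lists):

```python
import torch

def build_state(user_vec, convhis_vec, length_vec, action_index=None):
    """Concatenate the three feature lists; append the candidate action
    index when scoring a reward (mirrors get_state / get_reward_state)."""
    state = list(user_vec) + list(convhis_vec) + list(length_vec)
    if action_index is not None:
        state = state + [action_index]
    return torch.tensor(state)
```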
self.convhis.update_conv_his(len(pos_attribute_set) > 0, action_index)\n self.convhis.update_attribute_entropy()\n\n if len(attribute_list) > 0:\n IsOver = False\n else:\n IsOver = False\n else:\n self.convhis.set_candidate_len_and_target_rank_and_can_list(feedback_rec_len, feedback_rec_rank, feedback_rec_list)\n ask_reward = self.agent.get_reward(self.get_reward_state(self.ask_action_index)) - torch.tensor(0.1).cuda()\n feedback_rec_reward = self.agent.get_reward(self.get_reward_state(self.rec_with_att_feedback_action_index)) - torch.tensor(0.1).cuda()\n\n item_list = self.user_turn(None, feedback_rec_item_list)\n uf = would_user_further_response()\n if uf:\n accept_att = self.user_turn_for_rec_att_feedback(feedback_rec_item_list)\n if len(accept_att) > 0:\n self.convhis.add_pos_attribute(accept_att)\n\n if len(item_list) > 0:\n IsOver = True\n success = True\n feedback_rec_reward = feedback_rec_reward + torch.tensor(1.0).cuda()\n\n step_state = \"2\"\n else:\n if len(feedback_att_list) > 0:\n step_state = \"4\" + str(feedback_rec_item_list[-1]) + \"qwe\" + str(feedback_att_list[-1])\n else:\n step_state = \"3\" + str(feedback_rec_item_list[0])\n\n\n self.convhis.add_pos_attribute(feedback_pos_attribute_set)\n self.convhis.add_neg_attribute(feedback_neg_attribute_set)\n self.convhis.add_conv_neg_item_list(feedback_rec_item_list)\n IsOver = False\n\n reward = feedback_rec_reward\n\n self.turn_num += 1\n if self.turn_num == self.turn_limit + 1:\n reward = reward - torch.tensor(0.3).cuda()\n if success:\n success = False\n if not IsOver:\n IsOver = True\n\n return IsOver, self.get_state(), reward, success, be_len, be_rank, ask_len, ask_rank, \\\n feedback_rec_len, feedback_rec_rank, ask_reward, feedback_rec_reward, step_state\n\n def get_state(self):\n state_user = self.convhis.get_user_vertor()\n state_convhis = self.convhis.get_convhis_vector().copy()\n state_len = self.convhis.get_length_vector().copy()\n dialogue_state = state_user + state_convhis + state_len\n dialogue_state = torch.tensor(dialogue_state)\n return dialogue_state\n\n def get_reward_state(self, action_index):\n state_user = self.convhis.get_user_vertor()\n state_convhis = self.convhis.get_convhis_vector().copy()\n state_len = self.convhis.get_length_vector().copy()\n dialogue_state = state_user + state_convhis + state_len + [action_index]\n dialogue_state = torch.tensor(dialogue_state)\n return dialogue_state\n\n def get_feedback_recommend(self, norm_rec_list):\n candidate_att_item = self.convhis.get_available_items_for_recommend_feedback()\n candidate_item_list = list(candidate_att_item.keys())\n candidate_att_list = list(candidate_att_item.values())\n feedback_num = len(candidate_att_item)\n if feedback_num > 5:\n feedback_num = 5\n rec_item_list = norm_rec_list[:(10 - feedback_num)]\n rec_item_list = rec_item_list + candidate_item_list[:feedback_num]\n return rec_item_list, candidate_att_list[:feedback_num]\n","repo_name":"Studenthc/CRIF","sub_path":"dialoguemanager/DialogueManager.py","file_name":"DialogueManager.py","file_ext":"py","file_size_in_byte":10900,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"81"} +{"seq_id":"1319350258","text":"import tflearn as tl\r\nimport tflearn.datasets.mnist as mn\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\n\r\nclass MnistData(object):\r\n def __init__(self):\r\n self.__train_img = None\r\n self.__train_label = None\r\n self.__test_img = None\r\n\r\n def load_data(self):\r\n 
self.__load_train_data()\r\n self.__load_test_data()\r\n\r\n def __load_test_data(self):\r\n test_path = \"../data/test.csv\"\r\n test_df = pd.read_csv(test_path)\r\n self.__test_img = np.reshape(np.array(test_df), [-1, 28 * 28])\r\n\r\n def __load_train_data(self):\r\n train_path = \"../data/train.csv\"\r\n train_df = pd.read_csv(train_path)\r\n train_img = train_df.iloc[:, 1:]\r\n self.__train_label = np.reshape(np.array(pd.get_dummies(train_df[\"label\"])), [-1, 10])\r\n self.__train_img = np.reshape(np.array(train_img), [-1, 28 * 28])\r\n\r\n @property\r\n def train_img(self):\r\n return self.__train_img\r\n\r\n @property\r\n def train_label(self):\r\n return self.__train_label\r\n\r\n @property\r\n def test_img(self):\r\n return self.__test_img\r\n\r\n def show_pic(self):\r\n num = np.random.randint(0, np.shape(self.__train_img)[0])\r\n img = np.reshape(self.__train_img[num], [28, 28])\r\n plt.figure()\r\n plt.imshow(img, cmap='Greys_r')\r\n plt.show()\r\n\r\n\r\nclass Net(object):\r\n def __init__(self,\r\n x_data,\r\n y_data,\r\n epoch=10,\r\n learning_rate=0.01):\r\n self.x_data = x_data\r\n self.y_data = y_data\r\n self.epoch = epoch\r\n self.lr = learning_rate\r\n self.model = None\r\n self.sess = None\r\n self.net = None\r\n tl.init_graph(gpu_memory_fraction=0.6)\r\n\r\n def build_net(self):\r\n self.net = tl.input_data(shape=[None, 28, 28, 1])\r\n self.net = tl.conv_2d(incoming=self.net, nb_filter=32, filter_size=3, activation='relu')\r\n self.net = tl.max_pool_2d(incoming=self.net, kernel_size=3, strides=2)\r\n self.net = tl.conv_2d(incoming=self.net, nb_filter=64, filter_size=3, activation='relu')\r\n self.net = tl.max_pool_2d(incoming=self.net, kernel_size=3, strides=2)\r\n # self.net = tl.conv_2d(incoming=self.net, nb_filter=256, filter_size=3, activation='relu')\r\n # self.net = tl.max_pool_2d(incoming=self.net, kernel_size=3, strides=2)\r\n self.net = tl.fully_connected(incoming=self.net, n_units=7 * 7 * 64, activation='relu')\r\n self.net = tl.fully_connected(incoming=self.net, n_units=10, activation='softmax')\r\n self.net = tl.regression(incoming=self.net)\r\n\r\n self.model = tl.DNN(network=self.net, tensorboard_verbose=2, tensorboard_dir=\"D:/tmp/tflearn_logs/mnist\")\r\n\r\n def train_model(self):\r\n self.model.fit(np.reshape(self.x_data, [-1, 28, 28, 1]), self.y_data, n_epoch=self.epoch, show_metric=True, batch_size=128)\r\n self.model.save(\"../model/cnn/cnn_model\")\r\n\r\n def set_test_set(self, v_img):\r\n self.v_img = v_img\r\n\r\n def test_model(self):\r\n self.model.load(\"../model/cnn/cnn_model\")\r\n submission_df = pd.DataFrame()\r\n batch_size = 5000\r\n n = 0\r\n l = np.shape(self.v_img)[0]\r\n pre = []\r\n while n + batch_size <= l:\r\n pre.append([np.argmax(i) for i in self.model.predict(X=np.reshape(self.v_img[n: n + batch_size], [-1, 28, 28, 1]))])\r\n n += batch_size\r\n pre = np.reshape(pre, [-1, 1])\r\n pre = np.concatenate((pre, np.reshape([np.argmax(i) for i in self.model.predict(X=np.reshape(self.v_img[n:], [-1, 28, 28, 1]))], [-1, 1])))\r\n\r\n submission_df[\"ImageId\"] = [i for i in range(len(pre) + 1) if i != 0]\r\n submission_df[\"Label\"] = pre\r\n submission_df.to_csv(\"../data/sample_submission.csv\", index=False)\r\n\r\n def validate_model(self, val_img, val_label):\r\n self.model.load(\"../model/cnn/cnn_model\")\r\n batch_size = 5000\r\n n = 0\r\n l = np.shape(val_img)[0]\r\n pre = []\r\n while n + batch_size <= l:\r\n pre.append([np.argmax(i) for i in\r\n self.model.predict(X=np.reshape(val_img[n: n + batch_size], [-1, 28, 28, 
1]))])\r\n n += batch_size\r\n pre = np.reshape(pre, [-1, 1])\r\n pre = np.concatenate((pre, np.reshape(\r\n [np.argmax(i) for i in self.model.predict(X=np.reshape(val_img[n:], [-1, 28, 28, 1]))], [-1, 1])))\r\n\r\n pre = np.ravel(pre)\r\n label = np.ravel([np.argmax(i) for i in val_label])\r\n diff_index = np.ravel(np.argwhere(pre != label))\r\n self.__draw(pre, val_img, diff_index)\r\n\r\n def __draw(self, pre, val_img, diff_index):\r\n for i in diff_index:\r\n plt.figure()\r\n plt.title(pre[i])\r\n plt.imshow(np.reshape(val_img[i], [28, 28]), cmap='Greys_r')\r\n plt.savefig(\"../check/%s\" % i)\r\n plt.close()\r\n\r\nif __name__ == \"__main__\":\r\n data = MnistData()\r\n data.load_data()\r\n train_img = data.train_img / 255\r\n test_img = data.test_img / 255\r\n model = Net(x_data=train_img, y_data=data.train_label, epoch=5, learning_rate=0.001)\r\n model.set_test_set(test_img)\r\n model.build_net()\r\n # model.train_model()\r\n # model.test_model()\r\n model.validate_model(train_img, data.train_label)\r\n","repo_name":"zz984230/kaggle","sub_path":"mnist/code/cnn_mnist.py","file_name":"cnn_mnist.py","file_ext":"py","file_size_in_byte":5346,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}{"seq_id":"2005308411","text":"\ndef msort (arr):\n\n\t# base case: a list of zero or one elements is already sorted\n\tif len(arr) <= 1:\n\t\treturn arr\n\n\t# sort each half recursively, then merge the two sorted halves\n\tmid = len(arr)//2\n\tl = msort(arr[0:mid])\n\tr = msort(arr[mid:])\n\n\ti = 0\n\tj = 0\n\tres = []\n\n\twhile (i < len(l) and j < len(r)):\n\t\tif l[i] < r[j]:\n\t\t\tres.append(l[i])\n\t\t\ti += 1\n\t\telse:\n\t\t\tres.append(r[j])\n\t\t\tj += 1\n\n\t# append whatever is left of either half\n\tres.extend(l[i:])\n\tres.extend(r[j:])\n\treturn res\n","repo_name":"NatuMyers/pythonpractice","sub_path":"sorts2.py","file_name":"sorts2.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"30089960201","text":"\r\nimport urllib.request\r\nimport urllib.parse\r\nimport config \r\n\r\nfrom bs4 import BeautifulSoup\r\n\r\n\r\ndef makeURL(sNode, search_text ):\r\n base = \"https://openapi.naver.com/v1/search\"\r\n node = \"/%s.xml\" % sNode\r\n parameters = \"?query=%s\" % urllib.parse.quote( search_text )\r\n url = base + node + parameters\r\n return url\r\n\r\ndef requestURL(url):\r\n req = urllib.request.Request(url) \r\n \r\n req.add_header(\"X-Naver-Client-Id\", config.client_id) # add the Open API client id to the header\r\n req.add_header(\"X-Naver-Client-Secret\", config.client_secret) # add the Open API client secret to the header\r\n try:\r\n response = urllib.request.urlopen(req) \r\n if response.status == 200 : \r\n # print(\"Url Request Success\")\r\n data = response.read().decode('utf-8')\r\n return data \r\n except Exception as e:\r\n print(e)\r\n print(\"Error for URL : %s\" %url) \r\n return None\r\n \r\n \r\ndef searchText(text):\r\n sNode = 'news'\r\n search_text = text\r\n\r\n targetUrl = makeURL(sNode, search_text)\r\n data = requestURL(targetUrl)\r\n \r\n bs = BeautifulSoup(data, \"lxml\")\r\n list = bs.find_all('item')\r\n\r\n return list\r\n\r\n\r\ndef removeTag(n):\r\n n = n.replace('<b>','').replace('</b>','').replace('<','<').replace('>','>').replace('"', '')\r\n return n\r\n\r\n\r\ndef searchResult(list):\r\n title = []\r\n link = []\r\n pubDate = []\r\n\r\n for item in list:\r\n temp_title = (item.find('title')).get_text()\r\n temp_link = (item.find('originallink')).get_text()\r\n temp_pubDate = (item.find('pubdate')).get_text()\r\n title.append(removeTag(temp_title))\r\n link.append(removeTag(temp_link))\r\n\r\n # returned as parallel lists matched by index: title[0] pairs with link[0]\r\n return title, 
link\r\n\r\n# 사용 예시\r\n\r\ndef main(name:str):\r\n print('크롤링메인뉴스')\r\n list = searchText(name)\r\n title, link = searchResult(list)\r\n return title, link\r\n\r\n# def main():\r\n# list = searchText('삼성전자')\r\n# title, link = searchResult(list)\r\n# return title, link\r\n\r\n# title, link = main()\r\n","repo_name":"ym7596/MiniProjectDataCloud","sub_path":"news_crawling.py","file_name":"news_crawling.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72545094665","text":"from django.contrib.auth.decorators import login_required, user_passes_test\nfrom django.shortcuts import render, redirect\nfrom django.template.loader import render_to_string\nfrom .models import Company, Copier, DriverFile\nfrom .forms import CopierForm, CompanyScannerForm\nfrom django.http import HttpResponse, JsonResponse\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\n# Create your views here.\n\n\n# @user_passes_test(lambda u: u.is_superuser)\n@login_required\ndef copier_update(request, pk):\n if not request.user.is_superuser:\n return redirect('home')\n copier = Copier.objects.get(id=pk)\n if request.method == 'POST':\n copier_form = CopierForm(request.POST, instance=copier)\n if copier_form.is_valid():\n copier_form.save(True)\n return redirect('copier:list')\n copier_form = CopierForm(instance=copier)\n html = render_to_string('copier_update_form.html', context={'form': copier_form, 'instance': copier}, request=request)\n return HttpResponse(html)\n\n\n@login_required\ndef company_update(request, pk):\n if not request.user.is_superuser:\n return redirect('home')\n company = Company.objects.get(id=pk)\n if request.method == 'POST':\n company_form = CompanyScannerForm(request.POST, request.FILES, instance=company)\n if company_form.is_valid():\n company_form.save(True)\n return redirect('copier:list')\n company_form = CompanyScannerForm(instance=company)\n html = render_to_string('company_update_form.html', context={'form': company_form, 'instance': company}, request=request)\n return HttpResponse(html)\n\n\n@login_required\ndef copier_upload_files(request):\n if request.method == 'POST':\n files = request.FILES.getlist('files')\n if files:\n DriverFile.objects.bulk_create(DriverFile(file=file) for file in files)\n return redirect('copier:list')\n\n\n# @user_passes_test(lambda u: u.is_superuser)\n@login_required\ndef copier_list(request):\n if not request.user.is_superuser:\n return redirect('home')\n copier_list = Copier.objects.all()\n page = request.GET.get('page', 1)\n if request.method == 'POST':\n copier_form = CopierForm(request.POST)\n if copier_form.is_valid():\n copier_form.save(commit=True)\n return redirect('copier:list')\n else:\n copier_form = CopierForm()\n paginator = Paginator(copier_list, 10)\n try:\n copiers = paginator.page(page)\n except PageNotAnInteger:\n copiers = paginator.page(1)\n except EmptyPage:\n copiers = paginator.page(paginator.num_pages)\n\n return render(request, 'copier_list.html', {\n 'form': copier_form,\n 'copier_list': copiers\n })\n","repo_name":"Clark-Park/printer","sub_path":"copier/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13347413562","text":"import pdb\nimport numpy as np\nimport tensorflow as tf\nimport gpflow\nfrom gpflow.config import default_float, 
default_jitter\n\ngpflow.config.set_default_float(np.float64)\ngpflow.config.set_default_jitter(1e-6)\n\ndef reparameterise(mean, var, z, full_cov=False):\n    \"\"\"Implements the reparameterisation trick for the Gaussian, either full\n    rank or diagonal.\n\n    If z is a sample from N(0,I), the output is a sample from N(mean,var).\n\n    :mean: A tensor, the mean of shape [S,N,D].\n    :var: A tensor, the covariance of shape [S,N,D] or [S,N,N,D].\n    :z: A tensor, samples from a unit Gaussian of shape [S,N,D].\n    :full_cov: A boolean, indicates the shape of var.\"\"\"\n    if var is None:\n        return mean\n\n    if full_cov is False:\n        return mean + z * (var + default_jitter()) ** 0.5\n\n    else:\n        S, N, D = tf.shape(mean)[0], tf.shape(mean)[1], tf.shape(mean)[2]\n        mean = tf.transpose(mean, (0, 2, 1)) # [S,D,N]\n        var = tf.transpose(var, (0, 3, 1, 2)) # [S, D, N, N]\n        I = default_jitter() * tf.eye(N, dtype=default_float())\\\n            [None, None, :, :] # [1,1,N,N]\n        chol = tf.linalg.cholesky(var + I)\n        z_SDN1 = tf.transpose(z, (0, 2, 1))[:, :, :, None]\n        f = mean + tf.matmul(chol, z_SDN1)[:, :, :, 0]\n        return tf.transpose(f, (0, 2, 1)) # [S,N,D]\n","repo_name":"MattAshman/MLMI4","sub_path":"code/dgp/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"27135839862","text":"# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, val=0, left=None, right=None):\n#         self.val = val\n#         self.left = left\n#         self.right = right\nclass Solution:\n    def widthOfBinaryTree(self, root: Optional[TreeNode]) -> int:\n        if root is None:\n            return 0\n\n        # Label every node with an index, so the width of a level is the\n        # rightmost node's index minus the leftmost node's index,\n        # and the answer is the running maximum, max_width\n\n        max_width = 0\n        queue = [(root, 0)] # the first node's index is set to 0\n\n        while queue:\n            # at each level, the leftmost and rightmost indices give the width\n            left_most_node, left_idx = queue[0]\n            right_most_node, right_idx = queue[-1]\n            max_width = max(max_width, right_idx - left_idx + 1)\n\n            # push the next level's nodes onto the queue\n            # left child index = 2 * current node index + 1\n            # right child index = 2 * current node index + 2\n            # the index formulas can be derived by sketching the tree\n            for i in range(len(queue)):\n                node, idx = queue.pop(0)\n                if node.left is not None:\n                    queue.append((node.left, 2 * idx + 1))\n                if node.right is not None:\n                    queue.append((node.right, 2 * idx + 2))\n\n        return max_width\n    \n\n","repo_name":"ytatus94/Leetcode","sub_path":"python3/0662_Maximum_Width_of_Binary_Tree.py","file_name":"0662_Maximum_Width_of_Binary_Tree.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"15100664106","text":"import sys\nsys.stdin = open(\"test_input.txt\")\nfrom collections import deque\n\ninput = sys.stdin.readline\n\ndi = [1, 0, -1, 0]\ndj = [0, 1, 0, -1]\n\ndef bfs(x, y):\n    tmp = 1\n    queue = deque()\n    queue.append((x, y))\n    visited[x][y] = 1\n\n    while queue:\n        i, j = queue.popleft()\n\n        for _ in range(4):\n            ni = i + di[_]\n            nj = j + dj[_]\n\n            if 0 <= ni < n and 0 <= nj < m:\n                if info[ni][nj] == 1 and not visited[ni][nj]:\n                    visited[ni][nj] = 1\n                    queue.append((ni, nj))\n                    tmp += 1\n    return tmp\n\n\nn, m = map(int, input().split())\nvisited = [[0] * m for _ in range(n)]\ninfo = [list(map(int, input().split())) for _ in range(n)]\n\n\n\ncnt, max_value = 0, 0\nfor i in range(n):\n    for j in range(m):\n        if info[i][j] == 1 and not visited[i][j]:\n            cnt += 1\n            max_value = max(max_value, bfs(i, 
j))\n\nprint(cnt)\nprint(max_value)","repo_name":"choihyeonseok/algo","sub_path":"bj/1926.py","file_name":"1926.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41115500777","text":"\n'''\nMake representative plots for the appendix and formalize the csv files\n'''\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sb\nimport numpy as np\n\nfont_scale = 1.25\n\nwidth = 8.4\n# Keep the default ratio used in seaborn. This can get overwritten.\nheight = (4.4 / 6.4) * width\n\nfigsize = (width, height)\n\nsb.set(font='Times New Roman', style='ticks',\n rc={'text.usetex': True})\nsb.set_context(\"paper\", font_scale=font_scale,\n rc={\"figure.figsize\": figsize})\nsb.set_palette(\"colorblind\")\n\nplt.rcParams['axes.unicode_minus'] = False\n\ntab_2D = pd.read_csv(\"../twoD_scaling_tests.csv\", index_col=0)\ntab_3D = pd.read_csv(\"../threeD_scaling_tests.csv\", index_col=0)\n\nmask = slice(3, None)\n\nxvals = tab_2D.index[mask]\n\nsymbols = ['s', 'o', 'p', 'P', 'X']\n\nfig = plt.figure(figsize=figsize)\n\nval_range = np.logspace(np.log10(128), np.log10(2100))\n\n# 2D memory\nax_2D_mem = plt.subplot(2, 2, 1)\nax_2D_mem.semilogy(xvals, tab_2D['Wavelet_memory'][mask], symbols[0],\n linestyle='--', label='Wavelet')\nax_2D_mem.semilogy(xvals, tab_2D['DeltaVariance_memory'][mask], symbols[1],\n linestyle='--', label='Delta-Variance')\nax_2D_mem.semilogy(xvals, tab_2D['Genus_memory'][mask], symbols[2],\n linestyle='--', label='Genus')\nax_2D_mem.semilogy(xvals, tab_2D['Dendrogram_Stats_memory'][mask], symbols[3],\n linestyle='--', label='Dendrograms')\nax_2D_mem.semilogy(xvals, tab_2D['PowerSpectrum_memory'][mask], symbols[4],\n linestyle='--', label='Power-Spectrum')\nax_2D_mem.semilogy(val_range, (val_range / 256.)**2,\n linestyle='-', linewidth=5, color='k',\n alpha=0.3, zorder=-1)\nax_2D_mem.text(2150., 70, r\"$N^2$\", color='k', alpha=0.5)\n\nax_2D_mem.semilogy(val_range, (val_range / 256.)**3,\n linestyle=':', linewidth=5, color='k',\n alpha=0.3, zorder=-1)\nax_2D_mem.text(2150., 450., r\"$N^3$\", color='k', alpha=0.5)\nax_2D_mem.set_ylabel('Memory (MB)')\nax_2D_mem.grid()\nax_2D_mem.legend(frameon=True, ncol=2, loc='lower center', fontsize=10)\nax_2D_mem.set_ylim([10**-3, 5 * 10**5])\nax_2D_mem.set_xlim([100, 2350])\nplt.setp(ax_2D_mem.get_xticklabels(), visible=False)\n\nax_2D_time = plt.subplot(2, 2, 3, sharex=ax_2D_mem)\nax_2D_time.semilogy(xvals, tab_2D['Dendrogram_Stats_time'][mask], symbols[0],\n linestyle='--', label='Dendrograms')\nax_2D_time.semilogy(xvals, tab_2D['Genus_time'][mask], symbols[1],\n linestyle='--', label='Genus')\nax_2D_time.semilogy(xvals, tab_2D['DeltaVariance_time'][mask], symbols[2],\n linestyle='--', label='Delta-Variance')\nax_2D_time.semilogy(xvals, tab_2D['Bispectrum_time'][mask], symbols[3],\n linestyle='--', label='Bispectrum')\nax_2D_time.semilogy(xvals, tab_2D['PowerSpectrum_time'][mask], symbols[4],\n linestyle='--', label='Power-Spectrum')\nax_2D_time.semilogy(val_range, (val_range / 256.)**2,\n linestyle='-', linewidth=5, color='k',\n alpha=0.3, zorder=-1)\nax_2D_time.text(2150., 70, r\"$N^2$\", color='k', alpha=0.5)\n\nax_2D_time.semilogy(val_range, (val_range / 256.)**3,\n linestyle=':', linewidth=5, color='k',\n alpha=0.3, zorder=-1)\nax_2D_time.text(2150., 450., r\"$N^3$\", color='k', alpha=0.5)\n\nax_2D_time.set_ylabel('Time (s)')\nax_2D_time.set_xlabel('Size (pix)')\nax_2D_time.grid()\nax_2D_time.legend(frameon=True, 
ncol=2, loc='upper center', fontsize=10)\n\nax_2D_time.set_ylim([10**0, 8 * 10**6])\n\n\nax_3D_mem = plt.subplot(2, 2, 2, sharex=ax_2D_mem, sharey=ax_2D_mem)\nax_3D_mem.semilogy(xvals, tab_3D['SCF_memory'][mask], symbols[0],\n linestyle='--', label='SCF')\nax_3D_mem.semilogy(xvals, tab_3D['VCS_memory'][mask], symbols[1],\n linestyle='--', label='VCS')\nax_3D_mem.semilogy(xvals, tab_3D['VCA_memory'][mask], symbols[2],\n linestyle='--', label='VCA')\nax_3D_mem.semilogy(xvals, tab_3D['Cramer_Distance_memory'][mask], symbols[3],\n linestyle='--', label='Cramer')\nax_3D_mem.semilogy(xvals, tab_3D['PCA_memory'][mask], symbols[4],\n linestyle='--', label='PCA')\nax_3D_mem.semilogy(val_range, (val_range / 256.)**2,\n linestyle='-', linewidth=5, color='k',\n alpha=0.3, zorder=-1)\nax_3D_mem.text(2150., 70, r\"$N^2$\", color='k', alpha=0.5)\n\nax_3D_mem.semilogy(val_range, (val_range / 256.)**3,\n linestyle=':', linewidth=5, color='k',\n alpha=0.3, zorder=-1)\nax_3D_mem.text(2150., 450., r\"$N^3$\", color='k', alpha=0.5)\nax_3D_mem.grid()\nax_3D_mem.legend(frameon=True, ncol=2, fontsize=10)\nplt.setp(ax_3D_mem.get_xticklabels(), visible=False)\nplt.setp(ax_3D_mem.get_yticklabels(), visible=False)\n\nax_3D_time = plt.subplot(2, 2, 4, sharex=ax_2D_mem, sharey=ax_2D_time)\nax_3D_time.semilogy(xvals, tab_3D['PCA_time'][mask], symbols[0],\n linestyle='--', label='PCA')\nax_3D_time.semilogy(xvals, tab_3D['SCF_time'][mask], symbols[1],\n linestyle='--', label='SCF')\nax_3D_time.semilogy(xvals, tab_3D['VCA_time'][mask], symbols[2],\n linestyle='--', label='VCA')\nax_3D_time.semilogy(xvals, tab_3D['Cramer_Distance_time'][mask], symbols[3],\n linestyle='--', label='Cramer')\nax_3D_time.semilogy(xvals, tab_3D['VCS_time'][mask], symbols[4],\n linestyle='--', label='VCS')\nax_3D_time.semilogy(val_range, (val_range / 256.)**2,\n linestyle='-', linewidth=5, color='k',\n alpha=0.3, zorder=-1)\nax_3D_time.text(2150., 70, r\"$N^2$\", color='k', alpha=0.5)\n\nax_3D_time.semilogy(val_range, (val_range / 256.)**3,\n linestyle=':', linewidth=5, color='k',\n alpha=0.3, zorder=-1)\nax_3D_time.text(2150., 450., r\"$N^3$\", color='k', alpha=0.5)\n\nax_3D_time.set_xlabel('Size (pix)')\nax_3D_time.set_xticks([256, 512, 1024, 2048])\nax_3D_time.grid()\nax_3D_time.legend(frameon=True, ncol=2, loc='upper left', fontsize=10)\nplt.setp(ax_3D_time.get_yticklabels(), visible=False)\n\nplt.subplots_adjust(hspace=0.03, wspace=0.03)\n\nplt.savefig(\"../figures/scaling_tests.png\")\nplt.savefig(\"../figures/scaling_tests.pdf\")\nplt.close()\n","repo_name":"Astroua/TurbuStat","sub_path":"Examples/paper_plots/summarize_performance_tests.py","file_name":"summarize_performance_tests.py","file_ext":"py","file_size_in_byte":6302,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"81"} +{"seq_id":"2027990492","text":"from app import db\nfrom app.models import Routine\nfrom crontab import CronTab\nimport os\n\n\nclass RoutineControl():\n cron = None\n\n def __init__(self):\n basedir = os.path.abspath(os.path.dirname(__file__))\n self.file = os.path.join(basedir, 'tasks.tab')\n self.cron = CronTab(tabfile=self.file)\n\n\n def new_cron(self, routineId, task, days, times, light_splice):\n hours = []\n minutes = []\n dates = []\n days = days.split(',')\n for day in days:\n day = day.replace(',', '').strip()\n day = day.replace('Sunday', '0')\n day = day.replace('Monday', '1')\n day = day.replace('Tuesday', '2')\n day = day.replace('Wednesday', '3')\n day = day.replace('Thursday', '4')\n day = 
day.replace('Friday', '5')\n day = day.replace('Saturday', '6')\n if len(day) > 0:\n day = int(day)\n dates.append(day)\n times = times.split(',')\n for time in times:\n time = time.replace(',', '').strip().lower()\n hour = time.split(':')[0]\n minute = time.split(':')[1]\n hours.append(hour)\n minutes.append(minute)\n \n if task == 'Light' and len(light_splice) > 1:\n light_splice = light_splice.split(',')\n red = light_splice[0]\n green = light_splice[1]\n blue = light_splice[2]\n brightness = light_splice[3]\n task = 'lights/light_control.py {} {} {} {}'.format(red, green, blue, brightness)\n pretask = 'light_status.py {} {} {} {}'.format(red, green, blue, brightness)\n lightcmd = '$(which python3) /home/pi/IoT-Bootcamp/Project1/code/app/controllers/lights/' + pretask +' >> ~/light-status.log 2>&1'\n job = self.cron.new(command=lightcmd, comment=str(routineId))\n job.dow.on(*dates)\n job.hour.on(*hours)\n job.minute.on(*minutes)\n cmd = '$(which python3) /home/pi/IoT-Bootcamp/Project1/code/app/controllers/'+ task + ' >> ~/cron.log 2>&1'\n \n job = self.cron.new(command=cmd, comment=str(routineId))\n job.dow.on(*dates)\n job.hour.on(*hours)\n job.minute.on(*minutes)\n self.cron.write()\n os.system(\"crontab \" + self.file)\n\n def remove_cron(self, comment):\n self.cron.remove_all(comment=str(comment))\n self.cron.write()\n os.system(\"crontab \" + self.file)\n\n def save_routine(self, routineId, task, days, times, light_splice):\n self.new_cron(routineId, task, days, times, light_splice)\n routine = Routine(id=routineId, task=task, days=days, times=times, light_splice=light_splice)\n db.session.add(routine)\n db.session.commit()\n task = {\n \"id\": routine.id,\n \"task\": routine.task,\n \"days\": routine.days,\n \"times\": routine.times,\n \"light_splice\": routine.light_splice\n }\n return task\n\n def delete_routine(self, routineId):\n self.remove_cron(routineId)\n routine = Routine.query.filter_by(id=routineId).first_or_404()\n db.session.delete(routine)\n db.session.commit()\n return 0\n\n def get_routines(self):\n routines = Routine.query.all()\n response = []\n for r in routines:\n light_splice = ''\n if len(r.light_splice) > 1:\n light_splice = r.light_splice.split(',')\n red = int(light_splice[0])\n green = int(light_splice[1])\n blue = int(light_splice[2])\n brightness = float(light_splice[3])\n light_splice = '#%02x%02x%02x' % (red, green, blue)\n routine = {\n \"id\" : r.id,\n \"task\": r.task,\n \"days\": r.days,\n \"times\": r.times,\n \"light_splice\": light_splice\n }\n response.append(routine)\n return response\n","repo_name":"teznatech/IoT-Bootcamp","sub_path":"Project1/code/app/controllers/routines/routine_control.py","file_name":"routine_control.py","file_ext":"py","file_size_in_byte":4019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25593625115","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nimport time\n\nclass CalendarSelection():\n\n def test1(self):\n baseUrl = \"http://www.expedia.com\"\n driver = webdriver.Firefox()\n driver.maximize_window()\n driver.get(baseUrl)\n driver.implicitly_wait(3)\n\n # Click flights tab\n driver.find_element_by_id(\"tab-flight-tab\").click()\n # Find departing field\n departingField = driver.find_element_by_id(\"flight-departing\")\n # Click departing field\n departingField.click()\n # Find the date to be selected\n # Expedia website has changed the DOM after the lecture was made\n # Updated new xpath\n dateToSelect = 
driver.find_element(By.XPATH,\n \"(//div[@class='datepicker-cal-month'])[1]//button[text()='30']\")\n # Click the date\n dateToSelect.click()\n\n time.sleep(3)\n driver.quit()\n\n def test2(self):\n baseUrl = \"http://www.expedia.com\"\n driver = webdriver.Firefox()\n driver.maximize_window()\n driver.get(baseUrl)\n driver.implicitly_wait(3)\n\n # Click flights tab\n driver.find_element(By.XPATH, \"//button[@id='tab-flight-tab-hp']\").click()\n # Click departing field\n driver.find_element(By.ID, \"flight-departing-hp-flight\").click()\n # Expedia website has changed the DOM after the lecture was made\n # Updated new xpath\n calMonth = driver.find_element(By.XPATH, \"//div[@class='datepicker-cal-month'][1]\") #finding the section, month section, left\n allValidDates = calMonth.find_elements(By.TAG_NAME, \"button\") #Once the section is located, then inside it look for all the \"button\" tags\n\n time.sleep(2)\n\n #with a \"for\" look iterate over the located list of button tags, to search for the date.\n for date in allValidDates:\n # print(date.text) to see what date.text contains\n if \"25\" in date.text:\n date.click()\n break #to get out of the for loop\n driver.implicitly_wait(3)\n #time.sleep(10)\nff = CalendarSelection()\nff.test2()","repo_name":"juancoan/python","sub_path":"Browser_Tests(System Variable 4 WebDriver Path)/Calendar_Demo_2.py","file_name":"Calendar_Demo_2.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23743128384","text":"import ut\nimport itertools\n\n\ndef get_initial_active(dimensions):\n initial_active = {}\n raw_map = ut.read_input().splitlines()\n for y in range(len(raw_map)):\n for x in range(len(raw_map[0])):\n\n if dimensions == 3:\n initial_active[(x, y, 0)] = raw_map[y][x]\n elif dimensions == 4:\n initial_active[(x, y, 0, 0)] = raw_map[y][x]\n\n return initial_active\n\n\ndef get_delta_positions(dimensions):\n if dimensions == 3:\n delta_positions = list(itertools.product([-1, 0, 1], repeat=3))\n delta_positions.remove((0, 0, 0))\n\n elif dimensions == 4:\n delta_positions = list(itertools.product([-1, 0, 1], repeat=4))\n delta_positions.remove((0, 0, 0, 0))\n\n return delta_positions\n\n\nclass ConwayDimension:\n\n def __init__(self, dimensions):\n self.active = get_initial_active(dimensions)\n self.inactive_to_update = []\n self.delta_positions = get_delta_positions(dimensions)\n\n def get_neighbours(self, pos):\n return [ut.pos_add(pos, delta_pos) for delta_pos in self.delta_positions]\n\n # updates a single position in the grid, return the status of the updated position\n def update(self, pos):\n neighbours = self.get_neighbours(pos)\n active_neighbours = 0\n\n for neighbour in neighbours:\n if self.active.get(neighbour, '.') == '#':\n active_neighbours += 1\n else:\n self.inactive_to_update.append(neighbour)\n\n if self.active.get(pos, '.') == '#':\n if active_neighbours in [2, 3]:\n return '#'\n else:\n return '.'\n else:\n if active_neighbours == 3:\n return '#'\n else:\n return '.'\n\n def update_positions(self, positions_to_update, new_grid_state):\n for pos_to_update in positions_to_update:\n new_status = self.update(pos_to_update)\n\n # only add to active if the new status is active\n if new_status == '#':\n new_grid_state[pos_to_update] = '#'\n return new_grid_state\n\n # Update the 3d grid one cycle\n def cycle(self):\n active_to_update = self.active.copy()\n self.inactive_to_update = []\n\n # update all previously active cubes\n 
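# note: update() both classifies each cell and queues its inactive\n        # neighbours in self.inactive_to_update for the second sweep, so only\n        # cells adjacent to activity are ever evaluated\n        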
new_active = self.update_positions(active_to_update, {})\n\n # update all inactive neighbours\n inactive_to_update = dict.fromkeys(self.inactive_to_update).keys()\n new_active = self.update_positions(inactive_to_update, new_active)\n\n # update the active\n self.active = new_active\n\n\ndef part_one():\n conway_dimension = ConwayDimension(3)\n cycles = 6\n for i in range(cycles):\n conway_dimension.cycle()\n ut.print_answer(len(conway_dimension.active))\n\n\ndef part_two():\n conway_dimension = ConwayDimension(4)\n cycles = 6\n for i in range(cycles):\n conway_dimension.cycle()\n ut.print_answer(len(conway_dimension.active))\n\n\n\n","repo_name":"Erasimos/AoC_2020","sub_path":"day_17.py","file_name":"day_17.py","file_ext":"py","file_size_in_byte":3018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29702960655","text":"class Parameters:\n \"\"\"\n A data sack for storing parameters in a collective representation.\n\n \"\"\"\n\n def __init__(self, sync: bool, n: int, d: int, k: int, alpha: float,\n beta: float, lamda: float, ptns: int, report: int, filename: str,\n normalize: bool, bold: bool, verbose: bool, method: str):\n \"\"\"\n Initializes an instance of Parameters\n\n :param sync: Sync/Async\n :param n: Number of workers\n :param d: Duration - Iterations for Sync / Seconds for Async\n :param k: Latent factor\n :param alpha: Learning rate\n :param beta: Decay rate\n :param lamda: Normalization factor\n :param ptns: (deprecated)\n :param report:\n :param filename: Path to sparse matrix ratings file\n :param normalize: Normalizes the matrix if True\n :param bold: Utilize bold-driver tuning if True\n :param method: Partitioning method\n :param verbose: Displays more details through logging if True\n \"\"\"\n self.sync = sync\n self.n = n\n self.d = d\n self.k = k\n self.alpha = alpha\n self.beta = beta\n self.lamda = lamda\n self.ptns = ptns\n self.report = report\n self.filename = filename\n self.normalize = normalize\n self.bold = bold\n self.method = method\n self.verbose = verbose\n","repo_name":"cobelu/PyWander","sub_path":"parameters.py","file_name":"parameters.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25753032001","text":"#!/usr/bin/env python2\n# coding=utf-8\n\nimport json\n\nimport common\nimport mailing\nimport config\n\n\ndef _parse(raw):\n obj = json.loads(raw, encoding=\"utf-8\")\n desc = u\"\"\n for i in range(2):\n d = obj[\"results\"][0][\"weather_data\"][i]\n desc += d[\"date\"] + u\":\" + d[\"weather\"] + u\",\"\n desc += u\"风力\" + d[\"wind\"] + u\",\"\n desc += u\"气温\" + d[\"temperature\"] + u\"。\"\n\n return desc\n\n\ndef main():\n url = \"http://api.map.baidu.com/telematics/v3/weather?location=%s&output=json&ak=%s\"\n raw = common.guarded_read(url % (config.location, config.api_key))\n result = _parse(raw)\n mailing.send_mail(result)\n\n print(\"DONE!\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"vanxining/SmsWeather","sub_path":"weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"1051039068","text":"from django.urls import path\nfrom . 
import views\nimport random\n\nurlpatterns = [\n path(\"\",views.indexPage,name=\"home\"),\n path(\"login/\",views.loginUser,name=\"login\"),\n path(\"addtodo/\",views.addtodoPage,name=\"addtodo\"),\n path(\"logout/\",views.logoutUser,name=\"logout\"),\n path(\"signin/\",views.signInUser,name=\"signin\"),\n path(\"delete/sjbcsxx 122443 dbnxcx x234242 cviuv esjcerfh ucbdcnj d8hi45928cvsdscsdw24334432424v 31y21 cv 928ej vddffdxccxbvdc wddxfgdxfgfdsdw9i k3rwedc/\",views.deleteTodo,name=\"delete\"),\n path(\"update/\",views.updateTodo,name=\"update\"),\n]\n","repo_name":"Pandeyashish17/django-crud-todo-app-with-authentication","sub_path":"todoapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20012902775","text":"n = int(input())\ns = set(map(int, input().split()))\nc = int(input())\nfor i in range(c):\n result = input().split()\n if 'pop' in result:\n s.pop()\n elif 'remove' in result:\n s.remove(int(result[1]))\n elif 'discard' in result:\n s.discard(int(result[1]))\nprint(sum(s))\n","repo_name":"FoRGoG/HackerRank","sub_path":"Python/04. Sets/04. Set .discard(), .remove() & .pop().py","file_name":"04. Set .discard(), .remove() & .pop().py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14684007312","text":"from PIL import Image\nimport numpy as np\npath=input()\nimg=Image.open(path)\nif img.mode == 'L':\n\timg=img.convert('RGB')\nw,h=img.size\nimg=img.resize((300,300),Image.ANTIALIAS)\nimg=np.array(img)\nif len(img.shape)==3:\n\timg=np.swapaxes(img,1,2)\n\timg=np.swapaxes(img,1,0)\nimg=img[[2,1,0],:,:]\nimg=img.astype('float32')\nimg=img-127.5\nimg=img*0.007843\nwith open('mat.txt','w') as f:\n\tfor i in range(3):\n\t\tfor j in range(300):\n\t\t\tfor k in range(300):\n\t\t\t\tf.write(str(img[i][j][k])+'\\n')\n","repo_name":"xingchenyijiu/Car-Infer","sub_path":"ssd/genmat.py","file_name":"genmat.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22543194785","text":"import abc\nfrom datetime import datetime\nfrom typing import List, Union, NoReturn\nfrom users.models.mongo import User\nfrom board.models.mongo import Article, Comment\nfrom board.entities import ArticleEntity, CommentEntity\nfrom projects.exceptions import NotFoundException\nfrom projects.converters import Converter\n\n\nclass BoardRepository:\n __metaclass__ = abc.ABCMeta\n\n @abc.abstractmethod\n def _find_article(self, article_id: int) -> Article:\n raise NotImplementedError\n\n @abc.abstractmethod\n def get_article(self, article_id: int) -> ArticleEntity:\n raise NotImplementedError\n\n @abc.abstractmethod\n def get_article_list(self) -> List[ArticleEntity]:\n raise NotImplementedError\n\n @abc.abstractmethod\n def save_article(self, entity: ArticleEntity) -> Union[ArticleEntity, NoReturn]:\n raise NotImplementedError\n\n @abc.abstractmethod\n def save_comment(self, entity: CommentEntity, article_id: int) -> ArticleEntity:\n raise NotImplementedError\n\n\nclass BoardMongoRepository(BoardRepository):\n def __init__(self):\n self.converter = Converter()\n self.current_time = datetime.now().replace(microsecond=0)\n\n def _find_article(self, article_id: int) -> Article:\n raise NotImplementedError\n\n def get_article(self, article_id: int) -> ArticleEntity:\n raise NotImplementedError\n\n 
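# _find_article and get_article above are left as unimplemented stubs;\n    # only the list and save operations below are supported\n    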
def get_article_list(self) -> List[ArticleEntity]:\n articles = Article.objects.all()\n article_entity = [\n self.converter.model_to_entity(model=article, entity=ArticleEntity)\n for article in articles\n ]\n return article_entity\n\n def save_article(self, entity: ArticleEntity) -> Union[ArticleEntity, NoReturn]:\n user = User.objects(user_id=entity.user_id).first()\n if user is None:\n raise NotFoundException\n\n article = Article(\n user_id=user,\n subject=entity.subject,\n body=entity.body,\n comments=[],\n password=entity.password,\n created_at=self.current_time,\n updated_at=self.current_time\n ).save()\n return self.converter.model_to_entity(model=article, entity=ArticleEntity)\n\n def save_comment(self, entity: CommentEntity, article_id: int) -> ArticleEntity:\n article = Article.objects(article_id=article_id).first()\n user = User.objects(user_id=entity.user_id).first()\n if article is None or user is None:\n raise NotFoundException\n\n comment = Comment(\n user_id=user,\n body=entity.body\n )\n\n article.comments.append(comment)\n article.save()\n\n return self.converter.model_to_entity(model=article, entity=ArticleEntity)\n","repo_name":"teamhide/drf_cookiecutter_mongo","sub_path":"board/repositories/board_repository.py","file_name":"board_repository.py","file_ext":"py","file_size_in_byte":2757,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"20408284447","text":"from sys import stdin\nfrom collections import Counter\n\ndef banolim(n) :\n if n > 0 :\n if n % 1 >= 0.5 :\n return int(n + 0.5)\n else :\n return int(n)\n else :\n if n % -1 <= -0.5 :\n return int(n - 0.5)\n else :\n return int(n)\n\nn = int(input())\nlst = []\n\nfor i in range(n) :\n lst.append(int(stdin.readline().rstrip()))\n\nlst.sort()\n\nprint(banolim(sum(lst) / n)) # 산술평균\nprint(lst[((n - 1) // 2)]) # 중앙값\n\nmode = Counter(lst).most_common()\nif len(mode) <= 2 :\n print(mode[0][0])\nelse :\n print(mode[1][0] if mode[0][1] == mode[1][1] else mode[0][0]) # 최빈값\n\nprint(max(lst) - min(lst)) # 범위","repo_name":"importjason/Baekjoon","sub_path":"백준/Silver/2108.통계학/통계학.py","file_name":"통계학.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31380388172","text":"# for i in range(900):\n# print(\"hello\")\n#\n# sum=0\n# n=int(input(\"enter a num\"))\n# for i in range(1,n+1):\n# sum+=i\n#\n# my_lst=[4,1,8,\"fs\",True]\n#\n# for elem in my_lst:\n# print(elem)\n#\n# my_str=\"ejkfgefk\"\n# for char in my_str:\n# print(char)\n#\n# for i in range(len(my_str)):\n# if my_str[i]==\"f\":\n# print(i)\n# break\n# else:\n# print(\"not f\")\n#\n# lst=[7,8,2]\n# print(lst[0])\n# lst[1]=0\n# lst.append(5)\n#\n# my_tuple=(7,8,2,4)\n# print(my_tuple[0])\nimport random\n\nmy_set={1,5,2,3,1,2,5,6,1}\n# print(my_set)\n# for i in my_set:\n# print(i)\n\nlst_set=list(my_set)\nprint(lst_set)\n\nnum=589345\n\n# for i in num:\n\n#3:\n# n=int(input(\"give me a N\"))\n# for i in range(1,n+1):\n# print(((n-i)*\" \")+(i*\"*\"))\n\n\n#6:\n# num=int(input(\"enter a num\"))\n# list1=[]\n# for i in range(num):\n# x=float(input(\"enter a number\"))\n# list1.append(x)\n# print(max(list1))\n\n\n# num=int(input(\"enter a num\"))\n# maxi=0\n# for i in range(num):\n# x=float(input(\"enter a number\"))\n# if i==0:\n# maxi=x\n# else:\n# if x>maxi:\n# maxi=x\n# print(maxi)\n#\n# n= int(input(\"enter a number\"))\n# for i in range(2,n):\n# if n%i==0:\n# print(\"not prime\")\n# break\n# else:\n# 
print(\"prime\")\n\n# for hour in range(0,24):\n# for minute in range(0,60):\n# if hour<10 and minute<10:\n# print(f\"0{hour}:0{minute}\")\n# elif hour<10:\n# print(f\"0{hour}:{minute}\")\n# elif minute<10:\n# print(f\"{hour}:0{minute}\")\n# else:\n# print(f\"{hour}:{minute}\")\n\nlist1=[]\ncounter_list=[]\nfor i in range(100):\n x=random.randint(0,99)\n list1.append(x)\n counter_list.append(0)\nfor number in list1:\n counter_list[number]+=1\nprint(list1)\nprint(counter_list)","repo_name":"ohad1s/Intro_to_Python","sub_path":"SemB/TA4/second.py","file_name":"second.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8120867481","text":"# Scrapy settings for yelpspider project\n#\n# For simplicity, this file contains only the most important settings by\n# default. All the other settings are documented here:\n#\n# http://doc.scrapy.org/en/latest/topics/settings.html\n#\n\nBOT_NAME = 'yelpspider'\n\nSPIDER_MODULES = ['yelpspider.spiders']\nNEWSPIDER_MODULE = 'yelpspider.spiders'\n\n# Crawl responsibly by identifying yourself (and your website) on the user-agent\n#USER_AGENT = 'yelpspider (+http://www.yourdomain.com)'\n","repo_name":"Billibilli/yelp-scraper","sub_path":"yelpspider/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"81"} +{"seq_id":"17816157239","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 8 21:30:06 2017\n\n@author: maha\n\"\"\"\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom functools import reduce\nimport re\nimport pandas as pd\nimport unidecode\n\n\nURL_ROOT = 'https://www.leboncoin.fr/voitures/offres/'\nURL_PART = '/?q=renault%20zoe&f=p'\nURL_PRO = '?q=renault%20zoe&f=c'\n\nURL_MODEL_ROOT = 'https://www.lacentrale.fr/cote-voitures-renault-zoe-'\nURL_COTE_ROOT = 'https://www.lacentrale.fr/cote-auto-renault-zoe'\n\nMODEL_COTES = ['intens+charge+rapide']\n\nREGIONS=['ile_de_france','aquitaine','provence_alpes_cote_d_azur']\nANNEES = ['2012','2013','2014','2015','2016','2017']\nSUFFIX = '.html'\n\n#MODEL_LIST2= getModelFromLaCentrale()\nMODEL_LIST =['INTENS','INTENS','LIFE','ZEN']\n\nMODEL_COTES2= {'INTENS': 'intens+charge+rapide',\n 'INTENSE':'intens+charge+rapide',\n 'LIFE': 'life+charge+rapide',\n 'ZEN': 'zen+charge+rapide'\n }\n\ndef getSoupFromURL(url, method='get', data={}):\n\n try:\n if method == 'get':\n res = requests.get(url)\n elif method == 'post':\n res = requests.post(url, data=data)\n else:\n return None\n\n if(res.status_code != 200):\n print(\n \"status code is invalid [%s] for url: %s\", res.status_code, url)\n return None\n if res.status_code == 200:\n return BeautifulSoup(res.content, 'html.parser')\n else:\n return None\n except requests.exceptions.ConnectionError as e:\n print(\"HTTP connexion error or Invalid URL: %s\", url)\n return None\n \ndef getsubUrls(url):\n \n soup = getSoupFromURL(url)\n listsubA = [subLi.find(\"a\") for subLi in \n soup.find_all('li',itemtype=\"http://schema.org/Offer\")]\n subUrls = [subA['href'] for subA in listsubA ]\n \n print(subUrls)\n return subUrls\n\n# The function that extract the properties of Zoe cars from Leboncoin\n# this fill the dataframe with car version, car Km, car price, car description, car \"cote\" and the type of the seller\n \ndef getCarProperties():\n region_urls = {}\n \n # Build url based on region\n URLS_sub1 = {region: URL_ROOT+ 
region for region in REGIONS}\n \n # Build url based on type of seller (particulier/professionnel)\n URLS = {region:{'PART': url_sub1 + URL_PART , 'PRO': url_sub1 + URL_PRO} \\\n for region,url_sub1 in URLS_sub1.items()}\n \n columnNames=[\"version\",\"annee\",\"kilometrage\",\"prix\",\"telephone\",\"proprietaire\",\"phone\",\"description\",\"region\"]\n \n carRows = []\n \n #for rootUrl in URLS:\n for region, urls in URLS.items():\n for seltype,rootUrl in urls.items():\n subUrls = getsubUrls(rootUrl) # get the href related to cars for specific seller type\n proprietaire = seltype\n telephone =''\n version=''\n \n for url in subUrls: # sub url to extract car properties (per car)\n print(url)\n soupCar = getSoupFromURL(\"http:\"+ url)\n price = soupCar.find_all(class_= \"item_price clearfix\")[0].\\\n find(class_=\"value\").getText().strip()\n price = unidecode.unidecode(price).strip()\n price = int(re.sub(r\"\\s+\",\"\", price.strip('EUR').strip(' '), flags=re.UNICODE))\n re.I\n kilometrage = int(\"\".join(re.findall(\"([0-9 ]+) ?km\", str(soupCar.find_all(class_= \"value\")),\\\n re.I)[0].strip().split()))\n annee = soupCar.find_all(class_= \"value\",itemprop=\"releaseDate\")[0].getText().strip()\n \n description = soupCar.find_all(class_= \"value\",itemprop=\"description\")[0].getText()\n Intense_Found = len(re.findall(\"(intenses)\",str(description).lower()))!=0\n \n carRow = {'version': version,'annee':annee,\\\n 'kilometrage':kilometrage,'prix':price,'telephone':telephone,\\\n 'proprietaire':proprietaire,'region':region, 'description':description}\n \n carRows.append(carRow)\n \n #print(carRows)\n df = pd.DataFrame(carRows,columns=columnNames)\n df['phone'] = df['description'].apply(extract_phoneNumber)\n df['model']= df['description'].apply(extract_model)\n print('Start Extracting cotes...')\n df['cote'] = df.apply(lambda x: extract_cote(x['annee'], x['model']), axis=1)\n print('Extracting terminated.')\n # df.apply(f, axis=1)\n return(df)\n\n\n# Function that extract the \"cote\" from Lacentrale based on the model end the year\n \ndef extract_cote(x,y):\n \n year= str(x)\n soup = getSoupFromURL(URL_COTE_ROOT + '-'+MODEL_COTES2.get(str(y),'intens+charge+rapide')+'-'+year+SUFFIX)\n cote = int(\"\".join(unidecode.unidecode(soup.find(class_='jsRefinedQuot').getText()).split()))\n \n return cote\n\n\n\n# Function that extract the \"model\" from the description of the car based on the provided model list\ndef extract_model(x):\n \n for model in MODEL_LIST:\n \n res = re.search( r'(.*) ' + model + ' (.*?) 
.*',x,re.M|re.I) \n if res!= None : \n return model #'INTENS CHARGE RAPIDE'\n \n else: return \n \n#Function that extract the \"Telephone number\" from the description \ndef extract_phoneNumber(x):\n \n res = re.findall(r'((?:\\+33\\s|0)[1-9](?:[\\s.-]*\\d{2}){4})',x,re.I)\n \n if len(res)!=0 : \n return res[0] \n \n else: return \n# Function that extract the list of car models from Lacentrale\ndef getModelFromLaCentrale():\n \n modelList = {}\n \n for annee in ANNEES:\n url= URL_MODEL_ROOT+ \"-\"+ annee +\"-\" + SUFFIX \n modelList[annee]= [model.getText(url) for model in getSoupFromURL(url).\\\n find(id='listing_quot').find_all('h3')]\n \n print(modelList)\n return modelList\n\n# Display the cars with price > argus Cote \ndf_car = getCarProperties()\ndf_expensive = df_car[df_car['prix'] > df_car['cote']]\nprint(\" The list of the cars for which the price is superior than Argus rating regardless of Km:\\n\" ,\\\n df_expensive[['model','annee','kilometrage','prix','cote','proprietaire','phone','region']])","repo_name":"MS-BGD-2018-KIT-BIGDATA/maha-bousbaa","sub_path":"Lesson_4/exo_dom_lesson4.py","file_name":"exo_dom_lesson4.py","file_ext":"py","file_size_in_byte":6227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34304232475","text":"import RPi.GPIO as GPIO\nimport time\n\n\nREDLIGHT = 37\nGREENLIGHT = 35\nMOTOR = 38\nP_SERVO = MOTOR\nBUTTON = 16\n\nfPWM = 50\n\n\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BOARD)\n\n#Lights\nGPIO.setup(REDLIGHT,GPIO.OUT)\nGPIO.setup(GREENLIGHT,GPIO.OUT)\n\n#Motors\nGPIO.setup(MOTOR,GPIO.OUT)\n\nopened = 1\nclosed = 0\nstatus = closed\n\ndef init_door():\n print(\"Hello world\")\n \ndef rotate_up():\n a=10\n b=2\n direction=50\n time.sleep(0.5)\n #GPIO.output(MOTOR,GPIO.HIGH)\n pwm = GPIO.PWM(P_SERVO, fPWM)\n pwm.start(0)\n duty = a / 180 * direction + b\n pwm.ChangeDutyCycle(duty)\n print (\"direction =\", direction, \"-> duty =\", duty)\n \n time.sleep(5)\n GPIO.output(MOTOR,GPIO.LOW)\n \ndef rotate_down():\n time.sleep(0.5)\n GPIO.output(MOTOR,GPIO.HIGH)\n time.sleep(5)\n GPIO.output(MOTOR,GPIO.LOW)\n \ndef exit_door(): \n GPIO.output(REDLIGHT,GPIO.LOW)\n GPIO.output(GREENLIGHT,GPIO.LOW)\n GPIO.output(MOTOR,GPIO.LOW)\n GPIO.cleanup()\n print(\"\\nEnd of program\\n\")\n return\n\ndef close_door():\n status=closed\n GPIO.output(REDLIGHT,GPIO.HIGH)\n GPIO.output(GREENLIGHT,GPIO.LOW)\n rotate_down()\n \ndef open_door():\n status=opened\n GPIO.output(REDLIGHT,GPIO.LOW)\n GPIO.output(GREENLIGHT,GPIO.HIGH)\n rotate_up()\n\n\ntry:\n init_door()\n while True: \n open_door()\n time.sleep(1)\n close_door()\n time.sleep(1)\n \nexcept KeyboardInterrupt:\n exit_door()\n \n\n \n","repo_name":"arnaudbenistant/robot","sub_path":"tests/motor.py","file_name":"motor.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9567511646","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass ResidualBlock(nn.Module):\n def __init__(self, in_features):\n super(ResidualBlock, self).__init__()\n\n conv_block = [\n nn.ReflectionPad2d(1),\n nn.Conv2d(in_features, in_features, 3),\n nn.InstanceNorm2d(in_features),\n nn.ReLU(inplace=True),\n nn.ReflectionPad2d(1),\n nn.Conv2d(in_features, in_features, 3),\n nn.InstanceNorm2d(in_features)]\n\n self.conv_block = nn.Sequential(*conv_block)\n\n def forward(self, x):\n return x + self.conv_block(x)\n\n\nclass Generator(nn.Module):\n def 
__init__(self, input_nc, output_nc, n_residual_blocks=9):\n super(Generator, self).__init__()\n\n # Initial convolution block\n model = [\n nn.ReflectionPad2d(3),\n nn.Conv2d(input_nc, 64, 7),\n nn.InstanceNorm2d(64),\n nn.ReLU(inplace=True)]\n\n # Downsampling\n in_features = 64\n out_features = in_features*2\n for _ in range(2):\n model += [\n nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),\n nn.InstanceNorm2d(out_features),\n nn.ReLU(inplace=True)]\n in_features = out_features\n out_features = in_features*2\n\n # Residual blocks\n for _ in range(n_residual_blocks):\n model += [ResidualBlock(in_features)]\n\n # Upsampling\n out_features = in_features//2\n for _ in range(2):\n model += [\n nn.ConvTranspose2d(\n in_features, out_features,\n 3, stride=2, padding=1, output_padding=1),\n nn.InstanceNorm2d(out_features),\n nn.ReLU(inplace=True)]\n in_features = out_features\n out_features = in_features//2\n\n # Output layer\n model += [\n nn.ReflectionPad2d(3),\n nn.Conv2d(64, output_nc, 7),\n nn.Tanh()]\n\n self.model = nn.Sequential(*model)\n\n def forward(self, x):\n return self.model(x)\n\n\nclass Discriminator(nn.Module):\n def __init__(self, input_nc):\n super(Discriminator, self).__init__()\n\n # A bunch of convolutions one after another\n model = [\n nn.Conv2d(input_nc, 64, 4, stride=2, padding=1),\n nn.LeakyReLU(0.2, inplace=True)]\n\n model += [\n nn.Conv2d(64, 128, 4, stride=2, padding=1),\n nn.InstanceNorm2d(128),\n nn.LeakyReLU(0.2, inplace=True)]\n\n model += [\n nn.Conv2d(128, 256, 4, stride=2, padding=1),\n nn.InstanceNorm2d(256),\n nn.LeakyReLU(0.2, inplace=True)]\n\n model += [\n nn.Conv2d(256, 512, 4, padding=1),\n nn.InstanceNorm2d(512),\n nn.LeakyReLU(0.2, inplace=True)]\n\n # FCN classification layer\n model += [nn.Conv2d(512, 1, 4, padding=1)]\n\n self.model = nn.Sequential(*model)\n\n def forward(self, x):\n x = self.model(x)\n # Average pooling and flatten\n return F.avg_pool2d(x, x.size()[2:]).view(x.size()[0], -1)\n\n\nclass VAE(nn.Module):\n def __init__(self):\n super(VAE, self).__init__()\n\n self.encode = nn.Sequential(\n # 32\n nn.Conv2d(3, 16, kernel_size=3, stride=2, padding=1),\n nn.BatchNorm2d(16),\n nn.ReLU(),\n # 16\n nn.Conv2d(16, 32, kernel_size=3, stride=2, padding=1),\n nn.BatchNorm2d(32),\n nn.ReLU(),\n # 8\n nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU(),\n # 4\n )\n self.fc_e1 = nn.Linear(1024, 64)\n self.fc_e2 = nn.Linear(1024, 64)\n self.fc_d = nn.Linear(64, 1024)\n self.decode = nn.Sequential(\n # 4\n nn.ConvTranspose2d(64, 32, 3, 2, padding=1, output_padding=1),\n nn.BatchNorm2d(32),\n nn.ReLU(),\n # 8\n nn.ConvTranspose2d(32, 16, 3, 2, padding=1, output_padding=1),\n nn.BatchNorm2d(16),\n nn.ReLU(),\n # 16\n nn.ConvTranspose2d(16, 3, 3, 2, padding=1, output_padding=1),\n nn.Sigmoid()\n # 32\n )\n self.image_size = 32\n self.batch_size = 64\n\n def vae_encode(self, x):\n h1 = self.encode(x)\n h1 = h1.view(h1.size(0), -1)\n return self.fc_e1(h1), self.fc_e2(h1)\n\n def reparameterize(self, mu, logvar):\n std = torch.exp(0.5*logvar)\n eps = torch.randn_like(std)\n return mu + eps*std\n\n def vae_decode(self, z):\n h2 = self.fc_d(z)\n h2 = h2.view(self.batch_size, 64, 4, 4)\n return self.decode(h2)\n\n def forward(self, x):\n self.batch_size = x.size(0)\n self.image_size = x.size(1)\n # encode\n mu, logvar = self.vae_encode(x)\n mu = mu.view(self.batch_size, -1)\n logvar = logvar.view(x.size(0), -1)\n # reparam\n z = self.reparameterize(mu, logvar)\n # decode\n output = self.vae_decode(z)\n 
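# mu and logvar are returned alongside the reconstruction so the caller\n        # can form the usual VAE objective (reconstruction term + KL divergence)\n        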
return output, mu, logvar\n","repo_name":"james77777778/NCTU-DeepLearning-2019","sub_path":"DL_HW3/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73027147145","text":"import sys\nfrom datetime import datetime\n\nfrom PyQt5.QtCore import pyqtSignal\nfrom PyQt5.QtGui import QPixmap\nfrom PyQt5.QtWidgets import QWidget, QApplication, QFileDialog\n\nimport RPi.GPIO as gpio\nfrom .interfaces.widget_buttons import Ui_Form as Ui_Buttons\n \n\nclass ButtonsWidget(QWidget):\n\n def __init__(self, parent=None, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.ui = Ui_Buttons()\n self.ui.setupUi(self)\n self.parent = parent\n self.image_name_default = str()\n self.video_name_default = str()\n\n self.image_save_signal = pyqtSignal\n self.exit_signal = pyqtSignal\n self.set_default_signal = pyqtSignal\n self.__setup_buttons()\n self.ui.stopButton.setEnabled(False)\n self.set_pixmap('green')\n \n self.formats = ['.jpeg', '.png', '.bmp']\n\n def __setup_buttons(self):\n self.ui.captureButton.pressed.connect(self.__capture_button_pressed)\n self.ui.captureseriesButton.pressed.connect(self.__capture_series_button_pressed)\n self.ui.startButton.pressed.connect(self.__start_recording_button_pressed)\n self.ui.stopButton.pressed.connect(self.__stop_recording_button_pressed)\n self.ui.setdefaultButton.pressed.connect(self.__set_default_button_pressed)\n self.ui.exitButton.pressed.connect(self.__exit_button_pressed)\n\n def set_image_name_default(self, name: str = 'Image') -> None:\n \"\"\"\n Set name by default for images\n :param name:\n :return:\n \"\"\"\n self.image_name_default = name\n self.video_name_default = name\n\n def set_video_name_default(self, name: str = 'Video') -> None:\n \"\"\"\n Set name by default for video\n :param name:\n :return:\n \"\"\"\n self.image_name_default = name\n\n def set_bubble_color(self, color: str = 'white') -> None:\n \"\"\"\n Set QLabel with a new color according to current state of App:\n green - Ready to Use\n red - Occupied by capturing video\n white - No signal from camer\n Basic Setting is for white\n :param color:\n :return:\n \"\"\"\n if type(color) is str:\n style = f'background-color:{color}'\n self.ui.indicator.setStyleSheet(style)\n else:\n raise NameError('No such color in the list')\n\n def save_image(self, name=False):\n if not name:\n name = QFileDialog.getSaveFileName(caption='Open Image', filter=\"Image Files (*.png *.jpg *.bmp *.tiff)\")\n else:\n self.image_save_signal()\n\n def set_pixmap(self, color: str):\n \"\"\"\n Set pixmap for indicator\n Acsess color as a parameter:\n green for green aquarium .ico\n red for red aquarium .ico\n :param color:\n \"\"\"\n if color == 'green':\n self.ui.indicator.setPixmap(QPixmap('/home/pi/Desktop/RPiCameraApp_newest_v/source/icons/green_aquarium.png'))\n elif color == 'red':\n self.ui.indicator.setPixmap(QPixmap('/home/pi/Desktop/RPiCameraApp_newest_v/source/icons/red_aquarium.png'))\n else:\n raise ValueError(f'No such color {color} available')\n\n def __capture_button_pressed(self):\n self.set_pixmap('red')\n \n name, res, br, iso, ss, method = self.__get_name_and_parameter()\n im_format = '.png'\n image_name = f'{name[0]}_{res}_{br}_{iso}_{ss}_{method}'\n self.parent.camera.camera.capture(image_name + im_format)\n \n self.set_pixmap('green')\n\n def __capture_series_button_pressed(self):\n self.set_pixmap('red')\n \n name, res, br, iso, ss, method = 
self.__get_name_and_parameter()\n image_name = f'{name[0]}_{res}_{br}_{iso}_{ss}_{method}' \n for i in range(len(self.formats)):\n self.parent.camera.camera.capture(image_name+self.formats[i])\n \n self.set_pixmap('green')\n \n def __get_name_and_parameter(self,is_video=False):\n if is_video:\n name = QFileDialog.getSaveFileName(caption='Save Video As', filter=\"Video Files (*.h264 *.mpeg *.mp4 *)\")\n else:\n name = QFileDialog.getSaveFileName(caption='Open Image', filter=\"Image Files (*.png *.jpg *.bmp *)\")\n if not name:\n return\n res = self.parent.camera_additional.get_current_resolution()\n br = self.parent.camera_basic.get_current_brightness()\n iso = self.parent.camera_basic.get_current_iso()\n ss = self.parent.camera_basic.get_current_shutter_speed()\n method = self.parent.camera_additional.get_current_wb_mode()\n if ' ' in method:\n method.replace(' ','_')\n return name,res,br,iso,ss,method\n \n def __start_recording_button_pressed(self):\n try:\n name, res, br, iso, ss, method = self.__get_name_and_parameter(is_video=True)\n self.video_default_name = f'{name[0]}_{res}_{br}_{iso}_{ss}_{method}'\n self.parent.camera.camera.start_recording(self.video_default_name + '.h264')\n except (NameError, ValueError, IndexError, PiCameraError,Exception) as exc:\n self.__add_to_log(exc) \n else:\n self.ui.stopButton.setEnabled(True)\n self.set_pixmap('red')\n \n\n def __stop_recording_button_pressed(self):\n try:\n self.parent.camera.camera.stop_recording()\n except (NameError, ValueError, IndexError, PiCameraError, Exception) as exc:\n self.__add_to_log(exc) \n else:\n self.ui.stopButton.setEnabled(False)\n self.set_pixmap('green')\n \n def __set_default_button_pressed(self):\n self.parent.camera.camera.stop_preview()\n self.parent.set_slider_brightness(50)\n self.parent.set_slider_contrast(0)\n self.parent.set_slider_iso(100)\n self.parent.set_slider_shutter_speed(12000)\n self.parent.set_slider_gain_r(1.1)\n self.parent.set_slider_gain_b(1.05)\n self.parent.camera.change_brightness(50)\n self.parent.camera.change_contrast(0)\n self.parent.camera.change_iso(100)\n self.parent.camera.change_shutter_speed(12000)\n self.parent.camera.change_gain_r(1.1)\n self.parent.camera.change_gain_b(1.05)\n self.parent.camera.change_awb_mode('auto')\n self.parent.camera.change_exposure_mode('off')\n self.parent.camera.change_resolution((1920,1080))\n self.parent.camera.camera.start_preview(fullscreen=False,window=(800,150,920,720))\n\n def __exit_button_pressed(self):\n try:\n gpio.cleanup()\n self.parent.close()\n except (RuntimeWarning, Exception) as error:\n self.__add_to_log(error)\n \n def __add_to_log(self,to_log:str):\n time = datetime.now()\n with open('Rpi_application_log.log','a+') as log:\n log.write(f\"{time}: {to_log} \")\n \nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n window = ButtonsWidget()\n window.show()\n sys.exit(app.exec())\n","repo_name":"plailok/RPiCamera_Controller","sub_path":"source/WIDGET_Buttons.py","file_name":"WIDGET_Buttons.py","file_ext":"py","file_size_in_byte":7108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15676170945","text":"# TUWIEN - WS2020 CV: Task2 - Image Stitching\n# *********+++++++++*******++++INSERT GROUP NO. 
HERE\nfrom typing import List, Tuple\nfrom matplotlib.pyplot import angle_spectrum\nimport numpy as np\n# from cyvlfeat.sift.sift import sift\n# import cyvlfeat\nimport os\nimport cv2\n\n\ndef get_panorama_data(path: str) -> Tuple[List[np.ndarray], List[cv2.KeyPoint], List[np.ndarray]]:\n # Loop through images in given folder, extract SIFT points\n # and return images, keypoints and descriptors\n # This time we need to work with color images. Since OpenCV uses BGR you need to swap the channels to RGB\n # HINT: os.walk(..), cv2.imread(..), sift=cv2.SIFT_create(), sift.detectAndCompute(..)\n # path : path to image folder\n\n # student_code start\n img_data = []\n all_keypoints = []\n all_descriptors = []\n for root, dirs, files in os.walk(path, topdown = True):\n for f in files:\n print(f)\n im = cv2.imread(os.path.join(root, f))\n im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)\n img_data.append(im)\n im_gray = cv2.cvtColor(im, cv2.COLOR_RGB2GRAY)\n sift = cv2.SIFT_create()\n kp, desc = sift.detectAndCompute(im_gray, None)\n all_keypoints.append(kp)\n all_descriptors.append(desc)\n # kp_img=cv2.drawKeypoints(im_gray,kp,None,flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n # cv2.imwrite(f\"results/kp_{f}\", kp_img)\n \n # student_code end\n\n # img_data : list of images\n # all_keypoints : list of keypoints ([number_of_images x number_of_keypoints] - KeyPoint)\n # all_descriptors : list of descriptors ([number_of_images x num_of_keypoints, 128] - float)\n return img_data, all_keypoints, all_descriptors\n","repo_name":"Normale/tuw_cv","sub_path":"TASK2/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23487328210","text":"# -*- coding: utf-8 -*-\n\"\"\"\n1、加载样板图片\n2、判断样板图片和截图是否一致\n\nload 函数的作用\n\"\"\"\n\n\nimport os\nfrom PIL import Image\nfrom io import BytesIO\n\n#◆◆◆◆◆◆◆◆◆◆◆◆◆◆◆◆◆◆◆◆◆◆◆◆◆◆◆◆◆◆\n#SAMPLE_IMAGEP_FOLDER = os.path.dirname(os.getcwd()) + '\\\\sampleImage\\\\'\nSAMPLE_IMAGEP_FOLDER = '../sampleImage/'\nCARD_A_DATA = 0 #card a binary data\n\n\n\n#◆◆◆◆◆◆◆◆◆◆◆◆◆◆◆◆◆◆◆◆◆◆◆◆◆◆◆◆◆◆\n\ndef getCardA() -> object:\n file = SAMPLE_IMAGEP_FOLDER + 'a.png'\n im = Image.open(file)\n return im.convert('L')\n\ndef getCardSample():\n file = SAMPLE_IMAGEP_FOLDER + 'sample.png'\n im = Image.open(file)\n return im.convert('L')\n\ndef getCardTest():\n file = SAMPLE_IMAGEP_FOLDER + 'tn.png'\n im = Image.open(file)\n return im\n\ndef getCardDragonSample():\n file = SAMPLE_IMAGEP_FOLDER + 'dragonSample.png'\n im = Image.open(file)\n return im.convert('L')\n\ndef getScreenShot(x = 0, y = 0, width = 600, height = 600):\n box = (x, y, width, height)\n im = ImageGrab.grab(box)\n return im\n\ndef getScreenShotAndSaved(x = 0, y = 0, width = 600, height = 600):\n ts = int(time.time() * 1000)\n name = str(PICTURE_PATH) + str(ts) + '.png'\n box = (x, y, width, height)\n im = ImageGrab.grab(box)\n im.save(name, 'PNG')\n return im\n\ndef checkTwoImage(searchImage, fatherImage, x = 0, y = 0):\n fw, fh = fatherImage.size\n sw, sh = searchImage.size\n\n num = 0\n\n for w in range(x, fw):\n for h in range(y, fh):\n \n isFound = True\n for iw in range(0, sw):\n for ih in range(0, sh):\n num = num + 1\n\n tw = w + iw\n th = h + ih\n\n fPixel = fatherImage.getpixel((tw, th))\n sPixel = searchImage.getpixel((iw, ih))\n\n\n if fPixel == sPixel:\n continue\n\n if fPixel != sPixel:\n isFound = False\n break\n\n if isFound == False :\n break\n\n \n else:\n print('num=',num)\n return 
False\n\ndef main():\n #GlobalUtils.MessLog(\"begain\")\n cardA = getCardA()\n cardSample = getCardSample()\n cardTest = getCardTest()\n cardDragonSample = getCardDragonSample()\n w,h = cardA.size\n\n out = cardTest.transpose(Image.FLIP_LEFT_RIGHT);\n out.save(\"../sampleImage/out1.png\",\"PNG\");\n\n#box = (820, 870, 860, 910) #tiger\n box = (820, 750, 860, 790) #he\n #region = cardDragonSample.crop(box)\n region = cardA.convert(\"L\")\n region.save(\"../sampleImage/hecc.png\",\"PNG\")\n\n #res = checkTwoImage(region, cardSample)\n\n cardSample.show()\n\nif __name__ == '__main__':\n main()","repo_name":"1078559858/js","sub_path":"svn/code/majiangAuto/common/checkImage.py","file_name":"checkImage.py","file_ext":"py","file_size_in_byte":2832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10485727006","text":"\"\"\"\n tasks for elastic search\n\"\"\"\n\nfrom elasticsearch import Elasticsearch\nfrom gevent import sleep\nfrom sqlalchemy.orm import joinedload\n\nfrom app import flaskapp, db\nfrom app.resources.users.models import User\nfrom app.resources.accounts.models import Account\nfrom app.corporate_access_resources.corporate_access_event_invitees.models \\\n import CorporateAccessEventInvitee\nfrom app.corporate_access_resources.corporate_access_event_collaborators.models\\\n import CorporateAccessEventCollaborator\nfrom app.corporate_access_resources.corporate_access_event_participants.models\\\n import CorporateAccessEventParticipant\nfrom app.corporate_access_resources.corporate_access_event_hosts.models \\\n import CorporateAccessEventHost\nfrom app.corporate_access_resources.corporate_access_event_rsvps.models \\\n import CorporateAccessEventRSVP\nfrom app.webinar_resources.webinar_invitees.models import WebinarInvitee\nfrom app.webinar_resources.webinar_participants.models \\\n import WebinarParticipant\nfrom app.webinar_resources.webinar_hosts.models import WebinarHost\nfrom app.webinar_resources.webinar_rsvps.models import WebinarRSVP\nfrom app.webcast_resources.webcast_invitees.models import WebcastInvitee\nfrom app.webcast_resources.webcast_participants.models import (\n WebcastParticipant)\nfrom app.webcast_resources.webcast_hosts.models import WebcastHost\nfrom app.webcast_resources.webcast_rsvps.models import WebcastRSVP\n\nfrom app.base import constants as APP\n\nfrom queueapp.tasks import celery_app, logger\n\nes = Elasticsearch([flaskapp.config['ELASTICSEARCH_URL']])\n\n\n@celery_app.task(bind=True, ignore_result=True)\ndef update_elastic_document(self, row_id, model_name, account_id,\n corporate_account_id, user_id, from_script,\n *args, **kwargs):\n \"\"\"\n insert related user emails in document so that only those can search\n the document.\n \"\"\"\n\n try:\n if not from_script:\n sleep(.1)\n index = APP.DF_ES_INDEX\n doc_id = model_name + '_' + str(row_id)\n user_emails = []\n body = {'doc': {}}\n if model_name == 'CorporateAccessEvent':\n Invitee = CorporateAccessEventInvitee\n invitees = Invitee.query.join(\n User, Invitee.user_id == User.row_id,\n isouter=True).with_entities(\n Invitee.invitee_email, User.email).filter(\n Invitee.corporate_access_event_id == row_id).all()\n\n for invitee in invitees:\n email = invitee.email\n if not email:\n email = invitee.invitee_email\n if email:\n user_emails.append(email)\n\n Collab = CorporateAccessEventCollaborator\n collaborators = db.session.query(\n Collab.row_id.label('row_id'), User.email.label('email')\n ).join(User, Collab.collaborator_id == 
User.row_id).filter(\n Collab.corporate_access_event_id == row_id)\n\n collaborators = list(collaborators)\n for collaborator in collaborators:\n if collaborator.email:\n user_emails.append(collaborator.email)\n\n Participant = CorporateAccessEventParticipant\n participants = Participant.query.join(\n User, User.row_id == Participant.participant_id,\n isouter=True).with_entities(\n User.email, Participant.participant_email).filter(\n Participant.corporate_access_event_id == row_id)\n\n for participant in list(participants):\n email = participant.email\n if not email:\n email = participant.participant_email\n if email:\n user_emails.append(email)\n\n Host = CorporateAccessEventHost\n hosts = Host.query.join(\n User, Host.host_id == User.row_id).with_entities(\n User.email, Host.host_email).filter(\n Host.corporate_access_event_id == row_id)\n\n for host in list(hosts):\n email = host.email\n if not email:\n email = host.host_email\n if email:\n user_emails.append(email)\n\n Rsvp = CorporateAccessEventRSVP\n rsvps = Rsvp.query.with_entities( Rsvp.email).filter(\n Rsvp.corporate_access_event_id == row_id)\n\n for rsvp in list(rsvps):\n email = rsvp.email\n if email:\n user_emails.append(email)\n\n if model_name == 'Webinar':\n invitees = WebinarInvitee.query.join(\n User, WebinarInvitee.invitee_id == User.row_id,\n isouter=True).with_entities(\n WebinarInvitee.invitee_email, User.email).filter(\n WebinarInvitee.webinar_id == row_id).all()\n\n for invitee in invitees:\n email = invitee.email\n if not email:\n email = invitee.invitee_email\n if email:\n user_emails.append(email)\n\n Participant = WebinarParticipant\n participants = Participant.query.join(\n User, User.row_id == Participant.participant_id,\n isouter=True).with_entities(\n User.email, Participant.participant_email).filter(\n Participant.webinar_id == row_id)\n\n for participant in list(participants):\n email = participant.email\n if not email:\n email = participant.participant_email\n if email:\n user_emails.append(email)\n\n Host = WebinarHost\n hosts = Host.query.join(\n User, Host.host_id == User.row_id).with_entities(\n User.email, Host.host_email).filter(\n Host.webinar_id == row_id)\n\n for host in list(hosts):\n email = host.email\n if not email:\n email = host.host_email\n if email:\n user_emails.append(email)\n\n Rsvp = WebinarRSVP\n rsvps = Rsvp.query.with_entities(Rsvp.email).filter(\n Rsvp.webinar_id == row_id)\n\n for rsvp in list(rsvps):\n email = rsvp.email\n if email:\n user_emails.append(email)\n\n if model_name == 'Webcast':\n invitees = WebcastInvitee.query.join(\n User, WebcastInvitee.invitee_id == User.row_id,\n isouter=True).with_entities(\n WebcastInvitee.invitee_email, User.email).filter(\n WebcastInvitee.webcast_id == row_id).all()\n\n for invitee in invitees:\n email = invitee.email\n if not email:\n email = invitee.invitee_email\n if email:\n user_emails.append(email)\n\n Participant = WebcastParticipant\n participants = Participant.query.join(\n User, User.row_id == Participant.participant_id,\n isouter=True).with_entities(\n User.email, Participant.participant_email).filter(\n Participant.webcast_id == row_id)\n\n for participant in list(participants):\n email = participant.email\n if not email:\n email = participant.participant_email\n if email:\n user_emails.append(email)\n\n Host = WebcastHost\n hosts = Host.query.join(\n User, Host.host_id == User.row_id).with_entities(\n User.email, Host.host_email).filter(\n Host.webcast_id == row_id)\n\n for host in list(hosts):\n email = host.email\n if not 
email:\n email = host.host_email\n if email:\n user_emails.append(email)\n\n Rsvp = WebcastRSVP\n rsvps = Rsvp.query.with_entities(Rsvp.email).filter(\n Rsvp.webcast_id == row_id)\n\n for rsvp in list(rsvps):\n email = rsvp.email\n if email:\n user_emails.append(email)\n\n if user_emails:\n body['doc']['user_emails'] = user_emails\n\n from app.global_search.helpers import get_act_dict\n if account_id:\n account = Account.query.get(account_id)\n body['doc']['account'] = get_act_dict(account)\n if corporate_account_id:\n account = Account.query.get(corporate_account_id)\n body['doc']['corporate_account'] = get_act_dict(account)\n if user_id:\n privacy_settings = {}\n privacy_fileds = [\n 'search_privacy', 'search_privacy_designation_level',\n 'search_privacy_industry', 'search_privacy_sector',\n 'search_privacy_market_cap_min',\n 'search_privacy_market_cap_max']\n user = User.query.filter(User.row_id==user_id).options(\n joinedload(User.settings).load_only(*privacy_fileds),\n joinedload(User.profile)).first()\n privacy_settings['designation_level'] = \"\"\n if user.profile.designation_link:\n privacy_settings['designation_level'] = \\\n user.profile.designation_link.designation_level\n for field in privacy_fileds:\n privacy_settings[field] = getattr(user.settings, field)\n body['doc'].update(privacy_settings)\n\n es.update(index=index, doc_type=index, id=doc_id, body=body)\n\n result = True\n except Exception as e:\n logger.exception(e)\n result = False\n\n return result\n\n\n@celery_app.task(bind=True, ignore_result=True)\ndef update_matched_docs(self, account_id, account_data, *args, **kwargs):\n index = APP.DF_ES_INDEX\n q = {\n \"script\": {\n \"inline\": \"ctx._source.account = params.account\",\n \"lang\": \"painless\",\n \"params\": {\n \"account\": account_data\n }\n },\n \"query\": {'term': {'account.row_id': account_id}}\n }\n es.update_by_query(body=q, doc_type=index, index=index)\n","repo_name":"Witzcode0/Exchange-connect","sub_path":"queueapp/elastic_search_tasks.py","file_name":"elastic_search_tasks.py","file_ext":"py","file_size_in_byte":10359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26914926216","text":"import mailparser\r\nimport os\r\nimport logging\r\nimport json\r\nimport sys\r\nimport shutil\r\nimport hashlib\r\nimport re\r\nimport base64\r\nimport json\r\nimport time\r\n\r\n############################ MSG ############################\r\n# https://pypi.org/project/mail-parser/\r\n# https://github.com/SpamScope/mail-parser\r\n# $ sudo apt-get install libemail-outlook-message-perl\r\n# pip install mail-parser\r\n#############################################################\r\n\r\nclass MSGExtractor():\r\n def __init__(self):\r\n try:\r\n file_path = os.path.join('config', 'ioc_definitions.json')\r\n with open(file_path, 'r', encoding='utf-8') as config_file:\r\n self.ioc_definitions = json.load(config_file)\r\n except:\r\n print(f\"[{time.strftime('%H:%M:%S')}] [ERROR] Error loading '{file_path}' file\")\r\n logging.error(f\"Error loading '{file_path}' file\")\r\n sys.exit(1)\r\n\r\n try:\r\n file_path = os.path.join('config', 'header_whitelist.json')\r\n with open(file_path, 'r', encoding='utf-8') as whitelist_file:\r\n loaded_data = json.load(whitelist_file)\r\n self.x_header_whitelist = loaded_data['headers']\r\n except:\r\n print(f\"[{time.strftime('%H:%M:%S')}] [ERROR] Error loading '{file_path}' file\")\r\n logging.error(f\"Error loading '{file_path}' file\")\r\n sys.exit(1)\r\n\r\n try:\r\n 
file_path = os.path.join('config', 'interesting_headers.json')\r\n with open(file_path, 'r', encoding='utf-8') as interesting_headers_file:\r\n loaded_data = json.load(interesting_headers_file)\r\n self.interesting_headers = loaded_data['headers']\r\n except:\r\n print(f\"[{time.strftime('%H:%M:%S')}] [ERROR] Error loading '{file_path}' file\")\r\n logging.error(f\"Error loading '{file_path}' file\")\r\n sys.exit(1)\r\n\r\n def process_sample(self, sample_path):\r\n self.sample_path = sample_path\r\n self.sample_dir, self.sample_file = os.path.split(self.sample_path)\r\n self.sample_name, self.sample_extension = os.path.splitext(\r\n self.sample_file)\r\n\r\n print(f\"[{time.strftime('%H:%M:%S')}] [INFO] Processing '{self.sample_path}'\")\r\n logging.info(f\"Processing '{self.sample_path}'\")\r\n\r\n self.base_dir = os.path.join(self.sample_dir, self.sample_name)\r\n if not os.path.isdir(self.base_dir):\r\n os.mkdir(self.base_dir)\r\n os.mkdir(os.path.join(self.base_dir, 'attachments'))\r\n\r\n self.anal_path = os.path.join(self.base_dir, '.'.join([self.sample_name, 'msg']))\r\n print(f\"[{time.strftime('%H:%M:%S')}] [INFO] Ready for parsing '{self.anal_path}' file\")\r\n logging.info(f\"Ready for parsing '{self.anal_path}' file\")\r\n shutil.copy(os.path.join(self.sample_path), self.anal_path)\r\n self.message = mailparser.parse_from_file_msg(self.anal_path)\r\n self.msg_extractor()\r\n\r\n def msg_extractor(self):\r\n mail = self.message\r\n extracted_data = dict()\r\n\r\n # BODY\r\n # hyperlinks are shown inside '< >'\r\n body = mail.body\r\n extracted_data['email_body'] = body\r\n file_path = os.path.join(self.base_dir, 'email_body.txt')\r\n with open(file_path, \"w\") as out_file:\r\n out_file.write(body)\r\n print(f\"[{time.strftime('%H:%M:%S')}] [INFO] Extracted email body to '{file_path}'\")\r\n logging.info(f\"Extracted email body to '{file_path}'\")\r\n\r\n # HEADERS\r\n extracted_data['interesting_headers'] = []\r\n extracted_header_path = os.path.join(self.base_dir, 'header.txt')\r\n header = mail.headers\r\n received_raw = mail.received_raw\r\n received_json = mail.received_json\r\n\r\n file_path = os.path.join(self.base_dir, 'received_parsed.json')\r\n with open(file_path, \"w\") as out_file:\r\n out_file.write(received_json)\r\n print(f\"[{time.strftime('%H:%M:%S')}] [INFO] Parsed 'Received' headers to '{file_path}'\")\r\n logging.info(f\"Parsed 'Received' headers to '{file_path}'\")\r\n\r\n with open(extracted_header_path, \"w\") as out_file:\r\n for entry in received_raw: # mail.headers does not provide all the \"Received\" entries\r\n out_file.write(f\"Received: {entry}\\n\")\r\n for key, value in header.items():\r\n if key in self.interesting_headers:\r\n extracted_data['interesting_headers'].append(\r\n f\"{key}: {value}\")\r\n if key == \"Received\": # \"Received:\" entries are already written to the output file\r\n continue\r\n if \"IronPort\" in key:\r\n continue\r\n elif not key.startswith(\"X\"):\r\n out_file.write(f\"{key}: {value}\\n\")\r\n elif key in self.x_header_whitelist:\r\n out_file.write(f\"{key}: {value}\\n\")\r\n print(f\"[{time.strftime('%H:%M:%S')}] [INFO] Extracted all email headers\")\r\n logging.info('Extracted all email headers')\r\n\r\n # DATA from HEADERS\r\n extracted_data['email_entry_id'] = mail.message_id\r\n\r\n extracted_data['email_received_time'] = f\"{mail.date} UTC {mail.timezone}\"\r\n\r\n mail_from = [\" \".join(entry) for entry in mail.from_] \r\n extracted_data['email_sender'] = mail_from\r\n\r\n try:\r\n recipient_name = 
mail.to[0][0]\r\n            recipient_address = mail.to[0][1]\r\n        except IndexError:\r\n            recipient_name = \"\"\r\n            recipient_address = \"\"\r\n\r\n        extracted_data['email_recipient'] = dict(\r\n            name=recipient_name,\r\n            address=recipient_address)\r\n\r\n        # only name without the email address\r\n        # extracted_data['email_to_name'] = mail.to[0][0] # REDUNDANT\r\n\r\n        # ORIGINAL VERSION STORES ONLY NAMES, NOT A NAME:EMAIL DICTIONARY\r\n        extracted_data['email_cc'] = []\r\n        for name, address in mail.cc:\r\n            extracted_data['email_cc'].append(dict(\r\n                name=name,\r\n                address=address))\r\n\r\n        extracted_data['email_bcc'] = []\r\n        for name, address in mail.bcc:\r\n            extracted_data['email_bcc'].append(dict(\r\n                name=name,\r\n                address=address))\r\n\r\n        extracted_data['email_subject'] = mail.subject\r\n\r\n        print(f\"[{time.strftime('%H:%M:%S')}] [INFO] Parsed and extracted data from email headers\")\r\n        logging.info('Parsed and extracted data from email headers')\r\n\r\n        extracted_data['email_attachments'] = []\r\n        attachments = mail.attachments\r\n        for entry in attachments:\r\n            filename = entry['filename'].replace('\\n', '').replace('\\r', '')\r\n            local_path = os.path.join(self.base_dir, 'attachments', filename)\r\n            print(f\"[{time.strftime('%H:%M:%S')}] [INFO] Extracted attachment to '{local_path}'\")\r\n            logging.info(f\"Extracted attachment to '{local_path}'\")\r\n            encoded_payload = entry['payload']\r\n            decoded_payload = base64.urlsafe_b64decode(encoded_payload)\r\n            with open(local_path, \"wb\") as att_file:\r\n                att_file.write(decoded_payload)\r\n\r\n            md_5_sum = hashlib.md5(decoded_payload).hexdigest()\r\n            sha_1_sum = hashlib.sha1(decoded_payload).hexdigest()\r\n            sha_256_sum = hashlib.sha256(decoded_payload).hexdigest()\r\n\r\n            extracted_data['email_attachments'].append(dict(\r\n                name=entry['filename'],\r\n                type=entry['mail_content_type'],\r\n                md5=md_5_sum,\r\n                sha1=sha_1_sum,\r\n                sha256=sha_256_sum\r\n            ))\r\n\r\n        # extracted_data['email_sender_mail_type'] = \"!!! -----> UNSUPPORTED <----- !!!\"\r\n        # extracted_data['email_body_format'] = \"!!! -----> UNSUPPORTED <----- !!!\"\r\n\r\n        
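# For reference, an assumed shape for config/ioc_definitions.json, inferred purely\r\n        # from how it is consumed below (the project's real file may differ):\r\n        #   {\"definitions\": {\"url\": {\"rgx\": \"https?://[^ ]+\"}, \"md5\": {\"rgx\": \"[a-f0-9]{32}\"}}}\r\n        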
# IOCs\r\n        extracted_data['iocs'] = []\r\n        ioc_regexes = self.ioc_definitions['definitions']\r\n        for ioc_type, ioc_data in ioc_regexes.items():\r\n            data = sorted(\r\n                set(re.findall(ioc_data['rgx'], body, re.IGNORECASE)))\r\n            if data:\r\n                ioc_entry = {}\r\n                ioc_entry.update({ioc_type: data})\r\n                extracted_data['iocs'].append(ioc_entry)\r\n        if extracted_data['iocs']:\r\n            print(f\"[{time.strftime('%H:%M:%S')}] [INFO] Extracted IOCs\")\r\n            logging.info(\"Extracted IOCs\")\r\n\r\n        file_path = os.path.join(self.base_dir, 'extracted_data.json')\r\n        with open(file_path, \"w\") as out_file:\r\n            out_file.write(json.dumps(\r\n                extracted_data, indent=2, sort_keys=False))\r\n        print(f\"[{time.strftime('%H:%M:%S')}] [INFO] Written extracted data to '{file_path}'\")\r\n        logging.info(f\"Written extracted data to '{file_path}'\")\r\n","repo_name":"martinkubecka/mailo","sub_path":"extractors/msg_extractor.py","file_name":"msg_extractor.py","file_ext":"py","file_size_in_byte":8873,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"31051651338","text":"try: import urlparse\nexcept ImportError: import urllib.parse as urlparse\n\nclass Url(object):\n    \"\"\"Encapsulates the location of a resource on a gopher server.\"\"\"\n\n    def __init__(self, host, type=\"1\", path=\"\", port=70, name=\"\", query=\"\"):\n        \"\"\"Initializes the URL.\"\"\"\n        self.host = host\n        self.type = type\n        self.path = path.strip(\"/\")\n        self.port = int(port)\n        self.name = name\n        self.query = query\n\n    @classmethod\n    def fromstring(cls, url):\n        \"\"\"Creates an Url from a string.\"\"\"\n        url = url.strip()\n        parsed = urlparse.urlparse(url)\n        if parsed.scheme == \"\":\n            parsed = urlparse.urlparse(\"gopher://\" + url)\n\n        host = parsed.hostname or \"\"  # hostname, unlike netloc, strips any :port suffix\n\n        split = list(filter(bool, parsed.path.split(\"/\")))\n        if len(split) == 0:\n            type = \"1\"\n            path = \"\"\n        elif len(split[0]) == 1:\n            type = split[0]\n            path = \"/\".join(split[1:])\n        else:\n            type = \"1\"\n            path = \"/\".join(split)\n\n        port = parsed.port or 70\n\n        if not parsed.scheme == \"gopher\":\n            raise NotImplementedError(\"{0} protocol\".format(parsed.scheme))\n\n        return cls(host, type=type, path=path, port=port,\n            query=urlparse.unquote(parsed.query))\n\n    def tostring(self):\n        \"\"\"Assembles a string from the Url.\"\"\"\n        return \"gopher://{}{}/{}/{}{}\".format(\n            self.host,\n            \"\" if self.port == 70 else \":{}\".format(self.port),\n            self.type,\n            self.path,\n            (\"?\" + urlparse.quote(self.query)) if self.query else \"\"\n        )\n    __str__ = tostring\n\n    @classmethod\n    def fromline(cls, line):\n        \"\"\"\n        Parses an Url from a line inside a directory listing. 
May raise exceptions\n if line is empty.\n \"\"\"\n split = line[1:].split(\"\\t\")\n return cls(\n type=line[0],\n name=split[0] if len(split) > 1 else \"\",\n path=split[1] if len(split) > 1 else \"\",\n host=split[2] if len(split) > 2 else \"\",\n port=split[3] if len(split) > 3 else 70\n )\n\n def toline(self, name=None):\n \"\"\"Assembles a directory listing line from the Url.\"\"\"\n return \"{}{}\\t{}\\t{}\\t{}\".format(\n self.type,\n name if name else self.name,\n self.path,\n self.host,\n self.port\n )\n","repo_name":"nucular/gophic","sub_path":"gophic/url.py","file_name":"url.py","file_ext":"py","file_size_in_byte":2100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7864125636","text":"# Flask app backend\n\nfrom flask import Flask, send_from_directory, request\nimport waxing_control\n\nwaxing_control.setup()\n\napp = Flask(__name__, static_url_path='', static_folder='frontend/build')\n\n@app.route(\"/\")\ndef index():\n return send_from_directory(app.static_folder, 'index.html')\n\n@app.route(\"/move\", methods=['POST'])\ndef move():\n if request.method == 'POST':\n print(request.json)\n if request.json['moveDir'] == \"moveLeft\":\n waxing_control.moveLeft()\n if request.json['moveDir'] == \"moveRight\":\n waxing_control.moveRight()\n if request.json['moveDir'] == \"moveStop\":\n waxing_control.moveStop()\n return 'Success'\n else:\n return 'Fail'\n\nif __name__ == '__main__':\n app.run(debug=True, port=5000, host='0.0.0.0')","repo_name":"davidantaki/SkiBot","sub_path":"V2/software/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32654598048","text":"import pandas as pd\nimport smtplib\n\nclass LoadDocument:\n\n \"\"\"Загрузка документа\"\"\"\n __table = str\n __emailTeacher = str\n __path = str\n __emailStudent = str\n def __init__(self, table = str, emailTeacher = str, emailStudent = str, path = str):\n self.__table = table\n self.__emailTeacher = emailTeacher\n self.__path = path\n self.__emailStudent = emailStudent\n\n def __LoadTable (self, table = str, path = str):\n table = pd.read_excel(io=path, engine=\"openpyxl\", sheet_name=table, skiprows=[0, 2])\n return table\n\n def __LoadEmail (self, email = str, path = str):\n email = pd.read_excel(io=path, engine=\"openpyxl\", sheet_name=email)\n return email\n\n # Функция создания таблицы студентов c предметами которые они провалили и ср.балл\n def __AddStudent(self, table=pd.DataFrame, _atributes=pd.Index):\n blackList = pd.DataFrame([])\n for k in range(len(table[_atributes[0]])):\n listSubject = []\n for i in range(2, len(_atributes) - 1):\n if table[_atributes[i]][k] < 1:\n listSubject.append(_atributes[i])\n for i in range((len(_atributes) - 3) - len(listSubject)):\n listSubject.append(\"0\")\n blackList[table[_atributes[1]][k]] = listSubject\n return blackList\n\n\n # Создание таблицы с предметами\n def __AddSubject(self, table=pd.DataFrame, atributes=pd.Index):\n listOfSubject = pd.DataFrame([])\n for i in range(3, len(atributes) - 1):\n listOfStudent = []\n for j in range(len(table[atributes[0]])):\n if table[atributes[i]][j] < 1:\n listOfStudent.append(table[atributes[1]][j])\n for j in range(len(table[atributes[1]]) - len(listOfStudent)):\n listOfStudent.append(\"0\")\n listOfSubject[atributes[i]] = listOfStudent\n return listOfSubject\n\n # Функция удаления студентов у которых бал по всем предметам больше 1\n def 
__Delete(self, table=pd.DataFrame):\n listStudent = table.columns\n bool = False\n for i in range(len(listStudent)):\n for j in range(len(table[listStudent[i]])):\n if table[listStudent[i]][j] != \"0\":\n bool = False\n break\n else:\n bool = True\n if bool:\n table.pop(listStudent[i])\n return table\n\n\n # Получение таблицы неуспивающик стуентов\n def GetTableStudent(self):\n table = self.__LoadTable(self.__table, self.__path)\n table = self.__AddStudent(table, table.columns)\n table = self.__Delete(table)\n return table\n\n\n def GetTableSubject(self):\n table = self.__LoadTable(self.__table, self.__path)\n table = self.__AddSubject(table, table.columns)\n table = self.__Delete(table)\n return table\n\n\n\n def __MessegeStudent(self, tableOfStudent=pd.DataFrame, Student=str):\n message = \"Уважаемый \" + Student + \"\\n\"\n message = message + \"Вы являетесь должником по 6 кн, \"\n listOfSubject = []\n for i in range(len(tableOfStudent[Student])):\n if tableOfStudent[Student][i] != \"0\":\n if tableOfStudent[Student][i] != \"Ср. балл\":\n listOfSubject.append(tableOfStudent[Student][i])\n else:\n break\n if len(listOfSubject) == 1:\n message += \"по дисциплине \" + listOfSubject[\n 0] + \". Необходимо в срок до 8 недели сдать всё долги. Информацию о сдаче доводить до сведения деканата раз в 2-3 дня\"\n else:\n message += \"по дисциплинам \"\n for i in range(len(listOfSubject)):\n if i == 0:\n message += listOfSubject[i]\n else:\n message += \", \" + listOfSubject[i]\n message += \". Необходимо в срок до 8 недели сдать всё долги. Информа��ию о сдаче доводить до сведения деканата раз в 2-3 дня\"\n return message\n\n # Фуекция отправки сообщения\n def __SendMessage(self, adress_email_sender=str, password=str, andess_email_recipient=str, message=str):\n smtpObj = smtplib.SMTP(\"smtp.gmail.com\", 587)\n smtpObj.ehlo()\n smtpObj.starttls()\n smtpObj.login(adress_email_sender, password)\n smtpObj.sendmail(adress_email_sender, andess_email_recipient, message.encode(\"utf-8\"))\n smtpObj.quit()\n\n # Поиск и отправка сообщений студентам\n def __SelectStudent(self, table=pd.DataFrame, table_email=pd.DataFrame):\n listStudent = table.columns\n listAtributes = table_email.columns\n # поиск студентов со средним баллов меньше 1\n for i in range(len(listStudent)):\n for j in range(len(table[listStudent[i]])):\n if table[listStudent[i]][j] == \"Ср. балл\":\n # поиск в таблице с почтой\n for k in range(len(table_email[listAtributes[1]])):\n if table_email[listAtributes[1]][k] == listStudent[i]:\n self.__SendMessage(\"aleks24129958@gmail.com\", \"rqvzpuxoigdkqnmy\", table_email[listAtributes[2]][k],\n self.__MessegeStudent(table, listStudent[i]))\n\n def __MessegeOfTeacher(self, tableOfSubject=pd.DataFrame, subject=str):\n messege = \"У вас \"\n arraySubject = []\n for i in range(len(tableOfSubject[subject])):\n if tableOfSubject[subject][i] != \"0\":\n arraySubject.append(tableOfSubject[subject][i])\n if len(arraySubject) == 1:\n messege += \"1 должник: \" + arraySubject[0]\n else:\n if len(arraySubject) >= 2 and len(arraySubject) <= 4:\n messege += str(len(arraySubject)) + \" должника: \"\n else:\n messege += str(len(arraySubject)) + \" должников: \"\n for i in range(len(arraySubject)):\n if i == 0:\n messege += arraySubject[i]\n else:\n messege += \", \" + arraySubject[i]\n messege += \". Необходимо обеспечить сдачу долгов до 8 неделе. 
Доводить информацию о сдаче долгов до деканата.\"\r\n        return messege\r\n\r\n    # find the teachers with debtors and send them messages\r\n    def __SelectTeacher(self, table=pd.DataFrame, table_email=pd.DataFrame):\r\n        listOfSubject = table.columns\r\n        listOfEmail = table_email.columns\r\n        for i in range(len(listOfSubject)):\r\n            for j in range(len(table_email[listOfEmail[1]])):\r\n                if listOfSubject[i] == table_email[listOfEmail[1]][j]:\r\n                    self.__SendMessage(\"aleks24129958@gmail.com\", \"\", table_email[listOfEmail[3]][j],\r\n                                       self.__MessegeOfTeacher(table, listOfSubject[i]))\r\n\r\n\r\n    def SendMessege(self):\r\n        tableOfStudent = self.GetTableStudent()\r\n        emailOfStudent = self.__LoadEmail(self.__emailStudent, self.__path)\r\n        self.__SelectStudent(tableOfStudent, emailOfStudent)\r\n        tableOfSubject = self.GetTableSubject()\r\n        emailOfTeacher = self.__LoadEmail(self.__emailTeacher, self.__path)\r\n        self.__SelectTeacher(tableOfSubject, emailOfTeacher)","repo_name":"Kamegatze/SentMessege","sub_path":"LoadDocument.py","file_name":"LoadDocument.py","file_ext":"py","file_size_in_byte":7898,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11934544948","text":"class IEScoreRecord():\n    def __init__(self, utils, test='IE(DFI=%T=%TG=%CD=)'):\n        self.utils = utils\n        self.test_str = test  # keep the raw test string; __repr__ relies on it\n        params = self.utils.split_db_test_string_to_params(test)\n\n        self.responded = False\n        if 'R' not in params:\n            self.responded = True\n        self.dont_fragment = params['DFI']\n        self.ttl = [int(value, 16) for value in params['TG']]\n        self.reply_code = params['CD']\n    \n    def calculate_nmap_score(self, other):\n        \"\"\"\n        Scoring method for NMAP ICMP Echo test\n        Implemented as stated in NMAP Documentation\n        \"\"\"\n        score = 0\n        if not self.responded and not other.responded:\n            score += 50\n        elif self.responded and other.responded:\n            score += 50\n        if self.utils.is_fields_match(self.dont_fragment, other.dont_fragment):\n            score += 40\n        if self.utils.is_fields_match(self.ttl, other.ttl):\n            score += 15\n        if self.utils.is_fields_match(self.reply_code, other.reply_code):\n            score += 100\n        return score\n\n    def __repr__(self):\n        return self.test_str + '\\n'","repo_name":"peleglahav/osfingerprint","sub_path":"score_records/ie_score_record.py","file_name":"ie_score_record.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40213576140","text":"import json\nimport os\nfrom typing import List, Tuple, Union\n\nimport discord\nfrom discord.ext.commands import Context\n\nfrom .paginator import SubcommandPaginator\n\nwith open(os.path.abspath(\"./subcommands/subcommandhelp.json\"), \"r\") as file:\n    _HELPDATA = json.loads(file.read())\n\n\nclass EntryData(object):\n    __slots__ = [\n        \"desc\",\n        \"usage\",\n        \"returns\",\n        \"aliases\",\n        \"uperms\",\n        \"bperms\",\n        \"note\",\n    ]\n\n    def __init__(self, *, desc: str = None,\n                 usage: str = None, returns: str = None,\n                 aliases: List[str] = [], uperms: List[str] = [],\n                 bperms: List[str] = [], note: str = None) -> None:\n        self.desc = desc\n        self.usage = usage\n        self.returns = returns\n        self.aliases = aliases\n        self.uperms = uperms\n        self.bperms = bperms\n        self.note = note\n\n\nclass SubCommandEntry(object):\n    __slots__ = [\n        \"name\",\n        \"pfx\",\n        \"_desc\",\n        \"_usage\",\n        \"_returns\",\n        \"_aliases\",\n        \"_uperms\",\n        \"_bperms\",\n        \"_note\",\n        \"inline\",\n        \"nospaced\",\n    ]\n\n    def __init__(self, name: str, pfx: str, data: EntryData,\n                 inline: bool = False, nospaced: bool = False, **kwargs) -> None:\n        self.name = 
name\n self.pfx = pfx\n self._desc = data.desc\n self._usage = data.usage\n self._returns = data.returns\n self._aliases = \" \".join(f\"`{alias}`\" for alias in data.aliases)\n self._uperms = \" \".join(f\"`{perm}`\" for perm in data.uperms)\n self._bperms = \" \".join(f\"`{perm}`\" for perm in data.bperms)\n self._note = data.note\n self.inline = inline\n self.nospaced = nospaced\n\n @property\n def desc(self) -> str:\n return f\"**Description:** {self._desc}\"\n\n @property\n def usage(self) -> str:\n return f\"**Usage:** `{self.pfx}{'' if self.nospaced else ' '}{self._usage}`\"\n\n @property\n def returns(self) -> str:\n return f\"**Returns:** {self._returns}\"\n\n @property\n def aliases(self) -> str:\n return f\"**Aliases:** {self._aliases}\"\n\n @property\n def uperms(self) -> str:\n return f\"**User Permissions:** {self._uperms}\"\n\n @property\n def bperms(self) -> str:\n return f\"**Bot Permissions:** {self._bperms}\"\n\n @property\n def note(self) -> str:\n return f\"**Note:** {self._note}\"\n\n @property\n def all(self) -> Tuple[str, List[str], bool]:\n return self.name, [\n getattr(self, attr) for attr in [\n \"desc\", \"usage\", \"returns\", \"aliases\", \"uperms\", \"bperms\", \"note\"\n ] if getattr(self, f\"_{attr}\")\n ], self.inline\n\n\nclass SubcommandHelp(object):\n __slots__ = [\n \"pfx\",\n \"nospaced\",\n \"title\",\n \"description\",\n \"per_page\",\n \"entries\",\n \"show_entry_count\",\n \"embed\",\n ]\n\n def __init__(self, pfx: str, nospaced=False, title: str = None,\n description: str = None, per_page: int = 3,\n show_entry_count: bool = False, embed: discord.Embed = None) -> None:\n self.pfx = str(pfx)\n self.nospaced = nospaced\n self.title = str(title)\n self.description = str(description)\n self.per_page = per_page\n self.entries: List[SubCommandEntry] = []\n self.show_entry_count = show_entry_count\n self.embed = embed\n\n def add_entry(self, name: str, data: Union[EntryData, dict], inline: bool = False) -> \"SubcommandHelp\":\n self.entries.append(\n SubCommandEntry(\n name,\n self.pfx,\n data if isinstance(data, EntryData) else EntryData(**data),\n inline,\n self.nospaced,\n )\n )\n return self\n\n def from_config(self, base_command: str) -> \"SubcommandHelp\":\n for entry in _HELPDATA[base_command]:\n self.add_entry(entry, data=_HELPDATA[base_command][entry])\n return self\n\n def _prepare_embed(self, embed: discord.Embed) -> discord.Embed:\n if not embed:\n return discord.Embed(title=self.title,\n description=self.description,\n color=discord.Color.blue())\n return self.embed\n\n async def show_help(self, ctx: Context) -> discord.Message:\n self.embed = self._prepare_embed(self.embed)\n return await SubcommandPaginator(\n ctx,\n entries=[entry.all for entry in self.entries],\n per_page=self.per_page,\n show_entry_count=self.show_entry_count,\n embed=self.embed,\n ).paginate()\n","repo_name":"marwynnsomridhivej/uconnsmashbot","sub_path":"utils/helpgenerator.py","file_name":"helpgenerator.py","file_ext":"py","file_size_in_byte":4698,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"5822488260","text":"# -*- coding: utf-8 -*-\n# @Author: leedagou\n# @Date: 2019-04-30 16:21:18\n# @Last Modified by: leedagou\n# @Last Modified time: 2019-05-07 23:27:32\n\n# encoding=utf-8\n\nimport os\nimport sys\nimport io\nsys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')\nfrom datetime import datetime\n\n\ndef splitfile(filepath, partialsize=1024 * 1024 * 100):\n print(\"开始时间\")\n 
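# the start/end timestamps below let you see how long the split takes\n    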
print(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))\n filedir, name = os.path.split(filepath)\n print(filedir, name)\n name, ext = os.path.splitext(name)\n print(name, ext)\n filedir = os.path.join(filedir, name)\n if not os.path.exists(filedir):\n os.mkdir(filedir)\n partno = 0\n stream = open(filepath, 'rb')\n while True:\n partfilename = os.path.join(filedir, name + '_' + str(partno) + ext)\n # print('write start %s' % partfilename)\n part_stream = open(partfilename, 'wb')\n read_count = 0\n read_size = 1024 * 512 * 16\n read_count_once = 0\n while read_count < partialsize:\n read_content = stream.read(read_size)\n read_count_once = len(read_content)\n if read_count_once > 0:\n part_stream.write(read_content)\n else:\n break\n read_count += read_count_once\n\n line = stream.readline()\n part_stream.write(line)\n part_stream.close()\n if(read_count_once < read_size):\n break\n partno += 1\n print(\"结束时间\")\n print(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))\n\n\nif __name__ == '__main__':\n splitfile(r'')\n","repo_name":"lili1147/Database","sub_path":"spl_size.py","file_name":"spl_size.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40839423381","text":"import tensorflow as tf\nfrom networks.network import Network\n\nfrom fast_rcnn.config import cfg\n\nn_classes = 7\n\n\nclass VGGnet_test(Network):\n def __init__(self, trainable=True, anchor_scales=[8, 16, 32],\n feat_stride=[16, ], low_level_trainable=False,\n anchor_ratios=[0.5, 1, 2], transform_img=False):\n self.inputs = []\n self._anchor_scales = anchor_scales\n self._feat_stride = feat_stride\n self.data = tf.placeholder(tf.float32, shape=[None, None, None, 3])\n #target_size = cfg.TEST.SCALES[0]\n #self.data = tf.placeholder(tf.float32, shape=[1, target_size, target_size, 3])\n self.im_info = tf.placeholder(tf.float32, shape=[None, 3])\n self.keep_prob = tf.placeholder(tf.float32)\n self.layers = dict({'data': self.data, 'im_info': self.im_info})\n self.trainable = trainable\n self.low_level_trainable = low_level_trainable\n self.anchor_ratio_size = len(anchor_ratios)\n self.anchor_ratios = anchor_ratios\n self.transform_img = transform_img\n self.setup()\n\n def setup(self):\n (self.feed('data')\n #.spatial_transform(name='spt_trans', do_transform=self.transform_img, keep_prob=1)\n .conv(3, 3, 64, 1, 1, name='conv1_1', trainable=self.low_level_trainable)\n .conv(3, 3, 64, 1, 1, name='conv1_2', trainable=self.low_level_trainable)\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool1')\n .conv(3, 3, 128, 1, 1, name='conv2_1', trainable=self.low_level_trainable)\n .conv(3, 3, 128, 1, 1, name='conv2_2', trainable=self.low_level_trainable)\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool2')\n .conv(3, 3, 256, 1, 1, name='conv3_1')\n .conv(3, 3, 256, 1, 1, name='conv3_2')\n .conv(3, 3, 256, 1, 1, name='conv3_3')\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool3')\n .conv(3, 3, 512, 1, 1, name='conv4_1')\n .conv(3, 3, 512, 1, 1, name='conv4_2')\n .conv(3, 3, 512, 1, 1, name='conv4_3')\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool4')\n .conv(3, 3, 512, 1, 1, name='conv5_1')\n .conv(3, 3, 512, 1, 1, name='conv5_2')\n .conv(3, 3, 512, 1, 1, name='conv5_3'))\n\n (self.feed('conv5_3')\n .conv(3, 3, 512, 1, 1, name='rpn_conv/3x3')\n .conv(1, 1, len(self._anchor_scales) * self.anchor_ratio_size * 2, 1, 1,\n padding='VALID', relu=False, name='rpn_cls_score'))\n\n (self.feed('rpn_conv/3x3')\n .conv(1, 1, 
len(self._anchor_scales) * self.anchor_ratio_size * 4, 1, 1,\n padding='VALID', relu=False, name='rpn_bbox_pred'))\n\n (self.feed('rpn_cls_score')\n .reshape_layer(2, name='rpn_cls_score_reshape')\n .softmax(name='rpn_cls_prob'))\n\n (self.feed('rpn_cls_prob')\n .reshape_layer(len(self._anchor_scales) * self.anchor_ratio_size * 2, name='rpn_cls_prob_reshape'))\n\n (self.feed('rpn_cls_prob_reshape', 'rpn_bbox_pred', 'im_info')\n .proposal_layer(self._feat_stride, self._anchor_scales, self.anchor_ratios, 'TEST', name='rois'))\n\n (self.feed('conv5_3', 'rois')\n #.roi_pool(7, 7, 1.0/16, name='pool_5')\n .st_pool(7, 7, 1.0 / 16, name='pool_5', phase='TEST')\n .fc(4096, name='fc6')\n .fc(4096, name='fc7')\n .fc(n_classes, relu=False, name='cls_score')\n .softmax(name='cls_prob'))\n\n (self.feed('fc7')\n .fc(n_classes * 4, relu=False, name='bbox_pred'))\n","repo_name":"chenwuperth/rgz_rcnn","sub_path":"lib/networks/VGGnet_test.py","file_name":"VGGnet_test.py","file_ext":"py","file_size_in_byte":3676,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"81"} +{"seq_id":"1346530191","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\n@author: ajumde\n\"\"\"\n\n'''\nWrite a function to find the longest common prefix string amongst an array of strings.\n\nIf there is no common prefix, return an empty string \"\".\n\nExample 1:\n\nInput: [\"flower\",\"flow\",\"flight\"]\nOutput: \"fl\"\nExample 2:\n\nInput: [\"dog\",\"racecar\",\"car\"]\nOutput: \"\"\nExplanation: There is no common prefix among the input strings.\nNote:\n\nAll given inputs are in lowercase letters a-z.\n'''\nclass Solution:\n def longestCommonPrefix(self, strs) -> str:\n #strs = [\"aa\",\"a\"]\n '''\n if len(strs) < 1:\n return \"\"\n if len(strs) == 1:\n return strs[0]\n first = strs[0]\n print(\"first : \",first)\n index = 0\n flag = 0\n for i in first:\n for word in strs:\n print(\"Comparing\",i,\" with \",word[index],\" from \",word, \" index : \",index)\n if index < len(word) - 1:\n if i != word[index]:\n flag = 1\n break\n else:\n print(\"going here\")\n flag = 1\n break\n \n if flag ==1:\n break\n index+=1 \n \n print(\"---> iteration done :\",index)\n \n if index > 0: \n return first[0:index]\n else:\n return \"\"\n '''\n \n if not strs:\n return \"\"\n shortest = min(strs,key=len)\n for i, ch in enumerate(shortest):\n for other in strs:\n if other[i] != ch:\n return shortest[:i]\n return shortest\n '''\n #This is a brilliant solution!! 
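(min and max of a list of strings\n        # compare lexicographically, so the shared prefix of min(strs) and max(strs)\n        # is the shared prefix of the whole list)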
\n # x = [\"abcd\", \"abcdpqr\",\"abcsd\"]\n # min(x) = 'abcd' max(X) = 'abcsd'\n \n \n m = strs\n if not m: return ''\n\t #since list of string will be sorted and retrieved min max by alphebetic order\n s1 = min(m)\n s2 = max(m)\n\n for i, c in enumerate(s1):\n if c != s2[i]:\n return s1[:i] #stop until hit the split index\n return s1\n '''\n\nif __name__ == '__main__':\n s = Solution()\n print(s.longestCommonPrefix(['abcd','abc']))\n","repo_name":"adityajumde/leetcode","sub_path":"Longest_Common_Prefix.py","file_name":"Longest_Common_Prefix.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13220598891","text":"import string\n\ndef get_word_frequency_from_file_dict(file_name):\n input_file = open(file_name, \"r\")\n words_dict = {}\n for line in input_file:\n for word in line.split():\n #delete punctuation:\n table = str.maketrans({key: None for key in string.punctuation})\n word = word.translate(table)\n word = word.lower()\n if word is not '':\n if word not in words_dict:\n words_dict[word] = 1\n else:\n words_dict[word] += 1\n return words_dict\n\ndef write_freq_dict_to_file(file_name,freq_dict):\n output_file = open(file_name,\"w\")\n output_file.write(\"sep=;\\n\")\n output_file.write(\"word;frequency\\n\")\n for key in freq_dict:\n output_string = key + \";\" + str(freq_dict.get(key)) + \"\\n\"\n output_file.write(output_string)\n output_file.close()\n\n\n\nclass trie_node:\n def __init__(self,next_nodes,freq=0):\n self.freq = freq\n self.next_nodes = next_nodes\n\n def write_to_file(self,output_file,word = None):\n if self.freq > 0:\n output_string = word +\";\" +str(self.freq) + \"\\n\"\n output_file.write(output_string)\n if self.next_nodes:\n for node in self.next_nodes:\n self.next_nodes[node].write_to_file(output_file,node)\n\nclass trie:\n def __init__(self):\n self.root = trie_node({})\n\n def add_word(self,word):\n current = self.root\n if word[0] not in current.next_nodes: # add first letter of the word when not in next_nodes from the root\n current.next_nodes[word[0]] = trie_node({})\n current = current.next_nodes[word[0]]\n if len(word) <= 1:\n current.freq += 1\n for letters in range(1, len(word)):\n w = word[:letters + 1]\n if w not in current.next_nodes:\n current.next_nodes[w] = trie_node({})\n if letters == len(word) - 1:\n current.next_nodes[w].freq += 1\n return\n current = current.next_nodes[w]\n\n\n def fill_from_file(self,file_name):\n input_file = open(file_name,'r')\n for line in input_file:\n for word in line.split():\n # delete punctuation:\n table = str.maketrans({key: None for key in string.punctuation})\n word = word.translate(table)\n word = word.lower()\n if word is not '':\n self.add_word(word)\n\n def write_to_file(self,file_name):\n output_file = open(file_name,\"w\")\n output_file.write(\"sep=;\\n\")\n output_file.write(\"word;frequency\\n\")\n self.root.write_to_file(output_file)\n\n\n\ndef test_freq_table():\n\n freq_dict = get_word_frequency_from_file_dict(\"input.txt\") #create dictionary with words and freqencies\n print(freq_dict) #show dict on screen\n write_freq_dict_to_file(\"output_dict.csv\",freq_dict) #wirte dict as frequency table to output_dict.csv\n\n t = trie() #make empty trie\n t.fill_from_file(\"input.txt\") #fill trie with contents of input.txt\n t.write_to_file(\"output_trie.csv\")#write freqency table to output_trie.csv\n\ntest_freq_table()","repo_name":"gerritvanos/ALDS","sub_path":"week 3 - inleveren/ex 
4.py","file_name":"ex 4.py","file_ext":"py","file_size_in_byte":3201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37322657437","text":"# -*- coding: utf-8 -*-\n'''\nAuthor: TJUZQC\nDate: 2021-04-22 14:20:33\nLastEditors: TJUZQC\nLastEditTime: 2021-04-25 18:50:42\nDescription: None\n'''\nfrom typing import Optional\n\nimport numpy as np\nimport paddle.nn.functional as F\nimport paddle\nfrom paddle import Tensor, nn\nfrom paddleseg.models.layers import Identity\nfrom paddleseg.utils.einops import rearrange, repeat\nfrom paddleseg.utils.einops.layers.paddle import Rearrange\n\n\nclass SepConv2d(nn.Layer):\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n stride=1,\n padding=0,\n dilation=1,):\n super(SepConv2d, self).__init__()\n self.depthwise = nn.Conv2D(in_channels,\n in_channels,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n dilation=dilation,\n groups=in_channels)\n self.bn = nn.BatchNorm2D(in_channels)\n self.pointwise = nn.Conv2D(in_channels, out_channels, kernel_size=1)\n\n def forward(self, x):\n x = self.depthwise(x)\n x = self.bn(x)\n x = self.pointwise(x)\n return x\n\nclass Residual(nn.Layer):\n def __init__(self, fn):\n super().__init__()\n self.fn = fn\n def forward(self, x, **kwargs):\n return self.fn(x, **kwargs) + x\n\nclass PreNorm(nn.Layer):\n def __init__(self, axis, fn):\n super().__init__()\n self.norm = nn.LayerNorm(axis)\n self.fn = fn\n def forward(self, x, **kwargs):\n return self.fn(self.norm(x), **kwargs)\n\nclass FeedForward(nn.Layer):\n def __init__(self, axis, hidden_axis, dropout = 0.):\n super().__init__()\n self.net = nn.Sequential(\n nn.Linear(axis, hidden_axis),\n nn.GELU(),\n nn.Dropout(dropout),\n nn.Linear(hidden_axis, axis),\n nn.Dropout(dropout)\n )\n def forward(self, x):\n return self.net(x)\n\nclass ConvAttention(nn.Layer):\n def __init__(self, axis, img_size, heads = 8, axis_head = 64, kernel_size=3, q_stride=1, k_stride=1, v_stride=1, dropout = 0.,\n last_stage=False):\n\n super().__init__()\n self.last_stage = last_stage\n self.img_size = img_size\n inner_axis = axis_head * heads\n project_out = not (heads == 1 and axis_head == axis)\n\n self.heads = heads\n self.scale = axis_head ** -0.5\n pad = (kernel_size - q_stride)//2\n self.to_q = SepConv2d(axis, inner_axis, kernel_size, q_stride, pad)\n self.to_k = SepConv2d(axis, inner_axis, kernel_size, k_stride, pad)\n self.to_v = SepConv2d(axis, inner_axis, kernel_size, v_stride, pad)\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_axis, axis),\n nn.Dropout(dropout)\n ) if project_out else Identity()\n\n def forward(self, x):\n b, n, _, h = *x.shape, self.heads\n if self.last_stage:\n cls_token = x[:, 0]\n x = x[:, 1:]\n cls_token = rearrange(cls_token.unsqueeze(1), 'b n (h d) -> b h n d', h = h)\n x = rearrange(x, 'b (l w) n -> b n l w', l=self.img_size, w=self.img_size)\n q = self.to_q(x)\n q = rearrange(q, 'b (h d) l w -> b h (l w) d', h=h)\n\n v = self.to_v(x)\n v = rearrange(v, 'b (h d) l w -> b h (l w) d', h=h)\n\n k = self.to_k(x)\n k = rearrange(k, 'b (h d) l w -> b h (l w) d', h=h)\n\n if self.last_stage:\n q = paddle.concat((cls_token, q), axis=2)\n v = paddle.concat((cls_token, v), axis=2)\n k = paddle.concat((cls_token, k), axis=2)\n\n # dots = np.einsum('b h i d, b h j d -> b h i j', q.numpy(), k.numpy()) * self.scale\n # dots = paddle.to_tensor(dots)\n\n dots = paddle.matmul(q, rearrange(k, \"b h j d -> b h d j\")) * self.scale\n\n attn = F.softmax(dots, axis=-1)\n\n # out = 
paddle.to_tensor(np.einsum('b h i j, b h j d -> b h i d', attn.numpy(), v.numpy()))\n out = paddle.matmul(attn, v)\n out = rearrange(out, 'b h n d -> b n (h d)')\n out = self.to_out(out)\n return out\n \nclass Transformer(nn.Layer):\n def __init__(self, axis, img_size, depth, heads, axis_head, mlp_axis, dropout=0., last_stage=False):\n super().__init__()\n self.layers = nn.LayerList([])\n for _ in range(depth):\n self.layers.append(nn.LayerList([\n PreNorm(axis, ConvAttention(axis, img_size, heads=heads, axis_head=axis_head, dropout=dropout, last_stage=last_stage)),\n PreNorm(axis, FeedForward(axis, mlp_axis, dropout=dropout))\n ]))\n\n def forward(self, x):\n for attn, ff in self.layers:\n x = attn(x) + x\n x = ff(x) + x\n return x\n\nclass CvT(nn.Layer):\n def __init__(self, image_size, in_channels, num_classes, axis=64, kernels=[3, 3, 3], strides=[1, 1, 1],\n heads=[1, 3, 6] , depth = [1, 2, 10], pool='cls', dropout=0., emb_dropout=0., scale_axis=4):\n super().__init__()\n\n\n\n\n assert pool in {'cls', 'mean'}, 'pool type must be either cls (cls token) or mean (mean pooling)'\n self.pool = pool\n self.axis = axis\n\n ##### Stage 1 #######\n self.stage1_conv_embed = nn.Sequential(\n nn.Conv2D(in_channels, axis, kernels[0], strides[0], 1),\n nn.MaxPool2D(2),\n Rearrange('b c h w -> b (h w) c', h = image_size//2, w = image_size//2),\n nn.LayerNorm(axis)\n )\n self.stage1_transformer = nn.Sequential(\n Transformer(axis=axis, img_size=image_size//2,depth=depth[0], heads=heads[0], axis_head=self.axis,\n mlp_axis=axis * scale_axis, dropout=dropout),\n Rearrange('b (h w) c -> b c h w', h = image_size//2, w = image_size//2)\n )\n\n ##### Stage 2 #######\n in_channels = axis\n scale = heads[1]//heads[0]\n axis = scale*axis\n self.stage2_conv_embed = nn.Sequential(\n nn.Conv2D(in_channels, axis, kernels[1], strides[1], 1),\n nn.MaxPool2D(2),\n Rearrange('b c h w -> b (h w) c', h = image_size//4, w = image_size//4),\n nn.LayerNorm(axis)\n )\n self.stage2_transformer = nn.Sequential(\n Transformer(axis=axis, img_size=image_size//4, depth=depth[1], heads=heads[1], axis_head=self.axis,\n mlp_axis=axis * scale_axis, dropout=dropout),\n Rearrange('b (h w) c -> b c h w', h = image_size//4, w = image_size//4)\n )\n\n ##### Stage 3 #######\n in_channels = axis\n scale = heads[2] // heads[1]\n axis = scale * axis\n self.stage3_conv_embed = nn.Sequential(\n nn.Conv2D(in_channels, axis, kernels[2], strides[2], 1),\n Rearrange('b c h w -> b (h w) c', h = image_size//16, w = image_size//16),\n nn.LayerNorm(axis)\n )\n self.stage3_transformer = nn.Sequential(\n Transformer(axis=axis, img_size=image_size//16, depth=depth[2], heads=heads[2], axis_head=self.axis,\n mlp_axis=axis * scale_axis, dropout=dropout, last_stage=True),\n )\n\n\n self.cls_token = self.create_parameter([1, 1, axis])\n self.dropout_large = nn.Dropout(emb_dropout)\n\n\n self.mlp_head = nn.Sequential(\n nn.LayerNorm(axis),\n nn.Linear(axis, num_classes)\n )\n\n def forward(self, img):\n # img.shape = 1,3,256,256\n xs = self.stage1_conv_embed(img) # 1,3,256,256 -> 1,4096,64\n xs = self.stage1_transformer(xs) # 1,4096,64 -> 1,64,64,64\n\n xs = self.stage2_conv_embed(xs) # 1,64,64,64 -> 1,1024,192\n xs = self.stage2_transformer(xs) # 1,1024,192 -> 1,192,32,32\n\n xs = self.stage3_conv_embed(xs) # 1,192,32,32 -> 1,256,384\n b, _, _ = xs.shape\n cls_tokens = repeat(self.cls_token, '() n d -> b n d', b=b) # 1,256,384\n xs = paddle.concat((cls_tokens, xs), axis=1) # 1,256,384 -> 1,257,384\n xs = self.stage3_transformer(xs) # 1,257,384\n xs = 
xs.mean(axis=1) if self.pool == 'mean' else xs[:, 0] # 1,257,384 -> 1,384\n\n xs = self.mlp_head(xs) # 1,384 -> 1,2\n return xs","repo_name":"QinchengZhang/PaddleSeg_dygraph","sub_path":"paddleseg/models/layers/ConvTransformer.py","file_name":"ConvTransformer.py","file_ext":"py","file_size_in_byte":8464,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"7124206336","text":"from ring_buffer import ring_buffer\r\nclass Queue:\r\n __slots__ = \"q\"\r\n\r\n def __init__(self,c):\r\n self.q=ring_buffer(c)\r\n\r\n def __str__(self):\r\n return str(self.q)\r\n\r\n def enqueue(self,Val):\r\n \"\"\"\r\n this enqueue method of the Queue\r\n the function is to add the element into the Queue from the back\r\n \"\"\"\r\n self.q.insert_keep_new(Val)\r\n\r\n def dequeue(self):\r\n \"\"\"\r\n this dequeue method of the Queue\r\n the function is to remove the element from the front of a Queue\r\n \"\"\"\r\n self.q.remove_newest()\r\n\r\ndef test():\r\n hp=Queue(3)\r\n hp.enqueue(1)\r\n hp.enqueue(2)\r\n hp.enqueue(5)\r\n print(\"back is located at the rightmost\")\r\n print(\"the capacity I defined is 3\")\r\n print(hp)\r\n hp.dequeue()\r\n print(\"dequeue one element from the queue\")\r\n print(hp)\r\n hp.enqueue(7)\r\n hp.enqueue(8)\r\n print(\"enqueue 7 first and then enqueue 8 into the queue\")\r\n print(\"the result shown as followed\")\r\n print(hp)\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n test()","repo_name":"zl9901/CSCI603-LAB6","sub_path":"queue.py","file_name":"queue.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74856690826","text":"\"\"\"empty message\n\nRevision ID: 560079982ed7\nRevises: 372e1e161657\nCreate Date: 2019-08-12 13:38:24.946622\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '560079982ed7'\ndown_revision = '372e1e161657'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('stocks', sa.Column('coach', sa.String(length=30), nullable=True))\n op.add_column('stocks', sa.Column('race', sa.String(length=30), nullable=True))\n op.create_index(op.f('ix_stocks_coach'), 'stocks', ['coach'], unique=False)\n op.create_index(op.f('ix_stocks_race'), 'stocks', ['race'], unique=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
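###\n    # mirror of upgrade(): drop the indexes first, then the columns they covered\n    # 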
###\n op.drop_index(op.f('ix_stocks_race'), table_name='stocks')\n op.drop_index(op.f('ix_stocks_coach'), table_name='stocks')\n op.drop_column('stocks', 'race')\n op.drop_column('stocks', 'coach')\n # ### end Alembic commands ###\n","repo_name":"ttrnecka/rebbl-stock-market","sub_path":"migrations/versions/560079982ed7_.py","file_name":"560079982ed7_.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"8889117938","text":"import json\r\nimport queue\r\nfrom typing import Dict, Optional\r\nimport requests\r\nfrom jsonpath import jsonpath\r\nimport dateparser\r\nfrom urllib.parse import urlparse\r\nimport hashlib\r\nfrom datetime import datetime\r\nfrom lxml import etree\r\n\r\nqueue = queue.Queue()\r\n\r\n\r\ndef proxy_pool() -> list:\r\n \"\"\"\r\n 使用境外动态代理\r\n :return: 代理列表\r\n \"\"\"\r\n proxy_url = 'http://api.proxy.ipidea.io/getProxyIp?num=100&return_type=json&lb=6&sb=0&flow=1®ions=&protocol=http'\r\n # proxy_url = 'http://220.194.140.39:30022/getip?num=150&ip=&key=&source=zhima'\r\n proxy_con = requests.get(proxy_url).text\r\n proxy_json = json.loads(proxy_con)\r\n print(proxy_json)\n proxy_text = [(proxy[\"ip\"] + ':' + str(proxy[\"port\"])) for proxy in proxy_json[\"data\"]]\r\n return proxy_text\r\n\r\n\r\ndef queue_empty():\r\n while True:\r\n if queue.empty():\r\n proxy_list = proxy_pool()\r\n for i in proxy_list:\r\n queue.put(i)\r\n pro = queue.get()\r\n if pro:\r\n if \"http\" not in pro:\r\n pro = \"http://\" + pro\r\n break\r\n proxies = str(pro)\r\n return proxies\r\n\r\n\r\ndef json_path(obj: Dict, expr: str, default: Optional = '', is_list: bool = False):\r\n \"\"\"\r\n jsonpath解析\r\n :param obj: 解析对象\r\n :param expr: jsonpath\r\n :param default: 未获取到返回默认值, 默认空字符串\r\n :param is_list: 是否保留list, 默认不保留\r\n :return: 解析值或者defult\r\n \"\"\"\r\n value = jsonpath(obj, expr)\r\n if value:\r\n if not is_list:\r\n return value[0]\r\n else:\r\n return value\r\n else:\r\n return default\r\n\r\n\r\ndef item_main():\r\n \"\"\"\r\n 项目字段设置\r\n :return:\r\n \"\"\"\r\n item = {\r\n # 采集关键字\r\n \"module_name\": \"\",\r\n # fork的原项目ID\r\n \"fork_original_project\": \"\",\r\n # fork的根项目ID\r\n \"fork_root_project\": \"\",\r\n # 项目名\r\n \"project_name\": \"\",\r\n # 用户id\r\n \"user_id\": \"\",\r\n # 项目作者\r\n \"author\": \"\",\r\n # 创建时间\r\n \"create_time\": \"\",\r\n # 项目提交次数\r\n \"commit_count\": \"\",\r\n # 项目标签\r\n \"tags\": \"\",\r\n # 项目星数\r\n \"stars_count\": \"\",\r\n # 项目浏览数\r\n \"watch_count\": \"\",\r\n # 项目fork数\r\n \"forks_count\": \"\",\r\n # 项目贡献数\r\n \"contributors_count\": \"\",\r\n # 项目简介\r\n \"abstract\": \"\",\r\n # read me内容\r\n \"readme\": \"\",\r\n # 基本信息\r\n \"source_url\": \"\",\r\n # 接口url\r\n \"ref_url\": \"\",\r\n # github二月新增需求\r\n \"host_ip\": \"140.82.121.4\",\r\n \"AlexaInfo\": 36,\r\n \"host\": \"\",\r\n \"sub_host\": \"\",\r\n \"website_name\": \"\",\r\n \"website_sub_name\": \"\",\r\n \"uuid\": \"\",\r\n \"crawler_time\": \"\",\r\n \"insert_time\": \"\",\r\n \"update_time\": \"\",\r\n }\r\n return item\r\n\r\n\r\nheaders = {\r\n 'authority': 'api.github.com',\r\n 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',\r\n 'accept-language': 'zh-CN,zh;q=0.9',\r\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36'\r\n}\r\n\r\n\r\ndef md5(s):\r\n \"\"\"\r\n md5加密字符串\r\n :param s:\r\n :return:\r\n 
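32-character hex digest; dict inputs are JSON-serialized before hashing\r\n    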
\"\"\"\r\n if s:\r\n if isinstance(s, dict):\r\n s = json.dumps(s, ensure_ascii=False)\r\n m = hashlib.md5()\r\n m.update(s.encode())\r\n md5_str = m.hexdigest()\r\n return md5_str\r\n else:\r\n print(s)\r\n raise Exception(\"不能对空值进行哈希\")\r\n\r\n\r\ndef hsd():\r\n pro = queue_empty()\r\n response = requests.get('https://api.github.com/repositories/129007092', headers=headers, proxies={\r\n 'http': pro,\r\n 'https': pro\r\n })\r\n item = item_main().copy()\r\n # fork的原项目ID\r\n item[\"fork_original_project\"] = json_path(response.json(), '$.parent.full_name')\r\n # fork的根项目ID\r\n item[\"fork_root_project\"] = json_path(response.json(), '$.source.full_name')\r\n # 项目名\r\n item[\"project_name\"] = json_path(response.json(), '$.name')\r\n # 创建时间\r\n create_time = json_path(response.json(), '$.created_at')\r\n if create_time:\r\n item[\"create_time\"] = dateparser.parse(create_time).strftime(\"%Y-%m-%d %H:%M:%S\")\r\n else:\r\n print(f\"该文章无发布时间! | 接口url:{response.url}\")\r\n # 项目作者\r\n author = json_path(response.json(), '$.owner.login')\r\n item[\"author\"] = author\r\n item[\"user_id\"] = author\r\n # 项目标签\r\n tags = json_path(response.json(), '$.topics')\r\n item[\"tags\"] = \"#\".join(tags)\r\n # 项目星数\r\n stars_count = json_path(response.json(), '$.stargazers_count')\r\n item[\"stars_count\"] = str(stars_count)\r\n # 项目浏览数\r\n watch_count = json_path(response.json(), '$.subscribers_count')\r\n item[\"watch_count\"] = str(watch_count)\r\n # 项目fork数\r\n forks_count = json_path(response.json(), '$.forks_count')\r\n item[\"forks_count\"] = str(forks_count)\r\n # 项目简介\r\n item[\"abstract\"] = json_path(response.json(), '$.description')\r\n # 项目id\r\n project_id = json_path(response.json(), '$.id')\r\n # 项目url\r\n source_url = json_path(response.json(), '$.html_url')\r\n gets(source_url, item, project_id)\r\n\r\n\r\ndef gets(source_url, item, project_id):\r\n pro = queue_empty()\r\n response = requests.get(source_url, headers=headers, proxies={\r\n 'http': pro,\r\n 'https': pro\r\n })\r\n response = etree.HTML(response.content.decode())\r\n # 项目提交次数\r\n item[\"commit_count\"] = \"\".join(response.xpath(\"//span[@class='d-none d-sm-inline']/strong/text()\"))\r\n # 项目贡献数\r\n contributors_count = \"\".join(response.xpath(\"//a[contains(text(), 'Contributors')]/span/text()\"))\r\n item[\"contributors_count\"] = contributors_count\r\n # 项目url\r\n item[\"source_url\"] = response.url\r\n # 跳转url\r\n ref_url = f\"https://api.github.com/repositories/{project_id}\"\r\n item[\"ref_url\"] = ref_url\r\n # 用ref_url MD5\r\n item[\"uuid\"] = md5(ref_url)\r\n # read me内容\r\n item[\"readme\"] = response.xpath(\"//div[@data-target='readme-toc.content']\").xpath('string(.)').get(\"\")\r\n # item[\"readme_html\"] = response.xpath(json_path(config, \"$.item_info.readme\")).get(\"\")\r\n # 基本信息\r\n item[\"website_name\"] = 'GitHub'\r\n item[\"website_sub_name\"] = 'GitHub'\r\n item[\"host\"] = 'github.com'\r\n netloc = urlparse(response.url).netloc.replace('www.', '')\r\n if netloc:\r\n item[\"sub_host\"] = netloc\r\n else:\r\n item[\"sub_host\"] = item[\"host\"]\r\n item['crawler_time'] = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\r\n item['insert_time'] = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\r\n item['update_time'] = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\r\n print(item)\r\n\r\n\r\nif __name__ == '__main__':\r\n 
hsd()\r\n","repo_name":"liulijun-king/liu_news","sub_path":"GitHubAll/a1.py","file_name":"a1.py","file_ext":"py","file_size_in_byte":7070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11813610537","text":"import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport open3d as o3d\n\nply_header = '''ply\nformat ascii 1.0\nelement vertex %(vert_num)d\nproperty float x\nproperty float y\nproperty float z\nproperty uchar red\nproperty uchar green\nproperty uchar blue\nend_header\n'''\n\ndebug = False\n\ndef project_points(points,P):\n points = P @ points.T\n return points\n\ndef getParallaxMap(disparity):\n if(debug):\n print(disparity.shape)\n h,w = disparity.shape\n disparity_map = []\n for i in range(h):\n for j in range(w):\n disparity_map.append([j,i,disparity[i,j],1])\n return np.array(disparity_map)\n\ndef cal_disparity(image_left,image_right):\n window_size = 5\n min_disp = -39\n num_disp = 144\n stereo = cv2.StereoSGBM_create(minDisparity = min_disp,\n numDisparities = num_disp,\n disp12MaxDiff = 1,\n blockSize=5,\n P1=8 * 3 * window_size ** 2,\n P2=32 * 3 * window_size ** 2,\n uniquenessRatio = 10,\n speckleWindowSize = 100,\n speckleRange = 32,\n preFilterCap=63\n )\n disparity = stereo.compute(image_left, image_right).astype(np.float32) / 64.0\n disparity = (disparity-min_disp)/num_disp\n return disparity\n\ndef reprojectImageTo3d(disparity_map,Q):\n points = []\n for dis in disparity_map:\n point = Q.dot(dis)\n points.append(point)\n return np.array(points)\n\ndef write_ply(fn, verts, colors):\n verts = verts.reshape(-1, 3)\n colors = colors.reshape(-1, 3)\n verts = np.hstack([verts, colors])\n with open(fn, 'wb') as f:\n f.write((ply_header % dict(vert_num=len(verts))).encode('utf-8'))\n np.savetxt(f, verts, fmt='%f %f %f %d %d %d ')\n\nfolder_for_left_images = './mr19-assignment3-data/img2/00000004'\nfolder_for_right_images = './mr19-assignment3-data/img3/00000004'\nimage_extension = '.png'\nstart_number = 60\nnumber_of_images = 21\n\nground_truth_file = './mr19-assignment3-data/poses.txt'\nfile_ground_truth = open(ground_truth_file,'r')\n\nprojection_matrix = []\nrow_init = np.array([0,0,0,1])\nfor line in file_ground_truth:\n p = []\n x = line.split(' ')\n for i in x:\n p.append(float(i))\n p = np.array(p).reshape((3,4))\n p = np.vstack((p,row_init))\n projection_matrix.append(p)\n\nf = 7.070912e+02;\nb = 0.53790448812;\nK = np.array([[7.070912e+02, 0.000000e+00, 6.018873e+02],\n [0.000000e+00, 7.070912e+02, 1.831104e+02],\n [0.000000e+00, 0.000000e+00, 1.000000e+00]])\n\nfinal_points = []\nfinal_colors = []\npcds = []\nfor image_number in range(0,number_of_images,1):\n\n path_for_left_image = (folder_for_left_images +\n str(start_number + image_number) +\n image_extension)\n\n path_for_right_image = (folder_for_right_images +\n str(start_number + image_number) +\n image_extension)\n\n\n if(debug):\n print (path_for_left_image)\n print(\"Loading Images\")\n\n left_image_with_color = cv2.imread(path_for_left_image)\n left_image = cv2.imread(path_for_left_image)\n right_image = cv2.imread(path_for_right_image)\n\n if(debug):\n print(\"Computing Disparity\")\n\n disparity = cal_disparity(left_image,right_image)\n h,w = disparity.shape\n Q = np.float32([[ 1, 0, 0, -w/2],\n [ 0, -1, 0, h/2],\n [ 0, 0, 0, f],\n [ 0, 0, 1/b, 0]])\n\n if(debug):\n print(\"Computing PointCloud\")\n\n disparity_map = getParallaxMap(disparity)\n point_cloud = reprojectImageTo3d(disparity_map,Q)\n colors = 
cv2.cvtColor(left_image_with_color, cv2.COLOR_BGR2RGB)\n mask = disparity >= disparity.min()\n colors = colors[mask]\n colors = colors / 255\n\n if(debug):\n print(\"Reprojecting 3D points\")\n\n N,three = (point_cloud.shape)\n for i in range(N):\n new_point = project_points((point_cloud[i]),projection_matrix[image_number])\n if(new_point[3] > 0):\n new_point = new_point / new_point[3]\n final_points.append(new_point[0:3])\n final_colors.append(colors[i])\n\n\n print(\"Done : \",image_number+1, \"Images out of : \", number_of_images)\n\nfinal_points = np.array(final_points)\nfinal_colors = np.array(final_colors)\nmask = ((-1500 <= final_points[:,1]) & (final_points[:,1] < 1500) &\n (-1500 <= final_points[:,2]) & (final_points[:,2] < 1500) &\n (-1500 <= final_points[:,0]) & (final_points[:,0] < 1500))\n\n\nfinal_points = final_points[mask]\nprint(final_points[0])\nfinal_colors = final_colors[mask]\nfinal_points.T[0] *= -1\nprint(final_points[0])\n\npcd = o3d.geometry.PointCloud()\npcd.points = o3d.utility.Vector3dVector(final_points)\npcd.colors = o3d.utility.Vector3dVector(final_colors)\no3d.visualization.draw_geometries([pcd])\n\nfile_name = 'for_single_image.ply'\nwrite_ply(file_name,np.array(final_points),np.array(final_colors*255))\n","repo_name":"AnuragSahu/Stereo-Dense-Reconstruction","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":4902,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"3610729996","text":"import socket\r\nimport pickle\r\nfrom appsclone_client.utils.optionArgs import *\r\nfrom appsclone_server.clientHandler import *\r\nfrom appsclone_server.utils.logs import *\r\nfrom appsclone_server.constants import *\r\nfrom _thread import *\r\n\r\nclass APPSCloneServer:\r\n \"\"\"The APPSClone server for regular users, who wish to manually upload and retrieve to and from APPS.\r\n \r\n Attributes\r\n ----------\r\n MAX_TRIES : int\r\n The maximum tries a client can fail to connect\r\n NUMBER_BYTES_TO_RECEIVE : int\r\n The max number of bytes to receive\r\n \"\"\"\r\n # == Attributes ==\r\n MAX_TRIES = 5\r\n NUMBER_BYTES_TO_RECEIVE = 16384\r\n # == Methods ==\r\n def __init__(self,ip,port,conn,toUploadDir,appsIDQueue,idQueue,regularUsersIdQueue,resultsRegular):\r\n \"\"\"Server initialization.\r\n \r\n Parameters\r\n ----------\r\n ip : int\r\n The ip of the server\r\n port : int\r\n The port to open the server on\r\n conn : Connection_APPS\r\n A connection to APPS object\r\n toUploadDir : str\r\n The directory to upload to\r\n appsIDQueue : str\r\n The queue of APPS ids\r\n idQueue : str\r\n The queue of APPSClone ids\r\n regularUsersIdQueue : str\r\n The regular users id queue\r\n resultsRegular : str\r\n The regular results directory\r\n \"\"\"\r\n self.ip = ip\r\n self.port = int(port)\r\n self.toUploadDir = toUploadDir\r\n self.appsIDQueue = appsIDQueue\r\n self.idQueue = idQueue\r\n self.regularUsersIdQueue = regularUsersIdQueue\r\n self.resultsRegular = resultsRegular\r\n self.clientHandler = ClientHandler(conn,toUploadDir,appsIDQueue,idQueue,regularUsersIdQueue,resultsRegular)\r\n\r\n def runServer(self):\r\n \"\"\"Run the server.\"\"\"\r\n with socket.socket(socket.AF_INET,socket.SOCK_STREAM) as s:\r\n s.bind((self.ip,self.port))\r\n s.listen(APPSCloneServer.MAX_TRIES)\r\n while True:\r\n client,clientAddress = s.accept()\r\n start_new_thread(self.clientThread,(client,))\r\n\r\n def clientThread(self,client):\r\n \"\"\"Thread to handle the clients' operations.\r\n\r\n 
Parameters\r\n    ----------\r\n    client : socketObject\r\n      The client to handle\r\n    """\r\n    while True: \r\n      try:\r\n        #receive data from client\r\n        opt_args = pickle.loads(client.recv(APPSCloneServer.NUMBER_BYTES_TO_RECEIVE))\r\n        #process client data\r\n        response = self.clientHandler.process(opt_args.option,opt_args.args)\r\n        #send data back to client\r\n        client.send(pickle.dumps(response))\r\n      except (ConnectionResetError,EOFError): #handle client disconnection gracefully by leaving the loop\r\n        break","repo_name":"DuarteArribas/APPSClone","sub_path":"APPSClone_Server/appsclone_server/appsCloneServer.py","file_name":"appsCloneServer.py","file_ext":"py","file_size_in_byte":2781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"74803686986","text":"import hashlib\nimport http.client\nimport json\nimport os\nimport random\nimport re\nimport shutil\nimport time\nimport urllib.parse\n\n####################\n"""\n\n\nKnown limitations:\n    Only Baidu Translate is supported for now\n    Sometimes the translation swallows the [1] markers, so some protected strings {}|[] are lost\n    %abc style markers are not matched yet\n\n"""\n\n#####################\n# your Baidu appid\nappid = input('Enter your Baidu appid: ')\n# your Baidu secret key\nsecretKey = input('Enter your Baidu secretKey: ')\nfromLang = input('Enter the source language (e.g. en): ')  # source language\ntoLang = input('Enter the target language (e.g. zh): ')\n\n\ndef baidu_translation(content):\n    httpClient = None\n    myurl = '/api/trans/vip/translate'\n    q = content\n\n    salt = random.randint(32768, 65536)\n    sign = appid + q + str(salt) + secretKey\n    sign = hashlib.md5(sign.encode()).hexdigest()\n    myurl = myurl + '?appid=' + appid + '&q=' + urllib.parse.quote(\n        q) + '&from=' + fromLang + '&to=' + toLang + '&salt=' + str(salt) + '&sign=' + sign + "&action=1"\n\n    try:\n        httpClient = http.client.HTTPConnection('api.fanyi.baidu.com')\n        httpClient.request('GET', myurl)\n        response = httpClient.getresponse()  # response is an HTTPResponse object\n        jsonResponse = response.read().decode("utf-8")  # read the returned result, which is in JSON format\n        js = json.loads(jsonResponse)  # convert the JSON result into a dict\n        text = ""\n        dict1 = {}\n        for i in js["trans_result"]:\n            dict1[i["src"]] = i["dst"]\n        return (dict1)  # return the src->dst mapping\n    except Exception as e:\n        print(e)\n\n\ndef appendtxt(path1):\n    dict1 = {}\n    with open(path1, "r", encoding="utf8") as f:\n        txtlist = f.readlines()\n        for i in range(len(txtlist) - 1):\n            if re.findall('""', txtlist[i + 1]):  # check whether the next line contains ""\n                list1 = re.findall('#.*"(.*?)".*\\n|old.*"(.*?)".*\\n', txtlist[i])  # if so, extract the text inside "" on this line\n                if list1 != []:\n                    dict1[i] = (list1[0][0] + list1[0][1]).strip(" ") + "\n\n"  # store it in the dict\n\n    with open(os.path.dirname(__file__) + "\翻译.json", "w", encoding="utf8") as f:\n        f.write(json.dumps(dict1, ensure_ascii=False, indent=4))  # write the json\n\n\ndef tran(path1):\n    try:\n        os.mkdir(os.path.split(path1)[0] + "\\new\\")  # create a "new" folder in the directory of the file being translated\n    except:\n        pass\n    path2 = os.path.split(path1)[0] + "\\new\\" + os.path.split(path1)[1]\n    print(path2)\n\n    if not os.path.isfile(path2):\n        shutil.copyfile(path1, path2)  # copy the file into the new folder\n\n    appendtxt(path2)\n    with open(os.path.dirname(__file__) + "\翻译.json", "r", encoding="utf8") as f:\n        data = json.loads(f.read())  # load the json dict\n\n    key = list(data.keys())\n    value = list(data.values())\n    text = ""\n    keylist = []\n    i = 0\n    num = 0\n    dict1 = {}\n\n    for i in range(len(data)):\n\n        keylist.append(key[i])  # add it to the list of strings pending translation\n        text = text + value[i]  # concatenate the strings\n        # print(i)\n        if len(text) > 3000:  # the batch is longer than 3000 characters\n\n            time.sleep(1)\n            text = text.replace(value[i], "", 1)  # remove the last string from the batch\n            del keylist[-1]  # drop the last string from the pending list\n
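            # Illustration (added note, not in the original script): protected tokens are\n            # swapped for numbered placeholders before the API call and restored afterwards,\n            # e.g. 'Give [item] to {name}' -> 'Give [0] to [1]', with\n            # dict1 = {'[0]': '[item]', '[1]': '{name}'} used to map each [n] back.\n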
            while True:\n\n                notran1 = re.findall("\[.*?]|{.*?}", text)  # find the [] / {} protected (non-translatable) strings\n                for itx in notran1:\n                    text = text.replace(itx, "[" + str(num) + "]", 1)  # e.g. [name] is replaced with [1]\n                    dict1["[" + str(num) + "]"] = itx  # record the mapping "[1]": "[name]"\n                    num = num + 1  # increment the placeholder counter\n\n                trandict = baidu_translation(text)  # call the Baidu API; returns a dict of oldtxt:trantxt pairs (or plug in your own backend)\n\n                trandictkey = list(trandict.keys())  # get the oldtxt keys\n                for k in trandictkey:\n\n                    if "[" in k:  # check whether it contains []\n                        for ixxxx in re.findall("\[.*?]", k):  # find the [] placeholders\n                            trandict[k] = trandict[k].replace(ixxxx, dict1[ixxxx],\n                                                              1)  # look up the protected string for [num] in the dict and restore it in the value\n                        k0 = k\n                        for ixxxx in re.findall("\[.*?]", k):\n                            k = k.replace(ixxxx, dict1[ixxxx], 1)  # look up the protected string for [num] in the dict and restore it in the key\n\n                        trandict[k] = trandict.pop(k0)  # replace the key and drop the stale pair\n\n                if trandict:  # if the Baidu API returned a result, stop; otherwise translate again\n                    time.sleep(1)\n                    break\n\n            with open(path2, "r", encoding="utf8") as f:\n                txtlists = f.readlines()  # read the file in the new folder line by line\n\n            for ix in range(len(keylist)):\n                print((keylist[ix]))\n                print(data[str(keylist[ix])])\n                print(trandict[data[str(keylist[ix])].split("\n\n")[0].strip(" ")])\n                # fill the translated text into the empty ""\n                txtlists[int(keylist[ix]) + 1] = txtlists[int(keylist[ix]) + 1].replace('""', '"' + trandict[\n                    data[str(keylist[ix])].split("\n\n")[0].strip(" ")] + '"')\n\n            with open(path2, "w", encoding="utf8") as f:\n                f.writelines(txtlists)  # write back to the file\n\n            keylist = []  # clear the list\n\n            text = value[i]  # carry over the text not yet translated\n            keylist.append(int(key[i]))  # carry over its key as well\n\n    #######################\n    if text:  # the remaining text is under 3000 characters, i.e. the final batch; same logic as above\n        while True:\n\n            notran1 = re.findall("\[.*?]|{.*?}", text)\n            for itx in notran1:\n                text = text.replace(itx, "[" + str(num) + "]", 1)\n                dict1["[" + str(num) + "]"] = itx\n                num = num + 1\n\n            trandict = baidu_translation(text)\n            trandictkey = list(trandict.keys())\n            for k in trandictkey:\n\n                if "[" in k:\n\n                    for ixxxx in re.findall("\[.*?]", k):\n                        trandict[k] = trandict[k].replace(ixxxx, dict1[ixxxx], 1)\n                    k0 = k\n                    for ixxxx in re.findall("\[.*?]", k):\n                        k = k.replace(ixxxx, dict1[ixxxx], 1)\n\n                    trandict[k] = trandict.pop(k0)\n\n            if trandict:\n                time.sleep(1)\n                break\n\n        with open(path2, "r", encoding="utf8") as f:\n            txtlists = f.readlines()\n\n        for ix in range(len(keylist)):\n            print("############################")  # the prints between the ### markers can be removed\n            print((keylist[ix]))\n            print(data[str(keylist[ix])])\n            print(trandict[data[str(keylist[ix])].split("\n\n")[0].strip(" ")])\n            print("############################")  # the prints between the ### markers can be removed\n\n            txtlists[int(keylist[ix]) + 1] = txtlists[int(keylist[ix]) + 1].replace('""', '"' + trandict[\n                data[str(keylist[ix])].split("\n\n")[0].strip(" ")] + '"')\n\n        with open(path2, "w", encoding="utf8") as f:\n            f.writelines(txtlists)\n\n\ndef get_rpypath(txtpath):\n    rpy_pathlist = []\n    for parent, dirnames, filenames in os.walk(txtpath):\n        for filename in filenames:\n\n            if '.rpy' == os.path.splitext(filename)[1]:\n                # print(os.path.join(parent, filename)\n                if "new" in parent:\n                    continue\n                rpy_pathlist.append(os.path.join(parent, filename))\n    return rpy_pathlist\n\n\nif __name__ == "__main__":\n    path0 = input("Enter the file path (e.g. the D:\game\tl\chinese directory): ")\n    pathlist = get_rpypath(path0)\n    for i in pathlist:\n        tran(i)\n","repo_name":"Dclef/renpy-tl","sub_path":"tran.py","file_name":"tran.py","file_ext":"py","file_size_in_byte":7864,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"81"}
{"seq_id":"25301918911","text":"import math\r\nimport time\r\nimport tkinter as tk\r\nfrom tkinter import messagebox\r\nimport v2vml.configuration as conf\r\nimport v2vml.globals as g\r\nfrom v2vml.node import 
Node\r\n\r\n# in order to make a new checkbutton:\r\n# - __init__(): bool for feature\r\n# - __init__(): add bool for feature to list\r\n# - __init__(): create checkbutton for feature\r\n# - init_buttons: create the checkbutton\r\n# - init_buttons(): set variable for the button\r\n# - init_buttons(): set function to call when clicking the buttons\r\n# - init_buttons(): add checkbutton to list of checkbuttons\r\n\r\n\r\nclass Visualizer:\r\n\r\n    COLOR_BACKGROUND = '#dedddc'\r\n\r\n    # node colors\r\n    COLOR_NODE_GOOD = '#00a32e'\r\n    COLOR_NODE_FAULTY = '#c98104'\r\n    COLOR_NODE_MALICIOUS = '#7400b3'\r\n\r\n    # sensor radius colors\r\n    COLOR_OUTER_RADIUS = '#bbbfbd'\r\n    COLOR_INNER_RADIUS = '#8e9490'\r\n\r\n    # BSM colors\r\n    COLOR_BSM_GOOD = '#54de7d'\r\n    COLOR_BSM_FAULTY = '#e6ac49'\r\n    COLOR_BSM_MALICIOUS = '#c27aeb'\r\n\r\n    # line colors\r\n    COLOR_LINE_NORMAL = '#36c4cf'\r\n\r\n    def __init__(self, root, canvas, sim):\r\n        self.root = root\r\n        self.canvas = canvas\r\n        self.sim = sim\r\n        self.init_display = True\r\n\r\n        # enable/disable features\r\n        self.features = []\r\n\r\n        self.auto = tk.BooleanVar(value=True)\r\n        self.on_off = tk.BooleanVar(value=conf.SHOW_DEFAULT)\r\n        self.show_node_type = tk.BooleanVar(value=conf.SHOW_DEFAULT)\r\n        self.show_node_hist = tk.BooleanVar(value=conf.SHOW_DEFAULT)\r\n        self.show_outer_radius = tk.BooleanVar(value=conf.SHOW_DEFAULT)\r\n        self.show_inner_radius = tk.BooleanVar(value=conf.SHOW_DEFAULT)\r\n        self.show_inner_connections = tk.BooleanVar(value=conf.SHOW_DEFAULT)\r\n        self.show_outer_connections = tk.BooleanVar(value=conf.SHOW_DEFAULT)\r\n        self.show_bsm_hist = tk.BooleanVar(value=conf.SHOW_DEFAULT)\r\n\r\n        self.features.append(self.auto)\r\n        self.features.append(self.on_off)\r\n        self.features.append(self.show_node_type)\r\n        self.features.append(self.show_node_hist)\r\n        self.features.append(self.show_outer_radius)\r\n        self.features.append(self.show_inner_radius)\r\n        self.features.append(self.show_inner_connections)\r\n        self.features.append(self.show_outer_connections)\r\n        self.features.append(self.show_bsm_hist)\r\n\r\n        # check buttons\r\n        self.cb = []\r\n        self.cb_auto = None\r\n        self.cb_on_off = None\r\n        self.cb_show_node_type = None\r\n        self.cb_show_node_hist = None\r\n        self.cb_show_outer_radius = None\r\n        self.cb_show_inner_radius = None\r\n        self.cb_show_inner_connections = None\r\n        self.cb_show_outer_connections = None\r\n        self.cb_show_bsm_hist = None\r\n        self.init_buttons()\r\n\r\n        self.bind_mouse()\r\n\r\n        # show the initial screen\r\n        self.draw()\r\n\r\n    # puts the simulation in motion\r\n    def start(self):\r\n\r\n        while self.auto.get():\r\n            time.sleep(conf.SIM_EPOCH_WAIT_TIME)\r\n            self.draw()\r\n            self.root.update()\r\n\r\n        self.root.mainloop()\r\n\r\n    def bind_mouse(self):\r\n\r\n        # pressing the left mouse button will cause the screen to update\r\n        self.canvas.bind("<Button-1>", self.mouse_click)\r\n\r\n        # pressing the right mouse button on a node will print information about it\r\n        self.canvas.bind("<Button-3>", self.mouse_click)\r\n\r\n    # called by main when the simulation is in SIM_MANUAL\r\n    def mouse_click(self, event):\r\n\r\n        # only process mouse events if not in auto mode\r\n        if self.auto.get():\r\n            return\r\n\r\n        # left mouse button\r\n        if event.num == 1:\r\n\r\n            # advance to next epoch\r\n            if 0 < event.x < conf.CANVAS_WIDTH and 0 < event.y < conf.CANVAS_HEIGHT:\r\n                self.draw()\r\n            # button pressed\r\n            else:\r\n                pass\r\n\r\n        # right mouse button\r\n        else:\r\n            for n in self.sim.nodes:\r\n                if n.x-g.RADIUS_NODE < event.x < 
n.x+g.RADIUS_NODE and n.y-g.RADIUS_NODE < event.y < n.y+g.RADIUS_NODE:\r\n print(n)\r\n messagebox.showinfo('Node {} Details'.format(n.id), str(n))\r\n\r\n # called by main when the simulation is in SIM_AUTO\r\n def draw(self, advance_epoch=True):\r\n\r\n # clear the canvas\r\n self.canvas.delete(\"all\")\r\n\r\n # make changes for the next epoch\r\n if self.init_display:\r\n self.init_display = False\r\n else:\r\n if advance_epoch:\r\n self.sim.next_epoch()\r\n\r\n # solid color background\r\n self.canvas.create_rectangle(0, 0, conf.CANVAS_WIDTH, conf.CANVAS_HEIGHT, fill=Visualizer.COLOR_BACKGROUND)\r\n\r\n # draw to the canvas\r\n self.draw_nodes()\r\n\r\n # throw the stuff we drew onto the canvas\r\n self.canvas.pack()\r\n\r\n self.root.title('v2vml: Attack Detection - Epoch ' + str(self.sim.epoch))\r\n\r\n def draw_nodes(self):\r\n\r\n # inner connections\r\n if self.show_inner_connections.get():\r\n self.draw_inner_connections()\r\n\r\n # outer connections\r\n if self.show_outer_connections.get():\r\n self.draw_outer_connections()\r\n\r\n # node history\r\n if self.show_node_hist.get():\r\n self.draw_node_history()\r\n\r\n # sensors outer radius\r\n if self.show_outer_radius.get():\r\n self.draw_sensor_outer_radius()\r\n\r\n # sensors inner radius\r\n if self.show_inner_radius.get():\r\n self.draw_sensor_inner_radius()\r\n\r\n # the nodes themselves\r\n self.draw_node_center()\r\n\r\n # coord reported by BSMs\r\n if self.show_bsm_hist.get():\r\n self.draw_bsm_history()\r\n\r\n def draw_inner_connections(self):\r\n\r\n for nt in self.sim.inner_neighbor_tuples:\r\n self.canvas.create_line(self.sim.hm_nodes[nt[0]].x, self.sim.hm_nodes[nt[0]].y,\r\n self.sim.hm_nodes[nt[1]].x, self.sim.hm_nodes[nt[1]].y,\r\n fill=Visualizer.COLOR_LINE_NORMAL, width=4)\r\n\r\n def draw_outer_connections(self):\r\n\r\n for nt in self.sim.outer_neighbor_tuples:\r\n self.canvas.create_line(self.sim.hm_nodes[nt[0]].x, self.sim.hm_nodes[nt[0]].y,\r\n self.sim.hm_nodes[nt[1]].x, self.sim.hm_nodes[nt[1]].y,\r\n fill=Visualizer.COLOR_LINE_NORMAL, width=1)\r\n\r\n def draw_node_history(self):\r\n\r\n for n in self.sim.nodes:\r\n\r\n color = ''\r\n if not self.show_node_type.get():\r\n color = 'black'\r\n elif Node.GOOD == n.type:\r\n color = Visualizer.COLOR_NODE_GOOD\r\n elif Node.FAULTY == n.type:\r\n color = Visualizer.COLOR_NODE_FAULTY\r\n else:\r\n color = Visualizer.COLOR_NODE_MALICIOUS\r\n\r\n for i in range(len(n.past_coord)):\r\n\r\n if 0 == i:\r\n pass\r\n else:\r\n\r\n # coord\r\n self.canvas.create_oval(n.past_coord[i][0]-g.RADIUS_PAST_COORD, n.past_coord[i][1]-g.RADIUS_PAST_COORD,\r\n n.past_coord[i][0]+g.RADIUS_PAST_COORD, n.past_coord[i][1]+g.RADIUS_PAST_COORD,\r\n fill=color, outline='')\r\n\r\n # line\r\n self.canvas.create_line(n.past_coord[i][0], n.past_coord[i][1],\r\n n.past_coord[i-1][0], n.past_coord[i-1][1],\r\n fill=color, width=2)\r\n # line\r\n if len(n.past_coord) > 0:\r\n self.canvas.create_line(n.x, n.y,\r\n n.past_coord[len(n.past_coord)-1][0], n.past_coord[len(n.past_coord)-1][1],\r\n fill=color, width=2)\r\n\r\n def draw_bsm_history(self):\r\n\r\n for n in self.sim.nodes:\r\n\r\n color = ''\r\n if not self.show_node_type.get():\r\n color = 'black'\r\n elif Node.GOOD == n.type:\r\n color = Visualizer.COLOR_BSM_GOOD\r\n elif Node.FAULTY == n.type:\r\n color = Visualizer.COLOR_BSM_FAULTY\r\n else:\r\n color = Visualizer.COLOR_BSM_MALICIOUS\r\n\r\n for i in range(len(n.past_bsm_coord)):\r\n\r\n if 0 == i:\r\n pass\r\n else:\r\n\r\n # coord\r\n 
self.canvas.create_oval(n.past_bsm_coord[i][0] - g.RADIUS_BSM_COORD,\r\n n.past_bsm_coord[i][1] - g.RADIUS_BSM_COORD,\r\n n.past_bsm_coord[i][0] + g.RADIUS_BSM_COORD,\r\n n.past_bsm_coord[i][1] + g.RADIUS_BSM_COORD,\r\n fill=color, outline='')\r\n\r\n # line\r\n self.canvas.create_line(n.past_bsm_coord[i][0], n.past_bsm_coord[i][1],\r\n n.past_bsm_coord[i - 1][0], n.past_bsm_coord[i - 1][1],\r\n fill=color)\r\n\r\n def draw_sensor_outer_radius(self):\r\n # draw the sensor radius\r\n for n in self.sim.nodes:\r\n self.canvas.create_oval(n.x - g.RADIUS_SENSOR, n.y - g.RADIUS_SENSOR,\r\n n.x + g.RADIUS_SENSOR, n.y + g.RADIUS_SENSOR,\r\n fill='', outline=Visualizer.COLOR_OUTER_RADIUS)\r\n\r\n def draw_sensor_inner_radius(self):\r\n # draw the sensor radius\r\n for n in self.sim.nodes:\r\n self.canvas.create_oval(n.x - g.RADIUS_SENSOR//2, n.y - g.RADIUS_SENSOR//2,\r\n n.x + g.RADIUS_SENSOR//2, n.y + g.RADIUS_SENSOR//2,\r\n fill='', outline=Visualizer.COLOR_INNER_RADIUS)\r\n\r\n def draw_node_center(self):\r\n\r\n for n in self.sim.nodes:\r\n color = ''\r\n if not self.show_node_type.get():\r\n color = 'black'\r\n elif Node.GOOD == n.type:\r\n color = Visualizer.COLOR_NODE_GOOD\r\n elif Node.FAULTY == n.type:\r\n color = Visualizer.COLOR_NODE_FAULTY\r\n else:\r\n color = Visualizer.COLOR_NODE_MALICIOUS\r\n\r\n self.canvas.create_oval(n.x-g.RADIUS_NODE, n.y-g.RADIUS_NODE,\r\n n.x+g.RADIUS_NODE, n.y+g.RADIUS_NODE,\r\n fill=color, outline='')\r\n\r\n ################################################\r\n\r\n # These functions are called when clicking on the checkbuttons\r\n\r\n def draw_no_epoch(self):\r\n self.draw(advance_epoch=False)\r\n\r\n def toggle_on_off(self):\r\n\r\n # the first check button is the auto button\r\n # the second button is the on/off check button\r\n\r\n val = self.on_off.get()\r\n\r\n for f in self.cb[2::]:\r\n\r\n if val:\r\n f.select()\r\n else:\r\n f.deselect()\r\n\r\n self.draw_no_epoch()\r\n\r\n ################################################\r\n\r\n def init_buttons(self):\r\n\r\n # create check buttons\r\n self.cb_auto = tk.Checkbutton(self.root, text=\"Auto\", variable=self.auto,\r\n indicatoron=True, onvalue=True, offvalue=False,\r\n command=self.start, width=8)\r\n\r\n self.cb_on_off = tk.Checkbutton(self.root, text=\"On/Off\", variable=self.on_off,\r\n indicatoron=True, onvalue=True, offvalue=False,\r\n command=self.toggle_on_off, width=8)\r\n\r\n self.cb_show_node_type = tk.Checkbutton(self.root, text=\"Node Type\", variable=self.show_node_type,\r\n indicatoron=True, onvalue=True, offvalue=False,\r\n command=self.draw_no_epoch, width=8)\r\n\r\n self.cb_show_node_hist = tk.Checkbutton(self.root, text=\"Node History\", variable=self.show_node_hist,\r\n indicatoron=True, onvalue=True, offvalue=False,\r\n command=self.draw_no_epoch, width=8)\r\n\r\n self.cb_show_outer_radius = tk.Checkbutton(self.root, text=\"Outer Radius\", variable=self.show_outer_radius,\r\n indicatoron=True, onvalue=True, offvalue=False,\r\n command=self.draw_no_epoch, width=8)\r\n\r\n self.cb_show_inner_radius = tk.Checkbutton(self.root, text=\"Inner Radius\", variable=self.show_inner_radius,\r\n indicatoron=True, onvalue=True, offvalue=False,\r\n command=self.draw_no_epoch, width=8)\r\n\r\n self.cb_show_outer_connections = tk.Checkbutton(self.root, text=\"Outer Connect\",\r\n variable=self.show_outer_connections, indicatoron=True,\r\n onvalue=True, offvalue=False,\r\n command=self.draw_no_epoch, width=8)\r\n\r\n self.cb_show_inner_connections = tk.Checkbutton(self.root, text=\"Inner 
Connect",\r\n                                                        variable=self.show_inner_connections, indicatoron=True,\r\n                                                        onvalue=True, offvalue=False,\r\n                                                        command=self.draw_no_epoch, width=8)\r\n\r\n        self.cb_show_bsm_hist = tk.Checkbutton(self.root, text="BSM History", variable=self.show_bsm_hist,\r\n                                               indicatoron=True, onvalue=True, offvalue=False,\r\n                                               command=self.draw_no_epoch, width=8)\r\n\r\n        # add check buttons to list\r\n        self.cb.append(self.cb_auto)\r\n        self.cb.append(self.cb_on_off)\r\n        self.cb.append(self.cb_show_node_type)\r\n        self.cb.append(self.cb_show_node_hist)\r\n        self.cb.append(self.cb_show_outer_radius)\r\n        self.cb.append(self.cb_show_inner_radius)\r\n        self.cb.append(self.cb_show_outer_connections)\r\n        self.cb.append(self.cb_show_inner_connections)\r\n        self.cb.append(self.cb_show_bsm_hist)\r\n\r\n        # place check buttons on the screen\r\n        side_pad = 0\r\n        top_pad = 5\r\n\r\n        columns_per_row = math.ceil(len(self.cb)/conf.TOOLBAR_NUM_LEVELS)\r\n        fair_share = conf.CANVAS_WIDTH // columns_per_row\r\n\r\n        level = 0\r\n        for i in range(len(self.cb)):\r\n\r\n            if i != 0 and i % columns_per_row == 0:\r\n                level += 1\r\n\r\n            self.cb[i].place(x=(fair_share * (i % columns_per_row)) + side_pad,\r\n                             y=conf.CANVAS_HEIGHT + conf.TOOLBAR_LEVEL_HEIGHT * level + top_pad,\r\n                             width=fair_share - side_pad)\r\n","repo_name":"CoderTypist/V2V-ML","sub_path":"v2vml/visualizer/visualizer.py","file_name":"visualizer.py","file_ext":"py","file_size_in_byte":15213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"31889543386","text":"from abc import ABC, abstractmethod\nimport asyncio\nimport json\nimport time\n\nfrom aiohttp import web, ClientSession, ClientConnectionError\n\nfrom logs import logging\nfrom settings import DEBUG_MODE_ON_COMMANDS as ON_COMMANDS, RATES_BASE_URL, SERVER_PORT, INFORM_PERIODICITY\n\n\nclass AbstactBankAccont(ABC):\n\n    @abstractmethod\n    def print_data(self):\n        return None\n\n    def __init__(self, args):\n        self.is_debug_mode = self.debug_mode_evaluate(args)\n        self.rate_update_periodicity = args["period"]\n        self.database = self.database_creation(args)\n        self.foreign_currency_names = self.get_foreign_currency()\n        self.message_queue = asyncio.Queue()\n\n    # init part\n    def debug_mode_evaluate(self, command_text):\n        """This method checks whether debug_mode was given on the console (not None) and whether\n        its value turns it on, i.e. the command text is in ON_COMMANDS ("1", "true", "True", "y", "Y")\n        params: command_text: dict\n        :return bool\n        """\n        self.is_debug_mode = True if command_text["debug_mode"] is not None and \\n                                     command_text["debug_mode"] in ON_COMMANDS \\n            else False\n        return self.is_debug_mode\n
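    # Illustration (added note, not in the original): with ON_COMMANDS = ('1', 'true', 'True', 'y', 'Y'),\n    # debug_mode_evaluate({'debug_mode': 'y'}) returns True, while\n    # debug_mode_evaluate({'debug_mode': None}) returns False.\n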
\n    def database_creation(self, args):\n        """This method extracts the currency names given in the launch params\n        and fills the database dict with each one's starting balance; currencies whose value is None are skipped.\n        :param args: dict\n        :return: dict {currency_name: {"balance": int}}\n        """\n        self.database = {}\n        for currency in args:\n            if currency not in ("period", "debug_mode") and args[currency] is not None:\n                self.database[currency] = {"balance": int(args[currency])}\n        return self.database\n\n    def get_foreign_currency(self):\n        """This method extracts the foreign currencies from those given on the command line\n        :return tuple\n        """\n        foreign_currency_tuple = tuple(item for item in self.database if item != "rub")\n        return foreign_currency_tuple\n\n    # business_logic part\n    async def get_currency_rates(self, data):\n        """This method selects the rates for the tracked currencies and updates the data in the DB.\n        It also detects whether a rate has changed and, if so, puts a message type in self.message_queue\n        :param data: dict with all currencies from the response\n        :return: the updated database dict\n        """\n        is_rate_changed = False\n        message_type = 1\n        for currency in self.foreign_currency_names:\n            try:\n                if "rate" in self.database[currency] and \\n                        self.database[currency]["rate"] != float(data[currency.upper()]["Value"]):\n                    is_rate_changed = True\n                self.database[currency]["rate"] = float(data[currency.upper()]["Value"])\n            except (KeyError, TypeError):\n                logging.error("key not found")\n        if is_rate_changed:\n            await self.message_queue.put(message_type)\n        return self.database\n\n    async def get_total_amount_message(self):\n        """This method calculates the total amount of money across all used currencies and creates a message text"""\n        rub_total = self.database["rub"]["balance"]\n        for currency in self.foreign_currency_names:\n            rub_total += self.database[currency]["rate"] * self.database[currency]["balance"]\n        message = f"\n sum {rub_total:.2f} rub /"\n        for currency in self.foreign_currency_names:\n            message += f" {rub_total / self.database[currency]['rate']:.2f} {currency} /"\n        return message\n\n    async def create_balance_message(self):\n        """This method creates a message about the current balance"""\n        message = ""\n        for currency in self.database:\n            message += f"{currency}: {self.database[currency]['balance']} \n"\n        return message\n\n    async def get_currency_ratio(self, currency_1, currency_2):\n        """This method calculates the ratio between two foreign currencies (usd-eur, etc.)\n        and creates a message"""\n        rate_1 = self.database[currency_1]["rate"]\n        rate_2 = self.database[currency_2]["rate"]\n        is_rate_1_max = True if rate_1 > rate_2 else False\n        ratio = rate_1 / rate_2 if is_rate_1_max else rate_2 / rate_1\n        if is_rate_1_max:\n            message = f"\n {currency_2}-{currency_1}: {ratio:.2f} \n"\n        else:\n            message = f"\n{currency_1}-{currency_2}: {ratio:.2f} \n"\n        return message\n\n    async def create_rate_message(self):\n        """This method creates a message about the current rates"""\n        currency_1, currency_2 = self.foreign_currency_names\n        message = await self.get_currency_ratio(currency_1, currency_2)\n        for currency in self.foreign_currency_names:\n            message += f"rub-{currency}: {self.database[currency]['rate']:.1f} \n"\n        return message\n\n    async def create_message(self):\n        """This method combines the final message for the total amount handler"""\n        balance_message = await self.create_balance_message()\n        total_amount_message = await self.get_total_amount_message()\n        ratio_message = await self.create_rate_message()\n        total_message = balance_message + ratio_message + total_amount_message\n        return total_message\n
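    # Illustration (added note, not in the original): with hypothetical rates usd=90.0\n    # and eur=100.0 rub, get_currency_ratio('usd', 'eur') yields 'usd-eur: 1.11':\n    # the stronger currency's rate is divided by the weaker one's.\n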
\r\n    # views\r\n    async def currency_balance_handler(self, request):\r\n        """This method handles the currency balance request.\r\n        The currency name is taken from the request path."""\r\n        currency = request.match_info.get("currency_name", "")\r\n        try:\r\n            current_balance = self.database[currency]["balance"]\r\n            message = f"{currency}: {current_balance}"\r\n        except KeyError:\r\n            message = f"{currency} is not used in the system"\r\n        if self.is_debug_mode:\r\n            print(request)\r\n        return web.Response(text=message, content_type="text/plain")\r\n\r\n    async def total_amount_handler(self, request):\r\n        """This method informs about:\r\n        - the total amount of money in rub and each foreign currency, calculated at the current\r\n        rate,\r\n        - the current balance and rate of every currency"""\r\n        message = await self.create_message()\r\n        if not self.is_debug_mode:\r\n            print(message)\r\n        else:\r\n            print(request)\r\n        return web.Response(text=message, content_type="text/plain")\r\n\r\n    async def set_amount_handler(self, request):\r\n        """This method handles the set request"""\r\n        message_type = 2\r\n        is_balance_has_changed = False\r\n        request_body = await request.json()\r\n        try:\r\n            for currency in request_body:\r\n                if self.database[currency]["balance"] != float(request_body[currency]):\r\n                    is_balance_has_changed = True\r\n                    self.database[currency]["balance"] = float(request_body[currency])\r\n            if is_balance_has_changed:\r\n                await self.message_queue.put(message_type)\r\n            message = "Data updated successfully"\r\n        except KeyError:\r\n            message = f"{currency} is not supported by the system"\r\n        if self.is_debug_mode:\r\n            print(request)\r\n        return web.Response(text=message, content_type="text/plain")\r\n
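    # Illustration (added note, not in the original): with the routes registered in\r\n    # setup_routes below, a client could set balances with, e.g.,\r\n    #   POST /amount/set    body: {'usd': 10, 'eur': 20}\r\n    # and adjust them incrementally with\r\n    #   POST /amount/modify body: {'usd': 5}\r\n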
\r\n    async def modify_handler(self, request):\r\n        """This method handles the modify request"""\r\n        message_type = 3\r\n        try:\r\n            request_body = await request.json()\r\n            for currency in request_body:\r\n                self.database[currency]["balance"] += float(request_body[currency])\r\n            await self.message_queue.put(message_type)\r\n            message = "Data updated successfully"\r\n        except KeyError:\r\n            message = f"{currency} is not supported by the system"\r\n        if self.is_debug_mode:\r\n            print(request)\r\n        return web.Response(text=message, content_type="text/plain")\r\n\r\n    # app part\r\n    def create_server(self):\r\n        """This method creates an instance of the web server application and binds routes\r\n        :return: app instance\r\n        """\r\n        app = web.Application()\r\n        self.setup_routes(app)\r\n        return app\r\n\r\n    def setup_routes(self, app):\r\n        """This method defines endpoints and binds them with handlers"""\r\n        app.add_routes([web.get("/amount/get", self.total_amount_handler),\r\n                        web.get(r"/{currency_name}/get", self.currency_balance_handler),\r\n                        web.post("/amount/set", self.set_amount_handler),\r\n                        web.post("/amount/modify", self.modify_handler)\r\n                        ])\r\n\r\n    async def create_tasks(self, app):\r\n        """This method launches the server runner and 2 endless background tasks:\r\n        - the currency rate update task\r\n        - sending messages about changes of rate or balance every 60 seconds\r\n        """\r\n        runner = web.AppRunner(app)\r\n        await runner.setup()\r\n        site = web.TCPSite(runner, host='127.0.0.1', port=SERVER_PORT)\r\n        await site.start()\r\n        if not self.is_debug_mode:\r\n            print("Application started successfully")\r\n        logging.debug("Application started successfully")\r\n        changes_informer = asyncio.create_task(self.changes_informer())\r\n        data_request = asyncio.create_task(self.rate_update())\r\n        await asyncio.gather(changes_informer, data_request)\r\n\r\n    async def changes_informer(self):\r\n        """This method checks whether there were any changes, creates a message and sends one of them every 60\r\n        seconds"""\r\n        while True:\r\n            if not self.message_queue.empty():\r\n                await self.message_queue.get()\r\n                message = await self.create_message()\r\n                print(message)\r\n            await asyncio.sleep(INFORM_PERIODICITY)\r\n\r\n    async def rate_update(self):\r\n        """This method is a periodic task to update currency rates and write them to the database."""\r\n        while True:\r\n            session = ClientSession()\r\n            currency_rates = await self.make_request(session)\r\n            if currency_rates is not None:\r\n                self.database = await self.get_currency_rates(currency_rates)\r\n                print("Currency rates fetched successfully")\r\n            await asyncio.sleep(self.rate_update_periodicity)\r\n\r\n    async def make_request(self, session):\r\n        """This method loads json data about all currencies"""\r\n        try:\r\n            async with session.get(RATES_BASE_URL) as response:\r\n                if response.status == 200:\r\n                    responce_data = await response.text()\r\n                    responce_data_dict = json.loads(responce_data)\r\n                    if self.is_debug_mode:\r\n                        print(response)\r\n                else:\r\n                    logging.error("Received error status code: %s", response.status)\r\n                    await session.close()\r\n                    return None\r\n            await session.close()\r\n            return responce_data_dict["Valute"]\r\n        except ClientConnectionError as error:\r\n            logging.error(error)\r\n\r\n    def start_server(self):\r\n        app = self.create_server()\r\n        loop = asyncio.get_event_loop()\r\n        loop.run_until_complete(self.create_tasks(app))\r\n        loop.run_forever()\r\n","repo_name":"islamariya/unicorn","sub_path":"abstact_class.py","file_name":"abstact_class.py","file_ext":"py","file_size_in_byte":11278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"26032627821","text":"from selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nfrom selenium import webdriver \n\nfrom file_functions import create_folder, save_json, download_img\nfrom convert_data import convert\n\nimport pandas as pd\nimport time\n\ndef wait(func): # Wait wrapper: sleeps for 2 seconds after a function is completed to make sure website does not think I am a bot\n    '''\n    This function is a wrapper to sleep for 2 seconds after the function is called - so that the website does not suspect a bot.\n\n    Args:\n        func (function): the function to be wrapped\n\n    Returns:\n        wrapper (function): wrapped function\n    '''\n    def wrapper(*args, **kwargs):\n        output = func(*args, **kwargs)\n        time.sleep(2)\n        return output\n    return wrapper\n\ndef dict_append(base, dict_to_add):\n    # merge the column-oriented dict_to_add into base, padding missing values with 'N/A'\n    if len(base) == 0:\n        for key in dict_to_add:\n            if dict_to_add[key][0] is None:\n                dict_to_add[key] = ['N/A']\n        return dict_to_add\n    elif len(dict_to_add) == 0:\n        return base\n    else:\n        for key in base:\n            if dict_to_add[key][0] is None:\n                base[key] += ['N/A']\n            else:\n                base[key] += dict_to_add[key]\n        return base\n
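# Illustration (added note, not in the original): dict_append merges column-oriented dicts:\n# dict_append({'id': [1], 'ticker': ['AAPL']}, {'id': [2], 'ticker': [None]})\n# -> {'id': [1, 2], 'ticker': ['AAPL', 'N/A']}\n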
\nclass Scraper:\n    '''\n    This class is used to represent a yahoo finance scraping tool.\n\n    Attributes:\n        driver (selenium.webdriver): a driver to drive the selenium scraping in a browser.\n        landing_page (bool): true when the browser is on the landing page.\n        img_id (int): id for naming image file names.\n        first_search (bool): true if this is the first search and therefore the browser should check for the cookies banner and other banner.\n    '''\n    # Constructor\n    def __init__(self):\n        '''\n        See help(Scraper) for accurate signature\n        '''\n        options = Options()\n        options.add_argument('--no-sandbox')\n        options.add_argument('--disable-dev-shm-usage') \n        options.headless = True\n        self.driver = webdriver.Chrome(options=options)\n        self.landing_page = True\n        self.img_id = 0\n        self.first_search = True\n\n    # Methods\n    @wait\n    def look_at(self, url):\n        '''\n        This function is used to look at a certain url using a selenium driver.\n\n        Args:\n            url (str): the url to look at.\n        '''\n        self.driver.get(url)\n        delay = 5\n        if self.first_search:\n            try:\n                WebDriverWait(self.driver, delay).until(EC.presence_of_element_located((By.XPATH, '//div[@class="con-wizard"]')))\n                accept_cookies_button = WebDriverWait(self.driver, delay).until(EC.presence_of_element_located((By.XPATH, '//button[@class="btn primary"]')))\n                accept_cookies_button.click()\n                try:\n                    WebDriverWait(self.driver, delay).until(EC.presence_of_element_located((By.XPATH, '//button[@aria-label="Close"]')))\n                    accept_cookies_button = WebDriverWait(self.driver, delay).until(EC.presence_of_element_located((By.XPATH, '//button[@aria-label="Close"]')))\n                    accept_cookies_button.click()\n                except TimeoutException:\n                    print("Loading took too much time!")\n\n            except TimeoutException:\n                print("Loading took too much time!")\n\n            self.first_search = False\n\n    @wait\n    def search(self, string):\n        '''\n        This function is used to search something in the search bar.\n\n        Args:\n            string (str): the string to be searched.\n        '''\n        search_bar = self.driver.find_element(by=By.XPATH, value='//*[@id="yfin-usr-qry"]')\n        search_bar.click()\n        search_bar.send_keys(string)\n        search_bar.send_keys(Keys.RETURN)\n        self.landing_page = False\n\n    @wait\n    def scroll_bottom(self):\n        '''\n        This function is used to scroll to the bottom of the page.\n        '''\n        self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")\n\n    @wait\n    def scroll(self, pixels):\n        '''\n        This function is used to scroll by a specific number of pixels.\n\n        Args:\n            pixels (int): the number of pixels to scroll down by.\n        '''\n        self.driver.execute_script(f"window.scrollBy(0,{pixels});")\n\n    def ticker_to_link(self, tickers):\n        '''\n        This function is used to turn a list of ticker symbols into a list of corresponding yahoo finance (url, ticker) pairs.\n\n        Args:\n            tickers (list[str]): the list of tickers\n\n        Returns:\n            links (list): the list of (url, ticker) tuples\n        '''\n        links = []\n        for ticker in tickers:\n            links.append((f'https://finance.yahoo.com/quote/{ticker}/', ticker))\n        return links\n
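    # Illustration (added note, not in the original):\n    # Scraper().ticker_to_link(['AAPL']) -> [('https://finance.yahoo.com/quote/AAPL/', 'AAPL')]\n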
\n    def __extract_statistics_page(self):\n        '''\n        This function is used to extract the data from the statistics page of an equity.\n\n        Returns:\n            data_dict (dictionary): the dictionary of data containing the statistics from the page\n        '''\n        # Whole table\n        table_section = self.driver.find_element(by=By.XPATH, value='//section[@data-yaft-module="tdv2-applet-KeyStatistics"]')\n        table_element = table_section.find_elements(by=By.XPATH, value='./div')[-1]\n        sub_table_elements = table_element.find_elements(by=By.XPATH, value='./div')\n\n        valuation_measures = sub_table_elements[0]\n        valuation_measure_rows = valuation_measures.find_elements(by=By.XPATH, value='.//tr')\n        trading_information = sub_table_elements[1]\n        trading_information_rows = trading_information.find_elements(by=By.XPATH, value='.//tr')\n        financial_highlights = sub_table_elements[2]\n        financial_highlight_rows = financial_highlights.find_elements(by=By.XPATH, value='.//tr')\n\n        # Valuation measures\n        market_cap = valuation_measure_rows[0].find_elements(by=By.XPATH, value='.//td')[1].text\n        market_cap = convert(market_cap)\n        trailing_pe = valuation_measure_rows[2].find_elements(by=By.XPATH, value='.//td')[1].text\n        trailing_pe = convert(trailing_pe)\n        forward_pe = valuation_measure_rows[3].find_elements(by=By.XPATH, value='.//td')[1].text\n        forward_pe = convert(forward_pe)\n        trailing_ps = valuation_measure_rows[5].find_elements(by=By.XPATH, value='.//td')[1].text\n        trailing_ps = convert(trailing_ps)\n\n        # Financial highlights\n        profit_margin = financial_highlight_rows[2].find_elements(by=By.XPATH, value='.//td')[1].text\n        profit_margin = convert(profit_margin)\n        return_on_assets = financial_highlight_rows[4].find_elements(by=By.XPATH, value='.//td')[1].text\n        return_on_assets = convert(return_on_assets)\n        ebitda = financial_highlight_rows[10].find_elements(by=By.XPATH, value='.//td')[1].text\n        ebitda = convert(ebitda)\n        current_ratio = financial_highlight_rows[-4].find_elements(by=By.XPATH, value='.//td')[1].text\n        current_ratio = convert(current_ratio)\n\n        # Trading information\n        short_ratio = trading_information_rows[15].find_elements(by=By.XPATH, value='.//td')[1].text\n        short_ratio = convert(short_ratio)\n        \n        # Collate data into dictionary\n        data_dict = {'market_cap': [market_cap], \n                     'trailing_pe': [trailing_pe], \n                     'forward_pe': [forward_pe],\n                     'trailing_ps': [trailing_ps],\n                     'profit_margin': [profit_margin],\n                     'return_on_assets': [return_on_assets],\n                     'ebitda': [ebitda],\n                     'current_ratio': [current_ratio],\n                     'short_ratio': [short_ratio]}\n\n        return data_dict\n\n    def __extract_summary_page(self):\n        '''\n        This function is used to extract data from the summary page.\n\n        Returns:\n            data_dict (dictionary): the dictionary of data from the summary page\n        '''\n        target_estimate = self.driver.find_element(by=By.XPATH, value='//td[@data-test="ONE_YEAR_TARGET_PRICE-value"]').text\n        previous_close = self.driver.find_element(by=By.XPATH, value='//td[@data-test="PREV_CLOSE-value"]').text\n        data_dict = {'previous_close': [float(previous_close)], 'target_estimate': [float(target_estimate)]}\n        return data_dict\n\n    def __extract_data_ticker(self, link):\n        '''\n        This function is used to extract the data for the specific ticker.\n\n        Args:\n            link (tuple): the (url, ticker) pair for the specific ticker\n\n        Returns:\n            data (dictionary): the dictionary of all data for one ticker\n        '''\n        try:\n            # Increment id to ensure each ticker has its own unique id\n            self.id += 1\n\n            # Initialise a data dictionary with the specific id for this ticker\n            data = {'id': [self.id], 'ticker': [link[1]]}\n\n            # Visit the tickers yahoo finance page \n            self.look_at(link[0])\n\n            # Extract data from the summary page\n            data_summary = self.__extract_summary_page()\n            data = data | data_summary\n\n            # Move to the statistics page by clicking on the button\n            self.driver.find_element(by=By.XPATH, value='//li[@data-test="STATISTICS"]').click()\n\n            # Wait one second so that the website does not suspect a bot\n            time.sleep(1)\n\n            # Extract data from the statistics page\n            data_statistics = self.__extract_statistics_page()\n            data = data | data_statistics\n\n            return data\n        except Exception:\n            return {}\n\n    def extract_all_data(self, tickers):\n        '''\n        This function is used to extract the data for all tickers and save it to a json file for each ticker.\n\n        Args:\n            tickers (list[str]): the list of tickers\n        '''\n        # Collect a list of links to the tickers to visit\n        list_of_links = self.ticker_to_link(tickers)\n\n        # Initialise the ids of the data rows which will then be returned\n        self.id = 0\n\n        # Create a raw data folder to store data\n        folder_path = create_folder('raw_data')\n\n        # Loop through the list of links and extract the data needed\n        data_dictionary = dict()\n\n        for link in list_of_links:\n            data = 
self.__extract_data_ticker(link)\n            data_dictionary = dict_append(data_dictionary, data)\n            save_json(data, link[1], folder_path) # Save file in raw_data folder\n\n        self.all_data_dict = data_dictionary\n        save_json(data_dictionary, 'all_data', folder_path)\n\n        return data_dictionary\n\n    def extract_all_data_letter(self):\n        '''\n        This function is used to extract the data for all tickers on the nasdaq starting with a given letter and save it to a json file for each ticker; the letter is read from stdin.\n        '''\n        letter = input('Please enter a single letter: ')\n\n        if letter.isalpha() and len(letter) == 1:\n            \n            df_nasdaq = pd.read_csv('nasdaq_tickers.csv').dropna(axis=0)\n            tickers = [tick for tick in df_nasdaq['Symbol'].to_list() if tick[0] == letter.upper()]\n\n            # Collect a list of links to the tickers to visit\n            list_of_links = self.ticker_to_link(tickers)\n\n            # Initialise the ids of the data rows which will then be returned\n            self.id = 0\n\n            # Create a raw data folder to store data\n            folder_path = create_folder('raw_data')\n\n            # Loop through the list of links and extract the data needed\n            data_dictionary = dict()\n\n            for link in list_of_links:\n                data = self.__extract_data_ticker(link)\n                data_dictionary = dict_append(data_dictionary, data)\n                save_json(data, link[1], folder_path) # Save file in raw_data folder\n\n            self.all_data_dict = data_dictionary\n            save_json(data_dictionary, 'all_data', folder_path)\n\n            return data_dictionary\n        else:\n            return self.extract_all_data_letter()\n\n    def extract_logo(self):\n        '''\n        This function is used to extract the yahoo finance logo and download it.\n        '''\n        logo_element = self.driver.find_element(by=By.XPATH, value='//a[@id="header-logo"]')\n        link_long = logo_element.get_attribute('style')\n        index_start = link_long.find('url(')\n        link_img = link_long[index_start+4:-2]\n        self.img_id += 1\n        download_img(link_img, self.img_id)\n\n    def end_session(self):\n        '''\n        This function is used to end the session.\n        '''\n        self.driver.quit()\n\nif __name__ == "__main__":\n    \n    yahoo_finance = Scraper()\n    data = yahoo_finance.extract_all_data_letter()\n    df = pd.DataFrame.from_dict(data)\n    print(df.head())\n    yahoo_finance.end_session()","repo_name":"eharaldson/data-collection-pipeline","sub_path":"yahoo_scraper.py","file_name":"yahoo_scraper.py","file_ext":"py","file_size_in_byte":13167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"14378923735","text":"\"\"\"\nTest cases for odin.http.routes.default DefaultRoute class.\n\nTim Nicholls, STFC Application Engineering\n\"\"\"\nimport logging\nimport re\n\nimport pytest\n\nfrom odin.http.routes.default import DefaultRoute\nfrom tests.utils import log_message_seen\n\nclass TestDefaultRoute():\n    \"\"\"Test DefaultRoute class.\"\"\"\n\n    def test_default_route_relative_path(self):\n        \"\"\"Test DefaultRoute resolves relative path for static content.\"\"\"\n        path = '.'\n        def_route = DefaultRoute(path)\n        #assert_regexp_matches(def_route.default_handler_args['path'], '.')\n        assert path in def_route.default_handler_args['path']\n\n    def test_default_route_absolute_path(self):\n        \"\"\"Test DefaultRoute treats absolute path for static content correctly.\"\"\"\n        path = '/absolute/path'\n        def_route = DefaultRoute(path)\n        assert path in def_route.default_handler_args['path']\n\n    def test_default_route_bad_path(self, caplog):\n        \"\"\"Test DefaultRoute logs warning about bad path to static content.\"\"\"\n        path = '../missing_path'\n        def_route = DefaultRoute(path)\n        
assert path in def_route.default_handler_args['path']\n\n        assert log_message_seen(caplog, logging.WARNING,\n                                'Default handler static path does not exist')","repo_name":"odin-detector/odin-control","sub_path":"tests/routes/test_default.py","file_name":"test_default.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"}
{"seq_id":"13205879300","text":"from __future__ import print_function, division\n\nimport copy\n\nfrom Point1 import Point, Rectangle, print_point\nfrom Point1_soln import distance_between_points\n\n\nclass Circle:\n    \"\"\"Represents a circle.\n\n    Attributes: center, radius\n    \"\"\"\n\n\ndef point_in_circle(point, circle):\n    \"\"\"Checks whether a point lies inside a circle (or on the boundary).\n\n    point: Point object\n    circle: Circle object\n    \"\"\"\n    d = distance_between_points(point, circle.center)\n    print(d)\n    return d <= circle.radius\n\n\ndef rect_in_circle(rect, circle):\n    \"\"\"Checks whether the corners of a rect fall in/on a circle.\n\n    rect: Rectangle object\n    circle: Circle object\n    \"\"\"\n    p = copy.copy(rect.corner)\n    print_point(p)\n    if not point_in_circle(p, circle):\n        return False\n\n    p.x += rect.width\n    print_point(p)\n    if not point_in_circle(p, circle):\n        return False\n\n    p.y -= rect.height\n    print_point(p)\n    if not point_in_circle(p, circle):\n        return False\n\n    p.x -= rect.width\n    print_point(p)\n    if not point_in_circle(p, circle):\n        return False\n\n    return True\n\n\ndef rect_circle_overlap(rect, circle):\n    \"\"\"Checks whether any corners of a rect fall in/on a circle.\n\n    rect: Rectangle object\n    circle: Circle object\n    \"\"\"\n    p = copy.copy(rect.corner)\n    print_point(p)\n    if point_in_circle(p, circle):\n        return True\n\n    p.x += rect.width\n    print_point(p)\n    if point_in_circle(p, circle):\n        return True\n\n    p.y -= rect.height\n    print_point(p)\n    if point_in_circle(p, circle):\n        return True\n\n    p.x -= rect.width\n    print_point(p)\n    if point_in_circle(p, circle):\n        return True\n\n    return False\n
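# Illustration (added note, not in the original): rect_in_circle requires all four\n# corners to fall in/on the circle, while rect_circle_overlap returns True as soon\n# as any single corner does; both walk the corners in the same order.\n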
\ndef main():\n    box = Rectangle()\n    box.width = 100.0\n    box.height = 200.0\n    box.corner = Point()\n    box.corner.x = 50.0\n    box.corner.y = 50.0\n\n    print(box.corner.x)\n    print(box.corner.y)\n\n    circle = Circle()  # instantiate the class (the original assigned the class object itself)\n    circle.center = Point()\n    circle.center.x = 150.0\n    circle.center.y = 100.0\n    circle.radius = 75.0\n\n    print(circle.center.x)\n    print(circle.center.y)\n    print(circle.radius)\n\n    print(point_in_circle(box.corner, circle))\n    print(rect_in_circle(box, circle))\n    print(rect_circle_overlap(box, circle))\n\n\nif __name__ == '__main__':\n    main()\n\n","repo_name":"AllenDowney/ThinkPython2","sub_path":"code/Circle.py","file_name":"Circle.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"en","doc_type":"code","stars":2385,"dataset":"github-code","pt":"81"}
{"seq_id":"74732126346","text":"#!/usr/bin/env python3\r\n\r\nimport psycopg2\r\n\r\nDBNAME = "news"\r\n\r\n\r\ndef report_most_popular_articles_ever():\r\n    """ Reports the most popular articles """\r\n    db = psycopg2.connect(database=DBNAME)\r\n    print("What are the most popular three articles of all time?")\r\n    c = db.cursor()\r\n    c.execute("SELECT * FROM most_popular_articles_ever ORDER BY count DESC;")\r\n    results = c.fetchall()\r\n    db.close()\r\n\r\n    for result in results:\r\n        print("'%s' — %s views" % (result[1], result[0]))\r\n\r\n    print("\n")\r\n\r\n\r\ndef report_most_popular_authors_ever():\r\n    """ Reports the most popular authors """\r\n    db = psycopg2.connect(database=DBNAME)\r\n    print("Who are the most popular article authors of all time?")\r\n    c = db.cursor()\r\n    c.execute("SELECT * FROM most_popular_authors ORDER BY count DESC;")\r\n    results = c.fetchall()\r\n    db.close()\r\n\r\n    for result in results:\r\n        print("%s — %s views" % (result[0], result[1]))\r\n\r\n    print("\n")\r\n\r\n\r\ndef report_request_errors():\r\n    """ Reports the days with more than 1% error rate """\r\n    db = psycopg2.connect(database=DBNAME)\r\n    print("On which days did more than 1% of requests lead to errors?")\r\n    c = db.cursor()\r\n    c.execute("SELECT * FROM requests_error ORDER BY day;")\r\n    results = c.fetchall()\r\n    db.close()\r\n\r\n    for result in results:\r\n        print("%s — %s%% errors" % (result[0], result[1]))\r\n\r\n    print("\n")\r\n\r\n\r\nif __name__ == '__main__':\r\n    print("##### Log-Analysis-Tool #####")\r\n    report_most_popular_articles_ever()\r\n    report_most_popular_authors_ever()\r\n    report_request_errors()\r\n","repo_name":"dkarnutsch/ud-log-analysis","sub_path":"log_report.py","file_name":"log_report.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"8931369682","text":"from os import listdir\r\nfrom os.path import isfile, isdir, join\r\nimport glob\r\nimport re\r\n\r\n\r\n# TODO: this could be improved simply: instead of worrying about hasParam or hasReturn, just look at the function's type and search for it on the line (once or twice?)\r\n\r\nclass MaskFunction(object):\r\n\tname = ""\r\n\ttype = "" #request, running, alarm, interrupt\r\n\thasReturn = 0 #yes if it has uint32_t/EventBits_t as return value\r\n\thasParam = 0 #yes if it has uint32_t/EventBits as param\r\n\t\r\n\tdef __init__ (self, name, type, hasReturn, hasParam):\r\n\t\tself.name = name\r\n\t\tself.type = type\r\n\t\tself.hasReturn = hasReturn\r\n\t\tself.hasParam = hasParam\r\n\t\t\r\n\tdef __repr__ (self):\r\n\t\treturn "Name : {} - Type : {} - HasReturn : {} - HasParam : {}\r\n".format(self.name, self.type, self.hasReturn, self.hasParam)\r\n\t\t\r\ndef parseMaskFunctions(maskFunctionList):\r\n\tfileName = "functionList.txt"\r\n\twith open(fileName, 'rt', encoding="utf-8", errors='replace') as input:\r\n\t\tfor line in input:\r\n\t\t\thasReturn = 0\r\n\t\t\thasParam = 0\r\n\t\t\tspaceSplit = line.split(' ')\r\n\t\t\t#print(spaceSplit)\r\n\t\t\tif spaceSplit[0] == 'EventBits_t' or spaceSplit[0] == 'uint32_t' :\r\n\t\t\t\thasReturn = 1\r\n\t\t\tparenthesisSplit = spaceSplit[1].replace('(',')').split(')')\r\n\t\t\t#print(parenthesisSplit)\r\n\t\t\tname = parenthesisSplit[0]\r\n\t\t\ttype=''\r\n\t\t\tif name.lower().find('running') > -1:\r\n\t\t\t\ttype = 'running'\r\n\t\t\tif name.lower().find('request') > -1:\r\n\t\t\t\ttype = 'request'\r\n\t\t\tif name.lower().find('alarm') > -1:\r\n\t\t\t\ttype = 'alarm'\r\n\t\t\tif name.lower().find('interrupt') > -1:\r\n\t\t\t\ttype = 'interrupt'\r\n\t\t\tif parenthesisSplit[1] == 'EventBits_t' or parenthesisSplit[1] == 'uint32_t' :\r\n\t\t\t\thasParam = 1\r\n\t\t\tmaskFunction = MaskFunction(name, type, hasReturn, hasParam)\r\n\t\t\tmaskFunctionList.append(maskFunction)\r\n\r\ndef getReturnType(line):\r\n\tequalSplit = line.split("=")\r\n\tif len(equalSplit) > 1:\r\n\t\tif equalSplit[0].lower().find('running') > -1:\r\n\t\t\treturn 'running'\r\n\t\tif equalSplit[0].lower().find('request') > -1:\r\n\t\t\treturn 'request'\r\n\t\tif equalSplit[0].lower().find('alarm') > -1:\r\n\t\t\treturn 'alarm'\r\n\t\tif equalSplit[0].lower().find('interrupt') > -1:\r\n\t\t\treturn 'interrupt'\r\n\treturn ''\r\n\r\n\r\ndef getParamType(line):\r\n\tequalSplit = line.split("=")\r\n\tequalSplitIndex = 0\r\n\tif len(equalSplit) > 
1:\n\t\tequalSplitIndex = 1\n\tparenthesisSplit = equalSplit[equalSplitIndex].replace('(',')').split(')')\n\tif len(parenthesisSplit) > 1:\n\t\t#print(parenthesisSplit[1])\n\t\tif parenthesisSplit[1].lower().find('running') > -1:\n\t\t\treturn 'running'\n\t\tif parenthesisSplit[1].lower().find('request') > -1:\n\t\t\treturn 'request'\n\t\tif parenthesisSplit[1].lower().find('alarm') > -1:\n\t\t\treturn 'alarm'\n\t\tif parenthesisSplit[1].lower().find('interrupt') > -1:\n\t\t\treturn 'interrupt'\n\treturn ''\n\t\ndef checkMaskFunctionUse(maskFunctionList, pathFiles):\n\terrors = 0\n\tfor file in pathFiles:\n\t\tif file.find(\".c\") > -1 :\n\t\t\twith open(file, 'rt', encoding=\"utf-8\", errors='replace') as input:\n\t\t\t\tlineNum = 0\n\t\t\t\tfor line in input:\n\t\t\t\t\tlineNum = lineNum + 1\n\t\t\t\t\tfor maskFunction in maskFunctionList:\n\t\t\t\t\t\tif line.find(maskFunction.name) > -1:\n\t\t\t\t\t\t\tif maskFunction.hasReturn:\n\t\t\t\t\t\t\t\treturnType = getReturnType(line)\n\t\t\t\t\t\t\t\tif returnType != maskFunction.type and returnType != \"\":\n\t\t\t\t\t\t\t\t\terrors = errors + 1\n\t\t\t\t\t\t\t\t\tprint(\"Error : returning {}, expected {} - file {}, line {} : {}\\r\\n\".format(returnType, maskFunction.type, file, lineNum, line))\n\t\t\t\t\t\t\tif maskFunction.hasParam:\n\t\t\t\t\t\t\t\tparamType = getParamType(line)\n\t\t\t\t\t\t\t\tif paramType != maskFunction.type and paramType != \"\":\n\t\t\t\t\t\t\t\t\terrors = errors + 1\n\t\t\t\t\t\t\t\t\tprint(\"Error : param {}, expected {} - file {}, line {} : {}\\r\\n\".format(paramType, maskFunction.type, file, lineNum, line))\n\treturn errors\n\t\t\t\n\t\t\n\t\t\nmypath = \"../tracker2\"\ndirectories = listdir(mypath)\npathfiles = []\n#print(directories)\nfor directory in directories:\n\tdirpath = join(mypath,directory)\n\tif(isdir(dirpath)):\n\t\tfiles = listdir(dirpath)\n\t\t#print(files)\n\t\tfor file in files:\n\t\t\tpathfile = join(dirpath, file)\n\t\t\tpathfiles.append(pathfile.replace(\"\\\\\",\"/\"))\n#onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]\n#print(pathfiles)\nmaskFunctionList = []\nparseMaskFunctions(maskFunctionList)\n#print(maskFunctionList)\nerrors = checkMaskFunctionUse(maskFunctionList, pathfiles)\nprint(\"{} errors found.\".format(errors))","repo_name":"StephanArnas/SeekiosEmbeddedGps","sub_path":"mask_analysis/mask_analysis.py","file_name":"mask_analysis.py","file_ext":"py","file_size_in_byte":4253,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"}